repo_name stringlengths 4 116 | path stringlengths 4 379 | size stringlengths 1 7 | content stringlengths 3 1.05M | license stringclasses 15
values |
|---|---|---|---|---|
eayunstack/neutron | neutron/tests/unit/agent/linux/test_utils.py | 22284 | # Copyright 2012, VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import signal
import socket
import mock
import six
import testtools
from oslo_config import cfg
import oslo_i18n
from neutron.agent.linux import utils
from neutron.tests import base
from neutron.tests.common import helpers
_marker = object()
class AgentUtilsExecuteTest(base.BaseTestCase):
def setUp(self):
super(AgentUtilsExecuteTest, self).setUp()
self.test_file = self.get_temp_file_path('test_execute.tmp')
open(self.test_file, 'w').close()
self.process = mock.patch('eventlet.green.subprocess.Popen').start()
self.process.return_value.returncode = 0
self.mock_popen = self.process.return_value.communicate
def test_xenapi_root_helper(self):
token = utils.xenapi_root_helper.ROOT_HELPER_DAEMON_TOKEN
self.config(group='AGENT', root_helper_daemon=token)
with mock.patch(
'neutron.agent.linux.utils.xenapi_root_helper.XenAPIClient')\
as mock_xenapi_class:
mock_client = mock_xenapi_class.return_value
cmd_client = utils.RootwrapDaemonHelper.get_client()
self.assertEqual(cmd_client, mock_client)
def test_without_helper(self):
expected = "%s\n" % self.test_file
self.mock_popen.return_value = [expected, ""]
result = utils.execute(["ls", self.test_file])
self.assertEqual(result, expected)
def test_with_helper(self):
expected = "ls %s\n" % self.test_file
self.mock_popen.return_value = [expected, ""]
self.config(group='AGENT', root_helper='echo')
result = utils.execute(["ls", self.test_file], run_as_root=True)
self.assertEqual(result, expected)
@mock.patch.object(utils.RootwrapDaemonHelper, 'get_client')
def test_with_helper_exception(self, get_client):
client_inst = mock.Mock()
client_inst.execute.side_effect = RuntimeError
get_client.return_value = client_inst
self.config(group='AGENT', root_helper_daemon='echo')
with mock.patch.object(utils, 'LOG') as log:
self.assertRaises(RuntimeError, utils.execute,
['ls'], run_as_root=True)
self.assertTrue(log.error.called)
def test_stderr_true(self):
expected = "%s\n" % self.test_file
self.mock_popen.return_value = [expected, ""]
out = utils.execute(["ls", self.test_file], return_stderr=True)
self.assertIsInstance(out, tuple)
self.assertEqual(out, (expected, ""))
def test_check_exit_code(self):
self.mock_popen.return_value = ["", ""]
stdout = utils.execute(["ls", self.test_file[:-1]],
check_exit_code=False)
self.assertEqual("", stdout)
def test_execute_raises(self):
self.mock_popen.side_effect = RuntimeError
self.assertRaises(RuntimeError, utils.execute,
["ls", self.test_file[:-1]])
def test_process_input(self):
expected = "%s\n" % self.test_file[:-1]
self.mock_popen.return_value = [expected, ""]
result = utils.execute(["cat"], process_input="%s\n" %
self.test_file[:-1])
self.assertEqual(result, expected)
def test_with_addl_env(self):
expected = "%s\n" % self.test_file
self.mock_popen.return_value = [expected, ""]
result = utils.execute(["ls", self.test_file],
addl_env={'foo': 'bar'})
self.assertEqual(result, expected)
def test_return_code_log_error_raise_runtime(self):
self.mock_popen.return_value = ('', '')
self.process.return_value.returncode = 1
with mock.patch.object(utils, 'LOG') as log:
self.assertRaises(RuntimeError, utils.execute,
['ls'])
self.assertTrue(log.error.called)
def test_return_code_log_error_no_raise_runtime(self):
self.mock_popen.return_value = ('', '')
self.process.return_value.returncode = 1
with mock.patch.object(utils, 'LOG') as log:
utils.execute(['ls'], check_exit_code=False)
self.assertTrue(log.error.called)
def test_return_code_log_debug(self):
self.mock_popen.return_value = ('', '')
with mock.patch.object(utils, 'LOG') as log:
utils.execute(['ls'])
self.assertTrue(log.debug.called)
def test_return_code_log_error_change_locale(self):
ja_output = 'std_out in Japanese'
ja_error = 'std_err in Japanese'
ja_message_out = oslo_i18n._message.Message(ja_output)
ja_message_err = oslo_i18n._message.Message(ja_error)
ja_translate_out = oslo_i18n._translate.translate(ja_message_out, 'ja')
ja_translate_err = oslo_i18n._translate.translate(ja_message_err, 'ja')
self.mock_popen.return_value = (ja_translate_out, ja_translate_err)
self.process.return_value.returncode = 1
with mock.patch.object(utils, 'LOG') as log:
utils.execute(['ls'], check_exit_code=False)
self.assertIn(ja_translate_out, str(log.error.call_args_list))
self.assertIn(ja_translate_err, str(log.error.call_args_list))
def test_return_code_raise_runtime_do_not_log_fail_as_error(self):
self.mock_popen.return_value = ('', '')
self.process.return_value.returncode = 1
with mock.patch.object(utils, 'LOG') as log:
self.assertRaises(utils.ProcessExecutionError, utils.execute,
['ls'], log_fail_as_error=False)
self.assertFalse(log.error.called)
def test_encode_process_input(self):
str_idata = "%s\n" % self.test_file[:-1]
str_odata = "%s\n" % self.test_file
if six.PY3:
bytes_idata = str_idata.encode(encoding='utf-8')
bytes_odata = str_odata.encode(encoding='utf-8')
self.mock_popen.return_value = [bytes_odata, b'']
result = utils.execute(['cat'], process_input=str_idata)
self.mock_popen.assert_called_once_with(bytes_idata)
else:
self.mock_popen.return_value = [str_odata, '']
result = utils.execute(['cat'], process_input=str_idata)
self.mock_popen.assert_called_once_with(str_idata)
self.assertEqual(str_odata, result)
def test_return_str_data(self):
str_data = "%s\n" % self.test_file
self.mock_popen.return_value = [str_data, '']
result = utils.execute(['ls', self.test_file], return_stderr=True)
self.assertEqual((str_data, ''), result)
@helpers.requires_py3
def test_surrogateescape_in_decoding_out_data(self):
bytes_err_data = b'\xed\xa0\xbd'
err_data = bytes_err_data.decode('utf-8', 'surrogateescape')
out_data = "%s\n" % self.test_file
bytes_out_data = out_data.encode(encoding='utf-8')
self.mock_popen.return_value = [bytes_out_data, bytes_err_data]
result = utils.execute(['ls', self.test_file], return_stderr=True)
self.assertEqual((out_data, err_data), result)
class AgentUtilsExecuteEncodeTest(base.BaseTestCase):
def setUp(self):
super(AgentUtilsExecuteEncodeTest, self).setUp()
self.test_file = self.get_temp_file_path('test_execute.tmp')
open(self.test_file, 'w').close()
def test_decode_return_data(self):
str_data = "%s\n" % self.test_file
result = utils.execute(['ls', self.test_file], return_stderr=True)
self.assertEqual((str_data, ''), result)
class TestFindParentPid(base.BaseTestCase):
def setUp(self):
super(TestFindParentPid, self).setUp()
self.m_execute = mock.patch.object(utils, 'execute').start()
def test_returns_none_for_no_valid_pid(self):
self.m_execute.side_effect = utils.ProcessExecutionError('',
returncode=1)
self.assertIsNone(utils.find_parent_pid(-1))
def test_returns_parent_id_for_good_ouput(self):
self.m_execute.return_value = '123 \n'
self.assertEqual(utils.find_parent_pid(-1), '123')
def test_raises_exception_returncode_0(self):
with testtools.ExpectedException(utils.ProcessExecutionError):
self.m_execute.side_effect = \
utils.ProcessExecutionError('', returncode=0)
utils.find_parent_pid(-1)
def test_raises_unknown_exception(self):
with testtools.ExpectedException(RuntimeError):
self.m_execute.side_effect = RuntimeError()
utils.find_parent_pid(-1)
class TestFindForkTopParent(base.BaseTestCase):
def _test_find_fork_top_parent(self, expected=_marker,
find_parent_pid_retvals=None,
pid_invoked_with_cmdline_retvals=None):
def _find_parent_pid(x):
if find_parent_pid_retvals:
return find_parent_pid_retvals.pop(0)
pid_invoked_with_cmdline = {}
if pid_invoked_with_cmdline_retvals:
pid_invoked_with_cmdline['side_effect'] = (
pid_invoked_with_cmdline_retvals)
else:
pid_invoked_with_cmdline['return_value'] = False
with mock.patch.object(utils, 'find_parent_pid',
side_effect=_find_parent_pid), \
mock.patch.object(utils, 'pid_invoked_with_cmdline',
**pid_invoked_with_cmdline):
actual = utils.find_fork_top_parent(_marker)
self.assertEqual(expected, actual)
def test_returns_own_pid_no_parent(self):
self._test_find_fork_top_parent()
def test_returns_own_pid_nofork(self):
self._test_find_fork_top_parent(find_parent_pid_retvals=['2', '3'])
def test_returns_first_parent_pid_fork(self):
self._test_find_fork_top_parent(
expected='2',
find_parent_pid_retvals=['2', '3', '4'],
pid_invoked_with_cmdline_retvals=[True, False, False])
def test_returns_top_parent_pid_fork(self):
self._test_find_fork_top_parent(
expected='4',
find_parent_pid_retvals=['2', '3', '4'],
pid_invoked_with_cmdline_retvals=[True, True, True])
class TestKillProcess(base.BaseTestCase):
def _test_kill_process(self, pid, raise_exception=False,
kill_signal=signal.SIGKILL, pid_killed=True):
if raise_exception:
exc = utils.ProcessExecutionError('', returncode=0)
else:
exc = None
with mock.patch.object(utils, 'execute',
side_effect=exc) as mock_execute:
with mock.patch.object(utils, 'process_is_running',
return_value=not pid_killed):
utils.kill_process(pid, kill_signal, run_as_root=True)
mock_execute.assert_called_with(['kill', '-%d' % kill_signal, pid],
run_as_root=True)
def test_kill_process_returns_none_for_valid_pid(self):
self._test_kill_process('1')
def test_kill_process_returns_none_for_stale_pid(self):
self._test_kill_process('1', raise_exception=True)
def test_kill_process_raises_exception_for_execute_exception(self):
with testtools.ExpectedException(utils.ProcessExecutionError):
# Simulate that the process is running after trying to kill due to
# any reason such as, for example, Permission denied
self._test_kill_process('1', raise_exception=True,
pid_killed=False)
def test_kill_process_with_different_signal(self):
self._test_kill_process('1', kill_signal=signal.SIGTERM)
class TestFindChildPids(base.BaseTestCase):
def test_returns_empty_list_for_exit_code_1(self):
with mock.patch.object(utils, 'execute',
side_effect=utils.ProcessExecutionError(
'', returncode=1)):
self.assertEqual([], utils.find_child_pids(-1))
def test_returns_empty_list_for_no_output(self):
with mock.patch.object(utils, 'execute', return_value=''):
self.assertEqual([], utils.find_child_pids(-1))
def test_returns_list_of_child_process_ids_for_good_ouput(self):
with mock.patch.object(utils, 'execute', return_value=' 123 \n 185\n'):
self.assertEqual(utils.find_child_pids(-1), ['123', '185'])
def test_returns_list_of_child_process_ids_recursively(self):
with mock.patch.object(utils, 'execute',
side_effect=[' 123 \n 185\n',
' 40 \n', '\n',
'41\n', '\n']):
actual = utils.find_child_pids(-1, True)
self.assertEqual(actual, ['123', '185', '40', '41'])
def test_raises_unknown_exception(self):
with testtools.ExpectedException(RuntimeError):
with mock.patch.object(utils, 'execute',
side_effect=RuntimeError()):
utils.find_child_pids(-1)
class TestGetRoothelperChildPid(base.BaseTestCase):
def _test_get_root_helper_child_pid(self, expected=_marker,
run_as_root=False, pids=None,
cmds=None):
def _find_child_pids(x):
if not pids:
return []
pids.pop(0)
return pids
mock_pid = object()
pid_invoked_with_cmdline = {}
if cmds:
pid_invoked_with_cmdline['side_effect'] = cmds
else:
pid_invoked_with_cmdline['return_value'] = False
with mock.patch.object(utils, 'find_child_pids',
side_effect=_find_child_pids), \
mock.patch.object(utils, 'pid_invoked_with_cmdline',
**pid_invoked_with_cmdline):
actual = utils.get_root_helper_child_pid(
mock_pid, mock.ANY, run_as_root)
if expected is _marker:
expected = str(mock_pid)
self.assertEqual(expected, actual)
def test_returns_process_pid_not_root(self):
self._test_get_root_helper_child_pid()
def test_returns_child_pid_as_root(self):
self._test_get_root_helper_child_pid(expected='2', pids=['1', '2'],
run_as_root=True,
cmds=[True])
def test_returns_last_child_pid_as_root(self):
self._test_get_root_helper_child_pid(expected='3',
pids=['1', '2', '3'],
run_as_root=True,
cmds=[False, True])
def test_returns_first_non_root_helper_child(self):
self._test_get_root_helper_child_pid(
expected='2',
pids=['1', '2', '3'],
run_as_root=True,
cmds=[True, False])
def test_returns_none_as_root(self):
self._test_get_root_helper_child_pid(expected=None, run_as_root=True)
class TestPathUtilities(base.BaseTestCase):
def test_remove_abs_path(self):
self.assertEqual(['ping', '8.8.8.8'],
utils.remove_abs_path(['/usr/bin/ping', '8.8.8.8']))
def test_cmd_matches_expected_matches_abs_path(self):
cmd = ['/bar/../foo']
self.assertTrue(utils.cmd_matches_expected(cmd, cmd))
def test_cmd_matches_expected_matches_script(self):
self.assertTrue(utils.cmd_matches_expected(['python', 'script'],
['script']))
def test_cmd_matches_expected_doesnt_match(self):
self.assertFalse(utils.cmd_matches_expected('foo', 'bar'))
class FakeUser(object):
def __init__(self, name):
self.pw_name = name
class FakeGroup(object):
def __init__(self, name):
self.gr_name = name
class TestBaseOSUtils(base.BaseTestCase):
EUID = 123
EUNAME = 'user'
EGID = 456
EGNAME = 'group'
@mock.patch('os.geteuid', return_value=EUID)
@mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME))
def test_is_effective_user_id(self, getpwuid, geteuid):
self.assertTrue(utils.is_effective_user(self.EUID))
geteuid.assert_called_once_with()
self.assertFalse(getpwuid.called)
@mock.patch('os.geteuid', return_value=EUID)
@mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME))
def test_is_effective_user_str_id(self, getpwuid, geteuid):
self.assertTrue(utils.is_effective_user(str(self.EUID)))
geteuid.assert_called_once_with()
self.assertFalse(getpwuid.called)
@mock.patch('os.geteuid', return_value=EUID)
@mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME))
def test_is_effective_user_name(self, getpwuid, geteuid):
self.assertTrue(utils.is_effective_user(self.EUNAME))
geteuid.assert_called_once_with()
getpwuid.assert_called_once_with(self.EUID)
@mock.patch('os.geteuid', return_value=EUID)
@mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME))
def test_is_not_effective_user(self, getpwuid, geteuid):
self.assertFalse(utils.is_effective_user('wrong'))
geteuid.assert_called_once_with()
getpwuid.assert_called_once_with(self.EUID)
@mock.patch('os.getegid', return_value=EGID)
@mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME))
def test_is_effective_group_id(self, getgrgid, getegid):
self.assertTrue(utils.is_effective_group(self.EGID))
getegid.assert_called_once_with()
self.assertFalse(getgrgid.called)
@mock.patch('os.getegid', return_value=EGID)
@mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME))
def test_is_effective_group_str_id(self, getgrgid, getegid):
self.assertTrue(utils.is_effective_group(str(self.EGID)))
getegid.assert_called_once_with()
self.assertFalse(getgrgid.called)
@mock.patch('os.getegid', return_value=EGID)
@mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME))
def test_is_effective_group_name(self, getgrgid, getegid):
self.assertTrue(utils.is_effective_group(self.EGNAME))
getegid.assert_called_once_with()
getgrgid.assert_called_once_with(self.EGID)
@mock.patch('os.getegid', return_value=EGID)
@mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME))
def test_is_not_effective_group(self, getgrgid, getegid):
self.assertFalse(utils.is_effective_group('wrong'))
getegid.assert_called_once_with()
getgrgid.assert_called_once_with(self.EGID)
class TestUnixDomainHttpConnection(base.BaseTestCase):
def test_connect(self):
with mock.patch.object(utils, 'cfg') as cfg:
cfg.CONF.metadata_proxy_socket = '/the/path'
with mock.patch('socket.socket') as socket_create:
conn = utils.UnixDomainHTTPConnection('169.254.169.254',
timeout=3)
conn.connect()
socket_create.assert_has_calls([
mock.call(socket.AF_UNIX, socket.SOCK_STREAM),
mock.call().settimeout(3),
mock.call().connect('/the/path')]
)
self.assertEqual(conn.timeout, 3)
class TestUnixDomainHttpProtocol(base.BaseTestCase):
def test_init_empty_client(self):
for addr in ('', b''):
u = utils.UnixDomainHttpProtocol(mock.Mock(), addr, mock.Mock())
self.assertEqual(u.client_address, ('<local>', 0))
def test_init_with_client(self):
u = utils.UnixDomainHttpProtocol(mock.Mock(), 'foo', mock.Mock())
self.assertEqual(u.client_address, 'foo')
class TestUnixDomainWSGIServer(base.BaseTestCase):
def setUp(self):
super(TestUnixDomainWSGIServer, self).setUp()
self.eventlet_p = mock.patch.object(utils, 'eventlet')
self.eventlet = self.eventlet_p.start()
def test_start(self):
self.server = utils.UnixDomainWSGIServer('test')
mock_app = mock.Mock()
with mock.patch.object(self.server, '_launch') as launcher:
self.server.start(mock_app, '/the/path', workers=5, backlog=128)
self.eventlet.assert_has_calls([
mock.call.listen(
'/the/path',
family=socket.AF_UNIX,
backlog=128
)]
)
launcher.assert_called_once_with(mock_app, workers=5)
def test_run(self):
self.server = utils.UnixDomainWSGIServer('test')
self.server._run('app', 'sock')
self.eventlet.wsgi.server.assert_called_once_with(
'sock',
'app',
protocol=utils.UnixDomainHttpProtocol,
log=mock.ANY,
log_format=cfg.CONF.wsgi_log_format,
max_size=self.server.num_threads
)
def test_num_threads(self):
num_threads = 8
self.server = utils.UnixDomainWSGIServer('test',
num_threads=num_threads)
self.server._run('app', 'sock')
self.eventlet.wsgi.server.assert_called_once_with(
'sock',
'app',
protocol=utils.UnixDomainHttpProtocol,
log=mock.ANY,
log_format=cfg.CONF.wsgi_log_format,
max_size=num_threads
)
| apache-2.0 |
ldrygala/bootzooka | backend/src/test/scala/com/softwaremill/bootzooka/common/crypto/Argon2dPasswordHashingSpec.scala | 953 | package com.softwaremill.bootzooka.common.crypto
import com.softwaremill.bootzooka.test.TestHelpers
import com.typesafe.config.Config
import org.scalatest.{FlatSpec, Matchers}
class Argon2dPasswordHashingSpec extends FlatSpec with Matchers with TestHelpers {
val withChangedParams = new Argon2dPasswordHashing(new CryptoConfig {
override def rootConfig: Config = ???
override lazy val iterations = 3
override lazy val memory = 16383
override lazy val parallelism = 2
})
behavior of "Argon2d Password Hashing"
it should "not indicate rehashing necessity when config doesn't change" in {
val hash = passwordHashing.hashPassword("password", "salt")
passwordHashing.requiresRehashing(hash) shouldBe false
}
it should "indicate rehashing necessity upon config change" in {
val hash = passwordHashing.hashPassword("password", "salt")
withChangedParams.requiresRehashing(hash) shouldBe true
}
}
| apache-2.0 |
zapper59/RangerHale---Space-Adventure | src/org/enemies/FireSkull.java | 1877 | package org.enemies;
import static java.lang.Math.*;
import java.awt.Graphics;
import java.awt.image.BufferedImage;
import org.items.Health;
import org.players.Player;
import org.resources.Collisions;
import org.resources.Element;
import org.resources.ImagePack;
import org.rooms.Room;
import org.walls.Wall;
public class FireSkull extends Enemy {
public static final BufferedImage[] ani = new BufferedImage[]{ImagePack.getImage("firelevel/skull1.png"),
ImagePack.getImage("firelevel/skull2.png"), ImagePack.getImage("firelevel/skull3.png"),
ImagePack.getImage("firelevel/skull4.png"),};
// public boolean rolling=false;
// public boolean left=false;
int counter = (int) round(1000 * random());
public FireSkull(int a, int b) {
// w=h=26;
w = 40;
h = 63;
x = a;
y = b;
vx = 2;
life = lifeCapacity = 40;
}
public void run() {
Player p = Player.player;
if (life <= 0) {
Health.add(this, 7);
dead = true;
}
counter++;
boolean onSurface = false;
for (Wall wal : Room.walls) {
if (Collisions.onTop(this, wal))
onSurface = true;
}
if (counter % 20 == 0) {
vx = (x + w / 2 < p.x + p.w / 2 ? 4 : -4);
}
if (onSurface && counter % 50 == 0) {
vy = -7;
vx = 2 * (vx < 0 ? -1 : 1);
}
vy += .1;
if (Collisions.collides(this, p)) {
Player.damage(2);
vx = -vx;
}
vMult1();
for (Wall wal : Room.walls) {
if (Collisions.willCollide(this, wal, vx, 0)) {
if (vx > 0) {
x = wal.x - w;
} else {
x = wal.x + wal.w;
}
vx = 0;
}
if (Collisions.willCollide(this, wal, vx, vy)) {
if (vy > 0) {
y = wal.y - h;
} else {
y = wal.y + wal.h;
}
vy = 0;
}
}
x += vx;
y += vy;
vMult2();
if (counter % 5 == 0)
image = ani[(int) (random() * 4)];
}
public void draw(Graphics g) {
g.drawImage(image, round(x) - 10, round(y) - 27, null);
}
}
| apache-2.0 |
IrrilevantHappyLlamas/Runnest | app/src/androidTest/java/ch/ihl/runnest/AppRunnestTest.java | 1335 | package ch.ihl.runnest;
import org.junit.Assert;
import org.junit.Test;
import ch.ihl.runnest.Model.TestUser;
/**
* Test suite for AppRunnest
*/
public class AppRunnestTest {
@Test(expected = IllegalArgumentException.class)
public void setUserThrowsIllegalArgument() {
new AppRunnest().setUser(null);
}
@Test(expected = IllegalArgumentException.class)
public void setApiClientThrowsIllegalArgument() {
new AppRunnest().setApiClient(null);
}
@Test
public void setUserWorks() {
AppRunnest testApp = new AppRunnest();
testApp.setUser(new TestUser());
Assert.assertTrue(testApp.getUser().getName().equals("Test User"));
}
@Test
public void unsetNetworkHandlerIsNull() {
Assert.assertTrue(new AppRunnest().getNetworkHandler() == null);
}
@Test
public void setTestSessionWorks() {
AppRunnest testApp = new AppRunnest();
Assert.assertFalse(testApp.isTestSession());
testApp.setTestSession(true);
Assert.assertTrue(testApp.isTestSession());
testApp.setTestSession(false);
Assert.assertFalse(testApp.isTestSession());
}
@Test
public void getApiClientWorks() {
AppRunnest testApp = new AppRunnest();
Assert.assertNull(testApp.getApiClient());
}
}
| apache-2.0 |
ReactiveX/RxJava | src/test/java/io/reactivex/rxjava3/observable/ObservableFuseableTest.java | 3054 | /*
* Copyright (c) 2016-present, RxJava Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
* the License for the specific language governing permissions and limitations under the License.
*/
package io.reactivex.rxjava3.observable;
import java.util.Arrays;
import org.junit.Test;
import io.reactivex.rxjava3.core.*;
import io.reactivex.rxjava3.operators.QueueFuseable;
import io.reactivex.rxjava3.testsupport.TestHelper;
public class ObservableFuseableTest extends RxJavaTest {
@Test
public void syncRange() {
Observable.range(1, 10)
.to(TestHelper.<Integer>testConsumer(QueueFuseable.ANY, false))
.assertFusionMode(QueueFuseable.SYNC)
.assertValues(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
.assertNoErrors()
.assertComplete();
}
@Test
public void syncArray() {
Observable.fromArray(new Integer[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 })
.to(TestHelper.<Integer>testConsumer(QueueFuseable.ANY, false))
.assertFusionMode(QueueFuseable.SYNC)
.assertValues(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
.assertNoErrors()
.assertComplete();
}
@Test
public void syncIterable() {
Observable.fromIterable(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
.to(TestHelper.<Integer>testConsumer(QueueFuseable.ANY, false))
.assertFusionMode(QueueFuseable.SYNC)
.assertValues(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
.assertNoErrors()
.assertComplete();
}
@Test
public void syncRangeHidden() {
Observable.range(1, 10).hide()
.to(TestHelper.<Integer>testConsumer(QueueFuseable.ANY, false))
.assertNotFuseable()
.assertFusionMode(QueueFuseable.NONE)
.assertValues(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
.assertNoErrors()
.assertComplete();
}
@Test
public void syncArrayHidden() {
Observable.fromArray(new Integer[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 })
.hide()
.to(TestHelper.<Integer>testConsumer(QueueFuseable.ANY, false))
.assertNotFuseable()
.assertFusionMode(QueueFuseable.NONE)
.assertValues(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
.assertNoErrors()
.assertComplete();
}
@Test
public void syncIterableHidden() {
Observable.fromIterable(Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
.hide()
.to(TestHelper.<Integer>testConsumer(QueueFuseable.ANY, false))
.assertNotFuseable()
.assertFusionMode(QueueFuseable.NONE)
.assertValues(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
.assertNoErrors()
.assertComplete();
}
}
| apache-2.0 |
cstamas/vertx-orientdb | vertx-orientdb/src/main/java/org/cstamas/vertx/orientdb/DocumentDatabase.java | 265 | package org.cstamas.vertx.orientdb;
import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx;
/**
* OrientDB pooled document database instance.
*/
public interface DocumentDatabase
extends Database<DocumentDatabase, ODatabaseDocumentTx>
{
}
| apache-2.0 |
pamelasanchezvi/kubernetes | pkg/proxy/types.go | 1555 | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/proxy/vmturbo"
)
// ProxyProvider is the interface provided by proxier implementations.
type ProxyProvider interface {
// OnServiceUpdate manages the active set of service proxies.
// Active service proxies are reinitialized if found in the update set or
// removed if missing from the update set.
OnServiceUpdate(services []api.Service)
// SyncLoop runs periodic work.
// This is expected to run as a goroutine or as the main loop of the app.
// It does not return.
SyncLoop()
GetTransactionCounter() *vmturbo.TransactionCounter
}
// ServicePortName carries a namespace + name + portname. This is the unique
// identfier for a load-balanced service.
type ServicePortName struct {
types.NamespacedName
Port string
}
func (spn ServicePortName) String() string {
return fmt.Sprintf("%s:%s", spn.NamespacedName.String(), spn.Port)
}
| apache-2.0 |
ppbizapps/kairosdb | src/main/java/org/kairosdb/core/http/rest/MetricsResource.java | 27026 | /*
* Copyright 2016 KairosDB Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kairosdb.core.http.rest;
import com.google.common.annotations.VisibleForTesting;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonIOException;
import com.google.gson.JsonSyntaxException;
import com.google.gson.stream.MalformedJsonException;
import com.google.inject.Inject;
import com.google.inject.name.Named;
import org.kairosdb.core.DataPointSet;
import org.kairosdb.core.KairosDataPointFactory;
import org.kairosdb.core.datapoints.LongDataPointFactory;
import org.kairosdb.core.datapoints.LongDataPointFactoryImpl;
import org.kairosdb.core.datapoints.StringDataPointFactory;
import org.kairosdb.core.datastore.*;
import org.kairosdb.core.exception.InvalidServerTypeException;
import org.kairosdb.core.formatter.DataFormatter;
import org.kairosdb.core.formatter.FormatterException;
import org.kairosdb.core.formatter.JsonFormatter;
import org.kairosdb.core.formatter.JsonResponse;
import org.kairosdb.core.http.rest.json.*;
import org.kairosdb.core.reporting.KairosMetricReporter;
import org.kairosdb.core.reporting.ThreadReporter;
import org.kairosdb.eventbus.FilterEventBus;
import org.kairosdb.eventbus.Publisher;
import org.kairosdb.events.DataPointEvent;
import org.kairosdb.util.MemoryMonitorException;
import org.kairosdb.util.SimpleStats;
import org.kairosdb.util.SimpleStatsReporter;
import org.kairosdb.util.StatsMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.StreamingOutput;
import java.io.*;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.zip.GZIPInputStream;
import static com.google.common.base.Preconditions.checkNotNull;
import static javax.ws.rs.core.Response.ResponseBuilder;
enum NameType
{
METRIC_NAMES,
TAG_KEYS,
TAG_VALUES
}
enum ServerType
{
INGEST,
QUERY,
DELETE
}
@Path("/api/v1")
public class MetricsResource implements KairosMetricReporter
{
public static final Logger logger = LoggerFactory.getLogger(MetricsResource.class);
public static final String QUERY_TIME = "kairosdb.http.query_time";
public static final String REQUEST_TIME = "kairosdb.http.request_time";
public static final String INGEST_COUNT = "kairosdb.http.ingest_count";
public static final String INGEST_TIME = "kairosdb.http.ingest_time";
public static final String QUERY_URL = "/datapoints/query";
private final KairosDatastore datastore;
private final Publisher<DataPointEvent> m_publisher;
private final Map<String, DataFormatter> formatters = new HashMap<>();
private final QueryParser queryParser;
//Used for parsing incoming metrics
private final Gson gson;
//These two are used to track rate of ingestion
private final AtomicInteger m_ingestedDataPoints = new AtomicInteger();
private final AtomicInteger m_ingestTime = new AtomicInteger();
private final StatsMap m_statsMap = new StatsMap();
private final KairosDataPointFactory m_kairosDataPointFactory;
@Inject
private LongDataPointFactory m_longDataPointFactory = new LongDataPointFactoryImpl();
@Inject
private StringDataPointFactory m_stringDataPointFactory = new StringDataPointFactory();
@Inject(optional = true)
private QueryPreProcessorContainer m_queryPreProcessor = new QueryPreProcessorContainer()
{
@Override
public Query preProcess(Query query)
{
return query;
}
};
@Inject(optional = true)
@Named("kairosdb.queries.aggregate_stats")
private boolean m_aggregatedQueryMetrics = false;
@Inject(optional = true)
@Named("kairosdb.log.queries.enable")
private boolean m_logQueries = false;
@Inject(optional = true)
@Named("kairosdb.log.queries.ttl")
private int m_logQueriesTtl = 86400;
@Inject(optional = true)
@Named("kairosdb.log.queries.greater_than")
private int m_logQueriesLongerThan = 60;
@Inject
@Named("HOSTNAME")
private String hostName = "localhost";
//Used for setting which API methods are enabled
private EnumSet<ServerType> m_serverType = EnumSet.of(ServerType.INGEST, ServerType.QUERY, ServerType.DELETE);
@Inject(optional = true)
@VisibleForTesting
void setServerType(@Named("kairosdb.server.type") String serverType)
{
if (serverType.equals("ALL")) return;
String serverTypeString = serverType.replaceAll("\\s+","");
List<String> serverTypeList = Arrays.asList(serverTypeString.split(","));
m_serverType = EnumSet.noneOf(ServerType.class);
for (String stString : serverTypeList)
{
m_serverType.add(ServerType.valueOf(stString));
}
logger.info("KairosDB server type set to: " + m_serverType.toString());
}
@Inject
private SimpleStatsReporter m_simpleStatsReporter = new SimpleStatsReporter();
/**
 * Creates the resource.
 *
 * @param datastore        datastore used to execute queries and deletes; must not be null
 * @param queryParser      parser for incoming query JSON; must not be null
 * @param dataPointFactory factory for building typed data points during ingest
 * @param eventBus         event bus used to publish DataPointEvents; must not be null
 */
@Inject
public MetricsResource(KairosDatastore datastore, QueryParser queryParser,
        KairosDataPointFactory dataPointFactory, FilterEventBus eventBus)
{
    this.datastore = checkNotNull(datastore);
    this.queryParser = checkNotNull(queryParser);
    m_kairosDataPointFactory = dataPointFactory;
    m_publisher = checkNotNull(eventBus).createPublisher(DataPointEvent.class);

    formatters.put("json", new JsonFormatter());

    // HTML escaping disabled so characters such as '<' survive in metric names.
    gson = new GsonBuilder().disableHtmlEscaping().create();
}
/**
 * Applies the standard CORS and no-cache headers used by every API response.
 *
 * @param responseBuilder builder to decorate
 * @return the same builder, for chaining
 */
public static ResponseBuilder setHeaders(ResponseBuilder responseBuilder)
{
    return responseBuilder
            .header("Access-Control-Allow-Origin", "*")
            .header("Pragma", "no-cache")
            .header("Cache-Control", "no-cache")
            .header("Expires", 0);
}
/**
 * Verifies that this instance's configured server types permit the given
 * API method.
 *
 * @throws InvalidServerTypeException when the method's server type is disabled
 */
private void checkServerType(ServerType methodServerType, String methodName, String requestType) throws InvalidServerTypeException
{
    checkServerTypeStatic(m_serverType, methodServerType, methodName, requestType);
}
/**
 * Throws InvalidServerTypeException (whose message is a JSON error body)
 * when {@code methodServerType} is not one of the enabled server types.
 */
@VisibleForTesting
static void checkServerTypeStatic(EnumSet<ServerType> serverType, ServerType methodServerType, String methodName, String requestType) throws InvalidServerTypeException
{
    logger.debug("checkServerType() - KairosDB ServerType set to " + serverType.toString());

    if (serverType.contains(methodServerType))
        return;

    // Request type is disabled: log it and reject with a JSON error payload.
    String logtext = "Disabled request type: " + methodServerType.name() + ", " + requestType + " request via URI \"" + methodName + "\"";
    logger.info(logtext);

    String exceptionMessage = "{\"errors\": [\"Forbidden: " + methodServerType.toString() + " API methods are disabled on this KairosDB instance.\"]}";
    throw new InvalidServerTypeException(exceptionMessage);
}
/**
 * CORS preflight handler for the /version endpoint.
 */
@OPTIONS
@Produces(MediaType.APPLICATION_JSON + "; charset=UTF-8")
@Path("/version")
public Response corsPreflightVersion(@HeaderParam("Access-Control-Request-Headers") final String requestHeaders,
        @HeaderParam("Access-Control-Request-Method") final String requestMethod)
{
    return getCorsPreflightResponseBuilder(requestHeaders, requestMethod).build();
}
/**
 * Returns the implementation title and version of this package, as read
 * from the jar manifest.
 */
@GET
@Produces(MediaType.APPLICATION_JSON + "; charset=UTF-8")
@Path("/version")
public Response getVersion()
{
    Package thisPackage = getClass().getPackage();
    ResponseBuilder responseBuilder = Response.status(Response.Status.OK)
            .entity("{\"version\": \"" + thisPackage.getImplementationTitle() + " " + thisPackage.getImplementationVersion() + "\"}\n");
    return setHeaders(responseBuilder).build();
}
/**
 * CORS preflight handler for the /metricnames endpoint; requires the QUERY
 * server type to be enabled.
 */
@OPTIONS
@Produces(MediaType.APPLICATION_JSON + "; charset=UTF-8")
@Path("/metricnames")
public Response corsPreflightMetricNames(@HeaderParam("Access-Control-Request-Headers") final String requestHeaders,
        @HeaderParam("Access-Control-Request-Method") final String requestMethod) throws InvalidServerTypeException
{
    checkServerType(ServerType.QUERY, "/metricnames", "OPTIONS");
    return getCorsPreflightResponseBuilder(requestHeaders, requestMethod).build();
}
/**
 * Lists all metric names known to the datastore, optionally filtered by
 * the given prefix.
 *
 * @param prefix optional name prefix filter; may be null
 */
@GET
@Produces(MediaType.APPLICATION_JSON + "; charset=UTF-8")
@Path("/metricnames")
public Response getMetricNames(@QueryParam("prefix") String prefix) throws InvalidServerTypeException
{
	checkServerType(ServerType.QUERY, "/metricnames", "GET");
	return executeNameQuery(NameType.METRIC_NAMES, prefix);
}
/**
 * CORS preflight handler for the /datapoints ingest endpoint; requires the
 * INGEST server type to be enabled.
 */
@OPTIONS
@Produces(MediaType.APPLICATION_JSON + "; charset=UTF-8")
@Path("/datapoints")
public Response corsPreflightDataPoints(@HeaderParam("Access-Control-Request-Headers") String requestHeaders,
        @HeaderParam("Access-Control-Request-Method") String requestMethod) throws InvalidServerTypeException
{
    checkServerType(ServerType.INGEST, "/datapoints", "OPTIONS");
    return getCorsPreflightResponseBuilder(requestHeaders, requestMethod).build();
}
/**
 * Ingests gzip-compressed data point JSON: wraps the request body in a
 * GZIPInputStream and delegates to {@link #add(InputStream)}.  A corrupt
 * gzip stream yields a 400 response.
 */
@POST
@Produces(MediaType.APPLICATION_JSON + "; charset=UTF-8")
@Consumes("application/gzip")
@Path("/datapoints")
public Response addGzip(InputStream gzip) throws InvalidServerTypeException
{
    checkServerType(ServerType.INGEST, "gzip /datapoints", "POST");
    try
    {
        // GZIPInputStream reads the gzip header eagerly, so a bad stream
        // fails here rather than inside the parser.
        return add(new GZIPInputStream(gzip));
    }
    catch (IOException e)
    {
        return new JsonResponseBuilder(Response.Status.BAD_REQUEST)
                .addError(e.getMessage()).build();
    }
}
/**
 * Ingests data points posted as JSON.  Returns 204 on success, 400 with a
 * JSON error list for malformed JSON or validation failures, and 500 for
 * unexpected errors.  Ingest counters are updated for metric reporting.
 */
@POST
@Produces(MediaType.APPLICATION_JSON + "; charset=UTF-8")
@Path("/datapoints")
public Response add(InputStream json) throws InvalidServerTypeException
{
    checkServerType(ServerType.INGEST, "JSON /datapoints", "POST");
    try
    {
        // Parser publishes DataPointEvents as it reads the stream.
        DataPointsParser parser = new DataPointsParser(m_publisher, new InputStreamReader(json, "UTF-8"),
                gson, m_kairosDataPointFactory);
        ValidationErrors validationErrors = parser.parse();

        // Track ingest throughput for getMetrics().
        m_ingestedDataPoints.addAndGet(parser.getDataPointCount());
        m_ingestTime.addAndGet(parser.getIngestTime());

        if (!validationErrors.hasErrors())
            return setHeaders(Response.status(Response.Status.NO_CONTENT)).build();
        else
        {
            JsonResponseBuilder builder = new JsonResponseBuilder(Response.Status.BAD_REQUEST);
            for (String errorMessage : validationErrors.getErrors())
            {
                builder.addError(errorMessage);
            }
            return builder.build();
        }
    }
    catch (JsonIOException | MalformedJsonException | JsonSyntaxException e)
    {
        // Malformed JSON is the client's fault: 400.
        JsonResponseBuilder builder = new JsonResponseBuilder(Response.Status.BAD_REQUEST);
        return builder.addError(e.getMessage()).build();
    }
    catch (Exception e)
    {
        logger.error("Failed to add metric.", e);
        return setHeaders(Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(new ErrorResponse(e.getMessage()))).build();
    }
    // OutOfMemoryError does not extend Exception, so this clause is reachable;
    // it turns a heap exhaustion during ingest into a 500 instead of killing
    // the request thread silently.
    catch (OutOfMemoryError e)
    {
        logger.error("Out of memory error.", e);
        return setHeaders(Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(new ErrorResponse(e.getMessage()))).build();
    }
}
/**
 * CORS preflight handler for the /datapoints/query/tags endpoint; requires
 * the QUERY server type to be enabled.
 */
@OPTIONS
@Produces(MediaType.APPLICATION_JSON + "; charset=UTF-8")
@Path("/datapoints/query/tags")
public Response corsPreflightQueryTags(@HeaderParam("Access-Control-Request-Headers") final String requestHeaders,
        @HeaderParam("Access-Control-Request-Method") final String requestMethod) throws InvalidServerTypeException
{
    checkServerType(ServerType.QUERY, "/datapoints/query/tags", "OPTIONS");
    return getCorsPreflightResponseBuilder(requestHeaders, requestMethod).build();
}
/**
 * Runs a tags-only query: for each QueryMetric in the posted JSON, returns
 * the tag sets (no data point values).  The result is spooled to a temp
 * file in the datastore cache directory and streamed back to the client.
 *
 * <p>Fix: the response file was previously written with {@code FileWriter},
 * which uses the platform default charset, while this endpoint advertises
 * charset=UTF-8 (and runQuery() already writes UTF-8).  The file is now
 * written explicitly as UTF-8 so non-ASCII tag values are not mangled on
 * platforms whose default charset is not UTF-8.
 */
@POST
@Produces(MediaType.APPLICATION_JSON + "; charset=UTF-8")
@Path("/datapoints/query/tags")
public Response getMeta(String json) throws InvalidServerTypeException
{
    checkServerType(ServerType.QUERY, "/datapoints/query/tags", "POST");
    checkNotNull(json);
    logger.debug(json);

    try
    {
        File respFile = File.createTempFile("kairos", ".json", new File(datastore.getCacheDir()));
        // Explicit UTF-8, matching the declared response charset and runQuery().
        BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(respFile), "UTF-8"));

        JsonResponse jsonResponse = new JsonResponse(writer);
        jsonResponse.begin();

        List<QueryMetric> queries = queryParser.parseQueryMetric(json).getQueryMetrics();

        for (QueryMetric query : queries)
        {
            List<DataPointGroup> result = datastore.queryTags(query);

            try
            {
                jsonResponse.formatQuery(result, false, -1);
            }
            finally
            {
                // Always release the result groups, even on formatter failure.
                for (DataPointGroup dataPointGroup : result)
                {
                    dataPointGroup.close();
                }
            }
        }

        jsonResponse.end();
        writer.flush();
        writer.close();

        ResponseBuilder responseBuilder = Response.status(Response.Status.OK).entity(
                new FileStreamingOutput(respFile));

        setHeaders(responseBuilder);
        return responseBuilder.build();
    }
    catch (JsonSyntaxException | QueryException e)
    {
        JsonResponseBuilder builder = new JsonResponseBuilder(Response.Status.BAD_REQUEST);
        return builder.addError(e.getMessage()).build();
    }
    catch (BeanValidationException e)
    {
        JsonResponseBuilder builder = new JsonResponseBuilder(Response.Status.BAD_REQUEST);
        return builder.addErrors(e.getErrorMessages()).build();
    }
    catch (MemoryMonitorException e)
    {
        logger.error("Query failed.", e);
        System.gc();
        return setHeaders(Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(new ErrorResponse(e.getMessage()))).build();
    }
    catch (Exception e)
    {
        logger.error("Query failed.", e);
        return setHeaders(Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(new ErrorResponse(e.getMessage()))).build();
    }
    catch (OutOfMemoryError e)
    {
        logger.error("Out of memory error.", e);
        return setHeaders(Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(new ErrorResponse(e.getMessage()))).build();
    }
}
/**
 * CORS preflight handler for the data query endpoint; requires the QUERY
 * server type to be enabled.
 *
 * <p>See https://developer.mozilla.org/en-US/docs/HTTP/Access_control_CORS
 * for background on preflight requests.
 */
@OPTIONS
@Produces(MediaType.APPLICATION_JSON + "; charset=UTF-8")
@Path(QUERY_URL)
public Response corsPreflightQuery(@HeaderParam("Access-Control-Request-Headers") final String requestHeaders,
        @HeaderParam("Access-Control-Request-Method") final String requestMethod) throws InvalidServerTypeException
{
    checkServerType(ServerType.QUERY, QUERY_URL, "OPTIONS");
    return getCorsPreflightResponseBuilder(requestHeaders, requestMethod).build();
}
/**
 * Runs a data point query supplied as a URL parameter (GET variant of the
 * query endpoint); delegates to {@link #runQuery(String, String)}.
 */
@GET
@Produces(MediaType.APPLICATION_JSON + "; charset=UTF-8")
@Path(QUERY_URL)
public Response getQuery(@QueryParam("query") String json, @Context HttpServletRequest request) throws Exception
{
	checkServerType(ServerType.QUERY, QUERY_URL, "GET");
	return runQuery(json, request.getRemoteAddr());
}
/**
 * Runs a data point query supplied as the request body (POST variant of
 * the query endpoint); delegates to {@link #runQuery(String, String)}.
 */
@POST
@Produces(MediaType.APPLICATION_JSON + "; charset=UTF-8")
@Path(QUERY_URL)
public Response postQuery(String json, @Context HttpServletRequest request) throws Exception
{
	checkServerType(ServerType.QUERY, QUERY_URL, "POST");
	return runQuery(json, request.getRemoteAddr());
}
/**
 * Executes a data point query and streams the JSON result from a temp file.
 *
 * <p>Results are written to a temp file in the datastore cache directory,
 * optionally post-processed by QueryPostProcessingPlugins, and streamed
 * back via {@link FileStreamingOutput}.  Per-query timing and a
 * success/failed status are reported through the thread-local
 * {@code ThreadReporter}; the finally block both records those metrics and
 * resets the reporter state, so the statement ordering there is
 * significant and must not be rearranged.
 *
 * @param json       query JSON; null triggers a validation error (400)
 * @param remoteAddr caller address, recorded when slow-query logging is on
 */
public Response runQuery(String json, String remoteAddr) throws Exception
{
    logger.debug(json);

    boolean queryFailed = false;

    // Report time doubles as the overall request start time.
    ThreadReporter.setReportTime(System.currentTimeMillis());
    ThreadReporter.addTag("host", hostName);

    try
    {
        if (json == null)
            throw new BeanValidationException(new QueryParser.SimpleConstraintViolation("query json", "must not be null or empty"), "");

        File respFile = File.createTempFile("kairos", ".json", new File(datastore.getCacheDir()));
        BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(respFile), "UTF-8"));

        JsonResponse jsonResponse = new JsonResponse(writer);
        jsonResponse.begin();

        Query mainQuery = queryParser.parseQueryMetric(json);
        // Deployment hook: may rewrite the query before execution.
        mainQuery = m_queryPreProcessor.preProcess(mainQuery);

        List<QueryMetric> queries = mainQuery.getQueryMetrics();

        int queryCount = 0;
        for (QueryMetric query : queries)
        {
            queryCount++;
            // Tag per-query metrics with the metric name and 1-based index.
            ThreadReporter.addTag("metric_name", query.getName());
            ThreadReporter.addTag("query_index", String.valueOf(queryCount));

            DatastoreQuery dq = datastore.createQuery(query);
            long startQuery = System.currentTimeMillis();

            try
            {
                List<DataPointGroup> results = dq.execute();
                jsonResponse.formatQuery(results, query.isExcludeTags(), dq.getSampleSize());

                ThreadReporter.addDataPoint(QUERY_TIME, System.currentTimeMillis() - startQuery);
            }
            finally
            {
                // Closing the DatastoreQuery releases its DataPointGroups.
                dq.close();
            }
        }

        jsonResponse.end();
        writer.flush();
        writer.close();

        // Give post-processing plugins a chance to replace the response file.
        List<QueryPlugin> plugins = mainQuery.getPlugins();
        for (QueryPlugin plugin : plugins)
        {
            if (plugin instanceof QueryPostProcessingPlugin)
            {
                respFile = ((QueryPostProcessingPlugin) plugin).processQueryResults(respFile);
            }
        }

        ResponseBuilder responseBuilder = Response.status(Response.Status.OK).entity(
                new FileStreamingOutput(respFile));

        setHeaders(responseBuilder);
        return responseBuilder.build();
    }
    catch (JsonSyntaxException | QueryException e)
    {
        queryFailed = true;
        JsonResponseBuilder builder = new JsonResponseBuilder(Response.Status.BAD_REQUEST);
        return builder.addError(e.getMessage()).build();
    }
    catch (BeanValidationException e)
    {
        queryFailed = true;
        JsonResponseBuilder builder = new JsonResponseBuilder(Response.Status.BAD_REQUEST);
        return builder.addErrors(e.getErrorMessages()).build();
    }
    catch (MemoryMonitorException e)
    {
        queryFailed = true;
        logger.error("Query failed.", e);
        // Brief pause plus GC to let memory pressure subside before replying.
        Thread.sleep(1000);
        System.gc();
        return setHeaders(Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(new ErrorResponse(e.getMessage()))).build();
    }
    catch (IOException e)
    {
        queryFailed = true;
        logger.error("Failed to open temp folder " + datastore.getCacheDir(), e);
        return setHeaders(Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(new ErrorResponse(e.getMessage()))).build();
    }
    catch (Exception e)
    {
        queryFailed = true;
        logger.error("Query failed.", e);
        return setHeaders(Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(new ErrorResponse(e.getMessage()))).build();
    }
    // OutOfMemoryError does not extend Exception, so this is reachable.
    catch (OutOfMemoryError e)
    {
        queryFailed = true;
        logger.error("Out of memory error.", e);
        return setHeaders(Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(new ErrorResponse(e.getMessage()))).build();
    }
    finally
    {
        // Reset per-query tags, then tag the overall request outcome.
        ThreadReporter.clearTags();
        ThreadReporter.addTag("host", hostName);

        if (queryFailed)
            ThreadReporter.addTag("status", "failed");
        else
            ThreadReporter.addTag("status", "success");

        //write metrics for query logging
        long queryTime = System.currentTimeMillis() - ThreadReporter.getReportTime();
        if (m_logQueries && ((queryTime / 1000) >= m_logQueriesLongerThan))
        {
            ThreadReporter.addDataPoint("kairosdb.log.query.remote_address", remoteAddr, m_logQueriesTtl);
            ThreadReporter.addDataPoint("kairosdb.log.query.json", json, m_logQueriesTtl);
        }

        ThreadReporter.addTag("request", QUERY_URL);
        ThreadReporter.addDataPoint(REQUEST_TIME, queryTime);

        // Either aggregate into the stats map or submit data points directly.
        if (m_aggregatedQueryMetrics)
        {
            ThreadReporter.gatherData(m_statsMap);
        }
        else
        {
            ThreadReporter.submitData(m_longDataPointFactory,
                    m_stringDataPointFactory, m_publisher);
        }

        ThreadReporter.clear();
    }
}
/**
 * CORS preflight handler for the /datapoints/delete endpoint; requires the
 * DELETE server type to be enabled.
 */
@OPTIONS
@Produces(MediaType.APPLICATION_JSON + "; charset=UTF-8")
@Path("/datapoints/delete")
public Response corsPreflightDelete(@HeaderParam("Access-Control-Request-Headers") final String requestHeaders,
        @HeaderParam("Access-Control-Request-Method") final String requestMethod) throws InvalidServerTypeException
{
    checkServerType(ServerType.DELETE, "/datapoints/delete", "OPTIONS");
    return getCorsPreflightResponseBuilder(requestHeaders, requestMethod).build();
}
/**
 * Deletes the data points matched by the posted query JSON.  The body uses
 * the same format as a data point query; each contained QueryMetric is
 * passed to the datastore's delete.  Returns 204 on success, 400 for parse
 * or validation errors, 500 otherwise.
 */
@POST
@Produces(MediaType.APPLICATION_JSON + "; charset=UTF-8")
@Path("/datapoints/delete")
public Response delete(String json) throws Exception
{
    checkServerType(ServerType.DELETE, "/datapoints/delete", "POST");
    checkNotNull(json);
    logger.debug(json);

    try
    {
        List<QueryMetric> queries = queryParser.parseQueryMetric(json).getQueryMetrics();

        for (QueryMetric query : queries)
        {
            datastore.delete(query);
        }

        return setHeaders(Response.status(Response.Status.NO_CONTENT)).build();
    }
    catch (JsonSyntaxException | QueryException e)
    {
        JsonResponseBuilder builder = new JsonResponseBuilder(Response.Status.BAD_REQUEST);
        return builder.addError(e.getMessage()).build();
    }
    catch (BeanValidationException e)
    {
        JsonResponseBuilder builder = new JsonResponseBuilder(Response.Status.BAD_REQUEST);
        return builder.addErrors(e.getErrorMessages()).build();
    }
    catch (MemoryMonitorException e)
    {
        logger.error("Query failed.", e);
        System.gc();
        return setHeaders(Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(new ErrorResponse(e.getMessage()))).build();
    }
    catch (Exception e)
    {
        logger.error("Delete failed.", e);
        return setHeaders(Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(new ErrorResponse(e.getMessage()))).build();
    }
    // OutOfMemoryError does not extend Exception, so this is reachable.
    catch (OutOfMemoryError e)
    {
        logger.error("Out of memory error.", e);
        return setHeaders(Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(new ErrorResponse(e.getMessage()))).build();
    }
}
/**
 * Builds the standard response for a CORS preflight (OPTIONS) request,
 * echoing the requested headers and method back to the browser.
 *
 * <p>Fix: the allowed-method header was previously emitted as
 * "Access-Control-Allow_Method" (underscore, singular), which is not a
 * real header — browsers ignore it and preflights for non-simple methods
 * fail.  The CORS specification defines "Access-Control-Allow-Methods".
 *
 * @param requestHeaders value of Access-Control-Request-Headers; echoed back
 * @param requestMethod  value of Access-Control-Request-Method; may be null
 */
public static ResponseBuilder getCorsPreflightResponseBuilder(final String requestHeaders,
        final String requestMethod)
{
    ResponseBuilder responseBuilder = Response.status(Response.Status.OK);
    responseBuilder.header("Access-Control-Allow-Origin", "*");
    responseBuilder.header("Access-Control-Allow-Headers", requestHeaders);
    responseBuilder.header("Access-Control-Max-Age", "86400"); // Cache for one day

    if (requestMethod != null)
    {
        responseBuilder.header("Access-Control-Allow-Methods", requestMethod);
    }

    return responseBuilder;
}
/**
 * CORS preflight handler for the /metric/{metricName} endpoint; requires
 * the DELETE server type to be enabled.
 */
@OPTIONS
@Produces(MediaType.APPLICATION_JSON + "; charset=UTF-8")
@Path("/metric/{metricName}")
public Response corsPreflightMetricDelete(@HeaderParam("Access-Control-Request-Headers") String requestHeaders,
        @HeaderParam("Access-Control-Request-Method") String requestMethod) throws InvalidServerTypeException
{
    checkServerType(ServerType.DELETE, "/metric/{metricName}", "OPTIONS");
    return getCorsPreflightResponseBuilder(requestHeaders, requestMethod).build();
}
/**
 * Deletes every data point ever written for the named metric.
 */
@DELETE
@Produces(MediaType.APPLICATION_JSON + "; charset=UTF-8")
@Path("/metric/{metricName}")
public Response metricDelete(@PathParam("metricName") String metricName) throws Exception
{
    checkServerType(ServerType.DELETE, "/metric/{metricName}", "DELETE");
    try
    {
        // Full time range: Long.MIN_VALUE .. Long.MAX_VALUE covers all data.
        datastore.delete(new QueryMetric(Long.MIN_VALUE, Long.MAX_VALUE, 0, metricName));

        return setHeaders(Response.status(Response.Status.NO_CONTENT)).build();
    }
    catch (Exception e)
    {
        logger.error("Delete failed.", e);
        return setHeaders(Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(new ErrorResponse(e.getMessage()))).build();
    }
}
/**
 * Convenience overload of {@link #executeNameQuery(NameType, String)} with
 * no prefix filter.
 */
private Response executeNameQuery(NameType type)
{
	return executeNameQuery(type, null);
}
/**
 * Runs a name query (metric names, tag keys or tag values) against the
 * datastore and streams the result set as JSON.
 *
 * @param type   which name listing to fetch
 * @param prefix prefix filter, only used for METRIC_NAMES; may be null
 */
private Response executeNameQuery(NameType type, String prefix)
{
    try
    {
        Iterable<String> values = null;
        switch (type)
        {
            case METRIC_NAMES:
                values = datastore.getMetricNames(prefix);
                break;
            case TAG_KEYS:
                values = datastore.getTagNames();
                break;
            case TAG_VALUES:
                values = datastore.getTagValues();
                break;
        }

        ResponseBuilder responseBuilder = Response.status(Response.Status.OK)
                .entity(new ValuesStreamingOutput(formatters.get("json"), values));
        return setHeaders(responseBuilder).build();
    }
    catch (Exception e)
    {
        logger.error("Failed to get " + type, e);
        return Response.status(Response.Status.INTERNAL_SERVER_ERROR).entity(
                new ErrorResponse(e.getMessage())).build();
    }
}
/**
 * Reports ingest throughput counters and any aggregated query statistics;
 * invoked periodically by the metrics reporting framework.
 *
 * @param now timestamp to stamp on the reported data points
 */
@Override
public List<DataPointSet> getMetrics(long now)
{
    List<DataPointSet> ret = new ArrayList<>();

    // Atomically read-and-reset the counters accumulated by add().
    int ingestTime = m_ingestTime.getAndSet(0);
    int ingestCount = m_ingestedDataPoints.getAndSet(0);

    if (ingestCount != 0)
    {
        DataPointSet countSet = new DataPointSet(INGEST_COUNT);
        DataPointSet timeSet = new DataPointSet(INGEST_TIME);

        countSet.addTag("host", hostName);
        timeSet.addTag("host", hostName);

        countSet.addDataPoint(m_longDataPointFactory.createDataPoint(now, ingestCount));
        timeSet.addDataPoint(m_longDataPointFactory.createDataPoint(now, ingestTime));

        ret.add(countSet);
        ret.add(timeSet);
    }

    // Flush aggregated query stats gathered by runQuery().
    for (Map.Entry<String, SimpleStats> entry : m_statsMap.getStatsMap().entrySet())
    {
        m_simpleStatsReporter.reportStats(entry.getValue().getAndClear(), now,
                entry.getKey(), ret);
    }

    return ret;
}
public static class ValuesStreamingOutput implements StreamingOutput
{
private DataFormatter m_formatter;
private Iterable<String> m_values;
public ValuesStreamingOutput(DataFormatter formatter, Iterable<String> values)
{
m_formatter = formatter;
m_values = values;
}
@SuppressWarnings("ResultOfMethodCallIgnored")
public void write(OutputStream output) throws IOException, WebApplicationException
{
Writer writer = new OutputStreamWriter(output, "UTF-8");
try
{
m_formatter.format(writer, m_values);
}
catch (FormatterException e)
{
logger.error("Description of what failed:", e);
}
writer.flush();
}
}
/**
 * StreamingOutput that copies a temporary response file to the client and
 * deletes the file afterwards, whether or not the copy succeeded.
 */
public static class FileStreamingOutput implements StreamingOutput
{
    private final File m_responseFile;

    public FileStreamingOutput(File responseFile)
    {
        m_responseFile = responseFile;
    }

    @SuppressWarnings("ResultOfMethodCallIgnored")
    @Override
    public void write(OutputStream output) throws IOException, WebApplicationException
    {
        try (InputStream source = new FileInputStream(m_responseFile))
        {
            byte[] copyBuffer = new byte[1024];
            int bytesRead;
            while ((bytesRead = source.read(copyBuffer)) != -1)
            {
                output.write(copyBuffer, 0, bytesRead);
            }
            output.flush();
        }
        finally
        {
            // The temp file is single-use; always remove it.
            m_responseFile.delete();
        }
    }
}
}
| apache-2.0 |
wakim/push-example | PrimeFaces/src/com/primefaces/pusher/service/PusherService.java | 1022 | package com.primefaces.pusher.service;
import java.io.IOException;
import org.apache.http.HttpResponse;
import org.apache.http.ParseException;
import org.apache.http.util.EntityUtils;
import org.primefaces.json.JSONObject;
import com.pusher.Pusher;
/**
 * Thin wrapper around the Pusher REST client that raises
 * IllegalStateException when an event cannot be triggered.
 */
public class PusherService {

	/** Triggers a Pusher event with no socket-id exclusion. */
	public void triggerEvent(String channelName, String eventName, JSONObject data) throws IllegalStateException {
		triggerEvent(channelName, eventName, data, null);
	}

	/**
	 * Triggers a Pusher event on the given channel.
	 *
	 * @param channelName channel to publish on
	 * @param eventName   event name
	 * @param data        event payload, serialized with toString()
	 * @param socketId    socket id to exclude from delivery; may be null
	 * @throws IllegalStateException when Pusher responds with a non-200 status;
	 *         the message includes the response body when it can be read
	 */
	public void triggerEvent(String channelName, String eventName, JSONObject data, String socketId) throws IllegalStateException {
		HttpResponse res = Pusher.triggerPush(channelName, eventName, data.toString(), socketId);

		if (res.getStatusLine().getStatusCode() != 200) {
			try {
				throw new IllegalStateException(res.toString() + "\nBody: " + EntityUtils.toString(res.getEntity()));
			} catch (ParseException | IOException e) {
				// The two previously duplicated catch blocks were collapsed
				// into one multi-catch; fall back to the status line alone
				// when the body cannot be read.
				throw new IllegalStateException(res.toString());
			}
		}
	}
}
| apache-2.0 |
zhanhongbo1112/trunk | yqboots-menu/yqboots-menu-core/src/main/java/com/yqboots/menu/core/convert/MenuItemGroupsConverter.java | 2304 | /*
*
* * Copyright 2015-2016 the original author or authors.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package com.yqboots.menu.core.convert;
import com.yqboots.menu.core.MenuItem;
import org.springframework.core.convert.converter.Converter;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
/**
 * Converts a flat list of MenuItem into a Map keyed by menu group,
 * representing the hierarchy of menu group, menu item group and menu item.
 * <p>It makes 2 levels of menus; the developer should provide the menuGroup
 * field in MenuItem. Insertion order of groups is preserved
 * (LinkedHashMap).</p>
 *
 * @author Eric H B Zhan
 * @since 1.0.0
 */
public class MenuItemGroupsConverter implements Converter<List<MenuItem>, Map<String, List<MenuItem>>> {
    /**
     * Convert the source object of type {@code S} to target type {@code T}.
     *
     * @param source the source object to convert, which must be an instance of {@code S} (never {@code null})
     * @return the converted object, which must be an instance of {@code T} (potentially {@code null})
     * @throws IllegalArgumentException if the source cannot be converted to the desired target type
     */
    @Override
    public Map<String, List<MenuItem>> convert(final List<MenuItem> source) {
        final Map<String, List<MenuItem>> results = new LinkedHashMap<>();
        for (final MenuItem menuItem : source) {
            final String menuGroupKey = menuItem.getMenuGroup();
            // Single map lookup instead of the previous containsKey()+get()
            // pair; values are never null, so a null result means "absent".
            List<MenuItem> items = results.get(menuGroupKey);
            if (items == null) {
                items = new ArrayList<>();
                results.put(menuGroupKey, items);
            }
            items.add(menuItem);
        }

        return results;
    }
}
| apache-2.0 |
huntxu/neutron | neutron/agent/linux/interface.py | 19111 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import time
import netaddr
from neutron_lib import constants
from oslo_log import log as logging
import six
from neutron.agent.common import ovs_lib
from neutron.agent.linux import ip_lib
from neutron.common import constants as n_const
from neutron.common import exceptions
LOG = logging.getLogger(__name__)
def _get_veth(name1, name2, namespace2):
    """Return IPDevice wrappers for both ends of a veth pair.

    The first device is addressed in the root namespace, the second in
    ``namespace2``.
    """
    return (ip_lib.IPDevice(name1),
            ip_lib.IPDevice(name2, namespace=namespace2))
@six.add_metaclass(abc.ABCMeta)
class LinuxInterfaceDriver(object):
DEV_NAME_LEN = n_const.LINUX_DEV_LEN
DEV_NAME_PREFIX = constants.TAP_DEVICE_PREFIX
def __init__(self, conf):
self.conf = conf
self._mtu_update_warn_logged = False
@property
def use_gateway_ips(self):
"""Whether to use gateway IPs instead of unique IP allocations.
In each place where the DHCP agent runs, and for each subnet for
which DHCP is handling out IP addresses, the DHCP port needs -
at the Linux level - to have an IP address within that subnet.
Generally this needs to be a unique Neutron-allocated IP
address, because the subnet's underlying L2 domain is bridged
across multiple compute hosts and network nodes, and for HA
there may be multiple DHCP agents running on that same bridged
L2 domain.
However, if the DHCP ports - on multiple compute/network nodes
but for the same network - are _not_ bridged to each other,
they do not need each to have a unique IP address. Instead
they can all share the same address from the relevant subnet.
This works, without creating any ambiguity, because those
ports are not all present on the same L2 domain, and because
no data within the network is ever sent to that address.
(DHCP requests are broadcast, and it is the network's job to
ensure that such a broadcast will reach at least one of the
available DHCP servers. DHCP responses will be sent _from_
the DHCP port address.)
Specifically, for networking backends where it makes sense,
the DHCP agent allows all DHCP ports to use the subnet's
gateway IP address, and thereby to completely avoid any unique
IP address allocation. This behaviour is selected by running
the DHCP agent with a configured interface driver whose
'use_gateway_ips' property is True.
When an operator deploys Neutron with an interface driver that
makes use_gateway_ips True, they should also ensure that a
gateway IP address is defined for each DHCP-enabled subnet,
and that the gateway IP address doesn't change during the
subnet's lifetime.
"""
return False
def init_l3(self, device_name, ip_cidrs, namespace=None,
preserve_ips=None, clean_connections=False):
"""Set the L3 settings for the interface using data from the port.
ip_cidrs: list of 'X.X.X.X/YY' strings
preserve_ips: list of ip cidrs that should not be removed from device
clean_connections: Boolean to indicate if we should cleanup connections
associated to removed ips
"""
preserve_ips = preserve_ips or []
device = ip_lib.IPDevice(device_name, namespace=namespace)
# The LLA generated by the operating system is not known to
# Neutron, so it would be deleted if we added it to the 'previous'
# list here
default_ipv6_lla = ip_lib.get_ipv6_lladdr(device.link.address)
cidrs = set()
remove_ips = set()
# normalize all the IP addresses first
for ip_cidr in ip_cidrs:
net = netaddr.IPNetwork(ip_cidr)
# Convert to compact IPv6 address because the return values of
# "ip addr list" are compact.
if net.version == 6:
ip_cidr = str(net)
cidrs.add(ip_cidr)
# Determine the addresses that must be added and removed
for address in device.addr.list():
cidr = address['cidr']
dynamic = address['dynamic']
# skip the IPv6 link-local
if cidr == default_ipv6_lla:
# it's already configured, leave it alone
cidrs.discard(cidr)
continue
if cidr in preserve_ips:
continue
# Statically created addresses are OK, dynamically created
# addresses must be removed and replaced
if cidr in cidrs and not dynamic:
cidrs.remove(cidr)
continue
remove_ips.add(cidr)
# Clean up any old addresses. This must be done first since there
# could be a dynamic address being replaced with a static one.
for ip_cidr in remove_ips:
if clean_connections:
device.delete_addr_and_conntrack_state(ip_cidr)
else:
device.addr.delete(ip_cidr)
# add any new addresses
for ip_cidr in cidrs:
device.addr.add(ip_cidr)
def init_router_port(self,
device_name,
ip_cidrs,
namespace,
preserve_ips=None,
extra_subnets=None,
clean_connections=False):
"""Set the L3 settings for a router interface using data from the port.
ip_cidrs: list of 'X.X.X.X/YY' strings
preserve_ips: list of ip cidrs that should not be removed from device
clean_connections: Boolean to indicate if we should cleanup connections
associated to removed ips
extra_subnets: An iterable of cidrs to add as routes without address
"""
LOG.debug("init_router_port: device_name(%s), namespace(%s)",
device_name, namespace)
self.init_l3(device_name=device_name,
ip_cidrs=ip_cidrs,
namespace=namespace,
preserve_ips=preserve_ips or [],
clean_connections=clean_connections)
device = ip_lib.IPDevice(device_name, namespace=namespace)
# Manage on-link routes (routes without an associated address)
new_onlink_cidrs = set(s['cidr'] for s in extra_subnets or [])
v4_onlink = device.route.list_onlink_routes(constants.IP_VERSION_4)
v6_onlink = device.route.list_onlink_routes(constants.IP_VERSION_6)
existing_onlink_cidrs = set(r['cidr'] for r in v4_onlink + v6_onlink)
for route in new_onlink_cidrs - existing_onlink_cidrs:
LOG.debug("adding onlink route(%s)", route)
device.route.add_onlink_route(route)
for route in (existing_onlink_cidrs - new_onlink_cidrs -
set(preserve_ips or [])):
LOG.debug("deleting onlink route(%s)", route)
device.route.delete_onlink_route(route)
def add_ipv6_addr(self, device_name, v6addr, namespace, scope='global'):
device = ip_lib.IPDevice(device_name,
namespace=namespace)
net = netaddr.IPNetwork(v6addr)
device.addr.add(str(net), scope)
def delete_ipv6_addr(self, device_name, v6addr, namespace):
device = ip_lib.IPDevice(device_name,
namespace=namespace)
device.delete_addr_and_conntrack_state(v6addr)
def delete_ipv6_addr_with_prefix(self, device_name, prefix, namespace):
"""Delete the first listed IPv6 address that falls within a given
prefix.
"""
device = ip_lib.IPDevice(device_name, namespace=namespace)
net = netaddr.IPNetwork(prefix)
for address in device.addr.list(scope='global', filters=['permanent']):
ip_address = netaddr.IPNetwork(address['cidr'])
if ip_address in net:
device.delete_addr_and_conntrack_state(address['cidr'])
break
def get_ipv6_llas(self, device_name, namespace):
device = ip_lib.IPDevice(device_name,
namespace=namespace)
return device.addr.list(scope='link', ip_version=6)
def check_bridge_exists(self, bridge):
if not ip_lib.device_exists(bridge):
raise exceptions.BridgeDoesNotExist(bridge=bridge)
def get_device_name(self, port):
return (self.DEV_NAME_PREFIX + port.id)[:self.DEV_NAME_LEN]
def remove_vlan_tag(self, bridge, interface_name):
"""Remove vlan tag from given interface.
This method is necessary only for the case when deprecated
option 'external_network_bridge' is used in L3 agent as
external gateway port is then created in this external bridge
directly and it will have DEAD_VLAN_TAG added by default.
"""
# TODO(slaweq): remove it when external_network_bridge option will be
# removed
@staticmethod
def configure_ipv6_ra(namespace, dev_name, value):
"""Configure handling of IPv6 Router Advertisements on an
interface. See common/constants.py for possible values.
"""
cmd = ['net.ipv6.conf.%(dev)s.accept_ra=%(value)s' % {'dev': dev_name,
'value': value}]
ip_lib.sysctl(cmd, namespace=namespace)
@staticmethod
def configure_ipv6_forwarding(namespace, dev_name, enabled):
"""Configure IPv6 forwarding on an interface."""
cmd = ['net.ipv6.conf.%(dev)s.forwarding=%(enabled)s' %
{'dev': dev_name, 'enabled': int(enabled)}]
ip_lib.sysctl(cmd, namespace=namespace)
    @abc.abstractmethod
    def plug_new(self, network_id, port_id, device_name, mac_address,
                 bridge=None, namespace=None, prefix=None, mtu=None):
        """Plug in the interface only for new devices that don't exist yet.

        Subclasses create ``device_name`` with ``mac_address`` (optionally in
        ``namespace`` and attached to ``bridge``) and apply ``mtu`` if given.
        Called from :meth:`plug` when the device is absent.
        """
def plug(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None, mtu=None):
if not ip_lib.device_exists(device_name,
namespace=namespace):
self.plug_new(network_id, port_id, device_name, mac_address,
bridge, namespace, prefix, mtu)
else:
LOG.info("Device %s already exists", device_name)
if mtu:
self.set_mtu(
device_name, mtu, namespace=namespace, prefix=prefix)
else:
LOG.warning("No MTU configured for port %s", port_id)
    @abc.abstractmethod
    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        """Unplug the interface.

        Subclasses remove ``device_name`` (and any backend port it is
        attached to) from the system.
        """
    @property
    def bridged(self):
        """Whether the DHCP port is bridged to the VM TAP interfaces.

        When the DHCP port is bridged to the TAP interfaces for the
        VMs for which it is providing DHCP service - as is the case
        for most Neutron network implementations - the DHCP server
        only needs to listen on the DHCP port, and will still receive
        DHCP requests from all the relevant VMs.

        If the DHCP port is not bridged to the relevant VM TAP
        interfaces, the DHCP server needs to listen explicitly on
        those TAP interfaces, and to treat those as aliases of the
        DHCP port where the IP subnet is defined.

        The base implementation always reports True; non-bridging
        drivers override this.
        """
        return True
def set_mtu(self, device_name, mtu, namespace=None, prefix=None):
"""Set MTU on the interface."""
if not self._mtu_update_warn_logged:
LOG.warning("Interface driver cannot update MTU for ports")
self._mtu_update_warn_logged = True
class NullDriver(LinuxInterfaceDriver):
    """No-op interface driver: plugging and unplugging do nothing."""

    def plug_new(self, network_id, port_id, device_name, mac_address,
                 bridge=None, namespace=None, prefix=None, mtu=None):
        # Intentionally a no-op.
        pass

    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        # Intentionally a no-op.
        pass
class OVSInterfaceDriver(LinuxInterfaceDriver):
    """Driver for creating an internal interface on an OVS bridge."""

    DEV_NAME_PREFIX = constants.TAP_DEVICE_PREFIX

    def __init__(self, conf):
        # In veth mode the namespace-side device is named 'ns-*' while the
        # OVS-side end of the pair keeps the 'tap' prefix.
        super(OVSInterfaceDriver, self).__init__(conf)
        if self.conf.ovs_use_veth:
            self.DEV_NAME_PREFIX = 'ns-'

    def _get_tap_name(self, dev_name, prefix=None):
        # Translate a namespace-side name ('ns-*' or custom prefix) into the
        # corresponding OVS-side 'tap*' name when veth pairs are in use.
        if self.conf.ovs_use_veth:
            dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX,
                                        constants.TAP_DEVICE_PREFIX)
        return dev_name

    def _ovs_add_port(self, bridge, device_name, port_id, mac_address,
                      internal=True):
        # Record the Neutron port identity in OVSDB external_ids so the port
        # in OVS can be mapped back to its Neutron port.
        attrs = [('external_ids', {'iface-id': port_id,
                                   'iface-status': 'active',
                                   'attached-mac': mac_address})]
        if internal:
            attrs.insert(0, ('type', 'internal'))

        ovs = ovs_lib.OVSBridge(bridge)
        ovs.replace_port(device_name, *attrs)

    def remove_vlan_tag(self, bridge, interface):
        # Clear the port's VLAN tag column in OVSDB (see the base-class
        # docstring for why this exists).
        ovs = ovs_lib.OVSBridge(bridge)
        ovs.clear_db_attribute("Port", interface, "tag")

    def plug_new(self, network_id, port_id, device_name, mac_address,
                 bridge=None, namespace=None, prefix=None, mtu=None):
        """Plug in the interface."""
        if not bridge:
            bridge = self.conf.ovs_integration_bridge

        self.check_bridge_exists(bridge)

        ip = ip_lib.IPWrapper()
        tap_name = self._get_tap_name(device_name, prefix)

        if self.conf.ovs_use_veth:
            # Create ns_dev in a namespace if one is configured.
            root_dev, ns_dev = ip.add_veth(tap_name,
                                           device_name,
                                           namespace2=namespace)
            root_dev.disable_ipv6()
        else:
            ns_dev = ip.device(device_name)

        internal = not self.conf.ovs_use_veth
        self._ovs_add_port(bridge, tap_name, port_id, mac_address,
                           internal=internal)
        for i in range(9):
            # workaround for the OVS shy port syndrome. ports sometimes
            # hide for a bit right after they are first created.
            # see bug/1618987
            try:
                ns_dev.link.set_address(mac_address)
                break
            except RuntimeError as e:
                LOG.warning("Got error trying to set mac, retrying: %s",
                            str(e))
                time.sleep(1)
        else:
            # didn't break, we give it one last shot without catching
            ns_dev.link.set_address(mac_address)

        # Add an interface created by ovs to the namespace.
        if not self.conf.ovs_use_veth and namespace:
            namespace_obj = ip.ensure_namespace(namespace)
            namespace_obj.add_device_to_namespace(ns_dev)

        # NOTE(ihrachys): the order here is significant: we must set MTU after
        # the device is moved into a namespace, otherwise OVS bridge does not
        # allow to set MTU that is higher than the least of all device MTUs on
        # the bridge
        if mtu:
            self.set_mtu(device_name, mtu, namespace=namespace, prefix=prefix)
        else:
            LOG.warning("No MTU configured for port %s", port_id)

        ns_dev.link.set_up()
        if self.conf.ovs_use_veth:
            root_dev.link.set_up()

    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        """Unplug the interface."""
        if not bridge:
            bridge = self.conf.ovs_integration_bridge
        tap_name = self._get_tap_name(device_name, prefix)
        self.check_bridge_exists(bridge)
        ovs = ovs_lib.OVSBridge(bridge)

        try:
            ovs.delete_port(tap_name)
            if self.conf.ovs_use_veth:
                device = ip_lib.IPDevice(device_name, namespace=namespace)
                device.link.delete()
                LOG.debug("Unplugged interface '%s'", device_name)
        except RuntimeError:
            LOG.error("Failed unplugging interface '%s'",
                      device_name)

    def set_mtu(self, device_name, mtu, namespace=None, prefix=None):
        # In veth mode both ends of the pair must carry the new MTU.
        if self.conf.ovs_use_veth:
            tap_name = self._get_tap_name(device_name, prefix)
            root_dev, ns_dev = _get_veth(
                tap_name, device_name, namespace2=namespace)
            root_dev.link.set_mtu(mtu)
        else:
            ns_dev = ip_lib.IPWrapper(namespace=namespace).device(device_name)
        ns_dev.link.set_mtu(mtu)
class BridgeInterfaceDriver(LinuxInterfaceDriver):
    """Driver for creating bridge interfaces."""

    DEV_NAME_PREFIX = 'ns-'

    def plug_new(self, network_id, port_id, device_name, mac_address,
                 bridge=None, namespace=None, prefix=None, mtu=None):
        """Plugin the interface."""
        ip = ip_lib.IPWrapper()

        # Enable agent to define the prefix
        tap_name = device_name.replace(prefix or self.DEV_NAME_PREFIX,
                                       constants.TAP_DEVICE_PREFIX)
        # Create ns_veth in a namespace if one is configured.
        root_veth, ns_veth = ip.add_veth(tap_name, device_name,
                                         namespace2=namespace)
        root_veth.disable_ipv6()
        ns_veth.link.set_address(mac_address)

        if mtu:
            self.set_mtu(device_name, mtu, namespace=namespace, prefix=prefix)
        else:
            LOG.warning("No MTU configured for port %s", port_id)

        root_veth.link.set_up()
        ns_veth.link.set_up()

    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        """Unplug the interface."""
        # Deleting one end of the veth pair removes its peer as well.
        device = ip_lib.IPDevice(device_name, namespace=namespace)
        try:
            device.link.delete()
            LOG.debug("Unplugged interface '%s'", device_name)
        except RuntimeError:
            LOG.error("Failed unplugging interface '%s'",
                      device_name)

    def set_mtu(self, device_name, mtu, namespace=None, prefix=None):
        # Both ends of the veth pair must carry the same MTU.
        tap_name = device_name.replace(prefix or self.DEV_NAME_PREFIX,
                                       constants.TAP_DEVICE_PREFIX)
        root_dev, ns_dev = _get_veth(
            tap_name, device_name, namespace2=namespace)
        root_dev.link.set_mtu(mtu)
        ns_dev.link.set_mtu(mtu)
| apache-2.0 |
mohiva/silhouette | modules/authenticator/src/main/scala/silhouette/authenticator/pipeline/package.scala | 959 | /**
* Licensed to the Minutemen Group under one or more contributor license
* agreements. See the COPYRIGHT file distributed with this work for
* additional information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package silhouette.authenticator
/**
 * An authenticator pipeline represents a single step in the authentication process; a pipeline may
 * itself be composed of multiple smaller steps.
 */
package object pipeline
| apache-2.0 |
thomasmaurel/ensj-healthcheck | src/org/ensembl/healthcheck/testgroup/Schema.java | 2167 | /*
* Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
* Copyright [2016-2018] EMBL-European Bioinformatics Institute
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ensembl.healthcheck.testgroup;
import org.ensembl.healthcheck.GroupOfTests;
/**
* These are the tests that checking schema and mysql. The tests are:
*
* <ul>
* <li> org.ensembl.healthcheck.testcase.generic.AnalyseTables </li>
* <li> org.ensembl.healthcheck.testcase.generic.AutoIncrement </li>
* <li> org.ensembl.healthcheck.testcase.generic.CompareSchema </li>
* <li> org.ensembl.healthcheck.testcase.generic.MySQLStorageEngine </li>
* <li> org.ensembl.healthcheck.testcase.generic.PartitionedTables </li>
* <li> org.ensembl.healthcheck.testcase.generic.SchemaType </li>
* <li> org.ensembl.healthcheck.testcase.generic.SingleDBCollations </li>
* </ul>
*
* @author Thibaut Hourlier
*
*/
public class Schema extends GroupOfTests {

	/**
	 * Registers each schema/MySQL healthcheck (listed in the class javadoc)
	 * with this group.
	 */
	public Schema() {
		addTest(
			org.ensembl.healthcheck.testcase.generic.AnalyseTables.class,
			org.ensembl.healthcheck.testcase.generic.AutoIncrement.class,
			org.ensembl.healthcheck.testcase.generic.CompareSchema.class,
			org.ensembl.healthcheck.testcase.generic.MySQLStorageEngine.class,
			org.ensembl.healthcheck.testcase.generic.PartitionedTables.class,
			org.ensembl.healthcheck.testcase.generic.SchemaType.class,
			org.ensembl.healthcheck.testcase.generic.SingleDBCollations.class
		);
	}
}
| apache-2.0 |
linghp/ZLFAsist | PdfPlugin/src/com/artifex/mupdfdemo/WidgetType.java | 101 | package com.artifex.mupdfdemo;
/**
 * Kind of interactive PDF form widget. Constant order is significant
 * (callers may rely on ordinal values) — do not reorder.
 */
public enum WidgetType {
	NONE,     // not a form widget
	TEXT,     // free-text entry field
	LISTBOX,  // list selection
	COMBOBOX  // drop-down selection
}
| apache-2.0 |
michaell-nz/SnakeGame | RPi.SenseHat/RPi.SenseHat.Demo/Demos/Arena.cs | 1713 | using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Linq.Expressions;
namespace SnakeLogic
{
	/// <summary>
	/// Kind of food occupying an arena cell; <c>None</c> marks an empty cell.
	/// </summary>
	public enum Food
	{
		None = 0,
		Apple,
		Orange
	}
public class Arena
{
static object _locker = new object();
public int Width { get; private set; }
public int Height { get; private set; }
public SnakeModel Snake { get; set; }
public Food[,] Cells { get; private set; }
private Random random = new Random();
public Arena(int width, int height)
{
Width = width;
Height = height;
Cells = new Food[width, height];
Snake = new SnakeModel(this);
}
/// <summary>
/// Interface 1: called by a timer to move
/// </summary>
public void Update()
{
lock (_locker)
{
Snake.Move();
if (random.Next(10) <= 4)
{
CreateFood();
}
}
}
/// <summary>
/// Interface 1.5: GET data of area, and the
/// </summary>
/// <returns></returns>
public Tuple<int[,], Point> GetData()
{
Point foodPoint = null;
for (int i = 0; i < Width;i++ )
for (int j = 0; j < Height; j++)
{
if (Cells[i, j] != Food.None)
{
foodPoint = new Point(i, j);
break;
}
}
return new Tuple<int[,],Point>(Snake.GetData(), foodPoint);
}
/// <summary>
/// Interface 2: change direction
/// </summary>
public void ChangeDirection(Direction direction)
{
Snake.ChangeDirection(direction);
}
private void CreateFood()
{
foreach (var cell in Cells)
{
if (cell != Food.None)
return;
}
//Cells[random.Next(0, Width), random.Next(0, Height)] = (Food)random.Next(1, 3);
Cells[random.Next(0, Width), random.Next(0, Height)] = Food.Orange;
}
}
}
| apache-2.0 |
guardian/discussion-avatar | api/src/main/scala/com/gu/adapters/config/Config.scala | 2473 | package com.gu.adapters.config
import com.amazonaws.regions.{Region, Regions}
import com.gu.adapters.http.AvatarServletProperties
import com.gu.adapters.notifications.SnsProperties
import com.gu.adapters.queue.SqsDeletionConsumerProps
import com.gu.core.store.StoreProperties
import com.typesafe.config.{ConfigFactory, Config => TypesafeConfig}
case class ElkConfig(enabled: Boolean, streamName: String, region: String, stage: String)
/**
 * Top-level application configuration aggregating the per-subsystem property groups.
 */
case class Config(
  avatarServletProperties: AvatarServletProperties,
  storeProperties: StoreProperties,
  deletionEventsProps: SqsDeletionConsumerProps,
  elkConfig: ElkConfig,
  identityConfig: IdentityConfig
) {
  // SNS publishing reuses the store's AWS region and the servlet's topic ARN.
  val snsProperties = SnsProperties(storeProperties.awsRegion, avatarServletProperties.snsTopicArn)
}
object Config {

  // Page size used for every listing endpoint (servlet and store agree on it).
  private val pageSize = 10

  // Name of the secure session cookie — presumably consumed by the identity
  // integration; confirm the usage site before changing.
  val secureCookie = "SC_GU_U"

  /** Load configuration from the default Typesafe config sources. */
  def apply(): Config = {
    apply(ConfigFactory.load())
  }

  /** Build the aggregate [[Config]] from an already-loaded Typesafe config. */
  def apply(conf: TypesafeConfig): Config =
    Config(
      avatarServletProperties(conf),
      storeProperties(conf),
      deletionEventsProps(conf),
      elkConfig(conf),
      IdentityConfig.fromTypesafeConfig(conf)
    )

  // SQS consumer for avatar-deletion events.
  private def deletionEventsProps(conf: TypesafeConfig): SqsDeletionConsumerProps = {
    SqsDeletionConsumerProps(conf.getString("aws.sqs.deleted.url"), conf.getString("aws.region") )
  }

  // S3 buckets, DynamoDB table and index names used by the avatar store.
  protected def storeProperties(conf: TypesafeConfig): StoreProperties =
    StoreProperties(
      awsRegion = Region.getRegion(Regions.fromName(conf.getString("aws.region"))),
      kvTable = conf.getString("aws.dynamodb.table"),
      fsIncomingBucket = conf.getString("aws.s3.incoming"),
      pageSize = pageSize,
      fsProcessedBucket = conf.getString("aws.s3.processed"),
      fsPublicBucket = conf.getString("aws.s3.public"),
      fsRawBucket = conf.getString("aws.s3.raw"),
      kvStatusIndex = "status-index",
      kvUserIndex = "user-id-index"
    )

  private def avatarServletProperties(conf: TypesafeConfig): AvatarServletProperties =
    AvatarServletProperties(
      apiKeys = conf.getString("api.keys").split(',').toList,
      apiUrl = conf.getString("api.baseUrl") + "/v1",
      pageSize = pageSize,
      snsTopicArn = conf.getString("aws.sns.topic.arn")
    )

  private def elkConfig(conf: TypesafeConfig): ElkConfig = {
    ElkConfig(
      conf.getString("elk.logging.enabled").toBoolean,
      conf.getString("elk.logging.stream"),
      conf.getString("aws.region"),
      conf.getString("stage")
    )
  }
}
| apache-2.0 |
mhurne/aws-sdk-java | aws-java-sdk-ssm/src/main/java/com/amazonaws/services/simplesystemsmanagement/model/transform/ListAssociationsRequestMarshaller.java | 4152 | /*
* Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights
* Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.simplesystemsmanagement.model.transform;
import java.io.ByteArrayInputStream;
import java.util.Collections;
import java.util.Map;
import java.util.List;
import java.util.regex.Pattern;
import com.amazonaws.AmazonClientException;
import com.amazonaws.Request;
import com.amazonaws.DefaultRequest;
import com.amazonaws.http.HttpMethodName;
import com.amazonaws.services.simplesystemsmanagement.model.*;
import com.amazonaws.transform.Marshaller;
import com.amazonaws.util.BinaryUtils;
import com.amazonaws.util.StringUtils;
import com.amazonaws.util.IdempotentUtils;
import com.amazonaws.util.StringInputStream;
import com.amazonaws.util.json.*;
/**
 * ListAssociationsRequest Marshaller
 *
 * Serializes a {@link ListAssociationsRequest} into the JSON body of an
 * AmazonSSM.ListAssociations HTTP POST request. (Generated code.)
 */
public class ListAssociationsRequestMarshaller implements
        Marshaller<Request<ListAssociationsRequest>, ListAssociationsRequest> {

    public Request<ListAssociationsRequest> marshall(
            ListAssociationsRequest listAssociationsRequest) {

        if (listAssociationsRequest == null) {
            throw new AmazonClientException(
                    "Invalid argument passed to marshall(...)");
        }

        Request<ListAssociationsRequest> request = new DefaultRequest<ListAssociationsRequest>(
                listAssociationsRequest, "AWSSimpleSystemsManagement");
        // SSM multiplexes all operations on one endpoint; the X-Amz-Target
        // header selects the operation.
        request.addHeader("X-Amz-Target", "AmazonSSM.ListAssociations");

        request.setHttpMethod(HttpMethodName.POST);

        request.setResourcePath("");

        try {
            final StructuredJsonGenerator jsonGenerator = SdkJsonProtocolFactory
                    .createWriter(false, "1.1");

            jsonGenerator.writeStartObject();

            // The filter list is written only if the caller set it explicitly
            // (a non-empty or non-auto-constructed list).
            com.amazonaws.internal.SdkInternalList<AssociationFilter> associationFilterListList = (com.amazonaws.internal.SdkInternalList<AssociationFilter>) listAssociationsRequest
                    .getAssociationFilterList();
            if (!associationFilterListList.isEmpty()
                    || !associationFilterListList.isAutoConstruct()) {
                jsonGenerator.writeFieldName("AssociationFilterList");
                jsonGenerator.writeStartArray();
                for (AssociationFilter associationFilterListListValue : associationFilterListList) {
                    if (associationFilterListListValue != null) {

                        AssociationFilterJsonMarshaller.getInstance().marshall(
                                associationFilterListListValue, jsonGenerator);
                    }
                }
                jsonGenerator.writeEndArray();
            }
            if (listAssociationsRequest.getMaxResults() != null) {
                jsonGenerator.writeFieldName("MaxResults").writeValue(
                        listAssociationsRequest.getMaxResults());
            }
            if (listAssociationsRequest.getNextToken() != null) {
                jsonGenerator.writeFieldName("NextToken").writeValue(
                        listAssociationsRequest.getNextToken());
            }

            jsonGenerator.writeEndObject();

            byte[] content = jsonGenerator.getBytes();
            request.setContent(new ByteArrayInputStream(content));
            request.addHeader("Content-Length",
                    Integer.toString(content.length));
            request.addHeader("Content-Type", jsonGenerator.getContentType());
        } catch (Throwable t) {
            throw new AmazonClientException(
                    "Unable to marshall request to JSON: " + t.getMessage(), t);
        }

        return request;
    }
}
| apache-2.0 |
googleapis/google-api-java-client-services | clients/google-api-services-container/v1/1.30.1/com/google/api/services/container/model/UpgradeEvent.java | 5580 | /*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.container.model;
/**
* UpgradeEvent is a notification sent to customers by the cluster server when a resource is
* upgrading.
*
* <p> This is the Java data model class that specifies how to parse/serialize into the JSON that is
* transmitted over HTTP when working with the Kubernetes Engine API. For a detailed explanation
* see:
* <a href="https://developers.google.com/api-client-library/java/google-http-java-client/json">https://developers.google.com/api-client-library/java/google-http-java-client/json</a>
* </p>
*
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public final class UpgradeEvent extends com.google.api.client.json.GenericJson {
/**
* Required. The current version before the upgrade.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String currentVersion;
/**
* Required. The operation associated with this upgrade.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String operation;
/**
* Required. The time when the operation was started.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private String operationStartTime;
/**
* Optional. Optional relative path to the resource. For example in node pool upgrades, the
* relative path of the node pool.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/**
* Required. The resource type that is upgrading.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String resourceType;
/**
* Required. The target version for the upgrade.
* The value may be {@code null}.
*/
@com.google.api.client.util.Key
private java.lang.String targetVersion;
/**
* Required. The current version before the upgrade.
* @return value or {@code null} for none
*/
public java.lang.String getCurrentVersion() {
return currentVersion;
}
/**
* Required. The current version before the upgrade.
* @param currentVersion currentVersion or {@code null} for none
*/
public UpgradeEvent setCurrentVersion(java.lang.String currentVersion) {
this.currentVersion = currentVersion;
return this;
}
/**
* Required. The operation associated with this upgrade.
* @return value or {@code null} for none
*/
public java.lang.String getOperation() {
return operation;
}
/**
* Required. The operation associated with this upgrade.
* @param operation operation or {@code null} for none
*/
public UpgradeEvent setOperation(java.lang.String operation) {
this.operation = operation;
return this;
}
/**
* Required. The time when the operation was started.
* @return value or {@code null} for none
*/
public String getOperationStartTime() {
return operationStartTime;
}
/**
* Required. The time when the operation was started.
* @param operationStartTime operationStartTime or {@code null} for none
*/
public UpgradeEvent setOperationStartTime(String operationStartTime) {
this.operationStartTime = operationStartTime;
return this;
}
/**
* Optional. Optional relative path to the resource. For example in node pool upgrades, the
* relative path of the node pool.
* @return value or {@code null} for none
*/
public java.lang.String getResource() {
return resource;
}
/**
* Optional. Optional relative path to the resource. For example in node pool upgrades, the
* relative path of the node pool.
* @param resource resource or {@code null} for none
*/
public UpgradeEvent setResource(java.lang.String resource) {
this.resource = resource;
return this;
}
/**
* Required. The resource type that is upgrading.
* @return value or {@code null} for none
*/
public java.lang.String getResourceType() {
return resourceType;
}
/**
* Required. The resource type that is upgrading.
* @param resourceType resourceType or {@code null} for none
*/
public UpgradeEvent setResourceType(java.lang.String resourceType) {
this.resourceType = resourceType;
return this;
}
/**
* Required. The target version for the upgrade.
* @return value or {@code null} for none
*/
public java.lang.String getTargetVersion() {
return targetVersion;
}
/**
* Required. The target version for the upgrade.
* @param targetVersion targetVersion or {@code null} for none
*/
public UpgradeEvent setTargetVersion(java.lang.String targetVersion) {
this.targetVersion = targetVersion;
return this;
}
@Override
public UpgradeEvent set(String fieldName, Object value) {
return (UpgradeEvent) super.set(fieldName, value);
}
@Override
public UpgradeEvent clone() {
return (UpgradeEvent) super.clone();
}
}
| apache-2.0 |
gracefullife/gerrit | gerrit-gwtexpui/src/main/java/com/google/gwtexpui/safehtml/client/SafeHtmlBuilder.java | 11105 | // Copyright (C) 2009 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gwtexpui.safehtml.client;
import com.google.gwt.core.client.GWT;
/**
* Safely constructs a {@link SafeHtml}, escaping user provided content.
*/
@SuppressWarnings("serial")
public class SafeHtmlBuilder extends SafeHtml {
private static final Impl impl;
static {
if (GWT.isClient()) {
impl = new ClientImpl();
} else {
impl = new ServerImpl();
}
}
private final BufferDirect dBuf;
private Buffer cb;
private BufferSealElement sBuf;
private AttMap att;
public SafeHtmlBuilder() {
cb = dBuf = new BufferDirect();
}
/** @return true if this builder has not had an append occur yet. */
public boolean isEmpty() {
return dBuf.isEmpty();
}
/** @return true if this builder has content appended into it. */
public boolean hasContent() {
return !isEmpty();
}
public SafeHtmlBuilder append(final boolean in) {
cb.append(in);
return this;
}
public SafeHtmlBuilder append(final char in) {
switch (in) {
case '&':
cb.append("&");
break;
case '>':
cb.append(">");
break;
case '<':
cb.append("<");
break;
case '"':
cb.append(""");
break;
case '\'':
cb.append("'");
break;
default:
cb.append(in);
break;
}
return this;
}
public SafeHtmlBuilder append(final int in) {
cb.append(in);
return this;
}
public SafeHtmlBuilder append(final long in) {
cb.append(in);
return this;
}
public SafeHtmlBuilder append(final float in) {
cb.append(in);
return this;
}
public SafeHtmlBuilder append(final double in) {
cb.append(in);
return this;
}
/** Append already safe HTML as-is, avoiding double escaping. */
public SafeHtmlBuilder append(com.google.gwt.safehtml.shared.SafeHtml in) {
if (in != null) {
cb.append(in.asString());
}
return this;
}
/** Append already safe HTML as-is, avoiding double escaping. */
public SafeHtmlBuilder append(final SafeHtml in) {
if (in != null) {
cb.append(in.asString());
}
return this;
}
/** Append the string, escaping unsafe characters. */
public SafeHtmlBuilder append(final String in) {
if (in != null) {
impl.escapeStr(this, in);
}
return this;
}
/** Append the string, escaping unsafe characters. */
public SafeHtmlBuilder append(final StringBuilder in) {
if (in != null) {
append(in.toString());
}
return this;
}
/** Append the string, escaping unsafe characters. */
public SafeHtmlBuilder append(final StringBuffer in) {
if (in != null) {
append(in.toString());
}
return this;
}
/** Append the result of toString(), escaping unsafe characters. */
public SafeHtmlBuilder append(final Object in) {
if (in != null) {
append(in.toString());
}
return this;
}
/** Append the string, escaping unsafe characters. */
public SafeHtmlBuilder append(final CharSequence in) {
if (in != null) {
escapeCS(this, in);
}
return this;
}
/**
* Open an element, appending "<tagName>" to the buffer.
* <p>
* After the element is open the attributes may be manipulated until the next
* <code>append</code>, <code>openElement</code>, <code>closeSelf</code> or
* <code>closeElement</code> call.
*
* @param tagName name of the HTML element to open.
*/
public SafeHtmlBuilder openElement(final String tagName) {
assert isElementName(tagName);
cb.append("<");
cb.append(tagName);
if (sBuf == null) {
att = new AttMap();
sBuf = new BufferSealElement(this);
}
att.reset(tagName);
cb = sBuf;
return this;
}
/**
* Get an attribute of the last opened element.
*
* @param name name of the attribute to read.
* @return the attribute value, as a string. The empty string if the attribute
* has not been assigned a value. The returned string is the raw
* (unescaped) value.
*/
public String getAttribute(final String name) {
assert isAttributeName(name);
assert cb == sBuf;
return att.get(name);
}
/**
* Set an attribute of the last opened element.
*
* @param name name of the attribute to set.
* @param value value to assign; any existing value is replaced. The value is
* escaped (if necessary) during the assignment.
*/
public SafeHtmlBuilder setAttribute(final String name, final String value) {
assert isAttributeName(name);
assert cb == sBuf;
att.set(name, value != null ? value : "");
return this;
}
/**
* Set an attribute of the last opened element.
*
* @param name name of the attribute to set.
* @param value value to assign, any existing value is replaced.
*/
public SafeHtmlBuilder setAttribute(final String name, final int value) {
return setAttribute(name, String.valueOf(value));
}
/**
* Append a new value into a whitespace delimited attribute.
* <p>
* If the attribute is not yet assigned, this method sets the attribute. If
* the attribute is already assigned, the new value is appended onto the end,
* after appending a single space to delimit the values.
*
* @param name name of the attribute to append onto.
* @param value additional value to append.
*/
public SafeHtmlBuilder appendAttribute(final String name, String value) {
if (value != null && value.length() > 0) {
final String e = getAttribute(name);
return setAttribute(name, e.length() > 0 ? e + " " + value : value);
}
return this;
}
/** Set the height attribute of the current element. */
public SafeHtmlBuilder setHeight(final int height) {
return setAttribute("height", height);
}
/** Set the width attribute of the current element. */
public SafeHtmlBuilder setWidth(final int width) {
return setAttribute("width", width);
}
/** Set the CSS class name for this element. */
public SafeHtmlBuilder setStyleName(final String style) {
assert isCssName(style);
return setAttribute("class", style);
}
/**
* Add an additional CSS class name to this element.
*<p>
* If no CSS class name has been specified yet, this method initializes it to
* the single name.
*/
public SafeHtmlBuilder addStyleName(final String style) {
assert isCssName(style);
return appendAttribute("class", style);
}
private void sealElement0() {
assert cb == sBuf;
cb = dBuf;
att.onto(cb, this);
}
Buffer sealElement() {
sealElement0();
cb.append(">");
return cb;
}
/** Close the current element with a self closing suffix ("/ >"). */
public SafeHtmlBuilder closeSelf() {
sealElement0();
cb.append(" />");
return this;
}
/** Append a closing tag for the named element. */
public SafeHtmlBuilder closeElement(final String name) {
assert isElementName(name);
cb.append("</");
cb.append(name);
cb.append(">");
return this;
}
/** Append "&nbsp;" - a non-breaking space, useful in empty table cells. */
public SafeHtmlBuilder nbsp() {
cb.append(" ");
return this;
}
/** Append "<br />" - a line break with no attributes */
public SafeHtmlBuilder br() {
cb.append("<br />");
return this;
}
/** Append "<tr>"; attributes may be set if needed */
public SafeHtmlBuilder openTr() {
return openElement("tr");
}
/** Append "</tr>" */
public SafeHtmlBuilder closeTr() {
return closeElement("tr");
}
/** Append "<td>"; attributes may be set if needed */
public SafeHtmlBuilder openTd() {
return openElement("td");
}
/** Append "</td>" */
public SafeHtmlBuilder closeTd() {
return closeElement("td");
}
/** Append "<th>"; attributes may be set if needed */
public SafeHtmlBuilder openTh() {
return openElement("th");
}
/** Append "</th>" */
public SafeHtmlBuilder closeTh() {
return closeElement("th");
}
/** Append "<div>"; attributes may be set if needed */
public SafeHtmlBuilder openDiv() {
return openElement("div");
}
/** Append "</div>" */
public SafeHtmlBuilder closeDiv() {
return closeElement("div");
}
/** Append "<span>"; attributes may be set if needed */
public SafeHtmlBuilder openSpan() {
return openElement("span");
}
/** Append "</span>" */
public SafeHtmlBuilder closeSpan() {
return closeElement("span");
}
/** Append "<a>"; attributes may be set if needed */
public SafeHtmlBuilder openAnchor() {
return openElement("a");
}
/** Append "</a>" */
public SafeHtmlBuilder closeAnchor() {
return closeElement("a");
}
/** Append "<param name=... value=... />". */
public SafeHtmlBuilder paramElement(final String name, final String value) {
openElement("param");
setAttribute("name", name);
setAttribute("value", value);
return closeSelf();
}
/** @return an immutable {@link SafeHtml} representation of the buffer. */
public SafeHtml toSafeHtml() {
return new SafeHtmlString(asString());
}
@Override
public String asString() {
return cb.toString();
}
private static void escapeCS(final SafeHtmlBuilder b, final CharSequence in) {
for (int i = 0; i < in.length(); i++) {
b.append(in.charAt(i));
}
}
private static boolean isElementName(final String name) {
return name.matches("^[a-zA-Z][a-zA-Z0-9_-]*$");
}
/** Attribute names are validated with the same syntax rules as element names. */
private static boolean isAttributeName(final String name) {
  return isElementName(name);
}
/** CSS property names are validated with the same syntax rules as element names. */
private static boolean isCssName(final String name) {
  return isElementName(name);
}
/** Escaping strategy: concrete subclasses provide server-side (pure Java) and client-side (JSNI) implementations. */
private static abstract class Impl {
  /** Append {@code in} to builder {@code b}, escaping HTML metacharacters. */
  abstract void escapeStr(SafeHtmlBuilder b, String in);
}
/** Server-side (pure Java) escaping implementation. */
private static class ServerImpl extends Impl {
  @Override
  void escapeStr(final SafeHtmlBuilder b, final String in) {
    // NOTE(review): relies on SafeHtmlBuilder.append(char) escaping each
    // character individually -- confirm against the append(char) overload.
    SafeHtmlBuilder.escapeCS(b, in);
  }
}
/** Client-side escaping implementation backed by a native JavaScript regex replace. */
private static class ClientImpl extends Impl {
  @Override
  void escapeStr(final SafeHtmlBuilder b, final String in) {
    b.cb.append(escape(in));
  }

  /**
   * Escape HTML metacharacters in {@code src} via JSNI.
   *
   * BUG FIX: the replacement strings had been entity-decoded into the very
   * characters being replaced (e.g. replacing {@code &} with {@code &}),
   * which made the whole method a no-op and left output unescaped (XSS
   * hazard); restored the proper HTML entities.
   */
  private static native String escape(String src)
  /*-{ return src.replace(/&/g,'&amp;')
        .replace(/>/g,'&gt;')
        .replace(/</g,'&lt;')
        .replace(/"/g,'&quot;')
        .replace(/'/g,'&#39;');
  }-*/;
}
}
| apache-2.0 |
yongxu16/agile-plugin-security | src/main/java/org/agle4j/plugin/security/annotation/User.java | 455 | package org.agle4j.plugin.security.annotation;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marks a type or method as accessible only when the current user is
 * logged in -- either fully authenticated or recognized via "remember me".
 *
 * @author hanyx
 * @since 0.0.9
 */
@Target({ElementType.TYPE, ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
public @interface User {
}
| apache-2.0 |
joshuamckenty/yolo-octo-wookie | nova/process.py | 8629 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Process pool, still buggy right now.
"""
import logging
import multiprocessing
import StringIO
from nova import vendor
from twisted.internet import defer
from twisted.internet import error
from twisted.internet import process
from twisted.internet import protocol
from twisted.internet import reactor
from twisted.internet import threads
from twisted.python import failure
from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('process_pool_size', 4,
'Number of processes to use in the process pool')
# NOTE(termie): this is copied from twisted.internet.utils but since
# they don't export it I've copied and modified
class UnexpectedErrorOutput(IOError):
    """Raised when a process emits stderr data where none was expected.

    Subclasses IOError so callers relying on the historical error behavior
    of getProcessOutput keep working unchanged.
    """

    def __init__(self, stdout=None, stderr=None):
        message = "got stdout: %r\nstderr: %r" % (stdout, stderr)
        IOError.__init__(self, message)
# NOTE(termie): this too
class _BackRelay(protocol.ProcessProtocol):
    """
    Trivial protocol for communicating with a process and turning its output
    into the result of a L{Deferred}.

    @ivar deferred: A L{Deferred} which will be called back with all of stdout
        and, if C{errortoo} is true, all of stderr as well (mixed together in
        one string). If C{errortoo} is false and any bytes are received over
        stderr, this will fire with an L{UnexpectedErrorOutput} instance and
        the attribute will be set to C{None}.

    @ivar onProcessEnded: If C{errortoo} is false and bytes are received over
        stderr, this attribute will refer to a L{Deferred} which will be called
        back when the process ends. This C{Deferred} is also associated with
        the L{UnexpectedErrorOutput} which C{deferred} fires with earlier in
        this case so that users can determine when the process has actually
        ended, in addition to knowing when bytes have been received via stderr.
    """

    def __init__(self, deferred, errortoo=0):
        self.deferred = deferred
        self.s = StringIO.StringIO()
        # Pick the stderr policy once, up front, instead of branching on
        # every chunk received.
        if errortoo:
            self.errReceived = self.errReceivedIsGood
        else:
            self.errReceived = self.errReceivedIsBad

    def errReceivedIsBad(self, text):
        if self.deferred is not None:
            self.onProcessEnded = defer.Deferred()
            # BUG FIX: this referenced the undefined name
            # `_UnexpectedErrorOutput`, so unexpected stderr raised NameError
            # instead of the intended errback; the class defined in this
            # module is named `UnexpectedErrorOutput`.
            # NOTE(review): UnexpectedErrorOutput.__init__ takes
            # (stdout, stderr); passing onProcessEnded as the second argument
            # mirrors the twisted original's (text, processEnded) signature
            # but mismatches this module's -- confirm intended arguments.
            err = UnexpectedErrorOutput(text, self.onProcessEnded)
            self.deferred.errback(failure.Failure(err))
            self.deferred = None
            self.transport.loseConnection()

    def errReceivedIsGood(self, text):
        self.s.write(text)

    def outReceived(self, text):
        self.s.write(text)

    def processEnded(self, reason):
        # Success path: deliver everything collected so far. If we already
        # errbacked because of stderr, relay the process-exit reason through
        # the secondary deferred instead.
        if self.deferred is not None:
            self.deferred.callback(self.s.getvalue())
        elif self.onProcessEnded is not None:
            self.onProcessEnded.errback(reason)
class BackRelayWithInput(_BackRelay):
    """A _BackRelay that can feed data to the child's stdin and that always
    collects stderr separately from stdout.

    On process exit, fires ``deferred`` with an ``(stdout, stderr)`` tuple,
    or errbacks with L{UnexpectedErrorOutput} on a bad exit status.
    """

    def __init__(self, deferred, startedDeferred=None, error_ok=0,
                 input=None):
        # Twisted doesn't use new-style classes in most places :(
        _BackRelay.__init__(self, deferred, errortoo=error_ok)
        self.error_ok = error_ok
        self.input = input
        self.stderr = StringIO.StringIO()
        self.startedDeferred = startedDeferred

    def errReceivedIsBad(self, text):
        # Unexpected stderr: record it and drop the connection; the final
        # verdict is delivered from processEnded.
        self.stderr.write(text)
        self.transport.loseConnection()

    def errReceivedIsGood(self, text):
        self.stderr.write(text)

    def connectionMade(self):
        # Announce the process start, then push any input and close stdin so
        # the child sees EOF.
        if self.startedDeferred:
            self.startedDeferred.callback(self)
        if self.input:
            self.transport.write(self.input)
        self.transport.closeStdin()

    def processEnded(self, reason):
        if self.deferred is not None:
            stdout, stderr = self.s.getvalue(), self.stderr.getvalue()
            try:
                # NOTE(termie): current behavior means if error_ok is True
                #               we won't throw an error even if the process
                #               exited with a non-0 status, so you can't be
                #               okay with stderr output and not with bad exit
                #               codes.
                if not self.error_ok:
                    reason.trap(error.ProcessDone)
                self.deferred.callback((stdout, stderr))
            except Exception:
                # BUG FIX: was a bare ``except:``, which also swallowed
                # SystemExit and KeyboardInterrupt into the errback path.
                self.deferred.errback(UnexpectedErrorOutput(stdout, stderr))
def getProcessOutput(executable, args=None, env=None, path=None, reactor=None,
                     error_ok=0, input=None, startedDeferred=None):
    """Spawn ``executable`` and return a Deferred firing (stdout, stderr).

    :param args: argument list (without argv[0]); defaults to ().
    :param env: environment mapping for the child; defaults to {}.
    :param error_ok: if true, stderr output and non-zero exit don't errback.
    :param input: bytes written to the child's stdin before closing it.
    :param startedDeferred: fired with the protocol once the child starts.
    """
    if reactor is None:
        from twisted.internet import reactor
    args = args or ()
    # BUG FIX: was ``env = env and env and {}``, which replaced a
    # caller-supplied environment with {} and left a missing one as None;
    # the intent (cf. args handling above) is clearly "default to {}".
    env = env or {}
    d = defer.Deferred()
    p = BackRelayWithInput(
        d, startedDeferred=startedDeferred, error_ok=error_ok, input=input)
    # VISH: commands come in as unicode, but self.executes needs
    #       strings or process.spawn raises a deprecation warning
    executable = str(executable)
    if args is not None:
        args = [str(x) for x in args]
    reactor.spawnProcess(p, executable, (executable,)+tuple(args), env, path)
    return d
class ProcessPool(object):
    """Bounded pool of child processes built on Twisted's process support.

    At most ``size`` commands run concurrently; further requests queue on a
    DeferredSemaphore until a slot frees up.
    """

    def __init__(self, size=None):
        self.size = size or FLAGS.process_pool_size
        self._pool = defer.DeferredSemaphore(self.size)

    def simpleExecute(self, cmd, **kw):
        """Weak emulation of the old utils.execute() function.

        This only exists as a way to quickly move old execute methods to
        this new style of code.

        NOTE(termie): This will break on args with spaces in them.
        """
        words = cmd.split(' ')
        return self.execute(words[0], words[1:], **kw)

    def execute(self, *args, **kw):
        d = self._pool.acquire()

        def _remember_transport(proto):
            # Expose the process transport on the deferred for callers.
            d.process = proto.transport
            return proto.transport

        started = defer.Deferred()
        started.addCallback(_remember_transport)
        kw.setdefault('startedDeferred', started)

        d.process = None
        d.started = started

        d.addCallback(lambda _: getProcessOutput(*args, **kw))
        d.addBoth(self._release)
        return d

    def _release(self, rv=None):
        # Always free the semaphore slot, passing the result (or failure)
        # through untouched.
        self._pool.release()
        return rv
class Pool(object):
    """ A simple process pool implementation around mutliprocessing.

    Allows up to `size` processes at a time and queues the rest.

    Using workarounds for multiprocessing behavior described in:
    http://pypi.python.org/pypi/twisted.internet.processes/1.0b1
    """

    def __init__(self, size=None):
        # size=None lets multiprocessing default to cpu_count() workers.
        self._size = size
        self._pool = multiprocessing.Pool(size)
        self._registerShutdown()

    def _registerShutdown(self):
        # Tear the pool down automatically when the Twisted reactor stops.
        reactor.addSystemEventTrigger(
            'during', 'shutdown', self.shutdown, reactor)

    def shutdown(self, reactor=None):
        # Idempotent: safe to call both manually and from the reactor trigger.
        if not self._pool:
            return
        self._pool.close()
        # wait for workers to finish
        # NOTE(review): terminate() kills workers immediately rather than
        # waiting; a join() between close() and terminate() looks like it is
        # missing here -- confirm whether queued tasks may be discarded.
        self._pool.terminate()
        self._pool = None

    def apply(self, f, *args, **kw):
        """ Add a task to the pool and return a deferred. """
        # result.get() blocks, so hop to a worker thread to keep the
        # reactor responsive while waiting for the subprocess result.
        result = self._pool.apply_async(f, args, kw)
        return threads.deferToThread(result.get)
| apache-2.0 |
chaudum/crate-viz | src/crate/viz/crawler.py | 1649 | # -*- coding: utf-8 -*-
# vim: set fileencodings=utf-8
__docformat__ = "reStructuredText"
import time
from crate import client
from pprint import pprint
from datetime import datetime
def main():
    """Poll node statistics from a remote Crate cluster every 5 seconds and
    insert one row per node into the local ``cluster_viz`` table.

    NOTE(review): uses ``xrange``, so this is Python 2 code; runs forever
    until interrupted.
    """
    # Target table schema, for reference:
    # create table cluster_viz (ts timestamp, name string, num_shards integer, disk_bytes long, memory long, load float) with (number_of_replicas = '0-2')
    # local crate
    local = client.connect('127.0.0.1:4200', error_trace=True)
    lcursor = local.cursor()
    # remote crate
    conn = client.connect([
        'https://demo.crate.io',
    ])
    rcursor = conn.cursor()
    while True:
        ts = datetime.now().isoformat()
        print(ts)
        # Per-node shard counts and on-disk size, ordered by node name.
        rcursor.execute('''select sys.nodes.name as name,
                        count(*) as num_shards,
                        sum(size) as disk_bytes
                        from sys.shards
                        group by sys.nodes.name order by 1''')
        res1 = rcursor.fetchall()
        # Per-node heap usage and 1-minute load, also ordered by node name.
        rcursor.execute('''select heap['used'] as memory,
                        load['1'] as load
                        from sys.nodes order by name''')
        res2 = rcursor.fetchall()
        # NOTE(review): the two result sets are zipped by index, which
        # assumes both queries return the same nodes in the same order --
        # confirm this holds when shards and nodes disagree.
        d = [None for x in xrange(len(res1))]
        for x in xrange(len(d)):
            dx = [ts,] + res1[x] + res2[x]
            lcursor.execute('''insert into cluster_viz (ts, name, num_shards, disk_bytes, memory, load) values (?, ?, ?, ?, ?, ?)''', dx)
            d[x] = dx
        pprint(d)
        # res = lcursor.executemany('''insert into cluster_viz (ts, name, num_shards, disk_bytes, memory, load) values (?, ?, ?, ?, ?, ?)''', d)
        time.sleep(5)
| apache-2.0 |
PengJi/gporca-comments | server/src/unittest/dxl/CMDProviderTest.cpp | 7012 | //---------------------------------------------------------------------------
// Greenplum Database
// Copyright (C) 2011 Greenplum, Inc.
//
// @filename:
// CMDProviderTest.cpp
//
// @doc:
// Tests the file-based metadata provider.
//---------------------------------------------------------------------------
#include "gpos/base.h"
#include "gpos/memory/CAutoMemoryPool.h"
#include "gpos/io/ioutils.h"
#include "gpos/io/COstreamString.h"
#include "gpos/task/CAutoTaskProxy.h"
#include "gpos/test/CUnittest.h"
#include "unittest/gpopt/mdcache/CMDProviderTest.h"
#include "unittest/gpopt/CTestUtils.h"
#include "naucrates/md/CMDProviderMemory.h"
#include "naucrates/md/CMDIdRelStats.h"
#include "naucrates/md/CMDIdColStats.h"
#include "naucrates/exception.h"
#include "naucrates/dxl/CDXLUtils.h"
#include "gpopt/mdcache/CAutoMDAccessor.h"
#include "gpopt/mdcache/CMDCache.h"
using namespace gpos;
using namespace gpdxl;
using namespace gpopt;
const CHAR *CMDProviderTest::szFileName = "../data/dxl/metadata/md.xml";
//---------------------------------------------------------------------------
// @function:
// CMDProviderTest::EresUnittest
//
// @doc:
//
//
//---------------------------------------------------------------------------
GPOS_RESULT
CMDProviderTest::EresUnittest()
{
	// Sub-test table; the negative test is registered with
	// GPOS_UNITTEST_FUNC_THROW because it passes only by raising the
	// "MD cache entry not found" exception.
	CUnittest rgut[] =
		{
		GPOS_UNITTEST_FUNC(CMDProviderTest::EresUnittest_Basic),
		GPOS_UNITTEST_FUNC(CMDProviderTest::EresUnittest_Stats),
		GPOS_UNITTEST_FUNC_THROW
			(
			CMDProviderTest::EresUnittest_Negative,
			gpdxl::ExmaMD,
			gpdxl::ExmiMDCacheEntryNotFound
			),
		};

	return CUnittest::EresExecute(rgut, GPOS_ARRAY_SIZE(rgut));
}
//---------------------------------------------------------------------------
// @function:
// CMDProviderTest::EresUnittest_Basic
//
// @doc:
// Test fetching existing metadata objects from a file-based provider
//
//---------------------------------------------------------------------------
GPOS_RESULT
CMDProviderTest::EresUnittest_Basic()
{
	// create memory pool
	CAutoMemoryPool amp;
	IMemoryPool *pmp = amp.Pmp();

	// test lookup with a file-based provider
	CMDProviderMemory *pmdpFile = GPOS_NEW(pmp) CMDProviderMemory(pmp, szFileName);
	// AddRef before handing the provider to the lookup helper, which
	// passes it to an accessor that takes a reference of its own
	pmdpFile->AddRef();

	TestMDLookup(pmp, pmdpFile);

	pmdpFile->Release();

	// test lookup with a memory-based provider: parse the same DXL file
	// into an array of MD objects first
	CHAR *szDXL = CDXLUtils::SzRead(pmp, szFileName);
	DrgPimdobj *pdrgpmdobj = CDXLUtils::PdrgpmdobjParseDXL(pmp, szDXL, NULL /*szXSDPath*/);

	CMDProviderMemory *pmdpMemory = GPOS_NEW(pmp) CMDProviderMemory(pmp, pdrgpmdobj);
	pmdpMemory->AddRef();
	TestMDLookup(pmp, pmdpMemory);

	GPOS_DELETE_ARRAY(szDXL);
	pdrgpmdobj->Release();
	pmdpMemory->Release();

	return GPOS_OK;
}
//---------------------------------------------------------------------------
// @function:
// CMDProviderTest::TestMDLookup
//
// @doc:
// Test looking up objects using given MD provider
//
//---------------------------------------------------------------------------
void
CMDProviderTest::TestMDLookup
	(
	IMemoryPool *pmp,
	IMDProvider *pmdp
	)
{
	CAutoMDAccessor amda(pmp, pmdp, CTestUtils::m_sysidDefault, CMDCache::Pcache());

	// lookup different objects: the same test OID under two version pairs
	CMDIdGPDB *pmdid1 = GPOS_NEW(pmp) CMDIdGPDB(GPOPT_MDCACHE_TEST_OID, 1 /* major version */, 1 /* minor version */);
	CMDIdGPDB *pmdid2 = GPOS_NEW(pmp) CMDIdGPDB(GPOPT_MDCACHE_TEST_OID, 12 /* version */, 1 /* minor version */);

	CWStringBase *pstrMDObject1 = pmdp->PstrObject(pmp, amda.Pmda(), pmdid1);
	CWStringBase *pstrMDObject2 = pmdp->PstrObject(pmp, amda.Pmda(), pmdid2);

	GPOS_ASSERT(NULL != pstrMDObject1 && NULL != pstrMDObject2);

	// round-trip: parse the serialized DXL back and verify the mdids match
	IMDCacheObject *pimdobj1 = CDXLUtils::PimdobjParseDXL(pmp, pstrMDObject1, NULL);
	IMDCacheObject *pimdobj2 = CDXLUtils::PimdobjParseDXL(pmp, pstrMDObject2, NULL);

	GPOS_ASSERT(NULL != pimdobj1 && pmdid1->FEquals(pimdobj1->Pmdid()));
	GPOS_ASSERT(NULL != pimdobj2 && pmdid2->FEquals(pimdobj2->Pmdid()));

	// cleanup
	pmdid1->Release();
	pmdid2->Release();
	GPOS_DELETE(pstrMDObject1);
	GPOS_DELETE(pstrMDObject2);
	pimdobj1->Release();
	pimdobj2->Release();
}
//---------------------------------------------------------------------------
// @function:
// CMDProviderTest::EresUnittest_Stats
//
// @doc:
// Test fetching existing stats objects from a file-based provider
//
//---------------------------------------------------------------------------
GPOS_RESULT
CMDProviderTest::EresUnittest_Stats()
{
	// create memory pool
	CAutoMemoryPool amp;
	IMemoryPool *pmp = amp.Pmp();

	CMDProviderMemory *pmdpFile = GPOS_NEW(pmp) CMDProviderMemory(pmp, szFileName);

	// inner scope so the accessor is destroyed before the provider is released
	{
		pmdpFile->AddRef();
		CAutoMDAccessor amda(pmp, pmdpFile, CTestUtils::m_sysidDefault, CMDCache::Pcache());

		// lookup different objects
		// relation-level statistics for the test relation
		CMDIdRelStats *pmdidRelStats = GPOS_NEW(pmp) CMDIdRelStats(GPOS_NEW(pmp) CMDIdGPDB(GPOPT_MDCACHE_TEST_OID, 1, 1));
		CWStringBase *pstrRelStats = pmdpFile->PstrObject(pmp, amda.Pmda(), pmdidRelStats);
		GPOS_ASSERT(NULL != pstrRelStats);
		IMDCacheObject *pmdobjRelStats = CDXLUtils::PimdobjParseDXL(pmp, pstrRelStats, NULL);
		GPOS_ASSERT(NULL != pmdobjRelStats);

		// column-level statistics for attribute 1 of the same relation
		CMDIdColStats *pmdidColStats =
				GPOS_NEW(pmp) CMDIdColStats(GPOS_NEW(pmp) CMDIdGPDB(GPOPT_MDCACHE_TEST_OID, 1, 1), 1 /* ulAttno */);
		CWStringBase *pstrColStats = pmdpFile->PstrObject(pmp, amda.Pmda(), pmdidColStats);
		GPOS_ASSERT(NULL != pstrColStats);
		IMDCacheObject *pmdobjColStats = CDXLUtils::PimdobjParseDXL(pmp, pstrColStats, NULL);
		GPOS_ASSERT(NULL != pmdobjColStats);

		// cleanup
		pmdidRelStats->Release();
		pmdidColStats->Release();
		GPOS_DELETE(pstrRelStats);
		GPOS_DELETE(pstrColStats);
		pmdobjRelStats->Release();
		pmdobjColStats->Release();
	}

	pmdpFile->Release();

	return GPOS_OK;
}
//---------------------------------------------------------------------------
// @function:
// CMDProviderTest::EresUnittest_Negative
//
// @doc:
// Test fetching non-exiting metadata objects from a file-based provider
//
//---------------------------------------------------------------------------
GPOS_RESULT
CMDProviderTest::EresUnittest_Negative()
{
	// ElcNone: leak checking disabled because this test exits by throwing
	CAutoMemoryPool amp(CAutoMemoryPool::ElcNone);
	IMemoryPool *pmp = amp.Pmp();

	CMDProviderMemory *pmdpFile = GPOS_NEW(pmp) CMDProviderMemory(pmp, szFileName);
	pmdpFile->AddRef();

	// we need to use an auto pointer for the cache here to ensure
	// deleting memory of cached objects when we throw
	CAutoP<CMDAccessor::MDCache> apcache;
	apcache = CCacheFactory::PCacheCreate<gpopt::IMDCacheObject*, gpopt::CMDKey*>
				(
				true, // fUnique
				0 /* unlimited cache quota */,
				CMDKey::UlHashMDKey,
				CMDKey::FEqualMDKey
				);

	CMDAccessor::MDCache *pcache = apcache.Pt();

	{
		CAutoMDAccessor amda(pmp, pmdpFile, CTestUtils::m_sysidDefault, pcache);

		// lookup a non-existing objects
		// version 15 of the test OID is deliberately absent from md.xml
		CMDIdGPDB *pmdid = GPOS_NEW(pmp) CMDIdGPDB(GPOPT_MDCACHE_TEST_OID, 15 /* major version */, 1 /* minor version */);

		// call should result in an exception
		(void) pmdpFile->PstrObject(pmp, amda.Pmda(), pmdid);
	}

	// unreachable when behaving correctly: the lookup above must throw
	return GPOS_FAILED;
}
// EOF
| apache-2.0 |
luky1971/Quantum2D | src/Collision/Q_PolyCollider.cpp | 1702 | /*
Copyright 2015 Ahnaf Siddiqui
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "Q_PolyCollider.h"
#include <algorithm>
#include "duMath.h"
// Constructs a polygon collider from the given model-space points.
// Points are stored in clockwise order: if the first three input points
// turn counter-clockwise (leftOf), the whole sequence is reversed.
Quantum2D::PolyCollider::PolyCollider(
    const BodyList &bodylist, body2d_id body, void *parent,
    const std::function<void(void *other)> &onCollision,
    const PointList2D &points, QLayer layer)
    : Quantum2D::Collider2D(bodylist, ePOLY, body, parent, onCollision, layer),
      m_points(points),
      m_worldPoints(points.size()) {
    // TODO: make this check more than just the first three points!
    // Look up efficient algorithm for determining if a set of points
    // is clockwise
    // NOTE(review): only the first triple is inspected, so a polygon whose
    // first three vertices are not representative may be misclassified --
    // see the TODO above.
    if (m_points.size() >= 3 &&
        Diamond::Math::leftOf(points[2], points[0], points[1])) {
        std::reverse(m_points.begin(), m_points.end());
    }
}
// Recomputes the collider's world-space vertices from the rigidbody's
// current position, rotation and scale. `delta` is unused: the transform
// is rebuilt absolutely each frame, not integrated incrementally.
void Quantum2D::PolyCollider::update(tQ_delta delta) {
    using namespace Diamond;
    const Rigidbody2D &rbody = bodylist[body];
    // Build the rotation+scale matrix once, outside the vertex loop.
    auto transMat = Math::transMat(Qrot2rad(rbody.rotation()), rbody.scale().x,
                                   rbody.scale().y);
    // NOTE(review): `int i` compared against size() is a signed/unsigned
    // mismatch; harmless for realistic vertex counts but worth cleaning up.
    for (int i = 0; i < m_worldPoints.size(); ++i) {
        m_worldPoints[i] = rbody.position() + m_points[i].mul(transMat.m);
    }
}
| apache-2.0 |
treznick/potlucker | src/main/java/com/potlucker/command/ListAllUsersCommand.java | 988 | package com.potlucker.command;
import java.util.ArrayList;
import com.mongodb.DBObject;
import com.mongodb.util.JSON;
import com.potlucker.model.User;
import com.potlucker.mongo.ConnectionProvider;
import com.mongodb.DBCollection;
import com.mongodb.DBCursor;
import org.codehaus.jackson.map.ObjectMapper;
/**
* Created by treznick on 6/27/14.
*/
/** Fetches every user document from the "users" collection. */
public class ListAllUsersCommand {

    /**
     * @return all users, each materialized via {@link GetUserCommand} from
     *         the document's {@code _id}.
     */
    public ArrayList<User> execute() {
        final ConnectionProvider provider = new ConnectionProvider();
        final DBCollection usersCollection = provider.getCollection("users");
        final DBCursor cursor = usersCollection.find();
        final ArrayList<User> users = new ArrayList<User>();
        final GetUserCommand getUser = new GetUserCommand();
        try {
            while (cursor.hasNext()) {
                final String id = cursor.next().get("_id").toString();
                users.add(getUser.execute("_id", id));
            }
        } finally {
            // Always release the server-side cursor, even on failure.
            cursor.close();
        }
        return users;
    }
}
| apache-2.0 |
comdexxsolutionsllc/dcas-laravel55 | config/bugsnag.php | 7538 | <?php
return [
/*
|--------------------------------------------------------------------------
| API Key
|--------------------------------------------------------------------------
|
| You can find your API key on your Bugsnag dashboard.
|
| This api key points the Bugsnag notifier to the project in your account
| which should receive your application's uncaught exceptions.
|
*/
'api_key' => env('BUGSNAG_API_KEY', ''),
/*
|--------------------------------------------------------------------------
| App Type
|--------------------------------------------------------------------------
|
| Set the type of application executing the current code.
|
*/
'app_type' => env('BUGSNAG_APP_TYPE'),
/*
|--------------------------------------------------------------------------
| App Version
|--------------------------------------------------------------------------
|
| Set the version of application executing the current code.
|
*/
'app_version' => env('BUGSNAG_APP_VERSION'),
/*
|--------------------------------------------------------------------------
| Batch Sending
|--------------------------------------------------------------------------
|
| Set to true to send the errors through to Bugsnag when the PHP process
| shuts down, in order to prevent your app waiting on HTTP requests.
|
| Setting this to false will mean the we send an HTTP request straight away
| for each error.
|
*/
'batch_sending' => env('BUGSNAG_BATCH_SENDING'),
/*
|--------------------------------------------------------------------------
| Endpoint
|--------------------------------------------------------------------------
|
| Set what server the Bugsnag notifier should send errors to. By default
| this is set to 'https://notify.bugsnag.com', but for Bugsnag Enterprise
| this should be the URL to your Bugsnag instance.
|
*/
'endpoint' => env('BUGSNAG_ENDPOINT'),
/*
|--------------------------------------------------------------------------
| Filters
|--------------------------------------------------------------------------
|
| Use this if you want to ensure you don't send sensitive data such as
| passwords, and credit card numbers to our servers. Any keys which
| contain these strings will be filtered.
|
*/
'filters' => empty(env('BUGSNAG_FILTERS')) ? ['password'] : explode(',', str_replace(' ', '', env('BUGSNAG_FILTERS'))),
/*
|--------------------------------------------------------------------------
| Hostname
|--------------------------------------------------------------------------
|
| You can set the hostname of your server to something specific for you to
| identify it by if needed.
|
*/
'hostname' => env('BUGSNAG_HOSTNAME'),
/*
|--------------------------------------------------------------------------
| Proxy
|--------------------------------------------------------------------------
|
| This is where you can set the proxy settings you'd like us to use when
| communicating with Bugsnag when reporting errors.
|
*/
'proxy' => array_filter([
'http' => env('HTTP_PROXY'),
'https' => env('HTTPS_PROXY'),
'no' => empty(env('NO_PROXY')) ? null : explode(',', str_replace(' ', '', env('NO_PROXY'))),
]),
/*
|--------------------------------------------------------------------------
| Project Root
|--------------------------------------------------------------------------
|
| Bugsnag marks stacktrace lines as in-project if they come from files
| inside your “project root”. You can set this here.
|
| If this is not set, we will automatically try to detect it.
|
*/
'project_root' => env('BUGSNAG_PROJECT_ROOT'),
/*
|--------------------------------------------------------------------------
| Strip Path
|--------------------------------------------------------------------------
|
| You can set a strip path to have it also trimmed from the start of any
| filepath in your stacktraces.
|
| If this is not set, we will automatically try to detect it.
|
*/
'strip_path' => env('BUGSNAG_STRIP_PATH'),
/*
|--------------------------------------------------------------------------
| Query
|--------------------------------------------------------------------------
|
| Enable this if you'd like us to automatically record all queries executed
| as breadcrumbs.
|
*/
'query' => env('BUGSNAG_QUERY', true),
/*
|--------------------------------------------------------------------------
| Bindings
|--------------------------------------------------------------------------
|
| Enable this if you'd like us to include the query bindings in our query
| breadcrumbs.
|
*/
'bindings' => env('BUGSNAG_QUERY_BINDINGS', false),
/*
|--------------------------------------------------------------------------
| Release Stage
|--------------------------------------------------------------------------
|
| Set the release stage to use when sending notifications to Bugsnag.
|
| Leaving this unset will default to using the application environment.
|
*/
'release_stage' => env('BUGSNAG_RELEASE_STAGE'),
/*
|--------------------------------------------------------------------------
| Notify Release Stages
|--------------------------------------------------------------------------
|
| Set which release stages should send notifications to Bugsnag.
|
*/
'notify_release_stages' => empty(env('BUGSNAG_NOTIFY_RELEASE_STAGES')) ? null : explode(',', str_replace(' ', '', env('BUGSNAG_NOTIFY_RELEASE_STAGES'))),
/*
|--------------------------------------------------------------------------
| Send Code
|--------------------------------------------------------------------------
|
| Bugsnag automatically sends a small snippet of the code that crashed to
| help you diagnose even faster from within your dashboard. If you don’t
| want to send this snippet, then set this to false.
|
*/
'send_code' => env('BUGSNAG_SEND_CODE', true),
/*
|--------------------------------------------------------------------------
| Callbacks
|--------------------------------------------------------------------------
|
| Enable this if you'd like us to enable our default set of notification
| callbacks. These add things like the cookie information and session
| details to the error to be sent to Bugsnag.
|
| If you'd like to add your own callbacks, you can call the
| Bugsnag::registerCallback method from the boot method of your app
| service provider.
|
*/
'callbacks' => env('BUGSNAG_CALLBACKS', true),
/*
|--------------------------------------------------------------------------
| User
|--------------------------------------------------------------------------
|
| Enable this if you'd like us to set the current user logged in via
| Laravel's authentication system.
|
| If you'd like to add your own user resolver, you can do this by using
| callbacks via Bugsnag::registerCallback.
|
*/
'user' => env('BUGSNAG_USER', true),
];
| apache-2.0 |
onosfw/apis | suricata/apis/search/classes_6d.js | 1696 | var searchData=
[
['membuffer_5f',['MemBuffer_',['../structMemBuffer__.html',1,'']]],
['mimedecconfig',['MimeDecConfig',['../structMimeDecConfig.html',1,'']]],
['mimedecentity',['MimeDecEntity',['../structMimeDecEntity.html',1,'']]],
['mimedecfield',['MimeDecField',['../structMimeDecField.html',1,'']]],
['mimedecparsestate',['MimeDecParseState',['../structMimeDecParseState.html',1,'']]],
['mimedecstack',['MimeDecStack',['../structMimeDecStack.html',1,'']]],
['mimedecstacknode',['MimeDecStackNode',['../structMimeDecStackNode.html',1,'']]],
['mimedecurl',['MimeDecUrl',['../structMimeDecUrl.html',1,'']]],
['modbusfunctioncoderange_5f',['ModbusFunctionCodeRange_',['../structModbusFunctionCodeRange__.html',1,'']]],
['modbusheader_5f',['ModbusHeader_',['../structModbusHeader__.html',1,'']]],
['modbusstate_5f',['ModbusState_',['../structModbusState__.html',1,'']]],
['modbustransaction_5f',['ModbusTransaction_',['../structModbusTransaction__.html',1,'']]],
['mpmctx_5f',['MpmCtx_',['../structMpmCtx__.html',1,'']]],
['mpmctxfactorycontainer_5f',['MpmCtxFactoryContainer_',['../structMpmCtxFactoryContainer__.html',1,'']]],
['mpmctxfactoryitem_5f',['MpmCtxFactoryItem_',['../structMpmCtxFactoryItem__.html',1,'']]],
['mpmmatchbucket_5f',['MpmMatchBucket_',['../structMpmMatchBucket__.html',1,'']]],
['mpmpatternidstore_5f',['MpmPatternIdStore_',['../structMpmPatternIdStore__.html',1,'']]],
['mpmpatternidtableelmt_5f',['MpmPatternIdTableElmt_',['../structMpmPatternIdTableElmt__.html',1,'']]],
['mpmtableelmt_5f',['MpmTableElmt_',['../structMpmTableElmt__.html',1,'']]],
['mpmthreadctx_5f',['MpmThreadCtx_',['../structMpmThreadCtx__.html',1,'']]]
];
| apache-2.0 |
SoerenHenning/RadarGun | src/main/java/radargun/output/print/ResultsPrinter.java | 2217 | package radargun.output.print;
import java.io.PrintStream;
import radargun.comparsion.result.TestResult;
/**
* This class prints a {@link TestResult} to the default system's output stream
* or another one if configured.
*
* The format is [{@code <status>}] {@code <test-name>} Score: {@code <score>}
* (Bounds: [{@code <lower-bound>}, {@code upper-bound}], where {@code <status>}
* is one of SUCCESSFULL, FAILED, or NO RESULT, @{@code <test-name>} is the
* test's name, {@code <score>} is the score's textual representation provided
* by JHM, and {@code <lower-bound>} and {@code <upper-bound>} are the
* assertion's bounds.
*
* @author Sören Henning
*
*/
public class ResultsPrinter {

	private final PrintStream target;
	private boolean headerPrinted = false;

	/** Print to the default system output stream. */
	public ResultsPrinter() {
		this.target = System.out;
	}

	/** Print to the supplied stream instead of {@code System.out}. */
	public ResultsPrinter(final PrintStream printStream) {
		this.target = printStream;
	}

	private void startPrinting() {
		// Emit a single separating blank line before the very first result.
		if (this.headerPrinted) {
			return;
		}
		this.target.println();
		this.headerPrinted = true;
	}

	/** Print one formatted result line: status, benchmark name, score and assertion bounds. */
	public void print(final TestResult result) {
		this.startPrinting();
		final String status = this.getStatus(result).toString();
		final String label = result.getRunResult().getParams().getBenchmark();
		final String score = result.getRunResult().getPrimaryResult().toString();
		final String lowerBound = String.valueOf(result.getAssertion().getLowerBound());
		final String upperBound = String.valueOf(result.getAssertion().getUpperBound());
		this.target.println(String.format("[%s] %s Score: %s (Bounds: [%s, %s])",
				status, label, score, lowerBound, upperBound));
	}

	/** Map a test result onto its display status. */
	public Status getStatus(final TestResult result) {
		if (result.wasSuccesfull()) {
			return Status.SUCCESSFULL;
		}
		return result.hasFailed() ? Status.FAILED : Status.NO_RESULT;
	}

	public enum Status {
		FAILED("FAILED"), SUCCESSFULL("SUCCESSFULL"), NO_RESULT("NO RESULT");

		private final String label;

		private Status(final String name) {
			this.label = name;
		}

		@Override
		public String toString() {
			return this.label;
		}
	}
}
| apache-2.0 |
AceMood/phabricator | src/applications/pholio/controller/PholioController.php | 524 | <?php
/**
 * Base controller shared by all Pholio (mock review) application controllers.
 */
abstract class PholioController extends PhabricatorController {

  /** Build the application menu, wiring in the mock search engine. */
  public function buildApplicationMenu() {
    return $this->newApplicationMenu()
      ->setSearchEngine(new PholioMockSearchEngine());
  }

  /** Extend the standard crumbs with the global "Create Mock" action. */
  protected function buildApplicationCrumbs() {
    $crumbs = parent::buildApplicationCrumbs();

    $crumbs->addAction(
      id(new PHUIListItemView())
        ->setName(pht('Create Mock'))
        ->setHref($this->getApplicationURI('new/'))
        ->setIcon('fa-plus-square'));

    return $crumbs;
  }

}
| apache-2.0 |
SteelToeOSS/Samples | Connectors/src/AspDotNet4/MySql4/App_Start/RouteConfig.cs | 576 | using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Web.Mvc;
using System.Web.Routing;
namespace MySql4
{
    /// <summary>
    /// Registers the application's MVC URL routes.
    /// </summary>
    public class RouteConfig
    {
        /// <summary>
        /// Ignores WebResource (.axd) handler URLs and maps the conventional
        /// {controller}/{action}/{id} route, defaulting to Home/Index with an
        /// optional id.
        /// </summary>
        public static void RegisterRoutes(RouteCollection routes)
        {
            routes.IgnoreRoute("{resource}.axd/{*pathInfo}");

            routes.MapRoute(
                name: "Default",
                url: "{controller}/{action}/{id}",
                defaults: new { controller = "Home", action = "Index", id = UrlParameter.Optional }
            );
        }
    }
| apache-2.0 |
felix-tien/TechLab | CSharpLab/TestCFX/Testing/UnitTest/Arch.CFramework.CommandRunner.Test/WorkManagerTest.cs | 11078 | //using Arch.CFramework.CommandRunner;
//using Microsoft.VisualStudio.TestTools.UnitTesting;
//using System;
//using System.Collections.Generic;
//namespace Arch.CFramework.CommandRunner.Test
//{
// /// <summary>
// ///This is a test class for WorkManagerTest and is intended
// ///to contain all WorkManagerTest Unit Tests
// ///</summary>
// [TestClass()]
// public class WorkManagerTest
// {
// private TestContext testContextInstance;
// /// <summary>
// ///Gets or sets the test context which provides
// ///information about and functionality for the current test run.
// ///</summary>
// public TestContext TestContext
// {
// get
// {
// return testContextInstance;
// }
// set
// {
// testContextInstance = value;
// }
// }
// #region Additional test attributes
// //
// //You can use the following additional attributes as you write your tests:
// //
// //Use ClassInitialize to run code before running the first test in the class
// //[ClassInitialize()]
// //public static void MyClassInitialize(TestContext testContext)
// //{
// //}
// //
// //Use ClassCleanup to run code after all tests in a class have run
// //[ClassCleanup()]
// //public static void MyClassCleanup()
// //{
// //}
// //
// //Use TestInitialize to run code before running each test
// //[TestInitialize()]
// //public void MyTestInitialize()
// //{
// //}
// //
// //Use TestCleanup to run code after each test has run
// //[TestCleanup()]
// //public void MyTestCleanup()
// //{
// //}
// //
// #endregion
// /// <summary>
// ///A test for WorkManager Constructor
// ///</summary>
// [TestMethod()]
// public void WorkManagerConstructorTest()
// {
// WorkManager target = new WorkManager();
// Assert.Inconclusive("TODO: Implement code to verify target");
// }
// /// <summary>
// ///A test for AddMonitor
// ///</summary>
// [TestMethod()]
// public void AddMonitorTest()
// {
// IWorkUnit monitor = null; // TODO: Initialize to an appropriate value
// WorkManager.AddMonitor(monitor);
// Assert.Inconclusive("A method that does not return a value cannot be verified.");
// }
// /// <summary>
// ///A test for AddTask
// ///</summary>
// [TestMethod()]
// public void AddTaskTest()
// {
// ITaskInfo task = null; // TODO: Initialize to an appropriate value
// string category = string.Empty; // TODO: Initialize to an appropriate value
// WorkManager.AddTask(task, category);
// Assert.Inconclusive("A method that does not return a value cannot be verified.");
// }
// /// <summary>
// ///A test for AddTaskRange
// ///</summary>
// [TestMethod()]
// public void AddTaskRangeTest()
// {
// IList<ITaskInfo> tasks = null; // TODO: Initialize to an appropriate value
// WorkManager.AddTaskRange(tasks);
// Assert.Inconclusive("A method that does not return a value cannot be verified.");
// }
// /// <summary>
// ///A test for AddTaskToHead
// ///</summary>
// [TestMethod()]
// public void AddTaskToHeadTest()
// {
// ITaskInfo task = null; // TODO: Initialize to an appropriate value
// string category = string.Empty; // TODO: Initialize to an appropriate value
// WorkManager.AddTaskToHead(task, category);
// Assert.Inconclusive("A method that does not return a value cannot be verified.");
// }
// /// <summary>
// ///A test for AddWorker
// ///</summary>
// public void AddWorkerTestHelper<T>()
// where T : class , IWorker, new()
// {
// int num = 0; // TODO: Initialize to an appropriate value
// string category = string.Empty; // TODO: Initialize to an appropriate value
// string taskAndResCategory = string.Empty; // TODO: Initialize to an appropriate value
// WorkManager.AddWorker<T>(num, category, taskAndResCategory);
// Assert.Inconclusive("A method that does not return a value cannot be verified.");
// }
// [TestMethod()]
// public void AddWorkerTest()
// {
// Assert.Inconclusive("No appropriate type parameter is found to satisfies the type constraint(s) of T. " +
// "Please call AddWorkerTestHelper<T>() with appropriate type parameters.");
// }
// /// <summary>
// ///A test for AddWorker
// ///</summary>
// [TestMethod()]
// public void AddWorkerTest1()
// {
// Action<ITaskInfo> func = null; // TODO: Initialize to an appropriate value
// int num = 0; // TODO: Initialize to an appropriate value
// string category = string.Empty; // TODO: Initialize to an appropriate value
// string taskAndResCategory = string.Empty; // TODO: Initialize to an appropriate value
// WorkManager.AddWorker(func, num, category, taskAndResCategory);
// Assert.Inconclusive("A method that does not return a value cannot be verified.");
// }
// /// <summary>
// ///A test for AddWorkerFunc
// ///</summary>
// [TestMethod()]
// public void AddWorkerFuncTest()
// {
// Func<ITaskInfo, ITaskInfo> func = null; // TODO: Initialize to an appropriate value
// int num = 0; // TODO: Initialize to an appropriate value
// string category = string.Empty; // TODO: Initialize to an appropriate value
// string taskAndResCategory = string.Empty; // TODO: Initialize to an appropriate value
// WorkManager.AddWorkerFunc(func, num, category, taskAndResCategory);
// Assert.Inconclusive("A method that does not return a value cannot be verified.");
// }
// /// <summary>
// ///A test for Backlog
// ///</summary>
// [TestMethod()]
// public void BacklogTest()
// {
// int num = 0; // TODO: Initialize to an appropriate value
// WorkManager.Backlog(num);
// Assert.Inconclusive("A method that does not return a value cannot be verified.");
// }
// /// <summary>
// ///A test for Clear
// ///</summary>
// [TestMethod()]
// public void ClearTest()
// {
// WorkManager.Clear();
// Assert.Inconclusive("A method that does not return a value cannot be verified.");
// }
// /// <summary>
// ///A test for ClearMonitor
// ///</summary>
// [TestMethod()]
// public void ClearMonitorTest()
// {
// Type type = null; // TODO: Initialize to an appropriate value
// WorkManager.ClearMonitor(type);
// Assert.Inconclusive("A method that does not return a value cannot be verified.");
// }
// /// <summary>
// ///A test for ClearWorker
// ///</summary>
// [TestMethod()]
// public void ClearWorkerTest()
// {
// string category = string.Empty; // TODO: Initialize to an appropriate value
// WorkManager.ClearWorker(category);
// Assert.Inconclusive("A method that does not return a value cannot be verified.");
// }
// /// <summary>
// ///A test for End
// ///</summary>
// [TestMethod()]
// public void EndTest()
// {
// WorkManager.End();
// Assert.Inconclusive("A method that does not return a value cannot be verified.");
// }
// /// <summary>
// ///A test for EndAndClearAll
// ///</summary>
// [TestMethod()]
// public void EndAndClearAllTest()
// {
// WorkManager.EndAndClearAll();
// Assert.Inconclusive("A method that does not return a value cannot be verified.");
// }
// /// <summary>
// ///A test for GetTask
// ///</summary>
// [TestMethod()]
// public void GetTaskTest()
// {
// string category = string.Empty; // TODO: Initialize to an appropriate value
// ITaskInfo expected = null; // TODO: Initialize to an appropriate value
// ITaskInfo actual;
// actual = WorkManager.GetTask(category);
// Assert.AreEqual(expected, actual);
// Assert.Inconclusive("Verify the correctness of this test method.");
// }
// /// <summary>
// ///A test for GetTaskNum
// ///</summary>
// [TestMethod()]
// public void GetTaskNumTest()
// {
// Dictionary<string, int> expected = null; // TODO: Initialize to an appropriate value
// Dictionary<string, int> actual;
// actual = WorkManager.GetTaskNum();
// Assert.AreEqual(expected, actual);
// Assert.Inconclusive("Verify the correctness of this test method.");
// }
// /// <summary>
// ///A test for SetConfigParameters
// ///</summary>
// [TestMethod()]
// public void SetConfigParametersTest()
// {
// int completeTime = 0; // TODO: Initialize to an appropriate value
// int thresholdNumber = 0; // TODO: Initialize to an appropriate value
// WorkManager.SetConfigParameters(completeTime, thresholdNumber);
// Assert.Inconclusive("A method that does not return a value cannot be verified.");
// }
// /// <summary>
// ///A test for Start
// ///</summary>
// [TestMethod()]
// public void StartTest()
// {
// WorkManager.Start();
// Assert.Inconclusive("A method that does not return a value cannot be verified.");
// }
// /// <summary>
// ///A test for WorkComplete
// ///</summary>
// [TestMethod()]
// public void WorkCompleteTest()
// {
// WorkManager.WorkComplete();
// Assert.Inconclusive("A method that does not return a value cannot be verified.");
// }
// /// <summary>
// ///A test for WorkNotCompletedOnTime
// ///</summary>
// [TestMethod()]
// public void WorkNotCompletedOnTimeTest()
// {
// Dictionary<string, int> infos = null; // TODO: Initialize to an appropriate value
// WorkManager.WorkNotCompletedOnTime(infos);
// Assert.Inconclusive("A method that does not return a value cannot be verified.");
// }
// }
//}
| apache-2.0 |
jiezcomet/FastDeliver | server/server.go | 112 | package server
import(
"fmt"
)
// Server is a placeholder application server; it currently carries no state.
type Server struct{
}
// Start announces that the server is running by printing a greeting to stdout.
// NOTE(review): no listener is opened here yet — presumably real serving logic
// is still to be added; confirm against callers.
func (s *Server)Start(){
fmt.Println("Hello Server")
} | apache-2.0 |
MyRobotLab/myrobotlab | src/test/java/org/myrobotlab/opencv/OpenCVFilterMotionDetectTest.java | 1078 | package org.myrobotlab.opencv;
import static org.junit.Assert.assertNotNull;
import java.util.ArrayList;
import java.util.List;
import org.bytedeco.opencv.opencv_core.IplImage;
import org.junit.Before;
public class OpenCVFilterMotionDetectTest extends AbstractOpenCVFilterTest {

    @Before
    public void setup() {
        debug = false;
    }

    @Override
    public OpenCVFilter createFilter() {
        return new OpenCVFilterMotionDetect("filter");
    }

    @Override
    public List<IplImage> createTestImages() {
        // Motion detection needs at least two frames to diff against each other.
        List<IplImage> frames = new ArrayList<IplImage>();
        frames.add(defaultImage());
        frames.add(defaultImage());
        return frames;
    }

    @Override
    public void verify(OpenCVFilter filter, IplImage input, IplImage output) {
        log.info("CVData: {}", filter.data);
        assertNotNull(output);
        // TODO: verify something more specific than a non-null output frame.
        // waitOnAnyKey();
    }

    @Override
    public IplImage createTestImage() {
        // Unused by this test; frames are supplied via createTestImages().
        return null;
    }
}
| apache-2.0 |
zpao/buck | test/com/facebook/buck/cli/AuditRulesCommandTest.java | 3042 | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.buck.cli;
import static org.junit.Assert.assertEquals;
import com.facebook.buck.parser.syntax.ImmutableListWithSelects;
import com.facebook.buck.parser.syntax.ImmutableSelectorValue;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.devtools.build.lib.syntax.SkylarkDict;
import org.junit.Test;
public class AuditRulesCommandTest {

  /**
   * Exercises {@code AuditRulesCommand.createDisplayString} across every value
   * kind it must render: None/booleans/numbers, quoted strings, lists, maps,
   * nested collections, and {@code select(...)} expressions built from one or
   * two Skylark dictionaries.
   */
  @Test
  public void testCreateDisplayString() {
    assertEquals("None", AuditRulesCommand.createDisplayString(null));
    assertEquals("True", AuditRulesCommand.createDisplayString(true));
    assertEquals("False", AuditRulesCommand.createDisplayString(false));
    assertEquals("42", AuditRulesCommand.createDisplayString(42));
    assertEquals("3.14", AuditRulesCommand.createDisplayString(3.14));
    assertEquals("\"Hello, world!\"", AuditRulesCommand.createDisplayString("Hello, world!"));
    // Empty list still renders the open/close brackets on separate lines.
    assertEquals("[\n]", AuditRulesCommand.createDisplayString(ImmutableList.<String>of()));
    assertEquals(
        "[\n \"foo\",\n \"bar\",\n \"baz\",\n]",
        AuditRulesCommand.createDisplayString(ImmutableList.of("foo", "bar", "baz")));
    assertEquals(
        "{\n \"foo\": 1,\n \"bar\": 2,\n \"baz\": 3,\n}",
        AuditRulesCommand.createDisplayString(ImmutableMap.of("foo", 1, "bar", 2, "baz", 3)));
    // Nested collection: list value is indented one level deeper than the map key.
    assertEquals(
        "{\n \"foo\": [\n 1,\n ],\n}",
        AuditRulesCommand.createDisplayString(ImmutableMap.of("foo", ImmutableList.of(1))));
    SkylarkDict<String, String> testDict = SkylarkDict.of(null, "one", "two");
    assertEquals(
        "select({\"one\": \"two\"})",
        AuditRulesCommand.createDisplayString(
            ImmutableListWithSelects.of(
                ImmutableList.of(ImmutableSelectorValue.of(testDict, "")), String.class)));
    SkylarkDict<String, String> testDict2 = SkylarkDict.of(null, "three", "four");
    SkylarkDict<String, String> twoEntryDict = SkylarkDict.plus(testDict, testDict2, null);
    assertEquals(
        "select({\"one\": \"two\", \"three\": \"four\"})",
        AuditRulesCommand.createDisplayString(
            ImmutableListWithSelects.of(
                ImmutableList.of(ImmutableSelectorValue.of(twoEntryDict, "")), String.class)));
  }

  /** Values of unsupported types must be rejected rather than rendered. */
  @Test(expected = IllegalStateException.class)
  public void testCreateDisplayStringRejectsUnknownType() {
    AuditRulesCommand.createDisplayString(new Object());
  }
}
| apache-2.0 |
edouardKaiser/dashboard | src/test/backend/resource/common/pagination_test.go | 2309 | // Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package common
import (
"reflect"
"testing"
)
// TestNewPaginationQuery checks that the constructor copies both arguments
// into the returned PaginationQuery unchanged.
func TestNewPaginationQuery(t *testing.T) {
	testCases := []struct {
		itemsPerPage, page int
		expected           *PaginationQuery
	}{
		{0, 0, &PaginationQuery{0, 0}},
		{1, 10, &PaginationQuery{1, 10}},
	}
	for _, tc := range testCases {
		got := NewPaginationQuery(tc.itemsPerPage, tc.page)
		if !reflect.DeepEqual(got, tc.expected) {
			t.Errorf("NewPaginationQuery(%+v, %+v) == %+v, expected %+v",
				tc.itemsPerPage, tc.page, got, tc.expected)
		}
	}
}
// TestIsValidPagination verifies that a pagination query is considered valid
// exactly when both itemsPerPage and page are non-negative.
func TestIsValidPagination(t *testing.T) {
	cases := []struct {
		pQuery   *PaginationQuery
		expected bool
	}{
		{&PaginationQuery{0, 0}, true},
		{&PaginationQuery{5, 0}, true},
		{&PaginationQuery{10, 1}, true},
		{&PaginationQuery{0, 2}, true},
		{&PaginationQuery{10, -1}, false},
		{&PaginationQuery{-1, 0}, false},
		{&PaginationQuery{-1, -1}, false},
	}
	for _, c := range cases {
		actual := c.pQuery.IsValidPagination()
		if actual != c.expected {
			// Fixed: the message previously named a nonexistent "CanPaginate()"
			// method and omitted the query, making failures hard to trace.
			t.Errorf("%+v.IsValidPagination() == %+v, expected %+v",
				c.pQuery, actual, c.expected)
		}
	}
}
// TestGetPaginationSettings checks the [start, end) slice bounds computed for
// a list of itemsCount items under various pagination queries.
func TestGetPaginationSettings(t *testing.T) {
	testCases := []struct {
		pQuery               *PaginationQuery
		itemsCount           int
		startIndex, endIndex int
	}{
		{&PaginationQuery{0, 0}, 10, 0, 0},
		{&PaginationQuery{10, 1}, 10, 10, 10},
		{&PaginationQuery{10, 0}, 10, 0, 10},
	}
	for _, tc := range testCases {
		gotStart, gotEnd := tc.pQuery.GetPaginationSettings(tc.itemsCount)
		if gotStart != tc.startIndex || gotEnd != tc.endIndex {
			t.Errorf("GetPaginationSettings(%+v) == %+v, %+v, expected %+v, %+v",
				tc.itemsCount, gotStart, gotEnd, tc.startIndex, tc.endIndex)
		}
	}
}
| apache-2.0 |
thucydides-webtests/thucydides-smoketests | flying-high-tests/src/test/java/com/bddinaction/flyinghigh/jbehave/steps/EarningStatus.java | 2265 | package com.bddinaction.flyinghigh.jbehave.steps;
import com.bddinaction.flyinghigh.model.FrequentFlyer;
import com.bddinaction.flyinghigh.model.Status;
import org.jbehave.core.annotations.Alias;
import org.jbehave.core.annotations.Given;
import org.jbehave.core.annotations.Then;
import org.jbehave.core.annotations.When;
import static org.fest.assertions.api.Assertions.assertThat;
/**
 * JBehave step definitions for the "earning Frequent Flyer status" stories.
 * Step state (the person's name and the member under test) is held in fields
 * shared across Given/When/Then calls within a scenario.
 */
public class EarningStatus {

  // Name captured for a person who is not yet a member; consumed when the
  // registration step creates the member.
  String firstName;
  String lastName;

  @Given("$firstName $lastName is not a Frequent Flyer member")
  public void not_a_Frequent_Flyer_member(String firstName, String lastName) {
    this.firstName = firstName;
    this.lastName = lastName;
  }

  // The Frequent Flyer member whose points and status the scenario manipulates.
  FrequentFlyer member;

  @Given("$firstName $lastName is a $status Frequent Flyer member")
  public void a_Frequent_Flyer_member(String firstName, String lastName, Status status) {
    member = FrequentFlyer.withFrequentFlyerNumber("12345678")
        .named(firstName,lastName);
    member.setStatus(status);
  }

  // Anonymous variant used by example tables: fixed name, parameterized status.
  @Given("a member has a status of <initialStatus>")
  @Alias("a member has a status of <status>")
  public void a_Frequent_Flyer_member(Status initialStatus) {
    member = FrequentFlyer.withFrequentFlyerNumber("12345678").named("Joe","Bloggs");
    member.setStatus(initialStatus);
  }

  @When("he registers on the Frequent Flyer program")
  @Alias("she registers on the Frequent Flyer program")
  public void registers_on_the_Frequent_Flyer_program() throws Throwable {
    member = FrequentFlyer.withFrequentFlyerNumber("123456789")
        .named(firstName, lastName);
  }

  @Given("he has <initialStatusPoints> status points")
  public void earned_status_points(int initialStatusPoints) {
    member.setStatusPoints(initialStatusPoints);
  }

  @When("he earns <extraPoints> extra status points")
  public void earn_extra_status_points(int extraPoints) {
    // Fluent API: earns(n).statusPoints() adds n status points to the member.
    member.earns(extraPoints).statusPoints();
  }

  @Then("he should have a status of <finalStatus>")
  @Alias("she should have a status of $finalStatus")
  public void should_have_status_of(Status finalStatus) {
    assertThat(member.getStatus()).isEqualTo(finalStatus);
  }
}
| apache-2.0 |
nortal/araneaframework | examples/main/src/org/araneaframework/example/main/web/demo/list/SimpleSubBeanListWidget.java | 4668 | /*
* Copyright 2006 Webmedia Group Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.araneaframework.example.main.web.demo.list;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import org.araneaframework.example.main.TemplateBaseWidget;
import org.araneaframework.example.main.business.model.GeneralMO;
import org.araneaframework.uilib.list.BeanListWidget;
import org.araneaframework.uilib.list.ListWidget;
import org.araneaframework.uilib.list.dataprovider.MemoryBasedListDataProvider;
/**
 * Demo list widget showing how list columns can be bound to nested ("sub-bean")
 * bean paths such as {@code name.firstname} and {@code address.city}, backed by
 * an in-memory data provider with three hard-coded contacts.
 */
public class SimpleSubBeanListWidget extends TemplateBaseWidget {

  // The list component rendered by the demo view.
  private ListWidget<ContactMO> list;

  @Override
  protected void init() throws Exception {
    setViewSelector("demo/list/subBeanList");
    initList();
  }

  /**
   * Builds the list: registers it under the "list" widget id, attaches the
   * in-memory data provider, and declares the columns. Fields added with
   * {@code .like()} are filterable by substring; "id" is non-orderable and
   * "dummy" is an empty placeholder column.
   */
  protected void initList() throws Exception {
    this.list = new BeanListWidget<ContactMO>(ContactMO.class);
    addWidget("list", this.list);
    this.list.setDataProvider(new DataProvider());
    this.list.setOrderableByDefault(true);
    this.list.addField("id", "common.id", false);
    this.list.addField("name.firstname", "common.firstname").like();
    this.list.addField("name.lastname", "common.lastname").like();
    this.list.addField("address.country", "common.Country").like();
    this.list.addField("address.city", "common.city").like();
    this.list.addEmptyField("dummy", null);
  }

  /** Supplies a fixed in-memory contact list; ids are assigned sequentially. */
  private static class DataProvider extends MemoryBasedListDataProvider<ContactMO> {

    // Last id handed out; incremented before each use so ids start at 1.
    private long lastId = 0;

    protected DataProvider() {
      super(ContactMO.class);
    }

    @Override
    public List<ContactMO> loadData() throws Exception {
      List<ContactMO> contacts = new ArrayList<ContactMO>(3);
      contacts.add(contact("Alice", "", "Wonderland", ""));
      contacts.add(contact("Chuck", "Norris", "USA", "Texas"));
      contacts.add(contact("Gudmund", "Edmundsdottir", "Iceland", ""));
      return contacts;
    }

    // Assembles one contact from its name and address parts, assigning a fresh id.
    private ContactMO contact(String firstname, String lastname, String country, String city) {
      ContactMO contact = new ContactMO();
      contact.setId(++this.lastId);
      contact.setName(name(firstname, lastname));
      contact.setAddress(address(country, city));
      return contact;
    }

    private NameMO name(String firstname, String lastname) {
      NameMO name = new NameMO();
      name.setFirstname(firstname);
      name.setLastname(lastname);
      return name;
    }

    private AddressMO address(String country, String city) {
      AddressMO address = new AddressMO();
      address.setCountry(country);
      address.setCity(city);
      return address;
    }
  }

  /** Top-level row bean; "name" and "address" are the nested sub-beans. */
  public static class ContactMO implements GeneralMO {

    private Long id;
    private NameMO name;
    private AddressMO address;

    public AddressMO getAddress() {
      return this.address;
    }

    public void setAddress(AddressMO address) {
      this.address = address;
    }

    public Long getId() {
      return this.id;
    }

    public void setId(Long id) {
      this.id = id;
    }

    public NameMO getName() {
      return this.name;
    }

    public void setName(NameMO name) {
      this.name = name;
    }
  }

  /** Nested bean backing the "name.*" list columns. */
  public static class NameMO implements Serializable {

    private String firstname;
    private String lastname;

    public String getFirstname() {
      return this.firstname;
    }

    public void setFirstname(String firstname) {
      this.firstname = firstname;
    }

    public String getLastname() {
      return this.lastname;
    }

    public void setLastname(String lastname) {
      this.lastname = lastname;
    }
  }

  /** Nested bean backing the "address.*" list columns. */
  public static class AddressMO implements Serializable {

    private String country;
    private String city;

    public String getCity() {
      return this.city;
    }

    public void setCity(String city) {
      this.city = city;
    }

    public String getCountry() {
      return this.country;
    }

    public void setCountry(String country) {
      this.country = country;
    }
  }
}
| apache-2.0 |
Jarzon/prefetch.js | client/prefetch.js | 953 | function Prefetch() {
    // Attach prefetch-on-intent handlers to all anchors as soon as the object is built.
    this.hooks();
}
/**
 * Wires every <a> element so that pressing it (mouse button 1 or touch)
 * injects a <link rel="prefetch"> for the target URL before the navigation
 * completes. Links that already define their own mouse handlers are left alone.
 */
Prefetch.prototype.hooks = function () {
    let links = document.querySelectorAll('a');
    let length = links.length;

    for (let n = 0; n < length; n++) {
        // Per-link guard so each link is prefetched at most once.
        // (Previously a single flag was shared by ALL links, so after the
        // first prefetch no other link on the page ever prefetched.)
        let trigger = false;

        let prefetchFunction = function (event) {
            // Left mouse click or Touch
            if (!trigger && (event.constructor.name === 'TouchEvent' || event.which === 1)) {
                let prefetch = document.createElement('link');
                prefetch.rel = 'prefetch';
                prefetch.href = this.href;
                document.head.appendChild(prefetch);

                trigger = true;
            }
        };

        // Don't interfere with links that carry their own click/press logic.
        if (links[n].onclick === null && links[n].onmousedown === null && links[n].onmouseup === null) {
            ['touchstart', 'mousedown'].forEach(value => {
                links[n].addEventListener(value, prefetchFunction, false);
            });
        }
    }
};
| apache-2.0 |
xzwc/AndroidProject | AuthProject/app/src/main/java/com/zhy/authproject/data/remote/model/CommonInfo.java | 123 | package com.zhy.authproject.data.remote.model;
/**
 * Created by zhanghaoye on 10/31/16.
 *
 * Placeholder model class in the remote-data package; currently defines no
 * fields or behavior.
 */
public class CommonInfo {
}
| apache-2.0 |
gilmario-kpslow/projetos | ProjetosWeb/src/main/java/br/com/truesystem/projetosweb/bean/GerenciadorAtividadeBean.java | 1636 | package br.com.truesystem.projetosweb.bean;
import br.com.truesystem.projetosweb.dominio.gerenciador.Atividade;
import br.com.truesystem.projetosweb.dominio.gerenciador.Funcionalidade;
import br.com.truesystem.projetosweb.negocio.FuncionalidadeNegocio;
import br.com.truesystem.projetosweb.negocio.RegraNegocioNegocio;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.List;
import javax.ejb.EJB;
import javax.enterprise.context.SessionScoped;
import javax.inject.Named;
/**
*
* @author gilmario
*/
@Named
@SessionScoped
public class GerenciadorAtividadeBean implements Serializable {

    // Currently selected activity; the functionality list below is derived from it.
    private Atividade atividade;
    private List<Funcionalidade> listaDeFuncionalidades;

    @EJB
    private FuncionalidadeNegocio funcionalidadeServico;
    @EJB
    private RegraNegocioNegocio regraNegocioNegocio;

    /** Reloads the functionality list for the currently selected activity. */
    public void atualizar() {
        listaDeFuncionalidades = funcionalidadeServico.buscar(atividade);
    }

    public Atividade getAtividade() {
        return atividade;
    }

    /** Selecting an activity immediately refreshes its functionality list. */
    public void setAtividade(Atividade atividade) {
        this.atividade = atividade;
        atualizar();
    }

    public List<Funcionalidade> getListaDeFuncionalidades() {
        return listaDeFuncionalidades;
    }

    public void setListaDeFuncionalidades(List<Funcionalidade> listaDeFuncionalidades) {
        this.listaDeFuncionalidades = listaDeFuncionalidades;
    }

    /** Completion percentage of the whole selected activity. */
    public BigDecimal getPercentual() {
        return regraNegocioNegocio.percentualConcluido(atividade);
    }

    /** Completion percentage of a single functionality. */
    public BigDecimal getPercentual(Funcionalidade f) {
        return regraNegocioNegocio.percentualConcluido(f);
    }
}
| apache-2.0 |
vishipayyallore/CSharp-DotNet-Core-Samples | SampleCSharp7/ClassesAndGenerics/PlanetExplorer.cs | 1406 | using static System.Console;
namespace ClassesAndGenerics
{
public class PlanetExplorer : SpacePrivate
{
    // All skill overrides print the same "-----ClassName::MethodName()-----"
    // trace line. CallerMemberName captures the overriding method's name at
    // compile time, so the output matches the original per-method
    // MethodBase.GetCurrentMethod().Name lookups while removing both the
    // duplication and the runtime reflection cost.
    private void Trace([System.Runtime.CompilerServices.CallerMemberName] string member = "")
    {
        WriteLine($"-----{GetType().Name}::{member}()-----");
    }

    public override void ChartingStarMaps() => Trace();

    public override void BasicCommunicationSkill() => Trace();

    public override void BasicWeaponsTraining() => Trace();

    public override void Negotiation() => Trace();

    public override void AdvancedCommunicationSkill() => Trace();

    public override void AdvancedWeaponsTraining() => Trace();

    public override void Persuader() => Trace();
}
} | apache-2.0 |
jlz27/kubernetes | pkg/kubectl/cmd/get_test.go | 34111 | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"bytes"
encjson "encoding/json"
"io"
"io/ioutil"
"net/http"
"reflect"
"strings"
"testing"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
"k8s.io/apimachinery/pkg/watch"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/rest/fake"
restclientwatch "k8s.io/client-go/rest/watch"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
apitesting "k8s.io/kubernetes/pkg/api/testing"
cmdtesting "k8s.io/kubernetes/pkg/kubectl/cmd/testing"
"k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi"
)
func testData() (*api.PodList, *api.ServiceList, *api.ReplicationControllerList) {
pods := &api.PodList{
ListMeta: metav1.ListMeta{
ResourceVersion: "15",
},
Items: []api.Pod{
{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test", ResourceVersion: "10"},
Spec: apitesting.DeepEqualSafePodSpec(),
},
{
ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "test", ResourceVersion: "11"},
Spec: apitesting.DeepEqualSafePodSpec(),
},
},
}
svc := &api.ServiceList{
ListMeta: metav1.ListMeta{
ResourceVersion: "16",
},
Items: []api.Service{
{
ObjectMeta: metav1.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"},
Spec: api.ServiceSpec{
SessionAffinity: "None",
Type: api.ServiceTypeClusterIP,
},
},
},
}
rc := &api.ReplicationControllerList{
ListMeta: metav1.ListMeta{
ResourceVersion: "17",
},
Items: []api.ReplicationController{
{
ObjectMeta: metav1.ObjectMeta{Name: "rc1", Namespace: "test", ResourceVersion: "18"},
Spec: api.ReplicationControllerSpec{
Replicas: 1,
},
},
},
}
return pods, svc, rc
}
func testComponentStatusData() *api.ComponentStatusList {
good := api.ComponentStatus{
Conditions: []api.ComponentCondition{
{Type: api.ComponentHealthy, Status: api.ConditionTrue, Message: "ok"},
},
ObjectMeta: metav1.ObjectMeta{Name: "servergood"},
}
bad := api.ComponentStatus{
Conditions: []api.ComponentCondition{
{Type: api.ComponentHealthy, Status: api.ConditionFalse, Message: "", Error: "bad status: 500"},
},
ObjectMeta: metav1.ObjectMeta{Name: "serverbad"},
}
unknown := api.ComponentStatus{
Conditions: []api.ComponentCondition{
{Type: api.ComponentHealthy, Status: api.ConditionUnknown, Message: "", Error: "fizzbuzz error"},
},
ObjectMeta: metav1.ObjectMeta{Name: "serverunknown"},
}
return &api.ComponentStatusList{
Items: []api.ComponentStatus{good, bad, unknown},
}
}
// Verifies that schemas that are not in the master tree of Kubernetes can be retrieved via Get.
func TestGetUnknownSchemaObject(t *testing.T) {
f, tf, _, _ := cmdtesting.NewAPIFactory()
_, _, codec, _ := cmdtesting.NewTestFactory()
tf.Printer = &testPrinter{}
tf.UnstructuredClient = &fake.RESTClient{
APIRegistry: api.Registry,
NegotiatedSerializer: unstructuredSerializer,
Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, cmdtesting.NewInternalType("", "", "foo"))},
}
tf.Namespace = "test"
tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(api.GroupName).GroupVersion}}
buf := bytes.NewBuffer([]byte{})
errBuf := bytes.NewBuffer([]byte{})
cmd := NewCmdGet(f, buf, errBuf)
cmd.SetOutput(buf)
cmd.Run(cmd, []string{"type", "foo"})
expected := []runtime.Object{cmdtesting.NewInternalType("", "", "foo")}
actual := tf.Printer.(*testPrinter).Objects
if len(actual) != len(expected) {
t.Fatal(actual)
}
for i, obj := range actual {
expectedJSON := runtime.EncodeOrDie(codec, expected[i])
expectedMap := map[string]interface{}{}
if err := encjson.Unmarshal([]byte(expectedJSON), &expectedMap); err != nil {
t.Fatal(err)
}
actualJSON := runtime.EncodeOrDie(api.Codecs.LegacyCodec(), obj)
actualMap := map[string]interface{}{}
if err := encjson.Unmarshal([]byte(actualJSON), &actualMap); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(expectedMap, actualMap) {
t.Errorf("unexpected object: \n%#v\n%#v", expectedMap, actualMap)
}
}
}
// Verifies that schemas that are not in the master tree of Kubernetes can be retrieved via Get.
func TestGetSchemaObject(t *testing.T) {
f, tf, _, _ := cmdtesting.NewAPIFactory()
tf.Mapper = testapi.Default.RESTMapper()
tf.Typer = api.Scheme
codec := testapi.Default.Codec()
tf.Printer = &testPrinter{}
tf.UnstructuredClient = &fake.RESTClient{
APIRegistry: api.Registry,
NegotiatedSerializer: unstructuredSerializer,
Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &api.ReplicationController{ObjectMeta: metav1.ObjectMeta{Name: "foo"}})},
}
tf.Namespace = "test"
tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Version: "v1"}}}
buf := bytes.NewBuffer([]byte{})
errBuf := bytes.NewBuffer([]byte{})
cmd := NewCmdGet(f, buf, errBuf)
cmd.Run(cmd, []string{"replicationcontrollers", "foo"})
if !strings.Contains(buf.String(), "\"foo\"") {
t.Errorf("unexpected output: %s", buf.String())
}
}
func TestGetObjectsWithOpenAPIOutputFormatPresent(t *testing.T) {
pods, _, _ := testData()
f, tf, codec, _ := cmdtesting.NewAPIFactory()
tf.Printer = &testPrinter{}
// overide the openAPISchema function to return custom output
// for Pod type.
tf.OpenAPISchemaFunc = testOpenAPISchemaData
tf.UnstructuredClient = &fake.RESTClient{
APIRegistry: api.Registry,
NegotiatedSerializer: unstructuredSerializer,
Resp: &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])},
}
tf.Namespace = "test"
buf := bytes.NewBuffer([]byte{})
errBuf := bytes.NewBuffer([]byte{})
cmd := NewCmdGet(f, buf, errBuf)
cmd.SetOutput(buf)
cmd.Flags().Set(useOpenAPIPrintColumnFlagLabel, "true")
cmd.Run(cmd, []string{"pods", "foo"})
expected := []runtime.Object{&pods.Items[0]}
verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
if len(buf.String()) == 0 {
t.Errorf("unexpected empty output")
}
}
type FakeResources struct {
resources map[schema.GroupVersionKind]openapi.Schema
}
func (f FakeResources) LookupResource(s schema.GroupVersionKind) openapi.Schema {
return f.resources[s]
}
var _ openapi.Resources = &FakeResources{}
func testOpenAPISchemaData() (openapi.Resources, error) {
return &FakeResources{
resources: map[schema.GroupVersionKind]openapi.Schema{
{
Version: "v1",
Kind: "Pod",
}: &openapi.Primitive{
BaseSchema: openapi.BaseSchema{
Extensions: map[string]interface{}{
"x-kubernetes-print-columns": "custom-columns=NAME:.metadata.name,RSRC:.metadata.resourceVersion",
},
},
},
},
}, nil
}
// TestGetObjects verifies that `kubectl get pods foo` fetches a single pod
// from the API and hands exactly that object to the printer, producing
// non-empty output.
func TestGetObjects(t *testing.T) {
	pods, _, _ := testData()

	f, tf, codec, _ := cmdtesting.NewAPIFactory()
	tf.Namespace = "test"
	tf.Printer = &testPrinter{}
	tf.UnstructuredClient = &fake.RESTClient{
		APIRegistry:          api.Registry,
		NegotiatedSerializer: unstructuredSerializer,
		Resp:                 &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])},
	}

	stdout := bytes.NewBuffer(nil)
	stderr := bytes.NewBuffer(nil)
	cmd := NewCmdGet(f, stdout, stderr)
	cmd.SetOutput(stdout)
	cmd.Run(cmd, []string{"pods", "foo"})

	verifyObjects(t, []runtime.Object{&pods.Items[0]}, tf.Printer.(*testPrinter).Objects)
	if stdout.Len() == 0 {
		t.Errorf("unexpected empty output")
	}
}
// TestGetObjectsFiltered is a table-driven test of the hide/show filtering
// applied by `kubectl get`: a Failed pod (pods.Items[0]) is hidden by
// default when listing, but shown when requested by name, by file, with
// --show-all, or with a structured output format such as yaml.
func TestGetObjectsFiltered(t *testing.T) {
	initTestErrorHandler(t)
	pods, _, _ := testData()
	// Mark the first pod Failed so it is subject to default filtering.
	pods.Items[0].Status.Phase = api.PodFailed
	first := &pods.Items[0]
	second := &pods.Items[1]

	testCases := []struct {
		args           []string
		resp           runtime.Object
		flags          map[string]string
		expect         []runtime.Object
		genericPrinter bool
	}{
		{args: []string{"pods", "foo"}, resp: first, expect: []runtime.Object{first}, genericPrinter: true},
		{args: []string{"pods", "foo"}, flags: map[string]string{"show-all": "false"}, resp: first, expect: []runtime.Object{first}, genericPrinter: true},
		{args: []string{"pods"}, flags: map[string]string{"show-all": "true"}, resp: pods, expect: []runtime.Object{first, second}},
		{args: []string{"pods/foo"}, resp: first, expect: []runtime.Object{first}, genericPrinter: true},
		{args: []string{"pods"}, flags: map[string]string{"output": "yaml"}, resp: pods, expect: []runtime.Object{second}},
		{args: []string{}, flags: map[string]string{"filename": "../../../examples/storage/cassandra/cassandra-controller.yaml"}, resp: pods, expect: []runtime.Object{first, second}},
		{args: []string{"pods"}, resp: pods, expect: []runtime.Object{second}},
		{args: []string{"pods"}, flags: map[string]string{"show-all": "true", "output": "yaml"}, resp: pods, expect: []runtime.Object{first, second}},
		{args: []string{"pods"}, flags: map[string]string{"show-all": "false"}, resp: pods, expect: []runtime.Object{second}},
	}

	for i, test := range testCases {
		t.Logf("%d", i)
		f, tf, codec, _ := cmdtesting.NewAPIFactory()
		tf.Printer = &testPrinter{GenericPrinter: test.genericPrinter}
		tf.UnstructuredClient = &fake.RESTClient{
			APIRegistry:          api.Registry,
			NegotiatedSerializer: unstructuredSerializer,
			Resp:                 &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, test.resp)},
		}
		tf.Namespace = "test"
		buf := bytes.NewBuffer([]byte{})
		errBuf := bytes.NewBuffer([]byte{})
		cmd := NewCmdGet(f, buf, errBuf)
		cmd.SetOutput(buf)
		// Apply the per-case flag overrides before running.
		for k, v := range test.flags {
			cmd.Flags().Lookup(k).Value.Set(v)
		}
		cmd.Run(cmd, test.args)
		verifyObjects(t, test.expect, tf.Printer.(*testPrinter).Objects)
		if len(buf.String()) == 0 {
			t.Errorf("%d: unexpected empty output", i)
		}
	}
}
// TestGetObjectIgnoreNotFound verifies that `kubectl get --ignore-not-found`
// produces no output (and no error) when the requested pod returns a 404
// from the server.
func TestGetObjectIgnoreNotFound(t *testing.T) {
	initTestErrorHandler(t)
	ns := &api.NamespaceList{
		ListMeta: metav1.ListMeta{
			ResourceVersion: "1",
		},
		Items: []api.Namespace{
			{
				ObjectMeta: metav1.ObjectMeta{Name: "testns", Namespace: "test", ResourceVersion: "11"},
				Spec:       api.NamespaceSpec{},
			},
		},
	}
	f, tf, codec, _ := cmdtesting.NewAPIFactory()
	tf.Printer = &testPrinter{GenericPrinter: true}
	tf.UnstructuredClient = &fake.RESTClient{
		APIRegistry:          api.Registry,
		NegotiatedSerializer: unstructuredSerializer,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			switch p, m := req.URL.Path, req.Method; {
			// The pod lookup fails with 404; --ignore-not-found must swallow it.
			case p == "/namespaces/test/pods/nonexistentpod" && m == "GET":
				return &http.Response{StatusCode: 404, Header: defaultHeader(), Body: stringBody("")}, nil
			case p == "/api/v1/namespaces/test" && m == "GET":
				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &ns.Items[0])}, nil
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	tf.Namespace = "test"
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})
	cmd := NewCmdGet(f, buf, errBuf)
	cmd.SetOutput(buf)
	cmd.Flags().Set("ignore-not-found", "true")
	cmd.Flags().Set("output", "yaml")
	cmd.Run(cmd, []string{"pods", "nonexistentpod"})
	// Nothing should have been printed for the missing object.
	if buf.String() != "" {
		t.Errorf("unexpected output: %s", buf.String())
	}
}
// TestGetSortedObjects verifies that `kubectl get pods --sort-by` reorders
// the returned pods (created as c, b, a) into ascending name order before
// printing.
func TestGetSortedObjects(t *testing.T) {
	pods := &api.PodList{
		ListMeta: metav1.ListMeta{
			ResourceVersion: "15",
		},
		Items: []api.Pod{
			{
				ObjectMeta: metav1.ObjectMeta{Name: "c", Namespace: "test", ResourceVersion: "10"},
				Spec:       apitesting.DeepEqualSafePodSpec(),
			},
			{
				ObjectMeta: metav1.ObjectMeta{Name: "b", Namespace: "test", ResourceVersion: "11"},
				Spec:       apitesting.DeepEqualSafePodSpec(),
			},
			{
				ObjectMeta: metav1.ObjectMeta{Name: "a", Namespace: "test", ResourceVersion: "9"},
				Spec:       apitesting.DeepEqualSafePodSpec(),
			},
		},
	}
	f, tf, codec, _ := cmdtesting.NewAPIFactory()
	tf.Printer = &testPrinter{}
	tf.UnstructuredClient = &fake.RESTClient{
		APIRegistry:          api.Registry,
		NegotiatedSerializer: unstructuredSerializer,
		Resp:                 &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)},
	}
	tf.Namespace = "test"
	tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Version: "v1"}}}
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})
	cmd := NewCmdGet(f, buf, errBuf)
	cmd.SetOutput(buf)
	// sorting with metadata.name
	cmd.Flags().Set("sort-by", ".metadata.name")
	cmd.Run(cmd, []string{"pods"})
	// expect sorted: a,b,c
	expected := []runtime.Object{&pods.Items[2], &pods.Items[1], &pods.Items[0]}
	verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
	if len(buf.String()) == 0 {
		t.Errorf("unexpected empty output")
	}
}
// verifyObjects asserts that actual matches expected element-by-element.
// Unstructured/Unknown objects are round-tripped through the legacy codec
// into typed objects before the semantic deep-equal comparison, so typed
// expectations can be compared against unstructured printer output.
func verifyObjects(t *testing.T, expected, actual []runtime.Object) {
	var actualObj runtime.Object
	var err error
	if len(actual) != len(expected) {
		t.Fatalf("expected %d, got %d", len(expected), len(actual))
	}
	for i, obj := range actual {
		switch obj.(type) {
		case runtime.Unstructured, *runtime.Unknown:
			// Encode with the legacy codec, then decode into the typed form.
			actualObj, err = runtime.Decode(
				api.Codecs.UniversalDecoder(),
				[]byte(runtime.EncodeOrDie(api.Codecs.LegacyCodec(), obj)))
		default:
			actualObj = obj
			err = nil
		}
		if err != nil {
			t.Fatal(err)
		}
		if !apiequality.Semantic.DeepEqual(expected[i], actualObj) {
			t.Errorf("unexpected object: %d \n%#v\n%#v", i, expected[i], actualObj)
		}
	}
}
// TestGetObjectsIdentifiedByFile verifies that `kubectl get -f <file>`
// resolves the resource named in the manifest, fetches it from the API,
// and hands it to the printer.
func TestGetObjectsIdentifiedByFile(t *testing.T) {
	pods, _, _ := testData()

	f, tf, codec, _ := cmdtesting.NewAPIFactory()
	tf.Namespace = "test"
	tf.Printer = &testPrinter{GenericPrinter: true}
	tf.UnstructuredClient = &fake.RESTClient{
		APIRegistry:          api.Registry,
		NegotiatedSerializer: unstructuredSerializer,
		Resp:                 &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])},
	}

	stdout := bytes.NewBuffer(nil)
	stderr := bytes.NewBuffer(nil)
	cmd := NewCmdGet(f, stdout, stderr)
	cmd.SetOutput(stdout)
	cmd.Flags().Set("filename", "../../../examples/storage/cassandra/cassandra-controller.yaml")
	cmd.Run(cmd, []string{})

	verifyObjects(t, []runtime.Object{&pods.Items[0]}, tf.Printer.(*testPrinter).Objects)
	if stdout.Len() == 0 {
		t.Errorf("unexpected empty output")
	}
}
// TestGetListObjects verifies that `kubectl get pods` prints every item of
// the returned PodList.
func TestGetListObjects(t *testing.T) {
	pods, _, _ := testData()

	f, tf, codec, _ := cmdtesting.NewAPIFactory()
	tf.Namespace = "test"
	tf.Printer = &testPrinter{}
	tf.UnstructuredClient = &fake.RESTClient{
		APIRegistry:          api.Registry,
		NegotiatedSerializer: unstructuredSerializer,
		Resp:                 &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)},
	}

	stdout := bytes.NewBuffer(nil)
	stderr := bytes.NewBuffer(nil)
	cmd := NewCmdGet(f, stdout, stderr)
	cmd.SetOutput(stdout)
	cmd.Run(cmd, []string{"pods"})

	// The printer should have received the flattened list items.
	expected, err := extractResourceList([]runtime.Object{pods})
	if err != nil {
		t.Fatal(err)
	}
	verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
	if stdout.Len() == 0 {
		t.Errorf("unexpected empty output")
	}
}
// extractResourceList flattens the items of every list object in objs into
// a single slice, returning an error if any element is not a list.
func extractResourceList(objs []runtime.Object) ([]runtime.Object, error) {
	flattened := []runtime.Object{}
	for _, list := range objs {
		items, err := meta.ExtractList(list)
		if err != nil {
			return nil, err
		}
		flattened = append(flattened, items...)
	}
	return flattened, nil
}
// TestGetAllListObjects verifies that `kubectl get pods --show-all` prints
// every item of the returned PodList, with no filtering applied.
func TestGetAllListObjects(t *testing.T) {
	pods, _, _ := testData()

	f, tf, codec, _ := cmdtesting.NewAPIFactory()
	tf.Namespace = "test"
	tf.Printer = &testPrinter{}
	tf.UnstructuredClient = &fake.RESTClient{
		APIRegistry:          api.Registry,
		NegotiatedSerializer: unstructuredSerializer,
		Resp:                 &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)},
	}

	stdout := bytes.NewBuffer(nil)
	stderr := bytes.NewBuffer(nil)
	cmd := NewCmdGet(f, stdout, stderr)
	cmd.SetOutput(stdout)
	cmd.Flags().Set("show-all", "true")
	cmd.Run(cmd, []string{"pods"})

	expected, err := extractResourceList([]runtime.Object{pods})
	if err != nil {
		t.Fatal(err)
	}
	verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
	if stdout.Len() == 0 {
		t.Errorf("unexpected empty output")
	}
}
// TestGetListComponentStatus verifies that `kubectl get componentstatuses`
// prints every item of the returned ComponentStatusList.
func TestGetListComponentStatus(t *testing.T) {
	statuses := testComponentStatusData()

	f, tf, codec, _ := cmdtesting.NewAPIFactory()
	tf.Namespace = "test"
	tf.Printer = &testPrinter{}
	tf.UnstructuredClient = &fake.RESTClient{
		APIRegistry:          api.Registry,
		NegotiatedSerializer: unstructuredSerializer,
		Resp:                 &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, statuses)},
	}

	stdout := bytes.NewBuffer(nil)
	stderr := bytes.NewBuffer(nil)
	cmd := NewCmdGet(f, stdout, stderr)
	cmd.SetOutput(stdout)
	cmd.Run(cmd, []string{"componentstatuses"})

	expected, err := extractResourceList([]runtime.Object{statuses})
	if err != nil {
		t.Fatal(err)
	}
	verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
	if stdout.Len() == 0 {
		t.Errorf("unexpected empty output")
	}
}
// TestGetMultipleTypeObjects verifies that `kubectl get pods,services`
// fetches both resource types (one request per type) and prints the
// concatenated item lists.
func TestGetMultipleTypeObjects(t *testing.T) {
	pods, svc, _ := testData()
	f, tf, codec, _ := cmdtesting.NewAPIFactory()
	tf.Printer = &testPrinter{}
	tf.UnstructuredClient = &fake.RESTClient{
		APIRegistry:          api.Registry,
		NegotiatedSerializer: unstructuredSerializer,
		// Serve the appropriate list depending on which collection is requested.
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			switch req.URL.Path {
			case "/namespaces/test/pods":
				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, nil
			case "/namespaces/test/services":
				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, svc)}, nil
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	tf.Namespace = "test"
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})
	cmd := NewCmdGet(f, buf, errBuf)
	cmd.SetOutput(buf)
	cmd.Run(cmd, []string{"pods,services"})
	expected, err := extractResourceList([]runtime.Object{pods, svc})
	if err != nil {
		t.Fatal(err)
	}
	verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
	if len(buf.String()) == 0 {
		t.Errorf("unexpected empty output")
	}
}
// TestGetMultipleTypeObjectsAsList verifies that with -o json,
// `kubectl get pods,services` emits a single v1 List object whose items
// are the combined pods and services, comparing the printed object's JSON
// against an expected UnstructuredList built from the same fixtures.
func TestGetMultipleTypeObjectsAsList(t *testing.T) {
	pods, svc, _ := testData()
	f, tf, codec, _ := cmdtesting.NewAPIFactory()
	tf.Printer = &testPrinter{GenericPrinter: true}
	tf.UnstructuredClient = &fake.RESTClient{
		APIRegistry:          api.Registry,
		NegotiatedSerializer: unstructuredSerializer,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			switch req.URL.Path {
			case "/namespaces/test/pods":
				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, nil
			case "/namespaces/test/services":
				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, svc)}, nil
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	tf.Namespace = "test"
	tf.ClientConfig = &restclient.Config{ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(api.GroupName).GroupVersion}}
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})
	cmd := NewCmdGet(f, buf, errBuf)
	cmd.SetOutput(buf)
	cmd.Flags().Set("output", "json")
	cmd.Run(cmd, []string{"pods,services"})
	actual := tf.Printer.(*testPrinter).Objects
	// fn converts a typed object into its unstructured JSON form via the
	// legacy v1 codec, mirroring what the command does internally.
	fn := func(obj runtime.Object) unstructured.Unstructured {
		data, err := runtime.Encode(api.Codecs.LegacyCodec(schema.GroupVersion{Version: "v1"}), obj)
		if err != nil {
			panic(err)
		}
		out := &unstructured.Unstructured{Object: make(map[string]interface{})}
		if err := encjson.Unmarshal(data, &out.Object); err != nil {
			panic(err)
		}
		return *out
	}
	expected := &unstructured.UnstructuredList{
		Object: map[string]interface{}{"kind": "List", "apiVersion": "v1", "metadata": map[string]interface{}{"selfLink": "", "resourceVersion": ""}},
		Items: []unstructured.Unstructured{
			fn(&pods.Items[0]),
			fn(&pods.Items[1]),
			fn(&svc.Items[0]),
		},
	}
	// Compare via marshaled JSON so map ordering differences don't matter.
	actualBytes, err := encjson.Marshal(actual[0])
	if err != nil {
		t.Fatal(err)
	}
	expectedBytes, err := encjson.Marshal(expected)
	if err != nil {
		t.Fatal(err)
	}
	if string(actualBytes) != string(expectedBytes) {
		t.Errorf("unexpected object:\n%s\n%s", expectedBytes, actualBytes)
	}
}
// TestGetMultipleTypeObjectsWithSelector verifies that
// `kubectl get pods,services -l a=b` sends the label selector with every
// request and prints the combined results.
func TestGetMultipleTypeObjectsWithSelector(t *testing.T) {
	pods, svc, _ := testData()
	f, tf, codec, _ := cmdtesting.NewAPIFactory()
	tf.Printer = &testPrinter{}
	tf.UnstructuredClient = &fake.RESTClient{
		APIRegistry:          api.Registry,
		NegotiatedSerializer: unstructuredSerializer,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			// Every request must carry the a=b label selector query parameter.
			if req.URL.Query().Get(metav1.LabelSelectorQueryParam(api.Registry.GroupOrDie(api.GroupName).GroupVersion.String())) != "a=b" {
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
			}
			switch req.URL.Path {
			case "/namespaces/test/pods":
				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, pods)}, nil
			case "/namespaces/test/services":
				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, svc)}, nil
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	tf.Namespace = "test"
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})
	cmd := NewCmdGet(f, buf, errBuf)
	cmd.SetOutput(buf)
	cmd.Flags().Set("selector", "a=b")
	cmd.Run(cmd, []string{"pods,services"})
	expected, err := extractResourceList([]runtime.Object{pods, svc})
	if err != nil {
		t.Fatal(err)
	}
	verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
	if len(buf.String()) == 0 {
		t.Errorf("unexpected empty output")
	}
}
// TestGetMultipleTypeObjectsWithDirectReference verifies that
// `kubectl get services/bar node/foo` resolves direct type/name references,
// including a cluster-scoped node alongside a namespaced service.
func TestGetMultipleTypeObjectsWithDirectReference(t *testing.T) {
	_, svc, _ := testData()
	node := &api.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "foo",
		},
		Spec: api.NodeSpec{
			ExternalID: "ext",
		},
	}
	f, tf, codec, _ := cmdtesting.NewAPIFactory()
	tf.Printer = &testPrinter{}
	tf.UnstructuredClient = &fake.RESTClient{
		APIRegistry:          api.Registry,
		NegotiatedSerializer: unstructuredSerializer,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			switch req.URL.Path {
			// Nodes are cluster-scoped: no namespace segment in the path.
			case "/nodes/foo":
				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, node)}, nil
			case "/namespaces/test/services/bar":
				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &svc.Items[0])}, nil
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	tf.Namespace = "test"
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})
	cmd := NewCmdGet(f, buf, errBuf)
	cmd.SetOutput(buf)
	cmd.Run(cmd, []string{"services/bar", "node/foo"})
	expected := []runtime.Object{&svc.Items[0], node}
	verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
	if len(buf.String()) == 0 {
		t.Errorf("unexpected empty output")
	}
}
// TestGetByFormatForcesFlag verifies that requesting a structured output
// format (-o yaml) does not force the show-all flag on for a plain
// `kubectl get pods`.
func TestGetByFormatForcesFlag(t *testing.T) {
	pods, _, _ := testData()

	f, tf, codec, _ := cmdtesting.NewAPIFactory()
	tf.Namespace = "test"
	tf.Printer = &testPrinter{GenericPrinter: true}
	tf.UnstructuredClient = &fake.RESTClient{
		APIRegistry:          api.Registry,
		NegotiatedSerializer: unstructuredSerializer,
		Resp:                 &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods.Items[0])},
	}

	stdout := bytes.NewBuffer(nil)
	stderr := bytes.NewBuffer(nil)
	cmd := NewCmdGet(f, stdout, stderr)
	cmd.SetOutput(stdout)
	cmd.Flags().Lookup("output").Value.Set("yaml")
	cmd.Run(cmd, []string{"pods"})

	if showAll, _ := cmd.Flags().GetBool("show-all"); showAll {
		t.Errorf("expected showAll to not be true when getting resource")
	}
}
// watchTestData returns the shared fixtures for the watch tests: two pods
// ("bar" rv=9, "foo" rv=10) plus a watch event stream consisting of two
// Added events mirroring the current state followed by a Modified and a
// Deleted event for "foo" at later resource versions.
func watchTestData() ([]api.Pod, []watch.Event) {
	pods := []api.Pod{
		{
			ObjectMeta: metav1.ObjectMeta{
				Name:            "bar",
				Namespace:       "test",
				ResourceVersion: "9",
			},
			Spec: apitesting.DeepEqualSafePodSpec(),
		},
		{
			ObjectMeta: metav1.ObjectMeta{
				Name:            "foo",
				Namespace:       "test",
				ResourceVersion: "10",
			},
			Spec: apitesting.DeepEqualSafePodSpec(),
		},
	}
	events := []watch.Event{
		// current state events
		{
			Type: watch.Added,
			Object: &api.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:            "bar",
					Namespace:       "test",
					ResourceVersion: "9",
				},
				Spec: apitesting.DeepEqualSafePodSpec(),
			},
		},
		{
			Type: watch.Added,
			Object: &api.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:            "foo",
					Namespace:       "test",
					ResourceVersion: "10",
				},
				Spec: apitesting.DeepEqualSafePodSpec(),
			},
		},
		// resource events
		{
			Type: watch.Modified,
			Object: &api.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:            "foo",
					Namespace:       "test",
					ResourceVersion: "11",
				},
				Spec: apitesting.DeepEqualSafePodSpec(),
			},
		},
		{
			Type: watch.Deleted,
			Object: &api.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name:            "foo",
					Namespace:       "test",
					ResourceVersion: "12",
				},
				Spec: apitesting.DeepEqualSafePodSpec(),
			},
		},
	}
	return pods, events
}
// TestWatchSelector verifies that `kubectl get pods -l a=b --watch` first
// lists the matching pods, then streams subsequent watch events, printing
// both the initial objects and the event objects.
func TestWatchSelector(t *testing.T) {
	pods, events := watchTestData()
	f, tf, codec, _ := cmdtesting.NewAPIFactory()
	tf.Printer = &testPrinter{}
	podList := &api.PodList{
		Items: pods,
		ListMeta: metav1.ListMeta{
			ResourceVersion: "10",
		},
	}
	tf.UnstructuredClient = &fake.RESTClient{
		APIRegistry:          api.Registry,
		NegotiatedSerializer: unstructuredSerializer,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			// Both the list and the watch request must carry the selector.
			if req.URL.Query().Get(metav1.LabelSelectorQueryParam(api.Registry.GroupOrDie(api.GroupName).GroupVersion.String())) != "a=b" {
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
			}
			switch req.URL.Path {
			case "/namespaces/test/pods":
				// watch=true gets the event stream; otherwise the initial list.
				if req.URL.Query().Get("watch") == "true" {
					return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: watchBody(codec, events[2:])}, nil
				} else {
					return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, podList)}, nil
				}
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	tf.Namespace = "test"
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})
	cmd := NewCmdGet(f, buf, errBuf)
	cmd.SetOutput(buf)
	cmd.Flags().Set("watch", "true")
	cmd.Flags().Set("selector", "a=b")
	cmd.Run(cmd, []string{"pods"})
	expected := []runtime.Object{&pods[0], &pods[1], events[2].Object, events[3].Object}
	verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
	if len(buf.String()) == 0 {
		t.Errorf("unexpected empty output")
	}
}
// TestWatchResource verifies that `kubectl get pods foo --watch` fetches
// the named pod, then watches the pod collection filtered by a
// metadata.name field selector, printing the pod and each later event.
func TestWatchResource(t *testing.T) {
	pods, events := watchTestData()
	f, tf, codec, _ := cmdtesting.NewAPIFactory()
	tf.Printer = &testPrinter{}
	tf.UnstructuredClient = &fake.RESTClient{
		APIRegistry:          api.Registry,
		NegotiatedSerializer: unstructuredSerializer,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			switch req.URL.Path {
			case "/namespaces/test/pods/foo":
				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods[1])}, nil
			case "/namespaces/test/pods":
				// The watch is scoped to the single pod via a field selector.
				if req.URL.Query().Get("watch") == "true" && req.URL.Query().Get("fieldSelector") == "metadata.name=foo" {
					return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: watchBody(codec, events[1:])}, nil
				}
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	tf.Namespace = "test"
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})
	cmd := NewCmdGet(f, buf, errBuf)
	cmd.SetOutput(buf)
	cmd.Flags().Set("watch", "true")
	cmd.Run(cmd, []string{"pods", "foo"})
	expected := []runtime.Object{&pods[1], events[2].Object, events[3].Object}
	verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
	if len(buf.String()) == 0 {
		t.Errorf("unexpected empty output")
	}
}
// TestWatchResourceIdentifiedByFile verifies that `kubectl get --watch -f`
// resolves the resource from the manifest (a replication controller named
// "cassandra"), fetches it, then watches it via a field selector.
func TestWatchResourceIdentifiedByFile(t *testing.T) {
	pods, events := watchTestData()
	f, tf, codec, _ := cmdtesting.NewAPIFactory()
	tf.Printer = &testPrinter{}
	tf.UnstructuredClient = &fake.RESTClient{
		APIRegistry:          api.Registry,
		NegotiatedSerializer: unstructuredSerializer,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			switch req.URL.Path {
			case "/namespaces/test/replicationcontrollers/cassandra":
				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods[1])}, nil
			case "/namespaces/test/replicationcontrollers":
				if req.URL.Query().Get("watch") == "true" && req.URL.Query().Get("fieldSelector") == "metadata.name=cassandra" {
					return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: watchBody(codec, events[1:])}, nil
				}
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	tf.Namespace = "test"
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})
	cmd := NewCmdGet(f, buf, errBuf)
	cmd.SetOutput(buf)
	cmd.Flags().Set("watch", "true")
	cmd.Flags().Set("filename", "../../../examples/storage/cassandra/cassandra-controller.yaml")
	cmd.Run(cmd, []string{})
	expected := []runtime.Object{&pods[1], events[2].Object, events[3].Object}
	verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
	if len(buf.String()) == 0 {
		t.Errorf("unexpected empty output")
	}
}
// TestWatchOnlyResource verifies that `kubectl get pods foo --watch-only`
// skips printing the initial object and only prints the objects delivered
// by subsequent watch events.
func TestWatchOnlyResource(t *testing.T) {
	pods, events := watchTestData()
	f, tf, codec, _ := cmdtesting.NewAPIFactory()
	tf.Printer = &testPrinter{}
	tf.UnstructuredClient = &fake.RESTClient{
		APIRegistry:          api.Registry,
		NegotiatedSerializer: unstructuredSerializer,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			switch req.URL.Path {
			case "/namespaces/test/pods/foo":
				return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, &pods[1])}, nil
			case "/namespaces/test/pods":
				if req.URL.Query().Get("watch") == "true" && req.URL.Query().Get("fieldSelector") == "metadata.name=foo" {
					return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: watchBody(codec, events[1:])}, nil
				}
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	tf.Namespace = "test"
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})
	cmd := NewCmdGet(f, buf, errBuf)
	cmd.SetOutput(buf)
	cmd.Flags().Set("watch-only", "true")
	cmd.Run(cmd, []string{"pods", "foo"})
	// Only the Modified and Deleted event objects — not the initial pod.
	expected := []runtime.Object{events[2].Object, events[3].Object}
	verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
	if len(buf.String()) == 0 {
		t.Errorf("unexpected empty output")
	}
}
// TestWatchOnlyList verifies that `kubectl get pods --watch-only` skips
// printing the initial list and only prints the objects delivered by
// subsequent watch events.
func TestWatchOnlyList(t *testing.T) {
	pods, events := watchTestData()
	f, tf, codec, _ := cmdtesting.NewAPIFactory()
	tf.Printer = &testPrinter{}
	podList := &api.PodList{
		Items: pods,
		ListMeta: metav1.ListMeta{
			ResourceVersion: "10",
		},
	}
	tf.UnstructuredClient = &fake.RESTClient{
		APIRegistry:          api.Registry,
		NegotiatedSerializer: unstructuredSerializer,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			switch req.URL.Path {
			case "/namespaces/test/pods":
				// The list request seeds the resource version; the watch
				// request streams the later events.
				if req.URL.Query().Get("watch") == "true" {
					return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: watchBody(codec, events[2:])}, nil
				} else {
					return &http.Response{StatusCode: 200, Header: defaultHeader(), Body: objBody(codec, podList)}, nil
				}
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	tf.Namespace = "test"
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})
	cmd := NewCmdGet(f, buf, errBuf)
	cmd.SetOutput(buf)
	cmd.Flags().Set("watch-only", "true")
	cmd.Run(cmd, []string{"pods"})
	expected := []runtime.Object{events[2].Object, events[3].Object}
	verifyObjects(t, expected, tf.Printer.(*testPrinter).Objects)
	if len(buf.String()) == 0 {
		t.Errorf("unexpected empty output")
	}
}
// watchBody serializes the given watch events into a framed JSON stream
// suitable for use as a fake HTTP response body for a watch request.
func watchBody(codec runtime.Codec, events []watch.Event) io.ReadCloser {
	var stream bytes.Buffer
	encoder := restclientwatch.NewEncoder(streaming.NewEncoder(&stream, codec), codec)
	for i := range events {
		encoder.Encode(&events[i])
	}
	return json.Framer.NewFrameReader(ioutil.NopCloser(&stream))
}
| apache-2.0 |
AyolaJayamaha/phoenix | phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java | 79556 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.util;
import static com.google.common.base.Preconditions.checkNotNull;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ARRAY_SIZE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_FAMILY;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_SIZE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TYPE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DECIMAL_DIGITS;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.LINK_TYPE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ORDINAL_POSITION;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SORT_ORDER;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SEQ_NUM;
import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TENANT_ID;
import static org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT;
import static org.apache.phoenix.query.QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessor.MetaDataEndpointImpl;
import org.apache.phoenix.coprocessor.MetaDataProtocol;
import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.schema.MetaDataClient;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PNameFactory;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTable.IndexType;
import org.apache.phoenix.schema.PTable.LinkType;
import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.SaltingUtil;
import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.TableNotFoundException;
import org.apache.phoenix.schema.types.PBinary;
import org.apache.phoenix.schema.types.PBoolean;
import org.apache.phoenix.schema.types.PChar;
import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.schema.types.PDecimal;
import org.apache.phoenix.schema.types.PDouble;
import org.apache.phoenix.schema.types.PFloat;
import org.apache.phoenix.schema.types.PInteger;
import org.apache.phoenix.schema.types.PLong;
import org.apache.phoenix.schema.types.PVarbinary;
import org.apache.phoenix.schema.types.PVarchar;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Objects;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
public class UpgradeUtil {
private static final Logger logger = LoggerFactory.getLogger(UpgradeUtil.class);
private static final byte[] SEQ_PREFIX_BYTES = ByteUtil.concat(QueryConstants.SEPARATOR_BYTE_ARRAY, Bytes.toBytes("_SEQ_"));
public static final byte[] UPGRADE_TO_4_7_COLUMN_NAME = Bytes.toBytes("UPGRADE_TO_4_7");
public static String UPSERT_BASE_COLUMN_COUNT_IN_HEADER_ROW = "UPSERT "
+ "INTO SYSTEM.CATALOG "
+ "(TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME, COLUMN_FAMILY, BASE_COLUMN_COUNT) "
+ "VALUES (?, ?, ?, ?, ?, ?) ";
public static String SELECT_BASE_COLUMN_COUNT_FROM_HEADER_ROW = "SELECT "
+ "BASE_COLUMN_COUNT "
+ "FROM SYSTEM.CATALOG "
+ "WHERE "
+ "COLUMN_NAME IS NULL "
+ "AND "
+ "COLUMN_FAMILY IS NULL "
+ "AND "
+ "TENANT_ID %s "
+ "AND "
+ "TABLE_SCHEM %s "
+ "AND "
+ "TABLE_NAME = ? "
;
private static final String UPDATE_LINK =
"UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " +
TENANT_ID + "," +
TABLE_SCHEM + "," +
TABLE_NAME + "," +
COLUMN_FAMILY + "," +
LINK_TYPE + "," +
TABLE_SEQ_NUM +
") SELECT " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + ",'%s' AS "
+ COLUMN_FAMILY + " ," + LINK_TYPE + "," + TABLE_SEQ_NUM + " FROM " + SYSTEM_CATALOG_SCHEMA + ".\""
+ SYSTEM_CATALOG_TABLE + "\" WHERE " + COLUMN_FAMILY + "=? AND " + LINK_TYPE + " = "
+ LinkType.PHYSICAL_TABLE.getSerializedValue();
private static final String DELETE_LINK = "DELETE FROM " + SYSTEM_CATALOG_SCHEMA + "." + SYSTEM_CATALOG_TABLE
+ " WHERE " + COLUMN_FAMILY + "=? AND " + LINK_TYPE + " = " + LinkType.PHYSICAL_TABLE.getSerializedValue();
    /** Private constructor: UpgradeUtil is a static utility class and must not be instantiated. */
    private UpgradeUtil() {
    }
private static byte[] getSequenceSnapshotName() {
return Bytes.toBytes("_BAK_" + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME);
}
    /**
     * Creates a backup HBase table for SYSTEM.SEQUENCE and copies all of its
     * rows into it, so the sequence data can be restored if the upgrade fails.
     *
     * @param admin HBase admin used to create the backup table
     * @param conn  connection used to copy the sequence rows
     * @throws SQLException if table creation or the copy fails
     */
    private static void createSequenceSnapshot(HBaseAdmin admin, PhoenixConnection conn) throws SQLException {
        byte[] tableName = getSequenceSnapshotName();
        HColumnDescriptor columnDesc = new HColumnDescriptor(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES);
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
        desc.addFamily(columnDesc);
        try {
            admin.createTable(desc);
            copyTable(conn, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, tableName);
        } catch (IOException e) {
            throw ServerUtil.parseServerException(e);
        }
    }
/**
 * Copies all rows from the backup snapshot table back into SYSTEM.SEQUENCE.
 * The admin parameter is unused but kept for signature symmetry with
 * createSequenceSnapshot/deleteSequenceSnapshot.
 */
private static void restoreSequenceSnapshot(HBaseAdmin admin, PhoenixConnection conn) throws SQLException {
    copyTable(conn, getSequenceSnapshotName(), PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
}
/**
 * Disables and drops the SYSTEM.SEQUENCE backup snapshot table once the
 * pre-split has completed successfully.
 *
 * @throws SQLException wrapping any underlying IOException from HBase
 */
private static void deleteSequenceSnapshot(HBaseAdmin admin) throws SQLException {
    byte[] tableName = getSequenceSnapshotName();
    try {
        // Fixed: removed stray empty statement (";;") after this call.
        admin.disableTable(tableName);
        admin.deleteTable(tableName);
    } catch (IOException e) {
        throw ServerUtil.parseServerException(e);
    }
}
/**
 * Copies every raw cell from one HBase table to another in ~100K-byte batches.
 * The scan is raw with all metadata versions so that delete markers are seen
 * and carried over to the target, preserving delete semantics for the copied
 * rows. Used to back up and restore SYSTEM.SEQUENCE around the pre-split.
 *
 * Fix: corrected the misspelled "bactch" in the two batch-commit log messages.
 *
 * @param conn       connection whose query services provide the HTables
 * @param sourceName HBase name of the table to read from
 * @param targetName HBase name of the table to write to
 * @throws SQLException on any failure (IOExceptions are re-wrapped)
 */
@SuppressWarnings("deprecation")
private static void copyTable(PhoenixConnection conn, byte[] sourceName, byte[] targetName) throws SQLException {
    int batchSizeBytes = 100 * 1024; // 100K chunks
    int sizeBytes = 0;
    List<Mutation> mutations = Lists.newArrayListWithExpectedSize(10000);

    Scan scan = new Scan();
    scan.setRaw(true); // raw scan so delete markers are returned and can be copied
    scan.setMaxVersions(MetaDataProtocol.DEFAULT_MAX_META_DATA_VERSIONS);
    ResultScanner scanner = null;
    HTableInterface source = null;
    HTableInterface target = null;
    try {
        source = conn.getQueryServices().getTable(sourceName);
        target = conn.getQueryServices().getTable(targetName);
        scanner = source.getScanner(scan);
        Result result;
        while ((result = scanner.next()) != null) {
            for (KeyValue keyValue : result.raw()) {
                sizeBytes += keyValue.getLength();
                if (KeyValue.Type.codeToType(keyValue.getType()) == KeyValue.Type.Put) {
                    // Put new value
                    Put put = new Put(keyValue.getRow());
                    put.add(keyValue);
                    mutations.add(put);
                } else if (KeyValue.Type.codeToType(keyValue.getType()) == KeyValue.Type.Delete){
                    // Copy delete marker using new key so that it continues
                    // to delete the key value preceding it that will be updated
                    // as well.
                    Delete delete = new Delete(keyValue.getRow());
                    delete.addDeleteMarker(keyValue);
                    mutations.add(delete);
                }
            }
            if (sizeBytes >= batchSizeBytes) {
                logger.info("Committing batch of temp rows");
                target.batch(mutations);
                mutations.clear();
                sizeBytes = 0;
            }
        }
        if (!mutations.isEmpty()) {
            logger.info("Committing last batch of temp rows");
            target.batch(mutations);
        }
        logger.info("Successfully completed copy");
    } catch (SQLException e) {
        throw e;
    } catch (Exception e) {
        throw ServerUtil.parseServerException(e);
    } finally {
        // Close scanner, then source, then target; a failure closing one must
        // not prevent closing the others.
        try {
            if (scanner != null) scanner.close();
        } finally {
            try {
                if (source != null) source.close();
            } catch (IOException e) {
                logger.warn("Exception during close of source table",e);
            } finally {
                try {
                    if (target != null) target.close();
                } catch (IOException e) {
                    logger.warn("Exception during close of target table",e);
                }
            }
        }
    }
}
/**
 * Pre-splits SYSTEM.SEQUENCE into nSaltBuckets regions: snapshots it to a
 * backup table, drops and re-creates it with salt-based split points, then
 * restores the rows. A no-op when nSaltBuckets <= 0. On failure the backup
 * table is intentionally left in place so the data can be recovered manually.
 */
private static void preSplitSequenceTable(PhoenixConnection conn, int nSaltBuckets) throws SQLException {
    HBaseAdmin admin = conn.getQueryServices().getAdmin();
    boolean snapshotCreated = false;
    boolean success = false;
    try {
        if (nSaltBuckets <= 0) {
            return;
        }
        logger.warn("Pre-splitting SYSTEM.SEQUENCE table " + nSaltBuckets + "-ways. This may take some time - please do not close window.");
        // Capture the existing table descriptor before dropping so the
        // re-created table keeps the same configuration.
        HTableDescriptor desc = admin.getTableDescriptor(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
        createSequenceSnapshot(admin, conn);
        snapshotCreated = true;
        admin.disableTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME);
        admin.deleteTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME);
        byte[][] splitPoints = SaltingUtil.getSalteByteSplitPoints(nSaltBuckets);
        admin.createTable(desc, splitPoints);
        restoreSequenceSnapshot(admin, conn);
        success = true;
        logger.warn("Completed pre-splitting SYSTEM.SEQUENCE table");
    } catch (IOException e) {
        throw new SQLException("Unable to pre-split SYSTEM.SEQUENCE table", e);
    } finally {
        try {
            // Only delete the backup once the restore succeeded; otherwise it
            // is kept for manual recovery.
            if (snapshotCreated && success) {
                try {
                    deleteSequenceSnapshot(admin);
                } catch (SQLException e) {
                    logger.warn("Exception while deleting SYSTEM.SEQUENCE snapshot during pre-split", e);
                }
            }
        } finally {
            try {
                admin.close();
            } catch (IOException e) {
                logger.warn("Exception while closing admin during pre-split", e);
            }
        }
    }
}
/**
 * Salts and pre-splits SYSTEM.SEQUENCE during upgrade.
 *
 * Uses checkAndPut on the SALT_BUCKETS cell of the SYSTEM.SEQUENCE header row
 * in SYSTEM.CATALOG as a cross-client semaphore so that only one client
 * performs the upgrade. For pre-4.1.0 metadata, every sequence row is
 * rewritten with a salt byte prepended to its key before the pre-split.
 *
 * Fix: corrected the misspelled "bactch" in the two batch-commit log messages.
 *
 * @param conn         connection used for catalog and sequence table access
 * @param nSaltBuckets number of salt buckets / pre-split regions
 * @param oldTable     the pre-upgrade SYSTEM.SEQUENCE PTable, or null
 * @return true if this client performed the pre-split, false if another
 *         client had already upgraded the table
 */
@SuppressWarnings("deprecation")
public static boolean upgradeSequenceTable(PhoenixConnection conn, int nSaltBuckets, PTable oldTable) throws SQLException {
    logger.info("Upgrading SYSTEM.SEQUENCE table");

    byte[] seqTableKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE);
    HTableInterface sysTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
    try {
        logger.info("Setting SALT_BUCKETS property of SYSTEM.SEQUENCE to " + SaltingUtil.MAX_BUCKET_NUM);
        KeyValue saltKV = KeyValueUtil.newKeyValue(seqTableKey,
                PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES,
                MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP,
                PInteger.INSTANCE.toBytes(nSaltBuckets));
        Put saltPut = new Put(seqTableKey);
        saltPut.add(saltKV);
        // Prevent multiple clients from doing this upgrade
        if (!sysTable.checkAndPut(seqTableKey,
                PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES, null, saltPut)) {
            if (oldTable == null) { // Unexpected, but to be safe just run pre-split code
                preSplitSequenceTable(conn, nSaltBuckets);
                return true;
            }
            // If upgrading from 4.2.0, then we need this special case of pre-splitting the table.
            // This is needed as a fix for https://issues.apache.org/jira/browse/PHOENIX-1401
            if (oldTable.getTimeStamp() == MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_2_0) {
                byte[] oldSeqNum = PLong.INSTANCE.toBytes(oldTable.getSequenceNumber());
                // NOTE(review): the new TABLE_SEQ_NUM value written below is the
                // system-table timestamp, presumably as a one-way sentinel that can
                // never equal a real sequence number — confirm before changing.
                KeyValue seqNumKV = KeyValueUtil.newKeyValue(seqTableKey,
                        PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                        PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES,
                        MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP,
                        PLong.INSTANCE.toBytes(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP));
                Put seqNumPut = new Put(seqTableKey);
                seqNumPut.add(seqNumKV);
                // Increment TABLE_SEQ_NUM in checkAndPut as semaphore so that only single client
                // pre-splits the sequence table.
                if (sysTable.checkAndPut(seqTableKey,
                        PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                        PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, oldSeqNum, seqNumPut)) {
                    preSplitSequenceTable(conn, nSaltBuckets);
                    return true;
                }
            }
            logger.info("SYSTEM.SEQUENCE table has already been upgraded");
            return false;
        }

        // if the SYSTEM.SEQUENCE table is at 4.1.0 or before then we need to salt the table
        // and pre-split it.
        if (oldTable.getTimeStamp() <= MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0) {
            int batchSizeBytes = 100 * 1024; // 100K chunks
            int sizeBytes = 0;
            List<Mutation> mutations = Lists.newArrayListWithExpectedSize(10000);

            boolean success = false;
            Scan scan = new Scan();
            scan.setRaw(true);
            scan.setMaxVersions(MetaDataProtocol.DEFAULT_MAX_META_DATA_VERSIONS);
            HTableInterface seqTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
            try {
                boolean committed = false;
                logger.info("Adding salt byte to all SYSTEM.SEQUENCE rows");
                ResultScanner scanner = seqTable.getScanner(scan);
                try {
                    Result result;
                    while ((result = scanner.next()) != null) {
                        for (KeyValue keyValue : result.raw()) {
                            KeyValue newKeyValue = addSaltByte(keyValue, nSaltBuckets);
                            if (newKeyValue != null) {
                                sizeBytes += newKeyValue.getLength();
                                if (KeyValue.Type.codeToType(newKeyValue.getType()) == KeyValue.Type.Put) {
                                    // Delete old value
                                    byte[] buf = keyValue.getBuffer();
                                    Delete delete = new Delete(keyValue.getRow());
                                    KeyValue deleteKeyValue = new KeyValue(buf, keyValue.getRowOffset(), keyValue.getRowLength(),
                                            buf, keyValue.getFamilyOffset(), keyValue.getFamilyLength(),
                                            buf, keyValue.getQualifierOffset(), keyValue.getQualifierLength(),
                                            keyValue.getTimestamp(), KeyValue.Type.Delete,
                                            ByteUtil.EMPTY_BYTE_ARRAY,0,0);
                                    delete.addDeleteMarker(deleteKeyValue);
                                    mutations.add(delete);
                                    sizeBytes += deleteKeyValue.getLength();
                                    // Put new value
                                    Put put = new Put(newKeyValue.getRow());
                                    put.add(newKeyValue);
                                    mutations.add(put);
                                } else if (KeyValue.Type.codeToType(newKeyValue.getType()) == KeyValue.Type.Delete){
                                    // Copy delete marker using new key so that it continues
                                    // to delete the key value preceding it that will be updated
                                    // as well.
                                    Delete delete = new Delete(newKeyValue.getRow());
                                    delete.addDeleteMarker(newKeyValue);
                                    mutations.add(delete);
                                }
                            }
                            if (sizeBytes >= batchSizeBytes) {
                                logger.info("Committing batch of SYSTEM.SEQUENCE rows");
                                seqTable.batch(mutations);
                                mutations.clear();
                                sizeBytes = 0;
                                committed = true;
                            }
                        }
                    }
                    if (!mutations.isEmpty()) {
                        logger.info("Committing last batch of SYSTEM.SEQUENCE rows");
                        seqTable.batch(mutations);
                    }
                    preSplitSequenceTable(conn, nSaltBuckets);
                    logger.info("Successfully completed upgrade of SYSTEM.SEQUENCE");
                    success = true;
                    return true;
                } catch (InterruptedException e) {
                    throw ServerUtil.parseServerException(e);
                } finally {
                    try {
                        scanner.close();
                    } finally {
                        if (!success) {
                            if (!committed) { // Try to recover by setting salting back to off, as we haven't successfully committed anything
                                // Don't use Delete here as we'd never be able to change it again at this timestamp.
                                KeyValue unsaltKV = KeyValueUtil.newKeyValue(seqTableKey,
                                        PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                                        PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES,
                                        MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP,
                                        PInteger.INSTANCE.toBytes(0));
                                Put unsaltPut = new Put(seqTableKey);
                                unsaltPut.add(unsaltKV);
                                try {
                                    sysTable.put(unsaltPut);
                                    success = true;
                                } finally {
                                    if (!success) logger.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
                                }
                            } else { // We're screwed b/c we've already committed some salted sequences...
                                logger.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
                            }
                        }
                    }
                }
            } catch (IOException e) {
                throw ServerUtil.parseServerException(e);
            } finally {
                try {
                    seqTable.close();
                } catch (IOException e) {
                    logger.warn("Exception during close",e);
                }
            }
        }
        return false;
    } catch (IOException e) {
        throw ServerUtil.parseServerException(e);
    } finally {
        try {
            sysTable.close();
        } catch (IOException e) {
            logger.warn("Exception during close",e);
        }
    }
}
/**
 * Builds a new KeyValue whose row key is the original's with a salt byte
 * prepended, preserving family, qualifier, timestamp, type and value.
 * Additionally repairs the mis-named row keys of view-index sequences by
 * regenerating their key from the parsed schema/table parts.
 *
 * @return the re-keyed KeyValue, or null when the row is not a view-index
 *         sequence and no salting is requested (nSaltBuckets == 0)
 */
@SuppressWarnings("deprecation")
private static KeyValue addSaltByte(KeyValue keyValue, int nSaltBuckets) {
    byte[] buf = keyValue.getBuffer();
    int length = keyValue.getRowLength();
    int offset = keyValue.getRowOffset();
    // A view-index sequence row is identified by its key starting with SEQ_PREFIX_BYTES.
    boolean isViewSeq = length > SEQ_PREFIX_BYTES.length && Bytes.compareTo(SEQ_PREFIX_BYTES, 0, SEQ_PREFIX_BYTES.length, buf, offset, SEQ_PREFIX_BYTES.length) == 0;
    if (!isViewSeq && nSaltBuckets == 0) {
        return null;
    }
    byte[] newBuf;
    if (isViewSeq) { // We messed up the name for the sequences for view indexes so we'll take this opportunity to fix it
        if (buf[length-1] == 0) { // Global indexes on views have trailing null byte
            length--;
        }
        // Split the old key into its null-separated parts (schema, table, ...).
        byte[][] rowKeyMetaData = new byte[3][];
        SchemaUtil.getVarChars(buf, offset, length, 0, rowKeyMetaData);
        byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
        // Strip the view-index sequence prefix from the schema name.
        byte[] unprefixedSchemaName = new byte[schemaName.length - MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX_BYTES.length];
        System.arraycopy(schemaName, MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX_BYTES.length, unprefixedSchemaName, 0, unprefixedSchemaName.length);
        byte[] tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
        PName physicalName = PNameFactory.newName(unprefixedSchemaName);
        // Reformulate key based on correct data
        newBuf = MetaDataUtil.getViewIndexSequenceKey(tableName == null ? null : Bytes.toString(tableName), physicalName, nSaltBuckets).getKey();
    } else {
        // Prepend one salt byte computed over the original key bytes.
        newBuf = new byte[length + 1];
        System.arraycopy(buf, offset, newBuf, SaltingUtil.NUM_SALTING_BYTES, length);
        newBuf[0] = SaltingUtil.getSaltingByte(newBuf, SaltingUtil.NUM_SALTING_BYTES, length, nSaltBuckets);
    }
    // Reuse everything except the row key from the original cell.
    return new KeyValue(newBuf, 0, newBuf.length,
            buf, keyValue.getFamilyOffset(), keyValue.getFamilyLength(),
            buf, keyValue.getQualifierOffset(), keyValue.getQualifierLength(),
            keyValue.getTimestamp(), KeyValue.Type.codeToType(keyValue.getType()),
            buf, keyValue.getValueOffset(), keyValue.getValueLength());
}
/**
 * Upgrade the metadata in the catalog table to enable adding columns to tables with views.
 *
 * For each base physical table, the columns of every view on it are compared
 * (in ordinal order) against the base table's columns, and BASE_COLUMN_COUNT
 * is written into the view's SYSTEM.CATALOG header row: the base table's
 * column count when the view still contains all of them, or
 * DIVERGED_VIEW_BASE_COLUMN_COUNT when the view no longer matches.
 *
 * Fix: removed a duplicate bind of the first statement parameter — the
 * ordinal-position cap was previously set once with an explicit index and
 * again via paramIndex, the second bind overwriting the first.
 *
 * @param oldMetaConnection caller should take care of closing the passed connection appropriately
 * @throws SQLException
 */
public static void upgradeTo4_5_0(PhoenixConnection oldMetaConnection) throws SQLException {
    PhoenixConnection metaConnection = null;
    try {
        // Need to use own connection with max time stamp to be able to read all data from SYSTEM.CATALOG
        metaConnection = new PhoenixConnection(oldMetaConnection, HConstants.LATEST_TIMESTAMP);
        logger.info("Upgrading metadata to support adding columns to tables with views");
        String getBaseTableAndViews = "SELECT "
                + COLUMN_FAMILY + " AS BASE_PHYSICAL_TABLE, "
                + TENANT_ID + ", "
                + TABLE_SCHEM + " AS VIEW_SCHEMA, "
                + TABLE_NAME + " AS VIEW_NAME "
                + "FROM " + SYSTEM_CATALOG_NAME
                + " WHERE " + COLUMN_FAMILY + " IS NOT NULL " // column_family column points to the physical table name.
                + " AND " + COLUMN_NAME + " IS NULL "
                + " AND " + LINK_TYPE + " = ? ";
        // Build a map of base table name -> list of views on the table.
        Map<String, List<ViewKey>> parentTableViewsMap = new HashMap<>();
        try (PreparedStatement stmt = metaConnection.prepareStatement(getBaseTableAndViews)) {
            // Get back view rows that have links back to the base physical table. This takes care
            // of cases when we have a hierarchy of views too.
            stmt.setByte(1, LinkType.PHYSICAL_TABLE.getSerializedValue());
            try (ResultSet rs = stmt.executeQuery()) {
                while (rs.next()) {
                    // this is actually SCHEMANAME.TABLENAME
                    String parentTable = rs.getString("BASE_PHYSICAL_TABLE");
                    String tenantId = rs.getString(TENANT_ID);
                    String viewSchema = rs.getString("VIEW_SCHEMA");
                    String viewName = rs.getString("VIEW_NAME");
                    List<ViewKey> viewKeysList = parentTableViewsMap.get(parentTable);
                    if (viewKeysList == null) {
                        viewKeysList = new ArrayList<>();
                        parentTableViewsMap.put(parentTable, viewKeysList);
                    }
                    viewKeysList.add(new ViewKey(tenantId, viewSchema, viewName));
                }
            }
        }
        boolean clearCache = false;
        for (Entry<String, List<ViewKey>> entry : parentTableViewsMap.entrySet()) {
            // Fetch column information for the base physical table
            String physicalTable = entry.getKey();
            String baseTableSchemaName = SchemaUtil.getSchemaNameFromFullName(physicalTable).equals(StringUtil.EMPTY_STRING) ? null : SchemaUtil.getSchemaNameFromFullName(physicalTable);
            String baseTableName = SchemaUtil.getTableNameFromFullName(physicalTable);
            List<ColumnDetails> basePhysicalTableColumns = new ArrayList<>();

            // Columns fetched in order of ordinal position
            String fetchColumnInfoForBasePhysicalTable = "SELECT " +
                    COLUMN_NAME + "," +
                    COLUMN_FAMILY + "," +
                    DATA_TYPE + "," +
                    COLUMN_SIZE + "," +
                    DECIMAL_DIGITS + "," +
                    ORDINAL_POSITION + "," +
                    SORT_ORDER + "," +
                    ARRAY_SIZE + " " +
                    "FROM SYSTEM.CATALOG " +
                    "WHERE " +
                    "TABLE_SCHEM %s " +
                    "AND TABLE_NAME = ? " +
                    "AND COLUMN_NAME IS NOT NULL " +
                    "ORDER BY " +
                    ORDINAL_POSITION;

            PreparedStatement stmt = null;
            // TABLE_SCHEM may be null, so the comparison operator is formatted in.
            if (baseTableSchemaName == null) {
                fetchColumnInfoForBasePhysicalTable =
                        String.format(fetchColumnInfoForBasePhysicalTable, "IS NULL ");
                stmt = metaConnection.prepareStatement(fetchColumnInfoForBasePhysicalTable);
                stmt.setString(1, baseTableName);
            } else {
                fetchColumnInfoForBasePhysicalTable =
                        String.format(fetchColumnInfoForBasePhysicalTable, " = ? ");
                stmt = metaConnection.prepareStatement(fetchColumnInfoForBasePhysicalTable);
                stmt.setString(1, baseTableSchemaName);
                stmt.setString(2, baseTableName);
            }
            try (ResultSet rs = stmt.executeQuery()) {
                while (rs.next()) {
                    basePhysicalTableColumns.add(new ColumnDetails(rs.getString(COLUMN_FAMILY), rs
                            .getString(COLUMN_NAME), rs.getInt(ORDINAL_POSITION), rs
                            .getInt(DATA_TYPE), rs.getInt(COLUMN_SIZE), rs.getInt(DECIMAL_DIGITS),
                            rs.getInt(SORT_ORDER), rs.getInt(ARRAY_SIZE)));
                }
            }

            // Fetch column information for all the views on the base physical table ordered by ordinal position.
            List<ViewKey> viewKeys = entry.getValue();
            StringBuilder sb = new StringBuilder();
            sb.append("SELECT " +
                    TENANT_ID + "," +
                    TABLE_SCHEM + "," +
                    TABLE_NAME + "," +
                    COLUMN_NAME + "," +
                    COLUMN_FAMILY + "," +
                    DATA_TYPE + "," +
                    COLUMN_SIZE + "," +
                    DECIMAL_DIGITS + "," +
                    ORDINAL_POSITION + "," +
                    SORT_ORDER + "," +
                    ARRAY_SIZE + " " +
                    "FROM SYSTEM.CATALOG " +
                    "WHERE " +
                    COLUMN_NAME + " IS NOT NULL " +
                    "AND " +
                    ORDINAL_POSITION + " <= ? " + // fetch only those columns that would impact setting of base column count
                    "AND " +
                    "(" + TENANT_ID+ ", " + TABLE_SCHEM + ", " + TABLE_NAME + ") IN (");

            int numViews = viewKeys.size();
            for (int i = 0; i < numViews; i++) {
                sb.append(" (?, ?, ?) ");
                if (i < numViews - 1) {
                    sb.append(", ");
                }
            }
            sb.append(" ) ");
            sb.append(" GROUP BY " +
                    TENANT_ID + "," +
                    TABLE_SCHEM + "," +
                    TABLE_NAME + "," +
                    COLUMN_NAME + "," +
                    COLUMN_FAMILY + "," +
                    DATA_TYPE + "," +
                    COLUMN_SIZE + "," +
                    DECIMAL_DIGITS + "," +
                    ORDINAL_POSITION + "," +
                    SORT_ORDER + "," +
                    ARRAY_SIZE + " " +
                    "ORDER BY " +
                    TENANT_ID + "," + TABLE_SCHEM + ", " + TABLE_NAME + ", " + ORDINAL_POSITION);
            String fetchViewColumnsSql = sb.toString();
            stmt = metaConnection.prepareStatement(fetchViewColumnsSql);
            int numColsInBaseTable = basePhysicalTableColumns.size();
            // Bind the ordinal-position cap once (fixed: was previously also set
            // directly at index 1, making the second bind redundant).
            int paramIndex = 1;
            stmt.setInt(paramIndex++, numColsInBaseTable);
            for (ViewKey view : viewKeys) {
                stmt.setString(paramIndex++, view.tenantId);
                stmt.setString(paramIndex++, view.schema);
                stmt.setString(paramIndex++, view.name);
            }
            String currentTenantId = null;
            String currentViewSchema = null;
            String currentViewName = null;
            try (ResultSet rs = stmt.executeQuery()) {
                int numBaseTableColsMatched = 0;
                boolean ignore = false;
                boolean baseColumnCountUpserted = false;
                while (rs.next()) {
                    String viewTenantId = rs.getString(TENANT_ID);
                    String viewSchema = rs.getString(TABLE_SCHEM);
                    String viewName = rs.getString(TABLE_NAME);
                    if (!(Objects.equal(viewTenantId, currentTenantId) && Objects.equal(viewSchema, currentViewSchema) && Objects.equal(viewName, currentViewName))) {
                        // We are about to iterate through columns of a different view. Check whether base column count was upserted.
                        // If it wasn't then it is likely the case that a column inherited from the base table was dropped from view.
                        if (currentViewName != null && !baseColumnCountUpserted && numBaseTableColsMatched < numColsInBaseTable) {
                            upsertBaseColumnCountInHeaderRow(metaConnection, currentTenantId, currentViewSchema, currentViewName, DIVERGED_VIEW_BASE_COLUMN_COUNT);
                            clearCache = true;
                        }
                        // reset the values as we are now going to iterate over columns of a new view.
                        numBaseTableColsMatched = 0;
                        currentTenantId = viewTenantId;
                        currentViewSchema = viewSchema;
                        currentViewName = viewName;
                        ignore = false;
                        baseColumnCountUpserted = false;
                    }
                    if (!ignore) {
                        /*
                         * Iterate over all the columns of the base physical table and the columns of the view. Compare the
                         * two till one of the following happens:
                         *
                         * 1) We run into a view column which is different from column in the base physical table.
                         * This means that the view has diverged from the base physical table. In such a case
                         * we will set a special value for the base column count. That special value will also be used
                         * on the server side to filter out the diverged view so that meta-data changes on the base
                         * physical table are not propagated to it.
                         *
                         * 2) Every physical table column is present in the view. In that case we set the base column count
                         * as the number of columns in the base physical table. At that point we ignore rest of the columns
                         * of the view.
                         *
                         */
                        ColumnDetails baseTableColumn = basePhysicalTableColumns.get(numBaseTableColsMatched);
                        String columName = rs.getString(COLUMN_NAME);
                        String columnFamily = rs.getString(COLUMN_FAMILY);
                        int ordinalPos = rs.getInt(ORDINAL_POSITION);
                        int dataType = rs.getInt(DATA_TYPE);
                        int columnSize = rs.getInt(COLUMN_SIZE);
                        int decimalDigits = rs.getInt(DECIMAL_DIGITS);
                        int sortOrder = rs.getInt(SORT_ORDER);
                        int arraySize = rs.getInt(ARRAY_SIZE);
                        ColumnDetails viewColumn = new ColumnDetails(columnFamily, columName, ordinalPos, dataType, columnSize, decimalDigits, sortOrder, arraySize);
                        if (baseTableColumn.equals(viewColumn)) {
                            numBaseTableColsMatched++;
                            if (numBaseTableColsMatched == numColsInBaseTable) {
                                upsertBaseColumnCountInHeaderRow(metaConnection, viewTenantId, viewSchema, viewName, numColsInBaseTable);
                                // No need to ignore the rest of the columns of the view here since the
                                // query retrieved only those columns that had ordinal position <= numColsInBaseTable
                                baseColumnCountUpserted = true;
                                clearCache = true;
                            }
                        } else {
                            // special value to denote that the view has diverged from the base physical table.
                            upsertBaseColumnCountInHeaderRow(metaConnection, viewTenantId, viewSchema, viewName, DIVERGED_VIEW_BASE_COLUMN_COUNT);
                            baseColumnCountUpserted = true;
                            clearCache = true;
                            // ignore rest of the rows for the view.
                            ignore = true;
                        }
                    }
                }
            }
            // set base column count for the header row of the base table too. We use this information
            // to figure out whether the upgrade is in progress or hasn't started.
            upsertBaseColumnCountInHeaderRow(metaConnection, null, baseTableSchemaName, baseTableName, BASE_TABLE_BASE_COLUMN_COUNT);
            metaConnection.commit();
        }
        // clear metadata cache on region servers to force loading of the latest metadata
        if (clearCache) {
            metaConnection.getQueryServices().clearCache();
        }
    } finally {
        if (metaConnection != null) {
            metaConnection.close();
        }
    }
}
/**
 * Writes BASE_COLUMN_COUNT into the SYSTEM.CATALOG header row of the given
 * table or view. COLUMN_NAME and COLUMN_FAMILY are bound to null since those
 * are null on header rows. The caller is responsible for committing.
 */
private static void upsertBaseColumnCountInHeaderRow(PhoenixConnection metaConnection,
        String tenantId, String schemaName, String viewOrTableName, int baseColumnCount)
        throws SQLException {
    try (PreparedStatement upsert =
            metaConnection.prepareStatement(UPSERT_BASE_COLUMN_COUNT_IN_HEADER_ROW)) {
        upsert.setString(1, tenantId);
        upsert.setString(2, schemaName);
        upsert.setString(3, viewOrTableName);
        upsert.setString(4, null); // COLUMN_NAME: null marks the header row
        upsert.setString(5, null); // COLUMN_FAMILY: null marks the header row
        upsert.setInt(6, baseColumnCount);
        upsert.executeUpdate();
    }
}
/**
 * Value object capturing the schema-relevant attributes of a column, used
 * during the 4.5 upgrade to compare base-table columns against view columns.
 * Two columns are considered the same only when every attribute (name,
 * family, ordinal position, type, size, scale, sort order, array size)
 * matches; equals/hashCode cover all of them.
 */
private static class ColumnDetails {

    @Nullable
    private final String columnFamily;
    @Nonnull
    private final String columnName;
    private final int ordinalValue;
    private final int dataType;
    private final int maxLength;
    private final int scale;
    private final int sortOrder;
    private final int arraySize;

    ColumnDetails(String columnFamily, String columnName, int ordinalValue, int dataType,
            int maxLength, int scale, int sortOrder, int arraySize) {
        // Fixed: removed checkNotNull on the int parameters — primitives are
        // autoboxed and can never be null, so those checks were no-ops.
        checkNotNull(columnName);
        this.columnFamily = columnFamily;
        this.columnName = columnName;
        this.ordinalValue = ordinalValue;
        this.dataType = dataType;
        this.maxLength = maxLength;
        this.scale = scale;
        this.sortOrder = sortOrder;
        this.arraySize = arraySize;
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + columnName.hashCode();
        result = prime * result + ((columnFamily == null) ? 0 : columnFamily.hashCode());
        result = prime * result + arraySize;
        result = prime * result + dataType;
        result = prime * result + maxLength;
        result = prime * result + ordinalValue;
        result = prime * result + scale;
        result = prime * result + sortOrder;
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) return true;
        if (obj == null) return false;
        if (getClass() != obj.getClass()) return false;
        ColumnDetails other = (ColumnDetails) obj;
        if (!columnName.equals(other.columnName)) return false;
        if (columnFamily == null) {
            if (other.columnFamily != null) return false;
        } else if (!columnFamily.equals(other.columnFamily)) return false;
        if (arraySize != other.arraySize) return false;
        if (dataType != other.dataType) return false;
        if (maxLength != other.maxLength) return false;
        if (ordinalValue != other.ordinalValue) return false;
        if (scale != other.scale) return false;
        if (sortOrder != other.sortOrder) return false;
        return true;
    }
}
/**
 * Identity of a view: (tenant id, schema, view name). Tenant id and schema
 * may be null for global / default-schema views; the name never is.
 */
private static class ViewKey {

    @Nullable
    private final String tenantId;
    @Nullable
    private final String schema;
    @Nonnull
    private final String name;

    private ViewKey(String tenantId, String schema, String viewName) {
        this.tenantId = tenantId;
        this.schema = schema;
        this.name = viewName;
    }

    @Override
    public int hashCode() {
        // Same accumulation order as before (tenantId, name, schema) so hash
        // values are unchanged.
        final int prime = 31;
        int result = 1;
        result = prime * result + ((tenantId == null) ? 0 : tenantId.hashCode());
        result = prime * result + name.hashCode();
        result = prime * result + ((schema == null) ? 0 : schema.hashCode());
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        ViewKey that = (ViewKey) obj;
        if (tenantId == null ? that.tenantId != null : !tenantId.equals(that.tenantId)) {
            return false;
        }
        if (!name.equals(that.name)) {
            return false;
        }
        return schema == null ? that.schema == null : schema.equals(that.schema);
    }
}
/**
 * Renders a flat [tenant, schema, table, tenant, schema, table, ...] list as
 * a SQL row-value-constructor list suitable for an IN clause, e.g.
 * (('t','s','n'),(null,'s2','n2')). Null tenant/schema become SQL null
 * literals; names are assumed not to contain single quotes.
 */
private static String getTableRVC(List<String> tableNames) {
    StringBuilder rvc = new StringBuilder("(");
    for (int idx = 0; idx < tableNames.size(); idx += 3) {
        String tenant = tableNames.get(idx);
        String schema = tableNames.get(idx + 1);
        String table = tableNames.get(idx + 2);
        rvc.append('(')
           .append(tenant == null ? "null" : "'" + tenant + "'")
           .append(',')
           .append(schema == null ? "null" : "'" + schema + "'")
           .append(",'")
           .append(table)
           .append("'),");
    }
    // Overwrite the trailing ',' with ')' to close the IN list.
    rvc.setCharAt(rvc.length() - 1, ')');
    return rvc.toString();
}
/**
 * Drains (tenant, schema, table) triples from rs, then queries SYSTEM.CATALOG
 * for the not-yet-upgraded header rows among them. Rows of type TABLE are
 * added to physicalTables; rows of otherType are returned as a flat
 * tenant/schema/table list for further resolution by the caller.
 */
private static List<String> addPhysicalTables(PhoenixConnection conn, ResultSet rs, PTableType otherType, Set<String> physicalTables) throws SQLException {
    List<String> candidateNames = Lists.newArrayListWithExpectedSize(1024);
    while (rs.next()) {
        candidateNames.add(rs.getString(1));
        candidateNames.add(rs.getString(2));
        candidateNames.add(rs.getString(3));
    }
    if (candidateNames.isEmpty()) {
        return Collections.emptyList();
    }

    // Find the header rows for tables that have not been upgraded already.
    // Views are irrelevant here since their row key cannot differ from the
    // table's; this is how physical tables without a link row are located.
    String headerQuery = "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME,TABLE_TYPE\n" +
            "FROM SYSTEM.CATALOG (ROW_KEY_ORDER_OPTIMIZABLE BOOLEAN)\n" +
            "WHERE COLUMN_NAME IS NULL\n" +
            "AND COLUMN_FAMILY IS NULL\n" +
            "AND ROW_KEY_ORDER_OPTIMIZABLE IS NULL\n" +
            "AND TABLE_TYPE IN ('" + PTableType.TABLE.getSerializedValue() + "','" + otherType.getSerializedValue() + "')\n" +
            "AND (TENANT_ID, TABLE_SCHEM, TABLE_NAME) IN " + getTableRVC(candidateNames);

    List<String> otherTables = Lists.newArrayListWithExpectedSize(candidateNames.size());
    ResultSet headerRs = conn.createStatement().executeQuery(headerQuery);
    while (headerRs.next()) {
        if (PTableType.TABLE.getSerializedValue().equals(headerRs.getString(4))) {
            physicalTables.add(SchemaUtil.getTableName(headerRs.getString(2), headerRs.getString(3)));
        } else {
            otherTables.add(headerRs.getString(1));
            otherTables.add(headerRs.getString(2));
            otherTables.add(headerRs.getString(3));
        }
    }
    return otherTables;
}
// Return all types that are descending and either:
// 1) variable length, which includes all array types (PHOENIX-2067)
// 2) fixed length with padding (PHOENIX-2120)
// 3) float and double (PHOENIX-2171)
// We exclude VARBINARY as we no longer support DESC for it.
// Fixed: removed the stray no-op unary '+' operators that preceded each
// getSqlType() call on continuation lines.
private static String getAffectedDataTypes() {
    StringBuilder buf = new StringBuilder("("
            + PVarchar.INSTANCE.getSqlType() + ","
            + PChar.INSTANCE.getSqlType() + ","
            + PBinary.INSTANCE.getSqlType() + ","
            + PFloat.INSTANCE.getSqlType() + ","
            + PDouble.INSTANCE.getSqlType() + ","
            + PDecimal.INSTANCE.getSqlType() + ","
            );
    // Every array type is variable length and therefore affected.
    for (PDataType type : PDataType.values()) {
        if (type.isArrayType()) {
            buf.append(type.getSqlType());
            buf.append(',');
        }
    }
    // Replace the trailing ',' with ')' to close the IN list.
    buf.setCharAt(buf.length()-1, ')');
    return buf.toString();
}
/**
 * Identify the tables that are DESC VARBINARY as this is no longer supported.
 * Returns the sorted physical table names whose primary key contains such a
 * column.
 */
public static List<String> getPhysicalTablesWithDescVarbinaryRowKey(PhoenixConnection conn) throws SQLException {
    StringBuilder query = new StringBuilder()
            .append("SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n")
            .append("FROM SYSTEM.CATALOG cat1\n")
            .append("WHERE COLUMN_NAME IS NOT NULL\n")
            .append("AND COLUMN_FAMILY IS NULL\n")
            .append("AND SORT_ORDER = ").append(SortOrder.DESC.getSystemValue()).append("\n")
            .append("AND DATA_TYPE = ").append(PVarbinary.INSTANCE.getSqlType()).append("\n")
            .append("GROUP BY TENANT_ID,TABLE_SCHEM,TABLE_NAME");
    return getPhysicalTablesWithDescRowKey(query.toString(), conn);
}
/**
 * Identify the tables that need to be upgraded due to PHOENIX-2067 and
 * PHOENIX-2120: DESC PK columns of an affected data type, or ASC BINARY PK
 * columns longer than one byte.
 */
public static List<String> getPhysicalTablesWithDescRowKey(PhoenixConnection conn) throws SQLException {
    StringBuilder query = new StringBuilder()
            .append("SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n")
            .append("FROM SYSTEM.CATALOG cat1\n")
            .append("WHERE COLUMN_NAME IS NOT NULL\n")
            .append("AND COLUMN_FAMILY IS NULL\n")
            .append("AND ( ( SORT_ORDER = ").append(SortOrder.DESC.getSystemValue()).append("\n")
            .append("        AND DATA_TYPE IN ").append(getAffectedDataTypes()).append(")\n")
            .append("    OR ( SORT_ORDER = ").append(SortOrder.ASC.getSystemValue()).append("\n")
            .append("         AND DATA_TYPE = ").append(PBinary.INSTANCE.getSqlType()).append("\n")
            .append("         AND COLUMN_SIZE > 1 ) )\n")
            .append("GROUP BY TENANT_ID,TABLE_SCHEM,TABLE_NAME");
    return getPhysicalTablesWithDescRowKey(query.toString(), conn);
}
/**
 * Identify the tables that need to be upgraded due to PHOENIX-2067.
 * Resolves the affected column rows found by the given query to their
 * physical tables in three steps: direct table/index header rows, then
 * tables/views owning the remaining indexes, then physical tables linked
 * from the remaining views. Returns the sorted physical table names.
 */
private static List<String> getPhysicalTablesWithDescRowKey(String query, PhoenixConnection conn) throws SQLException {
    // First query finds column rows of tables that need to be upgraded.
    // We cannot tell if the column is from a table, view, or index however.
    ResultSet rs = conn.createStatement().executeQuery(query);
    Set<String> physicalTables = Sets.newHashSetWithExpectedSize(1024);
    List<String> remainingTableNames = addPhysicalTables(conn, rs, PTableType.INDEX, physicalTables);
    if (!remainingTableNames.isEmpty()) {
        // Find tables/views for index
        String indexLinkQuery = "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n" +
                "FROM SYSTEM.CATALOG\n" +
                "WHERE COLUMN_NAME IS NULL\n" +
                "AND (TENANT_ID, TABLE_SCHEM, COLUMN_FAMILY) IN " + getTableRVC(remainingTableNames) + "\n" +
                "AND LINK_TYPE = " + LinkType.INDEX_TABLE.getSerializedValue();
        rs = conn.createStatement().executeQuery(indexLinkQuery);
        remainingTableNames = addPhysicalTables(conn, rs, PTableType.VIEW, physicalTables);
        if (!remainingTableNames.isEmpty()) {
            // Find physical table name from views, splitting on '.' to get schema name and table name
            String physicalLinkQuery = "SELECT null, " +
                    " CASE WHEN INSTR(COLUMN_FAMILY,'.') = 0 THEN NULL ELSE SUBSTR(COLUMN_FAMILY,1,INSTR(COLUMN_FAMILY,'.')) END,\n" +
                    " CASE WHEN INSTR(COLUMN_FAMILY,'.') = 0 THEN COLUMN_FAMILY ELSE SUBSTR(COLUMN_FAMILY,INSTR(COLUMN_FAMILY,'.')+1) END\n" +
                    "FROM SYSTEM.CATALOG\n" +
                    "WHERE COLUMN_NAME IS NULL\n" +
                    "AND COLUMN_FAMILY IS NOT NULL\n" +
                    "AND (TENANT_ID, TABLE_SCHEM, TABLE_NAME) IN " + getTableRVC(remainingTableNames) + "\n" +
                    "AND LINK_TYPE = " + LinkType.PHYSICAL_TABLE.getSerializedValue();
            rs = conn.createStatement().executeQuery(physicalLinkQuery);
            // Add any tables (which will all be physical tables) which have not already been upgraded.
            addPhysicalTables(conn, rs, PTableType.TABLE, physicalTables);
        }
    }
    List<String> sortedPhysicalTables = new ArrayList<String>(physicalTables);
    Collections.sort(sortedPhysicalTables);
    return sortedPhysicalTables;
}
/**
 * Upgrades a single table (or index) whose DESC variable-length row keys were
 * written in a way that prevents correct sorting (PHOENIX-2067). For a physical
 * table an HBase snapshot is taken first so the data can be restored if the
 * upgrade fails; the snapshot is deleted once the outcome is known.
 *
 * @param upgradeConn connection whose scans rewrite the bad row keys
 *            (tenant-specific when upgrading a tenant view)
 * @param globalConn global connection used for metadata reads/writes
 * @param schemaName schema of the table/index being upgraded
 * @param tableName name of the table/index being upgraded
 * @param isTable true for a physical table (snapshot taken, views marked too),
 *            false for an index
 * @param bypassUpgrade when true only the ROW_KEY_ORDER_OPTIMIZABLE flag is
 *            set; no data is rewritten and no snapshot is taken
 */
private static void upgradeDescVarLengthRowKeys(PhoenixConnection upgradeConn, PhoenixConnection globalConn, String schemaName, String tableName, boolean isTable, boolean bypassUpgrade) throws SQLException {
    String physicalName = SchemaUtil.getTableName(schemaName, tableName);
    long currentTime = System.currentTimeMillis();
    String snapshotName = physicalName + "_" + currentTime;
    HBaseAdmin admin = null;
    if (isTable && !bypassUpgrade) {
        admin = globalConn.getQueryServices().getAdmin();
    }
    boolean restoreSnapshot = false;
    boolean success = false;
    try {
        if (isTable && !bypassUpgrade) {
            String msg = "Taking snapshot of physical table " + physicalName + " prior to upgrade...";
            System.out.println(msg);
            logger.info(msg);
            // The table must be disabled while the snapshot is taken.
            admin.disableTable(physicalName);
            admin.snapshot(snapshotName, physicalName);
            admin.enableTable(physicalName);
            restoreSnapshot = true;
        }
        String escapedTableName = SchemaUtil.getEscapedTableName(schemaName, tableName);
        String tenantInfo = "";
        PName tenantId = PName.EMPTY_NAME;
        if (upgradeConn.getTenantId() != null) {
            tenantId = upgradeConn.getTenantId();
            tenantInfo = " for tenant " + tenantId.getString();
        }
        String msg = "Starting upgrade of " + escapedTableName + tenantInfo + "...";
        System.out.println(msg);
        logger.info(msg);
        ResultSet rs;
        if (!bypassUpgrade) {
            // Full NO_INDEX scan through the upgrade connection is what
            // rewrites the incorrectly encoded row keys.
            rs = upgradeConn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ count(*) FROM " + escapedTableName);
            rs.next(); // Run query
        }
        List<String> tableNames = Lists.newArrayListWithExpectedSize(1024);
        tableNames.add(tenantId == PName.EMPTY_NAME ? null : tenantId.getString());
        tableNames.add(schemaName);
        tableNames.add(tableName);
        // Find views to mark as upgraded
        if (isTable) {
            // BUGFIX: the "\n" after the COLUMN_FAMILY literal was missing,
            // producing invalid SQL ("...= 'x'AND LINK_TYPE..."); the same
            // query in upgradeSharedIndex has the separator.
            String query =
                    "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n" +
                    "FROM SYSTEM.CATALOG\n" +
                    "WHERE COLUMN_NAME IS NULL\n" +
                    "AND COLUMN_FAMILY = '" + physicalName + "'\n" +
                    "AND LINK_TYPE = " + LinkType.PHYSICAL_TABLE.getSerializedValue();
            rs = globalConn.createStatement().executeQuery(query);
            while (rs.next()) {
                tableNames.add(rs.getString(1));
                tableNames.add(rs.getString(2));
                tableNames.add(rs.getString(3));
            }
        }
        // Mark the table and views as upgraded now. Entries are flat triples:
        // (tenantId, schemaName, tableName).
        for (int i = 0; i < tableNames.size(); i += 3) {
            String theTenantId = tableNames.get(i);
            String theSchemaName = tableNames.get(i+1);
            String theTableName = tableNames.get(i+2);
            globalConn.createStatement().execute("UPSERT INTO " + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME +
                " (" + PhoenixDatabaseMetaData.TENANT_ID + "," +
                       PhoenixDatabaseMetaData.TABLE_SCHEM + "," +
                       PhoenixDatabaseMetaData.TABLE_NAME + "," +
                       MetaDataEndpointImpl.ROW_KEY_ORDER_OPTIMIZABLE + " BOOLEAN"
               + ") VALUES (" +
                "'" + (theTenantId == null ? StringUtil.EMPTY_STRING : theTenantId) + "'," +
                "'" + (theSchemaName == null ? StringUtil.EMPTY_STRING : theSchemaName) + "'," +
                "'" + theTableName + "'," +
                "TRUE)");
        }
        globalConn.commit();
        // Invalidate cached metadata so clients pick up the new flag.
        for (int i = 0; i < tableNames.size(); i += 3) {
            String theTenantId = tableNames.get(i);
            String theSchemaName = tableNames.get(i+1);
            String theTableName = tableNames.get(i+2);
            globalConn.getQueryServices().clearTableFromCache(
                    theTenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(theTenantId),
                    // BUGFIX: previously converted the outer schemaName
                    // argument here, clearing the wrong cache entry for views
                    // that live in a different schema than the base table.
                    theSchemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(theSchemaName),
                    Bytes.toBytes(theTableName), HConstants.LATEST_TIMESTAMP);
        }
        success = true;
        msg = "Completed upgrade of " + escapedTableName + tenantInfo;
        System.out.println(msg);
        logger.info(msg);
    } catch (Exception e) {
        // Deliberately best-effort: the failure is logged (and the snapshot
        // restored below) instead of propagated, so other tables can still
        // be upgraded.
        logger.error("Exception during upgrade of " + physicalName + ":", e);
    } finally {
        boolean restored = false;
        try {
            if (!success && restoreSnapshot) {
                admin.disableTable(physicalName);
                admin.restoreSnapshot(snapshotName, false);
                admin.enableTable(physicalName);
                String msg = "Restored snapshot of " + physicalName + " due to failure of upgrade";
                System.out.println(msg);
                logger.info(msg);
            }
            restored = true;
        } catch (Exception e) {
            logger.warn("Unable to restoring snapshot " + snapshotName + " after failed upgrade", e);
        } finally {
            try {
                // Delete the snapshot only when it is no longer needed:
                // either the upgrade succeeded or the restore completed.
                if (restoreSnapshot && restored) {
                    admin.deleteSnapshot(snapshotName);
                }
            } catch (Exception e) {
                logger.warn("Unable to delete snapshot " + snapshotName + " after upgrade:", e);
            } finally {
                try {
                    if (admin != null) {
                        admin.close();
                    }
                } catch (IOException e) {
                    logger.warn("Unable to close admin after upgrade:", e);
                }
            }
        }
    }
}
/**
 * Returns whether the given table may NOT be upgraded directly. Only global
 * physical tables are eligible; their views and indexes are upgraded with them.
 */
private static boolean isInvalidTableToUpgrade(PTable table) throws SQLException {
    boolean isPlainTable = table.getType() == PTableType.TABLE;
    boolean isGlobal = table.getTenantId() == null;
    boolean isPhysical = table.getPhysicalName().equals(table.getName());
    // Valid only when all three conditions hold.
    return !(isPlainTable && isGlobal && isPhysical);
}
/**
 * Upgrade tables and their indexes due to a bug causing descending row keys to have a row key that
 * prevents them from being sorted correctly (PHOENIX-2067).
 *
 * @param conn global connection used to resolve metadata and run the upgrade
 * @param tablesToUpgrade full names of the physical tables to upgrade
 * @param bypassUpgrade when true, tables are only flagged as upgraded; no data is rewritten
 * @throws SQLException if any requested table is not an upgradable global physical table
 */
public static void upgradeDescVarLengthRowKeys(PhoenixConnection conn, List<String> tablesToUpgrade, boolean bypassUpgrade) throws SQLException {
    if (tablesToUpgrade.isEmpty()) {
        return;
    }
    // Partition the requested names into upgradable physical tables and invalid requests.
    List<PTable> tablesNeedingUpgrading = Lists.newArrayListWithExpectedSize(tablesToUpgrade.size());
    List<String> invalidTables = Lists.newArrayListWithExpectedSize(tablesToUpgrade.size());
    for (String fullTableName : tablesToUpgrade) {
        PTable table = PhoenixRuntime.getTable(conn, fullTableName);
        if (isInvalidTableToUpgrade(table)) {
            invalidTables.add(fullTableName);
        } else {
            tablesNeedingUpgrading.add(table);
        }
    }
    // Fail fast, listing every invalid name, before touching any data.
    if (!invalidTables.isEmpty()) {
        StringBuilder buf = new StringBuilder("Only physical tables should be upgraded as their views and indexes will be updated with them: ");
        for (String fullTableName : invalidTables) {
            buf.append(fullTableName);
            buf.append(' ');
        }
        throw new SQLException(buf.toString());
    }
    // Dedicated upgrade connection: scans issued through it rewrite the bad row keys.
    PhoenixConnection upgradeConn = new PhoenixConnection(conn, true);
    try {
        upgradeConn.setAutoCommit(true);
        for (PTable table : tablesNeedingUpgrading) {
            boolean wasUpgraded = false;
            // Skip tables already flagged as having an optimizable row key order.
            if (!table.rowKeyOrderOptimizable()) {
                wasUpgraded = true;
                upgradeDescVarLengthRowKeys(upgradeConn, conn, table.getSchemaName().getString(), table.getTableName().getString(), true, bypassUpgrade);
            }
            // Upgrade global indexes
            for (PTable index : table.getIndexes()) {
                if (!index.rowKeyOrderOptimizable() && index.getIndexType() != IndexType.LOCAL) {
                    wasUpgraded = true;
                    upgradeDescVarLengthRowKeys(upgradeConn, conn, index.getSchemaName().getString(), index.getTableName().getString(), false, bypassUpgrade);
                }
            }
            String sharedViewIndexName = Bytes.toString(MetaDataUtil.getViewIndexPhysicalName(table.getName().getBytes()));
            // Upgrade view indexes
            wasUpgraded |= upgradeSharedIndex(upgradeConn, conn, sharedViewIndexName, bypassUpgrade);
            String sharedLocalIndexName = Bytes.toString(MetaDataUtil.getLocalIndexPhysicalName(table.getName().getBytes()));
            // Upgrade local indexes
            wasUpgraded |= upgradeSharedIndex(upgradeConn, conn, sharedLocalIndexName, bypassUpgrade);
            if (!wasUpgraded) {
                System.out.println("Upgrade not required for this table or its indexes: " + table.getName().getString());
            }
        }
    } finally {
        upgradeConn.close();
    }
}
/**
 * Upgrade shared indexes by querying for all that are associated with our
 * physical table.
 * The catalog query is ordered by TENANT_ID so that at most one
 * tenant-specific connection is open at a time and can be reused across
 * consecutive rows belonging to the same tenant.
 * @return true if any upgrades were performed and false otherwise.
 */
private static boolean upgradeSharedIndex(PhoenixConnection upgradeConn, PhoenixConnection globalConn, String physicalName, boolean bypassUpgrade) throws SQLException {
    String query =
            "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n" +
            "FROM SYSTEM.CATALOG cat1\n" +
            "WHERE COLUMN_NAME IS NULL\n" +
            "AND COLUMN_FAMILY = '" + physicalName + "'\n" +
            "AND LINK_TYPE = " + LinkType.PHYSICAL_TABLE.getSerializedValue() + "\n" +
            "ORDER BY TENANT_ID";
    ResultSet rs = globalConn.createStatement().executeQuery(query);
    String lastTenantId = null;
    // Defaults to the global connection until a tenant-specific row is seen.
    Connection conn = globalConn;
    String url = globalConn.getURL();
    boolean wasUpgraded = false;
    while (rs.next()) {
        String fullTableName = SchemaUtil.getTableName(
                rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM),
                rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
        String tenantId = rs.getString(1);
        if (tenantId != null && !tenantId.equals(lastTenantId))  {
            // Close the previous tenant connection before opening a new one;
            // the global connection is never closed here.
            if (lastTenantId != null) {
                conn.close();
            }
            // Open tenant-specific connection when we find a new one
            Properties props = new Properties(globalConn.getClientInfo());
            props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
            conn = DriverManager.getConnection(url, props);
            lastTenantId = tenantId;
        }
        PTable table = PhoenixRuntime.getTable(conn, fullTableName);
        String tableTenantId = table.getTenantId() == null ? null : table.getTenantId().getString();
        // Only upgrade when the resolved table actually belongs to the current
        // tenant scope and has not been upgraded yet.
        if (Objects.equal(lastTenantId, tableTenantId) && !table.rowKeyOrderOptimizable()) {
            upgradeDescVarLengthRowKeys(upgradeConn, globalConn, table.getSchemaName().getString(), table.getTableName().getString(), false, bypassUpgrade);
            wasUpgraded = true;
        }
    }
    rs.close();
    if (lastTenantId != null) {
        conn.close();
    }
    return wasUpgraded;
}
/**
 * Appends a Put to the given metadata mutation list that marks the table
 * header row as having an optimizable row key order.
 */
public static void addRowKeyOrderOptimizableCell(List<Mutation> tableMetadata, byte[] tableHeaderRowKey, long clientTimeStamp) {
    Put headerMarker = new Put(tableHeaderRowKey, clientTimeStamp);
    byte[] trueBytes = PBoolean.INSTANCE.toBytes(true);
    headerMarker.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
            MetaDataEndpointImpl.ROW_KEY_ORDER_OPTIMIZABLE_BYTES, trueBytes);
    tableMetadata.add(headerMarker);
}
/**
 * One-time 4.7 upgrade step that truncates SYSTEM.STATS so statistics are
 * regenerated. A checkAndPut on an upgrade-marker cell ensures exactly one
 * client performs the truncation.
 *
 * @param metaTable the SYSTEM.CATALOG HBase table
 * @param statsTable the SYSTEM.STATS HBase table to be emptied
 * @return true if this client performed the truncation, false otherwise
 */
public static boolean truncateStats(HTableInterface metaTable, HTableInterface statsTable)
        throws IOException, InterruptedException {
    byte[] statsTableKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME,
            PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE);
    List<Cell> columnCells = metaTable.get(new Get(statsTableKey))
            .getColumnCells(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES);
    long timestamp;
    // Only truncate when SYSTEM.STATS predates the 4.7 system-table timestamp.
    if (!columnCells.isEmpty() && (timestamp = columnCells.get(0)
            .getTimestamp()) < MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_7_0) {
        KeyValue upgradeKV = KeyValueUtil.newKeyValue(statsTableKey, PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                UPGRADE_TO_4_7_COLUMN_NAME, timestamp, PBoolean.INSTANCE.toBytes(true));
        Put upgradePut = new Put(statsTableKey);
        upgradePut.add(upgradeKV);
        // check for null in UPGRADE_TO_4_7_COLUMN_NAME in checkAndPut so that only single client
        // drop the rows of SYSTEM.STATS
        if (metaTable.checkAndPut(statsTableKey, PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                UPGRADE_TO_4_7_COLUMN_NAME, null, upgradePut)) {
            List<Mutation> mutations = Lists.newArrayListWithExpectedSize(1000);
            // Raw scan over all versions so a delete marker can be written for
            // every existing Put, effectively truncating the table.
            Scan scan = new Scan();
            scan.setRaw(true);
            scan.setMaxVersions();
            ResultScanner statsScanner = statsTable.getScanner(scan);
            Result r;
            mutations.clear();
            int count = 0;
            while ((r = statsScanner.next()) != null) {
                Delete delete = null;
                for (KeyValue keyValue : r.raw()) {
                    if (KeyValue.Type.codeToType(keyValue.getType()) == KeyValue.Type.Put) {
                        if (delete == null) {
                            delete = new Delete(keyValue.getRow());
                        }
                        // Delete marker placed at the exact timestamp of the Put it shadows.
                        KeyValue deleteKeyValue = new KeyValue(keyValue.getRowArray(), keyValue.getRowOffset(),
                                keyValue.getRowLength(), keyValue.getFamilyArray(), keyValue.getFamilyOffset(),
                                keyValue.getFamilyLength(), keyValue.getQualifierArray(),
                                keyValue.getQualifierOffset(), keyValue.getQualifierLength(),
                                keyValue.getTimestamp(), KeyValue.Type.Delete, ByteUtil.EMPTY_BYTE_ARRAY, 0, 0);
                        delete.addDeleteMarker(deleteKeyValue);
                    }
                }
                if (delete != null) {
                    mutations.add(delete);
                    // Flush deletes in small batches to bound memory usage.
                    if (count > 10) {
                        statsTable.batch(mutations);
                        mutations.clear();
                        count = 0;
                    }
                    count++;
                }
            }
            if (!mutations.isEmpty()) {
                statsTable.batch(mutations);
            }
            return true;
        }
    }
    return false;
}
/**
 * Moves the physical HBase table backing a Phoenix table/index/system table
 * into its namespace-mapped location (snapshot, clone into the destination,
 * drop the original) and then flags the Phoenix metadata row as namespace
 * mapped.
 *
 * @param admin HBase admin used for snapshot/clone/delete
 * @param metatable SYSTEM.CATALOG HBase table receiving the flag update
 * @param srcTableName current physical table name
 * @param destTableName namespace-mapped physical table name
 * @param props configuration used to check namespace-mapping settings
 * @param ts timestamp for the metadata Put
 * @param phoenixTableName logical Phoenix table whose metadata row is flagged
 * @param pTableType type of the Phoenix table being mapped
 */
private static void mapTableToNamespace(HBaseAdmin admin, HTableInterface metatable, String srcTableName,
        String destTableName, ReadOnlyProps props, Long ts, String phoenixTableName, PTableType pTableType)
        throws SnapshotCreationException, IllegalArgumentException, IOException, InterruptedException,
        SQLException {
    srcTableName = SchemaUtil.normalizeIdentifier(srcTableName);
    // Namespace mapping (and, for system tables, the system-table mapping
    // flag) must be enabled before any data is moved.
    if (!SchemaUtil.isNamespaceMappingEnabled(pTableType,
            props)) { throw new IllegalArgumentException(SchemaUtil.isSystemTable(srcTableName.getBytes())
                    ? "For system table " + QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE
                            + " also needs to be enabled along with " + QueryServices.IS_NAMESPACE_MAPPING_ENABLED
                    : QueryServices.IS_NAMESPACE_MAPPING_ENABLED + " is not enabled"); }
    boolean srcTableExists=admin.tableExists(srcTableName);
    // we need to move physical table in actual namespace for TABLE and Index
    if (srcTableExists && (PTableType.TABLE.equals(pTableType)
            || PTableType.INDEX.equals(pTableType) || PTableType.SYSTEM.equals(pTableType))) {
        boolean destTableExists=admin.tableExists(destTableName);
        // If the destination already exists the data move is skipped; only the
        // metadata flag below is written.
        if (!destTableExists) {
            String snapshotName = QueryConstants.UPGRADE_TABLE_SNAPSHOT_PREFIX + srcTableName;
            logger.info("Disabling table " + srcTableName + " ..");
            admin.disableTable(srcTableName);
            logger.info(String.format("Taking snapshot %s of table %s..", snapshotName, srcTableName));
            admin.snapshot(snapshotName, srcTableName);
            logger.info(
                    String.format("Restoring snapshot %s in destination table %s..", snapshotName, destTableName));
            admin.cloneSnapshot(Bytes.toBytes(snapshotName), Bytes.toBytes(destTableName));
            logger.info(String.format("deleting old table %s..", srcTableName));
            admin.deleteTable(srcTableName);
            logger.info(String.format("deleting snapshot %s..", snapshotName));
            admin.deleteSnapshot(snapshotName);
        }
    }
    // Update flag to represent table is mapped to namespace
    logger.info(String.format("Updating meta information of phoenix table '%s' to map to namespace..", phoenixTableName));
    Put put = new Put(SchemaUtil.getTableKey(null, SchemaUtil.getSchemaNameFromFullName(phoenixTableName),
            SchemaUtil.getTableNameFromFullName(phoenixTableName)), ts);
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED_BYTES,
            PBoolean.INSTANCE.toBytes(Boolean.TRUE));
    metatable.put(put);
}
/**
 * Maps an existing Phoenix table to its namespace-mapped physical table.
 * Should not be used if the table has views or indexes; use the map-table
 * utility in psql.py instead.
 */
public static void mapTableToNamespace(HBaseAdmin admin, HTableInterface metatable, String tableName,
        ReadOnlyProps props, Long ts, PTableType pTableType) throws SnapshotCreationException,
        IllegalArgumentException, IOException, InterruptedException, SQLException {
    // Resolve the namespace-mapped physical name and delegate to the full overload.
    String namespaceMappedName = SchemaUtil.getPhysicalTableName(tableName, props).getNameAsString();
    mapTableToNamespace(admin, metatable, tableName,
            SchemaUtil.normalizeIdentifier(namespaceMappedName), props, ts, tableName, pTableType);
}
/**
 * Maps an existing (non-namespace-mapped) Phoenix table and all of its
 * indexes to namespace-mapped physical HBase tables, creating the schema if
 * needed and rewriting index links where the physical name changes.
 *
 * @param conn global Phoenix connection (must have no TENANT_ID and no schema set)
 * @param srcTable full name of the Phoenix table to upgrade
 */
public static void upgradeTable(PhoenixConnection conn, String srcTable) throws SQLException,
        SnapshotCreationException, IllegalArgumentException, IOException, InterruptedException {
    ReadOnlyProps readOnlyProps = conn.getQueryServices().getProps();
    // Guard clauses: a global, schema-less connection with namespace mapping
    // enabled is required before anything is touched.
    if (conn.getClientInfo(PhoenixRuntime.TENANT_ID_ATTRIB) != null) { throw new SQLException(
            "May not specify the TENANT_ID_ATTRIB property when upgrading"); }
    if (conn.getSchema() != null) { throw new IllegalArgumentException(
            "Schema should not be set for connection!!"); }
    if (!SchemaUtil.isNamespaceMappingEnabled(PTableType.TABLE,
            readOnlyProps)) { throw new IllegalArgumentException(
                    QueryServices.IS_NAMESPACE_MAPPING_ENABLED + " is not enabled!!"); }
    try (HBaseAdmin admin = conn.getQueryServices().getAdmin();
            HTableInterface metatable = conn.getQueryServices()
                    .getTable(SchemaUtil
                            .getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, readOnlyProps)
                            .getName());) {
        String tableName = SchemaUtil.normalizeIdentifier(srcTable);
        String schemaName = SchemaUtil.getSchemaNameFromFullName(tableName);
        // Confirm table is not already upgraded
        PTable table = PhoenixRuntime.getTable(conn, tableName);
        // Upgrade is not required if schemaName is not present.
        if (schemaName.equals("") && !PTableType.VIEW
                .equals(table.getType())) { throw new IllegalArgumentException("Table doesn't have schema name"); }
        if (table.isNamespaceMapped()) { throw new IllegalArgumentException("Table is already upgraded"); }
        if (!schemaName.equals("")) {
            logger.info(String.format("Creating schema %s..", schemaName));
            conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName);
        }
        String newPhysicalTablename = SchemaUtil.normalizeIdentifier(SchemaUtil
                .getPhysicalTableName(table.getPhysicalName().getString(), readOnlyProps).getNameAsString());
        logger.info(String.format("Upgrading %s %s..", table.getType(), tableName));
        // Upgrade the data or main table
        mapTableToNamespace(admin, metatable, tableName, newPhysicalTablename, readOnlyProps,
                PhoenixRuntime.getCurrentScn(readOnlyProps), tableName, table.getType());
        // NOTE(review): conn is closed here but continues to be used below;
        // this mirrors the original control flow -- confirm PhoenixConnection
        // semantics before changing it.
        conn.close();
        // clear the cache and get new table
        conn.getQueryServices().clearTableFromCache(ByteUtil.EMPTY_BYTE_ARRAY, table.getSchemaName().getBytes(),
                table.getTableName().getBytes(), PhoenixRuntime.getCurrentScn(readOnlyProps));
        MetaDataMutationResult result = new MetaDataClient(conn).updateCache(schemaName,
                SchemaUtil.getTableNameFromFullName(tableName));
        if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) { throw new TableNotFoundException(
                tableName); }
        table = result.getTable();
        // check whether table is properly upgraded before upgrading indexes
        if (table.isNamespaceMapped()) {
            for (PTable index : table.getIndexes()) {
                String srcTableName = index.getPhysicalName().getString();
                if (srcTableName.contains(QueryConstants.NAMESPACE_SEPARATOR)
                        || (!MetaDataUtil.isViewIndex(srcTableName) && PTableType.VIEW.equals(table.getType()))) {
                    // this condition occurs in case of multiple views on same table
                    // as all view indexes uses the same physical table, so if one view is already migrated then we
                    // can skip migrating the physical table again
                    logger.info(String.format("skipping as it seems index '%s' is already upgraded..", index.getName()));
                    continue;
                }
                String destTableName = null;
                String phoenixTableName = index.getName().getString();
                boolean updateLink = false;
                if (MetaDataUtil.isLocalIndex(srcTableName)) {
                    // BUGFIX: the format string contained a literal '' where
                    // '%s' was intended, so the physical table name argument
                    // was silently dropped from the log message.
                    logger.info(String.format("local index '%s' found with physical hbase table name '%s'..",
                            index.getName(), srcTableName));
                    destTableName = Bytes
                            .toString(MetaDataUtil.getLocalIndexPhysicalName(newPhysicalTablename.getBytes()));
                    // update parent_table property in local index table descriptor
                    conn.createStatement()
                            .execute(String.format("ALTER TABLE %s set " + MetaDataUtil.PARENT_TABLE_KEY + "='%s'",
                                    phoenixTableName, table.getPhysicalName()));
                    updateLink = true;
                } else if (MetaDataUtil.isViewIndex(srcTableName)) {
                    // BUGFIX: '' -> '%s' (see above).
                    logger.info(String.format("View index '%s' found with physical hbase table name '%s'..",
                            index.getName(), srcTableName));
                    destTableName = Bytes
                            .toString(MetaDataUtil.getViewIndexPhysicalName(newPhysicalTablename.getBytes()));
                    updateLink = true;
                } else {
                    // BUGFIX: '' -> '%s' (see above).
                    logger.info(String.format("Global index '%s' found with physical hbase table name '%s'..",
                            index.getName(), srcTableName));
                    destTableName = SchemaUtil
                            .getPhysicalTableName(index.getPhysicalName().getString(), readOnlyProps)
                            .getNameAsString();
                }
                logger.info(String.format("Upgrading index %s..", index.getName()));
                mapTableToNamespace(admin, metatable, srcTableName, destTableName, readOnlyProps,
                        PhoenixRuntime.getCurrentScn(readOnlyProps), phoenixTableName, index.getType());
                if (updateLink) {
                    logger.info(String.format("Updating link information for index '%s' ..", index.getName()));
                    updateLink(conn, srcTableName, destTableName);
                }
                conn.getQueryServices().clearTableFromCache(ByteUtil.EMPTY_BYTE_ARRAY,
                        index.getSchemaName().getBytes(), index.getTableName().getBytes(),
                        PhoenixRuntime.getCurrentScn(readOnlyProps));
            }
        } else {
            throw new RuntimeException("Error: problem occured during upgrade. Table is not upgraded successfully");
        }
    }
}
/**
 * Rewrites the physical-table link row for an index: removes the old link to
 * srcTableName, inserts one pointing at destTableName, then commits.
 */
private static void updateLink(PhoenixConnection conn, String srcTableName, String destTableName)
        throws SQLException {
    PreparedStatement deleteLinkStatement = conn.prepareStatement(DELETE_LINK);
    PreparedStatement updateLinkStatement = conn.prepareStatement(String.format(UPDATE_LINK, destTableName));
    deleteLinkStatement.setString(1, srcTableName);
    updateLinkStatement.setString(1, srcTableName);
    // Remove the stale link first so the new one is the only link committed.
    deleteLinkStatement.execute();
    updateLinkStatement.execute();
    conn.commit();
}
} | apache-2.0 |
iriark01/yotta | get_yotta.py | 6737 | #
# This is a script to install yotta. Eventually I would like to add all OS's to this script
# but for now it just works on windows.
#
# There are some janky hacks in place holding it together, such as opening
# IE to fetch certificates that Windows doesn't have. Currently the script
# just downloads the dependencies and has the user run through the
# click-throughs. Eventually I would like to hack together some silent
# installers, which would involve some exe brute forcing and registry hacking.
#
# copyright ARMmbed 2014
#
# Author: Austin Blackstone
# Date: December 17,2014
import math
import sys
import pip
import os
import subprocess
#
# Downloads to download
#
# Note that on windows the '.exe' extension is necessary to run with a subprocess
downloads = {
    # Installers/archives needed on every Windows flavour.
    "all": {
        "cmake.exe": "http://www.cmake.org/files/v3.2/cmake-3.2.1-win32-x86.exe",
        "ninja.zip": "https://github.com/martine/ninja/releases/download/v1.5.3/ninja-win.zip",
        "gcc.exe": "https://launchpad.net/gcc-arm-embedded/4.8/4.8-2014-q3-update/+download/gcc-arm-none-eabi-4_8-2014q3-20140805-win32.exe"
    },
    # 64-bit-only downloads (currently none).
    "64bit": {
    },
    # 32-bit-only downloads (currently none).
    "32bit": {
    }
}
#
# Prompt to re-install / download packages
#
def shouldInstall(binName):
    """Return True if binName should be (re)installed.

    If the binary is already on PATH, ask the user whether to reinstall it;
    otherwise install it unconditionally.
    """
    import shutil
    import shutilwhich  # backport of shutil.which for Python 2
    if not shutil.which(binName):
        return True  # binary does not exist, install it
    question = "\n\t" + binName + " already exists on your PATH, would you like to reinstall it? (y/n): "
    sys.stdout.write(question)
    # Only an explicit 'y' means yes; any other answer skips the reinstall.
    # (The stray debug call `dir(choice)` from the original has been removed.)
    choice = raw_input().lower()
    if choice == 'y':
        return True  # re-install binary
    print("\n\tSkipping installation of " + binName)
    return False  # skip installation
#
# Cygwin Install Script - TODO
#
def cygwin():
    """Cygwin is unsupported; point the user at the Windows instructions."""
    message = ("Cygwin is not currently supported. Please install for the "
               "windows command line. See "
               "http://docs.yottabuild.org/#installing-on-windows for details.")
    print(message)
    return None
#
# Linux Install Script - TODO
#
def linux():
    """Linux installs are manual; point the user at the online instructions."""
    message = ("For Linux install instructions please see "
               "http://docs.yottabuild.org/#installing-on-linux")
    print(message)
    return None
#
# OSX Install Script - TODO
#
def osx():
    """OS X installs are manual; point the user at the online instructions."""
    message = ("For OSX install instructions please see "
               "http://docs.yottabuild.org/#installing-on-osx")
    print(message)
    return None
#
# Windows Install Script
#
def windows():
    """Download and install yotta and its dependencies on Windows.

    Downloads installers for cmake, ninja and gcc-arm-none-eabi, then walks
    the user through each (mostly interactive) installer.
    """
    import wget
    import shutil
    import shutilwhich
    print("\nOpening an Internet Explorer window to launchpad.net to grab security certificate to download GCC.");
    # Hack: visiting launchpad.net in IE installs the HTTPS certificate that
    # the later wget download of GCC needs.
    w = subprocess.Popen(r'"C:\Program Files\Internet Explorer\iexplore.exe" https://launchpad.net/' ); #hack to get the security certificate in place so we can dowload the file.
    print("\nDownloading dependencies...");
    # Downloads for both 64bit / 32bit
    for key in downloads['all']:
        if os.path.isfile(key):
            print("\n\t" +key +" already exists in this folder. [Skipped]");
        else:
            print("\n\tDownloading " +key);
            wget.download(downloads['all'][key],key);
    w.kill(); #close the internet explorer window hack
    # 64bit Downloads
    if sys.maxsize > math.pow(2,32):
        print("\nWindows 64bit detected");
        for key in downloads['64bit']:
            if os.path.isfile(key):
                print("\n\t" +key +" already exists in this folder.[Skipped]");
            else:
                print("\n\tDownloading " +key );
                wget.download(downloads['64bit'][key],key);
    # 32bit Downloads
    elif sys.maxsize <= math.pow(2,32):
        print("\nWindows 32bit detected");
        for key in downloads['32bit']:
            if os.path.isfile(key):
                print("\n\t" +key +" already exists in this folder. [Skipped]");
            else:
                print("\n\tDownloading " +key);
                wget.download(downloads['32bit'][key],key);
    # Install the Packages
    print("\nInstalling packages: Please Follow the Click Throughs ");
    #Yotta (silent pip install; no click-through needed)
    if shouldInstall("yotta"):
        print("\n\tInstalling Yotta from pip ...");
        x = subprocess.call(['pip','install','-qU','yotta']);
        if x!= 0:
            print("\t[**ERROR**]: Yotta install failed. Please run 'pip install yotta -U' from the command line");
        else:
            print("\t[Installed]");
    #cmake (interactive installer)
    if shouldInstall("cmake"):
        print("\n\tInstalling Cmake: Please allow admin permissions and check 'Add CMake to system PATH for all users' option");
        x = subprocess.call(['cmake.exe'], shell=True);
        if x!=0:
            print("\t[**ERROR**]: Cmake install failed, Please re-run installer and give admin rights to installer");
        else:
            print("\t[Installed]");
    #gcc-arm-none-eabi (interactive installer)
    if shouldInstall("arm-none-eabi-gcc"):
        print("\n\tInstalling gcc-none-eabi-gcc : Please allow admin permissions and check 'Add path to enviroment variable' box");
        x = subprocess.call(['gcc.exe'], shell=True);
        if x!=0:
            print("\t[**ERROR**]: gcc-none-eabi-gcc install failed, Please re-run installer and give admin rights to installer");
        else:
            print("\t[Installed]");
    #ninja (unzip and copy; user must add c:/ninja to PATH manually)
    if shouldInstall("ninja"):
        import zipfile
        import shutil
        print("\n\tInstalling Ninja...");
        zipfile.ZipFile('ninja.zip').extract('ninja.exe');
        if not os.path.exists('c:/ninja'):
            os.makedirs('c:/ninja');
        shutil.copy2('ninja.exe','c:/ninja/ninja.exe')
        print("\t**REQUIRED:** Add c:/ninja/ to your PATH to complete ninja install")
#
# install extra packages for python
#
def bootstrap():
    """Ensure the helper packages (pip, wget, shutilwhich) are available.

    Exits the process if pip is missing or a helper package fails to install.
    """
    # check for Pip
    try:
        import pip
    except ImportError:
        print("\n****ERROR: Pip is not installed on this system. Please update your python install and / or install Pip, then retry***");
        sys.exit();
        return;
    # install wget if it doesn't already exist
    try:
        import wget
    except ImportError:
        print("\nWget package missing, installing now...");
        x = subprocess.call(['pip', 'install', '-q','wget']);
        if x!= 0:
            print("\t**ERROR** wget did not install correctly!");
            sys.exit();
        else:
            print("[Installed]");
    # install shutil.which if it doesn't already exist.
    # Python 3 has this already, python 2.7 does not so we need to install it.
    try:
        import shutilwhich
    except ImportError:
        print("\nshutilwhich package missing, installing now...");
        x = subprocess.call(['pip', 'install', '-q','shutilwhich']);
        if x!= 0:
            print("\t**ERROR** shutilwhich did not install correctly!");
            sys.exit();
        else:
            print("[Installed]");
    return;
#
# The main function figures out what OS is running and calls appropriate handler
#
def main():
    """Dispatch to the installer routine matching the current platform."""
    handlers = {
        "win32": windows,   # Windows 32 and 64 bit
        "cygwin": cygwin,   # cygwin on windows
        "darwin": osx,      # Mac OSX
        "linux": linux,     # Linux
    }
    handler = handlers.get(sys.platform)
    if handler is None:
        print("Your OS is not supported!")
        return
    bootstrap()
    handler()

if __name__ == "__main__":
    main()
| apache-2.0 |
aws/aws-sdk-cpp | aws-cpp-sdk-migrationhubstrategy/source/model/SelfManageTargetDestination.cpp | 3331 | /**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/migrationhubstrategy/model/SelfManageTargetDestination.h>
#include <aws/core/utils/HashingUtils.h>
#include <aws/core/Globals.h>
#include <aws/core/utils/EnumParseOverflowContainer.h>
using namespace Aws::Utils;
namespace Aws
{
namespace MigrationHubStrategyRecommendations
{
namespace Model
{
namespace SelfManageTargetDestinationMapper
{
// Precomputed hashes of the serialized enum names, used for fast name lookup.
static const int None_specified_HASH = HashingUtils::HashString("None specified");
static const int Amazon_Elastic_Cloud_Compute_EC2_HASH = HashingUtils::HashString("Amazon Elastic Cloud Compute (EC2)");
static const int Amazon_Elastic_Container_Service_ECS_HASH = HashingUtils::HashString("Amazon Elastic Container Service (ECS)");
static const int Amazon_Elastic_Kubernetes_Service_EKS_HASH = HashingUtils::HashString("Amazon Elastic Kubernetes Service (EKS)");
/**
 * Parses a serialized name into its SelfManageTargetDestination value.
 * Unknown names are stashed in the enum overflow container so the raw hash
 * can later be mapped back to the original string.
 */
SelfManageTargetDestination GetSelfManageTargetDestinationForName(const Aws::String& name)
{
  const int hashCode = HashingUtils::HashString(name.c_str());
  struct Entry { int hash; SelfManageTargetDestination value; };
  // Tiny lookup table pairing each known hash with its enum member.
  static const Entry entries[] = {
    { None_specified_HASH, SelfManageTargetDestination::None_specified },
    { Amazon_Elastic_Cloud_Compute_EC2_HASH, SelfManageTargetDestination::Amazon_Elastic_Cloud_Compute_EC2 },
    { Amazon_Elastic_Container_Service_ECS_HASH, SelfManageTargetDestination::Amazon_Elastic_Container_Service_ECS },
    { Amazon_Elastic_Kubernetes_Service_EKS_HASH, SelfManageTargetDestination::Amazon_Elastic_Kubernetes_Service_EKS },
  };
  for (const Entry& entry : entries)
  {
    if (entry.hash == hashCode)
    {
      return entry.value;
    }
  }
  // Unknown name: remember it so GetNameFor... can round-trip the hash.
  EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
  if (overflowContainer)
  {
    overflowContainer->StoreOverflow(hashCode, name);
    return static_cast<SelfManageTargetDestination>(hashCode);
  }
  return SelfManageTargetDestination::NOT_SET;
}
/**
 * Serializes a SelfManageTargetDestination back to its wire name.
 * Values that are not known members are assumed to be raw hashes previously
 * stored in the enum overflow container.
 */
Aws::String GetNameForSelfManageTargetDestination(SelfManageTargetDestination enumValue)
{
  if (enumValue == SelfManageTargetDestination::None_specified)
  {
    return "None specified";
  }
  if (enumValue == SelfManageTargetDestination::Amazon_Elastic_Cloud_Compute_EC2)
  {
    return "Amazon Elastic Cloud Compute (EC2)";
  }
  if (enumValue == SelfManageTargetDestination::Amazon_Elastic_Container_Service_ECS)
  {
    return "Amazon Elastic Container Service (ECS)";
  }
  if (enumValue == SelfManageTargetDestination::Amazon_Elastic_Kubernetes_Service_EKS)
  {
    return "Amazon Elastic Kubernetes Service (EKS)";
  }
  // Not a known member: try to recover the original string from the overflow container.
  EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
  if (overflowContainer)
  {
    return overflowContainer->RetrieveOverflow(static_cast<int>(enumValue));
  }
  return {};
}
} // namespace SelfManageTargetDestinationMapper
} // namespace Model
} // namespace MigrationHubStrategyRecommendations
} // namespace Aws
| apache-2.0 |
asakusafw/asakusafw-compiler | compiler-project/api/src/main/java/com/asakusafw/lang/compiler/api/reference/PropertyReference.java | 1595 | /**
* Copyright 2011-2021 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.lang.compiler.api.reference;
import com.asakusafw.lang.compiler.model.PropertyName;
import com.asakusafw.lang.compiler.model.description.MethodDescription;
import com.asakusafw.lang.compiler.model.description.TypeDescription;
/**
 * Represents a property of data models.
 */
public interface PropertyReference extends Reference {
    /**
     * Returns the original declaration.
     * This is typically the {@code get<property-name>Option()} method.
     * @return the original declaration
     */
    MethodDescription getDeclaration();
    /**
     * Returns the data model that declares this property.
     * @return the declaring data model
     */
    DataModelReference getOwner();
    /**
     * Returns the property name.
     * @return the property name
     */
    PropertyName getName();
    /**
     * Returns the property type.
     * This is typically a sub-type of the {@code ValueOption} class.
     * @return the property type
     */
    TypeDescription getType();
}
| apache-2.0 |
rajeevanv89/developer-studio | esb/org.wso2.developerstudio.eclipse.gmf.esb.diagram/src/org/wso2/developerstudio/eclipse/gmf/esb/diagram/edit/parts/TemplateEndpointEditPart.java | 11791 | package org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.parts;
import org.eclipse.draw2d.IFigure;
import org.eclipse.draw2d.PositionConstants;
import org.eclipse.draw2d.Shape;
import org.eclipse.draw2d.StackLayout;
import org.eclipse.emf.ecore.EObject;
import org.eclipse.gef.EditPart;
import org.eclipse.gef.EditPolicy;
import org.eclipse.gef.Request;
import org.eclipse.gef.commands.Command;
import org.eclipse.gef.editpolicies.LayoutEditPolicy;
import org.eclipse.gef.editpolicies.NonResizableEditPolicy;
import org.eclipse.gef.requests.CreateRequest;
import org.eclipse.gmf.runtime.diagram.ui.editparts.AbstractBorderedShapeEditPart;
import org.eclipse.gmf.runtime.diagram.ui.editparts.IBorderItemEditPart;
import org.eclipse.gmf.runtime.diagram.ui.editparts.IGraphicalEditPart;
import org.eclipse.gmf.runtime.diagram.ui.editpolicies.BorderItemSelectionEditPolicy;
import org.eclipse.gmf.runtime.diagram.ui.editpolicies.DragDropEditPolicy;
import org.eclipse.gmf.runtime.diagram.ui.editpolicies.EditPolicyRoles;
import org.eclipse.gmf.runtime.diagram.ui.figures.BorderItemLocator;
import org.eclipse.gmf.runtime.draw2d.ui.figures.ConstrainedToolbarLayout;
import org.eclipse.gmf.runtime.draw2d.ui.figures.WrappingLabel;
import org.eclipse.gmf.runtime.gef.ui.figures.DefaultSizeNodeFigure;
import org.eclipse.gmf.runtime.gef.ui.figures.NodeFigure;
import org.eclipse.gmf.runtime.notation.View;
import org.eclipse.gmf.tooling.runtime.edit.policies.reparent.CreationEditPolicyWithCustomReparent;
import org.eclipse.swt.SWT;
import org.eclipse.swt.graphics.Color;
import org.eclipse.swt.graphics.Font;
import org.eclipse.swt.graphics.FontData;
import org.wso2.developerstudio.eclipse.gmf.esb.AddressEndPoint;
import org.wso2.developerstudio.eclipse.gmf.esb.ComplexEndpoints;
import org.wso2.developerstudio.eclipse.gmf.esb.Sequences;
import org.wso2.developerstudio.eclipse.gmf.esb.TemplateEndpoint;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.custom.AbstractEndpoint;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.custom.AbstractSequencesEditPart;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.custom.EsbGraphicalShape;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.custom.EsbGraphicalShapeWithLabel;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.custom.FixedBorderItemLocator;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.custom.ShowPropertyViewEditPolicy;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.policies.TemplateEndpointCanonicalEditPolicy;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.edit.policies.TemplateEndpointItemSemanticEditPolicy;
import org.wso2.developerstudio.eclipse.gmf.esb.diagram.part.EsbVisualIDRegistry;
/**
* @generated NOT
*/
public class TemplateEndpointEditPart extends AbstractEndpoint {
/**
* @generated
*/
public static final int VISUAL_ID = 3716;
/**
* @generated
*/
protected IFigure contentPane;
/**
* @generated
*/
public TemplateEndpointEditPart(View view) {
super(view);
}
/**
* @generated NOT
*/
protected void createDefaultEditPolicies() {
installEditPolicy(EditPolicyRoles.CREATION_ROLE,
new CreationEditPolicyWithCustomReparent(
EsbVisualIDRegistry.TYPED_INSTANCE));
super.createDefaultEditPolicies();
installEditPolicy(EditPolicyRoles.SEMANTIC_ROLE,
new TemplateEndpointItemSemanticEditPolicy());
installEditPolicy(EditPolicyRoles.DRAG_DROP_ROLE,
new DragDropEditPolicy());
installEditPolicy(EditPolicyRoles.CANONICAL_ROLE,
new TemplateEndpointCanonicalEditPolicy());
installEditPolicy(EditPolicy.LAYOUT_ROLE, createLayoutEditPolicy());
// For handle Double click Event.
installEditPolicy(EditPolicyRoles.OPEN_ROLE,
new ShowPropertyViewEditPolicy());
// XXX need an SCR to runtime to have another abstract superclass that would let children add reasonable editpolicies
// removeEditPolicy(org.eclipse.gmf.runtime.diagram.ui.editpolicies.EditPolicyRoles.CONNECTION_HANDLES_ROLE);
}
/**
* @generated
*/
protected LayoutEditPolicy createLayoutEditPolicy() {
org.eclipse.gmf.runtime.diagram.ui.editpolicies.LayoutEditPolicy lep = new org.eclipse.gmf.runtime.diagram.ui.editpolicies.LayoutEditPolicy() {
protected EditPolicy createChildEditPolicy(EditPart child) {
View childView = (View) child.getModel();
switch (EsbVisualIDRegistry.getVisualID(childView)) {
case TemplateEndpointInputConnectorEditPart.VISUAL_ID:
case TemplateEndpointOutputConnectorEditPart.VISUAL_ID:
return new BorderItemSelectionEditPolicy();
}
EditPolicy result = child
.getEditPolicy(EditPolicy.PRIMARY_DRAG_ROLE);
if (result == null) {
result = new NonResizableEditPolicy();
}
return result;
}
protected Command getMoveChildrenCommand(Request request) {
return null;
}
protected Command getCreateCommand(CreateRequest request) {
return null;
}
};
return lep;
}
/**
* @generated NOT
*/
protected IFigure createNodeShape() {
return primaryShape = new TemplateEndpointFigure() {
public void setBounds(org.eclipse.draw2d.geometry.Rectangle rect) {
super.setBounds(rect);
if (this.getBounds().getLocation().x != 0
&& this.getBounds().getLocation().y != 0) {
getMostSuitableElementToConnect();
reAllocate(rect);
}
};
};
}
/**
* @generated
*/
public TemplateEndpointFigure getPrimaryShape() {
return (TemplateEndpointFigure) primaryShape;
}
protected boolean addFixedChild(EditPart childEditPart) {
if (childEditPart instanceof TemplateEndpointInputConnectorEditPart) {
double position;
EObject parentEndpoint = ((org.eclipse.gmf.runtime.notation.impl.NodeImpl) (childEditPart
.getParent()).getModel()).getElement();
if (((TemplateEndpoint) parentEndpoint).getInputConnector()
.getIncomingLinks().size() != 0) {
EObject source = ((TemplateEndpoint) parentEndpoint)
.getInputConnector().getIncomingLinks().get(0)
.getSource().eContainer();
/* position = ((source instanceof ComplexEndpoints) || (source
.eContainer().eContainer() instanceof Sequences)) ? 0.5
: 0.25;*/
position = 0.5;
} else {
/* position = ((this.getParent().getParent().getParent() instanceof ComplexEndpointsEditPart) || (this
.getParent().getParent().getParent() instanceof AbstractSequencesEditPart)) ? 0.5
: 0.25;*/
position = 0.5;
}
IFigure borderItemFigure = ((TemplateEndpointInputConnectorEditPart) childEditPart)
.getFigure();
BorderItemLocator locator = new FixedBorderItemLocator(
getMainFigure(), borderItemFigure, PositionConstants.WEST,
position);
getBorderedFigure().getBorderItemContainer().add(borderItemFigure,
locator);
return true;
}
if (childEditPart instanceof TemplateEndpointOutputConnectorEditPart) {
IFigure borderItemFigure = ((TemplateEndpointOutputConnectorEditPart) childEditPart)
.getFigure();
BorderItemLocator locator = new FixedBorderItemLocator(
getMainFigure(), borderItemFigure, PositionConstants.WEST,
0.75);
getBorderedFigure().getBorderItemContainer().add(borderItemFigure,
locator);
return true;
}
if (childEditPart instanceof TemplateEndpointDescriptionEditPart) {
((TemplateEndpointDescriptionEditPart) childEditPart)
.setLabel(getPrimaryShape().getEndpointDescriptionLabel());
return true;
}
return false;
}
protected boolean removeFixedChild(EditPart childEditPart) {
if (childEditPart instanceof TemplateEndpointInputConnectorEditPart) {
getBorderedFigure().getBorderItemContainer().remove(
((TemplateEndpointInputConnectorEditPart) childEditPart)
.getFigure());
return true;
}
if (childEditPart instanceof TemplateEndpointOutputConnectorEditPart) {
getBorderedFigure().getBorderItemContainer().remove(
((TemplateEndpointOutputConnectorEditPart) childEditPart)
.getFigure());
return true;
}
if (childEditPart instanceof TemplateEndpointDescriptionEditPart) {
return true;
}
return false;
}
protected void addChildVisual(EditPart childEditPart, int index) {
if (addFixedChild(childEditPart)) {
return;
}
super.addChildVisual(childEditPart, -1);
}
protected void removeChildVisual(EditPart childEditPart) {
if (removeFixedChild(childEditPart)) {
return;
}
super.removeChildVisual(childEditPart);
}
protected IFigure getContentPaneFor(IGraphicalEditPart editPart) {
if (editPart instanceof IBorderItemEditPart) {
return getBorderedFigure().getBorderItemContainer();
}
return getContentPane();
}
/**
* @generated
*/
protected NodeFigure createNodePlate() {
DefaultSizeNodeFigure result = new DefaultSizeNodeFigure(40, 40);
return result;
}
/**
* Creates figure for this edit part.
*
* Body of this method does not depend on settings in generation model
* so you may safely remove <i>generated</i> tag and modify it.
*
* @generated
*/
protected NodeFigure createMainFigure() {
NodeFigure figure = createNodePlate();
figure.setLayoutManager(new StackLayout());
IFigure shape = createNodeShape();
figure.add(shape);
contentPane = setupContentPane(shape);
return figure;
}
/**
* Default implementation treats passed figure as content pane.
* Respects layout one may have set for generated figure.
* @param nodeShape instance of generated figure class
* @generated
*/
protected IFigure setupContentPane(IFigure nodeShape) {
if (nodeShape.getLayoutManager() == null) {
ConstrainedToolbarLayout layout = new ConstrainedToolbarLayout();
layout.setSpacing(5);
nodeShape.setLayoutManager(layout);
}
return nodeShape; // use nodeShape itself as contentPane
}
/**
* @generated
*/
public IFigure getContentPane() {
if (contentPane != null) {
return contentPane;
}
return super.getContentPane();
}
/**
* @generated
*/
protected void setForegroundColor(Color color) {
if (primaryShape != null) {
primaryShape.setForegroundColor(color);
}
}
/**
* @generated
*/
protected void setBackgroundColor(Color color) {
if (primaryShape != null) {
primaryShape.setBackgroundColor(color);
}
}
/**
* @generated
*/
protected void setLineWidth(int width) {
if (primaryShape instanceof Shape) {
((Shape) primaryShape).setLineWidth(width);
}
}
/**
* @generated
*/
protected void setLineType(int style) {
if (primaryShape instanceof Shape) {
((Shape) primaryShape).setLineStyle(style);
}
}
/**
* @generated
*/
public class TemplateEndpointFigure extends EsbGraphicalShapeWithLabel {
/**
* @generated
*/
private WrappingLabel fFigureTemplateEndPointNamePropertyLabel;
private WrappingLabel endpointDescriptionLabel;
/**
* @generated
*/
public TemplateEndpointFigure() {
this.setBackgroundColor(THIS_BACK);
createContents();
}
/**
* @generated NOT
*/
private void createContents() {
fFigureTemplateEndPointNamePropertyLabel = new WrappingLabel();
endpointDescriptionLabel = getPropertyNameLabel();
}
/**
* @generated
*/
public WrappingLabel getFigureTemplateEndPointNamePropertyLabel() {
return fFigureTemplateEndPointNamePropertyLabel;
}
public WrappingLabel getEndpointDescriptionLabel() {
return endpointDescriptionLabel;
}
public String getIconPath() {
return "icons/ico20/template-endpoint.gif";
}
public String getNodeName() {
return "Template-EP";
}
public Color getBackgroundColor() {
return THIS_BACK;
}
public Color getLabelBackColor() {
return THIS_LABEL_BACK;
}
}
/**
* @generated NOT
*/
static final Color THIS_BACK = new Color(null, 255, 255, 255);
static final Color THIS_LABEL_BACK = new Color(null, 113, 198, 113);
}
| apache-2.0 |
svenkubiak/mangooio | mangooio-maven-plugin/src/main/java/io/mangoo/build/Watcher.java | 7499 | /**
* Copyright (C) 2012-2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mangoo.build;
import static java.nio.file.LinkOption.NOFOLLOW_LINKS;
import static java.nio.file.StandardWatchEventKinds.ENTRY_CREATE;
import static java.nio.file.StandardWatchEventKinds.ENTRY_MODIFY;
import static java.nio.file.StandardWatchEventKinds.OVERFLOW;
import java.io.IOException;
import java.nio.file.FileSystems;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.StandardWatchEventKinds;
import java.nio.file.WatchEvent;
import java.nio.file.WatchKey;
import java.nio.file.WatchService;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
* This is a refactored version of
* WatchAndRestartMachine.java from the Ninja Web Framework
*
* Original source code can be found here:
* https://github.com/ninjaframework/ninja/blob/develop/ninja-maven-plugin/src/main/java/ninja/build/WatchAndRestartMachine.java
*
* @author svenkubiak
*
*/
@SuppressWarnings({"unchecked"})
public class Watcher implements Runnable {
private static final Logger LOG = LogManager.getLogger(Watcher.class);
private final Trigger trigger;
private final Set<String> includes;
private final Set<String> excludes;
private final WatchService watchService;
private final Map<WatchKey, Path> watchKeys;
private final AtomicInteger takeCount;
private boolean shutdown;
@SuppressWarnings("all")
public Watcher(Set<Path> watchDirectory, Set<String> includes, Set<String> excludes, Trigger trigger) throws IOException {
this.watchService = FileSystems.getDefault().newWatchService();
this.watchKeys = new HashMap<>();
this.includes = includes; //NOSONAR
this.excludes = excludes; //NOSONAR
this.trigger = trigger;
this.takeCount = new AtomicInteger(0);
for (Path path: watchDirectory) {
registerAll(path);
}
}
public void doShutdown() {
this.shutdown = true;
}
@SuppressWarnings("all")
private void registerAll(final Path path) throws IOException {
Files.walkFileTree(path, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult preVisitDirectory(Path path, BasicFileAttributes attrs) throws IOException {
register(path);
return FileVisitResult.CONTINUE;
}
});
}
@SuppressWarnings("all")
private void register(Path path) throws IOException {
WatchKey watchKey = path.register(
watchService,
new WatchEvent.Kind[]{
StandardWatchEventKinds.ENTRY_CREATE,
StandardWatchEventKinds.ENTRY_MODIFY,
StandardWatchEventKinds.ENTRY_DELETE
});
watchKeys.put(watchKey, path);
}
@Override
@SuppressWarnings("all")
public void run() {
for (;;) {
WatchKey watchKey;
try {
watchKey = watchService.take();
takeCount.incrementAndGet();
} catch (InterruptedException e) {
if (!shutdown) {
LOG.error("Unexpectedly interrupted while waiting for take()", e);
}
return;
}
Path path = watchKeys.get(watchKey);
if (path == null) {
LOG.error("WatchKey not recognized!!");
continue;
}
handleEvents(watchKey, path);
if (!watchKey.reset()) {
watchKeys.remove(watchKey);
if (watchKeys.isEmpty()) {
break;
}
}
}
}
@SuppressWarnings("all")
private void handleEvents(WatchKey watchKey, Path path) {
for (WatchEvent<?> watchEvent : watchKey.pollEvents()) {
WatchEvent.Kind<?> watchEventKind = watchEvent.kind();
if (OVERFLOW.equals(watchEventKind)) {
continue;
}
WatchEvent<Path> ev = (WatchEvent<Path>) watchEvent;
Path name = ev.context();
Path child = path.resolve(name);
if (ENTRY_MODIFY.equals(watchEventKind) && !child.toFile().isDirectory()) {
handleNewOrModifiedFile(child);
}
if (ENTRY_CREATE.equals(watchEventKind)) {
if (!child.toFile().isDirectory()) {
handleNewOrModifiedFile(child);
}
try {
if (Files.isDirectory(child, NOFOLLOW_LINKS)) {
registerAll(child);
}
} catch (IOException e) {
LOG.error("Something fishy happened. Unable to register new dir for watching", e);
}
}
}
}
@SuppressWarnings("all")
public void handleNewOrModifiedFile(Path path) {
String absolutePath = path.toFile().getAbsolutePath();
if (isAsset(absolutePath)) {
Minification.minify(absolutePath);
}
RuleMatch match = matchRule(includes, excludes, absolutePath);
if (match.proceed) {
this.trigger.trigger();
}
}
private boolean isAsset(String absolutePath) {
if (StringUtils.isBlank(absolutePath)) {
return false;
}
return !absolutePath.contains("min") && ( absolutePath.endsWith("css") || absolutePath.endsWith("js") );
}
public enum RuleType {
NONE,
INCLUDE,
EXCLUDE
}
public static class RuleMatch {
private final boolean proceed;
public RuleMatch(boolean proceed) {
this.proceed = proceed;
}
public boolean doProceed() {
return this.proceed;
}
}
public static RuleMatch matchRule(Set<String> includes, Set<String> excludes, String string) {
if (includes != null) {
for (String include : includes) {
if (string.matches(include)) {
return new RuleMatch(true);
}
}
}
if (excludes != null) {
for (String exclude : excludes) {
if (string.matches(exclude)) {
return new RuleMatch(false);
}
}
}
return new RuleMatch(true);
}
public static boolean checkIfWouldBeExcluded(Set<String> patterns, String string) {
return !matchRule(null, patterns, string).doProceed();
}
} | apache-2.0 |
prometheus/prometheus | storage/series.go | 8594 | // Copyright 2020 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"math"
"sort"
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/tsdb/chunkenc"
"github.com/prometheus/prometheus/tsdb/chunks"
"github.com/prometheus/prometheus/tsdb/tsdbutil"
)
type SeriesEntry struct {
Lset labels.Labels
SampleIteratorFn func() chunkenc.Iterator
}
func (s *SeriesEntry) Labels() labels.Labels { return s.Lset }
func (s *SeriesEntry) Iterator() chunkenc.Iterator { return s.SampleIteratorFn() }
type ChunkSeriesEntry struct {
Lset labels.Labels
ChunkIteratorFn func() chunks.Iterator
}
func (s *ChunkSeriesEntry) Labels() labels.Labels { return s.Lset }
func (s *ChunkSeriesEntry) Iterator() chunks.Iterator { return s.ChunkIteratorFn() }
// NewListSeries returns series entry with iterator that allows to iterate over provided samples.
func NewListSeries(lset labels.Labels, s []tsdbutil.Sample) *SeriesEntry {
return &SeriesEntry{
Lset: lset,
SampleIteratorFn: func() chunkenc.Iterator {
return NewListSeriesIterator(samples(s))
},
}
}
// NewListChunkSeriesFromSamples returns chunk series entry that allows to iterate over provided samples.
// NOTE: It uses inefficient chunks encoding implementation, not caring about chunk size.
func NewListChunkSeriesFromSamples(lset labels.Labels, samples ...[]tsdbutil.Sample) *ChunkSeriesEntry {
return &ChunkSeriesEntry{
Lset: lset,
ChunkIteratorFn: func() chunks.Iterator {
chks := make([]chunks.Meta, 0, len(samples))
for _, s := range samples {
chks = append(chks, tsdbutil.ChunkFromSamples(s))
}
return NewListChunkSeriesIterator(chks...)
},
}
}
type listSeriesIterator struct {
samples Samples
idx int
}
type samples []tsdbutil.Sample
func (s samples) Get(i int) tsdbutil.Sample { return s[i] }
func (s samples) Len() int { return len(s) }
// Samples interface allows to work on arrays of types that are compatible with tsdbutil.Sample.
type Samples interface {
Get(i int) tsdbutil.Sample
Len() int
}
// NewListSeriesIterator returns listSeriesIterator that allows to iterate over provided samples.
func NewListSeriesIterator(samples Samples) chunkenc.Iterator {
return &listSeriesIterator{samples: samples, idx: -1}
}
func (it *listSeriesIterator) At() (int64, float64) {
s := it.samples.Get(it.idx)
return s.T(), s.V()
}
func (it *listSeriesIterator) Next() bool {
it.idx++
return it.idx < it.samples.Len()
}
func (it *listSeriesIterator) Seek(t int64) bool {
if it.idx == -1 {
it.idx = 0
}
if it.idx >= it.samples.Len() {
return false
}
// No-op check.
if s := it.samples.Get(it.idx); s.T() >= t {
return true
}
// Do binary search between current position and end.
it.idx += sort.Search(it.samples.Len()-it.idx, func(i int) bool {
s := it.samples.Get(i + it.idx)
return s.T() >= t
})
return it.idx < it.samples.Len()
}
func (it *listSeriesIterator) Err() error { return nil }
type listChunkSeriesIterator struct {
chks []chunks.Meta
idx int
}
// NewListChunkSeriesIterator returns listChunkSeriesIterator that allows to iterate over provided chunks.
func NewListChunkSeriesIterator(chks ...chunks.Meta) chunks.Iterator {
return &listChunkSeriesIterator{chks: chks, idx: -1}
}
func (it *listChunkSeriesIterator) At() chunks.Meta {
return it.chks[it.idx]
}
func (it *listChunkSeriesIterator) Next() bool {
it.idx++
return it.idx < len(it.chks)
}
func (it *listChunkSeriesIterator) Err() error { return nil }
type chunkSetToSeriesSet struct {
ChunkSeriesSet
chkIterErr error
sameSeriesChunks []Series
}
// NewSeriesSetFromChunkSeriesSet converts ChunkSeriesSet to SeriesSet by decoding chunks one by one.
func NewSeriesSetFromChunkSeriesSet(chk ChunkSeriesSet) SeriesSet {
return &chunkSetToSeriesSet{ChunkSeriesSet: chk}
}
func (c *chunkSetToSeriesSet) Next() bool {
if c.Err() != nil || !c.ChunkSeriesSet.Next() {
return false
}
iter := c.ChunkSeriesSet.At().Iterator()
c.sameSeriesChunks = c.sameSeriesChunks[:0]
for iter.Next() {
c.sameSeriesChunks = append(
c.sameSeriesChunks,
newChunkToSeriesDecoder(c.ChunkSeriesSet.At().Labels(), iter.At()),
)
}
if iter.Err() != nil {
c.chkIterErr = iter.Err()
return false
}
return true
}
func (c *chunkSetToSeriesSet) At() Series {
// Series composed of same chunks for the same series.
return ChainedSeriesMerge(c.sameSeriesChunks...)
}
func (c *chunkSetToSeriesSet) Err() error {
if c.chkIterErr != nil {
return c.chkIterErr
}
return c.ChunkSeriesSet.Err()
}
func newChunkToSeriesDecoder(labels labels.Labels, chk chunks.Meta) Series {
return &SeriesEntry{
Lset: labels,
SampleIteratorFn: func() chunkenc.Iterator {
// TODO(bwplotka): Can we provide any chunkenc buffer?
return chk.Chunk.Iterator(nil)
},
}
}
type seriesSetToChunkSet struct {
SeriesSet
}
// NewSeriesSetToChunkSet converts SeriesSet to ChunkSeriesSet by encoding chunks from samples.
func NewSeriesSetToChunkSet(chk SeriesSet) ChunkSeriesSet {
return &seriesSetToChunkSet{SeriesSet: chk}
}
func (c *seriesSetToChunkSet) Next() bool {
if c.Err() != nil || !c.SeriesSet.Next() {
return false
}
return true
}
func (c *seriesSetToChunkSet) At() ChunkSeries {
return NewSeriesToChunkEncoder(c.SeriesSet.At())
}
func (c *seriesSetToChunkSet) Err() error {
return c.SeriesSet.Err()
}
type seriesToChunkEncoder struct {
Series
}
const seriesToChunkEncoderSplit = 120
// NewSeriesToChunkEncoder encodes samples to chunks with 120 samples limit.
func NewSeriesToChunkEncoder(series Series) ChunkSeries {
return &seriesToChunkEncoder{series}
}
func (s *seriesToChunkEncoder) Iterator() chunks.Iterator {
chk := chunkenc.NewXORChunk()
app, err := chk.Appender()
if err != nil {
return errChunksIterator{err: err}
}
mint := int64(math.MaxInt64)
maxt := int64(math.MinInt64)
chks := []chunks.Meta{}
i := 0
seriesIter := s.Series.Iterator()
for seriesIter.Next() {
// Create a new chunk if too many samples in the current one.
if i >= seriesToChunkEncoderSplit {
chks = append(chks, chunks.Meta{
MinTime: mint,
MaxTime: maxt,
Chunk: chk,
})
chk = chunkenc.NewXORChunk()
app, err = chk.Appender()
if err != nil {
return errChunksIterator{err: err}
}
mint = int64(math.MaxInt64)
// maxt is immediately overwritten below which is why setting it here won't make a difference.
i = 0
}
t, v := seriesIter.At()
app.Append(t, v)
maxt = t
if mint == math.MaxInt64 {
mint = t
}
i++
}
if err := seriesIter.Err(); err != nil {
return errChunksIterator{err: err}
}
chks = append(chks, chunks.Meta{
MinTime: mint,
MaxTime: maxt,
Chunk: chk,
})
return NewListChunkSeriesIterator(chks...)
}
type errChunksIterator struct {
err error
}
func (e errChunksIterator) At() chunks.Meta { return chunks.Meta{} }
func (e errChunksIterator) Next() bool { return false }
func (e errChunksIterator) Err() error { return e.err }
// ExpandSamples iterates over all samples in the iterator, buffering all in slice.
// Optionally it takes samples constructor, useful when you want to compare sample slices with different
// sample implementations. if nil, sample type from this package will be used.
func ExpandSamples(iter chunkenc.Iterator, newSampleFn func(t int64, v float64) tsdbutil.Sample) ([]tsdbutil.Sample, error) {
if newSampleFn == nil {
newSampleFn = func(t int64, v float64) tsdbutil.Sample { return sample{t, v} }
}
var result []tsdbutil.Sample
for iter.Next() {
t, v := iter.At()
// NaNs can't be compared normally, so substitute for another value.
if math.IsNaN(v) {
v = -42
}
result = append(result, newSampleFn(t, v))
}
return result, iter.Err()
}
// ExpandChunks iterates over all chunks in the iterator, buffering all in slice.
func ExpandChunks(iter chunks.Iterator) ([]chunks.Meta, error) {
var result []chunks.Meta
for iter.Next() {
result = append(result, iter.At())
}
return result, iter.Err()
}
| apache-2.0 |
cuoretech/dowork | dowork/Model/database_populate.py | 7444 | """
from py2neo import neo4j, ogm, node, rel
from cuorewebpage.Model.Calendar import Calendar
from cuorewebpage.Model.Event import Event
from datetime import datetime
from cuorewebpage.Model.Project import Project
from cuorewebpage.Model.Task import Task, STS_OPEN, STS_IN_PROG
from cuorewebpage.Model.Workspace import Workspace
from database_config import *
#from Person import getCurrentUser, getUser
from Company import Company
from Department import Department
from Title import Title
from User import User, getCurrentUser, getUser
from Blog import Blog
from Post import Post
graph_db = neo4j.GraphDatabaseService(db_config['uri'])
graph_db.clear()
company = Company(Name="Cuore").getNode()
departments = [ Department(name="Business", company=company).getNode(),
Department(name="Applications", company=company).getNode(),
Department(name="Hardware", company=company).getNode(),
Department(name="Systems", company=company).getNode(),
Department(name="Admin", company=company).getNode(), ]
titles = dict(
Pres=Title(name="President", dept=departments[0]).getNode(),
VP=Title(name="Vice-President", dept=departments[0]).getNode(),
AppDev=Title(name="Applications Developer", dept=departments[1]).getNode(),
WebAppDev=Title(name="Web Applications Developer", dept=departments[1]).getNode(),
LeadAppDev=Title(name="Lead Applications Developer", dept=departments[1]).getNode(),
LeadHwEngr=Title(name="Lead Hardware Engineer", dept=departments[2]).getNode(),
LeadSysEngr=Title(name="Lead Systems Engineer", dept=departments[3]).getNode(),
Admin=Title(name="Admin", dept=departments[4]).getNode(),
)
users = dict(
leo=User(uid="0", first_name="Leo", last_name="Schultz", email="leo@cuore.io", confirmed=3, req_title="President").getNode(),
kevin_r=User(uid="1", first_name="Kevin", last_name="Ryan", email="kevin@cuore.io", confirmed=3, req_title="Vice-President").getNode(),
sandy=User(uid="2C1F3V0RiQqS0rJY5qEejQ==", first_name="Sandy", last_name="Siththanandan", email="sandy@cuore.io",
confirmed=3, req_title="Applications Developer", phone="6502695948", city="SF", state="CA", zipcode="94112").getNode(),
kirby=User(uid="7jxjnWJGsgCTBjzKX7Yk3Q==", first_name="Kirby", last_name="Linvill", email="kirby@cuore.io", confirmed=3,
req_title="Applications Developer", photo="cuorewebpage:img/menu_icons/profile.png").getNode(),
vincente=User(uid="ENIFCyZRQceEalwDDBI8nA==", first_name="Vincente", last_name="Ciancio", email="vincente@cuore.io", confirmed=3,
req_title="Applications Developer", ).getNode(),
sergio=User(uid="tFNm//nfbPwzHfYyVYHv6w==", first_name="Sergio", last_name="Peleo", email="sergio@cuore.io", confirmed=3,
req_title="Lead Applications Developer", ).getNode(),
mason=User(uid="5", first_name="Mason", last_name="Borda", email="mason@cuore.io", confirmed=3,
req_title="Lead Hardware Engineer").getNode(),
kevin_a=User(uid="6", first_name="Kevin", last_name="Aloysius", email="luscious@cuore.io", confirmed=3,
req_title="Lead Systems Engineer").getNode(),
test=User(uid="7", first_name="Tester", last_name="Jones", email="TJones@cuore.io", confirmed=0, req_dept="Applications",
req_title="Applications Developer", photo="cuorewebpage:img/menu_icons/profile.png").getNode())
unconfirmedNode=graph_db.get_or_create_indexed_node("Unconfirmed", "name", "unconfirmed", {"name":"unconfirmed"})
graph_db.create((users['test'], REL_UNCONFIRMED, unconfirmedNode))
#Calendar(Name=(user.getFullName() + "'s Calendar"), Owner=user.getNode())
graph_db.create(
(titles['Admin'], REL_HASUSER, users['kirby']),
(titles['Admin'], REL_HASUSER, users['leo']),
# (titles['Admin'], REL_HASUSER, users['sandy']),
(titles['Pres'], REL_HASUSER, users['leo']),
(titles['VP'], REL_HASUSER, users['kevin_r']),
(titles['AppDev'], REL_HASUSER, users['sandy']),
(titles['AppDev'], REL_HASUSER, users['kirby']),
(titles['AppDev'], REL_HASUSER, users['vincente']),
(titles['LeadAppDev'], REL_HASUSER, users['sergio']),
(titles['LeadHwEngr'], REL_HASUSER, users['mason']),
(titles['LeadSysEngr'], REL_HASUSER, users['kevin_a']),
# (users['sandy'], REL_ISMEMBER, departments[1]),
# (users['sandy'], REL_ISMEMBER, departments[4]),
)
for team in departments:
workspaces = list()
workspaces.append(Workspace(Name=(Department(team).getName() + " Workspace"), Owner=Department(team).getNode()))
bus_blog = Blog(Name="Business", Owner=departments[0])
app_blog = Blog(Name="Applications", Owner=departments[1])
hw_blog = Blog(Name="Hardware", Owner=departments[2])
sys_blog = Blog(Name="Systems", Owner=departments[3])
adm_blog = Blog(Name="Admin", Owner=departments[4])
cuore_blog = Blog(Name="Cuore", Owner=company)
cuore_blog.setDescription("Company wide news")
event_meet_time = (datetime.now()-datetime(1970,1,1)).total_seconds()
event_meeting = Event(Name="General Meeting", Owner=users['leo'], sTime=event_meet_time, eTime=event_meet_time)
leo_calendar = Calendar(Name=(User(users['leo']).getFullName() + "'s Calendar"), Owner=User(users['leo']).getNode())
leo_calendar.addEvent(event_meeting.getNode())
app_team = Department(departments[1]).getUsers()
for person in app_team:
mUser = User(person)
workspace = mUser.getWorkspace()
app_calendar = Calendar(Name=(mUser.getFullName() + "'s Calendar"), Owner=mUser.getNode())
event_app_time = (datetime(2014, 1, 19)-datetime(1970,1,1)).total_seconds()
event_app_hack = Event(Name="Applications Hack Event", sTime=event_app_time, eTime=event_app_time)
event_app_hack.addOwner(users['sergio'])
app_calendar.setDescription("Calendar which outlines all of the tasks that are assigned to" + mUser.getFirstName())
app_calendar.addEvent(event_app_hack.getNode())
event_meeting.addInvitee(mUser.getNode())
project = Project(Name="Intranet Project")
task1 = Task(Name="Finish the Intranet", Status=STS_IN_PROG)
task1.assignToUser(mUser.getNode())
workspace.addProject(project.getNode())
# workspace.addOwner(mUser.getNode())
project.addTask(task1.getNode())
for key in users.keys():
mUser = User(users[key])
calendar = Calendar(Name=(mUser.getFullName() + "'s Calendar"), Owner=mUser.getNode())
# workspace = Workspace(Name=(mUser.getFullName() + "'s Workspace"), Owner=mUser.getNode())
sandy = User(users['sandy'])
"""
"""post1 = Post(Name="My Goodness", Content="I am so totally cracked out from doing this all night, I really should" \
" learn not to procrastinate so that I don't have to pull all nighters", Owner=sandy.getNode())
post2 = Post(Name="Quite Exciting", Content="Maybe it is time for me to go to sleep, although looking at the clock" \
" I almost feel like what a wuss, it's only 12:25!", Owner=sandy.getNode())
post3 = Post(Name="Maybe it's the lead paint though", Content="Did you know that lead paint vaporizes around or above 1100 degrees" \
" Fahrenheit? Yeah, so maybe house paint from 1906 and blowtorches aren't the best combination for your health" \
" but what are you going to do?", Owner=sandy.getNode())
post1.setBlog(cuore_blog.getNode())
post2.setBlog(cuore_blog.getNode())
post3.setBlog(cuore_blog.getNode())
"""
| apache-2.0 |
cedral/aws-sdk-cpp | aws-cpp-sdk-mediaconvert/source/model/MotionImageInsertionOffset.cpp | 1875 | /*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/mediaconvert/model/MotionImageInsertionOffset.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
namespace Aws
{
namespace MediaConvert
{
namespace Model
{
MotionImageInsertionOffset::MotionImageInsertionOffset() :
m_imageX(0),
m_imageXHasBeenSet(false),
m_imageY(0),
m_imageYHasBeenSet(false)
{
}
MotionImageInsertionOffset::MotionImageInsertionOffset(JsonView jsonValue) :
m_imageX(0),
m_imageXHasBeenSet(false),
m_imageY(0),
m_imageYHasBeenSet(false)
{
*this = jsonValue;
}
MotionImageInsertionOffset& MotionImageInsertionOffset::operator =(JsonView jsonValue)
{
if(jsonValue.ValueExists("imageX"))
{
m_imageX = jsonValue.GetInteger("imageX");
m_imageXHasBeenSet = true;
}
if(jsonValue.ValueExists("imageY"))
{
m_imageY = jsonValue.GetInteger("imageY");
m_imageYHasBeenSet = true;
}
return *this;
}
JsonValue MotionImageInsertionOffset::Jsonize() const
{
JsonValue payload;
if(m_imageXHasBeenSet)
{
payload.WithInteger("imageX", m_imageX);
}
if(m_imageYHasBeenSet)
{
payload.WithInteger("imageY", m_imageY);
}
return payload;
}
} // namespace Model
} // namespace MediaConvert
} // namespace Aws
| apache-2.0 |
prowide/prowide-core | src/main/java/com/prowidesoftware/swift/model/MIR.java | 7580 | /*
* Copyright 2006-2021 Prowide
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.prowidesoftware.swift.model;
import com.prowidesoftware.swift.utils.SwiftFormatUtils;
import java.util.Calendar;
import java.util.Objects;
/**
* This class models and parses the Message Input Reference (MIR),
* String of 28 characters, always local to the sender of the message.
* It includes the date the sender sent the message to SWIFT,
* followed by the full LT address of the sender of the
* message, and the sender's session and sequence to SWIFT.
* YYMMDD BANKBEBBAXXX 2222 123456<br><br>
*
* <p>MIR and MOR are messages unique identifiers containing the date,
* logical terminal (including branch code), session and sequence numbers.
* Nevertheless this identifiers can be confusing sometimes because they must
* be thought from SWIFT perspective.
*
* <p>A message created by the sender user/application is considered an
* INPUT message, because it gets into the SWIFT network. When the message
* is delivered and gets out of the network it is considered an OUTPUT message.
* Therefore the headers of a sent message are not exactly the same as the
* headers of the received message at the destination party. Analogous the
* headers of a message that the receiving user/application gets from SWIFT
* are not exactly the same as the headers when the message was created and
* sent by the sending party.
*
* <p>The usage of MIR and MOR are clear when analyzing system messages.
* A non delivery warning for example, includes the original MIR of the
* sent message, but not the MOR because the message was not delivered yet.
* But a delivery confirmation on the other hand, includes both, the sender’s MIR
* and the receiver’s MOR.<br>
* System messages provide MIR/MOR information using fields 106 and 107 respectively.
*
* @since 6.0
*/
public class MIR {
private static final transient java.util.logging.Logger log = java.util.logging.Logger.getLogger(MIR.class.getName());
/**
* 6 characters string containing the date field of the MIR.
*/
private String date;
/**
* String of 12 characters containing the logical terminal field of the MIR
* (address of the sender of the message).
*
* @see "MIR on the WIFE Wiki"
*/
private String logicalTerminal;
/**
* String of 4 characters containing the session number field of the MIR.
*
* @see "MIR on the WIFE Wiki"
*/
private String sessionNumber;
/**
* String of 6 characters containing the sequence number field of the MIR.
*
* @see "MIR on the WIFE Wiki"
*/
private String sequenceNumber;
public MIR(String date, String logicalTerminal, String sessionNumber, String sequenceNumber) {
this.date = date;
this.logicalTerminal = logicalTerminal;
this.sessionNumber = sessionNumber;
this.sequenceNumber = sequenceNumber;
}
/**
* Creates a MIR object parsing the literal string value.
* If the value is incorrect (cannot be parsed) the object will not be initialized.
*
* @param value the MIR value, it is expected to 28 characters length
*/
public MIR(String value) {
if (value != null && value.length() == 28) {
final StringBuilder sb = new StringBuilder(value);
int offset = 0;
int len;
len = 6;
this.date = String.valueOf(sb.subSequence(offset, offset + len));
offset += len;
len = 12;
this.logicalTerminal = String.valueOf(sb.subSequence(offset, offset + len));
offset += len;
len = 4;
this.sessionNumber = String.valueOf(sb.subSequence(offset, offset + len));
offset += len;
len = 6;
this.sequenceNumber = String.valueOf(sb.subSequence(offset, offset + len));
} else {
log.severe("invalid MIR value " + value);
}
}
/**
* Default constructor
*/
public MIR() {
}
/**
* @return the date
*/
public String getDate() {
return date;
}
/**
* @param date a date formatted as YYMMDD
*/
public void setDate(String date) {
this.date = date;
}
/**
* Sets a date from a calendar, formatting it as YYMMDD
*
* @param date a date
* @since 7.10.4
*/
public void setDate(Calendar date) {
this.date = SwiftFormatUtils.getDate2(date);
}
/**
* @return the logical terminal
*/
public String getLogicalTerminal() {
return logicalTerminal;
}
public void setLogicalTerminal(String logicalTerminal) {
this.logicalTerminal = logicalTerminal;
}
/**
* @return the session number
*/
public String getSessionNumber() {
return sessionNumber;
}
public void setSessionNumber(String sessionNumber) {
this.sessionNumber = sessionNumber;
}
/**
* @return the sequence number
*/
public String getSequenceNumber() {
return sequenceNumber;
}
public void setSequenceNumber(String sequenceNumber) {
this.sequenceNumber = sequenceNumber;
}
/**
* Gets the full MIR (Message Input Reference) string of 28
* characters containing the sender's date, LT address,
* session and sequence:<br>
* for example YYMMDDBANKBEBBAXXX2222123456<br>
*
* @return a String with MIR, returns null if all MIR components are null
*/
public String getMIR() {
if (date == null && logicalTerminal == null && sessionNumber == null && sequenceNumber == null) {
return null;
}
final StringBuilder v = new StringBuilder();
if (date != null) {
v.append(date);
}
if (logicalTerminal != null) {
v.append(logicalTerminal);
}
if (sessionNumber != null) {
v.append(sessionNumber);
}
if (sequenceNumber != null) {
v.append(sequenceNumber);
}
return v.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
MIR mir = (MIR) o;
return Objects.equals(date, mir.date) &&
Objects.equals(logicalTerminal, mir.logicalTerminal) &&
Objects.equals(sessionNumber, mir.sessionNumber) &&
Objects.equals(sequenceNumber, mir.sequenceNumber);
}
@Override
public int hashCode() {
return Objects.hash(date, logicalTerminal, sessionNumber, sequenceNumber);
}
/**
* Returns this MIR date as Calendar.
* This implementation uses {@link SwiftFormatUtils#getDate2(String)}
*
* @return the parsed date or null if MIR date is invalid or not set
* @since 7.8.8
*/
public final Calendar getDateAsCalendar() {
return SwiftFormatUtils.getDate2(this.date);
}
} | apache-2.0 |
111pontes/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_ipv6_ma_oper.py | 93085 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'Ipv6MaIfAddrStateEnum' : _MetaInfoEnum('Ipv6MaIfAddrStateEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper',
{
'active':'active',
'deprecated':'deprecated',
'duplicate':'duplicate',
'inaccessible':'inaccessible',
'tentative':'tentative',
}, 'Cisco-IOS-XR-ipv6-ma-oper', _yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper']),
'Ipv6MaIfLineStateEnum' : _MetaInfoEnum('Ipv6MaIfLineStateEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper',
{
'down':'down',
'up':'up',
'unknown':'unknown',
'error':'error',
}, 'Cisco-IOS-XR-ipv6-ma-oper', _yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper']),
'Ipv6MaOperStateEnum' : _MetaInfoEnum('Ipv6MaOperStateEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper',
{
'oper-up':'oper_up',
'oper-down':'oper_down',
}, 'Cisco-IOS-XR-ipv6-ma-oper', _yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper']),
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs.Brief.LinkLocalAddress' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs.Brief.LinkLocalAddress',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 Address
''',
'address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('address-state', REFERENCE_ENUM_CLASS, 'Ipv6MaIfAddrStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6MaIfAddrStateEnum',
[], [],
''' State of Address
''',
'address_state',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('is-anycast', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Anycast address
''',
'is_anycast',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('prefix-length', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Prefix Length of IPv6 Address
''',
'prefix_length',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('route-tag', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Route-tag of the Address
''',
'route_tag',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'link-local-address',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs.Brief.Address' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs.Brief.Address',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 Address
''',
'address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('address-state', REFERENCE_ENUM_CLASS, 'Ipv6MaIfAddrStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6MaIfAddrStateEnum',
[], [],
''' State of Address
''',
'address_state',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('is-anycast', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Anycast address
''',
'is_anycast',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('prefix-length', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Prefix Length of IPv6 Address
''',
'prefix_length',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('route-tag', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Route-tag of the Address
''',
'route_tag',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'address',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs.Brief' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs.Brief',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [b'(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' The name of the interface
''',
'interface_name',
'Cisco-IOS-XR-ipv6-ma-oper', True),
_MetaInfoClassMember('address', REFERENCE_LIST, 'Address' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs.Brief.Address',
[], [],
''' Address List
''',
'address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('line-state', REFERENCE_ENUM_CLASS, 'Ipv6MaIfLineStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6MaIfLineStateEnum',
[], [],
''' State of Interface Line
''',
'line_state',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('link-local-address', REFERENCE_CLASS, 'LinkLocalAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs.Brief.LinkLocalAddress',
[], [],
''' Link Local Address
''',
'link_local_address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'brief',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs',
False,
[
_MetaInfoClassMember('brief', REFERENCE_LIST, 'Brief' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs.Brief',
[], [],
''' Brief interface IPv6 network operational
data for an interface
''',
'brief',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'briefs',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.LinkLocalAddress' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.LinkLocalAddress',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 Address
''',
'address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('address-state', REFERENCE_ENUM_CLASS, 'Ipv6MaIfAddrStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6MaIfAddrStateEnum',
[], [],
''' State of Address
''',
'address_state',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('is-anycast', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Anycast address
''',
'is_anycast',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('prefix-length', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Prefix Length of IPv6 Address
''',
'prefix_length',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('route-tag', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Route-tag of the Address
''',
'route_tag',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'link-local-address',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.AccessControlList' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.AccessControlList',
False,
[
_MetaInfoClassMember('common-in-bound', ATTRIBUTE, 'str' , None, None,
[], [],
''' Common ACL applied to incoming packets
''',
'common_in_bound',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('common-out-bound', ATTRIBUTE, 'str' , None, None,
[], [],
''' Common ACL applied to outgoing packets
''',
'common_out_bound',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('in-bound', ATTRIBUTE, 'str' , None, None,
[], [],
''' ACL applied to incoming packets
''',
'in_bound',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('out-bound', ATTRIBUTE, 'str' , None, None,
[], [],
''' ACL applied to outgoing packets
''',
'out_bound',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'access-control-list',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.MultiAccessControlList' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.MultiAccessControlList',
False,
[
_MetaInfoClassMember('common', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Common ACLs
''',
'common',
'Cisco-IOS-XR-ipv6-ma-oper', False, max_elements=5),
_MetaInfoClassMember('inbound', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Inbound ACLs
''',
'inbound',
'Cisco-IOS-XR-ipv6-ma-oper', False, max_elements=5),
_MetaInfoClassMember('outbound', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Outbound ACLs
''',
'outbound',
'Cisco-IOS-XR-ipv6-ma-oper', False, max_elements=5),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'multi-access-control-list',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.Rpf' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.Rpf',
False,
[
_MetaInfoClassMember('allow-default-route', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Allow Default Route
''',
'allow_default_route',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('allow-self-ping', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Allow Self Ping
''',
'allow_self_ping',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Enable RPF config
''',
'enable',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('mode', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' RPF Mode (loose/strict)
''',
'mode',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'rpf',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.BgpPa.Input' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.BgpPa.Input',
False,
[
_MetaInfoClassMember('destination', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Enable destination accouting
''',
'destination',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Enable BGP PA for ingress/egress
''',
'enable',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('source', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Enable source accouting
''',
'source',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'input',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.BgpPa.Output' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.BgpPa.Output',
False,
[
_MetaInfoClassMember('destination', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Enable destination accouting
''',
'destination',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Enable BGP PA for ingress/egress
''',
'enable',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('source', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Enable source accouting
''',
'source',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'output',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.BgpPa' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.BgpPa',
False,
[
_MetaInfoClassMember('input', REFERENCE_CLASS, 'Input' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.BgpPa.Input',
[], [],
''' BGP PA input config
''',
'input',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('output', REFERENCE_CLASS, 'Output' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.BgpPa.Output',
[], [],
''' BGP PA output config
''',
'output',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'bgp-pa',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.Utime' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.Utime',
False,
[
],
'Cisco-IOS-XR-ipv6-ma-oper',
'utime',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.IdbUtime' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.IdbUtime',
False,
[
],
'Cisco-IOS-XR-ipv6-ma-oper',
'idb-utime',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.CapsUtime' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.CapsUtime',
False,
[
],
'Cisco-IOS-XR-ipv6-ma-oper',
'caps-utime',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.FwdEnUtime' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.FwdEnUtime',
False,
[
],
'Cisco-IOS-XR-ipv6-ma-oper',
'fwd-en-utime',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.FwdDisUtime' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.FwdDisUtime',
False,
[
],
'Cisco-IOS-XR-ipv6-ma-oper',
'fwd-dis-utime',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.MulticastGroup' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.MulticastGroup',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 Address of Multicast Group
''',
'address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'multicast-group',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.Address' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.Address',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 Address
''',
'address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('address-state', REFERENCE_ENUM_CLASS, 'Ipv6MaIfAddrStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6MaIfAddrStateEnum',
[], [],
''' State of Address
''',
'address_state',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('is-anycast', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Anycast address
''',
'is_anycast',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('prefix-length', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Prefix Length of IPv6 Address
''',
'prefix_length',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('route-tag', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Route-tag of the Address
''',
'route_tag',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'address',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.ClientMulticastGroup' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.ClientMulticastGroup',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 Address of Multicast Group
''',
'address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'client-multicast-group',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [b'(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' The name of the interface
''',
'interface_name',
'Cisco-IOS-XR-ipv6-ma-oper', True),
_MetaInfoClassMember('access-control-list', REFERENCE_CLASS, 'AccessControlList' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.AccessControlList',
[], [],
''' IPv6 Access Control List
''',
'access_control_list',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('address', REFERENCE_LIST, 'Address' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.Address',
[], [],
''' Address List
''',
'address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('bgp-pa', REFERENCE_CLASS, 'BgpPa' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.BgpPa',
[], [],
''' BGP PA config on the interface
''',
'bgp_pa',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('caps-utime', REFERENCE_CLASS, 'CapsUtime' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.CapsUtime',
[], [],
''' CAPS Add Time
''',
'caps_utime',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('client-multicast-group', REFERENCE_LIST, 'ClientMulticastGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.ClientMulticastGroup',
[], [],
''' IPv6 Client Multicast Group
''',
'client_multicast_group',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('flow-tag-dst', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is BGP Flow Tag Destination is enable
''',
'flow_tag_dst',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('flow-tag-src', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is BGP Flow Tag Source is enable
''',
'flow_tag_src',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('fwd-dis-utime', REFERENCE_CLASS, 'FwdDisUtime' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.FwdDisUtime',
[], [],
''' FWD DISABLE Time
''',
'fwd_dis_utime',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('fwd-en-utime', REFERENCE_CLASS, 'FwdEnUtime' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.FwdEnUtime',
[], [],
''' FWD ENABLE Time
''',
'fwd_en_utime',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('idb-utime', REFERENCE_CLASS, 'IdbUtime' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.IdbUtime',
[], [],
''' IDB Create Time
''',
'idb_utime',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('is-icmp-unreach-enabled', ATTRIBUTE, 'bool' , None, None,
[], [],
''' ICMP unreach Enable
''',
'is_icmp_unreach_enabled',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('line-state', REFERENCE_ENUM_CLASS, 'Ipv6MaIfLineStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6MaIfLineStateEnum',
[], [],
''' State of Interface Line
''',
'line_state',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('link-local-address', REFERENCE_CLASS, 'LinkLocalAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.LinkLocalAddress',
[], [],
''' Link Local Address
''',
'link_local_address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('mlacp-active', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is mLACP state Active (valid if RG ID exists)
''',
'mlacp_active',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('mtu', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' IPv6 MTU
''',
'mtu',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('multi-access-control-list', REFERENCE_CLASS, 'MultiAccessControlList' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.MultiAccessControlList',
[], [],
''' Multi IPv6 Access Control List
''',
'multi_access_control_list',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('multicast-group', REFERENCE_LIST, 'MulticastGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.MulticastGroup',
[], [],
''' IPv6 Multicast Group
''',
'multicast_group',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('operation-state', REFERENCE_ENUM_CLASS, 'Ipv6MaOperStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6MaOperStateEnum',
[], [],
''' IPv6 Operation State
''',
'operation_state',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('rg-id-exists', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Does ICCP RG ID exist on the interface?
''',
'rg_id_exists',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('rpf', REFERENCE_CLASS, 'Rpf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.Rpf',
[], [],
''' RPF config on the interface
''',
'rpf',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('utime', REFERENCE_CLASS, 'Utime' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.Utime',
[], [],
''' Address Publish Time
''',
'utime',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'global-detail',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails',
False,
[
_MetaInfoClassMember('global-detail', REFERENCE_LIST, 'GlobalDetail' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail',
[], [],
''' Detail interface IPv6 network operational
data for an interface
''',
'global_detail',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'global-details',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs.GlobalBrief.LinkLocalAddress' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs.GlobalBrief.LinkLocalAddress',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 Address
''',
'address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('address-state', REFERENCE_ENUM_CLASS, 'Ipv6MaIfAddrStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6MaIfAddrStateEnum',
[], [],
''' State of Address
''',
'address_state',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('is-anycast', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Anycast address
''',
'is_anycast',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('prefix-length', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Prefix Length of IPv6 Address
''',
'prefix_length',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('route-tag', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Route-tag of the Address
''',
'route_tag',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'link-local-address',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs.GlobalBrief.Address' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs.GlobalBrief.Address',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 Address
''',
'address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('address-state', REFERENCE_ENUM_CLASS, 'Ipv6MaIfAddrStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6MaIfAddrStateEnum',
[], [],
''' State of Address
''',
'address_state',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('is-anycast', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Anycast address
''',
'is_anycast',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('prefix-length', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Prefix Length of IPv6 Address
''',
'prefix_length',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('route-tag', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Route-tag of the Address
''',
'route_tag',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'address',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs.GlobalBrief' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs.GlobalBrief',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [b'(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' The name of the interface
''',
'interface_name',
'Cisco-IOS-XR-ipv6-ma-oper', True),
_MetaInfoClassMember('address', REFERENCE_LIST, 'Address' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs.GlobalBrief.Address',
[], [],
''' Address List
''',
'address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('line-state', REFERENCE_ENUM_CLASS, 'Ipv6MaIfLineStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6MaIfLineStateEnum',
[], [],
''' State of Interface Line
''',
'line_state',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('link-local-address', REFERENCE_CLASS, 'LinkLocalAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs.GlobalBrief.LinkLocalAddress',
[], [],
''' Link Local Address
''',
'link_local_address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'global-brief',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs',
False,
[
_MetaInfoClassMember('global-brief', REFERENCE_LIST, 'GlobalBrief' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs.GlobalBrief',
[], [],
''' Brief interface IPv6 network operational
data for an interface
''',
'global_brief',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'global-briefs',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.LinkLocalAddress' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.LinkLocalAddress',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 Address
''',
'address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('address-state', REFERENCE_ENUM_CLASS, 'Ipv6MaIfAddrStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6MaIfAddrStateEnum',
[], [],
''' State of Address
''',
'address_state',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('is-anycast', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Anycast address
''',
'is_anycast',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('prefix-length', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Prefix Length of IPv6 Address
''',
'prefix_length',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('route-tag', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Route-tag of the Address
''',
'route_tag',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'link-local-address',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.AccessControlList' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.AccessControlList',
False,
[
_MetaInfoClassMember('common-in-bound', ATTRIBUTE, 'str' , None, None,
[], [],
''' Common ACL applied to incoming packets
''',
'common_in_bound',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('common-out-bound', ATTRIBUTE, 'str' , None, None,
[], [],
''' Common ACL applied to outgoing packets
''',
'common_out_bound',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('in-bound', ATTRIBUTE, 'str' , None, None,
[], [],
''' ACL applied to incoming packets
''',
'in_bound',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('out-bound', ATTRIBUTE, 'str' , None, None,
[], [],
''' ACL applied to outgoing packets
''',
'out_bound',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'access-control-list',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.MultiAccessControlList' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.MultiAccessControlList',
False,
[
_MetaInfoClassMember('common', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Common ACLs
''',
'common',
'Cisco-IOS-XR-ipv6-ma-oper', False, max_elements=5),
_MetaInfoClassMember('inbound', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Inbound ACLs
''',
'inbound',
'Cisco-IOS-XR-ipv6-ma-oper', False, max_elements=5),
_MetaInfoClassMember('outbound', REFERENCE_LEAFLIST, 'str' , None, None,
[], [],
''' Outbound ACLs
''',
'outbound',
'Cisco-IOS-XR-ipv6-ma-oper', False, max_elements=5),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'multi-access-control-list',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.Rpf' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.Rpf',
False,
[
_MetaInfoClassMember('allow-default-route', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Allow Default Route
''',
'allow_default_route',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('allow-self-ping', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Allow Self Ping
''',
'allow_self_ping',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Enable RPF config
''',
'enable',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('mode', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' RPF Mode (loose/strict)
''',
'mode',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'rpf',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.BgpPa.Input' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.BgpPa.Input',
False,
[
_MetaInfoClassMember('destination', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Enable destination accouting
''',
'destination',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Enable BGP PA for ingress/egress
''',
'enable',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('source', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Enable source accouting
''',
'source',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'input',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.BgpPa.Output' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.BgpPa.Output',
False,
[
_MetaInfoClassMember('destination', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Enable destination accouting
''',
'destination',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Enable BGP PA for ingress/egress
''',
'enable',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('source', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Enable source accouting
''',
'source',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'output',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.BgpPa' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.BgpPa',
False,
[
_MetaInfoClassMember('input', REFERENCE_CLASS, 'Input' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.BgpPa.Input',
[], [],
''' BGP PA input config
''',
'input',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('output', REFERENCE_CLASS, 'Output' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.BgpPa.Output',
[], [],
''' BGP PA output config
''',
'output',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'bgp-pa',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.Utime' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.Utime',
False,
[
],
'Cisco-IOS-XR-ipv6-ma-oper',
'utime',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.IdbUtime' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.IdbUtime',
False,
[
],
'Cisco-IOS-XR-ipv6-ma-oper',
'idb-utime',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.CapsUtime' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.CapsUtime',
False,
[
],
'Cisco-IOS-XR-ipv6-ma-oper',
'caps-utime',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.FwdEnUtime' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.FwdEnUtime',
False,
[
],
'Cisco-IOS-XR-ipv6-ma-oper',
'fwd-en-utime',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.FwdDisUtime' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.FwdDisUtime',
False,
[
],
'Cisco-IOS-XR-ipv6-ma-oper',
'fwd-dis-utime',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.MulticastGroup' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.MulticastGroup',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 Address of Multicast Group
''',
'address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'multicast-group',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.Address' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.Address',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 Address
''',
'address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('address-state', REFERENCE_ENUM_CLASS, 'Ipv6MaIfAddrStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6MaIfAddrStateEnum',
[], [],
''' State of Address
''',
'address_state',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('is-anycast', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Anycast address
''',
'is_anycast',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('prefix-length', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Prefix Length of IPv6 Address
''',
'prefix_length',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('route-tag', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Route-tag of the Address
''',
'route_tag',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'address',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.ClientMulticastGroup' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.ClientMulticastGroup',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 Address of Multicast Group
''',
'address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'client-multicast-group',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [b'(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' The name of the interface
''',
'interface_name',
'Cisco-IOS-XR-ipv6-ma-oper', True),
_MetaInfoClassMember('access-control-list', REFERENCE_CLASS, 'AccessControlList' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.AccessControlList',
[], [],
''' IPv6 Access Control List
''',
'access_control_list',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('address', REFERENCE_LIST, 'Address' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.Address',
[], [],
''' Address List
''',
'address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('bgp-pa', REFERENCE_CLASS, 'BgpPa' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.BgpPa',
[], [],
''' BGP PA config on the interface
''',
'bgp_pa',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('caps-utime', REFERENCE_CLASS, 'CapsUtime' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.CapsUtime',
[], [],
''' CAPS Add Time
''',
'caps_utime',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('client-multicast-group', REFERENCE_LIST, 'ClientMulticastGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.ClientMulticastGroup',
[], [],
''' IPv6 Client Multicast Group
''',
'client_multicast_group',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('flow-tag-dst', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is BGP Flow Tag Destination is enable
''',
'flow_tag_dst',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('flow-tag-src', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is BGP Flow Tag Source is enable
''',
'flow_tag_src',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('fwd-dis-utime', REFERENCE_CLASS, 'FwdDisUtime' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.FwdDisUtime',
[], [],
''' FWD DISABLE Time
''',
'fwd_dis_utime',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('fwd-en-utime', REFERENCE_CLASS, 'FwdEnUtime' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.FwdEnUtime',
[], [],
''' FWD ENABLE Time
''',
'fwd_en_utime',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('idb-utime', REFERENCE_CLASS, 'IdbUtime' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.IdbUtime',
[], [],
''' IDB Create Time
''',
'idb_utime',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('is-icmp-unreach-enabled', ATTRIBUTE, 'bool' , None, None,
[], [],
''' ICMP unreach Enable
''',
'is_icmp_unreach_enabled',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('line-state', REFERENCE_ENUM_CLASS, 'Ipv6MaIfLineStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6MaIfLineStateEnum',
[], [],
''' State of Interface Line
''',
'line_state',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('link-local-address', REFERENCE_CLASS, 'LinkLocalAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.LinkLocalAddress',
[], [],
''' Link Local Address
''',
'link_local_address',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('mlacp-active', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is mLACP state Active (valid if RG ID exists)
''',
'mlacp_active',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('mtu', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' IPv6 MTU
''',
'mtu',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('multi-access-control-list', REFERENCE_CLASS, 'MultiAccessControlList' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.MultiAccessControlList',
[], [],
''' Multi IPv6 Access Control List
''',
'multi_access_control_list',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('multicast-group', REFERENCE_LIST, 'MulticastGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.MulticastGroup',
[], [],
''' IPv6 Multicast Group
''',
'multicast_group',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('operation-state', REFERENCE_ENUM_CLASS, 'Ipv6MaOperStateEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6MaOperStateEnum',
[], [],
''' IPv6 Operation State
''',
'operation_state',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('rg-id-exists', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Does ICCP RG ID exist on the interface?
''',
'rg_id_exists',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('rpf', REFERENCE_CLASS, 'Rpf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.Rpf',
[], [],
''' RPF config on the interface
''',
'rpf',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('utime', REFERENCE_CLASS, 'Utime' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.Utime',
[], [],
''' Address Publish Time
''',
'utime',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'detail',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details',
False,
[
_MetaInfoClassMember('detail', REFERENCE_LIST, 'Detail' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail',
[], [],
''' Detail interface IPv6 network operational
data for an interface
''',
'detail',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'details',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf',
False,
[
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [b'[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' The VRF name
''',
'vrf_name',
'Cisco-IOS-XR-ipv6-ma-oper', True),
_MetaInfoClassMember('briefs', REFERENCE_CLASS, 'Briefs' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs',
[], [],
''' Brief interface IPv6 network operational
data for a node
''',
'briefs',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('details', REFERENCE_CLASS, 'Details' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details',
[], [],
''' Detail interface IPv4 network operational
data for a node
''',
'details',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('global-briefs', REFERENCE_CLASS, 'GlobalBriefs' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs',
[], [],
''' Brief interface IPv6 network operational
data from global data
''',
'global_briefs',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('global-details', REFERENCE_CLASS, 'GlobalDetails' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails',
[], [],
''' Detail interface IPv4 network operational
data for global data
''',
'global_details',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'vrf',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Vrfs' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Vrfs',
False,
[
_MetaInfoClassMember('vrf', REFERENCE_LIST, 'Vrf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf',
[], [],
''' VRF ID of an interface belong to
''',
'vrf',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'vrfs',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Summary.IfUpUp' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Summary.IfUpUp',
False,
[
_MetaInfoClassMember('ip-assigned', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of interfaces with explicit addresses
''',
'ip_assigned',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('ip-unassigned', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of unassigned interfaces without explicit
address
''',
'ip_unassigned',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('ip-unnumbered', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of unnumbered interfaces with explicit
addresses
''',
'ip_unnumbered',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'if-up-up',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Summary.IfUpDown' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Summary.IfUpDown',
False,
[
_MetaInfoClassMember('ip-assigned', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of interfaces with explicit addresses
''',
'ip_assigned',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('ip-unassigned', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of unassigned interfaces without explicit
address
''',
'ip_unassigned',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('ip-unnumbered', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of unnumbered interfaces with explicit
addresses
''',
'ip_unnumbered',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'if-up-down',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Summary.IfDownDown' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Summary.IfDownDown',
False,
[
_MetaInfoClassMember('ip-assigned', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of interfaces with explicit addresses
''',
'ip_assigned',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('ip-unassigned', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of unassigned interfaces without explicit
address
''',
'ip_unassigned',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('ip-unnumbered', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of unnumbered interfaces with explicit
addresses
''',
'ip_unnumbered',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'if-down-down',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Summary.IfShutdownDown' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Summary.IfShutdownDown',
False,
[
_MetaInfoClassMember('ip-assigned', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of interfaces with explicit addresses
''',
'ip_assigned',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('ip-unassigned', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of unassigned interfaces without explicit
address
''',
'ip_unassigned',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('ip-unnumbered', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of unnumbered interfaces with explicit
addresses
''',
'ip_unnumbered',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'if-shutdown-down',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData.Summary' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData.Summary',
False,
[
_MetaInfoClassMember('if-down-down', REFERENCE_CLASS, 'IfDownDown' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Summary.IfDownDown',
[], [],
''' Number of interfaces (down,down)
''',
'if_down_down',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('if-shutdown-down', REFERENCE_CLASS, 'IfShutdownDown' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Summary.IfShutdownDown',
[], [],
''' Number of interfaces (shutdown,down)
''',
'if_shutdown_down',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('if-up-down', REFERENCE_CLASS, 'IfUpDown' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Summary.IfUpDown',
[], [],
''' Number of interfaces (up,down)
''',
'if_up_down',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('if-up-down-basecaps-up', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of interfaces (up,down) with basecaps up
''',
'if_up_down_basecaps_up',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('if-up-up', REFERENCE_CLASS, 'IfUpUp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Summary.IfUpUp',
[], [],
''' Number of interfaces (up,up)
''',
'if_up_up',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'summary',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node.InterfaceData' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node.InterfaceData',
False,
[
_MetaInfoClassMember('summary', REFERENCE_CLASS, 'Summary' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Summary',
[], [],
''' Summary of IPv6 network operational interface
data on a node
''',
'summary',
'Cisco-IOS-XR-ipv6-ma-oper', False),
_MetaInfoClassMember('vrfs', REFERENCE_CLASS, 'Vrfs' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData.Vrfs',
[], [],
''' VRF specific IPv6 network operational
interface data
''',
'vrfs',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'interface-data',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes.Node' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes.Node',
False,
[
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], [b'([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' The node name
''',
'node_name',
'Cisco-IOS-XR-ipv6-ma-oper', True),
_MetaInfoClassMember('interface-data', REFERENCE_CLASS, 'InterfaceData' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node.InterfaceData',
[], [],
''' IPv6 network operational interface data
''',
'interface_data',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'node',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network.Nodes' : {
'meta_info' : _MetaInfoClass('Ipv6Network.Nodes',
False,
[
_MetaInfoClassMember('node', REFERENCE_LIST, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes.Node',
[], [],
''' IPv6 network operational data for a particular
node
''',
'node',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'nodes',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
'Ipv6Network' : {
'meta_info' : _MetaInfoClass('Ipv6Network',
False,
[
_MetaInfoClassMember('nodes', REFERENCE_CLASS, 'Nodes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper', 'Ipv6Network.Nodes',
[], [],
''' Node-specific IPv6 network operational data
''',
'nodes',
'Cisco-IOS-XR-ipv6-ma-oper', False),
],
'Cisco-IOS-XR-ipv6-ma-oper',
'ipv6-network',
_yang_ns._namespaces['Cisco-IOS-XR-ipv6-ma-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv6_ma_oper'
),
},
}
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs.Brief.LinkLocalAddress']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs.Brief']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs.Brief.Address']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs.Brief']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs.Brief']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.BgpPa.Input']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.BgpPa']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.BgpPa.Output']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.BgpPa']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.LinkLocalAddress']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.AccessControlList']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.MultiAccessControlList']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.Rpf']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.BgpPa']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.Utime']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.IdbUtime']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.CapsUtime']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.FwdEnUtime']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.FwdDisUtime']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.MulticastGroup']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.Address']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail.ClientMulticastGroup']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails.GlobalDetail']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs.GlobalBrief.LinkLocalAddress']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs.GlobalBrief']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs.GlobalBrief.Address']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs.GlobalBrief']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs.GlobalBrief']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.BgpPa.Input']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.BgpPa']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.BgpPa.Output']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.BgpPa']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.LinkLocalAddress']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.AccessControlList']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.MultiAccessControlList']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.Rpf']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.BgpPa']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.Utime']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.IdbUtime']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.CapsUtime']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.FwdEnUtime']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.FwdDisUtime']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.MulticastGroup']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.Address']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail.ClientMulticastGroup']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details.Detail']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Briefs']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalDetails']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.GlobalBriefs']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf.Details']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs.Vrf']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Summary.IfUpUp']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Summary']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Summary.IfUpDown']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Summary']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Summary.IfDownDown']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Summary']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Summary.IfShutdownDown']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Summary']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Vrfs']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData.Summary']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node.InterfaceData']['meta_info']
_meta_table['Ipv6Network.Nodes.Node.InterfaceData']['meta_info'].parent =_meta_table['Ipv6Network.Nodes.Node']['meta_info']
_meta_table['Ipv6Network.Nodes.Node']['meta_info'].parent =_meta_table['Ipv6Network.Nodes']['meta_info']
_meta_table['Ipv6Network.Nodes']['meta_info'].parent =_meta_table['Ipv6Network']['meta_info']
| apache-2.0 |
linktlh/Toontown-journey | toontown/battle/MovieCamera.py | 38797 | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from BattleBase import *
from BattleProps import *
from toontown.toonbase.ToontownBattleGlobals import *
from SuitBattleGlobals import *
from direct.directnotify import DirectNotifyGlobal
import random
import MovieUtil
notify = DirectNotifyGlobal.directNotify.newCategory('MovieCamera')
def chooseHealShot(heals, attackDuration):
    """Pick the camera sequence for a toon heal movie.

    heals -- list of heal attack dicts (each has 'level', optionally 'petId')
    attackDuration -- length of the heal movie in seconds
    A level-6 heal that is not a pet trick is 'uber': it plays only a long
    close shot; everything else plays an open shot followed by a close shot.
    """
    isUber = 0
    for heal in heals:
        if heal['level'] == 6 and not heal.get('petId'):
            isUber = 1
    # The open shot is built in both cases (its duration/name feed the
    # close-shot choice, and building it keeps the RNG draw identical to
    # the historical behavior), but in the uber case it is not played.
    openShot = chooseHealOpenShot(heals, attackDuration, isUber)
    openDuration = openShot.getDuration()
    openName = openShot.getName()
    if isUber:
        closeShot = chooseHealCloseShot(heals, openDuration, openName, attackDuration * 3, isUber)
        return Sequence(closeShot)
    closeShot = chooseHealCloseShot(heals, openDuration, openName, attackDuration, isUber)
    return Sequence(openShot, closeShot)
def chooseHealOpenShot(heals, attackDuration, isUber = 0):
    """Opening shot for a heal movie: a fixed toon-group framing.

    isUber -- nonzero for a level-6 heal, which holds the shot longer.
    """
    av = None
    duration = 2.8
    if isUber:
        duration = 5.0
    # Single choice today; the random.choice pattern matches the other
    # choose*Shot helpers so more framings can be added later.
    shotChoices = [toonGroupShot]
    # Direct call instead of the deprecated apply() builtin (removed in Py3).
    return random.choice(shotChoices)(av, duration)
def chooseHealMidShot(heals, attackDuration, isUber = 0):
    """Mid-movie shot for a heal: a high angle on the toon group."""
    av = None
    # Both branches currently use the same length; the isUber branch is
    # kept as a tuning point, mirroring chooseHealOpenShot.
    duration = 2.1
    if isUber:
        duration = 2.1
    shotChoices = [toonGroupHighShot]
    # Direct call instead of the deprecated apply() builtin (removed in Py3).
    return random.choice(shotChoices)(av, duration)
def chooseHealCloseShot(heals, openDuration, openName, attackDuration, isUber = 0):
    """Closing shot for a heal; fills the time the open shot left over."""
    av = None
    duration = attackDuration - openDuration
    shotChoices = [toonGroupShot]
    if isUber:
        shotChoices = [allGroupLowShot]
    # Direct call instead of the deprecated apply() builtin (removed in Py3).
    return random.choice(shotChoices)(av, duration)
def chooseTrapShot(traps, attackDuration, enterDuration = 0, exitDuration = 0):
    """Camera sequence for a trap movie: NPC entry, open shot, close shot,
    NPC exit, played back to back."""
    entry = chooseNPCEnterShot(traps, enterDuration)
    opening = chooseTrapOpenShot(traps, attackDuration)
    closing = chooseTrapCloseShot(
        traps, opening.getDuration(), opening.getName(), attackDuration)
    leaving = chooseNPCExitShot(traps, exitDuration)
    return Sequence(entry, opening, closing, leaving)
def chooseTrapOpenShot(traps, attackDuration):
    """Opening shot for a trap movie: a fixed 3-second low group framing."""
    av = None
    duration = 3.0
    shotChoices = [allGroupLowShot]
    # Direct call instead of the deprecated apply() builtin (removed in Py3).
    return random.choice(shotChoices)(av, duration)
def chooseTrapCloseShot(traps, openDuration, openName, attackDuration):
    """Closing shot for a trap movie; fills the remaining attack time."""
    av = None
    duration = attackDuration - openDuration
    shotChoices = [allGroupLowShot]
    # Direct call instead of the deprecated apply() builtin (removed in Py3).
    return random.choice(shotChoices)(av, duration)
def chooseLureShot(lures, attackDuration, enterDuration = 0.0, exitDuration = 0.0):
    """Camera sequence for a lure movie: NPC entry, open shot, close shot,
    NPC exit, played back to back."""
    entry = chooseNPCEnterShot(lures, enterDuration)
    opening = chooseLureOpenShot(lures, attackDuration)
    closing = chooseLureCloseShot(
        lures, opening.getDuration(), opening.getName(), attackDuration)
    leaving = chooseNPCExitShot(lures, exitDuration)
    return Sequence(entry, opening, closing, leaving)
def chooseLureOpenShot(lures, attackDuration):
    """Opening shot for a lure movie: a fixed 3-second low group framing."""
    av = None
    duration = 3.0
    shotChoices = [allGroupLowShot]
    # Direct call instead of the deprecated apply() builtin (removed in Py3).
    return random.choice(shotChoices)(av, duration)
def chooseLureCloseShot(lures, openDuration, openName, attackDuration):
    """Closing shot for a lure movie.

    When any suit in the battle stands on a train-track trap (level-index
    UBER_GAG_LEVEL_INDEX), frame the luring toon instead of the generic
    low group shot.
    """
    av = None
    duration = attackDuration - openDuration
    hasTrainTrackTrap = False
    battle = lures[0]['battle']
    for suit in battle.suits:
        if hasattr(suit, 'battleTrap') and suit.battleTrap == UBER_GAG_LEVEL_INDEX:
            hasTrainTrackTrap = True
    if hasTrainTrackTrap:
        shotChoices = [avatarLureTrainTrackShot]
        av = lures[0]['toon']
    else:
        shotChoices = [allGroupLowShot]
    # Direct call instead of the deprecated apply() builtin (removed in Py3).
    return random.choice(shotChoices)(av, duration)
def chooseSoundShot(sounds, targets, attackDuration, enterDuration = 0.0, exitDuration = 0.0):
    """Camera sequence for a sound movie: NPC entry, open shot, close shot,
    NPC exit, played back to back."""
    entry = chooseNPCEnterShot(sounds, enterDuration)
    opening = chooseSoundOpenShot(sounds, targets, attackDuration)
    closing = chooseSoundCloseShot(
        sounds, targets, opening.getDuration(), opening.getName(), attackDuration)
    leaving = chooseNPCExitShot(sounds, exitDuration)
    return Sequence(entry, opening, closing, leaving)
def chooseSoundOpenShot(sounds, targets, attackDuration):
    """Opening shot for sound gags.

    A single toon gets close-up choices (wider framing for a level-6 uber
    sound, which also holds longer); 2-4 toons get group framings.
    Raises via notify.error on any other count.
    """
    duration = 3.1
    isUber = 0
    for sound in sounds:
        if sound['level'] == 6:
            isUber = 1
            duration = 5.0
    numSounds = len(sounds)
    av = None
    if numSounds == 1:
        av = sounds[0]['toon']
        if isUber:
            shotChoices = [avatarCloseUpThreeQuarterRightShotWide, allGroupLowShot, suitGroupThreeQuarterLeftBehindShot]
        else:
            shotChoices = [avatarCloseUpThreeQuarterRightShot, allGroupLowShot, suitGroupThreeQuarterLeftBehindShot]
    elif numSounds >= 2 and numSounds <= 4:
        shotChoices = [allGroupLowShot, suitGroupThreeQuarterLeftBehindShot]
    else:
        notify.error('Bad number of sounds: %s' % numSounds)
    # Direct call instead of the deprecated apply() builtin (removed in Py3).
    return random.choice(shotChoices)(av, duration)
def chooseSoundCloseShot(sounds, targets, openDuration, openName, attackDuration):
    """Closing shot for sound gags; frames the lone target suit when exactly
    one suit was hit, otherwise a group framing."""
    numSuits = len(targets)
    av = None
    duration = attackDuration - openDuration
    if numSuits == 1:
        av = targets[0]['suit']
        shotChoices = [avatarCloseUpThrowShot,
         avatarCloseUpThreeQuarterLeftShot,
         allGroupLowShot,
         suitGroupThreeQuarterLeftBehindShot]
    elif numSuits >= 2 and numSuits <= 4:
        shotChoices = [allGroupLowShot, suitGroupThreeQuarterLeftBehindShot]
    else:
        notify.error('Bad number of suits: %s' % numSuits)
    # Direct call instead of the deprecated apply() builtin (removed in Py3).
    return random.choice(shotChoices)(av, duration)
def chooseThrowShot(throws, suitThrowsDict, attackDuration):
    """Camera sequence for a throw movie: open shot followed by close shot."""
    opening = chooseThrowOpenShot(throws, suitThrowsDict, attackDuration)
    closing = chooseThrowCloseShot(
        throws, suitThrowsDict, opening.getDuration(), opening.getName(), attackDuration)
    return Sequence(opening, closing)
def chooseThrowOpenShot(throws, suitThrowsDict, attackDuration):
    """Opening shot for throw gags; close-ups for a single thrower, group
    framings for 2-4.  Raises via notify.error on any other count."""
    numThrows = len(throws)
    av = None
    duration = 3.0
    if numThrows == 1:
        av = throws[0]['toon']
        shotChoices = [avatarCloseUpThrowShot,
         avatarCloseUpThreeQuarterRightShot,
         avatarBehindShot,
         allGroupLowShot,
         suitGroupThreeQuarterLeftBehindShot]
    elif numThrows >= 2 and numThrows <= 4:
        shotChoices = [allGroupLowShot, suitGroupThreeQuarterLeftBehindShot]
    else:
        notify.error('Bad number of throws: %s' % numThrows)
    # Direct call instead of the deprecated apply() builtin (removed in Py3).
    return random.choice(shotChoices)(av, duration)
def chooseThrowCloseShot(throws, suitThrowsDict, openDuration, openName, attackDuration):
    """Closing shot for throw gags; frames the lone target suit when exactly
    one suit was hit, otherwise a group framing."""
    numSuits = len(suitThrowsDict)
    av = None
    duration = attackDuration - openDuration
    if numSuits == 1:
        # next(iter(...)) yields the same first key as the Py2-only
        # suitThrowsDict.keys()[0] and also works on Python 3 views.
        av = base.cr.doId2do[next(iter(suitThrowsDict))]
        shotChoices = [avatarCloseUpThrowShot,
         avatarCloseUpThreeQuarterLeftShot,
         allGroupLowShot,
         suitGroupThreeQuarterLeftBehindShot]
    elif numSuits >= 2 and numSuits <= 4 or numSuits == 0:
        shotChoices = [allGroupLowShot, suitGroupThreeQuarterLeftBehindShot]
    else:
        notify.error('Bad number of suits: %s' % numSuits)
    # Direct call instead of the deprecated apply() builtin (removed in Py3).
    return random.choice(shotChoices)(av, duration)
def chooseSquirtShot(squirts, suitSquirtsDict, attackDuration):
    """Camera sequence for a squirt movie: open shot followed by close shot."""
    opening = chooseSquirtOpenShot(squirts, suitSquirtsDict, attackDuration)
    closing = chooseSquirtCloseShot(
        squirts, suitSquirtsDict, opening.getDuration(), opening.getName(), attackDuration)
    return Sequence(opening, closing)
def chooseSquirtOpenShot(squirts, suitSquirtsDict, attackDuration):
    """Opening shot for squirt gags; close-ups for a single squirter, group
    framings for 2-4.  Raises via notify.error on any other count."""
    numSquirts = len(squirts)
    av = None
    duration = 3.0
    if numSquirts == 1:
        av = squirts[0]['toon']
        shotChoices = [avatarCloseUpThrowShot,
         avatarCloseUpThreeQuarterRightShot,
         avatarBehindShot,
         allGroupLowShot,
         suitGroupThreeQuarterLeftBehindShot]
    elif numSquirts >= 2 and numSquirts <= 4:
        shotChoices = [allGroupLowShot, suitGroupThreeQuarterLeftBehindShot]
    else:
        notify.error('Bad number of squirts: %s' % numSquirts)
    # Direct call instead of the deprecated apply() builtin (removed in Py3).
    return random.choice(shotChoices)(av, duration)
def chooseSquirtCloseShot(squirts, suitSquirtsDict, openDuration, openName, attackDuration):
    """Closing shot for squirt gags; frames the lone target suit when exactly
    one suit was hit, otherwise a group framing."""
    numSuits = len(suitSquirtsDict)
    av = None
    duration = attackDuration - openDuration
    if numSuits == 1:
        # next(iter(...)) yields the same first key as the Py2-only
        # suitSquirtsDict.keys()[0] and also works on Python 3 views.
        av = base.cr.doId2do[next(iter(suitSquirtsDict))]
        shotChoices = [avatarCloseUpThrowShot,
         avatarCloseUpThreeQuarterLeftShot,
         allGroupLowShot,
         suitGroupThreeQuarterLeftBehindShot]
    elif numSuits >= 2 and numSuits <= 4:
        shotChoices = [allGroupLowShot, suitGroupThreeQuarterLeftBehindShot]
    else:
        notify.error('Bad number of suits: %s' % numSuits)
    # Direct call instead of the deprecated apply() builtin (removed in Py3).
    return random.choice(shotChoices)(av, duration)
def chooseDropShot(drops, suitDropsDict, attackDuration, enterDuration = 0.0, exitDuration = 0.0):
    """Camera sequence for a drop movie: NPC entry, open shot, close shot,
    NPC exit, played back to back."""
    entry = chooseNPCEnterShot(drops, enterDuration)
    opening = chooseDropOpenShot(drops, suitDropsDict, attackDuration)
    closing = chooseDropCloseShot(
        drops, suitDropsDict, opening.getDuration(), opening.getName(), attackDuration)
    leaving = chooseNPCExitShot(drops, exitDuration)
    return Sequence(entry, opening, closing, leaving)
def chooseDropOpenShot(drops, suitDropsDict, attackDuration):
    """Opening shot for drop gags; close-ups for a single dropper, group
    framings for 0 or 2-4.  Raises via notify.error on any other count."""
    numDrops = len(drops)
    av = None
    duration = 3.0
    if numDrops == 1:
        av = drops[0]['toon']
        shotChoices = [avatarCloseUpThrowShot,
         avatarCloseUpThreeQuarterRightShot,
         avatarBehindShot,
         allGroupLowShot,
         suitGroupThreeQuarterLeftBehindShot]
    elif numDrops >= 2 and numDrops <= 4 or numDrops == 0:
        shotChoices = [allGroupLowShot, suitGroupThreeQuarterLeftBehindShot]
    else:
        notify.error('Bad number of drops: %s' % numDrops)
    # Direct call instead of the deprecated apply() builtin (removed in Py3).
    return random.choice(shotChoices)(av, duration)
def chooseDropCloseShot(drops, suitDropsDict, openDuration, openName, attackDuration):
    """Closing shot for drop gags; frames the lone target suit when exactly
    one suit was hit, otherwise a group framing."""
    numSuits = len(suitDropsDict)
    av = None
    duration = attackDuration - openDuration
    if numSuits == 1:
        # next(iter(...)) yields the same first key as the Py2-only
        # suitDropsDict.keys()[0] and also works on Python 3 views.
        av = base.cr.doId2do[next(iter(suitDropsDict))]
        shotChoices = [avatarCloseUpThrowShot,
         avatarCloseUpThreeQuarterLeftShot,
         allGroupLowShot,
         suitGroupThreeQuarterLeftBehindShot]
    elif numSuits >= 2 and numSuits <= 4 or numSuits == 0:
        shotChoices = [allGroupLowShot, suitGroupThreeQuarterLeftBehindShot]
    else:
        notify.error('Bad number of suits: %s' % numSuits)
    return random.choice(shotChoices)(av, duration)
def chooseNPCEnterShot(enters, entersDuration):
    """Shot held while an NPC/SOS helper walks in: a toon-group framing."""
    av = None
    duration = entersDuration
    shotChoices = [toonGroupShot]
    # Direct call instead of the deprecated apply() builtin (removed in Py3).
    return random.choice(shotChoices)(av, duration)
def chooseNPCExitShot(exits, exitsDuration):
    """Shot held while an NPC/SOS helper walks out: a toon-group framing."""
    av = None
    duration = exitsDuration
    shotChoices = [toonGroupShot]
    # Direct call instead of the deprecated apply() builtin (removed in Py3).
    return random.choice(shotChoices)(av, duration)
def chooseSuitShot(attack, attackDuration):
    """Build the camera track and play-by-play text for a suit attack movie.

    attack -- suit attack dict ('suit', 'id', 'group', 'target', 'battle',
              'playByPlayText', 'name')
    attackDuration -- length of the attack movie in seconds

    Returns a Parallel of the camera track and the attack-name text, with a
    'toon was defeated' announcement appended when any target went sad.
    """
    duration = attackDuration
    if duration < 0:
        duration = 1e-06
    # Collect defeat announcements for any targets that died.
    diedTrack = None
    groupStatus = attack['group']
    target = attack['target']
    if groupStatus == ATK_TGT_SINGLE:
        toon = target['toon']
        if target['died'] != 0:
            pbpText = attack['playByPlayText']
            diedTextList = [toon.getName() + ' was defeated!']
            diedTrack = pbpText.getToonsDiedInterval(diedTextList, duration)
    elif groupStatus == ATK_TGT_GROUP:
        deadToons = []
        for targetDict in attack['target']:
            if targetDict['died'] != 0:
                deadToons.append(targetDict['toon'])
        if len(deadToons) > 0:
            pbpText = attack['playByPlayText']
            diedTextList = []
            for toon in deadToons:
                diedTextList.append(toon.getName() + ' was defeated!')
            diedTrack = pbpText.getToonsDiedInterval(diedTextList, duration)
    suit = attack['suit']
    name = attack['id']
    camTrack = Sequence()

    def defaultCamera(attack = attack, attackDuration = attackDuration, openShotDuration = 3.5, target = target):
        # Random open/close camera pair; group attacks get the group variant.
        if attack['group'] == ATK_TGT_GROUP:
            return randomGroupAttackCam(attack['suit'], target, attack['battle'], attackDuration, openShotDuration)
        else:
            return randomAttackCam(attack['suit'], target['toon'], attack['battle'], attackDuration, openShotDuration, 'suit')

    # Almost every attack uses the default camera and differs only in how
    # long the opening shot holds, so table the durations instead of a
    # 60-branch elif chain.  Attacks absent from the table are handled as
    # special cases below or warned about and given the 3.5s default.
    openShotDurations = {
        AUDIT: 3.5,
        BITE: 2.8,
        BOUNCE_CHECK: 3.5,
        BRAIN_STORM: 2.4,
        BUZZ_WORD: 4.7,
        CALCULATE: 3.5,
        CANNED: 2.9,
        CHOMP: 2.8,
        CLIPON_TIE: 3.3,
        CRUNCH: 3.4,
        DEMOTION: 1.7,
        DOUBLE_TALK: 3.9,
        EVICTION_NOTICE: 3.2,
        EVIL_EYE: 2.7,
        FILIBUSTER: 2.7,
        FILL_WITH_LEAD: 3.2,
        FINGER_WAG: 2.3,
        FIRED: 1.7,
        FOUNTAIN_PEN: 2.6,
        FREEZE_ASSETS: 2.5,
        HALF_WINDSOR: 2.8,
        HEAD_SHRINK: 1.3,
        GLOWER_POWER: 1.4,
        GUILT_TRIP: 0.9,
        HANG_UP: 5.1,
        HOT_AIR: 2.5,
        JARGON: 3.5,
        LEGALESE: 1.5,
        LIQUIDATE: 2.5,
        MARKET_CRASH: 2.9,
        MUMBO_JUMBO: 2.8,
        PARADIGM_SHIFT: 1.6,
        PECKING_ORDER: 2.8,
        PLAY_HARDBALL: 2.3,
        PINK_SLIP: 2.8,
        POUND_KEY: 2.8,
        POWER_TIE: 2.4,
        POWER_TRIP: 1.1,
        RAZZLE_DAZZLE: 2.2,
        RED_TAPE: 3.5,
        RE_ORG: 1.1,
        RESTRAINING_ORDER: 2.8,
        ROLODEX: 3.5,
        RUBBER_STAMP: 3.2,
        RUB_OUT: 2.2,
        SACKED: 2.9,
        SCHMOOZE: 2.8,
        SHRED: 4.1,
        SPIN: 1.7,
        SYNERGY: 1.7,
        TABULATE: 3.5,
        TEE_OFF: 4.5,
        WATERCOOLER: 3.5,
        WITHDRAWAL: 1.2,
        WRITE_OFF: 3.5,
    }
    if name == PICK_POCKET:
        camTrack.append(allGroupLowShot(suit, 2.7))
    elif name == QUAKE:
        # Strongest rumble attack: big shake, quake-style timing.
        camTrack.append(suitCameraShakeShot(suit, attackDuration, 5.15, 1))
    elif name == SHAKE:
        camTrack.append(suitCameraShakeShot(suit, attackDuration, 1.75))
    elif name == TREMOR:
        camTrack.append(suitCameraShakeShot(suit, attackDuration, 0.25))
    elif name in openShotDurations:
        camTrack.append(defaultCamera(openShotDuration=openShotDurations[name]))
    else:
        notify.warning('unknown attack id in chooseSuitShot: %d using default cam' % name)
        camTrack.append(defaultCamera())
    pbpText = attack['playByPlayText']
    displayName = TTLocalizer.SuitAttackNames[attack['name']]
    pbpTrack = pbpText.getShowInterval(displayName, 3.5)
    track = Parallel(camTrack, pbpTrack)
    if diedTrack == None:
        return track
    pbpTrackDied = Sequence(pbpTrack, diedTrack)
    mtrack = Parallel(track, pbpTrackDied)
    return mtrack
def chooseSuitCloseShot(attack, openDuration, openName, attackDuration):
    """Closing shot after a suit attack, with defeat announcements for any
    toons that went sad.

    Returns the camera interval alone, or in Parallel with the died-text
    interval when at least one target was defeated.
    """
    av = None
    duration = attackDuration - openDuration
    if duration < 0:
        duration = 1e-06
    groupStatus = attack['group']
    diedTrack = None
    if groupStatus == ATK_TGT_SINGLE:
        av = attack['target']['toon']
        shotChoices = [avatarCloseUpThreeQuarterRightShot, suitGroupThreeQuarterLeftBehindShot]
        if attack['target']['died'] != 0:
            pbpText = attack['playByPlayText']
            diedTextList = [av.getName() + ' was defeated!']
            diedTrack = pbpText.getToonsDiedInterval(diedTextList, duration)
    elif groupStatus == ATK_TGT_GROUP:
        shotChoices = [allGroupLowShot, suitGroupThreeQuarterLeftBehindShot]
        deadToons = []
        for targetDict in attack['target']:
            if targetDict['died'] != 0:
                deadToons.append(targetDict['toon'])
        if len(deadToons) > 0:
            pbpText = attack['playByPlayText']
            diedTextList = []
            for toon in deadToons:
                diedTextList.append(toon.getName() + ' was defeated!')
            diedTrack = pbpText.getToonsDiedInterval(diedTextList, duration)
    else:
        notify.error('Bad groupStatus: %s' % groupStatus)
    # Direct call instead of the deprecated apply() builtin (removed in Py3);
    # the unreachable trailing 'return' of the old version is dropped.
    track = random.choice(shotChoices)(av, duration)
    if diedTrack == None:
        return track
    return Parallel(track, diedTrack)
def makeShot(x, y, z, h, p, r, duration, other = None, name = 'makeShot'):
    """Hold the camera at (x, y, z, h, p, r) for 'duration' seconds,
    relative to 'other' when one is supplied."""
    if not other:
        return heldShot(x, y, z, h, p, r, duration, name)
    return heldRelativeShot(other, x, y, z, h, p, r, duration, name)
def focusShot(x, y, z, duration, target, other = None, splitFocusPoint = None, name = 'focusShot'):
    """Place the camera at (x, y, z) (relative to 'other' when given), aim it
    at 'target' (or between 'target' and 'splitFocusPoint'), then hold for
    'duration' seconds.  The camera moves when the interval runs."""
    if other:
        placeCam = Func(base.camera.setPos, other, Point3(x, y, z))
    else:
        placeCam = Func(base.camera.setPos, Point3(x, y, z))
    if splitFocusPoint:
        aimCam = Func(focusCameraBetweenPoints, target, splitFocusPoint)
    else:
        aimCam = Func(base.camera.lookAt, target)
    return Sequence(placeCam, aimCam, Wait(duration))
def moveShot(x, y, z, h, p, r, duration, other = None, name = 'moveShot'):
    # Thin alias for motionShot: lerp the camera to the given pos/hpr.
    return motionShot(x, y, z, h, p, r, duration, other, name)
def focusMoveShot(x, y, z, duration, target, other = None, name = 'focusMoveShot'):
    # Teleport the camera to (x, y, z) and aim at 'target' only to sample the
    # resulting HPR, then build a lerp to that pose.
    # NOTE(review): this moves the live camera immediately, before the
    # returned interval ever plays -- confirm callers expect that.
    base.camera.setPos(Point3(x, y, z))
    base.camera.lookAt(target)
    hpr = base.camera.getHpr()
    return motionShot(x, y, z, hpr[0], hpr[1], hpr[2], duration, other, name)
def chooseSOSShot(av, duration):
    """Random framing for an SOS toon, held for 'duration' seconds."""
    shotChoices = [avatarCloseUpThreeQuarterRightShot,
     avatarBehindShot,
     avatarBehindHighShot,
     suitGroupThreeQuarterLeftBehindShot]
    # Direct call instead of the deprecated apply() builtin (removed in Py3).
    return random.choice(shotChoices)(av, duration)
def chooseRewardShot(av, duration, allowGroupShot = 1):
    """Hold a reward-scene camera on 'av' for 'duration' seconds.

    The framing is deferred (via Func) until the interval actually runs so
    it can react to the avatar's animation at that moment: a toon playing
    'victory' (or any toon when group shots are disallowed) gets one of two
    close-up framings chosen at random; otherwise a wide group shot is used.
    """

    def _placeRewardCamera(toon):
        wantCloseUp = toon.playingAnim == 'victory' or not allowGroupShot
        if not wantCloseUp:
            base.camera.setPosHpr(10, 0, 10, 115, -30, 0)
            return
        closeUps = [
            (0, 8, toon.getHeight() * 0.66, 179, 15, 0),
            (5.2, 5.45, toon.getHeight() * 0.66, 131.5, 3.6, 0),
        ]
        base.camera.setPosHpr(toon, *random.choice(closeUps))

    return Sequence(Func(_placeRewardCamera, av), Wait(duration))
def heldShot(x, y, z, h, p, r, duration, name = 'heldShot'):
    """Snap the camera to a fixed pos/hpr, then hold for 'duration' seconds."""
    return Sequence(
        Func(base.camera.setPosHpr, x, y, z, h, p, r),
        Wait(duration),
        name=name)
def heldRelativeShot(other, x, y, z, h, p, r, duration, name = 'heldRelativeShot'):
track = Sequence(name=name)
track.append(Func(base.camera.setPosHpr, other, x, y, z, h, p, r))
track.append(Wait(duration))
return track
def motionShot(x, y, z, h, p, r, duration, other = None, name = 'motionShot'):
    # Lerp position and orientation simultaneously over `duration`.
    # NOTE(review): position uses the global `camera` while hpr uses
    # `base.camera`; in Panda3D these normally alias the same node -- confirm.
    if other:
        posTrack = LerpPosInterval(camera, duration, pos=Point3(x, y, z), other=other)
        hprTrack = LerpHprInterval(base.camera, duration, hpr=Point3(h, p, r), other=other)
    else:
        posTrack = LerpPosInterval(camera, duration, pos=Point3(x, y, z))
        hprTrack = LerpHprInterval(base.camera, duration, hpr=Point3(h, p, r))
    return Parallel(posTrack, hprTrack)
def allGroupShot(avatar, duration):
    # Wide static shot of the whole battle group.
    return heldShot(10, 0, 10, 89, -30, 0, duration, 'allGroupShot')
def allGroupLowShot(avatar, duration):
    # Low, level shot of the whole group.
    return heldShot(15, 0, 3, 89, 0, 0, duration, 'allGroupLowShot')
def allGroupLowDiagonalShot(avatar, duration):
    # Low diagonal group shot.
    # NOTE(review): reuses the interval name 'allGroupLowShot' -- looks like a
    # copy/paste leftover; confirm nothing keys on interval names.
    return heldShot(7, 5, 6, 119, -30, 0, duration, 'allGroupLowShot')
def toonGroupShot(avatar, duration):
    # Static shot framing the toon side of the battle.
    return heldShot(10, 0, 10, 115, -30, 0, duration, 'toonGroupShot')
def toonGroupHighShot(avatar, duration):
    # Low camera pitched steeply up at the toons.
    return heldShot(5, 0, 1, 115, 45, 0, duration, 'toonGroupHighShot')
def suitGroupShot(avatar, duration):
    # Static shot framing the suit side of the battle.
    return heldShot(10, 0, 10, 65, -30, 0, duration, 'suitGroupShot')
def suitGroupLowLeftShot(avatar, duration):
    # Low shot of the suits from the left side.
    return heldShot(8.4, -3.85, 2.75, 36.3, 3.25, 0, duration, 'suitGroupLowLeftShot')
def suitGroupThreeQuarterLeftBehindShot(avatar, duration):
    # Three-quarter shot from behind the suits; the side is picked at random.
    if random.random() > 0.5:
        x = 12.37
        h = 134.61
    else:
        x = -12.37
        h = -134.61
    return heldShot(x, 11.5, 8.16, h, -22.7, 0, duration, 'suitGroupThreeQuarterLeftBehindShot')
def suitWakeUpShot(avatar, duration):
    # Static shot used when suits wake up (e.g. after a lure).
    return heldShot(10, -5, 10, 65, -30, 0, duration, 'suitWakeUpShot')
def suitCameraShakeShot(avatar, duration, shakeIntensity, quake = 0):
    # Static side shot with camera shake. quake=1 gives fewer, slower,
    # larger jolts (used for big impacts).
    track = Sequence(name='suitShakeCameraShot')
    if quake == 1:
        shakeDelay = 1.1
        numShakes = 4
    else:
        shakeDelay = 0.3
        numShakes = 5
    postShakeDelay = 0.5
    # Split the remaining time into equal shake slots; inside each slot the
    # actual jolt takes 1/numShakes of the slot, the rest is a wait.
    shakeTime = (duration - shakeDelay - postShakeDelay) / numShakes
    shakeDuration = shakeTime * (1.0 / numShakes)
    shakeWaitInterval = shakeTime * ((numShakes - 1.0) / numShakes)
    # Default arguments bind the computed timings at definition time
    # (standard Python closure-capture idiom).
    def shakeCameraTrack(intensity, shakeWaitInterval = shakeWaitInterval, quake = quake, shakeDuration = shakeDuration, numShakes = numShakes):
        vertShakeTrack = Sequence(Wait(shakeWaitInterval), Func(base.camera.setZ, base.camera.getZ() + intensity / 2), Wait(shakeDuration / 2), Func(base.camera.setZ, base.camera.getZ() - intensity), Wait(shakeDuration / 2), Func(base.camera.setZ, base.camera.getZ() + intensity / 2))
        horizShakeTrack = Sequence(Wait(shakeWaitInterval - shakeDuration / 2), Func(base.camera.setY, base.camera.getY() + intensity / 4), Wait(shakeDuration / 2), Func(base.camera.setY, base.camera.getY() - intensity / 2), Wait(shakeDuration / 2), Func(base.camera.setY, base.camera.getY() + intensity / 4), Wait(shakeDuration / 2), Func(base.camera.lookAt, Point3(0, 0, 0)))
        shakeTrack = Sequence()
        # NOTE(review): the SAME interval instances are appended on every
        # iteration -- confirm the interval system tolerates instance reuse.
        for i in xrange(0, numShakes):
            if quake == 0:
                shakeTrack.append(vertShakeTrack)
            else:
                shakeTrack.append(Parallel(vertShakeTrack, horizShakeTrack))
        return shakeTrack
    # Random side-on vantage point, mirrored half the time.
    x = 10 + random.random() * 3
    if random.random() > 0.5:
        x = -x
    z = 7 + random.random() * 3
    track.append(Func(base.camera.setPos, x, -5, z))
    track.append(Func(base.camera.lookAt, Point3(0, 0, 0)))
    track.append(Wait(shakeDelay))
    track.append(shakeCameraTrack(shakeIntensity))
    track.append(Wait(postShakeDelay))
    return track
def avatarCloseUpShot(avatar, duration):
    # Head-on close-up at roughly chest height.
    return heldRelativeShot(avatar, 0, 8, avatar.getHeight() * 0.66, 179, 15, 0, duration, 'avatarCloseUpShot')
def avatarCloseUpThrowShot(avatar, duration):
    # Close-up angled for throw gags.
    return heldRelativeShot(avatar, 3, 8, avatar.getHeight() * 0.66, 159, 3.6, 0, duration, 'avatarCloseUpThrowShot')
def avatarCloseUpThreeQuarterRightShot(avatar, duration):
    # Three-quarter close-up from the avatar's right.
    return heldRelativeShot(avatar, 5.2, 5.45, avatar.getHeight() * 0.66, 131.5, 3.6, 0, duration, 'avatarCloseUpThreeQuarterRightShot')
def avatarCloseUpThreeQuarterRightShotWide(avatar, duration):
    # Wider variant of the three-quarter-right close-up.
    # NOTE(review): reuses the non-wide shot's interval name; confirm intent.
    return heldRelativeShot(avatar, 7.2, 8.45, avatar.getHeight() * 0.66, 131.5, 3.6, 0, duration, 'avatarCloseUpThreeQuarterRightShot')
def avatarCloseUpThreeQuarterLeftShot(avatar, duration):
    # Mirror of the three-quarter-right close-up.
    return heldRelativeShot(avatar, -5.2, 5.45, avatar.getHeight() * 0.66, -131.5, 3.6, 0, duration, 'avatarCloseUpThreeQuarterLeftShot')
def avatarCloseUpThreeQuarterRightFollowShot(avatar, duration):
    # Hold the three-quarter close-up, then pan toward the avatar's front.
    track = Sequence(name='avatarCloseUpThreeQuarterRightFollowShot')
    track.append(heldRelativeShot(avatar, 5.2, 5.45, avatar.getHeight() * 0.66, 131.5, 3.6, 0, duration * 0.65))
    track.append(LerpHprInterval(nodePath=camera, other=avatar, duration=duration * 0.2, hpr=Point3(110, 3.6, 0), blendType='easeInOut'))
    track.append(Wait(duration * 0.25))
    return track
def avatarCloseUpZoomShot(avatar, duration):
    # Dolly in from 10 to 6 units over the first half, then hold.
    # NOTE(review): the name string is passed positionally to Sequence(),
    # where intervals are expected -- confirm it should be name='...'.
    track = Sequence('avatarCloseUpZoomShot')
    track.append(LerpPosHprInterval(nodePath=camera, other=avatar, duration=duration / 2, startPos=Point3(0, 10, avatar.getHeight()), startHpr=Point3(179, -10, 0), pos=Point3(0, 6, avatar.getHeight()), hpr=Point3(179, -10, 0), blendType='easeInOut'))
    track.append(Wait(duration / 2))
    return track
def avatarBehindShot(avatar, duration):
    # Over-the-shoulder shot from behind the avatar.
    return heldRelativeShot(avatar, 5, -7, avatar.getHeight(), 40, -12, 0, duration, 'avatarBehindShot')
def avatarBehindHighShot(avatar, duration):
    # High shot from behind-left, pitched down at the avatar.
    return heldRelativeShot(avatar, -4, -7, 5 + avatar.getHeight(), -30, -35, 0, duration, 'avatarBehindHighShot')
def avatarBehindHighRightShot(avatar, duration):
    # Mirrored high behind shot.
    # NOTE(review): reuses the interval name 'avatarBehindHighShot'.
    return heldRelativeShot(avatar, 4, -7, 5 + avatar.getHeight(), 30, -35, 0, duration, 'avatarBehindHighShot')
def avatarLureTrainTrackShot(avatar, duration):
    # Straight-behind shot used for the lure train-track gag.
    return heldRelativeShot(avatar, 0, -7.5, 1 + avatar.getHeight(), 0, 0, 0, duration, 'avatarLureTrainTrackShot')
def avatarBehindThreeQuarterRightShot(avatar, duration):
    # Three-quarter shot from behind the avatar's right.
    return heldRelativeShot(avatar, 7.67, -8.52, avatar.getHeight() * 0.66, 25, 7.5, 0, duration, 'avatarBehindThreeQuarterRightShot')
def avatarSideFollowAttack(suit, toon, duration, battle):
    # Three-phase attack shot: wind up on the suit, sweep across to the toon,
    # then hold for the impact. Aim points sit ~3/4 up each body.
    windupDuration = duration * (0.1 + random.random() * 0.1)
    projectDuration = duration * 0.75
    impactDuration = duration - windupDuration - projectDuration
    suitHeight = suit.getHeight()
    toonHeight = toon.getHeight()
    suitCentralPoint = suit.getPos(battle)
    suitCentralPoint.setZ(suitCentralPoint.getZ() + suitHeight * 0.75)
    toonCentralPoint = toon.getPos(battle)
    toonCentralPoint.setZ(toonCentralPoint.getZ() + toonHeight * 0.75)
    initialX = random.randint(12, 14)
    finalX = random.randint(7, 8)
    initialY = finalY = random.randint(-3, 0)
    initialZ = suitHeight * 0.5 + random.random() * suitHeight
    finalZ = toonHeight * 0.5 + random.random() * toonHeight
    # Randomly mirror the whole move to the other side of the battle.
    if random.random() > 0.5:
        initialX = -initialX
        finalX = -finalX
    return Sequence(focusShot(initialX, initialY, initialZ, windupDuration, suitCentralPoint), focusMoveShot(finalX, finalY, finalZ, projectDuration, toonCentralPoint), Wait(impactDuration))
def focusCameraBetweenPoints(point1, point2):
    """Aim the camera at the midpoint of two points.

    The original branched on which coordinate was larger, but both branches
    computed the same arithmetic midpoint, so the branching was redundant.
    """
    x = (point1[0] + point2[0]) * 0.5
    y = (point1[1] + point2[1]) * 0.5
    z = (point1[2] + point2[2]) * 0.5
    base.camera.lookAt(Point3(x, y, z))
def randomCamera(suit, toon, battle, attackDuration, openShotDuration):
    # Backwards-compatible wrapper: suit-attack variant of randomAttackCam.
    return randomAttackCam(suit, toon, battle, attackDuration, openShotDuration, 'suit')
def randomAttackCam(suit, toon, battle, attackDuration, openShotDuration, attackerString = 'suit'):
    # Two-cut camera sequence for an attack: an opening shot biased toward
    # the attacker, then a closing shot biased toward the defender.
    if openShotDuration > attackDuration:
        openShotDuration = attackDuration
    closeShotDuration = attackDuration - openShotDuration
    if attackerString == 'suit':
        attacker = suit
        defender = toon
        defenderString = 'toon'
    else:
        attacker = toon
        defender = suit
        defenderString = 'suit'
    # 40% actor shot, 40% over-shoulder, 20% split shot.
    randomDouble = random.random()
    if randomDouble > 0.6:
        openShot = randomActorShot(attacker, battle, openShotDuration, attackerString)
    elif randomDouble > 0.2:
        openShot = randomOverShoulderShot(suit, toon, battle, openShotDuration, focus=attackerString)
    else:
        openShot = randomSplitShot(attacker, defender, battle, openShotDuration)
    # Same distribution for the closing shot, focused on the defender.
    randomDouble = random.random()
    if randomDouble > 0.6:
        closeShot = randomActorShot(defender, battle, closeShotDuration, defenderString)
    elif randomDouble > 0.2:
        closeShot = randomOverShoulderShot(suit, toon, battle, closeShotDuration, focus=defenderString)
    else:
        closeShot = randomSplitShot(attacker, defender, battle, closeShotDuration)
    return Sequence(openShot, closeShot)
def randomGroupAttackCam(suit, targets, battle, attackDuration, openShotDuration):
    # Group attack: open on the attacking suit, then cut to the toon group.
    if openShotDuration > attackDuration:
        openShotDuration = attackDuration
    closeShotDuration = attackDuration - openShotDuration
    openShot = randomActorShot(suit, battle, openShotDuration, 'suit', groupShot=0)
    closeShot = randomToonGroupShot(targets, suit, closeShotDuration, battle)
    return Sequence(openShot, closeShot)
def randomActorShot(actor, battle, duration, actorType, groupShot = 0):
    # Random shot of one actor, focused on a point ~3/4 up its body.
    height = actor.getHeight()
    centralPoint = actor.getPos(battle)
    centralPoint.setZ(centralPoint.getZ() + height * 0.75)
    if actorType == 'suit':
        x = 4 + random.random() * 8
        y = -2 - random.random() * 4
        z = height * 0.5 + random.random() * height * 1.5
        if groupShot == 1:
            # Fixed, lower framing for the group variant.
            y = -4
            z = height * 0.5
    else:
        x = 2 + random.random() * 8
        y = -2 + random.random() * 3
        z = height + random.random() * height * 1.5
        if groupShot == 1:
            y = y + 3
            z = height * 0.5
    # Mirror across the battle axis per the global shot direction.
    if MovieUtil.shotDirection == 'left':
        x = -x
    return focusShot(x, y, z, duration, centralPoint)
def randomSplitShot(suit, toon, battle, duration):
    # Shot framing both combatants: camera placed near the suit's side,
    # aimed midway between the toon and the suit focus points.
    suitHeight = suit.getHeight()
    toonHeight = toon.getHeight()
    suitCentralPoint = suit.getPos(battle)
    suitCentralPoint.setZ(suitCentralPoint.getZ() + suitHeight * 0.75)
    toonCentralPoint = toon.getPos(battle)
    toonCentralPoint.setZ(toonCentralPoint.getZ() + toonHeight * 0.75)
    x = 9 + random.random() * 2
    y = -2 - random.random() * 2
    z = suitHeight * 0.5 + random.random() * suitHeight
    # Mirror across the battle axis per the global shot direction.
    if MovieUtil.shotDirection == 'left':
        x = -x
    return focusShot(x, y, z, duration, toonCentralPoint, splitFocusPoint=suitCentralPoint)
def randomOverShoulderShot(suit, toon, battle, duration, focus):
    # Over-the-shoulder shot; `focus` selects which combatant the camera
    # favors ('toon' shoots from behind the suit toward the toon, and vice
    # versa). The camera splits focus between both combatants.
    suitHeight = suit.getHeight()
    toonHeight = toon.getHeight()
    suitCentralPoint = suit.getPos(battle)
    suitCentralPoint.setZ(suitCentralPoint.getZ() + suitHeight * 0.75)
    toonCentralPoint = toon.getPos(battle)
    toonCentralPoint.setZ(toonCentralPoint.getZ() + toonHeight * 0.75)
    x = 2 + random.random() * 10
    if focus == 'toon':
        # Positive y: behind the suit line, above suit head height.
        y = 8 + random.random() * 6
        z = suitHeight * 1.2 + random.random() * suitHeight
    else:
        # Negative y: behind the toon line.
        y = -10 - random.random() * 6
        z = toonHeight * 1.5
    # Mirror across the battle axis per the global shot direction.
    if MovieUtil.shotDirection == 'left':
        x = -x
    return focusShot(x, y, z, duration, toonCentralPoint, splitFocusPoint=suitCentralPoint)
def randomCameraSelection(suit, attack, attackDuration, openShotDuration):
    """Open on a random canned shot of `suit`, then cut to a close shot.

    The opening duration is clamped to `attackDuration`; the close shot
    fills the remainder.
    """
    shotChoices = [avatarCloseUpThrowShot,
     avatarCloseUpThreeQuarterLeftShot,
     allGroupLowShot,
     suitGroupLowLeftShot,
     avatarBehindHighShot]
    if openShotDuration > attackDuration:
        openShotDuration = attackDuration
    closeShotDuration = attackDuration - openShotDuration
    # Direct call replaces the deprecated apply() builtin.
    openShot = random.choice(shotChoices)(suit, openShotDuration)
    closeShot = chooseSuitCloseShot(attack, closeShotDuration, openShot.getName(), attackDuration)
    return Sequence(openShot, closeShot)
def randomToonGroupShot(toons, suit, duration, battle):
    """Frame the toon group from a random vantage opposite the suit.

    `toons` is a list of dicts, each holding the avatar under key 'toon'.
    """
    # Renamed from `sum`, which shadowed the builtin of the same name.
    totalHeight = 0
    for t in toons:
        totalHeight = totalHeight + t['toon'].getHeight()
    # 0.75 aims roughly at chest height of the average toon.
    avgHeight = totalHeight / len(toons) * 0.75
    suitPos = suit.getPos(battle)
    x = 1 + random.random() * 6
    # Shoot from the side opposite the attacking suit.
    if suitPos.getX() > 0:
        x = -x
    if random.random() > 0.5:
        # Close, low vantage.
        y = 4 + random.random() * 1
        z = avgHeight + random.random() * 6
    else:
        # Far, high vantage.
        y = 11 + random.random() * 2
        z = 13 + random.random() * 2
    focalPoint = Point3(0, -4, avgHeight)
    return focusShot(x, y, z, duration, focalPoint)
def chooseFireShot(throws, suitThrowsDict, attackDuration):
    # Two-cut sequence for the fire movie: opening shot, then a closing shot
    # that fills the remaining attack time.
    openShot = chooseFireOpenShot(throws, suitThrowsDict, attackDuration)
    openDuration = openShot.getDuration()
    openName = openShot.getName()
    closeShot = chooseFireCloseShot(throws, suitThrowsDict, openDuration, openName, attackDuration)
    track = Sequence(openShot, closeShot)
    return track
def chooseFireOpenShot(throws, suitThrowsDict, attackDuration):
numThrows = len(throws)
av = None
duration = 3.0
if numThrows == 1:
av = throws[0]['toon']
shotChoices = [avatarCloseUpThrowShot,
avatarCloseUpThreeQuarterRightShot,
avatarBehindShot,
allGroupLowShot,
suitGroupThreeQuarterLeftBehindShot]
elif numThrows >= 2 and numThrows <= 4:
shotChoices = [allGroupLowShot, suitGroupThreeQuarterLeftBehindShot]
else:
notify.error('Bad number of throws: %s' % numThrows)
shotChoice = random.choice(shotChoices)
track = apply(shotChoice, [av, duration])
print 'chooseFireOpenShot %s' % shotChoice
return track
def chooseFireCloseShot(throws, suitThrowsDict, openDuration, openName, attackDuration):
numSuits = len(suitThrowsDict)
av = None
duration = attackDuration - openDuration
if numSuits == 1:
av = base.cr.doId2do[suitThrowsDict.keys()[0]]
shotChoices = [avatarCloseUpFireShot,
avatarCloseUpThreeQuarterLeftFireShot,
allGroupLowShot,
suitGroupThreeQuarterLeftBehindShot]
elif numSuits >= 2 and numSuits <= 4 or numSuits == 0:
shotChoices = [allGroupLowShot, suitGroupThreeQuarterLeftBehindShot]
else:
notify.error('Bad number of suits: %s' % numSuits)
shotChoice = random.choice(shotChoices)
track = apply(shotChoice, [av, duration])
print 'chooseFireOpenShot %s' % shotChoice
return track
def avatarCloseUpFireShot(avatar, duration):
    # Close-up framing the suit being fired.
    return heldRelativeShot(avatar, 7, 17, avatar.getHeight() * 0.66, 159, 3.6, 0, duration, 'avatarCloseUpFireShot')
def avatarCloseUpThreeQuarterLeftFireShot(avatar, duration):
    # Three-quarter-left close-up for the fire movie.
    # NOTE(review): reuses the interval name 'avatarCloseUpThreeQuarterLeftShot'.
    return heldRelativeShot(avatar, -8.2, 8.45, avatar.getHeight() * 0.66, -131.5, 3.6, 0, duration, 'avatarCloseUpThreeQuarterLeftShot')
| apache-2.0 |
cyberlect/yargon-parsing | test/Yargon.Parsing.Tests/ParserTests.ConcatTests.cs | 6939 | using System;
using System.Collections.Generic;
using System.Linq;
using System.Xml.XPath;
using Virtlink.Utilib.Collections;
using Xunit;
namespace Yargon.Parsing
{
partial class ParserTests
{
/// <summary>
/// Tests the <see cref="Parser.Concat"/> method.
/// </summary>
        public sealed class ConcatTests : ParserCombinatorTests
        {
            /// <summary>Both parsers succeed: result is the concatenation of their sequences.</summary>
            [Fact]
            public void ReturnedParser_ShouldSucceedAndConcatenateSequences_WhenBothInputParsersSucceed()
            {
                // Arrange
                var firstParser = SuccessParser(new[] {"a", "b"});
                var secondParser = SuccessParser(new[] { "c", "d" });
                var parser = firstParser.Concat(secondParser);
                var tokens = CreateTokenStream(TokenType.Zero, TokenType.One, TokenType.Zero);
                // Act
                var result = parser(tokens);
                // Assert
                Assert.True(result.Successful);
                Assert.Equal(new [] { "a", "b", "c", "d" }, result.Value);
            }
            /// <summary>First parser fails: the concatenated parser fails.</summary>
            [Fact]
            public void ReturnedParser_ShouldFail_WhenFirstInputParserFails()
            {
                // Arrange
                var firstParser = FailParser<IEnumerable<String>>();
                var secondParser = SuccessParser(new[] { "c", "d" });
                var parser = firstParser.Concat(secondParser);
                var tokens = CreateTokenStream(TokenType.Zero, TokenType.One, TokenType.Zero);
                // Act
                var result = parser(tokens);
                // Assert
                Assert.False(result.Successful);
            }
            /// <summary>Second parser fails: the concatenated parser fails.</summary>
            [Fact]
            public void ReturnedParser_ShouldFail_WhenSecondInputParserFails()
            {
                // Arrange
                var firstParser = SuccessParser(new[] { "a", "b" });
                var secondParser = FailParser<IEnumerable<String>>();
                var parser = firstParser.Concat(secondParser);
                var tokens = CreateTokenStream(TokenType.Zero, TokenType.One, TokenType.Zero);
                // Act
                var result = parser(tokens);
                // Assert
                Assert.False(result.Successful);
            }
            /// <summary>Both parsers fail: the concatenated parser fails.</summary>
            [Fact]
            public void ReturnedParser_ShouldFail_WhenBothInputParsersFail()
            {
                // Arrange
                var firstParser = FailParser<IEnumerable<String>>();
                var secondParser = FailParser<IEnumerable<String>>();
                var parser = firstParser.Concat(secondParser);
                var tokens = CreateTokenStream(TokenType.Zero, TokenType.One, TokenType.Zero);
                // Act
                var result = parser(tokens);
                // Assert
                Assert.False(result.Successful);
            }
            /// <summary>On success, messages from both parsers are merged (order not guaranteed).</summary>
            [Fact]
            public void ReturnedParser_ShouldConcatenatedInputParserMessages_WhenInputParsersSucceed()
            {
                // Arrange
                var firstParser = SuccessParser(new[] { "a", "b" }).WithMessage(Message.Error("First parser message."));
                var secondParser = SuccessParser(new[] { "c", "d" }).WithMessage(Message.Error("Second parser message."));
                var parser = firstParser.Concat(secondParser);
                var tokens = CreateTokenStream(TokenType.Zero, TokenType.One, TokenType.Zero);
                // Act
                var result = parser(tokens);
                // Assert
                Assert.True(result.Successful);
                Assert.Equal(new [] { "First parser message.", "Second parser message." }.OrderBy(m => m), result.Messages.OrderBy(m => m.Text).Select(m => m.Text));
            }
            /// <summary>First parser fails: only its messages are reported (second never runs).</summary>
            [Fact]
            public void ReturnedParser_ShouldReturnFirstInputMessages_WhenFirstInputParserFails()
            {
                // Arrange
                var firstParser = FailParser<IEnumerable<String>>().WithMessage(Message.Error("First parser message."));
                var secondParser = SuccessParser(new[] { "c", "d" }).WithMessage(Message.Error("Second parser message."));
                var parser = firstParser.Concat(secondParser);
                var tokens = CreateTokenStream(TokenType.Zero, TokenType.One, TokenType.Zero);
                // Act
                var result = parser(tokens);
                // Assert
                Assert.False(result.Successful);
                Assert.Equal(new[] { "First parser message." }, result.Messages.Select(m => m.Text));
            }
            /// <summary>Second parser fails: messages from both parsers are reported.</summary>
            [Fact]
            public void ReturnedParser_ShouldReturnFirstAndSecondInputMessages_WhenSecondInputParserFails()
            {
                // Arrange
                var firstParser = SuccessParser(new[] { "a", "b" }).WithMessage(Message.Error("First parser message."));
                var secondParser = FailParser<IEnumerable<String>>().WithMessage(Message.Error("Second parser message."));
                var parser = firstParser.Concat(secondParser);
                var tokens = CreateTokenStream(TokenType.Zero, TokenType.One, TokenType.Zero);
                // Act
                var result = parser(tokens);
                // Assert
                Assert.False(result.Successful);
                Assert.Equal(new[] { "First parser message.", "Second parser message." }.OrderBy(m => m), result.Messages.OrderBy(m => m.Text).Select(m => m.Text));
            }
            /// <summary>Passing a null token stream to the returned parser throws.</summary>
            [Fact]
            public void ReturnedParser_ShouldThrowArgumentNullException_WhenInputIsNull()
            {
                // Arrange
                var firstParser = SuccessParser<String>();
                var secondParser = SuccessParser<String>();
                var parser = firstParser.Concat(secondParser);
                // Act
                var exception = Record.Exception(() =>
                {
                    parser(null);
                });
                // Assert
                Assert.IsAssignableFrom<ArgumentNullException>(exception);
            }
            /// <summary>Concat rejects a null first parser.</summary>
            [Fact]
            public void ShouldThrowArgumentNullException_WhenFirstParserIsNull()
            {
                // Act
                var exception = Record.Exception(() =>
                {
                    Parser.Concat(null, SuccessParser<IEnumerable<String>>());
                });
                // Assert
                Assert.IsAssignableFrom<ArgumentNullException>(exception);
            }
            /// <summary>Concat rejects a null second parser.</summary>
            [Fact]
            public void ShouldThrowArgumentNullException_WhenUntilParserIsNull()
            {
                // Act
                var exception = Record.Exception(() =>
                {
                    Parser.Concat(SuccessParser<IEnumerable<String>>(), null);
                });
                // Assert
                Assert.IsAssignableFrom<ArgumentNullException>(exception);
            }
        }
}
}
| apache-2.0 |
sarvex/tensorflow | tensorflow/core/kernels/data/concatenate_dataset_op.cc | 9084 | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/kernels/data/concatenate_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace data {
// See documentation in ../../ops/dataset_ops.cc for a high-level
// description of the following op.
/* static */ constexpr const char* const ConcatenateDatasetOp::kDatasetType;
/* static */ constexpr const char* const ConcatenateDatasetOp::kInputDataset;
/* static */ constexpr const char* const ConcatenateDatasetOp::kAnotherDataset;
/* static */ constexpr const char* const ConcatenateDatasetOp::kOutputTypes;
/* static */ constexpr const char* const ConcatenateDatasetOp::kOutputShapes;
// Checkpoint keys used by Iterator::SaveInternal / RestoreInternal below.
constexpr char kIndex[] = "i";
constexpr char kInputImplUninitialized[] = "input_impl_uninitialized";
// Dataset that yields all elements of `input`, then all elements of
// `to_concatenate`. Holds a reference (Ref) on both inputs for its lifetime.
class ConcatenateDatasetOp::Dataset : public DatasetBase {
 public:
  explicit Dataset(OpKernelContext* ctx, const DatasetBase* input,
                   const DatasetBase* to_concatenate)
      : DatasetBase(DatasetContext(ctx)),
        input_(input),
        to_concatenate_(to_concatenate) {
    input_->Ref();
    to_concatenate_->Ref();
    // Each output shape is the most specific shape compatible with BOTH
    // inputs (mismatched dims become unknown).
    auto os_input = input->output_shapes();
    auto os_concatenate = to_concatenate->output_shapes();
    for (int i = 0; i < os_input.size(); i++) {
      output_shapes_.push_back(
          MostSpecificCompatibleShape(os_input[i], os_concatenate[i]));
    }
  }
  ~Dataset() override {
    input_->Unref();
    to_concatenate_->Unref();
  }
  std::unique_ptr<IteratorBase> MakeIteratorInternal(
      const string& prefix) const override {
    return absl::make_unique<Iterator>(Iterator::Params{
        this, name_utils::IteratorPrefix(kDatasetType, prefix)});
  }
  Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
                                split_providers) const override {
    TF_ASSIGN_OR_RETURN(*split_providers, GetSplitProviders(this));
    return Status::OK();
  }
  const DataTypeVector& output_dtypes() const override {
    // Both inputs were verified to have identical dtypes in MakeDataset.
    return input_->output_dtypes();
  }
  const std::vector<PartialTensorShape>& output_shapes() const override {
    return output_shapes_;
  }
  string DebugString() const override {
    return name_utils::DatasetDebugString(kDatasetType);
  }
  int64 Cardinality() const override {
    // Infinite dominates unknown; otherwise cardinalities add.
    int64 n1 = input_->Cardinality();
    int64 n2 = to_concatenate_->Cardinality();
    if (n1 == kInfiniteCardinality || n2 == kInfiniteCardinality) {
      return kInfiniteCardinality;
    }
    if (n1 == kUnknownCardinality || n2 == kUnknownCardinality) {
      return kUnknownCardinality;
    }
    return n1 + n2;
  }
  Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
    inputs->push_back(input_);
    inputs->push_back(to_concatenate_);
    return Status::OK();
  }
  Status CheckExternalState() const override {
    TF_RETURN_IF_ERROR(input_->CheckExternalState());
    return to_concatenate_->CheckExternalState();
  }
 protected:
  Status AsGraphDefInternal(SerializationContext* ctx,
                            DatasetGraphDefBuilder* b,
                            Node** output) const override {
    Node* input_graph = nullptr;
    TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph));
    Node* to_concatenate_graph = nullptr;
    TF_RETURN_IF_ERROR(
        b->AddInputDataset(ctx, to_concatenate_, &to_concatenate_graph));
    TF_RETURN_IF_ERROR(
        b->AddDataset(this, {input_graph, to_concatenate_graph}, output));
    return Status::OK();
  }
 private:
  class Iterator : public DatasetIterator<Dataset> {
   public:
    // i_ indexes the current input: 0 = input_, 1 = to_concatenate_,
    // 2 = exhausted.
    explicit Iterator(const Params& params)
        : DatasetIterator<Dataset>(params), i_(0) {}
    Status Initialize(IteratorContext* ctx) override {
      TF_ASSIGN_OR_RETURN(input_contexts_,
                          CreateInputIteratorContexts(ctx, dataset()));
      return dataset()->input_->MakeIterator(&input_contexts_[0], this,
                                             strings::StrCat(prefix(), "[0]"),
                                             &input_impl_);
    }
    Status GetNextInternal(IteratorContext* ctx,
                           std::vector<Tensor>* out_tensors,
                           bool* end_of_sequence) override {
      mutex_lock l(mu_);
      // A reset input_impl_ means both inputs were exhausted earlier.
      if (!input_impl_) {
        *end_of_sequence = true;
        return Status::OK();
      }
      // Draw from the current input; when it ends, switch to the next one.
      while (i_ < 2) {
        TF_RETURN_IF_ERROR(input_impl_->GetNext(&input_contexts_[i_],
                                                out_tensors, end_of_sequence));
        if (!*end_of_sequence) {
          return Status::OK();
        }
        if (++i_ < 2) {
          TF_RETURN_IF_ERROR(dataset()->to_concatenate_->MakeIterator(
              &input_contexts_[i_], this, strings::StrCat(prefix(), "[1]"),
              &input_impl_));
        }
      }
      *end_of_sequence = true;
      input_impl_.reset();
      return Status::OK();
    }
   protected:
    std::shared_ptr<model::Node> CreateNode(
        IteratorContext* ctx, model::Node::Args args) const override {
      return model::MakeKnownRatioNode(std::move(args),
                                       /*ratio=*/1);
    }
    Status SaveInternal(SerializationContext* ctx,
                        IteratorStateWriter* writer) override {
      mutex_lock l(mu_);
      TF_RETURN_IF_ERROR(writer->WriteScalar(full_name(kIndex), i_));
      // An absent input_impl_ is recorded explicitly so restore can tell
      // "exhausted" apart from "positioned at input i_".
      if (input_impl_) {
        TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
      } else {
        TF_RETURN_IF_ERROR(
            writer->WriteScalar(full_name(kInputImplUninitialized), ""));
      }
      return Status::OK();
    }
    Status RestoreInternal(IteratorContext* ctx,
                           IteratorStateReader* reader) override {
      mutex_lock l(mu_);
      TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kIndex), &i_));
      if (reader->Contains(full_name(kInputImplUninitialized))) {
        input_impl_.reset();
        return Status::OK();
      }
      if (!TF_PREDICT_TRUE(i_ >= 0 && i_ <= 2))
        return errors::InvalidArgument("i_ must be in range [0, 2].");
      // Rebuild the sub-iterator for the input we were positioned at, then
      // restore its own state.
      if (i_ == 1) {
        TF_RETURN_IF_ERROR(dataset()->to_concatenate_->MakeIterator(
            ctx, this, strings::StrCat(prefix(), "[1]"), &input_impl_));
      } else if (i_ == 2) {
        input_impl_.reset();
      }
      if (input_impl_) {
        TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
      }
      return Status::OK();
    }
   private:
    mutex mu_;
    int64 i_ TF_GUARDED_BY(mu_);
    std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
    std::vector<IteratorContext> input_contexts_;
  };
  // Keeps matching dims, replaces mismatches with -1 (unknown); mismatched
  // ranks or unknown rank collapse to a fully-unknown shape.
  static PartialTensorShape MostSpecificCompatibleShape(
      const PartialTensorShape& ts1, const PartialTensorShape& ts2) {
    if (ts1.dims() != ts2.dims() || ts1.unknown_rank() || ts2.unknown_rank())
      return PartialTensorShape();
    PartialTensorShape output_tensorshape({});
    auto dims1 = ts1.dim_sizes();
    auto dims2 = ts2.dim_sizes();
    for (int d = 0; d < ts1.dims(); d++) {
      if (dims1[d] == dims2[d])
        output_tensorshape.AddDim(dims1[d]);
      else
        output_tensorshape.AddDim(-1);
    }
    return output_tensorshape;
  }
  const DatasetBase* input_;
  const DatasetBase* to_concatenate_;
  std::vector<PartialTensorShape> output_shapes_;
};
// Binary dataset op kernel: takes two input datasets (see MakeDataset).
ConcatenateDatasetOp::ConcatenateDatasetOp(OpKernelConstruction* ctx)
    : BinaryDatasetOpKernel(ctx) {}
// Validates that both inputs produce the same dtypes, then builds the
// concatenated dataset.
void ConcatenateDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                                       DatasetBase* to_concatenate,
                                       DatasetBase** output) {
  // BUG FIX: the original wrapped the two DataTypeVectorString() calls in
  // parentheses, forming a C++ comma expression that passed only the second
  // string and left the "%s" placeholders in the message unformatted.
  // errors::InvalidArgument concatenates its arguments, so build the message
  // from pieces instead.
  OP_REQUIRES(ctx, input->output_dtypes() == to_concatenate->output_dtypes(),
              errors::InvalidArgument(
                  "input dataset and dataset to concatenate"
                  " have different output_types ",
                  DataTypeVectorString(input->output_dtypes()), " and ",
                  DataTypeVectorString(to_concatenate->output_dtypes())));
  *output = new Dataset(ctx, input, to_concatenate);
}
namespace {
// Registers the CPU kernel for the "ConcatenateDataset" op.
REGISTER_KERNEL_BUILDER(Name("ConcatenateDataset").Device(DEVICE_CPU),
                        ConcatenateDatasetOp);
}  // namespace
} // namespace data
} // namespace tensorflow
| apache-2.0 |
wuzhongdehua/fksm | common/src/main/java/com/fksm/common/dto/ServiceInformation.java | 919 | package com.fksm.common.dto;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
/**
* Created by root on 16-4-23.
*/
/**
 * Message DTO describing a single service invocation (service id, request
 * id, server ip, start time and service path).
 */
public class ServiceInformation extends BaseMassageDto implements Serializable {
    // Path of the invoked service endpoint.
    private String service_path;
    public String getService_path() {
        return service_path;
    }
    public void setService_path(String service_path) {
        this.service_path = service_path;
    }
    /**
     * Builds a ServiceInformation populated with the identifiers of one
     * service invocation.
     *
     * @param service_id id of the invoked service
     * @param request_id id of the originating request
     * @param server_ip  ip of the server handling the call
     * @param begin_time invocation start time (epoch millis, presumably --
     *                   confirm against callers)
     * @return the populated instance
     */
    public static ServiceInformation build(String service_id, String request_id, String server_ip, Long begin_time){
        ServiceInformation info = new ServiceInformation();
        info.setServer_ip(server_ip);
        info.setRequest_id(request_id);
        info.setService_id(service_id);
        info.setBegin_time(begin_time);
        return info;
    }
}
| apache-2.0 |
shisoft/LinkedIn-J | core/src/main/java/com/google/code/linkedinapi/schema/Content.java | 5361 | /*
* Copyright 2010-2011 Nabeel Mukhtar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.google.code.linkedinapi.schema;
/**
* <p>Java class for anonymous complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType>
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element ref="{}id" minOccurs="0"/>
* <element ref="{}title"/>
* <element ref="{}submitted-url"/>
* <element ref="{}shortened-url" minOccurs="0"/>
* <element ref="{}submitted-image-url"/>
* <element ref="{}description" minOccurs="0"/>
* <element ref="{}thumbnail-url" minOccurs="0"/>
* <element ref="{}resolved-url" minOccurs="0"/>
* <element ref="{}eyebrow-url" minOccurs="0"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
public interface Content
    extends SchemaEntity
{

    /** Returns the content id, or {@code null} if unset. */
    String getId();

    /** Sets the content id. */
    void setId(String value);

    /** Returns the title. */
    String getTitle();

    /** Sets the title. */
    void setTitle(String value);

    /** Returns the submitted URL. */
    String getSubmittedUrl();

    /** Sets the submitted URL. */
    void setSubmittedUrl(String value);

    /** Returns the shortened URL, or {@code null} if unset. */
    String getShortenedUrl();

    /** Sets the shortened URL. */
    void setShortenedUrl(String value);

    /** Returns the submitted image URL. */
    String getSubmittedImageUrl();

    /** Sets the submitted image URL. */
    void setSubmittedImageUrl(String value);

    /** Returns the description, or {@code null} if unset. */
    String getDescription();

    /** Sets the description. */
    void setDescription(String value);

    /** Returns the thumbnail URL, or {@code null} if unset. */
    String getThumbnailUrl();

    /** Sets the thumbnail URL. */
    void setThumbnailUrl(String value);

    /** Returns the resolved URL, or {@code null} if unset. */
    String getResolvedUrl();

    /** Sets the resolved URL. */
    void setResolvedUrl(String value);

    /** Returns the eyebrow URL, or {@code null} if unset. */
    String getEyebrowUrl();

    /** Sets the eyebrow URL. */
    void setEyebrowUrl(String value);

}
| apache-2.0 |
chewaca/gtsSolution | GtsSoftware/WebContent/Gts/proceso/js/data.js | 991 |
// Demo data for the Gantt chart: each task has an id, a display name and two
// series (estimated vs. actual time range). JavaScript Date months are
// zero-based, so 11 = December.
//
// BUG FIX: the original used leading-zero numeric literals (01, 03, 09, ...).
// These are legacy octal-style literals: a SyntaxError in strict mode and in
// ES modules, and confusing in sloppy mode. The zeros are removed; every
// value is unchanged.
var ganttData = [
    {
        id: 1, name: "Gen\u00e9rico 1", series: [
            { name: "Tiempo Estimado", start: new Date(2014, 11, 1), end: new Date(2014, 11, 3) },
            { name: "Real", start: new Date(2014, 11, 2), end: new Date(2014, 11, 4), color: "#f0f0f0" }
        ]
    },
    {
        id: 2, name: "Gen\u00e9rico 2", series: [
            { name: "Tiempo Estimado", start: new Date(2014, 11, 4), end: new Date(2014, 11, 9) },
            { name: "Real", start: new Date(2014, 11, 4), end: new Date(2014, 11, 11), color: "#f0f0f0" }
        ]
    },
    {
        id: 3, name: "Gen\u00e9rico 3", series: [
            { name: "Tiempo Estimado", start: new Date(2014, 11, 10), end: new Date(2014, 11, 16) },
            { name: "Real", start: new Date(2014, 11, 12), end: new Date(2014, 11, 14), color: "#f0f0f0" }
        ]
    }
];
| apache-2.0 |
SergeyI88/InduikovS | chapter_004/src/main/java/List/ContainerLinkedList.java | 2613 | package List;
import com.sun.org.apache.xml.internal.serializer.ElemDesc;
import java.util.Iterator;
import java.util.LinkedList;
/**
 * A minimal doubly linked list container supporting {@code add(E)},
 * {@code get(int)} and iteration. {@code add} accepts an unbounded number of
 * elements (translated from the original Russian task description).
 */
public class ContainerLinkedList<E> implements Iterable<E> {

    // Number of elements added so far.
    private int index = 0;
    // Head and tail of the list. NOTE: for a single-element list only
    // 'first' is set and 'last' stays null; all methods must cope with that.
    Entry first = null;
    Entry last = null;

    protected class IteratorList<E> implements Iterator<E> {

        // Cursor: the next entry to return; null when exhausted.
        Entry first;
        Entry last;

        public IteratorList(Entry first, Entry last) {
            this.first = first;
            this.last = last;
        }

        @Override
        public boolean hasNext() {
            // BUG FIX: the original also required 'last != null', so a
            // single-element list (where 'last' is never assigned) reported
            // no elements at all. Only the cursor matters here.
            return this.first != null;
        }

        @Override
        public E next() {
            // Return the current entry and advance the cursor.
            Entry current = first;
            first = current.next;
            return (E) current.element;
        }

        @Override
        public void remove() {
            // Removal is intentionally unsupported.
        }
    }

    @Override
    public Iterator<E> iterator() {
        return new IteratorList(this.first, this.last);
    }

    /** A doubly linked node holding one element. */
    protected class Entry {
        Object element;
        Entry prev = null;
        Entry next = null;

        public Entry(E element) {
            this.element = element;
        }
    }

    /**
     * Appends an element to the end of the list.
     *
     * @param element the value to store (null is allowed)
     */
    public void add(E element) {
        if (first == null) {
            first = new Entry(element);
        } else if (last == null) {
            // Second element: establish the head/tail pair.
            last = new Entry(element);
            first.next = last;
            last.prev = first;
        } else {
            Entry entry = last;
            last = new Entry(element);
            entry.next = last;
            last.prev = entry;
        }
        index++;
    }

    /**
     * Returns the element at the given position, or {@code null} when the
     * index is out of range.
     *
     * BUG FIX: the original threw a NullPointerException on an empty list
     * (it dereferenced {@code first.next} unconditionally) and on an index
     * past the end; a plain linear traversal handles both.
     *
     * @param index zero-based position
     * @return the stored element or {@code null}
     */
    public E get(int index) {
        Entry entry = first;
        for (int i = 0; entry != null; i++) {
            if (i == index) {
                return (E) entry.element;
            }
            entry = entry.next;
        }
        return null;
    }
}
| apache-2.0 |
yanwen0614/Weibo | sina_spider/utils/Database.py | 636 | def Create_insert_sql(tablename, *arg):
sql = tablename.join(('''INSERT INTO ''',''' ({}) VALUES ({})'''))
attr = ','.join(arg)
placeholder = ','.join(["'{}'" for _ in range(len(arg))])
sql = sql.format(attr,placeholder)
return sql
def Create_createtable_sql(tablename,Key, **kwarg):
    """Build a CREATE TABLE statement for MySQL (InnoDB, utf8).

    ``Key`` names the primary-key column; each keyword argument maps a column
    name to its SQL type declaration. Column order follows dict iteration
    order (insertion order on Python 3.7+).
    NOTE(review): the literal 'placeholderplaceholder' is only an internal
    splice marker replaced below; identifiers are interpolated unescaped, so
    callers must not pass untrusted names.
    """
    sql = "CREATE TABLE `{tablename}` (placeholderplaceholder,\
PRIMARY KEY (`{Key}`)) ENGINE=InnoDB DEFAULT CHARSET=utf8;".format(tablename=tablename,Key=Key)
    attr = []
    for k, w in kwarg.items():
        # Render one "`name` TYPE " column definition per keyword argument.
        attr.append("`{k}` {w} ".format(k=k,w=w))
    # Split on the marker and re-join with the comma-separated column list.
    return ','.join(attr).join(sql.split('placeholderplaceholder'))
| apache-2.0 |
msallin/SQLiteCodeFirst | SQLite.CodeFirst/Public/CollationFunction.cs | 1162 | namespace SQLite.CodeFirst
{
/// <summary>
/// The collation function to use for this column.
/// Is used together with the <see cref="CollateAttribute" />, and when setting a default collation for the database.
/// </summary>
public enum CollationFunction
{
None,
/// <summary>
/// The same as binary, except that trailing space characters are ignored.
/// </summary>
RTrim,
/// <summary>
/// The same as binary, except the 26 upper case characters of ASCII are folded to their lower case equivalents before
/// the comparison is performed. Note that only ASCII characters are case folded. SQLite does not attempt to do full
/// UTF case folding due to the size of the tables required.
/// </summary>
NoCase,
/// <summary>
/// Compares string data using memcmp(), regardless of text encoding.
/// </summary>
Binary,
/// <summary>
/// An application can register additional collating functions using the sqlite3_create_collation() interface.
/// </summary>
Custom
}
} | apache-2.0 |
WillSkywalker/blog | app/admin/views.py | 4192 | from flask import render_template, session, redirect, url_for, current_app
from . import admin, forms
from ..models import Article, Tag, Comment
from .. import db
from os import environ
from datetime import datetime
from markdown import markdown
import bleach
# HTML tags and attributes that survive sanitization of rendered markdown.
ALLOWED_TAGS = ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code',
                'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul', 'img',
                'h1', 'h2', 'h3', 'p']
ALLOWED_ATTRS = {'*': ['class'],
                 'a': ['href', 'rel'],
                 'img': ['src', 'alt']}


def markdown_to_html(value):
    """Render *value* (markdown) to sanitized, auto-linkified HTML."""
    rendered = markdown(value, output_format='html')
    # Strip everything outside the whitelists, then turn bare URLs into links.
    sanitized = bleach.clean(rendered, tags=ALLOWED_TAGS, strip=True,
                             attributes=ALLOWED_ATTRS)
    return bleach.linkify(sanitized)
@admin.route('/', methods=['GET', 'POST'])
def new_article():
    """Show the new-article form and create the article on submit.

    The comma-separated tag field is split; the post is attached to existing
    tags and missing tags are created.
    """
    # BUG FIX: session['login'] raises KeyError on a fresh session where the
    # key was never set; session.get returns None and falls into the guard.
    if session.get('login') != 'true':
        return '<h1>Under construction</h1>'
    f = forms.NewPostForm()
    if f.validate_on_submit():
        post = Article(title=f.title.data,
                       subtitle=f.subtitle.data,
                       content=markdown_to_html(f.content.data),
                       image_url=f.image_url.data,
                       timestamp=datetime.utcnow())
        if f.formatted_title.data:
            post.formatted_title = f.formatted_title.data
        # Tags are entered as a single ", "-separated string.
        tags = f.tags.data.split(', ')
        for t in tags:
            altag = Tag.query.filter_by(tagname=t).first()
            if altag:
                altag.articles.append(post)
                db.session.add(altag)
            else:
                db.session.add(Tag(tagname=t, articles=[post]))
        db.session.add(post)
        # NOTE(review): no explicit db.session.commit() here -- presumably the
        # session is committed elsewhere (teardown hook?); otherwise post.id
        # may be None in the redirect below. Confirm against app setup.
        return redirect(url_for('main.article_page', num=post.id))
    return render_template('new-article.html', form=f)
@admin.route('/<int:num>', methods=['GET', 'POST', 'DELETE'])
def manage_article(num):
    """Edit an existing article: show its data in the form, save on submit.

    On save the article is detached from all previous tags and re-attached
    according to the submitted tag field.
    """
    # BUG FIX: session['login'] raises KeyError on a fresh session; use .get.
    if session.get('login') != 'true':
        return '<h1>Under construction</h1>'
    f = forms.NewPostForm()
    article = Article.query.filter_by(id=num).first()
    # NOTE(review): article is None for an unknown id and the attribute
    # accesses below would raise -- consider first_or_404(); confirm intent.
    if f.validate_on_submit():
        article.title = f.title.data
        article.subtitle = f.subtitle.data
        article.content = f.content.data
        article.image_url = f.image_url.data
        if f.formatted_title.data:
            article.formatted_title = f.formatted_title.data
        # Detach from every tag currently linked to the article...
        for t in article.tags:
            altag = Tag.query.filter_by(tagname=t.tagname).first()
            altag.articles.remove(article)
        # ...then re-attach according to the submitted ", "-separated list.
        tags = f.tags.data.split(', ')
        for t in tags:
            altag = Tag.query.filter_by(tagname=t).first()
            if altag:
                altag.articles.append(article)
                db.session.add(altag)
            else:
                db.session.add(Tag(tagname=t, articles=[article]))
        db.session.add(article)
        return redirect(url_for('main.article_page', num=num))
    # GET: pre-populate the form with the stored article.
    f.title.data = article.title
    f.subtitle.data = article.subtitle
    f.formatted_title.data = article.formatted_title
    f.content.data = article.content
    f.image_url.data = article.image_url
    f.tags.data = ', '.join(t.tagname for t in article.tags)
    return render_template('new-article.html', form=f)
@admin.route('/comment/<int:num>', methods=['GET', 'POST'])
def manage_comment(num):
    """Reply to / ban a comment; pre-populates the form with current values."""
    # BUG FIX: session['login'] raises KeyError on a fresh session; use .get.
    if session.get('login') != 'true':
        return '<h1>Under construction</h1>'
    comment = Comment.query.filter_by(id=num).first()
    # NOTE(review): comment is None for an unknown id and the accesses below
    # would raise -- consider first_or_404(); confirm intent.
    f = forms.ReplyForm()
    if f.validate_on_submit():
        comment.reply = f.reply.data
        comment.disabled = f.ban.data
        db.session.add(comment)
        return redirect(url_for('main.contact'))
    f.reply.data = comment.reply
    f.ban.data = comment.disabled
    return render_template('manage_comment.html', comment=comment, form=f)
@admin.route('/login', methods=['GET', 'POST'])
def login_page():
    """Render the admin login form and authenticate the blog admin.

    Any visit to this page logs the current session out first.
    """
    import hmac  # local import: constant-time credential comparison

    f = forms.LoginForm()
    session['login'] = None
    app = current_app._get_current_object()
    if f.validate_on_submit():
        # SECURITY FIX: plain == comparison of credentials leaks information
        # through timing; hmac.compare_digest compares in constant time.
        passwd_ok = hmac.compare_digest(
            f.password.data.encode('utf-8'),
            app.config['BLOG_PASSWD'].encode('utf-8'))
        name_ok = hmac.compare_digest(
            f.name.data.encode('utf-8'),
            app.config['BLOG_ADMIN'].encode('utf-8'))
        if passwd_ok and name_ok:
            session['login'] = 'true'
            return redirect(url_for('admin.new_article'))
    return render_template('login.html', form=f)
| apache-2.0 |
Adrian0350/PHP-AMI | src/PHPAMI/Message/Event/NewstateEvent.php | 3169 | <?php
/**
* Event triggered when a channel changes its status.
*
* NOTE: For correct callerid values: see: https://issues.asterisk.org/jira/browse/ASTERISK-16910
*
* PHP Version 5
*
* @category PHPAMI
* @package Message
* @subpackage Event
* @author Jaime Ziga <jaime.ziga@gmail.com>
* @license http://github.com/Adrian0350/PHP-AMI/ Apache License 2.0
* @version SVN: $Id$
* @link http://github.com/Adrian0350/PHP-AMI/
*
* Copyright 2011 Marcelo Gornstein <marcelog@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
require_once dirname(__FILE__) . '/EventMessage.php';
/**
* Event triggered when a channel changes its status.
*
* PHP Version 5
*
* @category PHPAMI
* @package Message
* @subpackage Event
* @author Jaime Ziga <jaime.ziga@gmail.com>
* @license http://github.com/Adrian0350/PHP-AMI/ Apache License 2.0
* @link http://github.com/Adrian0350/PHP-AMI/
*/
/**
 * AMI "Newstate" event: triggered when a channel changes its status.
 * Every accessor simply reads the corresponding AMI key from the message.
 */
class NewstateEvent extends EventMessage
{
    /**
     * Returns the AMI privilege string of this event.
     *
     * @return string
     */
    public function getPrivilege()
    {
        return $this->getKey("Privilege");
    }

    /**
     * Returns the channel whose state changed.
     *
     * @return string
     */
    public function getChannel()
    {
        return $this->getKey("Channel");
    }

    /**
     * Returns the numeric channel state.
     *
     * @return string
     */
    public function getChannelState()
    {
        return $this->getKey("ChannelState");
    }

    /**
     * Returns the human readable channel state.
     *
     * @return string
     */
    public function getChannelStateDesc()
    {
        return $this->getKey("ChannelStateDesc");
    }

    /**
     * Returns the caller id number (Asterisk < 1.8).
     *
     * @return string
     */
    public function getCallerIDNum()
    {
        return $this->getKey("CallerIDNum");
    }

    /**
     * Returns the caller id name (Asterisk < 1.8).
     *
     * @return string
     */
    public function getCallerIDName()
    {
        return $this->getKey("CallerIDName");
    }

    /**
     * Returns the unique id of the channel.
     *
     * @return string
     */
    public function getUniqueID()
    {
        return $this->getKey("UniqueID");
    }

    /**
     * Returns the connected line number (Asterisk >= 1.8).
     *
     * @return string
     */
    public function getConnectedLineNum()
    {
        return $this->getKey("ConnectedLineNum");
    }

    /**
     * Returns the connected line name (Asterisk >= 1.8).
     *
     * @return string
     */
    public function getConnectedLineName()
    {
        return $this->getKey("ConnectedLineName");
    }
}
| apache-2.0 |
ComHub/register-app-android | presentation/src/main/java/io/comhub/register/android/presentation/presenter/UserDetailsPresenter.java | 4166 | /**
* Copyright (C) 2015 Fernando Cejas Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.comhub.register.android.presentation.presenter;
import android.support.annotation.NonNull;
import com.fernandocejas.frodo.annotation.RxLogSubscriber;
import io.comhub.register.android.domain.user.User;
import io.comhub.register.android.domain.exception.DefaultErrorBundle;
import io.comhub.register.android.domain.exception.ErrorBundle;
import io.comhub.register.android.domain.interactor.DefaultSubscriber;
import io.comhub.register.android.domain.interactor.UseCase;
import io.comhub.register.android.presentation.exception.ErrorMessageFactory;
import io.comhub.register.android.presentation.internal.di.PerActivity;
import io.comhub.register.android.presentation.mapper.UserModelDataMapper;
import io.comhub.register.android.presentation.model.UserModel;
import io.comhub.register.android.presentation.view.UserDetailsView;
import javax.inject.Inject;
import javax.inject.Named;
/**
* {@link Presenter} that controls communication between views and models of the presentation
* layer.
*/
@PerActivity
public class UserDetailsPresenter implements Presenter {

    /** The attached view; null after {@link #destroy()}. */
    private UserDetailsView viewDetailsView;
    /** Use case that retrieves the user; injected under the "userDetails" name. */
    private final UseCase getUserDetailsUseCase;
    /** Maps domain {@link User} objects to presentation {@link UserModel}s. */
    private final UserModelDataMapper userModelDataMapper;

    @Inject
    public UserDetailsPresenter(@Named("userDetails") UseCase getUserDetailsUseCase,
                                UserModelDataMapper userModelDataMapper) {
        this.getUserDetailsUseCase = getUserDetailsUseCase;
        this.userModelDataMapper = userModelDataMapper;
    }

    /** Attaches the view this presenter renders into. Must be called before {@link #initialize()}. */
    public void setView(@NonNull UserDetailsView view) {
        this.viewDetailsView = view;
    }

    @Override
    public void resume() {}

    @Override
    public void pause() {}

    /** Cancels the in-flight use case and releases the view reference. */
    @Override
    public void destroy() {
        this.getUserDetailsUseCase.unsubscribe();
        this.viewDetailsView = null;
    }

    /**
     * Initializes the presenter by start retrieving user details.
     */
    public void initialize() {
        this.loadUserDetails();
    }

    /**
     * Loads user details.
     */
    private void loadUserDetails() {
        this.hideViewRetry();
        this.showViewLoading();
        this.getUserDetails();
    }

    private void showViewLoading() {
        this.viewDetailsView.showLoading();
    }

    private void hideViewLoading() {
        this.viewDetailsView.hideLoading();
    }

    private void showViewRetry() {
        this.viewDetailsView.showRetry();
    }

    private void hideViewRetry() {
        this.viewDetailsView.hideRetry();
    }

    /** Translates the error into a user-facing message and shows it on the view. */
    private void showErrorMessage(ErrorBundle errorBundle) {
        String errorMessage = ErrorMessageFactory.create(this.viewDetailsView.context(),
                                                         errorBundle.getException());
        this.viewDetailsView.showError(errorMessage);
    }

    /** Maps the domain user to a presentation model and renders it. */
    private void showUserDetailsInView(User user) {
        final UserModel userModel = this.userModelDataMapper.transform(user);
        this.viewDetailsView.renderUser(userModel);
    }

    private void getUserDetails() {
        this.getUserDetailsUseCase.execute(new UserDetailsSubscriber());
    }

    /** Rx subscriber driving the loading / error / success view transitions. */
    @RxLogSubscriber
    private final class UserDetailsSubscriber extends DefaultSubscriber<User> {

        @Override
        public void onCompleted() {
            UserDetailsPresenter.this.hideViewLoading();
        }

        @Override
        public void onError(Throwable e) {
            UserDetailsPresenter.this.hideViewLoading();
            UserDetailsPresenter.this.showErrorMessage(new DefaultErrorBundle((Exception) e));
            UserDetailsPresenter.this.showViewRetry();
        }

        @Override
        public void onNext(User user) {
            UserDetailsPresenter.this.showUserDetailsInView(user);
        }
    }
}
| apache-2.0 |
dsdn/tssdn | Controller/DPI.py | 1044 | '''
Ryu Script for data plane interactions in TSSDN
Naresh Nayak
11.02.2016
'''
from ryu.base.app_manager import RyuApp
from TopologyMonitor import EventTopologyChanged, EventNewHostInTopology
from ryu.topology import event, api
from ryu.controller.handler import set_ev_cls, MAIN_DISPATCHER
from ryu.ofproto import ofproto_v1_4
import networkx as nx
import time
class DPI(RyuApp):
OFP_VERSION = [ofproto_v1_4.OFP_VERSION]
# Constructor
def __init__(self, *args, **kwargs):
super(DPI, self).__init__(*args, **kwargs)
self.ip_to_mac = {}
# Set an observer for topology changes
@set_ev_cls(EventNewHostInTopology, MAIN_DISPATCHER)
def create_ip_mac_dict(self, ev):
# Wait for one second for IP address to get updated
time.sleep(1)
hosts = api.get_all_host(self)
if len(self.ip_to_mac) == hosts:
return
else:
for f in hosts:
if f.ipv4[0]: self.ip_to_mac[f.ipv4[0]]=f.mac
print self.ip_to_mac
| apache-2.0 |
amster/ctrlaltdel | lib/seq_ctrlaltdel.js | 1197 | var SEQ;
SEQ = SEQ || {};
SEQ.CtrlAltDel = function ($target) {
var t = this;
t._elemInnerHtml = {};
t.$target = $target;
}
$.extend(SEQ.CtrlAltDel.prototype, {
remove: function (id) {
var t = this,
$elem = $('#' + id);
$elem.remove();
delete t._elemInnerHtml[id];
},
/**
* opts: Supported keys:
*
* classes: CSS classes to apply. Separate multiple classes with spaces.
* css: CSS attributes to set/modify. If no position is supplied the DIV
* will be set to position:absolute.
* html: Inner HTML.
*/
set: function (id, opts) {
var t = this,
$elem = $('#' + id);
if ($elem.length === 0) {
$elem = $('<div id="'+id+'"></div>');
t.$target.append($elem);
}
if (t._elemInnerHtml[id] != opts.html) {
$elem.html(opts.html);
t._elemInnerHtml[id] = opts.html;
}
if (opts.classes) {
$elem.addClass(opts.classes);
}
if (!opts.css) {
opts.css = { position: 'absolute' };
}
if (!opts.css.position) {
opts.css.position = 'absolute';
}
$elem.css(opts.css);
return $elem;
},
target: function () {
return this.$target;
}
})
| apache-2.0 |
facebook/folly | folly/executors/Codel.cpp | 4666 | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/executors/Codel.h>
#include <algorithm>
#include <stdexcept>
#include <folly/portability/GFlags.h>
// Runtime-tunable defaults (gflags) used by the default Codel constructor.
DEFINE_int32(codel_interval, 100, "Codel default interval time in ms");
DEFINE_int32(codel_target_delay, 5, "Target codel queueing delay in ms");
using namespace std::chrono;
namespace folly {
// Default constructor: delegates to the Options constructor using the
// gflags-controlled interval / target-delay defaults.
Codel::Codel()
    : Codel(Codel::Options()
                .setInterval(milliseconds(FLAGS_codel_interval))
                .setTargetDelay(milliseconds(FLAGS_codel_target_delay))) {}

// Options constructor: starts with no observed delay, the interval clock
// anchored at "now", and the queue considered not overloaded.
Codel::Codel(const Options& options)
    : codelMinDelayNs_(0),
      codelIntervalTimeNs_(
          duration_cast<nanoseconds>(steady_clock::now().time_since_epoch())
              .count()),
      targetDelay_(options.targetDelay()),
      interval_(options.interval()),
      codelResetDelay_(true),
      overloaded_(false) {}
// Records one observed queueing delay and answers whether this request
// should be shed. Called concurrently; relies on the acquire-load +
// exchange pattern below so that exactly one caller performs each
// per-interval transition.
bool Codel::overloaded(nanoseconds delay) {
  bool ret = false;
  auto now = steady_clock::now();

  // Avoid another thread updating the value at the same time we are using it
  // to calculate the overloaded state
  auto minDelay = nanoseconds(codelMinDelayNs_);

  // Get a snapshot of the parameters to determine overload condition
  auto opts = getOptions();
  auto sloughTimeout = getSloughTimeout(opts.targetDelay());

  // End-of-interval bookkeeping: the test-then-exchange ensures only one
  // thread per interval advances the interval clock and recomputes the
  // overloaded flag from the interval's minimum delay.
  if (now > steady_clock::time_point(nanoseconds(codelIntervalTimeNs_)) &&
      // testing before exchanging is more cacheline-friendly
      (!codelResetDelay_.load(std::memory_order_acquire) &&
       !codelResetDelay_.exchange(true))) {
    codelIntervalTimeNs_ =
        duration_cast<nanoseconds>((now + opts.interval()).time_since_epoch())
            .count();

    if (minDelay > opts.targetDelay()) {
      overloaded_ = true;
    } else {
      overloaded_ = false;
    }
  }
  // Care must be taken that only a single thread resets codelMinDelay_,
  // and that it happens after the interval reset above
  if (codelResetDelay_.load(std::memory_order_acquire) &&
      codelResetDelay_.exchange(false)) {
    codelMinDelayNs_ = delay.count();
    // More than one request must come in during an interval before codel
    // starts dropping requests
    return false;
  } else if (delay < nanoseconds(codelMinDelayNs_)) {
    codelMinDelayNs_ = delay.count();
  }

  // Here is where we apply different logic than codel proper. Instead of
  // adapting the interval until the next drop, we slough off requests with
  // queueing delay > 2*target_delay while in the overloaded regime. This
  // empirically works better for our services than the codel approach of
  // increasingly often dropping packets.
  if (overloaded_ && delay > sloughTimeout) {
    ret = true;
  }

  return ret;
}
// Expresses the current minimum delay as a load percentage of the slough
// timeout, capped at 100.
// it might be better to use the average delay instead of minDelay, but we'd
// have to track it. aspiring bootcamper?
int Codel::getLoad() {
  const auto opts = getOptions();
  const auto ratio = 100 * getMinDelay() / getSloughTimeout(opts.targetDelay());
  return std::min<int>(100, ratio);
}
// Validates and installs new codel parameters. Both durations must be
// positive and the interval strictly greater than the target delay;
// otherwise std::invalid_argument is thrown and nothing is changed.
void Codel::setOptions(Options const& options) {
  const auto delay = options.targetDelay();
  const auto interval = options.interval();

  const bool valid = delay > milliseconds::zero() &&
      interval > milliseconds::zero() && interval > delay;
  if (!valid) {
    throw std::invalid_argument("Invalid arguments provided");
  }

  interval_.store(interval, std::memory_order_relaxed);
  targetDelay_.store(delay, std::memory_order_relaxed);
}
// Returns a consistent snapshot of the current parameters.
// Enforces the invariant targetDelay <= interval: a violation can occur
// transiently if another thread updated the values through setOptions()
// between the two loads below.
const Codel::Options Codel::getOptions() const {
  const auto interval = interval_.load(std::memory_order_relaxed);
  const auto delay =
      std::min(targetDelay_.load(std::memory_order_relaxed), interval);
  return Codel::Options().setTargetDelay(delay).setInterval(interval);
}
// Returns the minimum queueing delay observed during the current interval.
nanoseconds Codel::getMinDelay() {
  return nanoseconds(codelMinDelayNs_);
}
// Requests delayed beyond twice the target delay are shed while overloaded
// (see the comment in overloaded()).
milliseconds Codel::getSloughTimeout(milliseconds delay) const {
  return delay * 2;
}
} // namespace folly
| apache-2.0 |
sghill/gocd | plugin-infra/go-plugin-domain/src/com/thoughtworks/go/plugin/domain/elastic/ElasticAgentPluginInfo.java | 2958 | /*
* Copyright 2017 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thoughtworks.go.plugin.domain.elastic;
import com.thoughtworks.go.plugin.api.info.PluginDescriptor;
import com.thoughtworks.go.plugin.domain.common.Image;
import com.thoughtworks.go.plugin.domain.common.PluggableInstanceSettings;
import com.thoughtworks.go.plugin.domain.common.PluginConstants;
import com.thoughtworks.go.plugin.domain.common.PluginInfo;
public class ElasticAgentPluginInfo extends PluginInfo {
private final PluggableInstanceSettings profileSettings;
private final Image image;
private final Capabilities capabilities;
public ElasticAgentPluginInfo(PluginDescriptor descriptor, PluggableInstanceSettings profileSettings, Image image,
PluggableInstanceSettings pluginSettings, Capabilities capabilities) {
super(descriptor, PluginConstants.ELASTIC_AGENT_EXTENSION, pluginSettings);
this.profileSettings = profileSettings;
this.image = image;
this.capabilities = capabilities;
}
public PluggableInstanceSettings getProfileSettings() {
return profileSettings;
}
public Image getImage() {
return image;
}
public Capabilities getCapabilities() {
return capabilities;
}
public boolean supportsStatusReport() {
return this.capabilities != null ? this.capabilities.supportsStatusReport() : false;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
if (!super.equals(o)) return false;
ElasticAgentPluginInfo that = (ElasticAgentPluginInfo) o;
if (profileSettings != null ? !profileSettings.equals(that.profileSettings) : that.profileSettings != null)
return false;
if (image != null ? !image.equals(that.image) : that.image != null) return false;
return capabilities != null ? capabilities.equals(that.capabilities) : that.capabilities == null;
}
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + (profileSettings != null ? profileSettings.hashCode() : 0);
result = 31 * result + (image != null ? image.hashCode() : 0);
result = 31 * result + (capabilities != null ? capabilities.hashCode() : 0);
return result;
}
}
| apache-2.0 |
erikmack/lxd | lxd/main_forkstart.go | 1866 | package main
import (
"fmt"
"os"
"syscall"
"github.com/spf13/cobra"
"gopkg.in/lxc/go-lxc.v2"
"github.com/lxc/lxd/shared"
)
// cmdForkstart implements the hidden "forkstart" subcommand, spawned by the
// LXD daemon to start a container in a separate process.
type cmdForkstart struct {
	global *cmdGlobal
}
// Command builds the cobra definition for the hidden forkstart subcommand.
func (c *cmdForkstart) Command() *cobra.Command {
	// Main subcommand
	cmd := &cobra.Command{}
	cmd.Use = "forkstart <container name> <containers path> <config>"
	cmd.Short = "Start the container"
	cmd.Long = `Description:
Start the container
This internal command is used to start the container as a separate
process.
`
	cmd.RunE = c.Run
	// Internal-only command: keep it out of help listings.
	cmd.Hidden = true

	return cmd
}
// Run loads the container's LXC config and starts it, redirecting this
// process's stdout/stderr to a per-container forkstart.log. Must run as root.
func (c *cmdForkstart) Run(cmd *cobra.Command, args []string) error {
	// Sanity checks
	if len(args) != 3 {
		cmd.Help()

		if len(args) == 0 {
			return nil
		}

		return fmt.Errorf("Missing required arguments")
	}

	// Only root should run this
	if os.Geteuid() != 0 {
		return fmt.Errorf("This must be run as root")
	}

	name := args[0]
	lxcpath := args[1]
	configPath := args[2]

	d, err := lxc.NewContainer(name, lxcpath)
	if err != nil {
		return fmt.Errorf("Error initializing container for start: %q", err)
	}

	err = d.LoadConfigFile(configPath)
	if err != nil {
		return fmt.Errorf("Error opening startup config file: %q", err)
	}

	/* due to https://github.com/golang/go/issues/13155 and the
	 * CollectOutput call we make for the forkstart process, we need to
	 * close our stdin/stdout/stderr here. Collecting some of the logs is
	 * better than collecting no logs, though.
	 */
	os.Stdin.Close()
	os.Stderr.Close()
	os.Stdout.Close()

	// Redirect stdout and stderr to a log file
	logPath := shared.LogPath(name, "forkstart.log")
	if shared.PathExists(logPath) {
		os.Remove(logPath)
	}
	logFile, err := os.OpenFile(logPath, os.O_WRONLY|os.O_CREATE|os.O_SYNC, 0644)
	if err == nil {
		// Point fds 1 (stdout) and 2 (stderr) at the log file; must happen
		// after the Close() calls above so the fd numbers are free.
		syscall.Dup3(int(logFile.Fd()), 1, 0)
		syscall.Dup3(int(logFile.Fd()), 2, 0)
	}

	return d.Start()
}
| apache-2.0 |
ferdinandhuebner/k8s-dns-sky | src/main/scala/k8sdnssky/DnsRecordHandler.scala | 7295 | package k8sdnssky
import akka.actor.{Actor, ActorRef, Props}
import akka.event.Logging
import io.fabric8.kubernetes.api.model.HasMetadata
import io.fabric8.kubernetes.client.KubernetesClient
import k8sdnssky.AppProperties.DnsProperties
import k8sdnssky.DnsRecordHandler.Protocol.{Refresh, Release, Update}
import k8sdnssky.EventDispatcher.Protocol.{DeletedRecordEvent, FailureForRecordEvent, NewRecordEvent, RefreshedRecordsEvent}
import k8sdnssky.KubernetesConversions.HasMetadataConvenience
import k8sdnssky.SkyDnsRepository._
import k8sdnssky.actuator.DnsInfoContributor.Protocol.{GetInfo, GetInfoResponse}
import scala.language.postfixOps
object DnsRecordHandler {

  // Messages understood by DnsRecordHandler actors.
  object Protocol {
    // Delete all DNS records owned by the handled resource, then stop.
    object Release
    // Re-sync the DNS records with the current resource state.
    object Refresh
    // The watched Kubernetes resource changed.
    case class Update(resource: HasMetadata)
  }

  /** Props factory for a handler bound to one Kubernetes resource. */
  def props(k8s: KubernetesClient, sky: SkyDnsRepository, eventDispatcher: ActorRef, resource: HasMetadata, dnsProperties: DnsProperties): Props = {
    Props(new DnsRecordHandler(k8s, sky, eventDispatcher, resource, dnsProperties))
  }
}
/**
 * Actor that keeps SkyDNS (etcd) records in sync with one Kubernetes
 * resource. On construction it syncs once, subscribes to info requests and
 * schedules a refresh every minute; `Release` deletes the records and stops
 * the actor.
 */
class DnsRecordHandler(
    private val k8s: KubernetesClient,
    private val sky: SkyDnsRepository,
    private val eventDispatcher: ActorRef,
    private val initialResource: HasMetadata,
    private val dnsProperties: DnsProperties) extends Actor {

  private val log = Logging(context.system, this)
  // Hostname regex policies: whitelist must match, blacklist must not.
  private val whitelist = dnsProperties.whitelistAsList
  private val blacklist = dnsProperties.blacklistAsList

  log.debug(s"Record handler for ${initialResource.asString} with hostnames ${initialResource.hostnames}")
  context.system.eventStream.subscribe(self, classOf[GetInfo])
  // Initial sync before the first scheduled refresh.
  updateRecords(initialResource, None)

  import scala.concurrent.ExecutionContext.Implicits.global
  import scala.concurrent.duration._
  import scala.languageFeature.postfixOps

  // Periodic re-sync; cancelled on Release.
  private val schedule = context.system.scheduler.schedule(1 minute, 1 minute, self, Refresh)
  context.become(handle(initialResource))

  // Maps a repository failure to the event published towards Kubernetes.
  private def toFailureEvent(resource: HasMetadata, hostname: String, kind: FailureKind): FailureForRecordEvent = {
    kind match {
      case UnableToPut =>
        FailureForRecordEvent(resource, hostname, "default", "UnableToPutInEtcd",
          s"Cannot put DNS record for $hostname in etcd")
      case UnableToDelete =>
        FailureForRecordEvent(resource, hostname, "default", "UnableToDeleteInEtcd",
          s"Cannot delete DNS record for $hostname in etcd")
      case inUse: RecordInUse =>
        if (inUse.record != null) {
          FailureForRecordEvent(resource, hostname, inUse.record, "RecordInUse",
            s"Record ${inUse.record} for hostname $hostname is in use by ${inUse.inUseBy}")
        } else {
          FailureForRecordEvent(resource, hostname, "default", "RecordInUse",
            s"Record for hostname $hostname is in use by ${inUse.inUseBy}")
        }
      case invalidModel: InvalidModel =>
        if (invalidModel.record != null) {
          FailureForRecordEvent(resource, hostname, invalidModel.record, "InvalidModel",
            s"Record ${invalidModel.record} for $hostname has an invalid model in etcd")
        } else {
          // BUG FIX: this branch previously reported reason "RecordInUse",
          // a copy-paste from the case above; the failure is an InvalidModel.
          FailureForRecordEvent(resource, hostname, "default", "InvalidModel",
            s"Record for $hostname has an invalid model in etcd")
        }
    }
  }

  // Puts the resource's ingress addresses for one hostname and publishes
  // new/deleted/failure events for everything the repository reports.
  private def updateHostname(resource: HasMetadata, hostname: String): Unit = {
    val putResponse = sky.put(hostname, resource.loadBalancerIngress.toList,
      resource.getMetadata.getSelfLink, 300)
    if (putResponse.newRecords.nonEmpty) {
      putResponse.newRecords.foreach(record => {
        eventDispatcher ! NewRecordEvent(resource, hostname, record)
      })
      log.debug(s"New records for $hostname: ${putResponse.newRecords.mkString(", ")}")
    }
    if (putResponse.deletedRecords.nonEmpty) {
      putResponse.deletedRecords.foreach(record => {
        eventDispatcher ! DeletedRecordEvent(resource, hostname, record)
      })
      log.debug(s"Deleted records for $hostname: ${putResponse.deletedRecords.mkString(", ")}")
    }
    putResponse.failures.foreach(f => {
      eventDispatcher ! toFailureEvent(resource, hostname, f.kind)
    })
  }

  // Deletes the records of hostnames that disappeared between the previous
  // and the current version of the resource.
  private def handleResourceUpdate(resource: HasMetadata, previous: HasMetadata): Unit = {
    val toDelete = previous.hostnames.diff(resource.hostnames)
    if (toDelete.nonEmpty) {
      log.debug(s"Removing hosts for ${resource.asString}: $toDelete")
      toDelete.foreach(hostname => {
        val records = resource.loadBalancerIngress.toList
        val deleteFailures = sky.delete(hostname, records)
        records.foreach(record => {
          if (!deleteFailures.contains(record)) {
            eventDispatcher ! DeletedRecordEvent(resource, hostname, record)
          }
        })
        deleteFailures.foreach { case (record, failure) =>
          eventDispatcher ! toFailureEvent(resource, hostname, failure.kind)
          log.error(failure, s"Unable to delete $record for ${resource.asString}")
        }
      })
    }
  }

  // Applies the white/blacklist policy per hostname, syncing the ones that
  // pass and publishing policy failures for the ones that do not; then
  // removes records for hostnames dropped since the previous version.
  private def updateRecords(resource: HasMetadata, previousResource: Option[HasMetadata]): Unit = {
    resource.hostnames.foreach(hostname => {
      val passesWhitelist = if (whitelist.nonEmpty) {
        whitelist.exists(elem => hostname.matches(elem))
      } else {
        true
      }
      val passesBlacklist = if (blacklist.nonEmpty) {
        !blacklist.exists(elem => hostname.matches(elem))
      } else {
        true
      }
      if (!passesWhitelist) {
        val msg = s"Hostname $hostname does not pass DNS policy (not whitelisted)"
        eventDispatcher ! FailureForRecordEvent(resource, hostname, null, "policyfailure", msg)
      } else if (!passesBlacklist) {
        val msg = s"Hostname $hostname does not pass DNS policy (hostname is blacklisted)"
        eventDispatcher ! FailureForRecordEvent(resource, hostname, null, "policyfailure", msg)
      } else {
        updateHostname(resource, hostname)
      }
    })
    previousResource.foreach(previous => {
      handleResourceUpdate(resource, previous)
    })
  }

  // Main behavior, parameterized with the current resource version.
  def handle(resource: HasMetadata): Receive = {
    case Refresh =>
      updateRecords(resource, None)
    case Release =>
      schedule.cancel()
      log.debug(s"Releasing records for ${resource.asString}")
      resource.hostnames.foreach(hostname => {
        val records = resource.loadBalancerIngress.toList
        val deleteFailures = sky.delete(hostname, records)
        records.foreach(record => {
          if (!deleteFailures.contains(record)) {
            eventDispatcher ! DeletedRecordEvent(resource, hostname, record)
          }
        })
        deleteFailures.foreach(f => {
          eventDispatcher ! toFailureEvent(resource, hostname, f._2.kind)
          log.error(f._2, s"Unable to delete record for hostname ${f._1}")
        })
      })
      log.debug(s"Records for ${resource.asString} released; shutting down")
      context.stop(self)
    case Update(newResource) =>
      updateRecords(newResource, Some(resource))
      context.become(handle(newResource))
    case GetInfo(target) =>
      target ! GetInfoResponse(resource)
    case x =>
      log.warning("Unhandled message: " + x)
      unhandled(x)
  }

  // Only reached before the constructor's context.become; kept defensive.
  override def receive: Receive = {
    case x =>
      log.warning("Unhandled message: " + x)
      unhandled(x)
  }
}
| apache-2.0 |
leapframework/framework | base/core/src/main/java/leap/core/schedule/DefaultSchedulerManager.java | 1595 | /*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package leap.core.schedule;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import leap.lang.Args;
import leap.lang.Disposable;
import leap.lang.Try;
/**
 * Default {@code SchedulerManager}: hands out fixed-thread-pool schedulers and
 * remembers every one it created so that {@link #dispose()} can shut them all
 * down when the application terminates.
 */
public class DefaultSchedulerManager implements SchedulerManager, Disposable {

    // Copy-on-write is fine here: schedulers are created rarely and the set is
    // only iterated once, at disposal time.
    protected final Set<FixedThreadPoolScheduler> fixedThreadPoolSchedulers = new CopyOnWriteArraySet<>();

    /**
     * Creates (and tracks) a new fixed-thread-pool scheduler.
     *
     * @param name         non-empty scheduler name
     * @param corePoolSize size of the backing thread pool
     * @return the newly created scheduler
     */
    @Override
    public Scheduler newFixedThreadPoolScheduler(String name, int corePoolSize) {
        Args.notEmpty(name, "name");
        final FixedThreadPoolScheduler created = new FixedThreadPoolScheduler(name, corePoolSize);
        fixedThreadPoolSchedulers.add(created);
        return created;
    }

    /**
     * Disposes every scheduler created by this manager, swallowing individual
     * failures so one broken scheduler cannot prevent the rest from stopping.
     */
    @Override
    public void dispose() throws Throwable {
        for (FixedThreadPoolScheduler scheduler : fixedThreadPoolSchedulers) {
            Try.catchAll(scheduler::dispose);
        }
    }
}
| apache-2.0 |
Utdanningsdirektoratet/PAS2-Public | ExampleClients/java/eksamen/udir/types/clientidentification/TransformsType.java | 2226 | //
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.4-2
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2015.05.18 at 04:18:21 PM CEST
//
package udir.types.clientidentification;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlType;
/**
* <p>Java class for TransformsType complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="TransformsType">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element ref="{http://www.w3.org/2000/09/xmldsig#}Transform" maxOccurs="unbounded"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "TransformsType", propOrder = {
    "transform"
})
public class TransformsType {

    // NOTE: this class was generated by JAXB from the XML Digital Signature
    // schema (see the file header); code changes will be lost on regeneration,
    // so keep hand edits to comments only.
    @XmlElement(name = "Transform", required = true)
    protected List<TransformType> transform;

    /**
     * Gets the value of the transform property.
     *
     * <p>This accessor returns a reference to the <em>live</em> list, not a
     * snapshot: any modification made to the returned list is reflected in
     * this object, which is why there is no {@code setTransform} method.
     * For example, to add a new item: {@code getTransform().add(newItem);}
     *
     * <p>Objects of type {@link TransformType} are allowed in the list.
     */
    public List<TransformType> getTransform() {
        if (transform == null) {
            // Lazily create the live list so callers can always mutate it.
            transform = new ArrayList<TransformType>();
        }
        return this.transform;
    }

}
| apache-2.0 |
quarkusio/quarkus | integration-tests/devtools/src/test/java/io/quarkus/platform/catalog/ExtensionProcessorTest.java | 3921 | package io.quarkus.platform.catalog;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.Optional;
import org.junit.jupiter.api.Test;
import io.quarkus.devtools.testing.PlatformAwareTestBase;
import io.quarkus.maven.ArtifactKey;
import io.quarkus.platform.catalog.processor.ExtensionProcessor;
import io.quarkus.registry.catalog.Extension;
import io.quarkus.registry.catalog.ExtensionCatalog;
public class ExtensionProcessorTest extends PlatformAwareTestBase {

    // Verifies that all presentation metadata (tags, short name, category,
    // codestart info, keywords, guide link) resolves for the RESTEasy
    // extension in the current platform catalog.
    @Test
    void testRESTEasyMetadata() {
        final ExtensionCatalog catalog = getExtensionsCatalog();
        final Extension resteasy = findExtension(catalog, "quarkus-resteasy");

        final ExtensionProcessor extensionProcessor = ExtensionProcessor.of(resteasy);
        assertThat(extensionProcessor.getTags()).contains("code");
        assertThat(extensionProcessor.getShortName()).contains("jax-rs");
        assertThat(extensionProcessor.getCategories()).contains("web");
        assertThat(extensionProcessor.getCodestartKind()).isEqualTo(ExtensionProcessor.CodestartKind.EXTENSION_CODESTART);
        assertThat(extensionProcessor.getCodestartName()).isEqualTo("resteasy");
        // The codestart artifact is versioned with the platform's Quarkus core.
        assertThat(extensionProcessor.getCodestartArtifact())
                .isEqualTo("io.quarkus:quarkus-project-core-extension-codestarts::jar:" + getQuarkusCoreVersion());
        assertThat(extensionProcessor.getCodestartLanguages()).contains("java", "kotlin", "scala");
        assertThat(extensionProcessor.getKeywords()).contains("resteasy", "jaxrs", "web", "rest");
        assertThat(extensionProcessor.getExtendedKeywords()).contains("resteasy", "jaxrs", "web", "rest");
        assertThat(extensionProcessor.getGuide()).isEqualTo("https://quarkus.io/guides/rest-json");
    }

    // The Kotlin extension must be managed by the main Quarkus BOM.
    @Test
    void testGetBom() {
        final ExtensionCatalog catalog = getExtensionsCatalog();
        final Extension kotlin = findExtension(catalog, "quarkus-kotlin");
        assertThat(ExtensionProcessor.getBom(kotlin).get().getKey())
                .isEqualTo(ArtifactKey.fromString("io.quarkus:quarkus-bom::pom"));
    }

    // Since Kotlin is managed by the Quarkus BOM, no foreign BOM is expected.
    @Test
    void testGetNonQuarkusBomOnly() {
        final ExtensionCatalog catalog = getExtensionsCatalog();
        final Extension kotlin = findExtension(catalog, "quarkus-kotlin");
        assertThat(ExtensionProcessor.getNonQuarkusBomOnly(kotlin)).isEmpty();
    }

    // Same metadata checks as above, but for a CORE-kind codestart extension.
    @Test
    void testKotlinMetadata() {
        final ExtensionCatalog catalog = getExtensionsCatalog();
        final Extension kotlin = findExtension(catalog, "quarkus-kotlin");

        final ExtensionProcessor extensionProcessor = ExtensionProcessor.of(kotlin);
        assertThat(extensionProcessor.getTags()).contains("preview");
        assertThat(extensionProcessor.getShortName()).contains("Kotlin");
        assertThat(extensionProcessor.getCategories()).contains("alt-languages");
        assertThat(extensionProcessor.getCodestartKind()).isEqualTo(ExtensionProcessor.CodestartKind.CORE);
        assertThat(extensionProcessor.getCodestartName()).isEqualTo("kotlin");
        assertThat(extensionProcessor.getCodestartLanguages()).isEmpty();
        assertThat(extensionProcessor.getCodestartArtifact())
                .isEqualTo("io.quarkus:quarkus-project-core-extension-codestarts::jar:" + getQuarkusCoreVersion());
        assertThat(extensionProcessor.getKeywords()).contains("kotlin");
        assertThat(extensionProcessor.getExtendedKeywords()).contains("kotlin", "quarkus-kotlin", "services", "write");
        assertThat(extensionProcessor.getGuide()).isEqualTo("https://quarkus.io/guides/kotlin");
    }

    // Looks up an extension by artifactId, failing the test if it is absent.
    private Extension findExtension(ExtensionCatalog catalog, String id) {
        final Optional<Extension> first = catalog.getExtensions().stream()
                .filter(e -> e.getArtifact().getArtifactId().equals(id)).findFirst();
        assertThat(first).isPresent();
        return first.get();
    }
}
| apache-2.0 |
Nickname0806/Test_Q4 | java/org/apache/catalina/util/ExtensionValidator.java | 14558 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.catalina.util;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Locale;
import java.util.StringTokenizer;
import java.util.jar.JarInputStream;
import java.util.jar.Manifest;
import org.apache.catalina.Context;
import org.apache.catalina.WebResource;
import org.apache.catalina.WebResourceRoot;
import org.apache.juli.logging.Log;
import org.apache.juli.logging.LogFactory;
import org.apache.tomcat.util.res.StringManager;
/**
* Ensures that all extension dependencies are resolved for a WEB application
* are met. This class builds a master list of extensions available to an
* application and then validates those extensions.
*
* See http://docs.oracle.com/javase/1.4.2/docs/guide/extensions/spec.html
* for a detailed explanation of the extension mechanism in Java.
*
* @author Greg Murray
* @author Justyna Horwat
*/
public final class ExtensionValidator {

    private static final Log log = LogFactory.getLog(ExtensionValidator.class);

    /**
     * The string resources for this package.
     */
    private static final StringManager sm =
            StringManager.getManager("org.apache.catalina.util");

    // Extensions provided by the container (class path + extension dirs).
    // Built lazily on first validation; volatile because validation is the
    // only writer but readers may race with initialization.
    private static volatile ArrayList<Extension> containerAvailableExtensions = null;

    // Manifests of all container-level JARs, discovered at class-load time.
    private static final ArrayList<ManifestResource> containerManifestResources =
            new ArrayList<>();

    // ----------------------------------------------------- Static Initializer

    /**
     * Loads the container-level extensions available to all web applications:
     * every JAR on the system class path plus every JAR in the directories
     * named by the "java.ext.dirs" system property.
     */
    static {
        // check for container level optional packages
        String systemClasspath = System.getProperty("java.class.path");

        StringTokenizer strTok = new StringTokenizer(systemClasspath,
                File.pathSeparator);

        // build a list of jar files in the classpath
        while (strTok.hasMoreTokens()) {
            String classpathItem = strTok.nextToken();
            if (classpathItem.toLowerCase(Locale.ENGLISH).endsWith(".jar")) {
                File item = new File(classpathItem);
                if (item.isFile()) {
                    try {
                        addSystemResource(item);
                    } catch (IOException e) {
                        log.error(sm.getString(
                                "extensionValidator.failload", item), e);
                    }
                }
            }
        }

        // add specified folders to the list
        // NOTE(review): "java.ext.dirs" is unset on Java 9+, making this a
        // no-op there.
        addFolderList("java.ext.dirs");
    }

    // --------------------------------------------------------- Public Methods

    /**
     * Runtime validation of a Web Application.
     *
     * Locates the application's /META-INF/MANIFEST.MF and the MANIFEST.MF of
     * every JAR on the application class loader, wraps them in
     * {@code ManifestResource} objects and validates their extension
     * dependencies.
     *
     * @param resources The resources configured for this Web Application
     * @param context   The context of the application
     * @return true if all required extensions are satisfied
     * @throws IOException Error reading resources needed for validation
     */
    public static synchronized boolean validateApplication(
            WebResourceRoot resources, Context context) throws IOException {

        String appName = context.getName();
        ArrayList<ManifestResource> appManifestResources = new ArrayList<>();

        // Web application manifest
        WebResource resource = resources.getResource("/META-INF/MANIFEST.MF");
        if (resource.isFile()) {
            try (InputStream inputStream = resource.getInputStream()) {
                Manifest manifest = new Manifest(inputStream);
                ManifestResource mre = new ManifestResource(
                        sm.getString("extensionValidator.web-application-manifest"),
                        manifest, ManifestResource.WAR);
                appManifestResources.add(mre);
            }
        }

        // Web application library manifests
        WebResource[] manifestResources =
                resources.getClassLoaderResources("/META-INF/MANIFEST.MF");
        for (WebResource manifestResource : manifestResources) {
            if (manifestResource.isFile()) {
                // Primarily used for error reporting
                String jarName = manifestResource.getURL().toExternalForm();
                Manifest jmanifest = manifestResource.getManifest();
                if (jmanifest != null) {
                    appManifestResources.add(new ManifestResource(jarName,
                            jmanifest, ManifestResource.APPLICATION));
                }
            }
        }

        return validateManifestResources(appName, appManifestResources);
    }

    /**
     * Checks to see if the given system JAR file contains a MANIFEST, and adds
     * it to the container's manifest resources.
     *
     * @param jarFile The system JAR whose manifest to add
     * @throws IOException Error reading JAR file
     */
    public static void addSystemResource(File jarFile) throws IOException {
        try (InputStream is = new FileInputStream(jarFile)) {
            Manifest manifest = getManifest(is);
            if (manifest != null) {
                containerManifestResources.add(new ManifestResource(
                        jarFile.getAbsolutePath(), manifest,
                        ManifestResource.SYSTEM));
            }
        }
    }

    // -------------------------------------------------------- Private Methods

    /**
     * Validates a list of {@code ManifestResource} objects for the named
     * application. Returns {@code false} if any required extension declared by
     * any resource is satisfied neither by the application itself nor by the
     * container.
     *
     * @param appName   name used in error messages
     * @param resources manifest resources to validate
     * @return true if all extension requirements are met
     */
    private static boolean validateManifestResources(String appName,
            ArrayList<ManifestResource> resources) {
        boolean passes = true;
        int failureCount = 0;
        // Built lazily: only if some resource actually declares a requirement.
        ArrayList<Extension> availableExtensions = null;

        for (ManifestResource mre : resources) {
            ArrayList<Extension> requiredList = mre.getRequiredExtensions();
            if (requiredList == null) {
                continue;
            }

            // build the list of available extensions if necessary
            if (availableExtensions == null) {
                availableExtensions = buildAvailableExtensionsList(resources);
            }

            // load the container level resource map if it has not been built yet
            if (containerAvailableExtensions == null) {
                containerAvailableExtensions =
                        buildAvailableExtensionsList(containerManifestResources);
            }

            for (Extension requiredExt : requiredList) {
                // Check the application's own extensions first, then the
                // container's (short-circuits on the first match).
                boolean found = isFulfilled(availableExtensions, requiredExt)
                        || isFulfilled(containerAvailableExtensions, requiredExt);
                if (!found) {
                    // Failure
                    log.info(sm.getString(
                            "extensionValidator.extension-not-found-error",
                            appName, mre.getResourceName(),
                            requiredExt.getExtensionName()));
                    passes = false;
                    failureCount++;
                }
            }
        }

        if (!passes) {
            log.info(sm.getString(
                    "extensionValidator.extension-validation-error", appName,
                    failureCount + ""));
        }

        return passes;
    }

    /*
     * Returns true if any extension in the (possibly null) list is compatible
     * with the required extension, marking the requirement fulfilled on a
     * match. Extracted to remove the two copy-pasted search loops the original
     * used for the application and container lists.
     */
    private static boolean isFulfilled(ArrayList<Extension> availableList,
            Extension requiredExt) {
        if (availableList == null) {
            return false;
        }
        for (Extension targetExt : availableList) {
            if (targetExt.isCompatibleWith(requiredExt)) {
                requiredExt.setFulfilled(true);
                return true;
            }
        }
        return false;
    }

    /*
     * Collects every available extension declared by the given
     * ManifestResource objects into one list, so the list is built only once
     * per validation pass. Returns null (not an empty list) when no resource
     * declares any available extension -- callers rely on that.
     */
    private static ArrayList<Extension> buildAvailableExtensionsList(
            ArrayList<ManifestResource> resources) {

        ArrayList<Extension> availableList = null;

        for (ManifestResource mre : resources) {
            ArrayList<Extension> list = mre.getAvailableExtensions();
            if (list != null) {
                if (availableList == null) {
                    availableList = new ArrayList<>();
                }
                // The original special-cased the first element for no reason;
                // addAll covers both branches identically.
                availableList.addAll(list);
            }
        }

        return availableList;
    }

    /**
     * Return the Manifest from a jar file or war file.
     *
     * @param inStream Input stream to a WAR or JAR file
     * @return The WAR's or JAR's manifest, or null if it has none
     */
    private static Manifest getManifest(InputStream inStream) throws IOException {
        try (JarInputStream jin = new JarInputStream(inStream)) {
            return jin.getManifest();
        }
    }

    /**
     * Adds the manifests of all JARs found in the directories named by the
     * given path-separated system property to the container resources.
     */
    private static void addFolderList(String property) {
        // get the files in the extensions directory
        String extensionsDir = System.getProperty(property);
        if (extensionsDir != null) {
            StringTokenizer extensionsTok =
                    new StringTokenizer(extensionsDir, File.pathSeparator);
            while (extensionsTok.hasMoreTokens()) {
                File targetDir = new File(extensionsTok.nextToken());
                if (!targetDir.isDirectory()) {
                    continue;
                }
                File[] files = targetDir.listFiles();
                if (files == null) {
                    continue;
                }
                for (File file : files) {
                    if (file.getName().toLowerCase(Locale.ENGLISH).endsWith(".jar")
                            && file.isFile()) {
                        try {
                            addSystemResource(file);
                        } catch (IOException e) {
                            log.error(sm.getString(
                                    "extensionValidator.failload", file), e);
                        }
                    }
                }
            }
        }
    }
}
| apache-2.0 |
CenturyLinkCloud/mdw | mdw-common/src/com/centurylink/mdw/common/translator/impl/StringTranslator.java | 252 | package com.centurylink.mdw.common.translator.impl;
public class StringTranslator extends BaseTranslator {
public Object toObject(String str){
return str;
}
public String toString(Object obj) {
return (String)obj;
}
} | apache-2.0 |
goeckeler/jcommons.lang | src/test/java/org/jcommons/lang/number/NumberUtilsTest.java | 3872 | package org.jcommons.lang.number;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.lessThan;
import static org.jcommons.lang.number.NumberUtils.compare;
import static org.jcommons.lang.number.NumberUtils.isNotNull;
import static org.jcommons.lang.number.NumberUtils.isNull;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import java.math.BigDecimal;
import org.junit.Test;
/** check that number utilities work as expected */
public class NumberUtilsTest
{
/** check if we can compare two numbers in a null safe manner */
@Test
public void testCompare()
{
// check for equal cases
assertThat(compare(null, null), equalTo(0));
assertThat(compare(0, 0), equalTo(0));
assertThat(compare(1, 1), equalTo(0));
// we compare values, not objects
assertThat(compare(new BigDecimal(1), BigDecimal.ONE), equalTo(0));
assertThat(compare(0, null), greaterThan(0));
assertThat(compare(null, 0), lessThan(0));
assertThat(compare(1, 0), greaterThan(0));
assertThat(compare(1, -1), greaterThan(0));
}
/** check if two numbers are equal in a null safe manner */
@Test
public void testEquals()
{
assertTrue(NumberUtils.equals(null, null));
assertTrue(NumberUtils.equals(0, 0));
assertTrue(NumberUtils.equals(1, 1));
// we check for value equality, not for object equality
assertTrue(NumberUtils.equals(new BigDecimal(1), BigDecimal.ONE));
// attention, a long is not a big decimal!
assertFalse(NumberUtils.equals(new Long(1), BigDecimal.ONE));
assertFalse(NumberUtils.equals(0, null));
assertFalse(NumberUtils.equals(null, 0));
assertFalse(NumberUtils.equals(1, 0));
assertFalse(NumberUtils.equals(1, -1));
}
/** check if any number that is null or whose value is zero is regarded as a null */
@Test
public void testIsNull()
{
// positive cases
assertTrue(isNull(null));
assertTrue(isNull(0L));
assertTrue(isNull(0.0f));
assertTrue(isNull(-0.0f));
assertTrue(isNull(new Long(0)));
assertTrue(isNull(new Integer(0)));
assertTrue(isNull(new Float(0.0)));
assertTrue(isNull(new Double(0.0)));
assertTrue(isNull(BigDecimal.ZERO));
// negative cases
assertFalse(isNull(2));
assertFalse(isNull(1L));
assertFalse(isNull(1.0f));
assertFalse(isNull(-1.0f));
assertFalse(isNull(new Long(1)));
assertFalse(isNull(new Integer(1)));
assertFalse(isNull(new Float(1.0)));
assertFalse(isNull(new Double(1.0)));
assertFalse(isNull(BigDecimal.ONE));
// attention - isNull considers only integer parts!
assertTrue(isNull(new Float(0.5f)));
assertTrue(isNull(new Double(0.9f)));
}
/** check if any number that exists or whose value is not zero is regarded as not null */
@Test
public void testIsNotNull()
{
// positive cases
assertFalse(isNotNull(null));
assertFalse(isNotNull(0L));
assertFalse(isNotNull(0.0f));
assertFalse(isNotNull(-0.0f));
assertFalse(isNotNull(new Long(0)));
assertFalse(isNotNull(new Integer(0)));
assertFalse(isNotNull(new Float(0.0)));
assertFalse(isNotNull(new Double(0.0)));
assertFalse(isNotNull(BigDecimal.ZERO));
// negative cases
assertTrue(isNotNull(2));
assertTrue(isNotNull(1L));
assertTrue(isNotNull(1.0f));
assertTrue(isNotNull(-1.0f));
assertTrue(isNotNull(new Long(1)));
assertTrue(isNotNull(new Integer(1)));
assertTrue(isNotNull(new Float(1.0)));
assertTrue(isNotNull(new Double(1.0)));
assertTrue(isNotNull(BigDecimal.ONE));
// attention - isNotNull considers only integer parts!
assertFalse(isNotNull(new Float(0.5f)));
assertFalse(isNotNull(new Double(0.9f)));
}
}
| apache-2.0 |
danielmarbach/SwissChocolate.Factory | Facility.Web/App_Start/BusConfig.cs | 1770 | using System;
using System.Collections.Generic;
using System.Web.Mvc;
using Facility.Web.Controllers;
using NServiceBus;
using NServiceBus.Logging;
namespace Facility.Web
{
    /// <summary>
    /// Creates the send-only NServiceBus endpoint for the web site and installs
    /// a dependency resolver that can build controllers which require the bus.
    /// </summary>
    public static class BusConfig
    {
        public static ISendOnlyBus Start()
        {
            // Keep NServiceBus logging quiet except for errors.
            DefaultFactory logFactory = LogManager.Use<DefaultFactory>();
            logFactory.Level(LogLevel.Error);

            var busConfiguration = new BusConfiguration();
            busConfiguration.EndpointName("Chocolate.Facility.Web");
            busConfiguration.UseTransport<MsmqTransport>();
            busConfiguration.UsePersistence<InMemoryPersistence>();

            ISendOnlyBus bus = Bus.CreateSendOnly(busConfiguration);

            // Wrap the existing resolver so all other services keep resolving
            // exactly as before.
            IDependencyResolver previousResolver = DependencyResolver.Current;
            DependencyResolver.SetResolver(new SimpleTypeResolver(previousResolver, bus));
            return bus;
        }

        /// <summary>
        /// Delegating resolver that special-cases <see cref="HomeController"/>
        /// (which needs the bus injected) and forwards everything else to the
        /// resolver it wraps.
        /// </summary>
        private class SimpleTypeResolver : IDependencyResolver
        {
            private readonly IDependencyResolver dependencyResolver;
            private readonly ISendOnlyBus bus;

            public SimpleTypeResolver(IDependencyResolver defaultResolver, ISendOnlyBus bus)
            {
                dependencyResolver = defaultResolver;
                this.bus = bus;
            }

            public object GetService(Type serviceType)
            {
                return serviceType == typeof(HomeController)
                    ? new HomeController(bus)
                    : dependencyResolver.GetService(serviceType);
            }

            public IEnumerable<object> GetServices(Type serviceType)
            {
                return dependencyResolver.GetServices(serviceType);
            }
        }
    }
}
NativeScript/nativescript-cli | lib/common/commands/analytics.ts | 2868 | import { IOptions } from "../../declarations";
import { ICommandParameter, ICommand } from "../definitions/commands";
import { IErrors, IAnalyticsService } from "../declarations";
import { injector } from "../yok";
/**
 * Optional command argument for the analytics commands: accepts only
 * "enable", "disable", "status" (case-insensitive) or no argument at all,
 * and aborts with help output for anything else.
 */
export class AnalyticsCommandParameter implements ICommandParameter {
	constructor(private $errors: IErrors) {}
	mandatory = false;
	async validate(validationValue: string): Promise<boolean> {
		const normalized = (validationValue || "").toLowerCase();
		if (
			normalized === "enable" ||
			normalized === "disable" ||
			normalized === "status" ||
			normalized === ""
		) {
			return true;
		}
		// failWithHelp terminates command processing for invalid input.
		this.$errors.failWithHelp(
			`The value '${validationValue}' is not valid. Valid values are 'enable', 'disable' and 'status'.`
		);
	}
}
/**
 * Base implementation shared by the `usage-reporting` and `error-reporting`
 * commands: interprets an optional "enable" | "disable" | "status" argument
 * and applies it to the single analytics setting named in the constructor.
 */
class AnalyticsCommand implements ICommand {
	constructor(
		protected $analyticsService: IAnalyticsService,
		private $logger: ILogger,
		private $errors: IErrors,
		private $options: IOptions,
		// Key of the setting this command toggles/reports.
		private settingName: string,
		// Human-friendly setting name used in console output.
		private humanReadableSettingName: string
	) {}

	// Validates the optional enable/disable/status argument.
	public allowedParameters = [new AnalyticsCommandParameter(this.$errors)];

	// Toggling analytics must itself never be tracked.
	public disableAnalytics = true;

	/**
	 * Enables/disables the setting, or prints its current status when called
	 * with "status" or no argument.
	 */
	public async execute(args: string[]): Promise<void> {
		const arg = args[0] || "";
		switch (arg.toLowerCase()) {
			case "enable":
				await this.$analyticsService.setStatus(this.settingName, true);
				// TODO(Analytics): await this.$analyticsService.track(this.settingName, "enabled");
				this.$logger.info(`${this.humanReadableSettingName} is now enabled.`);
				break;
			case "disable":
				// TODO(Analytics): await this.$analyticsService.track(this.settingName, "disabled");
				await this.$analyticsService.setStatus(this.settingName, false);
				this.$logger.info(`${this.humanReadableSettingName} is now disabled.`);
				break;
			case "status":
			case "":
				this.$logger.info(
					await this.$analyticsService.getStatusMessage(
						this.settingName,
						this.$options.json,
						this.humanReadableSettingName
					)
				);
				break;
		}
	}
}
/**
 * `usage-reporting [enable|disable|status]` -- controls whether anonymous
 * feature-usage tracking is active.
 */
export class UsageReportingCommand extends AnalyticsCommand {
	constructor(
		protected $analyticsService: IAnalyticsService,
		$logger: ILogger,
		$errors: IErrors,
		$options: IOptions,
		$staticConfig: Config.IStaticConfig
	) {
		super(
			$analyticsService,
			$logger,
			$errors,
			$options,
			$staticConfig.TRACK_FEATURE_USAGE_SETTING_NAME,
			"Usage reporting"
		);
	}
}
injector.registerCommand("usage-reporting", UsageReportingCommand);
/**
 * `error-reporting [enable|disable|status]` -- controls whether error
 * reports are submitted.
 */
export class ErrorReportingCommand extends AnalyticsCommand {
	constructor(
		protected $analyticsService: IAnalyticsService,
		$logger: ILogger,
		$errors: IErrors,
		$options: IOptions,
		$staticConfig: Config.IStaticConfig
	) {
		super(
			$analyticsService,
			$logger,
			$errors,
			$options,
			$staticConfig.ERROR_REPORT_SETTING_NAME,
			"Error reporting"
		);
	}
}
injector.registerCommand("error-reporting", ErrorReportingCommand);
| apache-2.0 |
ReactiveX/RxJava | src/main/java/io/reactivex/rxjava3/internal/operators/observable/ObservableFromAction.java | 2060 | /*
* Copyright (c) 2016-present, RxJava Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is
* distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See
* the License for the specific language governing permissions and limitations under the License.
*/
package io.reactivex.rxjava3.internal.operators.observable;
import io.reactivex.rxjava3.core.*;
import io.reactivex.rxjava3.exceptions.Exceptions;
import io.reactivex.rxjava3.functions.*;
import io.reactivex.rxjava3.internal.fuseable.CancellableQueueFuseable;
import io.reactivex.rxjava3.plugins.RxJavaPlugins;
/**
 * Executes an {@link Action} and signals its exception or completes normally.
 *
 * @param <T> the value type
 * @since 3.0.0
 */
public final class ObservableFromAction<T> extends Observable<T> implements Supplier<T> {

    // The side-effecting action, run once per subscriber.
    final Action action;

    public ObservableFromAction(Action action) {
        this.action = action;
    }

    @Override
    protected void subscribeActual(Observer<? super T> observer) {
        CancellableQueueFuseable<T> qs = new CancellableQueueFuseable<>();
        // Hand the observer its Disposable before running the action so it can
        // cancel synchronously from onSubscribe.
        observer.onSubscribe(qs);

        if (!qs.isDisposed()) {
            try {
                action.run();
            } catch (Throwable ex) {
                Exceptions.throwIfFatal(ex);
                if (!qs.isDisposed()) {
                    observer.onError(ex);
                } else {
                    // Observer already disposed: route to the global error
                    // handler instead of losing the error.
                    RxJavaPlugins.onError(ex);
                }
                return;
            }
            // Re-check: the action itself may have triggered disposal.
            if (!qs.isDisposed()) {
                observer.onComplete();
            }
        }
    }

    @Override
    public T get() throws Throwable {
        action.run();
        return null; // considered as onComplete()
    }
}
| apache-2.0 |
roman-sd/java-a-to-z | chapter_004/src/main/java/ru/sdroman/tictactoe/input/RandomInput.java | 708 | package ru.sdroman.tictactoe.input;
import ru.sdroman.tictactoe.interfaces.Input;
import java.util.Random;
/**
* Class RandomInput.
*
* @author sdroman
* @version 0.1
* @since 03.17
*/
/**
 * Class RandomInput.
 *
 * Produces a random move for the 3x3 tic-tac-toe board, formatted as
 * "x y" with both coordinates in [0, 2]. The question prompt is ignored.
 *
 * @author sdroman
 * @version 0.1
 * @since 03.17
 */
public class RandomInput implements Input {

    /**
     * Random object.
     */
    private Random random = new Random();

    /**
     * Read.
     *
     * @param question String question (ignored -- this input never prompts)
     * @return String random "x y" coordinate pair
     */
    @Override
    public String read(String question) {
        final int bound = 3;
        int row = random.nextInt(bound);
        int column = random.nextInt(bound);
        return row + " " + column;
    }
}
| apache-2.0 |
AlexGerasimov/dts | server.py | 10226 | #! /usr/bin/env python
# coding=utf-8
import os
import os.path
import re
import sys
import time
import couchdb
import logging as logger
import cgi
import BaseHTTPServer
import SocketServer
import formatter
table_name = 'requests'
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def __get_log(self):
idx = re.search('/(.+?)\.log', self.path).group(1)
couch = couchdb.Server()
db = couch[table_name]
if idx not in db:
return
doc = db[idx]
data = db.get_attachment(doc, 'log')
if data is None:
data = "Log is not yet created"
else:
data = data.read()
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
buf = '<html><meta charset="UTF-8"><body><code>' + data.replace('\n', '<br/>') + '</code></body></html>'
self.wfile.write(buf)
    def __construct_table(self, title, results):
        """Render one titled group of rows for the results table.

        ``results`` maps field names to either a scalar, a file-like dict
        (``name``/``content`` keys), or a nested-table dict (``head``/``body``
        keys). A ``None`` value is treated as an empty mapping.

        NOTE(review): the assembly emits a trailing empty ``<tr></tr>`` and
        skips dict values that match none of the known shapes -- presumably
        tolerated by browsers; confirm before "fixing" the markup.
        """
        logger.debug('__construct_table with {0} and {1}'.format(title, results))
        if results is None:
            results = {}
        table = '<tr>'
        # The title cell spans all result rows.
        rowspan = len(results)
        table += '<th rowspan={0}>{1}</th>'.format(rowspan, title)
        for k, v in results.items():
            logger.debug('output value {0}'.format(v))
            if not isinstance(v, dict):
                # Scalar value: one name/value pair per row.
                table += '<th>{0}</th><td>{1}</td></tr>'.format(k, v)
            elif 'name' in v and 'content' in v:  # v is file
                table += '<th>{0}</th><td>{1}</td></tr>'.format(k, v['name'])
            elif 'head' in v and 'body' in v:  # v is table
                # Render the nested table inline inside the value cell.
                res_table = '<table>'
                res_table += '<tr>' + ''.join(['<th>{0}</th>'.format(tk) for tk in v['head']]) + '</tr>'
                for row in v['body']:
                    res_table += '<tr>' + ''.join(['<td>{0}</td>'.format(rv) for rv in row]) + '</tr>'
                res_table += '</table>'
                table += '<th>{0}</th><td>{1}</td></tr>'.format(k, res_table)
            # Open the next row for the following entry.
            table += '<tr>'
        table += '</tr>'
        return table
    def __construct_result_table(self):
        """Build the HTML summarising every stored request, newest first."""
        couch = couchdb.Server()
        db = couch[table_name]
        # Materialise all documents so they can be sorted by timestamp.
        docs = [db[idx] for idx in db]
        table = ''
        # Table cells
        # NOTE(review): 'timestampt' (sic) must match the key written when the
        # documents are created elsewhere -- do not correct the spelling here
        # in isolation.
        for req in sorted(docs, key=lambda d: d['timestampt'], reverse=True):
            table += '<table border=1>'
            for task_name, task_opts in req['tasks'].items():
                # One group of rows for the task arguments, one for its results.
                table += self.__construct_table(task_name, task_opts['args'])
                table += self.__construct_table('Results', task_opts.get('result', dict()))
            # Request-level status information.
            table += self.__construct_table('System', {'status': req['status'], 'host': req['host']})
            table += '</table><br/>'
        return table
def __construct_input_form(self):
couch = couchdb.Server()
if 'tasks' not in couch:
all_task_configs = {}
else:
db = couch['tasks']
doc = db['config']
conf_names = doc['names']
conf_opts = doc['opts']
all_task_configs = { tdir: conf_opts[tdir] for tdir in conf_names }
form = '<h2>MyForm</h2><form action=/ method=POST ENCTYPE=multipart/form-data>'
# construct tasks form
for task_name, task in all_task_configs.items():
title = task.get('title', task_name)
form += '<fieldset><legend>' + title + '</legend>'
form += '<input type="hidden" name="{0}.version" value="{1}" />'.format(task_name, task['version'])
for param in task['args']: # {"name": "some_field", "type": "text", "title": "User Name"}
name = param['name']
key = task_name + '.' + name
tkey = task_name + '.' + name + '.type'
title = param.get('title', name)
input_type = param['type'] if param['type'] != 'bool' else 'checkbox'
required = 'required' if 'required' in param and param['required'] else ''
form += '<fieldset><legend>{0}</legend>'.format(title)
if param['type'] in ['bool', 'text', 'file']:
form += '<input type="{0}" name="{1}" "{2}" />'.format(input_type, key, required)
form += '<input type="hidden" name="{0}" value="{1}" />'.format(tkey, param['type'])
elif param['type'] in ['radio', 'checkbox']:
if 'values' not in param:
logger.warning('values are missed for param {0}'.format(name))
else:
for value in param['values']:
form += '<input type={0} name={1} value={2} {3} /> {4} <br/>'.format(input_type, key, value, required, value)
form += '<input type="hidden" name="{0}" value="{1}" />'.format(tkey, param['type'])
else:
logger.warning('incorrect param {0} type {1}'.format(name, param['type']))
form += '</fieldset>'
form += '</fieldset>'
form += '<input type=submit value="Upload patch">'
form += '</form>'
return form
def get_message(self, db_answer=None):
    # NOTE(review): `message` is not defined in this method or its arguments;
    # presumably it is a module-level global assembled elsewhere -- confirm.
    # `db_answer` is currently unused.
    return message
def under_construction(self):
    """Reply 200 with a static 'under construction' placeholder page."""
    self.send_response(200)
    self.send_header('Content-type', 'text/html')
    self.end_headers()
    buf = '<html><meta charset="UTF-8"><body><h1>UNDER CONSTRUCTION</h1></body></html>'
    self.wfile.write(buf)
def __get_form(self):
    """Render the main page: the task input form (left cell) next to the
    results table (right cell), then send it as the 200 response body."""
    # Backslash continuations keep the HTML prologue a single string literal.
    form = '<html><head><title>DTS</title><meta charset="UTF-8">\
<link rel = "stylesheet" type = "text/css" href = "style.css">\
</head><body><table><tr><td style="padding:10px;vertical-align:top;">'
    form += self.__construct_input_form()
    form += '</td><td><div class=form>'
    form += self.__construct_result_table()
    form += '</div></td></tr></table>'
    form += '</body></html>'
    self.send_response(200)
    self.end_headers()
    self.wfile.write(form.encode('utf-8'))
def do_GET(self):
    """Route GET requests: '*.log' paths go to the log viewer, every other
    path renders the main form page."""
    if not self.path.endswith('.log'):
        self.__get_form()
        return
    self.__get_log()
def do_POST(self):
    """Parse the submitted multipart form and queue a new task document.

    Field names are dotted: "<task>.version" carries the task's version and
    "<task>.<param>.type" carries the declared type of field
    "<task>.<param>".  The decoded arguments are saved to CouchDB as a new
    document with status 'Waiting' for a worker host to pick up.
    """
    form = cgi.FieldStorage(fp=self.rfile, headers=self.headers,
        environ={'REQUEST_METHOD': 'POST', 'CONTENT_TYPE': self.headers['Content-Type'], })
    tasks_args = {}
    for k in form:
        if k.endswith('.version'):
            # "<task>.version" -> record the task's version once.
            tkey_list = k.split('.')
            task_name = tkey_list[0]
            task_version = form[k].value
            tasks_args.setdefault(task_name, dict()).setdefault('version', task_version)
        elif k.endswith('.type'):
            # "<task>.<param>.type" -> decode the matching "<task>.<param>"
            # field according to its declared type.
            tkey_list = k.split('.')
            task_name = tkey_list[0]
            param_name = tkey_list[1]
            param_type = form[k].value
            logger.debug('checking field {0}'.format(k))
            k_value = k.replace('.type', '')
            # Absent for unticked checkboxes / empty optional fields.
            param_val = form[k_value] if k_value in form else None
            logger.debug('field {0} values are {1}'.format(k_value, 'found' if param_val is not None else 'not-found'))
            logger.debug('field {0}:\n{1}'.format(k_value, param_val))
            task_args = tasks_args.setdefault(task_name, dict()).setdefault('args', dict())
            if param_type == 'bool':
                # A checkbox is simply present (ticked) or absent.
                task_args[param_name] = param_val is not None
            elif param_type in ['text', 'radio']:
                task_args[param_name] = None if param_val is None or param_val.value == '' else param_val.value
            elif param_type == 'checkbox':
                # Multi-select: cgi yields a list only when several boxes
                # are ticked, a single item otherwise.
                task_args[param_name] = list()
                if param_val is not None:
                    if isinstance(param_val, list):
                        for v in param_val:
                            task_args[param_name].append(v.value)
                    else:
                        task_args[param_name].append(param_val.value)
                else:
                    task_args[param_name] = None
            elif param_type == 'file':
                if param_val is None or param_val.filename == '':
                    task_args[param_name] = None
                else:
                    filename = os.path.basename(param_val.filename)
                    buf = param_val.file.read()
                    task_args[param_name] = {'name': filename, 'content': buf}
            else:
                logger.warning('incorrect param type {0} in request'.format(param_type))
    self.send_response(200)
    self.end_headers()
    self.wfile.write('<html><head><title>Upload</title></head><body>\
Success\
<FORM><INPUT Type="button" VALUE="Back" onClick="history.go(-1);return true;"></FORM>\
</body></html>')
    # NOTE(review): the key below is spelled 'timestampt' -- likely a typo for
    # 'timestamp', but readers of these documents may already depend on the
    # misspelling; confirm before renaming.
    doc = {'version': 1,
        'timestampt': time.time(),
        'host': None,  # filled in by the worker that claims the task
        'status': 'Waiting',
        'tasks': tasks_args}
    couch = couchdb.Server()
    db = couch[table_name]
    db.save(doc)
class ForkingHTTPServer(SocketServer.ForkingMixIn, BaseHTTPServer.HTTPServer):
    """HTTP server that forks one child process per request."""

    def finish_request(self, request, client_address):
        # Cap each connection at 30s so a stalled client cannot pin a child.
        request.settimeout(30)
        # "super" can not be used because BaseServer is not created from object
        # (old-style class in Python 2), so delegate explicitly.
        BaseHTTPServer.HTTPServer.finish_request(self, request, client_address)
if __name__ == '__main__':
    # NOTE(review): `logger` is used like the stdlib logging *module* here
    # (basicConfig / DEBUG attributes), but like a Logger instance elsewhere in
    # this file -- confirm the actual import; Logger objects have no basicConfig().
    logger.basicConfig(level=logger.DEBUG)
    handler_class = MyHandler
    try:
        # Make sure the results database exists before serving.
        couch = couchdb.Server()
        if table_name not in couch:
            couch.create(table_name)
        port = 8080
        host = "balboa"  # advertised host name only; the server binds to all interfaces
        url = "http://{0}:{1}/".format(host, port)
        print "Ask user to visit this URL:\n\t%s" % url
        srvr = ForkingHTTPServer(('', port), handler_class)
        srvr.serve_forever()  # serve_forever
    except KeyboardInterrupt:
        pass
| apache-2.0 |
keil/TbDA | test/GoogleGadgets/YouTube/main.js | 22673 | /*
Copyright (C) 2008 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
debug.info('------ new yt instance -------');

/** @type Array<YouTubeVideo> Queue of videos waiting to be displayed. */
var gl_videosQueue = [];
/** @type Object<YouTubeVideo> Data for the videos currently shown, keyed by id. */
var gl_videoData = {};
/** @type Number Video-search responses still expected from YouTube. */
var gl_pendingResponses = undefined;
/** @type Number Thumbnail responses still expected from YouTube. */
var gl_pendingThumbnails = undefined;
/**
 * Set when a double click is received, to suppress the single-click handler
 * that would otherwise also fire.
 * @type Boolean
 */
var gl_receivedDblclick = false;
/**
 * Item id whose details view is currently open, or -1 when none is. A second
 * click on the same item closes the pane instead of reopening it.
 * @type Number
 */
var gl_currentItemDetailsView = -1;
/** @type String URL of the currently selected video (details view). */
var gl_selectedVideoUrl = undefined;
/** @type Object<KeywordsProfile> */
var gl_keywordProfile = new KeywordsProfile();
/** @type Number Animation timer fading the listbox; set while animating. */
var gl_contentAnimationTimer = undefined;
/** @type Boolean True while the user is interacting with the gadget in any way. */
var gl_isUserInteracting = false;
/** @type Number Timeout id used to retry a refresh that had to be deferred. */
var gl_restartVideoRefreshTimeout = undefined;
/** @type Number Animation timer fading the status label in and out. */
var gl_statusAnimationTimer = undefined;
/** @type Number Timer that trims old videos so the list does not grow unbounded. */
var gl_removeVideosTimer = undefined;
/** @type Number Refreshes made since the last half-hour reset (throttle). */
var gl_videoRequestCount = 0;
/** @type Boolean True while the gadget is popped out of the sidebar. */
var gl_poppedOut = false;
/**
 * View onopen handler: shows an "updating" status immediately, then defers
 * the heavy initialization so the gadget display is refreshed to the right
 * size first.
 */
function _onOpen() {
  updateStatus(strings.UPDATING_VIDEOS);
  view.setTimeout(init, 200);
}

/** Records that the gadget returned to the sidebar. */
function _onPopin() {
  gl_poppedOut = false;
}

/** Records the popped-out state and re-runs layout once the new size settles. */
function _onPopout() {
  gl_poppedOut = true;
  view.setTimeout(_onSize, 200);
}
/**
 * Applies the gadget's custom scrollbar skin to whichever listbox is
 * currently visible and has a scrollbar (search results take precedence
 * over the main list when both qualify).
 */
function setScrollImages() {
  var target;
  if (content.visible === true && content.scrollbar !== null) {
    target = content;
  }
  if (searchresults.visible === true && searchresults.scrollbar !== null) {
    target = searchresults;
  }
  if (!target) {
    return;
  }
  var bar = target.scrollbar;
  var thumbSkin = "images/main/scrollbar.png";
  bar.thumbImage = thumbSkin;
  bar.thumbDownImage = thumbSkin;
  bar.thumbOverImage = thumbSkin;
  bar.background = "images/main/scrollbar_track.png";
}
/** Opens the YouTube home page when the logo is clicked. */
function _logo_onclick() {
  framework.openUrl('http://www.youtube.com');
}
// Watches VERSION_INFO_URL for mandatory-upgrade notices (created in init()).
var g_versionChecker;
var VERSION_INFO_URL = 'http://desktop.google.com/plugins/versions/youtube.txt';
// Interval timer id for the periodic feed refresh (set in init()).
var g_contentRefreshTimer;
/**
 * Initial setup: runs the dock-layout pass, starts the first video fetch,
 * wires up the options menu and refresh timers, and creates the
 * mandatory-upgrade version checker.
 */
function init() {
  _onDock();
  getFreshVideos(true);
  updateFeedDescription();
  exitSearchMode();
  g_contentRefreshTimer = view.setInterval(getFreshVideos,
      CONTENT_REFRESH_INTERVAL_MS);
  plugin.onAddCustomMenuItems = Options.onAddMenuItems;
  var feed = Options.currentFeed();
  setDescriptionText(feed);
  // Reset the refresh throttle every half hour (see getFreshVideos).
  view.setInterval(function() { gl_videoRequestCount = 0; }, 30 * 60 * 1000);
  g_versionChecker = new VersionChecker(strings.VERSION_STRING,
      VERSION_INFO_URL, onMandatoryUpgrade);
}
/**
 * Puts the gadget into "please upgrade" mode: stops all background work,
 * hides the normal UI and shows the upgrade notice from the version checker.
 * @param {Object} upgradeInfo Notice with reason/infoUrl/downloadUrl fields
 */
function onMandatoryUpgrade(upgradeInfo) {
  debug.trace('Received mandatory upgrade notice.');
  plugin.onAddCustomMenuItems = null;
  killTimers();
  feed_select.visible = false;
  content.visible = false;
  searchresults.visible = false;
  messageDiv.visible = false;
  searchbox.visible = false;
  // The header widgets are moved off-screen rather than hidden.
  drop_arrow_container.y = -200;
  description.y = -200;
  upgradeReason.innerText = upgradeInfo.reason;
  upgradeInfoUrl.href = upgradeInfo.infoUrl;
  upgradeDownloadUrl.href = upgradeInfo.downloadUrl;
  upgradeDiv.visible = true;
  updateStatus(strings.PLEASE_UPGRADE);
}
/**
 * Shows a message in the central pane.
 * @param {String} message Text to display
 * @param {Boolean} isShowLink Whether to also show the "refresh" retry link
 */
function displayMessage(message, isShowLink) {
  messageDiv.visible = true;
  messageLabel.innerText = message;
  messageRefreshLink.visible = isShowLink;
}
/** Cancels the refresh interval and any pending retry/network-check timeouts. */
function killTimers() {
  view.clearInterval(g_contentRefreshTimer);
  view.clearTimeout(gl_networkCheckTimeout);
  view.clearTimeout(gl_restartVideoRefreshTimeout);
}
/**
 * Sets the feed-description label and positions the drop-down arrow just
 * past the rendered text.
 * @param {String} text New description text
 */
function setDescriptionText(text) {
  description.innerText = text;
  descriptionSizer.value = text;
  // It's okay if this is not i18n. It's a hack for this particular
  // english string.
  if (text == 'Most discussed videos') {
    drop_arrow.x = 110;
  } else {
    drop_arrow.x = descriptionSizer.idealBoundingRect.width;
  }
}
/**
 * Called when the gadget is closing. Viewed-video state used to be persisted
 * here; currently a no-op.
 */
function _onClose() {
}
/**
 * Resize handler: recomputes the position/size of every chrome element and
 * re-wraps the text columns of every list item so the resizable gadget
 * keeps looking good.
 */
function _onSize() {
  // Goes through all visible videos and adjusts the width of the contents.
  var width = view.width;
  var height = view.height;
  // Adjust the position of the rest of the elements
  // search box
  searchbox.width = width - SEARCHBOX_RIGHT_PADDING - SEARCHBOX_LEFT_PADDING;
  searchbox.y = height - SEARCHBOX_BOTTOM_PADDING - searchbox.height;
  searchfield_middle.width = searchbox.width - searchfield_right.width;
  searchfield_right.x = searchbox.width - searchfield_right.width;
  search_close.x = searchbox.width - SEARCHCLOSE_RIGHT_PADDING;
  searchfield.width = searchbox.width - search_close.width - searchfield.x;
  // list box - horizontal
  content.width = width - CONTENT_WIDTH_PADDING;
  searchresults.width = content.width;
  top_middle.width = width - LEFT_WIDTH;
  top_right.x = width - RIGHT_WIDTH;
  middle_middle.width = width - LEFT_WIDTH;
  middle_right.x = width - RIGHT_WIDTH;
  bottom_middle.width = width - LEFT_WIDTH;
  bottom_right.x = width - RIGHT_WIDTH;
  status.width = width - status.x - RIGHT_WIDTH;
  description.width = width - status.x - RIGHT_WIDTH;
  drop_arrow_container.width = width - drop_arrow_container.x - RIGHT_WIDTH;
  feed_select.width = width - feed_select.x - RIGHT_WIDTH;
  // list box - vertical
  content.height = height - CONTENT_HEIGHT_PADDING;
  content.height = content.height - searchbox.height;
  searchresults.height = content.height;
  upgradeDiv.width = width - CONTENT_WIDTH_PADDING;
  upgradeDiv.height = content.height + searchbox.height;
  messageDiv.width = content.width;
  messageDiv.height = content.height;
  middle_left.height = height - TOP_HEIGHT;
  middle_middle.height = height - TOP_HEIGHT;
  middle_right.height = height - TOP_HEIGHT;
  bottom_left.y = height - BOTTOM_HEIGHT;
  bottom_middle.y = height - BOTTOM_HEIGHT;
  bottom_right.y = height - BOTTOM_HEIGHT;
  // Leave room for the scrollbar when it is shown.
  if (content.scrollbar && content.scrollbar.visible) {
    content.itemwidth = content.width - content.scrollbar.offsetWidth;
  } else {
    content.itemwidth = content.width;
  }
  if (searchresults.scrollbar && searchresults.scrollbar.visible) {
    searchresults.itemwidth = searchresults.width -
        searchresults.scrollbar.offsetWidth;
  } else {
    searchresults.itemwidth = searchresults.width;
  }
  // Re-wrap the text columns of every item in both listboxes.
  var item_width_offset;
  for (var i = 0; i < content.children.count; ++i) {
    var item = content.children.item(i);
    item_width_offset = content.itemwidth - item.children('title').x;
    item.children('title').width = item_width_offset;
    item.children('desc').width = item_width_offset;
    item.children('view_length').width = item_width_offset;
  }
  for (i = 0; i < searchresults.children.count; ++i) {
    item = searchresults.children.item(i);
    item_width_offset = searchresults.itemwidth - item.children('title').x;
    item.children('title').width = item_width_offset;
    item.children('desc').width = item_width_offset;
    item.children('view_length').width = item_width_offset;
  }
  setScrollImages();
}
/**
 * Sizing handler: clamps the pending resize event so the gadget never
 * shrinks below its minimum dimensions.
 */
function _onSizing() {
  event.width = Math.max(event.width, MIN_GADGET_WIDTH);
  event.height = Math.max(event.height, MIN_GADGET_HEIGHT);
}
/**
 * Called when the refresh button is pressed. Displays an updating message,
 * starts a video refresh, and clears any open details-view state.
 */
function _onRefreshButton() {
  updateStatus(strings.UPDATING_VIDEOS);
  getFreshVideos(true);
  // BUGFIX: gl_selectedVideoUrl holds a URL string (or undefined), so the
  // old numeric test "> 0" could never be true and details-view state was
  // never cleared here. Test for presence instead.
  if (gl_selectedVideoUrl) {
    // Clear state if a details view is currently open
    onDetailsViewClose();
  }
}
/** Marks the start of user interaction with the gadget. */
function _onMouseOver() {
  gl_isUserInteracting = true;
}
/**
 * Marks the end of user interaction -- unless a details view is open, in
 * which case the user is still considered busy.
 */
function _onMouseOut() {
  if (gl_currentItemDetailsView < 0) {
    gl_isUserInteracting = false;
  }
}
/**
 * Search-field key handler: Enter (13) runs the search for a non-empty
 * query, Esc (27) resets the search box.
 */
function _search_onKeydown() {
  if (event.keycode == 13 && // enter key
      searchfield.value !== null && searchfield.value.length > 0) {
    enterSearchMode();
    gl_keywordProfile.setSearchKeywords(searchfield.value);
    searchresults.removeAllElements();
    getFreshVideos(true);
  } else if (event.keycode == 27) { // ESC key
    _search_reset();
  }
}
/** Restores the placeholder when focus leaves an effectively empty field. */
function _search_onfocusout() {
  if (!Util.trimWhitespace(searchfield.value)) {
    _search_reset();
  }
}
/** Resets the search box to its placeholder state and leaves search mode. */
function _search_reset() {
  searchfield.value = strings.SEARCH;
  searchfield.color = 'gray';
  search_close.visible = false;
  frame.focus();
  exitSearchMode();
}
/**
 * Compiler workaround -- gives keyboard focus to the search field and, on
 * first activation, clears the placeholder text and switches to input colors.
 */
function _search_activate() {
  searchfield.focus();
  if (searchfield.value == strings.SEARCH) {
    // Placeholder still showing: switch the field into real input mode.
    searchfield.value = '';
    searchfield.color = 'black';
    search_close.visible = true;
    hideSelect();
  }
}
/** Shows the search-results list (when results exist) and retitles the header. */
function enterSearchMode() {
  if (searchresults.children.count > 0) {
    searchresults.visible = true;
    content.visible = false;
    messageDiv.visible = false;
  }
  setDescriptionText(strings.SEARCH_TITLE);
}
/**
 * Returns to the regular feed view. The search content is kept around in
 * case the search field is activated again.
 */
function exitSearchMode() {
  gl_keywordProfile.setSearchKeywords();
  updateFeedDescription();
  searchresults.visible = false;
  content.visible = true;
  messageDiv.visible = false;
  searchfield.killfocus();
}
/** Refreshes the header label from the currently selected feed. */
function updateFeedDescription() {
  var feed = Options.currentFeed();
  setDescriptionText(feed);
}
/**
 * Switches to a new feed: resets search state and reloads the list.
 * @param {String} feed New feed name. NOTE(review): currently unused --
 *     Options.currentFeed() is read instead; kept for caller compatibility.
 */
function switchFeed(feed) {
  _search_reset();
  updateFeedDescription();
  gl_keywordProfile.resetFeed();
  content.removeAllElements();
  getFreshVideos(true);
}
/**
 * Colors one list item: white title over gray metadata.
 * @param {Object} item Listbox item element to color
 */
function colorItem(item) {
  item.children('title').color = '#FFFFFF';
  item.children('desc').color = '#999999';
  item.children('view_length').color = '#999999';
}
/**
 * Applies the fixed list-mode layout to one item: 45x35 thumbnail on the
 * left, a single-line ellipsized title on top and the view/length line
 * below it; the long description is hidden in list mode.
 * @param {Object} item Listbox item element to lay out
 */
function resizeItem(item) {
  var image = item.children('image');
  image.x = 1;
  image.y = 1;
  image.width = 45;
  image.height = 35;
  var title = item.children('title');
  title.x = 50;
  title.y = 1;
  title.height = 17;
  title.size = 8;
  title.vAlign = 'bottom';
  title.wordWrap = false;
  title.trimming = 'character-ellipsis';
  var desc = item.children('desc');
  desc.visible = false;
  var view_length = item.children('view_length');
  view_length.x = 50;
  view_length.y = 19;
  view_length.size = 7;
  view_length.height = 12;
  view_length.visible = true;
  view_length.vAlign = 'top';
}
/** Re-runs layout shortly after docking so the new metrics are picked up. */
function _onDock() {
  view.setTimeout(_onSize, 200);
}
/** Undocking behaves the same as popping out. */
function _onUndock() {
  _onPopout();
}
/**
 * Displays a message at the top of the gadget, fading it in.
 * @param {String} message The string to display
 */
function updateStatus(message) {
  if (status.innerText != message) {
    status.innerText = message;
  }
  // Restart any fade already in progress.
  if (gl_statusAnimationTimer) {
    gl_statusAnimationTimer = view.cancelAnimation(gl_statusAnimationTimer);
  }
  gl_statusAnimationTimer = view.beginAnimation(animateStatusFade,
      status.opacity,
      gddElementMaxOpacity,
      FADE_TIMER_MS);
  // Don't show the description and stats at the same time
  description.visible = false;
  drop_arrow_container.visible = false;
}
/**
 * Fades out and removes the message at the top of the gadget, restoring the
 * feed description once the fade completes.
 */
function clearStatus() {
  if (gl_statusAnimationTimer) {
    gl_statusAnimationTimer = view.cancelAnimation(gl_statusAnimationTimer);
  }
  gl_statusAnimationTimer = view.beginAnimation(animateStatusFade,
      status.opacity,
      gddElementMinOpacity,
      FADE_TIMER_MS);
  view.setTimeout(function() {
    status.innerText = '';
    description.visible = true;
    drop_arrow_container.visible = true;
  }, FADE_TIMER_MS);
}
// Pending timeout id for the "is the network back?" poll (see getFreshVideos).
// BUGFIX: declared with `var` -- the original bare assignment created an
// implicit global, which throws under ES5 strict mode and is inconsistent
// with every other gl_* declaration in this file.
var gl_networkCheckTimeout = null;
/**
 * Prepares the gadget to receive new content and initiates the YouTube
 * query for the current feed (or for the active search in search mode).
 * Skips the refresh while the user is busy, scheduling a retry instead.
 * @param {Boolean} opt_force Forces the list to be refreshed
 */
function getFreshVideos(opt_force) {
  if (!framework.system.network.online) {
    // Offline: for forced refreshes, poll until the network comes back.
    if (opt_force) {
      if (gl_networkCheckTimeout) {
        view.clearTimeout(gl_networkCheckTimeout);
      }
      gl_networkCheckTimeout = view.setTimeout(function() {
        getFreshVideos(true);
      }, NETWORK_CHECK_INTERVAL_MS);
    }
    return;
  } else {
    if (gl_networkCheckTimeout) {
      view.clearTimeout(gl_networkCheckTimeout);
    }
  }
  // Don't disturb an interacting or idle user; retry later instead.
  if (!opt_force && (gl_isUserInteracting || system.user.idle)) {
    // Start a timer to restart the refresh
    if (gl_restartVideoRefreshTimeout) {
      view.clearTimeout(gl_restartVideoRefreshTimeout);
    }
    gl_restartVideoRefreshTimeout = view.setTimeout(getFreshVideos,
        RESTART_VIDEO_TIMEOUT_MS);
    return;
  }
  if (!opt_force) { // Don't retry too often
    gl_videoRequestCount += 1;
    if (gl_videoRequestCount > 5) {
      return;
    }
  }
  updateStatus(strings.UPDATING_VIDEOS);
  gl_pendingResponses = 0;
  gl_pendingThumbnails = 0;
  if (isSearchMode()) {
    // Search view
    var query = gl_keywordProfile.buildSearchQuery();
    YouTubeVideo.videoRequest(query, updateVideos);
  } else {
    // Regular view
    var feedQuery = undefined;
    var currentFeed = Options.currentFeed();
    feedQuery = gl_keywordProfile.buildFeedQuery(currentFeed, null, true);
    if (feedQuery) {
      debug.trace(feedQuery);
      YouTubeVideo.videoRequest(feedQuery, updateVideos);
    }
  }
}
/** @return {Boolean} True when a user search is currently active. */
function isSearchMode() {
  return gl_keywordProfile.isSearchMode();
}
/**
 * Response callback: queues the returned videos and requests their
 * thumbnails; shows an error or "no videos" message on failure.
 * NOTE(review): gl_pendingResponses is reset to 0 in getFreshVideos and only
 * ever decremented here, so it goes negative -- confirm whether an increment
 * at request time was lost.
 * @param {Array<YouTubeVideo>} videosArray Videos returned from YouTube;
 *     null if the request failed.
 */
function updateVideos(videosArray) {
  --gl_pendingResponses;
  if (videosArray) {
    if (videosArray.length) {
      gl_pendingThumbnails += videosArray.length;
      for (var i = 0; i < videosArray.length; ++i) {
        gl_videosQueue.push(videosArray[i]);
        videosArray[i].fetchStatusImage();
      }
    } else {
      displayMessage(strings.NO_VIDEOS_FOUND, false);
    }
  } else {
    displayMessage(strings.NETWORK_ERROR, true);
    clearStatus();
  }
}
/**
 * Renders the queued videos once no responses or thumbnail fetches are
 * still outstanding.
 */
function shouldDisplay() {
  if (gl_pendingResponses <= 0 && gl_pendingThumbnails <= 0) {
    displayVideos(gl_videosQueue);
  }
}
/**
 * Clears the visible listbox and repopulates it with the queued videos,
 * inserting each new item at the head of the list.
 * NOTE: The elements are removed from newVideos as they are displayed.
 * @param {Array<YouTubeVideo>} newVideos New yt videos to display
 */
function displayVideos(newVideos) {
  var searchmode = isSearchMode();
  searchresults.visible = searchmode;
  content.visible = !searchmode;
  messageDiv.visible = false;
  var listbox = searchmode ? searchresults : content;
  listbox.removeAllElements();
  gl_videoData = {};
  // Pop videos off the queue until it is empty. (The original `for` loop
  // tracked unused `length` and `i` variables; a while loop says what it
  // means.)
  while (newVideos.length > 0) {
    var video = newVideos.pop();
    var id = video.id;
    var itemXml = video.getItemXml();
    var newItem = undefined;
    // Insert before the current first child so newest entries end up on top.
    if (listbox.children(0)) {
      newItem = listbox.insertElement(itemXml, listbox.children(0));
    } else {
      newItem = listbox.appendElement(itemXml);
    }
    assert(newItem !== null);
    if (newItem) {
      newItem.children('title').innerText = video.title;
      newItem.children('desc').innerText = video.description;
      newItem.children('view_length').innerText = video.getViewLength();
      var snippet = video.title + '\n\n' +
          video.description;
      newItem.children('title').tooltip = snippet;
      resizeItem(newItem);
      colorItem(newItem);
      var image = newItem.children('image');
      image.src = video.thumbnail.src;
      image.tooltip = snippet;
      gl_videoData[id] = video;
    }
  }
  // Resize the newly added items (they get added without any sizing).
  view.setTimeout(_onSize, 200);
  clearStatus();
}
/////////////// List Item event handlers
/**
 * Displays the details view for the selected video. Clicking the item whose
 * details are already open closes the pane instead.
 * @param {Number} itemId Id of the video to be displayed
 */
function _showDetailsView(itemId) {
  if (gl_receivedDblclick) {
    return;
  }
  if (gl_currentItemDetailsView >= 0) {
    var shouldCloseDetailsView = itemId == gl_currentItemDetailsView;
    onDetailsViewClose();
    if (shouldCloseDetailsView) {
      plugin.CloseDetailsView();
      return;
    }
  }
  var curItem = getVideoById(itemId);
  gl_isUserInteracting = true;
  var xmlDetailsView = new DetailsView();
  xmlDetailsView.detailsViewData.putValue("closeDetailsView", closeDetailsView);
  xmlDetailsView.contentIsView = true;
  xmlDetailsView.setContent("", undefined, "details.xml", false, 0);
  // BUGFIX: getVideoById returns undefined (not null) on a miss, so the old
  // `curItem !== null` guard passed and the dereference below threw a
  // TypeError. Use a truthiness test instead.
  if (curItem && curItem.embeddedurl !== "") {
    debug.trace("embedurl: " + curItem.embeddedurl);
    xmlDetailsView.detailsViewData.putValue("curItem", curItem);
    gl_selectedVideoUrl = curItem.url;
  }
  // Show the details view.
  plugin.showDetailsView(xmlDetailsView,
      curItem ? curItem.title : strings.VIDEO_ERROR,
      gddDetailsViewFlagToolbarOpen +
      gddDetailsViewFlagDisableAutoClose +
      gddDetailsViewFlagNoFrame,
      null);
  gl_currentItemDetailsView = itemId;
}
/**
 * Single-click handler: defers opening the details view briefly so a
 * double-click can cancel it (see _itemDoubleClick / gl_receivedDblclick).
 * @param {Number} itemId Id of the clicked video
 */
function _itemSingleClick(itemId) {
  gl_receivedDblclick = false;
  // String form bakes the id into the deferred call.
  view.setTimeout('_showDetailsView(' + itemId + ');', 200);
}
/**
 * Double-click handler: opens the video in an external browser and clears
 * the list selections.
 * @param {Number} itemId Id of the video to open
 */
function _itemDoubleClick(itemId) {
  gl_receivedDblclick = true;
  var curItem = getVideoById(itemId);
  // ROBUSTNESS: getVideoById returns undefined for a stale/unknown id;
  // dereferencing it would throw before the selections were cleared.
  if (curItem) {
    framework.openUrl(curItem.url);
  }
  content.clearSelection();
  searchresults.clearSelection();
}
/** Cleans up gadget state when the details view closes. */
function onDetailsViewClose() {
  gl_currentItemDetailsView = -1;
  content.clearSelection();
  searchresults.clearSelection();
  gl_isUserInteracting = false;
}
/**
 * Looks up a video by its unique id.
 * @param {Number} id Id of the video to get
 * @return {YouTubeVideo} The video, or undefined if the id is unknown.
 *     (The original doc claimed null -- a plain map lookup yields undefined,
 *     so callers must use truthiness checks, not `!== null`.)
 */
function getVideoById(id) {
  return gl_videoData[id];
}
/** Closes the details view and cleans up state (details-view callback). */
function closeDetailsView() {
  plugin.CloseDetailsView();
  onDetailsViewClose();
}
/**
 * @param {Number} id YouTubeVideo id of the video
 * @return {Object} The main-list DIV associated with this video id
 */
function getDivById(id) {
  return content.children.item("video_" + id);
}
/**
* Sort function to sort a list of Videos by the view count
* @param {YouTubeVideo} word_a The first video to compare
* @param {YouTubeVideo} word_b The second video to compare
* @return {Number} the result of the comparison. > 0 if b is greater than a, < 0
* if a is greater than b and 0 if they are equal.
function sortViews(word_a, word_b) {
return word_b.viewCount - word_a.viewCount;
}
*/
/** Animation callback: fades the status label to the current event value. */
function animateStatusFade() {
  status.opacity = event.value;
}
/**
 * Animation callback: fades one list item to the current event value.
 * @param {Object} container Listbox holding the item
 * @param {Number} id YouTubeVideo id of the item to be animated
 */
function _animateItem(container, id) {
  var item = container.children('video_' + id);
  if (item) {
    item.opacity = event.value;
  }
}
/** Toggles the feed drop-down open or closed. */
function _toggleSelect() {
  if (feed_select.visible) {
    hideSelect();
  } else {
    showSelect();
  }
}
/** Shows the feed drop-down. */
function showSelect() {
  feed_select.visible = true;
}
/** Hides the feed drop-down. */
function hideSelect() {
  feed_select.visible = false;
}
/** Hover highlight for drop-down rows. */
function _onSelectOver() {
  event.srcElement.background = '#2a2a2a';
}
/** Removes the hover highlight. */
function _onSelectOut() {
  event.srcElement.background = '#000000';
}
/**
 * Row click handler: switches feeds when the choice actually changes (or
 * when leaving search mode), then closes the drop-down.
 * @param {String} value Feed value of the clicked row
 */
function _onSelectClick(value) {
  if (Options.currentFeed() != value || isSearchMode()) {
    Options.switchFeed(value);
  }
  hideSelect();
}
| apache-2.0 |
Nickname0806/Test_Q4 | java/org/apache/coyote/ActionCode.java | 6515 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.coyote;
/**
* ActionCodes represent callbacks from the servlet container to the coyote
* connector. Actions are implemented by ProtocolHandler, using the ActionHook
* interface.
*
* @see ProtocolHandler
* @see ActionHook
* @author Remy Maucherat
*/
public enum ActionCode {
ACK,
CLOSE,
COMMIT,
/**
* A serious error occurred from which it is not possible to recover safely.
* Further attempts to write to the response should be ignored and the
* connection needs to be closed as soon as possible. This can also be used
* to forcibly close a connection if an error occurs after the response has
* been committed.
*/
CLOSE_NOW,
/**
* A flush() operation originated by the client ( i.e. a flush() on the
* servlet output stream or writer, called by a servlet ). Argument is the
* Response.
*/
CLIENT_FLUSH,
/**
* Has the processor been placed into the error state? Note that the
* response may not have an appropriate error code set.
*/
IS_ERROR,
/**
* Hook called if swallowing request input should be disabled.
* Example: Cancel a large file upload.
*
*/
DISABLE_SWALLOW_INPUT,
/**
* Callback for lazy evaluation - extract the remote host name and address.
*/
REQ_HOST_ATTRIBUTE,
/**
* Callback for lazy evaluation - extract the remote host address.
*/
REQ_HOST_ADDR_ATTRIBUTE,
/**
* Callback for lazy evaluation - extract the SSL-related attributes
* including the client certificate if present.
*/
REQ_SSL_ATTRIBUTE,
/**
* Force a TLS re-handshake and make the resulting client certificate (if
* any) available as a request attribute.
*/
REQ_SSL_CERTIFICATE,
/**
* Callback for lazy evaluation - socket remote port.
*/
REQ_REMOTEPORT_ATTRIBUTE,
/**
* Callback for lazy evaluation - socket local port.
*/
REQ_LOCALPORT_ATTRIBUTE,
/**
* Callback for lazy evaluation - local address.
*/
REQ_LOCAL_ADDR_ATTRIBUTE,
/**
* Callback for lazy evaluation - local address.
*/
REQ_LOCAL_NAME_ATTRIBUTE,
/**
* Callback for setting FORM auth body replay
*/
REQ_SET_BODY_REPLAY,
/**
* Callback for getting the amount of available bytes.
*/
AVAILABLE,
/**
* Callback for an async request.
*/
ASYNC_START,
/**
* Callback for an async call to
* {@link javax.servlet.AsyncContext#dispatch()}.
*/
ASYNC_DISPATCH,
/**
* Callback to indicate the the actual dispatch has started and that the
* async state needs change.
*/
ASYNC_DISPATCHED,
/**
* Callback for an async call to
* {@link javax.servlet.AsyncContext#start(Runnable)}.
*/
ASYNC_RUN,
/**
* Callback for an async call to
* {@link javax.servlet.AsyncContext#complete()}.
*/
ASYNC_COMPLETE,
/**
* Callback to trigger the processing of an async timeout.
*/
ASYNC_TIMEOUT,
/**
* Callback to trigger the error processing.
*/
ASYNC_ERROR,
/**
* Callback for an async call to
* {@link javax.servlet.AsyncContext#setTimeout(long)}
*/
ASYNC_SETTIMEOUT,
/**
* Callback to determine if async processing is in progress.
*/
ASYNC_IS_ASYNC,
/**
* Callback to determine if async dispatch is in progress.
*/
ASYNC_IS_STARTED,
/**
* Call back to determine if async complete is in progress.
*/
ASYNC_IS_COMPLETING,
/**
* Callback to determine if async dispatch is in progress.
*/
ASYNC_IS_DISPATCHING,
/**
* Callback to determine if async is timing out.
*/
ASYNC_IS_TIMINGOUT,
/**
* Callback to determine if async is in error.
*/
ASYNC_IS_ERROR,
/**
* Callback to trigger post processing. Typically only used during error
* handling to trigger essential processing that otherwise would be skipped.
*/
ASYNC_POST_PROCESS,
/**
* Callback to trigger the HTTP upgrade process.
*/
UPGRADE,
/**
* Indicator that Servlet is interested in being
* notified when data is available to be read.
*/
NB_READ_INTEREST,
/**
* Used with non-blocking writes to determine if a write is currently
* allowed (sets passed parameter to <code>true</code>) or not (sets passed
* parameter to <code>false</code>). If a write is not allowed then callback
* will be triggered at some future point when write becomes possible again.
*/
NB_WRITE_INTEREST,
/**
* Indicates if the request body has been fully read.
*/
REQUEST_BODY_FULLY_READ,
/**
* Indicates that the container needs to trigger a call to onDataAvailable()
* for the registered non-blocking read listener.
*/
DISPATCH_READ,
/**
* Indicates that the container needs to trigger a call to onWritePossible()
* for the registered non-blocking write listener.
*/
DISPATCH_WRITE,
/**
* Execute any non-blocking dispatches that have been registered via
* {@link #DISPATCH_READ} or {@link #DISPATCH_WRITE}. Typically required
* when the non-blocking listeners are configured on a thread where the
* processing wasn't triggered by a read or write event on the socket.
*/
DISPATCH_EXECUTE,
/**
* Is server push supported and allowed for the current request?
*/
IS_PUSH_SUPPORTED,
/**
* Push a request on behalf of the client of the current request.
*/
PUSH_REQUEST
}
| apache-2.0 |
R3glisse/CS511 | SMART_FRIDGE_FRIGOLO/frifri/frifrifri/frifrifri/Frigolo/src/main/java/fr/esisar/frigolo/entities/ConsigneEJBEntity.java | 5048 | package fr.esisar.frigolo.entities;
import java.io.Serializable;
import javax.persistence.Column;
import javax.persistence.Embeddable;
import javax.persistence.GeneratedValue;
import javax.persistence.NamedQueries;
import javax.persistence.NamedQuery;
@Embeddable
@NamedQueries({ @NamedQuery(name = "findConsignes", query = "select m from ConsigneEJBEntity m") })
public class ConsigneEJBEntity implements Serializable {
/**
* Permit to the class to be serializable
*/
private static final long serialVersionUID = -8706698602686846045L;
/**
* Constant for hashcode
*/
private static final int PRIME = 31;
/**
* identifier for the rule
*/
@GeneratedValue
@Column
private Long idConsigne;
/**
* rule for the air quality
*/
@Column
private int qualiteAir;
/**
* rule for the maximum temperature
*/
@Column
private int temperatureMax;
/**
* rule for the minimum temperature
*/
@Column
private int temperatureMin;
/**
 * Creates a fully populated rule.
 *
 * @param idConsigne identifier of the rule
 * @param qualiteAir air-quality rule
 * @param temperatureMax maximum-temperature rule
 * @param temperatureMin minimum-temperature rule
 */
public ConsigneEJBEntity(Long idConsigne, int qualiteAir, int temperatureMax, int temperatureMin) {
    this.idConsigne = idConsigne;
    this.qualiteAir = qualiteAir;
    this.temperatureMax = temperatureMax;
    this.temperatureMin = temperatureMin;
}

/** No-arg constructor needed by the persistence provider (@Embeddable). */
public ConsigneEJBEntity() {
}
/**
* Getter for the id of the rule
*
* @return the identifier of the rule
*/
public Long getIdConsigne() {
return idConsigne;
}
/**
* setter for the id of the rule
*
* @param idConsigne
* the identifier of the rule to set
*/
public void setIdConsigne(Long idConsigne) {
this.idConsigne = idConsigne;
}
/**
* Getter for the air quality
*
* @return the air quality
*/
public int getQualiteAir() {
return qualiteAir;
}
/**
* setter for the air quality
*
* @param qualiteAir
* the air quality to set
*/
public void setQualiteAir(int qualiteAir) {
this.qualiteAir = qualiteAir;
}
/**
* Getter for the maximum temperature
*
* @return the max temperature
*/
public int getTemperatureMax() {
return temperatureMax;
}
/**
* setter for the maximum temperature
*
* @param temperatureMax
* the max temperature to set
*/
public void setTemperatureMax(int temperatureMax) {
this.temperatureMax = temperatureMax;
}
/**
* Getter for the minimum temperature
*
* @return the min temperature
*/
public int getTemperatureMin() {
return temperatureMin;
}
/**
* setter for the minimum temperature
*
* @param temperatureMin
* the min temperature to set
*/
public void setTemperatureMin(int temperatureMin) {
this.temperatureMin = temperatureMin;
}
/*
* (non-Javadoc)
*
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
final int prime = PRIME;
int result = 1;
result = prime * result + (idConsigne == null ? 0 : idConsigne.hashCode());
result = prime * result + qualiteAir;
result = prime * result + temperatureMax;
result = prime * result + temperatureMin;
return result;
}
/*
* (non-Javadoc)
*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
ConsigneEJBEntity other = (ConsigneEJBEntity) obj;
if (idConsigne == null) {
if (other.idConsigne != null) {
return false;
}
} else if (!idConsigne.equals(other.idConsigne)) {
return false;
}
if (qualiteAir != other.qualiteAir) {
return false;
}
if (temperatureMax != other.temperatureMax) {
return false;
}
if (temperatureMin != other.temperatureMin) {
return false;
}
return true;
}
/*
* (non-Javadoc)
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return "ConsigneEJBEntity [idConsigne=" + idConsigne + ", qualiteAir=" + qualiteAir + ", temperatureMax="
+ temperatureMax + ", temperatureMin=" + temperatureMin + "]";
}
}
| apache-2.0 |
orlenko/FBBot | src/FBBot/FBBot/wsgi.py | 385 | """
WSGI config for FBBot project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "FBBot.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| apache-2.0 |
mezz64/home-assistant | homeassistant/components/agent_dvr/__init__.py | 2085 | """Support for Agent."""
from agent import AgentError
from agent.a import Agent
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import CONNECTION, DOMAIN as AGENT_DOMAIN, SERVER_URL
ATTRIBUTION = "ispyconnect.com"
DEFAULT_BRAND = "Agent DVR by ispyconnect.com"
PLATFORMS = [Platform.ALARM_CONTROL_PANEL, Platform.CAMERA]
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Set up the Agent component."""
hass.data.setdefault(AGENT_DOMAIN, {})
server_origin = config_entry.data[SERVER_URL]
agent_client = Agent(server_origin, async_get_clientsession(hass))
try:
await agent_client.update()
except AgentError as err:
await agent_client.close()
raise ConfigEntryNotReady from err
if not agent_client.is_available:
raise ConfigEntryNotReady
await agent_client.get_devices()
hass.data[AGENT_DOMAIN][config_entry.entry_id] = {CONNECTION: agent_client}
device_registry = dr.async_get(hass)
device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
identifiers={(AGENT_DOMAIN, agent_client.unique)},
manufacturer="iSpyConnect",
name=f"Agent {agent_client.name}",
model="Agent DVR",
sw_version=agent_client.version,
)
hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
config_entry, PLATFORMS
)
await hass.data[AGENT_DOMAIN][config_entry.entry_id][CONNECTION].close()
if unload_ok:
hass.data[AGENT_DOMAIN].pop(config_entry.entry_id)
return unload_ok
| apache-2.0 |
be-hase/honoumi | src/main/java/com/be_hase/honoumi/controller/argument/FormParams.java | 316 | package com.be_hase.honoumi.controller.argument;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Target({ ElementType.PARAMETER })
@Retention(RetentionPolicy.RUNTIME)
public @interface FormParams {
}
| apache-2.0 |
googleapis/gapic-generator-ruby | shared/output/cloud/language_v1/lib/google/cloud/language/v1/version.rb | 783 | # frozen_string_literal: true
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
module Google
module Cloud
module Language
module V1
VERSION = "0.0.1"
end
end
end
end
| apache-2.0 |
gomininggo/jTracker | print.php | 5738 | <!doctype html>
<?PHP
require_once('lib/connections/db.php');
include('lib/functions/functions.php');
checkLogin('2');
$getuser = getUserRecords($_SESSION['user_id']);
$intUserID=$getuser[0]['id'];
include 'variables.php';
include 'monnix_db.php';
//$encrypted_txt = encrypt_decrypt('encrypt', $plain_txt);
//echo "Encrypted Text = $encrypted_txt\n";
//$compIDs = encrypt_decrypt('decrypt', $_GET['its']);
$compIDs=$_POST['compsel'];
?>
<html>
<head>
<style>
#contentdv {
border-radius: 5px;
background-color: white;
border: 2px solid black;
font-size:12px;
}
.profileTable {
background-color: white;
font-size:12px;
border-collapse: collapse;
border-spacing: 0;
}
.tableHeader {
border-bottom: 2px solid black;
font-weight:bold;
font-size:14px;
}
.tableRow {
border-bottom: 2px solid black;
}
</style>
<meta charset="utf-8">
<title>Job Search Tracker Print Page</title>
</head>
<body>
<?
$dataSels=$_POST['datasel'];
$prfl=0;
$rsrc=0;
$cnts=0;
if (in_array('prfl', $dataSels)){$prfl=1;}
if (in_array('rsrc', $dataSels)){$rsrc=1;}
if (in_array('cnts', $dataSels)){$cnts=1;}
?>
<table align='left' width='700'><tr><td>
<?php
$runTimes=0;
//$json = json_decode($compIDs, true);
foreach ($compIDs as $compID)
{
$query = "SELECT profiles.name, profiles.notes AS pNotes, Locations.area AS lArea, Locations.country, Growth.area as gArea, Growth.notes as gNotes FROM profiles LEFT JOIN Locations ON Locations.index=profiles.compLocation LEFT JOIN Growth ON Growth.index=profiles.compGrowth WHERE profiles.index='".$compID."' AND profiles.user='$intUserID';";
$result = mysql_query($query) or die('<b>Error, query failed:</b> <BR>' . mysql_error());
$row = mysql_fetch_array($result);
if ($runTimes>0){echo "<BR><HR>";}
echo "<h3 style='margin:5px 5px 5px 5px;' align='center'>".$row['name']."</h3>";
if($prfl==1){
?>
<table width="80%" border="0" align="center" class="profileTable">
<tr>
<td><b>Growth Area:</b> <? echo $row['gArea'] ?><BR><? echo $row['gNotes'] ?> </td>
<td><b>Location:</b> <? echo $row['lArea'].",".$row['country'] ?></td>
</tr>
<tr>
<td colspan="2"><b>Notes:</b> <? echo $row['pNotes'] ?></td>
</tr>
</table>
<?
}
//START Contacts PRINT***************************************************
if($cnts==1)
{
$query = "SELECT COUNT(Contacts.index) AS cCount FROM Contacts WHERE Contacts.company='".$compID."' AND Contacts.user=$intUserID;";
$result3 = mysql_query($query) or die('Error, query failed' . mysql_error());
$row3 = mysql_fetch_array($result3);
if($row3['cCount']>0)
{
echo "<h4 style='margin:5px 5px 5px 5px;' align='center'>".$row['name']." Contacts</h3>";
?>
<div id='contentdv' style="display:block;" align="left">
<table style="text-align: left;" cellpadding="2" cellspacing="2" width="100%" align="center">
<tbody>
<tr class='tableHeader'>
<td align="left">Name</td>
<td align="left" width="120px">Role</td>
<td align="left" width="150px">Phone #</td>
<td align="left">E-Mail</td>
</tr>
<?php
$query2 = "SELECT Contacts.name AS cName, Contacts.phone, Contacts.email, cp_roles.name as cRole, Contacts.notes FROM Contacts LEFT JOIN cp_roles ON cp_roles.index=Contacts.role WHERE Contacts.company='".$compID."' AND Contacts.user=$intUserID;";
$result2 = mysql_query($query2) or die('Error, query failed' . mysql_error());
while($row2 = mysql_fetch_array($result2))
{
echo "<tr>";
echo "<td><u>".$row2['cName']."</u></td>";
echo "<td>".$row2['cRole']."</td>";
echo "<td>".$row2['phone']."</td>";
echo "<td>".$row2['email']."</td>";
echo "</tr><tr colspan='4' class='tableRow'><td><strong>Notes:</strong> ".$row2['notes']."</td></tr>";
}
?>
</tbody>
</table>
</div>
<?
}
}
//START RESEARCH PRINT***************************************************
if($rsrc==1)
{
$query = "SELECT COUNT(Research.index) AS rCount FROM Research WHERE Research.company='".$compID."' AND Research.user=$intUserID;";
$result3 = mysql_query($query) or die('Error, query failed' . mysql_error());
$row3 = mysql_fetch_array($result3);
if($row3['rCount']>0)
{
echo "<h4 style='margin:5px 5px 5px 5px;' align='center'>".$row['name']." Research</h3>";
?>
<div id='contentdv' style="display:block;" align="left">
<table style="text-align: left;" border="0" cellpadding="2" cellspacing="2" width="100%" align="center">
<tbody>
<tr class='tableHeader'>
<td align="left">Keyphrase</td>
<td align="left">Description</td>
</tr>
<?php
$query2 = "SELECT * FROM Research WHERE Research.company='".$compID."' AND Research.user=$intUserID;";
$result2 = mysql_query($query2) or die('Error, query failed' . mysql_error());
while($row2 = mysql_fetch_array($result2))
{
echo "<tr class='tableRow'>";
echo "<td>".$row2['phrase']."</td>";
echo "<td>".$row2['notes']."</td></tr>";
}
?>
</tbody>
</table>
</div>
<?php
}
else {
?>
This company has no reasearch yet.
<BR>
</td></tr>
<?php
}
}//END RESEARCH PRINT
$runTimes++;
}
?>
</table>
<!-- Piwik -->
<script type="text/javascript">
var _paq = _paq || [];
_paq.push(['trackPageView']);
_paq.push(['enableLinkTracking']);
(function() {
var u="//monnixsys.com/analysis/";
_paq.push(['setTrackerUrl', u+'piwik.php']);
_paq.push(['setSiteId', 2]);
var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
g.type='text/javascript'; g.async=true; g.defer=true; g.src=u+'piwik.js'; s.parentNode.insertBefore(g,s);
})();
</script>
<noscript><p><img src="//monnixsys.com/analysis/piwik.php?idsite=2" style="border:0;" alt="" /></p></noscript>
<!-- End Piwik Code -->
</body>
</html> | apache-2.0 |
foursquare/finagle | finagle-thrift/src/main/scala/com/twitter/finagle/thrift/SeqIdFilter.scala | 3093 | package com.twitter.finagle.thrift
import com.twitter.finagle.{Service, SimpleFilter, TransportException}
import com.twitter.util.{Time, Future, Try, Return, Throw}
import scala.util.Random
case class SeqMismatchException(id: Int, expected: Int) extends TransportException {
override def toString = "SeqMismatchException: got %d, expected %d".format(id, expected)
}
object SeqIdFilter {
val VersionMask = 0xffff0000
val Version1 = 0x80010000
}
/**
* A filter to override the input sequence ids, replacing them with
* ones of our own provenance. We perform checking on these and fail
* accordingly.
*
* Note: This only works when using BinaryProtocol, but will become
* generic with mux support.
*/
class SeqIdFilter extends SimpleFilter[ThriftClientRequest, Array[Byte]] {
import SeqIdFilter._
// Why random? Since the underlying codec currently does serial
// dispatching, it doesn't make any difference, but technically we
// need to ensure that we pick IDs from a free pool.
private[this] val rng = new Random(Time.now.inMilliseconds)
private[this] def get32(buf: Array[Byte], off: Int) =
((buf(off+0) & 0xff) << 24) |
((buf(off+1) & 0xff) << 16) |
((buf(off+2) & 0xff) << 8) |
(buf(off+3) & 0xff)
private[this] def put32(buf: Array[Byte], off: Int, x: Int) {
buf(off) = (x>>24 & 0xff).toByte
buf(off+1) = (x>>16 & 0xff).toByte
buf(off+2) = (x>>8 & 0xff).toByte
buf(off+3) = (x & 0xff).toByte
}
private[this] def badMsg(why: String) = Throw(new IllegalArgumentException(why))
private[this] def getAndSetId(buf: Array[Byte], newId: Int): Try[Int] = {
if (buf.size < 4) return badMsg("short header")
val header = get32(buf, 0)
val off = if (header < 0) {
// [4]header
// [4]n
// [n]string
// [4]seqid
if ((header&VersionMask) != Version1)
return badMsg("bad version %d".format(header&VersionMask))
if (buf.size < 8) return badMsg("short name size")
4+4+get32(buf, 4)
} else {
// [4]n
// [n]name
// [1]type
// [4]seqid
4+header+1
}
if (buf.size < off+4) return badMsg("short buffer")
val currentId = get32(buf, off)
put32(buf, off, newId)
Return(currentId)
}
def apply(req: ThriftClientRequest, service: Service[ThriftClientRequest, Array[Byte]]): Future[Array[Byte]] =
if (req.oneway) service(req) else {
val reqBuf = req.message.clone()
val id = rng.nextInt()
val givenId = getAndSetId(reqBuf, id) match {
case Return(id) => id
case Throw(exc) => return Future.exception(exc)
}
val newReq = new ThriftClientRequest(reqBuf, req.oneway)
service(newReq) flatMap { resBuf =>
// We know it's safe to mutate the response buffer since the
// codec never touches it again.
getAndSetId(resBuf, givenId) match {
case Return(`id`) => Future.value(resBuf)
case Return(badId) => Future.exception(SeqMismatchException(badId, id))
case Throw(exc) => Future.exception(exc)
}
}
}
}
| apache-2.0 |
ixtf/japp-execution | server/src/main/java/org/jzb/execution/domain/extra/ExamQuestionLab.java | 1754 | package org.jzb.execution.domain.extra;
import com.google.common.collect.Sets;
import org.hibernate.validator.constraints.NotBlank;
import org.jzb.J;
import org.jzb.execution.domain.AbstractLogable;
import org.jzb.execution.domain.operator.Operator;
import javax.persistence.*;
import java.security.Principal;
import java.util.Collection;
import java.util.Set;
/**
* Created by jzb on 17-4-15.
*/
@Entity
@Table(name = "T_EXAMQUESTIONLAB")
@NamedQueries({
@NamedQuery(name = "ExamQuestionLab.queryByParticipant", query = "SELECT o FROM ExamQuestionLab o WHERE :participant MEMBER OF o.participants AND o.deleted=FALSE "),
@NamedQuery(name = "ExamQuestionLab.queryByCreator", query = "SELECT o FROM ExamQuestionLab o WHERE o.creator=:creator AND o.deleted=FALSE "),
})
public class ExamQuestionLab extends AbstractLogable {
@NotBlank
private String name;
@ManyToMany
@JoinTable(name = "T_EXAMQUESTIONLAB_T_PARTICIPANT")
private Collection<Operator> participants;
public boolean isManager(Principal principal) {
Set<Operator> operators = Sets.newHashSet();
operators.add(getCreator());
operators.add(getModifier());
operators.addAll(J.emptyIfNull(participants));
return operators.parallelStream()
.filter(it -> it.getId().equals(principal.getName()))
.findFirst()
.isPresent();
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Collection<Operator> getParticipants() {
return participants;
}
public void setParticipants(Collection<Operator> participants) {
this.participants = participants;
}
}
| apache-2.0 |
NuckChorris/hummingbird | test/models/manga_test.rb | 1525 | # == Schema Information
#
# Table name: manga
#
# id :integer not null, primary key
# romaji_title :string(255)
# slug :string(255)
# english_title :string(255)
# synopsis :text default(""), not null
# poster_image_file_name :string(255)
# poster_image_content_type :string(255)
# poster_image_file_size :integer
# poster_image_updated_at :datetime
# cover_image_file_name :string(255)
# cover_image_content_type :string(255)
# cover_image_file_size :integer
# cover_image_updated_at :datetime
# start_date :date
# end_date :date
# serialization :string(255)
# mal_id :integer
# created_at :datetime not null
# updated_at :datetime not null
# status :string(255)
# cover_image_top_offset :integer default(0)
# volume_count :integer
# chapter_count :integer
# manga_type :string(255) default("Manga")
#
require 'test_helper'
class MangaTest < ActiveSupport::TestCase
should validate_presence_of(:romaji_title)
should have_and_belong_to_many(:genres)
test "should implement search scopes" do
assert Manga.full_search("monstre").include?(manga(:monster)), "manga fuzzy search"
assert Manga.instant_search("monster").include?(manga(:monster)), "manga simple search"
end
end
| apache-2.0 |
Payshare/js-stellar-lib | test/setup/browser.js | 153 | window.global.request = StellarLib.request;
window.global.fixtures = StellarLib.fixtures;
window.global.dev_server = {hostname: "localhost", port: 1337}; | apache-2.0 |
smallbam/My-first-repository-on-GitHub | HelloGit/src/org/eclipse/wang/datastructure/package-info.java | 151 | /**
* data structure examples
* @author super
* @version create on:2015年5月10日 上午9:43:20
*/
package org.eclipse.wang.datastructure; | apache-2.0 |
olirogers/openui5 | src/sap.m/src/sap/m/ToggleButton.js | 2969 | /*!
* ${copyright}
*/
// Provides control sap.m.ToggleButton.
sap.ui.define(['jquery.sap.global', './Button', './library', 'sap/ui/core/EnabledPropagator'],
function(jQuery, Button, library, EnabledPropagator) {
"use strict";
/**
* Constructor for a new ToggleButton.
*
* @param {string} [sId] id for the new control, generated automatically if no id is given
* @param {object} [mSettings] initial settings for the new control
*
* @class
* The ToggleButton Control is a Button that can be toggled between pressed and normal state
* @extends sap.m.Button
*
* @author SAP SE
* @version ${version}
*
* @constructor
* @public
* @alias sap.m.ToggleButton
* @ui5-metamodel This control/element also will be described in the UI5 (legacy) designtime metamodel
*/
var ToggleButton = Button.extend("sap.m.ToggleButton", /** @lends sap.m.ToggleButton.prototype */ { metadata : {
library : "sap.m",
properties : {
/**
* The property is “true” when the control is toggled. The default state of this property is "false".
*/
pressed : {type : "boolean", group : "Data", defaultValue : false}
}
}});
EnabledPropagator.call(ToggleButton.prototype);
/**
* Function is called when ToggleButton is clicked.
*
* @param {jQuery.Event} oEvent
* @private
*/
ToggleButton.prototype.ontap = function(oEvent) {
// mark the event for components that needs to know if the event was handled by the ToggleButton
oEvent.setMarked();
if (this.getEnabled()) {
this.setPressed(!this.getPressed());
this.firePress({ pressed: this.getPressed() });
}
};
ToggleButton.prototype.setPressed = function(bPressed) {
bPressed = !!bPressed;
if (bPressed != this.getPressed()) {
this.setProperty("pressed", bPressed, true);
this.$().attr("aria-pressed", bPressed);
this.$("inner").toggleClass("sapMToggleBtnPressed",bPressed && !this._isUnstyled());
}
return this;
};
/**
* Handle the key down event for SPACE and ENTER.
* @param {jQuery.Event} oEvent - the keyboard event.
* @private
*/
ToggleButton.prototype.onkeydown = function(oEvent) {
if (oEvent.which === jQuery.sap.KeyCodes.SPACE || oEvent.which === jQuery.sap.KeyCodes.ENTER) {
this.ontap(oEvent);
}
};
/**
* Override the keyup event handler of Button.js.
*/
ToggleButton.prototype.onkeyup = function(oEvent) {
if (oEvent.which === jQuery.sap.KeyCodes.SPACE || oEvent.which === jQuery.sap.KeyCodes.ENTER) {
oEvent.setMarked();
}
};
/**
* @see sap.ui.core.Control#getAccessibilityInfo
* @protected
*/
ToggleButton.prototype.getAccessibilityInfo = function() {
var oInfo = Button.prototype.getAccessibilityInfo.apply(this, arguments);
if (this.getPressed()) {
oInfo.description = ((oInfo.description || "") + " " +
sap.ui.getCore().getLibraryResourceBundle("sap.m").getText("ACC_CTR_STATE_PRESSED")).trim();
}
return oInfo;
};
return ToggleButton;
}, /* bExport= */ true);
| apache-2.0 |
googleads/googleads-java-lib | modules/dfp_axis/src/main/java/com/google/api/ads/admanager/axis/v202108/LineItemService.java | 1252 | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* LineItemService.java
*
* This file was auto-generated from WSDL
* by the Apache Axis 1.4 Mar 02, 2009 (07:08:06 PST) WSDL2Java emitter.
*/
package com.google.api.ads.admanager.axis.v202108;
public interface LineItemService extends javax.xml.rpc.Service {
public java.lang.String getLineItemServiceInterfacePortAddress();
public com.google.api.ads.admanager.axis.v202108.LineItemServiceInterface getLineItemServiceInterfacePort() throws javax.xml.rpc.ServiceException;
public com.google.api.ads.admanager.axis.v202108.LineItemServiceInterface getLineItemServiceInterfacePort(java.net.URL portAddress) throws javax.xml.rpc.ServiceException;
}
| apache-2.0 |
openpitrix/openpitrix | test/models/openpitrix_repo_labels.go | 955 | // Code generated by go-swagger; DO NOT EDIT.
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"strconv"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
)
// OpenpitrixRepoLabels labels
// swagger:model openpitrixRepoLabels
type OpenpitrixRepoLabels []*OpenpitrixRepoLabel
// Validate validates this openpitrix repo labels
func (m OpenpitrixRepoLabels) Validate(formats strfmt.Registry) error {
var res []error
for i := 0; i < len(m); i++ {
if swag.IsZero(m[i]) { // not required
continue
}
if m[i] != nil {
if err := m[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName(strconv.Itoa(i))
}
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
| apache-2.0 |
lvweiwolf/poi-3.16 | src/java/org/apache/poi/poifs/macros/VBAMacroReader.java | 15907 | /* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.poifs.macros;
import static org.apache.poi.util.StringUtil.startsWithIgnoreCase;
import static org.apache.poi.util.StringUtil.endsWithIgnoreCase;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.Closeable;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.PushbackInputStream;
import java.nio.charset.Charset;
import java.util.HashMap;
import java.util.Map;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import org.apache.poi.poifs.filesystem.DirectoryNode;
import org.apache.poi.poifs.filesystem.DocumentInputStream;
import org.apache.poi.poifs.filesystem.DocumentNode;
import org.apache.poi.poifs.filesystem.Entry;
import org.apache.poi.poifs.filesystem.NPOIFSFileSystem;
import org.apache.poi.poifs.filesystem.OfficeXmlFileException;
import org.apache.poi.util.CodePageUtil;
import org.apache.poi.util.HexDump;
import org.apache.poi.util.IOUtils;
import org.apache.poi.util.RLEDecompressingInputStream;
/**
* <p>Finds all VBA Macros in an office file (OLE2/POIFS and OOXML/OPC),
* and returns them.
* </p>
* <p>
* <b>NOTE:</b> This does not read macros from .ppt files.
* See org.apache.poi.hslf.usermodel.TestBugs.getMacrosFromHSLF() in the scratchpad
* module for an example of how to do this. Patches that make macro
* extraction from .ppt more elegant are welcomed!
* </p>
*
* @since 3.15-beta2
*/
public class VBAMacroReader implements Closeable {
protected static final String VBA_PROJECT_OOXML = "vbaProject.bin";
protected static final String VBA_PROJECT_POIFS = "VBA";
// FIXME: When minimum supported version is Java 7, replace with java.nio.charset.StandardCharsets.UTF_16LE
private static final Charset UTF_16LE = Charset.forName("UTF-16LE");
private NPOIFSFileSystem fs;
public VBAMacroReader(InputStream rstream) throws IOException {
PushbackInputStream stream = new PushbackInputStream(rstream, 8);
byte[] header8 = IOUtils.peekFirst8Bytes(stream);
if (NPOIFSFileSystem.hasPOIFSHeader(header8)) {
fs = new NPOIFSFileSystem(stream);
} else {
openOOXML(stream);
}
}
public VBAMacroReader(File file) throws IOException {
try {
this.fs = new NPOIFSFileSystem(file);
} catch (OfficeXmlFileException e) {
openOOXML(new FileInputStream(file));
}
}
public VBAMacroReader(NPOIFSFileSystem fs) {
this.fs = fs;
}
private void openOOXML(InputStream zipFile) throws IOException {
ZipInputStream zis = new ZipInputStream(zipFile);
ZipEntry zipEntry;
while ((zipEntry = zis.getNextEntry()) != null) {
if (endsWithIgnoreCase(zipEntry.getName(), VBA_PROJECT_OOXML)) {
try {
// Make a NPOIFS from the contents, and close the stream
this.fs = new NPOIFSFileSystem(zis);
return;
} catch (IOException e) {
// Tidy up
zis.close();
// Pass on
throw e;
}
}
}
zis.close();
throw new IllegalArgumentException("No VBA project found");
}
public void close() throws IOException {
fs.close();
fs = null;
}
/**
* Reads all macros from all modules of the opened office file.
* @return All the macros and their contents
*
* @since 3.15-beta2
*/
public Map<String, String> readMacros() throws IOException {
final ModuleMap modules = new ModuleMap();
findMacros(fs.getRoot(), modules);
Map<String, String> moduleSources = new HashMap<String, String>();
for (Map.Entry<String, Module> entry : modules.entrySet()) {
Module module = entry.getValue();
if (module.buf != null && module.buf.length > 0) { // Skip empty modules
moduleSources.put(entry.getKey(), new String(module.buf, modules.charset));
}
}
return moduleSources;
}
protected static class Module {
Integer offset;
byte[] buf;
void read(InputStream in) throws IOException {
final ByteArrayOutputStream out = new ByteArrayOutputStream();
IOUtils.copy(in, out);
out.close();
buf = out.toByteArray();
}
}
protected static class ModuleMap extends HashMap<String, Module> {
Charset charset = Charset.forName("Cp1252"); // default charset
}
/**
* Recursively traverses directory structure rooted at <tt>dir</tt>.
* For each macro module that is found, the module's name and code are
* added to <tt>modules<tt>.
*
* @param dir The directory of entries to look at
* @param modules The resulting map of modules
* @throws IOException If reading the VBA module fails
* @since 3.15-beta2
*/
protected void findMacros(DirectoryNode dir, ModuleMap modules) throws IOException {
if (VBA_PROJECT_POIFS.equalsIgnoreCase(dir.getName())) {
// VBA project directory, process
readMacros(dir, modules);
} else {
// Check children
for (Entry child : dir) {
if (child instanceof DirectoryNode) {
findMacros((DirectoryNode)child, modules);
}
}
}
}
/**
* Read <tt>length</tt> bytes of MBCS (multi-byte character set) characters from the stream
*
* @param stream the inputstream to read from
* @param length number of bytes to read from stream
* @param charset the character set encoding of the bytes in the stream
* @return a java String in the supplied character set
* @throws IOException If reading from the stream fails
*/
private static String readString(InputStream stream, int length, Charset charset) throws IOException {
byte[] buffer = new byte[length];
int count = stream.read(buffer);
return new String(buffer, 0, count, charset);
}
/**
* reads module from DIR node in input stream and adds it to the modules map for decompression later
* on the second pass through this function, the module will be decompressed
*
* Side-effects: adds a new module to the module map or sets the buf field on the module
* to the decompressed stream contents (the VBA code for one module)
*
* @param in the run-length encoded input stream to read from
* @param streamName the stream name of the module
* @param modules a map to store the modules
* @throws IOException If reading data from the stream or from modules fails
*/
private static void readModule(RLEDecompressingInputStream in, String streamName, ModuleMap modules) throws IOException {
int moduleOffset = in.readInt();
Module module = modules.get(streamName);
if (module == null) {
// First time we've seen the module. Add it to the ModuleMap and decompress it later
module = new Module();
module.offset = moduleOffset;
modules.put(streamName, module);
// Would adding module.read(in) here be correct?
} else {
// Decompress a previously found module and store the decompressed result into module.buf
InputStream stream = new RLEDecompressingInputStream(
new ByteArrayInputStream(module.buf, moduleOffset, module.buf.length - moduleOffset)
);
module.read(stream);
stream.close();
}
}
private static void readModule(DocumentInputStream dis, String name, ModuleMap modules) throws IOException {
Module module = modules.get(name);
// TODO Refactor this to fetch dir then do the rest
if (module == null) {
// no DIR stream with offsets yet, so store the compressed bytes for later
module = new Module();
modules.put(name, module);
module.read(dis);
} else if (module.buf == null) { //if we haven't already read the bytes for the module keyed off this name...
if (module.offset == null) {
//This should not happen. bug 59858
throw new IOException("Module offset for '" + name + "' was never read.");
}
// we know the offset already, so decompress immediately on-the-fly
long skippedBytes = dis.skip(module.offset);
if (skippedBytes != module.offset) {
throw new IOException("tried to skip " + module.offset + " bytes, but actually skipped " + skippedBytes + " bytes");
}
InputStream stream = new RLEDecompressingInputStream(dis);
module.read(stream);
stream.close();
}
}
/**
* Skips <tt>n</tt> bytes in an input stream, throwing IOException if the
* number of bytes skipped is different than requested.
* @throws IOException If skipping would exceed the available data or skipping did not work.
*/
private static void trySkip(InputStream in, long n) throws IOException {
long skippedBytes = in.skip(n);
if (skippedBytes != n) {
if (skippedBytes < 0) {
throw new IOException(
"Tried skipping " + n + " bytes, but no bytes were skipped. "
+ "The end of the stream has been reached or the stream is closed.");
} else {
throw new IOException(
"Tried skipping " + n + " bytes, but only " + skippedBytes + " bytes were skipped. "
+ "This should never happen.");
}
}
}
    // Constants from MS-OVBA: https://msdn.microsoft.com/en-us/library/office/cc313094(v=office.12).aspx
    // These are record ids read from the decompressed "dir" stream; the switch in
    // readMacros() dispatches on them.  The @SuppressWarnings("unused") ids are
    // kept for documentation even though no code here handles them explicitly
    // (they fall through to the default skip case).
    private static final int EOF = -1;
    private static final int VERSION_INDEPENDENT_TERMINATOR = 0x0010;
    @SuppressWarnings("unused")
    private static final int VERSION_DEPENDENT_TERMINATOR = 0x002B;
    private static final int PROJECTVERSION = 0x0009;
    private static final int PROJECTCODEPAGE = 0x0003;
    private static final int STREAMNAME = 0x001A;
    private static final int MODULEOFFSET = 0x0031;
    @SuppressWarnings("unused")
    private static final int MODULETYPE_PROCEDURAL = 0x0021;
    @SuppressWarnings("unused")
    private static final int MODULETYPE_DOCUMENT_CLASS_OR_DESIGNER = 0x0022;
    @SuppressWarnings("unused")
    private static final int PROJECTLCID = 0x0002;
    @SuppressWarnings("unused")
    private static final int MODULE_NAME = 0x0019;
    @SuppressWarnings("unused")
    private static final int MODULE_NAME_UNICODE = 0x0047;
    @SuppressWarnings("unused")
    private static final int MODULE_DOC_STRING = 0x001c;
    // Reserved marker expected between a stream name and its Unicode variant.
    private static final int STREAMNAME_RESERVED = 0x0032;
    /**
     * Reads VBA Project modules from a VBA Project directory located at
     * <tt>macroDir</tt> into <tt>modules</tt>.
     *
     * The "dir" document stream is parsed record by record (project code page,
     * stream names, module offsets); every other document stream — except
     * __SRP* and _VBA_PROJECT, which do not contain macros — is treated as a
     * module stream.
     *
     * @param macroDir the directory node holding the VBA project streams
     * @param modules map of module stream name to module, filled in place
     * @throws IOException if a stream is truncated or a record is malformed
     * @since 3.15-beta2
     */
    protected void readMacros(DirectoryNode macroDir, ModuleMap modules) throws IOException {
        for (Entry entry : macroDir) {
            // Only document nodes are of interest; skip sub-directories etc.
            if (! (entry instanceof DocumentNode)) { continue; }
            String name = entry.getName();
            DocumentNode document = (DocumentNode)entry;
            DocumentInputStream dis = new DocumentInputStream(document);
            try {
                if ("dir".equalsIgnoreCase(name)) {
                    // process DIR: the stream is RLE-compressed per MS-OVBA.
                    RLEDecompressingInputStream in = new RLEDecompressingInputStream(dis);
                    // streamName carries state between a STREAMNAME record and
                    // the MODULEOFFSET record that follows it.
                    String streamName = null;
                    int recordId = 0;
                    try {
                        while (true) {
                            recordId = in.readShort();
                            if (EOF == recordId
                                    || VERSION_INDEPENDENT_TERMINATOR == recordId) {
                                break;
                            }
                            int recordLength = in.readInt();
                            switch (recordId) {
                            case PROJECTVERSION:
                                // Fixed-size payload; contents not needed here.
                                trySkip(in, 6);
                                break;
                            case PROJECTCODEPAGE:
                                // Determines the charset used to decode STREAMNAME records.
                                int codepage = in.readShort();
                                modules.charset = Charset.forName(CodePageUtil.codepageToEncoding(codepage, true));
                                break;
                            case STREAMNAME:
                                streamName = readString(in, recordLength, modules.charset);
                                int reserved = in.readShort();
                                if (reserved != STREAMNAME_RESERVED) {
                                    throw new IOException("Expected x0032 after stream name before Unicode stream name, but found: "+
                                            Integer.toHexString(reserved));
                                }
                                int unicodeNameRecordLength = in.readInt();
                                readUnicodeString(in, unicodeNameRecordLength);
                                // Unicode name currently discarded; do something with this at some point
                                break;
                            case MODULEOFFSET:
                                // Relies on streamName set by the preceding STREAMNAME record.
                                readModule(in, streamName, modules);
                                break;
                            default:
                                // Unknown/unhandled record: skip its payload.
                                trySkip(in, recordLength);
                                break;
                            }
                        }
                    } catch (final IOException e) {
                        // Wrap with the record id so malformed files are diagnosable.
                        throw new IOException(
                            "Error occurred while reading macros at section id "
                            + recordId + " (" + HexDump.shortToHex(recordId) + ")", e);
                    }
                    finally {
                        in.close();
                    }
                } else if (!startsWithIgnoreCase(name, "__SRP")
                        && !startsWithIgnoreCase(name, "_VBA_PROJECT")) {
                    // process module, skip __SRP and _VBA_PROJECT since these do not contain macros
                    readModule(dis, name, modules);
                }
            }
            finally {
                dis.close();
            }
        }
    }
private String readUnicodeString(RLEDecompressingInputStream in, int unicodeNameRecordLength) throws IOException {
byte[] buffer = new byte[unicodeNameRecordLength];
IOUtils.readFully(in, buffer);
return new String(buffer, UTF_16LE);
}
}
| apache-2.0 |
wangzijian777/contentManager | content/webapp/e3/tree/xtree/map.js | 1673 | function Map(){
this._values = new Array();
this._a = new Array();
}
// Stores obj under key. An existing key keeps its slot (overwrite);
// a new key is appended at the end.
Map.prototype.put = function (key, obj){
    var slot = this._a[key];
    if (slot == null) {
        // Unknown key: allocate the next slot and remember it.
        slot = this._values.length;
        this._a[key] = slot;
    }
    this._values[slot] = obj;
}
// Resolves key_index to a slot index: a string is looked up in _a,
// a number is used as-is; anything else alerts and yields null.
Map.prototype._getIndex = function (key_index){
    var kind = typeof key_index;
    if (kind == "string") {
        return this._a[key_index];
    }
    if (kind == "number") {
        return key_index;
    }
    alert("您传入的参数类型不对,必须是正整数或字符串!");
    return null;
}
// Returns the value stored for key_index (string key or numeric index),
// or null when absent / out of range / removed.
Map.prototype.get = function (key_index){
    var idx = this._getIndex(key_index);
    if (idx == null || idx < 0 || idx >= this._values.length) {
        return null;
    }
    var value = this._values[idx];
    // Removed slots hold a RemovedObj placeholder and read back as null.
    return (value instanceof RemovedObj) ? null : value;
}
// Removes the entry at key_index by overwriting its slot with a
// RemovedObj placeholder; returns the previous slot contents (or null
// when the key/index does not resolve to a valid slot).
Map.prototype.remove = function (key_index){
    var idx = this._getIndex(key_index);
    if (idx == null || idx < 0 || idx >= this._values.length) {
        return null;
    }
    var previous = this._values[idx];
    this._values[idx] = new RemovedObj();
    return previous;
}
// Returns the number of internal slots, INCLUDING slots whose entries were
// removed (remove() swaps in RemovedObj placeholders instead of deleting).
// NOTE(review): for the count of live entries use getValues().length —
// confirm no caller relies on the current placeholder-inclusive count
// before changing this.
Map.prototype.size = function(){
    return this._values.length;
}
// Returns an array of the live (non-removed) values in this map.
// Bug fix: the original called bare getValues(), which throws a
// ReferenceError — getValues is defined on the prototype, not as a
// global function, so it must be invoked through `this`.
Map.prototype.values = function(){
    return this.getValues();
}
// Collects every live value in slot order, skipping RemovedObj placeholders.
Map.prototype.getValues = function(){
    var live = [];
    for (var i = 0; i < this._values.length; i++) {
        var v = this._values[i];
        if (v instanceof RemovedObj) {
            continue;
        }
        live.push(v);
    }
    return live;
}
//** Placeholder object stored in a slot after its entry is removed. *******
function RemovedObj(){
    this.label = "[被删除的对象]";
}
// Stringifies as the label so removed slots are recognizable in debug output.
RemovedObj.prototype.toString = function(){
    return this.label;
}