repo_name stringlengths 4 116 | path stringlengths 4 379 | size stringlengths 1 7 | content stringlengths 3 1.05M | license stringclasses 15
values |
|---|---|---|---|---|
netscaler/neutron | neutron/plugins/mlnx/common/config.py | 2466 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
from neutron.agent.common import config
from neutron.plugins.mlnx.common import constants
# Fallback VLAN range: physical network "default" with VLAN IDs 1..1000.
DEFAULT_VLAN_RANGES = ['default:1:1000']
# No physical_network:interface mappings are configured by default.
DEFAULT_INTERFACE_MAPPINGS = []

# Options registered under the [MLNX] section: tenant network typing and the
# VLAN ranges available per physical network.
vlan_opts = [
    cfg.StrOpt('tenant_network_type', default='vlan',
               help=_("Network type for tenant networks "
                      "(local, ib, vlan, or none)")),
    cfg.ListOpt('network_vlan_ranges',
                default=DEFAULT_VLAN_RANGES,
                help=_("List of <physical_network>:<vlan_min>:<vlan_max> "
                       "or <physical_network>")),
]

# Options registered under the [ESWITCH] section: how the agent maps physical
# networks to interfaces and reaches the eswitch daemon.
eswitch_opts = [
    cfg.ListOpt('physical_interface_mappings',
                default=DEFAULT_INTERFACE_MAPPINGS,
                help=_("List of <physical_network>:<physical_interface>")),
    cfg.StrOpt('vnic_type',
               default=constants.VIF_TYPE_DIRECT,
               help=_("Type of VM network interface: mlnx_direct or "
                      "hostdev")),
    cfg.StrOpt('daemon_endpoint',
               default='tcp://127.0.0.1:5001',
               help=_('eswitch daemon end point')),
    cfg.IntOpt('request_timeout', default=3000,
               help=_("The number of milliseconds the agent will wait for "
                      "response on request to daemon.")),
]

# Options registered under the [AGENT] section.
agent_opts = [
    cfg.IntOpt('polling_interval', default=2,
               help=_("The number of seconds the agent will wait between "
                      "polling for local device changes.")),
    cfg.BoolOpt('rpc_support_old_agents', default=True,
                help=_("Enable server RPC compatibility with old agents")),
]

# Register each option group in its own configuration section.
cfg.CONF.register_opts(vlan_opts, "MLNX")
cfg.CONF.register_opts(eswitch_opts, "ESWITCH")
cfg.CONF.register_opts(agent_opts, "AGENT")
# Shared agent options: state reporting and the root helper used for
# privileged commands.
config.register_agent_state_opts_helper(cfg.CONF)
config.register_root_helper(cfg.CONF)
| apache-2.0 |
adrian-wang/panthera-parser | src/main/java/com/intel/ssg/dcst/panthera/parse/sql/transformer/fb/processor/CompareProcessor4C.java | 1226 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.ssg.dcst.panthera.parse.sql.transformer.fb.processor;
import com.intel.ssg.dcst.panthera.parse.sql.SqlXlateException;
/**
* Process Correlated compare operation > >= < <= = != in WHERE subquery.<br>
* CompareProcessor4C.
*
*/
public class CompareProcessor4C extends CommonFilterBlockProcessor {
    @Override
    public void processFB() throws SqlXlateException {
        // All correlated-compare transform logic lives in the base class; this
        // subclass only selects that code path for the filter-block framework.
        super.processCompareC();
    }
}
| apache-2.0 |
opennetworkinglab/onos | protocols/netconf/ctl/src/main/java/org/onosproject/netconf/ctl/impl/OsgiPropertyConstants.java | 1510 | /*
* Copyright 2018-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.netconf.ctl.impl;
/**
* Constants for default values of configurable properties.
*/
public final class OsgiPropertyConstants {

    // Constants holder only; not instantiable.
    private OsgiPropertyConstants() {}

    // NETCONF session connect timeout property name and default value.
    // (Units are not visible here -- presumably seconds; confirm at the usage site.)
    public static final String NETCONF_CONNECT_TIMEOUT = "netconfConnectTimeout";
    public static final int NETCONF_CONNECT_TIMEOUT_DEFAULT = 5;

    // NETCONF reply timeout property name and default value.
    public static final String NETCONF_REPLY_TIMEOUT = "netconfReplyTimeout";
    public static final int NETCONF_REPLY_TIMEOUT_DEFAULT = 5;

    // NETCONF idle timeout property name and default value.
    public static final String NETCONF_IDLE_TIMEOUT = "netconfIdleTimeout";
    public static final int NETCONF_IDLE_TIMEOUT_DEFAULT = 300;

    // SSH client library selection property name and default implementation.
    public static final String SSH_LIBRARY = "sshLibrary";
    public static final String SSH_LIBRARY_DEFAULT = "apache-mina";

    // SSH private key path property name and default location.
    public static final String SSH_KEY_PATH = "sshKeyPath";
    public static final String SSH_KEY_PATH_DEFAULT = "/root/.ssh/id_rsa";
}
| apache-2.0 |
chirino/activemq | activemq-amqp/src/test/java/org/apache/activemq/transport/amqp/joram/JoramJmsTest.java | 3274 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.transport.amqp.joram;
import java.util.concurrent.TimeUnit;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.rules.Timeout;
import org.junit.runner.RunWith;
import org.junit.runners.Suite;
import org.objectweb.jtests.jms.conform.connection.ConnectionTest;
import org.objectweb.jtests.jms.conform.connection.TopicConnectionTest;
import org.objectweb.jtests.jms.conform.message.MessageBodyTest;
import org.objectweb.jtests.jms.conform.message.MessageDefaultTest;
import org.objectweb.jtests.jms.conform.message.MessageTypeTest;
import org.objectweb.jtests.jms.conform.message.headers.MessageHeaderTest;
import org.objectweb.jtests.jms.conform.message.properties.JMSXPropertyTest;
import org.objectweb.jtests.jms.conform.message.properties.MessagePropertyConversionTest;
import org.objectweb.jtests.jms.conform.message.properties.MessagePropertyTest;
import org.objectweb.jtests.jms.conform.queue.QueueBrowserTest;
import org.objectweb.jtests.jms.conform.queue.TemporaryQueueTest;
import org.objectweb.jtests.jms.conform.selector.SelectorSyntaxTest;
import org.objectweb.jtests.jms.conform.selector.SelectorTest;
import org.objectweb.jtests.jms.conform.session.QueueSessionTest;
import org.objectweb.jtests.jms.conform.session.SessionTest;
import org.objectweb.jtests.jms.conform.session.TopicSessionTest;
import org.objectweb.jtests.jms.conform.topic.TemporaryTopicTest;
// JMS 1.1 conformance suite (Joram jtests) run against the AMQP transport.
@RunWith(Suite.class)
@Suite.SuiteClasses({
    TopicSessionTest.class,
    MessageHeaderTest.class,
    QueueBrowserTest.class,
    MessageTypeTest.class,
    TemporaryTopicTest.class,
    TopicConnectionTest.class,
    SelectorSyntaxTest.class,
    QueueSessionTest.class,
    SelectorTest.class,
    TemporaryQueueTest.class,
    ConnectionTest.class,
    SessionTest.class,
    JMSXPropertyTest.class,
    MessageBodyTest.class,
    MessageDefaultTest.class,
    MessagePropertyConversionTest.class,
    MessagePropertyTest.class
})
public class JoramJmsTest {

    // Safety net: fail any conformance test that hangs for more than 10 seconds.
    @Rule
    public Timeout timeout = new Timeout(10, TimeUnit.SECONDS);

    @BeforeClass
    public static void beforeClass() throws Exception {
        // Point the Joram harness at this provider's configuration file.
        System.setProperty("joram.jms.test.file", getJmsTestFileName());
    }

    @AfterClass
    public static void afterClass() throws Exception {
        // Remove the property so it does not leak into other suites in the same JVM.
        System.clearProperty("joram.jms.test.file");
    }

    // Name of the provider configuration file resolved by the harness.
    public static String getJmsTestFileName() {
        return "provider.properties";
    }
}
| apache-2.0 |
cwsus/esolutions | eSolutionsCore/src/main/java/com/cws/esolutions/core/listeners/CoreServiceInitializer.java | 9283 | /*
* Copyright (c) 2009 - 2020 CaspersBox Web Services
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cws.esolutions.core.listeners;
/*
* Project: eSolutionsCore
* Package: com.cws.esolutions.core.listeners
* File: CoreServiceInitializer.java
*
* History
*
* Author Date Comments
* ----------------------------------------------------------------------------
* cws-khuntly 11/23/2008 22:39:20 Created.
*/
import java.net.URL;
import java.util.Map;
import org.slf4j.Logger;
import java.util.HashMap;
import javax.sql.DataSource;
import java.sql.SQLException;
import org.slf4j.LoggerFactory;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.Unmarshaller;
import javax.xml.bind.JAXBException;
import java.net.MalformedURLException;
import org.apache.log4j.helpers.Loader;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.xml.DOMConfigurator;
import org.apache.commons.dbcp.BasicDataSource;
import com.cws.esolutions.core.CoreServicesBean;
import com.cws.esolutions.core.CoreServicesConstants;
import com.cws.esolutions.security.utils.PasswordUtils;
import com.cws.esolutions.security.SecurityServiceBean;
import com.cws.esolutions.core.config.xml.DataSourceManager;
import com.cws.esolutions.security.config.xml.SecurityConfig;
import com.cws.esolutions.core.exception.CoreServicesException;
import com.cws.esolutions.core.config.xml.CoreConfigurationData;
import com.cws.esolutions.security.config.xml.SecurityConfigurationData;
/**
* @author cws-khuntly
* @version 1.0
*/
public class CoreServiceInitializer
{
    private static final String CNAME = CoreServiceInitializer.class.getName();
    // Singleton application bean that receives the parsed configuration and data sources.
    private static final CoreServicesBean appBean = CoreServicesBean.getInstance();
    private static final Logger DEBUGGER = LoggerFactory.getLogger(CoreServicesConstants.DEBUGGER);
    private static final boolean DEBUG = DEBUGGER.isDebugEnabled();
    private static final Logger ERROR_RECORDER = LoggerFactory.getLogger(CoreServicesConstants.ERROR_LOGGER + CNAME);

    /**
     * Initializes the core service in a standalone mode - used for applications outside of a container or when
     * run as a standalone jar.
     *
     * @param configFile - The service configuration file to utilize
     * @param logConfig - The logging configuration file to utilize
     * @param loadSecurity - Flag to start security
     * @param startConnections - Flag to start connections
     * @throws CoreServicesException {@link com.cws.esolutions.core.exception.CoreServicesException}
     * if an exception occurs during initialization
     */
    public static void initializeService(final String configFile, final String logConfig, final boolean loadSecurity, final boolean startConnections) throws CoreServicesException
    {
        URL xmlURL = null;
        JAXBContext context = null;
        Unmarshaller marshaller = null;
        SecurityConfig secConfig = null;
        CoreConfigurationData configData = null;
        SecurityConfigurationData secConfigData = null;

        // Security config is only needed to decrypt data source passwords below.
        // NOTE(review): secConfig stays null when loadSecurity is false; calling with
        // startConnections=true and loadSecurity=false would NPE at decryptText --
        // confirm callers always pair the two flags.
        if (loadSecurity)
        {
            secConfigData = SecurityServiceBean.getInstance().getConfigData();
            secConfig = secConfigData.getSecurityConfig();
        }

        // Explicit arguments win; fall back to system properties when blank.
        final String serviceConfig = (StringUtils.isBlank(configFile)) ? System.getProperty("coreConfigFile") : configFile;
        final String loggingConfig = (StringUtils.isBlank(logConfig)) ? System.getProperty("coreLogConfig") : logConfig;

        try
        {
            try
            {
                // First attempt: resolve the log4j configuration from the classpath.
                DOMConfigurator.configure(Loader.getResource(loggingConfig));
            }
            catch (NullPointerException npx)
            {
                try
                {
                    // Second attempt: treat the value as a filesystem path.
                    DOMConfigurator.configure(FileUtils.getFile(loggingConfig).toURI().toURL());
                }
                catch (NullPointerException npx1)
                {
                    // Logging is best-effort: initialization continues without it.
                    System.err.println("Unable to load logging configuration. No logging enabled!");
                    System.err.println("");
                    npx1.printStackTrace();
                }
            }

            // Resolve the service configuration from the classpath first.
            xmlURL = CoreServiceInitializer.class.getClassLoader().getResource(serviceConfig);

            if (xmlURL == null)
            {
                // try loading from the filesystem
                xmlURL = FileUtils.getFile(configFile).toURI().toURL();
            }

            // Unmarshal the XML service configuration and hand it to the app bean.
            context = JAXBContext.newInstance(CoreConfigurationData.class);
            marshaller = context.createUnmarshaller();
            configData = (CoreConfigurationData) marshaller.unmarshal(xmlURL);

            CoreServiceInitializer.appBean.setConfigData(configData);

            if (startConnections)
            {
                Map<String, DataSource> dsMap = CoreServiceInitializer.appBean.getDataSources();

                if (DEBUG)
                {
                    DEBUGGER.debug("dsMap: {}", dsMap);
                }

                if (dsMap == null)
                {
                    dsMap = new HashMap<String, DataSource>();
                }

                // Build one pooled data source per configured manager, skipping
                // names already registered on the bean.
                for (DataSourceManager mgr : configData.getResourceConfig().getDsManager())
                {
                    if (!(dsMap.containsKey(mgr.getDsName())))
                    {
                        // JDBC connection properties string.
                        // NOTE(review): socketTimeout reuses getConnectTimeout() -- confirm
                        // that is intended and not a copy/paste of connectTimeout.
                        StringBuilder sBuilder = new StringBuilder()
                            .append("connectTimeout=" + mgr.getConnectTimeout() + ";")
                            .append("socketTimeout=" + mgr.getConnectTimeout() + ";")
                            .append("autoReconnect=" + mgr.getAutoReconnect() + ";")
                            .append("zeroDateTimeBehavior=convertToNull");

                        if (DEBUG)
                        {
                            DEBUGGER.debug("StringBuilder: {}", sBuilder);
                        }

                        BasicDataSource dataSource = new BasicDataSource();
                        dataSource.setDriverClassName(mgr.getDriver());
                        dataSource.setUrl(mgr.getDataSource());
                        dataSource.setUsername(mgr.getDsUser());
                        dataSource.setConnectionProperties(sBuilder.toString());
                        // Stored password is encrypted at rest; decrypt using the security config.
                        dataSource.setPassword(PasswordUtils.decryptText(mgr.getDsPass(), mgr.getSalt(),
                            secConfig.getSecretAlgorithm(), secConfig.getIterations(), secConfig.getKeyBits(),
                            secConfig.getEncryptionAlgorithm(), secConfig.getEncryptionInstance(),
                            configData.getAppConfig().getEncoding()));

                        if (DEBUG)
                        {
                            DEBUGGER.debug("BasicDataSource: {}", dataSource);
                        }

                        dsMap.put(mgr.getDsName(), dataSource);
                    }
                }

                if (DEBUG)
                {
                    DEBUGGER.debug("dsMap: {}", dsMap);
                }

                CoreServiceInitializer.appBean.setDataSources(dsMap);
            }
        }
        catch (JAXBException jx)
        {
            jx.printStackTrace();
            throw new CoreServicesException(jx.getMessage(), jx);
        }
        catch (MalformedURLException mux)
        {
            mux.printStackTrace();
            throw new CoreServicesException(mux.getMessage(), mux);
        }
    }

    /**
     * Shuts down the running core service process.
     */
    public static void shutdown()
    {
        final String methodName = CoreServiceInitializer.CNAME + "#shutdown()";

        if (DEBUG)
        {
            DEBUGGER.debug(methodName);
        }

        final Map<String, DataSource> datasources = CoreServiceInitializer.appBean.getDataSources();

        try
        {
            if ((datasources != null) && (datasources.size() != 0))
            {
                for (String key : datasources.keySet())
                {
                    if (DEBUG)
                    {
                        DEBUGGER.debug("Key: {}", key);
                    }

                    BasicDataSource dataSource = (BasicDataSource) datasources.get(key);

                    if (DEBUG)
                    {
                        DEBUGGER.debug("BasicDataSource: {}", dataSource);
                    }

                    // Close each still-open pool to release its pooled connections.
                    if ((dataSource != null ) && (!(dataSource.isClosed())))
                    {
                        dataSource.close();
                    }
                }
            }
        }
        catch (SQLException sqx)
        {
            // Shutdown is best-effort: log and continue.
            ERROR_RECORDER.error(sqx.getMessage(), sqx);
        }
    }
}
| apache-2.0 |
nathanielvarona/airflow | airflow/api_connexion/schemas/dag_schema.py | 3698 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import List, NamedTuple
from itsdangerous import URLSafeSerializer
from marshmallow import Schema, fields
from marshmallow_sqlalchemy import SQLAlchemySchema, auto_field
from airflow import DAG
from airflow.api_connexion.schemas.common_schema import ScheduleIntervalSchema, TimeDeltaSchema, TimezoneField
from airflow.configuration import conf
from airflow.models.dag import DagModel, DagTag
class DagTagSchema(SQLAlchemySchema):
    """Serialization schema for a single DagTag row."""

    class Meta:
        """Bind this schema to the DagTag ORM model."""

        model = DagTag

    # Tag name, mapped straight from the model column.
    name = auto_field()
class DAGSchema(SQLAlchemySchema):
    """Serialization schema for DagModel rows in API list/get responses."""

    class Meta:
        """Bind this schema to the DagModel ORM model."""

        model = DagModel

    dag_id = auto_field(dump_only=True)
    root_dag_id = auto_field(dump_only=True)
    is_paused = auto_field()
    is_subdag = auto_field(dump_only=True)
    fileloc = auto_field(dump_only=True)
    file_token = fields.Method("get_token", dump_only=True)
    owners = fields.Method("get_owners", dump_only=True)
    description = auto_field(dump_only=True)
    schedule_interval = fields.Nested(ScheduleIntervalSchema)
    tags = fields.List(fields.Nested(DagTagSchema), dump_only=True)

    @staticmethod
    def get_owners(obj: DagModel):
        """Split the model's comma-separated owners string into a list."""
        owners = getattr(obj, 'owners', None)
        if not owners:
            return []
        return owners.split(",")

    @staticmethod
    def get_token(obj: DagModel):
        """Sign the DAG file location into an opaque, URL-safe token."""
        secret_key = conf.get('webserver', 'secret_key')
        return URLSafeSerializer(secret_key).dumps(obj.fileloc)
class DAGDetailSchema(DAGSchema):
    """Extended DAG schema exposing detail-view attributes of a parsed DAG."""

    owners = fields.Method("get_owners", dump_only=True)
    timezone = TimezoneField()
    catchup = fields.Boolean()
    orientation = fields.String()
    concurrency = fields.Integer()
    start_date = fields.DateTime()
    dag_run_timeout = fields.Nested(TimeDeltaSchema, attribute="dagrun_timeout")
    doc_md = fields.String()
    default_view = fields.String()
    params = fields.Dict()
    tags = fields.Method("get_tags", dump_only=True)

    @staticmethod
    def get_tags(obj: DAG):
        """Serialize each plain tag string as a DagTag-shaped object."""
        if not obj.tags:
            return []
        return [DagTagSchema().dump({"name": tag}) for tag in obj.tags]

    @staticmethod
    def get_owners(obj: DAG):
        """Split the DAG's comma-separated owner string into a list."""
        owner = getattr(obj, 'owner', None)
        return owner.split(",") if owner else []
class DAGCollection(NamedTuple):
    """List of DAGs with metadata"""

    # Page of DagModel rows being returned.
    dags: List[DagModel]
    # Total number of matching DAGs, ignoring pagination.
    total_entries: int
class DAGCollectionSchema(Schema):
    """Schema for a paginated collection of DAGs."""

    # Serialized page of DAGs.
    dags = fields.List(fields.Nested(DAGSchema))
    # Total matching count, ignoring pagination.
    total_entries = fields.Int()
# Shared, reusable schema instances for the API endpoint handlers.
dags_collection_schema = DAGCollectionSchema()
dag_schema = DAGSchema()
dag_detail_schema = DAGDetailSchema()
| apache-2.0 |
googleapis/google-cloud-ruby | google-cloud-os_config-v1/snippets/os_config_service/get_patch_job.rb | 1252 | # frozen_string_literal: true
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
# [START osconfig_v1_generated_OsConfigService_GetPatchJob_sync]
require "google/cloud/os_config/v1"

# Create a client object. The client can be reused for multiple calls.
client = Google::Cloud::OsConfig::V1::OsConfigService::Client.new

# Create a request. To set request fields, pass in keyword arguments.
# NOTE(review): a real invocation presumably needs the request's identifying
# fields (e.g. the patch job name) populated -- confirm against the
# GetPatchJobRequest API reference before reusing this sample.
request = Google::Cloud::OsConfig::V1::GetPatchJobRequest.new

# Call the get_patch_job method.
result = client.get_patch_job request

# The returned object is of type Google::Cloud::OsConfig::V1::PatchJob.
p result
# [END osconfig_v1_generated_OsConfigService_GetPatchJob_sync]
| apache-2.0 |
pimusicbox/mopidy-musicbox-webclient | tests/js/test_custom_scripting.js | 960 | var chai = require('chai')
var expect = chai.expect
var assert = chai.assert
chai.use(require('chai-string'))
chai.use(require('chai-jquery'))
var sinon = require('sinon')
var configureJQueryMobile = require('../../mopidy_musicbox_webclient/static/js/custom_scripting.js')
describe('jQuery Defaults', function () {
    it('should disable ajax and hashListening', function () {
        // Sanity-check jQuery Mobile's shipped defaults before reconfiguring.
        expect($.mobile.ajaxEnabled).to.be.true
        expect($.mobile.hashListeningEnabled).to.be.true
        configureJQueryMobile()
        // configureJQueryMobile() must switch both behaviours off so the
        // webclient can manage navigation itself.
        expect($.mobile.ajaxEnabled).to.be.false
        expect($.mobile.hashListeningEnabled).to.be.false
    })
    it('should bind to "mobileinit"', function () {
        var configSpy = sinon.spy(configureJQueryMobile)
        $(document).bind('mobileinit', configSpy)
        // Binding alone must not invoke the handler...
        expect(configSpy.called).to.be.false
        // ...only firing the event should.
        $(document).trigger('mobileinit')
        expect(configSpy.called).to.be.true
        configSpy.reset()
    })
})
| apache-2.0 |
kigsmtua/estatio | estatioapp/integspecs/src/test/java/org/estatio/integspecs/glue/InMemoryDBForEstatio.java | 2152 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.estatio.integspecs.glue;
import org.apache.isis.core.specsupport.scenarios.InMemoryDB;
import org.apache.isis.core.specsupport.scenarios.ScenarioExecution;
import org.estatio.dom.WithCodeComparable;
import org.estatio.dom.WithNameComparable;
import org.estatio.dom.WithReferenceComparable;
import org.estatio.dom.WithTitleComparable;
public class InMemoryDBForEstatio extends InMemoryDB {
public InMemoryDBForEstatio(ScenarioExecution scenarioExecution) {
super(scenarioExecution);
}
/**
* Hook to initialize if possible.
*/
@Override
protected void init(Object obj, String id) {
if(obj instanceof WithReferenceComparable) {
WithReferenceComparable<?> withRef = (WithReferenceComparable<?>) obj;
withRef.setReference(id);
}
if(obj instanceof WithNameComparable) {
WithNameComparable<?> withName = (WithNameComparable<?>) obj;
withName.setName(id);
}
if(obj instanceof WithCodeComparable) {
WithCodeComparable<?> withCode = (WithCodeComparable<?>) obj;
withCode.setCode(id);
}
if(obj instanceof WithTitleComparable) {
WithTitleComparable<?> withTitle = (WithTitleComparable<?>) obj;
withTitle.setTitle(id);
}
}
} | apache-2.0 |
puppetlabs/puppetlabs-dsc | lib/puppet/type/dsc_xazuresqldatabaseserverfirewallrule.rb | 5875 | require 'pathname'
Puppet::Type.newtype(:dsc_xazuresqldatabaseserverfirewallrule) do
  require Pathname.new(__FILE__).dirname + '../../' + 'puppet/type/base_dsc'
  require Pathname.new(__FILE__).dirname + '../../puppet_x/puppetlabs/dsc_type_helpers'

  @doc = %q{
    The DSC xAzureSqlDatabaseServerFirewallRule resource type.
    Automatically generated from
    'xAzure/DSCResources/MSFT_xAzureSqlDatabaseServerFirewallRule/MSFT_xAzureSqlDatabaseServerFirewallRule.schema.mof'
    To learn more about PowerShell Desired State Configuration, please
    visit https://technet.microsoft.com/en-us/library/dn249912.aspx.
    For more information about built-in DSC Resources, please visit
    https://technet.microsoft.com/en-us/library/dn249921.aspx.
    For more information about xDsc Resources, please visit
    https://github.com/PowerShell/DscResources.
  }

  # Both key properties of the underlying MOF resource are mandatory.
  validate do
    fail('dsc_rulename is a required attribute') if self[:dsc_rulename].nil?
    fail('dsc_servername is a required attribute') if self[:dsc_servername].nil?
  end

  # Metadata consumed by the base_dsc provider to locate and invoke the DSC resource.
  def dscmeta_resource_friendly_name; 'xAzureSqlDatabaseServerFirewallRule' end
  def dscmeta_resource_name; 'MSFT_xAzureSqlDatabaseServerFirewallRule' end
  def dscmeta_module_name; 'xAzure' end
  def dscmeta_module_version; '0.2.0.0' end

  newparam(:name, :namevar => true ) do
  end

  # ensure maps to Present/Absent on the DSC side via the provider.
  ensurable do
    newvalue(:exists?) { provider.exists? }
    newvalue(:present) { provider.create }
    newvalue(:absent) { provider.destroy }
    defaultto { :present }
  end

  # Name: PsDscRunAsCredential
  # Type: MSFT_Credential
  # IsMandatory: False
  # Values: None
  newparam(:dsc_psdscrunascredential) do
    def mof_type; 'MSFT_Credential' end
    def mof_is_embedded?; true end
    desc "PsDscRunAsCredential"
    validate do |value|
      unless value.kind_of?(Hash)
        fail("Invalid value '#{value}'. Should be a hash")
      end
      PuppetX::Dsc::TypeHelpers.validate_MSFT_Credential("Credential", value)
    end
    # Wrap the credential hash so sensitive values are masked in reports/logs.
    munge do |value|
      PuppetX::Dsc::TypeHelpers.munge_sensitive_hash!(value)
    end
  end

  # Name: RuleName
  # Type: string
  # IsMandatory: True
  # Values: None
  newparam(:dsc_rulename) do
    def mof_type; 'string' end
    def mof_is_embedded?; false end
    desc "RuleName - Name of the firewall rule"
    isrequired
    validate do |value|
      unless value.kind_of?(String)
        fail("Invalid value '#{value}'. Should be a string")
      end
    end
  end

  # Name: ServerName
  # Type: string
  # IsMandatory: True
  # Values: None
  newparam(:dsc_servername) do
    def mof_type; 'string' end
    def mof_is_embedded?; false end
    desc "ServerName - Name of the database server for which firewall rule should be created"
    isrequired
    validate do |value|
      unless value.kind_of?(String)
        fail("Invalid value '#{value}'. Should be a string")
      end
    end
  end

  # Name: StartIPAddress
  # Type: string
  # IsMandatory: False
  # Values: None
  newparam(:dsc_startipaddress) do
    def mof_type; 'string' end
    def mof_is_embedded?; false end
    desc "StartIPAddress - Start IP address of the firewall rule"
    validate do |value|
      unless value.kind_of?(String)
        fail("Invalid value '#{value}'. Should be a string")
      end
    end
  end

  # Name: EndIPAddress
  # Type: string
  # IsMandatory: False
  # Values: None
  newparam(:dsc_endipaddress) do
    def mof_type; 'string' end
    def mof_is_embedded?; false end
    desc "EndIPAddress - End IP address of the firewall rule"
    validate do |value|
      unless value.kind_of?(String)
        fail("Invalid value '#{value}'. Should be a string")
      end
    end
  end

  # Name: AzureSubscriptionName
  # Type: string
  # IsMandatory: False
  # Values: None
  newparam(:dsc_azuresubscriptionname) do
    def mof_type; 'string' end
    def mof_is_embedded?; false end
    desc "AzureSubscriptionName - Specifies the name of the Azure subscription that should be set to Current"
    validate do |value|
      unless value.kind_of?(String)
        fail("Invalid value '#{value}'. Should be a string")
      end
    end
  end

  # Name: AzurePublishSettingsFile
  # Type: string
  # IsMandatory: False
  # Values: None
  newparam(:dsc_azurepublishsettingsfile) do
    def mof_type; 'string' end
    def mof_is_embedded?; false end
    desc "AzurePublishSettingsFile - Specifies the location of the Publish Settings file for the Azure Subscription"
    validate do |value|
      unless value.kind_of?(String)
        fail("Invalid value '#{value}'. Should be a string")
      end
    end
  end

  # Name: Ensure
  # Type: string
  # IsMandatory: False
  # Values: ["Present", "Absent"]
  newparam(:dsc_ensure) do
    def mof_type; 'string' end
    def mof_is_embedded?; false end
    desc "Ensure - Ensure that firewall rule is present or absent Valid values are Present, Absent."
    validate do |value|
      # Mirrors the value into the ensure property before type-checking.
      # NOTE(review): value.downcase runs before the String check, so a
      # non-string raises NoMethodError instead of the fail message below --
      # confirm this generated behavior is acceptable.
      resource[:ensure] = value.downcase
      unless value.kind_of?(String)
        fail("Invalid value '#{value}'. Should be a string")
      end
      unless ['Present', 'present', 'Absent', 'absent'].include?(value)
        fail("Invalid value '#{value}'. Valid values are Present, Absent")
      end
    end
  end

  def builddepends
    # Preserve base relationships and add reboot ordering when required.
    pending_relations = super()
    PuppetX::Dsc::TypeHelpers.ensure_reboot_relationship(self, pending_relations)
  end
end
Puppet::Type.type(:dsc_xazuresqldatabaseserverfirewallrule).provide :powershell, :parent => Puppet::Type.type(:base_dsc).provider(:powershell) do
  # DSC invocation requires WMF/PowerShell 5.0.10586.117 or later on the node.
  confine :true => (Gem::Version.new(Facter.value(:powershell_version)) >= Gem::Version.new('5.0.10586.117'))
  defaultfor :operatingsystem => :windows
  mk_resource_methods
end
| apache-2.0 |
EdiaEducationTechnology/uPortal | uportal-war/src/main/java/org/jasig/portal/events/aggr/login/LoginPortalEventAggregator.java | 3302 | /**
* Licensed to Jasig under one or more contributor license
* agreements. See the NOTICE file distributed with this work
* for additional information regarding copyright ownership.
* Jasig licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a
* copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.jasig.portal.events.aggr.login;
import org.jasig.portal.events.LoginEvent;
import org.jasig.portal.events.PortalEvent;
import org.jasig.portal.events.aggr.AggregationInterval;
import org.jasig.portal.events.aggr.AggregationIntervalInfo;
import org.jasig.portal.events.aggr.BaseAggregationPrivateDao;
import org.jasig.portal.events.aggr.BaseIntervalAwarePortalEventAggregator;
import org.jasig.portal.events.aggr.DateDimension;
import org.jasig.portal.events.aggr.EventAggregationContext;
import org.jasig.portal.events.aggr.TimeDimension;
import org.jasig.portal.events.aggr.groups.AggregatedGroupMapping;
import org.springframework.beans.factory.annotation.Autowired;
/**
* Event aggregator that uses {@link LoginAggregationPrivateDao} to aggregate login events
*
* @author Eric Dalquist
* @version $Revision$
*/
public class LoginPortalEventAggregator extends BaseIntervalAwarePortalEventAggregator<LoginEvent, LoginAggregationImpl, LoginAggregationKey> {
    private LoginAggregationPrivateDao loginAggregationDao;

    // DAO used to persist login aggregations; injected by Spring.
    @Autowired
    public void setLoginAggregationDao(LoginAggregationPrivateDao loginAggregationDao) {
        this.loginAggregationDao = loginAggregationDao;
    }

    // Exposes the injected DAO to the base aggregation workflow.
    @Override
    protected BaseAggregationPrivateDao<LoginAggregationImpl, LoginAggregationKey> getAggregationDao() {
        return this.loginAggregationDao;
    }

    // Builds the composite bucket key (date, time, interval, group) for one event.
    @Override
    protected LoginAggregationKey createAggregationKey(LoginEvent e, EventAggregationContext eventAggregationContext,
            AggregationIntervalInfo intervalInfo, AggregatedGroupMapping aggregatedGroup) {
        final TimeDimension timeDimension = intervalInfo.getTimeDimension();
        final DateDimension dateDimension = intervalInfo.getDateDimension();
        final AggregationInterval aggregationInterval = intervalInfo.getAggregationInterval();
        return new LoginAggregationKeyImpl(dateDimension, timeDimension, aggregationInterval, aggregatedGroup);
    }

    // This aggregator only handles LoginEvent (and its subclasses).
    @Override
    public boolean supports(Class<? extends PortalEvent> type) {
        return LoginEvent.class.isAssignableFrom(type);
    }

    // Folds one login event into the aggregation row: updates the elapsed
    // duration within the interval and counts the user.
    @Override
    protected void updateAggregation(LoginEvent e, EventAggregationContext eventAggregationContext,
            AggregationIntervalInfo intervalInfo, LoginAggregationImpl aggregation) {
        final String userName = e.getUserName();
        final int duration = intervalInfo.getDurationTo(e.getTimestampAsDate());
        aggregation.setDuration(duration);
        aggregation.countUser(userName);
    }
}
| apache-2.0 |
JetBrains/ttorrent-lib | common/src/main/java/com/turn/ttorrent/common/TorrentFile.java | 1045 | package com.turn.ttorrent.common;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.io.File;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
/**
* @author dgiffin
* @author mpetazzoni
*/
public class TorrentFile {
@NotNull
public final List<String> relativePath;
public final long size;
@NotNull
public final Optional<String> md5Hash;
public TorrentFile(@NotNull List<String> relativePath, long size, @Nullable String md5Hash) {
this.relativePath = new ArrayList<String>(relativePath);
this.size = size;
this.md5Hash = Optional.of(md5Hash);
}
public String getRelativePathAsString() {
String delimiter = File.separator;
final Iterator<String> iterator = relativePath.iterator();
StringBuilder sb = new StringBuilder();
if (iterator.hasNext()) {
sb.append(iterator.next());
while (iterator.hasNext()) {
sb.append(delimiter).append(iterator.next());
}
}
return sb.toString();
}
}
| apache-2.0 |
shamim8888/SMSlib-ParallelPort | opensis1/modules/Student_Billing/Statements.php | 2808 | <?php
/**
* @file $Id: Statements.php 422 2007-02-10 22:08:22Z focus-sis $
* @package Focus/SIS
* @copyright Copyright (C) 2006 Andrew Schmadeke. All rights reserved.
* @license http://www.gnu.org/copyleft/gpl.html GNU/GPL, see LICENSE.txt
* Focus/SIS is free software. This version may have been modified pursuant
* to the GNU General Public License, and as distributed it includes or
* is derivative of works licensed under the GNU General Public License or
* other free or open source software licenses.
* See COPYRIGHT.txt for copyright notices and details.
*/
//Widgets('all');
Widgets('mailing_labels');
//Widgets('document_template');
if(!$_REQUEST['search_modfunc'] || $_REQUEST['search_modfunc']=='search' || $_ROSARIO['modules_search'])
{
DrawHeader(ProgramTitle());
$extra['force_search'] = true;
$extra['new'] = true;
$extra['action'] .= "&_ROSARIO_PDF=true";
Search('student_id',$extra);
}
else
{
// For the Student Fees / Student Payments programs
$_REQUEST['print_statements'] = true;
if($_REQUEST['mailing_labels']=='Y')
$extra['group'][] = 'ADDRESS_ID';
//modif Francois: fix Advanced Search
$extra['WHERE'] .= appendSQL('',$extra);
$extra['WHERE'] .= CustomFields('where');
$RET = GetStuList($extra);
if(count($RET))
{
$SESSION_student_id_save = $_SESSION['student_id'];
$handle = PDFStart();
foreach($RET as $student)
{
if($_REQUEST['mailing_labels']=='Y')
{
foreach($student as $address)
{
echo '<BR /><BR /><BR />';
unset($_ROSARIO['DrawHeader']);
DrawHeader(_('Statement'));
DrawHeader($address['FULL_NAME'],$address['STUDENT_ID']);
DrawHeader($address['GRADE_ID']);
DrawHeader(GetSchool(UserSchool()));
DrawHeader(ProperDate(DBDate()));
echo '<BR /><BR /><TABLE class="width-100p"><TR><TD style="width:50px;"> </TD><TD>'.$address['MAILING_LABEL'].'</TD></TR></TABLE><BR />';
$_SESSION['student_id'] = $address['STUDENT_ID'];
include('modules/Student_Billing/StudentFees.php');
include('modules/Student_Billing/StudentPayments.php');
echo '<div style="page-break-after: always;"></div>';
}
}
else
{
$_SESSION['student_id'] = $student['STUDENT_ID'];
unset($_ROSARIO['DrawHeader']);
DrawHeader(_('Statement'));
DrawHeader($student['FULL_NAME'],$student['STUDENT_ID']);
DrawHeader($student['GRADE_ID']);
DrawHeader(GetSchool(UserSchool()));
DrawHeader(ProperDate(DBDate()));
include('modules/Student_Billing/StudentFees.php');
include('modules/Student_Billing/StudentPayments.php');
echo '<div style="page-break-after: always;"></div>';
}
}
//unset($_SESSION['student_id']);
$_SESSION['student_id'] = $SESSION_student_id_save;
PDFStop($handle);
}
else
BackPrompt(_('No Students were found.'));
}
?> | apache-2.0 |
pakdev/roslyn-analyzers | src/NetAnalyzers/Core/Microsoft.NetCore.Analyzers/Security/UseXmlReaderBase.cs | 4532 | // Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Immutable;
using Analyzer.Utilities;
using Analyzer.Utilities.Extensions;
using Microsoft.CodeAnalysis;
using Microsoft.CodeAnalysis.Diagnostics;
using Microsoft.CodeAnalysis.Operations;
namespace Microsoft.NetCore.Analyzers.Security
{
public abstract class UseXmlReaderBase : DiagnosticAnalyzer
{
/// <summary>
/// Metadata name of the type which is recommended to use method take XmlReader as parameter.
/// </summary>
protected abstract string TypeMetadataName { get; }
/// <summary>
/// Metadata name of the method which is recommended to use XmlReader as parameter.
/// </summary>
protected abstract string MethodMetadataName { get; }
protected abstract DiagnosticDescriptor Rule { get; }
public override ImmutableArray<DiagnosticDescriptor> SupportedDiagnostics => ImmutableArray.Create(Rule);
protected static LocalizableString Description { get; } = new LocalizableResourceString(
nameof(MicrosoftNetCoreAnalyzersResources.UseXmlReaderDescription),
MicrosoftNetCoreAnalyzersResources.ResourceManager,
typeof(MicrosoftNetCoreAnalyzersResources));
protected static LocalizableString Message { get; } = new LocalizableResourceString(
nameof(MicrosoftNetCoreAnalyzersResources.UseXmlReaderMessage),
MicrosoftNetCoreAnalyzersResources.ResourceManager,
typeof(MicrosoftNetCoreAnalyzersResources));
public override void Initialize(AnalysisContext context)
{
context.EnableConcurrentExecution();
// Security analyzer - analyze and report diagnostics on generated code.
context.ConfigureGeneratedCodeAnalysis(GeneratedCodeAnalysisFlags.Analyze | GeneratedCodeAnalysisFlags.ReportDiagnostics);
context.RegisterCompilationStartAction(compilationStartAnalysisContext =>
{
var wellKnownTypeProvider = WellKnownTypeProvider.GetOrCreate(compilationStartAnalysisContext.Compilation);
if (!wellKnownTypeProvider.TryGetOrCreateTypeByMetadataName(
TypeMetadataName,
out INamedTypeSymbol? xmlSchemaTypeSymbol))
{
return;
}
INamedTypeSymbol? xmlReaderTypeSymbol = wellKnownTypeProvider.GetOrCreateTypeByMetadataName(WellKnownTypeNames.SystemXmlXmlReader);
compilationStartAnalysisContext.RegisterOperationAction(operationAnalysisContext =>
{
var operation = operationAnalysisContext.Operation;
IMethodSymbol? methodSymbol = null;
string? methodName = null;
switch (operation.Kind)
{
case OperationKind.Invocation:
methodSymbol = ((IInvocationOperation)operation).TargetMethod;
methodName = methodSymbol.Name;
break;
case OperationKind.ObjectCreation:
methodSymbol = ((IObjectCreationOperation)operation).Constructor;
methodName = methodSymbol.ContainingType.Name;
break;
default:
return;
}
if (methodName.StartsWith(MethodMetadataName, StringComparison.Ordinal) &&
methodSymbol.IsOverrideOrVirtualMethodOf(xmlSchemaTypeSymbol))
{
if (xmlReaderTypeSymbol != null &&
!methodSymbol.Parameters.IsEmpty &&
methodSymbol.Parameters[0].Type.Equals(xmlReaderTypeSymbol))
{
return;
}
operationAnalysisContext.ReportDiagnostic(
operation.CreateDiagnostic(
Rule,
methodSymbol.ContainingType.Name,
methodName));
}
}, OperationKind.Invocation, OperationKind.ObjectCreation);
});
}
}
}
| apache-2.0 |
bdobyns/OpenRQ | src/main/net/fec/openrq/util/io/printing/appendable/StringBufferWrapper.java | 414 | package net.fec.openrq.util.io.printing.appendable;
/**
* @author Ricardo Fonseca <rfonseca@lasige.di.fc.ul.pt>
*/
final class StringBufferWrapper extends AppendableWrapper<StringBuffer> {
public StringBufferWrapper(StringBuffer sb) {
super(sb);
}
@Override
public void print(char[] c, int off, int len) {
appendable.append(c, off, len);
}
}
| apache-2.0 |
mahak/cinder | cinder/cmd/scheduler.py | 2051 | #!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Starter script for Cinder Scheduler."""
import logging as python_logging
import sys
import eventlet
eventlet.monkey_patch()
# Monkey patch the original current_thread to use the up-to-date _active
# global variable. See https://bugs.launchpad.net/bugs/1863021 and
# https://github.com/eventlet/eventlet/issues/592
import __original_module_threading as orig_threading
import threading # noqa
orig_threading.current_thread.__globals__['_active'] = threading._active
from oslo_config import cfg
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from oslo_reports import opts as gmr_opts
# Need to register global_opts
from cinder.common import config # noqa
from cinder import i18n
i18n.enable_lazy()
from cinder import objects
from cinder import service
from cinder import utils
from cinder import version
CONF = cfg.CONF
def main():
objects.register_all()
gmr_opts.set_defaults(CONF)
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
logging.setup(CONF, "cinder")
python_logging.captureWarnings(True)
utils.monkey_patch()
gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
server = service.Service.create(binary='cinder-scheduler')
service.serve(server)
service.wait()
| apache-2.0 |
evandor/skysail-server | skysail.client/src/app/app.component.ts | 910 | import { Component, OnInit } from '@angular/core';
import { NavbarComponent } from './navbar/navbar.component'
import { BreadcrumbModule, PanelMenuModule, MenuItem } from 'primeng/primeng';
import {BackendService} from './services/backend.service'
@Component({
selector: 'app-root',
templateUrl: './app.component.html',
styleUrls: ['./app.component.css'],
providers: [NavbarComponent]
})
export class AppComponent implements OnInit {
constructor(private _backend: BackendService) {}
title = 'app';
items: MenuItem[];
leftmenuitems: MenuItem[];
ngOnInit() {
/*this.items = [];
this.items.push({ label: 'Categories' });
this.items.push({ label: 'test', url: 'https://www.skysail.io' });
this._backend.getLeftMenuItems()
.subscribe(res => {
this.leftmenuitems = res;
}, error => {
console.log("adding error to alertsService:", error);
});*/
}
}
| apache-2.0 |
joelaha/devopsdays-web | themes/devopsdays-legacy/static/js/googlemaps_content.js | 4466 | function initialize() {
<!-- http://stackoverflow.com/questions/3472040/rgb-value-to-hsl-converter -->
<!-- 197° - 78% 38% -->
var darkStyle = [{
featureType: 'water',
elementType: 'all',
stylers: [
{visibility: 'simplified'} ,
]
}];
var homeLatLng=new google.maps.LatLng(40.4419, -72.1419)
var map = new google.maps.Map(document.getElementById("map_canvas"), {
zoom: 1,
mapTypeControlOptions: {
mapTypeIds: ['darkwater']
},
center: homeLatLng,
mapTypeId: 'darkwater',
disableDefaultUI: true,
});
map.mapTypes.set('darkwater', new google.maps.StyledMapType(darkStyle, { name: 'Dark' }));
// Get Latitude and Longitude of a Point: http://itouchmap.com/latlong.html
// Events
var denverlocation = new google.maps.LatLng(39.739236, -104.990251);
var denvermaker = new MarkerWithLabel({
position: denverlocation,
draggable: false,
raiseOnDrag: false,
map: map,
labelContent: "Denver<br>April 21 & 22",
labelAnchor: new google.maps.Point(20, 30),
labelClass: "labels",
labelStyle: { opacity: 1 }
});
var dclocation = new google.maps.LatLng(38.8011545,-77.0659376);
var dcmarker = new MarkerWithLabel({
position: dclocation,
draggable: false,
raiseOnDrag: false,
map: map,
labelContent: "Washington, DC<br>Jun 8 & 9",
labelAnchor: new google.maps.Point(0, 0),
labelClass: "labels",
labelStyle: { opacity: 1 }
});
google.maps.event.addListener(dcmarker, "click", function (e) { location.href="/events/2016-washington-dc/"});
var minneapolislocation = new google.maps.LatLng(44.9726428,-93.2752562);
var minneapolismarker = new MarkerWithLabel({
position: minneapolislocation,
draggable: false,
raiseOnDrag: false,
map: map,
labelContent: "Minneapolis<br>Jul 20 & 21",
labelAnchor: new google.maps.Point(25, 0),
labelClass: "labels",
labelStyle: { opacity: 1 }
});
google.maps.event.addListener(minneapolismarker, "click", function (e) { location.href="/events/2016-minneapolis/"});
var kiellocation = new google.maps.LatLng(54.34208,10.121949);
var kielmarker = new MarkerWithLabel({
position: kiellocation,
draggable: false,
raiseOnDrag: false,
map: map,
labelContent: "Kiel<br>May 12 & 13",
labelAnchor: new google.maps.Point(20,-5),
labelClass: "labels",
labelStyle: { opacity: 1 }
});
google.maps.event.addListener(kielmarker, "click", function (e) { location.href="/events/2016-kiel/"});
var seattlelocation = new google.maps.LatLng(47.609895, -122.330259);
var seattlemarker = new MarkerWithLabel({
position: seattlelocation,
draggable: false,
raiseOnDrag: false,
map: map,
labelContent: "Seattle<br>May 12 & 13",
labelAnchor: new google.maps.Point(50,0),
labelClass: "labels",
labelStyle: { opacity: 1 }
});
google.maps.event.addListener(seattlemarker, "click", function (e) { location.href="/events/2016-seattle/"});
var torontolocation = new google.maps.LatLng(43.7000, -79.4000);
var torontomarker = new MarkerWithLabel({
position: torontolocation,
draggable: false,
raiseOnDrag: false,
map: map,
labelContent: "Toronto<br>May 26 & 27",
labelAnchor: new google.maps.Point(-5, 42),
labelClass: "labels", // the CSS class for the label
labelStyle: {opacity: 1}
});
google.maps.event.addListener(torontomarker, "click", function (e) { location.href="/events/2016-toronto"});
var londonlocation = new google.maps.LatLng(51.5072, -0.1275);
var londonmarker = new MarkerWithLabel({
position: londonlocation,
draggable: false,
raiseOnDrag: false,
map: map,
labelContent: "London<br>Apr 19 & 20",
labelAnchor: new google.maps.Point(25, 25),
labelClass: "labels", // the CSS class for the label
labelStyle: {opacity: 1}
});
google.maps.event.addListener(londonmarker, "click", function (e) { location.href="/events/2016-london"});
var vancouverlocation = new google.maps.LatLng(49.2827, -123.1207);
var vancouvermarker = new MarkerWithLabel({
position: vancouverlocation,
draggable: false,
raiseOnDrag: false,
map: map,
labelContent: "Vancouver<br>Apr 15 & 16",
labelAnchor: new google.maps.Point(60, 30),
labelClass: "labels", // the CSS class for the label
labelStyle: {opacity: 1}
});
google.maps.event.addListener(vancouvermarker, "click", function (e) { location.href="/events/2016-vancouver"});
}
| apache-2.0 |
twitter/heron | heron/spi/src/java/org/apache/heron/spi/common/Key.java | 13825 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.heron.spi.common;
import org.apache.heron.common.basics.ByteAmount;
/**
* Enum of all configuration key values. The following methods exist:
*
* name() - return a string representation of the member name (e.g. HERON_HOME)
* value() - return a key value bound to the enum (e.g. heron.directory.home)
* getDefault() - return the default value bound to the enum
* getType() - return the type of the key entry
*/
@SuppressWarnings({"checkstyle:MethodParamPad", "checkstyle:LineLength"})
public enum Key {
//keys for heron environment
HERON_HOME ("heron.directory.home", "/usr/local/heron"),
HERON_BIN ("heron.directory.bin", "${HERON_HOME}/bin"),
HERON_CONF ("heron.directory.conf", "${HERON_HOME}/conf"),
HERON_LIB ("heron.directory.lib", "${HERON_HOME}/lib"),
HERON_DIST ("heron.directory.dist", "${HERON_HOME}/dist"),
HERON_ETC ("heron.directory.etc", "${HERON_HOME}/etc"),
JAVA_HOME ("heron.directory.java.home", "${JAVA_HOME}"),
//keys for heron configuration files
CLUSTER_YAML ("heron.config.file.cluster.yaml", "${HERON_CONF}/cluster.yaml"),
CLIENT_YAML ("heron.config.file.client.yaml", "${HERON_CONF}/client.yaml"),
HEALTHMGR_YAML ("heron.config.file.healthmgr.yaml", "${HERON_CONF}/healthmgr.yaml"),
METRICS_YAML ("heron.config.file.metrics.yaml", "${HERON_CONF}/metrics_sinks.yaml"),
PACKING_YAML ("heron.config.file.packing.yaml", "${HERON_CONF}/packing.yaml"),
SCHEDULER_YAML ("heron.config.file.scheduler.yaml", "${HERON_CONF}/scheduler.yaml"),
STATEMGR_YAML ("heron.config.file.statemgr.yaml", "${HERON_CONF}/statemgr.yaml"),
SYSTEM_YAML ("heron.config.file.system.yaml", "${HERON_CONF}/heron_internals.yaml"),
UPLOADER_YAML ("heron.config.file.uploader.yaml", "${HERON_CONF}/uploader.yaml"),
DOWNLOADER_YAML ("heron.config.file.downloader.yaml", "${HERON_CONF}/downloader.yaml"),
STATEFUL_YAML ("heron.config.file.stateful.yaml", "${HERON_CONF}/stateful.yaml"),
//keys for config provided in the command line
CLUSTER ("heron.config.cluster", Type.STRING),
ROLE ("heron.config.role", Type.STRING),
ENVIRON ("heron.config.environ", Type.STRING),
SUBMIT_USER ("heron.config.submit_user", Type.STRING),
DRY_RUN ("heron.config.dry_run", Boolean.FALSE),
DRY_RUN_FORMAT_TYPE ("heron.config.dry_run_format_type", Type.DRY_RUN_FORMAT_TYPE),
VERBOSE ("heron.config.verbose", Boolean.FALSE),
// Used to enable verbose JVM GC logging
VERBOSE_GC ("heron.config.verbose_gc", Boolean.FALSE),
CONFIG_PROPERTY ("heron.config.property", Type.STRING),
//keys for release/build information
BUILD_VERSION ("heron.build.version", Type.STRING),
BUILD_TIME ("heron.build.time", Type.STRING),
BUILD_TIMESTAMP ("heron.build.timestamp", Type.STRING),
BUILD_HOST ("heron.build.host", Type.STRING),
BUILD_USER ("heron.build.user", Type.STRING),
//keys for config provided user classes
UPLOADER_CLASS ("heron.class.uploader", Type.STRING),
LAUNCHER_CLASS ("heron.class.launcher", Type.STRING),
SCHEDULER_CLASS ("heron.class.scheduler", Type.STRING),
PACKING_CLASS ("heron.class.packing.algorithm", Type.STRING),
REPACKING_CLASS ("heron.class.repacking.algorithm", Type.STRING),
STATE_MANAGER_CLASS ("heron.class.state.manager", Type.STRING),
AURORA_CONTROLLER_CLASS ("heron.class.scheduler.aurora.controller.cli", Boolean.TRUE),
//keys for scheduler config
SCHEDULER_IS_SERVICE ("heron.scheduler.is.service", Boolean.TRUE),
SCHEDULER_PROPERTIES ("heron.scheduler.properties", Type.PROPERTIES),
//keys for config provided user binaries and jars
SCHEDULER_JAR ("heron.jars.scheduler", "${HERON_LIB}/scheduler/heron-scheduler.jar"),
//keys for config provided files and directories
INTERNALS_CONFIG_FILE ("heron.internals.config.file", Type.STRING),
// heron core can be either a directory or URI, a switch to control it
// default is to use core URI
CORE_PACKAGE_DIRECTORY ("heron.package.core.directory", "${HERON_DIST}/heron-core"),
CORE_PACKAGE_URI ("heron.package.core.uri", "${HERON_DIST}/heron-core.tar.gz"),
USE_CORE_PACKAGE_URI ("heron.package.use_core_uri", Boolean.TRUE),
//keys for packages URIs
TOPOLOGY_PACKAGE_URI ("heron.package.topology.uri", Type.STRING),
//keys for topology
TOPOLOGY_ID ("heron.topology.id", Type.STRING),
TOPOLOGY_NAME ("heron.topology.name", Type.STRING),
TOPOLOGY_DEFINITION_FILE ("heron.topology.definition.file", Type.STRING),
TOPOLOGY_DEFINITION ("heron.topology.definition", Type.STRING),
TOPOLOGY_BINARY_FILE ("heron.topology.binary.file", Type.STRING),
TOPOLOGY_PACKAGE_FILE ("heron.topology.package.file", Type.STRING),
TOPOLOGY_PACKAGE_TYPE ("heron.topology.package.type", Type.PACKAGE_TYPE),
TOPOLOGY_CONTAINER_ID ("heron.topology.container.id", Type.STRING),
//keys for proxy config during submission
SCHEDULER_PROXY_CONNECTION_STRING("heron.proxy.connection.string", Type.STRING),
SCHEDULER_PROXY_CONNECTION_TYPE ("heron.proxy.connection.type", Type.STRING),
//keys for storing state"),
STATEMGR_CONNECTION_STRING("heron.statemgr.connection.string", Type.STRING),
STATEMGR_ROOT_PATH ("heron.statemgr.root.path", Type.STRING),
//keys for config provided default values for resources
STMGR_RAM ("heron.resources.stmgr.ram", ByteAmount.fromBytes(1073741824)),
CKPTMGR_RAM ("heron.resources.ckptmgr.ram", ByteAmount.fromBytes(1073741824)),
METRICSMGR_RAM ("heron.resources.metricsmgr.ram", ByteAmount.fromBytes(1073741824)),
INSTANCE_RAM ("heron.resources.instance.ram", ByteAmount.fromBytes(1073741824)),
INSTANCE_CPU ("heron.resources.instance.cpu", 1.0),
INSTANCE_DISK ("heron.resources.instance.disk", ByteAmount.fromBytes(1073741824)),
//keys for checkpoint management
STATEFUL_STORAGE_CLASSNAME ("heron.statefulstorage.classname", Type.STRING),
STATEFUL_STORAGE_CONF ("heron.statefulstorage.config", Type.MAP),
STATEFUL_STORAGE_CUSTOM_CLASSPATH ("heron.statefulstorage.custom.classpath", Type.STRING),
// keys for metricscache manager
METRICSCACHEMGR_MODE ("heron.topology.metricscachemgr.mode", "disabled"),
// keys for health manager
HEALTHMGR_MODE ("heron.topology.healthmgr.mode", Type.STRING),
//keys for config provided paths
INSTANCE_CLASSPATH ("heron.classpath.instance", "${HERON_LIB}/instance/*"),
HEALTHMGR_CLASSPATH ("heron.classpath.healthmgr", "${HERON_LIB}/healthmgr/*"),
METRICSMGR_CLASSPATH ("heron.classpath.metrics.manager", "${HERON_LIB}/metricsmgr/*"),
METRICSCACHEMGR_CLASSPATH ("heron.classpath.metricscache.manager", "${HERON_LIB}/metricscachemgr/*"),
PACKING_CLASSPATH ("heron.classpath.packing", "${HERON_LIB}/packing/*"),
SCHEDULER_CLASSPATH ("heron.classpath.scheduler", "${HERON_LIB}/scheduler/*"),
STATEMGR_CLASSPATH ("heron.classpath.statemgr", "${HERON_LIB}/statemgr/*"),
UPLOADER_CLASSPATH ("heron.classpath.uploader", "${HERON_LIB}/uploader/*"),
CKPTMGR_CLASSPATH ("heron.classpath.ckptmgr", "${HERON_LIB}/ckptmgr/*"),
STATEFULSTORAGE_CLASSPATH ("heron.classpath.statefulstorage", "${HERON_LIB}/statefulstorage/*"),
//keys for run time config
TOPOLOGY_CLASSPATH ("heron.runtime.topology.class.path", Type.STRING),
SCHEDULER_STATE_MANAGER_ADAPTOR("heron.runtime.scheduler.state.manager.adaptor", Type.STRING),
SCHEDULER_SHUTDOWN ("heron.runtime.scheduler.shutdown", Type.STRING),
PACKING_CLASS_INSTANCE ("heron.runtime.packing.class.instance", Type.STRING),
LAUNCHER_CLASS_INSTANCE ("heron.runtime.launcher.class.instance", Type.STRING),
COMPONENT_RAMMAP ("heron.runtime.component.rammap", Type.STRING),
COMPONENT_JVM_OPTS_IN_BASE64 ("heron.runtime.component.jvm.opts.in.base64", Type.STRING),
INSTANCE_JVM_OPTS_IN_BASE64 ("heron.runtime.instance.jvm.opts.in.base64", Type.STRING),
NUM_CONTAINERS ("heron.runtime.num.containers", Type.INTEGER),
DOWNLOADER_PROTOCOLS ("heron.downloader.registry", Type.MAP),
//release info
HERON_RELEASE_PACKAGE ("heron.release.package", Type.STRING),
HERON_RELEASE_PACKAGE_ROLE ("heron.release.package.role", Type.STRING),
HERON_RELEASE_PACKAGE_NAME ("heron.release.package.name", Type.STRING),
HERON_RELEASE_PACKAGE_VERSION ("heron.release.package.version", Type.STRING),
HERON_UPLOADER_VERSION ("heron.uploader.version", Type.STRING),
//keys for config provided paths
HERON_CLUSTER_HOME ("heron.directory.cluster.home", "./heron-core"),
HERON_CLUSTER_CONF ("heron.directory.cluster.conf", "./heron-conf"),
// TODO: rename below to heron.directory.cluster.java.home, coordinate change with twitter configs
HERON_CLUSTER_JAVA_HOME("heron.directory.sandbox.java.home", "/usr/lib/jvm/default-java"),
//keys for heron configuration files on the cluster
OVERRIDE_YAML("heron.config.file.override.yaml", "${HERON_CONF}/override.yaml"),
// Path to the config overrides passed into the API server. Only applicable to submitting
// topologies via API server
APISERVER_OVERRIDE_YAML("heron.apiserver.file.override.yaml", Type.STRING),
//keys for config provided user binaries
EXECUTOR_BINARY ("heron.binaries.executor", "${HERON_BIN}/heron-executor"),
STMGR_BINARY ("heron.binaries.stmgr", "${HERON_BIN}/heron-stmgr"),
TMANAGER_BINARY ("heron.binaries.tmanager", "${HERON_BIN}/heron-tmanager"),
SHELL_BINARY ("heron.binaries.shell", "${HERON_BIN}/heron-shell"),
PYTHON_INSTANCE_BINARY("heron.binaries.python.instance", "${HERON_BIN}/heron-python-instance"),
CPP_INSTANCE_BINARY ("heron.binaries.cpp.instance", "${HERON_BIN}/heron-cpp-instance"),
DOWNLOADER_BINARY ("heron.binaries.downloader", "${HERON_BIN}/heron-downloader"),
DOWNLOADER_CONF ("heron.binaries.downloader-conf", "${HERON_BIN}/heron-downloader-config"),
// keys for `heron` command line.
// Prompt user when more containers are required so that
// user has another chance to double check quota is available.
// To enable it, change the config from "disabled" to "prompt".
UPDATE_PROMPT ("heron.command.update.prompt", "disabled");
private final String value;
private final Object defaultValue;
private final Type type;
public enum Type {
BOOLEAN,
BYTE_AMOUNT,
DOUBLE,
DRY_RUN_FORMAT_TYPE,
INTEGER,
LONG,
STRING,
PACKAGE_TYPE,
PROPERTIES,
MAP,
UNKNOWN
}
Key(String value, Type type) {
this.value = value;
this.type = type;
this.defaultValue = null;
}
Key(String value, String defaultValue) {
this.value = value;
this.type = Type.STRING;
this.defaultValue = defaultValue;
}
Key(String value, Double defaultValue) {
this.value = value;
this.type = Type.DOUBLE;
this.defaultValue = defaultValue;
}
Key(String value, Boolean defaultValue) {
this.value = value;
this.type = Type.BOOLEAN;
this.defaultValue = defaultValue;
}
Key(String value, ByteAmount defaultValue) {
this.value = value;
this.type = Type.BYTE_AMOUNT;
this.defaultValue = defaultValue;
}
/**
* Get the key value for this enum (i.e., heron.directory.home)
* @return key value
*/
public String value() {
return value;
}
public Type getType() {
return type;
}
/**
* Return the default value
*/
public Object getDefault() {
return this.defaultValue;
}
public String getDefaultString() {
if (type != Type.STRING) {
throw new IllegalAccessError(String.format(
"Config Key %s is type %s, getDefaultString() not supported", this.name(), this.type));
}
return (String) this.defaultValue;
}
}
| apache-2.0 |
ninqing/tddl | tddl-executor/src/main/java/com/taobao/tddl/executor/function/scalar/datatime/Time.java | 1390 | package com.taobao.tddl.executor.function.scalar.datatime;
import com.taobao.tddl.executor.common.ExecutionContext;
import com.taobao.tddl.executor.function.ScalarFunction;
import com.taobao.tddl.executor.utils.ExecUtils;
import com.taobao.tddl.optimizer.core.datatype.DataType;
/**
* Extracts the time part of the time or datetime expression expr and returns it
* as a string. This function is unsafe for statement-based replication. In
* MySQL 5.6, a warning is logged if you use this function when binlog_format is
* set to STATEMENT. (Bug #47995)
*
* <pre>
* mysql> SELECT TIME('2003-12-31 01:02:03');
* -> '01:02:03'
* mysql> SELECT TIME('2003-12-31 01:02:03.000123');
* -> '01:02:03.000123'
* </pre>
*
* @author jianghang 2014-4-16 下午11:20:47
* @since 5.0.7
*/
public class Time extends ScalarFunction {
@Override
public Object compute(Object[] args, ExecutionContext ec) {
for (Object arg : args) {
if (ExecUtils.isNull(arg)) {
return null;
}
}
DataType type = getReturnType();
return type.convertFrom(args[0]);
}
@Override
public DataType getReturnType() {
return DataType.TimeType;
}
@Override
public String[] getFunctionNames() {
return new String[] { "TIME" };
}
}
| apache-2.0 |
zhouluoyang/openfire | src/java/org/jivesoftware/util/PropertyClusterEventTask.java | 3263 | /**
* $RCSfile$
* $Revision: $
* $Date: $
*
* Copyright (C) 2005-2008 Jive Software. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jivesoftware.util;
import org.jivesoftware.util.cache.ClusterTask;
import org.jivesoftware.util.cache.ExternalizableUtil;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
/**
* This task updates or deletes a property in a cluster node's property map.
* {@link PropertyEventListener} of each cluster node will be alerted of the event.
*
* @author Gaston Dombiak
*/
public class PropertyClusterEventTask implements ClusterTask<Void> {
private Type event;
private String key;
private String value;
public static PropertyClusterEventTask createPutTask(String key, String value) {
PropertyClusterEventTask task = new PropertyClusterEventTask();
task.event = Type.put;
task.key = key;
task.value = value;
return task;
}
public static PropertyClusterEventTask createDeleteTask(String key) {
PropertyClusterEventTask task = new PropertyClusterEventTask();
task.event = Type.deleted;
task.key = key;
return task;
}
@Override
public Void getResult() {
return null;
}
@Override
public void run() {
if (Type.put == event) {
JiveProperties.getInstance().localPut(key, value);
}
else if (Type.deleted == event) {
JiveProperties.getInstance().localRemove(key);
}
}
@Override
public void writeExternal(ObjectOutput out) throws IOException {
ExternalizableUtil.getInstance().writeInt(out, event.ordinal());
ExternalizableUtil.getInstance().writeSafeUTF(out, key);
ExternalizableUtil.getInstance().writeBoolean(out, value != null);
if (value != null) {
ExternalizableUtil.getInstance().writeSafeUTF(out, value);
}
}
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
event = Type.values()[ExternalizableUtil.getInstance().readInt(in)];
key = ExternalizableUtil.getInstance().readSafeUTF(in);
if (ExternalizableUtil.getInstance().readBoolean(in)) {
value = ExternalizableUtil.getInstance().readSafeUTF(in);
}
}
private static enum Type {
/**
* Event triggered when a system property was added or updated in the system.
*/
put,
/**
* Event triggered when a system property was deleted from the system.
*/
deleted
}
}
| apache-2.0 |
anupsabraham/store_tv_player | django_project/store_cms/store_cms/wsgi.py | 394 | """
WSGI config for store_cms project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "store_cms.settings")
application = get_wsgi_application() | apache-2.0 |
openhab/openhab.pebble | src/js/item.js | 1500 | // Copyright 2015 Richard Lee
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// A library to communicate with items
var Util = require('util');
var Config = require('config');
var ajax = require('ajax');
/* global module */
var exports = module.exports = {};
exports.sendCommand = function (item, command, success) {
Util.log('sending command: ' + command + ' to ' + item.name + ', url: ' + item.link);
ajax(
{
url: item.link,
method: 'post',
type: 'text',
data: command,
headers: {
'Content-Type': 'text/plain',
Authorization: Config.auth
}
},
function (data) {
Util.log('Successfully sent command: ' + data);
// refresh local item state
// XXX really, this should be done through http long polling
item.state = command;
success();
},
function (error) {
Util.log('Failed to send command: ' + error);
Util.error('Comm Error', "Can't set state");
}
);
};
| apache-2.0 |
osrf/opensplice | src/api/dcps/java5/common/java/code/org/opensplice/dds/core/TimeOutExceptionImpl.java | 1601 | /*
* Vortex OpenSplice
*
* This software and documentation are Copyright 2006 to TO_YEAR ADLINK
* Technology Limited, its affiliated companies and licensors. All rights
* reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.opensplice.dds.core;
import org.omg.dds.core.ServiceEnvironment;
/**
 * OpenSplice-specific {@link java.util.concurrent.TimeoutException} that
 * remembers the {@link ServiceEnvironment} it originated from and renders its
 * stack trace through the OpenSplice utility formatter.
 */
public class TimeOutExceptionImpl extends java.util.concurrent.TimeoutException{
    private static final long serialVersionUID = -2564470280204317019L;
    // Environment the exception was raised in; exposed via getEnvironment().
    private OsplServiceEnvironment environment;

    public TimeOutExceptionImpl(OsplServiceEnvironment environment,
            String message) {
        super(message);
        this.environment = environment;
    }

    /** @return the service environment this exception belongs to. */
    public ServiceEnvironment getEnvironment() {
        return this.environment;
    }

    @Override
    public void printStackTrace() {
        // Delegates to toString(), which formats via Utilities below.
        System.err.println(this.toString());
    }

    @Override
    public String toString() {
        return Utilities.getOsplExceptionStack(this, this.getStackTrace());
    }
}
| apache-2.0 |
ibm-contribs/kubernetes | pkg/api/v1/helpers_test.go | 11833 | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"reflect"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/api"
)
// TestAddToNodeAddresses verifies that AddToNodeAddresses appends new
// addresses in order while dropping entries that are already present.
func TestAddToNodeAddresses(t *testing.T) {
	testCases := []struct {
		existing []NodeAddress
		toAdd    []NodeAddress
		expected []NodeAddress
	}{
		{
			existing: []NodeAddress{},
			toAdd:    []NodeAddress{},
			expected: []NodeAddress{},
		},
		{
			existing: []NodeAddress{},
			toAdd: []NodeAddress{
				{Type: NodeExternalIP, Address: "1.1.1.1"},
				{Type: NodeHostName, Address: "localhost"},
			},
			expected: []NodeAddress{
				{Type: NodeExternalIP, Address: "1.1.1.1"},
				{Type: NodeHostName, Address: "localhost"},
			},
		},
		{
			// Duplicates inside toAdd itself must also collapse.
			existing: []NodeAddress{},
			toAdd: []NodeAddress{
				{Type: NodeExternalIP, Address: "1.1.1.1"},
				{Type: NodeExternalIP, Address: "1.1.1.1"},
			},
			expected: []NodeAddress{
				{Type: NodeExternalIP, Address: "1.1.1.1"},
			},
		},
		{
			existing: []NodeAddress{
				{Type: NodeExternalIP, Address: "1.1.1.1"},
				{Type: NodeInternalIP, Address: "10.1.1.1"},
			},
			toAdd: []NodeAddress{
				{Type: NodeExternalIP, Address: "1.1.1.1"},
				{Type: NodeHostName, Address: "localhost"},
			},
			expected: []NodeAddress{
				{Type: NodeExternalIP, Address: "1.1.1.1"},
				{Type: NodeInternalIP, Address: "10.1.1.1"},
				{Type: NodeHostName, Address: "localhost"},
			},
		},
	}

	for i, tc := range testCases {
		AddToNodeAddresses(&tc.existing, tc.toAdd...)
		// Semantic equality tolerates nil-vs-empty slice differences.
		if !api.Semantic.DeepEqual(tc.expected, tc.existing) {
			t.Errorf("case[%d], expected: %v, got: %v", i, tc.expected, tc.existing)
		}
	}
}
// TestGetAccessModesFromString verifies that comma-separated access-mode
// abbreviations ("ROX", "RWO", "RWX") are parsed into the expected modes.
func TestGetAccessModesFromString(t *testing.T) {
	modes := GetAccessModesFromString("ROX")
	if !containsAccessMode(modes, ReadOnlyMany) {
		t.Errorf("Expected mode %s, but got %+v", ReadOnlyMany, modes)
	}

	modes = GetAccessModesFromString("ROX,RWX")
	if !containsAccessMode(modes, ReadOnlyMany) {
		t.Errorf("Expected mode %s, but got %+v", ReadOnlyMany, modes)
	}
	if !containsAccessMode(modes, ReadWriteMany) {
		t.Errorf("Expected mode %s, but got %+v", ReadWriteMany, modes)
	}

	modes = GetAccessModesFromString("RWO,ROX,RWX")
	if !containsAccessMode(modes, ReadOnlyMany) {
		t.Errorf("Expected mode %s, but got %+v", ReadOnlyMany, modes)
	}
	if !containsAccessMode(modes, ReadWriteMany) {
		t.Errorf("Expected mode %s, but got %+v", ReadWriteMany, modes)
	}
}
// TestRemoveDuplicateAccessModes checks that repeated access modes collapse
// down to the distinct set.
func TestRemoveDuplicateAccessModes(t *testing.T) {
	input := []PersistentVolumeAccessMode{
		ReadWriteOnce, ReadOnlyMany, ReadOnlyMany, ReadOnlyMany,
	}
	deduped := removeDuplicateAccessModes(input)
	if got := len(deduped); got != 2 {
		t.Errorf("Expected 2 distinct modes in set but found %v", got)
	}
}
// TestNodeSelectorRequirementsAsSelector verifies conversion of node-selector
// requirements into label selectors, including the Gt/Lt operators and the
// error case where Exists is given values.
func TestNodeSelectorRequirementsAsSelector(t *testing.T) {
	matchExpressions := []NodeSelectorRequirement{{
		Key:      "foo",
		Operator: NodeSelectorOpIn,
		Values:   []string{"bar", "baz"},
	}}
	// mustParse panics on malformed fixtures so test setup errors surface loudly.
	mustParse := func(s string) labels.Selector {
		out, e := labels.Parse(s)
		if e != nil {
			panic(e)
		}
		return out
	}
	tc := []struct {
		in        []NodeSelectorRequirement
		out       labels.Selector
		expectErr bool
	}{
		{in: nil, out: labels.Nothing()},
		{in: []NodeSelectorRequirement{}, out: labels.Nothing()},
		{
			in:  matchExpressions,
			out: mustParse("foo in (baz,bar)"),
		},
		{
			// Exists must not carry values; expect an error.
			in: []NodeSelectorRequirement{{
				Key:      "foo",
				Operator: NodeSelectorOpExists,
				Values:   []string{"bar", "baz"},
			}},
			expectErr: true,
		},
		{
			in: []NodeSelectorRequirement{{
				Key:      "foo",
				Operator: NodeSelectorOpGt,
				Values:   []string{"1"},
			}},
			out: mustParse("foo>1"),
		},
		{
			in: []NodeSelectorRequirement{{
				Key:      "bar",
				Operator: NodeSelectorOpLt,
				Values:   []string{"7"},
			}},
			out: mustParse("bar<7"),
		},
	}

	for i, tc := range tc {
		out, err := NodeSelectorRequirementsAsSelector(tc.in)
		if err == nil && tc.expectErr {
			t.Errorf("[%v]expected error but got none.", i)
		}
		if err != nil && !tc.expectErr {
			t.Errorf("[%v]did not expect error but got: %v", i, err)
		}
		if !reflect.DeepEqual(out, tc.out) {
			t.Errorf("[%v]expected:\n\t%+v\nbut got:\n\t%+v", i, tc.out, out)
		}
	}
}
// TestGetAffinityFromPod verifies parsing of the affinity annotation: absent
// annotation and well-formed JSON succeed, truncated JSON yields an error.
func TestGetAffinityFromPod(t *testing.T) {
	testCases := []struct {
		pod       *Pod
		expectErr bool
	}{
		{
			pod:       &Pod{},
			expectErr: false,
		},
		{
			pod: &Pod{
				ObjectMeta: ObjectMeta{
					Annotations: map[string]string{
						AffinityAnnotationKey: `
						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
							"nodeSelectorTerms": [{
								"matchExpressions": [{
									"key": "foo",
									"operator": "In",
									"values": ["value1", "value2"]
								}]
							}]
						}}}`,
					},
				},
			},
			expectErr: false,
		},
		{
			// Deliberately truncated JSON; the parser must report an error.
			pod: &Pod{
				ObjectMeta: ObjectMeta{
					Annotations: map[string]string{
						AffinityAnnotationKey: `
						{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
							"nodeSelectorTerms": [{
								"matchExpressions": [{
									"key": "foo",
						`,
					},
				},
			},
			expectErr: true,
		},
	}

	for i, tc := range testCases {
		_, err := GetAffinityFromPodAnnotations(tc.pod.Annotations)
		if err == nil && tc.expectErr {
			t.Errorf("[%v]expected error but got none.", i)
		}
		if err != nil && !tc.expectErr {
			t.Errorf("[%v]did not expect error but got: %v", i, err)
		}
	}
}
// TestTaintToString verifies the string form of a taint: "key=value:Effect",
// collapsing to "key:Effect" when the value is empty.
func TestTaintToString(t *testing.T) {
	testCases := []struct {
		taint          *Taint
		expectedString string
	}{
		{
			taint: &Taint{
				Key:    "foo",
				Value:  "bar",
				Effect: TaintEffectNoSchedule,
			},
			expectedString: "foo=bar:NoSchedule",
		},
		{
			taint: &Taint{
				Key:    "foo",
				Effect: TaintEffectNoSchedule,
			},
			expectedString: "foo:NoSchedule",
		},
	}

	for i, tc := range testCases {
		if tc.expectedString != tc.taint.ToString() {
			t.Errorf("[%v] expected taint %v converted to %s, got %s", i, tc.taint, tc.expectedString, tc.taint.ToString())
		}
	}
}
// TestMatchTaint verifies that MatchTaint compares taints by key and effect
// only: differing values still match, differing keys or effects do not.
func TestMatchTaint(t *testing.T) {
	testCases := []struct {
		description  string
		taint        *Taint
		taintToMatch Taint
		expectMatch  bool
	}{
		{
			description: "two taints with the same key,value,effect should match",
			taint: &Taint{
				Key:    "foo",
				Value:  "bar",
				Effect: TaintEffectNoSchedule,
			},
			taintToMatch: Taint{
				Key:    "foo",
				Value:  "bar",
				Effect: TaintEffectNoSchedule,
			},
			expectMatch: true,
		},
		{
			description: "two taints with the same key,effect but different value should match",
			taint: &Taint{
				Key:    "foo",
				Value:  "bar",
				Effect: TaintEffectNoSchedule,
			},
			taintToMatch: Taint{
				Key:    "foo",
				Value:  "different-value",
				Effect: TaintEffectNoSchedule,
			},
			expectMatch: true,
		},
		{
			description: "two taints with the different key cannot match",
			taint: &Taint{
				Key:    "foo",
				Value:  "bar",
				Effect: TaintEffectNoSchedule,
			},
			taintToMatch: Taint{
				Key:    "different-key",
				Value:  "bar",
				Effect: TaintEffectNoSchedule,
			},
			expectMatch: false,
		},
		{
			description: "two taints with the different effect cannot match",
			taint: &Taint{
				Key:    "foo",
				Value:  "bar",
				Effect: TaintEffectNoSchedule,
			},
			taintToMatch: Taint{
				Key:    "foo",
				Value:  "bar",
				Effect: TaintEffectPreferNoSchedule,
			},
			expectMatch: false,
		},
	}

	for _, tc := range testCases {
		if tc.expectMatch != tc.taint.MatchTaint(tc.taintToMatch) {
			t.Errorf("[%s] expect taint %s match taint %s", tc.description, tc.taint.ToString(), tc.taintToMatch.ToString())
		}
	}
}
// TestGetAvoidPodsFromNode verifies parsing of the prefer-avoid-pods node
// annotation: no annotation yields an empty value, well-formed JSON is
// decoded fully, and malformed JSON (missing closing braces) errors out.
func TestGetAvoidPodsFromNode(t *testing.T) {
	controllerFlag := true
	testCases := []struct {
		node        *Node
		expectValue AvoidPods
		expectErr   bool
	}{
		{
			node:        &Node{},
			expectValue: AvoidPods{},
			expectErr:   false,
		},
		{
			node: &Node{
				ObjectMeta: ObjectMeta{
					Annotations: map[string]string{
						PreferAvoidPodsAnnotationKey: `
						{
							"preferAvoidPods": [
								{
									"podSignature": {
										"podController": {
											"apiVersion": "v1",
											"kind": "ReplicationController",
											"name": "foo",
											"uid": "abcdef123456",
											"controller": true
										}
									},
									"reason": "some reason",
									"message": "some message"
								}
							]
						}`,
					},
				},
			},
			expectValue: AvoidPods{
				PreferAvoidPods: []PreferAvoidPodsEntry{
					{
						PodSignature: PodSignature{
							PodController: &metav1.OwnerReference{
								APIVersion: "v1",
								Kind:       "ReplicationController",
								Name:       "foo",
								UID:        "abcdef123456",
								Controller: &controllerFlag,
							},
						},
						Reason:  "some reason",
						Message: "some message",
					},
				},
			},
			expectErr: false,
		},
		{
			node: &Node{
				// Missing end symbol of "podController" and "podSignature"
				ObjectMeta: ObjectMeta{
					Annotations: map[string]string{
						PreferAvoidPodsAnnotationKey: `
						{
							"preferAvoidPods": [
								{
									"podSignature": {
										"podController": {
											"kind": "ReplicationController",
											"apiVersion": "v1"
							"reason": "some reason",
							"message": "some message"
								}
							]
						}`,
					},
				},
			},
			expectValue: AvoidPods{},
			expectErr:   true,
		},
	}

	for i, tc := range testCases {
		v, err := GetAvoidPodsFromNodeAnnotations(tc.node.Annotations)
		if err == nil && tc.expectErr {
			t.Errorf("[%v]expected error but got none.", i)
		}
		if err != nil && !tc.expectErr {
			t.Errorf("[%v]did not expect error but got: %v", i, err)
		}
		if !reflect.DeepEqual(tc.expectValue, v) {
			// Do NOT index v.PreferAvoidPods[0] in the failure message: the
			// slice is empty in the error case, and dereferencing it would
			// panic instead of reporting a clean test failure.
			t.Errorf("[%v]expect value %v but got %v", i, tc.expectValue, v)
		}
	}
}
// TestSysctlsFromPodAnnotation verifies parsing of comma-separated
// "name=value" sysctl annotations, including empty-value, multi-entry,
// and malformed (missing '=', empty name, trailing comma) inputs.
func TestSysctlsFromPodAnnotation(t *testing.T) {
	type Test struct {
		annotation  string
		expectValue []Sysctl
		expectErr   bool
	}
	for i, test := range []Test{
		{
			annotation:  "",
			expectValue: nil,
		},
		{
			annotation: "foo.bar",
			expectErr:  true,
		},
		{
			annotation: "=123",
			expectErr:  true,
		},
		{
			// Empty value after '=' is legal.
			annotation:  "foo.bar=",
			expectValue: []Sysctl{{Name: "foo.bar", Value: ""}},
		},
		{
			annotation:  "foo.bar=42",
			expectValue: []Sysctl{{Name: "foo.bar", Value: "42"}},
		},
		{
			annotation: "foo.bar=42,",
			expectErr:  true,
		},
		{
			annotation:  "foo.bar=42,abc.def=1",
			expectValue: []Sysctl{{Name: "foo.bar", Value: "42"}, {Name: "abc.def", Value: "1"}},
		},
	} {
		sysctls, err := SysctlsFromPodAnnotation(test.annotation)
		if test.expectErr && err == nil {
			t.Errorf("[%v]expected error but got none", i)
		} else if !test.expectErr && err != nil {
			t.Errorf("[%v]did not expect error but got: %v", i, err)
		} else if !reflect.DeepEqual(sysctls, test.expectValue) {
			t.Errorf("[%v]expect value %v but got %v", i, test.expectValue, sysctls)
		}
	}
}
| apache-2.0 |
iychoi/syndicate | old/md-service/SMDS/content.py | 2089 | #!/usr/bin/python
from SMDS.db import Row, Table
import SMDS.logger as logger
from SMDS.parameter import Parameter, Mixed
from SMDS.filter import Filter
from types import *
class Content(Row):
    """A single row of the 'contents' table: one content-server record.

    Field metadata (types and help text) is declared via Parameter objects,
    following the Row convention of this codebase.
    """
    table_name = 'contents'
    primary_key = 'content_id'
    # Tables joining against contents; presumably used by Row for cascading
    # deletes/joins -- TODO confirm against SMDS.db.Row.
    join_tables = ['user_content']
    fields = {
        'content_id': Parameter(int, "Content server identifier"),
        'host_url': Parameter(str, "Base URL of the content server"),
        'owner': Parameter(long, "User ID of the user that is responsible for this content server")
    }
class Contents(Table):
    """Filtered collection of Content rows.

    ``content_filter`` may be:
      * a list/tuple/set mixing ints (content_ids) and strings (host_urls),
        combined with OR;
      * a dict of field -> value constraints, combined with AND;
      * a single string (host_url) or int (content_id);
      * None, selecting every row.
    """

    def __init__(self, api, content_filter = None, columns = None):
        Table.__init__(self, api, Content, columns)

        db_name = "contents"
        sql = "SELECT %s FROM %s WHERE True" % \
            (", ".join(self.columns.keys()), db_name)

        if content_filter is not None:
            if isinstance(content_filter, (list, tuple, set)):
                # Separate the list into integers and strings
                ints = filter(lambda x: isinstance(x, (int, long)), content_filter)
                strs = filter(lambda x: isinstance(x, StringTypes), content_filter)
                content_filter = Filter(Content.fields, {'content_id': ints, 'host_url': strs})
                # NOTE(review): the "(%s) %s" format expects Filter.sql() to
                # return a 2-tuple (clause, suffix) -- confirm against
                # SMDS.filter.Filter.
                sql += " AND (%s) %s" % content_filter.sql(api, "OR")
            elif isinstance(content_filter, dict):
                content_filter = Filter(Content.fields, content_filter)
                sql += " AND (%s) %s" % content_filter.sql(api, "AND")
            elif isinstance(content_filter, StringTypes):
                content_filter = Filter(Content.fields, {'host_url': [content_filter]})
                sql += " AND (%s) %s" % content_filter.sql(api, "AND")
            elif isinstance(content_filter, int):
                content_filter = Filter(Content.fields, {'content_id': [content_filter]})
                sql += " AND (%s) %s" % content_filter.sql(api, "AND")
            else:
                # Fixed: the original used the Python-2-only statement form
                # ``raise MDInvalidArgument, "..."``; the call form below is
                # equivalent in Python 2 and also valid Python 3.
                # NOTE(review): MDInvalidArgument is not imported in this
                # module -- confirm it is in scope (e.g. from SMDS.faults).
                raise MDInvalidArgument("Wrong content filter %r" % content_filter)

        self.selectall(sql)
| apache-2.0 |
aglne/dubbo | dubbo-config/dubbo-config-api/src/main/java/org/apache/dubbo/config/builders/MethodBuilder.java | 6105 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.dubbo.config.builders;
import org.apache.dubbo.config.ArgumentConfig;
import org.apache.dubbo.config.MethodConfig;
import java.util.ArrayList;
import java.util.List;
/**
* This is a builder for build {@link MethodConfig}.
*
* @since 2.7
*/
public class MethodBuilder extends AbstractMethodBuilder<MethodConfig, MethodBuilder> {
    /**
     * The method name
     */
    private String name;

    /**
     * Stat
     */
    private Integer stat;

    /**
     * Whether to retry
     */
    private Boolean retry;

    /**
     * If it's reliable
     */
    private Boolean reliable;

    /**
     * Thread limits for method invocations
     */
    private Integer executes;

    /**
     * If it's deprecated
     */
    private Boolean deprecated;

    /**
     * Whether to enable sticky
     */
    private Boolean sticky;

    /**
     * Whether need to return
     */
    private Boolean isReturn;

    /**
     * Callback instance when async-call is invoked
     */
    private Object oninvoke;

    /**
     * Callback method when async-call is invoked
     */
    private String oninvokeMethod;

    /**
     * Callback instance when async-call is returned
     */
    private Object onreturn;

    /**
     * Callback method when async-call is returned
     */
    private String onreturnMethod;

    /**
     * Callback instance when async-call has exception thrown
     */
    private Object onthrow;

    /**
     * Callback method when async-call has exception thrown
     */
    private String onthrowMethod;

    /**
     * The method arguments
     */
    private List<ArgumentConfig> arguments;

    /**
     * These properties come from MethodConfig's parent Config module, they will neither be collected directly from xml or API nor be delivered to url
     */
    private String service;
    private String serviceId;

    public MethodBuilder name(String name) {
        this.name = name;
        return getThis();
    }

    public MethodBuilder stat(Integer stat) {
        this.stat = stat;
        return getThis();
    }

    public MethodBuilder retry(Boolean retry) {
        this.retry = retry;
        return getThis();
    }

    public MethodBuilder reliable(Boolean reliable) {
        this.reliable = reliable;
        return getThis();
    }

    public MethodBuilder executes(Integer executes) {
        this.executes = executes;
        return getThis();
    }

    public MethodBuilder deprecated(Boolean deprecated) {
        this.deprecated = deprecated;
        return getThis();
    }

    public MethodBuilder sticky(Boolean sticky) {
        this.sticky = sticky;
        return getThis();
    }

    public MethodBuilder isReturn(Boolean isReturn) {
        this.isReturn = isReturn;
        return getThis();
    }

    public MethodBuilder oninvoke(Object oninvoke) {
        this.oninvoke = oninvoke;
        return getThis();
    }

    public MethodBuilder oninvokeMethod(String oninvokeMethod) {
        this.oninvokeMethod = oninvokeMethod;
        return getThis();
    }

    public MethodBuilder onreturn(Object onreturn) {
        this.onreturn = onreturn;
        return getThis();
    }

    public MethodBuilder onreturnMethod(String onreturnMethod) {
        this.onreturnMethod = onreturnMethod;
        return getThis();
    }

    public MethodBuilder onthrow(Object onthrow) {
        this.onthrow = onthrow;
        return getThis();
    }

    public MethodBuilder onthrowMethod(String onthrowMethod) {
        this.onthrowMethod = onthrowMethod;
        return getThis();
    }

    public MethodBuilder addArguments(List<? extends ArgumentConfig> arguments) {
        ensureArguments().addAll(arguments);
        return getThis();
    }

    public MethodBuilder addArgument(ArgumentConfig argument) {
        ensureArguments().add(argument);
        return getThis();
    }

    /**
     * Lazily initializes and returns the argument list. Extracted so the two
     * add* methods share one initialization path instead of duplicating it.
     */
    private List<ArgumentConfig> ensureArguments() {
        if (this.arguments == null) {
            this.arguments = new ArrayList<>();
        }
        return this.arguments;
    }

    /**
     * Assembles a {@link MethodConfig} from every value collected so far.
     * Parent-declared properties are applied first via {@code super.build}.
     */
    public MethodConfig build() {
        MethodConfig methodConfig = new MethodConfig();
        super.build(methodConfig);

        methodConfig.setArguments(arguments);
        methodConfig.setDeprecated(deprecated);
        methodConfig.setExecutes(executes);
        methodConfig.setName(name);
        methodConfig.setOninvoke(oninvoke);
        methodConfig.setOninvokeMethod(oninvokeMethod);
        methodConfig.setOnreturn(onreturn);
        methodConfig.setOnreturnMethod(onreturnMethod);
        methodConfig.setOnthrow(onthrow);
        methodConfig.setOnthrowMethod(onthrowMethod);
        methodConfig.setReturn(isReturn);
        methodConfig.setService(service);
        methodConfig.setServiceId(serviceId);
        methodConfig.setSticky(sticky);
        methodConfig.setReliable(reliable);
        methodConfig.setStat(stat);
        methodConfig.setRetry(retry);

        return methodConfig;
    }

    @Override
    protected MethodBuilder getThis() {
        return this;
    }
}
| apache-2.0 |
hudsonwoods/hudson_woods | app/scripts/non_cat/in-the-area.js | 25041 |
// Initialize the Leaflet map centered on the Hudson Woods area;
// scroll-wheel zoom is disabled so the page can be scrolled past the map.
var map = L.map('map', {
    center: [41.77197384322616, -74.20966662406921],
    zoom: 10,
    scrollWheelZoom: false
});

// Mapbox-hosted base tile layer.
L.tileLayer('http://a.tiles.mapbox.com/v3/sandersonj.i245n6m6/{z}/{x}/{y}.png', {
    maxZoom: 18,
    id: 'examples.map-20v6611k'
}).addTo(map);

// array to store layers for each feature type
// NOTE(review): never written to below -- presumably left over; confirm.
var mapLayerGroups = [];

// Global popup options shared by every activity marker.
var popupOptions = {
    maxWidth: 400,
    keepInView: true,
    closeButton: true,
    autoPanPadding: [30, 30]
};
// Create Activity Specific Panes For Filtering
var pane1 = map.createPane('itaSleep ita');
var pane2 = map.createPane('itaEat ita');
var pane3 = map.createPane('itaSip ita');
var pane4 = map.createPane('itaSwing ita');
var pane5 = map.createPane('itaSwim ita');
var pane6 = map.createPane('itaTrail ita');
var pane7 = map.createPane('itaSki ita');
var pane8 = map.createPane('itaCulture ita');
var pane9 = map.createPane('itaPick ita');
var pane10 = map.createPane('itaMind ita');
var pane11 = map.createPane('itaHW ita');
// Activate Each Set of Activities
// --- Hudson Woods home marker --------------------------------------------
var hwIcon = L.icon({
    iconUrl: 'assets/img/in-the-area/icons/hw_logo.png',
    iconSize: [28, 40], // size of the icon
    iconAnchor: [14, 40], // point of the icon which will correspond to marker's location
});

function highlightFeaturehw(e) {
    var layer = e.target;
    layer.setIcon(hwIcon);
}

// NOTE(review): identical to highlightFeaturehw -- there is only one HW icon,
// so hover in/out is a visual no-op. Confirm whether a gray variant was meant.
function resetHighlighthw(e) {
    var layer = e.target;
    layer.setIcon(hwIcon);
}

// Fixed: a duplicate declaration of resetMapSleep used to live here; it was
// byte-for-byte identical to the one defined in the sleep section of this
// file, and since function declarations are hoisted the later one won anyway.
// NOTE(review): the HW popupclose handler below reuses the sleep reset
// (fits itaSleep bounds) -- confirm it should not fit hwBounds instead.

function onEachFeaturehw(feature, layer) {
    var popupContent =
        "<div class='card area " + feature.properties.classType + "'" + ">" +
        "<h2>" + feature.properties.activityTitle + "</h2>" +
        "<img src=" + feature.properties.image + ">" +
        "<p>" + feature.properties.activityDescription + "</p>" +
        "<a target=_blank href=" + feature.properties.website + ">" + feature.properties.website + "</a>" +
        "</div>";
    // NOTE(review): appending the whole properties object coerces it to a
    // string ("[object Object]") -- confirm intent.
    if (feature.properties && feature.properties.popupContent) {
        popupContent += feature.properties;
    }
    layer.bindPopup(popupContent, popupOptions);
    layer.on({
        mouseover: highlightFeaturehw,
        mouseout: resetHighlighthw,
        popupclose: resetMapSleep
    });
}

var itahw = L.geoJson([itaHW], {
    style: function (feature) {
        return feature.properties;
    },
    pointToLayer: function (feature, latlng) {
        return L.marker(latlng, {icon: hwIcon});
    },
    onEachFeature: onEachFeaturehw,
});
var hwBounds = itahw.getBounds();
itahw.addTo(map);
var graySleepIcon = L.icon({
iconUrl: 'assets/img/in-the-area/icons/gray_sleep.png',
iconSize: [28, 40], // size of the icon
iconAnchor: [14, 40], // point of the icon which will correspond to marker's location
popupAnchor: [0, 10] // point from which the popup should open relative to the iconAnchor
});
var colorSleepIcon = L.icon({
iconUrl: 'assets/img/in-the-area/icons/color_sleep.png',
iconSize: [28, 40], // size of the icon
iconAnchor: [14, 40], // point of the icon which will correspond to marker's location
popupAnchor: [0, 10] // point from which the popup should open relative to the iconAnchor
});
function highlightFeatureSleep(e) {
var layer = e.target;
layer.setIcon(colorSleepIcon);
}
function resetHighlightSleep(e) {
var layer = e.target;
layer.setIcon(graySleepIcon);
}
function resetMapSleep(e) {
map.fitBounds(itaSleep,{
padding: [50,50]
});
}
function onEachFeatureSleep(feature, layer) {
var popupContent =
"<div class='card area " + feature.properties.classType + "'" + ">" +
"<h4>" + "<img src=" + feature.properties.classIcon + ">" + feature.properties.activity + "</h4>" +
"<h2>" + feature.properties.activityTitle + "</h2>" +
"<img src=" + feature.properties.image + ">" +
"<p>" + feature.properties.activityDescription + "</p>" +
"<a target=_blank href=" + feature.properties.website + ">" + feature.properties.website + "</a>" +
"</div>";
if (feature.properties && feature.properties.popupContent) {
popupContent += feature.properties;
}
layer.bindPopup(popupContent,popupOptions);
// layer.setIcon(graySleepIcon);
layer.on({
mouseover: highlightFeatureSleep,
mouseout: resetHighlightSleep,
popupclose: resetMapSleep
});
// map.on({click: resetHighlightSleep});
}
var itaSleep = L.geoJson([itaSleep], {
style: function (feature) {
return feature.properties;
},
pointToLayer: function (feature, latlng) {
return L.marker(latlng, {pane: 'itaSleep ita', icon: graySleepIcon});
},
onEachFeature: onEachFeatureSleep,
});
// var sleepBounds = itaSleep.getBounds();
itaSleep.addTo(map);
var grayCultureIcon = L.icon({
iconUrl: 'assets/img/in-the-area/icons/gray_culture.png',
iconSize: [28, 40], // size of the icon
iconAnchor: [14, 40], // point of the icon which will correspond to marker's location
popupAnchor: [0, 10] // point from which the popup should open relative to the iconAnchor
});
var colorCultureIcon = L.icon({
iconUrl: 'assets/img/in-the-area/icons/color_culture.png',
iconSize: [28, 40], // size of the icon
iconAnchor: [14, 40], // point of the icon which will correspond to marker's location
popupAnchor: [0, 10] // point from which the popup should open relative to the iconAnchor
});
function highlightFeatureCulture(e) {
var layer = e.target;
layer.setIcon(colorCultureIcon);
}
function resetHighlightCulture(e) {
var layer = e.target;
layer.setIcon(grayCultureIcon);
}
function resetMapCulture(e) {
map.fitBounds(itaCulture,{
padding: [50,50]
});
}
function onEachFeatureCulture(feature, layer) {
var popupContent =
"<div class='card area " + feature.properties.classType + "'" + ">" +
"<h4>" + "<img src=" + feature.properties.classIcon + ">" + feature.properties.activity + "</h4>" +
"<h2>" + feature.properties.activityTitle + "</h2>" +
"<img src=" + feature.properties.image + ">" +
"<p>" + feature.properties.activityDescription + "</p>" +
"<a target=_blank href=" + feature.properties.website + ">" + feature.properties.website + "</a>" +
"</div>";
layer.bindPopup(popupContent,popupOptions);
// layer.setIcon(grayCultureIcon);
layer.on({
mouseover: highlightFeatureCulture,
mouseout: resetHighlightCulture,
popupclose: resetMapCulture
});
// map.on({click: resetHighlightCulture});
}
var itaCulture = L.geoJson([itaCulture], {
style: function (feature) {
return feature.properties;
},
pointToLayer: function (feature, latlng) {
return L.marker(latlng, {pane: 'itaCulture ita', icon: grayCultureIcon});
},
onEachFeature: onEachFeatureCulture
});
var CultureBounds = itaCulture.getBounds();
itaCulture.addTo(map);
var grayEatIcon = L.icon({
iconUrl: 'assets/img/in-the-area/icons/gray_eat.png',
iconSize: [28, 40], // size of the icon
iconAnchor: [14, 40], // point of the icon which will correspond to marker's location
popupAnchor: [0, 10] // point from which the popup should open relative to the iconAnchor
});
var colorEatIcon = L.icon({
iconUrl: 'assets/img/in-the-area/icons/color_eat.png',
iconSize: [28, 40], // size of the icon
iconAnchor: [14, 40], // point of the icon which will correspond to marker's location
popupAnchor: [0, 10] // point from which the popup should open relative to the iconAnchor
});
function highlightFeatureEat(e) {
var layer = e.target;
layer.setIcon(colorEatIcon);
}
function resetHighlightEat(e) {
var layer = e.target;
layer.setIcon(grayEatIcon);
}
function resetMapEat(e) {
map.fitBounds(itaEat,{
padding: [50,50]
});
}
function onEachFeatureEat(feature, layer) {
var popupContent =
"<div class='card area " + feature.properties.classType + "'" + ">" +
"<h4>" + "<img src=" + feature.properties.classIcon + ">" + feature.properties.activity + "</h4>" +
"<h2>" + feature.properties.activityTitle + "</h2>" +
"<img src=" + feature.properties.image + ">" +
"<p>" + feature.properties.activityDescription + "</p>" +
"<a target=_blank href=" + feature.properties.website + ">" + feature.properties.website + "</a>" +
"</div>";
layer.bindPopup(popupContent,popupOptions);
// layer.setIcon(grayEatIcon);
layer.on({
mouseover: highlightFeatureEat,
mouseout: resetHighlightEat,
popupclose: resetMapEat
});
// map.on({click: resetHighlightEat});
}
var itaEat = L.geoJson([itaEat], {
style: function (feature) {
return feature.properties;
},
pointToLayer: function (feature, latlng) {
return L.marker(latlng, {pane: 'itaEat ita', icon: grayEatIcon});
},
onEachFeature: onEachFeatureEat
});
var EatBounds = itaEat.getBounds();
itaEat.addTo(map);
var grayMindIcon = L.icon({
iconUrl: 'assets/img/in-the-area/icons/gray_mind-body.png',
iconSize: [28, 40], // size of the icon
iconAnchor: [14, 40], // point of the icon which will correspond to marker's location
popupAnchor: [0, 10] // point from which the popup should open relative to the iconAnchor
});
var colorMindIcon = L.icon({
iconUrl: 'assets/img/in-the-area/icons/color_mind-body.png',
iconSize: [28, 40], // size of the icon
iconAnchor: [14, 40], // point of the icon which will correspond to marker's location
popupAnchor: [0, 10] // point from which the popup should open relative to the iconAnchor
});
function highlightFeatureMind(e) {
var layer = e.target;
layer.setIcon(colorMindIcon);
}
function resetHighlightMind(e) {
var layer = e.target;
layer.setIcon(grayMindIcon);
}
function resetMapMind(e) {
map.fitBounds(itaMind,{
padding: [50,50]
});
}
function onEachFeatureMind(feature, layer) {
var popupContent =
"<div class='card area " + feature.properties.classType + "'" + ">" +
"<h4>" + "<img src=" + feature.properties.classIcon + ">" + feature.properties.activity + "</h4>" +
"<h2>" + feature.properties.activityTitle + "</h2>" +
"<img src=" + feature.properties.image + ">" +
"<p>" + feature.properties.activityDescription + "</p>" +
"<a target=_blank href=" + feature.properties.website + ">" + feature.properties.website + "</a>" +
"</div>";
layer.bindPopup(popupContent,popupOptions);
// layer.setIcon(grayMindIcon);
layer.on({
mouseover: highlightFeatureMind,
mouseout: resetHighlightMind,
popupclose: resetMapMind
});
// map.on({click: resetHighlightMind});
}
var itaMind = L.geoJson([itaMind], {
style: function (feature) {
return feature.properties;
},
pointToLayer: function (feature, latlng) {
return L.marker(latlng, {pane: 'itaMind ita', icon: grayMindIcon});
},
onEachFeature: onEachFeatureMind
});
var MindBounds = itaMind.getBounds();
itaMind.addTo(map);
var grayPickIcon = L.icon({
iconUrl: 'assets/img/in-the-area/icons/gray_pick-grow.png',
iconSize: [28, 40], // size of the icon
iconAnchor: [14, 40], // point of the icon which will correspond to marker's location
popupAnchor: [0, 10] // point from which the popup should open relative to the iconAnchor
});
var colorPickIcon = L.icon({
iconUrl: 'assets/img/in-the-area/icons/color_pick-grow.png',
iconSize: [28, 40], // size of the icon
iconAnchor: [14, 40], // point of the icon which will correspond to marker's location
popupAnchor: [0, 10] // point from which the popup should open relative to the iconAnchor
});
function highlightFeaturePick(e) {
var layer = e.target;
layer.setIcon(colorPickIcon);
}
function resetHighlightPick(e) {
var layer = e.target;
layer.setIcon(grayPickIcon);
}
function resetMapPick(e) {
map.fitBounds(itaPick,{
padding: [50,50]
});
}
function onEachFeaturePick(feature, layer) {
var popupContent =
"<div class='card area " + feature.properties.classType + "'" + ">" +
"<h4>" + "<img src=" + feature.properties.classIcon + ">" + feature.properties.activity + "</h4>" +
"<h2>" + feature.properties.activityTitle + "</h2>" +
"<img src=" + feature.properties.image + ">" +
"<p>" + feature.properties.activityDescription + "</p>" +
"<a target=_blank href=" + feature.properties.website + ">" + feature.properties.website + "</a>" +
"</div>";
layer.bindPopup(popupContent,popupOptions);
// layer.setIcon(grayPickIcon);
layer.on({
mouseover: highlightFeaturePick,
mouseout: resetHighlightPick,
popupclose: resetMapPick
});
// map.on({click: resetHighlightPick});
}
var itaPick = L.geoJson([itaPick], {
style: function (feature) {
return feature.properties;
},
pointToLayer: function (feature, latlng) {
return L.marker(latlng, {pane: 'itaPick ita', icon: grayPickIcon});
},
onEachFeature: onEachFeaturePick
});
var PickBounds = itaPick.getBounds();
itaPick.addTo(map);
// --- "Sip" (drinks) activity layer ---------------------------------------
var graySipIcon = L.icon({
    iconUrl: 'assets/img/in-the-area/icons/gray_sip.png',
    iconSize: [28, 40],   // size of the icon
    iconAnchor: [14, 40], // point of the icon which corresponds to the marker's location
    popupAnchor: [0, 10]  // point from which the popup opens, relative to iconAnchor
});
var colorSipIcon = L.icon({
    iconUrl: 'assets/img/in-the-area/icons/color_sip.png',
    iconSize: [28, 40],   // size of the icon
    iconAnchor: [14, 40], // point of the icon which corresponds to the marker's location
    popupAnchor: [0, 10]  // point from which the popup opens, relative to iconAnchor
});
// Swap the marker to its colored icon while hovered.
function highlightFeatureSip(e) {
    var layer = e.target;
    layer.setIcon(colorSipIcon);
}
// Restore the default gray icon when the pointer leaves the marker.
function resetHighlightSip(e) {
    var layer = e.target;
    layer.setIcon(graySipIcon);
}
// Zoom back out to fit every "sip" marker once its popup closes.
function resetMapSip(e) {
    // Bug fix: fitBounds() requires a LatLngBounds, not a layer object.
    map.fitBounds(itaSip.getBounds(), {
        padding: [50, 50]
    });
}
// Build the popup card for one feature and wire its interaction events.
function onEachFeatureSip(feature, layer) {
    var popupContent =
        "<div class='card area " + feature.properties.classType + "'" + ">" +
        "<h4>" + "<img src=" + feature.properties.classIcon + ">" + feature.properties.activity + "</h4>" +
        "<h2>" + feature.properties.activityTitle + "</h2>" +
        "<img src=" + feature.properties.image + ">" +
        "<p>" + feature.properties.activityDescription + "</p>" +
        "<a target=_blank href=" + feature.properties.website + ">" + feature.properties.website + "</a>" +
        "</div>";
    layer.bindPopup(popupContent, popupOptions);
    layer.on({
        mouseover: highlightFeatureSip,
        mouseout: resetHighlightSip,
        popupclose: resetMapSip
    });
}
// Wrap the raw GeoJSON data (same global name) in a Leaflet layer.
var itaSip = L.geoJson([itaSip], {
    style: function (feature) {
        return feature.properties;
    },
    pointToLayer: function (feature, latlng) {
        return L.marker(latlng, {pane: 'itaSip ita', icon: graySipIcon});
    },
    onEachFeature: onEachFeatureSip
});
var SipBounds = itaSip.getBounds();
itaSip.addTo(map);
// --- "Ski" activity layer ------------------------------------------------
var graySkiIcon = L.icon({
    iconUrl: 'assets/img/in-the-area/icons/gray_ski.png',
    iconSize: [28, 40],   // size of the icon
    iconAnchor: [14, 40], // point of the icon which corresponds to the marker's location
    popupAnchor: [0, 10]  // point from which the popup opens, relative to iconAnchor
});
var colorSkiIcon = L.icon({
    iconUrl: 'assets/img/in-the-area/icons/color_ski.png',
    iconSize: [28, 40],   // size of the icon
    iconAnchor: [14, 40], // point of the icon which corresponds to the marker's location
    popupAnchor: [0, 10]  // point from which the popup opens, relative to iconAnchor
});
// Swap the marker to its colored icon while hovered.
function highlightFeatureSki(e) {
    var layer = e.target;
    layer.setIcon(colorSkiIcon);
}
// Restore the default gray icon when the pointer leaves the marker.
function resetHighlightSki(e) {
    var layer = e.target;
    layer.setIcon(graySkiIcon);
}
// Zoom back out to fit every "ski" marker once its popup closes.
function resetMapSki(e) {
    // Bug fix: fitBounds() requires a LatLngBounds, not a layer object.
    map.fitBounds(itaSki.getBounds(), {
        padding: [50, 50]
    });
}
// Build the popup card for one feature and wire its interaction events.
function onEachFeatureSki(feature, layer) {
    var popupContent =
        "<div class='card area " + feature.properties.classType + "'" + ">" +
        "<h4>" + "<img src=" + feature.properties.classIcon + ">" + feature.properties.activity + "</h4>" +
        "<h2>" + feature.properties.activityTitle + "</h2>" +
        "<img src=" + feature.properties.image + ">" +
        "<p>" + feature.properties.activityDescription + "</p>" +
        "<a target=_blank href=" + feature.properties.website + ">" + feature.properties.website + "</a>" +
        "</div>";
    layer.bindPopup(popupContent, popupOptions);
    layer.on({
        mouseover: highlightFeatureSki,
        mouseout: resetHighlightSki,
        popupclose: resetMapSki
    });
}
// Wrap the raw GeoJSON data (same global name) in a Leaflet layer.
var itaSki = L.geoJson([itaSki], {
    style: function (feature) {
        return feature.properties;
    },
    pointToLayer: function (feature, latlng) {
        return L.marker(latlng, {pane: 'itaSki ita', icon: graySkiIcon});
    },
    onEachFeature: onEachFeatureSki
});
var SkiBounds = itaSki.getBounds();
itaSki.addTo(map);
// --- "Swim" activity layer -----------------------------------------------
var graySwimIcon = L.icon({
    iconUrl: 'assets/img/in-the-area/icons/gray_swim.png',
    iconSize: [28, 40],   // size of the icon
    iconAnchor: [14, 40], // point of the icon which corresponds to the marker's location
    popupAnchor: [0, 10]  // point from which the popup opens, relative to iconAnchor
});
var colorSwimIcon = L.icon({
    iconUrl: 'assets/img/in-the-area/icons/color_swim.png',
    iconSize: [28, 40],   // size of the icon
    iconAnchor: [14, 40], // point of the icon which corresponds to the marker's location
    popupAnchor: [0, 10]  // point from which the popup opens, relative to iconAnchor
});
// Swap the marker to its colored icon while hovered.
function highlightFeatureSwim(e) {
    var layer = e.target;
    layer.setIcon(colorSwimIcon);
}
// Restore the default gray icon when the pointer leaves the marker.
function resetHighlightSwim(e) {
    var layer = e.target;
    layer.setIcon(graySwimIcon);
}
// Zoom back out to fit every "swim" marker once its popup closes.
function resetMapSwim(e) {
    // Bug fix: fitBounds() requires a LatLngBounds, not a layer object.
    map.fitBounds(itaSwim.getBounds(), {
        padding: [50, 50]
    });
}
// Build the popup card for one feature and wire its interaction events.
function onEachFeatureSwim(feature, layer) {
    var popupContent =
        "<div class='card area " + feature.properties.classType + "'" + ">" +
        "<h4>" + "<img src=" + feature.properties.classIcon + ">" + feature.properties.activity + "</h4>" +
        "<h2>" + feature.properties.activityTitle + "</h2>" +
        "<img src=" + feature.properties.image + ">" +
        "<p>" + feature.properties.activityDescription + "</p>" +
        "<a target=_blank href=" + feature.properties.website + ">" + feature.properties.website + "</a>" +
        "</div>";
    layer.bindPopup(popupContent, popupOptions);
    layer.on({
        mouseover: highlightFeatureSwim,
        mouseout: resetHighlightSwim,
        popupclose: resetMapSwim
    });
}
// Wrap the raw GeoJSON data (same global name) in a Leaflet layer.
var itaSwim = L.geoJson([itaSwim], {
    style: function (feature) {
        return feature.properties;
    },
    pointToLayer: function (feature, latlng) {
        return L.marker(latlng, {pane: 'itaSwim ita', icon: graySwimIcon});
    },
    onEachFeature: onEachFeatureSwim
});
var SwimBounds = itaSwim.getBounds();
itaSwim.addTo(map);
// --- "Swing" (golf) activity layer ---------------------------------------
var graySwingIcon = L.icon({
    iconUrl: 'assets/img/in-the-area/icons/gray_swing.png',
    iconSize: [28, 40],   // size of the icon
    iconAnchor: [14, 40], // point of the icon which corresponds to the marker's location
    popupAnchor: [0, 10]  // point from which the popup opens, relative to iconAnchor
});
var colorSwingIcon = L.icon({
    iconUrl: 'assets/img/in-the-area/icons/color_swing.png',
    iconSize: [28, 40],   // size of the icon
    iconAnchor: [14, 40], // point of the icon which corresponds to the marker's location
    popupAnchor: [0, 10]  // point from which the popup opens, relative to iconAnchor
});
// Swap the marker to its colored icon while hovered.
function highlightFeatureSwing(e) {
    var layer = e.target;
    layer.setIcon(colorSwingIcon);
}
// Restore the default gray icon when the pointer leaves the marker.
function resetHighlightSwing(e) {
    var layer = e.target;
    layer.setIcon(graySwingIcon);
}
// Zoom back out to fit every "swing" marker once its popup closes.
function resetMapSwing(e) {
    // Bug fix: fitBounds() requires a LatLngBounds, not a layer object.
    map.fitBounds(itaSwing.getBounds(), {
        padding: [50, 50]
    });
}
// Build the popup card for one feature and wire its interaction events.
function onEachFeatureSwing(feature, layer) {
    var popupContent =
        "<div class='card area " + feature.properties.classType + "'" + ">" +
        "<h4>" + "<img src=" + feature.properties.classIcon + ">" + feature.properties.activity + "</h4>" +
        "<h2>" + feature.properties.activityTitle + "</h2>" +
        "<img src=" + feature.properties.image + ">" +
        "<p>" + feature.properties.activityDescription + "</p>" +
        "<a target=_blank href=" + feature.properties.website + ">" + feature.properties.website + "</a>" +
        "</div>";
    layer.bindPopup(popupContent, popupOptions);
    layer.on({
        mouseover: highlightFeatureSwing,
        mouseout: resetHighlightSwing,
        popupclose: resetMapSwing
    });
}
// Wrap the raw GeoJSON data (same global name) in a Leaflet layer.
var itaSwing = L.geoJson([itaSwing], {
    style: function (feature) {
        return feature.properties;
    },
    pointToLayer: function (feature, latlng) {
        return L.marker(latlng, {pane: 'itaSwing ita', icon: graySwingIcon});
    },
    onEachFeature: onEachFeatureSwing
});
var SwingBounds = itaSwing.getBounds();
itaSwing.addTo(map);
// --- "Trails" activity layer ---------------------------------------------
var grayTrailIcon = L.icon({
    iconUrl: 'assets/img/in-the-area/icons/gray_trail.png',
    iconSize: [28, 40],   // size of the icon
    iconAnchor: [14, 40], // point of the icon which corresponds to the marker's location
    popupAnchor: [0, 10]  // point from which the popup opens, relative to iconAnchor
});
var colorTrailIcon = L.icon({
    iconUrl: 'assets/img/in-the-area/icons/color_trail.png',
    iconSize: [28, 40],   // size of the icon
    iconAnchor: [14, 40], // point of the icon which corresponds to the marker's location
    popupAnchor: [0, 10]  // point from which the popup opens, relative to iconAnchor
});
// Swap the marker to its colored icon while hovered.
function highlightFeatureTrail(e) {
    var layer = e.target;
    layer.setIcon(colorTrailIcon);
}
// Restore the default gray icon when the pointer leaves the marker.
function resetHighlightTrail(e) {
    var layer = e.target;
    layer.setIcon(grayTrailIcon);
}
// Zoom back out to fit every "trail" marker once its popup closes.
function resetMapTrail(e) {
    // Bug fix: fitBounds() requires a LatLngBounds, not a layer object.
    map.fitBounds(itaTrail.getBounds(), {
        padding: [50, 50]
    });
}
// Build the popup card for one feature and wire its interaction events.
function onEachFeatureTrail(feature, layer) {
    var popupContent =
        "<div class='card area " + feature.properties.classType + "'" + ">" +
        "<h4>" + "<img src=" + feature.properties.classIcon + ">" + feature.properties.activity + "</h4>" +
        "<h2>" + feature.properties.activityTitle + "</h2>" +
        "<img src=" + feature.properties.image + ">" +
        "<p>" + feature.properties.activityDescription + "</p>" +
        "<a target=_blank href=" + feature.properties.website + ">" + feature.properties.website + "</a>" +
        "</div>";
    layer.bindPopup(popupContent, popupOptions);
    layer.on({
        mouseover: highlightFeatureTrail,
        mouseout: resetHighlightTrail,
        popupclose: resetMapTrail
    });
}
// Wrap the raw GeoJSON data (same global name) in a Leaflet layer.
var itaTrail = L.geoJson([itaTrail], {
    style: function (feature) {
        return feature.properties;
    },
    pointToLayer: function (feature, latlng) {
        return L.marker(latlng, {pane: 'itaTrail ita', icon: grayTrailIcon});
    },
    onEachFeature: onEachFeatureTrail
});
var TrailBounds = itaTrail.getBounds();
itaTrail.addTo(map);
// --- Activity filter buttons ---------------------------------------------
// Initial state: every marker pane visible, "all" filter highlighted.
$('.ita-pane').addClass('active');
$('#all').addClass('active');
// Any filter click clears the current pane/button state and closes the open
// popup before the specific handler below applies the new filter.
$('.markers').click(function() {
    $('.ita-pane').removeClass('active');
    $('.markers').removeClass('active');
    $(this).addClass('active');
    map.closePopup();
});
// "All" re-enables every pane and recenters on the default regional view.
$('#all').click(function() {
    $('.ita-pane').addClass('active');
    map.setView(new L.LatLng(41.77197384322616, -74.20966662406921), 10);
});
// Each activity filter shows its pane and zooms to that layer's markers.
// Bug fix throughout: fitBounds() requires a LatLngBounds object, so the
// bounds are derived from each GeoJSON layer instead of passing the layer
// itself (which Leaflet rejects as invalid bounds).
$('#sleep').click(function() {
    $('.leaflet-itaSleep').addClass('active');
    map.fitBounds(itaSleep.getBounds(), {
        padding: [50, 50]
    });
});
$('#eat').click(function() {
    $('.leaflet-itaEat').addClass('active');
    map.fitBounds(itaEat.getBounds(), {
        padding: [50, 50]
    });
});
$('#sip').click(function() {
    $('.leaflet-itaSip').addClass('active');
    map.fitBounds(itaSip.getBounds(), {
        padding: [50, 50]
    });
});
$('#culture').click(function() {
    $('.leaflet-itaCulture').addClass('active');
    map.fitBounds(itaCulture.getBounds(), {
        padding: [50, 50]
    });
});
$('#swing').click(function() {
    $('.leaflet-itaSwing').addClass('active');
    map.fitBounds(itaSwing.getBounds(), {
        padding: [50, 50]
    });
});
$('#ski').click(function() {
    $('.leaflet-itaSki').addClass('active');
    map.fitBounds(itaSki.getBounds(), {
        padding: [50, 50]
    });
});
$('#swim').click(function() {
    $('.leaflet-itaSwim').addClass('active');
    map.fitBounds(itaSwim.getBounds(), {
        padding: [50, 50]
    });
});
$('#pick-and-grow').click(function() {
    $('.leaflet-itaPick').addClass('active');
    map.fitBounds(itaPick.getBounds(), {
        padding: [50, 50]
    });
});
$('#trails').click(function() {
    $('.leaflet-itaTrail').addClass('active');
    map.fitBounds(itaTrail.getBounds(), {
        padding: [50, 50]
    });
});
$('#mind-and-body').click(function() {
    $('.leaflet-itaMind').addClass('active');
    map.fitBounds(itaMind.getBounds(), {
        padding: [50, 50]
    });
});
| apache-2.0 |
johnluetke/StatTracker | src/BlueHerons/StatTracker/Agent.php | 23217 | <?php
namespace BlueHerons\StatTracker;
use StdClass;
use DateTime;
use Exception;
use BlueHerons\StatTracker\StatTracker;
class Agent {

    // Publicly visible profile fields.
    public $name;
    public $token;
    public $faction;
    public $level;
    public $stats;

    // Lazily-populated caches. Declared explicitly (instead of being created
    // dynamically) so the class is clean under PHP 8.2's dynamic-property
    // deprecation and the cache lifecycle is documented in one place.
    public $tokens;
    public $has_submitted;
    public $update_time;
    public $badges;
    public $ratios;
    public $upcoming_badges;

    const TOKEN_WEB = "WebApp";

    /**
     * Returns the registered Agent for the given email address. If no agent is found, a generic
     * Agent object is returned.
     *
     * @param string $email_address
     *
     * @return Agent Agent object
     */
    public static function lookupAgentName($email_address) {
        $stmt = StatTracker::db()->prepare("SELECT agent, faction FROM Agent WHERE email = ?;");
        $stmt->execute(array($email_address));
        extract($stmt->fetch());
        $stmt->closeCursor();

        if (empty($agent)) {
            return new Agent();
        }
        else {
            $agent = new Agent($agent);
            $agent->faction = $faction;

            // Bug fix: if no active web token exists, fetch() returns no row and
            // extract() leaves $token unset; initialize it so the check below is safe.
            $token = null;
            $stmt = StatTracker::db()->prepare("SELECT token FROM Tokens WHERE agent = ? AND name = ? AND revoked = ?;");
            $stmt->execute(array($agent->name, Agent::TOKEN_WEB, 0));
            extract($stmt->fetch());
            $stmt->closeCursor();

            if ($token !== null) {
                $agent = new Agent($agent->name, $token);
                $agent->faction = $faction;
            }

            return $agent;
        }
    }

    /**
     * Returns the Agent owning the given (non-revoked) access token, updating the
     * token's last-used timestamp. A generic Agent is returned for unknown tokens.
     *
     * @param string $token access token
     *
     * @return Agent Agent object
     */
    public static function lookupAgentByToken($token) {
        $stmt = StatTracker::db()->prepare("SELECT a.agent, a.faction FROM Agent a JOIN Tokens t ON t.agent = a.agent WHERE t.token = ? AND t.revoked = ?;");
        $stmt->execute(array($token, 0));
        extract($stmt->fetch());
        $stmt->closeCursor();

        if (empty($agent)) {
            return new Agent();
        }
        else {
            // Record token usage for auditing.
            $stmt = StatTracker::db()->prepare("UPDATE Tokens SET last_used = NOW() WHERE token = ?;");
            $stmt->execute(array($token));
            $stmt->closeCursor();

            $agent = new Agent($agent, $token);
            $agent->faction = $faction;
            return $agent;
        }
    }

    /**
     * Constructs a new Agent object for the given agent name. This object will include all information
     * publicly visible from the "Agent Profile" screen in Ingress: Agent name, AP, and badges earned.
     *
     * @param string $agent the name of the agent
     * @param string $token access token, if known
     *
     * @throws Exception if agent name is not a string.
     */
    public function __construct($agent = "Agent", $token = null) {
        if (!is_string($agent)) {
            throw new Exception("Agent name must be a string");
        }

        $this->name = $agent;
        $this->token = $token;

        // Pre-warm commonly used values for valid (named + tokened) agents.
        if ($this->isValid()) {
            $this->getLevel();
            $this->hasSubmitted();
            $this->getStat('ap');
            $this->getUpdateTimestamp();
            $this->getTokens();
        }
    }

    /**
     * Determines if a valid name and token have been set for this agent.
     *
     * @return boolean true if agent is valid, false otherwise
     */
    public function isValid() {
        return $this->name != "Agent" && !empty($this->token);
    }

    /**
     * Generates a breakdown of AP earned by stat.
     *
     * @param int $days_back Days before the most recent submission considered for the breakdown
     *
     * @return array "data" => [name, ap] pairs, "slice_colors" => matching chart colors
     */
    public function getAPBreakdown($days_back = 0) {
        $stmt = StatTracker::db()->prepare("CALL GetAPBreakdown(?, ?);");
        $stmt->execute(array($this->name, $days_back));
        $stmt->closeCursor();

        $stmt = StatTracker::db()->query("SELECT * FROM APBreakdown ORDER BY grouping, sequence ASC;");
        $data = array();
        $colors = array();
        // TODO: Numbers only!
        while ($row = $stmt->fetch()) {
            $data[] = array($row['name'], $row['ap_gained']);
            // Grouping 1/3 are faction-colored slices; everything else is neutral gray.
            if ($row['grouping'] == 1) {
                $color = $this->faction == "R" ? ENL_GREEN : RES_BLUE;
            }
            else if ($row['grouping'] == 3) {
                $color = $this->faction == "R" ? RES_BLUE : ENL_GREEN;
            }
            else {
                $color = "#999";
            }
            $colors[] = $color;
        }
        $stmt->closeCursor();

        return array("data" => $data, "slice_colors" => $colors);
    }

    /**
     * Returns the access token this Agent object was authenticated with.
     */
    public function getToken() {
        return $this->token;
    }

    /**
     * Gets the names of the active access tokens associated with this agent.
     *
     * @param boolean $refresh Refresh the cached list of access tokens
     *
     * @return array token names
     */
    public function getTokens($refresh = false) {
        if (!isset($this->tokens) || $refresh) {
            $stmt = StatTracker::db()->prepare("SELECT name FROM Tokens WHERE agent = ? AND revoked = ?;");
            $stmt->execute(array($this->name, 0));

            $tokens = array();
            while ($row = $stmt->fetch()) {
                extract($row);
                $tokens[] = $name;
            }

            $this->tokens = $tokens;
        }
        return $this->tokens;
    }

    /**
     * Creates a new access token. The token is returned once from this method, it cannot be retrieved again.
     *
     * @param string $name token name (stored upper-cased)
     *
     * @return string|false the token if a new one was created, false if the name already exists
     */
    public function createToken($name) {
        if (!in_array($name, $this->getTokens())) {
            $stmt = StatTracker::db()->prepare("INSERT INTO Tokens (agent, name, token) VALUES(?, UCASE(?), SHA2(CONCAT(?, ?, UUID()), 256));");
            $stmt->execute(array($this->name, $name, $this->name, $name));

            // A token is returned only when it is created
            $stmt = StatTracker::db()->prepare("SELECT token FROM Tokens WHERE agent = ? AND name = UCASE(?) AND revoked = ?");
            $stmt->execute(array($this->name, $name, 0));
            extract($stmt->fetch());

            return $token;
        }
        return false;
    }

    /**
     * Revokes the named token. If the web token is revoked, a new one will be generated automatically.
     *
     * @param string $name token name
     *
     * @return boolean true if a token was revoked, false otherwise
     */
    public function revokeToken($name) {
        if (in_array($name, $this->getTokens())) {
            // Rename on revoke so the name can be reused for a future token.
            $stmt = StatTracker::db()->prepare("UPDATE Tokens SET revoked = ?, name = CONCAT(name, '-', UNIX_TIMESTAMP(NOW())) WHERE agent = ? and name = UCASE(?)");
            $stmt->execute(array(1, $this->name, $name));

            // Web token is special. If it was revoked, another one needs to be created
            if (strtoupper($name) == Agent::TOKEN_WEB) {
                $this->getTokens(true);
                $this->createToken(strtoupper($name));
            }

            return true;
        }
        return false;
    }

    /**
     * Generates data for use in a line graph.
     *
     * @param string $stat the stat to generate the data for
     *
     * @return StdClass with "data" (series list) and "prediction" members
     */
    public function getGraphData($stat) {
        $stmt = StatTracker::db()->prepare("CALL GetGraphForStat(?, ?);");
        $stmt->execute(array($this->name, $stat));

        $stmt = StatTracker::db()->query("SELECT * FROM GraphDataForStat;");
        $data = array();
        while ($row = $stmt->fetch()) {
            // First row establishes one series per result column.
            if (sizeof($data) == 0) {
                foreach (array_keys($row) as $key) {
                    $series = new StdClass();
                    $series->name = $key;
                    $series->data = array();
                    $data[] = $series;
                }
            }
            $i = 0;
            foreach (array_values($row) as $value) {
                $data[$i]->data[] = $value;
                $i++;
            }
        }
        $stmt->closeCursor();

        $response = new StdClass();
        $response->data = $data;
        $response->prediction = $this->getPrediction($stat); // TODO: move elsewhere
        return $response;
    }

    /**
     * Gets the current level for the Agent. Considers AP and badges.
     *
     * @param string $date "latest" or a "Y-m-d" date
     *
     * @return int current Agent level
     */
    public function getLevel($date = "latest") {
        if (!isset($this->level)) {
            if ($date == "latest") {
                $date = date("Y-m-d");
            }

            $stmt = StatTracker::db()->prepare("CALL GetLevel(?, ?);");
            $stmt->execute(array($this->name, $date));
            $stmt->closeCursor();

            $stmt = StatTracker::db()->query("SELECT level FROM _Level;");
            extract($stmt->fetch());
            $stmt->closeCursor();

            $this->level = $level;
        }
        return $this->level;
    }

    /**
     * Gets daily trend data (target vs actual) for a stat over a week window.
     *
     * @param string $stat stat key
     * @param string $when "last-week", "this-week"/"weekly" (default)
     *
     * @return array keys "dates", "target", "value", each a parallel array
     */
    public function getTrend($stat, $when) {
        $start = "";
        $end = "";
        switch ($when) {
            case "last-week":
                $start = date("Y-m-d", strtotime("last monday", strtotime("6 days ago")));
                $end = date("Y-m-d", strtotime("next sunday", strtotime("8 days ago")));
                break;
            case "this-week":
            case "weekly":
            default:
                $start = date("Y-m-d", strtotime("last monday", strtotime("tomorrow")));
                $end = date("Y-m-d", strtotime("next sunday", strtotime("yesterday")));
                break;
        }

        $stmt = StatTracker::db()->prepare("CALL GetDailyTrend(?, ?, ?, ?);");
        $stmt->execute(array($this->name, $stat, $start, $end));
        $stmt->closeCursor();

        $stmt = StatTracker::db()->query("SELECT * FROM DailyTrend");
        $data = array();
        while ($row = $stmt->fetch()) {
            $data["dates"][] = $row["date"];
            $data["target"][] = $row["target"];
            $data["value"][] = $row["value"];
        }
        $stmt->closeCursor();

        return $data;
    }

    /**
     * Determines if the Agent has submitted to Stat Tracker.
     *
     * @param boolean $refresh refresh the cached value
     */
    public function hasSubmitted($refresh = false) {
        if (!isset($this->has_submitted) || $refresh) {
            $stmt = StatTracker::db()->prepare("SELECT count(stat) > 0 AS result FROM Data WHERE stat = 'ap' AND agent = ?;");
            $stmt->execute(array($this->name));
            extract($stmt->fetch());
            $stmt->closeCursor();
            $this->has_submitted = $result > 0;
        }
        return $this->has_submitted;
    }

    /**
     * Gets the timestamp for which the last update was made for the agent. If $date is provided, the
     * timestamp will be the update for that day.
     *
     * @param string $date "latest" or a "Y-m-d" date (future dates are treated as "latest")
     * @param boolean $refresh refresh the cached value
     */
    public function getUpdateTimestamp($date = "latest", $refresh = false) {
        if (!isset($this->update_time) || $this->update_time == null || $refresh) {
            $stmt = null;
            if ($date == "latest" || new DateTime() < new DateTime($date)) {
                $stmt = StatTracker::db()->prepare("SELECT UNIX_TIMESTAMP(MAX(updated)) `updated` FROM Data WHERE agent = ?");
                $stmt->execute(array($this->name));
            }
            else {
                $stmt = StatTracker::db()->prepare("SELECT UNIX_TIMESTAMP(MAX(updated)) `updated` FROM Data WHERE agent = ? AND date = ?;");
                $stmt->execute(array($this->name, $date));
            }

            extract($stmt->fetch());
            $stmt->closeCursor();
            $this->update_time = $updated;
        }
        return $this->update_time;
    }

    /**
     * Gets the latest date that a submission was made for.
     *
     * @param boolean $refresh whether or not to refresh the cached values
     *
     * @return string date of latest submission
     */
    public function getLatestSubmissionDate($refresh = false) {
        $ts = $this->getUpdateTimestamp("latest", $refresh);
        $stmt = StatTracker::db()->prepare("SELECT date FROM Data WHERE agent = ? and updated = FROM_UNIXTIME(?)");
        $stmt->execute(array($this->name, $ts));
        extract($stmt->fetch());
        $stmt->closeCursor();
        return $date;
    }

    /**
     * Gets the values of all stats.
     *
     * @param string $when "latest" to get the latest stats submitted by the agent, or a date in
     *                     "yyyy-mm-dd" format to retrieve stats on that date
     * @param boolean $refresh whether or not to refresh the cached values
     *
     * @return array values for stats, keyed by stat name
     */
    public function getStats($when = "latest", $refresh = true) {
        if (!is_array($this->stats) || $refresh) {
            if ($when == "latest" || new DateTime() < new DateTime($when)) {
                $when = $this->getLatestSubmissionDate($refresh);
            }

            $stmt = StatTracker::db()->prepare("SELECT stat, value FROM Data WHERE agent = ? AND date = ? ORDER BY stat ASC;");
            $stmt->execute(array($this->name, $when));

            if (!is_array($this->stats) || $refresh) {
                $this->stats = array();
                $this->stats['ap'] = 0;
            }

            while ($row = $stmt->fetch()) {
                extract($row);
                $this->stats[$stat] = $value;
            }
            $stmt->closeCursor();
        }

        return $this->stats;
    }

    /**
     * Gets the value of the specified stat.
     *
     * @param string|object $stat If string, the stat's database key. If object, a Stat object
     * @param string $when "latest" or a "Y-m-d" date
     * @param boolean $refresh whether or not to refresh the cached value
     *
     * @return mixed the value for the stat (0 if no numeric value is stored)
     */
    public function getStat($stat, $when = "latest", $refresh = false) {
        if (is_object($stat)) {
            $stat = $stat->stat;
        }

        if (!isset($this->stats[$stat]) || $refresh) {
            $ts = $this->getUpdateTimestamp($when, $refresh);
            if ($when == "latest" || new DateTime() < new DateTime($when)) {
                $when = $this->getLatestSubmissionDate($refresh);
            }

            $stmt = StatTracker::db()->prepare("SELECT value FROM Data WHERE stat = ? AND agent = ? AND (date = ? OR updated = FROM_UNIXTIME(?)) ORDER BY date DESC LIMIT 1;");
            $stmt->execute(array($stat, $this->name, $when, $ts));
            extract($stmt->fetch());
            $stmt->closeCursor();

            if (!is_array($this->stats)) {
                $this->stats = array();
            }

            $this->stats[$stat] = !is_numeric($value) ? 0 : $value;
        }

        return $this->stats[$stat];
    }

    /**
     * Gets an array of badges for the current player. Array index is the badge name, and the array
     * value is the level of the current badge.
     *
     * @param string $date "today" or a "Y-m-d" date
     * @param boolean $refresh Whether or not to refresh the cached values
     *
     * @return array the array of current badges the Agent has earned
     */
    public function getBadges($date = "today", $refresh = false) {
        if (!is_array($this->badges) || $refresh) {
            $stmt = StatTracker::db()->prepare("CALL GetBadges(?, ?);");
            // Bug fix: $today was undefined when an explicit date was passed,
            // producing a PHP notice in the rowCount() fallback check below.
            $today = false;
            if ($date == "today") {
                $today = true;
                $date = date("Y-m-d");
            }
            $stmt->execute(array($this->name, $date));
            $stmt->closeCursor();

            $stmt = StatTracker::db()->query("SELECT * FROM _Badges;");

            // No badges recorded for today yet: fall back to the last submission date.
            if ($today && $stmt->rowCount() == 0) {
                $this->getBadges(date("Y-m-d", $this->getUpdateTimestamp("latest", $refresh)), true);
            }

            if (!is_array($this->badges)) {
                $this->badges = array();
            }

            while ($row = $stmt->fetch()) {
                extract($row);
                $badge = str_replace(" ", "_", $badge);
                $badge = strtolower($badge);
                $this->badges[$badge] = strtolower($level);
            }
            $stmt->closeCursor();
        }

        return $this->badges;
    }

    /**
     * Gets the prediction for a stat. If the stat has a badge associated with it, this will also
     * retrieve the badge name, current level, next level, and percentage complete to attain the next
     * badge level.
     *
     * @param string $stat Stat to retrieve prediction for
     *
     * @return StdClass prediction object
     */
    public function getPrediction($stat) {
        $prediction = new StdClass();

        $stmt = StatTracker::db()->prepare("CALL GetBadgePrediction(?, ?);");
        $stmt->execute(array($this->name, $stat));

        $stmt = StatTracker::db()->query("SELECT * FROM BadgePrediction");
        $row = $stmt->fetch();

        $prediction->stat = $row['stat'];
        $prediction->name = $row['name'];
        $prediction->unit = $row['unit'];
        $prediction->badge = $row['badge'];
        $prediction->current = $row['current'];
        $prediction->next = $row['next'];
        $prediction->rate = $row['rate'];
        $prediction->progress = $row['progress'];
        $prediction->days_remaining = $row['days'];
        $prediction->target_date = date("Y-m-d", strtotime("+" . round($row['days']) . " days"));

        // Include the year in the local date only when it is a year or more away.
        $local_fmt = ($row['days'] >= 365) ? "F j, Y" : "F j";
        $prediction->target_date_local = date($local_fmt, strtotime("+" . round($row['days']) . " days"));

        if ($stat !== "level") {
            $prediction->amount_remaining = $row['remaining'];
        }
        else {
            $prediction->silver_remaining = $row['silver_remaining'];
            $prediction->gold_remaining = $row['gold_remaining'];
            $prediction->platinum_remaining = $row['platinum_remaining'];
            $prediction->onyx_remaining = $row['onyx_remaining'];
        }

        return $prediction;
    }

    /**
     * Gets the ratios of stats for the given agent.
     *
     * @return array top level entries are a ratio "pair", with a sub array containing keys
     *               stat1, stat2, ratio and step
     */
    public function getRatios() {
        if (!is_array($this->ratios)) {
            $stmt = StatTracker::db()->prepare("CALL GetRatiosForAgent(?);");
            $stmt->execute(array($this->name));
            $stmt->closeCursor();

            $stmt = StatTracker::db()->query("SELECT * FROM RatiosForAgent WHERE badge_1 IS NOT NULL AND badge_2 IS NOT NULL;");
            $this->ratios = array();

            while ($row = $stmt->fetch()) {
                extract($row);
                // Bug fix: results were appended to an unrelated $this->ratio
                // property while the cache guard checked $this->ratios, so the
                // cache never held data. (Also removed two dead lines that
                // transformed an undefined $badge variable.)
                $this->ratios[] = array(
                    "stat1" => array(
                        "stat" => $stat_1,
                        "badge" => strtolower(str_replace(" ", "_", $badge_1)),
                        "level" => strtolower($badge_1_level),
                        "name" => $stat_1_name,
                        "nickname" => $stat_1_nickname,
                        "unit" => $stat_1_unit,
                    ),
                    "stat2" => array(
                        "stat" => $stat_2,
                        "badge" => strtolower(str_replace(" ", "_", $badge_2)),
                        "level" => strtolower($badge_2_level),
                        "name" => $stat_2_name,
                        "nickname" => $stat_2_nickname,
                        "unit" => $stat_2_unit
                    ),
                    "ratio" => $ratio,
                    "step" => $factor
                );
            }
            $stmt->closeCursor();
        }

        return $this->ratios;
    }

    /**
     * Gets the next X badges for the agent, ordered by least time remaining.
     *
     * @param int $limit number of badges to return, default 4
     *
     * @return array of badges
     */
    public function getUpcomingBadges($limit = 4) {
        if (!is_array($this->upcoming_badges)) {
            $stmt = StatTracker::db()->prepare("CALL GetUpcomingBadges(?);");
            $stmt->execute(array($this->name));
            $stmt->closeCursor();

            // sprintf still used intentionally: LIMIT cannot be a bound
            // parameter here; %d guarantees an integer is interpolated.
            $stmt = StatTracker::db()->query(sprintf("SELECT * FROM UpcomingBadges ORDER BY days_remaining ASC LIMIT %d;", $limit));

            if (!is_array($this->upcoming_badges)) {
                $this->upcoming_badges = array();
            }

            while ($row = $stmt->fetch()) {
                extract($row);
                $this->upcoming_badges[] = array(
                    "name" => $badge,
                    "level" => ucfirst($next),
                    "progress" => $progress,
                    "days_remaining" => $days_remaining,
                    "target_date" => date("Y-m-d", strtotime("+" . round($days_remaining) . " days")),
                    "target_date_local" => date("F j", strtotime("+" . round($days_remaining) . " days"))
                );
            }
        }

        return $this->upcoming_badges;
    }

    /**
     * Updates the agent's stats.
     *
     * @param array $data associative array where key is stat and value is the value for the stat
     *                    (a "date" key selects the submission date; defaults to today)
     * @param boolean $allow_lower whether values lower than the stored ones are accepted
     *
     * @return boolean|string true on success, or an error message when a value regressed
     */
    public function updateStats($data, $allow_lower) {
        // Get lowest submission date
        $stmt = StatTracker::db()->prepare("SELECT COALESCE(MIN(date), CAST(NOW() AS Date)) `min_date` FROM Data WHERE agent = ?");
        try {
            $stmt->execute(array($this->name));
            extract($stmt->fetch());

            $ts = date("Y-m-d 00:00:00");
            $dt = $data['date'] == null ? date("Y-m-d") : $data['date'];

            $select_stmt = StatTracker::db()->prepare("SELECT value `current_value` FROM Data WHERE agent = ? AND date = ? AND stat = ?");
            $insert_stmt = StatTracker::db()->prepare("INSERT INTO Data (agent, date, timepoint, stat, value) VALUES (?, ?, DATEDIFF(?, ?) + 1, ?, ?) ON DUPLICATE KEY UPDATE value = VALUES(value);");

            StatTracker::db()->beginTransaction();

            foreach ($data as $stat => $value) {
                if ($stat == "date") continue;

                $value = filter_var($data[$stat], FILTER_SANITIZE_NUMBER_INT);
                $value = !is_numeric($value) ? 0 : $value;

                if ($allow_lower) {
                    $insert_stmt->execute(array($this->name, $dt, $dt, $min_date, $stat, $value));
                }
                else {
                    // Reject regressions: stats in Ingress are monotonically increasing.
                    $select_stmt->execute(array($this->name, $dt, $stat));
                    extract($select_stmt->fetch());
                    $select_stmt->closeCursor();

                    if ($current_value <= $value) {
                        $insert_stmt->execute(array($this->name, $dt, $dt, $min_date, $stat, $value));
                    }
                    else {
                        StatTracker::db()->rollback();
                        return sprintf("Stats cannot be updated. %s is lower than %s for %s.", number_format($value), number_format($current_value), StatTracker::getStats()[$stat]->name);
                    }
                }
            }

            StatTracker::db()->commit();
            return true;
        }
        catch (Exception $e) {
            throw $e;
        }
        finally {
            // Bug fix: the statements are prepared inside the try block, so
            // guard against an exception thrown before they were created.
            if (isset($select_stmt)) $select_stmt->closeCursor();
            if (isset($insert_stmt)) $insert_stmt->closeCursor();
        }
    }
}
?>
| apache-2.0 |
jbonofre/beam | sdks/python/apache_beam/runners/portability/universal_local_runner_test.py | 3635 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import platform
import signal
import sys
import threading
import traceback
import unittest
import apache_beam as beam
from apache_beam.runners.portability import fn_api_runner_test
from apache_beam.runners.portability import universal_local_runner
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class UniversalLocalRunnerTest(fn_api_runner_test.FnApiRunnerTest):
  """Runs the inherited FnApiRunner test suite on the UniversalLocalRunner.

  On non-Windows platforms each test is guarded by a SIGALRM watchdog that
  dumps all live thread stacks before failing, to help diagnose hung
  pipelines. (Python 2 code: note the print statements.)
  """

  # Per-test watchdog timeout in seconds, enforced via SIGALRM in setUp().
  TIMEOUT_SECS = 30

  # Subclasses flip these to exercise the gRPC / subprocess code paths.
  _use_grpc = False
  _use_subprocesses = False

  def setUp(self):
    # SIGALRM does not exist on Windows, so the watchdog is POSIX-only.
    if platform.system() != 'Windows':
      def handler(signum, frame):
        msg = 'Timed out after %s seconds.' % self.TIMEOUT_SECS
        print '=' * 20, msg, '=' * 20
        # Stack of the frame that was interrupted by the alarm.
        traceback.print_stack(frame)
        threads_by_id = {th.ident: th for th in threading.enumerate()}
        # Dump every live thread's stack to show where the hang is.
        for thread_id, stack in sys._current_frames().items():
          th = threads_by_id.get(thread_id)
          print
          print '# Thread:', th or thread_id
          traceback.print_stack(stack)
        # BaseException so broad `except Exception` handlers in the code
        # under test cannot swallow the timeout.
        raise BaseException(msg)
      signal.signal(signal.SIGALRM, handler)
      signal.alarm(self.TIMEOUT_SECS)

  def tearDown(self):
    if platform.system() != 'Windows':
      # Cancel the pending alarm so it cannot fire during a later test.
      signal.alarm(0)

  @classmethod
  def get_runner(cls):
    # Don't inherit: check cls.__dict__ directly so each subclass builds a
    # runner with its own _use_grpc/_use_subprocesses settings.
    if '_runner' not in cls.__dict__:
      cls._runner = universal_local_runner.UniversalLocalRunner(
          use_grpc=cls._use_grpc,
          use_subprocesses=cls._use_subprocesses)
    return cls._runner

  @classmethod
  def tearDownClass(cls):
    # Release the shared runner (if one was ever created for this class).
    if hasattr(cls, '_runner'):
      cls._runner.cleanup()

  def create_pipeline(self):
    """Returns a new Pipeline bound to this class's shared runner."""
    return beam.Pipeline(self.get_runner())

  def test_assert_that(self):
    # TODO: figure out a way for runner to parse and raise the
    # underlying exception.
    with self.assertRaises(Exception):
      with self.create_pipeline() as p:
        assert_that(p | beam.Create(['a', 'b']), equal_to(['a']))

  def test_errors(self):
    # TODO: figure out a way for runner to parse and raise the
    # underlying exception.
    with self.assertRaises(Exception):
      with self.create_pipeline() as p:
        def raise_error(x):
          raise RuntimeError('x')
        # pylint: disable=expression-not-assigned
        (p
         | beam.Create(['a', 'b'])
         | 'StageA' >> beam.Map(lambda x: x)
         | 'StageB' >> beam.Map(lambda x: x)
         | 'StageC' >> beam.Map(raise_error)
         | 'StageD' >> beam.Map(lambda x: x))
# Inherits all tests from fn_api_runner_test.FnApiRunnerTest
class UniversalLocalRunnerTestWithGrpc(UniversalLocalRunnerTest):
  """Re-runs the suite with gRPC channels enabled."""
  _use_grpc = True
@unittest.skip("BEAM-3040")
class UniversalLocalRunnerTestWithSubprocesses(UniversalLocalRunnerTest):
  """Re-runs the suite with workers in subprocesses (skipped: BEAM-3040)."""
  _use_grpc = True
  _use_subprocesses = True
if __name__ == '__main__':
  # INFO-level logging makes runner progress visible when run directly.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| apache-2.0 |
N03297857/2017Fall | node_modules/common-tags/es/TemplateTag/index.js | 429 | 'use strict';
import _default from './TemplateTag';
export { _default as default };
//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIi4uLy4uL3NyYy9UZW1wbGF0ZVRhZy9pbmRleC5qcyJdLCJuYW1lcyI6WyJkZWZhdWx0Il0sIm1hcHBpbmdzIjoiQUFBQTs7cUJBRW9CLGU7cUJBQWJBLE8iLCJmaWxlIjoiaW5kZXguanMiLCJzb3VyY2VzQ29udGVudCI6WyIndXNlIHN0cmljdCdcblxuZXhwb3J0IGRlZmF1bHQgZnJvbSAnLi9UZW1wbGF0ZVRhZydcbiJdfQ== | apache-2.0 |
ctetreault/Cordova-Kitchen-Sink | src/views/integration/video-editor/VideoEditorCtrl.js | 1227 | /* Copyright 2015 BlackBerry Ltd.
Licensed under the Apache License, Version 2.0 (the 'License');
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an 'AS IS' BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language
governing permissions and limitations under the License. */
// Controller for the Video Editor integration demo. It exposes invocation
// targets that open the system video editor card on a bundled sample clip,
// delegating the actual invocation to the Utils service.
angular.module('tsteas')
    .controller('VideoEditorCtrl', ['Utils',
        function(Utils) {
            this.title = 'Video Editor';
            // targets
            // Invocation targets rendered by the view.
            this.targets = [
                {
                    text: 'Open Video Editor',
                    request: {
                        // System video editor card, asked to EDIT an mp4
                        // shipped in the app's local assets.
                        target: 'sys.video_editor.card',
                        action: 'bb.action.EDIT',
                        type: 'video/mp4',
                        uri: 'local:///assets/sample.mp4'
                    }
                }
            ];
            // Forward a target's request to the invocation framework.
            // Success/error outcomes are intentionally ignored in this demo.
            this.invoke = function(request) {
                // invoke
                Utils.invokeApp(
                    request,
                    // success
                    function(data) {},
                    // error
                    function(e) {}
                );
            };
        }
    ]);
| apache-2.0 |
kares/killbill | overdue/src/test/java/org/killbill/billing/overdue/notification/TestDefaultOverdueCheckPoster.java | 5032 | /*
* Copyright 2010-2013 Ning, Inc.
*
* Ning licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.killbill.billing.overdue.notification;
import java.io.IOException;
import java.util.Collection;
import java.util.UUID;
import org.joda.time.DateTime;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import org.killbill.billing.account.api.Account;
import org.killbill.notificationq.api.NotificationEventWithMetadata;
import org.killbill.notificationq.api.NotificationQueue;
import org.killbill.billing.overdue.OverdueTestSuiteWithEmbeddedDB;
import org.killbill.billing.overdue.service.DefaultOverdueService;
import org.killbill.billing.util.entity.dao.EntitySqlDao;
import org.killbill.billing.util.entity.dao.EntitySqlDaoTransactionWrapper;
import org.killbill.billing.util.entity.dao.EntitySqlDaoTransactionalJdbiWrapper;
import org.killbill.billing.util.entity.dao.EntitySqlDaoWrapperFactory;
import org.killbill.billing.util.jackson.ObjectMapper;
/**
 * Verifies that the overdue check poster de-duplicates notifications:
 * re-posting a check for the same account leaves exactly one pending
 * notification in the overdue queue.
 */
public class TestDefaultOverdueCheckPoster extends OverdueTestSuiteWithEmbeddedDB {
    // NOTE(review): objectMapper appears unused in this class.
    private static final ObjectMapper objectMapper = new ObjectMapper();
    // Runs queue look-ups inside an entity-SQL transaction.
    private EntitySqlDaoTransactionalJdbiWrapper entitySqlDaoTransactionalJdbiWrapper;
    // The overdue-check notification queue under test.
    private NotificationQueue overdueQueue;
    // Fixed reference "now"; all notification dates are day offsets from it.
    private DateTime testReferenceTime;
    @Override
    @BeforeMethod(groups = "slow")
    public void beforeMethod() throws Exception {
        super.beforeMethod();
        entitySqlDaoTransactionalJdbiWrapper = new EntitySqlDaoTransactionalJdbiWrapper(dbi, clock, cacheControllerDispatcher, nonEntityDao);
        overdueQueue = notificationQueueService.getNotificationQueue(DefaultOverdueService.OVERDUE_SERVICE_NAME,
                                                                     OverdueCheckNotifier.OVERDUE_CHECK_NOTIFIER_QUEUE);
        Assert.assertTrue(overdueQueue.isStarted());
        testReferenceTime = clock.getUTCNow();
    }
    @Test(groups = "slow")
    public void testShouldntInsertMultipleNotificationsPerOverdueable() throws Exception {
        final UUID accountId = UUID.randomUUID();
        final Account overdueable = Mockito.mock(Account.class);
        Mockito.when(overdueable.getId()).thenReturn(accountId);
        // Post at +10, then +5, then +15 days. Per the expected values, the
        // pending notification ends up at +5: the later (+15) request does
        // not displace the earlier (+5) one.
        insertOverdueCheckAndVerifyQueueContent(overdueable, 10, 10);
        insertOverdueCheckAndVerifyQueueContent(overdueable, 5, 5);
        insertOverdueCheckAndVerifyQueueContent(overdueable, 15, 5);
        // Verify the final content of the queue
        Assert.assertEquals(overdueQueue.getFutureNotificationForSearchKeys(internalCallContext.getAccountRecordId(), internalCallContext.getTenantRecordId()).size(), 1);
    }
    // Posts an overdue check effective at now + nbDaysInFuture and asserts
    // exactly one notification remains, effective at now + expectedNbDaysInFuture.
    private void insertOverdueCheckAndVerifyQueueContent(final Account account, final int nbDaysInFuture, final int expectedNbDaysInFuture) throws IOException {
        final DateTime futureNotificationTime = testReferenceTime.plusDays(nbDaysInFuture);
        final OverdueCheckNotificationKey notificationKey = new OverdueCheckNotificationKey(account.getId());
        checkPoster.insertOverdueNotification(account.getId(), futureNotificationTime, OverdueCheckNotifier.OVERDUE_CHECK_NOTIFIER_QUEUE, notificationKey, internalCallContext);
        final Collection<NotificationEventWithMetadata<OverdueCheckNotificationKey>> notificationsForKey = getNotificationsForOverdueable(account);
        Assert.assertEquals(notificationsForKey.size(), 1);
        final NotificationEventWithMetadata nm = notificationsForKey.iterator().next();
        Assert.assertEquals(nm.getEvent(), notificationKey);
        Assert.assertEquals(nm.getEffectiveDate(), testReferenceTime.plusDays(expectedNbDaysInFuture));
    }
    // Fetches the account's pending overdue-check notifications inside a
    // transaction, mirroring how the poster itself reads the queue.
    private Collection<NotificationEventWithMetadata<OverdueCheckNotificationKey>> getNotificationsForOverdueable(final Account account) {
        return entitySqlDaoTransactionalJdbiWrapper.execute(new EntitySqlDaoTransactionWrapper<Collection<NotificationEventWithMetadata<OverdueCheckNotificationKey>>>() {
            @Override
            public Collection<NotificationEventWithMetadata<OverdueCheckNotificationKey>> inTransaction(final EntitySqlDaoWrapperFactory<EntitySqlDao> entitySqlDaoWrapperFactory) throws Exception {
                return ((OverdueCheckPoster)checkPoster).getFutureNotificationsForAccountInTransaction(entitySqlDaoWrapperFactory, overdueQueue, OverdueCheckNotificationKey.class, internalCallContext);
            }
        });
    }
}
| apache-2.0 |
yangming85/phabricator | src/applications/diviner/publisher/DivinerPublisher.php | 4214 | <?php
abstract class DivinerPublisher {
  private $atomCache;
  // Lazily built inverse of the cache's graph map: graph hash => node hash.
  private $atomGraphHashToNodeHashMap;
  // Atoms already inflated from the cache, keyed by node hash.
  private $atomMap = array();
  private $renderer;
  private $config;
  // Lazily built map: symbol hash => set of node hashes sharing that symbol.
  private $symbolReverseMap;
  private $dropCaches;
  /**
   * When set, publishAtoms() treats every document as changed and
   * republishes everything instead of diffing against what exists.
   */
  public function setDropCaches($drop_caches) {
    $this->dropCaches = $drop_caches;
    return $this;
  }
  public function setRenderer(DivinerRenderer $renderer) {
    // Wire the renderer back to this publisher so the pair stays consistent.
    $renderer->setPublisher($this);
    $this->renderer = $renderer;
    return $this;
  }
  public function getRenderer() {
    return $this->renderer;
  }
  public function setConfig(array $config) {
    $this->config = $config;
    return $this;
  }
  public function getConfig($key, $default = null) {
    return idx($this->config, $key, $default);
  }
  public function getConfigurationData() {
    return $this->config;
  }
  public function setAtomCache(DivinerAtomCache $cache) {
    $this->atomCache = $cache;
    // Invert the graph map once so atoms can be resolved by graph hash.
    $graph_map = $this->atomCache->getGraphMap();
    $this->atomGraphHashToNodeHashMap = array_flip($graph_map);
  }
  /**
   * Resolve a graph hash to its atom, failing loudly on unknown hashes.
   */
  protected function getAtomFromGraphHash($graph_hash) {
    if (empty($this->atomGraphHashToNodeHashMap[$graph_hash])) {
      throw new Exception("No such atom '{$graph_hash}'!");
    }
    return $this->getAtomFromNodeHash(
      $this->atomGraphHashToNodeHashMap[$graph_hash]);
  }
  /**
   * Load an atom by node hash, inflating and memoizing it on first use.
   */
  protected function getAtomFromNodeHash($node_hash) {
    if (empty($this->atomMap[$node_hash])) {
      $dict = $this->atomCache->getAtom($node_hash);
      $this->atomMap[$node_hash] = DivinerAtom::newFromDictionary($dict);
    }
    return $this->atomMap[$node_hash];
  }
  /**
   * Return all atoms whose ref hashes to the same symbol as this atom's,
   * sorted by sort key. Used to disambiguate duplicate definitions.
   */
  protected function getSimilarAtoms(DivinerAtom $atom) {
    if ($this->symbolReverseMap === null) {
      // Build the reverse symbol map once, from the cache's symbol map.
      $rmap = array();
      $smap = $this->atomCache->getSymbolMap();
      foreach ($smap as $nhash => $shash) {
        $rmap[$shash][$nhash] = true;
      }
      $this->symbolReverseMap = $rmap;
    }
    $shash = $atom->getRef()->toHash();
    if (empty($this->symbolReverseMap[$shash])) {
      throw new Exception("Atom has no symbol map entry!");
    }
    $hashes = $this->symbolReverseMap[$shash];
    $atoms = array();
    foreach ($hashes as $hash => $ignored) {
      $atoms[] = $this->getAtomFromNodeHash($hash);
    }
    $atoms = msort($atoms, 'getSortKey');
    return $atoms;
  }
  /**
   * If a book contains multiple definitions of some atom, like some function
   * "f()", we assign them an arbitrary (but fairly stable) order and publish
   * them as "function/f/1/", "function/f/2/", etc., or similar.
   */
  protected function getAtomSimilarIndex(DivinerAtom $atom) {
    $atoms = $this->getSimilarAtoms($atom);
    if (count($atoms) == 1) {
      // Unambiguous atoms get index 0 (no disambiguation needed).
      return 0;
    }
    $index = 1;
    foreach ($atoms as $similar_atom) {
      if ($atom === $similar_atom) {
        return $index;
      }
      $index++;
    }
    throw new Exception("Expected to find atom while disambiguating!");
  }
  abstract protected function loadAllPublishedHashes();
  abstract protected function deleteDocumentsByHash(array $hashes);
  abstract protected function createDocumentsByHash(array $hashes);
  abstract public function findAtomByRef(DivinerAtomRef $ref);
  /**
   * Publish the given hashes: delete documents no longer present and create
   * the new ones. With drop-caches set, delete and recreate everything.
   */
  final public function publishAtoms(array $hashes) {
    $existing = $this->loadAllPublishedHashes();
    if ($this->dropCaches) {
      $deleted = $existing;
      $created = $hashes;
    } else {
      // Diff the published set against the requested set by key.
      $existing_map = array_fill_keys($existing, true);
      $hashes_map = array_fill_keys($hashes, true);
      $deleted = array_diff_key($existing_map, $hashes_map);
      $created = array_diff_key($hashes_map, $existing_map);
      $deleted = array_keys($deleted);
      $created = array_keys($created);
    }
    echo pht('Deleting %d documents.', count($deleted))."\n";
    $this->deleteDocumentsByHash($deleted);
    echo pht('Creating %d documents.', count($created))."\n";
    $this->createDocumentsByHash($created);
  }
  // Methods and files never get standalone documents; articles and all
  // other atom types do.
  protected function shouldGenerateDocumentForAtom(DivinerAtom $atom) {
    switch ($atom->getType()) {
      case DivinerAtom::TYPE_METHOD:
      case DivinerAtom::TYPE_FILE:
        return false;
      case DivinerAtom::TYPE_ARTICLE:
      default:
        break;
    }
    return true;
  }
}
| apache-2.0 |
ciandt-dev/tech-gallery | src/main/java/com/ciandt/techgallery/service/impl/RecommendationServiceImpl.java | 2696 | package com.ciandt.techgallery.service.impl;
import com.google.api.server.spi.response.BadRequestException;
import com.google.api.server.spi.response.InternalServerErrorException;
import com.google.api.server.spi.response.NotFoundException;
import com.google.appengine.api.users.User;
import com.ciandt.techgallery.persistence.model.TechGalleryUser;
import com.ciandt.techgallery.service.RecommendationService;
import com.ciandt.techgallery.service.UserServiceTG;
import com.ciandt.techgallery.service.enums.RecommendationEnums;
import com.ciandt.techgallery.service.enums.ValidationMessageEnums;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* Services for Recommendation Endpoint requests.
*
* @author Thulio Ribeiro
*
*/
public class RecommendationServiceImpl implements RecommendationService {
  // Resolves the authenticated Google user to a TechGallery user.
  UserServiceTG userService = UserServiceTGImpl.getInstance();
  private static RecommendationServiceImpl instance;
  private RecommendationServiceImpl() {}
  /**
   * Singleton method for the service.
   *
   * @author <a href="mailto:joaom@ciandt.com"> João Felipe de Medeiros Moreira </a>
   * @since 07/10/2015
   *
   * @return RecommendationServiceImpl instance.
   */
  public static RecommendationServiceImpl getInstance() {
    // NOTE(review): lazy initialization is not synchronized; confirm that
    // concurrent first calls to getInstance() cannot occur in this container.
    if (instance == null) {
      instance = new RecommendationServiceImpl();
    }
    return instance;
  }
  // Returns the messages of all RecommendationEnums values, after verifying
  // the caller is an authenticated, registered TechGallery user.
  @Override
  public List<String> getRecommendations(User user)
      throws NotFoundException, BadRequestException, InternalServerErrorException {
    validateUser(user);
    final List<RecommendationEnums> enumValues = Arrays.asList(RecommendationEnums.values());
    final List<String> recommendations = new ArrayList<>();
    for (final RecommendationEnums enumEntry : enumValues) {
      recommendations.add(enumEntry.message());
    }
    return recommendations;
  }
  /**
   * Validate the user logged in.
   *
   * @param user info about user from google
   * @throws InternalServerErrorException in case something goes wrong
   * @throws NotFoundException in case the information are not founded
   * @throws BadRequestException in case a request with problem were made.
   */
  private void validateUser(User user)
      throws BadRequestException, NotFoundException, InternalServerErrorException {
    // Missing/blank Google id means the request is not properly authenticated.
    if (user == null || user.getUserId() == null || user.getUserId().isEmpty()) {
      throw new BadRequestException(ValidationMessageEnums.USER_GOOGLE_ENDPOINT_NULL.message());
    }
    // The Google account must also map to a registered TechGallery user.
    final TechGalleryUser techUser = userService.getUserByGoogleId(user.getUserId());
    if (techUser == null) {
      throw new NotFoundException(ValidationMessageEnums.USER_NOT_EXIST.message());
    }
  }
}
| apache-2.0 |
TahaHachana/XPlot | tests/TestData/XPlot.Plotly/js/pie/pie-2.js | 187 | <script>vardata=[{"type":"pie","labels":["Residential","Non-Residential","Utility"],"values":[19,26,55]}];varlayout={"width":500.0,"height":400.0};Plotly.newPlot('',data,layout);</script> | apache-2.0 |
apache/jena | jena-arq/src/main/java/org/apache/jena/sparql/exec/UpdateExecDatasetBuilder.java | 5902 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.jena.sparql.exec;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import org.apache.jena.graph.Node;
import org.apache.jena.query.Query;
import org.apache.jena.sparql.core.DatasetGraph;
import org.apache.jena.sparql.core.Var;
import org.apache.jena.sparql.engine.binding.Binding;
import org.apache.jena.sparql.modify.UpdateEngineFactory;
import org.apache.jena.sparql.modify.UpdateEngineRegistry;
import org.apache.jena.sparql.syntax.syntaxtransform.UpdateTransformOps;
import org.apache.jena.sparql.util.Context;
import org.apache.jena.sparql.util.Symbol;
import org.apache.jena.update.Update;
import org.apache.jena.update.UpdateException;
import org.apache.jena.update.UpdateFactory;
import org.apache.jena.update.UpdateRequest;
/**
 * Builder of {@link UpdateExec} instances operating over a local
 * {@link DatasetGraph}. It accumulates update operations, an optional
 * variable-to-value substitution, a {@link Context} and the target dataset,
 * then {@link #build()} resolves an {@link UpdateEngineFactory} and creates
 * the execution object.
 */
public class UpdateExecDatasetBuilder implements UpdateExecBuilder {
    public static UpdateExecDatasetBuilder create() { return new UpdateExecDatasetBuilder(); }
    private DatasetGraph dataset = null;
    private Context context = null;
    // Uses query rewrite to replace variables by values.
    private Map<Var, Node> substitutionMap = null;
    private Binding initialBinding = null;
    private UpdateRequest updateRequest = new UpdateRequest();
    private UpdateExecDatasetBuilder() {}
    /** Append the updates in an {@link UpdateRequest} to the {@link UpdateRequest} being built. */
    @Override
    public UpdateExecDatasetBuilder update(UpdateRequest updateRequest) {
        Objects.requireNonNull(updateRequest);
        add(updateRequest);
        return this;
    }
    /** Add the {@link Update} to the {@link UpdateRequest} being built. */
    @Override
    public UpdateExecDatasetBuilder update(Update update) {
        Objects.requireNonNull(update);
        add(update);
        return this;
    }
    /** Parse and update operations to the {@link UpdateRequest} being built. */
    @Override
    public UpdateExecDatasetBuilder update(String updateRequestString) {
        UpdateRequest more = UpdateFactory.create(updateRequestString);
        add(more);
        return this;
    }
    /** Set the dataset the updates will execute over. */
    public UpdateExecDatasetBuilder dataset(DatasetGraph dsg) {
        this.dataset = dsg;
        return this;
    }
    /** Set the {@link Context}.
     * This defaults to the global settings of {@code ARQ.getContext()}.
     * If there was a previous call of {@code context} the multiple contexts are merged.
     * */
    @Override
    public UpdateExecDatasetBuilder context(Context context) {
        if ( context == null )
            return this;
        ensureContext();
        this.context.putAll(context);
        return this;
    }
    /** Set a single symbol in the context being built. */
    @Override
    public UpdateExecDatasetBuilder set(Symbol symbol, Object value) {
        ensureContext();
        this.context.set(symbol, value);
        return this;
    }
    /** Set a single boolean symbol in the context being built. */
    @Override
    public UpdateExecDatasetBuilder set(Symbol symbol, boolean value) {
        ensureContext();
        this.context.set(symbol, value);
        return this;
    }
    // Lazily create the context so an untouched builder keeps the default.
    private void ensureContext() {
        if ( context == null )
            context = new Context();
    }
    /** Add all variable/value pairs of a {@link Binding} to the substitution. */
    @Override
    public UpdateExecDatasetBuilder substitution(Binding binding) {
        ensureSubstitutionMap();
        binding.forEach(this.substitutionMap::put);
        return this;
    }
    /** Add a single variable/value pair to the substitution. */
    @Override
    public UpdateExecDatasetBuilder substitution(Var var, Node value) {
        ensureSubstitutionMap();
        this.substitutionMap.put(var, value);
        return this;
    }
    // Lazily create the substitution map; null means "no substitution".
    private void ensureSubstitutionMap() {
        if ( substitutionMap == null )
            substitutionMap = new HashMap<>();
    }
    /** Set the initial binding passed through to the update engine. */
    public UpdateExecDatasetBuilder initialBinding(Binding initialBinding) {
        this.initialBinding = initialBinding;
        return this;
    }
    /**
     * Build the {@link UpdateExec}. Applies the substitution (if any) by
     * rewriting the update request, merges the context with the dataset's
     * settings, and resolves an {@link UpdateEngineFactory}.
     *
     * @throws UpdateException if no update engine is registered for the dataset/context.
     */
    @Override
    public UpdateExec build() {
        Objects.requireNonNull(dataset, "No dataset for update");
        Objects.requireNonNull(updateRequest, "No update request");
        UpdateRequest actualUpdate = updateRequest;
        if ( substitutionMap != null && ! substitutionMap.isEmpty() )
            actualUpdate = UpdateTransformOps.transform(actualUpdate, substitutionMap);
        Context cxt = Context.setupContextForDataset(context, dataset);
        UpdateEngineFactory f = UpdateEngineRegistry.get().find(dataset, cxt);
        if ( f == null )
            throw new UpdateException("Failed to find an UpdateEngine");
        UpdateExec uExec = new UpdateExecDataset(actualUpdate, dataset, initialBinding, cxt, f);
        return uExec;
    }
    // Abbreviated forms
    @Override
    public void execute() {
        build().execute();
    }
    /** Set the dataset, then build and execute in one step. */
    public void execute(DatasetGraph dsg) {
        dataset(dsg);
        execute();
    }
    private void add(UpdateRequest request) {
        request.getOperations().forEach(this::add);
    }
    private void add(Update update) {
        this.updateRequest.add(update);
    }
}
| apache-2.0 |
zorzella/test-libraries-for-java | src/main/java/com/google/common/testing/TearDownStack.java | 2011 | /*
* Copyright (C) 2008 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.testing;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* A {@code TearDownStack} contains a stack of {@link TearDown} instances.
*
* @author Kevin Bourrillion
*/
public class TearDownStack implements TearDownAccepter {
  public static final Logger logger
      = Logger.getLogger(TearDownStack.class.getName());
  // LIFO: the most recently added TearDown runs first.
  final LinkedList<TearDown> stack = new LinkedList<TearDown>();
  // When true, exceptions thrown by individual TearDowns are logged and
  // swallowed instead of being collected and rethrown.
  private final boolean suppressThrows;
  public TearDownStack() {
    this.suppressThrows = false;
  }
  public TearDownStack(boolean suppressThrows) {
    this.suppressThrows = suppressThrows;
  }
  public final void addTearDown(TearDown tearDown) {
    stack.addFirst(tearDown);
  }
  /**
   * Causes teardown to execute.
   *
   * <p>Entries are drained from the stack one at a time, most recently
   * added first. Draining by removal — rather than iterating the list —
   * means a {@link TearDown} that registers further tear-downs while
   * running no longer triggers a {@code ConcurrentModificationException};
   * such late additions are executed as part of the same drain. The stack
   * is empty when this method returns.
   *
   * @throws ClusterException collecting every throwable raised by the
   *     tear-downs, unless this stack was built with {@code suppressThrows}.
   */
  public final void runTearDown() {
    List<Throwable> exceptions = new ArrayList<Throwable>();
    while (!stack.isEmpty()) {
      TearDown tearDown = stack.removeFirst();
      try {
        tearDown.tearDown();
      } catch (Throwable t) {
        if (suppressThrows) {
          TearDownStack.logger.log(Level.INFO,
              "exception thrown during tearDown: " + t.getMessage(), t);
        } else {
          exceptions.add(t);
        }
      }
    }
    if ((!suppressThrows) && (exceptions.size() > 0)) {
      throw ClusterException.create(exceptions);
    }
  }
}
| apache-2.0 |
ctomc/undertow | core/src/main/java/io/undertow/UndertowMessages.java | 25850 | /*
* JBoss, Home of Professional Open Source.
* Copyright 2014 Red Hat, Inc., and individual contributors
* as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.undertow;
import java.io.IOException;
import java.nio.channels.ClosedChannelException;
import javax.net.ssl.SSLHandshakeException;
import javax.net.ssl.SSLPeerUnverifiedException;
import io.undertow.server.RequestTooBigException;
import io.undertow.server.handlers.form.MultiPartParserDefinition;
import org.jboss.logging.Messages;
import org.jboss.logging.annotations.Cause;
import org.jboss.logging.annotations.Message;
import org.jboss.logging.annotations.MessageBundle;
import io.undertow.predicate.PredicateBuilder;
import io.undertow.protocols.http2.HpackException;
import io.undertow.security.api.AuthenticationMechanism;
import io.undertow.server.handlers.builder.HandlerBuilder;
import io.undertow.util.HttpString;
import io.undertow.util.ParameterLimitException;
import io.undertow.util.BadRequestException;
/**
* @author Stuart Douglas
*/
@MessageBundle(projectCode = "UT")
public interface UndertowMessages {
UndertowMessages MESSAGES = Messages.getBundle(UndertowMessages.class);
@Message(id = 1, value = "Maximum concurrent requests must be larger than zero.")
IllegalArgumentException maximumConcurrentRequestsMustBeLargerThanZero();
@Message(id = 2, value = "The response has already been started")
IllegalStateException responseAlreadyStarted();
// id = 3
@Message(id = 4, value = "getResponseChannel() has already been called")
IllegalStateException responseChannelAlreadyProvided();
@Message(id = 5, value = "getRequestChannel() has already been called")
IllegalStateException requestChannelAlreadyProvided();
// id = 6
// id = 7
@Message(id = 8, value = "Handler cannot be null")
IllegalArgumentException handlerCannotBeNull();
@Message(id = 9, value = "Path must be specified")
IllegalArgumentException pathMustBeSpecified();
@Message(id = 10, value = "Session is invalid %s")
IllegalStateException sessionIsInvalid(String sessionId);
@Message(id = 11, value = "Session manager must not be null")
IllegalStateException sessionManagerMustNotBeNull();
@Message(id = 12, value = "Session manager was not attached to the request. Make sure that the SessionAttachmentHandler is installed in the handler chain")
IllegalStateException sessionManagerNotFound();
@Message(id = 13, value = "Argument %s cannot be null")
IllegalArgumentException argumentCannotBeNull(final String argument);
// @Message(id = 14, value = "close() called with data still to be flushed. Please call shutdownWrites() and then call flush() until it returns true before calling close()")
// IOException closeCalledWithDataStillToBeFlushed();
//
// @Message(id = 16, value = "Could not add cookie as cookie handler was not present in the handler chain")
// IllegalStateException cookieHandlerNotPresent();
@Message(id = 17, value = "Form value is a file, use getFile() instead")
IllegalStateException formValueIsAFile();
@Message(id = 18, value = "Form value is a String, use getValue() instead")
IllegalStateException formValueIsAString();
//
// @Message(id = 19, value = "Connection from %s terminated as request entity was larger than %s")
// IOException requestEntityWasTooLarge(SocketAddress address, long size);
@Message(id = 20, value = "Connection terminated as request was larger than %s")
RequestTooBigException requestEntityWasTooLarge(long size);
@Message(id = 21, value = "Session already invalidated")
IllegalStateException sessionAlreadyInvalidated();
@Message(id = 22, value = "The specified hash algorithm '%s' can not be found.")
IllegalArgumentException hashAlgorithmNotFound(String algorithmName);
@Message(id = 23, value = "An invalid Base64 token has been received.")
IllegalArgumentException invalidBase64Token(@Cause final IOException cause);
@Message(id = 24, value = "An invalidly formatted nonce has been received.")
IllegalArgumentException invalidNonceReceived();
@Message(id = 25, value = "Unexpected token '%s' within header.")
IllegalArgumentException unexpectedTokenInHeader(final String name);
@Message(id = 26, value = "Invalid header received.")
IllegalArgumentException invalidHeader();
@Message(id = 27, value = "Could not find session cookie config in the request")
IllegalStateException couldNotFindSessionCookieConfig();
//
// @Message(id = 28, value = "Session %s already exists")
// IllegalStateException sessionAlreadyExists(final String id);
@Message(id = 29, value = "Channel was closed mid chunk, if you have attempted to write chunked data you cannot shutdown the channel until after it has all been written.")
IOException chunkedChannelClosedMidChunk();
@Message(id = 30, value = "User %s successfully authenticated.")
String userAuthenticated(final String userName);
@Message(id = 31, value = "User %s has logged out.")
String userLoggedOut(final String userName);
//
// @Message(id = 33, value = "Authentication type %s cannot be combined with %s")
// IllegalStateException authTypeCannotBeCombined(String type, String existing);
@Message(id = 34, value = "Stream is closed")
IOException streamIsClosed();
@Message(id = 35, value = "Cannot get stream as startBlocking has not been invoked")
IllegalStateException startBlockingHasNotBeenCalled();
@Message(id = 36, value = "Connection terminated parsing multipart data")
IOException connectionTerminatedReadingMultiPartData();
@Message(id = 37, value = "Failed to parse path in HTTP request")
RuntimeException failedToParsePath();
@Message(id = 38, value = "Authentication failed, requested user name '%s'")
String authenticationFailed(final String userName);
@Message(id = 39, value = "Too many query parameters, cannot have more than %s query parameters")
BadRequestException tooManyQueryParameters(int noParams);
@Message(id = 40, value = "Too many headers, cannot have more than %s header")
String tooManyHeaders(int noParams);
@Message(id = 41, value = "Channel is closed")
ClosedChannelException channelIsClosed();
@Message(id = 42, value = "Could not decode trailers in HTTP request")
IOException couldNotDecodeTrailers();
@Message(id = 43, value = "Data is already being sent. You must wait for the completion callback to be be invoked before calling send() again")
IllegalStateException dataAlreadyQueued();
@Message(id = 44, value = "More than one predicate with name %s. Builder class %s and %s")
IllegalStateException moreThanOnePredicateWithName(String name, Class<? extends PredicateBuilder> aClass, Class<? extends PredicateBuilder> existing);
@Message(id = 45, value = "Error parsing predicated handler string %s:%n%s")
IllegalArgumentException errorParsingPredicateString(String reason, String s);
@Message(id = 46, value = "The number of cookies sent exceeded the maximum of %s")
IllegalStateException tooManyCookies(int maxCookies);
@Message(id = 47, value = "The number of parameters exceeded the maximum of %s")
ParameterLimitException tooManyParameters(int maxValues);
@Message(id = 48, value = "No request is currently active")
IllegalStateException noRequestActive();
@Message(id = 50, value = "AuthenticationMechanism Outcome is null")
IllegalStateException authMechanismOutcomeNull();
@Message(id = 51, value = "Not a valid IP pattern %s")
IllegalArgumentException notAValidIpPattern(String peer);
@Message(id = 52, value = "Session data requested when non session based authentication in use")
IllegalStateException noSessionData();
@Message(id = 53, value = "Listener %s already registered")
IllegalArgumentException listenerAlreadyRegistered(String name);
@Message(id = 54, value = "The maximum size %s for an individual file in a multipart request was exceeded")
MultiPartParserDefinition.FileTooLargeException maxFileSizeExceeded(long maxIndividualFileSize);
@Message(id = 55, value = "Could not set attribute %s to %s as it is read only")
String couldNotSetAttribute(String attributeName, String newValue);
@Message(id = 56, value = "Could not parse URI template %s, exception at char %s")
RuntimeException couldNotParseUriTemplate(String path, int i);
@Message(id = 57, value = "Mismatched braces in attribute string %s")
RuntimeException mismatchedBraces(String valueString);
@Message(id = 58, value = "More than one handler with name %s. Builder class %s and %s")
IllegalStateException moreThanOneHandlerWithName(String name, Class<? extends HandlerBuilder> aClass, Class<? extends HandlerBuilder> existing);
//
// @Message(id = 59, value = "Invalid syntax %s")
// IllegalArgumentException invalidSyntax(String line);
//
// @Message(id = 60, value = "Error parsing handler string %s:%n%s")
// IllegalArgumentException errorParsingHandlerString(String reason, String s);
@Message(id = 61, value = "Out of band responses only allowed for 100-continue requests")
IllegalArgumentException outOfBandResponseOnlyAllowedFor100Continue();
//
// @Message(id = 62, value = "AJP does not support HTTP upgrade")
// IllegalStateException ajpDoesNotSupportHTTPUpgrade();
//
// @Message(id = 63, value = "File system watcher already started")
// IllegalStateException fileSystemWatcherAlreadyStarted();
//
// @Message(id = 64, value = "File system watcher not started")
// IllegalStateException fileSystemWatcherNotStarted();
@Message(id = 65, value = "SSL must be specified to connect to a https URL")
IOException sslWasNull();
@Message(id = 66, value = "Incorrect magic number %s for AJP packet header")
IOException wrongMagicNumber(int number);
@Message(id = 67, value = "No client cert was provided")
SSLPeerUnverifiedException peerUnverified();
@Message(id = 68, value = "Servlet path match failed")
IllegalArgumentException servletPathMatchFailed();
@Message(id = 69, value = "Could not parse set cookie header %s")
IllegalArgumentException couldNotParseCookie(String headerValue);
@Message(id = 70, value = "method can only be called by IO thread")
IllegalStateException canOnlyBeCalledByIoThread();
@Message(id = 71, value = "Cannot add path template %s, matcher already contains an equivalent pattern %s")
IllegalStateException matcherAlreadyContainsTemplate(String templateString, String templateString1);
@Message(id = 72, value = "Failed to decode url %s to charset %s")
IllegalArgumentException failedToDecodeURL(String s, String enc, @Cause Exception e);
@Message(id = 73, value = "Resource change listeners are not supported")
IllegalArgumentException resourceChangeListenerNotSupported();
//
// @Message(id = 74, value = "Could not renegotiate SSL connection to require client certificate, as client had sent more data")
// IllegalStateException couldNotRenegotiate();
@Message(id = 75, value = "Object was freed")
IllegalStateException objectWasFreed();
@Message(id = 76, value = "Handler not shutdown")
IllegalStateException handlerNotShutdown();
@Message(id = 77, value = "The underlying transport does not support HTTP upgrade")
IllegalStateException upgradeNotSupported();
@Message(id = 78, value = "Renegotiation not supported")
IOException renegotiationNotSupported();
//
// @Message(id = 79, value = "Not a valid user agent pattern %s")
// IllegalArgumentException notAValidUserAgentPattern(String userAgent);
@Message(id = 80, value = "Not a valid regular expression pattern %s")
IllegalArgumentException notAValidRegularExpressionPattern(String pattern);
@Message(id = 81, value = "Bad request")
BadRequestException badRequest();
@Message(id = 82, value = "Host %s already registered")
RuntimeException hostAlreadyRegistered(Object host);
@Message(id = 83, value = "Host %s has not been registered")
RuntimeException hostHasNotBeenRegistered(Object host);
@Message(id = 84, value = "Attempted to write additional data after the last chunk")
IOException extraDataWrittenAfterChunkEnd();
@Message(id = 85, value = "Could not generate unique session id")
RuntimeException couldNotGenerateUniqueSessionId();
//
// @Message(id = 86, value = "SPDY needs to be provided with a heap buffer pool, for use in compressing and decompressing headers.")
// IllegalArgumentException mustProvideHeapBuffer();
//
// @Message(id = 87, value = "Unexpected SPDY frame type %s")
// IOException unexpectedFrameType(int type);
@Message(id = 88, value = "SPDY control frames cannot have body content")
IOException controlFrameCannotHaveBodyContent();
// @Message(id = 89, value = "SPDY not supported")
//// IOException spdyNotSupported();
//
// @Message(id = 90, value = "No ALPN implementation available (tried Jetty ALPN and JDK9)")
// IOException alpnNotAvailable();
@Message(id = 91, value = "Buffer has already been freed")
IllegalStateException bufferAlreadyFreed();
//
// @Message(id = 92, value = "A SPDY header was too large to fit in a response buffer, if you want to support larger headers please increase the buffer size")
// IllegalStateException headersTooLargeToFitInHeapBuffer();
// @Message(id = 93, value = "A SPDY stream was reset by the remote endpoint")
// IOException spdyStreamWasReset();
@Message(id = 94, value = "Blocking await method called from IO thread. Blocking IO must be dispatched to a worker thread or deadlocks will result.")
IOException awaitCalledFromIoThread();
@Message(id = 95, value = "Recursive call to flushSenders()")
RuntimeException recursiveCallToFlushingSenders();
@Message(id = 96, value = "More data was written to the channel than specified in the content-length")
IllegalStateException fixedLengthOverflow();
@Message(id = 97, value = "AJP request already in progress")
IllegalStateException ajpRequestAlreadyInProgress();
@Message(id = 98, value = "HTTP ping data must be 8 bytes in length")
String httpPingDataMustBeLength8();
@Message(id = 99, value = "Received a ping of size other than 8")
String invalidPingSize();
@Message(id = 100, value = "stream id must be zero for frame type %s")
String streamIdMustBeZeroForFrameType(int frameType);
@Message(id = 101, value = "stream id must not be zero for frame type %s")
String streamIdMustNotBeZeroForFrameType(int frameType);
//
// @Message(id = 102, value = "RST_STREAM received for idle stream")
// String rstStreamReceivedForIdleStream();
@Message(id = 103, value = "Http2 stream was reset")
IOException http2StreamWasReset();
@Message(id = 104, value = "Incorrect HTTP2 preface")
IOException incorrectHttp2Preface();
@Message(id = 105, value = "HTTP2 frame to large")
IOException http2FrameTooLarge();
@Message(id = 106, value = "HTTP2 continuation frame received without a corresponding headers or push promise frame")
IOException http2ContinuationFrameNotExpected();
@Message(id = 107, value = "Huffman encoded value in HPACK headers did not end with EOS padding")
HpackException huffmanEncodedHpackValueDidNotEndWithEOS();
@Message(id = 108, value = "HPACK variable length integer encoded over too many octects, max is %s")
HpackException integerEncodedOverTooManyOctets(int maxIntegerOctets);
@Message(id = 109, value = "Zero is not a valid header table index")
HpackException zeroNotValidHeaderTableIndex();
@Message(id = 110, value = "Cannot send 100-Continue, getResponseChannel() has already been called")
IOException cannotSendContinueResponse();
@Message(id = 111, value = "Parser did not make progress")
IOException parserDidNotMakeProgress();
@Message(id = 112, value = "Only client side can call createStream, if you wish to send a PUSH_PROMISE frame use createPushPromiseStream instead")
IOException headersStreamCanOnlyBeCreatedByClient();
@Message(id = 113, value = "Only the server side can send a push promise stream")
IOException pushPromiseCanOnlyBeCreatedByServer();
@Message(id = 114, value = "Invalid IP access control rule %s. Format is: [ip-match] allow|deny")
IllegalArgumentException invalidAclRule(String rule);
@Message(id = 115, value = "Server received PUSH_PROMISE frame from client")
IOException serverReceivedPushPromise();
@Message(id = 116, value = "CONNECT not supported by this connector")
IllegalStateException connectNotSupported();
@Message(id = 117, value = "Request was not a CONNECT request")
IllegalStateException notAConnectRequest();
@Message(id = 118, value = "Cannot reset buffer, response has already been commited")
IllegalStateException cannotResetBuffer();
@Message(id = 119, value = "HTTP2 via prior knowledge failed")
IOException http2PriRequestFailed();
@Message(id = 120, value = "Out of band responses are not allowed for this connector")
IllegalStateException outOfBandResponseNotSupported();
@Message(id = 121, value = "Session was rejected as the maximum number of sessions (%s) has been hit")
IllegalStateException tooManySessions(int maxSessions);
@Message(id = 122, value = "CONNECT attempt failed as target proxy returned %s")
IOException proxyConnectionFailed(int responseCode);
@Message(id = 123, value = "MCMP message %s rejected due to suspicious characters")
RuntimeException mcmpMessageRejectedDueToSuspiciousCharacters(String data);
@Message(id = 124, value = "renegotiation timed out")
IllegalStateException rengotiationTimedOut();
@Message(id = 125, value = "Request body already read")
IllegalStateException requestBodyAlreadyRead();
@Message(id = 126, value = "Attempted to do blocking IO from the IO thread. This is prohibited as it may result in deadlocks")
IllegalStateException blockingIoFromIOThread();
@Message(id = 127, value = "Response has already been sent")
IllegalStateException responseComplete();
@Message(id = 128, value = "Remote peer closed connection before all data could be read")
IOException couldNotReadContentLengthData();
@Message(id = 129, value = "Failed to send after being safe to send")
IllegalStateException failedToSendAfterBeingSafe();
@Message(id = 130, value = "HTTP reason phrase was too large for the buffer. Either provide a smaller message or a bigger buffer. Phrase: %s")
IllegalStateException reasonPhraseToLargeForBuffer(String phrase);
@Message(id = 131, value = "Buffer pool is closed")
IllegalStateException poolIsClosed();
@Message(id = 132, value = "HPACK decode failed")
HpackException hpackFailed();
@Message(id = 133, value = "Request did not contain an Upgrade header, upgrade is not permitted")
IllegalStateException notAnUpgradeRequest();
@Message(id = 134, value = "Authentication mechanism %s requires property %s to be set")
IllegalStateException authenticationPropertyNotSet(String name, String header);
@Message(id = 135, value = "renegotiation failed")
IllegalStateException rengotiationFailed();
@Message(id = 136, value = "User agent charset string must have an even number of items, in the form pattern,charset,pattern,charset,... Instead got: %s")
IllegalArgumentException userAgentCharsetMustHaveEvenNumberOfItems(String supplied);
@Message(id = 137, value = "Could not find the datasource called %s")
IllegalArgumentException datasourceNotFound(String ds);
@Message(id = 138, value = "Server not started")
IllegalStateException serverNotStarted();
@Message(id = 139, value = "Exchange already complete")
IllegalStateException exchangeAlreadyComplete();
@Message(id = 140, value = "Initial SSL/TLS data is not a handshake record")
SSLHandshakeException notHandshakeRecord();
@Message(id = 141, value = "Initial SSL/TLS handshake record is invalid")
SSLHandshakeException invalidHandshakeRecord();
@Message(id = 142, value = "Initial SSL/TLS handshake spans multiple records")
SSLHandshakeException multiRecordSSLHandshake();
@Message(id = 143, value = "Expected \"client hello\" record")
SSLHandshakeException expectedClientHello();
@Message(id = 144, value = "Expected server hello")
SSLHandshakeException expectedServerHello();
@Message(id = 145, value = "Too many redirects")
IOException tooManyRedirects(@Cause IOException exception);
@Message(id = 146, value = "HttpServerExchange cannot have both async IO resumed and dispatch() called in the same cycle")
IllegalStateException resumedAndDispatched();
@Message(id = 147, value = "No host header in a HTTP/1.1 request")
IOException noHostInHttp11Request();
@Message(id = 148, value = "Invalid HPack encoding. First byte: %s")
HpackException invalidHpackEncoding(byte b);
@Message(id = 149, value = "HttpString is not allowed to contain newlines. value: %s")
IllegalArgumentException newlineNotSupportedInHttpString(String value);
@Message(id = 150, value = "Pseudo header %s received after receiving normal headers. Pseudo headers must be the first headers in a HTTP/2 header block.")
String pseudoHeaderInWrongOrder(HttpString header);
@Message(id = 151, value = "Expected to receive a continuation frame")
String expectedContinuationFrame();
@Message(id = 152, value = "Incorrect frame size")
String incorrectFrameSize();
@Message(id = 153, value = "Stream id not registered")
IllegalStateException streamNotRegistered();
@Message(id = 154, value = "Mechanism %s returned a null result from sendChallenge()")
NullPointerException sendChallengeReturnedNull(AuthenticationMechanism mechanism);
@Message(id = 155, value = "Framed channel body was set when it was not ready for flush")
IllegalStateException bodyIsSetAndNotReadyForFlush();
@Message(id = 156, value = "Invalid GZIP header")
IOException invalidGzipHeader();
@Message(id = 157, value = "Invalid GZIP footer")
IOException invalidGZIPFooter();
@Message(id = 158, value = "Response of length %s is too large to buffer")
IllegalStateException responseTooLargeToBuffer(Long length);
//
// @Message(id = 159, value = "Max size must be larger than one")
// IllegalArgumentException maxSizeMustBeLargerThanOne();
@Message(id = 161, value = "HTTP/2 header block is too large")
String headerBlockTooLarge();
@Message(id = 162, value = "Same-site attribute %s is invalid. It must be Strict or Lax")
IllegalArgumentException invalidSameSiteMode(String mode);
@Message(id = 163, value = "Invalid token %s")
IllegalArgumentException invalidToken(byte c);
@Message(id = 164, value = "Request contained invalid headers")
IllegalArgumentException invalidHeaders();
@Message(id = 165, value = "Invalid character %s in request-target")
String invalidCharacterInRequestTarget(char next);
@Message(id = 166, value = "Pooled object is closed")
IllegalStateException objectIsClosed();
@Message(id = 167, value = "More than one host header in request")
IOException moreThanOneHostHeader();
@Message(id = 168, value = "An invalid character [ASCII code: %s] was present in the cookie value")
IllegalArgumentException invalidCookieValue(String value);
@Message(id = 169, value = "An invalid domain [%s] was specified for this cookie")
IllegalArgumentException invalidCookieDomain(String value);
@Message(id = 170, value = "An invalid path [%s] was specified for this cookie")
IllegalArgumentException invalidCookiePath(String value);
@Message(id = 173, value = "An invalid control character [%s] was present in the cookie value or attribute")
IllegalArgumentException invalidControlCharacter(String value);
@Message(id = 174, value = "An invalid escape character in cookie value")
IllegalArgumentException invalidEscapeCharacter();
@Message(id = 175, value = "Invalid Hpack index %s")
HpackException invalidHpackIndex(int index);
@Message(id = 178, value = "Buffer pool is too small, min size is %s")
IllegalArgumentException bufferPoolTooSmall(int minSize);
@Message(id = 179, value = "Invalid PROXY protocol header")
IOException invalidProxyHeader();
@Message(id = 180, value = "PROXY protocol header exceeded max size of 107 bytes")
IOException headerSizeToLarge();
@Message(id = 181, value = "HTTP/2 trailers too large for single buffer")
RuntimeException http2TrailerToLargeForSingleBuffer();
@Message(id = 182, value = "Ping not supported")
IOException pingNotSupported();
@Message(id = 183, value = "Ping timed out")
IOException pingTimeout();
@Message(id = 184, value = "Stream limit exceeded")
IOException streamLimitExceeded();
@Message(id = 185, value = "Invalid IP address %s")
IOException invalidIpAddress(String addressString);
}
| apache-2.0 |
varunarya10/rally | rally/benchmark/wrappers/network.py | 14475 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import netaddr
import six
from rally.benchmark import utils as bench_utils
from rally.common.i18n import _
from rally.common import log as logging
from rally.common import utils
from rally import consts
from rally import exceptions
from neutronclient.common import exceptions as neutron_exceptions
from novaclient import exceptions as nova_exceptions
LOG = logging.getLogger(__name__)
cidr_incr = utils.RAMInt()
def generate_cidr(start_cidr="10.2.0.0/24"):
"""Generate next CIDR for network or subnet, without IP overlapping.
This is process and thread safe, because `cidr_incr' points to
value stored directly in RAM. This guarantees that CIDRs will be
serial and unique even under hard multiprocessing/threading load.
:param start_cidr: start CIDR str
:returns: next available CIDR str
"""
cidr = str(netaddr.IPNetwork(start_cidr).next(next(cidr_incr)))
LOG.debug("CIDR generated: %s" % cidr)
return cidr
class NetworkWrapperException(exceptions.RallyException):
msg_fmt = _("%(message)s")
@six.add_metaclass(abc.ABCMeta)
class NetworkWrapper(object):
"""Base class for network service implementations.
We actually have two network services implementations, with different API:
NovaNetwork and Neutron. The idea is (at least to try) to use unified
service, which hides most differences and routines behind the scenes.
This allows to significantly re-use and simplify code.
"""
START_CIDR = "10.2.0.0/24"
SERVICE_IMPL = None
def __init__(self, clients, config=None):
if hasattr(clients, self.SERVICE_IMPL):
self.client = getattr(clients, self.SERVICE_IMPL)()
else:
self.client = clients(self.SERVICE_IMPL)
self.config = config or {}
self.start_cidr = self.config.get("start_cidr", self.START_CIDR)
@abc.abstractmethod
def create_network(self):
"""Create network."""
@abc.abstractmethod
def delete_network(self):
"""Delete network."""
@abc.abstractmethod
def list_networks(self):
"""List networks."""
@abc.abstractmethod
def create_floating_ip(self):
"""Create floating IP."""
@abc.abstractmethod
def delete_floating_ip(self):
"""Delete floating IP."""
@abc.abstractmethod
def supports_security_group(self):
"""Checks whether security group is supported."""
class NovaNetworkWrapper(NetworkWrapper):
SERVICE_IMPL = consts.Service.NOVA
def __init__(self, *args):
super(NovaNetworkWrapper, self).__init__(*args)
self.skip_cidrs = [n.cidr for n in self.client.networks.list()]
def _generate_cidr(self):
cidr = generate_cidr(start_cidr=self.start_cidr)
while cidr in self.skip_cidrs:
cidr = generate_cidr(start_cidr=self.start_cidr)
return cidr
def create_network(self, tenant_id, **kwargs):
"""Create network.
:param tenant_id: str, tenant ID
:param **kwargs: for compatibility, not used here
:returns: dict, network data
"""
cidr = self._generate_cidr()
label = utils.generate_random_name("rally_net_")
network = self.client.networks.create(
tenant_id=tenant_id, cidr=cidr, label=label)
return {"id": network.id,
"cidr": network.cidr,
"name": network.label,
"status": "ACTIVE",
"external": False,
"tenant_id": tenant_id}
def delete_network(self, network):
return self.client.networks.delete(network["id"])
def list_networks(self):
return self.client.networks.list()
def create_floating_ip(self, ext_network=None, **kwargs):
"""Allocate a floating ip from the given nova-network pool
:param ext_network: name or external network, str
:param **kwargs: for compatibility, not used here
:returns: floating IP dict
"""
if not ext_network:
try:
ext_network = self.client.floating_ip_pools.list()[0].name
except IndexError:
raise NetworkWrapperException("No floating IP pools found")
fip = self.client.floating_ips.create(ext_network)
return {"id": fip.id, "ip": fip.ip}
def _get_floating_ip(self, fip_id, do_raise=False):
try:
fip = self.client.floating_ips.get(fip_id)
except nova_exceptions.NotFound:
if not do_raise:
return None
raise exceptions.GetResourceNotFound(
resource="Floating IP %s" % fip_id)
return fip.id
def delete_floating_ip(self, fip_id, wait=False):
"""Delete floating IP.
:param fip_id: int floating IP id
:param wait: if True then wait to return until floating ip is deleted
"""
self.client.floating_ips.delete(fip_id)
if not wait:
return
bench_utils.wait_for_delete(
fip_id,
update_resource=lambda i: self._get_floating_ip(i, do_raise=True))
def supports_security_group(self):
"""Check whether security group is supported
:return: result tuple. Always (True, "") for nova-network.
:rtype: (bool, string)
"""
return True, ""
class NeutronWrapper(NetworkWrapper):
SERVICE_IMPL = consts.Service.NEUTRON
SUBNET_IP_VERSION = 4
@property
def external_networks(self):
return self.client.list_networks(**{
"router:external": True})["networks"]
def get_network(self, net_id=None, name=None):
net = None
try:
if net_id:
net = self.client.show_network(net_id)["network"]
else:
for net in self.client.list_networks(name=name)["networks"]:
break
return {"id": net["id"],
"name": net["name"],
"tenant_id": net["tenant_id"],
"status": net["status"],
"external": net["router:external"],
"subnets": net["subnets"],
"router_id": None}
except (TypeError, neutron_exceptions.NeutronClientException):
raise NetworkWrapperException(
"Network not found: %s" % (name or net_id))
def create_router(self, external=False, **kwargs):
"""Create neutron router.
:param external: bool, whether to set setup external_gateway_info
:param **kwargs: POST /v2.0/routers request options
:returns: neutron router dict
"""
if "name" not in kwargs:
kwargs["name"] = utils.generate_random_name("rally_router_")
if external and "external_gateway_info" not in kwargs:
for net in self.external_networks:
kwargs["external_gateway_info"] = {
"network_id": net["id"], "enable_snat": True}
return self.client.create_router({"router": kwargs})["router"]
def _generate_cidr(self):
# TODO(amaretskiy): Generate CIDRs unique for network, not cluster
return generate_cidr(start_cidr=self.start_cidr)
def create_network(self, tenant_id, **kwargs):
"""Create network.
:param tenant_id: str, tenant ID
:param **kwargs: extra options
:returns: dict, network data
"""
network_args = {
"network": {
"tenant_id": tenant_id,
"name": utils.generate_random_name("rally_net_")
}
}
network = self.client.create_network(network_args)["network"]
router = None
if kwargs.get("add_router", False):
router = self.create_router(external=True, tenant_id=tenant_id)
subnets = []
subnets_num = kwargs.get("subnets_num", 0)
for i in range(subnets_num):
subnet_args = {
"subnet": {
"tenant_id": tenant_id,
"network_id": network["id"],
"name": utils.generate_random_name("rally_subnet_"),
"ip_version": self.SUBNET_IP_VERSION,
"cidr": self._generate_cidr(),
"enable_dhcp": True,
"dns_nameservers": kwargs.get("dns_nameservers",
["8.8.8.8", "8.8.4.4"])
}
}
subnet = self.client.create_subnet(subnet_args)["subnet"]
subnets.append(subnet["id"])
if router:
self.client.add_interface_router(router["id"],
{"subnet_id": subnet["id"]})
return {"id": network["id"],
"name": network["name"],
"status": network["status"],
"subnets": subnets,
"external": network.get("router:external", False),
"router_id": router and router["id"] or None,
"tenant_id": tenant_id}
def delete_network(self, network):
net_dhcps = self.client.list_dhcp_agent_hosting_networks(
network["id"])["agents"]
for net_dhcp in net_dhcps:
self.client.remove_network_from_dhcp_agent(net_dhcp["id"],
network["id"])
router_id = network["router_id"]
if router_id:
self.client.remove_gateway_router(router_id)
for subnet_id in network["subnets"]:
self.client.remove_interface_router(router_id,
{"subnet_id": subnet_id})
self.client.delete_router(router_id)
for port in self.client.list_ports(network_id=network["id"])["ports"]:
self.client.delete_port(port["id"])
for subnet_id in network["subnets"]:
self._delete_subnet(subnet_id)
return self.client.delete_network(network["id"])
def _delete_subnet(self, subnet_id):
self.client.delete_subnet(subnet_id)
def list_networks(self):
return self.client.list_networks()["networks"]
def create_port(self, network_id, **kwargs):
"""Create neutron port.
:param network_id: neutron network id
:param **kwargs: POST /v2.0/ports request options
:returns: neutron port dict
"""
kwargs["network_id"] = network_id
if "name" not in kwargs:
kwargs["name"] = utils.generate_random_name("rally_port_")
return self.client.create_port({"port": kwargs})["port"]
def create_floating_ip(self, ext_network=None, int_network=None,
tenant_id=None, port_id=None, **kwargs):
"""Create Neutron floating IP.
:param ext_network: floating network name or dict
:param int_network: fixed network name or dict
:param tenant_id str tenant id
:param port_id: str port id
:param **kwargs: for compatibility, not used here
:returns: floating IP dict
"""
if not tenant_id:
raise ValueError("Missed tenant_id")
net_id = None
if type(ext_network) is dict:
net_id = ext_network["id"]
elif ext_network:
ext_net = self.get_network(name=ext_network)
if not ext_net["external"]:
raise NetworkWrapperException("Network is not external: %s"
% ext_network)
net_id = ext_net["id"]
else:
ext_networks = self.external_networks
if not ext_networks:
raise NetworkWrapperException(
"Failed to allocate floating IP: "
"no external networks found")
net_id = ext_networks[0]["id"]
if not port_id:
if type(int_network) is dict:
port_id = self.create_port(int_network["id"])["id"]
elif int_network:
int_net = self.get_network(name=int_network)
if int_net["external"]:
raise NetworkWrapperException("Network is external: %s"
% int_network)
port_id = self.create_port(int_net["id"])["id"]
kwargs = {"floatingip": {"floating_network_id": net_id},
"tenant_id": tenant_id,
"port_id": port_id}
fip = self.client.create_floatingip(kwargs)["floatingip"]
return {"id": fip["id"], "ip": fip["floating_ip_address"]}
def delete_floating_ip(self, fip_id, **kwargs):
"""Delete floating IP.
:param fip_id: int floating IP id
:param **kwargs: for compatibility, not used here
"""
self.client.delete_floatingip(fip_id)
def supports_security_group(self):
"""Check whether security group is supported
:return: result tuple
:rtype: (bool, string)
"""
extensions = self.client.list_extensions().get("extensions", [])
use_sg = any(ext.get("alias") == "security-group"
for ext in extensions)
if use_sg:
return True, ""
return False, _("neutron driver does not support security groups")
def wrap(clients, config=None):
"""Returns available network wrapper instance.
:param clients: rally.osclients.Clients instance
:param config: task config dict
:returns: NetworkWrapper subclass instance
"""
if hasattr(clients, "services"):
services = clients.services()
else:
services = clients("services")
if consts.Service.NEUTRON in services.values():
return NeutronWrapper(clients, config)
return NovaNetworkWrapper(clients, config)
| apache-2.0 |
jexp/idea2 | platform/lang-api/src/com/intellij/execution/filters/RegexpFilter.java | 6436 | /*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.execution.filters;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.openapi.vfs.LocalFileSystem;
import com.intellij.openapi.vfs.VirtualFile;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.annotations.Nullable;
import java.io.File;
import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* @author Yura Cangea
* @version 1.0
*/
public class RegexpFilter implements Filter {
@NonNls public static final String FILE_PATH_MACROS = "$FILE_PATH$";
@NonNls public static final String LINE_MACROS = "$LINE$";
@NonNls public static final String COLUMN_MACROS = "$COLUMN$";
@NonNls private static final String FILE_PATH_REGEXP = "((?:\\p{Alpha}\\:)?[0-9 a-z_A-Z\\-\\\\./]+)";
private static final String NUMBER_REGEXP = "([0-9]+)";
private final int myFileRegister;
private final int myLineRegister;
private final int myColumnRegister;
private final Pattern myPattern;
private final Project myProject;
@NonNls private static final String FILE_STR = "file";
@NonNls private static final String LINE_STR = "line";
@NonNls private static final String COLUMN_STR = "column";
public RegexpFilter(Project project, @NonNls String expression) {
myProject = project;
validate(expression);
if (expression == null || "".equals(expression.trim())) {
throw new InvalidExpressionException("expression == null or empty");
}
int filePathIndex = expression.indexOf(FILE_PATH_MACROS);
int lineIndex = expression.indexOf(LINE_MACROS);
int columnIndex = expression.indexOf(COLUMN_MACROS);
if (filePathIndex == -1) {
throw new InvalidExpressionException("Expression must contain " + FILE_PATH_MACROS + " macros.");
}
final TreeMap<Integer,String> map = new TreeMap<Integer, String>();
map.put(new Integer(filePathIndex), FILE_STR);
expression = StringUtil.replace(expression, FILE_PATH_MACROS, FILE_PATH_REGEXP);
if (lineIndex != -1) {
expression = StringUtil.replace(expression, LINE_MACROS, NUMBER_REGEXP);
map.put(new Integer(lineIndex), LINE_STR);
}
if (columnIndex != -1) {
expression = StringUtil.replace(expression, COLUMN_MACROS, NUMBER_REGEXP);
map.put(new Integer(columnIndex), COLUMN_STR);
}
// The block below determines the registers based on the sorted map.
int count = 0;
for (final Integer integer : map.keySet()) {
count++;
final String s = map.get(integer);
if (FILE_STR.equals(s)) {
filePathIndex = count;
}
else if (LINE_STR.equals(s)) {
lineIndex = count;
}
else if (COLUMN_STR.equals(s)) {
columnIndex = count;
}
}
myFileRegister = filePathIndex;
myLineRegister = lineIndex;
myColumnRegister = columnIndex;
myPattern = Pattern.compile(expression, Pattern.MULTILINE);
}
public static void validate(String expression) {
if (expression == null || "".equals(expression.trim())) {
throw new InvalidExpressionException("expression == null or empty");
}
expression = substituteMacrosesWithRegexps(expression);
Pattern.compile(expression, Pattern.MULTILINE);
}
private static String substituteMacrosesWithRegexps(String expression) {
int filePathIndex = expression.indexOf(FILE_PATH_MACROS);
int lineIndex = expression.indexOf(LINE_MACROS);
int columnIndex = expression.indexOf(COLUMN_MACROS);
if (filePathIndex == -1) {
throw new InvalidExpressionException("Expression must contain " + FILE_PATH_MACROS + " macros.");
}
expression = StringUtil.replace(expression, FILE_PATH_MACROS, FILE_PATH_REGEXP);
if (lineIndex != -1) {
expression = StringUtil.replace(expression, LINE_MACROS, NUMBER_REGEXP);
}
if (columnIndex != -1) {
expression = StringUtil.replace(expression, COLUMN_MACROS, NUMBER_REGEXP);
}
return expression;
}
public Result applyFilter(final String line, final int entireLength) {
final Matcher matcher = myPattern.matcher(line);
if (matcher.find()) {
return createResult(matcher, entireLength - line.length());
}
return null;
}
private Result createResult(final Matcher matcher, final int entireLen) {
final String filePath = matcher.group(myFileRegister);
String lineNumber = "0";
if (myLineRegister != -1) {
lineNumber = matcher.group(myLineRegister);
}
String columnNumber = "0";
if (myColumnRegister != -1) {
columnNumber = matcher.group(myColumnRegister);
}
int line = 0;
int column = 0;
try {
line = Integer.parseInt(lineNumber);
column = Integer.parseInt(columnNumber);
} catch (NumberFormatException e) {
// Do nothing, so that line and column will remain at their initial
// zero values.
}
if (line > 0) line -= 1;
if (column > 0) column -= 1;
// Calculate the offsets relative to the entire text.
final int highlightStartOffset = entireLen + matcher.start(myFileRegister);
final int highlightEndOffset = highlightStartOffset + filePath.length();
final HyperlinkInfo info = createOpenFileHyperlink(filePath, line, column);
return new Result(highlightStartOffset, highlightEndOffset, info);
}
@Nullable
protected HyperlinkInfo createOpenFileHyperlink(String fileName, final int line, final int column) {
fileName = fileName.replace(File.separatorChar, '/');
final VirtualFile file = LocalFileSystem.getInstance().findFileByPath(fileName);
if (file == null) return null;
return new OpenFileHyperlinkInfo(myProject, file, line, column);
}
public static String[] getMacrosName() {
return new String[] {FILE_PATH_MACROS, LINE_MACROS, COLUMN_MACROS};
}
}
| apache-2.0 |
GoogleCloudPlatform/python-docs-samples | composer/workflows/connections_test.py | 1020 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import internal_unit_testing
def test_dag_import():
"""Test that the DAG file can be successfully imported.
This tests that the DAG can be parsed, but does not run it in an Airflow
environment. This is a recommended confidence check by the official Airflow
docs: https://airflow.incubator.apache.org/tutorial.html#testing
"""
from . import connections as module
internal_unit_testing.assert_has_valid_dag(module)
| apache-2.0 |
jwagenleitner/groovy | src/main/java/org/codehaus/groovy/ast/AnnotatedNode.java | 3871 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.codehaus.groovy.ast;
import groovy.lang.groovydoc.Groovydoc;
import groovy.lang.groovydoc.GroovydocHolder;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
* Base class for any AST node which is capable of being annotated
*/
/**
 * Base class for any AST node that is capable of carrying annotations.
 * Also exposes the Groovydoc attached to the node, if any, and tracks
 * whether the node was synthesized by the compiler.
 */
public class AnnotatedNode extends ASTNode implements GroovydocHolder<AnnotatedNode> {
    private List<AnnotationNode> annotations = Collections.emptyList();
    private boolean synthetic;
    ClassNode declaringClass;
    private boolean hasNoRealSourcePositionFlag;

    public AnnotatedNode() {
    }

    /**
     * @return all annotations attached to this node (possibly empty, never null)
     */
    public List<AnnotationNode> getAnnotations() {
        return annotations;
    }

    /**
     * @param type the annotation class to filter by
     * @return the annotations on this node whose class node equals {@code type}
     */
    public List<AnnotationNode> getAnnotations(ClassNode type) {
        List<AnnotationNode> matching = new ArrayList<AnnotationNode>(annotations.size());
        for (AnnotationNode annotation : annotations) {
            if (type.equals(annotation.getClassNode())) {
                matching.add(annotation);
            }
        }
        return matching;
    }

    public void addAnnotation(AnnotationNode value) {
        checkInit();
        annotations.add(value);
    }

    // Lazily replace the shared immutable empty list with a small mutable one
    // the first time an annotation is added. The identity comparison against
    // Collections.EMPTY_LIST matches what Collections.emptyList() returns.
    private void checkInit() {
        if (annotations == Collections.EMPTY_LIST) {
            annotations = new ArrayList<AnnotationNode>(3);
        }
    }

    public void addAnnotations(List<AnnotationNode> annotations) {
        for (AnnotationNode annotation : annotations) {
            addAnnotation(annotation);
        }
    }

    /**
     * Returns true if this node is added by the compiler.
     * <b>NOTE</b>:
     * This method has nothing to do with the synthetic flag
     * for fields, methods or classes.
     *
     * @return true if this node is added by the compiler
     */
    public boolean isSynthetic() {
        return synthetic;
    }

    /**
     * Sets this node as a node added by the compiler.
     * <b>NOTE</b>:
     * This method has nothing to do with the synthetic flag
     * for fields, methods or classes.
     *
     * @param synthetic if true this node is marked as added by the compiler
     */
    public void setSynthetic(boolean synthetic) {
        this.synthetic = synthetic;
    }

    public ClassNode getDeclaringClass() {
        return declaringClass;
    }

    /**
     * @param declaringClass the declaring class to set
     */
    public void setDeclaringClass(ClassNode declaringClass) {
        this.declaringClass = declaringClass;
    }

    /**
     * Currently only ever returns true for default constructors
     * added by the compiler. See GROOVY-4161.
     */
    public boolean hasNoRealSourcePosition() {
        return hasNoRealSourcePositionFlag;
    }

    public void setHasNoRealSourcePosition(boolean value) {
        this.hasNoRealSourcePositionFlag = value;
    }

    @Override
    public Groovydoc getGroovydoc() {
        // Groovydoc is stored as node metadata; fall back to the shared empty
        // instance so callers never see null.
        Groovydoc doc = this.<Groovydoc>getNodeMetaData(DOC_COMMENT);
        return doc == null ? Groovydoc.EMPTY_GROOVYDOC : doc;
    }

    @Override
    public AnnotatedNode getInstance() {
        return this;
    }
}
| apache-2.0 |
eSCT/oppfin | src/test/java/test/searchbox/core/ref/ReflectionUtilsTest.java | 1119 | package test.searchbox.core.ref;
import java.util.ArrayList;
import java.util.List;

import org.junit.Test;

import com.searchbox.core.ref.ReflectionUtils;

import static org.junit.Assert.assertEquals;
/**
 * Exercises {@link ReflectionUtils#findAllArgumentPermutations(Object[][])}
 * with three parameter slots holding 2, 2 and 3 candidate values.
 *
 * Fixes: the original used the deprecated boxing constructors
 * (new Integer / new Boolean) and the redundant new String(...) form, and
 * asserted nothing — it only printed the result.
 */
public class ReflectionUtilsTest {

    @Test
    public void testPermutations() {
        // Three parameter slots with 2 x 2 x 3 candidate values each.
        Object[][] parameters = new Object[3][];
        parameters[0] = new Object[] { Integer.valueOf(10), Integer.valueOf(20) };
        parameters[1] = new Object[] { Boolean.TRUE, Boolean.FALSE };
        parameters[2] = new Object[] { "P1", "P2", "P3" };

        List<Object[]> argumentBags = ReflectionUtils
                .findAllArgumentPermutations(parameters);

        // Assumes the helper returns the full cartesian product, as its name
        // implies: 2 * 2 * 3 = 12 argument bags of 3 values each.
        assertEquals(12, argumentBags.size());
        for (Object[] bag : argumentBags) {
            assertEquals(3, bag.length);
            System.out.println("[" + bag[0] + ", " + bag[1] + ", " + bag[2] + "]");
        }
    }
}
| apache-2.0 |
google/cel-go | common/types/pb/type_test.go | 13249 | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pb
import (
"reflect"
"testing"
"time"
"github.com/google/cel-go/checker/decls"
"google.golang.org/protobuf/proto"
proto3pb "github.com/google/cel-go/test/proto3pb"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
dynamicpb "google.golang.org/protobuf/types/dynamicpb"
anypb "google.golang.org/protobuf/types/known/anypb"
dpb "google.golang.org/protobuf/types/known/durationpb"
structpb "google.golang.org/protobuf/types/known/structpb"
tpb "google.golang.org/protobuf/types/known/timestamppb"
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
)
// TestTypeDescription ensures a freshly constructed Db pre-registers
// descriptors for the protobuf well-known types that CEL relies on.
func TestTypeDescription(t *testing.T) {
	pbdb := NewDb()
	for _, typeName := range []string{
		".google.protobuf.Any",
		".google.protobuf.BoolValue",
		".google.protobuf.BytesValue",
		".google.protobuf.DoubleValue",
		".google.protobuf.FloatValue",
		".google.protobuf.Int32Value",
		".google.protobuf.Int64Value",
		".google.protobuf.ListValue",
		".google.protobuf.Struct",
		".google.protobuf.Value",
	} {
		if _, found := pbdb.DescribeType(typeName); !found {
			t.Errorf("pbdb.DescribeType(%v) not found", typeName)
		}
	}
}
// TestTypeDescriptionFieldMap checks that a registered message reports the
// expected number of fields through FieldMap.
func TestTypeDescriptionFieldMap(t *testing.T) {
	pbdb := NewDb()
	msg := &proto3pb.NestedTestAllTypes{}
	pbdb.RegisterMessage(msg)
	typeName := string(msg.ProtoReflect().Descriptor().FullName())
	td, found := pbdb.DescribeType(typeName)
	if !found {
		t.Fatalf("pbdb.DescribeType(%v) not found", msg)
	}
	fieldCount := len(td.FieldMap())
	if fieldCount != 2 {
		t.Errorf("Unexpected field count. got '%d', wanted '%d'", fieldCount, 2)
	}
}
// TestFieldDescription validates the metadata reported for the 'payload'
// field of NestedTestAllTypes: its name, the kind flags
// (oneof/map/message/enum/list), and the CEL checked type derived from the
// protobuf descriptor.
func TestFieldDescription(t *testing.T) {
	pbdb := NewDb()
	msg := proto3pb.NestedTestAllTypes{}
	_, err := pbdb.RegisterMessage(&msg)
	if err != nil {
		t.Fatalf("pbdb.RegisterMessage(%v) failed: %v", &msg, err)
	}
	td, found := pbdb.DescribeType(string(msg.ProtoReflect().Descriptor().FullName()))
	if !found {
		t.Fatalf("pbdb.DescribeType(%v) not found", &msg)
	}
	fd, found := td.FieldByName("payload")
	if !found {
		t.Error("Field 'payload' not found")
	}
	if fd.Name() != "payload" {
		t.Error("Unexpected struct name for field 'payload'", fd.Name())
	}
	// 'payload' is a singular message field, so every other kind flag must
	// report false.
	if fd.IsOneof() {
		t.Error("Field payload is listed as a oneof and it is not.")
	}
	if fd.IsMap() {
		t.Error("Field 'payload' is listed as a map and it is not.")
	}
	if !fd.IsMessage() {
		t.Error("Field 'payload' is not marked as a message.")
	}
	if fd.IsEnum() {
		t.Error("Field 'payload' is marked as an enum.")
	}
	if fd.IsList() {
		t.Error("Field 'payload' is marked as repeated.")
	}
	// Confirm the field's CEL checked type resolves to the payload's message
	// type. (Earlier revisions compared Go struct indexes here; this check
	// now compares checked types only.)
	got := fd.CheckedType()
	wanted := &exprpb.Type{
		TypeKind: &exprpb.Type_MessageType{
			MessageType: "google.expr.proto3.test.TestAllTypes",
		},
	}
	if !proto.Equal(got, wanted) {
		t.Error("Field 'payload' had an unexpected checked type.")
	}
}
// TestFieldDescriptionGetFrom reads a variety of field kinds off a populated
// TestAllTypes message and checks the Go-native values GetFrom produces:
// wrapper types unwrap to primitives, an unset wrapper yields
// NullValue_NULL_VALUE, enums surface as int64, and duration/timestamp
// surface as time.Duration/time.Time.
func TestFieldDescriptionGetFrom(t *testing.T) {
	pbdb := NewDb()
	msg := &proto3pb.TestAllTypes{
		SingleUint64:       12,
		SingleDuration:     dpb.New(time.Duration(1234)),
		SingleTimestamp:    tpb.New(time.Unix(12345, 0).UTC()),
		SingleBoolWrapper:  wrapperspb.Bool(false),
		SingleInt32Wrapper: wrapperspb.Int32(42),
		StandaloneEnum:     proto3pb.TestAllTypes_BAR,
		NestedType: &proto3pb.TestAllTypes_SingleNestedMessage{
			SingleNestedMessage: &proto3pb.TestAllTypes_NestedMessage{
				Bb: 123,
			},
		},
		SingleValue: structpb.NewStringValue("hello world"),
		SingleStruct: jsonStruct(t, map[string]interface{}{
			"null": nil,
		}),
	}
	msgName := string(msg.ProtoReflect().Descriptor().FullName())
	_, err := pbdb.RegisterMessage(msg)
	if err != nil {
		t.Fatalf("pbdb.RegisterMessage(%q) failed: %v", msgName, err)
	}
	td, found := pbdb.DescribeType(msgName)
	if !found {
		t.Fatalf("pbdb.DescribeType(%q) not found", msgName)
	}
	// Map of field name -> expected Go-native value returned by GetFrom.
	// Note single_int64_wrapper is NOT set on msg, so it must surface as
	// the JSON null sentinel rather than a zero int.
	expected := map[string]interface{}{
		"single_uint64":        uint64(12),
		"single_duration":      time.Duration(1234),
		"single_timestamp":     time.Unix(12345, 0).UTC(),
		"single_bool_wrapper":  false,
		"single_int32_wrapper": int32(42),
		"single_int64_wrapper": structpb.NullValue_NULL_VALUE,
		"single_nested_message": &proto3pb.TestAllTypes_NestedMessage{
			Bb: 123,
		},
		"standalone_enum": int64(1),
		"single_value":    "hello world",
		"single_struct": jsonStruct(t, map[string]interface{}{
			"null": nil,
		}),
	}
	for field, want := range expected {
		f, found := td.FieldByName(field)
		if !found {
			t.Fatalf("td.FieldByName(%q) not found", field)
		}
		got, err := f.GetFrom(msg)
		if err != nil {
			t.Fatalf("field.GetFrom() failed: %v", err)
		}
		// proto.Equal for message-typed results, deep equality otherwise.
		switch g := got.(type) {
		case proto.Message:
			if !proto.Equal(g, want.(proto.Message)) {
				t.Errorf("got field %s value %v, wanted %v", field, g, want)
			}
		default:
			if !reflect.DeepEqual(g, want) {
				t.Errorf("got field %s value %v, wanted %v", field, g, want)
			}
		}
	}
}
// TestFieldDescriptionIsSet verifies FieldDescription.IsSet across message
// shapes: a proto3 scalar set to its zero value reports unset, and both a
// reflect.Value-wrapped message and a nil input must safely report unset.
func TestFieldDescriptionIsSet(t *testing.T) {
	pbdb := NewDb()
	msg := &proto3pb.TestAllTypes{}
	msgName := string(msg.ProtoReflect().Descriptor().FullName())
	_, err := pbdb.RegisterMessage(msg)
	if err != nil {
		t.Fatalf("pbdb.RegisterMessage(%q) failed: %v", msgName, err)
	}
	td, found := pbdb.DescribeType(msgName)
	if !found {
		t.Fatalf("pbdb.DescribeType(%q) not found", msgName)
	}
	tests := []struct {
		msg   interface{}
		field string
		isSet bool
	}{
		{
			msg:   &proto3pb.TestAllTypes{SingleBool: true},
			field: "single_bool",
			isSet: true,
		},
		{
			msg:   &proto3pb.TestAllTypes{},
			field: "single_bool",
			isSet: false,
		},
		// NOTE(review): the next two cases duplicate the zero-value case
		// above with extra parentheses; kept for parity with upstream.
		{
			msg:   (&proto3pb.TestAllTypes{SingleBool: false}),
			field: "single_bool",
			isSet: false,
		},
		{
			msg:   (&proto3pb.TestAllTypes{}),
			field: "single_bool",
			isSet: false,
		},
		{
			msg:   reflect.ValueOf(&proto3pb.TestAllTypes{}),
			field: "single_bool",
			isSet: false,
		},
		{
			msg:   nil,
			field: "single_any",
			isSet: false,
		},
	}
	for _, tc := range tests {
		f, found := td.FieldByName(tc.field)
		if !found {
			t.Fatalf("td.FieldByName(%q) not found", tc.field)
		}
		if f.IsSet(tc.msg) != tc.isSet {
			t.Errorf("got field %s set: %v, wanted %v", tc.field, f.IsSet(tc.msg), tc.isSet)
		}
	}
}
// TestTypeDescriptionMaybeUnwrap exercises TypeDescription.MaybeUnwrap over
// the full matrix of wrapper-like protobuf values: google.protobuf.Value,
// the wrapperspb types, Any-packed payloads, dynamicpb messages, and
// duration/timestamp. Every input in the table is expected to unwrap, and
// the result is compared via proto.Equal for messages or reflect.DeepEqual
// for Go-native values.
func TestTypeDescriptionMaybeUnwrap(t *testing.T) {
	pbdb := NewDb()
	_, err := pbdb.RegisterMessage(&proto3pb.TestAllTypes{})
	if err != nil {
		t.Fatalf("pbdb.RegisterMessage() failed: %v", err)
	}
	msgType := "google.protobuf.Value"
	msgDesc, found := pbdb.DescribeType(msgType)
	if !found {
		t.Fatalf("pbdb.DescribeType(%q) not found", msgType)
	}
	tests := []struct {
		in  proto.Message
		out interface{}
	}{
		// An empty/zero google.protobuf.Value unwraps to JSON null.
		{
			in:  msgDesc.Zero(),
			out: structpb.NullValue_NULL_VALUE,
		},
		{
			in:  msgDesc.New().Interface(),
			out: structpb.NullValue_NULL_VALUE,
		},
		// Any-packed values unwrap through the packed payload.
		{
			in:  anyMsg(t, wrapperspb.Bool(true)),
			out: true,
		},
		{
			in:  anyMsg(t, structpb.NewNumberValue(4.5)),
			out: float64(4.5),
		},
		// dynamicpb copies of Any/list values unwrap the same way.
		{
			in:  dynMsg(t, anyMsg(t, structpb.NewNumberValue(4.5))),
			out: float64(4.5),
		},
		{
			in:  dynMsg(t, anyMsg(t, &proto3pb.TestAllTypes{SingleFloat: 123.0})),
			out: &proto3pb.TestAllTypes{SingleFloat: 123.0},
		},
		{
			in:  dynMsg(t, &structpb.ListValue{}),
			out: jsonList(t, []interface{}{}),
		},
		// google.protobuf.Value variants unwrap to their native Go value.
		{
			in:  structpb.NewBoolValue(true),
			out: true,
		},
		{
			in:  structpb.NewBoolValue(false),
			out: false,
		},
		{
			in:  structpb.NewNullValue(),
			out: structpb.NullValue_NULL_VALUE,
		},
		{
			in:  &structpb.Value{},
			out: structpb.NullValue_NULL_VALUE,
		},
		{
			in:  structpb.NewNumberValue(1.5),
			out: float64(1.5),
		},
		{
			in:  structpb.NewStringValue("hello world"),
			out: "hello world",
		},
		{
			in:  structpb.NewListValue(jsonList(t, []interface{}{true, 1.0})),
			out: jsonList(t, []interface{}{true, 1.0}),
		},
		{
			in:  structpb.NewStructValue(jsonStruct(t, map[string]interface{}{"hello": "world"})),
			out: jsonStruct(t, map[string]interface{}{"hello": "world"}),
		},
		// wrapperspb types unwrap to primitives; note 32-bit ints widen to
		// int64 and 32-bit uints widen to uint64.
		{
			in:  wrapperspb.Bool(false),
			out: false,
		},
		{
			in:  wrapperspb.Bool(true),
			out: true,
		},
		{
			in:  wrapperspb.Bytes([]byte("hello")),
			out: []byte("hello"),
		},
		{
			in:  wrapperspb.Double(-4.2),
			out: -4.2,
		},
		{
			in:  wrapperspb.Float(4.5),
			out: 4.5,
		},
		{
			in:  wrapperspb.Int32(123),
			out: int64(123),
		},
		{
			in:  wrapperspb.Int64(456),
			out: int64(456),
		},
		{
			in:  wrapperspb.String("goodbye"),
			out: "goodbye",
		},
		{
			in:  wrapperspb.UInt32(1234),
			out: uint64(1234),
		},
		{
			in:  wrapperspb.UInt64(5678),
			out: uint64(5678),
		},
		// Timestamps and durations unwrap to time.Time / time.Duration.
		{
			in:  tpb.New(time.Unix(12345, 0).UTC()),
			out: time.Unix(12345, 0).UTC(),
		},
		{
			in:  dpb.New(time.Duration(345)),
			out: time.Duration(345),
		},
	}
	for _, tc := range tests {
		typeName := string(tc.in.ProtoReflect().Descriptor().FullName())
		td, found := pbdb.DescribeType(typeName)
		if !found {
			t.Fatalf("pbdb.DescribeType(%q) not found", typeName)
		}
		msg, unwrapped := td.MaybeUnwrap(tc.in)
		if !unwrapped {
			t.Errorf("value %v not unwrapped", tc.in)
		}
		switch val := msg.(type) {
		case proto.Message:
			if !proto.Equal(val, tc.out.(proto.Message)) {
				t.Errorf("got value %v, wanted %v", val, tc.out)
			}
		default:
			if !reflect.DeepEqual(val, tc.out) {
				t.Errorf("got value %v, wanted %v", val, tc.out)
			}
		}
	}
}
// BenchmarkTypeDescriptionMaybeUnwrap measures MaybeUnwrap across the same
// spread of wrapper-like inputs used by TestTypeDescriptionMaybeUnwrap,
// emitting one sub-benchmark per input type name.
//
// Fix: the RegisterMessage error was previously ignored; every sibling test
// in this file checks it, and a silent registration failure would make the
// benchmark measure the wrong code path.
func BenchmarkTypeDescriptionMaybeUnwrap(b *testing.B) {
	pbdb := NewDb()
	if _, err := pbdb.RegisterMessage(&proto3pb.TestAllTypes{}); err != nil {
		b.Fatalf("pbdb.RegisterMessage() failed: %v", err)
	}
	msgType := "google.protobuf.Value"
	msgDesc, found := pbdb.DescribeType(msgType)
	if !found {
		b.Fatalf("pbdb.DescribeType(%q) not found", msgType)
	}
	tests := []struct {
		in proto.Message
	}{
		{in: msgDesc.Zero()},
		{in: msgDesc.New().Interface()},
		{in: dynamicpb.NewMessage((&structpb.ListValue{}).ProtoReflect().Descriptor())},
		{in: structpb.NewBoolValue(true)},
		{in: structpb.NewBoolValue(false)},
		{in: structpb.NewNullValue()},
		{in: &structpb.Value{}},
		{in: structpb.NewNumberValue(1.5)},
		{in: structpb.NewStringValue("hello world")},
		{in: wrapperspb.Bool(false)},
		{in: wrapperspb.Bool(true)},
		{in: wrapperspb.Bytes([]byte("hello"))},
		{in: wrapperspb.Double(-4.2)},
		{in: wrapperspb.Float(4.5)},
		{in: wrapperspb.Int32(123)},
		{in: wrapperspb.Int64(456)},
		{in: wrapperspb.String("goodbye")},
		{in: wrapperspb.UInt32(1234)},
		{in: wrapperspb.UInt64(5678)},
		{in: tpb.New(time.Unix(12345, 0).UTC())},
		{in: dpb.New(time.Duration(345))},
		{in: &proto3pb.TestAllTypes{}},
	}
	for _, tc := range tests {
		typeName := string(tc.in.ProtoReflect().Descriptor().FullName())
		td, found := pbdb.DescribeType(typeName)
		if !found {
			b.Fatalf("pbdb.DescribeType(%q) not found", typeName)
		}
		// Re-bind for the closure so every sub-benchmark sees its own input.
		in := tc.in
		b.Run(typeName, func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				td.MaybeUnwrap(in)
			}
		})
	}
}
// TestTypeDescriptionCheckedType verifies that map and repeated proto fields
// translate to the corresponding CEL checked types: map<string,string> to
// map(string, string) and repeated NestedMessage to list(object).
func TestTypeDescriptionCheckedType(t *testing.T) {
	pbdb := NewDb()
	msg := &proto3pb.TestAllTypes{}
	msgName := string(msg.ProtoReflect().Descriptor().FullName())
	_, err := pbdb.RegisterMessage(msg)
	if err != nil {
		t.Fatalf("pbdb.RegisterMessage(%q) failed: %v", msgName, err)
	}
	td, found := pbdb.DescribeType(msgName)
	if !found {
		t.Fatalf("pbdb.DescribeType(%q) not found", msgName)
	}
	field, found := td.FieldByName("map_string_string")
	if !found {
		t.Fatal("td.FieldByName('map_string_string') not found")
	}
	mapType := decls.NewMapType(decls.String, decls.String)
	if !proto.Equal(field.CheckedType(), mapType) {
		t.Errorf("got checked type %v, wanted %v", field.CheckedType(), mapType)
	}
	field, found = td.FieldByName("repeated_nested_message")
	if !found {
		t.Fatal("td.FieldByName('repeated_nested_message') not found")
	}
	listType := decls.NewListType(decls.NewObjectType("google.expr.proto3.test.TestAllTypes.NestedMessage"))
	if !proto.Equal(field.CheckedType(), listType) {
		t.Errorf("got checked type %v, wanted %v", field.CheckedType(), listType)
	}
}
// dynMsg returns a dynamicpb copy of msg built from the same descriptor.
func dynMsg(t *testing.T, msg proto.Message) *dynamicpb.Message {
	t.Helper()
	clone := dynamicpb.NewMessage(msg.ProtoReflect().Descriptor())
	proto.Merge(clone, msg)
	return clone
}
// anyMsg packs msg into a google.protobuf.Any, failing the test on error.
func anyMsg(t *testing.T, msg proto.Message) *anypb.Any {
	t.Helper()
	packed, err := anypb.New(msg)
	if err != nil {
		t.Fatalf("anypb.New(%v) failed: %v", msg, err)
	}
	return packed
}
// jsonList converts native Go values into a structpb.ListValue, failing the
// test on conversion errors.
func jsonList(t *testing.T, elems []interface{}) *structpb.ListValue {
	t.Helper()
	listVal, err := structpb.NewList(elems)
	if err != nil {
		t.Fatalf("structpb.NewList() failed: %v", err)
	}
	return listVal
}
// jsonStruct converts a native Go map into a structpb.Struct, failing the
// test on conversion errors.
func jsonStruct(t *testing.T, entries map[string]interface{}) *structpb.Struct {
	t.Helper()
	structVal, err := structpb.NewStruct(entries)
	if err != nil {
		t.Fatalf("structpb.NewStruct() failed: %v", err)
	}
	return structVal
}
| apache-2.0 |
software-engineering-amsterdam/poly-ql | Nisha/src/expr/relational/GEq.java | 486 | package expr.relational;
import java.util.Map;
import ast.Visitor;
import ast.type.Booltype;
import ast.type.Type;
import expr.Expr;
import expr.Ident;
import expr.operation.Operation;
public class GEq extends Operation{
public GEq(Expr lhs, Expr rhs) {
super(lhs, rhs);
}
@Override
public <T> T accept(Visitor<T> visitor) {
return visitor.visit(this);
}
@Override
public Type typeof(Map<Ident,Type> symboles) {
return new Booltype();
}
} | apache-2.0 |
sapbuild/node-sap-promise | test/S1-PromiseState.spec.js | 13698 | 'use strict';
var assert = require('chai').assert;
var Promise = require('./promise');
// Helper functions
var helper = require('./helper/helper.js');
helper.assert = assert;
var testFailed = helper.testFailed;
var preventOnFulfilled = helper.preventOnFulfilled;
// Deferred-style helper: exposes a promise together with its resolve/reject
// controls. The executor runs synchronously, so both controls are assigned
// before the constructor returns.
function PromiseWrapper() {
    var wrapper = this;
    this.promise = new Promise(function (resolve, reject) {
        wrapper.resolve = resolve;
        wrapper.reject = reject;
    });
}
// ------------------------------------------------------------
// 1 Promise States
// ------------------------------------------------------------
//
// 1.1 When pending, a promise:
// 1.1.1 may transition to either the fulfilled or rejected state.
// 1.2 When fulfilled, a promise:
// 1.2.1 must not transition to any other state.
// 1.2.2 must have a value, which must not change.
// 1.3 When rejected, a promise:
// 1.3.1 must not transition to any other state.
// 1.3.2 must have a reason, which must not change.
// 1.4 Attempting to fulfill or reject a resolved promise has no effect (ES6 addition to promise A+)
// Promises/A+ section 1 (promise states) plus ES6/implementation extensions.
// The settled-flag + setTimeout pattern used throughout deliberately leaves a
// window after the expected callback so that any *extra* (incorrect)
// callbacks fire and trip an assertion before done() is called.
describe('1 Promise states', function () {
    this.timeout(3000);
    // 1.1
    describe('1.1 When pending, a promise:', function () {
        describe('1.1.1 may transition to either the fulfilled or rejected state.', function () {
            it('Transition to fulfilled state', function (done) {
                var p = new Promise(function (resolve) {
                    resolve(1);
                });
                p.then(function () {
                    assert.ok('true', 'Promise is fulfilled');
                }).then(done, testFailed(done));
            });
            it('Transition to rejected state', function (done) {
                var p = new Promise(function (resolve, reject) {
                    reject(new Error('rejected'));
                });
                p.then(preventOnFulfilled, function () {
                    assert.ok('true', 'Promise is rejected');
                }).then(done, testFailed(done));
            });
        });
    });
    // 1.2
    describe('1.2 When fulfilled, a promise:', function () {
        describe('1.2.1 must not transition to any other state / 1.2.2 must have a value which must not change.', function () {
            it('Fulfill a fulfilled promise', function (done) {
                var wrapper, settled;
                wrapper = new PromiseWrapper();
                wrapper.promise.then(function (value) {
                    if (settled) {
                        assert.ok(false, 'onFulfilled must be called once');
                    }
                    settled = true;
                    assert.equal(value, 1, 'Promise is immutable once fulfilled');
                }, function () {
                    assert.ok(false, 'onRejected must not be called');
                });
                wrapper.resolve(1);
                wrapper.resolve(2); // second resolve must be a no-op
                setTimeout(done, 10);
            });
            it('Reject a fulfilled promise', function (done) {
                var wrapper, settled;
                wrapper = new PromiseWrapper();
                wrapper.promise.then(function (value) {
                    if (settled) {
                        assert.ok(false, 'onFulfilled must be called once');
                    }
                    settled = true;
                    assert.equal(value, 1, 'Promise is immutable once fulfilled');
                }, function () {
                    assert.ok(false, 'onRejected must not be called');
                });
                wrapper.resolve(1);
                wrapper.reject(new Error('rejected')); // reject after resolve must be a no-op
                setTimeout(done, 10);
            });
            it('Throw in constructor after fulfilling the promise', function (done) {
                var promise;
                promise = new Promise(function (resolve) {
                    resolve(1);
                    // A throw after resolve() must not reject the promise.
                    throw new Error('rejected');
                });
                promise.then(function (value) {
                    assert.equal(value, 1, 'Promise is immutable once fulfilled');
                }, function () {
                    assert.ok(false, 'onRejected must not be called');
                }).then(done, testFailed(done));
            });
        });
    });
    // 1.3
    describe('1.3 When rejected, a promise:', function () {
        describe('1.3.1 must not transition to any other state / 1.3.2 must have a reason which must not change', function () {
            it('Fulfill a rejected Promise', function (done) {
                var wrapper, settled, error;
                error = new Error('rejected');
                wrapper = new PromiseWrapper();
                wrapper.promise.then(preventOnFulfilled, function (reason) {
                    if (settled) {
                        assert.ok(false, 'onRejected must be called once');
                    }
                    settled = true;
                    assert.equal(reason, error, 'Promise is immutable once rejected');
                });
                wrapper.reject(error);
                wrapper.resolve(1); // resolve after reject must be a no-op
                setTimeout(done, 10);
            });
            it('Reject a rejected Promise', function (done) {
                var wrapper, settled, error;
                error = new Error('rejected');
                wrapper = new PromiseWrapper();
                wrapper.promise.then(preventOnFulfilled, function (reason) {
                    if (settled) {
                        assert.ok(false, 'onRejected must be called once');
                    }
                    settled = true;
                    assert.equal(reason, error, 'Promise is immutable once rejected');
                });
                wrapper.reject(error);
                wrapper.reject(new Error('Another reason')); // second reject must be a no-op
                setTimeout(done, 10);
            });
            it('Throw in constructor after rejecting the promise', function (done) {
                var promise, settled, error;
                error = new Error('rejected');
                promise = new Promise(function (resolve, reject) {
                    reject(error);
                    // A throw after reject() must not replace the reason.
                    throw new Error('Another reason');
                });
                promise.then(preventOnFulfilled, function (reason) {
                    if (settled) {
                        assert.ok(false, 'onRejected must be called once');
                    }
                    settled = true;
                    assert.equal(reason, error, 'Promise is immutable once rejected');
                }).then(done, testFailed(done));
            });
        });
    });
    // 1.4 Attempting to fulfill or reject a resolved promise has no effect
    describe('1.4 Attempting to fulfill or reject a resolved promise has no effect ', function () {
        it('Fulfill a promise resolved to an eventually fulfilled promise', function (done) {
            var w1, w2, settled;
            w1 = new PromiseWrapper();
            w2 = new PromiseWrapper();
            w2.promise.then(function (value) {
                if (settled) {
                    assert.ok(false, 'onFulfilled must be called once');
                }
                settled = true;
                assert.equal(value, 1, 'Promise is fulfilled with parent promise value');
            }, function () {
                assert.ok(false, 'onRejected must not be called');
            });
            w2.resolve(w1.promise); // w2 now locked onto w1's eventual state
            w2.resolve(2);
            setTimeout(function () {
                assert.ok(!settled, 'Resolved promise must remain pending until parent promise is settled');
                w1.resolve(1);
                setTimeout(done, 10);
            }, 10);
        });
        it('Reject a promise resolved to an eventually fulfilled promise', function (done) {
            var w1, w2, settled;
            w1 = new PromiseWrapper();
            w2 = new PromiseWrapper();
            w2.promise.then(function (value) {
                if (settled) {
                    assert.ok(false, 'onFulfilled must be called once');
                }
                settled = true;
                assert.equal(value, 1, 'Promise is fulfilled with upstream promise value');
            }, function () {
                assert.ok(false, 'onRejected must not be called');
            });
            w2.resolve(w1.promise);
            w2.reject(new Error('rejected')); // reject after resolve-to-promise must be a no-op
            setTimeout(function () {
                assert.ok(!settled, 'Resolved promise must remain pending until upstream promise is settled');
                w1.resolve(1);
                setTimeout(done, 10);
            }, 10);
        });
        it('Throw in promise constructor after resolving to an eventually fulfilled promise', function (done) {
            var w1, p2, settled;
            w1 = new PromiseWrapper();
            p2 = new Promise(function (resolve) {
                resolve(w1.promise);
                throw new Error('rejected');
            });
            p2.then(function (value) {
                if (settled) {
                    assert.ok(false, 'onFulfilled must be called once');
                }
                settled = true;
                assert.equal(value, 1, 'Promise is fulfilled with upstream promise value');
            }, function () {
                assert.ok(false, 'onRejected must not be called');
            });
            setTimeout(function () {
                assert.ok(!settled, 'Resolved promise must remain pending until parent promise is settled');
                w1.resolve(1);
                setTimeout(done, 10);
            }, 10);
        });
        it('Fulfill a promise resolved to an eventually rejected promise', function (done) {
            var w1, w2, settled, error;
            error = new Error('rejected');
            w1 = new PromiseWrapper();
            w2 = new PromiseWrapper();
            w2.promise.then(preventOnFulfilled, function (reason) {
                if (settled) {
                    assert.ok(false, 'onRejected must be called once');
                }
                settled = true;
                assert.equal(reason, error, 'Promise is rejected with parent promise reason');
            });
            w2.resolve(w1.promise);
            w2.resolve(2);
            setTimeout(function () {
                assert.ok(!settled, 'Resolved promise must remain pending until parent promise is settled');
                w1.reject(error);
                setTimeout(done, 10);
            }, 10);
        });
        it('Reject a promise resolved to an eventually rejected promise', function (done) {
            var w1, w2, settled, error;
            error = new Error('rejected');
            w1 = new PromiseWrapper();
            w2 = new PromiseWrapper();
            w2.promise.then(preventOnFulfilled, function (reason) {
                if (settled) {
                    assert.ok(false, 'onRejected must be called once');
                }
                settled = true;
                assert.equal(reason, error, 'Promise is rejected with parent promise reason');
            });
            w2.resolve(w1.promise);
            w2.reject(new Error('another reason'));
            setTimeout(function () {
                assert.ok(!settled, 'Resolved promise must remain pending until parent promise is settled');
                w1.reject(error);
                setTimeout(done, 10);
            }, 10);
        });
        it('Throw in promise constructor after resolving to an eventually rejected promise', function (done) {
            var w1, p2, settled, error;
            error = new Error('rejected');
            w1 = new PromiseWrapper();
            p2 = new Promise(function (resolve) {
                resolve(w1.promise);
                throw new Error('another reason');
            });
            p2.then(preventOnFulfilled, function (reason) {
                if (settled) {
                    assert.ok(false, 'onRejected must be called once');
                }
                settled = true;
                assert.equal(reason, error, 'Promise is rejected with parent promise reason');
            });
            setTimeout(function () {
                assert.ok(!settled, 'Resolved promise must remain pending until parent promise is settled');
                w1.reject(error);
                setTimeout(done, 10);
            }, 10);
        });
    });
    // 1.5 promise implementation gives access to status and result
    // These are non-standard extensions, so each is feature-detected on
    // Promise.prototype before the test is registered.
    describe('1.5 promise implementation gives access to status and result', function () {
        if ('id' in Promise.prototype) {
            it('Promise id', function () {
                var p = new Promise(function () {});
                var q = new Promise(function () {});
                assert.notEqual(p.id, q.id);
            });
        }
        if ('result' in Promise.prototype) {
            it('Promise result', function () {
                var p = new Promise(function () {});
                var error = new Error('rejected');
                assert.isUndefined(p.result);
                assert.equal(Promise.resolve(1).result, 1);
                assert.equal(Promise.reject(error).result, error);
            });
        }
        if ('status' in Promise.prototype) {
            it('Promise status', function () {
                var p = new Promise(function () {});
                assert.equal(p.status, Promise.PENDING);
                assert.equal(Promise.resolve(1).status, Promise.FULFILLED);
                assert.equal(Promise.reject(new Error('rejected')).status, Promise.REJECTED);
            });
        }
    });
});
| apache-2.0 |
projectkudu/AzureFunctions | client-react/src/pages/app/functions/function/monitor/tabs/entities/FunctionEntitiesDataLoader.tsx | 2483 | import React, { useState, useEffect } from 'react';
import FunctionEntitiesData from './FunctionEntities.data';
import FunctionEntities from './FunctionEntities';
import { AppInsightsEntityTrace, AppInsightsEntityTraceDetail } from '../../../../../../../models/app-insights';
// Props identifying the function app and the Application Insights instance
// used to query durable entity traces. appInsightsToken is optional because
// it is fetched asynchronously and may not be available on first render.
interface FunctionEntitiesDataLoaderProps {
  resourceId: string;
  appInsightsAppId: string;
  appInsightsResourceId: string;
  appInsightsToken?: string;
}

// Single module-level data-access instance, shared via context so children
// reuse the same query helper rather than constructing their own.
const entitiesData = new FunctionEntitiesData();
export const FunctionEntitiesContext = React.createContext(entitiesData);
// Container component: fetches durable-entity traces (and per-trace details)
// from Application Insights and feeds them to the presentational
// FunctionEntities component.
const FunctionEntitiesDataLoader: React.FC<FunctionEntitiesDataLoaderProps> = props => {
  const { resourceId, appInsightsToken, appInsightsAppId, appInsightsResourceId } = props;
  // Trace currently selected in the UI; drives the details query below.
  const [currentTrace, setCurrentTrace] = useState<AppInsightsEntityTrace | undefined>(undefined);
  // undefined is used as the "loading" sentinel for both lists.
  const [entityTraces, setEntityTraces] = useState<AppInsightsEntityTrace[] | undefined>(undefined);
  const [entityDetails, setEntityDetails] = useState<AppInsightsEntityTraceDetail[] | undefined>(undefined);

  // Loads the trace list; skipped until the App Insights token is available.
  const fetchEntityTraces = async () => {
    if (appInsightsToken) {
      const entityTracesResponse = await entitiesData.getEntityTraces(appInsightsAppId, appInsightsToken, resourceId);
      setEntityTraces(entityTracesResponse);
    }
  };

  // Clears the list back to the loading sentinel before re-fetching.
  const refreshEntities = () => {
    setEntityTraces(undefined);
    fetchEntityTraces();
  };

  // Loads the detail rows for the selected trace, keyed by its durable
  // functions instance id.
  const fetchEntityTraceDetails = async () => {
    if (appInsightsToken && currentTrace) {
      const entitiesDetailsResponse = await entitiesData.getEntityDetails(
        appInsightsAppId,
        appInsightsToken,
        currentTrace.DurableFunctionsInstanceId
      );
      setEntityDetails(entitiesDetailsResponse);
    }
  };

  // Re-query details whenever the selected trace changes.
  useEffect(() => {
    fetchEntityTraceDetails();
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [currentTrace]);

  // Query the trace list once the token arrives (or changes).
  // NOTE(review): neither effect cancels an in-flight request on unmount, so
  // a late response could call setState on an unmounted component — confirm
  // whether this matters for this page's lifetime.
  useEffect(() => {
    fetchEntityTraces();
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [appInsightsToken]);

  return (
    <FunctionEntitiesContext.Provider value={entitiesData}>
      <FunctionEntities
        functionResourceId={resourceId}
        appInsightsResourceId={appInsightsResourceId}
        setCurrentTrace={setCurrentTrace}
        currentTrace={currentTrace}
        entityTraces={entityTraces}
        refreshEntities={refreshEntities}
        entityDetails={entityDetails}
      />
    </FunctionEntitiesContext.Provider>
  );
};
export default FunctionEntitiesDataLoader;
| apache-2.0 |
dghubble/matchbox | vendor/github.com/coreos/container-linux-config-transpiler/config/types/systemd.go | 1893 | // Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
ignTypes "github.com/coreos/ignition/config/v2_1/types"
"github.com/coreos/ignition/config/validate/astnode"
"github.com/coreos/ignition/config/validate/report"
)
// Systemd mirrors the `systemd:` section of a Container Linux config YAML.
type Systemd struct {
	Units []SystemdUnit `yaml:"units"`
}
// SystemdUnit describes one systemd unit from the YAML config. Enable and
// Enabled both map straight through to the Ignition unit; NOTE(review) their
// exact semantics (legacy bool vs tri-state pointer) follow the Ignition
// config spec — confirm there if it matters.
type SystemdUnit struct {
	Name     string              `yaml:"name"`
	Enable   bool                `yaml:"enable"`
	Enabled  *bool               `yaml:"enabled"`
	Mask     bool                `yaml:"mask"`
	Contents string              `yaml:"contents"`
	Dropins  []SystemdUnitDropIn `yaml:"dropins"`
}
// SystemdUnitDropIn is a named drop-in file attached to a systemd unit.
type SystemdUnitDropIn struct {
	Name     string `yaml:"name"`
	Contents string `yaml:"contents"`
}
// init registers the v2.0 translation that converts the config's systemd
// units (and their drop-ins) into the corresponding Ignition unit types.
func init() {
	register2_0(func(in Config, ast astnode.AstNode, out ignTypes.Config, platform string) (ignTypes.Config, report.Report, astnode.AstNode) {
		for _, u := range in.Systemd.Units {
			unit := ignTypes.Unit{
				Name:     u.Name,
				Enable:   u.Enable,
				Enabled:  u.Enabled,
				Mask:     u.Mask,
				Contents: u.Contents,
			}
			for _, d := range u.Dropins {
				unit.Dropins = append(unit.Dropins, ignTypes.Dropin{
					Name:     d.Name,
					Contents: d.Contents,
				})
			}
			out.Systemd.Units = append(out.Systemd.Units, unit)
		}
		return out, report.Report{}, ast
	})
}
| apache-2.0 |
style95/openwhisk | common/scala/src/main/scala/org/apache/openwhisk/core/yarn/YARNContainerInfoActor.scala | 5092 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.yarn
import akka.actor.{Actor, ActorRef, ActorSystem}
import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.core.containerpool.{ContainerAddress, ContainerId}
import org.apache.openwhisk.core.entity.ExecManifest.ImageName
import scala.collection.immutable.HashMap
import scala.concurrent.ExecutionContext
/** Request handled by YARNContainerInfoActor asking for a ready container.
  * NOTE(review): yarnComponentActorRef is presumably the reply target for the
  * located container — confirm against the full receive handler. */
case class GetContainerInfo(yarnComponentActorRef: ActorRef)
//This actor is separate from the YARNComponentActor so that container create commands can be issued in parallel
/**
 * Tracks the READY containers of one YARN service component and hands each
 * container out at most once to the invoker.
 *
 * This actor is separate from the YARNComponentActor so that container create
 * commands can be issued in parallel. Note that the retry loop below blocks
 * this actor's thread with Thread.sleep while waiting for YARN.
 */
class YARNContainerInfoActor(actorSystem: ActorSystem,
                             logging: Logging,
                             yarnConfig: YARNConfig,
                             serviceName: String,
                             imageName: ImageName)
    extends Actor {

  implicit val as: ActorSystem = actorSystem
  implicit val ec: ExecutionContext = actorSystem.dispatcher

  // Total time to wait for a READY container, and the pause between polls.
  val containerStartTimeoutMS = 60000
  val retryWaitMS = 1000

  //Map with the definition of all active containers
  var containerDefMap: Map[String, ContainerDefinition] = new HashMap[String, ContainerDefinition]

  //Map that keeps track of which containers have been returned to the main invoker for use
  val containersAllocated = new scala.collection.mutable.HashMap[String, Boolean]

  def receive: PartialFunction[Any, Unit] = {
    case GetContainerInfo(yarnComponentActorRef) =>
      //Check if there are any left over containers from the last check
      var firstNewContainerName = containersAllocated.find { case (k, v) => !v }

      //If no containers are ready, wait for one to come up (up to containerStartTimeoutMS milliseconds)
      var retryCount = 0
      val maxRetryCount = containerStartTimeoutMS / retryWaitMS
      while (firstNewContainerName.isEmpty && retryCount < maxRetryCount) {

        //Get updated service def
        val serviceDef =
          YARNRESTUtil.downloadServiceDefinition(yarnConfig.authType, serviceName, yarnConfig.masterUrl)(logging)

        //Update container list with new container details
        if (serviceDef == null) {
          retryCount += 1
          Thread.sleep(retryWaitMS)
          logging.info(this, s"Waiting for ${imageName.name} YARN container ($retryCount/$maxRetryCount)")
        } else {
          containerDefMap = serviceDef.components
            .filter(c => c.name.equals(imageName.name))
            .flatMap(c => c.containers.getOrElse(List[ContainerDefinition]()))
            .filter(containerDef => containerDef.state.equals("READY"))
            .map(containerDef => (containerDef.component_instance_name, containerDef))
            .toMap

          //Filter map to only contain active containers
          containersAllocated.retain((k, v) => containerDefMap.contains(k))

          for (containerDef <- containerDefMap) {
            if (!containersAllocated.contains(containerDef._1))
              containersAllocated.put(containerDef._1, false)
          }
          firstNewContainerName = containersAllocated.find { case (k, v) => !v }

          //keep waiting
          if (firstNewContainerName.isEmpty) {
            retryCount += 1
            Thread.sleep(retryWaitMS)
            logging.info(this, s"Waiting for ${imageName.name} YARN container ($retryCount/$maxRetryCount)")
          }
        }
      }
      if (firstNewContainerName.isEmpty) {
        throw new Exception(s"After ${containerStartTimeoutMS}ms ${imageName.name} YARN container was not available")
      }

      //Return container: mark it allocated so it is never handed out twice.
      val newContainerDef = containerDefMap(firstNewContainerName.get._1)
      containersAllocated(firstNewContainerName.get._1) = true

      val containerAddress = ContainerAddress(newContainerDef.ip.getOrElse("127.0.0.1")) //default port is 8080
      val containerId = ContainerId(newContainerDef.id)
      logging.info(this, s"New ${imageName.name} YARN Container: ${newContainerDef.id}, $containerAddress")
      sender ! new YARNTask(
        containerId,
        containerAddress,
        ec,
        logging,
        as,
        newContainerDef.component_instance_name,
        imageName,
        yarnConfig,
        yarnComponentActorRef)

    case input =>
      // Bug fix: this reply previously followed the throw and was therefore
      // unreachable, leaving the requester waiting forever. Reply first,
      // then escalate to the supervisor.
      sender ! None
      throw new IllegalArgumentException("Unknown input: " + input)
  }
}
| apache-2.0 |
EvilMcJerkface/tidb | executor/partition_table_test.go | 9843 | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package executor_test
import (
. "github.com/pingcap/check"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/util/testkit"
)
// TestFourReader exercises the four distsql reader types (table reader,
// index reader, index lookup, and index merge) over a range-partitioned
// table, covering dual, single-partition and multi-partition pruning.
func (s *partitionTableSuite) TestFourReader(c *C) {
	tk := testkit.NewTestKitWithInit(c, s.store)
	tk.MustExec("drop table if exists pt")
	tk.MustExec(`create table pt (id int, c int, key i_id(id), key i_c(c)) partition by range (c) (
partition p0 values less than (4),
partition p1 values less than (7),
partition p2 values less than (10))`)
	tk.MustExec("insert into pt values (0, 0), (2, 2), (4, 4), (6, 6), (7, 7), (9, 9), (null, null)")
	// Table reader
	tk.MustQuery("select * from pt").Sort().Check(testkit.Rows("0 0", "2 2", "4 4", "6 6", "7 7", "9 9", "<nil> <nil>"))
	// Table reader: table dual
	tk.MustQuery("select * from pt where c > 10").Check(testkit.Rows())
	// Table reader: one partition
	tk.MustQuery("select * from pt where c > 8").Check(testkit.Rows("9 9"))
	// Table reader: more than one partition
	tk.MustQuery("select * from pt where c < 2 or c >= 9").Check(testkit.Rows("0 0", "9 9"))
	// Index reader
	tk.MustQuery("select c from pt").Sort().Check(testkit.Rows("0", "2", "4", "6", "7", "9", "<nil>"))
	tk.MustQuery("select c from pt where c > 10").Check(testkit.Rows())
	tk.MustQuery("select c from pt where c > 8").Check(testkit.Rows("9"))
	tk.MustQuery("select c from pt where c < 2 or c >= 9").Check(testkit.Rows("0", "9"))
	// Index lookup
	tk.MustQuery("select /*+ use_index(pt, i_id) */ * from pt").Sort().Check(testkit.Rows("0 0", "2 2", "4 4", "6 6", "7 7", "9 9", "<nil> <nil>"))
	tk.MustQuery("select /*+ use_index(pt, i_id) */ * from pt where id < 4 and c > 10").Check(testkit.Rows())
	tk.MustQuery("select /*+ use_index(pt, i_id) */ * from pt where id < 10 and c > 8").Check(testkit.Rows("9 9"))
	tk.MustQuery("select /*+ use_index(pt, i_id) */ * from pt where id < 10 and c < 2 or c >= 9").Check(testkit.Rows("0 0", "9 9"))
	// Index Merge: requires the session switch to be on.
	tk.MustExec("set @@tidb_enable_index_merge = 1")
	tk.MustQuery("select /*+ use_index(i_c, i_id) */ * from pt where id = 4 or c < 7").Check(testkit.Rows("0 0", "2 2", "4 4", "6 6"))
}
// TestPartitionIndexJoin checks that index join can build both an index
// lookup and a plain index reader on the inner (partitioned) side.
func (s *partitionTableSuite) TestPartitionIndexJoin(c *C) {
	tk := testkit.NewTestKitWithInit(c, s.store)
	tk.MustExec("drop table if exists p, t")
	tk.MustExec(`create table p (id int, c int, key i_id(id), key i_c(c)) partition by range (c) (
partition p0 values less than (4),
partition p1 values less than (7),
partition p2 values less than (10))`)
	tk.MustExec("create table t (id int)")
	tk.MustExec("insert into p values (3,3), (4,4), (6,6), (9,9)")
	tk.MustExec("insert into t values (4), (9)")
	// Build indexLookUp in index join
	tk.MustQuery("select /*+ INL_JOIN(p) */ * from p, t where p.id = t.id").Sort().Check(testkit.Rows("4 4 4", "9 9 9"))
	// Build index reader in index join
	tk.MustQuery("select /*+ INL_JOIN(p) */ p.id from p, t where p.id = t.id").Check(testkit.Rows("4", "9"))
}
// TestPartitionUnionScanIndexJoin verifies index join over a partitioned
// table when uncommitted rows must be merged in via union scan inside an
// open transaction.
func (s *partitionTableSuite) TestPartitionUnionScanIndexJoin(c *C) {
	// For issue https://github.com/pingcap/tidb/issues/19152
	tk := testkit.NewTestKitWithInit(c, s.store)
	tk.MustExec("drop table if exists t1, t2")
	tk.MustExec("create table t1 (c_int int, c_str varchar(40), primary key (c_int)) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue)")
	tk.MustExec("create table t2 (c_int int, c_str varchar(40), primary key (c_int, c_str)) partition by hash (c_int) partitions 4")
	tk.MustExec("insert into t1 values (10, 'interesting neumann')")
	tk.MustExec("insert into t2 select * from t1")
	tk.MustExec("begin")
	// Row (11, ...) is visible only inside this transaction; the joins below
	// must still return only the committed matching row.
	tk.MustExec("insert into t2 values (11, 'hopeful hoover');")
	tk.MustQuery("select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 on t1.c_int = t2.c_int and t1.c_str = t2.c_str where t1.c_int in (10, 11)").Check(testkit.Rows("10 interesting neumann 10 interesting neumann"))
	tk.MustQuery("select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 on t1.c_int = t2.c_int and t1.c_str = t2.c_str where t1.c_int in (10, 11)").Check(testkit.Rows("10 interesting neumann 10 interesting neumann"))
	tk.MustExec("commit")
}
// TestPartitionReaderUnderApply covers partition readers executed as the
// inner side of an Apply operator (correlated subqueries), including the
// static partition-prune mode. Regression tests for issues 19458 and 19450.
func (s *partitionTableSuite) TestPartitionReaderUnderApply(c *C) {
	tk := testkit.NewTestKitWithInit(c, s.store)
	tk.MustExec("use test")
	// For issue 19458.
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(c_int int)")
	tk.MustExec("insert into t values(1), (2), (3), (4), (5), (6), (7), (8), (9)")
	tk.MustExec("DROP TABLE IF EXISTS `t1`")
	tk.MustExec(`CREATE TABLE t1 (
c_int int NOT NULL,
c_str varchar(40) NOT NULL,
c_datetime datetime NOT NULL,
c_timestamp timestamp NULL DEFAULT NULL,
c_double double DEFAULT NULL,
c_decimal decimal(12,6) DEFAULT NULL,
PRIMARY KEY (c_int,c_str,c_datetime)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci
PARTITION BY RANGE (c_int)
(PARTITION p0 VALUES LESS THAN (2) ENGINE = InnoDB,
PARTITION p1 VALUES LESS THAN (4) ENGINE = InnoDB,
PARTITION p2 VALUES LESS THAN (6) ENGINE = InnoDB,
PARTITION p3 VALUES LESS THAN (8) ENGINE = InnoDB,
PARTITION p4 VALUES LESS THAN (10) ENGINE = InnoDB,
PARTITION p5 VALUES LESS THAN (20) ENGINE = InnoDB,
PARTITION p6 VALUES LESS THAN (50) ENGINE = InnoDB,
PARTITION p7 VALUES LESS THAN (1000000000) ENGINE = InnoDB)`)
	tk.MustExec("INSERT INTO `t1` VALUES (19,'nifty feistel','2020-02-28 04:01:28','2020-02-04 06:11:57',32.430079,1.284000),(20,'objective snyder','2020-04-15 17:55:04','2020-05-30 22:04:13',37.690874,9.372000)")
	tk.MustExec("begin")
	// The correlated subqueries below must also see this uncommitted row.
	tk.MustExec("insert into t1 values (22, 'wizardly saha', '2020-05-03 16:35:22', '2020-05-03 02:18:42', 96.534810, 0.088)")
	tk.MustQuery("select c_int from t where (select min(t1.c_int) from t1 where t1.c_int > t.c_int) > (select count(*) from t1 where t1.c_int > t.c_int) order by c_int").Check(testkit.Rows(
		"1", "2", "3", "4", "5", "6", "7", "8", "9"))
	tk.MustExec("rollback")
	// For issue 19450.
	tk.MustExec("drop table if exists t1, t2")
	tk.MustExec("create table t1 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int))")
	tk.MustExec("create table t2 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int)) partition by hash (c_int) partitions 4")
	tk.MustExec("insert into t1 values (1, 'romantic robinson', 4.436), (2, 'stoic chaplygin', 9.826), (3, 'vibrant shamir', 6.300), (4, 'hungry wilson', 4.900), (5, 'naughty swartz', 9.524)")
	tk.MustExec("insert into t2 select * from t1")
	tk.MustQuery("select * from t1 where c_decimal in (select c_decimal from t2 where t1.c_int = t2.c_int or t1.c_int = t2.c_int and t1.c_str > t2.c_str)").Check(testkit.Rows(
		"1 romantic robinson 4.436000",
		"2 stoic chaplygin 9.826000",
		"3 vibrant shamir 6.300000",
		"4 hungry wilson 4.900000",
		"5 naughty swartz 9.524000"))
	// For issue 19450 release-4.0: same query under static partition pruning
	// must produce identical results.
	tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.StaticOnly) + `'`)
	tk.MustQuery("select * from t1 where c_decimal in (select c_decimal from t2 where t1.c_int = t2.c_int or t1.c_int = t2.c_int and t1.c_str > t2.c_str)").Check(testkit.Rows(
		"1 romantic robinson 4.436000",
		"2 stoic chaplygin 9.826000",
		"3 vibrant shamir 6.300000",
		"4 hungry wilson 4.900000",
		"5 naughty swartz 9.524000"))
}
// TestImproveCoverage exercises INL_JOIN and INL_MERGE_JOIN against a
// hash-partitioned inner table under dynamic partition-prune mode.
func (s *partitionTableSuite) TestImproveCoverage(c *C) {
	tk := testkit.NewTestKitWithInit(c, s.store)
	tk.MustExec("use test")
	tk.MustExec(`create table coverage_rr (
pk1 varchar(35) NOT NULL,
pk2 int NOT NULL,
c int,
PRIMARY KEY (pk1,pk2)) partition by hash(pk2) partitions 4;`)
	tk.MustExec("create table coverage_dt (pk1 varchar(35), pk2 int)")
	tk.MustExec("insert into coverage_rr values ('ios', 3, 2),('android', 4, 7),('linux',5,1)")
	tk.MustExec("insert into coverage_dt values ('apple',3),('ios',3),('linux',5)")
	tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic-only'")
	tk.MustQuery("select /*+ INL_JOIN(dt, rr) */ * from coverage_dt dt join coverage_rr rr on (dt.pk1 = rr.pk1 and dt.pk2 = rr.pk2);").Sort().Check(testkit.Rows("ios 3 ios 3 2", "linux 5 linux 5 1"))
	tk.MustQuery("select /*+ INL_MERGE_JOIN(dt, rr) */ * from coverage_dt dt join coverage_rr rr on (dt.pk1 = rr.pk1 and dt.pk2 = rr.pk2);").Sort().Check(testkit.Rows("ios 3 ios 3 2", "linux 5 linux 5 1"))
}
// TestGlobalIndexScan reads only indexed columns through a global (cross
// partition) unique index, i.e. a pure index scan with no table lookup.
func (s *globalIndexSuite) TestGlobalIndexScan(c *C) {
	tk := testkit.NewTestKitWithInit(c, s.store)
	tk.MustExec("drop table if exists p")
	tk.MustExec(`create table p (id int, c int) partition by range (c) (
partition p0 values less than (4),
partition p1 values less than (7),
partition p2 values less than (10))`)
	tk.MustExec("alter table p add unique idx(id)")
	tk.MustExec("insert into p values (1,3), (3,4), (5,6), (7,9)")
	tk.MustQuery("select id from p use index (idx)").Check(testkit.Rows("1", "3", "5", "7"))
}
// TestGlobalIndexDoubleRead selects non-indexed columns via a global index,
// forcing an index scan followed by a table row lookup (double read).
func (s *globalIndexSuite) TestGlobalIndexDoubleRead(c *C) {
	tk := testkit.NewTestKitWithInit(c, s.store)
	tk.MustExec("drop table if exists p")
	tk.MustExec(`create table p (id int, c int) partition by range (c) (
partition p0 values less than (4),
partition p1 values less than (7),
partition p2 values less than (10))`)
	tk.MustExec("alter table p add unique idx(id)")
	tk.MustExec("insert into p values (1,3), (3,4), (5,6), (7,9)")
	tk.MustQuery("select * from p use index (idx)").Check(testkit.Rows("1 3", "3 4", "5 6", "7 9"))
}
| apache-2.0 |
trivium-io/trivium | src/io/trivium/dep/com/google/common/hash/MessageDigestHashFunction.java | 4681 | /*
* Copyright (C) 2011 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package io.trivium.dep.com.google.common.hash;
import static io.trivium.dep.com.google.common.base.Preconditions.checkArgument;
import static io.trivium.dep.com.google.common.base.Preconditions.checkNotNull;
import static io.trivium.dep.com.google.common.base.Preconditions.checkState;
import java.io.Serializable;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
/**
 * {@link HashFunction} adapter for {@link MessageDigest} instances.
 *
 * <p>The hash may be truncated to {@code bytes} bytes (at least 4, at most
 * the algorithm's natural digest length).
 *
 * @author Kevin Bourrillion
 * @author Dimitris Andreou
 */
final class MessageDigestHashFunction extends AbstractStreamingHashFunction
    implements Serializable {
  private final MessageDigest prototype;
  private final int bytes;
  private final boolean supportsClone;
  private final String toString;

  MessageDigestHashFunction(String algorithmName, String toString) {
    this.prototype = getMessageDigest(algorithmName);
    this.bytes = prototype.getDigestLength();
    this.toString = checkNotNull(toString);
    this.supportsClone = supportsClone();
  }

  MessageDigestHashFunction(String algorithmName, int bytes, String toString) {
    this.toString = checkNotNull(toString);
    this.prototype = getMessageDigest(algorithmName);
    int maxLength = prototype.getDigestLength();
    // Bug fix: the upper bound is inclusive (bytes <= maxLength is allowed),
    // but the message previously claimed "< %s".
    checkArgument(
        bytes >= 4 && bytes <= maxLength, "bytes (%s) must be >= 4 and <= %s", bytes, maxLength);
    this.bytes = bytes;
    this.supportsClone = supportsClone();
  }

  // Cloning the prototype digest is cheaper than creating a fresh instance
  // per hasher; probe once whether the provider supports it.
  private boolean supportsClone() {
    try {
      prototype.clone();
      return true;
    } catch (CloneNotSupportedException e) {
      return false;
    }
  }

  @Override
  public int bits() {
    return bytes * Byte.SIZE;
  }

  @Override
  public String toString() {
    return toString;
  }

  private static MessageDigest getMessageDigest(String algorithmName) {
    try {
      return MessageDigest.getInstance(algorithmName);
    } catch (NoSuchAlgorithmException e) {
      // Callers only pass algorithms guaranteed by the platform.
      throw new AssertionError(e);
    }
  }

  @Override
  public Hasher newHasher() {
    if (supportsClone) {
      try {
        return new MessageDigestHasher((MessageDigest) prototype.clone(), bytes);
      } catch (CloneNotSupportedException e) {
        // falls through
      }
    }
    return new MessageDigestHasher(getMessageDigest(prototype.getAlgorithm()), bytes);
  }

  // Serialization proxy: MessageDigest itself is not serializable, so we
  // persist only the algorithm name, truncation length and display name.
  private static final class SerializedForm implements Serializable {
    private final String algorithmName;
    private final int bytes;
    private final String toString;

    private SerializedForm(String algorithmName, int bytes, String toString) {
      this.algorithmName = algorithmName;
      this.bytes = bytes;
      this.toString = toString;
    }

    private Object readResolve() {
      return new MessageDigestHashFunction(algorithmName, bytes, toString);
    }

    private static final long serialVersionUID = 0;
  }

  Object writeReplace() {
    return new SerializedForm(prototype.getAlgorithm(), bytes, toString);
  }

  /**
   * Hasher that updates a message digest.
   */
  private static final class MessageDigestHasher extends AbstractByteHasher {
    private final MessageDigest digest;
    private final int bytes;
    private boolean done;

    private MessageDigestHasher(MessageDigest digest, int bytes) {
      this.digest = digest;
      this.bytes = bytes;
    }

    @Override
    protected void update(byte b) {
      checkNotDone();
      digest.update(b);
    }

    @Override
    protected void update(byte[] b) {
      checkNotDone();
      digest.update(b);
    }

    @Override
    protected void update(byte[] b, int off, int len) {
      checkNotDone();
      digest.update(b, off, len);
    }

    private void checkNotDone() {
      checkState(!done, "Cannot re-use a Hasher after calling hash() on it");
    }

    @Override
    public HashCode hash() {
      checkNotDone();
      done = true;
      // Truncate only when a shorter-than-natural length was requested.
      return (bytes == digest.getDigestLength())
          ? HashCode.fromBytesNoCopy(digest.digest())
          : HashCode.fromBytesNoCopy(Arrays.copyOf(digest.digest(), bytes));
    }
  }
}
| apache-2.0 |
awslabs/aws-sdk-java-resources | aws-resources-glacier/src/main/java/com/amazonaws/resources/glacier/Glacier.java | 2991 | /*
* Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.resources.glacier;
import com.amazonaws.resources.ResultCapture;
import com.amazonaws.resources.Service;
import com.amazonaws.resources.internal.V1ServiceInterface;
import com.amazonaws.services.glacier.AmazonGlacier;
import com.amazonaws.services.glacier.model.CreateVaultRequest;
import com.amazonaws.services.glacier.model.CreateVaultResult;
import com.amazonaws.services.glacier.model.ListVaultsRequest;
/**
 * The <code>Glacier</code> service.
 * This is the entry point to interact with the following service resources:<ul>
 * <li>Job</li>
 * <li>Archive</li>
 * <li>Vault</li>
 * <li>Account</li>
 * <li>MultipartUpload</li>
 * <li>Notification</li>
 * </ul>
 */
@V1ServiceInterface(model="model.json", impl=
        "com.amazonaws.resources.glacier.internal.GlacierImpl")
public interface Glacier extends Service<AmazonGlacier> {
    /**
     * Gets an instance of {@code Account} resource by its identifier(s).
     */
    Account getAccount(String id);

    /**
     * Retrieves the Vaults collection referenced by this resource.
     */
    VaultCollection getVaults();

    /**
     * Retrieves the Vaults collection referenced by this resource.
     */
    VaultCollection getVaults(ListVaultsRequest request);

    /**
     * Performs the <code>CreateVault</code> action.
     *
     * <p>Creates a new vault with the name and parameters given in the
     * request.
     *
     * @return The <code>Vault</code> resource object associated with the result
     *         of this action.
     * @see CreateVaultRequest
     */
    Vault createVault(CreateVaultRequest request);

    /**
     * Performs the <code>CreateVault</code> action and use a ResultCapture to
     * retrieve the low-level client response.
     *
     * <p>The {@code extractor} receives the raw {@link CreateVaultResult}
     * returned by the underlying client.
     *
     * @return The <code>Vault</code> resource object associated with the result
     *         of this action.
     * @see CreateVaultRequest
     */
    Vault createVault(CreateVaultRequest request,
            ResultCapture<CreateVaultResult> extractor);

    /**
     * The convenient method form for the <code>CreateVault</code> action.
     *
     * @see #createVault(CreateVaultRequest)
     */
    Vault createVault(String vaultName);

    /**
     * The convenient method form for the <code>CreateVault</code> action.
     *
     * @see #createVault(CreateVaultRequest, ResultCapture)
     */
    Vault createVault(String vaultName, ResultCapture<CreateVaultResult>
            extractor);
}
| apache-2.0 |
shahmishal/swift | utils/swift_build_support/swift_build_support/products/sourcekitlsp.py | 1276 | # swift_build_support/products/sourcekitlsp.py -------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
from . import indexstoredb
from . import product
class SourceKitLSP(product.Product):
    """Build-script product for the sourcekit-lsp language server.

    All build phases delegate to the shared index-store-db helper script.
    """

    @classmethod
    def product_source_name(cls):
        """Name of the checkout directory for this product's sources."""
        return "sourcekit-lsp"

    @classmethod
    def is_build_script_impl_product(cls):
        # Driven directly by build-script, not by build-script-impl.
        return False

    def _invoke(self, action, host_target):
        # Single funnel point for every helper-script invocation.
        indexstoredb.run_build_script_helper(
            action, host_target, self, self.args)

    def build(self, host_target):
        """Build sourcekit-lsp for the given host target."""
        self._invoke('build', host_target)

    def test(self, host_target):
        """Run the test suite when both test flags are enabled."""
        if self.args.test and self.args.test_sourcekitlsp:
            self._invoke('test', host_target)

    def install(self, host_target):
        """Install the product when installation is requested."""
        if self.args.install_sourcekitlsp:
            self._invoke('install', host_target)
| apache-2.0 |
jsonking/mongo-java-driver | bson/src/main/org/bson/json/ExtendedJsonDecimal128Converter.java | 997 | /*
* Copyright 2017 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.bson.json;
import org.bson.types.Decimal128;
/**
 * Writes a {@link Decimal128} value in MongoDB Extended JSON form, i.e. as
 * a document of the shape {@code {"$numberDecimal": "<string value>"}}.
 */
class ExtendedJsonDecimal128Converter implements Converter<Decimal128> {
    @Override
    public void convert(final Decimal128 value, final StrictJsonWriter writer) {
        writer.writeStartObject();
        writer.writeName("$numberDecimal");
        // The numeric value is always emitted as a string per the spec.
        writer.writeString(value.toString());
        writer.writeEndObject();
    }
}
}
| apache-2.0 |
jhalterman/modelmapper | core/src/test/java/org/modelmapper/internal/converter/MapConverterTest.java | 5227 | package org.modelmapper.internal.converter;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
import java.lang.reflect.Type;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import org.modelmapper.ModelMapper;
import org.modelmapper.spi.ConditionalConverter.MatchResult;
import org.testng.annotations.Test;
/**
 * Tests {@code MapConverter} against typed, raw, sorted and generically
 * parameterized maps, both standalone and embedded in model objects.
 *
 * @author Jonathan Halterman
 */
@Test
public class MapConverterTest extends AbstractConverterTest {
  public MapConverterTest() {
    super(new MapConverter());
  }

  // Source model: typed maps plus a raw map, pre-populated in the
  // instance initializer block.
  @SuppressWarnings("unchecked")
  static class S {
    Map<String, Integer> a = new HashMap<String, Integer>();
    Map<Integer, String> b = new HashMap<Integer, String>();
    @SuppressWarnings("rawtypes")
    Map rawmap = new HashMap();
    {
      a.put("1", 1);
      a.put("2", 2);
      a.put("3", 3);
      b.put(4, "4");
      b.put(5, "5");
      b.put(6, "6");
      rawmap.put(7, "7");
      rawmap.put(8, "8");
      rawmap.put(9, "9");
    }
  }

  // Destination model: key/value types deliberately differ from S so that
  // element conversion (String<->Integer) is exercised.
  static class D {
    Map<String, String> a;
    Map<Integer, Integer> b;
    @SuppressWarnings("rawtypes")
    Map rawmap;
  }

  @SuppressWarnings("unchecked")
  public void shouldConvertElements() {
    Map<String, Integer> map = new HashMap<String, Integer>();
    map.put("1", 1);
    map.put("2", 2);
    map.put("3", 3);

    Map<String, Integer> dest = modelMapper.map(map, Map.class);
    assertEquals(dest, map);
  }

  public void shouldConvertElementsFromModel() {
    Map<String, String> expectedA = new HashMap<String, String>();
    expectedA.put("1", "1");
    expectedA.put("2", "2");
    expectedA.put("3", "3");
    Map<Integer, Integer> expectedB = new HashMap<Integer, Integer>();
    expectedB.put(4, 4);
    expectedB.put(5, 5);
    expectedB.put(6, 6);
    // Raw map entries must pass through unconverted.
    Map<Object, Object> expectedRaw = new HashMap<Object, Object>();
    expectedRaw.put(7, "7");
    expectedRaw.put(8, "8");
    expectedRaw.put(9, "9");

    D d = modelMapper.map(new S(), D.class);
    assertEquals(d.a, expectedA);
    assertEquals(d.b, expectedB);
    assertEquals(d.rawmap, expectedRaw);
  }

  // Sorted variant of S; entries inserted out of order on purpose so the
  // TreeMap ordering is observable.
  @SuppressWarnings("unchecked")
  static class SrcSortedMap {
    SortedMap<String, Integer> a = new TreeMap<String, Integer>();
    SortedMap<Integer, String> b = new TreeMap<Integer, String>();
    @SuppressWarnings("rawtypes")
    SortedMap rawmap = new TreeMap();
    {
      a.put("3", 3);
      a.put("1", 1);
      a.put("2", 2);
      b.put(6, "6");
      b.put(4, "4");
      b.put(5, "5");
      rawmap.put(9, "9");
      rawmap.put(7, "7");
      rawmap.put(8, "8");
    }
  }

  static class DestSortedMap {
    SortedMap<String, String> a;
    SortedMap<Integer, Integer> b;
    @SuppressWarnings("rawtypes")
    SortedMap rawmap;
  }

  @SuppressWarnings("unchecked")
  public void shouldConvertElementsFromSortedMap() {
    SortedMap<String, Integer> map = new TreeMap<String, Integer>();
    map.put("3", 3);
    map.put("1", 1);
    map.put("2", 2);

    Map<String, Integer> dest = modelMapper.map(map, SortedMap.class);
    assertEquals(dest, map);
    // The sortedness of the destination must be preserved.
    assertTrue(dest instanceof SortedMap);
  }

  public void shouldConvertElementsFromSortedMapModel() {
    SortedMap<String, String> expectedA = new TreeMap<String, String>();
    expectedA.put("1", "1");
    expectedA.put("2", "2");
    expectedA.put("3", "3");
    SortedMap<Integer, Integer> expectedB = new TreeMap<Integer, Integer>();
    expectedB.put(4, 4);
    expectedB.put(5, 5);
    expectedB.put(6, 6);
    SortedMap<Object, Object> expectedRaw = new TreeMap<Object, Object>();
    expectedRaw.put(7, "7");
    expectedRaw.put(8, "8");
    expectedRaw.put(9, "9");

    DestSortedMap d = modelMapper.map(new SrcSortedMap(), DestSortedMap.class);
    assertEquals(d.a, expectedA);
    assertEquals(d.b, expectedB);
    assertEquals(d.rawmap, expectedRaw);
    assertTrue(d.a instanceof SortedMap);
    assertTrue(d.b instanceof SortedMap);
    assertTrue(d.rawmap instanceof SortedMap);
  }

  public void shouldConvertWithGenericTypes() {
    Map<Integer, BigDecimal> numbers = Collections.singletonMap(1, BigDecimal.valueOf(2));
    // TypeToken supplies the full generic destination type (Map<Long, String>).
    Type mapType = new org.modelmapper.TypeToken<Map<Long, String>>() {}.getType();
    Map<Long, String> mixed = new ModelMapper().map(numbers, mapType);

    assertFalse(mixed.isEmpty());
    assertTrue(mixed.size() == 1);
    assertTrue(mixed.keySet().iterator().next() instanceof Long);
    assertTrue(mixed.keySet().iterator().next() == 1l);
    assertTrue(mixed.values().iterator().next() instanceof String);
    assertTrue(mixed.values().iterator().next().equals("2"));
  }

  public void testMatches() {
    assertEquals(converter.match(HashMap.class, TreeMap.class), MatchResult.FULL);
    assertEquals(converter.match(Map.class, HashMap.class), MatchResult.FULL);

    // Negative
    assertEquals(converter.match(Map.class, ArrayList.class), MatchResult.NONE);
  }
}
| apache-2.0 |
tviho/spartancoinj | core/src/main/java/com/google/spartancoin/wallet/CoinSelector.java | 653 | package com.google.spartancoin.wallet;
import com.google.spartancoin.core.TransactionOutput;
import java.math.BigInteger;
import java.util.LinkedList;
/**
 * A CoinSelector is responsible for picking some outputs to spend, from the list of all spendable outputs. It
 * allows you to customize the policies for creation of transactions to suit your needs. The select operation
 * may return a {@link CoinSelection} that has a valueGathered lower than the requested target, if there's not
 * enough money in the wallet.
 */
public interface CoinSelector {
    /**
     * Chooses outputs from {@code candidates} intended to reach {@code target} in total value.
     * The returned selection may gather less than {@code target} when the candidates are insufficient.
     */
    public CoinSelection select(BigInteger target, LinkedList<TransactionOutput> candidates);
}
| apache-2.0 |
cuckoohello/mCotton | app/lib/collections/5_datamessages.js | 1832 | /**
* Created by chenhao on 15/4/16.
*/
// Raw device data submissions; one document per message posted by a device.
Collections.DataMessages = new Mongo.Collection('datamessages');
Schemas.DataMessage = new SimpleSchema({
    // _id of the Meteor.users document that owns the submitting device.
    owner_user_id: {
        type: String,
        label: "meteor.User _id",
        optional: false,
    },
    // _id of the Collections.Devices document that produced this message.
    device_id: {
        type: String, label: "device_id",
        optional: false,
    },
    // JSON-stringified payload of the submission (everything except
    // device_id and sid).
    data_message: {
        type: String, label: "data_message",
        optional: false,
    },
    // Optional session/stream identifier supplied by the device.
    sid: {
        type: String, label: "sid",
        optional: true,
    },
    // Server-side timestamp of when the message was received.
    data_submit_time: {
        type: Date, label: "data_submit_time",
        optional: false,
    },
    // Processing state of the message; new documents start as "new".
    process_status: {
        type: String, label: "process_status",
        optional: true,
    },
});
Collections.DataMessages.attachSchema(Schemas.DataMessage);
Meteor.methods({
    /**
     * Store one raw data submission from a device.
     *
     * Strips device_id and sid from the payload, serializes the remainder as
     * JSON, and inserts a DataMessages document owned by the device's owner.
     * Returns {notExistDevice: true, _id: deviceId} for unknown devices,
     * otherwise {_id: insertedId}.
     */
    dataMessageInsert: function (_attributes) {
        var deviceId = _attributes.device_id;
        var device = Collections.Devices.findOne({_id: deviceId});
        if (!device) {
            // Unknown device: report it back without inserting anything.
            return {
                notExistDevice: true,
                _id: deviceId
            };
        }
        delete _attributes.device_id;
        var sid = _attributes.sid;
        delete _attributes.sid;
        var record = {
            device_id: deviceId,
            data_message: JSON.stringify(_attributes),
            data_submit_time: new Date(),
            owner_user_id: device.owner_user_id,
            process_status: "new"
        };
        if (sid) {
            record.sid = sid;
        }
        return {
            _id: Collections.DataMessages.insert(record)
        };
    }
});
| apache-2.0 |
tempbottle/ginsing | src/main.cc | 4809 | /*
Copyright (c) 2013
Author: Jeff Weisberg <jaw @ solvemedia.com>
Created: 2013-Jan-04 10:49 (EST)
Function: main
*/
#include "defs.h"
#include "diag.h"
#include "daemon.h"
#include "config.h"
#include "hrtime.h"
#include "thread.h"
#include "runmode.h"
#include "zdb.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/stat.h>
int flag_foreground = 1;
int flag_debugall = 0;
int force_reload = 0;
char *filename_config = 0;
RunMode runmode;
static void* runmode_manage(void*);
static void *reload_config(void*);
void network_init(void);
void network_manage(void);
void dns_init(void);
void console_init(void);
void mmdb_init(void);
void zdb_init(void);
void mon_init(void);
// print command-line help to stderr and exit successfully.
void
usage(void){
    fprintf(stderr, MYNAME " [options]\n"
            " -f foreground\n"
            " -d enable debugging\n"
            " -C check config + exit\n"
            " -c config file\n");
    exit(0);
}
// program entry point: parse arguments, daemonize (unless -f), load the
// config + databases, start the background threads, then run the network
// manager until shutdown.  the initialization order below is significant.
int
main(int argc, char **argv){
    extern char *optarg;
    extern int optind;
    int prev_status = 0;
    // argc/argv are saved before getopt permutes them, for daemonize().
    int save_argc = argc;
    char **save_argv = argv;
    int checkonly = 0;
    int tmp_foreground = 0;
    int c;

    srandom( time(0) );

    // parse command line
    while( (c = getopt(argc, argv, "c:Cdfh")) != -1 ){
	switch(c){
	case 'f':
	    tmp_foreground = 1;
	    break;
	case 'd':
	    flag_debugall  = 1;
	    debug_enabled  = 1;
	    break;
	case 'c':
	    filename_config = optarg;
	    break;
	case 'h':
	    usage();
	    break;
	case 'C':
	    // config check implies foreground: no daemon, exit after load.
	    checkonly = 1;
	    tmp_foreground = 1;
	    break;
	}
    }
    argc -= optind;
    argv += optind;

    if( !filename_config ){
	fprintf(stderr, "no config specified!\ntry -c config\n");
	exit(-1);
    }

    // init logging
    diag_init();

    // daemonize
    if( tmp_foreground){
	daemon_siginit();
    }else{
	prev_status = daemonize(10, MYNAME "d", save_argc, save_argv);
    }
    flag_foreground = tmp_foreground;

    VERBOSE( "starting." );

    // read config files + databases
    if( read_config(filename_config) ){
	FATAL("cannot read config file");
    }
    mmdb_init();
    zdb_init();

    if( checkonly ) exit(0);

    if( prev_status && prev_status != (EXIT_NORMAL_RESTART<<8) ){
	// previous restarted due to an error - send an email
	PROBLEM("previous %sd restarted due to an error (%d)", MYNAME, prev_status);
    }

    // watchdog + config-reload threads run for the life of the process.
    start_thread( runmode_manage, 0 );
    start_thread( reload_config,  (void*)filename_config );

    // init subsystems
    mon_init();
    console_init();
    dns_init();
    network_init();

    VERBOSE("running.");

    // manage threads
    // this does not return until we shutdown
    network_manage();

    VERBOSE("exiting");
    exit(runmode.final_exit_value());
}
// config-file watcher thread: every 15 seconds stat() the config file and
// reload it (and the zone db) when it changes.  a change is detected when
// the mtime advances, the inode differs (file replaced, e.g. by an editor
// or rsync), or a reload was forced via force_reload.
static void *
reload_config(void *file){
    char *cfile = (char*)file;		// fix: %s varargs need char*, not void*
    struct stat sb;
    time_t lastmod = lr_now();
    ino_t  lastino = 0;			// fix: was int - truncated 64-bit inode numbers

    while(1){
	sleep(15);

	// watch config file
	if( stat(cfile, &sb) == -1 ){
	    VERBOSE("cannot stat config file '%s': %s", cfile, strerror(errno));
	    continue;
	}
	if( !lastino ) lastino = sb.st_ino;

	if( sb.st_mtime > lastmod || force_reload || sb.st_ino != lastino ){
	    lastmod = sb.st_mtime;
	    lastino = sb.st_ino;
	    force_reload = 0;
	    VERBOSE("config changed, reloading");
	    read_config( cfile );
	    load_zdb();
	}
    }
}
// normal exit:
// network_manage finishes + returns to main
// main exits
///
// normal winddown:
//  janitor causes runmode transition winddown=>exiting
// runmode_manage handles shutting down in the cases
// where the normal processes are hung
// !!! - this thread must not do anything which could ever hang
// - no locks, no mallocs, no i/o, no std::string, std::...
// - no debug, no verbose, ...
#define TIME_LIMIT 60
#define WIND_LIMIT 300
// Watchdog for shutdown transitions. Entering a terminal run mode arms a
// deadline; if the normal shutdown path hangs past it, the process is killed
// via _exit(). Per the note above, nothing here may block, allocate, or log.
static void*
runmode_manage(void*){
    // deadlines (0 = not armed), one per terminal mode
    time_t exit_deadline = 0, wind_deadline = 0, error_deadline = 0;

    while(1){
        time_t now = lr_now();

        switch(runmode.mode()){
        case RUN_MODE_EXITING:
            if( !exit_deadline )      exit_deadline  = now + TIME_LIMIT;
            if( exit_deadline < now ) _exit(runmode.final_exit_value());
            break;
        case RUN_MODE_WINDDOWN:
            if( !wind_deadline )      wind_deadline  = now + WIND_LIMIT;
            if( wind_deadline < now ) _exit(runmode.final_exit_value());
            break;
        case RUN_MODE_ERRORED:
            if( !error_deadline )      error_deadline = now + TIME_LIMIT;
            if( error_deadline < now ) _exit(EXIT_ERROR_RESTART);
            break;
        default:
            // not shutting down (or shutdown canceled): disarm everything
            exit_deadline = wind_deadline = error_deadline = 0;
        }
        sleep(5);
    }
    return 0;
}
| apache-2.0 |
realsystem/CloudFerry | cloudferrylib/os/image/glance_image.py | 16344 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import datetime
import json
import re
import time
from fabric.api import run
from fabric.api import settings
from glanceclient import client as glance_client
from glanceclient.v1.images import CREATE_PARAMS
from cloudferrylib.base import image
from cloudferrylib.utils import mysql_connector
from cloudferrylib.utils import file_like_proxy
from cloudferrylib.utils import utils as utl
LOG = utl.get_log(__name__)
class GlanceImage(image.Image):
    """
    The main class for working with Openstack Glance Image Service.
    """
    def __init__(self, config, cloud):
        # `config` holds the migration settings; `cloud` is the source or
        # destination cloud this instance is bound to.
        self.config = config
        self.host = config.cloud.host
        self.cloud = cloud
        self.identity_client = cloud.resources['identity']
        # wrap the raw glance client in the base-class proxy helper
        self.glance_client = self.proxy(self.get_client(), config)
        super(GlanceImage, self).__init__(config)
    def get_db_connection(self):
        """Return a MysqlConnector bound to the glance database.
        Prefers per-cloud '<position>_image' mysql settings when defined,
        otherwise falls back to the global mysql section.
        """
        if not hasattr(
                self.cloud.config,
                self.cloud.position + '_image'):
            LOG.debug('running on default mysql settings')
            return mysql_connector.MysqlConnector(
                self.config.mysql, 'glance')
        else:
            LOG.debug('running on custom mysql settings')
            my_settings = getattr(
                self.cloud.config,
                self.cloud.position + '_image')
            return mysql_connector.MysqlConnector(
                my_settings, my_settings.database_name)
    def get_client(self):
        """ Getting glance client """
        endpoint_glance = self.identity_client.get_endpoint_by_service_type(
            service_type='image',
            endpoint_type='publicURL')
        # we can figure out what version of client to use from url
        # check if we have "v1" or "v2" in the end of url
        m = re.search("(.*)/v(\d)", endpoint_glance)
        if m:
            # strip the version suffix from the endpoint
            endpoint_glance = m.group(1)
            # for now we always use 1 version of client
            version = 1 # m.group(2)
        else:
            version = 1
        return glance_client.Client(
            version,
            endpoint=endpoint_glance,
            token=self.identity_client.get_auth_token_from_user())
    def get_image_list(self):
        """Return the glance image list (all tenants when configured)."""
        # the glance v1 API requires is_public=None to disable the public
        # filter and return images belonging to all tenants
        filters = {"is_public": None} if self.config.migrate.all_images else {}
        return self.glance_client.images.list(filters=filters)
    def create_image(self, **kwargs):
        """Create an image; kwargs follow glanceclient CREATE_PARAMS."""
        return self.glance_client.images.create(**kwargs)
    def delete_image(self, image_id):
        """Delete the glance image with the given id."""
        self.glance_client.images.delete(image_id)
    def get_image_by_id(self, image_id):
        """Return the image with `image_id`, or None when not found."""
        for glance_image in self.get_image_list():
            if glance_image.id == image_id:
                return glance_image
    def get_image_by_name(self, image_name):
        """Return the first image named `image_name`, or None when not found."""
        for glance_image in self.get_image_list():
            if glance_image.name == image_name:
                return glance_image
    def get_img_id_list_by_checksum(self, checksum):
        """Return ids of all images whose checksum equals `checksum`."""
        l = []
        for glance_image in self.get_image_list():
            if glance_image.checksum == checksum:
                l.append(glance_image.id)
        return l
    def get_image(self, im):
        """ Get image by id or name. """
        for glance_image in self.get_image_list():
            if im in (glance_image.name, glance_image.id):
                return glance_image
    def get_image_status(self, image_id):
        """Return the status string of the image (e.g. 'active')."""
        return self.get_image_by_id(image_id).status
    def get_ref_image(self, image_id):
        """Return a raw response object that streams the image bits.
        NOTE(review): relies on the private ``_resp`` attribute of the
        glanceclient iterator -- may break across client versions.
        """
        return self.glance_client.images.data(image_id)._resp
    def get_image_checksum(self, image_id):
        """Return the checksum recorded for the image with `image_id`."""
        return self.get_image_by_id(image_id).checksum
    @staticmethod
    def convert(glance_image, cloud):
        """Convert OpenStack Glance image object to CloudFerry object.
        :param glance_image: Direct OS Glance image object to convert,
        :param cloud: Cloud object.
        """
        resource = cloud.resources[utl.IMAGE_RESOURCE]
        keystone = cloud.resources["identity"]
        # keep only the attributes glance accepts on image creation
        gl_image = {
            k: w for k, w in glance_image.to_dict().items(
            ) if k in CREATE_PARAMS}
        # we need to pass resource to destination to copy image
        gl_image.update({'resource': resource})
        # at this point we write name of owner of this tenant
        # to map it to different tenant id on destination
        gl_image.update(
            {'owner_name': keystone.try_get_tenant_name_by_id(
                glance_image.owner, default=cloud.cloud_config.cloud.tenant)})
        if resource.is_snapshot(glance_image):
            # for snapshots we need to write snapshot username to namespace
            # to map it later to new user id
            user_ids = [i.id for i in keystone.keystone_client.users.list()]
            user_id = gl_image["properties"].get("user_id")
            if user_id in user_ids:
                gl_image["properties"]["user_name"] = \
                    keystone.keystone_client.users.get(user_id).name
        return gl_image
    def is_snapshot(self, img):
        """Return True when `img` is an instance snapshot, not a plain image."""
        # snapshots have {'image_type': 'snapshot'} in "properties" field
        return img.to_dict().get("properties", {}).get(
            'image_type') == 'snapshot'
    def get_tags(self):
        # no tag support in this implementation; always an empty mapping
        return {}
    def get_members(self):
        """Collect image sharing info for every image.
        members structure {image_id: {tenant_name: can_share}}
        """
        result = {}
        for img in self.get_image_list():
            for entry in self.glance_client.image_members.list(image=img.id):
                if img.id not in result:
                    result[img.id] = {}
                # change tenant_id to tenant_name
                tenant_name = self.identity_client.try_get_tenant_name_by_id(
                    entry.member_id,
                    default=self.config.cloud.tenant)
                result[img.id][tenant_name] = entry.can_share
        return result
    def create_member(self, image_id, tenant_name, can_share):
        """Share `image_id` with the tenant named `tenant_name`."""
        # change tenant_name to tenant_id
        tenant_id = self.identity_client.get_tenant_id_by_name(tenant_name)
        self.glance_client.image_members.create(
            image_id,
            tenant_id,
            can_share)
    def read_info(self, **kwargs):
        """Get info about images or specified image.
        :param image_id: Id of specified image
        :param image_name: Name of specified image
        :param images_list: List of specified images
        :param images_list_meta: Tuple of specified images with metadata in
                                 format [(image, meta)]
        :param date: date object. snapshots updated after this date will be
                     dropped
        :rtype: Dictionary with all necessary images info
        """
        info = {'images': {}}
        def image_valid(img, date):
            """ Check if image was updated recently """
            updated = datetime.datetime.strptime(
                img.updated_at,
                "%Y-%m-%dT%H:%M:%S")
            return date <= updated.date()
        if kwargs.get('date'):
            for img in self.get_image_list():
                # keep all plain images; keep snapshots only when fresh
                if (not self.is_snapshot(img)) or image_valid(
                        img, kwargs.get('date')):
                    self.make_image_info(img, info)
        # NOTE(review): when only 'date' is passed, none of the branches
        # below match and the final 'else' re-adds every image, overriding
        # the filtering above -- confirm this is intended.
        if kwargs.get('image_id'):
            glance_image = self.get_image_by_id(kwargs['image_id'])
            info = self.make_image_info(glance_image, info)
        elif kwargs.get('image_name'):
            glance_image = self.get_image_by_name(kwargs['image_name'])
            info = self.make_image_info(glance_image, info)
        elif kwargs.get('images_list'):
            for im in kwargs['images_list']:
                glance_image = self.get_image(im)
                info = self.make_image_info(glance_image, info)
        elif kwargs.get('images_list_meta'):
            for (im, meta) in kwargs['images_list_meta']:
                glance_image = self.get_image(im)
                info = self.make_image_info(glance_image, info)
                info['images'][glance_image.id]['meta'] = meta
        else:
            for glance_image in self.get_image_list():
                info = self.make_image_info(glance_image, info)
        info.update({
            "tags": self.get_tags(),
            "members": self.get_members()
        })
        return info
    def make_image_info(self, glance_image, info):
        """Add `glance_image` (if active) to `info['images']` and return info."""
        if glance_image:
            if glance_image.status == "active":
                gl_image = self.convert(glance_image, self.cloud)
                info['images'][glance_image.id] = {'image': gl_image,
                                                   'meta': {},
                                                   }
            else:
                LOG.warning("image {img} was not migrated according to "
                            "status = {status}, (expected status "
                            "= active)".format(
                                img=glance_image.id,
                                status=glance_image.status))
        else:
            LOG.error('Image has not been found')
        return info
    def deploy(self, info, callback=None):
        """Create on this (destination) cloud every image described in `info`.
        Images already present (matched by checksum AND name) are reused;
        images without data are carried through untouched; image members are
        re-created against the mapped destination ids.
        """
        info = copy.deepcopy(info)
        new_info = {'images': {}}
        migrate_images_list = []
        delete_container_format, delete_disk_format = [], []
        empty_image_list = {}
        for image_id_src, gl_image in info['images'].iteritems():
            if gl_image['image']:
                # existing destination images, keyed by checksum / by name
                dst_img_checksums = {x.checksum: x for x in
                                     self.get_image_list()}
                dst_img_names = [x.name for x in self.get_image_list()]
                checksum_current = gl_image['image']['checksum']
                name_current = gl_image['image']['name']
                meta = gl_image['meta']
                # same checksum and same name: treat as already migrated
                if checksum_current in dst_img_checksums and (
                        name_current) in dst_img_names:
                    migrate_images_list.append(
                        (dst_img_checksums[checksum_current], meta))
                    continue
                LOG.debug("updating owner of image {image}".format(
                    image=gl_image["image"]["owner"]))
                # remap the owner tenant by name onto a destination tenant id
                gl_image["image"]["owner"] = \
                    self.identity_client.get_tenant_id_by_name(
                        gl_image["image"]["owner_name"])
                del gl_image["image"]["owner_name"]
                if gl_image["image"]["properties"]:
                    # update snapshot metadata
                    metadata = gl_image["image"]["properties"]
                    if "owner_id" in metadata:
                        # update tenant id
                        LOG.debug("updating snapshot metadata for field "
                                  "'owner_id' for image {image}".format(
                                      image=gl_image["image"]["id"]))
                        metadata["owner_id"] = gl_image["image"]["owner"]
                    if "user_id" in metadata:
                        # update user id by specified name
                        LOG.debug("updating snapshot metadata for field "
                                  "'user_id' for image {image}".format(
                                      image=gl_image["image"]["id"]))
                        metadata["user_id"] = \
                            self.identity_client.keystone_client.users.find(
                                username=metadata["user_name"]).id
                        del metadata["user_name"]
                LOG.debug("migrating image {image}".format(
                    image=gl_image["image"]["id"]))
                # we can face situation when image has no
                # disk_format and container_format properties
                # this situation appears, when image was created
                # with option --copy-from
                # glance-client cannot create image without this
                # properties, we need to create them artificially
                # and then - delete from database
                migrate_image = self.create_image(
                    name=gl_image['image']['name'],
                    container_format=gl_image['image']['container_format'] or "bare",
                    disk_format=gl_image['image']['disk_format'] or "qcow2",
                    is_public=gl_image['image']['is_public'],
                    protected=gl_image['image']['protected'],
                    owner=gl_image['image']['owner'],
                    size=gl_image['image']['size'],
                    properties=gl_image['image']['properties'],
                    data=file_like_proxy.FileLikeProxy(
                        gl_image['image'],
                        callback,
                        self.config['migrate']['speed_limit']))
                migrate_images_list.append((migrate_image, meta))
                # remember which formats were injected so they can be
                # NULLed out in the database afterwards
                if not gl_image["image"]["container_format"]:
                    delete_container_format.append(migrate_image.id)
                if not gl_image["image"]["disk_format"]:
                    delete_disk_format.append(migrate_image.id)
            else:
                empty_image_list[image_id_src] = gl_image
        if migrate_images_list:
            im_name_list = [(im.name, meta) for (im, meta) in
                            migrate_images_list]
            new_info = self.read_info(images_list_meta=im_name_list)
        new_info['images'].update(empty_image_list)
        # on this step we need to create map between source ids and dst ones
        LOG.debug("creating map between source and destination image ids")
        image_ids_map = {}
        dst_img_checksums = {x.checksum: x.id for x in self.get_image_list()}
        for image_id_src, gl_image in info['images'].iteritems():
            # NOTE(review): for entries from empty_image_list, "image" may be
            # falsy and this would raise -- confirm such entries cannot occur
            # here, or guard before indexing
            cur_image = gl_image["image"]
            image_ids_map[cur_image["id"]] = dst_img_checksums[cur_image["checksum"]]
        LOG.debug("deploying image members")
        for image_id, data in info.get("members", {}).items():
            for tenant_name, can_share in data.items():
                LOG.debug("deploying image member for image {image}"
                          " tenant {tenant}".format(
                              image=image_id,
                              tenant=tenant_name))
                self.create_member(
                    image_ids_map[image_id],
                    tenant_name,
                    can_share)
        self.delete_fields('disk_format', delete_disk_format)
        self.delete_fields('container_format', delete_container_format)
        return new_info
    def delete_fields(self, field, list_of_ids):
        """NULL out `field` for the given image ids directly in the glance DB.
        Undoes the artificial disk/container formats injected by deploy().
        """
        if not list_of_ids:
            return
        # this command sets disk_format, container_format to NULL
        # NOTE(review): field and ids are interpolated into raw SQL; values
        # originate from glance itself, but keep it that way
        command = ("UPDATE images SET {field}=NULL"
                   " where id in ({id_list})".format(
                       field=field,
                       id_list=",".join(
                           [" '{0}' ".format(i) for i in list_of_ids])))
        self.get_db_connection().execute(command)
    def wait_for_status(self, id_res, status):
        # busy-wait with a 1s poll and no timeout -- callers must ensure the
        # transition actually happens
        while self.glance_client.images.get(id_res).status != status:
            time.sleep(1)
    def patch_image(self, backend_storage, image_id):
        """For ceph backends, sync the glance size field with the rbd size."""
        if backend_storage == 'ceph':
            image_from_glance = self.get_image_by_id(image_id)
            with settings(host_string=self.cloud.getIpSsh()):
                out = json.loads(
                    run("rbd -p images info %s --format json" % image_id))
                image_from_glance.update(size=out["size"])
| apache-2.0 |
apache/incubator-systemml | src/test/java/org/apache/sysds/test/functions/io/proto/FrameReaderWriterProtoTest.java | 3191 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sysds.test.functions.io.proto;
import java.io.IOException;
import java.util.Random;
import org.apache.sysds.common.Types;
import org.apache.sysds.runtime.io.FrameReader;
import org.apache.sysds.runtime.io.FrameReaderFactory;
import org.apache.sysds.runtime.io.FrameWriter;
import org.apache.sysds.runtime.io.FrameWriterFactory;
import org.apache.sysds.runtime.matrix.data.FrameBlock;
import org.apache.sysds.runtime.util.DataConverter;
import org.apache.sysds.test.TestUtils;
import org.junit.Test;
/**
 * Round-trip tests for the PROTO frame format: each test writes a randomly
 * generated FrameBlock to HDFS, reads it back, and compares the string
 * rendering of both frames cell by cell.
 */
public class FrameReaderWriterProtoTest {
    // single temp file shared by all tests; each test overwrites it
    private static final String FILENAME_SINGLE = "target/testTemp/functions/data/FrameReaderWriterProtoTest/testFrameBlock.proto";
    // fixed seed so schema/content generation is deterministic across runs
    private static final long SEED = 4669201;
    private FrameWriter frameWriterProto = FrameWriterFactory.createFrameWriter(Types.FileFormat.PROTO);
    private FrameReader frameReaderProto = FrameReaderFactory.createFrameReader(Types.FileFormat.PROTO);
    // NOTE(review): "Sinle" is a typo for "Single"; renaming would change the
    // reported test name, so it is only flagged here.
    @Test
    public void testWriteReadFrameBlockWithSinleRowAndSingleColumnFromHDFS() throws IOException {
        testWriteReadFrameBlockWith(1, 1);
    }
    @Test
    public void testWriteReadFrameBlockWithSingleRowAndMultipleColumnsFromHDFS() throws IOException {
        testWriteReadFrameBlockWith(1, 23);
    }
    @Test
    public void testWriteReadFrameBlockWithMultipleRowsAndSingleColumnFromHDFS() throws IOException {
        testWriteReadFrameBlockWith(21, 1);
    }
    @Test
    public void testWriteReadFrameBlockWithSmallMultipleRowsAndMultipleColumnsFromHDFS() throws IOException {
        testWriteReadFrameBlockWith(42, 35);
    }
    @Test
    public void testWriteReadFrameBlockWithMediumMultipleRowsAndMultipleColumnsFromHDFS() throws IOException {
        testWriteReadFrameBlockWith(694, 164);
    }
    /**
     * Shared worker: generate a rows x cols frame with a random schema,
     * write it, read it back, and assert equality.
     */
    public void testWriteReadFrameBlockWith(int rows, int cols) throws IOException {
        final Random random = new Random(SEED);
        Types.ValueType[] schema = TestUtils.generateRandomSchema(cols, random);
        FrameBlock expectedFrame = TestUtils.generateRandomFrameBlock(rows, cols, schema, random);
        frameWriterProto.writeFrameToHDFS(expectedFrame, FILENAME_SINGLE, rows, cols);
        FrameBlock actualFrame = frameReaderProto.readFrameFromHDFS(FILENAME_SINGLE, schema, rows, cols);
        String[][] expected = DataConverter.convertToStringFrame(expectedFrame);
        String[][] actual = DataConverter.convertToStringFrame(actualFrame);
        TestUtils.compareFrames(expected, actual, rows, cols);
    }
}
| apache-2.0 |
skibey/libxcam | xcore/cl_context.cpp | 16248 | /*
* cl_context.cpp - CL context
*
* Copyright (c) 2015 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Wind Yuan <feng.yuan@intel.com>
*/
#include "cl_context.h"
#include "cl_kernel.h"
#include "cl_device.h"
#include <utility>
#undef XCAM_CL_MAX_STR_SIZE
#define XCAM_CL_MAX_STR_SIZE 1024
#undef XCAM_CL_MAX_EVENT_SIZE
#define XCAM_CL_MAX_EVENT_SIZE 256
namespace XCam {
class CLKernel;
// Error callback registered with clCreateContext: logs the driver's error
// string. `user_data` carries the owning CLContext (currently unused).
void
CLContext::context_pfn_notify (
    const char* erro_info,
    const void *private_info,
    size_t cb,
    void *user_data
)
{
    CLContext *context = (CLContext*) user_data;
    XCAM_UNUSED (context);
    XCAM_UNUSED (erro_info);
    XCAM_UNUSED (private_info);
    XCAM_UNUSED (cb);
    XCAM_LOG_DEBUG ("cl context pfn error:%s", XCAM_STR (erro_info));
}
// Build-status callback handed to clBuildProgram; the kernel-name reporting
// is disabled below, so this is currently a no-op placeholder.
void CLContext::program_pfn_notify (
    cl_program program, void *user_data)
{
    CLContext *context = (CLContext*) user_data;
    char kernel_names [XCAM_CL_MAX_STR_SIZE];
    XCAM_UNUSED (context);
    XCAM_UNUSED (program);
    xcam_mem_clear (kernel_names);
    //clGetProgramInfo (program, CL_PROGRAM_KERNEL_NAMES, sizeof (kernel_names) - 1, kernel_names, NULL);
    //XCAM_LOG_DEBUG ("cl program report error on kernels: %s", kernel_names);
}
// Copy the cl_event ids from `events_wait` into the caller-provided array
// `cl_events` (capacity `max_count`). Returns the number of ids written;
// events beyond the capacity are dropped with a warning.
uint32_t
CLContext::event_list_2_id_array (
    CLEventList &events_wait,
    cl_event *cl_events, uint32_t max_count)
{
    uint32_t num_of_events_wait = 0;
    for (CLEventList::iterator iter = events_wait.begin ();
            iter != events_wait.end (); ++iter) {
        SmartPtr<CLEvent> &event = *iter;
        if (num_of_events_wait >= max_count) {
            // cast: list::size() returns size_t; passing it to "%d" through
            // varargs is undefined on LP64, so use %u with an explicit cast
            XCAM_LOG_WARNING ("CLEventList(%u) larger than id_array(max_count:%u)", (uint32_t)events_wait.size(), max_count);
            break;
        }
        XCAM_ASSERT (event->get_event_id ());
        cl_events[num_of_events_wait++] = event->get_event_id ();
    }
    return num_of_events_wait;
}
// Bind to `device` and create the cl_context immediately. On failure
// _context_id stays NULL and is_valid() reports false.
CLContext::CLContext (SmartPtr<CLDevice> &device)
    : _context_id (NULL)
    , _device (device)
{
    if (!init_context ()) {
        XCAM_LOG_DEBUG ("CL init context failed");
    }
    XCAM_LOG_DEBUG ("CLContext constructed");
}
// Releases the underlying cl_context (command queues are owned separately).
CLContext::~CLContext ()
{
    destroy_context ();
    XCAM_LOG_DEBUG ("CLContext destructed");
}
// Drop all command queues; kernel-map teardown is intentionally disabled.
void
CLContext::terminate ()
{
    //_kernel_map.clear ();
    _cmd_queue_list.clear ();
}
// Issue (but do not wait for) all commands queued on the default command
// queue. Returns XCAM_RETURN_ERROR_CL when clFlush fails.
XCamReturn
CLContext::flush ()
{
    cl_int error_code = CL_SUCCESS;
    cl_command_queue cmd_queue_id = NULL;
    SmartPtr<CLCommandQueue> cmd_queue = get_default_cmd_queue ();
    XCAM_ASSERT (cmd_queue.ptr ());
    cmd_queue_id = cmd_queue->get_cmd_queue_id ();
    error_code = clFlush (cmd_queue_id);
    XCAM_FAIL_RETURN (
        WARNING,
        error_code == CL_SUCCESS,
        XCAM_RETURN_ERROR_CL,
        "CL flush cmdqueue failed with error_code:%d", error_code);
    return XCAM_RETURN_NO_ERROR;
}
// Block until every command queued on the default command queue completes.
// Returns XCAM_RETURN_ERROR_CL when clFinish fails.
XCamReturn
CLContext::finish ()
{
    SmartPtr<CLCommandQueue> queue = get_default_cmd_queue ();
    XCAM_ASSERT (queue.ptr ());

    cl_int error_code = clFinish (queue->get_cmd_queue_id ());
    XCAM_FAIL_RETURN (
        WARNING,
        error_code == CL_SUCCESS,
        XCAM_RETURN_ERROR_CL,
        "CL finish cmdqueue failed with error_code:%d", error_code);
    return XCAM_RETURN_NO_ERROR;
}
// Create the underlying cl_context for the bound device. Returns false
// (leaving _context_id NULL) when the device is not initialized or context
// creation fails.
bool
CLContext::init_context ()
{
    cl_context context_id = NULL;
    cl_int err_code = 0;
    cl_device_id device_id = _device->get_device_id ();

    XCAM_ASSERT (_context_id == NULL);
    if (!_device->is_inited()) {
        // message was previously truncated ("... since device ")
        XCAM_LOG_DEBUG ("create cl context failed since device is not inited");
        return false;
    }
    context_id =
        clCreateContext (NULL, 1, &device_id,
                         CLContext::context_pfn_notify, this,
                         &err_code);
    if (err_code != CL_SUCCESS)
    {
        XCAM_LOG_WARNING ("create cl context failed, error:%d", err_code);
        return false;
    }
    _context_id = context_id;
    return true;
}
// Create the default command queue. `self` must be the SmartPtr wrapping
// this object (asserted) so the queue can hold a reference to the context.
bool
CLContext::init_cmd_queue (SmartPtr<CLContext> &self)
{
    XCAM_ASSERT (_cmd_queue_list.empty ());
    XCAM_ASSERT (self.ptr() == this);
    SmartPtr<CLCommandQueue> cmd_queue = create_cmd_queue (self);
    if (!cmd_queue.ptr ())
        return false;
    _cmd_queue_list.push_back (cmd_queue);
    return true;
}
// Return the first (default) command queue, or NULL when none was created.
SmartPtr<CLCommandQueue>
CLContext::get_default_cmd_queue ()
{
    XCAM_ASSERT (!_cmd_queue_list.empty ());
    if (_cmd_queue_list.empty ())
        return NULL;
    return *_cmd_queue_list.begin ();
}
// Release the cl_context and mark this object invalid. Safe to call twice.
void
CLContext::destroy_context ()
{
    if (!is_valid ())
        return;
    clReleaseContext (_context_id);
    _context_id = NULL;
}
// Enqueue `kernel` on `queue` (default queue when NULL), waiting on
// `events_wait` and optionally filling `event_out` with the completion event.
// A zero local work size falls back to driver-chosen work-group sizing.
XCamReturn
CLContext::execute_kernel (
    CLKernel *kernel,
    CLCommandQueue *queue,
    CLEventList &events_wait,
    SmartPtr<CLEvent> &event_out)
{
    cl_int error_code = CL_SUCCESS;
    cl_command_queue cmd_queue_id = NULL;
    cl_kernel kernel_id = kernel->get_kernel_id ();
    uint32_t work_dims = kernel->get_work_dims ();
    const size_t *global_sizes = kernel->get_work_global_size ();
    const size_t *local_sizes = kernel->get_work_local_size ();
    cl_event *event_out_id = NULL;
    cl_event events_id_wait[XCAM_CL_MAX_EVENT_SIZE];
    uint32_t num_of_events_wait = 0;
    uint32_t work_group_size = 1;
    XCAM_ASSERT (kernel);
    if (queue == NULL) {
        SmartPtr<CLCommandQueue> cmd_queue = get_default_cmd_queue ();
        queue = cmd_queue.ptr ();
    }
    XCAM_ASSERT (queue);
    cmd_queue_id = queue->get_cmd_queue_id ();
    num_of_events_wait = event_list_2_id_array (events_wait, events_id_wait, XCAM_CL_MAX_EVENT_SIZE);
    if (event_out.ptr ())
        event_out_id = &event_out->get_event_id ();
    // product of the per-dimension local sizes; 0 means "not specified"
    for (uint32_t i = 0; i < work_dims; ++i) {
        work_group_size *= local_sizes[i];
    }
    if (!work_group_size)
        local_sizes = NULL;
    error_code =
        clEnqueueNDRangeKernel (
            cmd_queue_id, kernel_id,
            work_dims, NULL, global_sizes, local_sizes,
            num_of_events_wait, (num_of_events_wait ? events_id_wait : NULL),
            event_out_id);
    XCAM_FAIL_RETURN(
        WARNING,
        error_code == CL_SUCCESS,
        XCAM_RETURN_ERROR_CL,
        "execute kernel(%s) failed with error_code:%d",
        kernel->get_kernel_name (), error_code);
    return XCAM_RETURN_NO_ERROR;
}
// Create a new command queue on this context's device; returns NULL on
// failure. `self` must wrap this object so the queue can keep it alive.
SmartPtr<CLCommandQueue>
CLContext::create_cmd_queue (SmartPtr<CLContext> &self)
{
    cl_device_id device_id = _device->get_device_id ();
    cl_command_queue cmd_queue_id = NULL;
    cl_int err_code = 0;
    SmartPtr<CLCommandQueue> result;
    XCAM_ASSERT (self.ptr() == this);
    cmd_queue_id = clCreateCommandQueue (_context_id, device_id, 0, &err_code);
    if (err_code != CL_SUCCESS) {
        XCAM_LOG_WARNING ("create CL command queue failed.");
        return NULL;
    }
    result = new CLCommandQueue (self, cmd_queue_id);
    return result;
}
// Build `kernel` from OpenCL source text or a prebuilt binary and return the
// resulting cl_kernel (NULL on failure). When `program_binaries` and
// `binary_sizes` are both non-NULL, the built program binary is exported
// into a freshly xcam_malloc0'd buffer the caller must free.
cl_kernel
CLContext::generate_kernel_id (
    CLKernel *kernel,
    const uint8_t *source, size_t length,
    CLContext::KernelBuildType type,
    uint8_t **program_binaries, size_t *binary_sizes)
{
    // tiny RAII wrapper so the cl_program is released on every exit path
    struct CLProgram {
        cl_program id;
        CLProgram ()
            : id (NULL)
        {}
        ~CLProgram () {
            if (id)
                clReleaseProgram (id);
        }
    };
    CLProgram program;
    cl_kernel kernel_id = NULL;
    cl_int error_code = CL_SUCCESS;
    cl_device_id device_id = _device->get_device_id ();
    const char * name = kernel->get_kernel_name ();
    XCAM_ASSERT (source && length);
    XCAM_ASSERT (name);
    switch (type) {
    case KERNEL_BUILD_SOURCE:
        program.id =
            clCreateProgramWithSource (
                _context_id, 1,
                (const char**)(&source), (const size_t *)&length,
                &error_code);
        break;
    case KERNEL_BUILD_BINARY:
        program.id =
            clCreateProgramWithBinary (
                _context_id, 1, &device_id,
                (const size_t *)&length, (const uint8_t**)(&source),
                NULL, &error_code);
        break;
    }
    XCAM_FAIL_RETURN (
        WARNING,
        error_code == CL_SUCCESS,
        NULL,
        "cl create program failed with error_cod:%d", error_code);
    XCAM_ASSERT (program.id);
    error_code = clBuildProgram (program.id, 1, &device_id, NULL, CLContext::program_pfn_notify, this);
    if (error_code != CL_SUCCESS) {
        // fetch and log the compiler output before bailing out
        char error_log [XCAM_CL_MAX_STR_SIZE];
        xcam_mem_clear (error_log);
        clGetProgramBuildInfo (program.id, device_id, CL_PROGRAM_BUILD_LOG, sizeof (error_log) - 1, error_log, NULL);
        XCAM_LOG_WARNING ("CL build program failed on %s, build log:%s", name, error_log);
        return NULL;
    }
    if (program_binaries != NULL && binary_sizes != NULL) {
        // optional: export the built binary (caller owns the buffer)
        error_code = clGetProgramInfo (program.id, CL_PROGRAM_BINARY_SIZES, sizeof (size_t) * 1, binary_sizes, NULL);
        if (error_code != CL_SUCCESS) {
            XCAM_LOG_WARNING ("CL query binary sizes failed on %s", name);
        }
        *program_binaries = (uint8_t *) xcam_malloc0 (sizeof (uint8_t) * (*binary_sizes));
        error_code = clGetProgramInfo (program.id, CL_PROGRAM_BINARIES, sizeof (uint8_t *) * 1, program_binaries, NULL);
        if (error_code != CL_SUCCESS) {
            XCAM_LOG_WARNING ("CL query program binaries failed on %s", name);
        }
    }
    kernel_id = clCreateKernel (program.id, name, &error_code);
    XCAM_FAIL_RETURN (
        WARNING,
        error_code == CL_SUCCESS,
        NULL,
        "cl create kernel(%s) failed with error_cod:%d", name, error_code);
    return kernel_id;
}
// Release `kernel_id` (if any) and reset the caller's handle to NULL.
void
CLContext::destroy_kernel_id (cl_kernel &kernel_id)
{
    if (!kernel_id)
        return;
    clReleaseKernel (kernel_id);
    kernel_id = NULL;
}
#if 0
bool
CLContext::insert_kernel (SmartPtr<CLKernel> &kernel)
{
std::string kernel_name = kernel->get_kernel_name ();
CLKernelMap::iterator i_pos = _kernel_map.lower_bound (kernel_name);
XCAM_ASSERT (!kernel_name.empty());
if (i_pos != _kernel_map.end () && !_kernel_map.key_comp ()(kernel_name, i_pos->first)) {
// need update
i_pos->second = kernel;
XCAM_LOG_DEBUG ("kernel:%s already exist in context, now update to new one", kernel_name.c_str());
return true;
}
_kernel_map.insert (i_pos, std::make_pair (kernel_name, kernel));
return true;
}
#endif
// Wrap a libva buffer object (by bo name) in a cl_mem using the Intel
// extension. Returns NULL when the context is invalid or the call fails.
cl_mem
CLContext::create_va_buffer (uint32_t bo_name)
{
    cl_mem mem_id = NULL;
    cl_int errcode = CL_SUCCESS;
    if (!is_valid())
        return NULL;
    mem_id = clCreateBufferFromLibvaIntel (_context_id, bo_name, &errcode);
    // message previously said "va image" (copy/paste from create_va_image)
    XCAM_FAIL_RETURN(
        WARNING,
        errcode == CL_SUCCESS,
        NULL,
        "create cl memory from va buffer failed");
    return mem_id;
}
// Wrap a libva surface described by `image_info` in a cl_mem via the Intel
// extension. Returns NULL when the context is invalid or the call fails.
cl_mem
CLContext::create_va_image (const cl_libva_image &image_info)
{
    cl_mem mem_id = NULL;
    cl_int errcode = CL_SUCCESS;
    if (!is_valid())
        return NULL;
    mem_id = clCreateImageFromLibvaIntel (_context_id, &image_info, &errcode);
    XCAM_FAIL_RETURN(
        WARNING,
        errcode == CL_SUCCESS,
        NULL,
        "create cl memory from va image failed");
    return mem_id;
}
// Create a plain OpenCL image with the given format/descriptor; `host_ptr`
// follows the clCreateImage semantics for the chosen flags. NULL on failure.
cl_mem
CLContext::create_image (
    cl_mem_flags flags, const cl_image_format& format,
    const cl_image_desc &image_info, void *host_ptr)
{
    cl_mem mem_id = NULL;
    cl_int errcode = CL_SUCCESS;
    mem_id = clCreateImage (
        _context_id, flags,
        &format, &image_info,
        host_ptr, &errcode);
    XCAM_FAIL_RETURN (
        WARNING,
        errcode == CL_SUCCESS,
        NULL,
        "create cl image failed");
    return mem_id;
}
// Release a cl_mem handle; NULL is tolerated.
void
CLContext::destroy_mem (cl_mem mem_id)
{
    if (!mem_id)
        return;
    clReleaseMemObject (mem_id);
}
// Create an OpenCL buffer of `size` bytes; `host_ptr` follows clCreateBuffer
// semantics for the chosen flags. Returns NULL on failure.
cl_mem
CLContext::create_buffer (uint32_t size, cl_mem_flags flags, void *host_ptr)
{
    cl_mem mem_id = NULL;
    cl_int errcode = CL_SUCCESS;
    XCAM_ASSERT (_context_id);
    mem_id = clCreateBuffer (
        _context_id, flags,
        size, host_ptr,
        &errcode);
    XCAM_FAIL_RETURN (
        WARNING,
        errcode == CL_SUCCESS,
        NULL,
        "create cl buffer failed");
    return mem_id;
}
// Enqueue a read of `size` bytes at `offset` from `buf_id` into `ptr` on the
// default command queue; blocking when `block` is true. Waits on
// `events_wait`; optionally fills `event_out` with the completion event.
XCamReturn
CLContext::enqueue_read_buffer (
    cl_mem buf_id, void *ptr,
    uint32_t offset, uint32_t size,
    bool block,
    CLEventList &events_wait,
    SmartPtr<CLEvent> &event_out)
{
    SmartPtr<CLCommandQueue> cmd_queue;
    cl_command_queue cmd_queue_id = NULL;
    cl_event *event_out_id = NULL;
    cl_event events_id_wait[XCAM_CL_MAX_EVENT_SIZE];
    uint32_t num_of_events_wait = 0;
    cl_int errcode = CL_SUCCESS;
    cmd_queue = get_default_cmd_queue ();
    cmd_queue_id = cmd_queue->get_cmd_queue_id ();
    num_of_events_wait = event_list_2_id_array (events_wait, events_id_wait, XCAM_CL_MAX_EVENT_SIZE);
    if (event_out.ptr ())
        event_out_id = &event_out->get_event_id ();
    XCAM_ASSERT (_context_id);
    XCAM_ASSERT (cmd_queue_id);
    errcode = clEnqueueReadBuffer (
        cmd_queue_id, buf_id,
        (block ? CL_BLOCKING : CL_NON_BLOCKING),
        offset, size, ptr,
        num_of_events_wait, (num_of_events_wait ? events_id_wait : NULL),
        event_out_id);
    XCAM_FAIL_RETURN (
        WARNING,
        errcode == CL_SUCCESS,
        XCAM_RETURN_ERROR_CL,
        "cl enqueue read buffer failed with error_code:%d", errcode);
    return XCAM_RETURN_NO_ERROR;
}
// Enqueue a write of `size` bytes at `offset` from `ptr` into `buf_id` on
// the default command queue; blocking when `block` is true. Mirrors
// enqueue_read_buffer above.
XCamReturn
CLContext::enqueue_write_buffer (
    cl_mem buf_id, void *ptr,
    uint32_t offset, uint32_t size,
    bool block,
    CLEventList &events_wait,
    SmartPtr<CLEvent> &event_out)
{
    SmartPtr<CLCommandQueue> cmd_queue;
    cl_command_queue cmd_queue_id = NULL;
    cl_event *event_out_id = NULL;
    cl_event events_id_wait[XCAM_CL_MAX_EVENT_SIZE];
    uint32_t num_of_events_wait = 0;
    cl_int errcode = CL_SUCCESS;
    cmd_queue = get_default_cmd_queue ();
    cmd_queue_id = cmd_queue->get_cmd_queue_id ();
    num_of_events_wait = event_list_2_id_array (events_wait, events_id_wait, XCAM_CL_MAX_EVENT_SIZE);
    if (event_out.ptr ())
        event_out_id = &event_out->get_event_id ();
    XCAM_ASSERT (_context_id);
    XCAM_ASSERT (cmd_queue_id);
    errcode = clEnqueueWriteBuffer (
        cmd_queue_id, buf_id,
        (block ? CL_BLOCKING : CL_NON_BLOCKING),
        offset, size, ptr,
        num_of_events_wait, (num_of_events_wait ? events_id_wait : NULL),
        event_out_id);
    XCAM_FAIL_RETURN (
        WARNING,
        errcode == CL_SUCCESS,
        XCAM_RETURN_ERROR_CL,
        "cl enqueue write buffer failed with error_code:%d", errcode);
    return XCAM_RETURN_NO_ERROR;
}
// Export a cl_mem as a dmabuf-style file descriptor via the Intel extension.
// Returns the fd (caller owns/closes it) or -1 on failure.
int32_t
CLContext::export_mem_fd (cl_mem mem_id)
{
    cl_int errcode = CL_SUCCESS;
    int32_t fd = -1;
    XCAM_ASSERT (mem_id);
    errcode = clGetMemObjectFdIntel (_context_id, mem_id, &fd);
    XCAM_FAIL_RETURN (
        WARNING,
        errcode == CL_SUCCESS,
        -1,
        "export cl mem fd failed");
    return fd;
}
// Takes ownership of an existing cl_command_queue `id`; keeps a reference
// to `context` so the context outlives the queue.
CLCommandQueue::CLCommandQueue (SmartPtr<CLContext> &context, cl_command_queue id)
    : _context (context)
    , _cmd_queue_id (id)
{
    XCAM_ASSERT (context.ptr ());
    XCAM_ASSERT (id);
    XCAM_LOG_DEBUG ("CLCommandQueue constructed");
}
// Releases the owned cl_command_queue.
CLCommandQueue::~CLCommandQueue ()
{
    destroy ();
    // fixed "desstructed" typo in the debug message
    XCAM_LOG_DEBUG ("CLCommandQueue destructed");
}
// Release the cl_command_queue handle; safe to call more than once.
void
CLCommandQueue::destroy ()
{
    if (_cmd_queue_id == NULL)
        return;
    clReleaseCommandQueue (_cmd_queue_id);
    _cmd_queue_id = NULL;
}
};
| apache-2.0 |
medgar/click | examples/src/org/apache/click/examples/page/general/PageImportsExample.java | 2276 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.click.examples.page.general;
import java.util.List;
import org.apache.click.element.CssImport;
import org.apache.click.element.Element;
import org.apache.click.element.JsImport;
import org.apache.click.examples.page.BorderPage;
/**
* This page provides an example of how to programmatically optimize your
* PageImports for high traffic pages. You optimize your Page by combining
* multiple CSS and JavaScript import files into a single file, which reduces
* the number of HTTP requests required to serve the page.
* <p/>
* Then you set the Page property "includeControlHeadElements" to false so that
* Controls won't have their head elements imported.
*/
public class PageImportsExample extends BorderPage {

    private static final long serialVersionUID = 1L;

    public PageImportsExample() {
        // Controls must not contribute their own HEAD elements; this page
        // supplies the combined CSS/JS import files itself.
        setIncludeControlHeadElements(false);
    }

    /**
     * Return the list of the Page HEAD elements, built lazily: the parent's
     * elements plus the combined CSS and JavaScript import files.
     *
     * @return the list of Page HEAD elements
     */
    @Override
    public List<Element> getHeadElements() {
        if (headElements != null) {
            return headElements;
        }
        headElements = super.getHeadElements();
        headElements.add(new CssImport("/assets/css/imports.css"));
        headElements.add(new JsImport("/assets/js/imports.js"));
        return headElements;
    }
}
| apache-2.0 |
bmaxa/changes_and_fixes | src/test/run-pass/newtype.rs | 763 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tuple ("newtype") struct wrapping `Mytype` — pre-1.0 Rust test syntax
// (`int`, `printfln!`); kept verbatim, comments only.
struct mytype(Mytype);
// Holds an `extern fn` pointer alongside a value it can operate on.
struct Mytype {compute: extern fn(mytype) -> int, val: int}
// Free function sharing the name of the `compute` field; `i.val` reads
// through the newtype wrapper.
fn compute(i: mytype) -> int { return i.val + 20; }
pub fn main() {
    let myval = mytype(Mytype{compute: compute, val: 30});
    printfln!("%d", compute(myval));
    // Parentheses force field access (the stored fn pointer) rather than
    // a method call; 30 + 20 == 50.
    assert_eq!((myval.compute)(myval), 50);
}
| apache-2.0 |
garpinc/pac4j | pac4j-saml/src/main/java/org/pac4j/saml/metadata/SAML2ServiceProvicerRequestedAttribute.java | 2026 | package org.pac4j.saml.metadata;
import java.io.Serializable;
/**
* This is {@link SAML2ServiceProvicerRequestedAttribute}.
*
* @author Misagh Moayyed
*/
/**
 * A requested attribute entry for SAML2 service-provider metadata.
 * Fields remain public for backward compatibility with existing callers.
 */
public class SAML2ServiceProvicerRequestedAttribute implements Serializable {

    private static final long serialVersionUID = 1040516205957826527L;

    /** Default SAML 2.0 attribute-name format URI. */
    private static final String DEFAULT_NAME_FORMAT =
        "urn:oasis:names:tc:SAML:2.0:attrname-format:uri";

    /** The attribute name. */
    public String name;

    /** The human-readable attribute name. */
    public String friendlyName;

    /** The attribute name format; defaults to the SAML 2.0 URI format. */
    public String nameFormat = DEFAULT_NAME_FORMAT;

    /** Whether the service provider requires this attribute. */
    public boolean isRequired;

    public SAML2ServiceProvicerRequestedAttribute() {
    }

    public SAML2ServiceProvicerRequestedAttribute(final String name, final String friendlyName) {
        this.name = name;
        this.friendlyName = friendlyName;
    }

    public SAML2ServiceProvicerRequestedAttribute(final String name, final String friendlyName,
                                                  final String nameFormat, final boolean isRequired) {
        this(name, friendlyName);
        this.nameFormat = nameFormat;
        this.isRequired = isRequired;
    }

    public String getName() {
        return this.name;
    }

    public void setName(final String name) {
        this.name = name;
    }

    public String getFriendlyName() {
        return this.friendlyName;
    }

    public void setFriendlyName(final String friendlyName) {
        this.friendlyName = friendlyName;
    }

    public String getNameFormat() {
        return this.nameFormat;
    }

    public void setNameFormat(final String nameFormat) {
        this.nameFormat = nameFormat;
    }

    public boolean isRequired() {
        return this.isRequired;
    }

    public void setRequired(final boolean required) {
        this.isRequired = required;
    }

    @Override
    public String toString() {
        // Same rendering as the original concatenation-based version.
        return String.format(
            "RequestedServiceProviderAttribute{name='%s', friendlyName='%s', nameFormat='%s', isRequired=%s}",
            this.name, this.friendlyName, this.nameFormat, this.isRequired);
    }
}
| apache-2.0 |
ClarenceAu/log4j2 | log4j-taglib/src/test/java/org/apache/logging/log4j/taglib/ErrorTagTest.java | 1141 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache license, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the license for the specific language governing permissions and
* limitations under the license.
*/
package org.apache.logging.log4j.taglib;
import org.apache.logging.log4j.Level;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/**
*
*/
public class ErrorTagTest {
@Test
public void testGetLevel() {
assertEquals("The logging level is not correct.", Level.ERROR, new ErrorTag().getLevel());
}
}
| apache-2.0 |
nknize/elasticsearch | x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java | 40490 | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.cluster.routing.allocation;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.EmptyClusterInfoService;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeRole;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.snapshots.EmptySnapshotsInfoService;
import org.elasticsearch.test.gateway.TestGatewayAllocator;
import org.elasticsearch.xpack.core.DataTier;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
public class DataTierAllocationDeciderTests extends ESAllocationTestCase {
// Settings whitelist for the test ClusterSettings; filled in the static
// initializer below.
private static final Set<Setting<?>> ALL_SETTINGS;
// One single-role node per data tier, plus a legacy generic data node
// (the assertions below show it is accepted by every tier filter).
private static final DiscoveryNode HOT_NODE = newNode("node-hot", Collections.singleton(DataTier.DATA_HOT_NODE_ROLE));
private static final DiscoveryNode WARM_NODE = newNode("node-warm", Collections.singleton(DataTier.DATA_WARM_NODE_ROLE));
private static final DiscoveryNode COLD_NODE = newNode("node-cold", Collections.singleton(DataTier.DATA_COLD_NODE_ROLE));
private static final DiscoveryNode CONTENT_NODE = newNode("node-content", Collections.singleton(DataTier.DATA_CONTENT_NODE_ROLE));
private static final DiscoveryNode DATA_NODE = newNode("node-data", Collections.singleton(DiscoveryNodeRole.DATA_ROLE));
private final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ALL_SETTINGS);
// The decider under test, wired into a minimal AllocationDeciders chain.
private final DataTierAllocationDecider decider = new DataTierAllocationDecider(Settings.EMPTY, clusterSettings);
private final AllocationDeciders allocationDeciders = new AllocationDeciders(
    Arrays.asList(decider,
        new SameShardAllocationDecider(Settings.EMPTY, clusterSettings),
        new ReplicaAfterPrimaryActiveAllocationDecider()));
private final AllocationService service = new AllocationService(allocationDeciders,
    new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE,
    EmptySnapshotsInfoService.INSTANCE);
// A single unassigned primary of index "myindex" used by every test.
private final ShardRouting shard = ShardRouting.newUnassigned(new ShardId("myindex", "myindex", 0), true,
    RecoverySource.EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "index created"));
static {
    // Built-in cluster settings plus the three _tier routing settings the
    // tests toggle at runtime.
    Set<Setting<?>> allSettings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    allSettings.add(DataTierAllocationDecider.CLUSTER_ROUTING_REQUIRE_SETTING);
    allSettings.add(DataTierAllocationDecider.CLUSTER_ROUTING_INCLUDE_SETTING);
    allSettings.add(DataTierAllocationDecider.CLUSTER_ROUTING_EXCLUDE_SETTING);
    ALL_SETTINGS = allSettings;
}
/**
 * Cluster-level {@code cluster.routing.allocation.require._tier=data_hot}:
 * the hot node and the generic data node may receive and keep the shard;
 * warm and cold nodes get NO for both canAllocate and canRemain.
 */
public void testClusterRequires() {
    ClusterState state = prepareState(service.reroute(ClusterState.EMPTY_STATE, "initial state"));
    RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state,
        null, null, 0);
    allocation.debugDecision(true);
    clusterSettings.applySettings(Settings.builder()
        .put(DataTierAllocationDecider.CLUSTER_ROUTING_REQUIRE, "data_hot")
        .build());
    Decision d;
    RoutingNode node;
    // Nodes satisfying the require filter.
    for (DiscoveryNode n : Arrays.asList(HOT_NODE, DATA_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.YES));
        d = decider.canRemain(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.YES));
    }
    // Non-matching tiers are rejected with an explanatory debug message.
    for (DiscoveryNode n : Arrays.asList(WARM_NODE, COLD_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.NO));
        assertThat(d.getExplanation(),
            containsString("node does not match all cluster setting [cluster.routing.allocation.require._tier] " +
                "tier filters [data_hot]"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.NO));
        assertThat(d.getExplanation(),
            containsString("node does not match all cluster setting [cluster.routing.allocation.require._tier] " +
                "tier filters [data_hot]"));
    }
}
/**
 * Cluster-level {@code cluster.routing.allocation.include._tier}: warm,
 * cold, and the generic data node match {@code data_warm,data_cold};
 * the hot node is rejected for both allocation and remaining.
 */
public void testClusterIncludes() {
    ClusterState state = prepareState(service.reroute(ClusterState.EMPTY_STATE, "initial state"));
    RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state,
        null, null, 0);
    allocation.debugDecision(true);
    clusterSettings.applySettings(Settings.builder()
        .put(DataTierAllocationDecider.CLUSTER_ROUTING_INCLUDE, "data_warm,data_cold")
        .build());
    Decision d;
    RoutingNode node;
    for (DiscoveryNode n : Arrays.asList(WARM_NODE, DATA_NODE, COLD_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.YES));
        d = decider.canRemain(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.YES));
    }
    for (DiscoveryNode n : Arrays.asList(HOT_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.NO));
        assertThat(d.getExplanation(),
            containsString("node does not match any cluster setting [cluster.routing.allocation.include._tier] " +
                "tier filters [data_warm,data_cold]"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.NO));
        assertThat(d.getExplanation(),
            containsString("node does not match any cluster setting [cluster.routing.allocation.include._tier] " +
                "tier filters [data_warm,data_cold]"));
    }
}
/**
 * Cluster-level {@code cluster.routing.allocation.exclude._tier=data_warm}:
 * the warm node AND the generic data node (which matches every tier) are
 * excluded; hot and cold nodes stay eligible.
 */
public void testClusterExcludes() {
    ClusterState state = prepareState(service.reroute(ClusterState.EMPTY_STATE, "initial state"));
    RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state,
        null, null, 0);
    allocation.debugDecision(true);
    clusterSettings.applySettings(Settings.builder()
        .put(DataTierAllocationDecider.CLUSTER_ROUTING_EXCLUDE, "data_warm")
        .build());
    Decision d;
    RoutingNode node;
    for (DiscoveryNode n : Arrays.asList(WARM_NODE, DATA_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.NO));
        assertThat(d.getExplanation(),
            containsString("node matches any cluster setting [cluster.routing.allocation.exclude._tier] " +
                "tier filters [data_warm]"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.NO));
        assertThat(d.getExplanation(),
            containsString("node matches any cluster setting [cluster.routing.allocation.exclude._tier] " +
                "tier filters [data_warm]"));
    }
    for (DiscoveryNode n : Arrays.asList(HOT_NODE, COLD_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(n.toString(), d.type(), equalTo(Decision.Type.YES));
        d = decider.canRemain(shard, node, allocation);
        assertThat(n.toString(), d.type(), equalTo(Decision.Type.YES));
    }
}
/**
 * Index-level {@code index.routing.allocation.require._tier=data_hot}
 * (applied via prepareState index settings): hot and generic data nodes
 * pass; warm and cold nodes are rejected.
 */
public void testIndexRequires() {
    ClusterState state = prepareState(service.reroute(ClusterState.EMPTY_STATE, "initial state"),
        Settings.builder()
            .put(DataTierAllocationDecider.INDEX_ROUTING_REQUIRE, "data_hot")
            .build());
    RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state,
        null, null, 0);
    allocation.debugDecision(true);
    Decision d;
    RoutingNode node;
    for (DiscoveryNode n : Arrays.asList(HOT_NODE, DATA_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.YES));
        d = decider.canRemain(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.YES));
    }
    for (DiscoveryNode n : Arrays.asList(WARM_NODE, COLD_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.NO));
        assertThat(d.getExplanation(),
            containsString("node does not match all index setting [index.routing.allocation.require._tier] tier filters [data_hot]"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.NO));
        assertThat(d.getExplanation(),
            containsString("node does not match all index setting [index.routing.allocation.require._tier] tier filters [data_hot]"));
    }
}
/**
 * Index-level {@code index.routing.allocation.include._tier}: warm, cold,
 * and generic data nodes match {@code data_warm,data_cold}; the hot node
 * is rejected with the index-setting explanation.
 */
public void testIndexIncludes() {
    ClusterState state = prepareState(service.reroute(ClusterState.EMPTY_STATE, "initial state"),
        Settings.builder()
            .put(DataTierAllocationDecider.INDEX_ROUTING_INCLUDE, "data_warm,data_cold")
            .build());
    RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state,
        null, null, 0);
    allocation.debugDecision(true);
    Decision d;
    RoutingNode node;
    for (DiscoveryNode n : Arrays.asList(WARM_NODE, DATA_NODE, COLD_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.YES));
        d = decider.canRemain(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.YES));
    }
    for (DiscoveryNode n : Arrays.asList(HOT_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.NO));
        assertThat(d.getExplanation(),
            containsString("node does not match any index setting [index.routing.allocation.include._tier] " +
                "tier filters [data_warm,data_cold]"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.NO));
        assertThat(d.getExplanation(),
            containsString("node does not match any index setting [index.routing.allocation.include._tier] " +
                "tier filters [data_warm,data_cold]"));
    }
}
/**
 * Index-level {@code index.routing.allocation.exclude._tier}: excluding
 * {@code data_warm,data_cold} bars warm, cold, and the generic data node;
 * only the hot node remains eligible.
 */
public void testIndexExcludes() {
    ClusterState state = prepareState(service.reroute(ClusterState.EMPTY_STATE, "initial state"),
        Settings.builder()
            .put(DataTierAllocationDecider.INDEX_ROUTING_EXCLUDE, "data_warm,data_cold")
            .build());
    RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state,
        null, null,0);
    allocation.debugDecision(true);
    Decision d;
    RoutingNode node;
    for (DiscoveryNode n : Arrays.asList(WARM_NODE, DATA_NODE, COLD_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.NO));
        assertThat(d.getExplanation(),
            containsString("node matches any index setting [index.routing.allocation.exclude._tier] " +
                "tier filters [data_warm,data_cold]"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(d.type(), equalTo(Decision.Type.NO));
        assertThat(d.getExplanation(),
            containsString("node matches any index setting [index.routing.allocation.exclude._tier] " +
                "tier filters [data_warm,data_cold]"));
    }
    for (DiscoveryNode n : Arrays.asList(HOT_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(n.toString(), d.type(), equalTo(Decision.Type.YES));
        d = decider.canRemain(shard, node, allocation);
        assertThat(n.toString(), d.type(), equalTo(Decision.Type.YES));
    }
}
/**
 * Index-level {@code index.routing.allocation.prefer._tier=data_warm,data_cold}.
 * First scenario: only a hot node exists, so no preferred tier is
 * available and every node is rejected. Second scenario: a cold node is
 * added; the preference then resolves to data_cold, so only the cold
 * node gets YES.
 */
public void testIndexPrefer() {
    // Cluster with just the hot node: neither preferred tier is present.
    ClusterState state = ClusterState.builder(service.reroute(ClusterState.EMPTY_STATE, "initial state"))
        .nodes(DiscoveryNodes.builder()
            .add(HOT_NODE)
            .build())
        .metadata(Metadata.builder()
            .put(IndexMetadata.builder("myindex")
                .settings(Settings.builder()
                    .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
                    .put(IndexMetadata.SETTING_INDEX_UUID, "myindex")
                    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
                    .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
                    .put(DataTierAllocationDecider.INDEX_ROUTING_PREFER, "data_warm,data_cold")
                    .build()))
            .build())
        .build();
    RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0);
    allocation.debugDecision(true);
    Decision d;
    RoutingNode node;
    for (DiscoveryNode n : Arrays.asList(HOT_NODE, WARM_NODE, COLD_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("index has a preference for tiers [data_warm,data_cold], " +
                "but no nodes for any of those tiers are available in the cluster"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("index has a preference for tiers [data_warm,data_cold], " +
                "but no nodes for any of those tiers are available in the cluster"));
    }
    // Add a cold node: the preference now resolves to data_cold.
    state = ClusterState.builder(service.reroute(ClusterState.EMPTY_STATE, "initial state"))
        .nodes(DiscoveryNodes.builder()
            .add(HOT_NODE)
            .add(COLD_NODE)
            .build())
        .metadata(Metadata.builder()
            .put(IndexMetadata.builder("myindex")
                .settings(Settings.builder()
                    .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
                    .put(IndexMetadata.SETTING_INDEX_UUID, "myindex")
                    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
                    .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
                    .put(DataTierAllocationDecider.INDEX_ROUTING_PREFER, "data_warm,data_cold")
                    .build()))
            .build())
        .build();
    allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0);
    allocation.debugDecision(true);
    for (DiscoveryNode n : Arrays.asList(HOT_NODE, WARM_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("index has a preference for tiers [data_warm,data_cold] " +
                "and node does not meet the required [data_cold] tier"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("index has a preference for tiers [data_warm,data_cold] " +
                "and node does not meet the required [data_cold] tier"));
    }
    for (DiscoveryNode n : Arrays.asList(COLD_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.YES));
        assertThat(node.toString(), d.getExplanation(),
            containsString("index has a preference for tiers [data_warm,data_cold] and node has tier [data_cold]"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.YES));
        assertThat(node.toString(), d.getExplanation(),
            containsString("index has a preference for tiers [data_warm,data_cold] and node has tier [data_cold]"));
    }
}
/**
 * Preference combined with an include filter: with warm+cold nodes in the
 * cluster, prefer resolves to data_warm, while the include filter demands
 * data_cold. Hot/warm/content fail the include filter; the cold node
 * fails the resolved preference; only the generic data node (which
 * matches both data_cold and data_warm) gets YES.
 */
public void testIndexPreferWithInclude() {
    ClusterState state = ClusterState.builder(service.reroute(ClusterState.EMPTY_STATE, "initial state"))
        .nodes(DiscoveryNodes.builder()
            .add(WARM_NODE)
            .add(COLD_NODE)
            .build())
        .metadata(Metadata.builder()
            .put(IndexMetadata.builder("myindex")
                .settings(Settings.builder()
                    .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
                    .put(IndexMetadata.SETTING_INDEX_UUID, "myindex")
                    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
                    .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
                    .put(DataTierAllocationDecider.INDEX_ROUTING_INCLUDE, "data_cold")
                    .put(DataTierAllocationDecider.INDEX_ROUTING_PREFER, "data_warm,data_cold")
                    .build()))
            .build())
        .build();
    RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0);
    allocation.debugDecision(true);
    Decision d;
    RoutingNode node;
    for (DiscoveryNode n : Arrays.asList(HOT_NODE, WARM_NODE, CONTENT_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("node does not match any index setting [index.routing.allocation.include._tier] tier filters [data_cold]"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("node does not match any index setting [index.routing.allocation.include._tier] tier filters [data_cold]"));
    }
    for (DiscoveryNode n : Arrays.asList(COLD_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("index has a preference for tiers [data_warm,data_cold] " +
                "and node does not meet the required [data_warm] tier"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("index has a preference for tiers [data_warm,data_cold] " +
                "and node does not meet the required [data_warm] tier"));
    }
    for (DiscoveryNode n : Arrays.asList(DATA_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.YES));
        assertThat(node.toString(), d.getExplanation(),
            containsString("index has a preference for tiers [data_warm,data_cold] and node has tier [data_warm]"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.YES));
        assertThat(node.toString(), d.getExplanation(),
            containsString("index has a preference for tiers [data_warm,data_cold] and node has tier [data_warm]"));
    }
}
/**
 * Preference combined with an exclude filter: prefer resolves to
 * data_warm (warm node present), but data_warm is excluded. Hot, cold and
 * content nodes fail the resolved preference; the warm node and the
 * generic data node (which matches data_warm) fail the exclude filter —
 * so no node qualifies at all.
 */
public void testIndexPreferWithExclude() {
    ClusterState state = ClusterState.builder(service.reroute(ClusterState.EMPTY_STATE, "initial state"))
        .nodes(DiscoveryNodes.builder()
            .add(WARM_NODE)
            .add(COLD_NODE)
            .build())
        .metadata(Metadata.builder()
            .put(IndexMetadata.builder("myindex")
                .settings(Settings.builder()
                    .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
                    .put(IndexMetadata.SETTING_INDEX_UUID, "myindex")
                    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
                    .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
                    .put(DataTierAllocationDecider.INDEX_ROUTING_EXCLUDE, "data_warm")
                    .put(DataTierAllocationDecider.INDEX_ROUTING_PREFER, "data_warm,data_cold")
                    .build()))
            .build())
        .build();
    RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0);
    allocation.debugDecision(true);
    Decision d;
    RoutingNode node;
    for (DiscoveryNode n : Arrays.asList(HOT_NODE, COLD_NODE, CONTENT_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("index has a preference for tiers [data_warm,data_cold] " +
                "and node does not meet the required [data_warm] tier"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("index has a preference for tiers [data_warm,data_cold] " +
                "and node does not meet the required [data_warm] tier"));
    }
    for (DiscoveryNode n : Arrays.asList(WARM_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("node matches any index setting [index.routing.allocation.exclude._tier] tier filters [data_warm]"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("node matches any index setting [index.routing.allocation.exclude._tier] tier filters [data_warm]"));
    }
    for (DiscoveryNode n : Arrays.asList(DATA_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("node matches any index setting [index.routing.allocation.exclude._tier] tier filters [data_warm]"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("node matches any index setting [index.routing.allocation.exclude._tier] tier filters [data_warm]"));
    }
}
/**
 * Preference combined with a require filter: prefer resolves to data_warm
 * (warm node present) while require demands data_cold. Hot/warm/content
 * fail the require filter; the cold node fails the resolved preference;
 * only the generic data node satisfies both and gets YES.
 */
public void testIndexPreferWithRequire() {
    ClusterState state = ClusterState.builder(service.reroute(ClusterState.EMPTY_STATE, "initial state"))
        .nodes(DiscoveryNodes.builder()
            .add(WARM_NODE)
            .add(COLD_NODE)
            .build())
        .metadata(Metadata.builder()
            .put(IndexMetadata.builder("myindex")
                .settings(Settings.builder()
                    .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
                    .put(IndexMetadata.SETTING_INDEX_UUID, "myindex")
                    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
                    .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
                    .put(DataTierAllocationDecider.INDEX_ROUTING_REQUIRE, "data_cold")
                    .put(DataTierAllocationDecider.INDEX_ROUTING_PREFER, "data_warm,data_cold")
                    .build()))
            .build())
        .build();
    RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0);
    allocation.debugDecision(true);
    Decision d;
    RoutingNode node;
    for (DiscoveryNode n : Arrays.asList(HOT_NODE, WARM_NODE, CONTENT_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("node does not match all index setting [index.routing.allocation.require._tier] tier filters [data_cold]"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("node does not match all index setting [index.routing.allocation.require._tier] tier filters [data_cold]"));
    }
    for (DiscoveryNode n : Arrays.asList(COLD_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("index has a preference for tiers [data_warm,data_cold] " +
                "and node does not meet the required [data_warm] tier"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("index has a preference for tiers [data_warm,data_cold] " +
                "and node does not meet the required [data_warm] tier"));
    }
    for (DiscoveryNode n : Arrays.asList(DATA_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.YES));
        assertThat(node.toString(), d.getExplanation(),
            containsString("index has a preference for tiers [data_warm,data_cold] and node has tier [data_warm]"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.YES));
        assertThat(node.toString(), d.getExplanation(),
            containsString("index has a preference for tiers [data_warm,data_cold] and node has tier [data_warm]"));
    }
}
/**
 * Index-level include (data_warm,data_cold) combined with a cluster-level
 * exclude (data_cold): the hot node fails the index include; the generic
 * data node (which matches data_cold) fails the cluster exclude; only the
 * warm node satisfies both.
 */
public void testClusterAndIndex() {
    ClusterState state = prepareState(service.reroute(ClusterState.EMPTY_STATE, "initial state"),
        Settings.builder()
            .put(DataTierAllocationDecider.INDEX_ROUTING_INCLUDE, "data_warm,data_cold")
            .build());
    RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state,
        null, null,0);
    clusterSettings.applySettings(Settings.builder()
        .put(DataTierAllocationDecider.CLUSTER_ROUTING_EXCLUDE, "data_cold")
        .build());
    allocation.debugDecision(true);
    Decision d;
    RoutingNode node;
    for (DiscoveryNode n : Arrays.asList(HOT_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("node does not match any index setting [index.routing.allocation.include._tier] " +
                "tier filters [data_warm,data_cold]"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(node.toString(), d.getExplanation(),
            containsString("node does not match any index setting [index.routing.allocation.include._tier] " +
                "tier filters [data_warm,data_cold]"));
    }
    for (DiscoveryNode n : Arrays.asList(DATA_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(d.getExplanation(),
            containsString("node matches any cluster setting [cluster.routing.allocation.exclude._tier] tier filters [data_cold]"));
        d = decider.canRemain(shard, node, allocation);
        assertThat(node.toString(), d.type(), equalTo(Decision.Type.NO));
        assertThat(d.getExplanation(),
            containsString("node matches any cluster setting [cluster.routing.allocation.exclude._tier] tier filters [data_cold]"));
    }
    for (DiscoveryNode n : Arrays.asList(WARM_NODE)) {
        node = new RoutingNode(n.getId(), n, shard);
        d = decider.canAllocate(shard, node, allocation);
        assertThat(n.toString(), d.type(), equalTo(Decision.Type.YES));
        d = decider.canRemain(shard, node, allocation);
        assertThat(n.toString(), d.type(), equalTo(Decision.Type.YES));
    }
}
public void testTierNodesPresent() {
DiscoveryNodes nodes = DiscoveryNodes.builder().build();
assertFalse(DataTierAllocationDecider.tierNodesPresent("data", nodes));
assertFalse(DataTierAllocationDecider.tierNodesPresent("data_hot", nodes));
assertFalse(DataTierAllocationDecider.tierNodesPresent("data_warm", nodes));
assertFalse(DataTierAllocationDecider.tierNodesPresent("data_cold", nodes));
assertFalse(DataTierAllocationDecider.tierNodesPresent("data_content", nodes));
nodes = DiscoveryNodes.builder()
.add(WARM_NODE)
.add(CONTENT_NODE)
.build();
assertFalse(DataTierAllocationDecider.tierNodesPresent("data", nodes));
assertFalse(DataTierAllocationDecider.tierNodesPresent("data_hot", nodes));
assertTrue(DataTierAllocationDecider.tierNodesPresent("data_warm", nodes));
assertFalse(DataTierAllocationDecider.tierNodesPresent("data_cold", nodes));
assertTrue(DataTierAllocationDecider.tierNodesPresent("data_content", nodes));
nodes = DiscoveryNodes.builder()
.add(DATA_NODE)
.build();
assertTrue(DataTierAllocationDecider.tierNodesPresent("data", nodes));
assertTrue(DataTierAllocationDecider.tierNodesPresent("data_hot", nodes));
assertTrue(DataTierAllocationDecider.tierNodesPresent("data_warm", nodes));
assertTrue(DataTierAllocationDecider.tierNodesPresent("data_cold", nodes));
assertTrue(DataTierAllocationDecider.tierNodesPresent("data_content", nodes));
}
public void testPreferredTierAvailable() {
DiscoveryNodes nodes = DiscoveryNodes.builder().build();
assertThat(DataTierAllocationDecider.preferredAvailableTier("data", nodes), equalTo(Optional.empty()));
assertThat(DataTierAllocationDecider.preferredAvailableTier("data_hot,data_warm", nodes), equalTo(Optional.empty()));
assertThat(DataTierAllocationDecider.preferredAvailableTier("data_warm,data_content", nodes), equalTo(Optional.empty()));
assertThat(DataTierAllocationDecider.preferredAvailableTier("data_cold", nodes), equalTo(Optional.empty()));
nodes = DiscoveryNodes.builder()
.add(WARM_NODE)
.add(CONTENT_NODE)
.build();
assertThat(DataTierAllocationDecider.preferredAvailableTier("data", nodes), equalTo(Optional.empty()));
assertThat(DataTierAllocationDecider.preferredAvailableTier("data_hot,data_warm", nodes), equalTo(Optional.of("data_warm")));
assertThat(DataTierAllocationDecider.preferredAvailableTier("data_warm,data_content", nodes), equalTo(Optional.of("data_warm")));
assertThat(DataTierAllocationDecider.preferredAvailableTier("data_content,data_warm", nodes), equalTo(Optional.of("data_content")));
assertThat(DataTierAllocationDecider.preferredAvailableTier("data_hot,data_content,data_warm", nodes),
equalTo(Optional.of("data_content")));
assertThat(DataTierAllocationDecider.preferredAvailableTier("data_hot,data_cold,data_warm", nodes),
equalTo(Optional.of("data_warm")));
}
public void testExistedClusterFilters() {
Settings existedSettings = Settings.builder()
.put("cluster.routing.allocation.include._tier", "data_hot,data_warm")
.put("cluster.routing.allocation.exclude._tier", "data_cold")
.build();
ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ALL_SETTINGS);
DataTierAllocationDecider dataTierAllocationDecider = new DataTierAllocationDecider(existedSettings, clusterSettings);
AllocationDeciders allocationDeciders = new AllocationDeciders(
List.of(dataTierAllocationDecider));
AllocationService service = new AllocationService(allocationDeciders,
new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE,
EmptySnapshotsInfoService.INSTANCE);
ClusterState clusterState = prepareState(service.reroute(ClusterState.EMPTY_STATE, "initial state"));
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, clusterState.getRoutingNodes(), clusterState,
null, null, 0);
allocation.debugDecision(true);
Decision d;
RoutingNode node;
for (DiscoveryNode n : Arrays.asList(HOT_NODE, WARM_NODE)) {
node = new RoutingNode(n.getId(), n, shard);
d = dataTierAllocationDecider.canAllocate(shard, node, allocation);
assertThat(d.type(), equalTo(Decision.Type.YES));
d = dataTierAllocationDecider.canRemain(shard, node, allocation);
assertThat(d.type(), equalTo(Decision.Type.YES));
}
node = new RoutingNode(DATA_NODE.getId(), DATA_NODE, shard);
d = dataTierAllocationDecider.canAllocate(shard, node, allocation);
assertThat(d.type(), equalTo(Decision.Type.NO));
assertThat(d.getExplanation(),
containsString("node matches any cluster setting [cluster.routing.allocation.exclude._tier] " +
"tier filters [data_cold]"));
d = dataTierAllocationDecider.canRemain(shard, node, allocation);
assertThat(d.type(), equalTo(Decision.Type.NO));
assertThat(d.getExplanation(),
containsString("node matches any cluster setting [cluster.routing.allocation.exclude._tier] " +
"tier filters [data_cold]"));
node = new RoutingNode(COLD_NODE.getId(), COLD_NODE, shard);
d = dataTierAllocationDecider.canAllocate(shard, node, allocation);
assertThat(d.type(), equalTo(Decision.Type.NO));
assertThat(d.getExplanation(),
containsString("node does not match any cluster setting [cluster.routing.allocation.include._tier] " +
"tier filters [data_hot,data_warm]"));
d = dataTierAllocationDecider.canRemain(shard, node, allocation);
assertThat(d.type(), equalTo(Decision.Type.NO));
assertThat(d.getExplanation(),
containsString("node does not match any cluster setting [cluster.routing.allocation.include._tier] " +
"tier filters [data_hot,data_warm]"));
}
private ClusterState prepareState(ClusterState initialState) {
return prepareState(initialState, Settings.EMPTY);
}
private ClusterState prepareState(ClusterState initialState, Settings indexSettings) {
return ClusterState.builder(initialState)
.nodes(DiscoveryNodes.builder()
.add(HOT_NODE)
.add(WARM_NODE)
.add(COLD_NODE)
.add(DATA_NODE)
.build())
.metadata(Metadata.builder()
.put(IndexMetadata.builder("myindex")
.settings(Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetadata.SETTING_INDEX_UUID, "myindex")
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(indexSettings)
.build()))
.build())
.build();
}
}
| apache-2.0 |
google/gvisor | test/syscalls/linux/sigreturn_amd64.cc | 4612 | // Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <signal.h>
#include <sys/types.h>
#include <sys/ucontext.h>
#include <unistd.h>
#include "gtest/gtest.h"
#include "test/util/logging.h"
#include "test/util/signal_util.h"
#include "test/util/test_util.h"
#include "test/util/timer_util.h"
namespace gvisor {
namespace testing {
namespace {
constexpr uint64_t kOrigRcx = 0xdeadbeeffacefeed;
constexpr uint64_t kOrigR11 = 0xfacefeedbaad1dea;
volatile int gotvtalrm, ready;
void sigvtalrm(int sig, siginfo_t* siginfo, void* _uc) {
ucontext_t* uc = reinterpret_cast<ucontext_t*>(_uc);
// Verify that:
// - test is in the busy-wait loop waiting for signal.
// - %rcx and %r11 values in mcontext_t match kOrigRcx and kOrigR11.
if (ready &&
static_cast<uint64_t>(uc->uc_mcontext.gregs[REG_RCX]) == kOrigRcx &&
static_cast<uint64_t>(uc->uc_mcontext.gregs[REG_R11]) == kOrigR11) {
// Modify the values %rcx and %r11 in the ucontext. These are the
// values seen by the application after the signal handler returns.
uc->uc_mcontext.gregs[REG_RCX] = ~kOrigRcx;
uc->uc_mcontext.gregs[REG_R11] = ~kOrigR11;
gotvtalrm = 1;
}
}
TEST(SigIretTest, CheckRcxR11) {
// Setup signal handler for SIGVTALRM.
struct sigaction sa = {};
sigfillset(&sa.sa_mask);
sa.sa_sigaction = sigvtalrm;
sa.sa_flags = SA_SIGINFO;
auto const action_cleanup =
ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGVTALRM, sa));
auto const mask_cleanup =
ASSERT_NO_ERRNO_AND_VALUE(ScopedSignalMask(SIG_UNBLOCK, SIGVTALRM));
// Setup itimer to fire after 500 msecs.
struct itimerval itimer = {};
itimer.it_value.tv_usec = 500 * 1000; // 500 msecs.
auto const timer_cleanup =
ASSERT_NO_ERRNO_AND_VALUE(ScopedItimer(ITIMER_VIRTUAL, itimer));
// Initialize %rcx and %r11 and spin until the signal handler returns.
uint64_t rcx = kOrigRcx;
uint64_t r11 = kOrigR11;
asm volatile(
"movq %[rcx], %%rcx;" // %rcx = rcx
"movq %[r11], %%r11;" // %r11 = r11
"movl $1, %[ready];" // ready = 1
"1: pause; cmpl $0, %[gotvtalrm]; je 1b;" // while (!gotvtalrm);
"movq %%rcx, %[rcx];" // rcx = %rcx
"movq %%r11, %[r11];" // r11 = %r11
: [ ready ] "=m"(ready), [ rcx ] "+m"(rcx), [ r11 ] "+m"(r11)
: [ gotvtalrm ] "m"(gotvtalrm)
: "cc", "memory", "rcx", "r11");
// If sigreturn(2) returns via 'sysret' then %rcx and %r11 will be
// clobbered and set to 'ptregs->rip' and 'ptregs->rflags' respectively.
//
// The following check verifies that %rcx and %r11 were not clobbered
// when returning from the signal handler (via sigreturn(2)).
EXPECT_EQ(rcx, ~kOrigRcx);
EXPECT_EQ(r11, ~kOrigR11);
}
constexpr uint64_t kNonCanonicalRip = 0xCCCC000000000000;
// Test that a non-canonical signal handler faults as expected.
TEST(SigIretTest, BadHandler) {
struct sigaction sa = {};
sa.sa_sigaction =
reinterpret_cast<void (*)(int, siginfo_t*, void*)>(kNonCanonicalRip);
auto const cleanup = ASSERT_NO_ERRNO_AND_VALUE(ScopedSigaction(SIGUSR1, sa));
pid_t pid = fork();
if (pid == 0) {
// Child, wait for signal.
while (1) {
pause();
}
}
ASSERT_THAT(pid, SyscallSucceeds());
EXPECT_THAT(kill(pid, SIGUSR1), SyscallSucceeds());
int status;
EXPECT_THAT(waitpid(pid, &status, 0), SyscallSucceedsWithValue(pid));
EXPECT_TRUE(WIFSIGNALED(status) && WTERMSIG(status) == SIGSEGV)
<< "status = " << status;
}
} // namespace
} // namespace testing
} // namespace gvisor
int main(int argc, char** argv) {
// SigIretTest.CheckRcxR11 depends on delivering SIGVTALRM to the main thread.
// Block SIGVTALRM so that any other threads created by TestInit will also
// have SIGVTALRM blocked.
sigset_t set;
sigemptyset(&set);
sigaddset(&set, SIGVTALRM);
TEST_PCHECK(sigprocmask(SIG_BLOCK, &set, nullptr) == 0);
gvisor::testing::TestInit(&argc, &argv);
return gvisor::testing::RunAllTests();
}
| apache-2.0 |
lrytz/scala | src/compiler/scala/tools/nsc/transform/TailCalls.scala | 20888 | /*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package tools.nsc
package transform
import symtab.Flags
import Flags.SYNTHETIC
import scala.annotation.tailrec
/** Perform tail recursive call elimination.
*
* @author Iulian Dragos
*/
abstract class TailCalls extends Transform {
import global._ // the global environment
import definitions._ // standard classes and methods
import typer.typedPos // methods to type trees
val phaseName: String = "tailcalls"
override def enabled = settings.debuginfo.value != "notailcalls"
def newTransformer(unit: CompilationUnit): AstTransformer =
new TailCallElimination(unit)
import treeInfo.hasSynthCaseSymbol
/**
* A Tail Call Transformer
*
* @author Erik Stenman, Iulian Dragos
*
* What it does:
* <p>
* Finds method calls in tail-position and replaces them with jumps.
* A call is in a tail-position if it is the last instruction to be
* executed in the body of a method. This is done by recursing over
* the trees that may contain calls in tail-position (trees that can't
* contain such calls are not transformed). However, they are not that
* many.
* </p>
* <p>
* Self-recursive calls in tail-position are replaced by jumps to a
* label at the beginning of the method. As the JVM provides no way to
* jump from a method to another one, non-recursive calls in
* tail-position are not optimized.
* </p>
* <p>
* A method call is self-recursive if it calls the current method and
* the method is final (otherwise, it could
* be a call to an overridden method in a subclass). Furthermore, if
* the method has `@specialized` annotated type parameters, the recursive
* call must contain these parameters as type arguments.
* Recursive calls on a different instance are optimized.
* Since 'this' is not a local variable, a dummy local val
* is added and used as a label parameter. The backend knows to load
* the corresponding argument in the 'this' (local at index 0). This dummy local
* is never used and should be cleaned up by dead code elimination (when enabled).
* </p>
* <p>
* This phase has been moved before pattern matching to catch more
* of the common cases of tail recursive functions. This means that
* more cases should be taken into account (like nested function, and
* pattern cases).
* </p>
* <p>
* If a method contains self-recursive calls, a label is added to at
* the beginning of its body and the calls are replaced by jumps to
* that label.
* </p>
* <p>
* Assumes: `Uncurry` has been run already, and no multiple
* parameter lists exist.
* </p>
*/
class TailCallElimination(unit: CompilationUnit) extends AstTransformer {
private def defaultReason = "it contains a recursive call not in tail position"
private val failPositions = perRunCaches.newMap[TailContext, Position]() withDefault (_.methodPos)
private val failReasons = perRunCaches.newMap[TailContext, String]() withDefaultValue defaultReason
private def tailrecFailure(ctx: TailContext): Unit = {
val method = ctx.method
val failReason = failReasons(ctx)
val failPos = failPositions(ctx)
reporter.error(failPos, s"could not optimize @tailrec annotated $method: $failReason")
}
/** Has the label been accessed? Then its symbol is in this set. */
private val accessed = perRunCaches.newSet[Symbol]()
// `accessed` was stored as boolean in the current context -- this is no longer tenable
// with jumps to labels in tailpositions now considered in tailposition,
// a downstream context may access the label, and the upstream one will be none the wiser
// this is necessary because tail-calls may occur in places where syntactically they seem impossible
// (since we now consider jumps to labels that are in tailposition, such as matchEnd(x) {x})
sealed trait TailContext {
def method: Symbol // current method
def tparams: List[Symbol] // type parameters
def methodPos: Position // default position for failure reporting
def tailPos: Boolean // context is in tail position
def label: Symbol // new label, tail call target
def tailLabels: Set[Symbol]
def enclosingType = method.enclClass.typeOfThis
def isEligible = method.isEffectivelyFinalOrNotOverridden
def isMandatory = method.hasAnnotation(TailrecClass)
def isTransformed = isEligible && accessed(label)
def newThis(pos: Position) = {
def msg = "Creating new `this` during tailcalls\n method: %s\n current class: %s".format(
method.ownerChain.mkString(" -> "),
currentClass.ownerChain.mkString(" -> ")
)
logResult(msg)(method.newValue(nme.THIS, pos, SYNTHETIC) setInfo currentClass.typeOfThis)
}
override def toString = s"${method.name} tparams=$tparams tailPos=$tailPos label=$label label info=${label.info}"
final def noTailContext() = clonedTailContext(false)
final def yesTailContext() = clonedTailContext(true)
@tailrec
protected final def clonedTailContext(tailPos: Boolean): TailContext = this match {
case _ if this.tailPos == tailPos => this
case clone: ClonedTailContext => clone.that.clonedTailContext(tailPos)
case _ => new ClonedTailContext(this, tailPos)
}
}
object EmptyTailContext extends TailContext {
def method = NoSymbol
def tparams = Nil
def methodPos = NoPosition
def tailPos = false
def label = NoSymbol
def tailLabels = Set.empty[Symbol]
}
class DefDefTailContext(dd: DefDef) extends TailContext {
def method = dd.symbol
def tparams = dd.tparams map (_.symbol)
def methodPos = dd.pos
def tailPos = true
lazy val label = mkLabel()
lazy val tailLabels = {
// labels are local to a method, so only traverse the rhs of a defdef
val collector = new TailPosLabelsTraverser
collector traverse dd.rhs
collector.tailLabels.toSet
}
private def mkLabel() = {
val label = method.newLabel(newTermName("_" + method.name), method.pos)
val thisParam = method.newSyntheticValueParam(currentClass.typeOfThis)
label setInfo MethodType(thisParam :: method.tpe.params, method.tpe_*.finalResultType)
if (isEligible)
label.substInfo(method.tpe.typeParams, tparams)
label
}
private def isRecursiveCall(t: Tree) = {
val receiver = t.symbol
( (receiver != null)
&& receiver.isMethod
&& (method.name == receiver.name)
&& (method.enclClass isSubClass receiver.enclClass)
)
}
def containsRecursiveCall(t: Tree) = t exists isRecursiveCall
}
class ClonedTailContext(val that: TailContext, override val tailPos: Boolean) extends TailContext {
def method = that.method
def tparams = that.tparams
def methodPos = that.methodPos
def tailLabels = that.tailLabels
def label = that.label
}
private var ctx: TailContext = EmptyTailContext
override def transformUnit(unit: CompilationUnit): Unit = {
try {
super.transformUnit(unit)
} finally {
// OPT clear these after each compilation unit
failPositions.clear()
failReasons.clear()
accessed.clear()
}
}
/** Rewrite this tree to contain no tail recursive calls */
def transform(tree: Tree, nctx: TailContext): Tree = {
val saved = ctx
ctx = nctx
try transform(tree)
finally this.ctx = saved
}
def yesTailTransform(tree: Tree): Tree = transform(tree, ctx.yesTailContext())
def noTailTransform(tree: Tree): Tree = transform(tree, ctx.noTailContext())
def noTailTransforms(trees: List[Tree]) = {
val nctx = ctx.noTailContext()
trees mapConserve (t => transform(t, nctx))
}
override def transform(tree: Tree): Tree = {
/* A possibly polymorphic apply to be considered for tail call transformation. */
def rewriteApply(target: Tree, fun: Tree, targs: List[Tree], args: List[Tree], mustTransformArgs: Boolean = true) = {
val receiver: Tree = fun match {
case Select(qual, _) => qual
case _ => EmptyTree
}
def receiverIsSame = ctx.enclosingType.widen =:= receiver.tpe.widen
def receiverIsSuper = ctx.enclosingType.widen <:< receiver.tpe.widen
def isRecursiveCall = (ctx.method eq fun.symbol) && ctx.tailPos
def transformArgs = if (mustTransformArgs) noTailTransforms(args) else args
def matchesTypeArgs = (ctx.tparams corresponds targs)((p, a) => !isSpecialized(p) || p == a.tpe.typeSymbol)
def isSpecialized(tparam: Symbol) =
tparam.hasAnnotation(SpecializedClass)
/* Records failure reason in Context for reporting.
* Position is unchanged (by default, the method definition.)
*/
def fail(reason: String) = {
debuglog("Cannot rewrite recursive call at: " + fun.pos + " because: " + reason)
if (ctx.isMandatory) failReasons(ctx) = reason
treeCopy.Apply(tree, noTailTransform(target), transformArgs)
}
/* Position of failure is that of the tree being considered. */
def failHere(reason: String) = {
if (ctx.isMandatory) failPositions(ctx) = fun.pos
fail(reason)
}
def rewriteTailCall(recv: Tree): Tree = {
debuglog("Rewriting tail recursive call: " + fun.pos.lineContent.trim)
accessed += ctx.label
typedPos(fun.pos) {
val args = mapWithIndex(transformArgs)((arg, i) => mkAttributedCastHack(arg, ctx.label.info.params(i + 1).tpe))
Apply(Ident(ctx.label), noTailTransform(recv) :: args)
}
}
if (!ctx.isEligible) fail("it is neither private nor final so can be overridden")
else if (!isRecursiveCall) {
if (ctx.isMandatory && receiverIsSuper) // OPT expensive check, avoid unless we will actually report the error
failHere("it contains a recursive call targeting a supertype")
else failHere(defaultReason)
}
else if (!matchesTypeArgs) failHere("it is called recursively with different specialized type arguments")
else if (receiver == EmptyTree) rewriteTailCall(This(currentClass))
else if (!receiverIsSame) failHere("it changes type of 'this' on a polymorphic recursive call")
else rewriteTailCall(receiver)
}
def isEligible(tree: DefDef) = {
val sym = tree.symbol
!(sym.hasAccessorFlag || sym.isConstructor)
}
// intentionally shadowing imports from definitions for performance
val runDefinitions = currentRun.runDefinitions
import runDefinitions.{Boolean_or, Boolean_and}
tree match {
case dd: DefDef if tree.symbol.isLazy && tree.symbol.hasAnnotation(TailrecClass) =>
reporter.error(tree.pos, "lazy vals are not tailcall transformed")
tree.transform(this)
case dd @ DefDef(_, name, _, vparamss0, _, rhs0) if isEligible(dd) =>
val newCtx = new DefDefTailContext(dd)
if (newCtx.isMandatory && !(newCtx containsRecursiveCall rhs0))
reporter.error(tree.pos, "@tailrec annotated method contains no recursive calls")
debuglog(s"Considering $name for tailcalls, with labels in tailpos: ${newCtx.tailLabels}")
val newRHS = transform(rhs0, newCtx)
deriveDefDef(tree) { rhs =>
if (newCtx.isTransformed) {
/* We have rewritten the tree, but there may be nested recursive calls remaining.
* If @tailrec is given we need to fail those now.
*/
if (newCtx.isMandatory) {
for (t @ Apply(fn, _) <- newRHS ; if fn.symbol == newCtx.method) {
failPositions(newCtx) = t.pos
tailrecFailure(newCtx)
}
}
val newThis = newCtx.newThis(tree.pos)
val vpSyms = vparamss0.flatten map (_.symbol)
typedPos(tree.pos)(Block(
List(ValDef(newThis, This(currentClass))),
LabelDef(newCtx.label, newThis :: vpSyms, mkAttributedCastHack(newRHS, newCtx.label.tpe.resultType))
))
}
else {
if (newCtx.isMandatory && (newCtx containsRecursiveCall newRHS))
tailrecFailure(newCtx)
newRHS
}
}
// a translated match
case Block(stats, expr) if stats forall hasSynthCaseSymbol =>
// the assumption is once we encounter a case, the remainder of the block will consist of cases
// the prologue may be empty, usually it is the valdef that stores the scrut
val (prologue, cases) = stats span (s => !s.isInstanceOf[LabelDef])
val transformedPrologue = noTailTransforms(prologue)
val transformedCases = transformTrees(cases)
val transformedStats =
if ((prologue eq transformedPrologue) && (cases eq transformedCases)) stats // allow reuse of `tree` if the subtransform was an identity
else transformedPrologue ++ transformedCases
treeCopy.Block(tree,
transformedStats,
transform(expr)
)
// a translated casedef
case LabelDef(_, _, body) if hasSynthCaseSymbol(tree) =>
deriveLabelDef(tree)(transform)
case Block(stats, expr) =>
treeCopy.Block(tree,
noTailTransforms(stats),
transform(expr)
)
case CaseDef(pat, guard, body) =>
// CaseDefs are already translated and guards were moved into the body.
// If this was not the case, guards would have to be transformed here as well.
assert(guard.isEmpty, "empty guard")
deriveCaseDef(tree)(transform)
case If(cond, thenp, elsep) =>
treeCopy.If(tree,
noTailTransform(cond),
transform(thenp),
transform(elsep)
)
case Match(selector, cases) =>
treeCopy.Match(tree,
noTailTransform(selector),
transformTrees(cases).asInstanceOf[List[CaseDef]]
)
case Try(block, catches, finalizer @ EmptyTree) =>
// scala/bug#1672 Catches are in tail position when there is no finalizer
treeCopy.Try(tree,
noTailTransform(block),
transformTrees(catches).asInstanceOf[List[CaseDef]],
EmptyTree
)
case Try(block, catches, finalizer) =>
// no calls inside a try are in tail position if there is a finalizer, but keep recursing for nested functions
treeCopy.Try(tree,
noTailTransform(block),
noTailTransforms(catches).asInstanceOf[List[CaseDef]],
noTailTransform(finalizer)
)
case Apply(tapply @ TypeApply(fun, targs), vargs) =>
rewriteApply(tapply, fun, targs, vargs)
case Apply(fun, args) if fun.symbol == Boolean_or || fun.symbol == Boolean_and =>
treeCopy.Apply(tree, noTailTransform(fun), transformTrees(args))
// this is to detect tailcalls in translated matches
// it's a one-argument call to a label that is in a tailposition and that looks like label(x) {x}
// thus, the argument to the call is in tailposition
case Apply(fun, args @ (arg :: Nil)) if fun.symbol.isLabel && ctx.tailLabels(fun.symbol) =>
debuglog(s"in tailpos label: $arg")
val res = yesTailTransform(arg)
// we tail-called -- TODO: shield from false-positives where we rewrite but don't tail-call
// must leave the jump to the original tailpos-label (fun)!
// there might be *a* tailcall *in* res, but it doesn't mean res *always* tailcalls
if (res ne arg)
treeCopy.Apply(tree, fun, res :: Nil)
else
rewriteApply(fun, fun, Nil, args, mustTransformArgs = false)
case Apply(fun, args) =>
rewriteApply(fun, fun, Nil, args)
case Alternative(_) | Star(_) | Bind(_, _) =>
assert(false, "We should've never gotten inside a pattern")
tree
case Select(qual, name) =>
treeCopy.Select(tree, noTailTransform(qual), name)
case EmptyTree | Super(_, _) | This(_) | Ident(_) | Literal(_) | Function(_, _) | TypeTree() =>
tree
case _ =>
tree.transform(this)
}
}
// Workaround for scala/bug#6900. Uncurry installs an InfoTransformer and a tree Transformer.
// These leave us with conflicting view on method signatures; the parameter symbols in
// the MethodType can be clones of the ones originally found on the parameter ValDef, and
// consequently appearing in the typechecked RHS of the method.
private def mkAttributedCastHack(tree: Tree, tpe: Type) =
gen.mkAttributedCast(tree, tpe)
}
// collect the LabelDefs (generated by the pattern matcher) in a DefDef that are in tail position
// the labels all look like: matchEnd(x) {x}
// then, in a forward jump `matchEnd(expr)`, `expr` is considered in tail position (and the matchEnd jump is replaced by the jump generated by expr)
class TailPosLabelsTraverser extends Traverser {
val tailLabels = new scala.collection.mutable.HashSet[Symbol]()
private var maybeTail: Boolean = true // since we start in the rhs of a DefDef
def traverse(tree: Tree, maybeTailNew: Boolean): Unit = {
val saved = maybeTail
maybeTail = maybeTailNew
try traverse(tree)
finally maybeTail = saved
}
def traverseNoTail(tree: Tree) = traverse(tree, maybeTailNew = false)
def traverseTreesNoTail(trees: List[Tree]) = trees foreach traverseNoTail
// intentionally shadowing imports from definitions for performance
private val runDefinitions = currentRun.runDefinitions
import runDefinitions.{Boolean_or, Boolean_and}
override def traverse(tree: Tree) = tree match {
// we're looking for label(x){x} in tail position, since that means `a` is in tail position in a call `label(a)`
case LabelDef(_, List(arg), body@Ident(_)) if arg.symbol == body.symbol =>
if (maybeTail) tailLabels += tree.symbol
// jumps to matchEnd are transparent; need this case for nested matches
// (and the translated match case below does things in reverse for this case's sake)
case Apply(fun, arg :: Nil) if hasSynthCaseSymbol(fun) && tailLabels(fun.symbol) =>
traverse(arg)
case Apply(fun, args) if (fun.symbol == Boolean_or || fun.symbol == Boolean_and) =>
traverseTrees(args)
// a translated casedef
case LabelDef(_, _, body) if hasSynthCaseSymbol(tree) =>
traverse(body)
// a translated match
case Block(stats, expr) if stats forall hasSynthCaseSymbol =>
// the assumption is once we encounter a case, the remainder of the block will consist of cases
// the prologue may be empty, usually it is the valdef that stores the scrut
val (prologue, cases) = stats span (s => !s.isInstanceOf[LabelDef])
traverse(expr)
traverseTrees(cases.reverse) // reverse so that we enter the matchEnd LabelDef before we see jumps to it
traverseTreesNoTail(prologue) // selector (may be absent)
case CaseDef(pat, guard, body) =>
traverse(body)
case Match(selector, cases) =>
traverseNoTail(selector)
traverseTrees(cases)
case dd @ DefDef(_, _, _, _, _, _) => // we are run per-method
case Block(stats, expr) =>
traverseTreesNoTail(stats)
traverse(expr)
case If(cond, thenp, elsep) =>
traverse(thenp)
traverse(elsep)
case Try(block, catches, finalizer) =>
traverseNoTail(block)
traverseTreesNoTail(catches)
traverseNoTail(finalizer)
case Apply(_, _) | EmptyTree | Super(_, _) | This(_) | Select(_, _) | Ident(_) | Literal(_) | Function(_, _) | TypeTree() =>
case _ => super.traverse(tree)
}
}
}
| apache-2.0 |
onesocialweb/osw-web | src/org/onesocialweb/gwt/client/ui/widget/compose/TextareaUpdate.java | 1658 | /*
* Copyright 2010 Vodafone Group Services Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.onesocialweb.gwt.client.ui.widget.compose;
import com.google.gwt.event.dom.client.KeyPressEvent;
import com.google.gwt.event.dom.client.KeyPressHandler;
import com.google.gwt.user.client.ui.FlowPanel;
import com.google.gwt.user.client.ui.TextArea;
public class TextareaUpdate extends FlowPanel {
private TextArea update = new TextArea();
// private SuggestBox suggest = new SuggestBox();
public TextareaUpdate() {
// compose widget
add(update);
// add(suggest);
// suggest = new
// SuggestBox(RecipientOracle.getInstance().getSuggestions());
// style
update.addStyleName("textareaUpdate");
// handlers
// Listen for keyboard events in the status update
update.addKeyPressHandler(new KeyPressHandler() {
public void onKeyPress(KeyPressEvent event) {
char keyCode = event.getCharCode();
char at = '@';
if (keyCode == at) {
}
}
});
}
public void setText(String text) {
update.setText(text);
}
public String getText() {
return update.getText();
}
}
| apache-2.0 |
concordion/concordion-net | Concordion.Spec/Support/EventRecorder.cs | 1177 | using System;
using System.Collections.Generic;
using System.Linq;
using Concordion.Api.Listener;
namespace Concordion.Spec.Support
{
public class EventRecorder : IAssertEqualsListener, IExceptionCaughtListener
{
private readonly List<Object> m_Events;
public EventRecorder()
{
this.m_Events = new List<Object>();
}
public Object GetLast(Type eventType)
{
Object lastMatch = null;
foreach (var anEvent in this.m_Events.Where(eventType.IsInstanceOfType))
{
lastMatch = anEvent;
}
return lastMatch;
}
public void ExceptionCaught(ExceptionCaughtEvent caughtEvent)
{
this.m_Events.Add(caughtEvent);
}
#region IAssertEqualsListener Members
public void SuccessReported(AssertSuccessEvent successEvent)
{
this.m_Events.Add(successEvent);
}
public void FailureReported(AssertFailureEvent failureEvent)
{
this.m_Events.Add(failureEvent);
}
#endregion
}
}
| apache-2.0 |
dbs-leipzig/gradoop | gradoop-flink/src/test/java/org/gradoop/flink/model/impl/operators/matching/transactional/TransactionalPatternMatchingTest.java | 7188 | /*
* Copyright © 2014 - 2021 Leipzig University (Database Research Group)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradoop.flink.model.impl.operators.matching.transactional;
import com.google.common.collect.Lists;
import org.apache.flink.api.java.io.LocalCollectionOutputFormat;
import org.gradoop.common.model.impl.id.GradoopId;
import org.gradoop.common.model.impl.pojo.EPGMGraphHead;
import org.gradoop.flink.model.GradoopFlinkTestBase;
import org.gradoop.flink.model.impl.epgm.GraphCollection;
import org.gradoop.flink.model.impl.operators.matching.TestData;
import org.gradoop.flink.model.impl.operators.matching.transactional.algorithm.DepthSearchMatching;
import org.gradoop.flink.util.FlinkAsciiGraphLoader;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import static org.junit.Assert.assertEquals;
/**
 * Tests transactional pattern matching with the {@link DepthSearchMatching}
 * algorithm: a fixed set of query patterns is run against five input graphs
 * (g1..g5), and for each pattern the number of result graphs that originate
 * from each input graph is compared against the expected counts.
 */
public class TransactionalPatternMatchingTest extends GradoopFlinkTestBase {

  @Rule
  public TemporaryFolder temporaryFolder = new TemporaryFolder();

  @Test
  public void test() throws Exception {
    // Five input graphs; each vertex/edge carries an "id" property and a label
    // (A/B/C/D or unlabeled in g5) that the query patterns match against.
    FlinkAsciiGraphLoader loader = getLoaderFromString(
      "g1:A[" +
      "(v0:B {id : 0})" +
      "(v1:A {id : 1})" +
      "(v2:A {id : 2})" +
      "(v3:C {id : 3})" +
      "(v4:B {id : 4})" +
      "(v5:A {id : 5})" +
      "(v6:B {id : 6})" +
      "(v7:C {id : 7})" +
      "(v8:B {id : 8})" +
      "(v9:C {id : 9})" +
      "(v10:D {id : 10})" +
      "(v0)-[e0:a {id : 0}]->(v1)" +
      "(v0)-[e1:a {id : 1}]->(v3)" +
      "(v1)-[e2:a {id : 2}]->(v6)" +
      "(v2)-[e3:a {id : 3}]->(v6)" +
      "(v4)-[e4:a {id : 4}]->(v1)" +
      "(v4)-[e5:b {id : 5}]->(v3)" +
      "(v5)-[e6:a {id : 6}]->(v4)" +
      "(v6)-[e7:a {id : 7}]->(v2)" +
      "(v6)-[e8:a {id : 8}]->(v5)" +
      "(v6)-[e9:b {id : 9}]->(v7)" +
      "(v8)-[e10:a {id : 10}]->(v5)" +
      "(v5)-[e11:a {id : 11}]->(v9)" +
      "(v9)-[e12:c {id : 12}]->(v10)" +
      "]" +
      "g2:B[" +
      "(v11:B {id : 0})" +
      "(v12:A {id : 1, abc : 5})" +
      "(v13:A {id : 2})" +
      "(v14:A {id : 3})" +
      "(v15:C {id : 4})" +
      "(v16:B {id : 5})" +
      "(v17:B {id : 6})" +
      "(v18:C {id : 7})" +
      "(v19:B {id : 8})" +
      "(v20:B {id : 9})" +
      "(v21:A {id : 10})" +
      "(v22:C {id : 11})" +
      "(v23:D {id : 12})" +
      "(v12)-[e13:a {id : 0}]->(v11)" +
      "(v11)-[e14:b {id : 1}]->(v15)" +
      "(v11)-[e15:a {id : 2}]->(v15)" +
      "(v11)-[e16:a {id : 3}]->(v14)" +
      "(v14)-[e17:a {id : 4}]->(v16)" +
      "(v16)-[e18:a {id : 5}]->(v12)" +
      "(v12)-[e19:a {id : 6}]->(v17)" +
      "(v17)-[e20:a {id : 7}]->(v13)" +
      "(v13)-[e21:a {id : 8}]->(v17)" +
      "(v16)-[e22:a {id : 9}]->(v15)" +
      "(v16)-[e23:b {id : 10}]->(v15)" +
      "(v17)-[e24:b {id : 11}]->(v18)" +
      "(v19)-[e25:a {id : 12}]->(v18)" +
      "(v21)-[e26:a {id : 13}]->(v16)" +
      "(v17)-[e27:a {id : 14}]->(v21)" +
      "(v20)-[e28:d {id : 15}]->(v20)" +
      "(v20)-[e29:a {id : 16}]->(v21)" +
      "(v21)-[e30:d {id : 17}]->(v22)" +
      "(v22)-[e31:a {id : 18}]->(v23)" +
      "]" +
      "g3:C[" +
      "(v24:A {id : 0})-[e32:a {id : 0}]->(v25:A {id : 1})" +
      "(v25)-[e33:a {id : 1}]->(v26:A {id : 2})" +
      "(v26)-[e34:a {id : 2}]->(v27:A {id : 3})" +
      "]" +
      "g4:D[" +
      "(v28:A {id : 0})-[e35:a {id : 0}]->(v29:A {id : 1})" +
      "(v29)-[e36:a {id : 1}]->(v30:A {id : 2})-[e37:a {id : 3}]->(v31:A {id : 3})" +
      "(v29)-[e38:a {id : 2}]->(v30)" +
      "]" +
      "g5:E[(v32 {id : 0})-[e39 {id : 0}]->(v33 {id : 1})]");

    GraphCollection coll = loader.getGraphCollectionByVariables("g1", "g2", "g3", "g4", "g5");

    // Run every query pattern and verify the per-input-graph match counts.
    for (int i = 0; i < tests.length; i++) {
      String testPattern = tests[i];

      // attachData = true so each result graph keeps a "lineage" property
      // pointing back to the input graph it was matched in.
      GraphCollection result = coll.query(testPattern, new DepthSearchMatching(), true);

      Collection<EPGMGraphHead> originalHeads = Lists.newArrayList();
      Collection<EPGMGraphHead> resultHeads = Lists.newArrayList();

      coll.getGraphHeads().output(new LocalCollectionOutputFormat<>(
        originalHeads));
      result.getGraphHeads().output(new LocalCollectionOutputFormat<>(
        resultHeads
      ));

      getExecutionEnvironment().execute();

      // Map each input graph id to its label (A..E) so results can be
      // attributed to their source graph.
      Map<GradoopId, String> lineageIdMap = new HashMap<>();
      for (EPGMGraphHead original : originalHeads) {
        lineageIdMap.put(original.getId(), original.getLabel());
      }

      // Count matches per source graph.
      int aCount = 0;
      int bCount = 0;
      int cCount = 0;
      int dCount = 0;
      int eCount = 0;
      for (EPGMGraphHead head : resultHeads) {
        GradoopId id = head.getPropertyValue("lineage").getGradoopId();
        if (lineageIdMap.get(id).equals("A")) {
          aCount++;
        }
        if (lineageIdMap.get(id).equals("B")) {
          bCount++;
        }
        if (lineageIdMap.get(id).equals("C")) {
          cCount++;
        }
        if (lineageIdMap.get(id).equals("D")) {
          dCount++;
        }
        if (lineageIdMap.get(id).equals("E")) {
          eCount++;
        }
      }
      assertEquals(resultCounts[i][0], aCount);
      assertEquals(resultCounts[i][1], bCount);
      assertEquals(resultCounts[i][2], cCount);
      assertEquals(resultCounts[i][3], dCount);
      assertEquals(resultCounts[i][4], eCount);
    }
  }

  /** Query patterns under test; index-aligned with {@link #resultCounts}. */
  private String[] tests = {
    TestData.CHAIN_PATTERN_0,
    TestData.CHAIN_PATTERN_1,
    TestData.CHAIN_PATTERN_2,
    TestData.CHAIN_PATTERN_3,
    TestData.CHAIN_PATTERN_4,
    TestData.CHAIN_PATTERN_5,
    TestData.CHAIN_PATTERN_6,
    TestData.CYCLE_PATTERN_0,
    TestData.CYCLE_PATTERN_1,
    TestData.CYCLE_PATTERN_2,
    TestData.CYCLE_PATTERN_3,
    TestData.CYCLE_PATTERN_4,
    TestData.CYCLE_PATTERN_5,
    TestData.LOOP_PATTERN_0,
    TestData.UNLABELED_PATTERN_0,
    TestData.UNLABELED_PATTERN_1,
    TestData.UNLABELED_PATTERN_2,
    TestData.UNLABELED_PATTERN_3,
    TestData.PROPERTY_PATTERN_1,
    TestData.PROPERTY_PATTERN_2,
    TestData.PROPERTY_PATTERN_3
  };

  /** Expected match counts per pattern, one row per pattern: {g1(A), g2(B), g3(C), g4(D), g5(E)}. */
  private long[][] resultCounts = {
    {3, 5, 0, 0, 0},
    {0, 0, 3, 4, 0},
    {3, 4, 4, 4, 0},
    {0, 0, 0, 0, 0},
    {0, 0, 2, 4, 0},
    {0, 0, 0, 0, 0},
    {12, 18, 0, 0, 0},
    {1, 1, 0, 0, 0},
    {0, 0, 0, 0, 0},
    {1, 1, 0, 0, 0},
    {0, 1, 0, 0, 0},
    {2, 4, 0, 0, 0},
    {0, 2, 0, 0, 0},
    {0, 1, 0, 0, 0},
    {11, 13, 4, 4, 2},
    {13, 18, 3, 4, 1},
    {2, 3, 0, 0, 0},
    {3, 5, 0, 0, 0},
    {1, 2, 0, 0, 0},
    {0, 0, 0, 0, 0},
    {0, 2, 0, 0, 0}
  };
}
| apache-2.0 |
wangjun/smile | src/main/java/me/zsj/smile/utils/ScreenUtils.java | 837 | package me.zsj.smile.utils;
import android.content.Context;
import android.util.DisplayMetrics;
import android.view.WindowManager;
/**
* Created by acer on 2015/8/4 0004.
*/
public class ScreenUtils {

    /**
     * Queries the default display's metrics via the context's WindowManager.
     * Extracted so the width and height accessors share one implementation.
     *
     * @param context any context; its WINDOW_SERVICE system service is used
     * @return the populated DisplayMetrics of the default display
     */
    private static DisplayMetrics getDisplayMetrics(Context context) {
        WindowManager manager = (WindowManager) context.getSystemService(Context.WINDOW_SERVICE);
        DisplayMetrics outMetrics = new DisplayMetrics();
        manager.getDefaultDisplay().getMetrics(outMetrics);
        return outMetrics;
    }

    /**
     * Returns the default display's width in pixels.
     *
     * @param context the context used to look up the WindowManager
     * @return screen width in pixels
     */
    public static int getWidth(Context context) {
        return getDisplayMetrics(context).widthPixels;
    }

    /**
     * Returns the default display's height in pixels.
     *
     * @param context the context used to look up the WindowManager
     * @return screen height in pixels
     */
    public static int getHeight(Context context) {
        return getDisplayMetrics(context).heightPixels;
    }
}
| apache-2.0 |
cloudstax/openmanage | pkg/containersvc/memcontainersvc.go | 6238 | package containersvc
import (
"strings"
"sync"
"github.com/golang/glog"
"golang.org/x/net/context"
"github.com/cloudstax/firecamp/api/common"
)
const keySep = ":"
type taskItem struct {
service string
containerInstanceID string
}
type MemContainerSvc struct {
mlock *sync.Mutex
// key: cluster+service
services map[string]bool
// key: cluster+taskID
servicetasks map[string]taskItem
// key: taskID
tasks map[string]bool
}
// NewMemContainerSvc creates an empty in-memory container service.
func NewMemContainerSvc() *MemContainerSvc {
	svc := &MemContainerSvc{}
	svc.mlock = &sync.Mutex{}
	svc.services = make(map[string]bool)
	svc.servicetasks = make(map[string]taskItem)
	svc.tasks = make(map[string]bool)
	return svc
}
// GetContainerSvcType returns the fixed type name of this implementation.
func (m *MemContainerSvc) GetContainerSvcType() string {
	return "MemContainerSvc"
}
// CreateService registers the service named in opts under its cluster.
// It returns common.ErrServiceExist when the service was already created.
func (m *MemContainerSvc) CreateService(ctx context.Context, opts *CreateServiceOptions) error {
	key := opts.Common.Cluster + opts.Common.ServiceName
	m.mlock.Lock()
	defer m.mlock.Unlock()
	_, ok := m.services[key]
	if ok {
		glog.Errorln("service exist", key)
		return common.ErrServiceExist
	}
	m.services[key] = true
	return nil
}
// IsServiceExist reports whether the service was created in the cluster.
func (m *MemContainerSvc) IsServiceExist(ctx context.Context, cluster string, service string) (bool, error) {
	m.mlock.Lock()
	defer m.mlock.Unlock()
	_, found := m.services[cluster+service]
	return found, nil
}
// ListActiveServiceTasks returns the IDs of all tasks that belong to the
// given service in the given cluster. The cluster name must not contain
// keySep, as it is used as the key delimiter.
func (m *MemContainerSvc) ListActiveServiceTasks(ctx context.Context, cluster string, service string) (taskIDs map[string]bool, err error) {
	if strings.Contains(cluster, keySep) {
		glog.Errorln("invalid cluster name", cluster, "should not have", keySep)
		return nil, common.ErrInternal
	}
	m.mlock.Lock()
	defer m.mlock.Unlock()
	taskIDs = map[string]bool{}
	// Scan all recorded tasks; keys have the form cluster+keySep+taskID.
	for key, t := range m.servicetasks {
		s := strings.SplitN(key, keySep, 2)
		if s[0] == cluster && t.service == service {
			glog.Infoln("taskID", s[1], "belongs to service", service, "cluster", cluster)
			taskIDs[s[1]] = true
		}
	}
	glog.Infoln("taskIDs", taskIDs, "belongs to service", service, "cluster", cluster)
	return taskIDs, nil
}
// GetServiceStatus reports the service status. In this in-memory
// implementation the desired count always equals the running count.
func (m *MemContainerSvc) GetServiceStatus(ctx context.Context, cluster string, service string) (*common.ServiceStatus, error) {
	taskIDs, err := m.ListActiveServiceTasks(ctx, cluster, service)
	if err != nil {
		return nil, err
	}
	count := int64(len(taskIDs))
	status := &common.ServiceStatus{
		RunningCount: count,
		DesiredCount: count,
	}
	return status, nil
}
// GetServiceTask returns the single task of the service that runs on the
// given container instance. It fails with ErrContainerSvcTooManyTasks when
// more than one task matches, and ErrContainerSvcNoTask when none does.
func (m *MemContainerSvc) GetServiceTask(ctx context.Context, cluster string, service string, containerInstanceID string) (taskID string, err error) {
	if strings.Contains(cluster, keySep) {
		glog.Errorln("invalid cluster name", cluster, "should not have", keySep)
		return "", common.ErrInternal
	}
	m.mlock.Lock()
	defer m.mlock.Unlock()
	for key, t := range m.servicetasks {
		s := strings.SplitN(key, keySep, 2)
		if s[0] == cluster && t.service == service && t.containerInstanceID == containerInstanceID {
			// A non-empty taskID means a match was already found: duplicate.
			if len(taskID) != 0 {
				glog.Errorln("more than 1 tasks", taskID, s[1], "for container instance",
					containerInstanceID, "service", service, "cluster", cluster)
				return "", ErrContainerSvcTooManyTasks
			}
			taskID = s[1]
			glog.Infoln("get task", taskID, "for container instance", containerInstanceID,
				"service", service, "cluster", cluster)
		}
	}
	if len(taskID) == 0 {
		glog.Errorln("no task for container instance", containerInstanceID,
			"service", service, "cluster", cluster)
		return "", ErrContainerSvcNoTask
	}
	return taskID, nil
}
// AddServiceTask records a task of a service running on a container
// instance. The cluster name must not contain keySep; adding an existing
// taskID is an internal error.
func (m *MemContainerSvc) AddServiceTask(ctx context.Context, cluster string, service string, taskID string, containerInstanceID string) error {
	if strings.Contains(cluster, keySep) {
		glog.Errorln("invalid cluster name", cluster, "should not have", keySep)
		return common.ErrInternal
	}
	key := cluster + keySep + taskID
	m.mlock.Lock()
	defer m.mlock.Unlock()
	_, ok := m.servicetasks[key]
	if ok {
		glog.Errorln("task exists", taskID)
		return common.ErrInternal
	}
	taskItem := taskItem{
		service:             service,
		containerInstanceID: containerInstanceID,
	}
	m.servicetasks[key] = taskItem
	glog.Infoln("added task", taskItem)
	return nil
}
// UpdateService is a no-op in the in-memory implementation.
func (m *MemContainerSvc) UpdateService(ctx context.Context, opts *UpdateServiceOptions) error {
	return nil
}
// StopService is a no-op in the in-memory implementation.
func (m *MemContainerSvc) StopService(ctx context.Context, cluster string, service string) error {
	return nil
}
// ScaleService is a no-op in the in-memory implementation.
func (m *MemContainerSvc) ScaleService(ctx context.Context, cluster string, service string, desiredCount int64) error {
	return nil
}
// RollingRestartService is a no-op in the in-memory implementation.
func (m *MemContainerSvc) RollingRestartService(ctx context.Context, cluster string, service string, opts *RollingRestartOptions) error {
	return nil
}
// DeleteService removes the service registration. Deleting a service that
// does not exist is not an error. Note: its servicetasks entries are kept.
func (m *MemContainerSvc) DeleteService(ctx context.Context, cluster string, service string) error {
	key := cluster + service
	m.mlock.Lock()
	defer m.mlock.Unlock()
	delete(m.services, key)
	return nil
}
// RunTask records a task keyed by cluster+service+taskType and returns its
// ID. Running an already-recorded task is idempotent.
func (m *MemContainerSvc) RunTask(ctx context.Context, opts *RunTaskOptions) (taskID string, err error) {
	taskID = opts.Common.Cluster + opts.Common.ServiceName + opts.TaskType
	m.mlock.Lock()
	defer m.mlock.Unlock()
	_, ok := m.tasks[taskID]
	if ok {
		glog.Infoln("task is already running")
		return taskID, nil
	}
	m.tasks[taskID] = true
	return taskID, nil
}
// GetTaskStatus reports a recorded task as "running", or
// common.ErrNotFound when the task was never run.
func (m *MemContainerSvc) GetTaskStatus(ctx context.Context, cluster string, taskID string) (*common.TaskStatus, error) {
	m.mlock.Lock()
	defer m.mlock.Unlock()
	_, ok := m.tasks[taskID]
	if !ok {
		glog.Infoln("task not exist")
		return nil, common.ErrNotFound
	}
	status := &common.TaskStatus{
		Status:        "running",
		StoppedReason: "",
		StartedAt:     "",
		FinishedAt:    "",
	}
	return status, nil
}
// DeleteTask removes the task keyed by cluster+service+taskType.
// Deleting a missing task is not an error.
func (m *MemContainerSvc) DeleteTask(ctx context.Context, cluster string, service string, taskType string) error {
	taskID := cluster + service + taskType
	m.mlock.Lock()
	defer m.mlock.Unlock()
	delete(m.tasks, taskID)
	return nil
}
// CreateServiceVolume is a no-op in the in-memory implementation.
func (m *MemContainerSvc) CreateServiceVolume(ctx context.Context, service string, memberName string, volumeID string, volumeSizeGB int64, journal bool) (existingVolumeID string, err error) {
	return "", nil
}
// DeleteServiceVolume is a no-op in the in-memory implementation.
func (m *MemContainerSvc) DeleteServiceVolume(ctx context.Context, service string, memberName string, journal bool) error {
	return nil
}
| apache-2.0 |
AndiHappy/solo | src/main/java/org/b3log/solo/processor/CaptchaProcessor.java | 5853 | /*
* Copyright (c) 2010-2017, b3log.org & hacpai.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.b3log.solo.processor;
import org.apache.commons.io.IOUtils;
import org.b3log.latke.Latkes;
import org.b3log.latke.RuntimeEnv;
import org.b3log.latke.image.Image;
import org.b3log.latke.image.ImageService;
import org.b3log.latke.image.ImageServiceFactory;
import org.b3log.latke.logging.Level;
import org.b3log.latke.logging.Logger;
import org.b3log.latke.servlet.HTTPRequestContext;
import org.b3log.latke.servlet.HTTPRequestMethod;
import org.b3log.latke.servlet.annotation.RequestProcessing;
import org.b3log.latke.servlet.annotation.RequestProcessor;
import org.b3log.latke.servlet.renderer.PNGRenderer;
import org.b3log.solo.SoloServletListener;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.servlet.http.HttpSession;
import java.io.*;
import java.net.URL;
import java.util.Enumeration;
import java.util.Random;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
/**
* Captcha processor.
*
* <p>
* Checkout <a href="http://toy-code.googlecode.com/svn/trunk/CaptchaGenerator">
* the sample captcha generator</a> for more details.
* </p>
*
* @author <a href="http://88250.b3log.org">Liang Ding</a>
* @version 1.1.0.4, Oct 31, 2015
* @since 0.3.1
*/
@RequestProcessor
public class CaptchaProcessor {

    /**
     * Logger.
     */
    private static final Logger LOGGER = Logger.getLogger(CaptchaProcessor.class.getName());

    /**
     * Images service.
     */
    private static final ImageService IMAGE_SERVICE = ImageServiceFactory.getImageService();

    /**
     * Key of captcha.
     */
    public static final String CAPTCHA = "captcha";

    /**
     * Captchas, lazily loaded on the first request.
     */
    private Image[] captchas;

    /**
     * Count of static captchas bundled in captcha_static.zip.
     */
    private static final int CAPTCHA_COUNT = 100;

    /**
     * Gets a randomly chosen captcha image and stores its answer in the
     * current session under {@link #CAPTCHA}.
     *
     * @param context the specified context
     */
    @RequestProcessing(value = "/captcha.do", method = HTTPRequestMethod.GET)
    public void get(final HTTPRequestContext context) {
        final PNGRenderer renderer = new PNGRenderer();

        context.setRenderer(renderer);

        if (null == captchas) {
            loadCaptchas();
        }

        try {
            final HttpServletRequest request = context.getRequest();
            final HttpServletResponse response = context.getResponse();

            final Random random = new Random();
            final int index = random.nextInt(CAPTCHA_COUNT);
            final Image captchaImg = captchas[index];
            // The image name is the captcha's expected answer.
            final String captcha = captchaImg.getName();

            final HttpSession httpSession = request.getSession(false);

            if (null != httpSession) {
                LOGGER.log(Level.DEBUG, "Captcha[{0}] for session[id={1}]", captcha, httpSession.getId());
                httpSession.setAttribute(CAPTCHA, captcha);
            }

            // Forbid caching so every request gets a freshly chosen captcha.
            response.setHeader("Pragma", "no-cache");
            response.setHeader("Cache-Control", "no-cache");
            response.setDateHeader("Expires", 0);

            renderer.setImage(captchaImg);
        } catch (final Exception e) {
            LOGGER.log(Level.ERROR, e.getMessage(), e);
        }
    }

    /**
     * Loads the bundled captcha images from captcha_static.zip into
     * {@link #captchas}. Each zip entry's base name becomes the captcha
     * answer.
     */
    private synchronized void loadCaptchas() {
        LOGGER.debug("Loading captchas....");

        try {
            captchas = new Image[CAPTCHA_COUNT];

            ZipFile zipFile;

            if (RuntimeEnv.LOCAL == Latkes.getRuntimeEnv()) {
                // ZipFile needs a real file, so copy the classpath resource to a temp file first.
                final InputStream inputStream = SoloServletListener.class.getClassLoader().getResourceAsStream("captcha_static.zip");
                final File file = File.createTempFile("b3log_captcha_static", null);
                final OutputStream outputStream = new FileOutputStream(file);

                IOUtils.copy(inputStream, outputStream);
                zipFile = new ZipFile(file);

                IOUtils.closeQuietly(inputStream);
                IOUtils.closeQuietly(outputStream);
            } else {
                final URL captchaURL = SoloServletListener.class.getClassLoader().getResource("captcha_static.zip");

                zipFile = new ZipFile(captchaURL.getFile());
            }

            final Enumeration<? extends ZipEntry> entries = zipFile.entries();

            int i = 0;

            while (entries.hasMoreElements()) {
                final ZipEntry entry = entries.nextElement();

                // Bug fix: the previous code sized a buffer with available() and issued a
                // single read(byte[]), but read() may return fewer bytes than requested and
                // available() may under-report the entry size, yielding truncated image data.
                // Read the entry fully instead.
                final BufferedInputStream bufferedInputStream = new BufferedInputStream(zipFile.getInputStream(entry));
                final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
                final byte[] buffer = new byte[4096];
                int readLen;

                while (-1 != (readLen = bufferedInputStream.read(buffer))) {
                    byteArrayOutputStream.write(buffer, 0, readLen);
                }

                bufferedInputStream.close();

                final byte[] captchaCharData = byteArrayOutputStream.toByteArray();

                final Image image = IMAGE_SERVICE.makeImage(captchaCharData);

                // Entry name without its extension is the captcha answer.
                image.setName(entry.getName().substring(0, entry.getName().lastIndexOf('.')));

                captchas[i] = image;

                i++;
            }

            zipFile.close();
        } catch (final Exception e) {
            LOGGER.error("Can not load captchs!");

            throw new IllegalStateException(e);
        }

        LOGGER.debug("Loaded captch images");
    }
}
| apache-2.0 |
GoogleCloudPlatform/google-cloud-visualstudio | GoogleCloudExtension/GoogleCloudExtension/Utils/ToolWindowCommandUtils.cs | 3668 | // Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using GoogleCloudExtension.Accounts;
using Microsoft.VisualStudio.Shell;
using Microsoft.VisualStudio.Shell.Interop;
using System;
using System.Threading.Tasks;
namespace GoogleCloudExtension.Utils
{
/// <summary>
/// This class contains helpers for ToolWindow
/// </summary>
/// <summary>
/// This class contains helpers for ToolWindow
/// </summary>
internal static class ToolWindowCommandUtils
{
    /// <summary>
    /// Shows the tool window, either existing or new.
    /// </summary>
    /// <returns>The tool window object.</returns>
    public static Task<TToolWindow> ShowToolWindowAsync<TToolWindow>() where TToolWindow : ToolWindowPane
    {
        // Id 0 is the single-instance (default) tool window.
        return ShowToolWindowAsync<TToolWindow>(0);
    }

    /// <summary>
    /// Shows the tool window for a given id, either existing or new.
    /// </summary>
    /// <param name="id">The instance id of the tool window to show.</param>
    /// <returns>The tool window object.</returns>
    /// <exception cref="NotSupportedException">When the tool window frame could not be created.</exception>
    public static async Task<TToolWindow> ShowToolWindowAsync<TToolWindow>(int id) where TToolWindow : ToolWindowPane
    {
        // The last flag is set to true so that if the tool window does not exists it will be created.
        var window = GoogleCloudExtensionPackage.Instance.FindToolWindow<TToolWindow>(true, id);
        // IVsWindowFrame.Show must be called on the UI thread.
        await GoogleCloudExtensionPackage.Instance.JoinableTaskFactory.SwitchToMainThreadAsync();
        var windowFrame = (IVsWindowFrame)window?.Frame;
        if (windowFrame == null)
        {
            throw new NotSupportedException("Failed to create the tool window");
        }
        Microsoft.VisualStudio.ErrorHandler.ThrowOnFailure(windowFrame.Show());
        return window;
    }

    /// <summary>
    /// Creates a new instance of a multi-instance tool window.
    /// </summary>
    /// <returns>The tool window object if it is found.</returns>
    public static async Task<TToolWindow> AddToolWindowAsync<TToolWindow>() where TToolWindow : ToolWindowPane
    {
        // Find the first unused tool window id.
        for (var id = 0; true; id++)
        {
            var window = GoogleCloudExtensionPackage.Instance.FindToolWindow<TToolWindow>(false, id);
            if (window == null)
            {
                // Create a new tool window at the unused id.
                return await ShowToolWindowAsync<TToolWindow>(id);
            }
        }
    }

    /// <summary>
    /// Response to <seealso cref="OleMenuCommand.BeforeQueryStatus"/>
    /// to enable menu item if current project id is valid.
    /// </summary>
    /// <param name="sender">The event sender.</param>
    /// <param name="e">The event args.</param>
    public static void EnableMenuItemOnValidProjectId(object sender, EventArgs e)
    {
        var menuCommand = sender as OleMenuCommand;
        if (menuCommand == null)
        {
            // Not raised by an OleMenuCommand; nothing to update.
            return;
        }
        menuCommand.Enabled = !string.IsNullOrWhiteSpace(CredentialsStore.Default.CurrentProjectId);
    }
}
}
| apache-2.0 |
ProxyBuilder/proxybuilder | testusage/src/main/java/org/rapidpm/proxybuilder/proxy/generated/v011/FooTop.java | 1379 | /**
* Copyright © 2013 Sven Ruppert (sven.ruppert@gmail.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
Copyright © 2013 Sven Ruppert (sven.ruppert@gmail.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.rapidpm.proxybuilder.proxy.generated.v011;
/**
* Created by b.bosch on 17.02.2016.
*/
public interface FooTop {
  /**
   * Callback operation exposed by this interface; returns nothing despite the
   * "get" prefix. The name is part of the public contract and kept as-is.
   */
  void getBar();
}
| apache-2.0 |
yangyunfeng666/AndBase | src/com/ab/util/AbFileUtil.java | 30538 | /*
* Copyright (C) 2012 www.amsoft.cn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ab.util;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Comparator;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.http.Header;
import org.apache.http.HttpResponse;
import android.content.Context;
import android.content.pm.PackageInfo;
import android.content.res.AssetManager;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.drawable.Drawable;
import android.os.Environment;
import android.os.StatFs;
import com.ab.global.AbAppConfig;
// TODO: Auto-generated Javadoc
/**
* © 2012 amsoft.cn
* 名称:AbFileUtil.java
* 描述:文件操作类.
*
* @author 还如一梦中
* @version v1.0
* @date:2013-01-18 下午11:52:13
*/
public class AbFileUtil {
    /** Default APP root directory. */
    private static String downloadRootDir = null;
    /** Default download directory for image files. */
    private static String imageDownloadDir = null;
    /** Default download directory for other files. */
    private static String fileDownloadDir = null;
    /** Default cache directory. */
    private static String cacheDownloadDir = null;
    /** Default download directory for database files. */
    private static String dbDownloadDir = null;
    /** Use the SD cache only when more than 200MB of free space remains. */
    private static int freeSdSpaceNeededToCache = 200*1024*1024;
/**
* 描述:通过文件的网络地址从SD卡中读取图片,如果SD中没有则自动下载并保存.
* @param url 文件的网络地址
* @param type 图片的处理类型(剪切或者缩放到指定大小,参考AbImageUtil类)
* 如果设置为原图,则后边参数无效,得到原图
* @param desiredWidth 新图片的宽
* @param desiredHeight 新图片的高
* @return Bitmap 新图片
*/
public static Bitmap getBitmapFromSD(String url,int type,int desiredWidth, int desiredHeight){
Bitmap bitmap = null;
try {
if(AbStrUtil.isEmpty(url)){
return null;
}
//SD卡不存在 或者剩余空间不足了就不缓存到SD卡了
if(!isCanUseSD() || freeSdSpaceNeededToCache < freeSpaceOnSD()){
bitmap = getBitmapFromURL(url,type,desiredWidth,desiredHeight);
return bitmap;
}
//下载文件,如果不存在就下载,存在直接返回地址
String downFilePath = downloadFile(url,imageDownloadDir);
if(downFilePath != null){
//获取图片
return getBitmapFromSD(new File(downFilePath),type,desiredWidth,desiredHeight);
}else{
return null;
}
} catch (Exception e) {
e.printStackTrace();
}
return bitmap;
}
    /**
     * Reads an image from the SD card by its local file.
     *
     * @param file the local image file
     * @param type processing type (crop or scale to the given size, see AbConstant);
     *             with the original-image type the size parameters are ignored
     * @param desiredWidth the width of the new image
     * @param desiredHeight the height of the new image
     * @return Bitmap the new image, or null on failure
     */
    public static Bitmap getBitmapFromSD(File file,int type,int desiredWidth, int desiredHeight){
        Bitmap bitmap = null;
        try {
            // Is the SD card available?
            if(!isCanUseSD()){
                return null;
            }
            // Does the file exist?
            if(!file.exists()){
                return null;
            }
            // File exists: process it according to the requested type.
            if(type == AbImageUtil.CUTIMG){
                bitmap = AbImageUtil.cutImg(file,desiredWidth,desiredHeight);
            }else if(type == AbImageUtil.SCALEIMG){
                bitmap = AbImageUtil.scaleImg(file,desiredWidth,desiredHeight);
            }else{
                bitmap = AbImageUtil.getBitmap(file);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        return bitmap;
    }
    /**
     * Reads an image from the SD card by its local file, without any
     * cropping or scaling.
     *
     * @param file the local image file
     * @return Bitmap the image, or null when the SD card is unavailable,
     *         the file is missing, or decoding fails
     */
    public static Bitmap getBitmapFromSD(File file){
        Bitmap bitmap = null;
        try {
            // Is the SD card available?
            if(!isCanUseSD()){
                return null;
            }
            // Does the file exist?
            if(!file.exists()){
                return null;
            }
            // File exists: decode it as-is.
            bitmap = AbImageUtil.getBitmap(file);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return bitmap;
    }
    /**
     * Writes an image's byte[] to a local file under the image download
     * directory, then reads it back with the requested processing.
     *
     * @param imgByte the image as a byte[]
     * @param fileName the file name, including the suffix, e.g. ".jpg"
     * @param type processing type (crop or scale to the given size, see AbConstant)
     * @param desiredWidth the width of the new image
     * @param desiredHeight the height of the new image
     * @return Bitmap the new image, or null on failure
     */
    public static Bitmap getBitmapFromByte(byte[] imgByte,String fileName,int type,int desiredWidth, int desiredHeight){
        FileOutputStream fos = null;
        DataInputStream dis = null;
        ByteArrayInputStream bis = null;
        Bitmap bitmap = null;
        File file = null;
        try {
            if(imgByte!=null){
                file = new File(imageDownloadDir+fileName);
                if(!file.exists()){
                    file.createNewFile();
                }
                fos = new FileOutputStream(file);
                int readLength = 0;
                bis = new ByteArrayInputStream(imgByte);
                dis = new DataInputStream(bis);
                byte[] buffer = new byte[1024];
                while ((readLength = dis.read(buffer))!=-1) {
                    fos.write(buffer, 0, readLength);
                    // NOTE(review): sleeping 500ms per 1KB chunk makes writes of an
                    // in-memory byte[] extremely slow -- presumably a leftover
                    // throttle; confirm intent before removing.
                    try {
                        Thread.sleep(500);
                    } catch (Exception e) {
                    }
                }
                fos.flush();
                bitmap = getBitmapFromSD(file,type,desiredWidth,desiredHeight);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }finally{
            // Close streams in reverse order; failures are ignored on purpose.
            if(dis!=null){
                try {
                    dis.close();
                } catch (Exception e) {
                }
            }
            if(bis!=null){
                try {
                    bis.close();
                } catch (Exception e) {
                }
            }
            if(fos!=null){
                try {
                    fos.close();
                } catch (Exception e) {
                }
            }
        }
        return bitmap;
    }
    /**
     * Fetches an image from the network by URL.
     *
     * @param url the network address of the file to download
     * @param type processing type (crop or scale to the given size, see AbConstant)
     * @param desiredWidth the width of the new image
     * @param desiredHeight the height of the new image
     * @return Bitmap the new image, or null on failure
     */
    public static Bitmap getBitmapFromURL(String url,int type,int desiredWidth, int desiredHeight){
        Bitmap bit = null;
        try {
            bit = AbImageUtil.getBitmap(url, type, desiredWidth, desiredHeight);
        } catch (Exception e) {
            AbLogUtil.d(AbFileUtil.class, "下载图片异常:"+e.getMessage());
        }
        return bit;
    }
    /**
     * Gets an image resource from a classpath src path.
     *
     * @param src the image's src path, e.g. "image/arrow.png"
     * @return Bitmap the image, or null on failure
     */
    public static Bitmap getBitmapFromSrc(String src){
        Bitmap bit = null;
        try {
            bit = BitmapFactory.decodeStream(AbFileUtil.class.getResourceAsStream(src));
        } catch (Exception e) {
            AbLogUtil.d(AbFileUtil.class, "获取图片异常:"+e.getMessage());
        }
        return bit;
    }
    /**
     * Gets an image resource from the app's assets.
     *
     * @param context the context used to access the AssetManager
     * @param fileName the asset file name
     * @return Bitmap the image, or null on failure
     */
    public static Bitmap getBitmapFromAsset(Context context,String fileName){
        Bitmap bit = null;
        try {
            AssetManager assetManager = context.getAssets();
            InputStream is = assetManager.open(fileName);
            bit = BitmapFactory.decodeStream(is);
        } catch (Exception e) {
            AbLogUtil.d(AbFileUtil.class, "获取图片异常:"+e.getMessage());
        }
        return bit;
    }
    /**
     * Gets a drawable resource from the app's assets.
     *
     * @param context the context used to access the AssetManager
     * @param fileName the asset file name
     * @return Drawable the drawable, or null on failure
     */
    public static Drawable getDrawableFromAsset(Context context,String fileName){
        Drawable drawable = null;
        try {
            AssetManager assetManager = context.getAssets();
            InputStream is = assetManager.open(fileName);
            drawable = Drawable.createFromStream(is,null);
        } catch (Exception e) {
            AbLogUtil.d(AbFileUtil.class, "获取图片异常:"+e.getMessage());
        }
        return drawable;
    }
/**
* 下载网络文件到SD卡中.如果SD中存在同名文件将不再下载
*
* @param url 要下载文件的网络地址
* @param dirPath the dir path
* @return 下载好的本地文件地址
*/
public static String downloadFile(String url,String dirPath){
InputStream in = null;
FileOutputStream fileOutputStream = null;
HttpURLConnection connection = null;
String downFilePath = null;
File file = null;
try {
if(!isCanUseSD()){
return null;
}
//先判断SD卡中有没有这个文件,不比较后缀部分比较
String fileNameNoMIME = getCacheFileNameFromUrl(url);
File parentFile = new File(imageDownloadDir);
File[] files = parentFile.listFiles();
for(int i = 0; i < files.length; ++i){
String fileName = files[i].getName();
String name = fileName.substring(0,fileName.lastIndexOf("."));
if(name.equals(fileNameNoMIME)){
//文件已存在
return files[i].getPath();
}
}
URL mUrl = new URL(url);
connection = (HttpURLConnection)mUrl.openConnection();
connection.connect();
//获取文件名,下载文件
String fileName = getCacheFileNameFromUrl(url,connection);
file = new File(imageDownloadDir,fileName);
downFilePath = file.getPath();
if(!file.exists()){
file.createNewFile();
}else{
//文件已存在
return file.getPath();
}
in = connection.getInputStream();
fileOutputStream = new FileOutputStream(file);
byte[] b = new byte[1024];
int temp = 0;
while((temp=in.read(b))!=-1){
fileOutputStream.write(b, 0, temp);
}
}catch(Exception e){
e.printStackTrace();
AbLogUtil.e(AbFileUtil.class, "有文件下载出错了,已删除");
//检查文件大小,如果文件为0B说明网络不好没有下载成功,要将建立的空文件删除
if(file != null){
file.delete();
}
file = null;
downFilePath = null;
}finally{
try {
if(in!=null){
in.close();
}
} catch (Exception e) {
e.printStackTrace();
}
try {
if(fileOutputStream!=null){
fileOutputStream.close();
}
} catch (Exception e) {
e.printStackTrace();
}
try {
if(connection!=null){
connection.disconnect();
}
} catch (Exception e) {
e.printStackTrace();
}
}
return downFilePath;
}
    /**
     * Gets the size of a network file via an HTTP GET request's
     * Content-Length header.
     *
     * @param Url the network address of the file
     * @return int the file size in bytes, or 0 on failure
     */
    public static int getContentLengthFromUrl(String Url){
        int mContentLength = 0;
        try {
            URL url = new URL(Url);
            HttpURLConnection mHttpURLConnection = (HttpURLConnection) url.openConnection();
            mHttpURLConnection.setConnectTimeout(5 * 1000);
            mHttpURLConnection.setRequestMethod("GET");
            // Browser-like headers; some servers reject requests without them.
            mHttpURLConnection.setRequestProperty("Accept","image/gif, image/jpeg, image/pjpeg, image/pjpeg, application/x-shockwave-flash, application/xaml+xml, application/vnd.ms-xpsdocument, application/x-ms-xbap, application/x-ms-application, application/vnd.ms-excel, application/vnd.ms-powerpoint, application/msword, */*");
            mHttpURLConnection.setRequestProperty("Accept-Language", "zh-CN");
            mHttpURLConnection.setRequestProperty("Referer", Url);
            mHttpURLConnection.setRequestProperty("Charset", "UTF-8");
            mHttpURLConnection.setRequestProperty("User-Agent","Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)");
            mHttpURLConnection.setRequestProperty("Connection", "Keep-Alive");
            mHttpURLConnection.connect();
            if (mHttpURLConnection.getResponseCode() == 200){
                // Read the file size from the response headers.
                mContentLength = mHttpURLConnection.getContentLength();
            }
        } catch (Exception e) {
            e.printStackTrace();
            AbLogUtil.d(AbFileUtil.class, "获取长度异常:"+e.getMessage());
        }
        return mContentLength;
    }
    /**
     * Gets the file name for a URL by requesting it and parsing the
     * Content-Disposition response header.
     *
     * @param url the file's network address
     * @return the file name from the header, or null when unavailable
     */
    public static String getRealFileNameFromUrl(String url){
        String name = null;
        try {
            if(AbStrUtil.isEmpty(url)){
                return name;
            }
            URL mUrl = new URL(url);
            HttpURLConnection mHttpURLConnection = (HttpURLConnection) mUrl.openConnection();
            mHttpURLConnection.setConnectTimeout(5 * 1000);
            mHttpURLConnection.setRequestMethod("GET");
            // Browser-like headers; some servers reject requests without them.
            mHttpURLConnection.setRequestProperty("Accept","image/gif, image/jpeg, image/pjpeg, image/pjpeg, application/x-shockwave-flash, application/xaml+xml, application/vnd.ms-xpsdocument, application/x-ms-xbap, application/x-ms-application, application/vnd.ms-excel, application/vnd.ms-powerpoint, application/msword, */*");
            mHttpURLConnection.setRequestProperty("Accept-Language", "zh-CN");
            mHttpURLConnection.setRequestProperty("Referer", url);
            mHttpURLConnection.setRequestProperty("Charset", "UTF-8");
            mHttpURLConnection.setRequestProperty("User-Agent","");
            mHttpURLConnection.setRequestProperty("Connection", "Keep-Alive");
            mHttpURLConnection.connect();
            if (mHttpURLConnection.getResponseCode() == 200){
                // Scan all response headers for Content-Disposition.
                // NOTE(review): getHeaderFieldKey(i) can be null (e.g. the status
                // line), which would NPE here; currently swallowed by the catch.
                for (int i = 0;; i++) {
                    String mine = mHttpURLConnection.getHeaderField(i);
                    if (mine == null){
                        break;
                    }
                    if ("content-disposition".equals(mHttpURLConnection.getHeaderFieldKey(i).toLowerCase())) {
                        // Extract the filename=... part, stripping surrounding quotes.
                        Matcher m = Pattern.compile(".*filename=(.*)").matcher(mine.toLowerCase());
                        if (m.find())
                            return m.group(1).replace("\"", "");
                    }
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
            AbLogUtil.e(AbFileUtil.class, "网络上获取文件名失败");
        }
        return name;
    }
/**
 * Read the real file name (name.extension) from an already-opened
 * connection's content-disposition header.
 *
 * The connection is owned by the caller and is intentionally not
 * disconnected here.
 *
 * @param connection an open HTTP connection
 * @return the file name (lower-cased, quotes stripped), or null
 */
public static String getRealFileName(HttpURLConnection connection) {
    String name = null;
    try {
        if (connection == null) {
            return name;
        }
        if (connection.getResponseCode() == 200) {
            for (int i = 0;; i++) {
                String mime = connection.getHeaderField(i);
                if (mime == null) {
                    break;
                }
                // e.g. "Content-Disposition: attachment; filename=1.txt"
                // Bug fix: getHeaderFieldKey(0) is null for the status line,
                // so the original toLowerCase() call NPE'd on the first
                // iteration; equalsIgnoreCase is null-safe.
                if ("content-disposition".equalsIgnoreCase(connection.getHeaderFieldKey(i))) {
                    Matcher m = Pattern.compile(".*filename=(.*)").matcher(mime.toLowerCase());
                    if (m.find()) {
                        return m.group(1).replace("\"", "");
                    }
                }
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        AbLogUtil.e(AbFileUtil.class, "网络上获取文件名失败");
    }
    return name;
}
/**
 * Extract the real file name (name.extension) from an HttpResponse's
 * content-disposition headers.
 *
 * @param response the response to inspect
 * @return the file name with surrounding quotes removed, or null
 */
public static String getRealFileName(HttpResponse response) {
    String name = null;
    try {
        if (response == null) {
            return name;
        }
        // Scan every content-disposition header; the last match wins.
        Header[] headers = response.getHeaders("content-disposition");
        for (Header header : headers) {
            Matcher matcher = Pattern.compile(".*filename=(.*)").matcher(header.getValue());
            if (matcher.find()) {
                name = matcher.group(1).replace("\"", "");
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        AbLogUtil.e(AbFileUtil.class, "网络上获取文件名失败");
    }
    return name;
}
/**
 * Derive a cache file name (no extension) from a URL.
 *
 * @param url the file address
 * @return the MD5 digest of the URL, or null when the URL is empty or
 *         hashing fails
 */
public static String getCacheFileNameFromUrl(String url) {
    if (AbStrUtil.isEmpty(url)) {
        return null;
    }
    try {
        // The cache name is simply the MD5 digest of the URL.
        return AbMd5.MD5(url);
    } catch (Exception e) {
        e.printStackTrace();
        return null;
    }
}
/**
 * Derive a cache file name (with extension) from a URL, falling back to the
 * response headers when the URL itself carries no usable extension.
 *
 * @param url the file address
 * @param response the response used as a fallback extension source
 * @return MD5(url) plus an extension (".ab" when none could be found), or
 *         null when the URL is empty or hashing fails
 */
public static String getCacheFileNameFromUrl(String url, HttpResponse response) {
    if (AbStrUtil.isEmpty(url)) {
        return null;
    }
    try {
        String extension = getMIMEFromUrl(url, response);
        // Fall back to a neutral extension when none could be determined.
        if (AbStrUtil.isEmpty(extension)) {
            extension = ".ab";
        }
        return AbMd5.MD5(url) + extension;
    } catch (Exception e) {
        e.printStackTrace();
        return null;
    }
}
/**
 * Derive a cache file name (with extension) from a URL, falling back to the
 * connection's headers when the URL itself carries no usable extension.
 *
 * @param url the file address
 * @param connection the connection used as a fallback extension source
 * @return MD5(url) plus an extension (".ab" when none could be found), or
 *         null when the URL is empty or hashing fails
 */
public static String getCacheFileNameFromUrl(String url, HttpURLConnection connection) {
    if (AbStrUtil.isEmpty(url)) {
        return null;
    }
    try {
        String extension = getMIMEFromUrl(url, connection);
        // Fall back to a neutral extension when none could be determined.
        if (AbStrUtil.isEmpty(extension)) {
            extension = ".ab";
        }
        return AbMd5.MD5(url) + extension;
    } catch (Exception e) {
        e.printStackTrace();
        return null;
    }
}
/**
 * Get a file extension for a URL: first by parsing the URL itself, then —
 * when that fails — by asking the server via the open connection.
 *
 * @param url the file address
 * @param connection the connection used for the (slower) fallback lookup
 * @return the extension including the leading dot, or null
 */
public static String getMIMEFromUrl(String url, HttpURLConnection connection) {
    if (AbStrUtil.isEmpty(url)) {
        return null;
    }
    String suffix = null;
    try {
        int dot = url.lastIndexOf(".");
        if (dot != -1) {
            String candidate = url.substring(dot);
            // Reject "extensions" that are really path or query fragments.
            boolean looksLikePathOrQuery = candidate.indexOf("/") != -1
                    || candidate.indexOf("?") != -1
                    || candidate.indexOf("&") != -1;
            if (!looksLikePathOrQuery) {
                suffix = candidate;
            }
        }
        if (AbStrUtil.isEmpty(suffix)) {
            // Slow path: derive the extension from the server-provided name.
            String fileName = getRealFileName(connection);
            if (fileName != null) {
                int fileDot = fileName.lastIndexOf(".");
                if (fileDot != -1) {
                    suffix = fileName.substring(fileDot);
                }
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
    return suffix;
}
/**
 * Get a file extension for a URL: first by parsing the URL itself, then —
 * when that fails — from the response's content-disposition header.
 *
 * @param url the file address
 * @param response the response used for the (slower) fallback lookup
 * @return the extension including the leading dot, or null
 */
public static String getMIMEFromUrl(String url, HttpResponse response) {
    if (AbStrUtil.isEmpty(url)) {
        return null;
    }
    String mime = null;
    try {
        int dot = url.lastIndexOf(".");
        if (dot != -1) {
            String candidate = url.substring(dot);
            // Reject "extensions" that are really path or query fragments.
            boolean looksLikePathOrQuery = candidate.indexOf("/") != -1
                    || candidate.indexOf("?") != -1
                    || candidate.indexOf("&") != -1;
            if (!looksLikePathOrQuery) {
                mime = candidate;
            }
        }
        if (AbStrUtil.isEmpty(mime)) {
            // Slow path: derive the extension from the server-provided name.
            String fileName = getRealFileName(response);
            if (fileName != null) {
                int fileDot = fileName.lastIndexOf(".");
                if (fileDot != -1) {
                    mime = fileName.substring(fileDot);
                }
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
    return mime;
}
/**
 * Read a file on the SD card into a byte array.
 *
 * @param path path of the file on the SD card
 * @return the file contents, or null when the SD card is unusable, the file
 *         does not exist, is too large for a byte[], or reading fails
 */
public static byte[] getByteArrayFromSD(String path) {
    byte[] bytes = null;
    FileInputStream in = null;
    ByteArrayOutputStream out = null;
    try {
        File file = new File(path);
        // SD card must be mounted.
        if (!isCanUseSD()) {
            return null;
        }
        // File must exist.
        if (!file.exists()) {
            return null;
        }
        long fileSize = file.length();
        if (fileSize > Integer.MAX_VALUE) {
            // Too large to fit in a single byte[].
            return null;
        }
        in = new FileInputStream(path);
        out = new ByteArrayOutputStream(1024);
        byte[] buffer = new byte[1024];
        int size = 0;
        while ((size = in.read(buffer)) != -1) {
            out.write(buffer, 0, size);
        }
        bytes = out.toByteArray();
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        // Bug fix: the input stream was previously closed only on the happy
        // path, leaking the file descriptor whenever read() threw.
        if (in != null) {
            try {
                in.close();
            } catch (Exception e) {
            }
        }
        if (out != null) {
            try {
                out.close();
            } catch (Exception e) {
            }
        }
    }
    return bytes;
}
/**
 * Write a byte array to a file on the SD card.
 *
 * @param path    absolute path of the target file
 * @param content bytes to write
 * @param create  when true, missing parent directories and the file itself
 *                are created; when false, a missing file aborts the write
 */
public static void writeByteArrayToSD(String path, byte[] content, boolean create) {
    FileOutputStream fos = null;
    try {
        File file = new File(path);
        // SD card must be mounted.
        if (!isCanUseSD()) {
            return;
        }
        if (!file.exists()) {
            if (!create) {
                return;
            }
            // Bug fix: createNewFile() was previously only called when the
            // parent directory was also missing; now the file is created
            // whenever it does not exist and creation was requested.
            File parent = file.getParentFile();
            if (parent != null && !parent.exists()) {
                parent.mkdirs();
            }
            file.createNewFile();
        }
        fos = new FileOutputStream(path);
        fos.write(content);
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        if (fos != null) {
            try {
                fos.close();
            } catch (Exception e) {
            }
        }
    }
}
/**
 * Check whether external storage (the SD card) is mounted and usable.
 *
 * @return true when usable, false otherwise
 */
public static boolean isCanUseSD() {
    try {
        // Constant-first comparison; MEDIA_MOUNTED is never null.
        return Environment.MEDIA_MOUNTED.equals(Environment.getExternalStorageState());
    } catch (Exception e) {
        e.printStackTrace();
    }
    return false;
}
/**
 * Initialize the download/cache/image/file/db directory tree on the SD card
 * and remember each resulting absolute path in the corresponding static
 * field. A no-op when the SD card is not mounted.
 *
 * @param context the context used to resolve the package name
 */
public static void initFileDir(Context context) {
    PackageInfo info = AbAppUtil.getPackageInfo(context);
    // Download root: /<DOWNLOAD_ROOT_DIR>/<package name>/
    String downloadRootPath = File.separator + AbAppConfig.DOWNLOAD_ROOT_DIR + File.separator + info.packageName + File.separator;
    // Image download directory.
    String imageDownloadPath = downloadRootPath + AbAppConfig.DOWNLOAD_IMAGE_DIR + File.separator;
    // Plain file download directory.
    String fileDownloadPath = downloadRootPath + AbAppConfig.DOWNLOAD_FILE_DIR + File.separator;
    // Cache directory.
    String cacheDownloadPath = downloadRootPath + AbAppConfig.CACHE_DIR + File.separator;
    // Database directory.
    String dbDownloadPath = downloadRootPath + AbAppConfig.DB_DIR + File.separator;
    try {
        if (!isCanUseSD()) {
            return;
        }
        File root = Environment.getExternalStorageDirectory();
        // Same creation order as before: root, cache, image, file, db.
        downloadRootDir = ensureDir(root, downloadRootPath);
        cacheDownloadDir = ensureDir(root, cacheDownloadPath);
        imageDownloadDir = ensureDir(root, imageDownloadPath);
        fileDownloadDir = ensureDir(root, fileDownloadPath);
        dbDownloadDir = ensureDir(root, dbDownloadPath);
    } catch (Exception e) {
        e.printStackTrace();
    }
}

/**
 * Create the directory {@code root + relativePath} when missing and return
 * its absolute path.
 */
private static String ensureDir(File root, String relativePath) {
    File dir = new File(root.getAbsolutePath() + relativePath);
    if (!dir.exists()) {
        dir.mkdirs();
    }
    return dir.getPath();
}
/**
 * Compute the free space remaining on the SD card.
 *
 * @return the available space in MB
 */
public static int freeSpaceOnSD() {
    StatFs stat = new StatFs(Environment.getExternalStorageDirectory().getPath());
    // Bug fix: the original wrote "/ 1024*1024", which (left-to-right)
    // divides by 1024 and then multiplies by 1024 again, returning bytes
    // instead of MB. The divisor must be parenthesized.
    double sdFreeMB = ((double) stat.getAvailableBlocks() * (double) stat.getBlockSize()) / (1024 * 1024);
    return (int) sdFreeMB;
}
/**
 * 根据文件的最后修改时间进行排序.
 *
 * Orders files by last-modified time, oldest first.
 */
public static class FileLastModifSort implements Comparator<File> {
    /* (non-Javadoc)
     * @see java.util.Comparator#compare(java.lang.Object, java.lang.Object)
     */
    public int compare(File arg0, File arg1) {
        // Long.compare is the idiomatic three-way comparison and yields the
        // same -1/0/1 ordering as the original if/else chain.
        return Long.compare(arg0.lastModified(), arg1.lastModified());
    }
}
/**
 * Delete all files directly inside the download root directory.
 * Subdirectories and their contents are left in place (same as before).
 *
 * @return true on success (or when there is nothing to delete),
 *         false when the SD card is unusable or deletion fails
 */
public static boolean clearDownloadFile() {
    try {
        if (!isCanUseSD()) {
            return false;
        }
        if (downloadRootDir == null) {
            // Directory tree was never initialized: nothing to clear.
            return true;
        }
        // Bug fix: downloadRootDir already holds an absolute path (it is
        // produced by File.getPath() in initFileDir), so prepending the
        // external-storage root again pointed at a non-existent directory
        // and the method silently deleted nothing.
        File fileDirectory = new File(downloadRootDir);
        File[] files = fileDirectory.listFiles();
        if (files == null) {
            return true;
        }
        for (File file : files) {
            file.delete();
        }
    } catch (Exception e) {
        e.printStackTrace();
        return false;
    }
    return true;
}
/**
 * Read the content of a file in the assets directory as text.
 * Line terminators are dropped (lines are concatenated), as before.
 *
 * @param context the context
 * @param name the asset file name
 * @param encoding the character encoding of the file
 * @return the file content, or null on failure
 */
public static String readAssetsByName(Context context, String name, String encoding) {
    String text = null;
    InputStreamReader inputReader = null;
    BufferedReader bufReader = null;
    try {
        // Bug fix: decode the stream with the requested encoding directly.
        // The original decoded with the platform default charset and then
        // re-encoded/re-decoded the result as `encoding`, corrupting any
        // non-ASCII text whenever the two charsets differed.
        inputReader = new InputStreamReader(context.getAssets().open(name), encoding);
        bufReader = new BufferedReader(inputReader);
        String line = null;
        StringBuffer buffer = new StringBuffer();
        while ((line = bufReader.readLine()) != null) {
            buffer.append(line);
        }
        text = buffer.toString();
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        try {
            if (bufReader != null) {
                bufReader.close();
            }
            if (inputReader != null) {
                inputReader.close();
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    return text;
}
/**
 * Read the content of a raw resource as text.
 * Line terminators are dropped (lines are concatenated), as before.
 *
 * @param context the context
 * @param id the raw resource id
 * @param encoding the character encoding of the resource
 * @return the resource content, or null on failure
 */
public static String readRawByName(Context context, int id, String encoding) {
    String text = null;
    InputStreamReader inputReader = null;
    BufferedReader bufReader = null;
    try {
        // Bug fix: decode the stream with the requested encoding directly
        // instead of the default-charset round-trip the original performed
        // (new String(s.getBytes(), encoding)), which corrupted non-ASCII
        // content.
        inputReader = new InputStreamReader(context.getResources().openRawResource(id), encoding);
        bufReader = new BufferedReader(inputReader);
        String line = null;
        StringBuffer buffer = new StringBuffer();
        while ((line = bufReader.readLine()) != null) {
            buffer.append(line);
        }
        text = buffer.toString();
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        try {
            if (bufReader != null) {
                bufReader.close();
            }
            if (inputReader != null) {
                inputReader.close();
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    return text;
}
/**
 * Gets the download root dir, lazily building the whole directory tree on
 * first access.
 *
 * @param context the context used to resolve the package name
 * @return the download root dir; may be null when initialization failed
 *         (e.g. SD card not mounted)
 */
public static String getDownloadRootDir(Context context) {
    if(downloadRootDir == null){
        initFileDir(context);
    }
    return downloadRootDir;
}
/**
 * Gets the image download dir.
 *
 * downloadRootDir doubles as the "initialized" flag for all directory
 * fields, since initFileDir assigns them together.
 *
 * @param context the context
 * @return the image download dir; may be null when initialization failed
 */
public static String getImageDownloadDir(Context context) {
    if(downloadRootDir == null){
        initFileDir(context);
    }
    return imageDownloadDir;
}
/**
 * Gets the file download dir.
 *
 * downloadRootDir doubles as the "initialized" flag for all directory
 * fields, since initFileDir assigns them together.
 *
 * @param context the context
 * @return the file download dir; may be null when initialization failed
 */
public static String getFileDownloadDir(Context context) {
    if(downloadRootDir == null){
        initFileDir(context);
    }
    return fileDownloadDir;
}
/**
 * Gets the cache download dir.
 *
 * downloadRootDir doubles as the "initialized" flag for all directory
 * fields, since initFileDir assigns them together.
 *
 * @param context the context
 * @return the cache download dir; may be null when initialization failed
 */
public static String getCacheDownloadDir(Context context) {
    if(downloadRootDir == null){
        initFileDir(context);
    }
    return cacheDownloadDir;
}
/**
 * Gets the db download dir.
 *
 * downloadRootDir doubles as the "initialized" flag for all directory
 * fields, since initFileDir assigns them together.
 *
 * @param context the context
 * @return the db download dir; may be null when initialization failed
 */
public static String getDbDownloadDir(Context context) {
    if(downloadRootDir == null){
        initFileDir(context);
    }
    return dbDownloadDir;
}
/**
 * Gets the free sd space needed to cache.
 *
 * NOTE(review): the backing field is declared elsewhere in this class;
 * presumably it is the minimum free-space threshold required before
 * caching — confirm against the field's declaration.
 *
 * @return the free sd space needed to cache
 */
public static int getFreeSdSpaceNeededToCache() {
    return freeSdSpaceNeededToCache;
}
}
| apache-2.0 |
DevendraDahiphale/StronglyConnectedComponentsMapreduce | src/main/java/pad/ClusterWritable.java | 2547 | /**
* @file ClusterWritable.java
* @brief Data structure used to write a cluster on hdfs files.
* @author Federico Conte (draxent)
*
* Copyright 2015 Federico Conte
* https://github.com/Draxent/ConnectedComponents
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pad;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import org.apache.hadoop.io.Writable;
/** Data structure used to write a cluster on hdfs files. */
// The class itself IS the list of node ids: it extends ArrayList<Integer>
// and (de)serializes its own elements through the Hadoop Writable contract.
public class ClusterWritable extends ArrayList<Integer> implements Writable
{
    private static final long serialVersionUID = 1L;

    /** Array of nodes that make up the cluster */
    // NOTE(review): this field appears unused by the serialization logic
    // below, which operates on the inherited ArrayList — confirm whether any
    // caller still relies on it before removing (it is public API).
    public ArrayList<Integer> Cluster = new ArrayList<Integer>();

    /** Creates an ClusterWritable object. */
    public ClusterWritable()
    {
        super();
    }

    /** Creates an ClusterWritable object from an ArrayList. */
    public ClusterWritable( ArrayList<Integer> array )
    {
        super( array );
    }

    /**
     * Deserializes the array. Read the data out in the order it is written.
     * Wire format: element count, then each element as a 4-byte int.
     * @param in source for raw byte representation.
     * @throws IOException
     */
    public void readFields( DataInput in ) throws IOException
    {
        // Reset first so repeated reads do not accumulate stale elements.
        this.clear();
        int numFields = in.readInt();
        if ( numFields == 0 ) return;

        for (int i = 0; i < numFields; i++)
            this.add( in.readInt() );
    }

    /**
     * Serializes this array. Write the data out in the order it is read.
     * @param out where to write the raw byte representation.
     * @throws IOException
     */
    public void write( DataOutput out ) throws IOException
    {
        out.writeInt( this.size() );
        if ( size() == 0 ) return;

        for ( int i = 0; i < size(); i++ )
            out.writeInt( this.get( i ) );
    }

    /**
     * Convert the object into a string.
     * Elements are space-separated, with no trailing separator.
     * @return the resulting string.
     */
    public String toString()
    {
        StringBuffer sb = new StringBuffer();
        if ( this.size() > 0 )
        {
            sb.append( this.get(0) );
            for ( int i = 1; i < this.size(); i++ )
                sb.append( " " ).append( this.get(i) );
        }
        return sb.toString();
    }
}
bitclaw/netsuite-php | src/Classes/SearchLongCustomField.php | 305 | <?php namespace Fungku\NetSuite\Classes;
/**
 * Search custom field holding an integer (long) value.
 *
 * Presumably mirrors the NetSuite SuiteTalk SearchLongCustomField type:
 * a value, an optional second value for range operators, and the
 * comparison operator to apply.
 */
class SearchLongCustomField extends SearchCustomField {
	/** @var int primary comparison value */
	public $searchValue;
	/** @var int secondary value, used by range-style operators */
	public $searchValue2;
	/** @var string a SearchLongFieldOperator value */
	public $operator;
	// Maps each property name to its SOAP parameter type for serialization.
	static $paramtypesmap = array(
		"searchValue" => "integer",
		"searchValue2" => "integer",
		"operator" => "SearchLongFieldOperator",
	);
}
| apache-2.0 |
gooljim/android-volley | app/build/generated/source/buildConfig/debug/com/android/volley/BuildConfig.java | 443 | /**
* Automatically generated file. DO NOT MODIFY
*/
package com.android.volley;
// Build-time constants emitted by the Android Gradle plugin for the
// debug variant of the com.android.volley module.
public final class BuildConfig {
    // parseBoolean (rather than a literal) keeps the generated source shape
    // identical across build variants.
    public static final boolean DEBUG = Boolean.parseBoolean("true");
    public static final String APPLICATION_ID = "com.android.volley";
    public static final String BUILD_TYPE = "debug";
    // Empty: this module declares no product flavors.
    public static final String FLAVOR = "";
    public static final int VERSION_CODE = 1;
    public static final String VERSION_NAME = "1.0";
}
| apache-2.0 |
walterDurin/stickycode | net.stickycode/sticky-exception/src/main/java/net/stickycode/exception/resolver/FragmentIterator.java | 1776 | package net.stickycode.exception.resolver;
import java.util.Iterator;
/**
 * Iterates over the {@link Fragment}s of a message template.
 *
 * The template mixes literal text with two two-character markers:
 * "{}" (a parameter placeholder, emitted as a ParameterFragment) and
 * "''" (an escaped quote, emitted as a QuoteFragment). next() yields
 * these plus TextFragments covering the literal runs, in order.
 */
public class FragmentIterator
    implements Iterator<Fragment> {

  private String message;

  // Index of the first character not yet emitted.
  private int index;

  // Position of the next "''" marker at or after index, or -1 when none.
  private int nextQuote;

  // Position of the next "{}" marker at or after index, or -1 when none.
  private int nextParameter;

  public FragmentIterator(String message) {
    this.message = message;
    nextParameter();
    nextQuote();
  }

  private void nextQuote() {
    nextQuote = message.indexOf("''", index);
  }

  private void nextParameter() {
    nextParameter = message.indexOf("{}", index);
  }

  @Override
  public boolean hasNext() {
    if (index >= message.length())
      return false;

    return true;
  }

  @Override
  public Fragment next() {
    // A marker exactly at the cursor is emitted first; the cursor skips
    // the two marker characters and the lookahead is refreshed.
    if (index == nextParameter) {
      updateIndex(nextParameter + 2);
      nextParameter();
      return new ParameterFragment();
    }

    if (index == nextQuote) {
      updateIndex(nextQuote + 2);
      nextQuote();
      return new QuoteFragment();
    }

    // Otherwise emit the literal text up to the nearest upcoming marker
    // (or the rest of the message when no marker remains).
    if (nextParameter == -1) {
      if (index < nextQuote) {
        return text(nextQuote);
      }
      return tail();
    }

    if (nextQuote == -1) {
      return text(nextParameter);
    }

    if (nextQuote < nextParameter) {
      return text(nextQuote);
    }

    return text(nextParameter);
  }

  // Emit the literal run [index, nextIndex) and advance the cursor.
  private Fragment text(int nextIndex) {
    TextFragment textFragment = new TextFragment(message.substring(index, nextIndex));
    updateIndex(nextIndex);
    return textFragment;
  }

  private void updateIndex(int processed) {
    this.index = processed;
  }

  // Emit everything from the cursor to the end of the message.
  private Fragment tail() {
    TextFragment textFragment = new TextFragment(message.substring(index));
    index = message.length();
    return textFragment;
  }

  @Override
  public void remove() {
    throw new UnsupportedOperationException();
  }
}
| apache-2.0 |
apache/activemq-activeio | activeio-core/src/main/java/org/apache/activeio/packet/ByteArrayPacket.java | 6591 | /**
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activeio.packet;
import java.io.DataOutput;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Constructor;
/**
 * Provides a Packet implementation that is directly backed by a <code>byte[]</code>.
 *
 * The packet is a window [offset, offset+capacity) into the backing array.
 * Invariant maintained by every mutator: remaining == limit - position.
 *
 * @version $Revision$
 */
final public class ByteArrayPacket implements Packet {

    private final byte buffer[];

    // Start of this packet's window within the backing array.
    private final int offset;
    private final int capacity;

    // position/limit are relative to offset, not to the backing array.
    private int position;
    private int limit;
    // Cached limit - position, kept in sync by every mutator.
    private int remaining;

    public ByteArrayPacket(byte buffer[]) {
        this(buffer,0, buffer.length);
    }

    public ByteArrayPacket(ByteSequence sequence) {
        this(sequence.getData(), sequence.getOffset(), sequence.getLength());
    }

    public ByteArrayPacket(byte buffer[], int offset, int capacity) {
        this.buffer = buffer;
        this.offset=offset;
        this.capacity=capacity;
        this.position = 0;
        this.limit = capacity;
        this.remaining = limit-position;
    }

    public int position() {
        return position;
    }

    public void position(int position) {
        this.position = position;
        remaining = limit-position;
    }

    public int limit() {
        return limit;
    }

    public void limit(int limit) {
        this.limit = limit;
        remaining = limit-position;
    }

    // Switch from writing to reading, as in java.nio.Buffer#flip().
    public void flip() {
        limit = position;
        position = 0;
        remaining = limit - position;
    }

    public int remaining() {
        return remaining;
    }

    public void rewind() {
        position = 0;
        remaining = limit - position;
    }

    public boolean hasRemaining() {
        return remaining > 0;
    }

    public void clear() {
        position = 0;
        limit = capacity;
        remaining = limit - position;
    }

    public int capacity() {
        return capacity;
    }

    // New packet over [position, limit); shares the backing array.
    public Packet slice() {
        return new ByteArrayPacket(buffer, offset+position, remaining);
    }

    // New packet over the full window with fresh position/limit; shares
    // the backing array.
    public Packet duplicate() {
        return new ByteArrayPacket(buffer, offset, capacity);
    }

    // Reflectively constructs the duplicate via the given classloader so
    // the result is usable across classloader boundaries.
    public Object duplicate(ClassLoader cl) throws IOException {
        try {
            Class clazz = cl.loadClass(ByteArrayPacket.class.getName());
            Constructor constructor = clazz.getConstructor(new Class[]{byte[].class, int.class, int.class});
            return constructor.newInstance(new Object[]{buffer, new Integer(offset), new Integer(capacity())});
        } catch (Throwable e) {
            throw (IOException)new IOException("Could not duplicate packet in a different classloader: "+e).initCause(e);
        }
    }

    // Writes all remaining bytes and consumes them (position advances to limit).
    public void writeTo(OutputStream out) throws IOException {
        out.write(buffer, offset+position, remaining);
        position=limit;
        remaining = limit-position;
    }

    public void writeTo(DataOutput out) throws IOException {
        out.write(buffer, offset+position, remaining);
        position=limit;
        remaining = limit-position;
    }

    /**
     * @see org.apache.activeio.packet.Packet#read()
     */
    public int read() {
        if( !(remaining > 0) )
            return -1;
        int rc = buffer[offset+position];
        position++;
        remaining = limit-position;
        // Mask to return an unsigned byte value in 0..255.
        return rc & 0xff;
    }

    /**
     * @see org.apache.activeio.packet.Packet#read(byte[], int, int)
     */
    public int read(byte[] data, int offset, int length) {
        if( !(remaining > 0) )
            return -1;

        // Copy at most `remaining` bytes.
        int copyLength = ((length <= remaining) ? length : remaining);
        System.arraycopy(buffer, this.offset+position, data, offset, copyLength);
        position += copyLength;
        remaining = limit-position;
        return copyLength;
    }

    /**
     * @see org.apache.activeio.packet.Packet#write(int)
     */
    public boolean write(int data) {
        if( !(remaining > 0) )
            return false;
        buffer[offset+position]=(byte) data;
        position++;
        remaining = limit-position;
        return true;
    }

    /**
     * @see org.apache.activeio.packet.Packet#write(byte[], int, int)
     */
    public int write(byte[] data, int offset, int length) {
        if( !(remaining > 0) )
            return -1;

        // Write at most `remaining` bytes.
        int copyLength = ((length <= remaining) ? length : remaining);
        System.arraycopy(data, offset, buffer, this.offset+position, copyLength);
        position+=copyLength;
        remaining = limit-position;
        return copyLength;
    }

    public ByteSequence asByteSequence() {
        return new ByteSequence(buffer, offset+position, remaining);
    }

    /**
     * @see org.apache.activeio.packet.Packet#sliceAsBytes()
     */
    public byte[] sliceAsBytes() {
        // buffer.length == remaining can only hold when offset==0 and
        // position==0, so the backing array can be returned as-is.
        if( buffer.length == remaining ) {
            return buffer;
        } else {
            byte rc[] = new byte[remaining];
            int op = position;
            read(rc,0,remaining);
            // Restore the position consumed by read(): this is a peek.
            position=op;
            remaining = limit-position;
            return rc;
        }
    }

    /**
     * @param dest
     * @return the number of bytes read into the dest.
     */
    public int read(Packet dest) {
        int a = dest.remaining();
        int rc = ((a <= remaining) ? a : remaining);
        if( rc > 0 ) {
            dest.write( buffer, offset+position, rc);
            position = position+rc;
            remaining = limit-position;
        }
        return rc;
    }

    public String toString() {
        return "{position="+position+",limit="+limit+",capacity="+capacity+"}";
    }

    public Object getAdapter(Class target) {
        if( target.isAssignableFrom(getClass()) ) {
            return this;
        }
        return null;
    }

    public byte[] getBuffer() {
        return buffer;
    }

    // Nothing to release: the backing array is plain heap memory.
    public void dispose() {
    }
}
| apache-2.0 |
sverkera/camel | components/camel-spring/src/test/java/org/apache/camel/spring/processor/SpringRouteStartupOrderTest.java | 1315 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.spring.processor;
import org.apache.camel.CamelContext;
import org.apache.camel.processor.RouteStartupOrderTest;
import static org.apache.camel.spring.processor.SpringTestHelper.createSpringCamelContext;
/**
 * Runs the inherited RouteStartupOrderTest assertions against a CamelContext
 * that is configured from a Spring XML file instead of the Java DSL.
 *
 * @version
 */
public class SpringRouteStartupOrderTest extends RouteStartupOrderTest {

    protected CamelContext createCamelContext() throws Exception {
        // Route definitions live in the referenced Spring XML resource.
        return createSpringCamelContext(this, "org/apache/camel/spring/processor/RouteStartupOrderTest.xml");
    }
}
lance-gg/lance | src/game/Timer.js | 2530 | // TODO: needs documentation
// I think the API could be simpler
// - Timer.run(waitSteps, cb)
// - Timer.repeat(waitSteps, count, cb) // count=null=>forever
// - Timer.cancel(cb)
// Step-based timer: call tick() once per game step. Registered events fire
// whenever their period divides the number of steps elapsed since they were
// scheduled; single-shot events remove themselves after their first firing.
export default class Timer {

    constructor() {
        this.currentTime = 0;
        this.isActive = false;
        this.idCounter = 0;
        this.events = {};
    }

    // Ticks are ignored until play() has been called.
    play() {
        this.isActive = true;
    }

    // Advance one step and fire every due event.
    tick() {
        if (!this.isActive) return;

        this.currentTime++;

        for (let eventId in this.events) {
            const event = this.events[eventId];
            if (!event) continue;

            const elapsed = this.currentTime - event.startOffset;
            if (elapsed % event.time !== 0) continue;

            event.callback.apply(event.thisContext, event.args);
            if (event.type === 'single') {
                event.destroy();
            }
        }
    }

    destroyEvent(eventId) {
        delete this.events[eventId];
    }

    // Schedule a callback to fire every `time` ticks, forever.
    loop(time, callback) {
        const timerEvent = new TimerEvent(this, TimerEvent.TYPES.repeat, time, callback);
        this.events[timerEvent.id] = timerEvent;
        return timerEvent;
    }

    // Schedule a callback to fire once, `time` ticks from now.
    add(time, callback, thisContext, args) {
        const timerEvent = new TimerEvent(this, TimerEvent.TYPES.single, time, callback, thisContext, args);
        this.events[timerEvent.id] = timerEvent;
        return timerEvent;
    }

    // todo implement timer delete all events
    destroy(id) {
        delete this.events[id];
    }
}

// A single scheduled entry owned by a Timer.
class TimerEvent {

    constructor(timer, type, time, callback, thisContext, args) {
        this.id = ++timer.idCounter;
        this.timer = timer;
        this.type = type;
        this.time = time;
        this.callback = callback;
        // Period is measured from the step the event was scheduled on.
        this.startOffset = timer.currentTime;
        this.thisContext = thisContext;
        this.args = args;

        this.destroy = () => this.timer.destroy(this.id);
    }
}

TimerEvent.TYPES = {
    repeat: 'repeat',
    single: 'single'
};
| apache-2.0 |
deeplearning4j/DataVec | datavec-spark-inference-parent/datavec-spark-inference-server/src/main/java/org/datavec/spark/transform/SparkTransformServerChooser.java | 1676 | package org.datavec.spark.transform;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import java.io.InvalidClassException;
import java.util.Arrays;
import java.util.List;
/**
 * Selects and starts the Spark transform server matching the -dt/--dataType
 * command line option.
 *
 * Created by kepricon on 17. 6. 20.
 */
@Data
@Slf4j
public class SparkTransformServerChooser {
    // Concrete server implementation selected from the --dataType option.
    private SparkTransformServer sparkTransformServer = null;
    private TransformDataType transformDataType = null;

    /**
     * Parses the -dt/--dataType option, instantiates the matching transform
     * server (CSV or IMAGE) and delegates all arguments to it.
     *
     * @param args command line arguments
     * @throws Exception when no data type option is present, or
     *         InvalidClassException when the type has no matching server
     */
    public void runMain(String[] args) throws Exception {

        int pos = getMatchingPosition(args, "-dt", "--dataType");
        if (pos == -1) {
            log.error("no valid options");
            log.error("-dt, --dataType Options: [CSV, IMAGE]");
            throw new Exception("no valid options");
        } else {
            transformDataType = TransformDataType.valueOf(args[pos + 1]);
        }

        switch (transformDataType) {
            case CSV:
                sparkTransformServer = new CSVSparkTransformServer();
                break;
            case IMAGE:
                sparkTransformServer = new ImageSparkTransformServer();
                break;
            default:
                throw new InvalidClassException("no matching SparkTransform class");
        }

        sparkTransformServer.runMain(args);
    }

    /**
     * Returns the index of the first argument matching any of the given
     * option names, or -1 when none matches.
     */
    private int getMatchingPosition(String[] args, String... options) {
        // Fixed: use a typed list rather than the raw List previously declared.
        List<String> optionList = Arrays.asList(options);

        for (int i = 0; i < args.length; i++) {
            if (optionList.contains(args[i])) {
                return i;
            }
        }
        return -1;
    }

    public static void main(String[] args) throws Exception {
        new SparkTransformServerChooser().runMain(args);
    }
}
| apache-2.0 |
w1ll1am23/home-assistant | homeassistant/components/fritzbox_callmonitor/__init__.py | 2690 | """The fritzbox_callmonitor integration."""
from asyncio import gather
import logging
from fritzconnection.core.exceptions import FritzConnectionException, FritzSecurityError
from requests.exceptions import ConnectionError as RequestsConnectionError
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.exceptions import ConfigEntryNotReady
from .base import FritzBoxPhonebook
from .const import (
CONF_PHONEBOOK,
CONF_PREFIXES,
DOMAIN,
FRITZBOX_PHONEBOOK,
PLATFORMS,
UNDO_UPDATE_LISTENER,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry):
    """Set up the fritzbox_callmonitor platforms.

    Builds the phonebook from the config entry, validates connectivity to
    the FRITZ!Box, registers an options-update listener, and forwards the
    entry to the platforms in PLATFORMS.
    """
    fritzbox_phonebook = FritzBoxPhonebook(
        host=config_entry.data[CONF_HOST],
        username=config_entry.data[CONF_USERNAME],
        password=config_entry.data[CONF_PASSWORD],
        phonebook_id=config_entry.data[CONF_PHONEBOOK],
        prefixes=config_entry.options.get(CONF_PREFIXES),
    )

    try:
        # init_phonebook does blocking network I/O, so run it in the executor.
        await hass.async_add_executor_job(fritzbox_phonebook.init_phonebook)
    except FritzSecurityError as ex:
        # Permission problems are permanent: fail setup without retrying.
        _LOGGER.error(
            "User has insufficient permissions to access AVM FRITZ!Box settings and its phonebooks: %s",
            ex,
        )
        return False
    except FritzConnectionException as ex:
        _LOGGER.error("Invalid authentication: %s", ex)
        return False
    except RequestsConnectionError as ex:
        # Network errors may be transient: ask Home Assistant to retry later.
        _LOGGER.error("Unable to connect to AVM FRITZ!Box call monitor: %s", ex)
        raise ConfigEntryNotReady from ex

    # Reload the entry whenever its options change (see update_listener).
    undo_listener = config_entry.add_update_listener(update_listener)

    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN][config_entry.entry_id] = {
        FRITZBOX_PHONEBOOK: fritzbox_phonebook,
        UNDO_UPDATE_LISTENER: undo_listener,
    }

    for platform in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(config_entry, platform)
        )

    return True
async def async_unload_entry(hass, config_entry):
    """Unloading the fritzbox_callmonitor platforms."""
    # Unload every platform concurrently; success requires all of them.
    results = await gather(
        *(
            hass.config_entries.async_forward_entry_unload(config_entry, platform)
            for platform in PLATFORMS
        )
    )
    unload_ok = all(results)

    # Detach the options-update listener registered during setup.
    hass.data[DOMAIN][config_entry.entry_id][UNDO_UPDATE_LISTENER]()

    if unload_ok:
        hass.data[DOMAIN].pop(config_entry.entry_id)

    return unload_ok
async def update_listener(hass, config_entry):
    """Update listener to reload after option has changed."""
    # Options (e.g. prefixes) only take effect after a full entry reload.
    await hass.config_entries.async_reload(config_entry.entry_id)
| apache-2.0 |
brennentsmith/teletraan | deploy-service/teletraanservice/src/main/java/com/pinterest/teletraan/TeletraanServiceConfiguration.java | 7392 | /**
* Copyright 2016 Pinterest, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pinterest.teletraan;
import com.pinterest.teletraan.config.AnonymousAuthenticationFactory;
import com.pinterest.teletraan.config.AuthenticationFactory;
import com.pinterest.teletraan.config.AuthorizationFactory;
import com.pinterest.teletraan.config.ChatFactory;
import com.pinterest.teletraan.config.DataSourceFactory;
import com.pinterest.teletraan.config.DefaultChatFactory;
import com.pinterest.teletraan.config.DefaultEmailFactory;
import com.pinterest.teletraan.config.DefaultHostGroupFactory;
import com.pinterest.teletraan.config.DefaultSourceControlFactory;
import com.pinterest.teletraan.config.EmailFactory;
import com.pinterest.teletraan.config.EmbeddedDataSourceFactory;
import com.pinterest.teletraan.config.EventSenderFactory;
import com.pinterest.teletraan.config.ExternalAlertsConfigFactory;
import com.pinterest.teletraan.config.HostGroupFactory;
import com.pinterest.teletraan.config.JenkinsFactory;
import com.pinterest.teletraan.config.OpenAuthorizationFactory;
import com.pinterest.teletraan.config.RodimusFactory;
import com.pinterest.teletraan.config.SourceControlFactory;
import com.pinterest.teletraan.config.SystemFactory;
import com.pinterest.teletraan.config.WorkerConfig;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.dropwizard.Configuration;
import java.util.Collections;
import java.util.List;
import javax.validation.Valid;
public class TeletraanServiceConfiguration extends Configuration {
@Valid
@JsonProperty("db")
private DataSourceFactory dataSourceFactory;
@JsonProperty("authentication")
@Valid
private AuthenticationFactory authenticationFactory;
@JsonProperty("authorization")
@Valid
private AuthorizationFactory authorizationFactory;
@Valid
@JsonProperty("scm")
private SourceControlFactory sourceControlFactory;
@Valid
@JsonProperty("chat")
private ChatFactory chatFactory;
@Valid
@JsonProperty("email")
private EmailFactory emailFactory;
@Valid
@JsonProperty("hostgroup")
private HostGroupFactory hostGroupFactory;
@Valid
@JsonProperty("event")
private EventSenderFactory eventSenderFactory;
@Valid
@JsonProperty("workers")
private List<WorkerConfig> workerConfigs;
@Valid
@JsonProperty("system")
private SystemFactory systemFactory;
@Valid
@JsonProperty("rodimus")
private RodimusFactory rodimusFactory;
@Valid
@JsonProperty("jenkins")
private JenkinsFactory jenkinsFactory;
@Valid
@JsonProperty("externalAlerts")
private ExternalAlertsConfigFactory externalAlertsConfigs;
@Valid
@JsonProperty("pingrequestvalidators")
private List<String> pingRequestValidators;
public DataSourceFactory getDataSourceFactory() {
if (dataSourceFactory == null) {
return new EmbeddedDataSourceFactory();
}
return dataSourceFactory;
}
public void setDataSourceFactory(DataSourceFactory factory) {
this.dataSourceFactory = factory;
}
public AuthenticationFactory getAuthenticationFactory() {
if (authenticationFactory == null) {
return new AnonymousAuthenticationFactory();
}
return authenticationFactory;
}
public void setAuthenticationFactory(AuthenticationFactory authenticationFactory) {
this.authenticationFactory = authenticationFactory;
}
public SourceControlFactory getSourceControlFactory() {
if (sourceControlFactory == null) {
return new DefaultSourceControlFactory();
}
return sourceControlFactory;
}
public void setSourceControlFactory(SourceControlFactory sourceControlFactory) {
this.sourceControlFactory = sourceControlFactory;
}
public AuthorizationFactory getAuthorizationFactory() {
if (authorizationFactory == null) {
return new OpenAuthorizationFactory();
}
return authorizationFactory;
}
public ChatFactory getChatFactory() {
if (chatFactory == null) {
return new DefaultChatFactory();
}
return chatFactory;
}
public void setChatFactory(ChatFactory chatFactory) {
this.chatFactory = chatFactory;
}
public EmailFactory getEmailFactory() {
if (emailFactory == null) {
return new DefaultEmailFactory();
}
return emailFactory;
}
public void setEmailFactory(EmailFactory emailFactory) {
this.emailFactory = emailFactory;
}
public void setAuthorizationFactory(AuthorizationFactory authorizationFactory) {
this.authorizationFactory = authorizationFactory;
}
public HostGroupFactory getHostGroupFactory() {
if (hostGroupFactory == null) {
return new DefaultHostGroupFactory();
}
return hostGroupFactory;
}
public void setHostGroupFactory(HostGroupFactory hostGroupFactory) {
this.hostGroupFactory = hostGroupFactory;
}
public EventSenderFactory getEventSenderFactory() {
return eventSenderFactory;
}
public void setEventSenderFactory(EventSenderFactory eventSenderFactory) {
this.eventSenderFactory = eventSenderFactory;
}
public SystemFactory getSystemFactory() {
if (systemFactory == null) {
return new SystemFactory();
}
return systemFactory;
}
public void setSystemFactory(SystemFactory systemFactory) {
this.systemFactory = systemFactory;
}
public RodimusFactory getRodimusFactory() {
return rodimusFactory;
}
public void setRodimusFactory(RodimusFactory rodimusFactory) {
this.rodimusFactory = rodimusFactory;
}
public JenkinsFactory getJenkinsFactory() {
return jenkinsFactory;
}
public void setJenkinsFactory(JenkinsFactory jenkinsFactory) {
this.jenkinsFactory = jenkinsFactory;
}
public List<WorkerConfig> getWorkerConfigs() {
if (workerConfigs == null) {
return Collections.emptyList();
}
return workerConfigs;
}
public void setWorkerConfigs(List<WorkerConfig> workerConfigs) {
this.workerConfigs = workerConfigs;
}
public ExternalAlertsConfigFactory getExternalAlertsConfigs() {
return externalAlertsConfigs;
}
public void setExternalAlertsConfigs(
ExternalAlertsConfigFactory externalAlertsConfigs) {
this.externalAlertsConfigs = externalAlertsConfigs;
}
public List<String> getPingRequestValidators() {
return pingRequestValidators;
}
public void setPingRequestValidators(
List<String> pingRequestValidators) {
this.pingRequestValidators = pingRequestValidators;
}
}
| apache-2.0 |
Rikkola/kie-wb-common | kie-wb-common-dmn/kie-wb-common-dmn-webapp-kogito-common/src/main/java/org/kie/workbench/common/dmn/webapp/kogito/common/client/editor/DMNProjectToolbarStateHandler.java | 3513 | /*
* Copyright 2019 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.dmn.webapp.kogito.common.client.editor;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import org.kie.workbench.common.dmn.client.editors.toolbar.ToolbarStateHandler;
import org.kie.workbench.common.stunner.core.client.session.command.ClientSessionCommand;
import org.kie.workbench.common.stunner.core.client.session.command.impl.ClearSessionCommand;
import org.kie.workbench.common.stunner.core.client.session.command.impl.CopySelectionSessionCommand;
import org.kie.workbench.common.stunner.core.client.session.command.impl.CutSelectionSessionCommand;
import org.kie.workbench.common.stunner.core.client.session.command.impl.DeleteSelectionSessionCommand;
import org.kie.workbench.common.stunner.core.client.session.command.impl.PasteSelectionSessionCommand;
import org.kie.workbench.common.stunner.core.client.session.command.impl.PerformAutomaticLayoutCommand;
import org.kie.workbench.common.stunner.core.client.session.command.impl.SwitchGridSessionCommand;
import org.kie.workbench.common.stunner.core.client.session.command.impl.VisitGraphSessionCommand;
import org.kie.workbench.common.stunner.kogito.client.editor.AbstractDiagramEditorMenuSessionItems;
/**
 * Remembers and restores the enabled state of the toolbar session commands
 * when the editor switches between the grid view and the graph view.
 *
 * <p>On {@link #enterGridView()} the current enabled state of each tracked
 * command is captured and the command is disabled; on
 * {@link #enterGraphView()} the captured states are restored.</p>
 */
public class DMNProjectToolbarStateHandler implements ToolbarStateHandler {

    // Session commands whose enabled state is saved/restored by this handler.
    private static final Class[] COMMAND_CLASSES = {
            ClearSessionCommand.class,
            SwitchGridSessionCommand.class,
            VisitGraphSessionCommand.class,
            DeleteSelectionSessionCommand.class,
            CutSelectionSessionCommand.class,
            CopySelectionSessionCommand.class,
            PasteSelectionSessionCommand.class,
            PerformAutomaticLayoutCommand.class
    };

    // Captured enabled-state per command class; populated with 'false' up front.
    private final Map<Class<? extends ClientSessionCommand>, Boolean> commandStates = new HashMap<>();

    private final AbstractDiagramEditorMenuSessionItems projectEditorMenuSessionItems;

    @SuppressWarnings("unchecked")
    public DMNProjectToolbarStateHandler(final AbstractDiagramEditorMenuSessionItems projectEditorMenuSessionItems) {
        this.projectEditorMenuSessionItems = projectEditorMenuSessionItems;
        for (final Class commandClass : COMMAND_CLASSES) {
            commandStates.put(commandClass, false);
        }
    }

    @Override
    public void enterGridView() {
        // Snapshot each command's current state, then disable it while the
        // grid (expression) editor is active.
        for (final Map.Entry<Class<? extends ClientSessionCommand>, Boolean> entry : commandStates.entrySet()) {
            final Class<? extends ClientSessionCommand> command = entry.getKey();
            entry.setValue(projectEditorMenuSessionItems.isItemEnabled(command));
            projectEditorMenuSessionItems.setItemEnabled(command, false);
        }
    }

    @Override
    public void enterGraphView() {
        // Restore the states captured by the last enterGridView() call.
        for (final Map.Entry<Class<? extends ClientSessionCommand>, Boolean> entry : commandStates.entrySet()) {
            projectEditorMenuSessionItems.setItemEnabled(entry.getKey(), entry.getValue());
        }
    }
}
| apache-2.0 |
fuhongliang/2015weitonghui | Application/User/Controller/BusinessController.class.php | 28087 | <?php
// .-----------------------------------------------------------------------------------
// |
// | WE TRY THE BEST WAY
// | Site: http://www.gooraye.net
// |-----------------------------------------------------------------------------------
// | Author: 贝贝 <hebiduhebi@163.com>
// | Copyright (c) 2012-2014, http://www.gooraye.net. All Rights Reserved.
// |-----------------------------------------------------------------------------------
namespace User\Controller;
use Think\Controller;
/**
*
*
*
*/
/**
 * Merchant ("business") management controller for the user back-end.
 *
 * Provides CRUD pages for merchants, their classifications (busines_main),
 * service projects (busines_second), posters (busines_pic), customer
 * comments (busines_comment) and reservation orders (Reservebook).
 *
 * Every action is scoped by the current session token and by a `type`
 * string that is whitelisted in _initialize().
 */
class BusinessController extends UserController
{
    /**
     * Pre-action hook: validate the `type` GET parameter against the
     * module whitelist and pin the POST token to the session token.
     */
    public function _initialize() {
        parent::_initialize();
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        // Only these business categories are handled by this controller.
        $arrAllow = array('fitness', 'gover', 'food', 'travel', 'flower', 'property', 'ktv', 'bar', 'fitment', 'wedding', 'affections', 'housekeeper', 'lease', 'beauty');
        if (!in_array($type, $arrAllow)) {
            $this->error('抱歉,您的参数不合法!', U('Function/index', array('token' => $this->token)));
        }
        $this->assign('type', $type);
        // Ensure later Model->create()/add() calls use the session token,
        // not a client-supplied one.
        $_POST['token'] = session('token');
    }

    /**
     * Paginated list of merchants for the current token/type.
     */
    public function index() {
        $data = D('busines');
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        $where = array('token' => session('token'), 'type' => $type);
        $count = $data->where($where)->count();
        $Page = new \Think\Page($count, 20);
        $show = $Page->show();
        $busines = $data->where($where)->order('sort desc')->limit($Page->firstRow . ',' . $Page->listRows)->select();
        $this->assign('page', $show);
        $this->assign('busines', $busines);
        $this->display();
    }

    /**
     * Add or edit a merchant.
     *
     * GET renders the form (pre-filled when `bid` identifies an existing
     * record); POST with status=edit updates, otherwise inserts, keeping
     * the Keyword table in sync in both cases.
     */
    public function index_add() {
        $Photo = M("Photo");
        $where = array('token' => session('token'), 'status' => 1);
        $photo = $Photo->where($where)->order('id desc')->select();
        $this->assign('photo', $photo);
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        $t_busines = D('busines');
        $bid = filter_var(I('get.bid'), FILTER_VALIDATE_INT);
        $where_2 = array('token' => session('token'), 'type' => $type, 'bid' => $bid);
        $busines = $t_busines->where($where_2)->find();
        if (IS_POST) {
            // NOTE(review): filter_var_array() is given FILTER_SANITIZE_*
            // constants in 'flags'/'options' slots; sanitize filters rarely
            // return false, so $check is unlikely to reject input — confirm
            // whether this validation actually fires.
            $filters = array('keyword' => array('filter' => FILTER_SANITIZE_STRIPPED, 'flags' => FILTER_SANITIZE_STRING, 'options' => FILTER_SANITIZE_ENCODED), 'title' => array('filter' => FILTER_SANITIZE_STRIPPED, 'flags' => FILTER_SANITIZE_STRING, 'options' => FILTER_SANITIZE_ENCODED), 'picurl' => array('filter' => FILTER_VALIDATE_URL), 'business_desc' => array('filter' => FILTER_SANITIZE_STRIPPED, 'flags' => FILTER_SANITIZE_STRING, 'options' => FILTER_SANITIZE_ENCODED));
            $check = filter_var_array($_POST, $filters);
            if (!$check) {
                exit($this->error('包含敏感字符,或者是不允许字串!', U("Business/index", array('token' => session('token'), 'type' => $type))));
            } else {
                $_POST['token'] = session('token');
                if (!$t_busines->create()) {
                    exit($this->error($t_busines->getError()));
                } else {
                    $bid = filter_var(I('post.bid'), FILTER_VALIDATE_INT);
                    $status = filter_var(I('post.status'), FILTER_SANITIZE_STRING);
                    if ('edit' == $status && $bid != '') {
                        // Update path: save the merchant, then its keyword row.
                        $o = $t_busines->where(array('bid' => $bid, 'token' => session('token'), 'type' => $type))->save($_POST);
                        if ($o) {
                            $data2['keyword'] = filter_var(I('post.keyword'), FILTER_SANITIZE_STRING);
                            M('Keyword')->where(array('pid' => $bid, 'token' => session('token'), 'module' => 'Business'))->data($data2)->save();
                            exit($this->success('修改成功', U("Business/index", array('token' => session('token'), 'type' => $type))));
                        } else {
                            exit($this->error('修改失败', U("Business/index", array('token' => session('token'), 'type' => $type))));
                        }
                    } else {
                        // Insert path: add the merchant, then create its keyword row.
                        if ($id = $t_busines->data($_POST)->add()) {
                            $data1['pid'] = $id;
                            $data1['module'] = 'Business';
                            $data1['token'] = session('token');
                            $data1['keyword'] = filter_var(I('post.keyword'), FILTER_SANITIZE_STRING);
                            M('Keyword')->add($data1);
                            $this->success('添加成功', U("Business/index", array('token' => session('token'), 'type' => $type)));
                            exit;
                        } else {
                            exit($this->error('服务器繁忙,添加失败,请稍候再试', U("Business/index", array('token' => session('token'), 'type' => $type))));
                        }
                    }
                }
            }
        }
        $this->assign('busines', $busines);
        $this->display();
    }

    /**
     * Delete a merchant (and its keyword row) after verifying ownership.
     */
    public function index_del() {
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        $bid = filter_var(I('get.bid'), FILTER_VALIDATE_INT);
        $t_busines = M('busines');
        $find = array('bid' => $bid, 'type' => $type, 'token' => session('token'));
        $result = $t_busines->where($find)->find();
        if ($result) {
            $t_busines->where(array('bid' => $result['bid'], 'type' => $result['type'], 'token' => session('token')))->delete();
            M('Keyword')->where(array('pid' => $result['bid'], 'module' => 'Business', 'token' => session('token')))->delete();
            $this->success('删除成功', U("Business/index", array('token' => session('token'), 'type' => $result['type'])));
            exit;
        } else {
            exit($this->error('非法操作,请稍候再试', U("Business/index", array('token' => session('token'), 'type' => $type))));
        }
    }

    /**
     * Paginated list of classifications, each annotated with the title of
     * the merchant it belongs to.
     */
    public function classify() {
        $data = D('busines_main');
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        $where = array('token' => session('token'), 'type' => $type);
        $count = $data->where($where)->count();
        $Page = new \Think\Page($count, 20);
        $show = $Page->show();
        $busines_main = $data->where($where)->order('sort desc')->limit($Page->firstRow . ',' . $Page->listRows)->select();
        $i = 0;
        // Append each owning merchant's title to its classification row
        // (array_push adds it under the next numeric key).
        foreach ($busines_main as $val) {
            $busines = M("busines")->where(array('token' => session('token'), 'bid' => $val['bid_id']))->field('mtitle')->find();
            array_push($busines_main[$i], $busines['mtitle']);
            unset($busines);
            ++$i;
        }
        $this->assign('page', $show);
        $this->assign('busines_main', $busines_main);
        $this->display();
    }

    /**
     * Add or edit a classification (busines_main) for a merchant.
     */
    public function classify_add() {
        $t_busines = M("busines");
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        $where = array('token' => session('token'), 'type' => $type);
        $busines_list = $t_busines->where($where)->order('sort desc')->field('bid,mtitle')->select();
        $this->assign('busines_list', $busines_list);
        $t_busines_main = D('busines_main');
        $mid = filter_var(I('get.mid'), FILTER_VALIDATE_INT);
        $where_2 = array('token' => session('token'), 'type' => $type, 'mid' => $mid);
        $busines_main = $t_busines_main->where($where_2)->find();
        if (IS_POST) {
            $filters = array('name' => array('filter' => FILTER_SANITIZE_STRIPPED, 'flags' => FILTER_SANITIZE_STRING, 'options' => FILTER_SANITIZE_ENCODED), 'main_desc' => array('filter' => FILTER_SANITIZE_STRIPPED, 'flags' => FILTER_SANITIZE_STRING, 'options' => FILTER_SANITIZE_ENCODED));
            $check = filter_var_array($_POST, $filters);
            if (!$check) {
                exit($this->error('表单包含敏感字符!'));
            } else {
                $_POST['token'] = session('token');
                if (!$t_busines_main->create()) {
                    exit($this->error($t_busines_main->getError()));
                } else {
                    $mid = filter_var(I('post.mid'), FILTER_VALIDATE_INT);
                    $status = filter_var(I('post.status'), FILTER_SANITIZE_STRING);
                    if ('edit' == $status && $mid != '') {
                        $o = $t_busines_main->where(array('mid' => $mid, 'token' => session('token'), 'type' => $type))->save($_POST);
                        if ($o) {
                            exit($this->success('修改成功', U("Business/classify", array('token' => session('token'), 'type' => $type))));
                        } else {
                            exit($this->error('修改失败', U("Business/classify", array('token' => session('token'), 'type' => $type))));
                        }
                    } else {
                        if ($id = $t_busines_main->data($_POST)->add()) {
                            $this->success('添加成功', U("Business/classify", array('token' => session('token'), 'type' => $type)));
                            exit;
                        } else {
                            exit($this->error('务器繁忙,添加失败,请稍候再试', U("Business/classify", array('token' => session('token'), 'type' => $type))));
                        }
                    }
                }
            }
        }
        $this->assign('busines_main', $busines_main);
        $this->display();
    }

    /**
     * Delete a classification after verifying ownership.
     */
    public function classify_del() {
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        $mid = filter_var(I('get.mid'), FILTER_VALIDATE_INT);
        $t_busines_main = M('busines_main');
        $find = array('mid' => $mid, 'type' => $type, 'token' => session('token'));
        $result = $t_busines_main->where($find)->find();
        if ($result) {
            $t_busines_main->where(array('mid' => $result['mid'], 'type' => $result['type'], 'token' => session('token')))->delete();
            exit($this->success('删除成功', U("Business/classify", array('token' => session('token'), 'type' => $result['type']))));
        } else {
            exit($this->error('非法操作,请稍候再试', U("Business/classify", array('token' => session('token'), 'type' => $type))));
        }
    }

    /**
     * Paginated list of service projects, annotated with the name of the
     * classification each belongs to.
     */
    public function project() {
        $data = D('busines_second');
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        $where = array('token' => session('token'), 'type' => $type);
        $count = $data->where($where)->count();
        $Page = new \Think\Page($count, 20);
        $show = $Page->show();
        $busines_second = $data->where($where)->order('sort desc')->limit($Page->firstRow . ',' . $Page->listRows)->select();
        $i = 0;
        foreach ($busines_second as $val) {
            $busines = M("busines_main")->where(array('token' => session('token'), 'mid' => $val['mid_id']))->field('name')->find();
            array_push($busines_second[$i], $busines['name']);
            unset($busines);
            ++$i;
        }
        $this->assign('page', $show);
        $this->assign('busines_second', $busines_second);
        $this->display();
    }

    /**
     * Add or edit a service project (busines_second) under a classification.
     */
    public function project_add() {
        $t_busines_main = M("busines_main");
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        $where = array('token' => session('token'), 'type' => $type);
        $busines_list = $t_busines_main->where($where)->order('sort desc')->field('mid,name')->select();
        $this->assign('busines_list', $busines_list);
        $t_busines_second = D('busines_second');
        $sid = filter_var(I('get.sid'), FILTER_VALIDATE_INT);
        $where_2 = array('token' => session('token'), 'type' => $type, 'sid' => $sid);
        $busines_second = $t_busines_second->where($where_2)->find();
        if (IS_POST) {
            $filters = array('name' => array('filter' => FILTER_SANITIZE_STRIPPED, 'flags' => FILTER_SANITIZE_STRING, 'options' => FILTER_SANITIZE_ENCODED), 'main_desc' => array('filter' => FILTER_SANITIZE_STRIPPED, 'flags' => FILTER_SANITIZE_STRING, 'options' => FILTER_SANITIZE_ENCODED));
            $check = filter_var_array($_POST, $filters);
            if (!$check) {
                exit($this->error('表单包含敏感字符!'));
            } else {
                $_POST['token'] = session('token');
                if (!$t_busines_second->create()) {
                    exit($this->error($t_busines_second->getError()));
                } else {
                    $sid = filter_var(I('post.sid'), FILTER_VALIDATE_INT);
                    $status = filter_var(I('post.status'), FILTER_SANITIZE_STRING);
                    if ('edit' == $status && $sid != '') {
                        $o = $t_busines_second->where(array('sid' => $sid, 'token' => session('token'), 'type' => $type))->save($_POST);
                        if ($o) {
                            exit($this->success('修改成功', U("Business/project", array('token' => session('token'), 'type' => $type))));
                        } else {
                            exit($this->error('修改失败', U("Business/project", array('token' => session('token'), 'type' => $type))));
                        }
                    } else {
                        if ($id = $t_busines_second->data($_POST)->add()) {
                            $this->success('添加成功', U("Business/project", array('token' => session('token'), 'type' => $type)));
                            exit;
                        } else {
                            exit($this->error('务器繁忙,添加失败,请稍候再试', U("Business/project", array('token' => session('token'), 'type' => $type))));
                        }
                    }
                }
            }
        }
        $this->assign('busines_second', $busines_second);
        $this->display();
    }

    /**
     * Delete a service project after verifying ownership.
     */
    public function project_del() {
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        $sid = filter_var(I('get.sid'), FILTER_VALIDATE_INT);
        $t_busines_main = M('busines_second');
        $find = array('sid' => $sid, 'type' => $type, 'token' => session('token'));
        $result = $t_busines_main->where($find)->find();
        if ($result) {
            $t_busines_main->where(array('sid' => $result['sid'], 'type' => $result['type'], 'token' => session('token')))->delete();
            exit($this->success('删除成功', U("Business/project", array('token' => session('token'), 'type' => $result['type']))));
        } else {
            exit($this->error('非法操作,请稍候再试', U("Business/project", array('token' => session('token'), 'type' => $type))));
        }
    }

    /**
     * Paginated list of posters, annotated with the owning merchant's title
     * and the linked photo album's title.
     */
    public function poster() {
        $data = D('busines_pic');
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        $where = array('token' => session('token'), 'type' => $type);
        $count = $data->where($where)->count();
        $Page = new \Think\Page($count, 20);
        $show = $Page->show();
        $busines_pic = $data->where($where)->order('pid desc')->limit($Page->firstRow . ',' . $Page->listRows)->select();
        $i = 0;
        $j = 0;
        // $i and $j always move in lockstep; both pushes land on the same row.
        foreach ($busines_pic as $val) {
            $busines = M("busines")->where(array('token' => session('token'), 'bid' => $val['bid_id']))->field('mtitle')->find();
            $photo = M('photo')->where(array('token' => session('token'), 'id' => $val['ablum_id']))->field('title')->find();
            array_push($busines_pic[$i], $busines['mtitle']);
            array_push($busines_pic[$j], $photo['title']);
            unset($busines);
            unset($photo);
            ++$j;
            ++$i;
        }
        $this->assign('page', $show);
        $this->assign('busines_pic', $busines_pic);
        $this->display();
    }

    /**
     * Add or edit a poster (up to five image URLs) for a merchant.
     */
    public function poster_add() {
        $t_busines = M("busines");
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        $where = array('token' => session('token'), 'type' => $type);
        $busines_list = $t_busines->where($where)->order('sort desc')->field('bid,mtitle')->select();
        $this->assign('busines_list', $busines_list);
        $photo = M('photo')->where(array('token' => session('token'), 'status' => 1))->order('id desc')->field('id,title')->select();
        $this->assign('photo', $photo);
        $t_busines_second = D('busines_pic');
        $pid = filter_var(I('get.pid'), FILTER_VALIDATE_INT);
        $where_2 = array('token' => session('token'), 'type' => $type, 'pid' => $pid);
        $busines_second = $t_busines_second->where($where_2)->find();
        if (IS_POST) {
            $filters = array('picurl_1' => array('filter' => FILTER_VALIDATE_URL), 'picurl_2' => array('filter' => FILTER_VALIDATE_URL), 'picurl_3' => array('filter' => FILTER_VALIDATE_URL), 'picurl_4' => array('filter' => FILTER_VALIDATE_URL), 'picurl_5' => array('filter' => FILTER_VALIDATE_URL));
            $check = filter_var_array($_POST, $filters);
            if (!$check) {
                exit($this->error('包含特殊字符,请检查后再提交.', U("Business/poster", array('token' => session('token'), 'type' => $type))));
            } else {
                $_POST['token'] = session('token');
                if (!$t_busines_second->create()) {
                    exit($this->error($t_busines_second->getError()));
                } else {
                    $pid = filter_var(I('post.pid'), FILTER_VALIDATE_INT);
                    $status = filter_var(I('post.status'), FILTER_SANITIZE_STRING);
                    if ('edit' == $status && $pid != '') {
                        $o = $t_busines_second->where(array('pid' => $pid, 'token' => session('token'), 'type' => $type))->save($_POST);
                        if ($o) {
                            exit($this->success('修改成功', U("Business/poster", array('token' => session('token'), 'type' => $type))));
                        } else {
                            exit($this->error('修改失败', U("Business/poster", array('token' => session('token'), 'type' => $type))));
                        }
                    } else {
                        if ($id = $t_busines_second->data($_POST)->add()) {
                            $this->success('添加成功', U("Business/poster", array('token' => session('token'), 'type' => $type)));
                            exit;
                        } else {
                            exit($this->error('务器繁忙,添加失败,请稍候再试', U("Business/poster", array('token' => session('token'), 'type' => $type))));
                        }
                    }
                }
            }
        }
        $this->assign('busines_second', $busines_second);
        $this->display();
    }

    /**
     * Delete a poster after verifying ownership.
     */
    public function poster_del() {
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        $pid = filter_var(I('get.pid'), FILTER_VALIDATE_INT);
        $t_busines_main = M('busines_pic');
        $find = array('pid' => $pid, 'type' => $type, 'token' => session('token'));
        $result = $t_busines_main->where($find)->find();
        if ($result) {
            $t_busines_main->where(array('pid' => $result['pid'], 'type' => $result['type'], 'token' => session('token')))->delete();
            exit($this->success('删除成功', U("Business/poster", array('token' => session('token'), 'type' => $result['type']))));
        } else {
            exit($this->error('非法操作!请稍候再试', U("Business/poster", array('token' => session('token'), 'type' => $type))));
        }
    }

    /**
     * Paginated list of customer comments, annotated with each owning
     * merchant's title.
     */
    public function comments() {
        $data = D('busines_comment');
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        $where = array('token' => session('token'), 'type' => $type);
        $count = $data->where($where)->count();
        $Page = new \Think\Page($count, 20);
        $show = $Page->show();
        $comments = $data->where($where)->order('sort desc')->limit($Page->firstRow . ',' . $Page->listRows)->select();
        $i = 0;
        foreach ($comments as $val) {
            $busines = M("busines")->where(array('token' => session('token'), 'bid' => $val['bid_id']))->field('mtitle')->find();
            array_push($comments[$i], $busines['mtitle']);
            unset($busines);
            ++$i;
        }
        $this->assign('page', $show);
        $this->assign('comments', $comments);
        $this->display();
    }

    /**
     * Add or edit a customer comment for a merchant.
     */
    public function comments_add() {
        $t_busines = M("busines");
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        $where = array('token' => session('token'), 'type' => $type);
        $busines_list = $t_busines->where($where)->order('sort desc')->field('bid,mtitle')->select();
        $this->assign('busines_list', $busines_list);
        $t_busines_comment = D('busines_comment');
        $cid = filter_var(I('get.cid'), FILTER_VALIDATE_INT);
        $where_2 = array('token' => session('token'), 'type' => $type, 'cid' => $cid);
        $comments = $t_busines_comment->where($where_2)->find();
        if (IS_POST) {
            $filters = array('name' => array('filter' => FILTER_SANITIZE_STRIPPED, 'flags' => FILTER_SANITIZE_STRING, 'options' => FILTER_SANITIZE_ENCODED), 'face_picurl' => array('filter' => FILTER_VALIDATE_URL), 'position' => array('filter' => FILTER_SANITIZE_STRIPPED, 'flags' => FILTER_SANITIZE_STRING, 'options' => FILTER_SANITIZE_ENCODED), 'face_desc' => array('filter' => FILTER_SANITIZE_STRIPPED, 'flags' => FILTER_SANITIZE_STRING, 'options' => FILTER_SANITIZE_ENCODED), 'comment' => array('filter' => FILTER_SANITIZE_STRIPPED, 'flags' => FILTER_SANITIZE_STRING, 'options' => FILTER_SANITIZE_ENCODED));
            $check = filter_var_array($_POST, $filters);
            if (!$check) {
                exit($this->error('表单包含不允许字符.', U("Business/comments", array('token' => session('token'), 'type' => $type))));
            } else {
                $_POST['token'] = session('token');
                if (!$t_busines_comment->create()) {
                    exit($this->error($t_busines_comment->getError()));
                } else {
                    $cid = filter_var(I('post.cid'), FILTER_VALIDATE_INT);
                    $status = filter_var(I('post.status'), FILTER_SANITIZE_STRING);
                    if ('edit' == $status && $cid != '') {
                        $o = $t_busines_comment->where(array('cid' => $cid, 'token' => session('token'), 'type' => $type))->save($_POST);
                        if ($o) {
                            exit($this->success('修改成功', U("Business/comments", array('token' => session('token'), 'type' => $type))));
                        } else {
                            exit($this->error('修改失败', U("Business/comments", array('token' => session('token'), 'type' => $type))));
                        }
                    } else {
                        if ($id = $t_busines_comment->data($_POST)->add()) {
                            $this->success('添加成功', U("Business/comments", array('token' => session('token'), 'type' => $type)));
                            exit;
                        } else {
                            exit($this->error('服务器繁忙,添加失败,请稍候再试', U("Business/comments", array('token' => session('token'), 'type' => $type))));
                        }
                    }
                }
            }
        }
        $this->assign('comments', $comments);
        $this->display();
    }

    /**
     * Delete a comment after verifying ownership.
     */
    public function comments_del() {
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        $cid = filter_var(I('get.cid'), FILTER_VALIDATE_INT);
        $t_busines_main = M('busines_comment');
        $find = array('cid' => $cid, 'type' => $type, 'token' => session('token'));
        $result = $t_busines_main->where($find)->find();
        if ($result) {
            $t_busines_main->where(array('cid' => $result['cid'], 'type' => $result['type'], 'token' => session('token')))->delete();
            exit($this->success('删除成功', U("Business/comments", array('token' => session('token'), 'type' => $result['type']))));
        } else {
            exit($this->error('非法操作!请稍候再试', U("Business/comments", array('token' => session('token'), 'type' => $type))));
        }
    }

    /**
     * Paginated reservation-order list plus paid/unpaid/uncalled counters.
     */
    public function orders() {
        $t_reservebook = M('Reservebook');
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        // The numeric-keyed "orderid!=''" entry is a raw string condition
        // mixed into the array — NOTE(review): verify ThinkPHP merges it
        // as intended for this model.
        $where = array('token' => session('token'), 'type' => $type, "orderid!=''");
        $count = $t_reservebook->where($where)->count();
        $Page = new \Think\Page($count, 50);
        $show = $Page->show();
        $books = $t_reservebook->where($where)->order('booktime DESC')->limit($Page->firstRow . ',' . $Page->listRows)->select();
        $this->assign('page', $show);
        $this->assign('books', $books);
        $this->assign('count', $t_reservebook->where($where)->count());
        $where2 = array('token' => session('token'), 'type' => $type, 'paid' => 1);
        $where3 = array('token' => session('token'), 'type' => $type, 'paid' => 0);
        $where4 = array('token' => session('token'), 'type' => $type, 'remate' => 0);
        $this->assign('ok_count', $t_reservebook->where($where2)->count());
        $this->assign('lose_count', $t_reservebook->where($where3)->count());
        $this->assign('call_count', $t_reservebook->where($where4)->count());
        $this->display();
    }

    /**
     * Delete a reservation order after verifying ownership.
     */
    public function order_del() {
        $id = filter_var(I('get.id'), FILTER_VALIDATE_INT);
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        $t_reservebook = M('Reservebook');
        $where = array('id' => $id, 'token' => session('token'), 'type' => $type);
        $check = $t_reservebook->where($where)->find();
        if (!empty($check)) {
            $t_reservebook->where(array('id' => $check['id'], 'token' => session('token'), 'type' => $type))->delete();
            $this->success('删除成功', U("Business/orders", array('token' => session('token'), 'type' => $type)));
            exit;
        } else {
            $this->error('非法操作!', U("Business/orders", array('token' => session('token'), 'type' => $type)));
            exit;
        }
    }

    /**
     * Show a single reservation order; on POST, update it with the
     * submitted fields and expose an 'ok' flag (1 success / 2 failure)
     * to the template.
     */
    public function orders_list() {
        $id = filter_var(I('get.id'), FILTER_VALIDATE_INT);
        $type = filter_var(I('get.type'), FILTER_SANITIZE_STRING);
        $token = session('token');
        $where = array('id' => $id, 'token' => $token, 'type' => $type);
        $t_reservebook = M('reservebook');
        $userinfo = $t_reservebook->where($where)->find();
        $this->assign('userinfo', $userinfo);
        if (IS_POST) {
            $id = filter_var(I('post.id'), FILTER_VALIDATE_INT);
            // NOTE(review): 'post.type' is validated as an integer here,
            // although 'type' is a string everywhere else in this
            // controller (and is unused below) — looks like a slip; confirm.
            $type = filter_var(I('post.type'), FILTER_VALIDATE_INT);
            $token = session('token');
            $where = array('id' => $id, 'token' => $token);
            $ok = $t_reservebook->where($where)->save($_POST);
            if ($ok) {
                $this->assign('ok', 1);
            } else {
                $this->assign('ok', 2);
            }
        }
        $this->display();
    }
}
| apache-2.0 |
bmaxa/changes_and_fixes | src/test/run-pass/region-dependent-addr-of.rs | 2644 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test lifetimes are linked properly when we create dependent region pointers.
// Issue #3148.
// Outer aggregate: every borrow in the tests below flows through `value`.
struct A {
    value: B
}
// One field for each place a dependent borrow can point into: a scalar,
// a fixed-size vector, an owned vector, an inline struct, an owned box
// and an Option. (Pre-1.0 syntax: `int`, `[T, ..N]`, `~[T]`, `~T`.)
struct B {
    v1: int,
    v2: [int, ..3],
    v3: ~[int],
    v4: C,
    v5: ~C,
    v6: Option<C>
}
// Innermost payload; the accessors return references to `f`.
struct C {
    f: int
}
// Borrow through an intermediate local: the region of `foo` must be
// inferred to cover the returned reference while staying inside `'v`.
fn get_v1<'v>(a: &'v A) -> &'v int {
    // Region inferencer must deduce that &v < L2 < L1
    let foo = &a.value; // L1
    &foo.v1 // L2
}
// As get_v1, but the dependent borrow indexes a fixed-size vector.
fn get_v2<'v>(a: &'v A, i: uint) -> &'v int {
    let foo = &a.value;
    &foo.v2[i]
}
// As get_v1, but the dependent borrow indexes an owned (`~[T]`) vector.
fn get_v3<'v>(a: &'v A, i: uint) -> &'v int {
    let foo = &a.value;
    &foo.v3[i]
}
// As get_v1, but the dependent borrow reaches through an inline struct field.
fn get_v4<'v>(a: &'v A, _i: uint) -> &'v int {
    let foo = &a.value;
    &foo.v4.f
}
// As get_v1, but the dependent borrow dereferences an owned box (`~C`).
fn get_v5<'v>(a: &'v A, _i: uint) -> &'v int {
    let foo = &a.value;
    &foo.v5.f
}
// Dependent borrow out of an Option via a `ref` binding in a match arm;
// fails hard (test-only) when the Option is None.
fn get_v6_a<'v>(a: &'v A, _i: uint) -> &'v int {
    match a.value.v6 {
        Some(ref v) => &v.f,
        None => fail!()
    }
}
// Same as get_v6_a, but destructuring the whole struct (`match *a`) with a
// nested pattern instead of matching the Option field directly.
fn get_v6_b<'v>(a: &'v A, _i: uint) -> &'v int {
    match *a {
        A { value: B { v6: Some(ref v), _ } } => &v.f,
        _ => fail!()
    }
}
// Same as get_v6_b, but matching on the reference itself (`&A { .. }` pattern).
fn get_v6_c<'v>(a: &'v A, _i: uint) -> &'v int {
    match a {
        &A { value: B { v6: Some(ref v), _ } } => &v.f,
        _ => fail!()
    }
}
// Borrow obtained by matching on `&a.value` and binding through the owned
// box with a `ref` pattern inside `~C { .. }`.
fn get_v5_ref<'v>(a: &'v A, _i: uint) -> &'v int {
    match &a.value {
        &B {v5: ~C {f: ref v}, _} => v
    }
}
// Drives every accessor above and checks that each returned reference
// still observes the original field value.
pub fn main() {
    let a = A {value: B {v1: 22,
                         v2: [23, 24, 25],
                         v3: ~[26, 27, 28],
                         v4: C { f: 29 },
                         v5: ~C { f: 30 },
                         v6: Some(C { f: 31 })}};

    let p = get_v1(&a);
    assert_eq!(*p, a.value.v1);

    let p = get_v2(&a, 1);
    assert_eq!(*p, a.value.v2[1]);

    let p = get_v3(&a, 1);
    assert_eq!(*p, a.value.v3[1]);

    let p = get_v4(&a, 1);
    assert_eq!(*p, a.value.v4.f);

    let p = get_v5(&a, 1);
    assert_eq!(*p, a.value.v5.f);

    let p = get_v6_a(&a, 1);
    assert_eq!(*p, a.value.v6.unwrap().f);

    let p = get_v6_b(&a, 1);
    assert_eq!(*p, a.value.v6.unwrap().f);

    let p = get_v6_c(&a, 1);
    assert_eq!(*p, a.value.v6.unwrap().f);

    let p = get_v5_ref(&a, 1);
    assert_eq!(*p, a.value.v5.f);
}
| apache-2.0 |
porcelli-forks/drools-wb | drools-wb-screens/drools-wb-scenario-simulation-editor/drools-wb-scenario-simulation-editor-client/src/test/java/org/drools/workbench/screens/scenariosimulation/client/rightpanel/AbstractTestToolsTest.java | 5568 | /*
* Copyright 2018 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.drools.workbench.screens.scenariosimulation.client.rightpanel;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.stream.IntStream;
import com.google.gwt.dom.client.LIElement;
import org.drools.workbench.screens.scenariosimulation.client.editor.strategies.DataManagementStrategy;
import org.drools.workbench.screens.scenariosimulation.client.utils.ViewsProvider;
import org.drools.workbench.screens.scenariosimulation.model.typedescriptor.FactModelTree;
import org.junit.Before;
import org.mockito.Mock;
import static org.drools.workbench.screens.scenariosimulation.client.TestProperties.FACT_PACKAGE;
import static org.drools.workbench.screens.scenariosimulation.client.TestProperties.LOWER_CASE_VALUE;
abstract class AbstractTestToolsTest {
@Mock
protected LIElement lIElementMock;
@Mock
protected ViewsProvider viewsProviderMock;
protected SortedMap<String, FactModelTree> dataObjectFactTreeMap;
protected SortedMap<String, FactModelTree> simpleJavaTypeTreeMap;
protected SortedMap<String, FactModelTree> instanceFactTreeMap;
protected SortedMap<String, FactModelTree> simpleJavaInstanceFactTreeMap;
protected FactModelTree FACT_MODEL_TREE;
protected String localFactName;
@Before
public void setup() {
dataObjectFactTreeMap = getDataObjectFactTreeMap();
simpleJavaTypeTreeMap = getSimpleJavaTypeFieldsMap();
instanceFactTreeMap = new TreeMap<>();
dataObjectFactTreeMap.keySet().forEach(key -> instanceFactTreeMap.put(getRandomString(), dataObjectFactTreeMap.get(key)));
simpleJavaInstanceFactTreeMap = new TreeMap<>();
localFactName = new ArrayList<>(dataObjectFactTreeMap.keySet()).get(0);
FACT_MODEL_TREE = dataObjectFactTreeMap.get(localFactName);
}
protected String getRandomFactModelTree(Map<String, FactModelTree> source, int position) {
return new ArrayList<>(source.keySet()).get(position);
}
protected SortedMap<String, FactModelTree> getDataObjectFactTreeMap() {
SortedMap<String, FactModelTree> toReturn = new TreeMap<>();
IntStream
.range(0, 3)
.forEach(id -> {
String key = getRandomString();
FactModelTree value = new FactModelTree(key, FACT_PACKAGE, getMockSimpleProperties(), new HashMap<>());
toReturn.put(key, value);
if (id == 1) {
value.addSimpleProperty(getRandomString(), getRandomFactModelTree(toReturn, 0));
}
if (id == 2) {
value.addSimpleProperty(getRandomString(), getRandomFactModelTree(toReturn, 1));
// Recursion
value.addSimpleProperty(getRandomString(), value.getFactName());
}
});
return toReturn;
}
protected SortedMap<String, FactModelTree> getSimpleJavaTypeFieldsMap() {
SortedMap<String, FactModelTree> toReturn = new TreeMap<>();
for (String key : DataManagementStrategy.SIMPLE_CLASSES_MAP.keySet()) {
Map<String, String> simpleProperties = new HashMap<>();
String fullName = DataManagementStrategy.SIMPLE_CLASSES_MAP.get(key).getCanonicalName();
simpleProperties.put(LOWER_CASE_VALUE, fullName);
String packageName = fullName.substring(0, fullName.lastIndexOf("."));
FactModelTree value = new FactModelTree(key, packageName, simpleProperties, new HashMap<>());
toReturn.put(key, value);
}
return toReturn;
}
protected Map<String, String> getMockSimpleProperties() {
Map<String, String> toReturn = new HashMap<>();
IntStream
.range(0, +3)
.forEach(id -> toReturn.put(getRandomString(), getRandomType()));
return toReturn;
}
protected String getRandomString() {
String letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
StringBuilder builder = new StringBuilder();
int numberOfLetters = letters.length();
Random random = new Random();
int sizeOfRandomString = random.nextInt(6) + 3;
IntStream
.range(0, sizeOfRandomString)
.forEach(position -> builder.append(letters.charAt(random.nextInt(numberOfLetters))));
return builder.toString();
}
protected String getRandomType() {
int type = new Random().nextInt(4);
switch (type) {
case 0:
return "lava.lang.String";
case 1:
return "byte";
case 2:
return "java.lang.Integer";
case 3:
return "java.lang.Boolean";
default:
return "int";
}
}
} | apache-2.0 |
ngageoint/geowave | extensions/formats/geolife/src/main/java/org/locationtech/geowave/format/geolife/GeoLifeIngestFormat.java | 1524 | /**
* Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
*
* <p> See the NOTICE file distributed with this work for additional information regarding copyright
* ownership. All rights reserved. This program and the accompanying materials are made available
* under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
* available at http://www.apache.org/licenses/LICENSE-2.0.txt
*/
package org.locationtech.geowave.format.geolife;
import org.locationtech.geowave.adapter.vector.ingest.AbstractSimpleFeatureIngestFormat;
import org.locationtech.geowave.adapter.vector.ingest.AbstractSimpleFeatureIngestPlugin;
import org.locationtech.geowave.core.ingest.avro.AvroWholeFile;
import org.locationtech.geowave.core.store.ingest.IngestFormatOptions;
/**
* This represents an ingest format plugin provider for GeoLife data. It will support ingesting
* directly from a local file system or staging data from a local files system and ingesting into
* GeoWave using a map-reduce job.
*/
/**
 * Ingest format plugin provider for Microsoft Research GeoLife trajectory
 * data. Supports ingesting directly from a local file system, or staging
 * data from a local file system and ingesting into GeoWave via a
 * map-reduce job.
 */
public class GeoLifeIngestFormat extends AbstractSimpleFeatureIngestFormat<AvroWholeFile> {

  /** Name under which this format is registered. */
  private static final String FORMAT_NAME = "geolife";

  /** Human-readable description shown in format listings. */
  private static final String FORMAT_DESCRIPTION =
      "files from Microsoft Research GeoLife trajectory data set";

  @Override
  protected AbstractSimpleFeatureIngestPlugin<AvroWholeFile> newPluginInstance(
      final IngestFormatOptions options) {
    // The supplied options are not consumed when constructing the plugin.
    return new GeoLifeIngestPlugin();
  }

  @Override
  public String getIngestFormatName() {
    return FORMAT_NAME;
  }

  @Override
  public String getIngestFormatDescription() {
    return FORMAT_DESCRIPTION;
  }
}
| apache-2.0 |
jim-cooley/abletonremotescripts | remote-scripts/branches/VCM600_XL/consts.py | 1510 | # Constants
# Surface dimensions.
NUM_TRACKS = 12
NUM_SCENES = 0
NUM_RETURNS = 2

# Channel definitions (presumably 0-based MIDI channels -- verify against the
# remote-script framework's convention).
VCM_CHANNEL = 12
TRACK_1_CHANNEL = 0

# Control Definitions (identifiers for physical controls; presumably MIDI
# CC / note numbers -- confirm against the script's usage):
SCENE_WHEEL_DOWN = 89
SCENE_WHEEL_UP = 90
SCENE_WHEEL_CLICK = 87
CROSS_FADER = 8
CUE_VOLUME = 24

# Master Controls:
MASTER_VOLUME = 7
MASTER_PAN = 10

# Track Buttons:
TRACK_STOP = 68
TRACK_PLAY = 69

# Track Controls (per-strip):
NUM_TRACK_GAINS = 3
TRACK_PAN = 10
TRACK_GAIN_HIGH = 16
TRACK_GAIN_MED = 17
TRACK_GAIN_LOW = 18
TRACK_SEND_A = 19
TRACK_SEND_B = 20
TRACK_RESONANCE = 21
TRACK_FREQUENCY = 22
TRACK_VOLUME = 23
TRACK_HIGH_CUT = 60
TRACK_MED_CUT = 61
TRACK_LOW_CUT = 62
TRACK_MUTE = 63
TRACK_SOLO = 64
TRACK_CF_ASSIGN = 65  # Assign track to Cross Fader
TRACK_VIEW_CLIP = 66
TRACK_VIEW_DEVICE = 67
# Track selection is mapped to the same control as the device view button.
TRACK_SELECT = TRACK_VIEW_DEVICE

# Return Definitions: (for Return A)
RETURN_PAN = 20
RETURN_VOLUME = 22
RETURN_MUTE = 78

# Array definitions to get around control CC id issues:
# (These are all in a vertical line on the hardware.)
TRACK_GAIN_CONTROLS = [TRACK_GAIN_HIGH, TRACK_GAIN_MED, TRACK_GAIN_LOW, TRACK_PAN, TRACK_SEND_B, TRACK_FREQUENCY]

# Device Controls:
NUM_DEVICE_BUTTONS = 8
DEVICE_BUTTON_ROW_1 = 70
DEVICE_BUTTON_ROW_2 = 74
DEVICE_BUTTON_ROW_3 = 78  # 78, 79, 80, 81
DEVICE_PARAM_ROW_1 = 12
DEVICE_PARAM_ROW_2 = 16
DEVICE_PARAM_ROW_3 = 20  # row three goes: 20, 21, 24, 10

# Transport Controls:
TRANSPORT_PLAY = 80
TRANSPORT_RECORD = 81
TRANSPORT_LOOP = 84
LOOP_IN = 82
LOOP_OUT = 83
TEMPO_NUDGE_RIGHT = 86
TEMPO_NUDGE_LEFT = 85
TEMPO_FINE = 25
# NOTE: "COURSE" is presumably a typo for "coarse"; name kept for
# compatibility with any code that imports it.
TEMPO_COURSE = 26
bsa01/qbit | qbit/vertx/src/main/java/io/advantageous/qbit/vertx/http/server/VertxHttpResponseReceiver.java | 3904 | package io.advantageous.qbit.vertx.http.server;
import io.advantageous.qbit.http.request.HttpResponseCreator;
import io.advantageous.qbit.http.request.HttpResponseDecorator;
import io.advantageous.qbit.http.HttpStatus;
import io.advantageous.qbit.http.request.HttpResponse;
import io.advantageous.qbit.http.request.HttpResponseReceiver;
import io.advantageous.qbit.util.MultiMap;
import org.vertx.java.core.buffer.Buffer;
import org.vertx.java.core.http.HttpServerResponse;
import java.nio.charset.StandardCharsets;
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;
/**
 * Bridges QBit's {@link HttpResponseReceiver} onto a Vert.x
 * {@link HttpServerResponse}: runs any registered response decorators,
 * then writes status, headers, and body to the underlying Vert.x response.
 */
public class VertxHttpResponseReceiver implements HttpResponseReceiver<Object> {

    private final HttpServerResponse response;
    private final CopyOnWriteArrayList<HttpResponseDecorator> decorators;
    private final HttpResponseCreator httpResponseCreator;

    // Request details captured so decorators can inspect the original request.
    private final String requestPath;
    private final MultiMap<String, String> requestHeaders;
    private final MultiMap<String, String> requestParams;

    public VertxHttpResponseReceiver(final String requestPath,
                                     final MultiMap<String, String> headers,
                                     final MultiMap<String, String> params,
                                     final HttpServerResponse response,
                                     final CopyOnWriteArrayList<HttpResponseDecorator> decorators,
                                     final HttpResponseCreator httpResponseCreator) {
        this.response = response;
        this.decorators = decorators;
        this.httpResponseCreator = httpResponseCreator;
        this.requestPath = requestPath;
        this.requestHeaders = headers;
        this.requestParams = params;
    }

    @Override
    public void response(int code, String contentType, Object body) {
        // Delegate to the header-aware overload with no extra headers.
        response(code, contentType, body, MultiMap.empty());
    }

    @Override
    public void response(final int code, final String contentType, final Object body,
                         final MultiMap<String, String> responseHeaders) {
        // Give registered decorators a chance to replace the response; when no
        // decorators are registered the creator is skipped entirely.
        final HttpResponse<?> response = decorators.size() > 0 ? httpResponseCreator.createResponse(
                decorators, requestPath, code, contentType, body,
                responseHeaders, this.requestHeaders,
                this.requestParams) : null;
        if (response == null) {
            // No decorator produced a replacement: send the original response.
            // (The original comments here were inverted relative to the branch.)
            doResponse(code, contentType, body, responseHeaders);
        } else {
            // A decorator replaced the response: send the decorated version.
            doResponse(response.code(), response.contentType(), response.body(), response.headers());
        }
    }

    // Writes status line, headers, and body to the Vert.x response and ends it.
    private void doResponse(int code, String contentType, Object body, MultiMap<String, String> headers) {
        if (!headers.isEmpty()) {
            for (Map.Entry<String, Collection<String>> entry : headers) {
                this.response.putHeader(entry.getKey(), entry.getValue());
            }
        }
        this.response.putHeader("Content-Type", contentType);
        this.response.setStatusCode(code);
        this.response.setStatusMessage(HttpStatus.message(code));
        Buffer buffer = createBuffer(body, this.response);
        // NOTE(review): createBuffer returns null for bodies that are neither
        // byte[] nor String; confirm response.end(null) is acceptable here.
        this.response.end(buffer);
    }

    // Converts a byte[] or String body into a Vert.x Buffer, setting the
    // Content-Length header as a side effect. Returns null for other types.
    private static Buffer createBuffer(Object body, HttpServerResponse response) {
        Buffer buffer = null;
        if (body instanceof byte[]) {
            byte[] bBody = ((byte[]) body);
            response.putHeader("Content-Length", String.valueOf(bBody.length));
            buffer = new Buffer(bBody);
        } else if (body instanceof String) {
            // Strings are always encoded as UTF-8.
            String sBody = ((String) body);
            byte[] bBody = sBody.getBytes(StandardCharsets.UTF_8);
            response.putHeader("Content-Length", String.valueOf(bBody.length));
            buffer = new Buffer(bBody);
        }
        return buffer;
    }
}
aws/aws-sdk-go | service/route53/unmarshal_error_test.go | 3029 | //go:build go1.8
// +build go1.8
package route53
import (
"io/ioutil"
"net/http"
"strings"
"testing"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
// TestUnmarshalInvalidChangeBatch verifies Route 53 error-response
// unmarshaling for both the standard <ErrorResponse> shape and the
// <InvalidChangeBatch> shape, whose per-change messages must be surfaced
// as batched errors on the request.
func TestUnmarshalInvalidChangeBatch(t *testing.T) {
	// Message embedded in the batched-error fixture below; the surrounding
	// newlines are part of the expected text.
	const errorMessage = `
Tried to create resource record set duplicate.example.com. type A,
but it already exists
`
	type batchError struct {
		Code, Message string
	}
	// Table of fixtures: a canned HTTP response plus the error fields the
	// unmarshaler is expected to extract from it.
	cases := map[string]struct {
		Request                  *request.Request
		Code, Message, RequestID string
		StatusCode               int
		BatchErrors              []batchError
	}{
		"standard error": {
			Request: &request.Request{
				HTTPResponse: &http.Response{
					StatusCode: 400,
					Header:     http.Header{},
					Body: ioutil.NopCloser(strings.NewReader(
						`<?xml version="1.0" encoding="UTF-8"?>
<ErrorResponse xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<Error>
<Code>InvalidDomainName</Code>
<Message>The domain name is invalid</Message>
</Error>
<RequestId>12345</RequestId>
</ErrorResponse>`)),
				},
			},
			Code: "InvalidDomainName", Message: "The domain name is invalid",
			StatusCode: 400, RequestID: "12345",
		},
		"batched error": {
			Request: &request.Request{
				HTTPResponse: &http.Response{
					StatusCode: 400,
					Header:     http.Header{},
					Body: ioutil.NopCloser(strings.NewReader(
						`<?xml version="1.0" encoding="UTF-8"?>
<InvalidChangeBatch xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<Messages>
<Message>` + errorMessage + `</Message>
</Messages>
<RequestId>12345</RequestId>
</InvalidChangeBatch>`)),
				},
			},
			Code: "InvalidChangeBatch", Message: "ChangeBatch errors occurred",
			StatusCode: 400, RequestID: "12345",
			BatchErrors: []batchError{
				{Code: "InvalidChangeBatch", Message: errorMessage},
			},
		},
	}
	for name, c := range cases {
		t.Run(name, func(t *testing.T) {
			// Exercise the unmarshaler, which stores its result on the request.
			unmarshalChangeResourceRecordSetsError(c.Request)
			err := c.Request.Error
			if err == nil {
				t.Error("expected error, but received none")
			}
			// Top-level request-failure fields.
			reqErr := err.(awserr.RequestFailure)
			if e, a := c.StatusCode, reqErr.StatusCode(); e != a {
				t.Errorf("expected %d status, got %d", e, a)
			}
			if e, a := c.Code, reqErr.Code(); e != a {
				t.Errorf("expected %v code, got %v", e, a)
			}
			if e, a := c.Message, reqErr.Message(); e != a {
				t.Errorf("expected %q message, got %q", e, a)
			}
			if e, a := c.RequestID, reqErr.RequestID(); e != a {
				t.Errorf("expected %v request ID, got %v", e, a)
			}
			// Per-change messages must appear as batched origin errors.
			batchErr := err.(awserr.BatchedErrors)
			batchedErrs := batchErr.OrigErrs()
			if e, a := len(c.BatchErrors), len(batchedErrs); e != a {
				t.Fatalf("expect %v batch errors, got %v", e, a)
			}
			for i, ee := range c.BatchErrors {
				bErr := batchedErrs[i].(awserr.Error)
				if e, a := ee.Code, bErr.Code(); e != a {
					t.Errorf("expect %v code, got %v", e, a)
				}
				if e, a := ee.Message, bErr.Message(); e != a {
					t.Errorf("expect %v message, got %v", e, a)
				}
			}
		})
	}
}
| apache-2.0 |
kuujo/copycat | protocols/raft/src/test/java/io/atomix/protocols/raft/TestPrimitiveType.java | 1773 | /*
* Copyright 2017-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.atomix.protocols.raft;
import io.atomix.primitive.PrimitiveBuilder;
import io.atomix.primitive.config.PrimitiveConfig;
import io.atomix.primitive.PrimitiveManagementService;
import io.atomix.primitive.PrimitiveType;
import io.atomix.primitive.service.PrimitiveService;
import io.atomix.primitive.service.ServiceConfig;
/**
* Test primitive type.
*/
/**
 * Primitive type used by the Raft protocol tests. Only the type name is
 * meaningful; service, config, and builder creation are unsupported.
 */
public class TestPrimitiveType implements PrimitiveType {

  /** Shared singleton instance of this type. */
  private static final TestPrimitiveType SINGLETON = new TestPrimitiveType();

  /**
   * Returns a singleton instance.
   *
   * @return a singleton primitive type instance
   */
  public static TestPrimitiveType instance() {
    return SINGLETON;
  }

  @Override
  public String name() {
    return "test";
  }

  @Override
  public PrimitiveService newService(ServiceConfig config) {
    // Tests never instantiate a real service from this type.
    throw new UnsupportedOperationException();
  }

  @Override
  public PrimitiveConfig newConfig() {
    // Tests never instantiate a config from this type.
    throw new UnsupportedOperationException();
  }

  @Override
  public PrimitiveBuilder newBuilder(String primitiveName, PrimitiveConfig config, PrimitiveManagementService managementService) {
    // Tests never build primitives from this type.
    throw new UnsupportedOperationException();
  }
}
| apache-2.0 |
tamseo/nd4j | nd4j-api/src/main/java/org/nd4j/linalg/api/iter/NdIndexIterator.java | 1511 | package org.nd4j.linalg.api.iter;
import java.util.Iterator;
import java.util.NoSuchElementException;

import org.nd4j.linalg.api.shape.Shape;
import org.nd4j.linalg.util.ArrayUtil;
/**
* Iterates and returns int arrays
* over a particular shape.
*
* This iterator starts at zero and increments
* the shape until each item in the "position"
* hits the current shape
*
* @author Adam Gibson
*/
/**
 * Iterates over a particular shape, returning each coordinate as an
 * {@code int[]}.
 *
 * <p>Iteration starts at linear index zero and advances until every
 * coordinate of the shape has been emitted, in either c (row-major) or
 * f (column-major) order.
 *
 * @author Adam Gibson
 */
public class NdIndexIterator implements Iterator<int[]> {
    // Total number of coordinates (product of the shape dims); -1 until a
    // shape is supplied, which makes hasNext() false.
    private int length = -1;
    // Linear index of the next coordinate to emit.
    private int i = 0;
    private int[] shape;
    // Traversal order: 'c' (row-major) or 'f' (column-major).
    private char order = 'c';

    /**
     * Create an iterator with only an ordering and no shape; such an
     * iterator is empty until a shape-bearing constructor is used instead.
     *
     * @param order 'c' or 'f'
     */
    public NdIndexIterator(char order) {
        this.order = order;
    }

    /**
     * Pass in the shape to iterate over.
     * Defaults to c ordering.
     *
     * @param shape the shape to iterate over
     */
    public NdIndexIterator(int... shape) {
        this('c', shape);
    }

    /**
     * Pass in the shape to iterate over.
     *
     * @param order 'c' or 'f'
     * @param shape the shape to iterate over
     */
    public NdIndexIterator(char order, int... shape) {
        // Defensive copy so later mutation of the caller's array has no effect.
        this.shape = ArrayUtil.copy(shape);
        this.length = ArrayUtil.prod(shape);
        this.order = order;
    }

    @Override
    public boolean hasNext() {
        return i < length;
    }

    @Override
    public int[] next() {
        // Fail fast per the Iterator contract instead of silently producing
        // out-of-range coordinates when the iterator is exhausted.
        if (!hasNext()) {
            throw new NoSuchElementException("Iterated beyond " + length + " elements");
        }
        switch (order) {
            case 'c':
                return Shape.ind2subC(shape, i++);
            case 'f':
                return Shape.ind2sub(shape, i++);
            default:
                throw new IllegalArgumentException("Illegal ordering " + order);
        }
    }

    @Override
    public void remove() {
        // There is no backing collection to remove from; the previous silent
        // no-op violated the java.util.Iterator contract.
        throw new UnsupportedOperationException("remove");
    }
}
| apache-2.0 |
svagionitis/aws-sdk-cpp | aws-cpp-sdk-gamelift/source/model/DescribeScalingPoliciesResult.cpp | 1834 | /*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/gamelift/model/DescribeScalingPoliciesResult.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/UnreferencedParam.h>
#include <utility>
using namespace Aws::GameLift::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
using namespace Aws;
// Default-construct an empty result (no policies, empty pagination token).
DescribeScalingPoliciesResult::DescribeScalingPoliciesResult()
{
}
// Construct directly from a service JSON response by delegating to operator=.
DescribeScalingPoliciesResult::DescribeScalingPoliciesResult(const AmazonWebServiceResult<JsonValue>& result)
{
  *this = result;
}
// Populate this result from the JSON payload of a service response.
// Fields absent from the payload are simply skipped, leaving the
// corresponding members default-initialized.
DescribeScalingPoliciesResult& DescribeScalingPoliciesResult::operator =(const AmazonWebServiceResult<JsonValue>& result)
{
  const JsonValue& jsonValue = result.GetPayload();
  if(jsonValue.ValueExists("ScalingPolicies"))
  {
    // Deserialize each array element into a ScalingPolicy object.
    Array<JsonValue> scalingPoliciesJsonList = jsonValue.GetArray("ScalingPolicies");
    for(unsigned scalingPoliciesIndex = 0; scalingPoliciesIndex < scalingPoliciesJsonList.GetLength(); ++scalingPoliciesIndex)
    {
      m_scalingPolicies.push_back(scalingPoliciesJsonList[scalingPoliciesIndex].AsObject());
    }
  }
  if(jsonValue.ValueExists("NextToken"))
  {
    // Token used to request the next page of scaling policies.
    m_nextToken = jsonValue.GetString("NextToken");
  }
  return *this;
}
| apache-2.0 |
IdentityServer/IdentityServer4 | src/AspNetIdentity/host/Models/ManageViewModels/IndexViewModel.cs | 486 | using System.ComponentModel.DataAnnotations;
namespace IdentityServer4.Models.ManageViewModels
{
/// <summary>
/// View model for the account-management index page: profile fields plus a
/// status message for the UI. Validation is driven by the data annotations.
/// </summary>
public class IndexViewModel
{
    public string Username { get; set; }

    public bool IsEmailConfirmed { get; set; }

    [Required]
    [EmailAddress]
    public string Email { get; set; }

    [Phone]
    [Display(Name = "Phone number")]
    public string PhoneNumber { get; set; }

    public string StatusMessage { get; set; }
}
}
| apache-2.0 |