repo_name stringlengths 4 116 | path stringlengths 4 379 | size stringlengths 1 7 | content stringlengths 3 1.05M | license stringclasses 15
values |
|---|---|---|---|---|
xuhuisheng/lemon | src/main/java/com/mossle/vehicle/persistence/domain/VehicleDriver.java | 7407 | package com.mossle.vehicle.persistence.domain;
// Generated by Hibernate Tools
import java.util.Date;
import java.util.HashSet;
import java.util.Set;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.Id;
import javax.persistence.OneToMany;
import javax.persistence.Table;
import javax.persistence.Temporal;
import javax.persistence.TemporalType;
/**
 * VehicleDriver - driver master-data entity mapped to table VEHICLE_DRIVER.
 *
 * <p>NOTE(review): the original Javadoc said "vehicle info" (车辆信息), but every
 * field below describes a driver, not a vehicle — presumably a copy/paste slip
 * in the generated comment; confirm against the generator template.
 *
 * @author Lingo
 */
@Entity
@Table(name = "VEHICLE_DRIVER")
public class VehicleDriver implements java.io.Serializable {
    private static final long serialVersionUID = 0L;
    /** Primary key. */
    private Long id;
    /** Name. */
    private String name;
    /** Gender. */
    private String gender;
    /** Date of birth. */
    private Date birthday;
    /** Driver's license number. */
    private String code;
    /** Date the license was issued. */
    private Date licenseDate;
    /** Date the license expires. */
    private Date expireDate;
    /** Years of driving experience. */
    private Integer year;
    /** Vehicle class the license permits. */
    private String type;
    /** Phone number. */
    private String mobile;
    /** Address. */
    private String location;
    /** Annual inspection record. */
    private String annualInspection;
    /** Status. */
    private Integer status;
    /** Remarks. */
    private String description;
    /** Tenant id (multi-tenancy discriminator). */
    private String tenantId;
    /** Accidents this driver was involved in (inverse side of the association). */
    private Set<VehicleAccident> vehicleAccidents = new HashSet<VehicleAccident>(
            0);

    /** No-arg constructor required by JPA/Hibernate. */
    public VehicleDriver() {
    }

    /** Convenience constructor setting only the primary key. */
    public VehicleDriver(Long id) {
        this.id = id;
    }

    /** Full constructor populating every mapped field. */
    public VehicleDriver(Long id, String name, String gender, Date birthday,
            String code, Date licenseDate, Date expireDate, Integer year,
            String type, String mobile, String location,
            String annualInspection, Integer status, String description,
            String tenantId, Set<VehicleAccident> vehicleAccidents) {
        this.id = id;
        this.name = name;
        this.gender = gender;
        this.birthday = birthday;
        this.code = code;
        this.licenseDate = licenseDate;
        this.expireDate = expireDate;
        this.year = year;
        this.type = type;
        this.mobile = mobile;
        this.location = location;
        this.annualInspection = annualInspection;
        this.status = status;
        this.description = description;
        this.tenantId = tenantId;
        this.vehicleAccidents = vehicleAccidents;
    }

    /** @return primary key. */
    @Id
    @Column(name = "ID", unique = true, nullable = false)
    public Long getId() {
        return this.id;
    }

    /**
     * @param id
     *            primary key.
     */
    public void setId(Long id) {
        this.id = id;
    }

    /** @return name. */
    @Column(name = "NAME", length = 50)
    public String getName() {
        return this.name;
    }

    /**
     * @param name
     *            name.
     */
    public void setName(String name) {
        this.name = name;
    }

    /** @return gender. */
    @Column(name = "GENDER", length = 50)
    public String getGender() {
        return this.gender;
    }

    /**
     * @param gender
     *            gender.
     */
    public void setGender(String gender) {
        this.gender = gender;
    }

    /** @return date of birth (date only, no time component). */
    @Temporal(TemporalType.DATE)
    @Column(name = "BIRTHDAY", length = 10)
    public Date getBirthday() {
        return this.birthday;
    }

    /**
     * @param birthday
     *            date of birth.
     */
    public void setBirthday(Date birthday) {
        this.birthday = birthday;
    }

    /** @return driver's license number. */
    @Column(name = "CODE", length = 50)
    public String getCode() {
        return this.code;
    }

    /**
     * @param code
     *            driver's license number.
     */
    public void setCode(String code) {
        this.code = code;
    }

    /** @return date the license was issued. */
    @Temporal(TemporalType.DATE)
    @Column(name = "LICENSE_DATE", length = 10)
    public Date getLicenseDate() {
        return this.licenseDate;
    }

    /**
     * @param licenseDate
     *            date the license was issued.
     */
    public void setLicenseDate(Date licenseDate) {
        this.licenseDate = licenseDate;
    }

    /** @return date the license expires. */
    @Temporal(TemporalType.DATE)
    @Column(name = "EXPIRE_DATE", length = 10)
    public Date getExpireDate() {
        return this.expireDate;
    }

    /**
     * @param expireDate
     *            date the license expires.
     */
    public void setExpireDate(Date expireDate) {
        this.expireDate = expireDate;
    }

    /** @return years of driving experience. */
    @Column(name = "YEAR")
    public Integer getYear() {
        return this.year;
    }

    /**
     * @param year
     *            years of driving experience.
     */
    public void setYear(Integer year) {
        this.year = year;
    }

    /** @return vehicle class the license permits. */
    @Column(name = "TYPE", length = 50)
    public String getType() {
        return this.type;
    }

    /**
     * @param type
     *            vehicle class the license permits.
     */
    public void setType(String type) {
        this.type = type;
    }

    /** @return phone number. */
    @Column(name = "MOBILE", length = 50)
    public String getMobile() {
        return this.mobile;
    }

    /**
     * @param mobile
     *            phone number.
     */
    public void setMobile(String mobile) {
        this.mobile = mobile;
    }

    /** @return address. */
    @Column(name = "LOCATION", length = 200)
    public String getLocation() {
        return this.location;
    }

    /**
     * @param location
     *            address.
     */
    public void setLocation(String location) {
        this.location = location;
    }

    /** @return annual inspection record. */
    @Column(name = "ANNUAL_INSPECTION", length = 200)
    public String getAnnualInspection() {
        return this.annualInspection;
    }

    /**
     * @param annualInspection
     *            annual inspection record.
     */
    public void setAnnualInspection(String annualInspection) {
        this.annualInspection = annualInspection;
    }

    /** @return status. */
    @Column(name = "STATUS")
    public Integer getStatus() {
        return this.status;
    }

    /**
     * @param status
     *            status.
     */
    public void setStatus(Integer status) {
        this.status = status;
    }

    /** @return remarks. */
    @Column(name = "DESCRIPTION", length = 200)
    public String getDescription() {
        return this.description;
    }

    /**
     * @param description
     *            remarks.
     */
    public void setDescription(String description) {
        this.description = description;
    }

    /** @return tenant id. */
    @Column(name = "TENANT_ID", length = 64)
    public String getTenantId() {
        return this.tenantId;
    }

    /**
     * @param tenantId
     *            tenant id.
     */
    public void setTenantId(String tenantId) {
        this.tenantId = tenantId;
    }

    /** @return accidents associated with this driver (lazily loaded). */
    @OneToMany(fetch = FetchType.LAZY, mappedBy = "vehicleDriver")
    public Set<VehicleAccident> getVehicleAccidents() {
        return this.vehicleAccidents;
    }

    /**
     * @param vehicleAccidents
     *            accidents associated with this driver.
     */
    public void setVehicleAccidents(Set<VehicleAccident> vehicleAccidents) {
        this.vehicleAccidents = vehicleAccidents;
    }
}
| apache-2.0 |
apache/skywalking | apm-protocol/apm-network/src/main/java/org/apache/skywalking/oap/server/network/trace/component/command/UnsupportedCommandException.java | 1215 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.skywalking.oap.server.network.trace.component.command;
import org.apache.skywalking.apm.network.common.v3.Command;
/**
 * Thrown when a {@link Command} cannot be handled by the receiving side.
 *
 * <p>The offending command is retained so callers can inspect or log it.
 * Note the exception carries no message text; {@code getMessage()} returns
 * {@code null} — callers must use {@link #getCommand()} for context.
 */
public class UnsupportedCommandException extends RuntimeException {
    /** The command that could not be handled. */
    private final Command command;

    public UnsupportedCommandException(final Command command) {
        this.command = command;
    }

    /** @return the unsupported command, never modified after construction. */
    public Command getCommand() {
        return command;
    }
}
| apache-2.0 |
bdpiparva/gocd | server/src/main/java/com/thoughtworks/go/config/update/ElasticAgentProfileCommand.java | 5121 | /*
* Copyright 2020 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thoughtworks.go.config.update;
import com.thoughtworks.go.config.BasicCruiseConfig;
import com.thoughtworks.go.config.ConfigSaveValidationContext;
import com.thoughtworks.go.config.ConfigTag;
import com.thoughtworks.go.config.CruiseConfig;
import com.thoughtworks.go.config.commands.EntityConfigUpdateCommand;
import com.thoughtworks.go.config.elastic.ElasticProfile;
import com.thoughtworks.go.config.elastic.ElasticProfiles;
import com.thoughtworks.go.config.exceptions.EntityType;
import com.thoughtworks.go.config.exceptions.RecordNotFoundException;
import com.thoughtworks.go.plugin.access.elastic.ElasticAgentExtension;
import com.thoughtworks.go.plugin.api.response.validation.ValidationResult;
import com.thoughtworks.go.server.domain.Username;
import com.thoughtworks.go.server.service.GoConfigService;
import com.thoughtworks.go.server.service.result.LocalizedOperationResult;
import org.apache.commons.lang3.StringUtils;
import java.util.Map;
import static com.thoughtworks.go.i18n.LocalizedMessage.resourceNotFound;
import static com.thoughtworks.go.serverhealth.HealthStateType.forbidden;
import static com.thoughtworks.go.serverhealth.HealthStateType.notFound;
/**
 * Base class for config-update commands that create/update/delete an
 * {@link ElasticProfile} inside the cruise config.
 *
 * <p>Subclasses supply the actual mutation; this class provides validation,
 * lookup, and error-propagation helpers shared by all of them.
 */
public abstract class ElasticAgentProfileCommand implements EntityConfigUpdateCommand<ElasticProfile> {
    private final GoConfigService goConfigService;
    private final ElasticAgentExtension extension;
    // NOTE(review): currentUser is stored but never read in this class —
    // presumably kept for subclasses or future authorization checks; confirm.
    private final Username currentUser;
    final LocalizedOperationResult result;
    /** The profile as submitted by the caller; errors are copied back onto it. */
    final ElasticProfile elasticProfile;
    /** The profile instance found in the preprocessed config, set during validation. */
    ElasticProfile preprocessedProfile;

    public ElasticAgentProfileCommand(GoConfigService goConfigService, ElasticProfile profile, ElasticAgentExtension extension, Username currentUser, LocalizedOperationResult result) {
        this.goConfigService = goConfigService;
        this.elasticProfile = profile;
        this.extension = extension;
        this.currentUser = currentUser;
        this.result = result;
    }

    /** Returns the collection of elastic profiles from the given (preprocessed) config. */
    protected ElasticProfiles getPluginProfiles(CruiseConfig preprocessedConfig) {
        return preprocessedConfig.getElasticConfig().getProfiles();
    }

    /** Delegates configuration validation to the elastic-agent plugin itself. */
    public ValidationResult validateUsingExtension(String pluginId, Map<String, String> configuration) {
        return extension.validate(pluginId, configuration);
    }

    @Override
    public void clearErrors() {
        BasicCruiseConfig.clearErrors(elasticProfile);
    }

    @Override
    public ElasticProfile getPreprocessedEntityConfig() {
        return preprocessedProfile;
    }

    @Override
    public boolean canContinue(CruiseConfig cruiseConfig) {
        return isAuthorized();
    }

    protected EntityType getObjectDescriptor() {
        return EntityType.ElasticProfile;
    }

    // NOTE(review): always returns true, so canContinue() never blocks.
    // Authorization is presumably enforced elsewhere (e.g. at the API layer);
    // verify before relying on this as a security boundary.
    protected final boolean isAuthorized() {
        return true;
    }

    /**
     * Validates the profile found in the preprocessed config and copies any
     * validation errors back onto the caller-supplied {@code elasticProfile}.
     *
     * @return true when the preprocessed profile has no errors.
     */
    protected boolean isValidForCreateOrUpdate(CruiseConfig preprocessedConfig) {
        preprocessedProfile = findExistingProfile(preprocessedConfig);
        preprocessedProfile.validateTree(new ConfigSaveValidationContext(preprocessedConfig));
        if (preprocessedProfile.getAllErrors().isEmpty()) {
            // Cross-profile validation (e.g. duplicate ids) on the whole collection.
            getPluginProfiles(preprocessedConfig).validate(null);
            BasicCruiseConfig.copyErrors(preprocessedProfile, elasticProfile);
            return preprocessedProfile.getAllErrors().isEmpty();
        }
        BasicCruiseConfig.copyErrors(preprocessedProfile, elasticProfile);
        return false;
    }

    /**
     * Looks up the submitted profile's id in the given config.
     *
     * @throws IllegalArgumentException when the submitted profile is null or has a blank id
     * @throws RecordNotFoundException  when no profile with that id exists
     */
    protected final ElasticProfile findExistingProfile(CruiseConfig cruiseConfig) {
        if (elasticProfile == null || StringUtils.isBlank(elasticProfile.getId())) {
            if (elasticProfile != null) {
                elasticProfile.addError("id", getObjectDescriptor() + " cannot have a blank id.");
            }
            result.unprocessableEntity("The " + getObjectDescriptor().getEntityNameLowerCase() + " config is invalid. Attribute 'id' cannot be null.");
            throw new IllegalArgumentException(getObjectDescriptor().idCannotBeBlank());
        } else {
            ElasticProfile profile = getPluginProfiles(cruiseConfig).find(this.elasticProfile.getId());
            if (profile == null) {
                result.notFound(resourceNotFound(getTagName(), elasticProfile.getId()), notFound());
                throw new RecordNotFoundException(getObjectDescriptor(), elasticProfile.getId());
            }
            return profile;
        }
    }

    /** Reads the XML tag name for the profile type from its @ConfigTag annotation. */
    private String getTagName() {
        return elasticProfile.getClass().getAnnotation(ConfigTag.class).value();
    }
}
| apache-2.0 |
m0ppers/arangodb | 3rdParty/V8/V8-5.0.71.39/test/test262/data/test/built-ins/Object/defineProperty/15.2.3.6-4-150.js | 1100 | // Copyright (c) 2012 Ecma International. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
es5id: 15.2.3.6-4-150
description: >
Object.defineProperty - 'O' is an Array, 'name' is the length
property of 'O', test TypeError is thrown when the [[Value]] field
of 'desc' is an Object that both toString and valueOf wouldn't
return primitive value (15.4.5.1 step 3.c)
---*/
// Conformance fixture: defining Array 'length' with an object [[Value]] whose
// toString AND valueOf both return non-primitives must throw TypeError
// (ES5.1 15.4.5.1 step 3.c / ToNumber conversion failure).
var arrObj = [];
var toStringAccessed = false;
var valueOfAccessed = false;
assert.throws(TypeError, function() {
  Object.defineProperty(arrObj, "length", {
    value: {
      toString: function () {
        toStringAccessed = true;
        return {};  // non-primitive on purpose: forces fallback to valueOf
      },
      valueOf: function () {
        valueOfAccessed = true;
        return {};  // non-primitive again: ToNumber must now throw TypeError
      }
    }
  });
});
// Both conversion hooks must have been attempted before the throw.
assert(toStringAccessed, 'toStringAccessed !== true');
assert(valueOfAccessed, 'valueOfAccessed !== true');
| apache-2.0 |
gablg1/PerfKitBenchmarker | perfkitbenchmarker/benchmarks/cassandra_stress_benchmark.py | 10690 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs cassandra.
Cassandra homepage: http://cassandra.apache.org
cassandra-stress tool page:
http://www.datastax.com/documentation/cassandra/2.0/cassandra/tools/toolsCStress_t.html
"""
import functools
import logging
import math
import os
import posixpath
import re
import time
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.packages import cassandra
# Default number of stress-test keys per CPU core of a data node, used when
# --num_keys is left at 0.
NUM_KEYS_PER_CORE = 2000000

flags.DEFINE_integer('num_keys', 0,
                     'Number of keys used in cassandra-stress tool. '
                     'If unset, this benchmark will use %s * num_cpus '
                     'on data nodes as the value.' % NUM_KEYS_PER_CORE)
flags.DEFINE_integer('num_cassandra_stress_threads', 50,
                     'Number of threads used in cassandra-stress tool '
                     'on each loader node.')
FLAGS = flags.FLAGS
DEFAULT_CLUSTER_SIZE = 4
# Disks and machines are set in config file.
BENCHMARK_INFO = {'name': 'cassandra_stress',
                  'description': 'Benchmark Cassandra using cassandra-stress',
                  'scratch_disk': False,
                  'num_machines': DEFAULT_CLUSTER_SIZE}
# vm_dict role keys: loader nodes drive the stress tool, 'cas' nodes run Cassandra.
LOADER_NODE = 'loader'
DATA_NODE = 'cas'
# Seconds to wait for keyspace metadata to propagate across the cluster.
PROPAGATION_WAIT_TIME = 30
SLEEP_BETWEEN_CHECK_IN_SECONDS = 5
# Stress test options.
CONSISTENCY_LEVEL = 'quorum'
REPLICATION_FACTOR = 3
RETRIES = 1000
# Path of the cassandra-stress binary inside the Cassandra install dir.
CASSANDRA_STRESS = posixpath.join(cassandra.CASSANDRA_DIR, 'tools', 'bin',
                                  'cassandra-stress')
def GetInfo():
  """Returns the static benchmark metadata dict (shared, not a copy)."""
  return BENCHMARK_INFO
def CheckPrerequisites():
  """Verifies that the required resources are present.

  Delegates entirely to the cassandra package's own check.

  Raises:
    perfkitbenchmarker.data.ResourceNotFound: On missing resource.
  """
  cassandra.CheckPrerequisites()
def Prepare(benchmark_spec):
  """Install Cassandra and Java on target vms.

  Splits the VMs into loader and data roles (when no config file assigned
  them), provisions scratch disks on the data nodes, installs Cassandra
  everywhere, then configures and starts the cluster from a seed node.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  vm_dict = benchmark_spec.vm_dict
  logging.info('VM dictionary %s', vm_dict)
  if vm_dict['default']:
    logging.info('No config file is provided, use default settings: '
                 '1 loader node, 3 data nodes')
    # Last VM becomes the loader; the first three become data nodes.
    vm_dict[LOADER_NODE] = [vm_dict['default'][-1]]
    vm_dict[DATA_NODE] = vm_dict['default'][:3]
    disk_spec = disk.BaseDiskSpec(
        FLAGS.scratch_disk_size,
        FLAGS.scratch_disk_type,
        '/cassandra_data')
    for vm in vm_dict[DATA_NODE]:
      vm.CreateScratchDisk(disk_spec)
  logging.info('Authorizing loader[0] permission to access all other vms.')
  vm_dict[LOADER_NODE][0].AuthenticateVm()
  logging.info('Preparing data files and Java on all vms.')
  vm_util.RunThreaded(lambda vm: vm.Install('cassandra'), benchmark_spec.vms)
  # First data node acts as the Cassandra seed; remaining nodes join it.
  seed_vm = vm_dict[DATA_NODE][0]
  configure = functools.partial(cassandra.Configure, seed_vms=[seed_vm])
  vm_util.RunThreaded(configure, vm_dict[DATA_NODE])
  cassandra.StartCluster(seed_vm, vm_dict[DATA_NODE][1:])
def _ResultFilePath(vm):
  """Returns the remote path of the stress-result file for the given VM."""
  filename = '{0}.stress_results.txt'.format(vm.hostname)
  return posixpath.join(vm_util.VM_TMP_DIR, filename)
def RunTestOnLoader(vm, data_node_ips):
  """Run Cassandra-stress test on loader node.

  Blocks until the remote cassandra-stress invocation returns; results are
  written to the per-VM file given by _ResultFilePath.

  Args:
    vm: The target vm.
    data_node_ips: List of IP addresses for all data nodes.
  """
  vm.RemoteCommand(
      '%s '
      '--file "%s" --nodes %s '
      '--replication-factor %s --consistency-level %s '
      '--num-keys %s -K %s -t %s' % (
          CASSANDRA_STRESS,
          _ResultFilePath(vm),
          ','.join(data_node_ips),
          REPLICATION_FACTOR, CONSISTENCY_LEVEL, FLAGS.num_keys,
          RETRIES, FLAGS.num_cassandra_stress_threads))
def RunCassandraStress(benchmark_spec):
  """Start Cassandra test.

  First runs a single-key stress invocation to create the keyspace, waits for
  it to propagate, derives a default --num_keys if unset, then kicks off the
  real test on every loader node in parallel.

  Args:
    benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.
  """
  logging.info('Creating Keyspace.')
  data_node_ips = [data_vm.internal_ip
                   for data_vm in benchmark_spec.vm_dict[DATA_NODE]]
  loader_vms = benchmark_spec.vm_dict[LOADER_NODE]
  # --num-keys 1 is a throwaway run whose only purpose is keyspace creation.
  loader_vms[0].RemoteCommand(
      '%s '
      '--nodes %s --replication-factor %s '
      '--consistency-level %s --num-keys 1 > /dev/null' % (
          CASSANDRA_STRESS,
          ','.join(data_node_ips),
          REPLICATION_FACTOR, CONSISTENCY_LEVEL))
  logging.info('Waiting %s for keyspace to propagate.', PROPAGATION_WAIT_TIME)
  time.sleep(PROPAGATION_WAIT_TIME)
  if not FLAGS.num_keys:
    # Scale the workload with the data nodes' core count.
    FLAGS.num_keys = NUM_KEYS_PER_CORE * benchmark_spec.vm_dict[
        DATA_NODE][0].num_cpus
    logging.info('Num keys not set, using %s in cassandra-stress test.',
                 FLAGS.num_keys)
  logging.info('Executing the benchmark.')
  args = [((loader_vm, data_node_ips), {})
          for loader_vm in benchmark_spec.vm_dict[LOADER_NODE]]
  vm_util.RunThreaded(RunTestOnLoader, args)
def WaitForLoaderToFinish(vm):
  """Watch loader node and wait for it to finish test.

  Polls the tail of the remote result file until it contains END (success)
  or FAILURE (raises after pulling the file locally for inspection).

  Args:
    vm: The target vm.

  Raises:
    errors.Benchmarks.RunError: If the cassandra-stress tool reported FAILURE.
  """
  result_path = _ResultFilePath(vm)
  while True:
    resp, _ = vm.RemoteCommand('tail -n 1 ' + result_path)
    if re.findall(r'END', resp):
      break
    if re.findall(r'FAILURE', resp):
      vm.PullFile(vm_util.GetTempDir(), result_path)
      raise errors.Benchmarks.RunError(
          'cassandra-stress tool failed, check %s for details.'
          % posixpath.join(vm_util.GetTempDir(),
                           os.path.basename(result_path)))
    time.sleep(SLEEP_BETWEEN_CHECK_IN_SECONDS)
def CollectResultFile(vm, interval_op_rate_list, interval_key_rate_list,
                      latency_median_list, latency_95th_list,
                      latency_99_9th_list,
                      total_operation_time_list):
  """Collect result file on vm.

  Pulls the result file locally, then parses the numeric summary from the
  last lines of the remote file and appends one value to each output list.
  The regex match order depends on cassandra-stress's summary format:
  op rate, key rate, median, 95th, 99.9th, then an h:m:s elapsed time.

  Args:
    vm: The target vm.
    interval_op_rate_list: The list stores interval_op_rate.
    interval_key_rate_list: The list stores interval_key_rate.
    latency_median_list: The list stores latency median.
    latency_95th_list: The list stores latency 95th percentile.
    latency_99_9th_list: The list stores latency 99.9th percentile.
    total_operation_time_list: The list stores total operation time.

  Raises:
    ValueError: If fewer than 6 numeric fields could be parsed.
  """
  result_path = _ResultFilePath(vm)
  vm.PullFile(vm_util.GetTempDir(), result_path)
  resp, _ = vm.RemoteCommand('tail ' + result_path)
  match = re.findall(r'[\w\t ]: +([\d\.:]+)', resp)
  if len(match) < 6:
    raise ValueError('Result not found in "%s"' % resp)
  interval_op_rate_list.append(int(match[0]))
  interval_key_rate_list.append(int(match[1]))
  latency_median_list.append(float(match[2]))
  latency_95th_list.append(float(match[3]))
  latency_99_9th_list.append(float(match[4]))
  # match[5] is "h:m:s"; convert to total seconds.
  raw_time_data = match[5].split(':')
  total_operation_time_list.append(
      int(raw_time_data[0]) * 3600 + int(raw_time_data[1]) * 60 + int(
          raw_time_data[2]))
def RunCassandraStressTest(benchmark_spec):
  """Start all loader nodes as Cassandra clients and run stress test.

  Args:
    benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.
  """
  try:
    RunCassandraStress(benchmark_spec)
  finally:
    # NOTE(review): this runs even when RunCassandraStress raised, so we still
    # watch whatever loaders did start — presumably intentional; confirm.
    logging.info('Tests running. Watching progress.')
    vm_util.RunThreaded(WaitForLoaderToFinish,
                        benchmark_spec.vm_dict[LOADER_NODE])
def CollectResults(benchmark_spec):
  """Collect and parse test results.

  Gathers each loader's result file in parallel, sums the throughput numbers
  across loaders, and averages the latency/time numbers over the loader count.

  Args:
    benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  logging.info('Gathering results.')
  vm_dict = benchmark_spec.vm_dict
  # Hoisted: previously recomputed len(vm_dict[LOADER_NODE]) for every metric.
  num_loaders = len(vm_dict[LOADER_NODE])
  interval_op_rate_list = []
  interval_key_rate_list = []
  latency_median_list = []
  latency_95th_list = []
  latency_99_9th_list = []
  total_operation_time_list = []
  args = [((vm, interval_op_rate_list, interval_key_rate_list,
            latency_median_list, latency_95th_list,
            latency_99_9th_list,
            total_operation_time_list), {}) for vm in vm_dict[LOADER_NODE]]
  vm_util.RunThreaded(CollectResultFile, args)
  metadata = {'num_keys': FLAGS.num_keys,
              'num_data_nodes': len(vm_dict[DATA_NODE]),
              'num_loader_nodes': num_loaders,
              'num_cassandra_stress_threads':
                  FLAGS.num_cassandra_stress_threads}
  # Rates are summed across loaders; latencies and times are averaged.
  # (The dead "results = []" that was immediately overwritten is removed.)
  results = [
      sample.Sample('Interval_op_rate', math.fsum(interval_op_rate_list),
                    'operations per second', metadata),
      sample.Sample('Interval_key_rate', math.fsum(interval_key_rate_list),
                    'operations per second', metadata),
      sample.Sample('Latency median',
                    math.fsum(latency_median_list) / num_loaders,
                    'ms', metadata),
      sample.Sample('Latency 95th percentile',
                    math.fsum(latency_95th_list) / num_loaders,
                    'ms', metadata),
      sample.Sample('Latency 99.9th percentile',
                    math.fsum(latency_99_9th_list) / num_loaders,
                    'ms', metadata),
      sample.Sample('Total operation time',
                    math.fsum(total_operation_time_list) / num_loaders,
                    'seconds', metadata)]
  logging.info('Cassandra results:\n%s', results)
  return results
def Run(benchmark_spec):
  """Run Cassandra on target vms.

  Executes the stress test, then collects and returns the parsed samples.

  Args:
    benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  RunCassandraStressTest(benchmark_spec)
  return CollectResults(benchmark_spec)
def Cleanup(benchmark_spec):
  """Cleanup function.

  Stops Cassandra on every data node, then wipes its on-node state.

  Args:
    benchmark_spec: The benchmark specification. Contains all data
        that is required to run the benchmark.
  """
  vm_dict = benchmark_spec.vm_dict
  vm_util.RunThreaded(cassandra.Stop, vm_dict[DATA_NODE])
  vm_util.RunThreaded(cassandra.CleanNode, vm_dict[DATA_NODE])
| apache-2.0 |
xuzhongxing/deeplearning4j | deeplearning4j-ui-parent/deeplearning4j-ui-components/src/main/typescript/org/deeplearning4j/ui/components/text/style/StyleText.ts | 1291 | /*
*
* * Copyright 2016 Skymind,Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
/**
 * Text styling read from a deserialized chart/component JSON payload.
 * Wraps the 'StyleText' sub-object; all fields are optional in the JSON
 * and remain undefined when absent.
 */
class StyleText extends Style {
    private font: string;       // font family name
    private fontSize: number;
    private underline: boolean;
    private color: string;      // presumably a CSS color string — confirm against producer

    constructor( jsonObj: any){
        // Base Style fields also live under the 'StyleText' key.
        super(jsonObj['StyleText']);
        var style: any = jsonObj['StyleText'];
        if(style){
            this.font = style['font'];
            this.fontSize = style['fontSize'];
            this.underline = style['underline'];
            this.color = style['color'];
        }
    }

    getFont = () => this.font;
    getFontSize = () => this.fontSize;
    getUnderline = () => this.underline;
    getColor = () => this.color;
}
whiskeysierra/archer | archer-aspectj/src/test/java/org/example/persistence/LibraryCallingPersistence.java | 947 | package org.example.persistence;
/*-
*
* Archer: AspectJ
*
* Copyright (C) 2015 - 2018 whiskeysierra
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
import io.github.whiskeysierra.archer.Persistence;
import org.example.SomeLibrary;
/**
 * Test fixture: a @Persistence-annotated class that calls into a library,
 * used to exercise the AspectJ pointcuts in the surrounding test suite.
 */
@Persistence
public final class LibraryCallingPersistence {
    // NOTE(review): never assigned in this class — presumably injected or
    // irrelevant because the aspect intercepts the call; invoking send()
    // without injection would NPE. Confirm against the test setup.
    private SomeLibrary library;

    /** Delegates to the library so the library-call join point fires. */
    public void send() {
        library.perform();
    }
}
| apache-2.0 |
yugangw-msft/azure-sdk-for-net | sdk/synapse/Azure.Analytics.Synapse.Artifacts/src/Generated/Models/AzureFunctionLinkedService.Serialization.cs | 7760 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// <auto-generated/>
#nullable disable
using System.Collections.Generic;
using System.Text.Json;
using Azure.Core;
namespace Azure.Analytics.Synapse.Artifacts.Models
{
    // NOTE(review): auto-generated serialization partial (see <auto-generated/>
    // header) — comments below are for reviewers; regeneration discards them.
    public partial class AzureFunctionLinkedService : IUtf8JsonSerializable
    {
        // Serializes this linked service to JSON. Optional members are emitted
        // only when defined; unknown members round-trip via AdditionalProperties.
        void IUtf8JsonSerializable.Write(Utf8JsonWriter writer)
        {
            writer.WriteStartObject();
            writer.WritePropertyName("type");
            writer.WriteStringValue(Type);
            if (Optional.IsDefined(ConnectVia))
            {
                writer.WritePropertyName("connectVia");
                writer.WriteObjectValue(ConnectVia);
            }
            if (Optional.IsDefined(Description))
            {
                writer.WritePropertyName("description");
                writer.WriteStringValue(Description);
            }
            if (Optional.IsCollectionDefined(Parameters))
            {
                writer.WritePropertyName("parameters");
                writer.WriteStartObject();
                foreach (var item in Parameters)
                {
                    writer.WritePropertyName(item.Key);
                    writer.WriteObjectValue(item.Value);
                }
                writer.WriteEndObject();
            }
            if (Optional.IsCollectionDefined(Annotations))
            {
                writer.WritePropertyName("annotations");
                writer.WriteStartArray();
                foreach (var item in Annotations)
                {
                    writer.WriteObjectValue(item);
                }
                writer.WriteEndArray();
            }
            // Service-specific settings are nested under "typeProperties".
            writer.WritePropertyName("typeProperties");
            writer.WriteStartObject();
            writer.WritePropertyName("functionAppUrl");
            writer.WriteObjectValue(FunctionAppUrl);
            if (Optional.IsDefined(FunctionKey))
            {
                writer.WritePropertyName("functionKey");
                writer.WriteObjectValue(FunctionKey);
            }
            if (Optional.IsDefined(EncryptedCredential))
            {
                writer.WritePropertyName("encryptedCredential");
                writer.WriteObjectValue(EncryptedCredential);
            }
            writer.WriteEndObject();
            // Round-trip any properties not modeled on this type.
            foreach (var item in AdditionalProperties)
            {
                writer.WritePropertyName(item.Key);
                writer.WriteObjectValue(item.Value);
            }
            writer.WriteEndObject();
        }

        // Deserializes an AzureFunctionLinkedService from a JSON element.
        // Unrecognized top-level properties are collected into additionalProperties.
        internal static AzureFunctionLinkedService DeserializeAzureFunctionLinkedService(JsonElement element)
        {
            string type = default;
            Optional<IntegrationRuntimeReference> connectVia = default;
            Optional<string> description = default;
            Optional<IDictionary<string, ParameterSpecification>> parameters = default;
            Optional<IList<object>> annotations = default;
            object functionAppUrl = default;
            Optional<SecretBase> functionKey = default;
            Optional<object> encryptedCredential = default;
            IDictionary<string, object> additionalProperties = default;
            Dictionary<string, object> additionalPropertiesDictionary = new Dictionary<string, object>();
            foreach (var property in element.EnumerateObject())
            {
                if (property.NameEquals("type"))
                {
                    type = property.Value.GetString();
                    continue;
                }
                if (property.NameEquals("connectVia"))
                {
                    if (property.Value.ValueKind == JsonValueKind.Null)
                    {
                        // Explicit null for a non-nullable member is rejected.
                        property.ThrowNonNullablePropertyIsNull();
                        continue;
                    }
                    connectVia = IntegrationRuntimeReference.DeserializeIntegrationRuntimeReference(property.Value);
                    continue;
                }
                if (property.NameEquals("description"))
                {
                    description = property.Value.GetString();
                    continue;
                }
                if (property.NameEquals("parameters"))
                {
                    if (property.Value.ValueKind == JsonValueKind.Null)
                    {
                        property.ThrowNonNullablePropertyIsNull();
                        continue;
                    }
                    Dictionary<string, ParameterSpecification> dictionary = new Dictionary<string, ParameterSpecification>();
                    foreach (var property0 in property.Value.EnumerateObject())
                    {
                        dictionary.Add(property0.Name, ParameterSpecification.DeserializeParameterSpecification(property0.Value));
                    }
                    parameters = dictionary;
                    continue;
                }
                if (property.NameEquals("annotations"))
                {
                    if (property.Value.ValueKind == JsonValueKind.Null)
                    {
                        property.ThrowNonNullablePropertyIsNull();
                        continue;
                    }
                    List<object> array = new List<object>();
                    foreach (var item in property.Value.EnumerateArray())
                    {
                        array.Add(item.GetObject());
                    }
                    annotations = array;
                    continue;
                }
                if (property.NameEquals("typeProperties"))
                {
                    if (property.Value.ValueKind == JsonValueKind.Null)
                    {
                        property.ThrowNonNullablePropertyIsNull();
                        continue;
                    }
                    // Flatten the nested typeProperties object.
                    foreach (var property0 in property.Value.EnumerateObject())
                    {
                        if (property0.NameEquals("functionAppUrl"))
                        {
                            functionAppUrl = property0.Value.GetObject();
                            continue;
                        }
                        if (property0.NameEquals("functionKey"))
                        {
                            if (property0.Value.ValueKind == JsonValueKind.Null)
                            {
                                property0.ThrowNonNullablePropertyIsNull();
                                continue;
                            }
                            functionKey = SecretBase.DeserializeSecretBase(property0.Value);
                            continue;
                        }
                        if (property0.NameEquals("encryptedCredential"))
                        {
                            if (property0.Value.ValueKind == JsonValueKind.Null)
                            {
                                property0.ThrowNonNullablePropertyIsNull();
                                continue;
                            }
                            encryptedCredential = property0.Value.GetObject();
                            continue;
                        }
                    }
                    continue;
                }
                additionalPropertiesDictionary.Add(property.Name, property.Value.GetObject());
            }
            additionalProperties = additionalPropertiesDictionary;
            return new AzureFunctionLinkedService(type, connectVia.Value, description.Value, Optional.ToDictionary(parameters), Optional.ToList(annotations), additionalProperties, functionAppUrl, functionKey.Value, encryptedCredential.Value);
        }
    }
}
| apache-2.0 |
linzhaoming/origin | vendor/github.com/Azure/azure-sdk-for-go/profiles/preview/keyvault/mgmt/keyvault/models.go | 10339 | // +build go1.9
// Copyright 2018 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This code was auto-generated by:
// github.com/Azure/azure-sdk-for-go/tools/profileBuilder
package keyvault
import original "github.com/Azure/azure-sdk-for-go/services/keyvault/mgmt/2018-02-14/keyvault"
// NOTE(review): generated profile shim (see profileBuilder header) — these
// declarations only re-export names from the versioned keyvault package;
// comments here are for reviewers and will be lost on regeneration.
const (
	DefaultBaseURI = original.DefaultBaseURI
)

type BaseClient = original.BaseClient
type AccessPolicyUpdateKind = original.AccessPolicyUpdateKind

const (
	Add     AccessPolicyUpdateKind = original.Add
	Remove  AccessPolicyUpdateKind = original.Remove
	Replace AccessPolicyUpdateKind = original.Replace
)

type CertificatePermissions = original.CertificatePermissions

const (
	Backup         CertificatePermissions = original.Backup
	Create         CertificatePermissions = original.Create
	Delete         CertificatePermissions = original.Delete
	Deleteissuers  CertificatePermissions = original.Deleteissuers
	Get            CertificatePermissions = original.Get
	Getissuers     CertificatePermissions = original.Getissuers
	Import         CertificatePermissions = original.Import
	List           CertificatePermissions = original.List
	Listissuers    CertificatePermissions = original.Listissuers
	Managecontacts CertificatePermissions = original.Managecontacts
	Manageissuers  CertificatePermissions = original.Manageissuers
	Purge          CertificatePermissions = original.Purge
	Recover        CertificatePermissions = original.Recover
	Restore        CertificatePermissions = original.Restore
	Setissuers     CertificatePermissions = original.Setissuers
	Update         CertificatePermissions = original.Update
)

type CreateMode = original.CreateMode

const (
	CreateModeDefault CreateMode = original.CreateModeDefault
	CreateModeRecover CreateMode = original.CreateModeRecover
)

type KeyPermissions = original.KeyPermissions

const (
	KeyPermissionsBackup    KeyPermissions = original.KeyPermissionsBackup
	KeyPermissionsCreate    KeyPermissions = original.KeyPermissionsCreate
	KeyPermissionsDecrypt   KeyPermissions = original.KeyPermissionsDecrypt
	KeyPermissionsDelete    KeyPermissions = original.KeyPermissionsDelete
	KeyPermissionsEncrypt   KeyPermissions = original.KeyPermissionsEncrypt
	KeyPermissionsGet       KeyPermissions = original.KeyPermissionsGet
	KeyPermissionsImport    KeyPermissions = original.KeyPermissionsImport
	KeyPermissionsList      KeyPermissions = original.KeyPermissionsList
	KeyPermissionsPurge     KeyPermissions = original.KeyPermissionsPurge
	KeyPermissionsRecover   KeyPermissions = original.KeyPermissionsRecover
	KeyPermissionsRestore   KeyPermissions = original.KeyPermissionsRestore
	KeyPermissionsSign      KeyPermissions = original.KeyPermissionsSign
	KeyPermissionsUnwrapKey KeyPermissions = original.KeyPermissionsUnwrapKey
	KeyPermissionsUpdate    KeyPermissions = original.KeyPermissionsUpdate
	KeyPermissionsVerify    KeyPermissions = original.KeyPermissionsVerify
	KeyPermissionsWrapKey   KeyPermissions = original.KeyPermissionsWrapKey
)

type NetworkRuleAction = original.NetworkRuleAction

const (
	Allow NetworkRuleAction = original.Allow
	Deny  NetworkRuleAction = original.Deny
)

type NetworkRuleBypassOptions = original.NetworkRuleBypassOptions

const (
	AzureServices NetworkRuleBypassOptions = original.AzureServices
	None          NetworkRuleBypassOptions = original.None
)

type Reason = original.Reason

const (
	AccountNameInvalid Reason = original.AccountNameInvalid
	AlreadyExists      Reason = original.AlreadyExists
)

type SecretPermissions = original.SecretPermissions
const (
SecretPermissionsBackup SecretPermissions = original.SecretPermissionsBackup
SecretPermissionsDelete SecretPermissions = original.SecretPermissionsDelete
SecretPermissionsGet SecretPermissions = original.SecretPermissionsGet
SecretPermissionsList SecretPermissions = original.SecretPermissionsList
SecretPermissionsPurge SecretPermissions = original.SecretPermissionsPurge
SecretPermissionsRecover SecretPermissions = original.SecretPermissionsRecover
SecretPermissionsRestore SecretPermissions = original.SecretPermissionsRestore
SecretPermissionsSet SecretPermissions = original.SecretPermissionsSet
)
type SkuName = original.SkuName
const (
Premium SkuName = original.Premium
Standard SkuName = original.Standard
)
type StoragePermissions = original.StoragePermissions
const (
StoragePermissionsBackup StoragePermissions = original.StoragePermissionsBackup
StoragePermissionsDelete StoragePermissions = original.StoragePermissionsDelete
StoragePermissionsDeletesas StoragePermissions = original.StoragePermissionsDeletesas
StoragePermissionsGet StoragePermissions = original.StoragePermissionsGet
StoragePermissionsGetsas StoragePermissions = original.StoragePermissionsGetsas
StoragePermissionsList StoragePermissions = original.StoragePermissionsList
StoragePermissionsListsas StoragePermissions = original.StoragePermissionsListsas
StoragePermissionsPurge StoragePermissions = original.StoragePermissionsPurge
StoragePermissionsRecover StoragePermissions = original.StoragePermissionsRecover
StoragePermissionsRegeneratekey StoragePermissions = original.StoragePermissionsRegeneratekey
StoragePermissionsRestore StoragePermissions = original.StoragePermissionsRestore
StoragePermissionsSet StoragePermissions = original.StoragePermissionsSet
StoragePermissionsSetsas StoragePermissions = original.StoragePermissionsSetsas
StoragePermissionsUpdate StoragePermissions = original.StoragePermissionsUpdate
)
// Model and client type aliases that lift the wrapped package's exported
// types into this profile's namespace. Each line is a pure alias; the
// definitions live in the 2018-02-14 keyvault package.
type AccessPolicyEntry = original.AccessPolicyEntry
type CheckNameAvailabilityResult = original.CheckNameAvailabilityResult
type DeletedVault = original.DeletedVault
type DeletedVaultListResult = original.DeletedVaultListResult
type DeletedVaultListResultIterator = original.DeletedVaultListResultIterator
type DeletedVaultListResultPage = original.DeletedVaultListResultPage
type DeletedVaultProperties = original.DeletedVaultProperties
type IPRule = original.IPRule
type LogSpecification = original.LogSpecification
type NetworkRuleSet = original.NetworkRuleSet
type Operation = original.Operation
type OperationDisplay = original.OperationDisplay
type OperationListResult = original.OperationListResult
type OperationListResultIterator = original.OperationListResultIterator
type OperationListResultPage = original.OperationListResultPage
type OperationProperties = original.OperationProperties
type Permissions = original.Permissions
type Resource = original.Resource
type ResourceListResult = original.ResourceListResult
type ResourceListResultIterator = original.ResourceListResultIterator
type ResourceListResultPage = original.ResourceListResultPage
type ServiceSpecification = original.ServiceSpecification
type Sku = original.Sku
type Vault = original.Vault
type VaultAccessPolicyParameters = original.VaultAccessPolicyParameters
type VaultAccessPolicyProperties = original.VaultAccessPolicyProperties
type VaultCheckNameAvailabilityParameters = original.VaultCheckNameAvailabilityParameters
type VaultCreateOrUpdateParameters = original.VaultCreateOrUpdateParameters
type VaultListResult = original.VaultListResult
type VaultListResultIterator = original.VaultListResultIterator
type VaultListResultPage = original.VaultListResultPage
type VaultPatchParameters = original.VaultPatchParameters
type VaultPatchProperties = original.VaultPatchProperties
type VaultProperties = original.VaultProperties
type VaultsCreateOrUpdateFuture = original.VaultsCreateOrUpdateFuture
type VaultsPurgeDeletedFuture = original.VaultsPurgeDeletedFuture
type VirtualNetworkRule = original.VirtualNetworkRule
type OperationsClient = original.OperationsClient
type VaultsClient = original.VaultsClient
// New returns a BaseClient for the given subscription, delegating to the
// wrapped package (which uses its default base URI).
func New(subscriptionID string) BaseClient {
    return original.New(subscriptionID)
}

// NewWithBaseURI returns a BaseClient that targets the given base URI
// instead of the default endpoint.
func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
    return original.NewWithBaseURI(baseURI, subscriptionID)
}

// PossibleAccessPolicyUpdateKindValues returns every AccessPolicyUpdateKind value.
func PossibleAccessPolicyUpdateKindValues() []AccessPolicyUpdateKind {
    return original.PossibleAccessPolicyUpdateKindValues()
}

// PossibleCertificatePermissionsValues returns every CertificatePermissions value.
func PossibleCertificatePermissionsValues() []CertificatePermissions {
    return original.PossibleCertificatePermissionsValues()
}

// PossibleCreateModeValues returns every CreateMode value.
func PossibleCreateModeValues() []CreateMode {
    return original.PossibleCreateModeValues()
}

// PossibleKeyPermissionsValues returns every KeyPermissions value.
func PossibleKeyPermissionsValues() []KeyPermissions {
    return original.PossibleKeyPermissionsValues()
}

// PossibleNetworkRuleActionValues returns every NetworkRuleAction value.
func PossibleNetworkRuleActionValues() []NetworkRuleAction {
    return original.PossibleNetworkRuleActionValues()
}

// PossibleNetworkRuleBypassOptionsValues returns every NetworkRuleBypassOptions value.
func PossibleNetworkRuleBypassOptionsValues() []NetworkRuleBypassOptions {
    return original.PossibleNetworkRuleBypassOptionsValues()
}

// PossibleReasonValues returns every Reason value.
func PossibleReasonValues() []Reason {
    return original.PossibleReasonValues()
}

// PossibleSecretPermissionsValues returns every SecretPermissions value.
func PossibleSecretPermissionsValues() []SecretPermissions {
    return original.PossibleSecretPermissionsValues()
}

// PossibleSkuNameValues returns every SkuName value.
func PossibleSkuNameValues() []SkuName {
    return original.PossibleSkuNameValues()
}

// PossibleStoragePermissionsValues returns every StoragePermissions value.
func PossibleStoragePermissionsValues() []StoragePermissions {
    return original.PossibleStoragePermissionsValues()
}
// NewOperationsClient returns an OperationsClient for the given subscription.
func NewOperationsClient(subscriptionID string) OperationsClient {
    return original.NewOperationsClient(subscriptionID)
}

// NewOperationsClientWithBaseURI returns an OperationsClient that targets
// the given base URI.
func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
    return original.NewOperationsClientWithBaseURI(baseURI, subscriptionID)
}

// NewVaultsClient returns a VaultsClient for the given subscription.
func NewVaultsClient(subscriptionID string) VaultsClient {
    return original.NewVaultsClient(subscriptionID)
}

// NewVaultsClientWithBaseURI returns a VaultsClient that targets the given
// base URI.
func NewVaultsClientWithBaseURI(baseURI string, subscriptionID string) VaultsClient {
    return original.NewVaultsClientWithBaseURI(baseURI, subscriptionID)
}

// UserAgent returns the wrapped package's user-agent string with the
// " profiles/preview" marker appended, identifying calls made through
// this profile.
func UserAgent() string {
    return original.UserAgent() + " profiles/preview"
}

// Version returns the version string of the underlying SDK package.
func Version() string {
    return original.Version()
}
| apache-2.0 |
pmhtech/api-server | sys/src/main/webapp/static/src/main/javascript/view/partials/signature.js | 26626 | 'use strict';
/* jshint -W122 */
SwaggerUi.partials.signature = (function () {
// copy-pasted from swagger-js
// Unwrap nested `schema.schema` containers until the innermost schema
// object is reached (behaviour copied from swagger-js).
var resolveSchema = function (schema) {
  while (_.isPlainObject(schema.schema)) {
    schema = schema.schema;
  }
  return schema;
};
// copy-pasted from swagger-js
// Reduce a JSON reference to a bare model name: strips a leading
// '#/definitions/' prefix, passes any other string through unchanged,
// and maps an undefined input to null (copied from swagger-js).
var simpleRef = function (name) {
  var prefix = '#/definitions/';
  if (name === undefined) {
    return null;
  }
  return name.indexOf(prefix) === 0 ? name.substring(prefix.length) : name;
};
// copy-pasted from swagger-js
// Look up a previously registered inline model by its generated display
// name ('Inline Model <n>'). Returns the stored model, or null when the
// string does not match the pattern or no registry is attached to `this`.
//
// Bug fix: this function is invoked without a receiver elsewhere in this
// file (see getParameterModelSignature / createParameterJSONSample), so
// under this file's 'use strict' `this` is undefined and the original
// `this.inlineModels` access threw a TypeError for any matching name.
// Guard `this` before touching the registry.
var getInlineModel = function(inlineStr) {
  var registry = this && this.inlineModels;
  if (registry && /^Inline Model \d+$/.test(inlineStr)) {
    var id = parseInt(inlineStr.substr('Inline Model'.length).trim(), 10);
    return registry[id];
  }
  // Not an inline-model name, or no registry available.
  return null;
};
// copy-pasted from swagger-js
// Pretty-print an XML string: one tag per line, two-space indentation
// driven by a small state machine over tag "types". Copied from
// swagger-js; the statement order is load-bearing, do not reorder.
var formatXml = function(xml) {
  var contexp, fn, formatted, indent, l, lastType, len, lines, ln, pad, reg, transitions, wsexp;
  reg = /(>)(<)(\/*)/g;          // boundary between adjacent tags
  wsexp = /[ ]*(.*)[ ]+\n/g;     // trailing whitespace before newline
  contexp = /(<.+>)(.+\n)/g;     // tag followed by text content
  // Normalise: split tags/content onto their own lines, strip trailing blanks.
  xml = xml.replace(reg, '$1\n$2$3').replace(wsexp, '$1\n').replace(contexp, '$1\n$2');
  pad = 0;
  formatted = '';
  lines = xml.split('\n');
  indent = 0;
  lastType = 'other';
  // Indent delta applied on each (previous line type -> current line type)
  // transition. Types: 'single' (<x/>), 'closing' (</x>), 'opening' (<x>),
  // 'other' (text content).
  transitions = {
    'single->single': 0,
    'single->closing': -1,
    'single->opening': 0,
    'single->other': 0,
    'closing->single': 0,
    'closing->closing': -1,
    'closing->opening': 0,
    'closing->other': 0,
    'opening->single': 1,
    'opening->closing': 0,
    'opening->opening': 1,
    'opening->other': 1,
    'other->single': 0,
    'other->closing': -1,
    'other->opening': 0,
    'other->other': 0
  };
  // Classify one line, update the running indent, and append it to
  // `formatted` with the appropriate padding.
  fn = function(ln) {
    var fromTo, j, key, padding, type, types, value;
    types = {
      single: Boolean(ln.match(/<.+\/>/)),
      closing: Boolean(ln.match(/<\/.+>/)),
      opening: Boolean(ln.match(/<[^!?].*>/))
    };
    // First matching classification wins; lines matching none are 'other'.
    type = ((function() {
      var results;
      results = [];
      for (key in types) {
        value = types[key];
        if (value) {
          results.push(key);
        }
      }
      return results;
    })())[0];
    type = type === void 0 ? 'other' : type;
    fromTo = lastType + '->' + type;
    lastType = type;
    padding = '';
    indent += transitions[fromTo];
    // Build the two-space-per-level indentation string.
    padding = ((function() {
      var m, ref1, results;
      results = [];
      for (j = m = 0, ref1 = indent; 0 <= ref1 ? m < ref1 : m > ref1; j = 0 <= ref1 ? ++m : --m) {
        results.push('  ');
      }
      return results;
    })()).join('');
    if (fromTo === 'opening->closing') {
      // Collapse "<x>\n</x>" onto a single line (drop the prior newline).
      formatted = formatted.substr(0, formatted.length - 1) + ln + '\n';
    } else {
      formatted += padding + ln + '\n';
    }
  };
  for (l = 0, len = lines.length; l < len; l++) {
    ln = lines[l];
    fn(ln);
  }
  return formatted;
};
// copy-pasted from swagger-js
// Render the HTML "Model" signature for a schema, including tables of
// per-property constraints and, appended below, the signatures of every
// model it (transitively) references. Copied from swagger-js.
//
// Arguments may be shifted: when the first argument is an object it is
// treated as the schema itself and name/models/modelPropertyMacro are
// read from arguments[1..2].
var getModelSignature = function (name, schema, models, modelPropertyMacro) {
  var strongOpen = '<span class="strong">';
  var strongClose = '</span>';
  // One row of the per-type options table ("Default", "Max. Length", ...).
  var optionHtml = function (label, value) {
    return '<tr><td class="optionName">' + label + ':</td><td>' + value + '</td></tr>';
  };
  // Allow for ignoring the 'name' argument.... shifting the rest
  if(_.isObject(arguments[0])) {
    name = void 0;
    schema = arguments[0];
    models = arguments[1];
    modelPropertyMacro = arguments[2];
  }
  models = models || {};
  // Resolve the schema (Handle nested schemas)
  schema = resolveSchema(schema);
  // Return for empty object
  if(_.isEmpty(schema)) {
    return strongOpen + 'Empty' + strongClose;
  }
  // Dereference $ref from 'models'
  if(typeof schema.$ref === 'string') {
    name = simpleRef(schema.$ref);
    schema = models[name];
    if(typeof schema === 'undefined')
    {
      return strongOpen + name + ' is not defined!' + strongClose;
    }
  }
  if(typeof name !== 'string') {
    name = schema.title || 'Inline Model';
  }
  // If we are a Model object... adjust accordingly
  if(schema.definition) {
    schema = schema.definition;
  }
  // Default macro: a property's sample value is its declared default.
  if(typeof modelPropertyMacro !== 'function') {
    modelPropertyMacro = function(prop){
      return (prop || {}).default;
    };
  }
  var references = {};   // models discovered while rendering, still to emit
  var seenModels = [];   // model names already emitted (loop/duplicate guard)
  var inlineModels = 0;  // counter used to name anonymous inline models
  // Generate current HTML
  var html = processModel(schema, name);
  // Generate references HTML. processModel can add new entries to
  // `references` on each pass, hence the outer while loop.
  while (_.keys(references).length > 0) {
    /* jshint ignore:start */
    _.forEach(references, function (schema, name) {
      var seenModel = _.indexOf(seenModels, name) > -1;
      delete references[name];
      if (!seenModel) {
        seenModels.push(name);
        html += '<br />' + processModel(schema, name);
      }
    });
    /* jshint ignore:end */
  }
  return html;
  // Register `schema` for later rendering (unless skipRef) and return its
  // display name; anonymous schemas get a generated 'Inline Model N' name.
  function addReference(schema, name, skipRef) {
    var modelName = name;
    var model;
    if (schema.$ref) {
      modelName = schema.title || simpleRef(schema.$ref);
      model = models[simpleRef(schema.$ref)];
    } else if (_.isUndefined(name)) {
      modelName = schema.title || 'Inline Model ' + (++inlineModels);
      model = {definition: schema};
    }
    if (skipRef !== true) {
      references[modelName] = _.isUndefined(model) ? {} : model.definition;
    }
    return modelName;
  }
  // Render the type name of a (possibly nested) schema as a propType span,
  // registering any referenced models along the way.
  function primitiveToHTML(schema) {
    var html = '<span class="propType">';
    var type = schema.type || 'object';
    if (schema.$ref) {
      html += addReference(schema, simpleRef(schema.$ref));
    } else if (type === 'object') {
      if (!_.isUndefined(schema.properties)) {
        html += addReference(schema);
      } else {
        html += 'object';
      }
    } else if (type === 'array') {
      html += 'Array[';
      if (_.isArray(schema.items)) {
        html += _.map(schema.items, addReference).join(',');
      } else if (_.isPlainObject(schema.items)) {
        if (_.isUndefined(schema.items.$ref)) {
          if (!_.isUndefined(schema.items.type) && _.indexOf(['array', 'object'], schema.items.type) === -1) {
            html += schema.items.type;
          } else {
            html += addReference(schema.items);
          }
        } else {
          html += addReference(schema.items, simpleRef(schema.items.$ref));
        }
      } else {
        console.log('Array type\'s \'items\' schema is not an array or an object, cannot process');
        html += 'object';
      }
      html += ']';
    } else {
      html += schema.type;
    }
    html += '</span>';
    return html;
  }
  // Append description/enum markup and a constraints table (min/max,
  // pattern, item counts, ...) to an already-rendered property signature.
  function primitiveToOptionsHTML(schema, html) {
    var options = '';
    var type = schema.type || 'object';
    var isArray = type === 'array';
    if (!_.isUndefined(schema.description)) {
      html += ': ' + '<span class="propDesc">' + schema.description + '</span>';
    }
    if (schema.enum) {
      html += ' = <span class="propVals">[\'' + schema.enum.join('\', \'') + '\']</span>';
    }
    if (isArray) {
      // Constraints below apply to the element type of the array.
      if (_.isPlainObject(schema.items) && !_.isUndefined(schema.items.type)) {
        type = schema.items.type;
      } else {
        type = 'object';
      }
    }
    if (!_.isUndefined(schema.default)) {
      options += optionHtml('Default', schema.default);
    }
    switch (type) {
    case 'string':
      if (schema.minLength) {
        options += optionHtml('Min. Length', schema.minLength);
      }
      if (schema.maxLength) {
        options += optionHtml('Max. Length', schema.maxLength);
      }
      if (schema.pattern) {
        options += optionHtml('Reg. Exp.', schema.pattern);
      }
      break;
    case 'integer':
    case 'number':
      if (schema.minimum) {
        options += optionHtml('Min. Value', schema.minimum);
      }
      if (schema.exclusiveMinimum) {
        options += optionHtml('Exclusive Min.', 'true');
      }
      if (schema.maximum) {
        options += optionHtml('Max. Value', schema.maximum);
      }
      if (schema.exclusiveMaximum) {
        options += optionHtml('Exclusive Max.', 'true');
      }
      if (schema.multipleOf) {
        options += optionHtml('Multiple Of', schema.multipleOf);
      }
      break;
    }
    if (isArray) {
      if (schema.minItems) {
        options += optionHtml('Min. Items', schema.minItems);
      }
      if (schema.maxItems) {
        options += optionHtml('Max. Items', schema.maxItems);
      }
      if (schema.uniqueItems) {
        options += optionHtml('Unique Items', 'true');
      }
      if (schema.collectionFormat) {
        options += optionHtml('Coll. Format', schema.collectionFormat);
      }
    }
    if (_.isUndefined(schema.items)) {
      if (_.isArray(schema.enum)) {
        var enumString;
        if (type === 'number' || type === 'integer') {
          enumString = schema.enum.join(', ');
        } else {
          enumString = '"' + schema.enum.join('", "') + '"';
        }
        options += optionHtml('Enum', enumString);
      }
    }
    if (options.length > 0) {
      html = '<span class="propWrap">' + html + '<table class="optionsWrapper"><tr><th colspan="2">' + type + '</th></tr>' + options + '</table></span>';
    }
    return html;
  }
  // Render one model (object or array) as "Name { ... }" / "Name [ ... ]",
  // listing its properties/items with their option tables.
  function processModel(schema, name) {
    var type = schema.type || 'object';
    var isArray = schema.type === 'array';
    var html = strongOpen + name + ' ' + (isArray ? '[' : '{') + strongClose;
    var contents;
    if (name) {
      seenModels.push(name);
    }
    if (isArray) {
      if (_.isArray(schema.items)) {
        html += '<div>' + _.map(schema.items, function (item) {
          var type = item.type || 'object';
          if (_.isUndefined(item.$ref)) {
            if (_.indexOf(['array', 'object'], type) > -1) {
              if (type === 'object' && _.isUndefined(item.properties)) {
                return 'object';
              } else {
                return addReference(item);
              }
            } else {
              return primitiveToOptionsHTML(item, type);
            }
          } else {
            return addReference(item, simpleRef(item.$ref));
          }
        }).join(',</div><div>');
      } else if (_.isPlainObject(schema.items)) {
        if (_.isUndefined(schema.items.$ref)) {
          if (_.indexOf(['array', 'object'], schema.items.type || 'object') > -1) {
            if ((_.isUndefined(schema.items.type) || schema.items.type === 'object') && _.isUndefined(schema.items.properties)) {
              html += '<div>object</div>';
            } else {
              html += '<div>' + addReference(schema.items) + '</div>';
            }
          } else {
            html += '<div>' + primitiveToOptionsHTML(schema.items, schema.items.type) + '</div>';
          }
        } else {
          html += '<div>' + addReference(schema.items, simpleRef(schema.items.$ref)) + '</div>';
        }
      } else {
        console.log('Array type\'s \'items\' property is not an array or an object, cannot process');
        html += '<div>object</div>';
      }
    } else {
      if (schema.$ref) {
        html += '<div>' + addReference(schema, name) + '</div>';
      } else if (type === 'object') {
        if (_.isPlainObject(schema.properties)) {
          contents = _.map(schema.properties, function (property, name) {
            var propertyIsRequired = (_.indexOf(schema.required, name) >= 0);
            var cProperty = _.cloneDeep(property);
            var requiredClass = propertyIsRequired ? 'required' : '';
            var html = '<span class="propName ' + requiredClass + '">' + name + '</span> (';
            var model;
            // Allow macro to set the default value
            cProperty.default = modelPropertyMacro(cProperty);
            // Resolve the schema (Handle nested schemas)
            cProperty = resolveSchema(cProperty);
            // We need to handle property references to primitives (Issue 339)
            if (!_.isUndefined(cProperty.$ref)) {
              model = models[simpleRef(cProperty.$ref)];
              if (!_.isUndefined(model) && _.indexOf([undefined, 'array', 'object'], model.definition.type) === -1) {
                // Use referenced schema
                cProperty = resolveSchema(model.definition);
              }
            }
            html += primitiveToHTML(cProperty);
            if(!propertyIsRequired) {
              html += ', <span class="propOptKey">optional</span>';
            }
            if(property.readOnly) {
              html += ', <span class="propReadOnly">read only</span>';
            }
            html += ')';
            return '<div' + (property.readOnly ? ' class="readOnly"' : '') + '>' + primitiveToOptionsHTML(cProperty, html);
          }).join(',</div>');
        }
        if (contents) {
          html += contents + '</div>';
        }
      } else {
        html += '<div>' + primitiveToOptionsHTML(schema, type) + '</div>';
      }
    }
    return html + strongOpen + (isArray ? ']' : '}') + strongClose;
  }
};
// copy-pasted from swagger-js
// Build a sample JSON value for a schema: explicit example > enum[0] >
// $ref expansion > default > canned per-type placeholder. Recurses into
// object properties and array items; `modelsToIgnore` tracks models on the
// current expansion path to break reference cycles. Copied from swagger-js.
var schemaToJSON = function (schema, models, modelsToIgnore, modelPropertyMacro) {
  // Resolve the schema (Handle nested schemas)
  schema = resolveSchema(schema);
  // Default macro: a property's sample value is its declared default.
  if(typeof modelPropertyMacro !== 'function') {
    modelPropertyMacro = function(prop){
      return (prop || {}).default;
    };
  }
  modelsToIgnore= modelsToIgnore || {};
  var type = schema.type || 'object';
  var format = schema.format;
  var model;
  var output;
  if (!_.isUndefined(schema.example)) {
    output = schema.example;
  } else if (_.isUndefined(schema.items) && _.isArray(schema.enum)) {
    output = schema.enum[0];
  }
  if (_.isUndefined(output)) {
    if (schema.$ref) {
      model = models[simpleRef(schema.$ref)];
      if (!_.isUndefined(model)) {
        if (_.isUndefined(modelsToIgnore[model.name])) {
          // Not yet on the expansion path: expand the referenced model,
          // then remove it again so sibling branches may expand it too.
          modelsToIgnore[model.name] = model;
          output = schemaToJSON(model.definition, models, modelsToIgnore, modelPropertyMacro);
          delete modelsToIgnore[model.name];
        } else {
          // Cycle detected: emit an empty container instead of recursing.
          if (model.type === 'array') {
            output = [];
          } else {
            output = {};
          }
        }
      }
    } else if (!_.isUndefined(schema.default)) {
      output = schema.default;
    } else if (type === 'string') {
      if (format === 'date-time') {
        output = new Date().toISOString();
      } else if (format === 'date') {
        output = new Date().toISOString().split('T')[0];
      } else {
        output = 'string';
      }
    } else if (type === 'integer') {
      output = 0;
    } else if (type === 'number') {
      output = 0.0;
    } else if (type === 'boolean') {
      output = true;
    } else if (type === 'object') {
      output = {};
      _.forEach(schema.properties, function (property, name) {
        var cProperty = _.cloneDeep(property);
        // Allow macro to set the default value
        cProperty.default = modelPropertyMacro(property);
        output[name] = schemaToJSON(cProperty, models, modelsToIgnore, modelPropertyMacro);
      });
    } else if (type === 'array') {
      output = [];
      if (_.isArray(schema.items)) {
        _.forEach(schema.items, function (item) {
          output.push(schemaToJSON(item, models, modelsToIgnore, modelPropertyMacro));
        });
      } else if (_.isPlainObject(schema.items)) {
        output.push(schemaToJSON(schema.items, models, modelsToIgnore, modelPropertyMacro));
      } else if (_.isUndefined(schema.items)) {
        output.push({});
      } else {
        console.log('Array type\'s \'items\' property is not an array or an object, cannot process');
      }
    }
  }
  return output;
};
// copy-pasted from swagger-js
// Build a JSON sample for a model wrapper object (`value` carries name,
// definition, models and modelPropertyMacro). A response example under
// 'application/json' takes priority; YAML/JSON strings are parsed first.
// Copied from swagger-js.
var createJSONSample = function (value, modelsToIgnore) {
  var ignored = modelsToIgnore || {};
  ignored[value.name] = value;
  var definition = value.definition;
  var examples = value.examples;
  // Response support
  var jsonExample = (examples && _.isPlainObject(examples)) ? examples['application/json'] : undefined;
  if (jsonExample) {
    definition.example = _.isString(jsonExample) ? jsyaml.safeLoad(jsonExample) : jsonExample;
  } else if (!definition.example) {
    definition.example = examples;
  }
  return schemaToJSON(definition, value.models, ignored, value.modelPropertyMacro);
};
// copy-pasted from swagger-js
// Render the signature of a parameter type: a model signature when `type`
// names a definition or an inline model, otherwise the primitive type
// name. An array input (`[innerType]`) is wrapped as 'Array[...]'.
// Copied from swagger-js.
var getParameterModelSignature = function (type, definitions) {
  var isList = type instanceof Array;
  if (isList) {
    type = type[0];
  }
  // Convert undefined to string of 'undefined'
  var model = null;
  if (typeof type === 'undefined') {
    type = 'undefined';
  } else if (definitions[type]) {
    // a model def exists?
    model = definitions[type];
  } else {
    model = getInlineModel(type);
  }
  if (!model) {
    // We default to primitive
    return isList ? 'Array[' + type + ']' : type.toString();
  }
  var signature = getModelSignature(model.name, model.definition, model.models, model.modelPropertyMacro);
  return isList ? 'Array[' + signature + ']' : signature;
};
// copy-pasted from swagger-js
// Build a sample payload string for a parameter type: JSON for plain
// objects/arrays, formatted XML when the sample is a DOM node, or the raw
// sample when it is already a string. Returns undefined when no model is
// known for `type`. Copied from swagger-js.
var createParameterJSONSample = function (type, models) {
  var listType, sampleJson, innerType;
  models = models || {};
  listType = (type instanceof Array);
  innerType = listType ? type[0] : type;
  if(models[innerType]) {
    sampleJson = createJSONSample(models[innerType]);
  } else if (getInlineModel(innerType)){
    sampleJson = createJSONSample(getInlineModel(innerType)); // may return null, if type isn't correct
  }
  if (sampleJson) {
    sampleJson = listType ? [sampleJson] : sampleJson;
    if (typeof sampleJson === 'string') {
      return sampleJson;
    } else if (_.isObject(sampleJson)) {
      var t = sampleJson;
      if (sampleJson instanceof Array && sampleJson.length > 0) {
        t = sampleJson[0];
      }
      // Bug fix: the original tested `typeof t === 'Node'`, which can never
      // be true (`typeof` only yields primitive tag strings), so the XML
      // branch was unreachable and DOM samples fell through to
      // JSON.stringify. Detect DOM nodes by their `nodeName` marker.
      if (t && typeof t === 'object' && typeof t.nodeName === 'string') {
        var xmlString = new XMLSerializer().serializeToString(t);
        return formatXml(xmlString);
      } else {
        return JSON.stringify(sampleJson, null, 2);
      }
    } else {
      return sampleJson;
    }
  }
};
// Serialize `value` inside an XML element `name`, with optional
// attributes given as [{name, value}, ...]. A missing element name yields
// the standard invalid-XML comment.
var wrapTag = function (name, value, attrs) {
  if (!name) {
    return getErrorMessage('Node name is not provided');
  }
  var attributes = (attrs || []).map(function (attr) {
    return ' ' + attr.name + '="' + attr.value + '"';
  }).join('');
  return '<' + name + attributes + '>' + value + '</' + name + '>';
};
// Compute the XML element name for a schema: an explicit `xml.name`
// overrides the fallback `name`, and `xml.prefix` is prepended as a
// namespace prefix when present.
var getName = function (name, xml) {
  var cfg = xml || {};
  var elementName = cfg.name ? cfg.name : (name || '');
  return cfg.prefix ? cfg.prefix + ':' + elementName : elementName;
};
// Build the xmlns attribute descriptor for a schema's `xml` object.
// Returns {name: 'xmlns[:prefix]', value: namespace} when a namespace is
// declared, or the historical '' sentinel when there is none.
var getNamespace = function (xml) {
  var cfg = xml || {};
  if (!cfg.namespace) {
    return '';
  }
  var attrName = cfg.prefix ? 'xmlns:' + cfg.prefix : 'xmlns';
  return {
    name: attrName,
    value: cfg.namespace
  };
};
// Render an array-typed schema as XML: serialize the item schema under the
// array's name, and add an enclosing wrapper element when xml.wrapped is
// set. A missing `items` schema yields the invalid-XML comment.
var createArrayXML = function (descriptor) {
  var items = descriptor.definition.items;
  if (!items) {
    return getErrorMessage();
  }
  var xml = descriptor.definition.xml || {};
  var value = createSchemaXML(descriptor.name, items, descriptor.models, descriptor.config);
  return xml.wrapped ? wrapTag(descriptor.name, value) : value;
};
// Short textual signature for a primitive schema: its format (preferred)
// or type. Arrays render as 'Array[<item format|type>]'; objects are not
// primitives and say so.
var getPrimitiveSignature = function (schema) {
  var s = schema || {};
  var items = s.items || {};
  switch (s.type || '') {
  case 'object':
    return 'Object is not a primitive';
  case 'array':
    return 'Array[' + (items.format || items.type) + ']';
  default:
    return s.format || s.type || '';
  }
};
// Render a primitive-typed schema as XML. The sample value is, in order:
// the first enum value, an explicit example, a canned (type, format)
// placeholder. When xml.attribute is set, a {name, value} pair is returned
// instead of markup so the parent element can attach it as an attribute.
// Unknown types yield the invalid-XML comment.
var createPrimitiveXML = function (descriptor) {
  var name = descriptor.name;
  var definition = descriptor.definition;
  // Canned sample values per (type, format); 'default' covers unknown formats.
  var primitivesMap = {
    'string': {
      'date': new Date(1).toISOString().split('T')[0],
      'date-time' : new Date(1).toISOString(),
      'default': 'string'
    },
    'integer': {
      'default': 1
    },
    'number': {
      'default': 1.1
    },
    'boolean': {
      'default': true
    }
  };
  var type = definition.type;
  var format = definition.format;
  var xml = definition.xml || {};
  var namespace = getNamespace(xml);
  var attributes = [];
  var value;
  if (_.keys(primitivesMap).indexOf(type) < 0) { return getErrorMessage(); }
  if (_.isArray(definition.enum)){
    value = definition.enum[0];
  } else if (!_.isUndefined(definition.example)) {
    // Bug fix: the original used `definition.example || <placeholder>`,
    // which silently discarded valid falsy examples such as 0, false or ''.
    value = definition.example;
  } else {
    value = primitivesMap[type][format] || primitivesMap[type].default;
  }
  if (xml.attribute) {
    return {name: name, value: value};
  }
  if (namespace) {
    attributes.push(namespace);
  }
  return wrapTag(name, value, attributes);
};
// Render an object-typed schema as an XML element. Properties marked
// xml.attribute become attributes on the element; the rest are serialized
// as child elements. In parameter context (config.isParam) read-only
// properties are skipped. A schema with neither properties nor
// additionalProperties yields the invalid-XML comment.
function createObjectXML (descriptor) {
  var name = descriptor.name;
  var definition = descriptor.definition;
  var config = descriptor.config;
  var models = descriptor.models;
  var isParam = descriptor.config.isParam;
  var serializedProperties;
  var attrs = [];
  var properties = definition.properties;
  var additionalProperties = definition.additionalProperties;
  var xml = definition.xml;
  var namespace = getNamespace(xml);
  if (namespace) {
    attrs.push(namespace);
  }
  if (!properties && !additionalProperties) { return getErrorMessage(); }
  properties = properties || {};
  serializedProperties = _.map(properties, function (prop, key) {
    var xml, result;
    if (isParam && prop.readOnly) {
      // Read-only properties are omitted from request payload samples.
      return '';
    }
    xml = prop.xml || {};
    result = createSchemaXML(key, prop, models, config);
    if (xml.attribute) {
      // Attribute properties attach to the parent element, not its body.
      attrs.push(result);
      return '';
    }
    return result;
  }).join('');
  if (additionalProperties) {
    serializedProperties += '<!-- additional elements allowed -->';
  }
  return wrapTag(name, serializedProperties, attrs);
}
// XML comment emitted in place of a model that (transitively) references
// itself, to break the infinite expansion.
function getInfiniteLoopMessage (name) {
  return ['<!-- Infinite loop $ref:', name, ' -->'].join('');
}
// XML comment emitted when a schema cannot be rendered; an optional
// `details` string is appended after a colon.
function getErrorMessage (details) {
  var suffix = details ? ': ' + details : '';
  return '<!-- invalid XML' + suffix + ' -->';
}
// Recursively render any schema (inline or $ref) as sample XML.
// config.modelsToIgnore tracks $refs on the current expansion path so
// self-referencing models emit an infinite-loop comment instead of
// recursing forever.
function createSchemaXML (name, definition, models, config) {
  var $ref = _.isObject(definition) ? definition.$ref : null;
  var output, index;
  config = config || {};
  config.modelsToIgnore = config.modelsToIgnore || [];
  var descriptor = _.isString($ref) ? getDescriptorByRef($ref, name, models, config)
    : getDescriptor(name, definition, models, config);
  if (!descriptor) {
    return getErrorMessage();
  }
  // Dispatch on the resolved descriptor type.
  switch (descriptor.type) {
  case 'array':
    output = createArrayXML(descriptor); break;
  case 'object':
    output = createObjectXML(descriptor); break;
  case 'loop':
    output = getInfiniteLoopMessage(descriptor.name); break;
  default:
    output = createPrimitiveXML(descriptor);
  }
  if ($ref) {
    // Done expanding this $ref: take it back off the current path so
    // sibling branches may expand it again.
    index = config.modelsToIgnore.indexOf($ref);
    if (index > -1) {
      config.modelsToIgnore.splice(index, 1);
    }
  }
  return output;
}
// Value object describing one schema to be rendered as XML: its resolved
// element name, JSON-schema type, definition, the models registry and the
// shared rendering config. The first four arguments are mandatory.
function Descriptor (name, type, definition, models, config) {
  if (arguments.length < 4) {
    throw new Error();
  }
  this.config = config || {};
  this.config.modelsToIgnore = this.config.modelsToIgnore || [];
  // Element name may be overridden/prefixed by the schema's `xml` object.
  this.name = getName(name, definition.xml);
  this.definition = definition;
  this.models = models;
  this.type = type;
}
// Build a Descriptor for a $ref schema by resolving it against `models`.
// If the $ref is already on the current expansion path the type becomes
// 'loop' (rendered as an infinite-loop comment); otherwise the $ref is
// pushed onto the path. Returns null when the model has no definition.
function getDescriptorByRef($ref, name, models, config) {
  var modelType = simpleRef($ref);
  var model = models[modelType] || {};
  var type = model.definition && model.definition.type ? model.definition.type : 'object';
  name = name || model.name;
  if (config.modelsToIgnore.indexOf($ref) > -1) {
    type = 'loop';
    name = modelType;
  } else {
    config.modelsToIgnore.push($ref);
  }
  if (!model.definition) {
    return null;
  }
  return new Descriptor(name, type, model.definition, models, config);
}
// Build a Descriptor for an inline (non-$ref) schema definition; a schema
// with no explicit type is treated as 'object'. Returns null when no
// definition is supplied.
function getDescriptor (name, definition, models, config){
  // Bug fix: the original read `definition.type` before the null check,
  // so a missing definition threw a TypeError instead of returning null.
  if (!definition) {
    return null;
  }
  var type = definition.type || 'object';
  return new Descriptor(name, type, definition, models, config);
}
// Public entry point: render a complete, pretty-printed XML sample
// document (with XML prolog) for a schema. `isParam` suppresses
// read-only properties (request-payload context).
function createXMLSample (name, definition, models, isParam) {
  var prolog = '<?xml version="1.0"?>';
  return formatXml(prolog + createSchemaXML(name, definition, models, { isParam: isParam } ));
}
return {
getModelSignature: getModelSignature,
createJSONSample: createJSONSample,
getParameterModelSignature: getParameterModelSignature,
createParameterJSONSample: createParameterJSONSample,
createSchemaXML: createSchemaXML,
createXMLSample: createXMLSample,
getPrimitiveSignature: getPrimitiveSignature
};
})();
| apache-2.0 |
nmldiegues/stibt | infinispan/core/src/main/java/org/infinispan/distribution/wrappers/RpcManagerWrapper.java | 18688 | /*
* INESC-ID, Instituto de Engenharia de Sistemas e Computadores Investigação e Desevolvimento em Lisboa
* Copyright 2013 INESC-ID and/or its affiliates and other
* contributors as indicated by the @author tags. All rights reserved.
* See the copyright.txt in the distribution for a full listing of
* individual contributors.
*
* This is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 3.0 of
* the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this software; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA, or see the FSF site: http://www.fsf.org.
*/
package org.infinispan.distribution.wrappers;
import org.infinispan.commands.ReplicableCommand;
import org.infinispan.commands.remote.ClusteredGetCommand;
import org.infinispan.commands.remote.GMUClusteredGetCommand;
import org.infinispan.commands.remote.recovery.TxCompletionNotificationCommand;
import org.infinispan.commands.tx.CommitCommand;
import org.infinispan.commands.tx.PrepareCommand;
import org.infinispan.commands.tx.RollbackCommand;
import org.infinispan.commands.tx.totalorder.TotalOrderGMUPrepareCommand;
import org.infinispan.remoting.RpcException;
import org.infinispan.remoting.responses.AbstractResponse;
import org.infinispan.remoting.responses.Response;
import org.infinispan.remoting.rpc.ResponseFilter;
import org.infinispan.remoting.rpc.ResponseMode;
import org.infinispan.remoting.rpc.RpcManager;
import org.infinispan.remoting.transport.Address;
import org.infinispan.remoting.transport.Transport;
import org.infinispan.remoting.transport.jgroups.JGroupsTransport;
import org.infinispan.stats.ExposedStatistic;
import org.infinispan.stats.PiggyBackStat;
import org.infinispan.stats.TransactionsStatisticsRegistry;
import org.infinispan.stats.container.TransactionStatistics;
import org.infinispan.util.concurrent.NotifyingNotifiableFuture;
import org.infinispan.util.concurrent.ResponseFuture;
import org.infinispan.util.logging.Log;
import org.infinispan.util.logging.LogFactory;
import org.jgroups.blocks.RpcDispatcher;
import org.jgroups.util.Buffer;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static org.infinispan.stats.ExposedStatistic.*;
/**
* @author Mircea Markus <mircea.markus@jboss.com> (C) 2011 Red Hat Inc.
* @author Diego Didona <didona@gsd.inesc-id.pt>
* @author Pedro Ruivo
* @since 5.2
*/
public class RpcManagerWrapper implements RpcManager {
   private static final Log log = LogFactory.getLog(RpcManagerWrapper.class);

   /** Delegate that performs the actual remote invocations. */
   private final RpcManager actual;
   /** Marshaller used to estimate serialized command sizes; {@code null} when the transport is not JGroups-based. */
   private final RpcDispatcher.Marshaller marshaller;
   /** Address of this node, cached to exclude it from contacted-node counts. */
   private final Address myAddress;

   public RpcManagerWrapper(RpcManager actual) {
      this.actual = actual;
      Transport t = actual.getTransport();
      if (t instanceof JGroupsTransport) {
         marshaller = ((JGroupsTransport) t).getCommandAwareRpcDispatcher().getMarshaller();
      } else {
         // Command sizes cannot be measured without a JGroups marshaller; getCommandSize() will return 0.
         marshaller = null;
      }
      myAddress = actual.getTransport().getAddress();
   }

   @Override
   public Map<Address, Response> invokeRemotely(Collection<Address> recipients, ReplicableCommand rpcCommand,
                                                ResponseMode mode, long timeout, boolean usePriorityQueue,
                                                ResponseFilter responseFilter, boolean totalOrder) {
      long currentTime = System.nanoTime();
      Map<Address, Response> ret = actual.invokeRemotely(recipients, rpcCommand, mode, timeout, usePriorityQueue, responseFilter, totalOrder);
      updateStats(rpcCommand, mode.isSynchronous(), currentTime, recipients, null, ret);
      return ret;
   }

   @Override
   public Map<Address, Response> invokeRemotely(Collection<Address> recipients, ReplicableCommand rpcCommand,
                                                ResponseMode mode, long timeout, boolean usePriorityQueue, boolean totalOrder) {
      long currentTime = System.nanoTime();
      Map<Address, Response> ret = actual.invokeRemotely(recipients, rpcCommand, mode, timeout, usePriorityQueue, totalOrder);
      updateStats(rpcCommand, mode.isSynchronous(), currentTime, recipients, null, ret);
      return ret;
   }

   @Override
   public Map<Address, Response> invokeRemotely(Collection<Address> recipients, ReplicableCommand rpcCommand, ResponseMode mode, long timeout, boolean totalOrder) {
      long currentTime = System.nanoTime();
      Map<Address, Response> ret = actual.invokeRemotely(recipients, rpcCommand, mode, timeout, totalOrder);
      updateStats(rpcCommand, mode.isSynchronous(), currentTime, recipients, null, ret);
      return ret;
   }

   @Override
   public void broadcastRpcCommand(ReplicableCommand rpc, boolean sync, boolean totalOrder) throws RpcException {
      long currentTime = System.nanoTime();
      actual.broadcastRpcCommand(rpc, sync, totalOrder);
      updateStats(rpc, sync, currentTime, null, null, null);
   }

   @Override
   public void broadcastRpcCommand(ReplicableCommand rpc, boolean sync, boolean usePriorityQueue, boolean totalOrder) throws RpcException {
      long currentTime = System.nanoTime();
      actual.broadcastRpcCommand(rpc, sync, usePriorityQueue, totalOrder);
      updateStats(rpc, sync, currentTime, null, null, null);
   }

   @Override
   public void broadcastRpcCommandInFuture(ReplicableCommand rpc, NotifyingNotifiableFuture<Object> future) {
      long currentTime = System.nanoTime();
      actual.broadcastRpcCommandInFuture(rpc, future);
      updateStats(rpc, false, currentTime, null, null, null);
   }

   @Override
   public void broadcastRpcCommandInFuture(ReplicableCommand rpc, boolean usePriorityQueue,
                                           NotifyingNotifiableFuture<Object> future) {
      long currentTime = System.nanoTime();
      actual.broadcastRpcCommandInFuture(rpc, usePriorityQueue, future);
      updateStats(rpc, false, currentTime, null, null, null);
   }

   @Override
   public Map<Address, Response> invokeRemotely(Collection<Address> recipients, ReplicableCommand rpc, boolean sync, boolean totalOrder) throws RpcException {
      long currentTime = System.nanoTime();
      Map<Address, Response> ret = actual.invokeRemotely(recipients, rpc, sync, totalOrder);
      updateStats(rpc, sync, currentTime, recipients, null, ret);
      return ret;
   }

   @Override
   //This should be the method invoked at prepareTime
   public Map<Address, Response> invokeRemotely(Collection<Address> recipients, ReplicableCommand rpc, boolean sync,
                                                boolean usePriorityQueue, boolean totalOrder) throws RpcException {
      boolean isPrepareCmd = rpc instanceof PrepareCommand;
      final TransactionStatistics transactionStatistics = TransactionsStatisticsRegistry.getTransactionStatistics();
      long currentTime = System.nanoTime();
      // Mark the prepare as sent *before* the invocation so the statistics capture the full round trip.
      if (isPrepareCmd && transactionStatistics != null) {
         transactionStatistics.markPrepareSent();
      }
      Map<Address, Response> ret = actual.invokeRemotely(recipients, rpc, sync, usePriorityQueue, totalOrder);
      if (transactionStatistics != null) {
         updateStats(rpc, sync, currentTime, recipients, null, ret);
      }
      return ret;
   }

   @Override
   public ResponseFuture invokeRemotelyWithFuture(Collection<Address> recipients, ReplicableCommand rpc, boolean usePriorityQueue, boolean totalOrder) {
      long currentTime = System.nanoTime();
      ResponseFuture ret = actual.invokeRemotelyWithFuture(recipients, rpc, usePriorityQueue, totalOrder);
      // Stats are attached to the future and recorded when it completes.
      updateStats(rpc, true, currentTime, recipients, ret, null);
      return ret;
   }

   @Override
   public void invokeRemotelyInFuture(Collection<Address> recipients, ReplicableCommand rpc,
                                      NotifyingNotifiableFuture<Object> future) {
      long currentTime = System.nanoTime();
      actual.invokeRemotelyInFuture(recipients, rpc, future);
      updateStats(rpc, false, currentTime, recipients, null, null);
   }

   @Override
   public void invokeRemotelyInFuture(Collection<Address> recipients, ReplicableCommand rpc, boolean usePriorityQueue,
                                      NotifyingNotifiableFuture<Object> future) {
      long currentTime = System.nanoTime();
      actual.invokeRemotelyInFuture(recipients, rpc, usePriorityQueue, future);
      updateStats(rpc, false, currentTime, recipients, null, null);
   }

   @Override
   public void invokeRemotelyInFuture(Collection<Address> recipients, ReplicableCommand rpc, boolean usePriorityQueue,
                                      NotifyingNotifiableFuture<Object> future, long timeout) {
      long currentTime = System.nanoTime();
      actual.invokeRemotelyInFuture(recipients, rpc, usePriorityQueue, future, timeout);
      updateStats(rpc, false, currentTime, recipients, null, null);
   }

   @Override
   public void invokeRemotelyInFuture(Collection<Address> recipients, ReplicableCommand rpc, boolean usePriorityQueue,
                                      NotifyingNotifiableFuture<Object> future, long timeout, boolean ignoreLeavers) {
      long currentTime = System.nanoTime();
      actual.invokeRemotelyInFuture(recipients, rpc, usePriorityQueue, future, timeout, ignoreLeavers);
      updateStats(rpc, false, currentTime, recipients, null, null);
   }

   @Override
   public Transport getTransport() {
      return actual.getTransport();
   }

   @Override
   public List<Address> getMembers() {
      return actual.getMembers();
   }

   @Override
   public Address getAddress() {
      return actual.getAddress();
   }

   @Override
   public int getTopologyId() {
      return actual.getTopologyId();
   }

   /**
    * Records latency, counter, recipient-count and (where available) command-size statistics
    * for the given RPC. Statistics are collected only for local transactions; remote
    * transactions and commands without a statistic collector are skipped — except
    * {@link TxCompletionNotificationCommand}, which is tracked even without a collector.
    *
    * @param command     the command that was sent
    * @param sync        whether the invocation was synchronous
    * @param init        {@link System#nanoTime()} taken immediately before the invocation
    * @param recipients  target nodes, or {@code null} for a broadcast
    * @param future      future for async invocations; when non-null, stats are attached to it
    * @param responseMap responses received, used to extract piggy-backed wait times
    */
   private void updateStats(ReplicableCommand command, boolean sync, long init, Collection<Address> recipients, ResponseFuture future, Map<Address, Response> responseMap) {
      final TransactionStatistics transactionStatistics = TransactionsStatisticsRegistry.getTransactionStatistics();
      // NOTE: parentheses added around the && term for clarity; this is the precedence Java applied anyway.
      if (!TransactionsStatisticsRegistry.isActive() || (transactionStatistics == null &&
            !(command instanceof TxCompletionNotificationCommand))) {
         if (log.isTraceEnabled()) {
            log.tracef("Does not update stats for command %s. No statistic collector found", command);
         }
         return;
      } else if (transactionStatistics != null && !transactionStatistics.isLocal()) {
         if (log.isTraceEnabled()) {
            log.tracef("Does not update stats for command %s. The command is remote!", command);
         }
         return;
      }
      // From here on, transactionStatistics can only be null for TxCompletionNotificationCommand.
      ExposedStatistic durationStat;
      ExposedStatistic counterStat;
      ExposedStatistic recipientSizeStat;
      ExposedStatistic commandSizeStat = null;
      long contactedNodesMinusMe = recipientListSize(recipients) - (isCurrentNodeInvolved(recipients) ? 1 : 0);
      long wallClockTimeTaken = System.nanoTime() - init;
      if (command instanceof PrepareCommand) {
         if (sync) {
            durationStat = RTT_PREPARE;
            counterStat = NUM_RTTS_PREPARE;
         } else {
            durationStat = ASYNC_PREPARE;
            counterStat = NUM_ASYNC_PREPARE;
         }
         recipientSizeStat = NUM_NODES_PREPARE;
         commandSizeStat = PREPARE_COMMAND_SIZE;
         if (command instanceof TotalOrderGMUPrepareCommand) {
            // Factor the responders' piggy-backed wait times out of the measured round trip.
            WaitStats w = new WaitStats(responseMap);
            long maxW = w.maxConditionalWaitTime;
            long avgW = w.avgUnconditionalWaitTime;
            long condAvg = w.avgConditionalWaitTime;
            long waits = w.numWaitedNodes;
            transactionStatistics.addValue(TO_GMU_PREPARE_COMMAND_RTT_MINUS_MAX, wallClockTimeTaken - maxW);
            transactionStatistics.addValue(TO_GMU_PREPARE_COMMAND_RTT_MINUS_AVG, wallClockTimeTaken - avgW);
            if (waits > 0) {
               transactionStatistics.incrementValue(NUM_TO_GMU_PREPARE_COMMAND_AT_LEAST_ONE_WAIT);
               transactionStatistics.addValue(TO_GMU_PREPARE_COMMAND_NODES_WAITED, waits);
               transactionStatistics.addValue(TO_GMU_PREPARE_COMMAND_AVG_WAIT_TIME, condAvg);
               transactionStatistics.addValue(TO_GMU_PREPARE_COMMAND_MAX_WAIT_TIME, maxW);
            } else {
               transactionStatistics.incrementValue(NUM_TO_GMU_PREPARE_COMMAND_RTT_NO_WAITED);//NB this could be obtained by taking the total number of prepare-the ones that waited
               transactionStatistics.addValue(TO_GMU_PREPARE_COMMAND_RTT_NO_WAIT, wallClockTimeTaken);
            }
         }
      } else if (command instanceof RollbackCommand) {
         if (sync) {
            durationStat = RTT_ROLLBACK;
            counterStat = NUM_RTTS_ROLLBACK;
         } else {
            durationStat = ASYNC_ROLLBACK;
            counterStat = NUM_ASYNC_ROLLBACK;
         }
         recipientSizeStat = NUM_NODES_ROLLBACK;
         commandSizeStat = ROLLBACK_COMMAND_SIZE;
      } else if (command instanceof CommitCommand) {
         if (sync) {
            durationStat = RTT_COMMIT;
            counterStat = NUM_RTTS_COMMIT;
            transactionStatistics.addValue(SENT_SYNC_COMMIT, contactedNodesMinusMe);
         } else {
            durationStat = ASYNC_COMMIT;
            counterStat = NUM_ASYNC_COMMIT;
            transactionStatistics.addValue(SENT_ASYNC_COMMIT, contactedNodesMinusMe);
         }
         recipientSizeStat = NUM_NODES_COMMIT;
         commandSizeStat = COMMIT_COMMAND_SIZE;
      } else if (command instanceof ClusteredGetCommand) {
         durationStat = RTT_GET;
         counterStat = NUM_RTTS_GET;
         recipientSizeStat = NUM_NODES_GET;
         commandSizeStat = CLUSTERED_GET_COMMAND_SIZE;
         //Take rtt sample if the remote read has not waited
         if (command instanceof GMUClusteredGetCommand) {
            if (pickGmuRemoteGetWaitingTime(responseMap) == 0) {
               transactionStatistics.incrementValue(NUM_RTT_GET_NO_WAIT);
               transactionStatistics.addValue(RTT_GET_NO_WAIT, wallClockTimeTaken);
            }
         }
      } else if (command instanceof TxCompletionNotificationCommand) {
         durationStat = ASYNC_COMPLETE_NOTIFY;
         counterStat = NUM_ASYNC_COMPLETE_NOTIFY;
         recipientSizeStat = NUM_NODES_COMPLETE_NOTIFY;
      } else {
         if (log.isTraceEnabled()) {
            log.tracef("Does not update stats for command %s. The command is not needed", command);
         }
         return;
      }
      if (log.isTraceEnabled()) {
         log.tracef("Update stats for command %s. Is sync? %s. Duration stat is %s, counter stats is %s, " +
                          "recipient size stat is %s", command, sync, durationStat, counterStat, recipientSizeStat);
      }
      if (future != null) {
         // Async invocation with a future: defer the update until the responses arrive.
         future.setUpdateStats(transactionStatistics, init, durationStat, counterStat, recipientSizeStat, commandSizeStat,
                               getCommandSize(command), recipientListSize(recipients));
      } else if (transactionStatistics != null) {
         transactionStatistics.addValue(durationStat, wallClockTimeTaken);
         transactionStatistics.incrementValue(counterStat);
         transactionStatistics.addValue(recipientSizeStat, recipientListSize(recipients));
         if (commandSizeStat != null) {
            transactionStatistics.addValue(commandSizeStat, getCommandSize(command));
         }
      } else {
         // No per-transaction collector (TxCompletionNotificationCommand): record globally.
         TransactionsStatisticsRegistry.addValueAndFlushIfNeeded(durationStat, wallClockTimeTaken, true);
         TransactionsStatisticsRegistry.incrementValueAndFlushIfNeeded(counterStat, true);
         TransactionsStatisticsRegistry.addValueAndFlushIfNeeded(recipientSizeStat, recipientListSize(recipients), true);
      }
   }

   /** Number of targeted nodes; a {@code null} recipient list means a broadcast to the whole cluster. */
   private int recipientListSize(Collection<Address> recipients) {
      return recipients == null ? actual.getTransport().getMembers().size() : recipients.size();
   }

   private boolean isCurrentNodeInvolved(Collection<Address> recipients) {
      //If recipients is null it's either a BroadCast (or I am the only one in the cluster, which is a trivial broadcast)
      return recipients == null || recipients.contains(myAddress);
   }

   /**
    * Serialized size of the command in bytes, or 0 when it cannot be measured
    * (marshalling failure, or no JGroups marshaller — the NPE is caught below).
    */
   private int getCommandSize(ReplicableCommand command) {
      try {
         Buffer buffer = marshaller.objectToBuffer(command);
         return buffer != null ? buffer.getLength() : 0;
      } catch (Exception e) {
         return 0;
      }
   }

   /**
    * Returns the first positive piggy-backed wait time found in the responses,
    * 0 when no responder waited, or -1 when the response map is empty.
    */
   private long pickGmuRemoteGetWaitingTime(Map<Address, Response> map) {
      if (map == null || map.size() == 0) {
         if (log.isDebugEnabled())
            log.debug("GmuClusteredGetCommand reply is empty");
         return -1;
      }
      AbstractResponse r;
      long w;
      PiggyBackStat pbs;
      for (Map.Entry<Address, Response> e : map.entrySet()) {
         if (e != null && (r = (AbstractResponse) e.getValue()) != null) {
            pbs = r.getPiggyBackStat();
            if (pbs != null) {
               if ((w = pbs.getWaitTime()) > 0) {
                  return w;
               }
            }
         }
      }
      return 0;
   }

   /**
    * Aggregates the piggy-backed wait times reported by the responders of a
    * total-order GMU prepare: how many nodes waited, the maximum wait, and the
    * average wait both over all responders and over the waiting ones only.
    * Declared static — it never touches the enclosing instance.
    */
   private static class WaitStats {
      private long numWaitedNodes;
      private long avgConditionalWaitTime;   // average over the nodes that actually waited
      private long maxConditionalWaitTime;   // maximum wait observed
      private long avgUnconditionalWaitTime; // average over all responders

      WaitStats(Map<Address, Response> map) {
         if (map == null || map.size() == 0)
            return;
         long max = 0, sum = 0, temp;
         long waited = 0;
         AbstractResponse r;
         Set<Map.Entry<Address, Response>> set = map.entrySet();
         for (Map.Entry<Address, Response> e : set) {
            r = (AbstractResponse) e.getValue();
            temp = r.getPiggyBackStat().getWaitTime();
            if (temp > 0) {
               waited++;
               if (temp > max) {
                  // BUG FIX: the original assignment was reversed ("temp = max"), which
                  // clobbered the sample with the stale maximum, corrupted the running
                  // sum, and left maxConditionalWaitTime permanently at 0.
                  max = temp;
               }
               sum += temp;
            }
         }
         long unAvg = (sum / set.size());
         long coAvg = waited != 0 ? (sum / waited) : 0;
         this.maxConditionalWaitTime = max;
         this.avgUnconditionalWaitTime = unAvg;
         this.avgConditionalWaitTime = coAvg;
         this.numWaitedNodes = waited;
      }
   }
}
| apache-2.0 |
stoksey69/googleads-java-lib | examples/dfp_axis/src/main/java/dfp/axis/v201505/proposallineitemservice/GetAllProposalLineItems.java | 3524 | // Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dfp.axis.v201505.proposallineitemservice;
import com.google.api.ads.common.lib.auth.OfflineCredentials;
import com.google.api.ads.common.lib.auth.OfflineCredentials.Api;
import com.google.api.ads.dfp.axis.factory.DfpServices;
import com.google.api.ads.dfp.axis.utils.v201505.StatementBuilder;
import com.google.api.ads.dfp.axis.v201505.ProposalLineItem;
import com.google.api.ads.dfp.axis.v201505.ProposalLineItemPage;
import com.google.api.ads.dfp.axis.v201505.ProposalLineItemServiceInterface;
import com.google.api.ads.dfp.lib.client.DfpSession;
import com.google.api.client.auth.oauth2.Credential;
/**
* This example gets all proposal line items. To create proposal line items, run
* CreateProposalLineItems.java.
*
* Credentials and properties in {@code fromFile()} are pulled from the
* "ads.properties" file. See README for more info.
*
* Tags: ProposalLineItemService.getProposalLineItemsByStatement
*
* @author Nicholas Chen
*/
public class GetAllProposalLineItems {

  /**
   * Pages through every proposal line item on the network and prints its ID and name.
   *
   * @param dfpServices the services factory used to obtain the DFP service stubs
   * @param session the authenticated DFP session
   * @throws Exception if the API call fails
   */
  public static void runExample(DfpServices dfpServices, DfpSession session) throws Exception {
    // Obtain the ProposalLineItemService stub.
    ProposalLineItemServiceInterface proposalLineItemService =
        dfpServices.get(session, ProposalLineItemServiceInterface.class);

    // Select all proposal line items, ordered by ID, one suggested page at a time.
    StatementBuilder statementBuilder =
        new StatementBuilder().orderBy("id ASC").limit(StatementBuilder.SUGGESTED_PAGE_LIMIT);

    // Total result set size is unknown until the first page arrives.
    int totalResultSetSize = 0;

    do {
      // Fetch the next page of proposal line items.
      ProposalLineItemPage page =
          proposalLineItemService.getProposalLineItemsByStatement(statementBuilder.toStatement());

      if (page.getResults() != null) {
        totalResultSetSize = page.getTotalResultSetSize();
        int i = page.getStartIndex();
        for (ProposalLineItem proposalLineItem : page.getResults()) {
          System.out.printf(
              "%d) Proposal line item with ID \"%d\" and name \"%s\" was found.%n", i++,
              proposalLineItem.getId(), proposalLineItem.getName());
        }
      }

      // Advance to the next page.
      statementBuilder.increaseOffsetBy(StatementBuilder.SUGGESTED_PAGE_LIMIT);
    } while (statementBuilder.getOffset() < totalResultSetSize);

    System.out.printf("Number of results found: %d%n", totalResultSetSize);
  }

  /** Builds OAuth2 credentials and a DFP session from ads.properties, then runs the example. */
  public static void main(String[] args) throws Exception {
    // Generate a refreshable OAuth2 credential from the properties file.
    Credential oAuth2Credential = new OfflineCredentials.Builder()
        .forApi(Api.DFP)
        .fromFile()
        .build()
        .generateCredential();

    // Construct a DfpSession backed by that credential.
    DfpSession session = new DfpSession.Builder()
        .fromFile()
        .withOAuth2Credential(oAuth2Credential)
        .build();

    runExample(new DfpServices(), session);
  }
}
| apache-2.0 |
Miciah/origin | vendor/gonum.org/v1/gonum/spatial/barneshut/barneshut3_test.go | 16745 | // Copyright ©2019 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package barneshut
import (
"fmt"
"math"
"reflect"
"testing"
"golang.org/x/exp/rand"
"gonum.org/v1/gonum/floats"
"gonum.org/v1/gonum/spatial/r3"
)
// particle3 is the minimal Particle3 implementation used throughout the
// tests: a point mass at (x, y, z) with an optional name for diagnostics.
type particle3 struct {
	x, y, z, m float64
	name string
}

// Coord3 returns the particle's position, satisfying the Particle3 interface.
func (p particle3) Coord3() r3.Vec { return r3.Vec{X: p.x, Y: p.y, Z: p.z} }

// Mass returns the particle's mass, satisfying the Particle3 interface.
func (p particle3) Mass() float64 { return p.m }
// volumeTests enumerates particle configurations together with the exact
// octree that NewVolume is expected to build for each of them (root bucket,
// per-bucket bounds, centers of mass and total masses). The expected trees
// were computed by hand; TestVolume compares them with reflect.DeepEqual.
var volumeTests = []struct {
	name      string
	particles []particle3
	want      *Volume
}{
	{
		name:      "nil",
		particles: nil,
		want:      &Volume{},
	},
	{
		name:      "empty",
		particles: []particle3{},
		want:      &Volume{Particles: []Particle3{}},
	},
	{
		name:      "one",
		particles: []particle3{{m: 1}}, // Must have a mass to avoid vacuum decay.
		want: &Volume{
			root: bucket{
				particle: particle3{x: 0, y: 0, z: 0, m: 1},
				bounds:   r3.Box{Min: r3.Vec{X: 0, Y: 0, Z: 0}, Max: r3.Vec{X: 0, Y: 0, Z: 0}},
				center:   r3.Vec{X: 0, Y: 0, Z: 0},
				mass:     1,
			},
			Particles: []Particle3{
				particle3{x: 0, y: 0, z: 0, m: 1},
			},
		},
	},
	{
		name: "3 corners",
		particles: []particle3{
			{x: 1, y: 1, z: 1, m: 1},
			{x: -1, y: 1, z: 0, m: 1},
			{x: -1, y: -1, z: -1, m: 1},
		},
		want: &Volume{
			root: bucket{
				bounds: r3.Box{Min: r3.Vec{X: -1, Y: -1, Z: -1}, Max: r3.Vec{X: 1, Y: 1, Z: 1}},
				nodes: [8]*bucket{
					lnw: {
						particle: particle3{x: -1, y: -1, z: -1, m: 1},
						bounds:   r3.Box{Min: r3.Vec{X: -1, Y: -1, Z: -1}, Max: r3.Vec{X: 0, Y: 0, Z: 0}},
						center:   r3.Vec{X: -1, Y: -1, Z: -1},
						mass:     1,
					},
					use: {
						particle: particle3{x: 1, y: 1, z: 1, m: 1},
						bounds:   r3.Box{Min: r3.Vec{X: 0, Y: 0, Z: 0}, Max: r3.Vec{X: 1, Y: 1, Z: 1}},
						center:   r3.Vec{X: 1, Y: 1, Z: 1},
						mass:     1,
					},
					usw: {
						particle: particle3{x: -1, y: 1, z: 0, m: 1},
						bounds:   r3.Box{Min: r3.Vec{X: -1, Y: 0, Z: 0}, Max: r3.Vec{X: 0, Y: 1, Z: 1}},
						center:   r3.Vec{X: -1, Y: 1, Z: 0},
						mass:     1,
					},
				},
				center: r3.Vec{X: -0.3333333333333333, Y: 0.3333333333333333, Z: 0},
				mass:   3,
			},
			Particles: []Particle3{
				particle3{x: 1, y: 1, z: 1, m: 1},
				particle3{x: -1, y: 1, z: 0, m: 1},
				particle3{x: -1, y: -1, z: -1, m: 1},
			},
		},
	},
	{
		name: "4 corners",
		particles: []particle3{
			{x: 1, y: 1, z: -1, m: 1},
			{x: -1, y: 1, z: 1, m: 1},
			{x: 1, y: -1, z: 1, m: 1},
			{x: -1, y: -1, z: -1, m: 1},
		},
		want: &Volume{
			root: bucket{
				bounds: r3.Box{Min: r3.Vec{X: -1, Y: -1, Z: -1}, Max: r3.Vec{X: 1, Y: 1, Z: 1}},
				nodes: [8]*bucket{
					lse: {
						particle: particle3{x: 1, y: 1, z: -1, m: 1},
						bounds:   r3.Box{Min: r3.Vec{X: 0, Y: 0, Z: -1}, Max: r3.Vec{X: 1, Y: 1, Z: 0}},
						center:   r3.Vec{X: 1, Y: 1, Z: -1},
						mass:     1,
					},
					lnw: {
						particle: particle3{x: -1, y: -1, z: -1, m: 1},
						bounds:   r3.Box{Min: r3.Vec{X: -1, Y: -1, Z: -1}, Max: r3.Vec{X: 0, Y: 0, Z: 0}},
						center:   r3.Vec{X: -1, Y: -1, Z: -1},
						mass:     1,
					},
					une: {
						particle: particle3{x: 1, y: -1, z: 1, m: 1},
						bounds:   r3.Box{Min: r3.Vec{X: 0, Y: -1, Z: 0}, Max: r3.Vec{X: 1, Y: 0, Z: 1}},
						center:   r3.Vec{X: 1, Y: -1, Z: 1},
						mass:     1,
					},
					usw: {
						particle: particle3{x: -1, y: 1, z: 1, m: 1},
						bounds:   r3.Box{Min: r3.Vec{X: -1, Y: 0, Z: 0}, Max: r3.Vec{X: 0, Y: 1, Z: 1}},
						center:   r3.Vec{X: -1, Y: 1, Z: 1},
						mass:     1,
					},
				},
				center: r3.Vec{X: 0, Y: 0, Z: 0},
				mass:   4,
			},
			Particles: []Particle3{
				particle3{x: 1, y: 1, z: -1, m: 1},
				particle3{x: -1, y: 1, z: 1, m: 1},
				particle3{x: 1, y: -1, z: 1, m: 1},
				particle3{x: -1, y: -1, z: -1, m: 1},
			},
		},
	},
	{
		// Two nearby particles force deep subdivision of the lnw octant.
		name: "5 corners",
		particles: []particle3{
			{x: 1, y: 1, z: -1, m: 1},
			{x: -1, y: 1, z: 1, m: 1},
			{x: 1, y: -1, z: 1, m: 1},
			{x: -1, y: -1, z: -1, m: 1},
			{x: -1.1, y: -1, z: -1.1, m: 1},
		},
		want: &Volume{
			root: bucket{
				bounds: r3.Box{Min: r3.Vec{X: -1.1, Y: -1, Z: -1.1}, Max: r3.Vec{X: 1, Y: 1, Z: 1}},
				nodes: [8]*bucket{
					lse: {
						particle: particle3{x: 1, y: 1, z: -1, m: 1},
						bounds:   r3.Box{Min: r3.Vec{X: -0.050000000000000044, Y: 0, Z: -1.1}, Max: r3.Vec{X: 1, Y: 1, Z: -0.050000000000000044}},
						center:   r3.Vec{X: 1, Y: 1, Z: -1},
						mass:     1,
					},
					lnw: {
						bounds: r3.Box{Min: r3.Vec{X: -1.1, Y: -1, Z: -1.1}, Max: r3.Vec{X: -0.050000000000000044, Y: 0, Z: -0.050000000000000044}},
						nodes: [8]*bucket{
							lnw: {
								bounds: r3.Box{Min: r3.Vec{X: -1.1, Y: -1, Z: -1.1}, Max: r3.Vec{X: -0.5750000000000001, Y: -0.5, Z: -0.5750000000000001}},
								nodes: [8]*bucket{
									lnw: {
										bounds: r3.Box{Min: r3.Vec{X: -1.1, Y: -1, Z: -1.1}, Max: r3.Vec{X: -0.8375000000000001, Y: -0.75, Z: -0.8375000000000001}},
										nodes: [8]*bucket{
											lnw: {
												bounds: r3.Box{Min: r3.Vec{X: -1.1, Y: -1, Z: -1.1}, Max: r3.Vec{X: -0.9687500000000001, Y: -0.875, Z: -0.9687500000000001}},
												nodes: [8]*bucket{
													lnw: {
														particle: particle3{x: -1.1, y: -1, z: -1.1, m: 1},
														bounds:   r3.Box{Min: r3.Vec{X: -1.1, Y: -1, Z: -1.1}, Max: r3.Vec{X: -1.034375, Y: -0.9375, Z: -1.034375}},
														center:   r3.Vec{X: -1.1, Y: -1, Z: -1.1},
														mass:     1,
													},
													une: {
														particle: particle3{x: -1, y: -1, z: -1, m: 1},
														bounds:   r3.Box{Min: r3.Vec{X: -1.034375, Y: -1, Z: -1.034375}, Max: r3.Vec{X: -0.9687500000000001, Y: -0.9375, Z: -0.9687500000000001}},
														center:   r3.Vec{X: -1, Y: -1, Z: -1},
														mass:     1,
													},
												},
												center: r3.Vec{X: -1.05, Y: -1, Z: -1.05},
												mass:   2,
											},
										},
										center: r3.Vec{X: -1.05, Y: -1, Z: -1.05},
										mass:   2,
									},
								},
								center: r3.Vec{X: -1.05, Y: -1, Z: -1.05},
								mass:   2,
							},
						},
						center: r3.Vec{X: -1.05, Y: -1, Z: -1.05},
						mass:   2,
					},
					une: {
						particle: particle3{x: 1, y: -1, z: 1, m: 1},
						bounds:   r3.Box{Min: r3.Vec{X: -0.050000000000000044, Y: -1, Z: -0.050000000000000044}, Max: r3.Vec{X: 1, Y: 0, Z: 1}},
						center:   r3.Vec{X: 1, Y: -1, Z: 1},
						mass:     1,
					},
					usw: {
						particle: particle3{x: -1, y: 1, z: 1, m: 1},
						bounds:   r3.Box{Min: r3.Vec{X: -1.1, Y: 0, Z: -0.050000000000000044}, Max: r3.Vec{X: -0.050000000000000044, Y: 1, Z: 1}},
						center:   r3.Vec{X: -1, Y: 1, Z: 1},
						mass:     1,
					},
				},
				center: r3.Vec{X: -0.22000000000000003, Y: -0.2, Z: -0.22000000000000003},
				mass:   5,
			},
			Particles: []Particle3{
				particle3{x: 1, y: 1, z: -1, m: 1},
				particle3{x: -1, y: 1, z: 1, m: 1},
				particle3{x: 1, y: -1, z: 1, m: 1},
				particle3{x: -1, y: -1, z: -1, m: 1},
				particle3{x: -1.1, y: -1, z: -1.1, m: 1},
			},
		},
	},
	{
		// This case is derived from the 2D example of the same name,
		// but with a monotonic increase in Z position according to name.
		name: "http://arborjs.org/docs/barnes-hut example",
		particles: []particle3{
			{x: 64.5, y: 81.5, z: 0, m: 1, name: "A"},
			{x: 242, y: 34, z: 40, m: 1, name: "B"},
			{x: 199, y: 69, z: 80, m: 1, name: "C"},
			{x: 285, y: 106.5, z: 120, m: 1, name: "D"},
			{x: 170, y: 194.5, z: 160, m: 1, name: "E"},
			{x: 42.5, y: 334.5, z: 200, m: 1, name: "F"},
			{x: 147, y: 309, z: 240, m: 1, name: "G"},
			{x: 236.5, y: 324, z: 280, m: 1, name: "H"},
		},
		want: &Volume{
			root: bucket{
				bounds: r3.Box{Min: r3.Vec{X: 42.5, Y: 34, Z: 0}, Max: r3.Vec{X: 285, Y: 334.5, Z: 280}},
				nodes: [8]*bucket{
					lne: {
						bounds: r3.Box{Min: r3.Vec{X: 163.75, Y: 34, Z: 0}, Max: r3.Vec{X: 285, Y: 184.25, Z: 140}},
						nodes: [8]*bucket{
							lne: {
								particle: particle3{x: 242, y: 34, z: 40, m: 1, name: "B"},
								bounds:   r3.Box{Min: r3.Vec{X: 224.375, Y: 34, Z: 0}, Max: r3.Vec{X: 285, Y: 109.125, Z: 70}},
								center:   r3.Vec{X: 242, Y: 34, Z: 40},
								mass:     1,
							},
							une: {
								particle: particle3{x: 285, y: 106.5, z: 120, m: 1, name: "D"},
								bounds:   r3.Box{Min: r3.Vec{X: 224.375, Y: 34, Z: 70}, Max: r3.Vec{X: 285, Y: 109.125, Z: 140}},
								center:   r3.Vec{X: 285, Y: 106.5, Z: 120},
								mass:     1,
							},
							unw: {
								particle: particle3{x: 199, y: 69, z: 80, m: 1, name: "C"},
								bounds:   r3.Box{Min: r3.Vec{X: 163.75, Y: 34, Z: 70}, Max: r3.Vec{X: 224.375, Y: 109.125, Z: 140}},
								center:   r3.Vec{X: 199, Y: 69, Z: 80},
								mass:     1,
							},
						},
						center: r3.Vec{X: 242, Y: 69.83333333333333, Z: 80},
						mass:   3,
					},
					lnw: {
						particle: particle3{x: 64.5, y: 81.5, z: 0, m: 1, name: "A"},
						bounds:   r3.Box{Min: r3.Vec{X: 42.5, Y: 34, Z: 0}, Max: r3.Vec{X: 163.75, Y: 184.25, Z: 140}},
						center:   r3.Vec{X: 64.5, Y: 81.5, Z: 0},
						mass:     1,
					},
					(*bucket)(nil),
					use: {
						bounds: r3.Box{Min: r3.Vec{X: 163.75, Y: 184.25, Z: 140}, Max: r3.Vec{X: 285, Y: 334.5, Z: 280}},
						nodes: [8]*bucket{
							lnw: {
								particle: particle3{x: 170, y: 194.5, z: 160, m: 1, name: "E"},
								bounds:   r3.Box{Min: r3.Vec{X: 163.75, Y: 184.25, Z: 140}, Max: r3.Vec{X: 224.375, Y: 259.375, Z: 210}},
								center:   r3.Vec{X: 170, Y: 194.5, Z: 160},
								mass:     1,
							},
							use: {
								particle: particle3{x: 236.5, y: 324, z: 280, m: 1, name: "H"},
								bounds:   r3.Box{Min: r3.Vec{X: 224.375, Y: 259.375, Z: 210}, Max: r3.Vec{X: 285, Y: 334.5, Z: 280}},
								center:   r3.Vec{X: 236.5, Y: 324, Z: 280},
								mass:     1,
							},
						},
						center: r3.Vec{X: 203.25, Y: 259.25, Z: 220},
						mass:   2,
					},
					usw: {
						bounds: r3.Box{Min: r3.Vec{X: 42.5, Y: 184.25, Z: 140}, Max: r3.Vec{X: 163.75, Y: 334.5, Z: 280}},
						nodes: [8]*bucket{
							lsw: {
								particle: particle3{x: 42.5, y: 334.5, z: 200, m: 1, name: "F"},
								bounds:   r3.Box{Min: r3.Vec{X: 42.5, Y: 259.375, Z: 140}, Max: r3.Vec{X: 103.125, Y: 334.5, Z: 210}},
								center:   r3.Vec{X: 42.5, Y: 334.5, Z: 200},
								mass:     1,
							},
							use: {
								particle: particle3{x: 147, y: 309, z: 240, m: 1, name: "G"},
								bounds:   r3.Box{Min: r3.Vec{X: 103.125, Y: 259.375, Z: 210}, Max: r3.Vec{X: 163.75, Y: 334.5, Z: 280}},
								center:   r3.Vec{X: 147, Y: 309, Z: 240},
								mass:     1,
							},
						},
						center: r3.Vec{X: 94.75, Y: 321.75, Z: 220},
						mass:   2,
					},
				},
				center: r3.Vec{X: 173.3125, Y: 181.625, Z: 140},
				mass:   8,
			},
			Particles: []Particle3{
				particle3{x: 64.5, y: 81.5, z: 0, m: 1, name: "A"},
				particle3{x: 242, y: 34, z: 40, m: 1, name: "B"},
				particle3{x: 199, y: 69, z: 80, m: 1, name: "C"},
				particle3{x: 285, y: 106.5, z: 120, m: 1, name: "D"},
				particle3{x: 170, y: 194.5, z: 160, m: 1, name: "E"},
				particle3{x: 42.5, y: 334.5, z: 200, m: 1, name: "F"},
				particle3{x: 147, y: 309, z: 240, m: 1, name: "G"},
				particle3{x: 236.5, y: 324, z: 280, m: 1, name: "H"},
			},
		},
	},
}
// TestVolume checks that NewVolume builds exactly the octree expected by each
// volumeTests fixture, and that every bucket's cached center of mass and
// total mass agree with a direct recomputation over the particles it holds.
func TestVolume(t *testing.T) {
	const tol = 1e-15
	for _, test := range volumeTests {
		// Convert the concrete particle3 slice to the Particle3 interface
		// slice expected by NewVolume, preserving nil-ness for the nil case.
		var particles []Particle3
		if test.particles != nil {
			particles = make([]Particle3, len(test.particles))
		}
		for i, p := range test.particles {
			particles[i] = p
		}
		got, err := NewVolume(particles)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			continue
		}
		if test.want != nil && !reflect.DeepEqual(got, test.want) {
			t.Errorf("unexpected result for %q: got:%v want:%v", test.name, got, test.want)
		}
		// Recursively check all internal centers of mass.
		walkVolume(&got.root, func(b *bucket) {
			// Collect every particle stored at or below this bucket...
			var sub []Particle3
			walkVolume(b, func(b *bucket) {
				if b.particle != nil {
					sub = append(sub, b.particle)
				}
			})
			// ...and compare the cached aggregates with a direct summation.
			center, mass := centerOfMass3(sub)
			if !floats.EqualWithinAbsOrRel(center.X, b.center.X, tol, tol) || !floats.EqualWithinAbsOrRel(center.Y, b.center.Y, tol, tol) || !floats.EqualWithinAbsOrRel(center.Z, b.center.Z, tol, tol) {
				t.Errorf("unexpected result for %q for center of mass: got:%f want:%f", test.name, b.center, center)
			}
			if !floats.EqualWithinAbsOrRel(mass, b.mass, tol, tol) {
				t.Errorf("unexpected result for %q for total mass: got:%f want:%f", test.name, b.mass, mass)
			}
		})
	}
}
// centerOfMass3 returns the mass-weighted mean position and the total mass of
// the given particles. When the total mass is zero the (unnormalized, hence
// zero) accumulated center is returned unchanged.
func centerOfMass3(particles []Particle3) (center r3.Vec, mass float64) {
	for _, p := range particles {
		w := p.Mass()
		pos := p.Coord3()
		center.X += pos.X * w
		center.Y += pos.Y * w
		center.Z += pos.Z * w
		mass += w
	}
	if mass == 0 {
		return center, mass
	}
	center.X /= mass
	center.Y /= mass
	center.Z /= mass
	return center, mass
}
// walkVolume visits t and every bucket reachable from it in depth-first
// pre-order, invoking fn on each non-nil bucket; nil subtrees are skipped.
func walkVolume(t *bucket, fn func(*bucket)) {
	if t != nil {
		fn(t)
		for _, child := range t.nodes {
			walkVolume(child, fn)
		}
	}
}
// TestVolumeForceOn compares the Barnes-Hut approximation against the exact
// O(n^2) pairwise gravity computation for random particle clouds, checking
// that the RMS displacement error stays below tol for several opening
// angles theta. It also logs the mean error and force-function calls per
// particle so the accuracy/cost trade-off of theta can be inspected.
func TestVolumeForceOn(t *testing.T) {
	const (
		size = 1000
		tol  = 1e-3
	)
	for _, n := range []int{3e3, 1e4, 3e4} {
		rnd := rand.New(rand.NewSource(1))
		particles := make([]Particle3, n)
		for i := range particles {
			particles[i] = particle3{x: size * rnd.Float64(), y: size * rnd.Float64(), z: size * rnd.Float64(), m: 1}
		}
		// Reference: displace each particle by the exact pairwise gravity sum.
		moved := make([]r3.Vec, n)
		for i, p := range particles {
			var v r3.Vec
			m := p.Mass()
			pv := p.Coord3()
			for _, e := range particles {
				v = v.Add(Gravity3(p, e, m, e.Mass(), e.Coord3().Sub(pv)))
			}
			moved[i] = p.Coord3().Add(v)
		}
		volume, err := NewVolume(particles)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			continue
		}
		for _, theta := range []float64{0, 0.3, 0.6, 0.9} {
			t.Run(fmt.Sprintf("%d-body/theta=%v", len(particles), theta), func(t *testing.T) {
				// ssd accumulates squared displacement errors, sd the absolute
				// errors; calls counts force-function invocations.
				var ssd, sd float64
				var calls int
				for i, p := range particles {
					v := volume.ForceOn(p, theta, func(p1, p2 Particle3, m1, m2 float64, v r3.Vec) r3.Vec {
						calls++
						return Gravity3(p1, p2, m1, m2, v)
					})
					pos := p.Coord3().Add(v)
					d := moved[i].Sub(pos)
					ssd += d.X*d.X + d.Y*d.Y + d.Z*d.Z
					sd += math.Hypot(math.Hypot(d.X, d.Y), d.Z)
				}
				rmsd := math.Sqrt(ssd / float64(len(particles)))
				if rmsd > tol {
					t.Error("RMSD for approximation too high")
				}
				t.Logf("rmsd=%.4v md=%.4v calls/particle=%.5v",
					rmsd, sd/float64(len(particles)), float64(calls)/float64(len(particles)))
			})
		}
	}
}
// Benchmark sinks: results are assigned here so the compiler cannot
// eliminate the benchmarked calls as dead code.
var (
	fv3sink    r3.Vec
	volumeSink *Volume
)
// BenchmarkNewVolume measures octree construction cost for random particle
// clouds of increasing size (10^3 to 10^6 bodies). Particle generation is
// excluded from the timing via ResetTimer and the per-size sub-benchmarks.
func BenchmarkNewVolume(b *testing.B) {
	for _, n := range []int{1e3, 1e4, 1e5, 1e6} {
		rnd := rand.New(rand.NewSource(1))
		particles := make([]Particle3, n)
		for i := range particles {
			particles[i] = particle3{x: rnd.Float64(), y: rnd.Float64(), z: rnd.Float64(), m: 1}
		}
		b.ResetTimer()
		var err error
		b.Run(fmt.Sprintf("%d-body", len(particles)), func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				volumeSink, err = NewVolume(particles)
				if err != nil {
					b.Fatalf("unexpected error: %v", err)
				}
			}
		})
	}
}
// BenchmarkVolumeForceOn measures the cost of force evaluation alone (the
// octree is built once, outside the timed region) across particle counts
// and opening angles theta; larger theta trades accuracy for speed.
func BenchmarkVolumeForceOn(b *testing.B) {
	for _, n := range []int{1e3, 1e4, 1e5} {
		for _, theta := range []float64{0, 0.1, 0.5, 1, 1.5} {
			if n > 1e4 && theta < 0.5 {
				// Don't run unreasonably long benchmarks.
				continue
			}
			rnd := rand.New(rand.NewSource(1))
			particles := make([]Particle3, n)
			for i := range particles {
				particles[i] = particle3{x: rnd.Float64(), y: rnd.Float64(), z: rnd.Float64(), m: 1}
			}
			volume, err := NewVolume(particles)
			if err != nil {
				b.Fatalf("unexpected error: %v", err)
			}
			b.ResetTimer()
			b.Run(fmt.Sprintf("%d-body/theta=%v", len(particles), theta), func(b *testing.B) {
				for i := 0; i < b.N; i++ {
					for _, p := range particles {
						fv3sink = volume.ForceOn(p, theta, Gravity3)
					}
				}
			})
		}
	}
}
// BenchmarkVolumeFull measures a full simulation step — octree construction
// plus force evaluation for every particle — inside the timed loop, unlike
// BenchmarkVolumeForceOn which times only the force evaluation.
func BenchmarkVolumeFull(b *testing.B) {
	for _, n := range []int{1e3, 1e4, 1e5} {
		for _, theta := range []float64{0, 0.1, 0.5, 1, 1.5} {
			if n > 1e4 && theta < 0.5 {
				// Don't run unreasonably long benchmarks.
				continue
			}
			rnd := rand.New(rand.NewSource(1))
			particles := make([]Particle3, n)
			for i := range particles {
				particles[i] = particle3{x: rnd.Float64(), y: rnd.Float64(), z: rnd.Float64(), m: 1}
			}
			b.ResetTimer()
			b.Run(fmt.Sprintf("%d-body/theta=%v", len(particles), theta), func(b *testing.B) {
				for i := 0; i < b.N; i++ {
					volume, err := NewVolume(particles)
					if err != nil {
						b.Fatalf("unexpected error: %v", err)
					}
					for _, p := range particles {
						fv3sink = volume.ForceOn(p, theta, Gravity3)
					}
				}
			})
		}
	}
}
| apache-2.0 |
ysc/superword | src/main/java/org/apdplat/superword/extract/ChineseSynonymAntonymExtractor.java | 16449 | /**
*
* APDPlat - Application Product Development Platform Copyright (c) 2013, 杨尚川,
* yang-shangchuan@qq.com
*
* This program is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software
* Foundation, either version 3 of the License, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package org.apdplat.superword.extract;
import org.apache.commons.lang.StringUtils;
import org.apdplat.superword.model.SynonymAntonym;
import org.apdplat.superword.model.Word;
import org.apdplat.superword.tools.ProxyIp;
import org.eclipse.jetty.util.ConcurrentHashSet;
import org.jsoup.Connection;
import org.jsoup.HttpStatusException;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Element;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
/**
* 汉语同义词反义词提取工具
* @author 杨尚川
*/
/**
 * Utility that scrapes www.iciba.com word pages for Chinese synonyms and
 * antonyms and persists the results under {@code src/main/resources}:
 * synonyms, antonyms, and a progress file of already-checked words.
 *
 * <p>All state is static and the constructor is private; this is a
 * non-instantiable command-line utility driven from {@link #main(String[])}.
 */
public class ChineseSynonymAntonymExtractor {
    private ChineseSynonymAntonymExtractor(){}
    private static final Logger LOGGER = LoggerFactory.getLogger(ChineseSynonymAntonymExtractor.class);
    /** CSS selector locating the synonym/antonym box on an iciba.com word page. */
    private static final String SYNONYM_ANTONYM_CSS_PATH = "html body.bg_main div#layout div#center div#main_box div#dict_main div.simple div#dict_content_3.dict_content div.industry_box div.industry.cn_synon_box";
    // Request headers that mimic a normal browser session.
    private static final String ACCEPT = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8";
    private static final String ENCODING = "gzip, deflate";
    private static final String LANGUAGE = "zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3";
    private static final String CONNECTION = "keep-alive";
    private static final String HOST = "www.iciba.com";
    private static final String REFERER = "http://www.iciba.com/";
    // User agents rotated round-robin via {@link #uac} to reduce blocking.
    private static final List<String> USER_AGENTS = Arrays.asList("Mozilla/5.0 (Windows NT 6.1; WOW64; rv:27.0) Gecko",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36",
            "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:36.0) Gecko/20100101 Firefox/36.0",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117 Safari",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.102 Safari/537.36 OPR"
    );
    /** Monotonic counter used only to pick the next user agent. */
    private static final AtomicInteger uac = new AtomicInteger();
    /** word -> space-separated antonym list. */
    private static final Map<String, String> ANTONYM = new ConcurrentHashMap<>();
    private static final ExecutorService EXECUTOR_SERVICE = Executors.newCachedThreadPool();
    /** Words whose pages have already been fetched and parsed. */
    private static final Set<String> CHECKED_WORDS = new ConcurrentHashSet<>();
    // Used to merge synonym entries for the same head word across lookups.
    private static final Map<Word, Set<Word>> SYNONYM_MAP = new ConcurrentHashMap<>();
    private static final Path CHECKED_WORDS_PATH = Paths.get("src/main/resources/checked_words.txt");
    private static final Path CHINESE_SYNONYM = Paths.get("src/main/resources/chinese_synonym.txt");
    private static final Path CHINESE_ANTONYM = Paths.get("src/main/resources/chinese_antonym.txt");
    /** Matches any character outside the CJK unified ideograph range; compiled once. */
    private static final Pattern NON_CHINESE_CHAR = Pattern.compile("[^\u4e00-\u9fa5]");
    /**
     * Parse one iciba.com page and extract the synonyms and antonyms of
     * {@code word}. Entries shorter than two characters, containing
     * non-Chinese characters, or equal to the word itself are skipped.
     *
     * @param html the full HTML of the word page
     * @param word the head word being looked up
     * @return a (possibly empty) {@link SynonymAntonym}; never {@code null}
     */
    public static SynonymAntonym parseSynonymAntonym(String html, String word){
        SynonymAntonym synonymAntonym = new SynonymAntonym();
        synonymAntonym.setWord(new Word(word, ""));
        try {
            for(Element element : Jsoup.parse(html).select(SYNONYM_ANTONYM_CSS_PATH)){
                int size = element.children().size();
                LOGGER.debug("element size:" + size);
                // Children alternate: label ("同义词"/"反义词") followed by its word list.
                for(int i=0;i<size/2;i++) {
                    String type = element.child(i*2).text();
                    LOGGER.debug("type:"+type);
                    if ("同义词".equals(type)) {
                        String synonym = element.child(i*2+1).text();
                        LOGGER.debug("synonym:"+synonym);
                        for(String w : synonym.split("\\s+")){
                            w=w.replaceAll("\\s+", "");
                            if(w.length()<2){
                                continue;
                            }
                            if(isNotChineseChar(w)){
                                LOGGER.debug("非中文字符:"+w);
                                continue;
                            }
                            if(w.equals(word)){
                                continue;
                            }
                            LOGGER.debug("word:"+w);
                            synonymAntonym.addSynonym(new Word(w, ""));
                        }
                    }
                    if ("反义词".equals(type)) {
                        String antonym = element.child(i*2+1).text();
                        LOGGER.debug("antonym:"+antonym);
                        for(String w : antonym.split("\\s+")){
                            w=w.replaceAll("\\s+", "");
                            if(w.length()<2){
                                continue;
                            }
                            if(isNotChineseChar(w)){
                                LOGGER.debug("非中文字符:"+w);
                                continue;
                            }
                            LOGGER.debug("word:"+w);
                            synonymAntonym.addAntonym(new Word(w, ""));
                        }
                    }
                }
            }
            if(!synonymAntonym.getAntonym().isEmpty() || !synonymAntonym.getSynonym().isEmpty()) {
                LOGGER.info("解析出同义词反义词:" + synonymAntonym);
            }
        }catch (Exception e){
            LOGGER.error("解析同义词反义词出错", e);
        }
        return synonymAntonym;
    }
    /**
     * Fetch and parse every word in {@code words} in parallel, skipping words
     * already recorded in the checked-words file. Results are flushed to disk
     * every minute by a background task and once more at the end, after which
     * duplicate records are filtered from both output files.
     *
     * @param words candidate head words; blanks and non-Chinese entries are ignored
     */
    public static void parseSynonymAntonym(List<String> words){
        LOGGER.info("开始解析,词数:" + words.size());
        Set<String> SKIP_WORDS = new ConcurrentSkipListSet<>();
        try{
            if(Files.notExists(CHECKED_WORDS_PATH)){
                CHECKED_WORDS_PATH.toFile().createNewFile();
            }
            SKIP_WORDS.addAll(Files.readAllLines(CHECKED_WORDS_PATH));
        }catch (Exception e){
            LOGGER.error("读取文件失败", e);
        }
        int total = words.size()-SKIP_WORDS.size();
        LOGGER.info("之前已经解析的词数:" + SKIP_WORDS.size());
        LOGGER.info("现在还需解析的词数:" + total);
        String url = "http://www.iciba.com/";
        AtomicInteger i = new AtomicInteger();
        // Periodic checkpoint: persist partial results every 60 seconds so a
        // crash or ban does not lose the whole crawl.
        EXECUTOR_SERVICE.submit(()->{
            while(true){
                try {
                    Thread.sleep(60000);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag and stop checkpointing; the
                    // final save() after the crawl still runs.
                    Thread.currentThread().interrupt();
                    return;
                }
                save();
            }
        });
        words.parallelStream().forEach(word -> {
            if (SKIP_WORDS.contains(word)) {
                return;
            }
            LOGGER.info("进度:" + total + "/" + i.incrementAndGet() + " 来自线程:" + Thread.currentThread());
            try {
                word = word.trim();
                if ("".equals(word) || isNotChineseChar(word) || word.length() < 2) {
                    return;
                }
                String html = getContent(url + word);
                // Retry up to twice on an empty response, switching proxy IP first.
                int times = 1;
                while (StringUtils.isBlank(html) && times < 3) {
                    times++;
                    ProxyIp.toNewIp();
                    html = getContent(url + word);
                }
                if (StringUtils.isBlank(html)) {
                    LOGGER.error("获取页面失败:" + url + word);
                    return;
                }
                times = 1;
                // Retry up to twice when the site throttles this IP.
                while (html.contains("非常抱歉,来自您ip的请求异常频繁") && times < 3) {
                    times++;
                    ProxyIp.toNewIp();
                    html = getContent(url + word);
                }
                SynonymAntonym synonymAntonym = parseSynonymAntonym(html, word);
                if (!synonymAntonym.getSynonym().isEmpty()) {
                    SYNONYM_MAP.put(synonymAntonym.getWord(), synonymAntonym.getSynonym());
                }
                if (!synonymAntonym.getAntonym().isEmpty()) {
                    StringBuilder str = new StringBuilder();
                    synonymAntonym.getAntonym().forEach(w -> str.append(w.getWord()).append(" "));
                    ANTONYM.put(word, str.toString().trim());
                }
                CHECKED_WORDS.add(word);
            } catch (Exception e) {
                LOGGER.error("错误:", e);
            }
        });
        save();
        filterSameRecord(CHINESE_SYNONYM);
        filterSameRecord(CHINESE_ANTONYM);
    }
    /**
     * Merge the in-memory synonym/antonym maps with what is already on disk
     * and write the combined, sorted files back, together with the
     * checked-words progress file. Synchronized because it is called both by
     * the crawl thread and the periodic checkpoint task.
     */
    private static synchronized void save(){
        System.out.println("开始保存文件");
        List<String> SYNONYM_LIST = null;
        List<String> ANTONYM_LIST = null;
        try {
            if(Files.notExists(CHINESE_SYNONYM)){
                CHINESE_SYNONYM.toFile().createNewFile();
            }
            if(Files.notExists(CHINESE_ANTONYM)){
                CHINESE_ANTONYM.toFile().createNewFile();
            }
            System.out.println("同义词数:"+SYNONYM_MAP.size());
            Set<String> SYNONYM_STR = new HashSet<>();
            SYNONYM_MAP.keySet().forEach(k -> {
                StringBuilder str = new StringBuilder();
                str.append(k.getWord()).append(" ");
                SYNONYM_MAP.get(k).stream().sorted().forEach(w -> {
                    str.append(w.getWord()).append(" ");
                });
                SYNONYM_STR.add(str.toString().trim());
            });
            List<String> existList = Files.readAllLines(CHINESE_SYNONYM);
            SYNONYM_STR.addAll(existList);
            SYNONYM_LIST = SYNONYM_STR.stream().sorted().collect(Collectors.toList());
            System.out.println("总的同义词数:"+SYNONYM_LIST.size());
            Files.write(CHINESE_SYNONYM, SYNONYM_LIST);
            Set<String> set = ANTONYM.keySet().stream().sorted().map(k -> k + " " + ANTONYM.get(k)).collect(Collectors.toSet());
            existList = Files.readAllLines(CHINESE_ANTONYM);
            set.addAll(existList);
            ANTONYM_LIST = set.stream().sorted().collect(Collectors.toList());
            System.out.println("总的反义词数:"+ANTONYM_LIST.size());
            Files.write(CHINESE_ANTONYM, ANTONYM_LIST);
            existList = Files.readAllLines(CHECKED_WORDS_PATH);
            CHECKED_WORDS.addAll(existList);
            System.out.println("总的已检查词数:" + CHECKED_WORDS.size());
            Files.write(CHECKED_WORDS_PATH, CHECKED_WORDS);
        } catch (Exception e) {
            // BUG FIX: the original called error(String, Object) with no "{}"
            // placeholder (the argument was silently dropped) and would throw
            // NPE here if the exception fired before the lists were built.
            LOGGER.error("同义词:{}", SYNONYM_LIST);
            LOGGER.error("反义词:{}", ANTONYM_LIST);
            LOGGER.error("保存文件失败", e);
        }
    }
    /**
     * POST to {@code url} with browser-like headers, a rotated user agent,
     * and randomized forwarding headers, returning the page HTML.
     *
     * @return the page HTML, "404" as a sentinel on HTTP 404, or "" on failure
     */
    public static String getContent(String url) {
        LOGGER.debug("url:" + url);
        Connection conn = Jsoup.connect(url)
                .header("Accept", ACCEPT)
                .header("Accept-Encoding", ENCODING)
                .header("Accept-Language", LANGUAGE)
                .header("Connection", CONNECTION)
                .header("Referer", REFERER)
                .header("Host", HOST)
                .header("User-Agent", USER_AGENTS.get(uac.incrementAndGet() % USER_AGENTS.size()))
                .header("X-Forwarded-For", getRandomIp())
                .header("Proxy-Client-IP", getRandomIp())
                .header("WL-Proxy-Client-IP", getRandomIp())
                .ignoreContentType(true);
        String html = "";
        try {
            html = conn.post().html();
        }catch (Exception e){
            if(e instanceof HttpStatusException) {
                HttpStatusException ex = (HttpStatusException) e;
                LOGGER.error("error code:"+ex.getStatusCode());
                if(ex.getStatusCode()==404){
                    return "404";
                }
            }
            LOGGER.error("获取URL:"+url+" 页面出错", e);
        }
        return html;
    }
    /**
     * @return {@code true} if {@code str} contains at least one character
     *         outside the CJK unified ideograph range U+4E00..U+9FA5
     */
    public static boolean isNotChineseChar(String str){
        // Pattern is precompiled as a constant; Matcher is cheap per call.
        return NON_CHINESE_CHAR.matcher(str).find();
    }
    /**
     * Convenience overload: fetch the iciba.com page for {@code word} and
     * parse it, or return {@code null} if the page cannot be loaded.
     */
    public static SynonymAntonym parseSynonymAntonym(String word){
        try {
            return parseSynonymAntonym(Jsoup.parse(new URL("http://www.iciba.com/" + word), 15000).html(), word);
        }catch (Exception e){
            LOGGER.error("解析同义词反义词出错", e);
        }
        return null;
    }
    /**
     * Generate a random, plausible public IPv4 address for spoofed
     * forwarding headers, avoiding the RFC 1918 private ranges.
     */
    public static String getRandomIp(){
        // One Random instance per call instead of one per octet.
        Random random = new Random();
        int first = random.nextInt(254)+1;
        // Exclude class A private range 10.0.0.0 - 10.255.255.255.
        while(first==10){
            first = random.nextInt(254)+1;
        }
        int second = random.nextInt(254)+1;
        // Exclude class B private range 172.16.0.0 - 172.31.255.255.
        while(first==172 && (second>=16 && second<=31)){
            first = random.nextInt(254)+1;
            second = random.nextInt(254)+1;
        }
        // Exclude class C private range 192.168.0.0 - 192.168.255.255.
        while(first==192 && second==168){
            first = random.nextInt(254)+1;
            second = random.nextInt(254)+1;
        }
        int third = random.nextInt(254)+1;
        int forth = random.nextInt(254)+1;
        // BUG FIX: the original returned first.second.second.forth, repeating
        // the second octet and never using the computed third one.
        return first+"."+second+"."+third+"."+forth;
    }
    /**
     * 去掉重复的记录,如:
     * 一丘之貉 比众不同
     * 比众不同 一丘之貉
     * 只保留一条记录
     * (Remove records that contain the same word set in a different order,
     * keeping only the first occurrence, then rewrite the file sorted.)
     * @param path file whose whitespace-separated records are de-duplicated
     */
    private static void filterSameRecord(Path path){
        try {
            AtomicInteger i = new AtomicInteger();
            Set<String> set = new HashSet<>();
            List<String> list = Files.readAllLines(path).stream().filter(line -> {
                String[] attr = line.split("\\s+");
                // Sort the words so order-insensitive duplicates collide.
                String words = Arrays.asList(attr).stream().sorted().collect(Collectors.toList()).toString();
                if (set.contains(words)) {
                    i.incrementAndGet();
                    LOGGER.info("去掉重复的记录:" + line);
                    return false;
                }
                set.add(words);
                return true;
            }).sorted().collect(Collectors.toList());
            Files.write(path, list);
            LOGGER.info("去掉重复的记录数:" + i.get());
        }catch (Exception e){
            LOGGER.error("去掉重复的记录出错", e);
        }
    }
    /** Entry point: crawl every word in dic.txt, shortest words first. */
    public static void main(String[] args) throws Exception{
        //parseSynonymAntonym("热爱");
        //parseSynonymAntonym("一举成名");
        //parseSynonymAntonym(Arrays.asList("热爱", "一举成名"));
        //System.out.println(getContent("http://www.iciba.com/%E7%83%AD%E7%88%B1"));
        parseSynonymAntonym(Files.readAllLines(Paths.get("src/main/resources/dic.txt")).stream().sorted((a, b) -> Integer.compare(a.length(), b.length())).collect(Collectors.toList()));
    }
}
| apache-2.0 |
sortable/framework | persistence/mongodb-record/src/test/scala/net/liftweb/mongodb/record/CustomSerializersSpec.scala | 12623 | /*
* Copyright 2010-2014 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package mongodb
package record
import common._
import field._
import http.js.JE._
import http.{LiftSession, S}
import json.JsonAST._
import util.Helpers._
import java.util.{Calendar, Date}
import org.bson.types.ObjectId
import org.specs2.mutable.Specification
import net.liftweb.record.field._
import xml.{Elem, NodeSeq}
import util.Helpers
package customserializersspecs {

  /** JSON-serializable child record: a name plus a birth date. */
  case class Child(name: String, birthdate: Date) extends JsonObject[Child] {
    def meta = Child
  }

  object Child extends JsonObjectMeta[Child]

  /*
   * Date as String
   */
  // Person uses the meta record's default formats, so Child.birthdate is
  // expected to round-trip as a String (see the spec assertions below).
  class Person extends MongoRecord[Person] with ObjectIdPk[Person] {
    def meta = Person

    object children extends MongoJsonObjectListField(this, Child)

    object firstBorn extends JsonObjectField(this, Child) {
      def defaultValue = Child("", now)
    }
  }

  object Person extends Person with MongoMetaRecord[Person]

  /*
   * Date as Date
   */
  // Person2 overrides formats with allFormats, so Child.birthdate is
  // serialized with the date serializer ($dt objects in the spec below).
  class Person2 extends MongoRecord[Person2] with ObjectIdPk[Person2] {
    def meta = Person2

    object children extends MongoJsonObjectListField(this, Child)

    object firstBorn extends JsonObjectField(this, Child) {
      def defaultValue = Child("", now)
    }
  }

  object Person2 extends Person2 with MongoMetaRecord[Person2] {
    override def formats = allFormats
  }

  // Simple record referenced by Team/Team2 via the "qb" field.
  class Player extends MongoRecord[Player] with ObjectIdPk[Player] {
    def meta = Player

    object name extends StringField(this, 256)
  }

  object Player extends Player with MongoMetaRecord[Player]

  /*
   * ObjectId as String
   */
  // Team stores both its own id and the qb reference as plain Strings.
  case class Team(id: String, name: String, qb: String) extends JsonObject[Team] {
    def meta = Team
  }

  object Team extends JsonObjectMeta[Team]

  class League extends MongoRecord[League] with ObjectIdPk[League] {
    def meta = League

    object teams extends MongoJsonObjectListField(this, Team)

    object champion extends JsonObjectField(this, Team) {
      def defaultValue = Team("", "", "")
    }
  }

  object League extends League with MongoMetaRecord[League]

  /*
   * ObjectId as ObjectId
   */
  // Team2 keeps native ObjectIds; League2 registers an ObjectIdSerializer
  // below so they can be serialized inside JsonObjects.
  case class Team2(id: ObjectId, name: String, qb: ObjectId) extends JsonObject[Team2] {
    def meta = Team2
  }

  object Team2 extends JsonObjectMeta[Team2]

  class League2 extends MongoRecord[League2] with ObjectIdPk[League2] {
    def meta = League2

    object teams extends MongoJsonObjectListField(this, Team2)

    object champion extends JsonObjectField(this, Team2) {
      def defaultValue = Team2(ObjectId.get, "", ObjectId.get)
    }
  }

  object League2 extends League2 with MongoMetaRecord[League2] {
    override def formats = super.formats + new ObjectIdSerializer
  }

  // Enumeration persisted through an EnumField.
  object WeekDay extends Enumeration {
    type WeekDay = Value
    val Mon, Tue, Wed, Thu, Fri, Sat, Sun = Value
  }

  class EnumRec extends MongoRecord[EnumRec] with ObjectIdPk[EnumRec] {
    def meta = EnumRec

    object dow extends EnumField(this, WeekDay)
  }

  object EnumRec extends EnumRec with MongoMetaRecord[EnumRec] {
    override def collectionName = "enumrecs"
  }
}
/**
* Systems under specification for CustomSerializers.
*/
/**
 * Systems under specification for CustomSerializers.
 *
 * Each example saves a record to a running MongoDB instance, reads it back,
 * and compares both the field values and their JValue/JsExp serializations.
 */
object CustomSerializersSpec extends Specification with MongoTestKit {
  "CustomSerializers Specification".title

  import customserializersspecs._

  "CustomSerializers" should {
    "handle Date as String value in JsonObjects" in {
      checkMongoIsRunning

      // test data: two children with fixed UTC birth dates
      val bdjack = Calendar.getInstance.setTimezone(utc)
      bdjack.setTimeInMillis(1288742280000L)
      val bdjill = Calendar.getInstance.setTimezone(utc)
      bdjill.setTimeInMillis(1288742880000L)
      val jack = Child("Jack", bdjack.getTime)
      val jill = Child("Jill", bdjill.getTime)

      // create and save a Person record
      val mother = Person.createRecord
      mother.children(List(jack, jill))
      mother.firstBorn(jack)
      mother.save()

      // retrieve it and compare
      val mother2 = Person.find(mother.id.get)
      mother2.isDefined must_== true
      mother2 foreach {
        m =>
          m.children.value mustEqual mother.children.value
          m.firstBorn.value mustEqual mother.firstBorn.value
      }

      // check the conversion functions: dates serialize as ISO strings
      /*
      mother.children.asJs mustEqual JsArray(
        JsObj(("name", Str("Jack")), ("birthdate", Str("2010-11-02T23:58:00.000Z"))),
        JsObj(("name", Str("Jill")), ("birthdate", Str("2010-11-03T00:08:00.000Z")))
      )*/
      mother.children.asJValue mustEqual JArray(List(
        JObject(List(
          JField("name", JString("Jack")),
          JField("birthdate", JString("2010-11-02T23:58:00.000Z"))
        )),
        JObject(List(
          JField("name", JString("Jill")),
          JField("birthdate", JString("2010-11-03T00:08:00.000Z"))))
      ))
      mother.children.toForm must beEmpty
      /*
      mother.firstBorn.asJs mustEqual
        JsObj(("name", Str("Jack")), ("birthdate", Str("2010-11-02T23:58:00.000Z")))
      */
      mother.firstBorn.asJValue mustEqual
        JObject(List(
          JField("name", JString("Jack")),
          JField("birthdate", JString("2010-11-02T23:58:00.000Z"))
        ))
      mother.firstBorn.toForm must beEmpty
    }

    "handle Date as Date value in JsonObjects using DateSerializer" in {
      checkMongoIsRunning

      // test data: same fixed dates, but Person2 uses allFormats
      val bdjack = Calendar.getInstance.setTimezone(utc)
      bdjack.setTimeInMillis(1288742280000L)
      val bdjill = Calendar.getInstance.setTimezone(utc)
      bdjill.setTimeInMillis(1288742880000L)
      val jack = Child("Jack", bdjack.getTime)
      val jill = Child("Jill", bdjill.getTime)

      // create and save a Person record
      val mother = Person2.createRecord
      mother.children(List(jack, jill))
      mother.firstBorn(jack)
      mother.save()

      // retrieve it and compare
      val mother2 = Person2.find(mother.id.get)
      mother2.isDefined must_== true
      mother2 foreach {
        m =>
          m.children.value mustEqual mother.children.value
          m.firstBorn.value mustEqual mother.firstBorn.value
      }

      // check the conversion functions: dates serialize as {"$dt": ...} objects
      /*
      mother.children.asJs mustEqual JsArray(
        JsObj(("name", Str("Jack")), ("birthdate", JsObj(("$dt", Str("2010-11-02T23:58:00.000Z"))))),
        JsObj(("name", Str("Jill")), ("birthdate", JsObj(("$dt", Str("2010-11-03T00:08:00.000Z")))))
      )*/
      mother.children.asJValue mustEqual JArray(List(
        JObject(List(
          JField("name", JString("Jack")),
          JField("birthdate", JObject(List(JField("$dt", JString("2010-11-02T23:58:00.000Z")))))
        )),
        JObject(List(
          JField("name", JString("Jill")),
          JField("birthdate", JObject(List(JField("$dt", JString("2010-11-03T00:08:00.000Z")))))
        ))
      ))
      mother.children.toForm must beEmpty
      /*
      mother.firstBorn.asJs mustEqual
        JsObj(("name", Str("Jack")), ("birthdate", JsObj(("$dt", Str("2010-11-02T23:58:00.000Z")))))
      */
      mother.firstBorn.asJValue mustEqual
        JObject(List(
          JField("name", JString("Jack")),
          JField("birthdate", JObject(List(JField("$dt", JString("2010-11-02T23:58:00.000Z")))))
        ))
      mother.firstBorn.toForm must beEmpty
    }

    "handle ObjectId as String value in JsonObjects" in {
      checkMongoIsRunning

      // test data: Team stores ids as Strings
      val rmoss = Player.createRecord.name("Randy Moss").save()
      val bfavre = Player.createRecord.name("Brett Favre").save()
      val vikes = Team(ObjectId.get.toString, "Vikings", bfavre.id.toString)
      val jets = Team(ObjectId.get.toString, "Jets", "")
      val saints = Team(ObjectId.get.toString, "Saints", "")

      // create and save a League record
      val nfl = League.createRecord
      nfl.teams(List(vikes, jets, saints))
      nfl.champion(saints)
      nfl.save()

      // retrieve it and compare
      val nfl2 = League.find(nfl.id.get)
      nfl2.isDefined must_== true
      nfl2 foreach {
        l =>
          l.teams.value mustEqual nfl.teams.value
          l.champion.value mustEqual nfl.champion.value
      }

      // find a player via the String reference stored in Team.qb
      val vqb = Player.find(vikes.qb)
      vqb.isDefined must_== true
      vqb foreach {
        p =>
          p.name.value mustEqual "Brett Favre"
      }

      // check the conversion functions: the id renders as a bare string
      nfl.id.asJs mustEqual Str(nfl.id.value.toString)
      nfl.id.asJValue mustEqual JString(nfl.id.value.toString)
      val session = new LiftSession("", randomString(20), Empty)
      val formPattern = <input name=".*" type="text" tabindex="1" value={nfl.id.value.toString} id="_id_id"></input>
      S.initIfUninitted(session) {
        val form = nfl.id.toForm
        form.isDefined must_== true
        form foreach {
          fprime =>
            // normalize generated attribute values before comparing markup
            val f = ("* [name]" #> ".*" & "select *" #> (((ns: NodeSeq) => ns.filter {
              case e: Elem => e.attribute("selected").map(_.text) == Some("selected")
              case _ => false
            }) andThen "* [value]" #> ".*"))(fprime)
            val ret: Boolean = Helpers.compareXml(f, formPattern)
            ret must_== true
        }
      }

      // check the setFrom* functions
      val nflid = ObjectId.get
      nfl.id.setFromString(nflid.toString)
      nfl.id.value mustEqual nflid
      nfl.id.setFromString("garbage")
      nfl.id.valueBox mustEqual Failure("Invalid ObjectId string: garbage")
      nfl.id.setFromJValue(JString(nflid.toString))
      nfl.id.value mustEqual nflid
      nfl.id.setFromAny(nflid)
      nfl.id.value mustEqual nflid
      nfl.id.setFromAny(nflid.toString)
      nfl.id.value mustEqual nflid
    }

    "handle ObjectId as ObjectId values in JsonObjects using ObjectIdSerializer" in {
      checkMongoIsRunning

      // test data: Team2 stores native ObjectIds
      val rmoss = Player.createRecord.name("Randy Moss").save()
      val bfavre = Player.createRecord.name("Brett Favre").save()
      val vikes = Team2(ObjectId.get, "Vikings", bfavre.id.get)
      val jets = Team2(ObjectId.get, "Jets", bfavre.id.get)
      val saints = Team2(ObjectId.get, "Saints", bfavre.id.get)

      // create and save a League record
      val nfl = League2.createRecord
      nfl.teams(List(vikes, jets, saints))
      nfl.champion(saints)
      nfl.save()

      // retrieve it and compare
      val nfl2 = League2.find(nfl.id.toString)
      nfl2.isDefined must_== true
      nfl2 foreach {
        l =>
          l.teams.value mustEqual nfl.teams.value
          l.champion.value mustEqual nfl.champion.value
      }

      // find a player via the ObjectId reference stored in Team2.qb
      val vqb = Player.find(vikes.qb)
      vqb.isDefined must_== true
      vqb foreach {
        p =>
          p.name.value mustEqual "Brett Favre"
      }

      // check the conversion functions: the id renders as a {"$oid": ...} object
      nfl.id.asJs.toJsCmd mustEqual """{"$oid":"%s"}""".format(nfl.id.value.toString)
      nfl.id.asJValue mustEqual JObject(List(JField("$oid", JString(nfl.id.value.toString))))
      val session = new LiftSession("", randomString(20), Empty)
      val formPattern = <input name=".*" type="text" tabindex="1" value={nfl.id.value.toString} id="_id_id"></input>
      S.initIfUninitted(session) {
        val form = nfl.id.toForm
        form.isDefined must_== true
        form foreach {
          fprime =>
            // normalize generated attribute values before comparing markup
            val f = ("* [name]" #> ".*" & "select *" #> (((ns: NodeSeq) => ns.filter {
              case e: Elem => e.attribute("selected").map(_.text) == Some("selected")
              case _ => false
            }) andThen "* [value]" #> ".*"))(fprime)
            val ret: Boolean = Helpers.compareXml(f, formPattern)
            ret must_== true
        }
      }

      // check the setFrom* functions
      val nflid = ObjectId.get
      nfl.id.setFromString(nflid.toString)
      nfl.id.value mustEqual nflid
      nfl.id.setFromString("garbage")
      nfl.id.valueBox mustEqual Failure("Invalid ObjectId string: garbage")
      nfl.id.setFromJValue(JObject(List(JField("$oid", JString(nflid.toString)))))
      nfl.id.value mustEqual nflid
      nfl.id.setFromAny(nflid)
      nfl.id.value mustEqual nflid
      nfl.id.setFromAny(nflid.toString)
      nfl.id.value mustEqual nflid
    }
  }
}
| apache-2.0 |
zhujzhuo/Sahara | sahara/plugins/cdh/v5/config_helper.py | 9673 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from sahara.plugins import provisioning as p
from sahara.utils import files as f
# Default apt repository definitions for CDH 5.0.0 / Cloudera Manager 5.0.0
# on Ubuntu precise, with the matching GPG key URLs.
CDH5_UBUNTU_REPO = ('deb [arch=amd64] http://archive.cloudera.com/cdh5'
                    '/ubuntu/precise/amd64/cdh precise-cdh5.0.0 contrib'
                    '\ndeb-src http://archive.cloudera.com/cdh5/ubuntu'
                    '/precise/amd64/cdh precise-cdh5.0.0 contrib')

DEFAULT_CDH5_UBUNTU_REPO_KEY_URL = ('http://archive.cloudera.com/cdh5/ubuntu'
                                    '/precise/amd64/cdh/archive.key')

CM5_UBUNTU_REPO = ('deb [arch=amd64] http://archive.cloudera.com/cm5'
                   '/ubuntu/precise/amd64/cm precise-cm5.0.0 contrib'
                   '\ndeb-src http://archive.cloudera.com/cm5/ubuntu'
                   '/precise/amd64/cm precise-cm5.0.0 contrib')

DEFAULT_CM5_UBUNTU_REPO_KEY_URL = ('http://archive.cloudera.com/cm5/ubuntu'
                                   '/precise/amd64/cm/archive.key')

# Equivalent yum repository definitions for CentOS/RHEL 6.
CDH5_CENTOS_REPO = ('[cloudera-cdh5]'
                    '\nname=Cloudera\'s Distribution for Hadoop, Version 5'
                    '\nbaseurl=http://archive.cloudera.com/cdh5/redhat/6'
                    '/x86_64/cdh/5.0.0/'
                    '\ngpgkey = http://archive.cloudera.com/cdh5/redhat/6'
                    '/x86_64/cdh/RPM-GPG-KEY-cloudera'
                    '\ngpgcheck = 1')

CM5_CENTOS_REPO = ('[cloudera-manager]'
                   '\nname=Cloudera Manager'
                   '\nbaseurl=http://archive.cloudera.com/cm5/redhat/6'
                   '/x86_64/cm/5.0.0/'
                   '\ngpgkey = http://archive.cloudera.com/cm5/redhat/6'
                   '/x86_64/cm/RPM-GPG-KEY-cloudera'
                   '\ngpgcheck = 1')

# Default download locations for the Swift support jar and the ExtJS
# library required by the Oozie web console.
DEFAULT_SWIFT_LIB_URL = ('https://repository.cloudera.com/artifactory/repo/org'
                         '/apache/hadoop/hadoop-openstack/2.3.0-cdh5.0.0'
                         '/hadoop-openstack-2.3.0-cdh5.0.0.jar')

DEFAULT_EXTJS_LIB_URL = 'http://extjs.com/deploy/ext-2.2.zip'

# Cluster-scoped, user-visible plugin configs (repo overrides, feature
# toggles, library URLs and provisioning timeouts).
CDH5_REPO_URL = p.Config(
    'CDH5 repo list URL', 'general', 'cluster', priority=1,
    default_value="")

CDH5_REPO_KEY_URL = p.Config(
    'CDH5 repo key URL (for debian-based only)', 'general', 'cluster',
    priority=1, default_value="")

CM5_REPO_URL = p.Config(
    'CM5 repo list URL', 'general', 'cluster', priority=1,
    default_value="")

CM5_REPO_KEY_URL = p.Config(
    'CM5 repo key URL (for debian-based only)', 'general', 'cluster',
    priority=1, default_value="")

ENABLE_SWIFT = p.Config('Enable Swift', 'general', 'cluster',
                        config_type='bool', priority=1,
                        default_value=True)

ENABLE_HBASE_COMMON_LIB = p.Config('Enable HBase Common Lib',
                                   'general', 'cluster', config_type='bool',
                                   priority=1, default_value=True)

SWIFT_LIB_URL = p.Config(
    'Hadoop OpenStack library URL', 'general', 'cluster', priority=1,
    default_value=DEFAULT_SWIFT_LIB_URL,
    description=("Library that adds Swift support to CDH. The file will be "
                 "downloaded from VM."))

EXTJS_LIB_URL = p.Config(
    "ExtJS library URL", 'general', 'cluster', priority=1,
    default_value=DEFAULT_EXTJS_LIB_URL,
    description=("Ext 2.2 library is required for Oozie Web Console. "
                 "The file will be downloaded from VM with oozie."))

AWAIT_AGENTS_TIMEOUT = p.Config(
    'Await Cloudera agents timeout', 'general', 'cluster', config_type='int',
    priority=1, default_value=300, is_optional=True,
    description="Timeout for Cloudera agents connecting to Coudera Manager, "
                "in seconds")

AWAIT_MANAGER_STARTING_TIMEOUT = p.Config(
    'Timeout for Cloudera Manager starting', 'general', 'cluster',
    config_type='int', priority=1, default_value=300, is_optional=True,
    description='Timeout for Cloudera Manager starting, in seconds')
def _get_cluster_plugin_configs():
    """Return the cluster-scoped plugin configs, in display order."""
    return [
        CDH5_REPO_URL,
        CDH5_REPO_KEY_URL,
        CM5_REPO_URL,
        CM5_REPO_KEY_URL,
        ENABLE_SWIFT,
        ENABLE_HBASE_COMMON_LIB,
        SWIFT_LIB_URL,
        EXTJS_LIB_URL,
        AWAIT_MANAGER_STARTING_TIMEOUT,
        AWAIT_AGENTS_TIMEOUT,
    ]
# ng wide configs
def _load_json(path_to_file):
    """Read a packaged JSON resource file and return the parsed data."""
    return json.loads(f.get_file_text(path_to_file))
# Base path of the JSON resource files bundled with this plugin version.
path_to_config = 'plugins/cdh/v5/resources/'

# Raw per-service and per-role config descriptors, loaded once at import
# time from the packaged JSON resources.
hdfs_confs = _load_json(path_to_config + 'hdfs-service.json')
namenode_confs = _load_json(path_to_config + 'hdfs-namenode.json')
datanode_confs = _load_json(path_to_config + 'hdfs-datanode.json')
secnamenode_confs = _load_json(path_to_config + 'hdfs-secondarynamenode.json')
yarn_confs = _load_json(path_to_config + 'yarn-service.json')
resourcemanager_confs = _load_json(
    path_to_config + 'yarn-resourcemanager.json')
nodemanager_confs = _load_json(path_to_config + 'yarn-nodemanager.json')
jobhistory_confs = _load_json(path_to_config + 'yarn-jobhistory.json')
oozie_service_confs = _load_json(path_to_config + 'oozie-service.json')
oozie_role_confs = _load_json(path_to_config + 'oozie-oozie.json')
hive_service_confs = _load_json(path_to_config + 'hive-service.json')
hive_metastore_confs = _load_json(path_to_config + 'hive-metastore.json')
hive_hiveserver_confs = _load_json(path_to_config + 'hive-hiveserver2.json')
hive_webhcat_confs = _load_json(path_to_config + 'hive-webhcat.json')
hue_service_confs = _load_json(path_to_config + 'hue-service.json')
hue_role_confs = _load_json(path_to_config + 'hue-hue.json')
spark_service_confs = _load_json(path_to_config + 'spark-service.json')
spark_role_confs = _load_json(path_to_config + 'spark-history.json')
zookeeper_server_confs = _load_json(path_to_config + 'zookeeper-server.json')
zookeeper_service_confs = _load_json(path_to_config + 'zookeeper-service.json')
hbase_confs = _load_json(path_to_config + 'hbase-service.json')
master_confs = _load_json(path_to_config + 'hbase-master.json')
regionserver_confs = _load_json(path_to_config + 'hbase-regionserver.json')

# Names listed here get priority 1 in _init_configs; everything else gets 2.
priority_one_confs = _load_json(path_to_config + 'priority-one-confs.json')
def _prepare_value(value):
if not value:
return ""
return value.replace('\n', ' ')
def _init_configs(confs, app_target, scope):
    """Convert raw JSON config descriptors into sahara Config objects.

    Descriptors whose name appears in priority_one_confs get priority 1,
    all others priority 2.  Multi-line values are flattened.
    """
    result = []
    for conf in confs:
        is_priority_one = conf['name'] in priority_one_confs
        result.append(p.Config(
            conf['name'], app_target, scope,
            priority=1 if is_priority_one else 2,
            default_value=_prepare_value(conf['value']),
            description=conf['desc'], is_optional=True))
    return result
def _get_ng_plugin_configs():
    """Build the node-group-scoped plugin configs for every CDH service/role.

    The (descriptors, applicable target, scope) table below preserves the
    original registration order.
    """
    sections = [
        (hdfs_confs, 'HDFS', 'cluster'),
        (namenode_confs, 'NAMENODE', 'node'),
        (datanode_confs, 'DATANODE', 'node'),
        (secnamenode_confs, 'SECONDARYNAMENODE', 'node'),
        (yarn_confs, 'YARN', 'cluster'),
        (resourcemanager_confs, 'RESOURCEMANAGER', 'node'),
        (nodemanager_confs, 'NODEMANAGER', 'node'),
        (jobhistory_confs, 'JOBHISTORY', 'node'),
        (oozie_service_confs, 'OOZIE', 'cluster'),
        (oozie_role_confs, 'OOZIE', 'node'),
        (hive_service_confs, 'HIVE', 'cluster'),
        (hive_metastore_confs, 'HIVEMETASTORE', 'node'),
        (hive_hiveserver_confs, 'HIVESERVER', 'node'),
        (hive_webhcat_confs, 'WEBHCAT', 'node'),
        (hue_service_confs, 'HUE', 'cluster'),
        (hue_role_confs, 'HUE', 'node'),
        (spark_service_confs, 'SPARK_ON_YARN', 'cluster'),
        (spark_role_confs, 'SPARK_ON_YARN', 'node'),
        (zookeeper_service_confs, 'ZOOKEEPER', 'cluster'),
        (zookeeper_server_confs, 'ZOOKEEPER', 'node'),
        (hbase_confs, 'HBASE', 'cluster'),
        (master_confs, 'MASTER', 'node'),
        (regionserver_confs, 'REGIONSERVER', 'node'),
    ]
    cfg = []
    for confs, target, scope in sections:
        cfg += _init_configs(confs, target, scope)
    return cfg
def get_plugin_configs():
    """Return every plugin config: cluster-wide ones first, then node-group."""
    return _get_cluster_plugin_configs() + _get_ng_plugin_configs()
def _get_config_value(cluster, key):
return cluster.cluster_configs.get(
'general', {}).get(key.name, key.default_value)
def get_cdh5_repo_url(cluster):
    """Return the user-supplied CDH5 package repo URL ("" if unset)."""
    return _get_config_value(cluster, CDH5_REPO_URL)


def get_cdh5_key_url(cluster):
    """Return the CDH5 repo GPG key URL (debian-based images only)."""
    return _get_config_value(cluster, CDH5_REPO_KEY_URL)


def get_cm5_repo_url(cluster):
    """Return the user-supplied Cloudera Manager 5 repo URL ("" if unset)."""
    return _get_config_value(cluster, CM5_REPO_URL)


def get_cm5_key_url(cluster):
    """Return the CM5 repo GPG key URL (debian-based images only)."""
    return _get_config_value(cluster, CM5_REPO_KEY_URL)


def is_swift_enabled(cluster):
    """Return whether Swift support should be installed for the cluster."""
    return _get_config_value(cluster, ENABLE_SWIFT)


def is_hbase_common_lib_enabled(cluster):
    """Return whether the HBase common lib should be enabled."""
    return _get_config_value(cluster, ENABLE_HBASE_COMMON_LIB)


def get_swift_lib_url(cluster):
    """Return the URL of the hadoop-openstack (Swift) jar to download."""
    return _get_config_value(cluster, SWIFT_LIB_URL)


def get_extjs_lib_url(cluster):
    """Return the URL of the ExtJS archive needed by the Oozie web console."""
    return _get_config_value(cluster, EXTJS_LIB_URL)
| apache-2.0 |
amirakhmedov/ignite | modules/core/src/main/java/org/apache/ignite/IgniteCache.java | 72761 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite;
import java.io.Serializable;
import java.sql.Timestamp;
import java.util.Collection;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import javax.cache.Cache;
import javax.cache.CacheException;
import javax.cache.configuration.Configuration;
import javax.cache.event.CacheEntryRemovedListener;
import javax.cache.expiry.ExpiryPolicy;
import javax.cache.integration.CacheLoader;
import javax.cache.integration.CacheWriter;
import javax.cache.processor.EntryProcessor;
import javax.cache.processor.EntryProcessorException;
import javax.cache.processor.EntryProcessorResult;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.cache.CacheEntry;
import org.apache.ignite.cache.CacheEntryProcessor;
import org.apache.ignite.cache.CacheMetrics;
import org.apache.ignite.cache.CacheMode;
import org.apache.ignite.cache.CachePeekMode;
import org.apache.ignite.cache.query.FieldsQueryCursor;
import org.apache.ignite.cache.query.Query;
import org.apache.ignite.cache.query.QueryCursor;
import org.apache.ignite.cache.query.QueryDetailMetrics;
import org.apache.ignite.cache.query.QueryMetrics;
import org.apache.ignite.cache.query.ScanQuery;
import org.apache.ignite.cache.query.SpiQuery;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.cache.query.SqlQuery;
import org.apache.ignite.cache.query.TextQuery;
import org.apache.ignite.cache.store.CacheStore;
import org.apache.ignite.cluster.ClusterGroup;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.lang.IgniteAsyncSupport;
import org.apache.ignite.lang.IgniteAsyncSupported;
import org.apache.ignite.lang.IgniteBiInClosure;
import org.apache.ignite.lang.IgniteBiPredicate;
import org.apache.ignite.lang.IgniteClosure;
import org.apache.ignite.lang.IgniteFuture;
import org.apache.ignite.mxbean.CacheMetricsMXBean;
import org.apache.ignite.transactions.TransactionException;
import org.apache.ignite.transactions.TransactionHeuristicException;
import org.apache.ignite.transactions.TransactionRollbackException;
import org.apache.ignite.transactions.TransactionTimeoutException;
import org.jetbrains.annotations.Nullable;
/**
* Main entry point for all <b>Data Grid APIs.</b> You can get a named cache by calling {@link Ignite#cache(String)}
* method.
* <h1 class="header">Functionality</h1>
* This API extends {@link javax.cache.Cache} API which contains {@code JCache (JSR107)} cache functionality
* and documentation. In addition to {@link javax.cache.Cache} functionality this API provides:
* <ul>
* <li>Ability to perform basic atomic Map-like operations available on {@code JCache} API.</li>
 * <li>Ability to bulk load cache via {@link #loadCache(IgniteBiPredicate, Object...)} method.</li>
* <li>Distributed lock functionality via {@link #lock(Object)} methods.</li>
* <li>Ability to query cache using Predicate, SQL, and Text queries via {@link #query(Query)} method.</li>
* <li>Ability to collect cache and query metrics.</li>
 * <li>Ability to force partition rebalancing via {@link #rebalance()} method
* (in case if delayed rebalancing was configured.)</li>
* <li>Ability to peek into memory without doing actual {@code get(...)} from cache
* via {@link #localPeek(Object, CachePeekMode...)} methods</li>
* <li>Ability to evict and promote entries from on-heap to off-heap or swap and back.</li>
* <li>Ability to atomically collocate compute and data via {@link #invoke(Object, CacheEntryProcessor, Object...)}
* methods.</li>
* </ul>
* <h1 class="header">Transactions</h1>
 * Cache API supports transactions. You can group a set of cache methods within a transaction
* to provide ACID-compliant behavior. See {@link IgniteTransactions} for more information.
* <br>
* Methods which can be used inside transaction (put, get...) throw TransactionException.
* See {@link TransactionException} for more information.
*
* @param <K> Cache key type.
* @param <V> Cache value type.
*/
public interface IgniteCache<K, V> extends javax.cache.Cache<K, V>, IgniteAsyncSupport {
    /**
     * {@inheritDoc}
     *
     * @deprecated Use the dedicated per-operation {@code ...Async()} methods
     *      (e.g. {@link #getAsync(Object)}) instead of the async-enabled facade.
     */
    @Deprecated
    @Override public IgniteCache<K, V> withAsync();

    /** {@inheritDoc} */
    @Override public <C extends Configuration<K, V>> C getConfiguration(Class<C> clazz);
    /**
     * Returns cache with the specified expiry policy set. This policy will be used for each operation
     * invoked on the returned cache.
     * <p>
     * This method does not modify existing cache instance.
     *
     * @param plc Expiry policy to use.
     * @return Cache instance with the specified expiry policy set.
     */
    public IgniteCache<K, V> withExpiryPolicy(ExpiryPolicy plc);
    /**
     * Returns cache with read-through and write-through behavior disabled, i.e. operations
     * on the returned instance bypass the configured {@link CacheStore}, if any.
     *
     * @return Cache with read-through and write-through behavior disabled.
     */
    public IgniteCache<K, V> withSkipStore();

    /**
     * Returns cache with no-retries behavior enabled, i.e. failed operations are
     * reported to the caller instead of being retried internally.
     *
     * @return Cache with no-retries behavior enabled.
     */
    public IgniteCache<K, V> withNoRetries();

    /**
     * Gets an instance of {@code IgniteCache} that will be allowed to execute cache operations (read, write)
     * regardless of partition loss policy.
     *
     * @return Cache without partition loss protection.
     */
    public IgniteCache<K, V> withPartitionRecover();
    /**
     * Returns cache that will operate with binary objects.
     * <p>
     * Cache returned by this method will not be forced to deserialize binary objects,
     * so keys and values will be returned from cache API methods without changes. Therefore,
     * signature of the cache can contain only following types:
     * <ul>
     *     <li><code>org.apache.ignite.binary.BinaryObject</code> for binary classes</li>
     *     <li>All primitives (byte, int, ...) and their boxed versions (Byte, Integer, ...)</li>
     *     <li>Arrays of primitives (byte[], int[], ...)</li>
     *     <li>{@link String} and array of {@link String}s</li>
     *     <li>{@link UUID} and array of {@link UUID}s</li>
     *     <li>{@link Date} and array of {@link Date}s</li>
     *     <li>{@link Timestamp} and array of {@link Timestamp}s</li>
     *     <li>Enums and array of enums</li>
     *     <li>
     *         Maps, collections and array of objects (but objects inside
     *         them will still be converted if they are binary)
     *     </li>
     * </ul>
     * <p>
     * For example, if you use {@link Integer} as a key and {@code Value} class as a value
     * (which will be stored in binary format), you should acquire following projection
     * to avoid deserialization:
     * <pre>
     * IgniteCache<Integer, BinaryObject> prj = cache.withKeepBinary();
     *
     * // Value is not deserialized and returned in binary format.
     * BinaryObject po = prj.get(1);
     * </pre>
     * <p>
     * Note that this method makes sense only if cache is working in binary mode,
     * i.e. if the default marshaller is used.
     * If not, this method is a no-op and will return current cache.
     *
     * @return New cache instance for binary objects.
     */
    public <K1, V1> IgniteCache<K1, V1> withKeepBinary();
    /**
     * By default atomic operations are allowed in transactions.
     * To restrict transactions from operations with atomic caches you can set system property
     * {@link IgniteSystemProperties#IGNITE_ALLOW_ATOMIC_OPS_IN_TX IGNITE_ALLOW_ATOMIC_OPS_IN_TX} to {@code false}.
     * <p>
     * If you want to use atomic operations inside transactions in case they are restricted by system property,
     * you should allow it before transaction start.
     *
     * @return Cache with atomic operations allowed in transactions.
     */
    public <K1, V1> IgniteCache<K1, V1> withAllowAtomicOpsInTx();
    /**
     * Executes {@link #localLoadCache(IgniteBiPredicate, Object...)} on all cache nodes.
     *
     * @param p Optional predicate (may be {@code null}). If provided, will be used to
     *      filter values loaded from storage before they are put into cache.
     * @param args Optional user arguments to be passed into
     *      {@link CacheStore#loadCache(IgniteBiInClosure, Object...)} method.
     * @throws CacheException If loading failed.
     * @see #loadCacheAsync(IgniteBiPredicate, Object...)
     */
    @IgniteAsyncSupported
    public void loadCache(@Nullable IgniteBiPredicate<K, V> p, @Nullable Object... args) throws CacheException;

    /**
     * Asynchronously executes {@link #localLoadCache(IgniteBiPredicate, Object...)} on all cache nodes.
     *
     * @param p Optional predicate (may be {@code null}). If provided, will be used to
     *      filter values loaded from storage before they are put into cache.
     * @param args Optional user arguments to be passed into
     *      {@link CacheStore#loadCache(IgniteBiInClosure, Object...)} method.
     * @return a Future representing pending completion of the cache loading.
     * @throws CacheException If loading failed.
     * @see #loadCache(IgniteBiPredicate, Object...)
     */
    public IgniteFuture<Void> loadCacheAsync(@Nullable IgniteBiPredicate<K, V> p, @Nullable Object... args)
        throws CacheException;
    /**
     * Delegates to {@link CacheStore#loadCache(IgniteBiInClosure,Object...)} method
     * to load state from the underlying persistent storage. The loaded values
     * will then be given to the optionally passed in predicate, and, if the predicate returns
     * {@code true}, will be stored in cache. If predicate is {@code null}, then
     * all loaded values will be stored in cache.
     * <p>
     * Note that this method does not receive keys as a parameter, so it is up to
     * {@link CacheStore} implementation to provide all the data to be loaded.
     * <p>
     * This method is not transactional and may end up loading a stale value into
     * cache if another thread has updated the value immediately after it has been
     * loaded. It is mostly useful when pre-loading the cache from underlying
     * data store before start, or for read-only caches.
     *
     * @param p Optional predicate (may be {@code null}). If provided, will be used to
     *      filter values to be put into cache.
     * @param args Optional user arguments to be passed into
     *      {@link CacheStore#loadCache(IgniteBiInClosure, Object...)} method.
     * @throws CacheException If loading failed.
     * @see #localLoadCacheAsync(IgniteBiPredicate, Object...)
     */
    @IgniteAsyncSupported
    public void localLoadCache(@Nullable IgniteBiPredicate<K, V> p, @Nullable Object... args) throws CacheException;

    /**
     * Asynchronously loads state from the underlying persistent storage by delegating
     * to {@link CacheStore#loadCache(IgniteBiInClosure,Object...)} method. The loaded values
     * will then be given to the optionally passed in predicate, and, if the predicate returns
     * {@code true}, will be stored in cache. If predicate is {@code null}, then
     * all loaded values will be stored in cache.
     * <p>
     * Note that this method does not receive keys as a parameter, so it is up to
     * {@link CacheStore} implementation to provide all the data to be loaded.
     * <p>
     * This method is not transactional and may end up loading a stale value into
     * cache if another thread has updated the value immediately after it has been
     * loaded. It is mostly useful when pre-loading the cache from underlying
     * data store before start, or for read-only caches.
     *
     * @param p Optional predicate (may be {@code null}). If provided, will be used to
     *      filter values to be put into cache.
     * @param args Optional user arguments to be passed into
     *      {@link CacheStore#loadCache(IgniteBiInClosure, Object...)} method.
     * @return a Future representing pending completion of the cache loading.
     * @throws CacheException If loading failed.
     * @see #localLoadCache(IgniteBiPredicate, Object...)
     */
    public IgniteFuture<Void> localLoadCacheAsync(@Nullable IgniteBiPredicate<K, V> p, @Nullable Object... args)
        throws CacheException;
    /**
     * Stores given key-value pair in cache only if cache had no previous mapping for it. If cache
     * previously contained value for the given key, then this value is returned.
     * In case of {@link CacheMode#PARTITIONED} or {@link CacheMode#REPLICATED} caches,
     * the value will be loaded from the primary node, which in its turn may load the value
     * from the swap storage, and consecutively, if it's not in swap,
     * from the underlying persistent storage. If value has to be loaded from persistent
     * storage, {@link CacheStore#load(Object)} method will be used.
     * <p>
     * If the returned value is not needed, method {@link #putIfAbsent(Object, Object)} should
     * always be used instead of this one to avoid the overhead associated with returning of the
     * previous value.
     * <p>
     * If write-through is enabled, the stored value will be persisted to {@link CacheStore}
     * via {@link CacheStore#write(javax.cache.Cache.Entry)} method.
     * <h2 class="header">Transactions</h2>
     * This method is transactional and will enlist the entry into ongoing transaction
     * if there is one.
     *
     * @param key Key to store in cache.
     * @param val Value to be associated with the given key.
     * @return Previously contained value regardless of whether put happened or not ({@code null} if there was no
     *      previous value).
     * @throws NullPointerException If either key or value are {@code null}.
     * @throws CacheException If put operation failed.
     * @throws TransactionException If operation within transaction is failed.
     * @see #getAndPutIfAbsentAsync(Object, Object)
     */
    @IgniteAsyncSupported
    public V getAndPutIfAbsent(K key, V val) throws CacheException, TransactionException;

    /**
     * Asynchronously stores given key-value pair in cache only if cache had no previous mapping for it. If cache
     * previously contained value for the given key, then this value is returned.
     * In case of {@link CacheMode#PARTITIONED} or {@link CacheMode#REPLICATED} caches,
     * the value will be loaded from the primary node, which in its turn may load the value
     * from the swap storage, and consecutively, if it's not in swap,
     * from the underlying persistent storage. If value has to be loaded from persistent
     * storage, {@link CacheStore#load(Object)} method will be used.
     * <p>
     * If the returned value is not needed, method {@link #putIfAbsentAsync(Object, Object)} should
     * always be used instead of this one to avoid the overhead associated with returning of the
     * previous value.
     * <p>
     * If write-through is enabled, the stored value will be persisted to {@link CacheStore}
     * via {@link CacheStore#write(javax.cache.Cache.Entry)} method.
     * <h2 class="header">Transactions</h2>
     * This method is transactional and will enlist the entry into ongoing transaction
     * if there is one.
     *
     * @param key Key to store in cache.
     * @param val Value to be associated with the given key.
     * @return a Future representing pending completion of the operation.
     * @throws NullPointerException If either key or value are {@code null}.
     * @throws CacheException If put operation failed.
     * @throws TransactionException If operation within transaction is failed.
     * @see #getAndPutIfAbsent(Object, Object)
     */
    public IgniteFuture<V> getAndPutIfAbsentAsync(K key, V val) throws CacheException, TransactionException;
    /**
     * Creates a {@link Lock} instance associated with passed key.
     * This method does not acquire lock immediately, you have to call appropriate method on returned instance.
     * Returned lock does not support {@link Lock#newCondition()} method,
     * other methods defined in {@link Lock} are supported.
     *
     * @param key Key for lock.
     * @return New lock instance associated with passed key.
     * @see Lock#lock()
     * @see Lock#tryLock(long, TimeUnit)
     */
    public Lock lock(K key);

    /**
     * Creates a {@link Lock} instance associated with passed keys.
     * This method does not acquire lock immediately, you have to call appropriate method on returned instance.
     * Returned lock does not support {@link Lock#newCondition()} method,
     * other methods defined in {@link Lock} are supported.
     *
     * @param keys Keys for lock.
     * @return New lock instance associated with passed keys.
     * @see Lock#lock()
     * @see Lock#tryLock(long, TimeUnit)
     */
    public Lock lockAll(Collection<? extends K> keys);
    /**
     * Checks if specified key is locked.
     * <p>
     * This is a local in-VM operation and does not involve any network trips
     * or access to persistent storage in any way.
     *
     * @param key Key to check.
     * @param byCurrThread If {@code true} method will check that current thread owns a lock on this key, otherwise
     *      will check that any thread on any node owns a lock on this key.
     * @return {@code True} if lock is owned by some node.
     */
    public boolean isLocalLocked(K key, boolean byCurrThread);
    /**
     * Queries cache. Accepts any subclass of {@link Query} interface.
     * See also {@link #query(SqlFieldsQuery)}.
     *
     * @param qry Query.
     * @return Cursor.
     * @see ScanQuery
     * @see SqlQuery
     * @see SqlFieldsQuery
     * @see TextQuery
     * @see SpiQuery
     */
    public <R> QueryCursor<R> query(Query<R> qry);

    /**
     * Queries cache. Accepts {@link SqlFieldsQuery} class.
     *
     * @param qry SqlFieldsQuery.
     * @return Cursor.
     * @see SqlFieldsQuery
     */
    public FieldsQueryCursor<List<?>> query(SqlFieldsQuery qry);

    /**
     * Queries the cache transforming the entries on the server nodes. Can be used, for example,
     * to avoid network overhead in case only one field out of a large object is required by the client.
     * <p>
     * Currently transformers are supported ONLY for {@link ScanQuery}. Passing any other
     * subclass of {@link Query} interface to this method will end up with
     * {@link UnsupportedOperationException}.
     *
     * @param qry Query.
     * @param transformer Transformer.
     * @return Cursor.
     */
    public <T, R> QueryCursor<R> query(Query<T> qry, IgniteClosure<T, R> transformer);
    /**
     * Allows for iteration over local cache entries.
     *
     * @param peekModes Peek modes.
     * @return Iterable over local cache entries.
     * @throws CacheException If failed.
     */
    public Iterable<Entry<K, V>> localEntries(CachePeekMode... peekModes) throws CacheException;

    /**
     * Gets query metrics.
     *
     * @return Metrics.
     */
    public QueryMetrics queryMetrics();

    /**
     * Resets query metrics.
     */
    public void resetQueryMetrics();

    /**
     * Gets query detail metrics.
     * Query detail metrics could be enabled via {@link CacheConfiguration#setQueryDetailMetricsSize(int)} method.
     *
     * @return Metrics.
     */
    public Collection<? extends QueryDetailMetrics> queryDetailMetrics();

    /**
     * Resets query detail metrics.
     */
    public void resetQueryDetailMetrics();
    /**
     * Attempts to evict all entries associated with keys. Note that an entry will
     * be evicted only if it's not used (not participating in any locks or
     * transactions).
     *
     * @param keys Keys to evict.
     */
    public void localEvict(Collection<? extends K> keys);

    /**
     * Peeks at in-memory cached value using default optional peek mode.
     * <p>
     * This method will not load value from any persistent store or from a remote node.
     * <h2 class="header">Transactions</h2>
     * This method does not participate in any transactions.
     *
     * @param key Entry key.
     * @param peekModes Peek modes.
     * @return Peeked value, or {@code null} if not found.
     * @throws NullPointerException If key is {@code null}.
     */
    public V localPeek(K key, CachePeekMode... peekModes);
    /**
     * Gets the number of all entries cached across all nodes. By default, if {@code peekModes} value isn't defined,
     * only size of primary copies across all nodes will be returned. This behavior is identical to calling
     * this method with {@link CachePeekMode#PRIMARY} peek mode.
     * <p>
     * NOTE: this operation is distributed and will query all participating nodes for their cache sizes.
     *
     * @param peekModes Optional peek modes. If not provided, then total cache size is returned.
     * @return Cache size across all nodes.
     * @throws CacheException On error.
     * @see #sizeAsync(CachePeekMode...)
     */
    @IgniteAsyncSupported
    public int size(CachePeekMode... peekModes) throws CacheException;

    /**
     * Asynchronously gets the number of all entries cached across all nodes. By default,
     * if {@code peekModes} value isn't defined, only size of primary copies across all nodes will be returned.
     * This behavior is identical to calling this method with {@link CachePeekMode#PRIMARY} peek mode.
     * <p>
     * NOTE: this operation is distributed and will query all participating nodes for their cache sizes.
     *
     * @param peekModes Optional peek modes. If not provided, then total cache size is returned.
     * @return a Future representing pending completion of the operation.
     * @throws CacheException On error.
     * @see #size(CachePeekMode...)
     */
    public IgniteFuture<Integer> sizeAsync(CachePeekMode... peekModes) throws CacheException;
    /**
     * Gets the number of all entries cached across all nodes as a long value. By default, if {@code peekModes} value
     * isn't defined, only size of primary copies across all nodes will be returned. This behavior is identical to
     * calling this method with {@link CachePeekMode#PRIMARY} peek mode.
     * <p>
     * NOTE: this operation is distributed and will query all participating nodes for their cache sizes.
     *
     * @param peekModes Optional peek modes. If not provided, then total cache size is returned.
     * @return Cache size across all nodes.
     * @throws CacheException On error.
     * @see #sizeLongAsync(CachePeekMode...)
     */
    @IgniteAsyncSupported
    public long sizeLong(CachePeekMode... peekModes) throws CacheException;

    /**
     * Asynchronously gets the number of all entries cached across all nodes as a long value. By default,
     * if {@code peekModes} value isn't defined, only size of primary copies across all nodes will be returned.
     * This behavior is identical to calling this method with {@link CachePeekMode#PRIMARY} peek mode.
     * <p>
     * NOTE: this operation is distributed and will query all participating nodes for their cache sizes.
     *
     * @param peekModes Optional peek modes. If not provided, then total cache size is returned.
     * @return a Future representing pending completion of the operation.
     * @throws CacheException On error.
     * @see #sizeLong(CachePeekMode...)
     */
    public IgniteFuture<Long> sizeLongAsync(CachePeekMode... peekModes) throws CacheException;
    /**
     * Gets the number of all entries cached in a partition as a long value. By default, if {@code peekModes} value
     * isn't defined, only size of primary copies across all nodes will be returned. This behavior is identical to
     * calling this method with {@link CachePeekMode#PRIMARY} peek mode.
     * <p>
     * NOTE: this operation is distributed and will query all participating nodes for their partition cache sizes.
     *
     * @param partition Partition number.
     * @param peekModes Optional peek modes. If not provided, then total partition cache size is returned.
     * @return Partition cache size across all nodes.
     * @throws CacheException On error.
     * @see #sizeLongAsync(int, CachePeekMode...)
     */
    @IgniteAsyncSupported
    public long sizeLong(int partition, CachePeekMode... peekModes) throws CacheException;

    /**
     * Asynchronously gets the number of all entries cached in a partition as a long value. By default,
     * if {@code peekModes} value isn't defined, only size of primary copies across all nodes will be returned.
     * This behavior is identical to calling this method with {@link CachePeekMode#PRIMARY} peek mode.
     * <p>
     * NOTE: this operation is distributed and will query all participating nodes for their partition cache sizes.
     *
     * @param partition Partition number.
     * @param peekModes Optional peek modes. If not provided, then total partition cache size is returned.
     * @return a Future representing pending completion of the operation.
     * @throws CacheException On error.
     * @see #sizeLong(int, CachePeekMode...)
     */
    public IgniteFuture<Long> sizeLongAsync(int partition, CachePeekMode... peekModes) throws CacheException;
    /**
     * Gets the number of all entries cached on this node. By default, if {@code peekModes} value isn't defined,
     * only size of primary copies will be returned. This behavior is identical to calling this method with
     * {@link CachePeekMode#PRIMARY} peek mode.
     *
     * @param peekModes Optional peek modes. If not provided, then total cache size is returned.
     * @return Cache size on this node.
     */
    public int localSize(CachePeekMode... peekModes);

    /**
     * Gets the number of all entries cached on this node as a long value. By default, if {@code peekModes} value isn't
     * defined, only size of primary copies will be returned. This behavior is identical to calling this method with
     * {@link CachePeekMode#PRIMARY} peek mode.
     *
     * @param peekModes Optional peek modes. If not provided, then total cache size is returned.
     * @return Cache size on this node.
     */
    public long localSizeLong(CachePeekMode... peekModes);

    /**
     * Gets the number of all entries cached on this node for the given partition as a long value. By default,
     * if {@code peekModes} value isn't defined, only size of primary copies will be returned. This behavior is
     * identical to calling this method with {@link CachePeekMode#PRIMARY} peek mode.
     *
     * @param partition Partition number.
     * @param peekModes Optional peek modes. If not provided, then total cache size is returned.
     * @return Cache size on this node.
     */
    public long localSizeLong(int partition, CachePeekMode... peekModes);
    /**
     * Invokes each {@link EntryProcessor} from map's values against the correspondent
     * {@link javax.cache.Cache.Entry} specified by map's key set.
     * <p>
     * If an {@link javax.cache.Cache.Entry} does not exist for the specified key, an attempt is made
     * to load it (if a loader is configured) or a surrogate {@link javax.cache.Cache.Entry},
     * consisting of the key and a value of null is provided.
     * <p>
     * The order that the entries for the keys are processed is undefined.
     * Implementations may choose to process the entries in any order, including
     * concurrently. Furthermore there is no guarantee implementations will
     * use the same {@link EntryProcessor} instance to process each entry, as
     * the case may be in a non-local cache topology.
     * <p>
     * The result of executing the {@link EntryProcessor} is returned as a
     * {@link Map} of {@link EntryProcessorResult}s, one result per key. Should the
     * {@link EntryProcessor} or Caching implementation throw an exception, the
     * exception is wrapped and re-thrown when a call to
     * {@link javax.cache.processor.EntryProcessorResult#get()} is made.
     * <p>
     * Please refer to documentation for {@link CacheAtomicityMode#ATOMIC} for information on
     * system behavior in crash scenarios for atomic caches.
     *
     * @param map Map containing keys and entry processors to be applied to values.
     * @param args Additional arguments to pass to the {@link EntryProcessor}.
     * @return The map of {@link EntryProcessorResult}s of the processing per key,
     *      if any, defined by the {@link EntryProcessor} implementation. No mappings
     *      will be returned for {@link EntryProcessor}s that return a
     *      <code>null</code> value for a key.
     * @throws TransactionException If operation within transaction is failed.
     * @see #invokeAllAsync(Map, Object...)
     */
    @IgniteAsyncSupported
    public <T> Map<K, EntryProcessorResult<T>> invokeAll(Map<? extends K, ? extends EntryProcessor<K, V, T>> map,
        Object... args) throws TransactionException;

    /**
     * Asynchronous version of the {@link #invokeAll(Map, Object...)} method.
     *
     * @param map Map containing keys and entry processors to be applied to values.
     * @param args Additional arguments to pass to the {@link EntryProcessor}.
     * @return a Future representing pending completion of the operation. See more about future result
     *      at the {@link #invokeAll(Map, Object...)}.
     * @throws TransactionException If operation within transaction is failed.
     */
    public <T> IgniteFuture<Map<K, EntryProcessorResult<T>>> invokeAllAsync(
        Map<? extends K, ? extends EntryProcessor<K, V, T>> map, Object... args) throws TransactionException;
    /**
     * {@inheritDoc}
     * @throws TransactionException If operation within transaction is failed.
     */
    @IgniteAsyncSupported
    @Override public V get(K key) throws TransactionException;

    /**
     * Asynchronously gets an entry from the cache.
     * <p>
     * If the cache is configured to use read-through, and a future result would be null
     * because the entry is missing from the cache, the Cache's {@link CacheLoader}
     * is called in an attempt to load the entry.
     *
     * @param key Key.
     * @return a Future representing pending completion of the operation.
     * @see #get(Object)
     */
    public IgniteFuture<V> getAsync(K key);
    /**
     * Gets an entry from the cache.
     * <p>
     * If the cache is configured to use read-through, and get would return null
     * because the entry is missing from the cache, the Cache's {@link CacheLoader}
     * is called in an attempt to load the entry.
     *
     * @param key The key whose associated value is to be returned.
     * @return The element, or null, if it does not exist.
     * @throws IllegalStateException If the cache is {@link #isClosed()}.
     * @throws NullPointerException If the key is {@code null}.
     * @throws CacheException If there is a problem fetching the value.
     * @throws ClassCastException If the implementation is configured to perform
     *      runtime-type-checking, and the key or value types are incompatible with those that have been
     *      configured for the {@link Cache}.
     * @throws TransactionException If operation within transaction is failed.
     * @see #getEntryAsync(Object)
     */
    @IgniteAsyncSupported
    public CacheEntry<K, V> getEntry(K key) throws TransactionException;

    /**
     * Asynchronously gets an entry from the cache.
     * <p>
     * If the cache is configured to use read-through, and a future result would be null
     * because the entry is missing from the cache, the Cache's {@link CacheLoader}
     * is called in an attempt to load the entry.
     *
     * @param key The key whose associated value is to be returned.
     * @return a Future representing pending completion of the operation.
     * @throws IllegalStateException If the cache is {@link #isClosed()}.
     * @throws NullPointerException If the key is {@code null}.
     * @throws CacheException If there is a problem fetching the value.
     * @throws ClassCastException If the implementation is configured to perform
     *      runtime-type-checking, and the key or value types are incompatible with those that have been
     *      configured for the {@link Cache}.
     * @throws TransactionException If operation within transaction is failed.
     * @see #getEntry(Object)
     */
    public IgniteFuture<CacheEntry<K, V>> getEntryAsync(K key) throws TransactionException;
    /**
     * {@inheritDoc}
     * @throws TransactionException If operation within transaction is failed.
     */
    @IgniteAsyncSupported
    @Override public Map<K, V> getAll(Set<? extends K> keys) throws TransactionException;

    /**
     * Asynchronously gets a collection of entries from the {@link Cache}, returning them as
     * {@link Map} of the values associated with the set of keys requested.
     * <p>
     * If the cache is configured read-through, and a future result for a key would
     * be null because an entry is missing from the cache, the Cache's
     * {@link CacheLoader} is called in an attempt to load the entry. If an
     * entry cannot be loaded for a given key, the key will not be present in
     * the returned Map.
     *
     * @param keys Keys set.
     * @return a Future representing pending completion of the operation.
     * @throws TransactionException If operation within transaction is failed.
     * @see #getAll(Set)
     */
    public IgniteFuture<Map<K, V>> getAllAsync(Set<? extends K> keys) throws TransactionException;
    /**
     * Gets a collection of entries from the {@link Cache}.
     * <p>
     * If the cache is configured read-through, and a get for a key would
     * return null because an entry is missing from the cache, the Cache's
     * {@link CacheLoader} is called in an attempt to load the entry. If an
     * entry cannot be loaded for a given key, the key will not be present in
     * the returned Collection.
     *
     * @param keys The keys whose associated values are to be returned.
     * @return A collection of entries that were found for the given keys. Entries not found
     *      in the cache are not in the returned collection.
     * @throws NullPointerException If keys is null or if keys contains a {@code null}.
     * @throws IllegalStateException If the cache is {@link #isClosed()}.
     * @throws CacheException If there is a problem fetching the values.
     * @throws ClassCastException If the implementation is configured to perform
     *      runtime-type-checking, and the key or value types are incompatible with those that have been
     *      configured for the {@link Cache}.
     * @throws TransactionException If operation within transaction is failed.
     * @see #getEntriesAsync(Set)
     */
    @IgniteAsyncSupported
    public Collection<CacheEntry<K, V>> getEntries(Set<? extends K> keys) throws TransactionException;

    /**
     * Asynchronously gets a collection of entries from the {@link Cache}.
     * <p>
     * If the cache is configured read-through, and a future result for a key would
     * be null because an entry is missing from the cache, the Cache's
     * {@link CacheLoader} is called in an attempt to load the entry. If an
     * entry cannot be loaded for a given key, the key will not be present in
     * the returned Collection.
     *
     * @param keys The keys whose associated values are to be returned.
     * @return a Future representing pending completion of the operation.
     * @throws NullPointerException If keys is null or if keys contains a {@code null}.
     * @throws IllegalStateException If the cache is {@link #isClosed()}.
     * @throws CacheException If there is a problem fetching the values.
     * @throws ClassCastException If the implementation is configured to perform
     *      runtime-type-checking, and the key or value types are incompatible with those that have been
     *      configured for the {@link Cache}.
     * @throws TransactionException If operation within transaction is failed.
     * @see #getEntries(Set)
     */
    public IgniteFuture<Collection<CacheEntry<K, V>>> getEntriesAsync(Set<? extends K> keys) throws TransactionException;
/**
* Gets values from cache. Will bypass started transaction, if any, i.e. will not enlist entries
* and will not lock any keys if pessimistic transaction is started by thread.
*
* @param keys The keys whose associated values are to be returned.
* @return A map of entries that were found for the given keys.
*/
@IgniteAsyncSupported
public Map<K, V> getAllOutTx(Set<? extends K> keys);
/**
* Asynchronously gets values from cache. Will bypass started transaction, if any, i.e. will not enlist entries
* and will not lock any keys if pessimistic transaction is started by thread.
*
* @param keys The keys whose associated values are to be returned.
* @return a Future representing pending completion of the operation.
*/
public IgniteFuture<Map<K, V>> getAllOutTxAsync(Set<? extends K> keys);
/**
* {@inheritDoc}
* @throws TransactionException If operation within transaction is failed.
*/
@IgniteAsyncSupported
@Override public boolean containsKey(K key) throws TransactionException;
/**
* Asynchronously determines if the {@link Cache} contains an entry for the specified key.
* <p>
* More formally, future result is <tt>true</tt> if and only if this cache contains a
* mapping for a key <tt>k</tt> such that <tt>key.equals(k)</tt>.
* (There can be at most one such mapping.)
*
* @param key Key.
* @return a Future representing pending completion of the operation.
* @throws TransactionException If operation within transaction is failed.
*/
public IgniteFuture<Boolean> containsKeyAsync(K key) throws TransactionException;
/**
 * Determines if the {@link Cache} contains entries for the specified keys.
 *
 * @param keys Keys whose presence in this cache is to be tested.
 * @return {@code True} if this cache contains a mapping for the specified keys.
 * @throws TransactionException If operation within transaction is failed.
 */
@IgniteAsyncSupported
public boolean containsKeys(Set<? extends K> keys) throws TransactionException;
/**
 * Asynchronously determines if the {@link Cache} contains entries for the specified keys.
 *
 * @param keys Keys whose presence in this cache is to be tested.
 * @return a Future representing pending completion of the operation.
 * @throws TransactionException If operation within transaction is failed.
 */
public IgniteFuture<Boolean> containsKeysAsync(Set<? extends K> keys) throws TransactionException;
/**
* {@inheritDoc}
* @throws TransactionException If operation within transaction is failed.
*/
@IgniteAsyncSupported
@Override public void put(K key, V val) throws TransactionException;
/**
* Asynchronously associates the specified value with the specified key in the cache.
* <p>
* If the {@link Cache} previously contained a mapping for the key, the old
* value is replaced by the specified value. (A cache <tt>c</tt> is said to
* contain a mapping for a key <tt>k</tt> if and only if {@link
* #containsKey(Object) c.containsKey(k)} would return <tt>true</tt>.)
*
* @param key Key.
* @param val Value.
* @return a Future representing pending completion of the operation.
* @throws TransactionException If operation within transaction is failed.
*/
public IgniteFuture<Void> putAsync(K key, V val) throws TransactionException;
/**
* {@inheritDoc}
* @throws TransactionException If operation within transaction is failed.
*/
@IgniteAsyncSupported
@Override public V getAndPut(K key, V val) throws TransactionException;
/**
* Asynchronously associates the specified value with the specified key in this cache,
* returning an existing value if one existed as the future result.
* <p>
* If the cache previously contained a mapping for
* the key, the old value is replaced by the specified value. (A cache
* <tt>c</tt> is said to contain a mapping for a key <tt>k</tt> if and only
* if {@link #containsKey(Object) c.containsKey(k)} would return
* <tt>true</tt>.)
* <p>
* The previous value is returned as the future result, or future result is null if there was no value associated
* with the key previously.
*
* @param key Key.
* @param val Value.
* @return a Future representing pending completion of the operation.
* @throws TransactionException If operation within transaction is failed.
*/
public IgniteFuture<V> getAndPutAsync(K key, V val) throws TransactionException;
/**
* {@inheritDoc}
* @throws TransactionException If operation within transaction is failed.
*/
@IgniteAsyncSupported
@Override public void putAll(Map<? extends K, ? extends V> map) throws TransactionException;
/**
 * Asynchronously copies all of the entries from the specified map to the {@link Cache}.
 * <p>
 * The effect of this call is equivalent to that of calling
 * {@link #putAsync(Object, Object) putAsync(k, v)} on this cache once for each mapping
 * from key <tt>k</tt> to value <tt>v</tt> in the specified map.
 * <p>
 * The order in which the individual puts occur is undefined.
 * <p>
 * The behavior of this operation is undefined if entries in the cache
 * corresponding to entries in the map are modified or removed while this
 * operation is in progress, or if map is modified while the operation is in
 * progress.
 * <p>
 * In Default Consistency mode, individual puts occur atomically but not
 * the entire putAll. Listeners may observe individual updates.
 *
 * @param map Map containing keys and values to put into the cache.
 * @return a Future representing pending completion of the operation.
 * @throws TransactionException If operation within transaction is failed.
 */
public IgniteFuture<Void> putAllAsync(Map<? extends K, ? extends V> map) throws TransactionException;
/**
* {@inheritDoc}
* <p>
* For {@link CacheAtomicityMode#ATOMIC} return
* value on primary node crash may be incorrect because of the automatic retries. It is recommended
* to disable retries with {@link #withNoRetries()} and manually restore primary-backup
* consistency in case of update failure.
*
* @throws TransactionException If operation within transaction is failed.
*/
@IgniteAsyncSupported
@Override public boolean putIfAbsent(K key, V val) throws TransactionException;
/**
* Asynchronously associates the specified key with the given value if it is
* not already associated with a value.
* <p>
* For {@link CacheAtomicityMode#ATOMIC} return
* value on primary node crash may be incorrect because of the automatic retries. It is recommended
* to disable retries with {@link #withNoRetries()} and manually restore primary-backup
* consistency in case of update failure.
*
* @param key Key.
* @param val Value.
* @return a Future representing pending completion of the operation.
* @throws TransactionTimeoutException If operation performs within transaction and timeout occurred.
* @throws TransactionRollbackException If operation performs within transaction that automatically rolled back.
* @throws TransactionHeuristicException If operation performs within transaction that entered an unknown state.
*/
public IgniteFuture<Boolean> putIfAbsentAsync(K key, V val);
/**
* {@inheritDoc}
* @throws TransactionException If operation within transaction is failed.
*/
@IgniteAsyncSupported
@Override public boolean remove(K key) throws TransactionException;
/**
* Asynchronously removes the mapping for a key from this cache if it is present.
* <p>
* More formally, if this cache contains a mapping from key <tt>k</tt> to
* value <tt>v</tt> such that
* <code>(key==null ? k==null : key.equals(k))</code>, that mapping is removed.
* (The cache can contain at most one such mapping.)
*
* <p>A future result is <tt>true</tt> if this cache previously associated the key,
* or <tt>false</tt> if the cache contained no mapping for the key.
* <p>
* The cache will not contain a mapping for the specified key once the
* returned future is completed.
*
* @param key Key.
* @return a Future representing pending completion of the operation.
* @throws TransactionException If operation within transaction is failed.
*/
public IgniteFuture<Boolean> removeAsync(K key) throws TransactionException;
/**
* {@inheritDoc}
* <p>
* For {@link CacheAtomicityMode#ATOMIC} return
* value on primary node crash may be incorrect because of the automatic retries. It is recommended
* to disable retries with {@link #withNoRetries()} and manually restore primary-backup
* consistency in case of update failure.
*
* @throws TransactionException If operation within transaction is failed.
*/
@IgniteAsyncSupported
@Override public boolean remove(K key, V oldVal) throws TransactionException;
/**
* Asynchronously removes the mapping for a key only if currently mapped to the
* given value.
* <p>
* For {@link CacheAtomicityMode#ATOMIC} return
* value on primary node crash may be incorrect because of the automatic retries. It is recommended
* to disable retries with {@link #withNoRetries()} and manually restore primary-backup
* consistency in case of update failure.
*
* @param key Key.
* @param oldVal Old value.
* @return a Future representing pending completion of the operation.
* @throws TransactionException If operation within transaction is failed.
*/
public IgniteFuture<Boolean> removeAsync(K key, V oldVal) throws TransactionException;
/**
* {@inheritDoc}
* @throws TransactionException If operation within transaction is failed.
*/
@IgniteAsyncSupported
@Override public V getAndRemove(K key) throws TransactionException;
/**
* Asynchronously removes the entry for a key only if currently mapped to some
* value.
*
* @param key Key.
* @return a Future representing pending completion of the operation.
* @throws TransactionException If operation within transaction is failed.
*/
public IgniteFuture<V> getAndRemoveAsync(K key) throws TransactionException;
/**
* {@inheritDoc}
* <p>
* For {@link CacheAtomicityMode#ATOMIC} return
* value on primary node crash may be incorrect because of the automatic retries. It is recommended
* to disable retries with {@link #withNoRetries()} and manually restore primary-backup
* consistency in case of update failure.
*
* @throws TransactionException If operation within transaction is failed.
*/
@IgniteAsyncSupported
@Override public boolean replace(K key, V oldVal, V newVal) throws TransactionException;
/**
* Asynchronous version of the {@link #replace(Object, Object, Object)}.
* <p>
* For {@link CacheAtomicityMode#ATOMIC} return
* value on primary node crash may be incorrect because of the automatic retries. It is recommended
* to disable retries with {@link #withNoRetries()} and manually restore primary-backup
* consistency in case of update failure.
*
* @param key Key.
* @param oldVal Old value.
* @param newVal New value.
* @return a Future representing pending completion of the operation.
* @throws TransactionException If operation within transaction is failed.
*/
public IgniteFuture<Boolean> replaceAsync(K key, V oldVal, V newVal) throws TransactionException;
/**
* {@inheritDoc}
* @throws TransactionException If operation within transaction is failed.
*/
@IgniteAsyncSupported
@Override public boolean replace(K key, V val) throws TransactionException;
/**
* Asynchronously replaces the entry for a key only if currently mapped to a
* given value.
*
* @param key Key.
* @param val Value.
* @return a Future representing pending completion of the operation.
* @throws TransactionException If operation within transaction is failed.
*/
public IgniteFuture<Boolean> replaceAsync(K key, V val) throws TransactionException;
/**
* {@inheritDoc}
* @throws TransactionException If operation within transaction is failed.
*/
@IgniteAsyncSupported
@Override public V getAndReplace(K key, V val) throws TransactionException;
/**
* Asynchronously replaces the value for a given key if and only if there is a
* value currently mapped by the key.
*
* @param key Key.
* @param val Value.
* @return a Future representing pending completion of the operation.
* @throws TransactionTimeoutException If operation performs within transaction and timeout occurred.
* @throws TransactionRollbackException If operation performs within transaction that automatically rolled back.
* @throws TransactionHeuristicException If operation performs within transaction that entered an unknown state.
*/
public IgniteFuture<V> getAndReplaceAsync(K key, V val);
/**
* {@inheritDoc}
* @throws TransactionException If operation within transaction is failed.
*/
@IgniteAsyncSupported
@Override public void removeAll(Set<? extends K> keys) throws TransactionException;
/**
* Asynchronously removes entries for the specified keys.
* <p>
* The order in which the individual entries are removed is undefined.
* <p>
* For every entry in the key set, the following are called:
* <ul>
* <li>any registered {@link CacheEntryRemovedListener}s</li>
* <li>if the cache is a write-through cache, the {@link CacheWriter}</li>
* </ul>
* If the key set is empty, the {@link CacheWriter} is not called.
*
* @param keys Keys set.
* @return a Future representing pending completion of the operation.
* @throws TransactionException If operation within transaction is failed.
*/
public IgniteFuture<Void> removeAllAsync(Set<? extends K> keys) throws TransactionException;
/**
* Removes all of the mappings from this cache.
* <p>
* The order that the individual entries are removed is undefined.
* <p>
* For every mapping that exists the following are called:
* <ul>
* <li>any registered {@link CacheEntryRemovedListener}s</li>
* <li>if the cache is a write-through cache, the {@link CacheWriter}</li>
* </ul>
* If the cache is empty, the {@link CacheWriter} is not called.
* <p>
* This operation is not transactional. It calls broadcast closure that
* deletes all primary keys from remote nodes.
* <p>
* This is potentially an expensive operation as listeners are invoked.
* Use {@link #clear()} to avoid this.
*
* @throws IllegalStateException if the cache is {@link #isClosed()}
* @throws CacheException if there is a problem during the remove
* @see #clear()
* @see CacheWriter#deleteAll
*/
@IgniteAsyncSupported
@Override public void removeAll();
/**
* Asynchronously removes all of the mappings from this cache.
* <p>
* The order that the individual entries are removed is undefined.
* <p>
* For every mapping that exists the following are called:
* <ul>
* <li>any registered {@link CacheEntryRemovedListener}s</li>
* <li>if the cache is a write-through cache, the {@link CacheWriter}</li>
* </ul>
* If the cache is empty, the {@link CacheWriter} is not called.
* <p>
* This is potentially an expensive operation as listeners are invoked.
* Use {@link #clearAsync()} to avoid this.
*
* @return a Future representing pending completion of the operation.
* @throws IllegalStateException if the cache is {@link #isClosed()}
* @throws CacheException if there is a problem during the remove
* @see #clearAsync()
* @see CacheWriter#deleteAll
*/
public IgniteFuture<Void> removeAllAsync();
/** {@inheritDoc} */
@IgniteAsyncSupported
@Override public void clear();
/**
* Asynchronously clears the contents of the cache, without notifying listeners or
* {@link CacheWriter}s.
*
* @return a Future representing pending completion of the operation.
*/
public IgniteFuture<Void> clearAsync();
/**
* Clears entry from the cache and swap storage, without notifying listeners or
* {@link CacheWriter}s. Entry is cleared only if it is not currently locked,
* and is not participating in a transaction.
*
* @param key Key to clear.
* @throws IllegalStateException if the cache is {@link #isClosed()}
* @throws CacheException if there is a problem during the clear
*/
@IgniteAsyncSupported
public void clear(K key);
/**
* Asynchronously clears entry from the cache and swap storage, without notifying listeners or
* {@link CacheWriter}s. Entry is cleared only if it is not currently locked,
* and is not participating in a transaction.
*
* @param key Key to clear.
* @return a Future representing pending completion of the operation.
* @throws IllegalStateException if the cache is {@link #isClosed()}
* @throws CacheException if there is a problem during the clear
*/
public IgniteFuture<Void> clearAsync(K key);
/**
* Clears entries from the cache and swap storage, without notifying listeners or
* {@link CacheWriter}s. Entry is cleared only if it is not currently locked,
* and is not participating in a transaction.
*
* @param keys Keys to clear.
* @throws IllegalStateException if the cache is {@link #isClosed()}
* @throws CacheException if there is a problem during the clear
*/
@IgniteAsyncSupported
public void clearAll(Set<? extends K> keys);
/**
* Asynchronously clears entries from the cache and swap storage, without notifying listeners or
* {@link CacheWriter}s. Entry is cleared only if it is not currently locked,
* and is not participating in a transaction.
*
* @param keys Keys to clear.
* @return a Future representing pending completion of the operation.
* @throws IllegalStateException if the cache is {@link #isClosed()}
* @throws CacheException if there is a problem during the clear
*/
public IgniteFuture<Void> clearAllAsync(Set<? extends K> keys);
/**
* Clears entry from the cache and swap storage, without notifying listeners or
* {@link CacheWriter}s. Entry is cleared only if it is not currently locked,
* and is not participating in a transaction.
* <p/>
* Note that this operation is local as it merely clears
* an entry from local cache, it does not remove entries from
* remote caches.
*
* @param key Key to clear.
*/
public void localClear(K key);
/**
* Clears entries from the cache and swap storage, without notifying listeners or
* {@link CacheWriter}s. Entry is cleared only if it is not currently locked,
* and is not participating in a transaction.
* <p/>
* Note that this operation is local as it merely clears
* an entry from local cache, it does not remove entries from
* remote caches.
*
* @param keys Keys to clear.
*/
public void localClearAll(Set<? extends K> keys);
/**
* {@inheritDoc}
* <p>
* Please refer to documentation for {@link CacheAtomicityMode#ATOMIC} for information on
* system behavior in crash scenarios for atomic caches.
*
* @throws TransactionException If operation within transaction is failed.
*/
@IgniteAsyncSupported
@Override public <T> T invoke(K key, EntryProcessor<K, V, T> entryProcessor, Object... arguments)
throws TransactionException;
/**
* Asynchronously invokes an {@link EntryProcessor} against the {@link javax.cache.Cache.Entry} specified by
* the provided key. If an {@link javax.cache.Cache.Entry} does not exist for the specified key,
* an attempt is made to load it (if a loader is configured) or a surrogate
* {@link javax.cache.Cache.Entry}, consisting of the key with a null value is used instead.
* <p>
* Please refer to documentation for {@link CacheAtomicityMode#ATOMIC} for information on
* system behavior in crash scenarios for atomic caches.
*
* @param key The key to the entry.
* @param entryProcessor The {@link EntryProcessor} to invoke.
* @param arguments Additional arguments to pass to the {@link EntryProcessor}.
* @return a Future representing pending completion of the operation.
* @throws TransactionException If operation within transaction is failed.
*/
public <T> IgniteFuture<T> invokeAsync(K key, EntryProcessor<K, V, T> entryProcessor, Object... arguments)
throws TransactionException;
/**
* Invokes an {@link CacheEntryProcessor} against the {@link javax.cache.Cache.Entry} specified by
* the provided key. If an {@link javax.cache.Cache.Entry} does not exist for the specified key,
* an attempt is made to load it (if a loader is configured) or a surrogate
* {@link javax.cache.Cache.Entry}, consisting of the key with a null value is used instead.
* <p>
* An instance of entry processor must be stateless as it may be invoked multiple times on primary and
* backup nodes in the cache. It is guaranteed that the value passed to the entry processor will be always
* the same.
* <p>
* Please refer to documentation for {@link CacheAtomicityMode#ATOMIC} for information on
* system behavior in crash scenarios for atomic caches.
*
* @param key The key to the entry.
* @param entryProcessor The {@link CacheEntryProcessor} to invoke.
* @param arguments Additional arguments to pass to the {@link CacheEntryProcessor}.
* @return The result of the processing, if any, defined by the {@link CacheEntryProcessor} implementation.
* @throws NullPointerException If key or {@link CacheEntryProcessor} is null
* @throws IllegalStateException If the cache is {@link #isClosed()}
* @throws ClassCastException If the implementation is configured to perform
* runtime-type-checking, and the key or value
* types are incompatible with those that have been
* configured for the {@link Cache}.
* @throws EntryProcessorException If an exception is thrown by the {@link
* CacheEntryProcessor}, a Caching Implementation
* must wrap any {@link Exception} thrown
* wrapped in an {@link EntryProcessorException}.
* @throws TransactionException If operation within transaction is failed.
* @see CacheEntryProcessor
*/
@IgniteAsyncSupported
public <T> T invoke(K key, CacheEntryProcessor<K, V, T> entryProcessor, Object... arguments)
throws TransactionException;
/**
* Asynchronously invokes an {@link CacheEntryProcessor} against the {@link javax.cache.Cache.Entry} specified by
* the provided key. If an {@link javax.cache.Cache.Entry} does not exist for the specified key,
* an attempt is made to load it (if a loader is configured) or a surrogate
* {@link javax.cache.Cache.Entry}, consisting of the key with a null value is used instead.
* <p>
* An instance of entry processor must be stateless as it may be invoked multiple times on primary and
* backup nodes in the cache. It is guaranteed that the value passed to the entry processor will be always
* the same.
*
* @param key The key to the entry.
* @param entryProcessor The {@link CacheEntryProcessor} to invoke.
* @param arguments Additional arguments to pass to the {@link CacheEntryProcessor}.
* @return a Future representing pending completion of the operation.
* @throws NullPointerException If key or {@link CacheEntryProcessor} is null
* @throws IllegalStateException If the cache is {@link #isClosed()}
* @throws ClassCastException If the implementation is configured to perform
* runtime-type-checking, and the key or value
* types are incompatible with those that have been
* configured for the {@link Cache}.
* @throws EntryProcessorException If an exception is thrown by the {@link
* CacheEntryProcessor}, a Caching Implementation
* must wrap any {@link Exception} thrown
* wrapped in an {@link EntryProcessorException}.
* @throws TransactionException If operation within transaction is failed.
* @see CacheEntryProcessor
*/
public <T> IgniteFuture<T> invokeAsync(K key, CacheEntryProcessor<K, V, T> entryProcessor, Object... arguments)
throws TransactionException;
/**
* {@inheritDoc}
* <p>
* Please refer to documentation for {@link CacheAtomicityMode#ATOMIC} for information on
* system behavior in crash scenarios for atomic caches.
*
* @throws TransactionException If operation within transaction is failed.
*/
@IgniteAsyncSupported
@Override public <T> Map<K, EntryProcessorResult<T>> invokeAll(Set<? extends K> keys,
EntryProcessor<K, V, T> entryProcessor, Object... args) throws TransactionException;
/**
* Asynchronously invokes an {@link EntryProcessor} against the set of {@link javax.cache.Cache.Entry}s
* specified by the set of keys.
* <p>
* If an {@link javax.cache.Cache.Entry} does not exist for the specified key, an attempt is made
* to load it (if a loader is configured) or a surrogate {@link javax.cache.Cache.Entry},
* consisting of the key and a value of null is provided.
* <p>
* The order that the entries for the keys are processed is undefined.
* Implementations may choose to process the entries in any order, including
* concurrently. Furthermore there is no guarantee implementations will
* use the same {@link EntryProcessor} instance to process each entry, as
* the case may be in a non-local cache topology.
* <p>
* The result of executing the {@link EntryProcessor} is returned in the future as a
* {@link Map} of {@link EntryProcessorResult}s, one result per key. Should the
* {@link EntryProcessor} or Caching implementation throw an exception, the
* exception is wrapped and re-thrown when a call to
* {@link javax.cache.processor.EntryProcessorResult#get()} is made.
* <p>
* Please refer to documentation for {@link CacheAtomicityMode#ATOMIC} for information on
* system behavior in crash scenarios for atomic caches.
*
* @param keys The set of keys.
* @param entryProcessor The {@link EntryProcessor} to invoke.
* @param args Additional arguments to pass to the {@link EntryProcessor}.
* @return a Future representing pending completion of the operation.
* @throws TransactionException If operation within transaction is failed.
*/
public <T> IgniteFuture<Map<K, EntryProcessorResult<T>>> invokeAllAsync(Set<? extends K> keys,
EntryProcessor<K, V, T> entryProcessor, Object... args) throws TransactionException;
/**
 * Invokes an {@link CacheEntryProcessor} against the set of {@link javax.cache.Cache.Entry}s
 * specified by the set of keys.
 * <p>
 * If an {@link javax.cache.Cache.Entry} does not exist for the specified key, an attempt is made
 * to load it (if a loader is configured) or a surrogate {@link javax.cache.Cache.Entry},
 * consisting of the key and a value of null is provided.
 * <p>
 * The order that the entries for the keys are processed is undefined.
 * Implementations may choose to process the entries in any order, including
 * concurrently. Furthermore there is no guarantee implementations will
 * use the same {@link CacheEntryProcessor} instance to process each entry, as
 * the case may be in a non-local cache topology.
 * <p>
 * The result of executing the {@link CacheEntryProcessor} is returned as a
 * {@link Map} of {@link EntryProcessorResult}s, one result per key. Should the
 * {@link CacheEntryProcessor} or Caching implementation throw an exception, the
 * exception is wrapped and re-thrown when a call to
 * {@link javax.cache.processor.EntryProcessorResult#get()} is made.
 * <p>
 * An instance of entry processor must be stateless as it may be invoked multiple times on primary and
 * backup nodes in the cache. It is guaranteed that the value passed to the entry processor will be always
 * the same.
 *
 * @param keys The set of keys for entries to process.
 * @param entryProcessor The {@link CacheEntryProcessor} to invoke.
 * @param args Additional arguments to pass to the {@link CacheEntryProcessor}.
 * @return The map of {@link EntryProcessorResult}s of the processing per key,
 * if any, defined by the {@link CacheEntryProcessor} implementation. No mappings
 * will be returned for {@link CacheEntryProcessor}s that return a
 * <code>null</code> value for a key.
 * @throws NullPointerException If keys or {@link CacheEntryProcessor} are {@code null}.
 * @throws IllegalStateException If the cache is {@link #isClosed()}.
 * @throws ClassCastException If the implementation is configured to perform
 * runtime-type-checking, and the key or value
 * types are incompatible with those that have been
 * configured for the {@link Cache}.
 * @throws TransactionException If operation within transaction is failed.
 * @see CacheEntryProcessor
 */
@IgniteAsyncSupported
public <T> Map<K, EntryProcessorResult<T>> invokeAll(Set<? extends K> keys,
    CacheEntryProcessor<K, V, T> entryProcessor, Object... args) throws TransactionException;
/**
 * Asynchronously invokes an {@link CacheEntryProcessor} against the set of {@link javax.cache.Cache.Entry}s
 * specified by the set of keys.
 * <p>
 * If an {@link javax.cache.Cache.Entry} does not exist for the specified key, an attempt is made
 * to load it (if a loader is configured) or a surrogate {@link javax.cache.Cache.Entry},
 * consisting of the key and a value of null is provided.
 * <p>
 * The order that the entries for the keys are processed is undefined.
 * Implementations may choose to process the entries in any order, including
 * concurrently. Furthermore there is no guarantee implementations will
 * use the same {@link CacheEntryProcessor} instance to process each entry, as
 * the case may be in a non-local cache topology.
 * <p>
 * The result of executing the {@link CacheEntryProcessor} is returned in the future as a
 * {@link Map} of {@link EntryProcessorResult}s, one result per key. Should the
 * {@link CacheEntryProcessor} or Caching implementation throw an exception, the
 * exception is wrapped and re-thrown when a call to
 * {@link javax.cache.processor.EntryProcessorResult#get()} is made.
 * <p>
 * An instance of entry processor must be stateless as it may be invoked multiple times on primary and
 * backup nodes in the cache. It is guaranteed that the value passed to the entry processor will be always
 * the same.
 *
 * @param keys The set of keys for entries to process.
 * @param entryProcessor The {@link CacheEntryProcessor} to invoke.
 * @param args Additional arguments to pass to the {@link CacheEntryProcessor}.
 * @return a Future representing pending completion of the operation.
 * @throws NullPointerException If keys or {@link CacheEntryProcessor} are {@code null}.
 * @throws IllegalStateException If the cache is {@link #isClosed()}.
 * @throws ClassCastException If the implementation is configured to perform
 * runtime-type-checking, and the key or value
 * types are incompatible with those that have been
 * configured for the {@link Cache}.
 * @throws TransactionException If operation within transaction is failed.
 * @see CacheEntryProcessor
 */
public <T> IgniteFuture<Map<K, EntryProcessorResult<T>>> invokeAllAsync(Set<? extends K> keys,
    CacheEntryProcessor<K, V, T> entryProcessor, Object... args) throws TransactionException;
/**
* Closes this cache instance.
* <p>
* For local cache equivalent to {@link #destroy()}.
* For distributed caches, if called on clients, stops client cache, if called on a server node,
* just closes this cache instance and does not destroy cache data.
* <p>
* After cache instance is closed another {@code IgniteCache} instance for the same
* cache can be created using {@link Ignite#cache(String)} method.
*/
@Override public void close();
/**
* Completely deletes the cache with all its data from the system on all cluster nodes.
*/
public void destroy();
/**
 * Forces this cache node to re-balance its partitions. This method is usually used when
 * {@link CacheConfiguration#getRebalanceDelay()} configuration parameter has non-zero value.
 * When many nodes are started or stopped almost concurrently, it is more efficient to delay
 * rebalancing until the node topology is stable to make sure that no redundant re-partitioning
 * happens.
 * <p>
 * In case of {@link CacheMode#PARTITIONED} caches, for better efficiency user should
 * usually make sure that new nodes get placed on the same place of consistent hash ring as
 * the left nodes, and that nodes are restarted before
 * {@link CacheConfiguration#getRebalanceDelay() rebalanceDelay} expires. To place nodes
 * on the same place in consistent hash ring, use
 * {@link IgniteConfiguration#setConsistentId(Serializable)} to make sure that
 * a node maps to the same hash ID if re-started.
 * <p>
 * See {@link CacheConfiguration#getRebalanceDelay()} for more information on how to configure
 * rebalance re-partition delay.
 * <p>
 * @return Future that will be completed when rebalancing is finished. Future.get() returns {@code true}
 * when rebalance was successfully finished.
 */
public IgniteFuture<Boolean> rebalance();
/**
* Returns future that will be completed when all indexes for this cache are ready to use.
*
* @return Future.
*/
public IgniteFuture<?> indexReadyFuture();
/**
* Gets whole cluster snapshot metrics (statistics) for this cache.
*
* @return Cache metrics.
*/
public CacheMetrics metrics();
/**
* Gets cluster group snapshot metrics for caches in cluster group.
*
* @param grp Cluster group.
* @return Cache metrics.
*/
public CacheMetrics metrics(ClusterGroup grp);
    /**
     * Gets local snapshot metrics (statistics) for this cache — this node's view only,
     * as opposed to the cluster-wide snapshot returned by {@link #metrics()}.
     *
     * @return Cache metrics.
     */
    public CacheMetrics localMetrics();
    /**
     * Gets whole cluster MxBean for this cache.
     *
     * @return MxBean exposing cluster-wide cache metrics.
     */
    public CacheMetricsMXBean mxBean();
    /**
     * Gets local MxBean for this cache (this node's metrics only; see {@link #mxBean()}
     * for the cluster-wide variant).
     *
     * @return MxBean.
     */
    public CacheMetricsMXBean localMxBean();
    /**
     * Gets a collection of lost partition IDs.
     *
     * @return Lost partitions.
     */
    public Collection<Integer> lostPartitions();
    /**
     * Sets statistics enabled flag cluster wide for this cache.
     *
     * @param enabled Statistics enabled flag: {@code true} to enable statistics collection,
     *      {@code false} to disable it.
     */
    public void enableStatistics(boolean enabled);
    /**
     * Clears cluster statistics for this cache.
     */
    public void clearStatistics();
}
| apache-2.0 |
vmanoria/oozie | core/src/main/java/org/apache/oozie/command/wf/NotificationXCommand.java | 4149 | /**
* Copyright (c) 2010 Yahoo! Inc. All rights reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.oozie.command.wf;
import org.apache.oozie.WorkflowActionBean;
import org.apache.oozie.WorkflowJobBean;
import org.apache.oozie.client.OozieClient;
import org.apache.oozie.command.CommandException;
import org.apache.oozie.command.PreconditionException;
import org.apache.oozie.util.LogUtils;
import org.apache.oozie.util.ParamChecker;
import org.apache.oozie.util.XLog;

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.regex.Matcher;
/**
 * Command that sends an HTTP GET notification about a workflow job or workflow action
 * state change to a user-configured URL.
 * <p>
 * The URL is read from the job configuration ({@link OozieClient#WORKFLOW_NOTIFICATION_URL}
 * or {@link OozieClient#ACTION_NOTIFICATION_URL}) and the <code>$jobId</code>,
 * <code>$nodeName</code> and <code>$status</code> placeholders are resolved before delivery.
 * Failed deliveries are requeued and retried a fixed number of times.
 */
public class NotificationXCommand extends WorkflowXCommand<Void> {
    private static final String STATUS_PATTERN = "\\$status";
    private static final String JOB_ID_PATTERN = "\\$jobId";
    private static final String NODE_NAME_PATTERN = "\\$nodeName";

    /** Maximum number of delivery attempts before the notification is dropped. */
    private static final int MAX_RETRIES = 3;

    /** Delay, in milliseconds, before a failed notification is requeued for retry. */
    private static final int RETRY_DELAY_MS = 60 * 1000;

    /** Resolved notification URL, or {@code null} if the job does not configure one. */
    private String url;

    /** Number of failed delivery attempts performed so far. */
    private int retries = 0;

    /**
     * Creates a notification command for a workflow job state change.
     *
     * @param workflow workflow job bean, must not be {@code null}.
     */
    public NotificationXCommand(WorkflowJobBean workflow) {
        super("job.notification", "job.notification", 0);
        ParamChecker.notNull(workflow, "workflow");
        LogUtils.setLogInfo(workflow, logInfo);
        url = workflow.getWorkflowInstance().getConf().get(OozieClient.WORKFLOW_NOTIFICATION_URL);
        if (url != null) {
            // quoteReplacement() keeps '$' and '\' in the substituted values from being
            // interpreted as regex group references by String.replaceAll().
            url = url.replaceAll(JOB_ID_PATTERN, Matcher.quoteReplacement(workflow.getId()));
            url = url.replaceAll(STATUS_PATTERN, Matcher.quoteReplacement(workflow.getStatus().toString()));
        }
    }

    /**
     * Creates a notification command for a workflow action state change.
     *
     * @param workflow workflow job bean, must not be {@code null}.
     * @param action workflow action bean, must not be {@code null}.
     */
    public NotificationXCommand(WorkflowJobBean workflow, WorkflowActionBean action) {
        super("action.notification", "job.notification", 0);
        ParamChecker.notNull(workflow, "workflow");
        ParamChecker.notNull(action, "action");
        LogUtils.setLogInfo(workflow, logInfo);
        LogUtils.setLogInfo(action, logInfo);
        url = workflow.getWorkflowInstance().getConf().get(OozieClient.ACTION_NOTIFICATION_URL);
        if (url != null) {
            url = url.replaceAll(JOB_ID_PATTERN, Matcher.quoteReplacement(workflow.getId()));
            url = url.replaceAll(NODE_NAME_PATTERN, Matcher.quoteReplacement(action.getName()));
            // "T:" marks a completed action (transition taken), "S:" an action still in flight (status).
            if (action.isComplete()) {
                url = url.replaceAll(STATUS_PATTERN, Matcher.quoteReplacement("T:" + action.getTransition()));
            }
            else {
                url = url.replaceAll(STATUS_PATTERN, Matcher.quoteReplacement("S:" + action.getStatus().toString()));
            }
        }
    }

    /** This command performs no state mutation, so no job lock is required. */
    @Override
    protected boolean isLockRequired() {
        return false;
    }

    /** The notification URL doubles as the command's entity key. */
    @Override
    protected String getEntityKey() {
        return url;
    }

    @Override
    protected void loadState() throws CommandException {
        // No state to load: everything needed was captured in the constructor.
    }

    @Override
    protected void verifyPrecondition() throws CommandException, PreconditionException {
        // No preconditions: the command is a best-effort side call.
    }

    /**
     * Performs the HTTP GET against the notification URL. Any non-200 response or
     * I/O failure schedules a retry via {@link #handleRetry()}.
     */
    @Override
    protected Void execute() throws CommandException {
        // If the command was requeued, the log info has to be set on the thread-local Info again.
        LogUtils.setLogInfo(logInfo);
        if (url != null) {
            try {
                // Local name differs from the field to avoid shadowing 'this.url'.
                URL notificationUrl = new URL(url);
                HttpURLConnection urlConn = (HttpURLConnection) notificationUrl.openConnection();
                if (urlConn.getResponseCode() != HttpURLConnection.HTTP_OK) {
                    handleRetry();
                }
            }
            catch (IOException ex) {
                handleRetry();
            }
        }
        return null;
    }

    /**
     * Requeues this command with a fixed delay, giving up with a warning after
     * {@link #MAX_RETRIES} failed attempts.
     */
    private void handleRetry() {
        if (retries < MAX_RETRIES) {
            retries++;
            this.resetUsed();
            queue(this, RETRY_DELAY_MS);
        }
        else {
            LOG.warn(XLog.OPS, "could not send notification [{0}]", url);
        }
    }

    /** @return the resolved notification URL, or {@code null} if none is configured. */
    public String getUrl() {
        return url;
    }
}
| apache-2.0 |
mkis-/elasticsearch | src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java | 147433 | /*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.highlight;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.google.common.base.Joiner;
import com.google.common.collect.Iterables;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.settings.Settings.Builder;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.query.*;
import org.elasticsearch.index.query.MatchQueryBuilder.Operator;
import org.elasticsearch.index.query.MatchQueryBuilder.Type;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.highlight.HighlightBuilder.Field;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.hamcrest.Matcher;
import org.hamcrest.Matchers;
import org.junit.Test;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import static org.elasticsearch.client.Requests.searchRequest;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.*;
import static org.elasticsearch.search.builder.SearchSourceBuilder.highlight;
import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;
import static org.hamcrest.Matchers.*;
@Slow
public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
@Test
public void testHighlightingWithWildcardName() throws IOException {
// test the kibana case with * as fieldname that will try highlight all fields including meta fields
XContentBuilder mappings = jsonBuilder();
mappings.startObject();
mappings.startObject("type")
.startObject("properties")
.startObject("text")
.field("type", "string")
.field("analyzer", "keyword")
.field("index_options", "offsets")
.field("term_vector", "with_positions_offsets")
.endObject()
.endObject()
.endObject();
mappings.endObject();
assertAcked(prepareCreate("test")
.addMapping("type", mappings));
ensureYellow();
client().prepareIndex("test", "type", "1")
.setSource(jsonBuilder().startObject().field("text", "text").endObject())
.get();
refresh();
String highlighter = randomFrom(new String[]{"plain", "postings", "fvh"});
SearchResponse search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))).addHighlightedField(new Field("*").highlighterType(highlighter)).get();
assertHighlight(search, 0, "text", 0, equalTo("<em>text</em>"));
}
    @Test
    public void testPlainHighlighterWithLongUnanalyzedStringTerm() throws IOException {
        // Two keyword fields with offsets and term vectors; "long_text" additionally uses
        // ignore_above=1, so any value longer than one character is dropped from the index.
        XContentBuilder mappings = jsonBuilder();
        mappings.startObject();
        mappings.startObject("type")
                .startObject("properties")
                .startObject("long_text")
                .field("type", "string")
                .field("analyzer", "keyword")
                .field("index_options", "offsets")
                .field("term_vector", "with_positions_offsets")
                .field("ignore_above", 1)
                .endObject()
                .startObject("text")
                .field("type", "string")
                .field("analyzer", "keyword")
                .field("index_options", "offsets")
                .field("term_vector", "with_positions_offsets")
                .endObject()
                .endObject()
                .endObject();
        mappings.endObject();
        assertAcked(prepareCreate("test")
                .addMapping("type", mappings));
        ensureYellow();
        // create a term that is larger than the allowed 32766, index it and then try highlight on it
        // the search request should still succeed
        StringBuilder builder = new StringBuilder();
        for (int i = 0; i < 32767; i++) {
            builder.append('a');
        }
        client().prepareIndex("test", "type", "1")
                .setSource(jsonBuilder().startObject().field("long_text", builder.toString()).field("text", "text").endObject())
                .get();
        refresh();
        String highlighter = randomFrom(new String[]{"plain", "postings", "fvh"});
        SearchResponse search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))).addHighlightedField(new Field("*").highlighterType(highlighter)).get();
        assertHighlight(search, 0, "text", 0, equalTo("<em>text</em>"));
        // ignore_above dropped the over-long value, so there is nothing to highlight for "long_text".
        search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))).addHighlightedField(new Field("long_text").highlighterType(highlighter)).get();
        assertNoFailures(search);
        assertThat(search.getHits().getAt(0).getHighlightFields().size(), equalTo(0));
    }
@Test
public void testHighlightingWhenFieldsAreNotStoredThereIsNoSource() throws IOException {
XContentBuilder mappings = jsonBuilder();
mappings.startObject();
mappings.startObject("type")
.startObject("_source")
.field("enabled", false)
.endObject()
.startObject("properties")
.startObject("unstored_field")
.field("index_options", "offsets")
.field("term_vector", "with_positions_offsets")
.field("type", "string")
.field("store", "no")
.endObject()
.startObject("text")
.field("index_options", "offsets")
.field("term_vector", "with_positions_offsets")
.field("type", "string")
.field("store", "yes")
.endObject()
.endObject()
.endObject();
mappings.endObject();
assertAcked(prepareCreate("test")
.addMapping("type", mappings));
ensureYellow();
client().prepareIndex("test", "type", "1")
.setSource(jsonBuilder().startObject().field("unstored_text", "text").field("text", "text").endObject())
.get();
refresh();
String highlighter = randomFrom(new String[]{"plain", "postings", "fvh"});
SearchResponse search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))).addHighlightedField(new Field("*").highlighterType(highlighter)).get();
assertHighlight(search, 0, "text", 0, equalTo("<em>text</em>"));
search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("text", "text"))).addHighlightedField(new Field("unstored_text")).get();
assertNoFailures(search);
assertThat(search.getHits().getAt(0).getHighlightFields().size(), equalTo(0));
}
@Test
// see #3486
public void testHighTermFrequencyDoc() throws IOException {
assertAcked(prepareCreate("test")
.addMapping("test", "name", "type=string,term_vector=with_positions_offsets,store=" + (randomBoolean() ? "yes" : "no")));
ensureYellow();
StringBuilder builder = new StringBuilder();
for (int i = 0; i < 6000; i++) {
builder.append("abc").append(" ");
}
client().prepareIndex("test", "test", "1")
.setSource("name", builder.toString())
.get();
refresh();
SearchResponse search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "abc"))).addHighlightedField("name").get();
assertHighlight(search, 0, "name", 0, startsWith("<em>abc</em> <em>abc</em> <em>abc</em> <em>abc</em>"));
}
    @Test
    public void testNgramHighlightingWithBrokenPositions() throws IOException {
        // The "autocomplete" sub-field combines an nGram tokenizer with a word_delimiter
        // filter — an analysis chain that can emit overlapping/broken token positions;
        // the highlighter must still produce sensible output for it.
        assertAcked(prepareCreate("test")
                .addMapping("test", jsonBuilder()
                        .startObject()
                        .startObject("test")
                        .startObject("properties")
                        .startObject("name")
                        .startObject("fields")
                        .startObject("autocomplete")
                        .field("type", "string")
                        .field("analyzer", "autocomplete")
                        .field("search_analyzer", "search_autocomplete")
                        .field("term_vector", "with_positions_offsets")
                        .endObject()
                        .startObject("name")
                        .field("type", "string")
                        .endObject()
                        .endObject()
                        .field("type", "multi_field")
                        .endObject()
                        .endObject()
                        .endObject())
                .setSettings(settingsBuilder()
                        .put(indexSettings())
                        .put("analysis.tokenizer.autocomplete.max_gram", 20)
                        .put("analysis.tokenizer.autocomplete.min_gram", 1)
                        .put("analysis.tokenizer.autocomplete.token_chars", "letter,digit")
                        .put("analysis.tokenizer.autocomplete.type", "nGram")
                        .put("analysis.filter.wordDelimiter.type", "word_delimiter")
                        // map punctuation to ALPHANUM so the delimiter keeps it inside tokens
                        .putArray("analysis.filter.wordDelimiter.type_table",
                                "& => ALPHANUM", "| => ALPHANUM", "! => ALPHANUM",
                                "? => ALPHANUM", ". => ALPHANUM", "- => ALPHANUM", "# => ALPHANUM", "% => ALPHANUM",
                                "+ => ALPHANUM", ", => ALPHANUM", "~ => ALPHANUM", ": => ALPHANUM", "/ => ALPHANUM",
                                "^ => ALPHANUM", "$ => ALPHANUM", "@ => ALPHANUM", ") => ALPHANUM", "( => ALPHANUM",
                                "] => ALPHANUM", "[ => ALPHANUM", "} => ALPHANUM", "{ => ALPHANUM")
                        .put("analysis.filter.wordDelimiter.type.split_on_numerics", false)
                        .put("analysis.filter.wordDelimiter.generate_word_parts", true)
                        .put("analysis.filter.wordDelimiter.generate_number_parts", false)
                        .put("analysis.filter.wordDelimiter.catenate_words", true)
                        .put("analysis.filter.wordDelimiter.catenate_numbers", true)
                        .put("analysis.filter.wordDelimiter.catenate_all", false)
                        .put("analysis.analyzer.autocomplete.tokenizer", "autocomplete")
                        .putArray("analysis.analyzer.autocomplete.filter", "lowercase", "wordDelimiter")
                        .put("analysis.analyzer.search_autocomplete.tokenizer", "whitespace")
                        .putArray("analysis.analyzer.search_autocomplete.filter", "lowercase", "wordDelimiter")));
        ensureYellow();
        client().prepareIndex("test", "test", "1")
                .setSource("name", "ARCOTEL Hotels Deutschland").get();
        refresh();
        SearchResponse search = client().prepareSearch("test").setTypes("test").setQuery(matchQuery("name.autocomplete", "deut tel").operator(Operator.OR)).addHighlightedField("name.autocomplete").execute().actionGet();
        assertHighlight(search, 0, "name.autocomplete", 0, equalTo("ARCO<em>TEL</em> Ho<em>tel</em>s <em>Deut</em>schland"));
    }
    @Test
    public void testMultiPhraseCutoff() throws IOException {
        /*
         * MultiPhraseQuery can literally kill an entire node if there are too many terms in the
         * query. We cut off and extract terms if there are more than 16 terms in the query
         */
        assertAcked(prepareCreate("test")
                .addMapping("test", "body", "type=string,analyzer=custom_analyzer,search_analyzer=custom_analyzer,term_vector=with_positions_offsets")
                .setSettings(
                        settingsBuilder().put(indexSettings())
                                .put("analysis.filter.wordDelimiter.type", "word_delimiter")
                                .put("analysis.filter.wordDelimiter.type.split_on_numerics", false)
                                .put("analysis.filter.wordDelimiter.generate_word_parts", true)
                                .put("analysis.filter.wordDelimiter.generate_number_parts", true)
                                .put("analysis.filter.wordDelimiter.catenate_words", true)
                                .put("analysis.filter.wordDelimiter.catenate_numbers", true)
                                .put("analysis.filter.wordDelimiter.catenate_all", false)
                                .put("analysis.analyzer.custom_analyzer.tokenizer", "whitespace")
                                .putArray("analysis.analyzer.custom_analyzer.filter", "lowercase", "wordDelimiter"))
        );
        ensureGreen();
        client().prepareIndex("test", "test", "1")
                .setSource("body", "Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com http://twitter.com this is a test for highlighting feature Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com http://twitter.com this is a test for highlighting feature")
                .get();
        refresh();
        // A short phrase query highlights normally...
        SearchResponse search = client().prepareSearch().setQuery(matchQuery("body", "Test: http://www.facebook.com ").type(Type.PHRASE)).addHighlightedField("body").execute().actionGet();
        assertHighlight(search, 0, "body", 0, startsWith("<em>Test: http://www.facebook.com</em>"));
        // ...while a phrase with more than 16 terms is cut off: only the leading terms are
        // highlighted as phrases, the remainder as individual terms.
        search = client().prepareSearch().setQuery(matchQuery("body", "Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com http://twitter.com this is a test for highlighting feature Test: http://www.facebook.com http://elasticsearch.org http://xing.com http://cnn.com http://quora.com http://twitter.com this is a test for highlighting feature").type(Type.PHRASE)).addHighlightedField("body").execute().actionGet();
        assertHighlight(search, 0, "body", 0, equalTo("<em>Test</em>: <em>http://www.facebook.com</em> <em>http://elasticsearch.org</em> <em>http://xing.com</em> <em>http://cnn.com</em> http://quora.com"));
    }
@Test
public void testNgramHighlightingPreLucene42() throws IOException {
assertAcked(prepareCreate("test")
.addMapping("test",
"name", "type=string,analyzer=name_index_analyzer,search_analyzer=name_search_analyzer," + randomStoreField() + "term_vector=with_positions_offsets",
"name2", "type=string,analyzer=name2_index_analyzer,search_analyzer=name_search_analyzer," + randomStoreField() + "term_vector=with_positions_offsets")
.setSettings(settingsBuilder()
.put(indexSettings())
.put("analysis.filter.my_ngram.max_gram", 20)
.put("analysis.filter.my_ngram.version", "4.1")
.put("analysis.filter.my_ngram.min_gram", 1)
.put("analysis.filter.my_ngram.type", "ngram")
.put("analysis.tokenizer.my_ngramt.max_gram", 20)
.put("analysis.tokenizer.my_ngramt.version", "4.1")
.put("analysis.tokenizer.my_ngramt.min_gram", 1)
.put("analysis.tokenizer.my_ngramt.type", "ngram")
.put("analysis.analyzer.name_index_analyzer.tokenizer", "my_ngramt")
.put("analysis.analyzer.name2_index_analyzer.tokenizer", "whitespace")
.putArray("analysis.analyzer.name2_index_analyzer.filter", "lowercase", "my_ngram")
.put("analysis.analyzer.name_search_analyzer.tokenizer", "whitespace")
.put("analysis.analyzer.name_search_analyzer.filter", "lowercase")));
ensureYellow();
client().prepareIndex("test", "test", "1")
.setSource("name", "logicacmg ehemals avinci - the know how company",
"name2", "logicacmg ehemals avinci - the know how company").get();
client().prepareIndex("test", "test", "2")
.setSource("name", "avinci, unilog avinci, logicacmg, logica",
"name2", "avinci, unilog avinci, logicacmg, logica").get();
refresh();
SearchResponse search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "logica m"))).addHighlightedField("name").get();
assertHighlight(search, 0, "name", 0, anyOf(equalTo("<em>logica</em>c<em>m</em>g ehe<em>m</em>als avinci - the know how co<em>m</em>pany"),
equalTo("avinci, unilog avinci, <em>logica</em>c<em>m</em>g, <em>logica</em>")));
assertHighlight(search, 1, "name", 0, anyOf(equalTo("<em>logica</em>c<em>m</em>g ehe<em>m</em>als avinci - the know how co<em>m</em>pany"),
equalTo("avinci, unilog avinci, <em>logica</em>c<em>m</em>g, <em>logica</em>")));
search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "logica ma"))).addHighlightedField("name").get();
assertHighlight(search, 0, "name", 0, anyOf(equalTo("<em>logica</em>cmg ehe<em>ma</em>ls avinci - the know how company"),
equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>")));
assertHighlight(search, 1, "name", 0, anyOf(equalTo("<em>logica</em>cmg ehe<em>ma</em>ls avinci - the know how company"),
equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>")));
search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name", "logica"))).addHighlightedField("name").get();
assertHighlight(search, 0, "name", 0, anyOf(equalTo("<em>logica</em>cmg ehemals avinci - the know how company"),
equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>")));
assertHighlight(search, 0, "name", 0, anyOf(equalTo("<em>logica</em>cmg ehemals avinci - the know how company"),
equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>")));
search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name2", "logica m"))).addHighlightedField("name2").get();
assertHighlight(search, 0, "name2", 0, anyOf(equalTo("<em>logica</em>c<em>m</em>g ehe<em>m</em>als avinci - the know how co<em>m</em>pany"),
equalTo("avinci, unilog avinci, <em>logica</em>c<em>m</em>g, <em>logica</em>")));
assertHighlight(search, 1, "name2", 0, anyOf(equalTo("<em>logica</em>c<em>m</em>g ehe<em>m</em>als avinci - the know how co<em>m</em>pany"),
equalTo("avinci, unilog avinci, <em>logica</em>c<em>m</em>g, <em>logica</em>")));
search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name2", "logica ma"))).addHighlightedField("name2").get();
assertHighlight(search, 0, "name2", 0, anyOf(equalTo("<em>logica</em>cmg ehe<em>ma</em>ls avinci - the know how company"),
equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>")));
assertHighlight(search, 1, "name2", 0, anyOf(equalTo("<em>logica</em>cmg ehe<em>ma</em>ls avinci - the know how company"),
equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>")));
search = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("name2", "logica"))).addHighlightedField("name2").get();
assertHighlight(search, 0, "name2", 0, anyOf(equalTo("<em>logica</em>cmg ehemals avinci - the know how company"),
equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>")));
assertHighlight(search, 1, "name2", 0, anyOf(equalTo("<em>logica</em>cmg ehemals avinci - the know how company"),
equalTo("avinci, unilog avinci, <em>logica</em>cmg, <em>logica</em>")));
}
    @Test
    public void testNgramHighlighting() throws IOException {
        // "name" is indexed with an ngram TOKENIZER (sub-word fragments can be highlighted),
        // "name2" with a whitespace tokenizer plus an ngram FILTER (whole tokens are highlighted).
        assertAcked(prepareCreate("test")
                .addMapping("test",
                        "name", "type=string,analyzer=name_index_analyzer,search_analyzer=name_search_analyzer,term_vector=with_positions_offsets",
                        "name2", "type=string,analyzer=name2_index_analyzer,search_analyzer=name_search_analyzer,term_vector=with_positions_offsets")
                .setSettings(settingsBuilder()
                        .put(indexSettings())
                        .put("analysis.filter.my_ngram.max_gram", 20)
                        .put("analysis.filter.my_ngram.min_gram", 1)
                        .put("analysis.filter.my_ngram.type", "ngram")
                        .put("analysis.tokenizer.my_ngramt.max_gram", 20)
                        .put("analysis.tokenizer.my_ngramt.min_gram", 1)
                        .put("analysis.tokenizer.my_ngramt.token_chars", "letter,digit")
                        .put("analysis.tokenizer.my_ngramt.type", "ngram")
                        .put("analysis.analyzer.name_index_analyzer.tokenizer", "my_ngramt")
                        .put("analysis.analyzer.name2_index_analyzer.tokenizer", "whitespace")
                        .put("analysis.analyzer.name2_index_analyzer.filter", "my_ngram")
                        .put("analysis.analyzer.name_search_analyzer.tokenizer", "whitespace")));
        client().prepareIndex("test", "test", "1")
                .setSource("name", "logicacmg ehemals avinci - the know how company",
                        "name2", "logicacmg ehemals avinci - the know how company").get();
        refresh();
        ensureGreen();
        SearchResponse search = client().prepareSearch().setQuery(matchQuery("name", "logica m")).addHighlightedField("name").get();
        assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>c<em>m</em>g ehe<em>m</em>als avinci - the know how co<em>m</em>pany"));
        search = client().prepareSearch().setQuery(matchQuery("name", "logica ma")).addHighlightedField("name").get();
        assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>cmg ehe<em>ma</em>ls avinci - the know how company"));
        search = client().prepareSearch().setQuery(matchQuery("name", "logica")).addHighlightedField("name").get();
        assertHighlight(search, 0, "name", 0, equalTo("<em>logica</em>cmg ehemals avinci - the know how company"));
        search = client().prepareSearch().setQuery(matchQuery("name2", "logica m")).addHighlightedField("name2").get();
        assertHighlight(search, 0, "name2", 0, equalTo("<em>logicacmg</em> <em>ehemals</em> avinci - the know how <em>company</em>"));
        search = client().prepareSearch().setQuery(matchQuery("name2", "logica ma")).addHighlightedField("name2").get();
        assertHighlight(search, 0, "name2", 0, equalTo("<em>logicacmg</em> <em>ehemals</em> avinci - the know how company"));
        search = client().prepareSearch().setQuery(matchQuery("name2", "logica")).addHighlightedField("name2").get();
        assertHighlight(search, 0, "name2", 0, equalTo("<em>logicacmg</em> ehemals avinci - the know how company"));
    }
    @Test
    public void testEnsureNoNegativeOffsets() throws Exception {
        // Regression check: tiny fragment sizes combined with terms longer than the fragment
        // must never produce negative highlight offsets or a failed request.
        assertAcked(prepareCreate("test")
                .addMapping("type1",
                        "no_long_term", "type=string,term_vector=with_positions_offsets",
                        "long_term", "type=string,term_vector=with_positions_offsets"));
        ensureYellow();
        client().prepareIndex("test", "type1", "1")
                .setSource("no_long_term", "This is a test where foo is highlighed and should be highlighted",
                        "long_term", "This is a test thisisaverylongwordandmakessurethisfails where foo is highlighed and should be highlighted")
                .get();
        refresh();
        // Fragment size (18) smaller than the matched 40-char term: term is still returned whole.
        SearchResponse search = client().prepareSearch()
                .setQuery(matchQuery("long_term", "thisisaverylongwordandmakessurethisfails foo highlighed"))
                .addHighlightedField("long_term", 18, 1)
                .get();
        assertHighlight(search, 0, "long_term", 0, 1, equalTo("<em>thisisaverylongwordandmakessurethisfails</em>"));
        // An 18-char fragment is too small to cover the sloppy phrase match: no highlight.
        search = client().prepareSearch()
                .setQuery(matchQuery("no_long_term", "test foo highlighed").type(Type.PHRASE).slop(3))
                .addHighlightedField("no_long_term", 18, 1).setHighlighterPostTags("</b>").setHighlighterPreTags("<b>")
                .get();
        assertNotHighlighted(search, 0, "no_long_term");
        // A 30-char fragment can cover it and yields a highlighted snippet.
        search = client().prepareSearch()
                .setQuery(matchQuery("no_long_term", "test foo highlighed").type(Type.PHRASE).slop(3))
                .addHighlightedField("no_long_term", 30, 1).setHighlighterPostTags("</b>").setHighlighterPreTags("<b>")
                .get();
        assertHighlight(search, 0, "no_long_term", 0, 1, equalTo("a <b>test</b> where <b>foo</b> is <b>highlighed</b> and"));
    }
    @Test
    public void testSourceLookupHighlightingUsingPlainHighlighter() throws Exception {
        // Fields are neither stored nor carry term vectors, so the plain highlighter must
        // fall back to fetching the values from _source.
        assertAcked(prepareCreate("test")
                .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
                        // we don't store title and don't use term vector, now lets see if it works...
                        .startObject("title").field("type", "string").field("store", "no").field("term_vector", "no").endObject()
                        .startObject("attachments").startObject("properties").startObject("body").field("type", "string").field("store", "no").field("term_vector", "no").endObject().endObject().endObject()
                        .endObject().endObject().endObject()));
        ensureYellow();
        IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
        for (int i = 0; i < indexRequestBuilders.length; i++) {
            indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
                    .setSource(XContentFactory.jsonBuilder().startObject()
                            .field("title", "This is a test on the highlighting bug present in elasticsearch")
                            .startArray("attachments").startObject().field("body", "attachment 1").endObject().startObject().field("body", "attachment 2").endObject().endArray()
                            .endObject());
        }
        indexRandom(true, indexRequestBuilders);
        SearchResponse search = client().prepareSearch()
                .setQuery(matchQuery("title", "bug"))
                .addHighlightedField("title", -1, 0)
                .get();
        for (int i = 0; i < indexRequestBuilders.length; i++) {
            assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
        }
        // Nested array values from _source must each yield their own highlight fragment.
        search = client().prepareSearch()
                .setQuery(matchQuery("attachments.body", "attachment"))
                .addHighlightedField("attachments.body", -1, 0)
                .get();
        for (int i = 0; i < indexRequestBuilders.length; i++) {
            assertHighlight(search, i, "attachments.body", 0, equalTo("<em>attachment</em> 1"));
            assertHighlight(search, i, "attachments.body", 1, equalTo("<em>attachment</em> 2"));
        }
    }
@Test
public void testSourceLookupHighlightingUsingFastVectorHighlighter() throws Exception {
assertAcked(prepareCreate("test")
.addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
// we don't store title, now lets see if it works...
.startObject("title").field("type", "string").field("store", "no").field("term_vector", "with_positions_offsets").endObject()
.startObject("attachments").startObject("properties").startObject("body").field("type", "string").field("store", "no").field("term_vector", "with_positions_offsets").endObject().endObject().endObject()
.endObject().endObject().endObject()));
ensureYellow();
IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
for (int i = 0; i < indexRequestBuilders.length; i++) {
indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
.setSource(XContentFactory.jsonBuilder().startObject()
.field("title", "This is a test on the highlighting bug present in elasticsearch")
.startArray("attachments").startObject().field("body", "attachment 1").endObject().startObject().field("body", "attachment 2").endObject().endArray()
.endObject());
}
indexRandom(true, indexRequestBuilders);
SearchResponse search = client().prepareSearch()
.setQuery(matchQuery("title", "bug"))
.addHighlightedField("title", -1, 0)
.get();
for (int i = 0; i < indexRequestBuilders.length; i++) {
assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
}
search = client().prepareSearch()
.setQuery(matchQuery("attachments.body", "attachment"))
.addHighlightedField("attachments.body", -1, 2)
.execute().get();
for (int i = 0; i < 5; i++) {
assertHighlight(search, i, "attachments.body", 0, equalTo("<em>attachment</em> 1"));
assertHighlight(search, i, "attachments.body", 1, equalTo("<em>attachment</em> 2"));
}
}
    @Test
    public void testSourceLookupHighlightingUsingPostingsHighlighter() throws Exception {
        // Fields are not stored but index offsets: the postings highlighter reads offsets
        // from the index while fetching the text itself from _source.
        assertAcked(prepareCreate("test")
                .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
                        // we don't store title, now lets see if it works...
                        .startObject("title").field("type", "string").field("store", "no").field("index_options", "offsets").endObject()
                        .startObject("attachments").startObject("properties").startObject("body").field("type", "string").field("store", "no").field("index_options", "offsets").endObject().endObject().endObject()
                        .endObject().endObject().endObject()));
        ensureYellow();
        IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
        for (int i = 0; i < indexRequestBuilders.length; i++) {
            indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
                    .setSource(XContentFactory.jsonBuilder().startObject()
                            .array("title", "This is a test on the highlighting bug present in elasticsearch. Hopefully it works.",
                                    "This is the second bug to perform highlighting on.")
                            .startArray("attachments").startObject().field("body", "attachment for this test").endObject().startObject().field("body", "attachment 2").endObject().endArray()
                            .endObject());
        }
        indexRandom(true, indexRequestBuilders);
        SearchResponse search = client().prepareSearch()
                .setQuery(matchQuery("title", "bug"))
                //asking for the whole field to be highlighted
                .addHighlightedField("title", -1, 0).get();
        for (int i = 0; i < indexRequestBuilders.length; i++) {
            assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch. Hopefully it works."));
            assertHighlight(search, i, "title", 1, 2, equalTo("This is the second <em>bug</em> to perform highlighting on."));
        }
        search = client().prepareSearch()
                .setQuery(matchQuery("title", "bug"))
                //sentences will be generated out of each value
                .addHighlightedField("title").get();
        for (int i = 0; i < indexRequestBuilders.length; i++) {
            assertHighlight(search, i, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch."));
            assertHighlight(search, i, "title", 1, 2, equalTo("This is the second <em>bug</em> to perform highlighting on."));
        }
        search = client().prepareSearch()
                .setQuery(matchQuery("attachments.body", "attachment"))
                .addHighlightedField("attachments.body", -1, 2)
                .get();
        for (int i = 0; i < indexRequestBuilders.length; i++) {
            assertHighlight(search, i, "attachments.body", 0, equalTo("<em>attachment</em> for this test"));
            assertHighlight(search, i, "attachments.body", 1, 2, equalTo("<em>attachment</em> 2"));
        }
    }
/**
 * Regression test for elasticsearch issue #1994: highlighting of multi-valued fields,
 * both for a plain-highlighted field ({@code title}, no term vectors) and for a fast
 * vector highlighter field ({@code titleTV}, term_vector=with_positions_offsets).
 */
@Test
public void testHighlightIssue1994() throws Exception {
    assertAcked(prepareCreate("test")
            .addMapping("type1", "title", "type=string,store=no", "titleTV", "type=string,store=no,term_vector=with_positions_offsets"));
    ensureYellow();
    // doc 1 carries both fields with two values each; doc 2 only has titleTV
    indexRandom(false, client().prepareIndex("test", "type1", "1")
            .setSource("title", new String[]{"This is a test on the highlighting bug present in elasticsearch", "The bug is bugging us"},
                    "titleTV", new String[]{"This is a test on the highlighting bug present in elasticsearch", "The bug is bugging us"}));
    indexRandom(true, client().prepareIndex("test", "type1", "2")
            .setSource("titleTV", new String[]{"some text to highlight", "highlight other text"}));
    // query matches title only; requireFieldMatch(false) lets titleTV be highlighted too
    SearchResponse search = client().prepareSearch()
            .setQuery(matchQuery("title", "bug"))
            .addHighlightedField("title", -1, 2)
            .addHighlightedField("titleTV", -1, 2).setHighlighterRequireFieldMatch(false)
            .get();
    // both values of each multi-valued field must come back as separate fragments
    assertHighlight(search, 0, "title", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
    assertHighlight(search, 0, "title", 1, 2, equalTo("The <em>bug</em> is bugging us"));
    assertHighlight(search, 0, "titleTV", 0, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
    assertHighlight(search, 0, "titleTV", 1, 2, equalTo("The <em>bug</em> is bugging us"));
    // direct query on the term-vector field: both of doc 2's values highlighted
    search = client().prepareSearch()
            .setQuery(matchQuery("titleTV", "highlight"))
            .addHighlightedField("titleTV", -1, 2)
            .get();
    assertHighlight(search, 0, "titleTV", 0, equalTo("some text to <em>highlight</em>"));
    assertHighlight(search, 0, "titleTV", 1, 2, equalTo("<em>highlight</em> other text"));
}
/**
 * Global highlight settings (tags, order, fragment size/count) must be overridable
 * per field: field1 keeps the global {@code <global>} tags while field2 supplies its
 * own {@code <field2>} tags, fragment size and field-match requirement.
 */
@Test
public void testGlobalHighlightingSettingsOverriddenAtFieldLevel() {
    createIndex("test");
    ensureGreen();
    client().prepareIndex("test", "type1")
            .setSource("field1", new String[]{"this is a test", "this is the second test"},
                    "field2", new String[]{"this is another test", "yet another test"}).get();
    refresh();
    logger.info("--> highlighting and searching on field1 and field2 produces different tags");
    // global defaults: score order, <global> tags, one tiny fragment
    HighlightBuilder highlightBuilder = highlight()
            .order("score").preTags("<global>").postTags("</global>").fragmentSize(1).numOfFragments(1)
            // field1 overrides only the fragment count, inheriting the global tags
            .field(new HighlightBuilder.Field("field1").numOfFragments(2))
            // field2 overrides tags, fragment size and the field-match requirement
            .field(new HighlightBuilder.Field("field2").preTags("<field2>").postTags("</field2>").fragmentSize(50).requireFieldMatch(false));
    SearchSourceBuilder sourceBuilder = searchSource()
            .query(termQuery("field1", "test"))
            .highlight(highlightBuilder);
    SearchResponse response = client().prepareSearch("test").setSource(sourceBuilder.buildAsBytes()).get();
    assertHighlight(response, 0, "field1", 0, 2, equalTo(" <global>test</global>"));
    assertHighlight(response, 0, "field1", 1, 2, equalTo(" <global>test</global>"));
    assertHighlight(response, 0, "field2", 0, 1, equalTo("this is another <field2>test</field2>"));
}
/**
 * Wildcard field names in a highlight request ({@code field*}) must expand to all
 * matching fields, each handled by the highlighter its mapping supports (postings,
 * fast vector, plain). See https://github.com/elasticsearch/elasticsearch/issues/5175.
 */
@Test //https://github.com/elasticsearch/elasticsearch/issues/5175
public void testHighlightingOnWildcardFields() throws Exception {
    assertAcked(prepareCreate("test")
            .addMapping("type1",
                    "field-postings", "type=string,index_options=offsets",
                    "field-fvh", "type=string,term_vector=with_positions_offsets",
                    "field-plain", "type=string"));
    ensureGreen();
    client().prepareIndex("test", "type1")
            .setSource("field-postings", "This is the first test sentence. Here is the second one.",
                    "field-fvh", "This is the test with term_vectors",
                    "field-plain", "This is the test for the plain highlighter").get();
    refresh();
    logger.info("--> highlighting and searching on field*");
    HighlightBuilder highlightBuilder = highlight()
            .field("field*").preTags("<xxx>").postTags("</xxx>").requireFieldMatch(false);
    //postings hl doesn't support require_field_match, its field needs to be queried directly
    SearchSourceBuilder sourceBuilder = searchSource()
            .query(termQuery("field-postings", "test"))
            .highlight(highlightBuilder);
    SearchResponse response = client().search(searchRequest("test").source(sourceBuilder)).actionGet();
    assertHighlight(response, 0, "field-postings", 0, 1, equalTo("This is the first <xxx>test</xxx> sentence."));
    assertHighlight(response, 0, "field-fvh", 0, 1, equalTo("This is the <xxx>test</xxx> with term_vectors"));
    assertHighlight(response, 0, "field-plain", 0, 1, equalTo("This is the <xxx>test</xxx> for the plain highlighter"));
}
/**
 * When {@code _source} is disabled for a type, highlighting with force_source=true
 * must be rejected with BAD_REQUEST for every highlighter type (plain, fvh,
 * postings), while highlighting from the stored field still works.
 */
@Test
public void testForceSourceWithSourceDisabled() throws Exception {
    // field1 is stored with offsets AND term vectors so each highlighter can run
    // from the stored field despite _source being disabled
    assertAcked(prepareCreate("test")
            .addMapping("type1", jsonBuilder().startObject().startObject("type1")
                    .startObject("_source").field("enabled", false).endObject()
                    .startObject("properties")
                    .startObject("field1").field("type", "string").field("store", "yes").field("index_options", "offsets")
                    .field("term_vector", "with_positions_offsets").endObject()
                    .endObject().endObject().endObject()));
    ensureGreen();
    client().prepareIndex("test", "type1")
            .setSource("field1", "The quick brown fox jumps over the lazy dog", "field2", "second field content").get();
    refresh();
    //works using stored field
    SearchResponse searchResponse = client().prepareSearch("test")
            .setQuery(termQuery("field1", "quick"))
            .addHighlightedField(new Field("field1").preTags("<xxx>").postTags("</xxx>"))
            .get();
    assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
    // forcing source must fail per highlighter type, with the same error message
    assertFailures(client().prepareSearch("test")
            .setQuery(termQuery("field1", "quick"))
            .addHighlightedField(new Field("field1").preTags("<xxx>").postTags("</xxx>").highlighterType("plain").forceSource(true)),
            RestStatus.BAD_REQUEST,
            containsString("source is forced for fields [field1] but type [type1] has disabled _source"));
    assertFailures(client().prepareSearch("test")
            .setQuery(termQuery("field1", "quick"))
            .addHighlightedField(new Field("field1").preTags("<xxx>").postTags("</xxx>").highlighterType("fvh").forceSource(true)),
            RestStatus.BAD_REQUEST,
            containsString("source is forced for fields [field1] but type [type1] has disabled _source"));
    assertFailures(client().prepareSearch("test")
            .setQuery(termQuery("field1", "quick"))
            .addHighlightedField(new Field("field1").preTags("<xxx>").postTags("</xxx>").highlighterType("postings").forceSource(true)),
            RestStatus.BAD_REQUEST,
            containsString("source is forced for fields [field1] but type [type1] has disabled _source"));
    // force_source set globally on the highlight section fails the same way
    SearchSourceBuilder searchSource = SearchSourceBuilder.searchSource().query(termQuery("field1", "quick"))
            .highlight(highlight().forceSource(true).field("field1"));
    assertFailures(client().prepareSearch("test").setSource(searchSource.buildAsBytes()),
            RestStatus.BAD_REQUEST,
            containsString("source is forced for fields [field1] but type [type1] has disabled _source"));
    // wildcard expands to both fields; field order in the message is not fixed,
    // hence the regex on "field\d"
    searchSource = SearchSourceBuilder.searchSource().query(termQuery("field1", "quick"))
            .highlight(highlight().forceSource(true).field("field*"));
    assertFailures(client().prepareSearch("test").setSource(searchSource.buildAsBytes()),
            RestStatus.BAD_REQUEST,
            matches("source is forced for fields \\[field\\d, field\\d\\] but type \\[type1\\] has disabled _source"));
}
/**
 * Plain highlighter smoke test: highlights a directly-queried field, and fields
 * matched through {@code _all} with term, prefix, constant-score and bool queries
 * (all of which need requireFieldMatch(false) since the query targets _all).
 */
@Test
public void testPlainHighlighter() throws Exception {
    createIndex("test");
    ensureGreen();
    client().prepareIndex("test", "type1")
            .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
    refresh();
    logger.info("--> highlighting and searching on field1");
    SearchSourceBuilder source = searchSource()
            .query(termQuery("field1", "test"))
            .highlight(highlight().field("field1").order("score").preTags("<xxx>").postTags("</xxx>"));
    SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
    assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
    logger.info("--> searching on _all, highlighting on field1");
    source = searchSource()
            .query(termQuery("_all", "test"))
            .highlight(highlight().field("field1").order("score").preTags("<xxx>").postTags("</xxx>").requireFieldMatch(false));
    searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
    assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
    logger.info("--> searching on _all, highlighting on field2");
    source = searchSource()
            .query(termQuery("_all", "quick"))
            .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>").requireFieldMatch(false));
    searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
    assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
    logger.info("--> searching on _all, highlighting on field2");
    // prefix query: the plain highlighter must still find the rewritten term
    source = searchSource()
            .query(prefixQuery("_all", "qui"))
            .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>").requireFieldMatch(false));
    searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
    assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
    logger.info("--> searching on _all with constant score, highlighting on field2");
    // constant-score wrapping must not hide the inner prefix query from highlighting
    source = searchSource()
            .query(constantScoreQuery(prefixQuery("_all", "qui")))
            .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>").requireFieldMatch(false));
    searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
    assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
    logger.info("--> searching on _all with constant score, highlighting on field2");
    // same again, nested one level deeper inside a bool should clause
    source = searchSource()
            .query(boolQuery().should(constantScoreQuery(prefixQuery("_all", "qui"))))
            .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>").requireFieldMatch(false));
    searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
    assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
}
/**
 * Fast vector highlighter smoke test: fields are mapped with
 * term_vector=with_positions_offsets (see {@link #type1TermVectorMapping()}) and
 * highlighted both when queried directly and when matched through {@code _all}.
 */
@Test
public void testFastVectorHighlighter() throws Exception {
    assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
    ensureGreen();
    client().prepareIndex("test", "type1")
            .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
    refresh();
    logger.info("--> highlighting and searching on field1");
    // field(name, fragmentSize=100, numberOfFragments=0): whole value as one fragment
    SearchSourceBuilder source = searchSource()
            .query(termQuery("field1", "test"))
            .highlight(highlight().field("field1", 100, 0).order("score").preTags("<xxx>").postTags("</xxx>"));
    SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
    assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
    logger.info("--> searching on _all, highlighting on field1");
    source = searchSource()
            .query(termQuery("_all", "test"))
            .highlight(highlight().field("field1", 100, 0).order("score").preTags("<xxx>").postTags("</xxx>").requireFieldMatch(false));
    searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
    // LUCENE 3.1 UPGRADE: Caused adding the space at the end...
    assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
    logger.info("--> searching on _all, highlighting on field2");
    source = searchSource()
            .query(termQuery("_all", "quick"))
            .highlight(highlight().field("field2", 100, 0).order("score").preTags("<xxx>").postTags("</xxx>").requireFieldMatch(false));
    searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
    // LUCENE 3.1 UPGRADE: Caused adding the space at the end...
    assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
    logger.info("--> searching on _all, highlighting on field2");
    // prefix query must also be highlightable by the FVH
    source = searchSource()
            .query(prefixQuery("_all", "qui"))
            .highlight(highlight().field("field2", 100, 0).order("score").preTags("<xxx>").postTags("</xxx>").requireFieldMatch(false));
    searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
    // LUCENE 3.1 UPGRADE: Caused adding the space at the end...
    assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy dog"));
}
/**
 * The FHV can spend a long time highlighting degenerate documents if phraseLimit is not set.
 * <p>
 * Indexes a single document consisting of 1024*256 repetitions of "t " (every token
 * matches the query) and checks that highlighting still completes within the test
 * timeout; the @Test timeout guards against a runtime regression.
 */
@Test(timeout=120000)
public void testFVHManyMatches() throws Exception {
    assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
    ensureGreen();
    // Index one megabyte of "t " over and over and over again
    client().prepareIndex("test", "type1")
            .setSource("field1", Joiner.on("").join(Iterables.limit(Iterables.cycle("t "), 1024*256))).get();
    refresh();
    logger.info("--> highlighting and searching on field1");
    SearchSourceBuilder source = searchSource()
            .query(termQuery("field1", "t"))
            .highlight(highlight().highlighterType("fvh").field("field1", 20, 1).order("score").preTags("<xxx>").postTags("</xxx>"));
    SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
    assertHighlight(searchResponse, 0, "field1", 0, 1, containsString("<xxx>t</xxx>"));
    logger.info("--> done");
}
/** Runs the matched-fields scenario with require_field_match enabled. */
@Test
public void testMatchedFieldsFvhRequireFieldMatch() throws Exception {
    checkMatchedFieldsCase(true);
}
/** Runs the matched-fields scenario with require_field_match disabled. */
@Test
public void testMatchedFieldsFvhNoRequireFieldMatch() throws Exception {
    checkMatchedFieldsCase(false);
}
/**
 * Exercises the fast vector highlighter's matched_fields feature: highlighting a
 * stored field ({@code foo}/{@code bar}, english analyzer) while gathering matches
 * from a differently-analyzed subfield ({@code foo.plain}/{@code bar.plain},
 * standard analyzer, not stored).
 *
 * @param requireFieldMatch value applied to both highlighted fields
 */
private void checkMatchedFieldsCase(boolean requireFieldMatch) throws Exception {
    // multi_field mapping: foo and bar each have a stored english-analyzed main
    // field plus an unstored "plain" subfield with the standard analyzer; all four
    // carry term vectors with positions and offsets as the FVH requires
    assertAcked(prepareCreate("test")
        .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
            .startObject("properties")
                .startObject("foo")
                    .field("type", "multi_field")
                    .startObject("fields")
                        .startObject("foo")
                            .field("type", "string")
                            .field("termVector", "with_positions_offsets")
                            .field("store", "yes")
                            .field("analyzer", "english")
                        .endObject()
                        .startObject("plain")
                            .field("type", "string")
                            .field("termVector", "with_positions_offsets")
                            .field("analyzer", "standard")
                        .endObject()
                    .endObject()
                .endObject()
                .startObject("bar")
                    .field("type", "multi_field")
                    .startObject("fields")
                        .startObject("bar")
                            .field("type", "string")
                            .field("termVector", "with_positions_offsets")
                            .field("store", "yes")
                            .field("analyzer", "english")
                        .endObject()
                        .startObject("plain")
                            .field("type", "string")
                            .field("termVector", "with_positions_offsets")
                            .field("analyzer", "standard")
                        .endObject()
                    .endObject()
                .endObject()
            .endObject()));
    ensureGreen();
    index("test", "type1", "1",
            "foo", "running with scissors");
    index("test", "type1", "2",
            "foo", "cat cat junk junk junk junk junk junk junk cats junk junk",
            "bar", "cat cat junk junk junk junk junk junk junk cats junk junk");
    index("test", "type1", "3",
            "foo", "weird",
            "bar", "result");
    refresh();
    Field fooField = new Field("foo").numOfFragments(1).order("score").fragmentSize(25)
            .highlighterType("fvh").requireFieldMatch(requireFieldMatch);
    Field barField = new Field("bar").numOfFragments(1).order("score").fragmentSize(25)
            .highlighterType("fvh").requireFieldMatch(requireFieldMatch);
    SearchRequestBuilder req = client().prepareSearch("test").addHighlightedField(fooField);
    // First check highlighting without any matched fields set
    SearchResponse resp = req.setQuery(queryStringQuery("running scissors").field("foo")).get();
    assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
    // And that matching a subfield doesn't automatically highlight it
    // ("running" only matches foo.plain because english stems it to "run" in foo)
    resp = req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")).get();
    assertHighlight(resp, 0, "foo", 0, equalTo("running with <em>scissors</em>"));
    // Add the subfield to the list of matched fields but don't match it. Everything should still work
    // like before we added it.
    fooField.matchedFields("foo", "foo.plain");
    resp = req.setQuery(queryStringQuery("running scissors").field("foo")).get();
    assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
    // Now make half the matches come from the stored field and half from just a matched field.
    resp = req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")).get();
    assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
    // Now remove the stored field from the matched field list. That should work too.
    fooField.matchedFields("foo.plain");
    resp = req.setQuery(queryStringQuery("foo.plain:running scissors").field("foo")).get();
    assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with scissors"));
    // Now make sure boosted fields don't blow up when matched fields is both the subfield and stored field.
    fooField.matchedFields("foo", "foo.plain");
    resp = req.setQuery(queryStringQuery("foo.plain:running^5 scissors").field("foo")).get();
    assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
    // Now just all matches are against the matched field. This still returns highlighting.
    resp = req.setQuery(queryStringQuery("foo.plain:running foo.plain:scissors").field("foo")).get();
    assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
    // And all matched field via the queryString's field parameter, just in case
    resp = req.setQuery(queryStringQuery("running scissors").field("foo.plain")).get();
    assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
    // Finding the same string two ways is ok too
    resp = req.setQuery(queryStringQuery("run foo.plain:running^5 scissors").field("foo")).get();
    assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
    // But we use the best found score when sorting fragments
    resp = req.setQuery(queryStringQuery("cats foo.plain:cats^5").field("foo")).get();
    assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
    // which can also be written by searching on the subfield
    resp = req.setQuery(queryStringQuery("cats").field("foo").field("foo.plain^5")).get();
    assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
    // Speaking of two fields, you can have two fields, only one of which has matchedFields enabled
    QueryBuilder twoFieldsQuery = queryStringQuery("cats").field("foo").field("foo.plain^5")
            .field("bar").field("bar.plain^5");
    resp = req.setQuery(twoFieldsQuery).addHighlightedField(barField).get();
    assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
    assertHighlight(resp, 0, "bar", 0, equalTo("<em>cat</em> <em>cat</em> junk junk junk junk"));
    // And you can enable matchedField highlighting on both
    barField.matchedFields("bar", "bar.plain");
    resp = req.get();
    assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
    assertHighlight(resp, 0, "bar", 0, equalTo("junk junk <em>cats</em> junk junk"));
    // Setting a matchedField that isn't searched/doesn't exist is simply ignored.
    barField.matchedFields("bar", "candy");
    resp = req.get();
    assertHighlight(resp, 0, "foo", 0, equalTo("junk junk <em>cats</em> junk junk"));
    assertHighlight(resp, 0, "bar", 0, equalTo("<em>cat</em> <em>cat</em> junk junk junk junk"));
    // If the stored field doesn't have a value it doesn't matter what you match, you get nothing.
    barField.matchedFields("bar", "foo.plain");
    resp = req.setQuery(queryStringQuery("running scissors").field("foo.plain").field("bar")).get();
    assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
    assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("bar")));
    // If the stored field is found but the matched field isn't then you don't get a result either.
    fooField.matchedFields("bar.plain");
    resp = req.setQuery(queryStringQuery("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")).get();
    assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("foo")));
    // But if you add the stored field to the list of matched fields then you'll get a result again
    fooField.matchedFields("foo", "bar.plain");
    resp = req.setQuery(queryStringQuery("running scissors").field("foo").field("foo.plain").field("bar").field("bar.plain")).get();
    assertHighlight(resp, 0, "foo", 0, equalTo("<em>running</em> with <em>scissors</em>"));
    assertThat(resp.getHits().getAt(0).getHighlightFields(), not(hasKey("bar")));
    // You _can_ highlight fields that aren't subfields of one another.
    resp = req.setQuery(queryStringQuery("weird").field("foo").field("foo.plain").field("bar").field("bar.plain")).get();
    assertHighlight(resp, 0, "foo", 0, equalTo("<em>weird</em>"));
    assertHighlight(resp, 0, "bar", 0, equalTo("<em>resul</em>t"));
    // Matching a stemmed term across differently analyzed matched fields currently
    // fails deep in the FVH; this pins the known (broken) behavior.
    assertFailures(req.setQuery(queryStringQuery("result").field("foo").field("foo.plain").field("bar").field("bar.plain")),
            RestStatus.INTERNAL_SERVER_ERROR, containsString("String index out of range"));
}
/**
 * Indexes a randomized number of documents (20-100) and verifies the fast vector
 * highlighter produces the expected fragment for every hit, both when querying
 * field1 directly and when querying/highlighting {@code _all}.
 */
@Test
@Slow
public void testFastVectorHighlighterManyDocs() throws Exception {
    assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
    ensureGreen();
    int COUNT = between(20, 100);
    IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[COUNT];
    for (int i = 0; i < COUNT; i++) {
        indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field1", "test " + i);
    }
    logger.info("--> indexing docs");
    indexRandom(true, indexRequestBuilders);
    logger.info("--> searching explicitly on field1 and highlighting on it");
    SearchResponse searchResponse = client().prepareSearch()
            .setSize(COUNT)
            .setQuery(termQuery("field1", "test"))
            .addHighlightedField("field1", 100, 0)
            .get();
    for (int i = 0; i < COUNT; i++) {
        // the expected fragment is built from the hit's own id, so hit order
        // within the response doesn't matter
        SearchHit hit = searchResponse.getHits().getHits()[i];
        // LUCENE 3.1 UPGRADE: Caused adding the space at the end...
        assertHighlight(searchResponse, i, "field1", 0, 1, equalTo("<em>test</em> " + hit.id()));
    }
    logger.info("--> searching explicitly _all and highlighting on _all");
    searchResponse = client().prepareSearch()
            .setSize(COUNT)
            .setQuery(termQuery("_all", "test"))
            .addHighlightedField("_all", 100, 0)
            .get();
    for (int i = 0; i < COUNT; i++) {
        SearchHit hit = searchResponse.getHits().getHits()[i];
        // _all is stored with a trailing space in this mapping, hence the " " suffix
        assertHighlight(searchResponse, i, "_all", 0, 1, equalTo("<em>test</em> " + hit.id() + " "));
    }
}
/**
 * Builds the shared "type1" mapping used by the FVH tests: _all is stored, and
 * field1/field2 are strings, all three with term vectors including positions and
 * offsets (as the fast vector highlighter requires).
 *
 * @return the mapping as an {@link XContentBuilder}
 * @throws IOException if building the JSON content fails
 */
public XContentBuilder type1TermVectorMapping() throws IOException {
    XContentBuilder mapping = XContentFactory.jsonBuilder();
    mapping.startObject().startObject("type1");
    mapping.startObject("_all").field("store", "yes").field("termVector", "with_positions_offsets").endObject();
    mapping.startObject("properties");
    mapping.startObject("field1").field("type", "string").field("termVector", "with_positions_offsets").endObject();
    mapping.startObject("field2").field("type", "string").field("termVector", "with_positions_offsets").endObject();
    mapping.endObject();
    mapping.endObject().endObject();
    return mapping;
}
/**
 * Highlighting identical content across several documents must yield the same
 * single whole-field fragment for every hit (fragmentSize -1, numberOfFragments 0
 * request the entire field).
 */
@Test
public void testSameContent() throws Exception {
    assertAcked(prepareCreate("test")
            .addMapping("type1", "title", "type=string,store=yes,term_vector=with_positions_offsets"));
    ensureYellow();
    IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
    // iterate over the array length rather than repeating the magic number 5,
    // matching the sibling tests in this class
    for (int i = 0; i < indexRequestBuilders.length; i++) {
        indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
                .setSource("title", "This is a test on the highlighting bug present in elasticsearch");
    }
    indexRandom(true, indexRequestBuilders);
    SearchResponse search = client().prepareSearch()
            .setQuery(matchQuery("title", "bug"))
            .addHighlightedField("title", -1, 0)
            .get();
    for (int i = 0; i < indexRequestBuilders.length; i++) {
        assertHighlight(search, i, "title", 0, 1, equalTo("This is a test on the highlighting <em>bug</em> present in elasticsearch"));
    }
}
/**
 * Verifies the fast vector highlighter's fragment_offset parameter
 * (addHighlightedField(name, fragmentSize, numberOfFragments, fragmentOffset)):
 * with offset 10 the fragment starts mid-sentence rather than at its beginning.
 */
@Test
public void testFastVectorHighlighterOffsetParameter() throws Exception {
    // note: no trailing .get() inside assertAcked, consistent with the other tests here
    assertAcked(prepareCreate("test")
            .addMapping("type1", "title", "type=string,store=yes,term_vector=with_positions_offsets"));
    ensureYellow();
    IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
    // iterate over the array length rather than repeating the magic number 5
    for (int i = 0; i < indexRequestBuilders.length; i++) {
        indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
                .setSource("title", "This is a test on the highlighting bug present in elasticsearch");
    }
    indexRandom(true, indexRequestBuilders);
    SearchResponse search = client().prepareSearch()
            .setQuery(matchQuery("title", "bug"))
            .addHighlightedField("title", 30, 1, 10)
            .get();
    for (int i = 0; i < indexRequestBuilders.length; i++) {
        // LUCENE 3.1 UPGRADE: Caused adding the space at the end...
        assertHighlight(search, i, "title", 0, 1, equalTo("highlighting <em>bug</em> present in elasticsearch"));
    }
}
/**
 * With the "html" highlighter encoder, documents containing characters such as
 * {@code *&?} must come back highlighted (plain highlighter, stored field without
 * term vectors) with the original characters preserved.
 */
@Test
public void testEscapeHtml() throws Exception {
    assertAcked(prepareCreate("test")
            .addMapping("type1", "title", "type=string,store=yes"));
    ensureYellow();
    IndexRequestBuilder[] docs = new IndexRequestBuilder[5];
    for (int docId = 0; docId < docs.length; docId++) {
        docs[docId] = client().prepareIndex("test", "type1", Integer.toString(docId))
                .setSource("title", "This is a html escaping highlighting test for *&? elasticsearch");
    }
    indexRandom(true, docs);
    SearchResponse response = client().prepareSearch()
            .setQuery(matchQuery("title", "test"))
            .setHighlighterEncoder("html")
            .addHighlightedField("title", 50, 1, 10)
            .get();
    for (int docId = 0; docId < docs.length; docId++) {
        assertHighlight(response, docId, "title", 0, 1, equalTo("This is a html escaping highlighting <em>test</em> for *&? elasticsearch"));
    }
}
/**
 * Same HTML-encoder scenario as {@code testEscapeHtml}, but with term vectors
 * enabled so the fast vector highlighter is used; a smaller fragment size (30)
 * trims the fragment to start mid-sentence.
 */
@Test
public void testEscapeHtml_vector() throws Exception {
    assertAcked(prepareCreate("test")
            .addMapping("type1", "title", "type=string,store=yes,term_vector=with_positions_offsets"));
    ensureYellow();
    IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
    // iterate over the array length rather than repeating the magic number 5,
    // matching the sibling tests in this class
    for (int i = 0; i < indexRequestBuilders.length; i++) {
        indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
                .setSource("title", "This is a html escaping highlighting test for *&? elasticsearch");
    }
    indexRandom(true, indexRequestBuilders);
    SearchResponse search = client().prepareSearch()
            .setQuery(matchQuery("title", "test"))
            .setHighlighterEncoder("html")
            .addHighlightedField("title", 30, 1, 10)
            .get();
    for (int i = 0; i < indexRequestBuilders.length; i++) {
        assertHighlight(search, i, "title", 0, 1, equalTo("highlighting <em>test</em> for *&? elasticsearch"));
    }
}
/**
 * multi_field with term vectors and stored subfields: the classic-analyzed "title"
 * and the whitespace-analyzed "title.key" must each be highlightable, and the
 * per-subfield analyzer determines which tokens get highlighted.
 */
@Test
public void testMultiMapperVectorWithStore() throws Exception {
    assertAcked(prepareCreate("test")
            .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
                    .startObject("title").field("type", "multi_field").startObject("fields")
                    .startObject("title").field("type", "string").field("store", "yes").field("term_vector", "with_positions_offsets").field("analyzer", "classic").endObject()
                    .startObject("key").field("type", "string").field("store", "yes").field("term_vector", "with_positions_offsets").field("analyzer", "whitespace").endObject()
                    .endObject().endObject()
                    .endObject().endObject().endObject()));
    ensureGreen();
    client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
    refresh();
    // simple search on body with standard analyzer with a simple field query
    SearchResponse search = client().prepareSearch()
            .setQuery(matchQuery("title", "this is a test"))
            .setHighlighterEncoder("html")
            .addHighlightedField("title", 50, 1)
            .get();
    // classic analyzer: only the non-stopword "test" is highlighted
    assertHighlight(search, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
    // search on title.key and highlight on title.key
    search = client().prepareSearch()
            .setQuery(matchQuery("title.key", "this is a test"))
            .setHighlighterEncoder("html")
            .addHighlightedField("title.key", 50, 1)
            .get();
    // whitespace analyzer keeps every token, so all four words are highlighted
    assertHighlight(search, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
}
/**
 * Same multi_field scenario as {@code testMultiMapperVectorWithStore}, but with
 * store=no: highlighting must fall back to extracting the values from _source
 * while still using the term vectors.
 */
@Test
public void testMultiMapperVectorFromSource() throws Exception {
    assertAcked(prepareCreate("test")
            .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
                    .startObject("title").field("type", "multi_field").startObject("fields")
                    .startObject("title").field("type", "string").field("store", "no").field("term_vector", "with_positions_offsets").field("analyzer", "classic").endObject()
                    .startObject("key").field("type", "string").field("store", "no").field("term_vector", "with_positions_offsets").field("analyzer", "whitespace").endObject()
                    .endObject().endObject()
                    .endObject().endObject().endObject()));
    ensureGreen();
    client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
    refresh();
    // simple search on body with standard analyzer with a simple field query
    SearchResponse search = client().prepareSearch()
            .setQuery(matchQuery("title", "this is a test"))
            .setHighlighterEncoder("html")
            .addHighlightedField("title", 50, 1)
            .get();
    // classic analyzer: only the non-stopword "test" is highlighted
    assertHighlight(search, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
    // search on title.key and highlight on title.key
    search = client().prepareSearch()
            .setQuery(matchQuery("title.key", "this is a test"))
            .setHighlighterEncoder("html")
            .addHighlightedField("title.key", 50, 1)
            .get();
    // whitespace analyzer keeps every token, so all four words are highlighted
    assertHighlight(search, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
}
/**
 * multi_field WITHOUT term vectors but with stored subfields: the plain
 * highlighter must handle both the classic-analyzed "title" and the
 * whitespace-analyzed "title.key" from the stored values.
 */
@Test
public void testMultiMapperNoVectorWithStore() throws Exception {
    assertAcked(prepareCreate("test")
            .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
                    .startObject("title").field("type", "multi_field").startObject("fields")
                    .startObject("title").field("type", "string").field("store", "yes").field("term_vector", "no").field("analyzer", "classic").endObject()
                    .startObject("key").field("type", "string").field("store", "yes").field("term_vector", "no").field("analyzer", "whitespace").endObject()
                    .endObject().endObject()
                    .endObject().endObject().endObject()));
    ensureGreen();
    client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
    refresh();
    // simple search on body with standard analyzer with a simple field query
    SearchResponse search = client().prepareSearch()
            .setQuery(matchQuery("title", "this is a test"))
            .setHighlighterEncoder("html")
            .addHighlightedField("title", 50, 1)
            .get();
    // classic analyzer: only the non-stopword "test" is highlighted
    assertHighlight(search, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
    // search on title.key and highlight on title.key
    search = client().prepareSearch()
            .setQuery(matchQuery("title.key", "this is a test"))
            .setHighlighterEncoder("html")
            .addHighlightedField("title.key", 50, 1)
            .get();
    // whitespace analyzer keeps every token, so all four words are highlighted
    assertHighlight(search, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
}
/**
 * multi_field with neither term vectors nor stored subfields: the plain
 * highlighter must extract both "title" and "title.key" from _source.
 */
@Test
public void testMultiMapperNoVectorFromSource() throws Exception {
    assertAcked(prepareCreate("test")
            .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
                    .startObject("title").field("type", "multi_field").startObject("fields")
                    .startObject("title").field("type", "string").field("store", "no").field("term_vector", "no").field("analyzer", "classic").endObject()
                    .startObject("key").field("type", "string").field("store", "no").field("term_vector", "no").field("analyzer", "whitespace").endObject()
                    .endObject().endObject()
                    .endObject().endObject().endObject()));
    ensureGreen();
    client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
    refresh();
    // simple search on body with standard analyzer with a simple field query
    SearchResponse search = client().prepareSearch()
            .setQuery(matchQuery("title", "this is a test"))
            .setHighlighterEncoder("html")
            .addHighlightedField("title", 50, 1)
            .get();
    // classic analyzer: only the non-stopword "test" is highlighted
    assertHighlight(search, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
    // search on title.key and highlight on title.key
    search = client().prepareSearch()
            .setQuery(matchQuery("title.key", "this is a test"))
            .setHighlighterEncoder("html")
            .addHighlightedField("title.key", 50, 1)
            .get();
    // whitespace analyzer keeps every token, so all four words are highlighted
    assertHighlight(search, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
}
/**
 * Explicitly requesting the fast vector highlighter on a field mapped without
 * term vectors must fail with BAD_REQUEST; the default (auto-selected) highlighter
 * and a wildcard field name must not fail.
 */
@Test
public void testFastVectorHighlighterShouldFailIfNoTermVectors() throws Exception {
    assertAcked(prepareCreate("test")
            .addMapping("type1", "title", "type=string,store=yes,term_vector=no"));
    ensureGreen();
    IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
    // iterate over the array length rather than repeating the magic number 5,
    // matching the sibling tests in this class
    for (int i = 0; i < indexRequestBuilders.length; i++) {
        indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
                .setSource("title", "This is a test for the enabling fast vector highlighter");
    }
    indexRandom(true, indexRequestBuilders);
    // no explicit highlighter type: falls back to one the mapping supports
    SearchResponse search = client().prepareSearch()
            .setQuery(matchPhraseQuery("title", "this is a test"))
            .addHighlightedField("title", 50, 1, 10)
            .get();
    assertNoFailures(search);
    // forcing the FVH on a field without term vectors must be rejected
    assertFailures(client().prepareSearch()
            .setQuery(matchPhraseQuery("title", "this is a test"))
            .addHighlightedField("title", 50, 1, 10)
            .setHighlighterType("fast-vector-highlighter"),
            RestStatus.BAD_REQUEST,
            containsString("the field [title] should be indexed with term vector with position offsets to be used with fast vector highlighter"));
    //should not fail if there is a wildcard
    assertNoFailures(client().prepareSearch()
            .setQuery(matchPhraseQuery("title", "this is a test"))
            .addHighlightedField("tit*", 50, 1, 10)
            .setHighlighterType("fast-vector-highlighter").get());
}
/**
 * Works around SOLR-3724: with the FVH a phrase that spans term vectors in this
 * particular way produces no highlight, so the plain highlighter must be selectable
 * both globally (setHighlighterType) and per field (HighlightBuilder.Field).
 */
@Test
public void testDisableFastVectorHighlighter() throws Exception {
// term vectors with positions+offsets means the FVH is picked by default
assertAcked(prepareCreate("test")
.addMapping("type1", "title", "type=string,store=yes,term_vector=with_positions_offsets,analyzer=classic"));
ensureGreen();
IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
for (int i = 0; i < indexRequestBuilders.length; i++) {
indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
.setSource("title", "This is a test for the workaround for the fast vector highlighting SOLR-3724");
}
indexRandom(true, indexRequestBuilders);
SearchResponse search = client().prepareSearch()
.setQuery(matchPhraseQuery("title", "test for the workaround"))
.addHighlightedField("title", 50, 1, 10)
.get();
for (int i = 0; i < indexRequestBuilders.length; i++) {
// Because of SOLR-3724 nothing is highlighted when FVH is used
assertNotHighlighted(search, i, "title");
}
// Using plain highlighter instead of FVH
search = client().prepareSearch()
.setQuery(matchPhraseQuery("title", "test for the workaround"))
.addHighlightedField("title", 50, 1, 10)
.setHighlighterType("highlighter")
.get();
for (int i = 0; i < indexRequestBuilders.length; i++) {
assertHighlight(search, i, "title", 0, 1, equalTo("This is a <em>test</em> for the <em>workaround</em> for the fast vector highlighting SOLR-3724"));
}
// Using plain highlighter instead of FVH on the field level
search = client().prepareSearch()
.setQuery(matchPhraseQuery("title", "test for the workaround"))
.addHighlightedField(new HighlightBuilder.Field("title").highlighterType("highlighter"))
.setHighlighterType("highlighter")
.get();
for (int i = 0; i < indexRequestBuilders.length; i++) {
assertHighlight(search, i, "title", 0, 1, equalTo("This is a <em>test</em> for the <em>workaround</em> for the fast vector highlighting SOLR-3724"));
}
}
/**
 * FVH with fragmentSize=-1 and numOfFragments=0: every value of a multi-valued
 * field is returned whole, each with its own highlighted fragment.
 */
@Test
public void testFSHHighlightAllMvFragments() throws Exception {
    assertAcked(prepareCreate("test")
            .addMapping("type1", "tags", "type=string,term_vector=with_positions_offsets"));
    ensureGreen();

    String firstTag = "this is a really long tag i would like to highlight";
    String secondTag = "here is another one that is very long and has the tag token near the end";
    client().prepareIndex("test", "type1", "1")
            .setSource("tags", new String[]{firstTag, secondTag}).get();
    refresh();

    // -1 / 0 means "whole values, all of them"
    SearchResponse searchResponse = client().prepareSearch("test")
            .setQuery(QueryBuilders.matchQuery("tags", "tag"))
            .addHighlightedField("tags", -1, 0).get();

    assertHighlight(searchResponse, 0, "tags", 0, equalTo("this is a really long <em>tag</em> i would like to highlight"));
    assertHighlight(searchResponse, 0, "tags", 1, 2, equalTo("here is another one that is very long and has the <em>tag</em> token near the end"));
}
/**
 * Highlighting driven by a boosting query: the highlighted terms come from the
 * positive clause; the negative clause only demotes scores.
 */
@Test
public void testBoostingQuery() {
    createIndex("test");
    ensureGreen();

    client().prepareIndex("test", "type1")
            .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
    refresh();

    logger.info("--> highlighting and searching on field1");
    HighlightBuilder highlightBuilder = highlight().field("field2").order("score").preTags("<x>").postTags("</x>");
    SearchSourceBuilder searchSourceBuilder = searchSource()
            .query(boostingQuery().positive(termQuery("field2", "brown")).negative(termQuery("field2", "foobar")).negativeBoost(0.5f))
            .highlight(highlightBuilder);
    SearchResponse searchResponse = client().prepareSearch("test").setSource(searchSourceBuilder.buildAsBytes()).get();

    assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick <x>brown</x> fox jumps over the lazy dog"));
}
/**
 * Same boosting-query highlighting as {@code testBoostingQuery}, but against a
 * mapping with term vectors so the fast vector highlighter path is exercised.
 */
@Test
public void testBoostingQueryTermVector() throws IOException {
    assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
    ensureGreen();

    client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog")
            .get();
    refresh();

    logger.info("--> highlighting and searching on field1");
    HighlightBuilder highlightBuilder = highlight().field("field2").order("score").preTags("<x>").postTags("</x>");
    SearchSourceBuilder searchSourceBuilder = searchSource()
            .query(boostingQuery().positive(termQuery("field2", "brown")).negative(termQuery("field2", "foobar")).negativeBoost(0.5f))
            .highlight(highlightBuilder);
    SearchResponse searchResponse = client().prepareSearch("test").setSource(searchSourceBuilder.buildAsBytes()).get();

    assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick <x>brown</x> fox jumps over the lazy dog"));
}
/** Highlighting a common-terms query: both query terms get wrapped in the tags. */
@Test
public void testCommonTermsQuery() {
    createIndex("test");
    ensureGreen();

    client().prepareIndex("test", "type1")
            .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog")
            .get();
    refresh();

    logger.info("--> highlighting and searching on field1");
    HighlightBuilder highlightBuilder = highlight().field("field2").order("score").preTags("<x>").postTags("</x>");
    SearchSourceBuilder searchSourceBuilder = searchSource()
            .query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100))
            .highlight(highlightBuilder);
    SearchResponse searchResponse = client().prepareSearch("test").setSource(searchSourceBuilder.buildAsBytes()).get();

    assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog"));
}
/** Common-terms query highlighting against a term-vector mapping (FVH path). */
@Test
public void testCommonTermsTermVector() throws IOException {
    assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
    ensureGreen();

    client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog").get();
    refresh();

    logger.info("--> highlighting and searching on field1");
    HighlightBuilder highlightBuilder = highlight().field("field2").order("score").preTags("<x>").postTags("</x>");
    SearchSourceBuilder searchSourceBuilder = searchSource()
            .query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100))
            .highlight(highlightBuilder);
    SearchResponse searchResponse = client().prepareSearch("test").setSource(searchSourceBuilder.buildAsBytes()).get();

    assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog"));
}
/**
 * match_phrase_prefix highlighting, with and without synonyms.
 * type1 has term-vector fields; type2 adds a synonym analyzer ("quick => fast")
 * so the highlighter must cover terms the query expands to via synonyms as well.
 */
@Test
public void testPhrasePrefix() throws IOException {
// whitespace tokenizer + synonym filter mapping "quick" to "fast", then lowercase
Builder builder = settingsBuilder()
.put(indexSettings())
.put("index.analysis.analyzer.synonym.tokenizer", "whitespace")
.putArray("index.analysis.analyzer.synonym.filter", "synonym", "lowercase")
.put("index.analysis.filter.synonym.type", "synonym")
.putArray("index.analysis.filter.synonym.synonyms", "quick => fast");
// NOTE(review): "_all" uses camelCase "termVector" while other mappings use
// "term_vector" — presumably both spellings are accepted; verify.
assertAcked(prepareCreate("test").setSettings(builder.build()).addMapping("type1", type1TermVectorMapping())
.addMapping("type2", "_all", "store=yes,termVector=with_positions_offsets",
"field4", "type=string,term_vector=with_positions_offsets,analyzer=synonym",
"field3", "type=string,analyzer=synonym"));
ensureGreen();
client().prepareIndex("test", "type1", "0")
.setSource("field0", "The quick brown fox jumps over the lazy dog", "field1", "The quick brown fox jumps over the lazy dog").get();
client().prepareIndex("test", "type1", "1")
.setSource("field1", "The quick browse button is a fancy thing, right bro?").get();
refresh();
logger.info("--> highlighting and searching on field0");
SearchSourceBuilder source = searchSource()
.query(matchPhrasePrefixQuery("field0", "quick bro"))
.highlight(highlight().field("field0").order("score").preTags("<x>").postTags("</x>"));
SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
assertHighlight(searchResponse, 0, "field0", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog"));
logger.info("--> highlighting and searching on field1");
source = searchSource()
.query(matchPhrasePrefixQuery("field1", "quick bro"))
.highlight(highlight().field("field1").order("score").preTags("<x>").postTags("</x>"));
searchResponse = client().search(searchRequest("test").source(source)).actionGet();
// the prefix "bro" expands to both "brown" and "browse", so either doc may rank first
assertHighlight(searchResponse, 0, "field1", 0, 1, anyOf(equalTo("The <x>quick browse</x> button is a fancy thing, right bro?"), equalTo("The <x>quick brown</x> fox jumps over the lazy dog")));
assertHighlight(searchResponse, 1, "field1", 0, 1, anyOf(equalTo("The <x>quick browse</x> button is a fancy thing, right bro?"), equalTo("The <x>quick brown</x> fox jumps over the lazy dog")));
// with synonyms
client().prepareIndex("test", "type2", "0")
.setSource("field4", "The quick brown fox jumps over the lazy dog", "field3", "The quick brown fox jumps over the lazy dog").get();
client().prepareIndex("test", "type2", "1")
.setSource("field4", "The quick browse button is a fancy thing, right bro?").get();
client().prepareIndex("test", "type2", "2")
.setSource("field4", "a quick fast blue car").get();
refresh();
// querying "fast" must still highlight "quick" via the synonym mapping
source = searchSource().postFilter(typeQuery("type2")).query(matchPhrasePrefixQuery("field3", "fast bro"))
.highlight(highlight().field("field3").order("score").preTags("<x>").postTags("</x>"));
searchResponse = client().search(searchRequest("test").source(source)).actionGet();
assertHighlight(searchResponse, 0, "field3", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog"));
logger.info("--> highlighting and searching on field4");
source = searchSource().postFilter(typeQuery("type2")).query(matchPhrasePrefixQuery("field4", "the fast bro"))
.highlight(highlight().field("field4").order("score").preTags("<x>").postTags("</x>"));
searchResponse = client().search(searchRequest("test").source(source)).actionGet();
assertHighlight(searchResponse, 0, "field4", 0, 1, anyOf(equalTo("<x>The quick browse</x> button is a fancy thing, right bro?"), equalTo("<x>The quick brown</x> fox jumps over the lazy dog")));
assertHighlight(searchResponse, 1, "field4", 0, 1, anyOf(equalTo("<x>The quick browse</x> button is a fancy thing, right bro?"), equalTo("<x>The quick brown</x> fox jumps over the lazy dog")));
logger.info("--> highlighting and searching on field4");
source = searchSource().postFilter(typeQuery("type2")).query(matchPhrasePrefixQuery("field4", "a fast quick blue ca"))
.highlight(highlight().field("field4").order("score").preTags("<x>").postTags("</x>"));
searchResponse = client().search(searchRequest("test").source(source)).actionGet();
assertHighlight(searchResponse, 0, "field4", 0, 1, equalTo("<x>a quick fast blue car</x>"));
}
/**
 * The plain highlighter supports the "simple" and "span" fragmenter options;
 * both must produce the same fragments here, and an unknown fragmenter name
 * must be rejected with BAD_REQUEST.
 */
@Test
public void testPlainHighlightDifferentFragmenter() throws Exception {
assertAcked(prepareCreate("test")
.addMapping("type1", "tags", "type=string"));
ensureGreen();
client().prepareIndex("test", "type1", "1")
.setSource(jsonBuilder().startObject().field("tags",
"this is a really long tag i would like to highlight",
"here is another one that is very long tag and has the tag token near the end").endObject()).get();
refresh();
// fragmenter "simple"
SearchResponse response = client().prepareSearch("test")
.setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQueryBuilder.Type.PHRASE))
.addHighlightedField(new HighlightBuilder.Field("tags")
.fragmentSize(-1).numOfFragments(2).fragmenter("simple")).get();
assertHighlight(response, 0, "tags", 0, equalTo("this is a really <em>long</em> <em>tag</em> i would like to highlight"));
assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very <em>long</em> <em>tag</em> and has the tag token near the end"));
// fragmenter "span" — same expected output for this input
response = client().prepareSearch("test")
.setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQueryBuilder.Type.PHRASE))
.addHighlightedField(new HighlightBuilder.Field("tags")
.fragmentSize(-1).numOfFragments(2).fragmenter("span")).get();
assertHighlight(response, 0, "tags", 0, equalTo("this is a really <em>long</em> <em>tag</em> i would like to highlight"));
assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very <em>long</em> <em>tag</em> and has the tag token near the end"));
// any other fragmenter name is an error
assertFailures(client().prepareSearch("test")
.setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQueryBuilder.Type.PHRASE))
.addHighlightedField(new HighlightBuilder.Field("tags")
.fragmentSize(-1).numOfFragments(2).fragmenter("invalid")),
RestStatus.BAD_REQUEST,
containsString("unknown fragmenter option [invalid] for the field [tags]"));
}
/**
 * Plain highlighter with two highlighted fields: per-field tags, and
 * requireFieldMatch(false) lets the field1 query also highlight field2.
 */
@Test
public void testPlainHighlighterMultipleFields() {
    createIndex("test");
    ensureGreen();

    index("test", "type1", "1", "field1", "The <b>quick<b> brown fox", "field2", "The <b>slow<b> brown fox");
    refresh();

    HighlightBuilder.Field field1Highlight = new HighlightBuilder.Field("field1").preTags("<1>").postTags("</1>").requireFieldMatch(true);
    HighlightBuilder.Field field2Highlight = new HighlightBuilder.Field("field2").preTags("<2>").postTags("</2>").requireFieldMatch(false);
    SearchResponse response = client().prepareSearch("test")
            .setQuery(QueryBuilders.matchQuery("field1", "fox"))
            .addHighlightedField(field1Highlight)
            .addHighlightedField(field2Highlight)
            .get();

    assertHighlight(response, 0, "field1", 0, 1, equalTo("The <b>quick<b> brown <1>fox</1>"));
    assertHighlight(response, 0, "field2", 0, 1, equalTo("The <b>slow<b> brown <2>fox</2>"));
}
/**
 * Same two-field scenario as {@code testPlainHighlighterMultipleFields}, but with
 * term vectors so the fast vector highlighter is used.
 */
@Test
public void testFastVectorHighlighterMultipleFields() {
    assertAcked(prepareCreate("test")
            .addMapping("type1", "field1", "type=string,term_vector=with_positions_offsets", "field2", "type=string,term_vector=with_positions_offsets"));
    ensureGreen();

    index("test", "type1", "1", "field1", "The <b>quick<b> brown fox", "field2", "The <b>slow<b> brown fox");
    refresh();

    HighlightBuilder.Field field1Highlight = new HighlightBuilder.Field("field1").preTags("<1>").postTags("</1>").requireFieldMatch(true);
    HighlightBuilder.Field field2Highlight = new HighlightBuilder.Field("field2").preTags("<2>").postTags("</2>").requireFieldMatch(false);
    SearchResponse response = client().prepareSearch("test")
            .setQuery(QueryBuilders.matchQuery("field1", "fox"))
            .addHighlightedField(field1Highlight)
            .addHighlightedField(field2Highlight)
            .get();

    assertHighlight(response, 0, "field1", 0, 1, equalTo("The <b>quick<b> brown <1>fox</1>"));
    assertHighlight(response, 0, "field2", 0, 1, equalTo("The <b>slow<b> brown <2>fox</2>"));
}
/** Requesting highlighting on a stored field that is absent from the hit must not fail. */
@Test
public void testMissingStoredField() throws Exception {
    assertAcked(prepareCreate("test")
            .addMapping("type1", "highlight_field", "type=string,store=yes"));
    ensureGreen();

    // the document carries "field" but not the mapped stored "highlight_field"
    client().prepareIndex("test", "type1", "1")
            .setSource(jsonBuilder().startObject()
                    .field("field", "highlight")
                    .endObject()).get();
    refresh();

    // This query used to fail when the field to highlight was absent
    HighlightBuilder.Field missingField = new HighlightBuilder.Field("highlight_field")
            .fragmentSize(-1).numOfFragments(1).fragmenter("simple");
    SearchResponse response = client().prepareSearch("test")
            .setQuery(QueryBuilders.matchQuery("field", "highlight").type(MatchQueryBuilder.Type.BOOLEAN))
            .addHighlightedField(missingField).get();

    assertThat(response.getHits().hits()[0].highlightFields().isEmpty(), equalTo(true));
}
/**
 * Requesting highlighting on numeric fields is unsupported but must not raise
 * errors (behavior consistent with version 0.20).
 * See https://github.com/elasticsearch/elasticsearch/issues/3211
 */
@Test
public void testNumericHighlighting() throws Exception {
    assertAcked(prepareCreate("test")
            .addMapping("test", "text", "type=string,index=analyzed",
                    "byte", "type=byte", "short", "type=short", "int", "type=integer", "long", "type=long",
                    "float", "type=float", "double", "type=double"));
    ensureGreen();

    client().prepareIndex("test", "test", "1").setSource("text", "elasticsearch test",
            "byte", 25, "short", 42, "int", 100, "long", -1, "float", 3.2f, "double", 42.42).get();
    refresh();

    // ask for highlighting on the text field plus every numeric field
    SearchRequestBuilder request = client().prepareSearch("test")
            .setQuery(QueryBuilders.matchQuery("text", "test").type(MatchQueryBuilder.Type.BOOLEAN));
    for (String fieldName : new String[] {"text", "byte", "short", "int", "long", "float", "double"}) {
        request.addHighlightedField(fieldName);
    }
    SearchResponse response = request.get();

    // the request succeeds even though numeric fields cannot be highlighted
    assertHitCount(response, 1l);
}
/**
 * Highlighting with a custom pattern analyzer: PatternAnalyzer used to throw if
 * its token stream was reset twice, so the request simply has to succeed.
 * See https://github.com/elasticsearch/elasticsearch/issues/3200
 */
@Test
public void testResetTwice() throws Exception {
    assertAcked(prepareCreate("test")
            .setSettings(settingsBuilder()
                    .put(indexSettings())
                    .put("analysis.analyzer.my_analyzer.type", "pattern")
                    .put("analysis.analyzer.my_analyzer.pattern", "\\s+")
                    .build())
            .addMapping("type", "text", "type=string,analyzer=my_analyzer"));
    ensureGreen();

    client().prepareIndex("test", "type", "1")
            .setSource("text", "elasticsearch test").get();
    refresh();

    SearchResponse response = client().prepareSearch("test")
            .setQuery(QueryBuilders.matchQuery("text", "test").type(MatchQueryBuilder.Type.BOOLEAN))
            .addHighlightedField("text").get();

    // PatternAnalyzer will throw an exception if it is resetted twice
    assertHitCount(response, 1l);
}
/**
 * A highlight query, when set, replaces the search query as the source of terms
 * to highlight — both per field (Field.highlightQuery) and for the whole request
 * (setHighlighterQuery). Verified for the plain, fvh and postings highlighters.
 * Note: the same SearchRequestBuilder is re-executed after each mutation of
 * {@code field}, so statement order matters throughout.
 */
@Test
public void testHighlightUsesHighlightQuery() throws IOException {
// randomStoreField() randomizes store=yes vs unset; offsets + term vectors let every highlighter type run
assertAcked(prepareCreate("test")
.addMapping("type1", "text", "type=string," + randomStoreField() + "term_vector=with_positions_offsets,index_options=offsets"));
ensureGreen();
index("test", "type1", "1", "text", "Testing the highlight query feature");
refresh();
HighlightBuilder.Field field = new HighlightBuilder.Field("text");
SearchRequestBuilder search = client().prepareSearch("test").setQuery(QueryBuilders.matchQuery("text", "testing"))
.addHighlightedField(field);
// without a highlight query, the search query term "testing" is highlighted
Matcher<String> searchQueryMatcher = equalTo("<em>Testing</em> the highlight query feature");
field.highlighterType("plain");
SearchResponse response = search.get();
assertHighlight(response, 0, "text", 0, searchQueryMatcher);
field.highlighterType("fvh");
response = search.get();
assertHighlight(response, 0, "text", 0, searchQueryMatcher);
field.highlighterType("postings");
response = search.get();
assertHighlight(response, 0, "text", 0, searchQueryMatcher);
// with a field-level highlight query, its term "query" is highlighted instead
Matcher<String> hlQueryMatcher = equalTo("Testing the highlight <em>query</em> feature");
field.highlightQuery(matchQuery("text", "query"));
field.highlighterType("fvh");
response = search.get();
assertHighlight(response, 0, "text", 0, hlQueryMatcher);
field.highlighterType("plain");
response = search.get();
assertHighlight(response, 0, "text", 0, hlQueryMatcher);
field.highlighterType("postings");
response = search.get();
assertHighlight(response, 0, "text", 0, hlQueryMatcher);
// Make sure the the highlightQuery is taken into account when it is set on the highlight context instead of the field
search.setHighlighterQuery(matchQuery("text", "query"));
field.highlighterType("fvh").highlightQuery(null);
response = search.get();
assertHighlight(response, 0, "text", 0, hlQueryMatcher);
field.highlighterType("plain");
response = search.get();
assertHighlight(response, 0, "text", 0, hlQueryMatcher);
field.highlighterType("postings");
response = search.get();
assertHighlight(response, 0, "text", 0, hlQueryMatcher);
}
/** Randomly returns either a "store=yes," mapping fragment or the empty string. */
private static String randomStoreField() {
    return randomBoolean() ? "store=yes," : "";
}
/**
 * The noMatchSize option returns a leading snippet of the field even when nothing
 * matched. Covers: unset / 0 / mid-string / longer-than-string / exact-length /
 * global (setHighlighterNoMatchSize) / negative values, for the plain, fvh and
 * postings highlighters. The fvh fragment can exceed the requested size (boundary
 * scanning) and the postings highlighter always returns the whole first sentence.
 * The same {@code field} instance is mutated between searches, so order matters.
 */
@Test
public void testHighlightNoMatchSize() throws IOException {
assertAcked(prepareCreate("test")
.addMapping("type1", "text", "type=string," + randomStoreField() + "term_vector=with_positions_offsets,index_options=offsets"));
ensureGreen();
String text = "I am pretty long so some of me should get cut off. Second sentence";
index("test", "type1", "1", "text", text);
refresh();
// When you don't set noMatchSize you don't get any results if there isn't anything to highlight.
HighlightBuilder.Field field = new HighlightBuilder.Field("text")
.fragmentSize(21)
.numOfFragments(1)
.highlighterType("plain");
SearchResponse response = client().prepareSearch("test").addHighlightedField(field).get();
assertNotHighlighted(response, 0, "text");
field.highlighterType("fvh");
response = client().prepareSearch("test").addHighlightedField(field).get();
assertNotHighlighted(response, 0, "text");
field.highlighterType("postings");
response = client().prepareSearch("test").addHighlightedField(field).get();
assertNotHighlighted(response, 0, "text");
// When noMatchSize is set to 0 you also shouldn't get any
field.highlighterType("plain").noMatchSize(0);
response = client().prepareSearch("test").addHighlightedField(field).get();
assertNotHighlighted(response, 0, "text");
field.highlighterType("fvh");
response = client().prepareSearch("test").addHighlightedField(field).get();
assertNotHighlighted(response, 0, "text");
field.highlighterType("postings");
response = client().prepareSearch("test").addHighlightedField(field).get();
assertNotHighlighted(response, 0, "text");
// When noMatchSize is between 0 and the size of the string
field.highlighterType("plain").noMatchSize(21);
response = client().prepareSearch("test").addHighlightedField(field).get();
assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so"));
// The FVH also works but the fragment is longer than the plain highlighter because of boundary_max_scan
field.highlighterType("fvh");
response = client().prepareSearch("test").addHighlightedField(field).get();
assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some"));
// Postings hl also works but the fragment is the whole first sentence (size ignored)
field.highlighterType("postings");
response = client().prepareSearch("test").addHighlightedField(field).get();
assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
// We can also ask for a fragment longer than the input string and get the whole string
field.highlighterType("plain").noMatchSize(text.length() * 2);
response = client().prepareSearch("test").addHighlightedField(field).get();
assertHighlight(response, 0, "text", 0, 1, equalTo(text));
field.highlighterType("fvh");
response = client().prepareSearch("test").addHighlightedField(field).get();
assertHighlight(response, 0, "text", 0, 1, equalTo(text));
//no difference using postings hl as the noMatchSize is ignored (just needs to be greater than 0)
field.highlighterType("postings");
response = client().prepareSearch("test").addHighlightedField(field).get();
assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
// We can also ask for a fragment exactly the size of the input field and get the whole field
field.highlighterType("plain").noMatchSize(text.length());
response = client().prepareSearch("test").addHighlightedField(field).get();
assertHighlight(response, 0, "text", 0, 1, equalTo(text));
field.highlighterType("fvh");
response = client().prepareSearch("test").addHighlightedField(field).get();
assertHighlight(response, 0, "text", 0, 1, equalTo(text));
//no difference using postings hl as the noMatchSize is ignored (just needs to be greater than 0)
field.highlighterType("postings");
response = client().prepareSearch("test").addHighlightedField(field).get();
assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
// You can set noMatchSize globally in the highlighter as well
field.highlighterType("plain").noMatchSize(null);
response = client().prepareSearch("test").setHighlighterNoMatchSize(21).addHighlightedField(field).get();
assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so"));
field.highlighterType("fvh");
response = client().prepareSearch("test").setHighlighterNoMatchSize(21).addHighlightedField(field).get();
assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some"));
field.highlighterType("postings");
response = client().prepareSearch("test").setHighlighterNoMatchSize(21).addHighlightedField(field).get();
assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));
// We don't break if noMatchSize is less than zero though
field.highlighterType("plain").noMatchSize(randomIntBetween(Integer.MIN_VALUE, -1));
response = client().prepareSearch("test").addHighlightedField(field).get();
assertNotHighlighted(response, 0, "text");
field.highlighterType("fvh");
response = client().prepareSearch("test").addHighlightedField(field).get();
assertNotHighlighted(response, 0, "text");
field.highlighterType("postings");
response = client().prepareSearch("test").addHighlightedField(field).get();
assertNotHighlighted(response, 0, "text");
}
/**
 * noMatchSize on multi-valued fields: the fallback fragment must come from the
 * FIRST value only, and no fragment at all is produced when that first value is
 * the empty string, the field has no values, the field is missing from the
 * document, or the field is unmapped. Each case is checked for the plain, fvh
 * and postings highlighters.
 *
 * Bug fix: in the "field doesn't exist on the document" section the original
 * set "fvh" twice (never exercising the postings highlighter) and asserted on a
 * field literally named "postings" instead of "text".
 */
@Test
public void testHighlightNoMatchSizeWithMultivaluedFields() throws IOException {
    assertAcked(prepareCreate("test")
            .addMapping("type1", "text", "type=string," + randomStoreField() + "term_vector=with_positions_offsets,index_options=offsets"));
    ensureGreen();

    String text1 = "I am pretty long so some of me should get cut off. We'll see how that goes.";
    String text2 = "I am short";
    index("test", "type1", "1", "text", new String[] {text1, text2});
    refresh();

    // The no match fragment should come from the first value of a multi-valued field
    HighlightBuilder.Field field = new HighlightBuilder.Field("text")
            .fragmentSize(21)
            .numOfFragments(1)
            .highlighterType("plain")
            .noMatchSize(21);
    SearchResponse response = client().prepareSearch("test").addHighlightedField(field).get();
    assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so"));

    // The FVH fragment is longer because of boundary scanning
    field.highlighterType("fvh");
    response = client().prepareSearch("test").addHighlightedField(field).get();
    assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some"));

    // Postings hl also works but the fragment is the whole first sentence (size ignored)
    field.highlighterType("postings");
    response = client().prepareSearch("test").addHighlightedField(field).get();
    assertHighlight(response, 0, "text", 0, 1, equalTo("I am pretty long so some of me should get cut off."));

    // And noMatchSize returns nothing when the first entry is empty string!
    index("test", "type1", "2", "text", new String[] {"", text2});
    refresh();
    IdsQueryBuilder idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("2");
    field.highlighterType("plain");
    response = client().prepareSearch("test")
            .setQuery(idsQueryBuilder)
            .addHighlightedField(field).get();
    assertNotHighlighted(response, 0, "text");
    field.highlighterType("fvh");
    response = client().prepareSearch("test")
            .setQuery(idsQueryBuilder)
            .addHighlightedField(field).get();
    assertNotHighlighted(response, 0, "text");
    field.highlighterType("postings");
    response = client().prepareSearch("test")
            .setQuery(idsQueryBuilder)
            .addHighlightedField(field).get();
    assertNotHighlighted(response, 0, "text");

    // But if the field was actually empty then you should get no highlighting field
    index("test", "type1", "3", "text", new String[] {});
    refresh();
    idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("3");
    field.highlighterType("plain");
    response = client().prepareSearch("test")
            .setQuery(idsQueryBuilder)
            .addHighlightedField(field).get();
    assertNotHighlighted(response, 0, "text");
    field.highlighterType("fvh");
    response = client().prepareSearch("test")
            .setQuery(idsQueryBuilder)
            .addHighlightedField(field).get();
    assertNotHighlighted(response, 0, "text");
    field.highlighterType("postings");
    response = client().prepareSearch("test")
            .setQuery(idsQueryBuilder)
            .addHighlightedField(field).get();
    assertNotHighlighted(response, 0, "text");

    // Same for if the field doesn't even exist on the document
    index("test", "type1", "4");
    refresh();
    idsQueryBuilder = QueryBuilders.idsQuery("type1").addIds("4");
    field.highlighterType("plain");
    response = client().prepareSearch("test")
            .setQuery(idsQueryBuilder)
            .addHighlightedField(field).get();
    assertNotHighlighted(response, 0, "text");
    field.highlighterType("fvh");
    response = client().prepareSearch("test")
            .setQuery(idsQueryBuilder)
            .addHighlightedField(field).get();
    assertNotHighlighted(response, 0, "text");
    // fixed: was a second "fvh" run asserting on a field named "postings"
    field.highlighterType("postings");
    response = client().prepareSearch("test")
            .setQuery(idsQueryBuilder)
            .addHighlightedField(field).get();
    assertNotHighlighted(response, 0, "text");

    // Again same if the field isn't mapped
    field = new HighlightBuilder.Field("unmapped")
            .highlighterType("plain")
            .noMatchSize(21);
    response = client().prepareSearch("test").addHighlightedField(field).get();
    assertNotHighlighted(response, 0, "text");
    field.highlighterType("fvh");
    response = client().prepareSearch("test").addHighlightedField(field).get();
    assertNotHighlighted(response, 0, "text");
    field.highlighterType("postings");
    response = client().prepareSearch("test").addHighlightedField(field).get();
    assertNotHighlighted(response, 0, "text");
}
/**
 * Interaction of noMatchSize with numOfFragments(0): with no match the fallback
 * fragment still comes from the first value only; with a match, number_of_fragments=0
 * returns each matching value in full. Checked for plain, fvh and postings.
 */
@Test
public void testHighlightNoMatchSizeNumberOfFragments() throws IOException {
assertAcked(prepareCreate("test")
.addMapping("type1", "text", "type=string," + randomStoreField() + "term_vector=with_positions_offsets,index_options=offsets"));
ensureGreen();
// first value ends with the paragraph separator used by the postings highlighter
String text1 = "This is the first sentence. This is the second sentence." + HighlightUtils.PARAGRAPH_SEPARATOR;
String text2 = "This is the third sentence. This is the fourth sentence.";
String text3 = "This is the fifth sentence";
index("test", "type1", "1", "text", new String[] {text1, text2, text3});
refresh();
// The no match fragment should come from the first value of a multi-valued field
HighlightBuilder.Field field = new HighlightBuilder.Field("text")
.fragmentSize(1)
.numOfFragments(0)
.highlighterType("plain")
.noMatchSize(20);
SearchResponse response = client().prepareSearch("test").addHighlightedField(field).get();
assertHighlight(response, 0, "text", 0, 1, equalTo("This is the first"));
// the FVH fragment is longer than requested because of boundary scanning
field.highlighterType("fvh");
response = client().prepareSearch("test").addHighlightedField(field).get();
assertHighlight(response, 0, "text", 0, 1, equalTo("This is the first sentence"));
// Postings hl also works but the fragment is the whole first sentence (size ignored)
field.highlighterType("postings");
response = client().prepareSearch("test").addHighlightedField(field).get();
assertHighlight(response, 0, "text", 0, 1, equalTo("This is the first sentence."));
//if there's a match we only return the values with matches (whole value as number_of_fragments == 0)
MatchQueryBuilder queryBuilder = QueryBuilders.matchQuery("text", "third fifth");
field.highlighterType("plain");
response = client().prepareSearch("test").setQuery(queryBuilder).addHighlightedField(field).get();
assertHighlight(response, 0, "text", 0, 2, equalTo("This is the <em>third</em> sentence. This is the fourth sentence."));
assertHighlight(response, 0, "text", 1, 2, equalTo("This is the <em>fifth</em> sentence"));
field.highlighterType("fvh");
response = client().prepareSearch("test").setQuery(queryBuilder).addHighlightedField(field).get();
assertHighlight(response, 0, "text", 0, 2, equalTo("This is the <em>third</em> sentence. This is the fourth sentence."));
assertHighlight(response, 0, "text", 1, 2, equalTo("This is the <em>fifth</em> sentence"));
field.highlighterType("postings");
response = client().prepareSearch("test").setQuery(queryBuilder).addHighlightedField(field).get();
assertHighlight(response, 0, "text", 0, 2, equalTo("This is the <em>third</em> sentence. This is the fourth sentence."));
assertHighlight(response, 0, "text", 1, 2, equalTo("This is the <em>fifth</em> sentence"));
}
    // Smoke test for the postings highlighter: plain term queries, score ordering,
    // phrase queries, and an explicit fallback to the plain highlighter when
    // searching on _all. Both fields are mapped with index_options=offsets
    // (see type1PostingsffsetsMapping) as the postings highlighter requires.
    @Test
    public void testPostingsHighlighter() throws Exception {
        assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
        ensureGreen();
        client().prepareIndex("test", "type1")
                .setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy quick dog").get();
        refresh();
        logger.info("--> highlighting and searching on field1");
        SearchSourceBuilder source = searchSource()
                .query(termQuery("field1", "test"))
                .highlight(highlight().field("field1").preTags("<xxx>").postTags("</xxx>"));
        SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
        assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
        logger.info("--> searching on field1, highlighting on field1");
        source = searchSource()
                .query(termQuery("field1", "test"))
                .highlight(highlight().field("field1").preTags("<xxx>").postTags("</xxx>"));
        searchResponse = client().search(searchRequest("test").source(source)).actionGet();
        assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("this is a <xxx>test</xxx>"));
        logger.info("--> searching on field2, highlighting on field2");
        // order("score") should not change anything here: the whole value is one fragment
        source = searchSource()
                .query(termQuery("field2", "quick"))
                .highlight(highlight().field("field2").order("score").preTags("<xxx>").postTags("</xxx>"));
        searchResponse = client().search(searchRequest("test").source(source)).actionGet();
        assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> brown fox jumps over the lazy <xxx>quick</xxx> dog"));
        logger.info("--> searching on field2, highlighting on field2");
        source = searchSource()
                .query(matchPhraseQuery("field2", "quick brown"))
                .highlight(highlight().field("field2").preTags("<xxx>").postTags("</xxx>"));
        searchResponse = client().search(searchRequest("test").source(source)).actionGet();
        //phrase query results in highlighting all different terms regardless of their positions
        assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> <xxx>brown</xxx> fox jumps over the lazy <xxx>quick</xxx> dog"));
        //lets fall back to the standard highlighter then, what people would do to highlight query matches
        logger.info("--> searching on field2, highlighting on field2, falling back to the plain highlighter");
        source = searchSource()
                .query(matchPhraseQuery("_all", "quick brown"))
                .highlight(highlight().field("field2").preTags("<xxx>").postTags("</xxx>").highlighterType("highlighter").requireFieldMatch(false));
        searchResponse = client().search(searchRequest("test").source(source)).actionGet();
        assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> <xxx>brown</xxx> fox jumps over the lazy quick dog"));
    }
@Test
public void testPostingsHighlighterMultipleFields() throws Exception {
assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()).get());
ensureGreen();
index("test", "type1", "1", "field1", "The <b>quick<b> brown fox. Second sentence.", "field2", "The <b>slow<b> brown fox. Second sentence.");
refresh();
SearchResponse response = client().prepareSearch("test")
.setQuery(QueryBuilders.matchQuery("field1", "fox"))
.addHighlightedField(new HighlightBuilder.Field("field1").preTags("<1>").postTags("</1>").requireFieldMatch(true))
.get();
assertHighlight(response, 0, "field1", 0, 1, equalTo("The <b>quick<b> brown <1>fox</1>."));
}
@Test
public void testPostingsHighlighterNumberOfFragments() throws Exception {
assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
ensureGreen();
client().prepareIndex("test", "type1", "1")
.setSource("field1", "The quick brown fox jumps over the lazy dog. The lazy red fox jumps over the quick dog. The quick brown dog jumps over the lazy fox.",
"field2", "The quick brown fox jumps over the lazy dog. The lazy red fox jumps over the quick dog. The quick brown dog jumps over the lazy fox.").get();
refresh();
logger.info("--> highlighting and searching on field1");
SearchSourceBuilder source = searchSource()
.query(termQuery("field1", "fox"))
.highlight(highlight()
.field(new HighlightBuilder.Field("field1").numOfFragments(5).preTags("<field1>").postTags("</field1>")));
SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
assertHighlight(searchResponse, 0, "field1", 0, equalTo("The quick brown <field1>fox</field1> jumps over the lazy dog."));
assertHighlight(searchResponse, 0, "field1", 1, equalTo("The lazy red <field1>fox</field1> jumps over the quick dog."));
assertHighlight(searchResponse, 0, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy <field1>fox</field1>."));
client().prepareIndex("test", "type1", "2")
.setSource("field1", new String[]{"The quick brown fox jumps over the lazy dog. Second sentence not finished", "The lazy red fox jumps over the quick dog.", "The quick brown dog jumps over the lazy fox."}).get();
refresh();
source = searchSource()
.query(termQuery("field1", "fox"))
.highlight(highlight()
.field(new HighlightBuilder.Field("field1").numOfFragments(0).preTags("<field1>").postTags("</field1>")));
searchResponse = client().search(searchRequest("test").source(source)).actionGet();
assertHitCount(searchResponse, 2l);
for (SearchHit searchHit : searchResponse.getHits()) {
if ("1".equals(searchHit.id())) {
assertHighlight(searchHit, "field1", 0, 1, equalTo("The quick brown <field1>fox</field1> jumps over the lazy dog. The lazy red <field1>fox</field1> jumps over the quick dog. The quick brown dog jumps over the lazy <field1>fox</field1>."));
} else if ("2".equals(searchHit.id())) {
assertHighlight(searchHit, "field1", 0, 3, equalTo("The quick brown <field1>fox</field1> jumps over the lazy dog. Second sentence not finished"));
assertHighlight(searchHit, "field1", 1, 3, equalTo("The lazy red <field1>fox</field1> jumps over the quick dog."));
assertHighlight(searchHit, "field1", 2, 3, equalTo("The quick brown dog jumps over the lazy <field1>fox</field1>."));
} else {
fail("Only hits with id 1 and 2 are returned");
}
}
}
@Test
public void testMultiMatchQueryHighlight() throws IOException {
String[] highlighterTypes = new String[] {"fvh", "plain", "postings"};
XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
.startObject("_all").field("store", "yes").field("index_options", "offsets").endObject()
.startObject("properties")
.startObject("field1").field("type", "string").field("index_options", "offsets").field("term_vector", "with_positions_offsets").endObject()
.startObject("field2").field("type", "string").field("index_options", "offsets").field("term_vector", "with_positions_offsets").endObject()
.endObject()
.endObject().endObject();
assertAcked(prepareCreate("test").addMapping("type1", mapping));
ensureGreen();
client().prepareIndex("test", "type1")
.setSource("field1", "The quick brown fox jumps over",
"field2", "The quick brown fox jumps over").get();
refresh();
final int iters = scaledRandomIntBetween(20, 30);
for (int i = 0; i < iters; i++) {
String highlighterType = rarely() ? null : RandomPicks.randomFrom(getRandom(), highlighterTypes);
MultiMatchQueryBuilder.Type[] supportedQueryTypes;
if ("postings".equals(highlighterType)) {
//phrase_prefix is not supported by postings highlighter, as it rewrites against an empty reader, the prefix will never match any term
supportedQueryTypes = new MultiMatchQueryBuilder.Type[]{MultiMatchQueryBuilder.Type.BEST_FIELDS, MultiMatchQueryBuilder.Type.CROSS_FIELDS, MultiMatchQueryBuilder.Type.MOST_FIELDS, MultiMatchQueryBuilder.Type.PHRASE};
} else {
supportedQueryTypes = MultiMatchQueryBuilder.Type.values();
}
MultiMatchQueryBuilder.Type matchQueryType = rarely() ? null : RandomPicks.randomFrom(getRandom(), supportedQueryTypes);
final MultiMatchQueryBuilder multiMatchQueryBuilder = multiMatchQuery("the quick brown fox", "field1", "field2").type(matchQueryType);
SearchSourceBuilder source = searchSource()
.query(multiMatchQueryBuilder)
.highlight(highlight().highlightQuery(randomBoolean() ? multiMatchQueryBuilder : null).highlighterType(highlighterType)
.field(new Field("field1").requireFieldMatch(true).preTags("<field1>").postTags("</field1>")));
logger.info("Running multi-match type: [" + matchQueryType + "] highlight with type: [" + highlighterType + "]");
SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
assertHitCount(searchResponse, 1l);
assertHighlight(searchResponse, 0, "field1", 0, anyOf(equalTo("<field1>The quick brown fox</field1> jumps over"),
equalTo("<field1>The</field1> <field1>quick</field1> <field1>brown</field1> <field1>fox</field1> jumps over")));
}
}
    // order("score") must sort the returned sentence fragments by descending
    // relevance (fragments with more occurrences of the term first), across all
    // values of a multi-valued field.
    @Test
    public void testPostingsHighlighterOrderByScore() throws Exception {
        assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
        ensureGreen();
        client().prepareIndex("test", "type1")
                .setSource("field1", new String[]{"This sentence contains one match, not that short. This sentence contains two sentence matches. This one contains no matches.",
                        "This is the second value's first sentence. This one contains no matches. This sentence contains three sentence occurrences (sentence).",
                        "One sentence match here and scored lower since the text is quite long, not that appealing. This one contains no matches."}).get();
        refresh();
        logger.info("--> highlighting and searching on field1");
        SearchSourceBuilder source = searchSource()
                .query(termQuery("field1", "sentence"))
                .highlight(highlight().field("field1").order("score"));
        SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
        Map<String,HighlightField> highlightFieldMap = searchResponse.getHits().getAt(0).highlightFields();
        assertThat(highlightFieldMap.size(), equalTo(1));
        HighlightField field1 = highlightFieldMap.get("field1");
        // five matching sentences in total, ordered best-first:
        // 3 occurrences, 2 occurrences, then single occurrences (shorter sentences score higher)
        assertThat(field1.fragments().length, equalTo(5));
        assertThat(field1.fragments()[0].string(), equalTo("This <em>sentence</em> contains three <em>sentence</em> occurrences (<em>sentence</em>)."));
        assertThat(field1.fragments()[1].string(), equalTo("This <em>sentence</em> contains two <em>sentence</em> matches."));
        assertThat(field1.fragments()[2].string(), equalTo("This is the second value's first <em>sentence</em>."));
        assertThat(field1.fragments()[3].string(), equalTo("This <em>sentence</em> contains one match, not that short."));
        assertThat(field1.fragments()[4].string(), equalTo("One <em>sentence</em> match here and scored lower since the text is quite long, not that appealing."));
    }
@Test
public void testPostingsHighlighterEscapeHtml() throws Exception {
assertAcked(prepareCreate("test")
.addMapping("type1", "title", "type=string," + randomStoreField() + "index_options=offsets"));
ensureYellow();
IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
for (int i = 0; i < 5; i++) {
indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
.setSource("title", "This is a html escaping highlighting test for *&? elasticsearch");
}
indexRandom(true, indexRequestBuilders);
SearchResponse searchResponse = client().prepareSearch()
.setQuery(matchQuery("title", "test"))
.setHighlighterEncoder("html")
.addHighlightedField("title").get();
for (int i = 0; i < indexRequestBuilders.length; i++) {
assertHighlight(searchResponse, i, "title", 0, 1, equalTo("This is a html escaping highlighting <em>test</em> for *&?"));
}
}
@Test
public void testPostingsHighlighterMultiMapperWithStore() throws Exception {
assertAcked(prepareCreate("test")
.addMapping("type1", jsonBuilder().startObject().startObject("type1")
.startObject("properties")
.startObject("title").field("type", "multi_field").startObject("fields")
.startObject("title").field("type", "string").field("store", "yes").field("index_options", "offsets").field("analyzer", "classic").endObject()
.startObject("key").field("type", "string").field("store", "yes").field("index_options", "offsets").field("analyzer", "whitespace").endObject()
.endObject().endObject()
.endObject().endObject().endObject()));
ensureGreen();
client().prepareIndex("test", "type1", "1").setSource("title", "this is a test . Second sentence.").get();
refresh();
// simple search on body with standard analyzer with a simple field query
SearchResponse searchResponse = client().prepareSearch()
//lets make sure we analyze the query and we highlight the resulting terms
.setQuery(matchQuery("title", "This is a Test"))
.addHighlightedField("title").get();
assertHitCount(searchResponse, 1l);
SearchHit hit = searchResponse.getHits().getAt(0);
//stopwords are not highlighted since not indexed
assertHighlight(hit, "title", 0, 1, equalTo("this is a <em>test</em> ."));
// search on title.key and highlight on title
searchResponse = client().prepareSearch()
.setQuery(matchQuery("title.key", "this is a test"))
.addHighlightedField("title.key").get();
assertHitCount(searchResponse, 1l);
//stopwords are now highlighted since we used only whitespace analyzer here
assertHighlight(searchResponse, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em> ."));
}
@Test
public void testPostingsHighlighterMultiMapperFromSource() throws Exception {
assertAcked(prepareCreate("test")
.addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
.startObject("title").field("type", "multi_field").startObject("fields")
.startObject("title").field("type", "string").field("store", "no").field("index_options", "offsets").field("analyzer", "classic").endObject()
.startObject("key").field("type", "string").field("store", "no").field("index_options", "offsets").field("analyzer", "whitespace").endObject()
.endObject().endObject()
.endObject().endObject().endObject()));
ensureGreen();
client().prepareIndex("test", "type1", "1").setSource("title", "this is a test").get();
refresh();
// simple search on body with standard analyzer with a simple field query
SearchResponse searchResponse = client().prepareSearch()
.setQuery(matchQuery("title", "this is a test"))
.addHighlightedField("title")
.get();
assertHighlight(searchResponse, 0, "title", 0, 1, equalTo("this is a <em>test</em>"));
// search on title.key and highlight on title.key
searchResponse = client().prepareSearch()
.setQuery(matchQuery("title.key", "this is a test"))
.addHighlightedField("title.key").get();
assertHighlight(searchResponse, 0, "title.key", 0, 1, equalTo("<em>this</em> <em>is</em> <em>a</em> <em>test</em>"));
}
    // The postings highlighter needs index_options=offsets. Explicitly requesting
    // it on a field indexed with index_options=docs must fail with BAD_REQUEST,
    // while the default highlighter keeps working. A wildcard field pattern must
    // NOT fail: fields that cannot be highlighted are silently skipped.
    @Test
    public void testPostingsHighlighterShouldFailIfNoOffsets() throws Exception {
        assertAcked(prepareCreate("test")
                .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties")
                        .startObject("title").field("type", "string").field("store", "yes").field("index_options", "docs").endObject()
                        .endObject().endObject().endObject()));
        ensureGreen();
        IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[5];
        for (int i = 0; i < indexRequestBuilders.length; i++) {
            indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i))
                    .setSource("title", "This is a test for the postings highlighter");
        }
        indexRandom(true, indexRequestBuilders);
        SearchResponse search = client().prepareSearch()
                .setQuery(matchQuery("title", "this is a test"))
                .addHighlightedField("title")
                .get();
        assertNoFailures(search);
        // both accepted names of the highlighter must produce the same failure
        assertFailures(client().prepareSearch()
                .setQuery(matchQuery("title", "this is a test"))
                .addHighlightedField("title")
                .setHighlighterType("postings-highlighter"),
                RestStatus.BAD_REQUEST,
                containsString("the field [title] should be indexed with positions and offsets in the postings list to be used with postings highlighter"));
        assertFailures(client().prepareSearch()
                .setQuery(matchQuery("title", "this is a test"))
                .addHighlightedField("title")
                .setHighlighterType("postings"),
                RestStatus.BAD_REQUEST,
                containsString("the field [title] should be indexed with positions and offsets in the postings list to be used with postings highlighter"));
        //should not fail if there is a wildcard
        assertNoFailures(client().prepareSearch()
                .setQuery(matchQuery("title", "this is a test"))
                .addHighlightedField("tit*")
                .setHighlighterType("postings").get());
    }
@Test
public void testPostingsHighlighterBoostingQuery() throws IOException {
assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
ensureGreen();
client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.")
.get();
refresh();
logger.info("--> highlighting and searching on field1");
SearchSourceBuilder source = searchSource()
.query(boostingQuery().positive(termQuery("field2", "brown")).negative(termQuery("field2", "foobar")).negativeBoost(0.5f))
.highlight(highlight().field("field2").preTags("<x>").postTags("</x>"));
SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick <x>brown</x> fox jumps over the lazy dog!"));
}
@Test
public void testPostingsHighlighterCommonTermsQuery() throws IOException {
assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
ensureGreen();
client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
refresh();
logger.info("--> highlighting and searching on field1");
SearchSourceBuilder source = searchSource().query(commonTermsQuery("field2", "quick brown").cutoffFrequency(100))
.highlight(highlight().field("field2").preTags("<x>").postTags("</x>"));
SearchResponse searchResponse = client().search(searchRequest("test").source(source)).actionGet();
assertHitCount(searchResponse, 1l);
assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <x>quick</x> <x>brown</x> fox jumps over the lazy dog!"));
}
    // Shared type1 mapping with both string fields indexed with
    // index_options=offsets, as required by the postings highlighter.
    // NOTE(review): the name has a typo ("PostingsffsetsMapping" should read
    // "PostingsOffsetsMapping"); left unchanged because many tests in this file
    // reference it by this name.
    private static XContentBuilder type1PostingsffsetsMapping() throws IOException {
        return XContentFactory.jsonBuilder().startObject().startObject("type1")
                .startObject("properties")
                .startObject("field1").field("type", "string").field("index_options", "offsets").endObject()
                .startObject("field2").field("type", "string").field("index_options", "offsets").endObject()
                .endObject()
                .endObject().endObject();
    }
@Test
public void testPostingsHighlighterPrefixQuery() throws Exception {
assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
ensureGreen();
client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
refresh();
logger.info("--> highlighting and searching on field2");
SearchSourceBuilder source = searchSource().query(prefixQuery("field2", "qui"))
.highlight(highlight().field("field2"));
SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
}
@Test
public void testPostingsHighlighterFuzzyQuery() throws Exception {
assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
ensureGreen();
client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
refresh();
logger.info("--> highlighting and searching on field2");
SearchSourceBuilder source = searchSource().query(fuzzyQuery("field2", "quck"))
.highlight(highlight().field("field2"));
SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
}
@Test
public void testPostingsHighlighterRegexpQuery() throws Exception {
assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
ensureGreen();
client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
refresh();
logger.info("--> highlighting and searching on field2");
SearchSourceBuilder source = searchSource().query(regexpQuery("field2", "qu[a-l]+k"))
.highlight(highlight().field("field2"));
SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
}
@Test
public void testPostingsHighlighterWildcardQuery() throws Exception {
assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
ensureGreen();
client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
refresh();
logger.info("--> highlighting and searching on field2");
SearchSourceBuilder source = searchSource().query(wildcardQuery("field2", "qui*"))
.highlight(highlight().field("field2"));
SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
source = searchSource().query(wildcardQuery("field2", "qu*k"))
.highlight(highlight().field("field2"));
searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
assertHitCount(searchResponse, 1l);
assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
}
@Test
public void testPostingsHighlighterTermRangeQuery() throws Exception {
assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
ensureGreen();
client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "aaab").get();
refresh();
logger.info("--> highlighting and searching on field2");
SearchSourceBuilder source = searchSource().query(rangeQuery("field2").gte("aaaa").lt("zzzz"))
.highlight(highlight().field("field2"));
SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("<em>aaab</em>"));
}
@Test
public void testPostingsHighlighterQueryString() throws Exception {
assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
ensureGreen();
client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.").get();
refresh();
logger.info("--> highlighting and searching on field2");
SearchSourceBuilder source = searchSource().query(queryStringQuery("qui*").defaultField("field2"))
.highlight(highlight().field("field2"));
SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <em>quick</em> brown fox jumps over the lazy dog!"));
}
@Test
public void testPostingsHighlighterRegexpQueryWithinConstantScoreQuery() throws Exception {
assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
ensureGreen();
client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get();
refresh();
logger.info("--> highlighting and searching on field1");
SearchSourceBuilder source = searchSource().query(constantScoreQuery(regexpQuery("field1", "pho[a-z]+")))
.highlight(highlight().field("field1"));
SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <em>photography</em> word will get highlighted"));
}
    // A multi-term query (query_string with a wildcard) nested several levels deep
    // inside a bool query - alongside constant_score/missing and match clauses -
    // must still be found and highlighted by the postings highlighter.
    @Test
    public void testPostingsHighlighterMultiTermQueryMultipleLevels() throws Exception {
        assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
        ensureGreen();
        client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get();
        refresh();
        logger.info("--> highlighting and searching on field1");
        SearchSourceBuilder source = searchSource().query(boolQuery()
                .should(constantScoreQuery(QueryBuilders.missingQuery("field1")))
                .should(matchQuery("field1", "test"))
                .should(filteredQuery(queryStringQuery("field1:photo*"), null)))
                .highlight(highlight().field("field1"));
        SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
        assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <em>photography</em> word will get highlighted"));
    }
@Test
public void testPostingsHighlighterPrefixQueryWithinBooleanQuery() throws Exception {
assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
ensureGreen();
client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get();
refresh();
logger.info("--> highlighting and searching on field1");
SearchSourceBuilder source = searchSource().query(boolQuery().must(prefixQuery("field1", "photo")).should(matchQuery("field1", "test").minimumShouldMatch("0")))
.highlight(highlight().field("field1"));
SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <em>photography</em> word will get highlighted"));
}
@Test
public void testPostingsHighlighterQueryStringWithinFilteredQuery() throws Exception {
assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
ensureGreen();
client().prepareIndex("test", "type1").setSource("field1", "The photography word will get highlighted").get();
refresh();
logger.info("--> highlighting and searching on field1");
SearchSourceBuilder source = searchSource().query(filteredQuery(queryStringQuery("field1:photo*"), missingQuery("field_null")))
.highlight(highlight().field("field1"));
SearchResponse searchResponse = client().prepareSearch("test").setSource(source.buildAsBytes()).get();
assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <em>photography</em> word will get highlighted"));
}
@Test
@Slow
public void testPostingsHighlighterManyDocs() throws Exception {
assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
ensureGreen();
int COUNT = between(20, 100);
Map<String, String> prefixes = new HashMap<>(COUNT);
IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[COUNT];
for (int i = 0; i < COUNT; i++) {
//generating text with word to highlight in a different position
//(https://github.com/elasticsearch/elasticsearch/issues/4103)
String prefix = randomAsciiOfLengthBetween(5, 30);
prefixes.put(String.valueOf(i), prefix);
indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field1", "Sentence " + prefix
+ " test. Sentence two.");
}
logger.info("--> indexing docs");
indexRandom(true, indexRequestBuilders);
logger.info("--> searching explicitly on field1 and highlighting on it");
SearchRequestBuilder searchRequestBuilder = client().prepareSearch()
.setSize(COUNT)
.setQuery(termQuery("field1", "test"))
.addHighlightedField("field1");
SearchResponse searchResponse =
searchRequestBuilder.get();
assertHitCount(searchResponse, (long)COUNT);
assertThat(searchResponse.getHits().hits().length, equalTo(COUNT));
for (SearchHit hit : searchResponse.getHits()) {
String prefix = prefixes.get(hit.id());
assertHighlight(hit, "field1", 0, 1, equalTo("Sentence " + prefix + " <em>test</em>."));
}
}
    // Phrase boosting with the fast vector highlighter; the shared scenario
    // lives in phraseBoostTestCase. Requires the term-vector mapping.
    @Test
    public void testFastVectorHighlighterPhraseBoost() throws Exception {
        assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
        phraseBoostTestCase("fvh");
    }
    // Phrase boosting with the postings highlighter; the shared scenario lives
    // in phraseBoostTestCase. Requires the index_options=offsets mapping.
    @Test
    public void testPostingsHighlighterPhraseBoost() throws Exception {
        assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
        phraseBoostTestCase("postings");
    }
    /**
     * Test phrase boosting over normal term matches. Note that this will never pass with the plain highlighter
     * because it doesn't support the concept of terms having a different weight based on position.
     * @param highlighterType highlighter to test
     */
    private void phraseBoostTestCase(String highlighterType) {
        ensureGreen();
        // Build a long document where a term-level match appears early and the
        // exact phrase "highlight words together" appears much later, separated
        // by filler lines; only phrase boosting can make the phrase win.
        StringBuilder text = new StringBuilder();
        text.append("words words junk junk junk junk junk junk junk junk highlight junk junk junk junk together junk\n");
        for (int i = 0; i<10; i++) {
            text.append("junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk\n");
        }
        text.append("highlight words together\n");
        for (int i = 0; i<10; i++) {
            text.append("junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk junk\n");
        }
        index("test", "type1", "1", "field1", text.toString());
        refresh();
        // Match queries
        phraseBoostTestCaseForClauses(highlighterType, 100f,
                matchQuery("field1", "highlight words together"),
                matchPhraseQuery("field1", "highlight words together"));
        // Query string with a single field
        phraseBoostTestCaseForClauses(highlighterType, 100f,
                queryStringQuery("highlight words together").field("field1"),
                queryStringQuery("\"highlight words together\"").field("field1").autoGeneratePhraseQueries(true));
        // Query string with a single field without dismax
        phraseBoostTestCaseForClauses(highlighterType, 100f,
                queryStringQuery("highlight words together").field("field1").useDisMax(false),
                queryStringQuery("\"highlight words together\"").field("field1").useDisMax(false).autoGeneratePhraseQueries(true));
        // Query string with more than one field
        phraseBoostTestCaseForClauses(highlighterType, 100f,
                queryStringQuery("highlight words together").field("field1").field("field2"),
                queryStringQuery("\"highlight words together\"").field("field1").field("field2").autoGeneratePhraseQueries(true));
        // Query string boosting the field
        // (boost 1 here: the ^100 field boost inside the phrase query supplies the weight)
        phraseBoostTestCaseForClauses(highlighterType, 1f,
                queryStringQuery("highlight words together").field("field1"),
                queryStringQuery("\"highlight words together\"").field("field1^100").autoGeneratePhraseQueries(true));
    }
    /**
     * Runs one terms-vs-phrase boosting scenario three ways (bool query, boosting
     * query, boosting query with a fractional negative boost) and asserts that
     * the phrase - not the scattered term matches - tops the highlighted output.
     *
     * @param highlighterType highlighter to test
     * @param boost factor by which the phrase clause outweighs the terms clause
     * @param terms query matching the individual terms
     * @param phrase boostable query matching the exact phrase
     */
    private <P extends QueryBuilder & BoostableQueryBuilder<?>> void
        phraseBoostTestCaseForClauses(String highlighterType, float boost, QueryBuilder terms, P phrase) {
        Matcher<String> highlightedMatcher = Matchers.either(containsString("<em>highlight words together</em>")).or(
                containsString("<em>highlight</em> <em>words</em> <em>together</em>"));
        SearchRequestBuilder search = client().prepareSearch("test").setHighlighterRequireFieldMatch(true)
                .setHighlighterOrder("score").setHighlighterType(highlighterType)
                .addHighlightedField("field1", 100, 1);
        // Try with a bool query
        phrase.boost(boost);
        SearchResponse response = search.setQuery(boolQuery().must(terms).should(phrase)).get();
        assertHighlight(response, 0, "field1", 0, 1, highlightedMatcher);
        phrase.boost(1);
        // Try with a boosting query
        response = search.setQuery(boostingQuery().positive(phrase).negative(terms).boost(boost).negativeBoost(1)).get();
        assertHighlight(response, 0, "field1", 0, 1, highlightedMatcher);
        // Try with a boosting query using a negative boost
        response = search.setQuery(boostingQuery().positive(phrase).negative(terms).boost(1).negativeBoost(1/boost)).get();
        assertHighlight(response, 0, "field1", 0, 1, highlightedMatcher);
    }
}
| apache-2.0 |
Darsstar/framework | compatibility-server/src/test/java/com/vaadin/v7/tests/server/component/grid/declarative/GridDeclarativeTestBase.java | 6634 | /*
* Copyright 2000-2016 Vaadin Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.vaadin.v7.tests.server.component.grid.declarative;
import java.util.List;
import com.vaadin.tests.design.DeclarativeTestBase;
import com.vaadin.v7.ui.Grid;
import com.vaadin.v7.ui.Grid.Column;
import com.vaadin.v7.ui.Grid.FooterCell;
import com.vaadin.v7.ui.Grid.FooterRow;
import com.vaadin.v7.ui.Grid.HeaderCell;
import com.vaadin.v7.ui.Grid.HeaderRow;
/**
 * Base class for declarative (HTML design) tests of {@link Grid}.
 * <p>
 * Extends the generic {@link DeclarativeTestBase} read-test with Grid-specific
 * structural assertions: after reading a design, the resulting Grid's columns,
 * header rows and footer rows are compared cell by cell against the expected
 * Grid instance.
 */
public class GridDeclarativeTestBase extends DeclarativeTestBase<Grid> {

    @Override
    public Grid testRead(String design, Grid expected) {
        return testRead(design, expected, false);
    }

    /**
     * Reads {@code design}, compares against {@code expected} and optionally
     * re-serializes the result and compares it to the original design.
     */
    public Grid testRead(String design, Grid expected, boolean retestWrite) {
        return testRead(design, expected, retestWrite, false);
    }

    /**
     * Full read test: structural comparison of columns, headers and footers,
     * plus an optional write round-trip ({@code retestWrite}) that may also
     * serialize data rows ({@code writeData}).
     *
     * @return the Grid instance produced by reading the design
     */
    public Grid testRead(String design, Grid expected, boolean retestWrite,
            boolean writeData) {
        Grid readResult = super.testRead(design, expected);

        compareGridColumns(expected, readResult);
        compareHeaders(expected, readResult);
        compareFooters(expected, readResult);

        if (retestWrite) {
            testWrite(design, readResult, writeData);
        }
        return readResult;
    }

    /** Asserts that both grids have identical header rows, cell by cell. */
    private void compareHeaders(Grid expected, Grid actual) {
        assertEquals("Different header row count", expected.getHeaderRowCount(),
                actual.getHeaderRowCount());
        int headerRows = expected.getHeaderRowCount();
        for (int row = 0; row < headerRows; ++row) {
            HeaderRow rowInExpected = expected.getHeaderRow(row);
            HeaderRow rowInActual = actual.getHeaderRow(row);
            // The default header row must sit at the same index in both grids.
            if (rowInExpected.equals(expected.getDefaultHeaderRow())) {
                assertEquals("Different index for default header row",
                        actual.getDefaultHeaderRow(), rowInActual);
            }
            for (Column column : expected.getColumns()) {
                Object propertyId = column.getPropertyId();
                String errorPrefix = "Difference when comparing cell for "
                        + column + " on header row " + row + ": ";
                HeaderCell cellInExpected = rowInExpected.getCell(propertyId);
                HeaderCell cellInActual = rowInActual.getCell(propertyId);
                // Compare by whichever content type the expected cell carries.
                switch (cellInExpected.getCellType()) {
                case TEXT:
                    assertEquals(errorPrefix + "Text content",
                            cellInExpected.getText(), cellInActual.getText());
                    break;
                case HTML:
                    assertEquals(errorPrefix + "HTML content",
                            cellInExpected.getHtml(), cellInActual.getHtml());
                    break;
                case WIDGET:
                    assertEquals(errorPrefix + "Component content",
                            cellInExpected.getComponent(),
                            cellInActual.getComponent());
                    break;
                }
            }
        }
    }

    /** Asserts that both grids have identical footer rows, cell by cell. */
    private void compareFooters(Grid expected, Grid actual) {
        assertEquals("Different footer row count", expected.getFooterRowCount(),
                actual.getFooterRowCount());
        int footerRows = expected.getFooterRowCount();
        for (int row = 0; row < footerRows; ++row) {
            FooterRow rowInExpected = expected.getFooterRow(row);
            FooterRow rowInActual = actual.getFooterRow(row);
            for (Column column : expected.getColumns()) {
                Object propertyId = column.getPropertyId();
                String errorPrefix = "Difference when comparing cell for "
                        + column + " on footer row " + row + ": ";
                FooterCell cellInExpected = rowInExpected.getCell(propertyId);
                FooterCell cellInActual = rowInActual.getCell(propertyId);
                // Same content-type dispatch as for headers (FooterCell is a
                // distinct type, so the logic is necessarily duplicated).
                switch (cellInExpected.getCellType()) {
                case TEXT:
                    assertEquals(errorPrefix + "Text content",
                            cellInExpected.getText(), cellInActual.getText());
                    break;
                case HTML:
                    assertEquals(errorPrefix + "HTML content",
                            cellInExpected.getHtml(), cellInActual.getHtml());
                    break;
                case WIDGET:
                    assertEquals(errorPrefix + "Component content",
                            cellInExpected.getComponent(),
                            cellInActual.getComponent());
                    break;
                }
            }
        }
    }

    /** Asserts that both grids have the same columns with identical settings. */
    private void compareGridColumns(Grid expected, Grid actual) {
        List<Column> expectedColumns = expected.getColumns();
        List<Column> actualColumns = actual.getColumns();
        assertEquals("Different amount of columns", expectedColumns.size(),
                actualColumns.size());
        for (int i = 0; i < expectedColumns.size(); ++i) {
            Column expectedColumn = expectedColumns.get(i);
            Column actualColumn = actualColumns.get(i);
            String baseError = "Error when comparing columns for property "
                    + expectedColumn.getPropertyId() + ": ";
            assertEquals(baseError + "Property id",
                    expectedColumn.getPropertyId(),
                    actualColumn.getPropertyId());
            assertEquals(baseError + "Width", expectedColumn.getWidth(),
                    actualColumn.getWidth());
            assertEquals(baseError + "Maximum width",
                    expectedColumn.getMaximumWidth(),
                    actualColumn.getMaximumWidth());
            assertEquals(baseError + "Minimum width",
                    expectedColumn.getMinimumWidth(),
                    actualColumn.getMinimumWidth());
            assertEquals(baseError + "Expand ratio",
                    expectedColumn.getExpandRatio(),
                    actualColumn.getExpandRatio());
            assertEquals(baseError + "Sortable", expectedColumn.isSortable(),
                    actualColumn.isSortable());
            assertEquals(baseError + "Editable", expectedColumn.isEditable(),
                    actualColumn.isEditable());
            assertEquals(baseError + "Hidden", expectedColumn.isHidden(),
                    actualColumn.isHidden());
            assertEquals(baseError + "Hidable", expectedColumn.isHidable(),
                    actualColumn.isHidable());
            assertEquals(baseError + "HidingToggleCaption",
                    expectedColumn.getHidingToggleCaption(),
                    actualColumn.getHidingToggleCaption());
        }
    }
}
| apache-2.0 |
cristianonicolai/kie-wb-common | kie-wb-common-services/kie-wb-common-services-api/src/main/java/org/kie/workbench/common/services/shared/project/KieProject.java | 3694 | /*
* Copyright 2014 JBoss Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.services.shared.project;
import org.guvnor.common.services.project.model.Project;
import org.jboss.errai.common.client.api.annotations.Portable;
import org.uberfire.backend.vfs.Path;
import org.uberfire.commons.validation.PortablePreconditions;
@Portable
public class KieProject
        extends Project {

    /** Path to the project's kmodule.xml descriptor. */
    private Path kmoduleXMLPath;

    /** Path to the project's imports (external data objects) configuration. */
    private Path importsPath;

    /** Path to the project's package-names white list file. */
    private Path packageNamesWhiteList;

    public KieProject() {
        //For Errai-marshalling
    }

    /**
     * @throws IllegalArgumentException if any of the KIE-specific paths is null
     *         (see {@link PortablePreconditions#checkNotNull})
     */
    public KieProject( final Path rootPath,
                       final Path pomXMLPath,
                       final Path kmoduleXMLPath,
                       final Path importsPath,
                       final Path packageNamesWhiteList,
                       final String projectName ) {
        super( rootPath,
               pomXMLPath,
               projectName );
        this.kmoduleXMLPath = PortablePreconditions.checkNotNull( "kmoduleXMLPath",
                                                                  kmoduleXMLPath );
        this.importsPath = PortablePreconditions.checkNotNull( "importsPath",
                                                               importsPath );
        this.packageNamesWhiteList = PortablePreconditions.checkNotNull( "packageNamesWhiteList",
                                                                         packageNamesWhiteList );
    }

    public Path getKModuleXMLPath() {
        return this.kmoduleXMLPath;
    }

    public Path getImportsPath() {
        return this.importsPath;
    }

    public Path getPackageNamesWhiteList() {
        return this.packageNamesWhiteList;
    }

    @Override
    public boolean equals( Object o ) {
        if ( this == o ) {
            return true;
        }
        if ( !( o instanceof KieProject ) ) {
            return false;
        }
        final KieProject other = (KieProject) o;
        // Field-by-field comparison; short-circuits on the first difference.
        return rootPath.equals( other.rootPath )
                && pomXMLPath.equals( other.pomXMLPath )
                && kmoduleXMLPath.equals( other.kmoduleXMLPath )
                && importsPath.equals( other.importsPath )
                && packageNamesWhiteList.equals( other.packageNamesWhiteList )
                && projectName.equals( other.projectName );
    }

    @Override
    public int hashCode() {
        // The "~~hash" steps are deliberate: when this @Portable class is
        // cross-compiled by GWT/Errai, the double bitwise-NOT coerces the
        // JavaScript number back into 32-bit int range after each step. In
        // plain Java it is a no-op, so the result is unchanged on the JVM.
        int hash = rootPath.hashCode();
        hash = ~~hash;
        hash = 31 * hash + pomXMLPath.hashCode();
        hash = ~~hash;
        hash = 31 * hash + kmoduleXMLPath.hashCode();
        hash = ~~hash;
        hash = 31 * hash + importsPath.hashCode();
        hash = ~~hash;
        hash = 31 * hash + packageNamesWhiteList.hashCode();
        hash = ~~hash;
        hash = 31 * hash + projectName.hashCode();
        hash = ~~hash;
        return hash;
    }
}
| apache-2.0 |
MyPureCloud/kafka | clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java | 14953 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.TopicPartitionInfo;
import org.apache.kafka.common.TopicPartitionReplica;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.errors.TopicExistsException;
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
import org.apache.kafka.common.internals.KafkaFutureImpl;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
public class MockAdminClient extends AdminClient {
    public static final String DEFAULT_CLUSTER_ID = "I4ZmrWqfT2e-upky_4fdPA";

    /** Brokers making up the mocked cluster; also the replica pool for created topics. */
    private final List<Node> brokers;
    /** Topic name -> metadata for every topic known to this mock. */
    private final Map<String, TopicMetadata> allTopics = new HashMap<>();
    private final String clusterId;
    private Node controller;
    /** Number of upcoming requests that should fail with a TimeoutException. */
    private int timeoutNextRequests = 0;

    /**
     * Creates MockAdminClient for a cluster with the given brokers. The Kafka cluster ID uses the default value from
     * DEFAULT_CLUSTER_ID.
     *
     * @param brokers list of brokers in the cluster
     * @param controller node that should start as the controller
     */
    public MockAdminClient(List<Node> brokers, Node controller) {
        this(brokers, controller, DEFAULT_CLUSTER_ID);
    }

    /**
     * Creates MockAdminClient for a cluster with the given brokers.
     * @param brokers list of brokers in the cluster
     * @param controller node that should start as the controller
     * @param clusterId cluster ID reported by {@link #describeCluster}
     */
    public MockAdminClient(List<Node> brokers, Node controller, String clusterId) {
        this.brokers = brokers;
        controller(controller);
        this.clusterId = clusterId;
    }

    /**
     * Designates the current controller node.
     *
     * @throws IllegalArgumentException if {@code controller} is not one of the cluster's brokers
     */
    public void controller(Node controller) {
        if (!brokers.contains(controller))
            throw new IllegalArgumentException("The controller node must be in the list of brokers");
        this.controller = controller;
    }

    /**
     * Registers a pre-existing topic with this mock.
     *
     * @throws IllegalArgumentException if the topic was already added, if any leader/replica/isr
     *         node is unknown, or if the partitions do not all share the same replica list
     */
    public void addTopic(boolean internal,
                         String name,
                         List<TopicPartitionInfo> partitions,
                         Map<String, String> configs) {
        if (allTopics.containsKey(name)) {
            throw new IllegalArgumentException(String.format("Topic %s was already added.", name));
        }
        List<Node> replicas = null;
        for (TopicPartitionInfo partition : partitions) {
            if (!brokers.contains(partition.leader())) {
                throw new IllegalArgumentException("Leader broker unknown");
            }
            if (!brokers.containsAll(partition.replicas())) {
                throw new IllegalArgumentException("Unknown brokers in replica list");
            }
            if (!brokers.containsAll(partition.isr())) {
                throw new IllegalArgumentException("Unknown brokers in isr list");
            }
            if (replicas == null) {
                replicas = partition.replicas();
            } else if (!replicas.equals(partition.replicas())) {
                throw new IllegalArgumentException("All partitions need to have the same replica nodes.");
            }
        }
        allTopics.put(name, new TopicMetadata(internal, partitions, configs));
    }

    /** Makes the next {@code numberOfRequest} admin requests fail with a TimeoutException. */
    public void timeoutNextRequest(int numberOfRequest) {
        timeoutNextRequests = numberOfRequest;
    }

    @Override
    public DescribeClusterResult describeCluster(DescribeClusterOptions options) {
        KafkaFutureImpl<Collection<Node>> nodesFuture = new KafkaFutureImpl<>();
        KafkaFutureImpl<Node> controllerFuture = new KafkaFutureImpl<>();
        KafkaFutureImpl<String> brokerIdFuture = new KafkaFutureImpl<>();

        if (timeoutNextRequests > 0) {
            nodesFuture.completeExceptionally(new TimeoutException());
            controllerFuture.completeExceptionally(new TimeoutException());
            brokerIdFuture.completeExceptionally(new TimeoutException());
            --timeoutNextRequests;
        } else {
            nodesFuture.complete(brokers);
            controllerFuture.complete(controller);
            brokerIdFuture.complete(clusterId);
        }

        return new DescribeClusterResult(nodesFuture, controllerFuture, brokerIdFuture);
    }

    @Override
    public CreateTopicsResult createTopics(Collection<NewTopic> newTopics, CreateTopicsOptions options) {
        Map<String, KafkaFuture<Void>> createTopicResult = new HashMap<>();

        if (timeoutNextRequests > 0) {
            for (final NewTopic newTopic : newTopics) {
                String topicName = newTopic.name();

                KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
                future.completeExceptionally(new TimeoutException());
                createTopicResult.put(topicName, future);
            }

            --timeoutNextRequests;
            return new CreateTopicsResult(createTopicResult);
        }

        for (final NewTopic newTopic : newTopics) {
            KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();

            String topicName = newTopic.name();
            if (allTopics.containsKey(topicName)) {
                future.completeExceptionally(new TopicExistsException(String.format("Topic %s exists already.", topicName)));
                createTopicResult.put(topicName, future);
                // BUG FIX: without this `continue` the loop fell through and silently
                // replaced the existing topic's metadata below, despite having just
                // reported TopicExistsException to the caller.
                continue;
            }
            int replicationFactor = newTopic.replicationFactor();
            // Assign replicas from the head of the broker list; callers must not ask for
            // a replication factor larger than the number of brokers.
            List<Node> replicas = new ArrayList<>(replicationFactor);
            for (int i = 0; i < replicationFactor; ++i) {
                replicas.add(brokers.get(i));
            }

            int numberOfPartitions = newTopic.numPartitions();
            List<TopicPartitionInfo> partitions = new ArrayList<>(numberOfPartitions);
            // All partitions are led by the first broker, with an empty ISR.
            for (int p = 0; p < numberOfPartitions; ++p) {
                partitions.add(new TopicPartitionInfo(p, brokers.get(0), replicas, Collections.<Node>emptyList()));
            }
            allTopics.put(topicName, new TopicMetadata(false, partitions, newTopic.configs()));
            future.complete(null);
            createTopicResult.put(topicName, future);
        }

        return new CreateTopicsResult(createTopicResult);
    }

    @Override
    public ListTopicsResult listTopics(ListTopicsOptions options) {
        Map<String, TopicListing> topicListings = new HashMap<>();

        if (timeoutNextRequests > 0) {
            KafkaFutureImpl<Map<String, TopicListing>> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new TimeoutException());

            --timeoutNextRequests;
            return new ListTopicsResult(future);
        }

        for (Map.Entry<String, TopicMetadata> topicDescription : allTopics.entrySet()) {
            String topicName = topicDescription.getKey();
            topicListings.put(topicName, new TopicListing(topicName, topicDescription.getValue().isInternalTopic));
        }

        KafkaFutureImpl<Map<String, TopicListing>> future = new KafkaFutureImpl<>();
        future.complete(topicListings);
        return new ListTopicsResult(future);
    }

    @Override
    public DescribeTopicsResult describeTopics(Collection<String> topicNames, DescribeTopicsOptions options) {
        Map<String, KafkaFuture<TopicDescription>> topicDescriptions = new HashMap<>();

        if (timeoutNextRequests > 0) {
            for (String requestedTopic : topicNames) {
                KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
                future.completeExceptionally(new TimeoutException());
                topicDescriptions.put(requestedTopic, future);
            }

            --timeoutNextRequests;
            return new DescribeTopicsResult(topicDescriptions);
        }

        for (String requestedTopic : topicNames) {
            for (Map.Entry<String, TopicMetadata> topicDescription : allTopics.entrySet()) {
                String topicName = topicDescription.getKey();
                if (topicName.equals(requestedTopic)) {
                    TopicMetadata topicMetadata = topicDescription.getValue();
                    KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
                    future.complete(new TopicDescription(topicName, topicMetadata.isInternalTopic, topicMetadata.partitions));
                    topicDescriptions.put(topicName, future);
                    break;
                }
            }
            // Unknown topics are reported per-topic, not as a call-level failure.
            if (!topicDescriptions.containsKey(requestedTopic)) {
                KafkaFutureImpl<TopicDescription> future = new KafkaFutureImpl<>();
                future.completeExceptionally(new UnknownTopicOrPartitionException(
                    String.format("Topic %s unknown.", requestedTopic)));
                topicDescriptions.put(requestedTopic, future);
            }
        }

        return new DescribeTopicsResult(topicDescriptions);
    }

    @Override
    public DeleteTopicsResult deleteTopics(Collection<String> topicsToDelete, DeleteTopicsOptions options) {
        Map<String, KafkaFuture<Void>> deleteTopicsResult = new HashMap<>();

        if (timeoutNextRequests > 0) {
            for (final String topicName : topicsToDelete) {
                KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
                future.completeExceptionally(new TimeoutException());
                deleteTopicsResult.put(topicName, future);
            }

            --timeoutNextRequests;
            return new DeleteTopicsResult(deleteTopicsResult);
        }

        for (final String topicName : topicsToDelete) {
            KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();

            if (allTopics.remove(topicName) == null) {
                future.completeExceptionally(new UnknownTopicOrPartitionException(String.format("Topic %s does not exist.", topicName)));
            } else {
                future.complete(null);
            }
            deleteTopicsResult.put(topicName, future);
        }

        return new DeleteTopicsResult(deleteTopicsResult);
    }

    @Override
    public CreatePartitionsResult createPartitions(Map<String, NewPartitions> newPartitions, CreatePartitionsOptions options) {
        throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public DeleteRecordsResult deleteRecords(Map<TopicPartition, RecordsToDelete> recordsToDelete, DeleteRecordsOptions options) {
        Map<TopicPartition, KafkaFuture<DeletedRecords>> deletedRecordsResult = new HashMap<>();
        if (recordsToDelete.isEmpty()) {
            return new DeleteRecordsResult(deletedRecordsResult);
        } else {
            throw new UnsupportedOperationException("Not implemented yet");
        }
    }

    @Override
    public CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options) {
        throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public DescribeAclsResult describeAcls(AclBindingFilter filter, DescribeAclsOptions options) {
        throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options) {
        throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
        Map<ConfigResource, KafkaFuture<Config>> configDescriptions = new HashMap<>();

        for (ConfigResource resource : resources) {
            if (resource.type() == ConfigResource.Type.TOPIC) {
                // NOTE: a request for an unknown topic will NPE here; callers are
                // expected to only describe topics previously added/created.
                Map<String, String> configs = allTopics.get(resource.name()).configs;
                List<ConfigEntry> configEntries = new ArrayList<>();
                for (Map.Entry<String, String> entry : configs.entrySet()) {
                    configEntries.add(new ConfigEntry(entry.getKey(), entry.getValue()));
                }
                KafkaFutureImpl<Config> future = new KafkaFutureImpl<>();
                future.complete(new Config(configEntries));
                configDescriptions.put(resource, future);
            } else {
                throw new UnsupportedOperationException("Not implemented yet");
            }
        }

        return new DescribeConfigsResult(configDescriptions);
    }

    @Override
    public AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, AlterConfigsOptions options) {
        throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public AlterReplicaLogDirsResult alterReplicaLogDirs(Map<TopicPartitionReplica, String> replicaAssignment, AlterReplicaLogDirsOptions options) {
        throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, DescribeLogDirsOptions options) {
        throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas, DescribeReplicaLogDirsOptions options) {
        throw new UnsupportedOperationException("Not implemented yet");
    }

    @Override
    public void close(long duration, TimeUnit unit) {}

    /** Immutable per-topic bookkeeping used by this mock. */
    private final static class TopicMetadata {
        final boolean isInternalTopic;
        final List<TopicPartitionInfo> partitions;
        final Map<String, String> configs;

        TopicMetadata(boolean isInternalTopic,
                      List<TopicPartitionInfo> partitions,
                      Map<String, String> configs) {
            this.isInternalTopic = isInternalTopic;
            this.partitions = partitions;
            this.configs = configs != null ? configs : Collections.<String, String>emptyMap();
        }
    }
}
| apache-2.0 |
RLDevOps/Demo | src/main/java/org/olat/core/util/notifications/SubscriptionItem.java | 1974 | /**
* OLAT - Online Learning and Training<br>
* http://www.olat.org
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); <br>
* you may not use this file except in compliance with the License.<br>
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing,<br>
* software distributed under the License is distributed on an "AS IS" BASIS, <br>
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <br>
* See the License for the specific language governing permissions and <br>
* limitations under the License.
* <p>
* Copyright (c) 1999-2006 at Multimedia- & E-Learning Services (MELS),<br>
* University of Zurich, Switzerland.
* <p>
*/
package org.olat.core.util.notifications;
/**
* Description: This item contains the title for the resource, as also a top-level link to it. SubsInfo holds all the detail info, if one/multiple news are available.
* <P/>
* Initial Date: Mar 9, 2005
*
* @author Felix Jost
*/
public class SubscriptionItem {

    private String title;
    private String link;
    private String description;
    private SubscriptionInfo subsInfo = null;

    /**
     * Creates an item describing a subscribed resource. The detail info
     * ({@link SubscriptionInfo}) is attached separately via
     * {@link #setSubsInfo(SubscriptionInfo)} once news are available.
     *
     * @param title display title of the resource
     * @param link top-level html link to the resource
     * @param description html description of the resource
     */
    public SubscriptionItem(String title, String link, String description) {
        this.title = title;
        this.link = link;
        this.description = description;
    }

    /**
     * @return the title
     */
    public String getTitle() {
        return this.title;
    }

    /**
     * @return the html-link
     */
    public String getLink() {
        return this.link;
    }

    /**
     * @return the description in html
     */
    public String getDescription() {
        return this.description;
    }

    /**
     * @return the detail info, or null if none has been set yet
     */
    public SubscriptionInfo getSubsInfo() {
        return this.subsInfo;
    }

    /**
     * @param subsInfo the detail info to attach to this item
     */
    public void setSubsInfo(SubscriptionInfo subsInfo) {
        this.subsInfo = subsInfo;
    }
}
| apache-2.0 |
apache/wink | wink-common/src/test/java/org/apache/wink/common/internal/providers/jaxb/AbstractJAXBCollectionProviderTest.java | 12020 | /*******************************************************************************
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*******************************************************************************/
package org.apache.wink.common.internal.providers.jaxb;
import java.io.ByteArrayInputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
import javax.ws.rs.core.GenericEntity;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.ext.ContextResolver;
import javax.ws.rs.ext.MessageBodyReader;
import javax.ws.rs.ext.MessageBodyWriter;
import javax.ws.rs.ext.Providers;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.stream.XMLStreamException;
import org.apache.wink.common.RuntimeContext;
import org.apache.wink.common.internal.WinkConfiguration;
import org.apache.wink.common.internal.contexts.MediaTypeCharsetAdjuster;
import org.apache.wink.common.internal.providers.entity.xml.JAXBCollectionXmlProvider;
import org.apache.wink.common.internal.providers.jaxb.jaxb1.AddNumbers;
import org.apache.wink.common.internal.providers.jaxb.jaxb1.MyPojo;
import org.apache.wink.common.internal.runtime.RuntimeContextTLS;
import org.apache.wink.common.model.JAXBUnmarshalOptions;
import org.apache.wink.common.model.XmlFormattingOptions;
import org.jmock.Expectations;
import org.jmock.integration.junit3.MockObjectTestCase;
import org.junit.After;
import org.junit.Before;
import org.w3c.dom.Document;
/**
 * Tests for {@link JAXBCollectionXmlProvider}: unmarshalling of XML collection payloads
 * into {@code List<JAXB-type>}, including verification that DOCTYPE/external-entity
 * (XXE) payloads are rejected with an XMLStreamException rather than resolved.
 */
public class AbstractJAXBCollectionProviderTest extends MockObjectTestCase {

    /**
     * Provider subclass that builds its JAXBContext from the package name, simulating an
     * application-supplied JAXBContext provider.
     */
    public class MyJAXBXmlProvider extends JAXBCollectionXmlProvider {

        MyJAXBXmlProvider() {
            super();
            // Wire in the mocked Providers created in setUp().
            providers = AbstractJAXBCollectionProviderTest.this.providers;
        }

        /*
         * simulate what would happen if application had supplied a JAXBContext provider
         */
        @Override
        protected JAXBContext getContext(Class<?> type, MediaType mediaType)
                throws JAXBException {
            // use JAXBContext.newInstance(String). The default in AbstractJAXBProvider is JAXBContext.newInstance(Class)
            return JAXBContext.newInstance(type.getPackage().getName());
        }
    }

    // Filesystem path of the test-classes directory, scraped from the classpath so the
    // XXE test XML below can reference etc/ProvidersJAXBTest.txt via a file: entity.
    static String path = null;
    static {
        String classpath = System.getProperty("java.class.path");
        StringTokenizer tokenizer = new StringTokenizer(classpath, System.getProperty("path.separator"));
        while (tokenizer.hasMoreTokens()) {
            path = tokenizer.nextToken();
            if (path.endsWith("test-classes")) {
                break;
            }
        }
        // for windows:
        // strip the drive prefix (e.g. "C:") so the file: URL below stays well-formed
        int driveIndex = path.indexOf(":");
        if(driveIndex != -1) {
            path = path.substring(driveIndex + 1);
        }
    }

    // Well-formed collection payload of two AddNumbers elements.
    static final String xml = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>" +
        "<addNumberss>" +
            "<ns2:addNumbers xmlns:ns2=\"http://org/apache/wink/common/internal/providers/jaxb/jaxb1\">" +
                "<ns2:arg0>1</ns2:arg0>" +
                "<ns2:arg1>2</ns2:arg1>" +
            "</ns2:addNumbers>" +
            "<ns2:addNumbers xmlns:ns2=\"http://org/apache/wink/common/internal/providers/jaxb/jaxb1\">" +
                "<ns2:arg0>1</ns2:arg0>" +
                "<ns2:arg1>3</ns2:arg1>" +
            "</ns2:addNumbers>" +
        "</addNumberss>";

    // Well-formed collection payload of two MyPojo elements.
    static final String xmlMyPojo = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>" +
        "<myPojos>" +
            "<ns2:myPojo xmlns:ns2=\"http://org/apache/wink/common/internal/providers/jaxb/jaxb1\">" +
                "<ns2:stringdata>1</ns2:stringdata>" +
            "</ns2:myPojo>" +
            "<ns2:myPojo xmlns:ns2=\"http://org/apache/wink/common/internal/providers/jaxb/jaxb1\">" +
                "<ns2:stringdata>2</ns2:stringdata>" +
            "</ns2:myPojo>" +
        "</myPojos>";

    // XXE payload: DOCTYPE declares an external file entity; the provider must reject it.
    static final String xmlWithDTD = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>" +
        "<!DOCTYPE data [<!ENTITY file SYSTEM \"file:"+ path +"/etc/ProvidersJAXBTest.txt\">]>" +
        "<addNumberss>" +
            "<ns2:addNumbers xmlns:ns2=\"http://org/apache/wink/common/internal/providers/jaxb/jaxb1\">" +
                "<ns2:arg0>&file;</ns2:arg0>" +
                "<ns2:arg1>2</ns2:arg1>" +
            "</ns2:addNumbers>" +
            "<ns2:addNumbers xmlns:ns2=\"http://org/apache/wink/common/internal/providers/jaxb/jaxb1\">" +
                "<ns2:arg0>&file;</ns2:arg0>" +
                "<ns2:arg1>3</ns2:arg1>" +
            "</ns2:addNumbers>" +
        "</addNumberss>";

    // Same XXE payload shape, but for the MyPojo collection root.
    static final String xmlMyPojoWithDTD = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>" +
        "<!DOCTYPE data [<!ENTITY file SYSTEM \"file:"+ path +"/etc/ProvidersJAXBTest.txt\">]>" +
        "<myPojos>" +
            "<ns2:myPojo xmlns:ns2=\"http://org/apache/wink/common/internal/providers/jaxb/jaxb1\">" +
                "<ns2:stringdata>&file;</ns2:stringdata>" +
            "</ns2:myPojo>" +
            "<ns2:myPojo xmlns:ns2=\"http://org/apache/wink/common/internal/providers/jaxb/jaxb1\">" +
                "<ns2:stringdata>&file;</ns2:stringdata>" +
            "</ns2:myPojo>" +
        "</myPojos>";

    // Providers under test; both are MyJAXBXmlProvider instances created in setUp().
    private MessageBodyReader jaxbProviderReader = null;
    private MessageBodyWriter jaxbProviderWriter = null;
    // jMock-backed Providers registry injected into the provider instances.
    private Providers providers;

    /** Simple ContextResolver that creates a JAXBContext directly from the requested class. */
    public class MyJAXBContextResolver implements ContextResolver<JAXBContext> {

        public JAXBContext getContext(Class<?> arg0) {
            try {
                return JAXBContext.newInstance(arg0);
            } catch (JAXBException e) {
                e.printStackTrace();
                throw new RuntimeException(e);
            }
        }
    }

    /**
     * Sets up the mocked Providers/RuntimeContext environment and creates the reader and
     * writer instances under test. The mock returns a real context resolver for
     * JAXBContext and null for the optional formatting/unmarshal options.
     */
    @Before
    public void setUp() {
        providers = mock(Providers.class);
        final RuntimeContext runtimeContext = mock(RuntimeContext.class);
        final WinkConfiguration winkConfiguration = mock(WinkConfiguration.class);
        checking(new Expectations() {{
            allowing(providers).getContextResolver(JAXBContext.class, MediaType.TEXT_XML_TYPE); will(returnValue(new MyJAXBContextResolver()));
            allowing(providers).getContextResolver(XmlFormattingOptions.class, MediaType.TEXT_XML_TYPE); will(returnValue(null));
            allowing(providers).getContextResolver(JAXBUnmarshalOptions.class, MediaType.TEXT_XML_TYPE); will(returnValue(null));
            allowing(runtimeContext).getAttribute(MediaTypeCharsetAdjuster.class); will(returnValue(null));
            allowing(runtimeContext).getAttribute(WinkConfiguration.class); will(returnValue(winkConfiguration));
            allowing(winkConfiguration).getProperties(); will(returnValue(null));
        }});
        RuntimeContextTLS.setRuntimeContext(runtimeContext);
        jaxbProviderReader = new MyJAXBXmlProvider();
        jaxbProviderWriter = new MyJAXBXmlProvider();
    }

    @After
    public void tearDown() {
        // clean up the mess.
        // RuntimeContextTLS is thread-local state; reset it so other tests are unaffected.
        RuntimeContextTLS.setRuntimeContext(null);
    }

    /**
     * Happy path: a collection payload unmarshals to an ArrayList whose elements are the
     * raw JAXB objects (not JAXBElement wrappers) with the expected field values.
     */
    public void testXml() throws Exception {
        GenericEntity<List<AddNumbers>> type1 =
            new GenericEntity<List<AddNumbers>>(new ArrayList<AddNumbers>()) {
            };
        assertTrue(jaxbProviderReader.isReadable(type1.getRawType(), type1.getType(), null, MediaType.TEXT_XML_TYPE));
        ByteArrayInputStream bais = new ByteArrayInputStream(xml.getBytes());
        Object obj = jaxbProviderReader.readFrom(type1.getRawType(), type1.getType(), null, MediaType.TEXT_XML_TYPE, null, bais);
        assertTrue(obj instanceof ArrayList);
        ArrayList alist = (ArrayList)obj;
        // make sure the objects in the returned list are the actual jaxb objects, not JAXBElement, and the unmarshal worked:
        assertEquals(1, ((AddNumbers)alist.get(0)).getArg0());
        assertEquals(2, ((AddNumbers)alist.get(0)).getArg1());
        assertEquals(1, ((AddNumbers)alist.get(1)).getArg0());
        assertEquals(3, ((AddNumbers)alist.get(1)).getArg1());
    }

    /**
     * Security: a payload carrying a DOCTYPE with an external file entity must be rejected
     * (XMLStreamException cause), even though the XML itself is well-formed — verified by
     * parsing it with a plain DocumentBuilder as a sanity check.
     */
    public void testXmlWithDTD() throws Exception {
        GenericEntity<List<AddNumbers>> type1 =
            new GenericEntity<List<AddNumbers>>(new ArrayList<AddNumbers>()) {
            };
        assertTrue(jaxbProviderReader.isReadable(type1.getRawType(), type1.getType(), null, MediaType.TEXT_XML_TYPE));
        Exception ex = null;
        try {
            ByteArrayInputStream bais = new ByteArrayInputStream(xmlWithDTD.getBytes());
            Object obj = jaxbProviderReader.readFrom(type1.getRawType(), type1.getType(), null, MediaType.TEXT_XML_TYPE, null, bais);
            fail("should have got an exception");
        } catch (Exception e) {
            ex = e;
        }
        assertTrue("expected an XMLStreamException", ex.getCause() instanceof XMLStreamException);

        // parse it just as a sanity check to make sure our xml is good.  No exceptions means good xml!
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        DocumentBuilder db = dbf.newDocumentBuilder();
        Document dom = db.parse(new ByteArrayInputStream(xmlWithDTD.getBytes()));
    }

    /**
     * Same happy-path check as {@link #testXml()} but for the MyPojo collection type.
     */
    public void testXmlMyPojo() throws Exception {
        GenericEntity<List<MyPojo>> type1 =
            new GenericEntity<List<MyPojo>>(new ArrayList<MyPojo>()) {
            };
        assertTrue(jaxbProviderReader.isReadable(type1.getRawType(), type1.getType(), null, MediaType.TEXT_XML_TYPE));
        ByteArrayInputStream bais = new ByteArrayInputStream(xmlMyPojo.getBytes());
        Object obj = jaxbProviderReader.readFrom(type1.getRawType(), type1.getType(), null, MediaType.TEXT_XML_TYPE, null, bais);
        assertTrue(obj instanceof ArrayList);
        ArrayList alist = (ArrayList)obj;
        // make sure the objects in the returned list are the actual jaxb objects, not JAXBElement, and the unmarshal worked:
        assertEquals("1", ((MyPojo)alist.get(0)).getStringdata());
        assertEquals("2", ((MyPojo)alist.get(1)).getStringdata());
    }

    /**
     * Same XXE-rejection check as {@link #testXmlWithDTD()} but for the MyPojo collection type.
     */
    public void testXmlMyPojoWithDTD() throws Exception {
        GenericEntity<List<MyPojo>> type1 =
            new GenericEntity<List<MyPojo>>(new ArrayList<MyPojo>()) {
            };
        assertTrue(jaxbProviderReader.isReadable(type1.getRawType(), type1.getType(), null, MediaType.TEXT_XML_TYPE));
        Exception ex = null;
        try {
            ByteArrayInputStream bais = new ByteArrayInputStream(xmlMyPojoWithDTD.getBytes());
            Object obj = jaxbProviderReader.readFrom(type1.getRawType(), type1.getType(), null, MediaType.TEXT_XML_TYPE, null, bais);
            fail("should have got an exception");
        } catch (Exception e) {
            ex = e;
        }
        assertTrue("expected an XMLStreamException", ex.getCause() instanceof XMLStreamException);

        // parse it just as a sanity check to make sure our xml is good.  No exceptions means good xml!
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        DocumentBuilder db = dbf.newDocumentBuilder();
        Document dom = db.parse(new ByteArrayInputStream(xmlMyPojoWithDTD.getBytes()));
    }
}
| apache-2.0 |
mdanielwork/intellij-community | platform/platform-api/src/com/intellij/ide/errorTreeView/ErrorTreeElementKind.java | 1965 | /*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.ide.errorTreeView;
import com.intellij.ide.IdeBundle;
import com.intellij.util.ui.MessageCategory;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.annotations.NotNull;
/**
 * Kinds of nodes shown in an error tree view. Each kind carries a stable
 * identifier used for debug output and a localized, user-visible name
 * (empty for {@link #GENERIC}).
 *
 * @author Eugene Zhuravlev
 */
public enum ErrorTreeElementKind {
  INFO ("INFO", IdeBundle.message("errortree.information")),
  ERROR ("ERROR", IdeBundle.message("errortree.error")),
  WARNING ("WARNING", IdeBundle.message("errortree.warning")),
  NOTE ("NOTE", IdeBundle.message("errortree.note")),
  GENERIC ("GENERIC", "");

  /** Stable identifier, used for debug output only. */
  private final String myText;
  /** Localized, user-visible name; empty string for GENERIC. */
  private final String myPresentableText;

  ErrorTreeElementKind(@NonNls String text, String presentableText) {
    myText = text;
    myPresentableText = presentableText;
  }

  /** Returns the debug identifier; not intended for display to users. */
  @Override
  public String toString() {
    return myText; // for debug purposes
  }

  public String getPresentableText() {
    return myPresentableText;
  }

  /**
   * Maps a {@link MessageCategory} constant to the corresponding tree element kind.
   * INFORMATION and STATISTICS both map to {@link #INFO}; unrecognized categories
   * fall back to {@link #GENERIC}.
   *
   * @param type one of the {@link MessageCategory} int constants
   * @return the matching kind, never null
   */
  @NotNull
  public static ErrorTreeElementKind convertMessageFromCompilerErrorType(int type) {
    switch(type) {
      case MessageCategory.ERROR : return ERROR;
      case MessageCategory.WARNING : return WARNING;
      case MessageCategory.INFORMATION : // fall through: both map to INFO
      case MessageCategory.STATISTICS : return INFO;
      case MessageCategory.SIMPLE : return GENERIC;
      case MessageCategory.NOTE : return NOTE;
      default : return GENERIC;
    }
  }
}
| apache-2.0 |
ptkool/spark | core/src/test/scala/org/apache/spark/util/UtilsSuite.scala | 52276 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataOutput, DataOutputStream, File,
FileOutputStream, InputStream, PrintStream, SequenceInputStream}
import java.lang.{Double => JDouble, Float => JFloat}
import java.lang.reflect.Field
import java.net.{BindException, ServerSocket, URI}
import java.nio.{ByteBuffer, ByteOrder}
import java.nio.charset.StandardCharsets.UTF_8
import java.text.DecimalFormatSymbols
import java.util.Locale
import java.util.concurrent.TimeUnit
import java.util.zip.GZIPOutputStream
import scala.collection.mutable.ListBuffer
import scala.util.Random
import com.google.common.io.Files
import org.apache.commons.io.IOUtils
import org.apache.commons.lang3.{JavaVersion, SystemUtils}
import org.apache.commons.math3.stat.inference.ChiSquareTest
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.{SparkConf, SparkException, SparkFunSuite, TaskContext}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.network.util.ByteUnit
import org.apache.spark.scheduler.SparkListener
import org.apache.spark.util.io.ChunkedByteBufferInputStream
class UtilsSuite extends SparkFunSuite with ResetSystemProperties with Logging {
test("timeConversion") {
// Covers Utils.timeStringAsSeconds/timeStringAsMs: bare numbers, every supported
// unit suffix (us/ms/s/m/min/h/d), negative and zero inputs, and rejection of
// malformed strings via NumberFormatException.
// Test -1
assert(Utils.timeStringAsSeconds("-1") === -1)
// Test zero
assert(Utils.timeStringAsSeconds("0") === 0)
assert(Utils.timeStringAsSeconds("1") === 1)
assert(Utils.timeStringAsSeconds("1s") === 1)
assert(Utils.timeStringAsSeconds("1000ms") === 1)
assert(Utils.timeStringAsSeconds("1000000us") === 1)
assert(Utils.timeStringAsSeconds("1m") === TimeUnit.MINUTES.toSeconds(1))
assert(Utils.timeStringAsSeconds("1min") === TimeUnit.MINUTES.toSeconds(1))
assert(Utils.timeStringAsSeconds("1h") === TimeUnit.HOURS.toSeconds(1))
assert(Utils.timeStringAsSeconds("1d") === TimeUnit.DAYS.toSeconds(1))
assert(Utils.timeStringAsMs("1") === 1)
assert(Utils.timeStringAsMs("1ms") === 1)
assert(Utils.timeStringAsMs("1000us") === 1)
assert(Utils.timeStringAsMs("1s") === TimeUnit.SECONDS.toMillis(1))
assert(Utils.timeStringAsMs("1m") === TimeUnit.MINUTES.toMillis(1))
assert(Utils.timeStringAsMs("1min") === TimeUnit.MINUTES.toMillis(1))
assert(Utils.timeStringAsMs("1h") === TimeUnit.HOURS.toMillis(1))
assert(Utils.timeStringAsMs("1d") === TimeUnit.DAYS.toMillis(1))
// Test invalid strings: unknown suffixes and text before/around/after the number.
intercept[NumberFormatException] {
Utils.timeStringAsMs("600l")
}
intercept[NumberFormatException] {
Utils.timeStringAsMs("This breaks 600s")
}
intercept[NumberFormatException] {
Utils.timeStringAsMs("This breaks 600ds")
}
intercept[NumberFormatException] {
Utils.timeStringAsMs("600s This breaks")
}
intercept[NumberFormatException] {
Utils.timeStringAsMs("This 123s breaks")
}
}
test("Test byteString conversion") {
// Covers Utils.byteStringAs{Bytes,Kb,Mb,Gb}: unit suffixes (b/k/m/g/t/p and the
// *b forms), truncating down-conversion, overflow detection at Long.MAX_VALUE,
// and rejection of fractional or malformed strings.
// Test zero
assert(Utils.byteStringAsBytes("0") === 0)
assert(Utils.byteStringAsGb("1") === 1)
assert(Utils.byteStringAsGb("1g") === 1)
assert(Utils.byteStringAsGb("1023m") === 0)
assert(Utils.byteStringAsGb("1024m") === 1)
assert(Utils.byteStringAsGb("1048575k") === 0)
assert(Utils.byteStringAsGb("1048576k") === 1)
assert(Utils.byteStringAsGb("1k") === 0)
assert(Utils.byteStringAsGb("1t") === ByteUnit.TiB.toGiB(1))
assert(Utils.byteStringAsGb("1p") === ByteUnit.PiB.toGiB(1))
assert(Utils.byteStringAsMb("1") === 1)
assert(Utils.byteStringAsMb("1m") === 1)
assert(Utils.byteStringAsMb("1048575b") === 0)
assert(Utils.byteStringAsMb("1048576b") === 1)
assert(Utils.byteStringAsMb("1023k") === 0)
assert(Utils.byteStringAsMb("1024k") === 1)
assert(Utils.byteStringAsMb("3645k") === 3)
assert(Utils.byteStringAsMb("1024gb") === 1048576)
assert(Utils.byteStringAsMb("1g") === ByteUnit.GiB.toMiB(1))
assert(Utils.byteStringAsMb("1t") === ByteUnit.TiB.toMiB(1))
assert(Utils.byteStringAsMb("1p") === ByteUnit.PiB.toMiB(1))
assert(Utils.byteStringAsKb("1") === 1)
assert(Utils.byteStringAsKb("1k") === 1)
assert(Utils.byteStringAsKb("1m") === ByteUnit.MiB.toKiB(1))
assert(Utils.byteStringAsKb("1g") === ByteUnit.GiB.toKiB(1))
assert(Utils.byteStringAsKb("1t") === ByteUnit.TiB.toKiB(1))
assert(Utils.byteStringAsKb("1p") === ByteUnit.PiB.toKiB(1))
assert(Utils.byteStringAsBytes("1") === 1)
assert(Utils.byteStringAsBytes("1k") === ByteUnit.KiB.toBytes(1))
assert(Utils.byteStringAsBytes("1m") === ByteUnit.MiB.toBytes(1))
assert(Utils.byteStringAsBytes("1g") === ByteUnit.GiB.toBytes(1))
assert(Utils.byteStringAsBytes("1t") === ByteUnit.TiB.toBytes(1))
assert(Utils.byteStringAsBytes("1p") === ByteUnit.PiB.toBytes(1))
// Overflow handling, 1073741824p exceeds Long.MAX_VALUE if converted straight to Bytes
// This demonstrates that we can have e.g 1024^3 PiB without overflowing.
assert(Utils.byteStringAsGb("1073741824p") === ByteUnit.PiB.toGiB(1073741824))
assert(Utils.byteStringAsMb("1073741824p") === ByteUnit.PiB.toMiB(1073741824))
// Run this to confirm it doesn't throw an exception
assert(Utils.byteStringAsBytes("9223372036854775807") === 9223372036854775807L)
assert(ByteUnit.PiB.toPiB(9223372036854775807L) === 9223372036854775807L)
// Test overflow exception
intercept[IllegalArgumentException] {
// This value exceeds Long.MAX when converted to bytes
Utils.byteStringAsBytes("9223372036854775808")
}
// Test overflow exception
intercept[IllegalArgumentException] {
// This value exceeds Long.MAX when converted to TiB
ByteUnit.PiB.toTiB(9223372036854775807L)
}
// Test fractional string
intercept[NumberFormatException] {
Utils.byteStringAsMb("0.064")
}
// Test fractional string
intercept[NumberFormatException] {
Utils.byteStringAsMb("0.064m")
}
// Test invalid strings
intercept[NumberFormatException] {
Utils.byteStringAsBytes("500ub")
}
// Test invalid strings
intercept[NumberFormatException] {
Utils.byteStringAsBytes("This breaks 600b")
}
intercept[NumberFormatException] {
Utils.byteStringAsBytes("This breaks 600")
}
intercept[NumberFormatException] {
Utils.byteStringAsBytes("600gb This breaks")
}
intercept[NumberFormatException] {
Utils.byteStringAsBytes("This 123mb breaks")
}
}
test("bytesToString") {
// Human-readable byte formatting across the whole magnitude range (B through EiB),
// including the BigInt path beyond Long range which formats in scientific notation.
assert(Utils.bytesToString(10) === "10.0 B")
assert(Utils.bytesToString(1500) === "1500.0 B")
assert(Utils.bytesToString(2000000) === "1953.1 KiB")
assert(Utils.bytesToString(2097152) === "2.0 MiB")
assert(Utils.bytesToString(2306867) === "2.2 MiB")
assert(Utils.bytesToString(5368709120L) === "5.0 GiB")
assert(Utils.bytesToString(5L * (1L << 40)) === "5.0 TiB")
assert(Utils.bytesToString(5L * (1L << 50)) === "5.0 PiB")
assert(Utils.bytesToString(5L * (1L << 60)) === "5.0 EiB")
assert(Utils.bytesToString(BigInt(1L << 11) * (1L << 60)) === "2.36E+21 B")
}
test("copyStream") {
// Round-trip 9000 random bytes through Utils.copyStream and verify the
// destination receives an exact copy.
val payload = new Array[Byte](9000)
Random.nextBytes(payload)
val sink = new ByteArrayOutputStream()
Utils.copyStream(new ByteArrayInputStream(payload), sink)
assert(sink.toByteArray.sameElements(payload))
}
test("copyStreamUpTo") {
// Exercises Utils.copyStreamUpTo around the buffering limit: inputs shorter than
// the limit come back as a ChunkedByteBufferInputStream, inputs at/over it as a
// SequenceInputStream; also checks the buffered chunk is released once read past.
// input array initialization
val bytes = Array.ofDim[Byte](1200)
Random.nextBytes(bytes)
val limit = 1000
// testing for inputLength less than, equal to and greater than limit
(limit - 2 to limit + 2).foreach { inputLength =>
val in = new ByteArrayInputStream(bytes.take(inputLength))
val mergedStream = Utils.copyStreamUpTo(in, limit)
try {
// Get a handle on the buffered data, to make sure memory gets freed once we read past the
// end of it. Need to use reflection to get handle on inner structures for this check
val byteBufferInputStream = if (mergedStream.isInstanceOf[ChunkedByteBufferInputStream]) {
assert(inputLength < limit)
mergedStream.asInstanceOf[ChunkedByteBufferInputStream]
} else {
assert(inputLength >= limit)
val sequenceStream = mergedStream.asInstanceOf[SequenceInputStream]
val fieldValue = getFieldValue(sequenceStream, "in")
assert(fieldValue.isInstanceOf[ChunkedByteBufferInputStream])
fieldValue.asInstanceOf[ChunkedByteBufferInputStream]
}
(0 until inputLength).foreach { idx =>
assert(bytes(idx) === mergedStream.read().asInstanceOf[Byte])
if (idx == limit) {
// Past the buffered prefix the chunked buffer must have been disposed.
assert(byteBufferInputStream.chunkedByteBuffer === null)
}
}
assert(mergedStream.read() === -1)
assert(byteBufferInputStream.chunkedByteBuffer === null)
} finally {
IOUtils.closeQuietly(mergedStream)
IOUtils.closeQuietly(in)
}
}
}
// Reflectively reads `fieldName` from `obj`. When the field is not already
// accessible, access is widened for the read and then reset, matching the
// field's original accessibility flag.
private def getFieldValue(obj: AnyRef, fieldName: String): Any = {
  val field: Field = obj.getClass().getDeclaredField(fieldName)
  if (!field.isAccessible()) {
    field.setAccessible(true)
    val value = field.get(obj)
    field.setAccessible(false)
    value
  } else {
    field.get(obj)
  }
}
test("memoryStringToMb") {
// Legacy memory-string parser: bare numbers are bytes (truncated down to MB),
// k/m/g/t suffixes are case-insensitive.
assert(Utils.memoryStringToMb("1") === 0)
assert(Utils.memoryStringToMb("1048575") === 0)
assert(Utils.memoryStringToMb("3145728") === 3)
assert(Utils.memoryStringToMb("1024k") === 1)
assert(Utils.memoryStringToMb("5000k") === 4)
assert(Utils.memoryStringToMb("4024k") === Utils.memoryStringToMb("4024K"))
assert(Utils.memoryStringToMb("1024m") === 1024)
assert(Utils.memoryStringToMb("5000m") === 5000)
assert(Utils.memoryStringToMb("4024m") === Utils.memoryStringToMb("4024M"))
assert(Utils.memoryStringToMb("2g") === 2048)
assert(Utils.memoryStringToMb("3g") === Utils.memoryStringToMb("3G"))
assert(Utils.memoryStringToMb("2t") === 2097152)
assert(Utils.memoryStringToMb("3t") === Utils.memoryStringToMb("3T"))
}
test("splitCommandString") {
// Shell-like tokenization: whitespace splitting, single/double quoting, escapes
// inside double quotes, adjacent quoted segments concatenating, and empty tokens.
assert(Utils.splitCommandString("") === Seq())
assert(Utils.splitCommandString("a") === Seq("a"))
assert(Utils.splitCommandString("aaa") === Seq("aaa"))
assert(Utils.splitCommandString("a b c") === Seq("a", "b", "c"))
assert(Utils.splitCommandString(" a b\t c ") === Seq("a", "b", "c"))
assert(Utils.splitCommandString("a 'b c'") === Seq("a", "b c"))
assert(Utils.splitCommandString("a 'b c' d") === Seq("a", "b c", "d"))
assert(Utils.splitCommandString("'b c'") === Seq("b c"))
assert(Utils.splitCommandString("a \"b c\"") === Seq("a", "b c"))
assert(Utils.splitCommandString("a \"b c\" d") === Seq("a", "b c", "d"))
assert(Utils.splitCommandString("\"b c\"") === Seq("b c"))
assert(Utils.splitCommandString("a 'b\" c' \"d' e\"") === Seq("a", "b\" c", "d' e"))
assert(Utils.splitCommandString("a\t'b\nc'\nd") === Seq("a", "b\nc", "d"))
assert(Utils.splitCommandString("a \"b\\\\c\"") === Seq("a", "b\\c"))
assert(Utils.splitCommandString("a \"b\\\"c\"") === Seq("a", "b\"c"))
assert(Utils.splitCommandString("a 'b\\\"c'") === Seq("a", "b\\\"c"))
assert(Utils.splitCommandString("'a'b") === Seq("ab"))
assert(Utils.splitCommandString("'a''b'") === Seq("ab"))
assert(Utils.splitCommandString("\"a\"b") === Seq("ab"))
assert(Utils.splitCommandString("\"a\"\"b\"") === Seq("ab"))
assert(Utils.splitCommandString("''") === Seq(""))
assert(Utils.splitCommandString("\"\"") === Seq(""))
}
test("string formatting of time durations") {
// Utils.msDurationToString picks the unit (ms/s/m/h) by magnitude; the expected
// strings are built with the US decimal separator to stay locale-independent.
val second = 1000
val minute = second * 60
val hour = minute * 60
def str: (Long) => String = Utils.msDurationToString(_)
val sep = new DecimalFormatSymbols(Locale.US).getDecimalSeparator
assert(str(123) === "123 ms")
assert(str(second) === "1" + sep + "0 s")
assert(str(second + 462) === "1" + sep + "5 s")
assert(str(hour) === "1" + sep + "00 h")
assert(str(minute) === "1" + sep + "0 m")
assert(str(minute + 4 * second + 34) === "1" + sep + "1 m")
assert(str(10 * hour + minute + 4 * second) === "10" + sep + "02 h")
assert(str(10 * hour + 59 * minute + 59 * second + 999) === "11" + sep + "00 h")
}
/** Filename suffix for the given compression mode: ".gz" when compressed, "" otherwise. */
def getSuffix(isCompressed: Boolean): String = if (isCompressed) ".gz" else ""
/**
 * Writes `content` to `path`, gzip-compressing when the path ends in ".gz".
 * Fixes two issues in the original: the stream leaked if `IOUtils.write` threw
 * (now closed in a finally), and a dead trailing `content.size` expression
 * (silently discarded by the Unit return type) is removed.
 */
def writeLogFile(path: String, content: Array[Byte]): Unit = {
  val outputStream = if (path.endsWith(".gz")) {
    new GZIPOutputStream(new FileOutputStream(path))
  } else {
    new FileOutputStream(path)
  }
  try {
    IOUtils.write(content, outputStream)
  } finally {
    outputStream.close()
  }
}
private val workerConf = new SparkConf()
// Shared body for the single-file Utils.offsetBytes tests: writes one (optionally
// gzipped) log file and checks reads at the start, middle, end, and with offsets
// clamped when they fall outside the file.
def testOffsetBytes(isCompressed: Boolean): Unit = {
withTempDir { tmpDir2 =>
val suffix = getSuffix(isCompressed)
val f1Path = tmpDir2 + "/f1" + suffix
writeLogFile(f1Path, "1\n2\n3\n4\n5\n6\n7\n8\n9\n".getBytes(UTF_8))
val f1Length = Utils.getFileLength(new File(f1Path), workerConf)
// Read first few bytes
assert(Utils.offsetBytes(f1Path, f1Length, 0, 5) === "1\n2\n3")
// Read some middle bytes
assert(Utils.offsetBytes(f1Path, f1Length, 4, 11) === "3\n4\n5\n6")
// Read last few bytes
assert(Utils.offsetBytes(f1Path, f1Length, 12, 18) === "7\n8\n9\n")
// Read some nonexistent bytes in the beginning
assert(Utils.offsetBytes(f1Path, f1Length, -5, 5) === "1\n2\n3")
// Read some nonexistent bytes at the end
assert(Utils.offsetBytes(f1Path, f1Length, 12, 22) === "7\n8\n9\n")
// Read some nonexistent bytes on both ends
assert(Utils.offsetBytes(f1Path, f1Length, -3, 25) === "1\n2\n3\n4\n5\n6\n7\n8\n9\n")
}
}
// Run the single-file offset tests for plain and gzipped files.
test("reading offset bytes of a file") {
testOffsetBytes(isCompressed = false)
}
test("reading offset bytes of a file (compressed)") {
testOffsetBytes(isCompressed = true)
}
// Shared body for the multi-file Utils.offsetBytes tests: writes four sequential
// log files (three optionally gzipped, the last plain) and checks reads within
// one file, spanning file boundaries, and with out-of-range offsets clamped.
def testOffsetBytesMultipleFiles(isCompressed: Boolean): Unit = {
withTempDir { tmpDir =>
val suffix = getSuffix(isCompressed)
val files = (1 to 3).map(i =>
new File(tmpDir, i.toString + suffix)) :+ new File(tmpDir, "4")
writeLogFile(files(0).getAbsolutePath, "0123456789".getBytes(UTF_8))
writeLogFile(files(1).getAbsolutePath, "abcdefghij".getBytes(UTF_8))
writeLogFile(files(2).getAbsolutePath, "ABCDEFGHIJ".getBytes(UTF_8))
writeLogFile(files(3).getAbsolutePath, "9876543210".getBytes(UTF_8))
val fileLengths = files.map(Utils.getFileLength(_, workerConf))
// Read first few bytes in the 1st file
assert(Utils.offsetBytes(files, fileLengths, 0, 5) === "01234")
// Read bytes within the 1st file
assert(Utils.offsetBytes(files, fileLengths, 5, 8) === "567")
// Read bytes across 1st and 2nd file
assert(Utils.offsetBytes(files, fileLengths, 8, 18) === "89abcdefgh")
// Read bytes across 1st, 2nd and 3rd file
assert(Utils.offsetBytes(files, fileLengths, 5, 24) === "56789abcdefghijABCD")
// Read bytes across 3rd and 4th file
assert(Utils.offsetBytes(files, fileLengths, 25, 35) === "FGHIJ98765")
// Read some nonexistent bytes in the beginning
assert(Utils.offsetBytes(files, fileLengths, -5, 18) === "0123456789abcdefgh")
// Read some nonexistent bytes at the end
assert(Utils.offsetBytes(files, fileLengths, 18, 45) === "ijABCDEFGHIJ9876543210")
// Read some nonexistent bytes on both ends
assert(Utils.offsetBytes(files, fileLengths, -5, 45) ===
"0123456789abcdefghijABCDEFGHIJ9876543210")
}
}
// Run the multi-file offset tests for plain and gzipped files.
test("reading offset bytes across multiple files") {
testOffsetBytesMultipleFiles(isCompressed = false)
}
test("reading offset bytes across multiple files (compressed)") {
testOffsetBytesMultipleFiles(isCompressed = true)
}
test("deserialize long value") {
// A big-endian 8-byte encoding must round-trip through Utils.deserializeLongValue.
val testval : Long = 9730889947L
val bbuf = ByteBuffer.allocate(8)
assert(bbuf.hasArray)
bbuf.order(ByteOrder.BIG_ENDIAN)
bbuf.putLong(testval)
assert(bbuf.array.length === 8)
assert(Utils.deserializeLongValue(bbuf.array) === testval)
}
test("writeByteBuffer should not change ByteBuffer position") {
// Both Utils.writeByteBuffer overloads (OutputStream and DataOutput) must leave
// the source buffer's position untouched, for heap-backed and direct buffers.
// Test a buffer with an underlying array, for both writeByteBuffer methods.
val testBuffer = ByteBuffer.wrap(Array[Byte](1, 2, 3, 4))
assert(testBuffer.hasArray)
val bytesOut = new ByteBufferOutputStream(4096)
Utils.writeByteBuffer(testBuffer, bytesOut)
assert(testBuffer.position() === 0)
val dataOut = new DataOutputStream(bytesOut)
Utils.writeByteBuffer(testBuffer, dataOut: DataOutput)
assert(testBuffer.position() === 0)
// Test a buffer without an underlying array, for both writeByteBuffer methods.
val testDirectBuffer = ByteBuffer.allocateDirect(8)
assert(!testDirectBuffer.hasArray())
Utils.writeByteBuffer(testDirectBuffer, bytesOut)
assert(testDirectBuffer.position() === 0)
Utils.writeByteBuffer(testDirectBuffer, dataOut: DataOutput)
assert(testDirectBuffer.position() === 0)
}
test("get iterator size") {
// Utils.getIteratorSize counts elements: empty iterator and a 5-element range.
val empty = Seq[Int]()
assert(Utils.getIteratorSize(empty.toIterator) === 0L)
val iterator = Iterator.range(0, 5)
assert(Utils.getIteratorSize(iterator) === 5L)
}
test("getIteratorZipWithIndex") {
// Indices are Longs so the start index may exceed Int.MaxValue without
// wrapping; negative start indices are rejected.
val iterator = Utils.getIteratorZipWithIndex(Iterator(0, 1, 2), -1L + Int.MaxValue)
assert(iterator.toArray === Array(
(0, -1L + Int.MaxValue), (1, 0L + Int.MaxValue), (2, 1L + Int.MaxValue)
))
intercept[IllegalArgumentException] {
Utils.getIteratorZipWithIndex(Iterator(0, 1, 2), -1L)
}
}
test("doesDirectoryContainFilesNewerThan") {
// Utils.doesDirectoryContainAnyNewFiles must search recursively: the directory
// only counts as "all old" once every nested entry has an old mtime.
// create some temporary directories and files
withTempDir { parent =>
// The parent directory has two child directories
val child1: File = Utils.createTempDir(parent.getCanonicalPath)
val child2: File = Utils.createTempDir(parent.getCanonicalPath)
val child3: File = Utils.createTempDir(child1.getCanonicalPath)
// set the last modified time of child1 to 30 secs old
child1.setLastModified(System.currentTimeMillis() - (1000 * 30))
// although child1 is old, child2 is still new so return true
assert(Utils.doesDirectoryContainAnyNewFiles(parent, 5))
child2.setLastModified(System.currentTimeMillis - (1000 * 30))
assert(Utils.doesDirectoryContainAnyNewFiles(parent, 5))
parent.setLastModified(System.currentTimeMillis - (1000 * 30))
// although parent and its immediate children are new, child3 is still old
// we expect a full recursive search for new files.
assert(Utils.doesDirectoryContainAnyNewFiles(parent, 5))
child3.setLastModified(System.currentTimeMillis - (1000 * 30))
assert(!Utils.doesDirectoryContainAnyNewFiles(parent, 5))
}
}
test("resolveURI") {
// Utils.resolveURI must be idempotent, default bare paths to file: under the
// current working directory, percent-encode spaces, and leave scheme-qualified
// URIs (hdfs:, file:) untouched. Windows drive paths are covered conditionally.
def assertResolves(before: String, after: String): Unit = {
// This should test only single paths
assert(before.split(",").length === 1)
def resolve(uri: String): String = Utils.resolveURI(uri).toString
assert(resolve(before) === after)
assert(resolve(after) === after)
// Repeated invocations of resolveURI should yield the same result
assert(resolve(resolve(after)) === after)
assert(resolve(resolve(resolve(after))) === after)
}
val rawCwd = System.getProperty("user.dir")
val cwd = if (Utils.isWindows) s"/$rawCwd".replace("\\", "/") else rawCwd
assertResolves("hdfs:/root/spark.jar", "hdfs:/root/spark.jar")
assertResolves("hdfs:///root/spark.jar#app.jar", "hdfs:///root/spark.jar#app.jar")
assertResolves("spark.jar", s"file:$cwd/spark.jar")
assertResolves("spark.jar#app.jar", s"file:$cwd/spark.jar#app.jar")
assertResolves("path to/file.txt", s"file:$cwd/path%20to/file.txt")
if (Utils.isWindows) {
assertResolves("C:\\path\\to\\file.txt", "file:/C:/path/to/file.txt")
assertResolves("C:\\path to\\file.txt", "file:/C:/path%20to/file.txt")
}
assertResolves("file:/C:/path/to/file.txt", "file:/C:/path/to/file.txt")
assertResolves("file:///C:/path/to/file.txt", "file:///C:/path/to/file.txt")
assertResolves("file:/C:/file.txt#alias.txt", "file:/C:/file.txt#alias.txt")
assertResolves("file:foo", "file:foo")
assertResolves("file:foo:baby", "file:foo:baby")
}
test("resolveURIs with multiple paths") {
// Utils.resolveURIs resolves comma-separated path lists element-wise, skipping
// empty entries, and must also be idempotent.
def assertResolves(before: String, after: String): Unit = {
def resolve(uri: String): String = Utils.resolveURIs(uri)
assert(resolve(before) === after)
assert(resolve(after) === after)
// Repeated invocations of resolveURIs should yield the same result
assert(resolve(resolve(after)) === after)
assert(resolve(resolve(resolve(after))) === after)
}
val rawCwd = System.getProperty("user.dir")
val cwd = if (Utils.isWindows) s"/$rawCwd".replace("\\", "/") else rawCwd
assertResolves("jar1,jar2", s"file:$cwd/jar1,file:$cwd/jar2")
assertResolves("file:/jar1,file:/jar2", "file:/jar1,file:/jar2")
assertResolves("hdfs:/jar1,file:/jar2,jar3", s"hdfs:/jar1,file:/jar2,file:$cwd/jar3")
assertResolves("hdfs:/jar1,file:/jar2,jar3,jar4#jar5,path to/jar6",
s"hdfs:/jar1,file:/jar2,file:$cwd/jar3,file:$cwd/jar4#jar5,file:$cwd/path%20to/jar6")
if (Utils.isWindows) {
assertResolves("""hdfs:/jar1,file:/jar2,jar3,C:\pi.py#py.pi,C:\path to\jar4""",
s"hdfs:/jar1,file:/jar2,file:$cwd/jar3,file:/C:/pi.py%23py.pi,file:/C:/path%20to/jar4")
}
assertResolves(",jar1,jar2", s"file:$cwd/jar1,file:$cwd/jar2")
// Also test resolveURIs with single paths
assertResolves("hdfs:/root/spark.jar", "hdfs:/root/spark.jar")
}
test("nonLocalPaths") {
// Utils.nonLocalPaths keeps only paths with a non-local scheme (hdfs:, s3:),
// filtering out bare paths and file:/local: URIs; Windows drive letters
// (C:/...) must not be mistaken for URI schemes.
assert(Utils.nonLocalPaths("spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("file:/spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("file:///spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("local:/spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("local:///spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("hdfs:/spark.jar") === Array("hdfs:/spark.jar"))
assert(Utils.nonLocalPaths("hdfs:///spark.jar") === Array("hdfs:///spark.jar"))
assert(Utils.nonLocalPaths("file:/spark.jar,local:/smart.jar,family.py") === Array.empty)
assert(Utils.nonLocalPaths("local:/spark.jar,file:/smart.jar,family.py") === Array.empty)
assert(Utils.nonLocalPaths("hdfs:/spark.jar,s3:/smart.jar") ===
Array("hdfs:/spark.jar", "s3:/smart.jar"))
assert(Utils.nonLocalPaths("hdfs:/spark.jar,path to/a.jar,s3:/smart.jar") ===
Array("hdfs:/spark.jar", "s3:/smart.jar"))
assert(Utils.nonLocalPaths("hdfs:/spark.jar,s3:/smart.jar,local.py,file:/hello/pi.py") ===
Array("hdfs:/spark.jar", "s3:/smart.jar"))
assert(Utils.nonLocalPaths("local.py,hdfs:/spark.jar,file:/hello/pi.py,s3:/smart.jar") ===
Array("hdfs:/spark.jar", "s3:/smart.jar"))
// Test Windows paths
assert(Utils.nonLocalPaths("C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("file:/C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("file:///C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("local:/C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("local:///C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("hdfs:/a.jar,C:/my.jar,s3:/another.jar", testWindows = true) ===
Array("hdfs:/a.jar", "s3:/another.jar"))
assert(Utils.nonLocalPaths("D:/your.jar,hdfs:/a.jar,s3:/another.jar", testWindows = true) ===
Array("hdfs:/a.jar", "s3:/another.jar"))
assert(Utils.nonLocalPaths("hdfs:/a.jar,s3:/another.jar,e:/our.jar", testWindows = true) ===
Array("hdfs:/a.jar", "s3:/another.jar"))
}
test("isBindCollision") {
// Utils.isBindCollision must find a BindException anywhere in a cause chain,
// and recognize a real bind failure from double-binding the same port.
// Negatives
assert(!Utils.isBindCollision(null))
assert(!Utils.isBindCollision(new Exception))
assert(!Utils.isBindCollision(new Exception(new Exception)))
assert(!Utils.isBindCollision(new Exception(new BindException)))
// Positives
val be = new BindException("Random Message")
val be1 = new Exception(new BindException("Random Message"))
val be2 = new Exception(new Exception(new BindException("Random Message")))
assert(Utils.isBindCollision(be))
assert(Utils.isBindCollision(be1))
assert(Utils.isBindCollision(be2))
// Actual bind exception
var server1: ServerSocket = null
var server2: ServerSocket = null
try {
server1 = new java.net.ServerSocket(0)
server2 = new java.net.ServerSocket(server1.getLocalPort)
} catch {
case e: Exception =>
assert(e.isInstanceOf[java.net.BindException])
assert(Utils.isBindCollision(e))
} finally {
Option(server1).foreach(_.close())
Option(server2).foreach(_.close())
}
}
// Test for using the util function to change our log levels.
test("log4j log level change") {
// Utils.setLogLevel changes the root logger's effective level; the original
// level is restored in finally so other tests are unaffected.
val current = org.apache.log4j.Logger.getRootLogger().getLevel()
try {
Utils.setLogLevel(org.apache.log4j.Level.ALL)
assert(log.isInfoEnabled())
Utils.setLogLevel(org.apache.log4j.Level.ERROR)
assert(!log.isInfoEnabled())
assert(log.isErrorEnabled())
} finally {
// Best effort at undoing changes this test made.
Utils.setLogLevel(current)
}
}
test("deleteRecursively") {
// Utils.deleteRecursively handles a plain directory, a single file, and a
// nested directory tree, removing everything it is pointed at.
val tempDir1 = Utils.createTempDir()
assert(tempDir1.exists())
Utils.deleteRecursively(tempDir1)
assert(!tempDir1.exists())
val tempDir2 = Utils.createTempDir()
val sourceFile1 = new File(tempDir2, "foo.txt")
Files.touch(sourceFile1)
assert(sourceFile1.exists())
Utils.deleteRecursively(sourceFile1)
assert(!sourceFile1.exists())
val tempDir3 = new File(tempDir2, "subdir")
assert(tempDir3.mkdir())
val sourceFile2 = new File(tempDir3, "bar.txt")
Files.touch(sourceFile2)
assert(sourceFile2.exists())
Utils.deleteRecursively(tempDir2)
assert(!tempDir2.exists())
assert(!tempDir3.exists())
assert(!sourceFile2.exists())
}
test("loading properties from file") {
// Properties loaded from a file populate SparkConf via system properties, but
// a value already set as a system property wins over the file's value.
withTempDir { tmpDir =>
val outFile = File.createTempFile("test-load-spark-properties", "test", tmpDir)
System.setProperty("spark.test.fileNameLoadB", "2")
Files.write("spark.test.fileNameLoadA true\n" +
"spark.test.fileNameLoadB 1\n", outFile, UTF_8)
val properties = Utils.getPropertiesFromFile(outFile.getAbsolutePath)
properties
.filter { case (k, v) => k.startsWith("spark.")}
.foreach { case (k, v) => sys.props.getOrElseUpdate(k, v)}
val sparkConf = new SparkConf
assert(sparkConf.getBoolean("spark.test.fileNameLoadA", false))
assert(sparkConf.getInt("spark.test.fileNameLoadB", 1) === 2)
}
}
test("timeIt with prepare") {
// Utils.timeIt must invoke the prepare callback once per iteration while
// excluding the time spent in it (each prepare sleeps 1s; measured work is ~0).
var cnt = 0
val prepare = () => {
cnt += 1
Thread.sleep(1000)
}
val time = Utils.timeIt(2)({}, Some(prepare))
require(cnt === 2, "prepare should be called twice")
require(time < TimeUnit.MILLISECONDS.toNanos(500), "preparation time should not count")
}
test("fetch hcfs dir") {
// Utils.fetchHcfsFile copies a directory tree from a Hadoop filesystem:
// nested dirs and files arrive intact, re-fetching an existing target does
// not error, and a single file can be fetched under an explicit new name.
withTempDir { tempDir =>
val sourceDir = new File(tempDir, "source-dir")
sourceDir.mkdir()
val innerSourceDir = Utils.createTempDir(root = sourceDir.getPath)
val sourceFile = File.createTempFile("someprefix", "somesuffix", innerSourceDir)
val targetDir = new File(tempDir, "target-dir")
Files.write("some text", sourceFile, UTF_8)
val path =
if (Utils.isWindows) {
new Path("file:/" + sourceDir.getAbsolutePath.replace("\\", "/"))
} else {
new Path("file://" + sourceDir.getAbsolutePath)
}
val conf = new Configuration()
val fs = Utils.getHadoopFileSystem(path.toString, conf)
assert(!targetDir.isDirectory())
Utils.fetchHcfsFile(path, targetDir, fs, new SparkConf(), conf, false)
assert(targetDir.isDirectory())
// Copy again to make sure it doesn't error if the dir already exists.
Utils.fetchHcfsFile(path, targetDir, fs, new SparkConf(), conf, false)
val destDir = new File(targetDir, sourceDir.getName())
assert(destDir.isDirectory())
val destInnerDir = new File(destDir, innerSourceDir.getName)
assert(destInnerDir.isDirectory())
val destInnerFile = new File(destInnerDir, sourceFile.getName)
assert(destInnerFile.isFile())
val filePath =
if (Utils.isWindows) {
new Path("file:/" + sourceFile.getAbsolutePath.replace("\\", "/"))
} else {
new Path("file://" + sourceFile.getAbsolutePath)
}
val testFileDir = new File(tempDir, "test-filename")
val testFileName = "testFName"
val testFilefs = Utils.getHadoopFileSystem(filePath.toString, conf)
Utils.fetchHcfsFile(filePath, testFileDir, testFilefs, new SparkConf(),
conf, false, Some(testFileName))
val newFileName = new File(testFileDir, testFileName)
assert(newFileName.isFile())
}
}
test("shutdown hook manager") {
// Hooks run in descending priority order, and a removed hook does not run.
val manager = new SparkShutdownHookManager()
val output = new ListBuffer[Int]()
val hook1 = manager.add(1, () => output += 1)
manager.add(3, () => output += 3)
manager.add(2, () => output += 2)
manager.add(4, () => output += 4)
manager.remove(hook1)
manager.runAll()
assert(output.toList === List(4, 3, 2))
}
test("isInDirectory") {
// Utils.isInDirectory: true for identity and genuine ancestor/descendant pairs;
// false for inverted pairs, siblings, non-existent paths, and nulls (no NPE).
val tmpDir = new File(sys.props("java.io.tmpdir"))
val parentDir = new File(tmpDir, "parent-dir")
val childDir1 = new File(parentDir, "child-dir-1")
val childDir1b = new File(parentDir, "child-dir-1b")
val childFile1 = new File(parentDir, "child-file-1.txt")
val childDir2 = new File(childDir1, "child-dir-2")
val childDir2b = new File(childDir1, "child-dir-2b")
val childFile2 = new File(childDir1, "child-file-2.txt")
val childFile3 = new File(childDir2, "child-file-3.txt")
val nullFile: File = null
parentDir.mkdir()
childDir1.mkdir()
childDir1b.mkdir()
childDir2.mkdir()
childDir2b.mkdir()
childFile1.createNewFile()
childFile2.createNewFile()
childFile3.createNewFile()
// Identity
assert(Utils.isInDirectory(parentDir, parentDir))
assert(Utils.isInDirectory(childDir1, childDir1))
assert(Utils.isInDirectory(childDir2, childDir2))
// Valid ancestor-descendant pairs
assert(Utils.isInDirectory(parentDir, childDir1))
assert(Utils.isInDirectory(parentDir, childFile1))
assert(Utils.isInDirectory(parentDir, childDir2))
assert(Utils.isInDirectory(parentDir, childFile2))
assert(Utils.isInDirectory(parentDir, childFile3))
assert(Utils.isInDirectory(childDir1, childDir2))
assert(Utils.isInDirectory(childDir1, childFile2))
assert(Utils.isInDirectory(childDir1, childFile3))
assert(Utils.isInDirectory(childDir2, childFile3))
// Inverted ancestor-descendant pairs should fail
assert(!Utils.isInDirectory(childDir1, parentDir))
assert(!Utils.isInDirectory(childDir2, parentDir))
assert(!Utils.isInDirectory(childDir2, childDir1))
assert(!Utils.isInDirectory(childFile1, parentDir))
assert(!Utils.isInDirectory(childFile2, parentDir))
assert(!Utils.isInDirectory(childFile3, parentDir))
assert(!Utils.isInDirectory(childFile2, childDir1))
assert(!Utils.isInDirectory(childFile3, childDir1))
assert(!Utils.isInDirectory(childFile3, childDir2))
// Non-existent files or directories should fail
assert(!Utils.isInDirectory(parentDir, new File(parentDir, "one.txt")))
assert(!Utils.isInDirectory(parentDir, new File(parentDir, "one/two.txt")))
assert(!Utils.isInDirectory(parentDir, new File(parentDir, "one/two/three.txt")))
// Siblings should fail
assert(!Utils.isInDirectory(childDir1, childDir1b))
assert(!Utils.isInDirectory(childDir1, childFile1))
assert(!Utils.isInDirectory(childDir2, childDir2b))
assert(!Utils.isInDirectory(childDir2, childFile2))
// Null files should fail without throwing NPE
assert(!Utils.isInDirectory(parentDir, nullFile))
assert(!Utils.isInDirectory(childFile3, nullFile))
assert(!Utils.isInDirectory(nullFile, parentDir))
assert(!Utils.isInDirectory(nullFile, childFile3))
}
// CircularBuffer keeps only the most recently written bytes, up to its size.
test("circular buffer: if nothing was written to the buffer, display nothing") {
  assert(new CircularBuffer(4).toString === "")
}

test("circular buffer: if the buffer isn't full, print only the contents written") {
  val buf = new CircularBuffer(10)
  new PrintStream(buf, true, UTF_8.name()).print("test")
  assert(buf.toString === "test")
}

test("circular buffer: data written == size of the buffer") {
  val buf = new CircularBuffer(4)
  val out = new PrintStream(buf, true, UTF_8.name())
  // "test" exactly fills the 4-byte buffer, so it just hits overflow.
  out.print("test")
  assert(buf.toString === "test")
  // Two more bytes push out the two oldest ("te").
  out.print("12")
  assert(buf.toString === "st12")
}

test("circular buffer: multiple overflow") {
  val buf = new CircularBuffer(25)
  val out = new PrintStream(buf, true, UTF_8.name())
  // 70 bytes into a 25-byte buffer: only the last 25 survive.
  out.print("test circular test circular test circular test circular test circular")
  assert(buf.toString === "st circular test circular")
}
// Each conf.set(...) below mutates and returns the SAME SparkConf instance,
// so the settings accumulate and the order of assertions matters.
test("isDynamicAllocationEnabled") {
  val conf = new SparkConf()
  conf.set("spark.master", "yarn")
  conf.set(SUBMIT_DEPLOY_MODE, "client")
  // Not enabled by default.
  assert(Utils.isDynamicAllocationEnabled(conf) === false)
  // Explicitly disabled.
  assert(Utils.isDynamicAllocationEnabled(
    conf.set(DYN_ALLOCATION_ENABLED, false)) === false)
  // Explicitly enabled.
  assert(Utils.isDynamicAllocationEnabled(
    conf.set(DYN_ALLOCATION_ENABLED, true)))
  // Setting a fixed executor count does not switch dynamic allocation off.
  assert(Utils.isDynamicAllocationEnabled(
    conf.set("spark.executor.instances", "1")))
  assert(Utils.isDynamicAllocationEnabled(
    conf.set("spark.executor.instances", "0")))
  // A local master disables dynamic allocation...
  assert(Utils.isDynamicAllocationEnabled(conf.set("spark.master", "local")) === false)
  // ...unless the testing flag forces it on.
  assert(Utils.isDynamicAllocationEnabled(conf.set(DYN_ALLOCATION_TESTING, true)))
}
// As the assertions below demonstrate, the initial executor count behaves as
// the maximum of minExecutors, initialExecutors and spark.executor.instances.
// The same SparkConf is mutated cumulatively across assertions.
test("getDynamicAllocationInitialExecutors") {
  val conf = new SparkConf()
  assert(Utils.getDynamicAllocationInitialExecutors(conf) === 0)
  assert(Utils.getDynamicAllocationInitialExecutors(
    conf.set(DYN_ALLOCATION_MIN_EXECUTORS, 3)) === 3)
  assert(Utils.getDynamicAllocationInitialExecutors( // should use minExecutors
    conf.set("spark.executor.instances", "2")) === 3)
  assert(Utils.getDynamicAllocationInitialExecutors( // should use executor.instances
    conf.set("spark.executor.instances", "4")) === 4)
  assert(Utils.getDynamicAllocationInitialExecutors( // should use executor.instances
    conf.set(DYN_ALLOCATION_INITIAL_EXECUTORS, 3)) === 4)
  assert(Utils.getDynamicAllocationInitialExecutors( // should use initialExecutors
    conf.set(DYN_ALLOCATION_INITIAL_EXECUTORS, 5)) === 5)
  assert(Utils.getDynamicAllocationInitialExecutors( // should use minExecutors
    conf.set(DYN_ALLOCATION_INITIAL_EXECUTORS, 2)
      .set("spark.executor.instances", "1")) === 3)
}
// setCurrentContext() should propagate "SPARK_<context>" into Hadoop's
// CallerContext; this is only verifiable when the running Hadoop version
// supports caller contexts, hence the reflective access and the guard.
test("Set Spark CallerContext") {
  val context = "test"
  new CallerContext(context).setCurrentContext()
  if (CallerContext.callerContextSupported) {
    val callerContext = Utils.classForName("org.apache.hadoop.ipc.CallerContext")
    assert(s"SPARK_$context" ===
      callerContext.getMethod("getCurrent").invoke(null).toString)
  }
}
// encodeFileNameToURIRawPath percent-encodes characters that are illegal in a
// raw URI path, leaving legal ones (like ':') alone.
test("encodeFileNameToURIRawPath") {
  Seq(
    "abc" -> "abc",
    "abc xyz" -> "abc%20xyz",
    "abc:xyz" -> "abc:xyz"
  ).foreach { case (name, encoded) =>
    assert(Utils.encodeFileNameToURIRawPath(name) === encoded)
  }
}

// decodeFileNameInURI extracts and percent-decodes the last path segment.
test("decodeFileNameInURI") {
  Seq(
    "files:///abc/xyz" -> "xyz",
    "files:///abc" -> "abc",
    "files:///abc%20xyz" -> "abc xyz"
  ).foreach { case (uri, name) =>
    assert(Utils.decodeFileNameInURI(new URI(uri)) === name)
  }
}
test("Kill process") {
  // Verify that we can terminate a process even if it is in a bad state. This is only run
  // on UNIX since it does some OS specific things to verify the correct behavior.
  if (SystemUtils.IS_OS_UNIX) {
    // Reads the OS pid out of the Process object via reflection (there is no
    // public accessor for it on the JDKs this targets).
    def getPid(p: Process): Int = {
      val f = p.getClass().getDeclaredField("pid")
      f.setAccessible(true)
      f.get(p).asInstanceOf[Int]
    }
    // `kill -0` performs no signal delivery; exit status 0 means the pid exists.
    def pidExists(pid: Int): Boolean = {
      val p = Runtime.getRuntime.exec(s"kill -0 $pid")
      p.waitFor()
      p.exitValue() == 0
    }
    def signal(pid: Int, s: String): Unit = {
      val p = Runtime.getRuntime.exec(s"kill -$s $pid")
      p.waitFor()
    }
    // Start up a process that runs 'sleep 10'. Terminate the process and assert it takes
    // less time and the process is no longer there.
    val startTimeNs = System.nanoTime()
    val process = new ProcessBuilder("sleep", "10").start()
    val pid = getPid(process)
    try {
      assert(pidExists(pid))
      val terminated = Utils.terminateProcess(process, 5000)
      assert(terminated.isDefined)
      process.waitFor(5, TimeUnit.SECONDS)
      val durationNs = System.nanoTime() - startTimeNs
      assert(durationNs < TimeUnit.SECONDS.toNanos(5))
      assert(!pidExists(pid))
    } finally {
      // Forcibly kill the test process just in case.
      signal(pid, "SIGKILL")
    }
    if (SystemUtils.isJavaVersionAtLeast(JavaVersion.JAVA_1_8)) {
      // We'll make sure that forcibly terminating a process works by
      // creating a very misbehaving process. It ignores SIGTERM and has been SIGSTOPed. On
      // older versions of java, this will *not* terminate.
      val file = File.createTempFile("temp-file-name", ".tmp")
      file.deleteOnExit()
      val cmd =
        s"""
           |#!/bin/bash
           |trap "" SIGTERM
           |sleep 10
          """.stripMargin
      Files.write(cmd.getBytes(UTF_8), file)
      file.getAbsoluteFile.setExecutable(true)
      val process = new ProcessBuilder(file.getAbsolutePath).start()
      val pid = getPid(process)
      assert(pidExists(pid))
      try {
        // SIGSTOP suspends the process so it cannot react to SIGTERM at all;
        // termination must therefore escalate to a forcible kill.
        signal(pid, "SIGSTOP")
        val startNs = System.nanoTime()
        val terminated = Utils.terminateProcess(process, 5000)
        assert(terminated.isDefined)
        process.waitFor(5, TimeUnit.SECONDS)
        val duration = System.nanoTime() - startNs
        // add a little extra time to allow a force kill to finish
        assert(duration < TimeUnit.SECONDS.toNanos(6))
        assert(!pidExists(pid))
      } finally {
        signal(pid, "SIGKILL")
      }
    }
  }
}
// Statistical check that Utils.randomizeInPlace shuffles uniformly: over many
// trials, each element should land in each position about equally often.
test("chi square test of randomizeInPlace") {
  // Parameters
  val arraySize = 10
  val numTrials = 1000
  val threshold = 0.05
  val seed = 1L

  // results(i)(j): how many times Utils.randomize moves an element from position j to position i
  val results = Array.ofDim[Long](arraySize, arraySize)

  // This must be seeded because even a fair random process will fail this test with
  // probability equal to the value of `threshold`, which is inconvenient for a unit test.
  val rand = new java.util.Random(seed)
  val range = 0 until arraySize

  for {
    _ <- 0 until numTrials
    trial = Utils.randomizeInPlace(range.toArray, rand)
    i <- range
  } results(i)(trial(i)) += 1L

  val chi = new ChiSquareTest()

  // We expect an even distribution; this array will be rescaled by `chiSquareTest`
  val expected = Array.fill(arraySize * arraySize)(1.0)
  val observed = results.flatten

  // Performs Pearson's chi-squared test. Using the sum-of-squares as the test statistic, gives
  // the probability of a uniform distribution producing results as extreme as `observed`
  val pValue = chi.chiSquareTest(expected, observed)
  assert(pValue > threshold)
}
// Redaction should replace values whose KEY matches the secret pattern, and
// also values whose VALUE contains secret-looking content.
test("redact sensitive information") {
  val sparkConf = new SparkConf

  // Set some secret keys
  val secretKeys = Seq(
    "spark.executorEnv.HADOOP_CREDSTORE_PASSWORD",
    "spark.my.password",
    "spark.my.sECreT")
  secretKeys.foreach { key => sparkConf.set(key, "sensitive_value") }
  // Set a non-secret key
  sparkConf.set("spark.regular.property", "regular_value")
  // Set a property with a regular key but secret in the value
  sparkConf.set("spark.sensitive.property", "has_secret_in_value")

  // Redact sensitive information
  val redactedConf = Utils.redact(sparkConf, sparkConf.getAll).toMap

  // Assert that secret information got redacted while the regular property remained the same
  secretKeys.foreach { key => assert(redactedConf(key) === Utils.REDACTION_REPLACEMENT_TEXT) }
  assert(redactedConf("spark.regular.property") === "regular_value")
  assert(redactedConf("spark.sensitive.property") === Utils.REDACTION_REPLACEMENT_TEXT)
}

// Same as above but for "-Dkey=value" style command-line arguments; args that
// do not parse as a -D assignment must pass through untouched.
test("redact sensitive information in command line args") {
  val sparkConf = new SparkConf

  // Set some secret keys
  val secretKeysWithSameValue = Seq(
    "spark.executorEnv.HADOOP_CREDSTORE_PASSWORD",
    "spark.my.password",
    "spark.my.sECreT")
  val cmdArgsForSecretWithSameValue = secretKeysWithSameValue.map(s => s"-D$s=sensitive_value")

  val secretKeys = secretKeysWithSameValue ++ Seq("spark.your.password")
  val cmdArgsForSecret = cmdArgsForSecretWithSameValue ++ Seq(
    // Have '=' twice
    "-Dspark.your.password=sensitive=sensitive2"
  )

  val ignoredArgs = Seq(
    // starts with -D but no assignment
    "-Ddummy",
    // secret value contained not starting with -D (we don't care about this case for now)
    "spark.my.password=sensitive_value",
    // edge case: not started with -D, but matched pattern after first '-'
    "--Dspark.my.password=sensitive_value")

  val cmdArgs = cmdArgsForSecret ++ ignoredArgs ++ Seq(
    // Set a non-secret key
    "-Dspark.regular.property=regular_value",
    // Set a property with a regular key but secret in the value
    "-Dspark.sensitive.property=has_secret_in_value")

  // Redact sensitive information
  val redactedCmdArgs = Utils.redactCommandLineArgs(sparkConf, cmdArgs)

  // These arguments should be left as they were:
  // 1) argument without -D option is not applied
  // 2) -D option without key-value assignment is not applied
  assert(ignoredArgs.forall(redactedCmdArgs.contains))

  // Parse the remaining args back into a key -> value map; values containing
  // '=' are re-joined so "k=v1=v2" round-trips correctly.
  val redactedCmdArgMap = redactedCmdArgs.filterNot(ignoredArgs.contains).map { cmd =>
    val keyValue = cmd.substring("-D".length).split("=")
    keyValue(0) -> keyValue.tail.mkString("=")
  }.toMap

  // Assert that secret information got redacted while the regular property remained the same
  secretKeys.foreach { key =>
    assert(redactedCmdArgMap(key) === Utils.REDACTION_REPLACEMENT_TEXT)
  }
  assert(redactedCmdArgMap("spark.regular.property") === "regular_value")
  assert(redactedCmdArgMap("spark.sensitive.property") === Utils.REDACTION_REPLACEMENT_TEXT)
}

// The pair-based overload redacts on key match, value match, and non-string
// keys — but leaves non-string values alone.
test("redact sensitive information in sequence of key value pairs") {
  val secretKeys = Some("my.password".r)
  assert(Utils.redact(secretKeys, Seq(("spark.my.password", "12345"))) ===
    Seq(("spark.my.password", Utils.REDACTION_REPLACEMENT_TEXT)))
  assert(Utils.redact(secretKeys, Seq(("anything", "spark.my.password=12345"))) ===
    Seq(("anything", Utils.REDACTION_REPLACEMENT_TEXT)))
  assert(Utils.redact(secretKeys, Seq((999, "spark.my.password=12345"))) ===
    Seq((999, Utils.REDACTION_REPLACEMENT_TEXT)))
  // Do not redact when value type is not string
  assert(Utils.redact(secretKeys, Seq(("my.password", 12345))) ===
    Seq(("my.password", 12345)))
}
// tryWithSafeFinally must rethrow the try-block error, attaching any distinct
// finally-block error as suppressed — and must NOT call addSuppressed with the
// same instance (which would throw IllegalArgumentException).
test("tryWithSafeFinally") {
  var e = new Error("Block0")
  val finallyBlockError = new Error("Finally Block")
  var isErrorOccurred = false
  // if the try and finally blocks throw different exception instances
  try {
    Utils.tryWithSafeFinally { throw e }(finallyBlock = { throw finallyBlockError })
  } catch {
    case t: Error =>
      assert(t.getSuppressed.head == finallyBlockError)
      isErrorOccurred = true
  }
  assert(isErrorOccurred)
  // if the try and finally blocks throw the same exception instance then it should not
  // try to add to suppressed and get IllegalArgumentException
  e = new Error("Block1")
  isErrorOccurred = false
  try {
    Utils.tryWithSafeFinally { throw e }(finallyBlock = { throw e })
  } catch {
    case t: Error =>
      assert(t.getSuppressed.length == 0)
      isErrorOccurred = true
  }
  assert(isErrorOccurred)
  // if the try throws the exception and finally doesn't throw exception
  e = new Error("Block2")
  isErrorOccurred = false
  try {
    Utils.tryWithSafeFinally { throw e }(finallyBlock = {})
  } catch {
    case t: Error =>
      assert(t.getSuppressed.length == 0)
      isErrorOccurred = true
  }
  assert(isErrorOccurred)
  // if the try and finally block don't throw exception
  Utils.tryWithSafeFinally {}(finallyBlock = {})
}

// Same contract as above, extended with a catchBlock; both catch and finally
// errors become suppressed exceptions of the original (in that order).
// A TaskContext is installed because the failure callbacks require one.
test("tryWithSafeFinallyAndFailureCallbacks") {
  var e = new Error("Block0")
  val catchBlockError = new Error("Catch Block")
  val finallyBlockError = new Error("Finally Block")
  var isErrorOccurred = false
  TaskContext.setTaskContext(TaskContext.empty())
  // if the try, catch and finally blocks throw different exception instances
  try {
    Utils.tryWithSafeFinallyAndFailureCallbacks { throw e }(
      catchBlock = { throw catchBlockError }, finallyBlock = { throw finallyBlockError })
  } catch {
    case t: Error =>
      assert(t.getSuppressed.head == catchBlockError)
      assert(t.getSuppressed.last == finallyBlockError)
      isErrorOccurred = true
  }
  assert(isErrorOccurred)
  // if the try, catch and finally blocks throw the same exception instance then it should not
  // try to add to suppressed and get IllegalArgumentException
  e = new Error("Block1")
  isErrorOccurred = false
  try {
    Utils.tryWithSafeFinallyAndFailureCallbacks { throw e }(catchBlock = { throw e },
      finallyBlock = { throw e })
  } catch {
    case t: Error =>
      assert(t.getSuppressed.length == 0)
      isErrorOccurred = true
  }
  assert(isErrorOccurred)
  // if the try throws the exception, catch and finally don't throw exceptions
  e = new Error("Block2")
  isErrorOccurred = false
  try {
    Utils.tryWithSafeFinallyAndFailureCallbacks { throw e }(catchBlock = {}, finallyBlock = {})
  } catch {
    case t: Error =>
      assert(t.getSuppressed.length == 0)
      isErrorOccurred = true
  }
  assert(isErrorOccurred)
  // if the try, catch and finally blocks don't throw exceptions
  Utils.tryWithSafeFinallyAndFailureCallbacks {}(catchBlock = {}, finallyBlock = {})
  // Clean up the thread-local TaskContext installed above.
  TaskContext.unset
}
// Utils.loadExtensions should instantiate the listed classes, skip ones whose
// constructor throws UnsupportedOperationException (opt-out), and fail loudly
// on non-static nested classes, other constructor errors, and type mismatches.
test("load extensions") {
  val extensions = Seq(
    classOf[SimpleExtension],
    classOf[ExtensionWithConf],
    classOf[UnregisterableExtension]).map(_.getName())

  val conf = new SparkConf(false)
  val instances = Utils.loadExtensions(classOf[Object], extensions, conf)
  // UnregisterableExtension opts out, so only 2 of the 3 are returned.
  assert(instances.size === 2)
  assert(instances.count(_.isInstanceOf[SimpleExtension]) === 1)

  // The SparkConf must be passed through to a conf-accepting constructor
  // as the same instance (reference equality).
  val extWithConf = instances.find(_.isInstanceOf[ExtensionWithConf])
    .map(_.asInstanceOf[ExtensionWithConf])
    .get
  assert(extWithConf.conf eq conf)

  // A non-static nested class cannot be instantiated by name.
  class NestedExtension { }

  val invalid = Seq(classOf[NestedExtension].getName())
  intercept[SparkException] {
    Utils.loadExtensions(classOf[Object], invalid, conf)
  }

  val error = Seq(classOf[ExtensionWithError].getName())
  intercept[IllegalArgumentException] {
    Utils.loadExtensions(classOf[Object], error, conf)
  }

  // A class that does not implement the requested extension type is rejected.
  val wrongType = Seq(classOf[ListenerImpl].getName())
  intercept[IllegalArgumentException] {
    Utils.loadExtensions(classOf[Seq[_]], wrongType, conf)
  }
}
// checkAndGetK8sMasterUrl should accept http/https schemes, default to https
// when no scheme is given, and reject malformed or unsupported schemes.
test("check Kubernetes master URL") {
  val k8sMasterURLHttps = Utils.checkAndGetK8sMasterUrl("k8s://https://host:port")
  assert(k8sMasterURLHttps === "k8s://https://host:port")

  val k8sMasterURLHttp = Utils.checkAndGetK8sMasterUrl("k8s://http://host:port")
  assert(k8sMasterURLHttp === "k8s://http://host:port")

  // No scheme: https is assumed.
  val k8sMasterURLWithoutScheme = Utils.checkAndGetK8sMasterUrl("k8s://127.0.0.1:8443")
  assert(k8sMasterURLWithoutScheme === "k8s://https://127.0.0.1:8443")

  val k8sMasterURLWithoutScheme2 = Utils.checkAndGetK8sMasterUrl("k8s://127.0.0.1")
  assert(k8sMasterURLWithoutScheme2 === "k8s://https://127.0.0.1")

  // Missing "//" after k8s: is malformed.
  intercept[IllegalArgumentException] {
    Utils.checkAndGetK8sMasterUrl("k8s:https://host:port")
  }

  // Unsupported inner scheme.
  intercept[IllegalArgumentException] {
    Utils.checkAndGetK8sMasterUrl("k8s://foo://host:port")
  }
}
// stringHalfWidth measures display width in half-width units: as the cases
// below show, ASCII and other narrow characters count 1, full-width (CJK,
// Hangul, full-width punctuation) characters count 2, and null counts 0.
test("stringHalfWidth") {
  // scalastyle:off nonascii
  assert(Utils.stringHalfWidth(null) == 0)
  assert(Utils.stringHalfWidth("") == 0)
  assert(Utils.stringHalfWidth("ab c") == 4)
  assert(Utils.stringHalfWidth("1098") == 4)
  assert(Utils.stringHalfWidth("mø") == 2)
  assert(Utils.stringHalfWidth("γύρ") == 3)
  assert(Utils.stringHalfWidth("pê") == 2)
  assert(Utils.stringHalfWidth("ー") == 2)
  assert(Utils.stringHalfWidth("测") == 2)
  assert(Utils.stringHalfWidth("か") == 2)
  assert(Utils.stringHalfWidth("걸") == 2)
  assert(Utils.stringHalfWidth("à") == 1)
  assert(Utils.stringHalfWidth("焼") == 2)
  assert(Utils.stringHalfWidth("羍む") == 4)
  assert(Utils.stringHalfWidth("뺭ᾘ") == 3)
  assert(Utils.stringHalfWidth("\u0967\u0968\u0969") == 3)
  // scalastyle:on nonascii
}
// trimExceptCRLF strips non-printable ASCII (codepoints 0-32) from the ends
// of a string, except CR and LF which are preserved; interior characters are
// never removed.
test("trimExceptCRLF standalone") {
  val crlfSet = Set("\r", "\n")
  val nonPrintableButCRLF = (0 to 32).map(_.toChar.toString).toSet -- crlfSet

  // identity for CRLF
  // Bug fix: the original computed `Utils.trimExceptCRLF(s) === s` here but
  // never wrapped it in assert(), so this check silently did nothing.
  crlfSet.foreach { s => assert(Utils.trimExceptCRLF(s) === s) }

  // empty for other non-printables
  nonPrintableButCRLF.foreach { s => assert(Utils.trimExceptCRLF(s) === "") }

  // identity for a printable string
  assert(Utils.trimExceptCRLF("a") === "a")

  // identity for strings with CRLF
  crlfSet.foreach { s =>
    assert(Utils.trimExceptCRLF(s"${s}a") === s"${s}a")
    assert(Utils.trimExceptCRLF(s"a${s}") === s"a${s}")
    assert(Utils.trimExceptCRLF(s"b${s}b") === s"b${s}b")
  }

  // trim nonPrintableButCRLF except when inside a string
  nonPrintableButCRLF.foreach { s =>
    assert(Utils.trimExceptCRLF(s"${s}a") === "a")
    assert(Utils.trimExceptCRLF(s"a${s}") === "a")
    assert(Utils.trimExceptCRLF(s"b${s}b") === s"b${s}b")
  }
}
}
/** Trivial no-arg extension used by the "load extensions" test. */
private class SimpleExtension

/** Extension whose constructor takes the SparkConf, to test conf injection. */
private class ExtensionWithConf(val conf: SparkConf)

/** Throws on construction; loadExtensions should silently skip it. */
private class UnregisterableExtension {

  throw new UnsupportedOperationException()
}

/** Throws on construction; loadExtensions should propagate the error. */
private class ExtensionWithError {

  throw new IllegalArgumentException()
}

/** A SparkListener, used to test rejection of wrong extension types. */
private class ListenerImpl extends SparkListener
| apache-2.0 |
cexbrayat/camel | components/camel-crypto/src/main/java/org/apache/camel/component/crypto/processor/SigningProcessor.java | 3569 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.crypto.processor;
import java.security.KeyStore;
import java.security.PrivateKey;
import java.security.SecureRandom;
import java.security.Signature;
import static java.lang.String.format;
import org.apache.camel.Exchange;
import org.apache.camel.Message;
import org.apache.camel.component.crypto.DigitalSignatureConfiguration;
import org.apache.camel.component.crypto.DigitalSignatureConstants;
import org.apache.commons.codec.binary.Base64;
import static org.apache.camel.component.crypto.DigitalSignatureConstants.SIGNATURE_PRIVATE_KEY;
/**
 * Camel processor that produces a digital signature for an exchange and
 * stores it, Base64-encoded, in the header named by
 * {@link DigitalSignatureConfiguration#getSignatureHeader()}.
 */
public class SigningProcessor extends DigitalSignatureProcessor {

    public SigningProcessor(DigitalSignatureConfiguration configuration) {
        super(configuration);
    }

    /**
     * Signs the exchange: initializes a {@link Signature} with the private
     * key, feeds the message through {@code calculateSignature} (inherited
     * from the superclass), clears signature-related headers from the IN
     * message, then copies IN to OUT and attaches the encoded signature.
     * Note: headers are cleared BEFORE the copy so crypto headers do not
     * leak to downstream processors.
     */
    public void process(Exchange exchange) throws Exception {
        Signature service = initSignatureService(exchange);
        calculateSignature(exchange, service);
        byte[] signature = service.sign();

        Message in = exchange.getIn();
        clearMessageHeaders(in);

        Message out = exchange.getOut();
        out.copyFrom(in);
        out.setHeader(config.getSignatureHeader(), new Base64().encode(signature));
    }

    /**
     * Creates a {@link Signature} instance initialized for signing, using the
     * configured {@link SecureRandom} when one is supplied.
     */
    protected Signature initSignatureService(Exchange exchange) throws Exception {
        PrivateKey pk = getPrivateKeyFromKeystoreOrExchange(exchange);
        SecureRandom random = config.getSecureRandom();
        Signature service = createSignatureService();
        if (random != null) {
            service.initSign(pk, random);
        } else {
            service.initSign(pk);
        }
        return service;
    }

    /**
     * Resolves the signing key: first from the configuration (keystore alias
     * or explicit key), then falling back to the
     * {@code SIGNATURE_PRIVATE_KEY} message header.
     *
     * @throws IllegalStateException if no private key can be located
     */
    private PrivateKey getPrivateKeyFromKeystoreOrExchange(Exchange exchange) throws Exception {
        PrivateKey pk = config.getPrivateKey(getAlias(exchange), getKeyPassword(exchange));

        if (pk == null) {
            pk = exchange.getIn().getHeader(SIGNATURE_PRIVATE_KEY, PrivateKey.class);
            if (pk == null) {
                throw new IllegalStateException(format("Cannot sign message as no Private Key has been supplied. Either supply one in"
                    + " the route definition sign(keystore, alias) or sign(privateKey) or via the message header '%s'", SIGNATURE_PRIVATE_KEY));
            }
        }
        return pk;
    }

    /**
     * Returns the key password when a keystore is configured: the
     * {@code KEYSTORE_PASSWORD} header if present, otherwise the configured
     * password. Returns {@code null} when no keystore is configured.
     */
    protected char[] getKeyPassword(Exchange exchange) throws Exception {
        KeyStore keystore = config.getKeystore();
        char[] password = null;
        if (keystore != null) {
            password = exchange.getIn().getHeader(DigitalSignatureConstants.KEYSTORE_PASSWORD, char[].class);
            if (password == null) {
                password = config.getPassword();
            }
        }
        return password;
    }
}
| apache-2.0 |
munifgebara/angular4 | random-quote4/app/app.module.js | 510 | (function(app) {
var NgModule = ng.core.NgModule;
var BrowserModule = ng.platformBrowser.BrowserModule;
var QuoteService = app.QuoteService;
var RandomQuoteComponent = app.RandomQuoteComponent;
var AppComponent = app.AppComponent;
app.AppModule = NgModule({
imports: [BrowserModule],
declarations: [AppComponent, RandomQuoteComponent],
providers: [QuoteService],
bootstrap: [AppComponent]
})
.Class({
constructor: function() { }
});
})(window.app || (window.app = {}));
| apache-2.0 |
tomdoherty/chef | lib/chef/knife/user_reregister.rb | 1534 | #
# Author:: Steven Danna (<steve@chef.io>)
# Copyright:: Copyright 2012-2016, Chef Software Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require "chef/knife"
class Chef
  class Knife
    # knife user reregister USER
    #
    # Regenerates the private key for an existing Chef server user and either
    # prints it to the terminal or, with -f/--file, writes it to a file.
    class UserReregister < Knife

      deps do
        require "chef/user_v1"
      end

      banner "knife user reregister USER (options)"

      option :file,
        short: "-f FILE",
        long: "--file FILE",
        description: "Write the private key to a file"

      def run
        @user_name = @name_args[0]

        # The user name is a required positional argument.
        if @user_name.nil?
          show_usage
          ui.fatal("You must specify a user name")
          exit 1
        end

        user = Chef::UserV1.load(@user_name)
        user.reregister
        Chef::Log.trace("Updated user data: #{user.inspect}")
        key = user.private_key
        if config[:file]
          File.open(config[:file], "w") do |f|
            f.print(key)
          end
        else
          ui.msg key
        end
      end
    end
  end
end
| apache-2.0 |
allure-framework/allure1 | allure-report-face/src/plugins/defects/defects-widget/DefectsWidget.js | 172 | import {ItemView} from 'backbone.marionette';
import template from './DefectsWidget.hbs';
// Marionette ItemView that renders the defects widget from its
// Handlebars template; no extra behavior beyond the template binding.
export default class DefectsWidget extends ItemView {
    template = template;
}
| apache-2.0 |
USC-NSL/p4c-behavioral | p4c_bm/of.py | 8178 | # OF Match fields
of_match_vals = {}
of_match_vals["OFPXMT_OFB_IN_PORT" ] = "match_fields->fields.in_port"
of_match_vals["OFPXMT_OFB_IN_PHY_PORT" ] = "match_fields->fields.in_phy_port"
of_match_vals["OFPXMT_OFB_METADATA" ] = "match_fields->fields.metadata"
of_match_vals["OFPXMT_OFB_ETH_DST" ] = "match_fields->fields.eth_dst.addr"
of_match_vals["OFPXMT_OFB_ETH_SRC" ] = "match_fields->fields.eth_src.addr"
of_match_vals["OFPXMT_OFB_ETH_TYPE" ] = "match_fields->fields.eth_type"
of_match_vals["OFPXMT_OFB_VLAN_VID" ] = "match_fields->fields.vlan_vid"
of_match_vals["OFPXMT_OFB_VLAN_PCP" ] = "match_fields->fields.vlan_pcp"
of_match_vals["OFPXMT_OFB_IP_DSCP" ] = "match_fields->fields.ip_dscp"
of_match_vals["OFPXMT_OFB_IP_ECN" ] = "match_fields->fields.ip_ecn"
of_match_vals["OFPXMT_OFB_IP_PROTO" ] = "match_fields->fields.ip_proto"
of_match_vals["OFPXMT_OFB_IPV4_SRC" ] = "match_fields->fields.ipv4_src"
of_match_vals["OFPXMT_OFB_IPV4_DST" ] = "match_fields->fields.ipv4_dst"
of_match_vals["OFPXMT_OFB_TCP_SRC" ] = "match_fields->fields.tcp_src"
of_match_vals["OFPXMT_OFB_TCP_DST" ] = "match_fields->fields.tcp_dst"
of_match_vals["OFPXMT_OFB_UDP_SRC" ] = "match_fields->fields.udp_src"
of_match_vals["OFPXMT_OFB_UDP_DST" ] = "match_fields->fields.udp_dst"
of_match_vals["OFPXMT_OFB_SCTP_SRC" ] = "match_fields->fields.sctp_src"
of_match_vals["OFPXMT_OFB_SCTP_DST" ] = "match_fields->fields.sctp_dst"
of_match_vals["OFPXMT_OFB_ICMPV4_TYPE" ] = "match_fields->fields.icmpv4_type"
of_match_vals["OFPXMT_OFB_ICMPV4_CODE" ] = "match_fields->fields.icmpv4_code"
of_match_vals["OFPXMT_OFB_ARP_OP" ] = "match_fields->fields.arp_op"
of_match_vals["OFPXMT_OFB_ARP_SPA" ] = "match_fields->fields.arp_spa"
of_match_vals["OFPXMT_OFB_ARP_TPA" ] = "match_fields->fields.arp_tpa"
of_match_vals["OFPXMT_OFB_ARP_SHA" ] = "match_fields->fields.arp_sha"
of_match_vals["OFPXMT_OFB_ARP_THA" ] = "match_fields->fields.arp_tha"
of_match_vals["OFPXMT_OFB_IPV6_SRC" ] = "match_fields->fields.ipv6_src"
of_match_vals["OFPXMT_OFB_IPV6_DST" ] = "match_fields->fields.ipv6_dst"
of_match_vals["OFPXMT_OFB_IPV6_FLABEL" ] = "match_fields->fields.ipv6_flabel"
of_match_vals["OFPXMT_OFB_ICMPV6_TYPE" ] = "match_fields->fields.icmpv6_type"
of_match_vals["OFPXMT_OFB_ICMPV6_CODE" ] = "match_fields->fields.icmpv6_code"
of_match_vals["OFPXMT_OFB_IPV6_ND_TARGET"] = "match_fields->fields.ipv6_nd_target"
of_match_vals["OFPXMT_OFB_IPV6_ND_SLL" ] = "match_fields->fields.ipv6_nd_sll"
of_match_vals["OFPXMT_OFB_IPV6_ND_TLL" ] = "match_fields->fields.ipv6_nd_tll"
of_match_vals["OFPXMT_OFB_MPLS_LABEL" ] = "match_fields->fields.mpls_label"
of_match_vals["OFPXMT_OFB_MPLS_TC" ] = "match_fields->fields.mpls_tc"
of_match_vals["OFPXMT_OFP_MPLS_BOS" ] = "match_fields->fields.mpls_bos"
of_match_vals["OFPXMT_OFB_PBB_ISID" ] = "match_fields->fields.pbb_uca"
of_match_vals["OFPXMT_OFB_TUNNEL_ID" ] = "match_fields->fields.tunnel_id"
of_match_vals["OFPXMT_OFB_IPV6_EXTHDR" ] = "match_fields->fields.ipv6_exthdr"
# OF match masks: OXM match-field name -> C accessor string for the field's
# *mask* (note: unlike the values table, eth_dst/eth_src have no ".addr").
of_match_masks = {
    "OFPXMT_OFB_IN_PORT": "match_fields->masks.in_port",
    "OFPXMT_OFB_IN_PHY_PORT": "match_fields->masks.in_phy_port",
    "OFPXMT_OFB_METADATA": "match_fields->masks.metadata",
    "OFPXMT_OFB_ETH_DST": "match_fields->masks.eth_dst",
    "OFPXMT_OFB_ETH_SRC": "match_fields->masks.eth_src",
    "OFPXMT_OFB_ETH_TYPE": "match_fields->masks.eth_type",
    "OFPXMT_OFB_VLAN_VID": "match_fields->masks.vlan_vid",
    "OFPXMT_OFB_VLAN_PCP": "match_fields->masks.vlan_pcp",
    "OFPXMT_OFB_IP_DSCP": "match_fields->masks.ip_dscp",
    "OFPXMT_OFB_IP_ECN": "match_fields->masks.ip_ecn",
    "OFPXMT_OFB_IP_PROTO": "match_fields->masks.ip_proto",
    "OFPXMT_OFB_IPV4_SRC": "match_fields->masks.ipv4_src",
    "OFPXMT_OFB_IPV4_DST": "match_fields->masks.ipv4_dst",
    "OFPXMT_OFB_TCP_SRC": "match_fields->masks.tcp_src",
    "OFPXMT_OFB_TCP_DST": "match_fields->masks.tcp_dst",
    "OFPXMT_OFB_UDP_SRC": "match_fields->masks.udp_src",
    "OFPXMT_OFB_UDP_DST": "match_fields->masks.udp_dst",
    "OFPXMT_OFB_SCTP_SRC": "match_fields->masks.sctp_src",
    "OFPXMT_OFB_SCTP_DST": "match_fields->masks.sctp_dst",
    "OFPXMT_OFB_ICMPV4_TYPE": "match_fields->masks.icmpv4_type",
    "OFPXMT_OFB_ICMPV4_CODE": "match_fields->masks.icmpv4_code",
    "OFPXMT_OFB_ARP_OP": "match_fields->masks.arp_op",
    "OFPXMT_OFB_ARP_SPA": "match_fields->masks.arp_spa",
    "OFPXMT_OFB_ARP_TPA": "match_fields->masks.arp_tpa",
    "OFPXMT_OFB_ARP_SHA": "match_fields->masks.arp_sha",
    "OFPXMT_OFB_ARP_THA": "match_fields->masks.arp_tha",
    "OFPXMT_OFB_IPV6_SRC": "match_fields->masks.ipv6_src",
    "OFPXMT_OFB_IPV6_DST": "match_fields->masks.ipv6_dst",
    "OFPXMT_OFB_IPV6_FLABEL": "match_fields->masks.ipv6_flabel",
    "OFPXMT_OFB_ICMPV6_TYPE": "match_fields->masks.icmpv6_type",
    "OFPXMT_OFB_ICMPV6_CODE": "match_fields->masks.icmpv6_code",
    "OFPXMT_OFB_IPV6_ND_TARGET": "match_fields->masks.ipv6_nd_target",
    "OFPXMT_OFB_IPV6_ND_SLL": "match_fields->masks.ipv6_nd_sll",
    "OFPXMT_OFB_IPV6_ND_TLL": "match_fields->masks.ipv6_nd_tll",
    "OFPXMT_OFB_MPLS_LABEL": "match_fields->masks.mpls_label",
    "OFPXMT_OFB_MPLS_TC": "match_fields->masks.mpls_tc",
    "OFPXMT_OFP_MPLS_BOS": "match_fields->masks.mpls_bos",
    "OFPXMT_OFB_PBB_ISID": "match_fields->masks.pbb_uca",
    "OFPXMT_OFB_TUNNEL_ID": "match_fields->masks.tunnel_id",
    "OFPXMT_OFB_IPV6_EXTHDR": "match_fields->masks.ipv6_exthdr",
}
# OF action types: lowercase action tag -> OFPAT_* constant name. The NW-TTL
# actions have distinct ipv4/ipv6 tags mapping to the same OFPAT constant.
of_action_vals = {
    "ofpat_output": "OFPAT_OUTPUT",
    "ofpat_copy_ttl_out": "OFPAT_COPY_TTL_OUT",
    "ofpat_copy_ttl_in": "OFPAT_COPY_TTL_IN",
    "ofpat_set_mpls_ttl": "OFPAT_SET_MPLS_TTL",
    "ofpat_dec_mpls_ttl": "OFPAT_DEC_MPLS_TTL",
    "ofpat_push_vlan": "OFPAT_PUSH_VLAN",
    "ofpat_pop_vlan": "OFPAT_POP_VLAN",
    "ofpat_push_mpls": "OFPAT_PUSH_MPLS",
    "ofpat_pop_mpls": "OFPAT_POP_MPLS",
    "ofpat_set_queue": "OFPAT_SET_QUEUE",
    "ofpat_group": "OFPAT_GROUP",
    "ofpat_set_nw_ttl_ipv4": "OFPAT_SET_NW_TTL",
    "ofpat_set_nw_ttl_ipv6": "OFPAT_SET_NW_TTL",
    "ofpat_dec_nw_ttl_ipv4": "OFPAT_DEC_NW_TTL",
    "ofpat_dec_nw_ttl_ipv6": "OFPAT_DEC_NW_TTL",
    "ofpat_set_field": "OFPAT_SET_FIELD",
    "ofpat_push_pbb": "OFPAT_PUSH_PBB",
    "ofpat_pop_pbb": "OFPAT_POP_PBB",
    # NOTE(review): this entry is inverted relative to the rest of the table
    # (constant-name key -> numeric value instead of tag -> constant-name).
    # Preserved as-is since consumers may rely on it; confirm intent.
    "OFPAT_EXPERIMENTER": 0xffff,
}
# Set-field action types: set-field tag -> OXM match-field constant it writes.
of_set_fields = {
    "ofpat_set_vlan_vid": "OFPXMT_OFB_VLAN_VID",
}
| apache-2.0 |
ay65535/hbase-0.94.0 | src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java | 8150 | /*
* Copyright 2011 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.io.hfile;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.hbase.*;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import static org.junit.Assert.*;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
/**
 * Unit test for {@link FixedFileTrailer}: serializes a trailer, reads it back,
 * verifies corruption detection for out-of-range versions, and round-trips the
 * trailer through an actual file with version auto-detection. Parameterized so
 * that every supported HFile format version is exercised.
 */
@RunWith(Parameterized.class)
@Category(MediumTests.class)
public class TestFixedFileTrailer {
  private static final Log LOG = LogFactory.getLog(TestFixedFileTrailer.class);
  /** The number of used fields by version. Indexed by version minus one. */
  private static final int[] NUM_FIELDS_BY_VERSION = new int[] { 9, 14 };
  private HBaseTestingUtility util = new HBaseTestingUtility();
  private FileSystem fs;
  // Reused in-memory sink for trailer serialization; reset between uses.
  private ByteArrayOutputStream baos = new ByteArrayOutputStream();
  // The HFile format version under test for this parameterized instance.
  private int version;
  static {
    // Sanity check: NUM_FIELDS_BY_VERSION must cover every supported version.
    assert NUM_FIELDS_BY_VERSION.length == HFile.MAX_FORMAT_VERSION
        - HFile.MIN_FORMAT_VERSION + 1;
  }
  public TestFixedFileTrailer(int version) {
    this.version = version;
  }
  /** @return one parameter set per supported HFile format version. */
  @Parameters
  public static Collection<Object[]> getParameters() {
    List<Object[]> versionsToTest = new ArrayList<Object[]>();
    for (int v = HFile.MIN_FORMAT_VERSION; v <= HFile.MAX_FORMAT_VERSION; ++v)
      versionsToTest.add(new Integer[] { v } );
    return versionsToTest;
  }
  @Before
  public void setUp() throws IOException {
    fs = FileSystem.get(util.getConfiguration());
  }
  /**
   * End-to-end trailer test: populate version-appropriate fields, serialize,
   * deserialize, verify corruption handling, then auto-detect from a file.
   */
  @Test
  public void testTrailer() throws IOException {
    FixedFileTrailer t = new FixedFileTrailer(version,
        HFileBlock.MINOR_VERSION_NO_CHECKSUM);
    t.setDataIndexCount(3);
    // Deliberately larger than Integer.MAX_VALUE to exercise v1 truncation.
    t.setEntryCount(((long) Integer.MAX_VALUE) + 1);
    if (version == 1) {
      t.setFileInfoOffset(876);
    }
    if (version == 2) {
      t.setLastDataBlockOffset(291);
      t.setNumDataIndexLevels(3);
      t.setComparatorClass(KeyValue.KEY_COMPARATOR.getClass());
      t.setFirstDataBlockOffset(9081723123L); // Completely unrealistic.
      t.setUncompressedDataIndexSize(827398717L); // Something random.
    }
    t.setLoadOnOpenOffset(128);
    t.setMetaIndexCount(7);
    t.setTotalUncompressedBytes(129731987);
    {
      DataOutputStream dos = new DataOutputStream(baos); // Limited scope.
      t.serialize(dos);
      dos.flush();
      // Serialized size must match the fixed size declared for this version.
      assertEquals(dos.size(), FixedFileTrailer.getTrailerSize(version));
    }
    byte[] bytes = baos.toByteArray();
    baos.reset();
    assertEquals(bytes.length, FixedFileTrailer.getTrailerSize(version));
    ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
    // Finished writing, trying to read.
    {
      DataInputStream dis = new DataInputStream(bais);
      FixedFileTrailer t2 = new FixedFileTrailer(version,
          HFileBlock.MINOR_VERSION_NO_CHECKSUM);
      t2.deserialize(dis);
      assertEquals(-1, bais.read()); // Ensure we have read everything.
      checkLoadedTrailer(version, t, t2);
    }
    // Now check what happens if the trailer is corrupted.
    Path trailerPath = new Path(util.getDataTestDir(), "trailer_"
        + version);
    {
      // The last byte of the trailer holds the version; force it out of range.
      for (byte invalidVersion : new byte[] { HFile.MIN_FORMAT_VERSION - 1,
          HFile.MAX_FORMAT_VERSION + 1}) {
        bytes[bytes.length - 1] = invalidVersion;
        writeTrailer(trailerPath, null, bytes);
        try {
          readTrailer(trailerPath);
          fail("Exception expected");
        } catch (IOException ex) {
          // Make it easy to debug this.
          String msg = ex.getMessage();
          String cleanMsg = msg.replaceAll(
              "^(java(\\.[a-zA-Z]+)+:\\s+)?|\\s+\\(.*\\)\\s*$", "");
          assertEquals("Actual exception message is \"" + msg + "\".\n" +
              "Cleaned-up message", // will be followed by " expected: ..."
              "Invalid HFile version: " + invalidVersion, cleanMsg);
          LOG.info("Got an expected exception: " + msg);
        }
      }
    }
    // Now write the trailer into a file and auto-detect the version.
    writeTrailer(trailerPath, t, null);
    FixedFileTrailer t4 = readTrailer(trailerPath);
    checkLoadedTrailer(version, t, t4);
    // The string form must expose exactly the fields used by this version.
    String trailerStr = t.toString();
    assertEquals("Invalid number of fields in the string representation "
        + "of the trailer: " + trailerStr, NUM_FIELDS_BY_VERSION[version - 1],
        trailerStr.split(", ").length);
    assertEquals(trailerStr, t4.toString());
  }
  /** Reads a trailer from the given path, auto-detecting the version. */
  private FixedFileTrailer readTrailer(Path trailerPath) throws IOException {
    FSDataInputStream fsdis = fs.open(trailerPath);
    FixedFileTrailer trailerRead = FixedFileTrailer.readFromStream(fsdis,
        fs.getFileStatus(trailerPath).getLen());
    fsdis.close();
    return trailerRead;
  }
  /**
   * Writes either a trailer object or raw bytes (exactly one must be non-null)
   * to the given path, prefixed with a junk byte so the reader must seek.
   */
  private void writeTrailer(Path trailerPath, FixedFileTrailer t,
      byte[] useBytesInstead) throws IOException {
    assert (t == null) != (useBytesInstead == null); // Expect one non-null.
    FSDataOutputStream fsdos = fs.create(trailerPath);
    fsdos.write(135); // to make deserializer's job less trivial
    if (useBytesInstead != null) {
      fsdos.write(useBytesInstead);
    } else {
      t.serialize(fsdos);
    }
    fsdos.close();
  }
  /**
   * Asserts field-by-field equality between the trailer we wrote and the one
   * we loaded, honoring per-version differences (e.g. v1 truncates the entry
   * count to an int).
   */
  private void checkLoadedTrailer(int version, FixedFileTrailer expected,
      FixedFileTrailer loaded) throws IOException {
    assertEquals(version, loaded.getMajorVersion());
    assertEquals(expected.getDataIndexCount(), loaded.getDataIndexCount());
    assertEquals(Math.min(expected.getEntryCount(),
        version == 1 ? Integer.MAX_VALUE : Long.MAX_VALUE),
        loaded.getEntryCount());
    if (version == 1) {
      assertEquals(expected.getFileInfoOffset(), loaded.getFileInfoOffset());
    }
    if (version == 2) {
      assertEquals(expected.getLastDataBlockOffset(),
          loaded.getLastDataBlockOffset());
      assertEquals(expected.getNumDataIndexLevels(),
          loaded.getNumDataIndexLevels());
      assertEquals(expected.createComparator().getClass().getName(),
          loaded.createComparator().getClass().getName());
      assertEquals(expected.getFirstDataBlockOffset(),
          loaded.getFirstDataBlockOffset());
      assertTrue(
          expected.createComparator() instanceof KeyValue.KeyComparator);
      assertEquals(expected.getUncompressedDataIndexSize(),
          loaded.getUncompressedDataIndexSize());
    }
    assertEquals(expected.getLoadOnOpenDataOffset(),
        loaded.getLoadOnOpenDataOffset());
    assertEquals(expected.getMetaIndexCount(), loaded.getMetaIndexCount());
    assertEquals(expected.getTotalUncompressedBytes(),
        loaded.getTotalUncompressedBytes());
  }
  // Verifies no threads/file descriptors leak out of each test method.
  @org.junit.Rule
  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
      new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}
| apache-2.0 |
opera/terago | vendor/git.apache.org/thrift.git/lib/go/thrift/protocol.go | 4562 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"errors"
)
const (
	// VERSION_MASK isolates the version bits of a strict binary-protocol
	// message header.
	VERSION_MASK = 0xffff0000
	// VERSION_1 marks a strict version-1 message header.
	VERSION_1 = 0x80010000
)

// TProtocol is Thrift's abstract serialization layer. Implementations
// (binary, compact, JSON, ...) encode and decode the Thrift type system
// over an underlying TTransport. Write* and Read* calls must be issued in
// matching, properly nested pairs (message/struct/field/container
// begin/end) by generated code.
type TProtocol interface {
	WriteMessageBegin(name string, typeId TMessageType, seqid int32) error
	WriteMessageEnd() error
	WriteStructBegin(name string) error
	WriteStructEnd() error
	WriteFieldBegin(name string, typeId TType, id int16) error
	WriteFieldEnd() error
	WriteFieldStop() error
	WriteMapBegin(keyType TType, valueType TType, size int) error
	WriteMapEnd() error
	WriteListBegin(elemType TType, size int) error
	WriteListEnd() error
	WriteSetBegin(elemType TType, size int) error
	WriteSetEnd() error
	WriteBool(value bool) error
	WriteByte(value int8) error
	WriteI16(value int16) error
	WriteI32(value int32) error
	WriteI64(value int64) error
	WriteDouble(value float64) error
	WriteString(value string) error
	WriteBinary(value []byte) error
	ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error)
	ReadMessageEnd() error
	ReadStructBegin() (name string, err error)
	ReadStructEnd() error
	ReadFieldBegin() (name string, typeId TType, id int16, err error)
	ReadFieldEnd() error
	ReadMapBegin() (keyType TType, valueType TType, size int, err error)
	ReadMapEnd() error
	ReadListBegin() (elemType TType, size int, err error)
	ReadListEnd() error
	ReadSetBegin() (elemType TType, size int, err error)
	ReadSetEnd() error
	ReadBool() (value bool, err error)
	ReadByte() (value int8, err error)
	ReadI16() (value int16, err error)
	ReadI32() (value int32, err error)
	ReadI64() (value int64, err error)
	ReadDouble() (value float64, err error)
	ReadString() (value string, err error)
	ReadBinary() (value []byte, err error)
	// Skip discards the next data element of the given type, recursing
	// into containers and structs.
	Skip(fieldType TType) (err error)
	// Flush pushes any buffered bytes to the underlying transport.
	Flush() (err error)
	Transport() TTransport
}
// The maximum recursive depth the skip() function will traverse
const DEFAULT_RECURSION_DEPTH = 64

// SkipDefaultDepth skips over the next data element from the provided input
// TProtocol object, bounding recursion at DEFAULT_RECURSION_DEPTH to guard
// against maliciously deep payloads.
func SkipDefaultDepth(prot TProtocol, typeId TType) (err error) {
	return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH)
}
// Skip discards the next data element of type fieldType read from the
// provided TProtocol, recursing into container and struct types. Recursion
// is bounded by maxDepth; exceeding the bound yields a DEPTH_LIMIT protocol
// exception. Unknown field types are silently treated as empty (nil return),
// preserving historical behavior.
func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) {
	if maxDepth <= 0 {
		return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
	}
	switch fieldType {
	case STOP:
		return
	case BOOL:
		_, err = self.ReadBool()
		return
	case BYTE:
		_, err = self.ReadByte()
		return
	case I16:
		_, err = self.ReadI16()
		return
	case I32:
		_, err = self.ReadI32()
		return
	case I64:
		_, err = self.ReadI64()
		return
	case DOUBLE:
		_, err = self.ReadDouble()
		return
	case STRING:
		_, err = self.ReadString()
		return
	case STRUCT:
		if _, err = self.ReadStructBegin(); err != nil {
			return err
		}
		for {
			// Bug fix: the error from ReadFieldBegin was previously
			// discarded, which could spin forever on a broken transport.
			_, typeId, _, err := self.ReadFieldBegin()
			if err != nil {
				return err
			}
			if typeId == STOP {
				break
			}
			if err := Skip(self, typeId, maxDepth-1); err != nil {
				return err
			}
			// Bug fix: ReadFieldEnd's error was previously ignored.
			if err := self.ReadFieldEnd(); err != nil {
				return err
			}
		}
		return self.ReadStructEnd()
	case MAP:
		keyType, valueType, size, err := self.ReadMapBegin()
		if err != nil {
			return err
		}
		for i := 0; i < size; i++ {
			if err := Skip(self, keyType, maxDepth-1); err != nil {
				return err
			}
			// Bug fix: values were skipped via self.Skip(valueType), which
			// ignored both the returned error and the maxDepth budget.
			if err := Skip(self, valueType, maxDepth-1); err != nil {
				return err
			}
		}
		return self.ReadMapEnd()
	case SET:
		elemType, size, err := self.ReadSetBegin()
		if err != nil {
			return err
		}
		for i := 0; i < size; i++ {
			if err := Skip(self, elemType, maxDepth-1); err != nil {
				return err
			}
		}
		return self.ReadSetEnd()
	case LIST:
		elemType, size, err := self.ReadListBegin()
		if err != nil {
			return err
		}
		for i := 0; i < size; i++ {
			if err := Skip(self, elemType, maxDepth-1); err != nil {
				return err
			}
		}
		return self.ReadListEnd()
	}
	return nil
}
| apache-2.0 |
armando-migliaccio/tempest | tempest/api/compute/v3/servers/test_attach_interfaces.py | 4461 | # Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.test import attr
import time
class AttachInterfacesV3TestJSON(base.BaseV3ComputeTest):
    """Exercises create/list/show/delete of server network interfaces.

    Boots a server, then attaches interfaces (both with and without an
    explicit network), lists and shows them, and finally deletes one,
    verifying the listing shrinks accordingly.
    """
    _interface = 'json'

    @classmethod
    def setUpClass(cls):
        # Interface attachment is a Neutron feature; skip when unavailable.
        if not cls.config.service_available.neutron:
            raise cls.skipException("Neutron is required")
        super(AttachInterfacesV3TestJSON, cls).setUpClass()
        cls.client = cls.interfaces_client

    def _check_interface(self, iface, port_id=None, network_id=None,
                         fixed_ip=None):
        """Assert an interface dict is well-formed.

        Optional fields are only checked when a non-None expectation is given.
        """
        self.assertIn('port_state', iface)
        if port_id:
            self.assertEqual(iface['port_id'], port_id)
        if network_id:
            self.assertEqual(iface['net_id'], network_id)
        if fixed_ip:
            self.assertEqual(iface['fixed_ips'][0]['ip_address'], fixed_ip)

    def _create_server_get_interfaces(self):
        """Boot a server and return (server, interfaces) once the first
        interface reaches ACTIVE."""
        resp, server = self.create_test_server(wait_until='ACTIVE')
        resp, ifs = self.client.list_interfaces(server['id'])
        resp, body = self.client.wait_for_interface_status(
            server['id'], ifs[0]['port_id'], 'ACTIVE')
        ifs[0]['port_state'] = body['port_state']
        return server, ifs

    def _test_create_interface(self, server):
        """Attach an interface with no explicit network and return it."""
        resp, iface = self.client.create_interface(server['id'])
        resp, iface = self.client.wait_for_interface_status(
            server['id'], iface['port_id'], 'ACTIVE')
        self._check_interface(iface)
        return iface

    def _test_create_interface_by_network_id(self, server, ifs):
        """Attach an interface on the same network as the first one."""
        network_id = ifs[0]['net_id']
        resp, iface = self.client.create_interface(server['id'],
                                                   network_id=network_id)
        resp, iface = self.client.wait_for_interface_status(
            server['id'], iface['port_id'], 'ACTIVE')
        self._check_interface(iface, network_id=network_id)
        return iface

    def _test_show_interface(self, server, ifs):
        """Show the first interface and verify it matches the listing."""
        iface = ifs[0]
        resp, _iface = self.client.show_interface(server['id'],
                                                  iface['port_id'])
        self.assertEqual(iface, _iface)

    def _test_delete_interface(self, server, ifs):
        # NOTE(danms): delete not the first or last, but one in the middle
        iface = ifs[1]
        self.client.delete_interface(server['id'], iface['port_id'])
        # Deletion is asynchronous: poll briefly until the listing shrinks.
        for _ in range(5):
            _r, _ifs = self.client.list_interfaces(server['id'])
            if len(ifs) != len(_ifs):
                break
            time.sleep(1)
        self.assertEqual(len(_ifs), len(ifs) - 1)
        for _iface in _ifs:
            self.assertNotEqual(iface['port_id'], _iface['port_id'])
        return _ifs

    def _compare_iface_list(self, list1, list2):
        # NOTE(danms): port_state will likely have changed, so just
        # confirm the port_ids are the same at least
        list1 = [x['port_id'] for x in list1]
        list2 = [x['port_id'] for x in list2]
        self.assertEqual(sorted(list1), sorted(list2))

    @attr(type='gate')
    def test_create_list_show_delete_interfaces(self):
        server, ifs = self._create_server_get_interfaces()
        interface_count = len(ifs)
        # assertGreater gives a clearer failure message than assertTrue(x > 0).
        self.assertGreater(interface_count, 0)
        self._check_interface(ifs[0])
        iface = self._test_create_interface(server)
        ifs.append(iface)
        iface = self._test_create_interface_by_network_id(server, ifs)
        ifs.append(iface)
        resp, _ifs = self.client.list_interfaces(server['id'])
        self._compare_iface_list(ifs, _ifs)
        self._test_show_interface(server, ifs)
        _ifs = self._test_delete_interface(server, ifs)
        self.assertEqual(len(ifs) - 1, len(_ifs))
class AttachInterfacesV3TestXML(AttachInterfacesV3TestJSON):
    # Re-runs the entire JSON suite over the XML interface representation.
    _interface = 'xml'
| apache-2.0 |
matejonnet/pnc | integration-test/src/test/java/org/jboss/pnc/integration/utils/JsonMatcher.java | 1999 | /**
* JBoss, Home of Professional Open Source.
* Copyright 2014-2022 Red Hat, Inc., and individual contributors
* as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.pnc.integration.utils;
import org.hamcrest.CustomMatcher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.invoke.MethodHandles;
import java.util.function.Consumer;
import java.util.stream.Stream;
import static io.restassured.path.json.JsonPath.from;
/**
 * Hamcrest helpers for asserting on raw JSON payloads in integration tests.
 */
public class JsonMatcher {
    private static final Logger logger = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

    /**
     * Creates a matcher that succeeds when the matched raw JSON string contains a
     * non-null value at {@code jsonAttribute} (a JsonPath expression).
     *
     * @param jsonAttribute JsonPath of the attribute that must be present
     * @param actionWhenMatches optional callbacks invoked with the string form of
     *        the extracted value whenever the matcher succeeds
     * @return a Hamcrest matcher over raw JSON strings
     */
    @SafeVarargs // values are only read from the varargs array, never stored into it
    public static CustomMatcher<String> containsJsonAttribute(
            String jsonAttribute,
            Consumer<String>... actionWhenMatches) {
        return new CustomMatcher<String>("matchesJson") {
            @Override
            public boolean matches(Object o) {
                String rawJson = String.valueOf(o);
                // Parameterized logging avoids string concatenation when DEBUG is off.
                logger.debug("Evaluating raw JSON: {}", rawJson);
                Object value = from(rawJson).get(jsonAttribute);
                logger.debug("Got value from JSon: {}", value);
                if (value != null) {
                    if (actionWhenMatches != null) {
                        Stream.of(actionWhenMatches).forEach(action -> action.accept(String.valueOf(value)));
                    }
                    return true;
                }
                return false;
            }
        };
    }
}
| apache-2.0 |
coreyauger/scalajs-react-components | demo/src/main/scala/chandu0101/scalajs/react/components/demo/routes/AppRouter.scala | 6149 | package chandu0101.scalajs.react.components.demo.routes
import chandu0101.scalajs.react.components.demo.components.{AppHeader, ScalaCSSTutorial}
import chandu0101.scalajs.react.components.demo.pages._
import japgolly.scalajs.react._
import japgolly.scalajs.react.extra.router2.{Resolution, RouterConfigDsl, RouterCtl, _}
import japgolly.scalajs.react.vdom.prefix_<^._
import org.scalajs.dom
import scala.scalajs.js.Dynamic.{global => g}
/**
* Created by chandrasekharkode .
*/
object AppRouter {
  // Closed set of routable pages; each demo component wraps its own
  // LeftRoute sub-routes in a dedicated Page case class.
  sealed trait Page
  case object Home extends Page
  case object ScalaCSSDoc extends Page
  case class ReactListViewPages(p: LeftRoute) extends Page
  case class ReactTreeViewPages(p: LeftRoute) extends Page
  case class ReactPopoverPages(p: LeftRoute) extends Page
  case class ReactTablePages(p: LeftRoute) extends Page
  case class GoogleMapPages(p: LeftRoute) extends Page
  case class ReactTagsInputPages(p: LeftRoute) extends Page
  case class ReactSelectPages(p: LeftRoute) extends Page
  case class ReactGeomIconPages(p: LeftRoute) extends Page
  case class MuiPages(p: LeftRoute) extends Page
  case class ReactInfinitePages(p: LeftRoute) extends Page
  case class SpinnerPages(p: LeftRoute) extends Page
  // Router configuration: each module's routes are mounted under a hash
  // prefix and mapped into/out of the Page ADT via pmap.
  val config = RouterConfigDsl[Page].buildConfig { dsl =>
    import dsl._
    val reactListViewRoutes: Rule = ReactListViewRouteModule.routes.prefixPath_/("#reactlistview").pmap[Page](ReactListViewPages) { case ReactListViewPages(p) => p}
    val reactTreeViewRoutes: Rule = ReactTreeViewRouteModule.routes.prefixPath_/("#reacttreeview").pmap[Page](ReactTreeViewPages) { case ReactTreeViewPages(p) => p}
    val reactPopoverRoutes: Rule = ReactPopoverRouteModule.routes.prefixPath_/("#reactpopover").pmap[Page](ReactPopoverPages) { case ReactPopoverPages(p) => p}
    val reactTableRoutes: Rule = ReactTableRouteModule.routes.prefixPath_/("#reacttable").pmap[Page](ReactTablePages) { case ReactTablePages(p) => p}
    val reactTagsInputRoutes: Rule = ReactTagsInputRouteModule.routes.prefixPath_/("#reacttagsinput").pmap[Page](ReactTagsInputPages) { case ReactTagsInputPages(p) => p}
    val reactSelectRoutes: Rule = ReactSelectRouteModule.routes.prefixPath_/("#reactselect").pmap[Page](ReactSelectPages) { case ReactSelectPages(p) => p}
    val reactGeomIconRoutes: Rule = ReactGeomIcontRouteModule.routes.prefixPath_/("#reactgeomicon").pmap[Page](ReactGeomIconPages) { case ReactGeomIconPages(p) => p}
    val reactInfiniteRoutes: Rule = ReactInfiniteRouteModule.routes.prefixPath_/("#reactinite").pmap[Page](ReactInfinitePages) { case ReactInfinitePages(p) => p}
    val googleMapRoutes: Rule = GoogleMapRouteModule.routes.prefixPath_/("#googlemap").pmap[Page](GoogleMapPages) { case GoogleMapPages(p) => p}
    val muiRoutes: Rule = MuiRouteModule.routes.prefixPath_/("#materialui").pmap[Page](MuiPages) { case MuiPages(p) => p}
    val spinnerRoutes: Rule = SpinnerRouteModule.routes.prefixPath_/("#spinner").pmap[Page](SpinnerPages) { case SpinnerPages(p) => p}
    // Unknown URLs are replaced (not pushed) with the home page.
    (trimSlashes
      | staticRoute(root, Home) ~> renderR(ctrl => HomePage(ctrl))
      | staticRoute("#scalacss", ScalaCSSDoc) ~> render(ScalaCSSTutorial())
      | reactListViewRoutes
      | reactTreeViewRoutes
      | reactTagsInputRoutes
      | reactSelectRoutes
      | reactGeomIconRoutes
      | reactTableRoutes
      | reactInfiniteRoutes
      | spinnerRoutes
      | muiRoutes
      | reactPopoverRoutes
      | googleMapRoutes
      ).notFound(redirectToPage(Home)(Redirect.Replace))
      .renderWith(layout)
  }
  // Common chrome around every resolved page: header, content, footer.
  def layout(c: RouterCtl[Page], r: Resolution[Page]) = {
    <.div(
      AppHeader(),
      r.render(),
      <.div(^.textAlign := "center", ^.key := "footer")(
        <.hr(),
        <.p("Built using scalajs-react")
      )
    )
  }
  // Tiles shown on the home page; tags drive the search/filter box.
  val homePageMenu = Vector(
    HomePage.ComponentInfo(name = "Material UI", imagePath = g.materialuiImage.toString, route = MuiPages(MuiRouteModule.Info), tags = Stream("materialui", "material", "framework")),
    HomePage.ComponentInfo(name = "React ListView", imagePath = g.reactListViewImage.toString, route = ReactListViewPages(ReactListViewRouteModule.Info), tags = Stream("list view", "search", "listview")),
    HomePage.ComponentInfo(name = "Google Map", imagePath = g.googleMapImage.toString, route = GoogleMapPages(GoogleMapRouteModule.Info), tags = Stream("google", "map", "googlemap")),
    //    HomePage.ComponentInfo(name = "React TreeView", imagePath = g.reactTreeViewImage.toString, route = ReactTreeViewPages(ReactTreeViewRouteModule.Info), tags = Stream("tree view", "search", "treeview")),
    HomePage.ComponentInfo(name = "React Table", imagePath = g.reactTableImage.toString, route = ReactTablePages(ReactTableRouteModule.Info), tags = Stream("table", "search", "pagination", "sorting", "cutom cell")),
    HomePage.ComponentInfo(name = "React Tags Input", imagePath = g.reactTagsInputImage.toString, route = ReactTagsInputPages(ReactTagsInputRouteModule.Info), tags = Stream("tags", "input")),
    HomePage.ComponentInfo(name = "React Select", imagePath = g.reactSelectImage.toString, route = ReactSelectPages(ReactSelectRouteModule.Info), tags = Stream("select", "multi", "search", "filter", "multi select")),
    HomePage.ComponentInfo(name = "React GeomIcons", imagePath = g.reactGeomIconImage.toString, route = ReactGeomIconPages(ReactGeomIcontRouteModule.Info), tags = Stream("icons", "svg")),
    HomePage.ComponentInfo(name = "React Infinite", imagePath = g.reactInfiniteImage.toString, route = ReactInfinitePages(ReactInfiniteRouteModule.Info), tags = Stream("infinite scroll", "listview")),
    HomePage.ComponentInfo(name = "Spinner", imagePath = g.spinnerImage.toString, route = SpinnerPages(SpinnerRouteModule.Info), tags = Stream("spinner"))
    //    HomePage.ComponentInfo(name = "React Popover", imagePath = g.reactPopoverImage.toString, route = ReactPopoverPages(ReactPopoverRouteModule.Info), tags = Stream("modal", "popover"))
  )
  // Local development serves from the site root; the deployed demo lives
  // under the /sjrc/ path.
  val baseUrl =
    if (dom.window.location.hostname == "localhost")
      BaseUrl.fromWindowOrigin_/
    else
      BaseUrl.fromWindowOrigin / "sjrc/"
  val router = Router(baseUrl, config)
}
| apache-2.0 |
Tycheo/coffeemud | com/planet_ink/coffee_mud/Commands/Load.java | 9566 | package com.planet_ink.coffee_mud.Commands;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.core.CMClass.CMObjectType;
import com.planet_ink.coffee_mud.core.collections.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;
import java.io.*;
import java.util.*;
/*
Copyright 2004-2015 Bo Zimmerman
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
 * The LOAD command. Has two distinct behaviors depending on the invoker:
 * for ordinary players it loads ammunition into a wielded ranged weapon;
 * for archons with LOADUNLOAD security it (re)loads server-side artifacts:
 * resources, factions, or Java classes (optionally compiling .java sources
 * on the fly via the JDK's bundled compiler).
 */
@SuppressWarnings({"unchecked","rawtypes"})
public class Load extends StdCommand
{
	public Load(){}
	private final String[] access=I(new String[]{"LOAD"});
	@Override public String[] getAccessWords(){return access;}
	/** Concatenates a String array with the string forms of the given object types. */
	public final String[] combine(final String[] set1, final CMClass.CMObjectType[] set2)
	{
		final String[] fset=new String[set1.length+set2.length];
		for(int x=0;x<set1.length;x++)
			fset[x]=set1[x];
		for(int x=0;x<set2.length;x++)
			fset[set1.length+x]=set2[x].toString();
		return fset;
	}
	// Everything an archon may LOAD: RESOURCE, FACTION, plus every class type.
	public final String ARCHON_LIST[]=combine(new String[]{"RESOURCE","FACTION"},CMClass.CMObjectType.values());
	/**
	 * Returns the first usable (not destroyed, uses remaining) piece of
	 * ammunition of the given type from the list, or null when exhausted.
	 */
	public final Ammunition getNextAmmunition(String type, List<Ammunition> ammos)
	{
		for(final Ammunition ammo : ammos)
			if((!ammo.amDestroyed())&&(ammo.usesRemaining() > 0)&&(ammo.ammunitionType().equalsIgnoreCase(type)))
				return ammo;
		return null;
	}
	@Override
	public boolean execute(MOB mob, Vector commands, int metaFlags)
		throws java.io.IOException
	{
		if(mob==null)
			return true;
		// Archon path only when the invoker has LOADUNLOAD security...
		boolean tryArchon=CMSecurity.isAllowed(mob,mob.location(),CMSecurity.SecFlag.LOADUNLOAD);
		if(commands.size()<3)
		{
			if(tryArchon)
				mob.tell(L("LOAD what? Try @x1 [CLASSNAME]",CMParms.toStringList(ARCHON_LIST)));
			else
				mob.tell(L("Load what where?"));
			return false;
		}
		String what=(String)commands.get(1);
		String name=CMParms.combine(commands,2);
		if(tryArchon)
		{
			// ...but prefer the weapon-loading path if a reloadable weapon is
			// wielded, unless an explicit archon keyword was given.
			final Item I=mob.fetchWieldedItem();
			if((I instanceof AmmunitionWeapon)&&((AmmunitionWeapon)I).requiresAmmunition())
				tryArchon=false;
			for(final String aList : ARCHON_LIST)
				if(what.equalsIgnoreCase(aList))
					tryArchon=true;
		}
		if(!tryArchon)
		{
			// Player path: "LOAD <ammo> <weapon>" — find matching ammunition
			// in inventory and fill the named weapon(s) to capacity.
			commands.remove(0);
			final XVector ammoV=new XVector(what);
			final List<Item> baseAmmoItems=CMLib.english().fetchItemList(mob,mob,null,ammoV,Wearable.FILTER_UNWORNONLY,false);
			final List<Ammunition> ammos=new XVector<Ammunition>();
			for (Item I : baseAmmoItems)
			{
				if(I instanceof Ammunition)
				{
					ammos.add((Ammunition)I);
				}
			}
			if(baseAmmoItems.size()==0)
				mob.tell(L("You don't seem to have any ammunition like that."));
			else
			if((ammos.size()==0)&&(!what.equalsIgnoreCase("all")))
				mob.tell(L("You can't seem to use that as ammunition."));
			else
			{
				commands.remove(0);
				final List<Item> baseItems=CMLib.english().fetchItemList(mob,mob,null,commands,Wearable.FILTER_ANY,false);
				final List<AmmunitionWeapon> items=new XVector<AmmunitionWeapon>();
				for (Item I : baseItems)
				{
					if((I instanceof AmmunitionWeapon)&&((AmmunitionWeapon)I).requiresAmmunition())
						items.add((AmmunitionWeapon)I);
				}
				boolean doneOne=false;
				if(baseItems.size()==0)
					mob.tell(L("You don't seem to have that."));
				else
				if(items.size()==0)
					mob.tell(L("You can't seem to load that."));
				else
				for(final AmmunitionWeapon W : items)
				{
					Ammunition ammunition = getNextAmmunition(W.ammunitionType(),ammos);
					if(ammunition==null)
					{
						mob.tell(L("You are all out of @x1.",W.ammunitionType()));
					}
					else
					// Keep reloading until the weapon is at capacity; the
					// doneOne flag guarantees at least one reload message.
					while((ammunition != null)
					&&((W.ammunitionRemaining() < W.ammunitionCapacity())||(!doneOne)))
					{
						final CMMsg newMsg=CMClass.getMsg(mob,W,ammunition,CMMsg.MSG_RELOAD,L("<S-NAME> reload(s) <T-NAME> with <O-NAME>."));
						if(mob.location().okMessage(mob,newMsg))
						{
							doneOne=true;
							mob.location().send(mob,newMsg);
							ammunition = getNextAmmunition(W.ammunitionType(),ammos);
						}
						else
							break;
					}
				}
			}
		}
		else
		{
			// Archon path: FACTION / RESOURCE / class (re)loading.
			if((what.equalsIgnoreCase("FACTION"))
			&&(CMSecurity.isAllowed(mob,mob.location(),CMSecurity.SecFlag.CMDFACTIONS)))
			{
				final Faction F=CMLib.factions().getFaction(name);
				if(F==null)
					mob.tell(L("Faction file '@x1' was not found.",name));
				else
					mob.tell(L("Faction '@x1' from file '@x2' was loaded.",F.name(),name));
				return false;
			}
			else
			if(what.equalsIgnoreCase("RESOURCE"))
			{
				final CMFile F=new CMFile(name,mob,CMFile.FLAG_LOGERRORS);
				if((!F.exists())||(!F.canRead()))
					mob.tell(L("File '@x1' could not be accessed.",name));
				else
				{
					final StringBuffer buf=Resources.getFileResource(name,true); // enforces its own security
					if((buf==null)||(buf.length()==0))
						mob.tell(L("Resource '@x1' was not found.",name));
					else
						mob.tell(L("Resource '@x1' was loaded.",name));
				}
			}
			else
			if(CMSecurity.isASysOp(mob))
			{
				try
				{
					if(name.toUpperCase().endsWith(".JAVA"))
					{
						// Compile the .java source in-process via the JDK's
						// javac main class, loaded reflectively so the mud
						// still boots without tools.jar on the classpath.
						while(name.startsWith("/"))
							name=name.substring(1);
						Class<?> C=null;
						Object CO=null;
						try
						{
							C=Class.forName("com.sun.tools.javac.Main", true, CMClass.instance());
							if(C!=null)
								CO=C.newInstance();
						}catch(final Exception e)
						{
							Log.errOut("Load",e.getMessage());
						}
						final ByteArrayOutputStream bout=new ByteArrayOutputStream();
						final PrintWriter pout=new PrintWriter(new OutputStreamWriter(bout));
						if(CO==null)
						{
							mob.tell(L("Unable to instantiate compiler. You might try including your Java JDK's lib/tools.jar in your classpath next time you boot the mud."));
							return false;
						}
						final String[] args=new String[]{name};
						if(C!=null)
						{
							// compile(String[], PrintWriter) returns a non-zero
							// Integer on failure; compiler output goes to bout.
							final java.lang.reflect.Method M=C.getMethod("compile",new Class[]{args.getClass(),PrintWriter.class});
							final Object returnVal=M.invoke(CO,new Object[]{args,pout});
							if((returnVal instanceof Integer)&&(((Integer)returnVal).intValue()!=0))
							{
								pout.flush();
								mob.tell(L("Compile failed:"));
								if(mob.session()!=null)
									mob.session().rawOut(bout.toString());
								return false;
							}
						}
						name=name.substring(0,name.length()-5)+".class";
					}
					// Derive the dotted class name from the file path.
					String unloadClassName=name;
					if(unloadClassName.toUpperCase().endsWith(".CLASS"))
						unloadClassName=unloadClassName.substring(0,unloadClassName.length()-6);
					unloadClassName=unloadClassName.replace('\\','.');
					unloadClassName=unloadClassName.replace('/','.');
					if(what.equalsIgnoreCase("CLASS"))
					{
						// Generic CLASS keyword: infer the concrete object type
						// from an already-loaded prototype, if any.
						final Object O=CMClass.getObjectOrPrototype(unloadClassName);
						if(O!=null)
						{
							final CMClass.CMObjectType x=CMClass.getObjectType(O);
							if(x!=null)
								what=x.toString();
						}
					}
					final CMObjectType whatType=CMClass.findObjectType(what);
					if(whatType==null)
						mob.tell(L("Don't know how to load a '@x1'. Try one of the following: @x2",what,CMParms.toStringList(ARCHON_LIST)));
					else
					{
						// Unload any existing prototype first so the reload
						// picks up the fresh bytecode.
						final Object O=CMClass.getObjectOrPrototype(unloadClassName);
						if((O instanceof CMObject)
						&&(name.toUpperCase().endsWith(".CLASS"))
						&&(CMClass.delClass(whatType,(CMObject)O)))
							mob.tell(L("@x1 was unloaded.",unloadClassName));
						if(CMClass.loadClass(whatType,name,false))
						{
							mob.tell(L("@x1 @x2 was successfully loaded.",CMStrings.capitalizeAndLower(what),name));
							return true;
						}
					}
				}
				catch(final java.lang.Error err)
				{
					mob.tell(err.getMessage());
				}
				catch(final Exception t)
				{
					Log.errOut("Load",t.getClass().getName()+": "+t.getMessage());
				}
				mob.tell(L("@x1 @x2 was not loaded.",CMStrings.capitalizeAndLower(what),name));
			}
		}
		return false;
	}
	@Override public boolean canBeOrdered(){return true;}
	@Override public boolean securityCheck(MOB mob){return super.securityCheck(mob);}
	@Override public double combatActionsCost(final MOB mob, final List<String> cmds){return CMProps.getCommandCombatActionCost(ID());}
	@Override public double actionsCost(final MOB mob, final List<String> cmds){return CMProps.getCommandActionCost(ID());}
}
| apache-2.0 |
modernpharaohs/city-new | vendor/pingpong/generators/Console/ScaffoldCommand.php | 1561 | <?php
namespace Pingpong\Generators\Console;
use Illuminate\Console\Command;
use Pingpong\Generators\ScaffoldGenerator;
use Symfony\Component\Console\Input\InputArgument;
use Symfony\Component\Console\Input\InputOption;
class ScaffoldCommand extends Command
{
    /**
     * The console command name.
     *
     * @var string
     */
    protected $name = 'generate:scaffold';

    /**
     * The console command description.
     *
     * @var string
     */
    protected $description = 'Generate a new scaffold resource.';

    /**
     * Run the scaffold generator against this command's input.
     */
    public function fire()
    {
        $generator = new ScaffoldGenerator($this);

        $generator->run();
    }

    /**
     * Positional arguments accepted by the command.
     *
     * @return array
     */
    public function getArguments()
    {
        $entity = ['entity', InputArgument::REQUIRED, 'The entity name.', null];

        return [$entity];
    }

    /**
     * Options accepted by the command.
     *
     * @return array
     */
    public function getOptions()
    {
        $fields = ['fields', null, InputOption::VALUE_OPTIONAL, 'The fields of migration. Separated with comma (,).', null];
        $prefix = ['prefix', null, InputOption::VALUE_OPTIONAL, 'The prefix path & routes.', null];
        $noQuestion = ['no-question', null, InputOption::VALUE_NONE, 'Don\'t ask any question.', null];
        $existing = ['existing', 'e', InputOption::VALUE_NONE, 'Generate scaffold from an existing table.', null];
        $force = ['force', 'f', InputOption::VALUE_NONE, 'Force the creation if file already exists.', null];

        return [$fields, $prefix, $noQuestion, $existing, $force];
    }
}
| apache-2.0 |
sih4sing5hong5/google-sites-liberation | src/test/java/com/google/sites/liberation/export/InMemoryEntryStoreTest.java | 3444 | /*
* Copyright (C) 2009 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.sites.liberation.export;
import static org.junit.Assert.*;
import com.google.gdata.data.PlainTextConstruct;
import com.google.gdata.data.sites.BaseContentEntry;
import com.google.gdata.data.sites.WebPageEntry;
import com.google.sites.liberation.export.EntryStore;
import com.google.sites.liberation.export.InMemoryEntryStore;
import com.google.sites.liberation.util.EntryUtils;
import org.junit.Before;
import org.junit.Test;
import java.util.Collection;
/**
* @author bsimon@google.com (Benjamin Simon)
*/
public class InMemoryEntryStoreTest {

    // Store under test; a fresh instance is created before every test method.
    private EntryStore entryStore;

    @Before
    public void setUp() {
        entryStore = new InMemoryEntryStore();
    }

    /** Registers each of the given entries with the store under test. */
    private void addAll(BaseContentEntry<?>... entries) {
        for (BaseContentEntry<?> entry : entries) {
            entryStore.addEntry(entry);
        }
    }

    @Test
    public void testGetEntry() {
        assertNull(entryStore.getEntry("entry1"));
        BaseContentEntry<?> root = getNewEntry("entry1");
        BaseContentEntry<?> child = getNewEntry("entry2", "entry1");
        BaseContentEntry<?> grandChild = getNewEntry("entry3", "entry2");
        addAll(root, child, grandChild);
        assertEquals(root, entryStore.getEntry("entry1"));
        assertEquals(child, entryStore.getEntry("entry2"));
        assertEquals(grandChild, entryStore.getEntry("entry3"));
        assertNull(entryStore.getEntry("entry4"));
    }

    @Test
    public void testGetChildren() {
        assertTrue(entryStore.getChildren("entry1").isEmpty());
        BaseContentEntry<?> root = getNewEntry("entry1");
        BaseContentEntry<?> firstChild = getNewEntry("entry2", "entry1");
        BaseContentEntry<?> secondChild = getNewEntry("entry3", "entry1");
        BaseContentEntry<?> grandChild = getNewEntry("entry4", "entry2");
        addAll(root, firstChild, secondChild, grandChild);
        Collection<BaseContentEntry<?>> rootChildren = entryStore.getChildren("entry1");
        assertEquals(2, rootChildren.size());
        assertTrue(rootChildren.contains(firstChild));
        assertTrue(rootChildren.contains(secondChild));
        Collection<BaseContentEntry<?>> secondLevel = entryStore.getChildren("entry2");
        assertEquals(1, secondLevel.size());
        assertTrue(secondLevel.contains(grandChild));
        assertTrue(entryStore.getChildren("entry3").isEmpty());
        assertTrue(entryStore.getChildren("entry4").isEmpty());
    }

    /** Creates a parentless entry with the given id and an empty title. */
    private BaseContentEntry<?> getNewEntry(String id) {
        return getNewEntry(id, null, "");
    }

    /** Creates an entry with the given id, parented under {@code parentId}. */
    private BaseContentEntry<?> getNewEntry(String id, String parentId) {
        return getNewEntry(id, parentId, "");
    }

    /**
     * Creates a {@link WebPageEntry} with the given id, an optional parent id
     * and an optional title (each applied only when non-null).
     */
    private BaseContentEntry<?> getNewEntry(String id, String parentId,
            String title) {
        WebPageEntry entry = new WebPageEntry();
        entry.setId(id);
        if (parentId != null) {
            EntryUtils.setParentId(entry, parentId);
        }
        if (title != null) {
            entry.setTitle(new PlainTextConstruct(title));
        }
        return entry;
    }
}
| apache-2.0 |
scottyaslan/nifi-registry | nifi-registry-core/nifi-registry-framework/src/main/java/org/apache/nifi/registry/serialization/jackson/JacksonExtensionSerializer.java | 1390 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.registry.serialization.jackson;
import com.fasterxml.jackson.core.type.TypeReference;
import org.apache.nifi.registry.extension.component.manifest.Extension;
import org.apache.nifi.registry.serialization.SerializationException;
/**
* A Jackson serializer for Extensions.
*/
public class JacksonExtensionSerializer extends JacksonSerializer<Extension> {

    // Supplies the concrete generic type so Jackson can deserialize the
    // SerializationContainer wrapper with Extension as its payload; the
    // anonymous subclass captures the type argument that erasure would
    // otherwise discard at runtime.
    @Override
    TypeReference<SerializationContainer<Extension>> getDeserializeTypeRef() throws SerializationException {
        return new TypeReference<SerializationContainer<Extension>>() {};
    }
}
| apache-2.0 |
filipw/apress-recipes-webapi | Chapter 11/11-8/Apress.Recipes.WebApi/Apress.Recipes.WebApi/IntegrationTest.cs | 2540 | using System;
using System.Collections.Generic;
using System.Linq;
using System.Net.Http;
using System.Net.Http.Formatting;
using System.Reflection;
using System.Web.Http.Dispatcher;
using Microsoft.Owin.Testing;
using Moq;
using Xunit;
namespace Apress.Recipes.WebApi
{
/// <summary>
/// In-memory OWIN integration tests for the Web API pipeline.
/// Implements <see cref="IDisposable"/> so the xUnit runner disposes the
/// <see cref="TestServer"/> after each test (xUnit instantiates the test
/// class once per test method and calls Dispose when it implements
/// IDisposable). Previously the class declared Dispose() without the
/// interface, so the server was never torn down and leaked per test.
/// </summary>
public class IntegrationTests : IDisposable
{
    private readonly TestServer _server;
    private readonly Mock<ILoggingService> _loggingService;

    public IntegrationTests()
    {
        _loggingService = new Mock<ILoggingService>();
        _server = TestServer.Create<Startup>();
        // The middleware resolves its logger lazily; substitute the mock so
        // logging calls can be verified from the tests below.
        LoggingMiddleware.LoggingService = new Lazy<ILoggingService>(() => _loggingService.Object);
    }

    [Fact]
    public async void GetHelloReturnsCorrectResponse()
    {
        var response = await _server.HttpClient.GetAsync("/hello");
        var result = await response.Content.ReadAsStringAsync();
        Assert.Equal("Hello World", result);
    }

    [Fact]
    public async void GetHelloSetsMaxAgeTo100()
    {
        var response = await _server.HttpClient.GetAsync("/hello");
        Assert.Equal(TimeSpan.FromSeconds(100), response.Headers.CacheControl.MaxAge);
    }

    [Fact]
    public async void GetHelloGoesThroughLoggingHandler()
    {
        // Only the side effect on the logging service matters here.
        await _server.HttpClient.GetAsync("/hello");
        _loggingService.Verify(i => i.Log("http://www.apress.com/hello"), Times.Once);
    }

    [Fact]
    public async void PostCanRespondInXml()
    {
        var message = new MessageDto
        {
            Text = "This is XML"
        };
        var response = await _server.HttpClient.PostAsXmlAsync("/hello", message);
        var result = await response.Content.ReadAsAsync<MessageDto>(new[] { new XmlMediaTypeFormatter() });
        Assert.Equal(message.Text, result.Text);
    }

    [Fact]
    public async void PostCanRespondInJson()
    {
        var message = new MessageDto
        {
            Text = "This is JSON"
        };
        var response = await _server.HttpClient.PostAsJsonAsync("/hello", message);
        var result = await response.Content.ReadAsAsync<MessageDto>(new[] { new JsonMediaTypeFormatter() });
        Assert.Equal(message.Text, result.Text);
    }

    /// <summary>Disposes the in-memory test server, if one was created.</summary>
    public void Dispose()
    {
        if (_server != null)
        {
            _server.Dispose();
        }
    }
}
} | apache-2.0 |
opennetworkinglab/onos | protocols/p4runtime/ctl/src/main/java/org/onosproject/p4runtime/ctl/controller/PacketInEvent.java | 2200 | /*
* Copyright 2019-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.p4runtime.ctl.controller;
import com.google.common.base.MoreObjects;
import com.google.common.base.Objects;
import org.onosproject.net.DeviceId;
import org.onosproject.net.pi.runtime.PiPacketOperation;
import org.onosproject.p4runtime.api.P4RuntimePacketIn;
import static com.google.common.base.Preconditions.checkNotNull;
/**
* P4Runtime packet-in.
*/
public final class PacketInEvent implements P4RuntimePacketIn {

    // Device that originated the packet-in and the parsed packet operation.
    // Both are immutable and never null (enforced in the constructor).
    private final DeviceId deviceId;
    private final PiPacketOperation operation;

    /**
     * Creates a packet-in event for the given device and packet operation.
     *
     * @param deviceId  device that originated the packet-in
     * @param operation parsed packet operation
     */
    public PacketInEvent(DeviceId deviceId, PiPacketOperation operation) {
        this.deviceId = checkNotNull(deviceId);
        this.operation = checkNotNull(operation);
    }

    @Override
    public DeviceId deviceId() {
        return deviceId;
    }

    @Override
    public PiPacketOperation packetOperation() {
        return operation;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) {
            return true;
        }
        // instanceof is equivalent to a getClass() comparison here because
        // this class is final (and instanceof also rejects null).
        if (!(o instanceof PacketInEvent)) {
            return false;
        }
        PacketInEvent other = (PacketInEvent) o;
        return Objects.equal(deviceId, other.deviceId)
                && Objects.equal(operation, other.operation);
    }

    @Override
    public int hashCode() {
        return Objects.hashCode(deviceId, operation);
    }

    @Override
    public String toString() {
        return MoreObjects.toStringHelper(this)
                .add("deviceId", deviceId)
                .add("operation", operation)
                .toString();
    }
}
| apache-2.0 |
flofreud/aws-sdk-java | aws-java-sdk-iot/src/main/java/com/amazonaws/services/iot/model/CertificateDescription.java | 19188 | /*
* Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights
* Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.iot.model;
import java.io.Serializable;
import java.util.Objects;
/**
* <p>
* Describes a certificate.
* </p>
*/
/**
 * <p>
 * Describes a certificate.
 * </p>
 * <p>
 * Plain mutable data holder following the AWS SDK model conventions: each
 * property has a setter, a getter and a fluent {@code withXxx} variant that
 * returns {@code this} for chaining. {@link #equals(Object)} and
 * {@link #hashCode()} are implemented with {@link java.util.Objects};
 * {@code Objects.hash} uses the same 31-based accumulation over the fields in
 * the same order as the previous hand-rolled implementation, so hash values
 * are unchanged.
 * </p>
 */
public class CertificateDescription implements Serializable, Cloneable {

    /** The ARN of the certificate. */
    private String certificateArn;

    /** The ID of the certificate. */
    private String certificateId;

    /** The certificate ID of the CA certificate used to sign this certificate. */
    private String caCertificateId;

    /** The status of the certificate (string form of {@link CertificateStatus}). */
    private String status;

    /** The certificate data, in PEM format. */
    private String certificatePem;

    /** The ID of the AWS account that owns the certificate. */
    private String ownedBy;

    /** The ID of the AWS account of the previous owner of the certificate. */
    private String previousOwnedBy;

    /** The date and time the certificate was created. */
    private java.util.Date creationDate;

    /** The date and time the certificate was last modified. */
    private java.util.Date lastModifiedDate;

    /** The transfer data. */
    private TransferData transferData;

    /**
     * Sets the ARN of the certificate.
     *
     * @param certificateArn the ARN of the certificate
     */
    public void setCertificateArn(String certificateArn) {
        this.certificateArn = certificateArn;
    }

    /**
     * Returns the ARN of the certificate.
     *
     * @return the ARN of the certificate
     */
    public String getCertificateArn() {
        return this.certificateArn;
    }

    /**
     * Sets the ARN of the certificate.
     *
     * @param certificateArn the ARN of the certificate
     * @return this object, so that method calls can be chained together
     */
    public CertificateDescription withCertificateArn(String certificateArn) {
        setCertificateArn(certificateArn);
        return this;
    }

    /**
     * Sets the ID of the certificate.
     *
     * @param certificateId the ID of the certificate
     */
    public void setCertificateId(String certificateId) {
        this.certificateId = certificateId;
    }

    /**
     * Returns the ID of the certificate.
     *
     * @return the ID of the certificate
     */
    public String getCertificateId() {
        return this.certificateId;
    }

    /**
     * Sets the ID of the certificate.
     *
     * @param certificateId the ID of the certificate
     * @return this object, so that method calls can be chained together
     */
    public CertificateDescription withCertificateId(String certificateId) {
        setCertificateId(certificateId);
        return this;
    }

    /**
     * Sets the certificate ID of the CA certificate used to sign this
     * certificate.
     *
     * @param caCertificateId the signing CA's certificate ID
     */
    public void setCaCertificateId(String caCertificateId) {
        this.caCertificateId = caCertificateId;
    }

    /**
     * Returns the certificate ID of the CA certificate used to sign this
     * certificate.
     *
     * @return the signing CA's certificate ID
     */
    public String getCaCertificateId() {
        return this.caCertificateId;
    }

    /**
     * Sets the certificate ID of the CA certificate used to sign this
     * certificate.
     *
     * @param caCertificateId the signing CA's certificate ID
     * @return this object, so that method calls can be chained together
     */
    public CertificateDescription withCaCertificateId(String caCertificateId) {
        setCaCertificateId(caCertificateId);
        return this;
    }

    /**
     * Sets the status of the certificate.
     *
     * @param status the status of the certificate
     * @see CertificateStatus
     */
    public void setStatus(String status) {
        this.status = status;
    }

    /**
     * Returns the status of the certificate.
     *
     * @return the status of the certificate
     * @see CertificateStatus
     */
    public String getStatus() {
        return this.status;
    }

    /**
     * Sets the status of the certificate.
     *
     * @param status the status of the certificate
     * @return this object, so that method calls can be chained together
     * @see CertificateStatus
     */
    public CertificateDescription withStatus(String status) {
        setStatus(status);
        return this;
    }

    /**
     * Sets the status of the certificate from the enum value.
     *
     * @param status the status of the certificate; must not be {@code null}
     * @see CertificateStatus
     */
    public void setStatus(CertificateStatus status) {
        this.status = status.toString();
    }

    /**
     * Sets the status of the certificate from the enum value.
     *
     * @param status the status of the certificate; must not be {@code null}
     * @return this object, so that method calls can be chained together
     * @see CertificateStatus
     */
    public CertificateDescription withStatus(CertificateStatus status) {
        setStatus(status);
        return this;
    }

    /**
     * Sets the certificate data, in PEM format.
     *
     * @param certificatePem the certificate data, in PEM format
     */
    public void setCertificatePem(String certificatePem) {
        this.certificatePem = certificatePem;
    }

    /**
     * Returns the certificate data, in PEM format.
     *
     * @return the certificate data, in PEM format
     */
    public String getCertificatePem() {
        return this.certificatePem;
    }

    /**
     * Sets the certificate data, in PEM format.
     *
     * @param certificatePem the certificate data, in PEM format
     * @return this object, so that method calls can be chained together
     */
    public CertificateDescription withCertificatePem(String certificatePem) {
        setCertificatePem(certificatePem);
        return this;
    }

    /**
     * Sets the ID of the AWS account that owns the certificate.
     *
     * @param ownedBy the owning account ID
     */
    public void setOwnedBy(String ownedBy) {
        this.ownedBy = ownedBy;
    }

    /**
     * Returns the ID of the AWS account that owns the certificate.
     *
     * @return the owning account ID
     */
    public String getOwnedBy() {
        return this.ownedBy;
    }

    /**
     * Sets the ID of the AWS account that owns the certificate.
     *
     * @param ownedBy the owning account ID
     * @return this object, so that method calls can be chained together
     */
    public CertificateDescription withOwnedBy(String ownedBy) {
        setOwnedBy(ownedBy);
        return this;
    }

    /**
     * Sets the ID of the AWS account of the previous owner of the
     * certificate.
     *
     * @param previousOwnedBy the previous owner's account ID
     */
    public void setPreviousOwnedBy(String previousOwnedBy) {
        this.previousOwnedBy = previousOwnedBy;
    }

    /**
     * Returns the ID of the AWS account of the previous owner of the
     * certificate.
     *
     * @return the previous owner's account ID
     */
    public String getPreviousOwnedBy() {
        return this.previousOwnedBy;
    }

    /**
     * Sets the ID of the AWS account of the previous owner of the
     * certificate.
     *
     * @param previousOwnedBy the previous owner's account ID
     * @return this object, so that method calls can be chained together
     */
    public CertificateDescription withPreviousOwnedBy(String previousOwnedBy) {
        setPreviousOwnedBy(previousOwnedBy);
        return this;
    }

    /**
     * Sets the date and time the certificate was created.
     *
     * @param creationDate the creation date and time
     */
    public void setCreationDate(java.util.Date creationDate) {
        this.creationDate = creationDate;
    }

    /**
     * Returns the date and time the certificate was created.
     *
     * @return the creation date and time
     */
    public java.util.Date getCreationDate() {
        return this.creationDate;
    }

    /**
     * Sets the date and time the certificate was created.
     *
     * @param creationDate the creation date and time
     * @return this object, so that method calls can be chained together
     */
    public CertificateDescription withCreationDate(java.util.Date creationDate) {
        setCreationDate(creationDate);
        return this;
    }

    /**
     * Sets the date and time the certificate was last modified.
     *
     * @param lastModifiedDate the last-modified date and time
     */
    public void setLastModifiedDate(java.util.Date lastModifiedDate) {
        this.lastModifiedDate = lastModifiedDate;
    }

    /**
     * Returns the date and time the certificate was last modified.
     *
     * @return the last-modified date and time
     */
    public java.util.Date getLastModifiedDate() {
        return this.lastModifiedDate;
    }

    /**
     * Sets the date and time the certificate was last modified.
     *
     * @param lastModifiedDate the last-modified date and time
     * @return this object, so that method calls can be chained together
     */
    public CertificateDescription withLastModifiedDate(
            java.util.Date lastModifiedDate) {
        setLastModifiedDate(lastModifiedDate);
        return this;
    }

    /**
     * Sets the transfer data.
     *
     * @param transferData the transfer data
     */
    public void setTransferData(TransferData transferData) {
        this.transferData = transferData;
    }

    /**
     * Returns the transfer data.
     *
     * @return the transfer data
     */
    public TransferData getTransferData() {
        return this.transferData;
    }

    /**
     * Sets the transfer data.
     *
     * @param transferData the transfer data
     * @return this object, so that method calls can be chained together
     */
    public CertificateDescription withTransferData(TransferData transferData) {
        setTransferData(transferData);
        return this;
    }

    /**
     * Returns a string representation of this object; useful for testing and
     * debugging. Only non-null properties are included, preserving the output
     * format of the generated SDK classes.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getCertificateArn() != null)
            sb.append("CertificateArn: " + getCertificateArn() + ",");
        if (getCertificateId() != null)
            sb.append("CertificateId: " + getCertificateId() + ",");
        if (getCaCertificateId() != null)
            sb.append("CaCertificateId: " + getCaCertificateId() + ",");
        if (getStatus() != null)
            sb.append("Status: " + getStatus() + ",");
        if (getCertificatePem() != null)
            sb.append("CertificatePem: " + getCertificatePem() + ",");
        if (getOwnedBy() != null)
            sb.append("OwnedBy: " + getOwnedBy() + ",");
        if (getPreviousOwnedBy() != null)
            sb.append("PreviousOwnedBy: " + getPreviousOwnedBy() + ",");
        if (getCreationDate() != null)
            sb.append("CreationDate: " + getCreationDate() + ",");
        if (getLastModifiedDate() != null)
            sb.append("LastModifiedDate: " + getLastModifiedDate() + ",");
        if (getTransferData() != null)
            sb.append("TransferData: " + getTransferData());
        sb.append("}");
        return sb.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        // instanceof (not getClass) to match the original generated contract;
        // it also rejects null.
        if (!(obj instanceof CertificateDescription))
            return false;
        CertificateDescription other = (CertificateDescription) obj;
        return Objects.equals(getCertificateArn(), other.getCertificateArn())
                && Objects.equals(getCertificateId(), other.getCertificateId())
                && Objects.equals(getCaCertificateId(), other.getCaCertificateId())
                && Objects.equals(getStatus(), other.getStatus())
                && Objects.equals(getCertificatePem(), other.getCertificatePem())
                && Objects.equals(getOwnedBy(), other.getOwnedBy())
                && Objects.equals(getPreviousOwnedBy(), other.getPreviousOwnedBy())
                && Objects.equals(getCreationDate(), other.getCreationDate())
                && Objects.equals(getLastModifiedDate(), other.getLastModifiedDate())
                && Objects.equals(getTransferData(), other.getTransferData());
    }

    @Override
    public int hashCode() {
        // Objects.hash accumulates 31 * h + (e == null ? 0 : e.hashCode())
        // starting from 1 over the arguments in order, which is exactly the
        // scheme the previous hand-written implementation used.
        return Objects.hash(getCertificateArn(), getCertificateId(),
                getCaCertificateId(), getStatus(), getCertificatePem(),
                getOwnedBy(), getPreviousOwnedBy(), getCreationDate(),
                getLastModifiedDate(), getTransferData());
    }

    @Override
    public CertificateDescription clone() {
        try {
            return (CertificateDescription) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException(
                    "Got a CloneNotSupportedException from Object.clone() "
                            + "even though we're Cloneable!", e);
        }
    }
}
| apache-2.0 |
anshuiisc/storm-Allbolts-wiring | storm-core/src/jvm/org/apache/storm/daemon/supervisor/timer/SupervisorHeartbeat.java | 3674 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.storm.daemon.supervisor.timer;
import org.apache.storm.Config;
import org.apache.storm.cluster.IStormClusterState;
import org.apache.storm.daemon.supervisor.Supervisor;
import org.apache.storm.generated.SupervisorInfo;
import org.apache.storm.utils.Time;
import org.apache.storm.utils.Utils;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Timer task that periodically publishes this supervisor's heartbeat —
 * hostname, assigned/available ports, uptime, version and resource
 * capacities — to the storm cluster state.
 */
public class SupervisorHeartbeat implements Runnable {

    private final IStormClusterState stormClusterState;
    private final String supervisorId;
    private final Map<String, Object> conf;
    private final Supervisor supervisor;

    /**
     * @param conf       topology/daemon configuration map
     * @param supervisor supervisor whose state is reported
     */
    public SupervisorHeartbeat(Map<String, Object> conf, Supervisor supervisor) {
        this.stormClusterState = supervisor.getStormClusterState();
        this.supervisorId = supervisor.getId();
        this.supervisor = supervisor;
        this.conf = conf;
    }

    /**
     * Builds the {@link SupervisorInfo} snapshot sent as the heartbeat
     * payload.
     */
    private SupervisorInfo buildSupervisorInfo(Map<String, Object> conf, Supervisor supervisor) {
        SupervisorInfo supervisorInfo = new SupervisorInfo();
        supervisorInfo.set_time_secs(Time.currentTimeSecs());
        supervisorInfo.set_hostname(supervisor.getHostName());
        supervisorInfo.set_assignment_id(supervisor.getAssignmentId());

        List<Long> usedPorts = new ArrayList<>();
        usedPorts.addAll(supervisor.getCurrAssignment().get().keySet());
        supervisorInfo.set_used_ports(usedPorts);

        // The ISupervisor metadata is untyped; each element is expected to be
        // a port number, and anything Utils.getInt cannot convert is skipped.
        List<?> metaDatas = (List<?>) supervisor.getiSupervisor().getMetadata();
        List<Long> portList = new ArrayList<>();
        if (metaDatas != null) {
            for (Object data : metaDatas) {
                Integer port = Utils.getInt(data);
                if (port != null) {
                    portList.add(port.longValue());
                }
            }
        }
        supervisorInfo.set_meta(portList);

        supervisorInfo.set_scheduler_meta((Map<String, String>) conf.get(Config.SUPERVISOR_SCHEDULER_META));
        supervisorInfo.set_uptime_secs(supervisor.getUpTime().upTime());
        supervisorInfo.set_version(supervisor.getStormVersion());
        supervisorInfo.set_resources_map(mkSupervisorCapacities(conf));
        return supervisorInfo;
    }

    /**
     * Reads the configured memory (MB) and CPU capacities, defaulting to
     * 4096.0 MB and 400.0 (percent of one core x 100) when unset.
     */
    private Map<String, Double> mkSupervisorCapacities(Map<String, Object> conf) {
        Map<String, Double> ret = new HashMap<>();
        Double mem = Utils.getDouble(conf.get(Config.SUPERVISOR_MEMORY_CAPACITY_MB), 4096.0);
        ret.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, mem);
        Double cpu = Utils.getDouble(conf.get(Config.SUPERVISOR_CPU_CAPACITY), 400.0);
        ret.put(Config.SUPERVISOR_CPU_CAPACITY, cpu);
        return ret;
    }

    @Override
    public void run() {
        SupervisorInfo supervisorInfo = buildSupervisorInfo(conf, supervisor);
        stormClusterState.supervisorHeartbeat(supervisorId, supervisorInfo);
    }
}
| apache-2.0 |
lshain-android-source/tools-idea | java/debugger/impl/src/com/intellij/debugger/ui/HotSwapUIImpl.java | 13668 | /*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.debugger.ui;
import com.intellij.CommonBundle;
import com.intellij.compiler.CompilerWorkspaceConfiguration;
import com.intellij.debugger.DebuggerBundle;
import com.intellij.debugger.DebuggerManager;
import com.intellij.debugger.DebuggerManagerEx;
import com.intellij.debugger.impl.DebuggerManagerAdapter;
import com.intellij.debugger.impl.DebuggerSession;
import com.intellij.debugger.impl.HotSwapFile;
import com.intellij.debugger.impl.HotSwapManager;
import com.intellij.debugger.settings.DebuggerSettings;
import com.intellij.notification.NotificationType;
import com.intellij.openapi.application.Application;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.application.ModalityState;
import com.intellij.openapi.compiler.CompilationStatusListener;
import com.intellij.openapi.compiler.CompileContext;
import com.intellij.openapi.compiler.CompilerManager;
import com.intellij.openapi.compiler.CompilerTopics;
import com.intellij.openapi.components.ProjectComponent;
import com.intellij.openapi.progress.ProgressManager;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.ui.DialogWrapper;
import com.intellij.openapi.ui.Messages;
import com.intellij.openapi.util.Disposer;
import com.intellij.openapi.util.Ref;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.psi.PsiDocumentManager;
import com.intellij.util.PairFunction;
import com.intellij.util.containers.ContainerUtil;
import com.intellij.util.messages.MessageBus;
import com.intellij.util.messages.MessageBusConnection;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import java.util.*;
import java.util.concurrent.atomic.AtomicReference;
/**
* User: lex
* Date: Oct 2, 2003
* Time: 6:00:55 PM
*/
public class HotSwapUIImpl extends HotSwapUI implements ProjectComponent {
private final List<HotSwapVetoableListener> myListeners = ContainerUtil.createLockFreeCopyOnWriteList();
private boolean myAskBeforeHotswap = true;
private final Project myProject;
private boolean myPerformHotswapAfterThisCompilation = true;
public HotSwapUIImpl(final Project project, final MessageBus bus, DebuggerManager debugManager) {
myProject = project;
((DebuggerManagerEx)debugManager).addDebuggerManagerListener(new DebuggerManagerAdapter() {
private MessageBusConnection myConn = null;
private int mySessionCount = 0;
@Override
public void sessionAttached(DebuggerSession session) {
if (mySessionCount++ == 0) {
myConn = bus.connect();
myConn.subscribe(CompilerTopics.COMPILATION_STATUS, new MyCompilationStatusListener());
}
}
@Override
public void sessionDetached(DebuggerSession session) {
mySessionCount = Math.max(0, mySessionCount - 1);
if (mySessionCount == 0) {
final MessageBusConnection conn = myConn;
if (conn != null) {
Disposer.dispose(conn);
myConn = null;
}
}
}
});
}
public void projectOpened() {
}
public void projectClosed() {
}
@NotNull
public String getComponentName() {
return "HotSwapUI";
}
public void initComponent() {
}
public void disposeComponent() {
}
public void addListener(HotSwapVetoableListener listener) {
myListeners.add(listener);
}
public void removeListener(HotSwapVetoableListener listener) {
myListeners.remove(listener);
}
private boolean shouldDisplayHangWarning(DebuggerSettings settings, List<DebuggerSession> sessions) {
if (!settings.HOTSWAP_HANG_WARNING_ENABLED) {
return false;
}
// todo: return false if yourkit agent is inactive
for (DebuggerSession session : sessions) {
if (session.isPaused()) {
return true;
}
}
return false;
}
  /**
   * Orchestrates a hot swap for the given debugger sessions.
   * <p>
   * Flow: collect modified classes (either by scanning, or from {@code generatedPaths}
   * reported by an out-of-process build), then — on the EDT — optionally ask the user
   * for confirmation and finally reload the surviving classes on a pooled thread.
   *
   * @param sessions       debugger sessions to hot swap
   * @param generatedPaths output-root -> generated .class relative paths from the last
   *                       compilation, or {@code null} to force a scan
   */
  private void hotSwapSessions(final List<DebuggerSession> sessions, @Nullable final Map<String, List<String>> generatedPaths) {
    // myAskBeforeHotswap is a one-shot flag: consume it and re-arm for the next call.
    final boolean shouldAskBeforeHotswap = myAskBeforeHotswap;
    myAskBeforeHotswap = true;
    // need this because search with PSI is performed during hotswap
    PsiDocumentManager.getInstance(myProject).commitAllDocuments();
    final DebuggerSettings settings = DebuggerSettings.getInstance();
    final String runHotswap = settings.RUN_HOTSWAP_AFTER_COMPILE;
    final boolean shouldDisplayHangWarning = shouldDisplayHangWarning(settings, sessions);
    // User configured "never hot swap" and this call would have asked: bail out early.
    if (shouldAskBeforeHotswap && DebuggerSettings.RUN_HOTSWAP_NEVER.equals(runHotswap)) {
      return;
    }
    final boolean isOutOfProcessMode = CompilerWorkspaceConfiguration.getInstance(myProject).useOutOfProcessBuild();
    // A scan is needed unless the out-of-process build already reported generated paths.
    final boolean shouldPerformScan = !isOutOfProcessMode || generatedPaths == null;
    final HotSwapProgressImpl findClassesProgress;
    if (shouldPerformScan) {
      findClassesProgress = new HotSwapProgressImpl(myProject);
    }
    else {
      // Only create a progress if at least one session still requires a scan.
      boolean createProgress = false;
      for (DebuggerSession session : sessions) {
        if (session.isModifiedClassesScanRequired()) {
          createProgress = true;
          break;
        }
      }
      findClassesProgress = createProgress ? new HotSwapProgressImpl(myProject) : null;
    }
    ApplicationManager.getApplication().executeOnPooledThread(new Runnable() {
      public void run() {
        // Phase 1 (background): determine modified classes per session.
        final Map<DebuggerSession, Map<String, HotSwapFile>> modifiedClasses;
        if (shouldPerformScan) {
          modifiedClasses = scanForModifiedClassesWithProgress(sessions, findClassesProgress, !isOutOfProcessMode);
        }
        else {
          // Split sessions: those needing a full scan vs. those that can reuse generatedPaths.
          final List<DebuggerSession> toScan = new ArrayList<DebuggerSession>();
          final List<DebuggerSession> toUseGenerated = new ArrayList<DebuggerSession>();
          for (DebuggerSession session : sessions) {
            (session.isModifiedClassesScanRequired() ? toScan : toUseGenerated).add(session);
            session.setModifiedClassesScanRequired(false);
          }
          modifiedClasses = new HashMap<DebuggerSession, Map<String, HotSwapFile>>();
          if (!toUseGenerated.isEmpty()) {
            modifiedClasses.putAll(HotSwapManager.findModifiedClasses(toUseGenerated, generatedPaths));
          }
          if (!toScan.isEmpty()) {
            modifiedClasses.putAll(scanForModifiedClassesWithProgress(toScan, findClassesProgress, !isOutOfProcessMode));
          }
        }
        final Application application = ApplicationManager.getApplication();
        if (modifiedClasses.isEmpty()) {
          // Nothing changed: just inform the user and stop.
          final String message = DebuggerBundle.message("status.hotswap.uptodate");
          HotSwapProgressImpl.NOTIFICATION_GROUP.createNotification(message, NotificationType.INFORMATION).notify(myProject);
          return;
        }
        // Phase 2 (EDT): confirm with the user, then kick off the actual reload.
        application.invokeLater(new Runnable() {
          public void run() {
            if (shouldAskBeforeHotswap && !DebuggerSettings.RUN_HOTSWAP_ALWAYS.equals(runHotswap)) {
              // Ask which sessions should be reloaded.
              final RunHotswapDialog dialog = new RunHotswapDialog(myProject, sessions, shouldDisplayHangWarning);
              dialog.show();
              if (!dialog.isOK()) {
                // Cancelled: remember that these sessions still need a rescan next time.
                for (DebuggerSession session : modifiedClasses.keySet()) {
                  session.setModifiedClassesScanRequired(true);
                }
                return;
              }
              final Set<DebuggerSession> toReload = new HashSet<DebuggerSession>(dialog.getSessionsToReload());
              for (DebuggerSession session : modifiedClasses.keySet()) {
                if (!toReload.contains(session)) {
                  session.setModifiedClassesScanRequired(true);
                }
              }
              // Keep only the sessions the user chose to reload.
              modifiedClasses.keySet().retainAll(toReload);
            }
            else {
              if (shouldDisplayHangWarning) {
                // "Always hot swap" mode: still warn about a potential hang while paused.
                final int answer = Messages.showCheckboxMessageDialog(
                  DebuggerBundle.message("hotswap.dialog.hang.warning"),
                  DebuggerBundle.message("hotswap.dialog.title"),
                  new String[]{"Perform &Reload Classes", "&Skip Reload Classes"},
                  CommonBundle.message("dialog.options.do.not.show"),
                  false, 1, 1, Messages.getWarningIcon(),
                  new PairFunction<Integer, JCheckBox, Integer>() {
                    @Override
                    public Integer fun(Integer exitCode, JCheckBox cb) {
                      // The checkbox disables this warning for future hot swaps.
                      settings.HOTSWAP_HANG_WARNING_ENABLED = !cb.isSelected();
                      return exitCode == DialogWrapper.OK_EXIT_CODE ? exitCode : DialogWrapper.CANCEL_EXIT_CODE;
                    }
                  }
                );
                if (answer == DialogWrapper.CANCEL_EXIT_CODE) {
                  for (DebuggerSession session : modifiedClasses.keySet()) {
                    session.setModifiedClassesScanRequired(true);
                  }
                  return;
                }
              }
            }
            if (!modifiedClasses.isEmpty()) {
              // Phase 3 (background): reload the chosen classes with its own progress.
              final HotSwapProgressImpl progress = new HotSwapProgressImpl(myProject);
              application.executeOnPooledThread(new Runnable() {
                public void run() {
                  reloadModifiedClasses(modifiedClasses, progress);
                }
              });
            }
          }
        }, ModalityState.NON_MODAL);
      }
    });
  }
  /**
   * Runs a modified-classes scan for the given sessions under the supplied progress,
   * blocking until the scan completes. The progress is always marked finished,
   * even when the scan throws.
   *
   * @return per-session map of modified classes found by {@link HotSwapManager#scanForModifiedClasses}
   */
  private static Map<DebuggerSession, Map<String, HotSwapFile>> scanForModifiedClassesWithProgress(final List<DebuggerSession> sessions,
                                                                                                   final HotSwapProgressImpl progress,
                                                                                                   final boolean scanWithVFS) {
    // Ref is used to smuggle the result out of the Runnable.
    final Ref<Map<DebuggerSession, Map<String, HotSwapFile>>> result = Ref.create(null);
    ProgressManager.getInstance().runProcess(new Runnable() {
      public void run() {
        try {
          result.set(HotSwapManager.scanForModifiedClasses(sessions, progress, scanWithVFS));
        }
        finally {
          // Close the progress no matter how the scan ended.
          progress.finished();
        }
      }
    }, progress.getProgressIndicator());
    return result.get();
  }
  /**
   * Reloads the given modified classes into their debugger sessions under the
   * supplied progress, then marks the progress as finished.
   */
  private static void reloadModifiedClasses(final Map<DebuggerSession, Map<String, HotSwapFile>> modifiedClasses,
                                            final HotSwapProgressImpl progress) {
    ProgressManager.getInstance().runProcess(new Runnable() {
      public void run() {
        HotSwapManager.reloadModifiedClasses(modifiedClasses, progress);
        progress.finished();
      }
    }, progress.getProgressIndicator());
  }
public void reloadChangedClasses(final DebuggerSession session, boolean compileBeforeHotswap) {
dontAskHotswapAfterThisCompilation();
if (compileBeforeHotswap) {
CompilerManager.getInstance(session.getProject()).make(null);
}
else {
if (session.isAttached()) {
hotSwapSessions(Collections.singletonList(session), null);
}
}
}
  /** One-shot: suppresses the hot swap that would normally follow the next compilation. */
  public void dontPerformHotswapAfterThisCompilation() {
    myPerformHotswapAfterThisCompilation = false;
  }
  /** One-shot: skips the user confirmation dialog for the next hot swap. */
  public void dontAskHotswapAfterThisCompilation() {
    myAskBeforeHotswap = false;
  }
  /**
   * Collects generated .class files during compilation and, on a successful build,
   * triggers a hot swap for all attached sessions that can redefine classes.
   */
  private class MyCompilationStatusListener implements CompilationStatusListener {
    // Output root -> relative paths of .class files produced by the current compilation.
    // NOTE(review): fileGenerated mutates the HashMap/ArrayList held by the AtomicReference
    // without synchronization; presumably callbacks arrive on a single thread — confirm.
    private final AtomicReference<Map<String, List<String>>>
      myGeneratedPaths = new AtomicReference<Map<String, List<String>>>(new HashMap<String, List<String>>());
    public void fileGenerated(String outputRoot, String relativePath) {
      if (StringUtil.endsWith(relativePath, ".class")) {
        // collect only classes
        final Map<String, List<String>> map = myGeneratedPaths.get();
        List<String> paths = map.get(outputRoot);
        if (paths == null) {
          paths = new ArrayList<String>();
          map.put(outputRoot, paths);
        }
        paths.add(relativePath);
      }
    }
    public void compilationFinished(boolean aborted, int errors, int warnings, CompileContext compileContext) {
      // Atomically take the collected paths and reset the accumulator for the next build.
      final Map<String, List<String>> generated = myGeneratedPaths.getAndSet(new HashMap<String, List<String>>());
      if (myProject.isDisposed()) {
        return;
      }
      // Only hot swap after a clean, non-aborted build, and only if not suppressed.
      if (errors == 0 && !aborted && myPerformHotswapAfterThisCompilation) {
        // Any registered listener may veto the hot swap.
        for (HotSwapVetoableListener listener : myListeners) {
          if (!listener.shouldHotSwap(compileContext)) {
            return;
          }
        }
        final List<DebuggerSession> sessions = new ArrayList<DebuggerSession>();
        Collection<DebuggerSession> debuggerSessions = DebuggerManagerEx.getInstanceEx(myProject).getSessions();
        for (final DebuggerSession debuggerSession : debuggerSessions) {
          if (debuggerSession.isAttached() && debuggerSession.getProcess().canRedefineClasses()) {
            sessions.add(debuggerSession);
          }
        }
        if (!sessions.isEmpty()) {
          hotSwapSessions(sessions, generated);
        }
      }
      // Re-arm the one-shot suppression flag for the next compilation.
      myPerformHotswapAfterThisCompilation = true;
    }
  }
}
| apache-2.0 |
aledsage/testng | src/test/java/test/listeners/MyClassListener.java | 674 | package test.listeners;
import org.testng.IClassListener;
import org.testng.IMethodInstance;
import org.testng.ITestClass;
import java.util.ArrayList;
import java.util.List;
/**
 * Test fixture listener that records, in invocation order, the simple names of
 * the test classes for which the {@link IClassListener} callbacks fired.
 */
public class MyClassListener implements IClassListener {
  // NOTE(review): static mutable state is shared across tests; callers are expected
  // to clear these lists between runs — confirm against the tests that use them.
  public static final List<String> beforeNames = new ArrayList<>();
  public static final List<String> afterNames = new ArrayList<>();
  @Override
  public void onBeforeClass(ITestClass testClass, IMethodInstance mi) {
    beforeNames.add(testClass.getRealClass().getSimpleName());
  }
  @Override
  public void onAfterClass(ITestClass testClass, IMethodInstance mi) {
    afterNames.add(testClass.getRealClass().getSimpleName());
  }
}
| apache-2.0 |
ntt-sic/cinder | cinder/tests/api/contrib/test_volume_type_encryption.py | 19901 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import json
import webob
from xml.dom import minidom
from cinder.api.contrib import volume_type_encryption
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common.notifier import api as notifier_api
from cinder.openstack.common.notifier import test_notifier
from cinder import test
from cinder.tests.api import fakes
from cinder.volume import volume_types
def return_volume_type_encryption_db(context, volume_type_id, session):
    """Stub for db.volume_type_encryption_get variants that take a session."""
    encryption = stub_volume_type_encryption()
    return encryption
def return_volume_type_encryption(context, volume_type_id):
    """Stub for db.volume_type_encryption_get returning canned spec values."""
    encryption = stub_volume_type_encryption()
    return encryption
def stub_volume_type_encryption():
    """Return the canned encryption-spec dict used throughout these tests."""
    return {
        'cipher': 'fake_cipher',
        'control_location': 'front-end',
        'key_size': 256,
        'provider': 'fake_provider',
        'volume_type_id': 'fake_type_id',
    }
def volume_type_encryption_get(context, volume_type_id):
    # No-op stand-in for db.volume_type_encryption_get: simulates "no specs found".
    pass
class VolumeTypeEncryptionTest(test.TestCase):
def setUp(self):
super(VolumeTypeEncryptionTest, self).setUp()
self.flags(connection_type='fake',
host='fake',
notification_driver=[test_notifier.__name__])
self.api_path = '/v2/fake/os-volume-types/1/encryption'
"""to reset notifier drivers left over from other api/contrib tests"""
notifier_api._reset_drivers()
test_notifier.NOTIFICATIONS = []
    def tearDown(self):
        """Reset notifier drivers registered during the test, then clean up."""
        notifier_api._reset_drivers()
        super(VolumeTypeEncryptionTest, self).tearDown()
    def _get_response(self, volume_type, admin=True,
                      url='/v2/fake/types/%s/encryption',
                      req_method='GET', req_body=None,
                      req_headers=None):
        """Issue a WSGI request against the volume-type encryption API.

        :param volume_type: dict whose 'id' is interpolated into ``url``
        :param admin: whether the request context is an admin context
        :param url: format string with a single ``%s`` for the type id
        :param req_method: HTTP method to use
        :param req_body: optional request body string
        :param req_headers: optional Content-Type header value
        :returns: the webob response object
        """
        ctxt = context.RequestContext('fake', 'fake', is_admin=admin)
        req = webob.Request.blank(url % volume_type['id'])
        req.method = req_method
        req.body = req_body
        if req_headers:
            req.headers['Content-Type'] = req_headers
        return req.get_response(fakes.wsgi_app(fake_auth_context=ctxt))
    def test_index(self):
        """GET on the encryption resource returns the stubbed spec dict."""
        self.stubs.Set(db, 'volume_type_encryption_get',
                       return_volume_type_encryption)
        volume_type = {
            'id': 'fake_type_id',
            'name': 'fake_type',
        }
        db.volume_type_create(context.get_admin_context(), volume_type)
        res = self._get_response(volume_type)
        self.assertEqual(200, res.status_code)
        res_dict = json.loads(res.body)
        expected = stub_volume_type_encryption()
        self.assertEqual(expected, res_dict)
        # Clean up so later tests can recreate the same type id.
        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
    def test_index_invalid_type(self):
        """GET for a volume type that was never created yields itemNotFound/404."""
        volume_type = {
            'id': 'fake_type_id',
            'name': 'fake_type',
        }
        res = self._get_response(volume_type)
        self.assertEqual(404, res.status_code)
        res_dict = json.loads(res.body)
        expected = {
            'itemNotFound': {
                'code': 404,
                'message': ('Volume type %s could not be found.'
                            % volume_type['id'])
            }
        }
        self.assertEqual(expected, res_dict)
    def test_show_key_size(self):
        """GET .../encryption/key_size returns just the key_size field."""
        self.stubs.Set(db, 'volume_type_encryption_get',
                       return_volume_type_encryption)
        volume_type = {
            'id': 'fake_type_id',
            'name': 'fake_type',
        }
        db.volume_type_create(context.get_admin_context(), volume_type)
        res = self._get_response(volume_type,
                                 url='/v2/fake/types/%s/encryption/key_size')
        res_dict = json.loads(res.body)
        self.assertEqual(200, res.status_code)
        self.assertEqual(256, res_dict['key_size'])
        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
    def test_show_provider(self):
        """GET .../encryption/provider returns just the provider field."""
        self.stubs.Set(db, 'volume_type_encryption_get',
                       return_volume_type_encryption)
        volume_type = {
            'id': 'fake_type_id',
            'name': 'fake_type',
        }
        db.volume_type_create(context.get_admin_context(), volume_type)
        res = self._get_response(volume_type,
                                 url='/v2/fake/types/%s/encryption/provider')
        res_dict = json.loads(res.body)
        self.assertEqual(200, res.status_code)
        self.assertEqual('fake_provider', res_dict['provider'])
        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
    def test_show_item_not_found(self):
        """GET for a non-existent encryption attribute yields itemNotFound/404."""
        self.stubs.Set(db, 'volume_type_encryption_get',
                       return_volume_type_encryption)
        volume_type = {
            'id': 'fake_type_id',
            'name': 'fake_type',
        }
        db.volume_type_create(context.get_admin_context(), volume_type)
        res = self._get_response(volume_type,
                                 url='/v2/fake/types/%s/encryption/fake')
        res_dict = json.loads(res.body)
        self.assertEqual(404, res.status_code)
        expected = {
            'itemNotFound': {
                'code': 404,
                'message': ('The resource could not be found.')
            }
        }
        self.assertEqual(expected, res_dict)
        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
    def _create(self, cipher, control_location, key_size, provider):
        """Create encryption specs via POST and verify response, notification
        count and database contents; destroys the volume type afterwards.
        """
        volume_type = {
            'id': 'fake_type_id',
            'name': 'fake_type',
        }
        db.volume_type_create(context.get_admin_context(), volume_type)
        body = {"encryption": {'cipher': cipher,
                               'control_location': control_location,
                               'key_size': key_size,
                               'provider': provider,
                               'volume_type_id': volume_type['id']}}
        # No notifications should have been emitted yet.
        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
        res = self._get_response(volume_type)
        res_dict = json.loads(res.body)
        self.assertEqual(200, res.status_code)
        # Confirm that volume type has no encryption information
        # before create.
        self.assertEqual('{}', res.body)
        # Create encryption specs for the volume type
        # with the defined body.
        res = self._get_response(volume_type, req_method='POST',
                                 req_body=json.dumps(body),
                                 req_headers='application/json')
        res_dict = json.loads(res.body)
        # Creation must emit exactly one notification.
        self.assertEqual(len(test_notifier.NOTIFICATIONS), 1)
        # check response
        self.assertIn('encryption', res_dict)
        self.assertEqual(cipher, res_dict['encryption']['cipher'])
        self.assertEqual(control_location,
                         res_dict['encryption']['control_location'])
        self.assertEqual(key_size, res_dict['encryption']['key_size'])
        self.assertEqual(provider, res_dict['encryption']['provider'])
        self.assertEqual(volume_type['id'],
                         res_dict['encryption']['volume_type_id'])
        # check database
        encryption = db.volume_type_encryption_get(context.get_admin_context(),
                                                   volume_type['id'])
        self.assertIsNotNone(encryption)
        self.assertEqual(cipher, encryption['cipher'])
        self.assertEqual(key_size, encryption['key_size'])
        self.assertEqual(provider, encryption['provider'])
        self.assertEqual(volume_type['id'], encryption['volume_type_id'])
        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
    def test_create_json(self):
        """Happy-path create via the JSON API with all fields populated."""
        self._create('fake_cipher', 'front-end', 128, 'fake_encryptor')
    def test_create_xml(self):
        """Happy-path create via the XML API variant."""
        volume_type = {
            'id': 'fake_type_id',
            'name': 'fake_type',
        }
        db.volume_type_create(context.get_admin_context(), volume_type)
        ctxt = context.RequestContext('fake', 'fake', is_admin=True)
        req = webob.Request.blank('/v2/fake/types/%s/encryption'
                                  % volume_type['id'])
        req.method = 'POST'
        req.body = ('<encryption provider="test_provider" '
                    'cipher="cipher" control_location="front-end" />')
        req.headers['Content-Type'] = 'application/xml'
        req.headers['Accept'] = 'application/xml'
        res = req.get_response(fakes.wsgi_app(fake_auth_context=ctxt))
        self.assertEqual(res.status_int, 200)
        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
    def test_create_invalid_volume_type(self):
        """POST against a volume type that does not exist yields 404 and no notification."""
        volume_type = {
            'id': 'fake_type_id',
            'name': 'fake_type',
        }
        body = {"encryption": {'cipher': 'cipher',
                               'control_location': 'front-end',
                               'key_size': 128,
                               'provider': 'fake_provider',
                               'volume_type_id': 'volume_type'}}
        res = self._get_response(volume_type, req_method='POST',
                                 req_body=json.dumps(body),
                                 req_headers='application/json')
        res_dict = json.loads(res.body)
        # Failed create must not emit a notification.
        self.assertEqual(len(test_notifier.NOTIFICATIONS), 0)
        self.assertEqual(404, res.status_code)
        expected = {
            'itemNotFound': {
                'code': 404,
                'message': ('Volume type %s could not be found.'
                            % volume_type['id'])
            }
        }
        self.assertEqual(expected, res_dict)
    def test_create_encryption_type_exists(self):
        """POST when specs already exist for the type yields badRequest/400."""
        self.stubs.Set(db, 'volume_type_encryption_get',
                       return_volume_type_encryption)
        volume_type = {
            'id': 'fake_type_id',
            'name': 'fake_type',
        }
        db.volume_type_create(context.get_admin_context(), volume_type)
        body = {"encryption": {'cipher': 'cipher',
                               'control_location': 'front-end',
                               'key_size': 128,
                               'provider': 'fake_provider',
                               'volume_type_id': volume_type['id']}}
        # Try to create encryption specs for a volume type
        # that already has them.
        res = self._get_response(volume_type, req_method='POST',
                                 req_body=json.dumps(body),
                                 req_headers='application/json')
        res_dict = json.loads(res.body)
        expected = {
            'badRequest': {
                'code': 400,
                'message': ('Volume type encryption for type '
                            'fake_type_id already exists.')
            }
        }
        self.assertEqual(expected, res_dict)
        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
    def _encryption_create_bad_body(self, body,
                                    msg='Create body is not valid.'):
        """POST the given malformed body and assert a badRequest/400 with ``msg``.

        :param body: request body dict (or None) expected to be rejected
        :param msg: expected error message in the badRequest fault
        """
        volume_type = {
            'id': 'fake_type_id',
            'name': 'fake_type',
        }
        db.volume_type_create(context.get_admin_context(), volume_type)
        res = self._get_response(volume_type, req_method='POST',
                                 req_body=json.dumps(body),
                                 req_headers='application/json')
        res_dict = json.loads(res.body)
        expected = {
            'badRequest': {
                'code': 400,
                'message': (msg)
            }
        }
        self.assertEqual(expected, res_dict)
        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
    def test_create_no_body(self):
        """POST with no body at all is rejected with the default message."""
        self._encryption_create_bad_body(body=None)
    def test_create_malformed_entity(self):
        """POST where 'encryption' is a string, not a dict, is rejected."""
        body = {'encryption': 'string'}
        self._encryption_create_bad_body(body=body)
    def test_create_negative_key_size(self):
        """Negative key_size values are rejected with a specific message."""
        body = {"encryption": {'cipher': 'cipher',
                               'key_size': -128,
                               'provider': 'fake_provider',
                               'volume_type_id': 'volume_type'}}
        msg = 'Invalid input received: key_size must be non-negative'
        self._encryption_create_bad_body(body=body, msg=msg)
    def test_create_none_key_size(self):
        """key_size=None is valid and creation succeeds."""
        self._create('fake_cipher', 'front-end', None, 'fake_encryptor')
    def test_create_invalid_control_location(self):
        """control_location outside the allowed set is rejected."""
        body = {"encryption": {'cipher': 'cipher',
                               'control_location': 'fake_control',
                               'provider': 'fake_provider',
                               'volume_type_id': 'volume_type'}}
        msg = ("Invalid input received: Valid control location are: "
               "['front-end', 'back-end']")
        self._encryption_create_bad_body(body=body, msg=msg)
    def test_create_no_provider(self):
        """Omitting the mandatory provider field is rejected."""
        body = {"encryption": {'cipher': 'cipher',
                               'volume_type_id': 'volume_type'}}
        msg = ("Invalid input received: provider must be defined")
        self._encryption_create_bad_body(body=body, msg=msg)
    def test_delete(self):
        """Full lifecycle: empty GET, create, GET shows specs, DELETE, GET empty."""
        volume_type = {
            'id': 'fake_type_id',
            'name': 'fake_type',
        }
        db.volume_type_create(context.get_admin_context(), volume_type)
        # Test that before create, there's nothing with a get
        res = self._get_response(volume_type, req_method='GET',
                                 req_headers='application/json',
                                 url='/v2/fake/types/%s/encryption')
        self.assertEqual(200, res.status_code)
        res_dict = json.loads(res.body)
        self.assertEqual({}, res_dict)
        body = {"encryption": {'cipher': 'cipher',
                               'key_size': 128,
                               'control_location': 'front-end',
                               'provider': 'fake_provider',
                               'volume_type_id': volume_type['id']}}
        # Create, and test that get returns something
        res = self._get_response(volume_type, req_method='POST',
                                 req_body=json.dumps(body),
                                 req_headers='application/json')
        res_dict = json.loads(res.body)
        res = self._get_response(volume_type, req_method='GET',
                                 req_headers='application/json',
                                 url='/v2/fake/types/%s/encryption')
        self.assertEqual(200, res.status_code)
        res_dict = json.loads(res.body)
        self.assertEqual(volume_type['id'], res_dict['volume_type_id'])
        # Delete, and test that get returns nothing
        res = self._get_response(volume_type, req_method='DELETE',
                                 req_headers='application/json',
                                 url='/v2/fake/types/%s/encryption/provider')
        self.assertEqual(202, res.status_code)
        self.assertEqual(0, len(res.body))
        res = self._get_response(volume_type, req_method='GET',
                                 req_headers='application/json',
                                 url='/v2/fake/types/%s/encryption')
        self.assertEqual(200, res.status_code)
        res_dict = json.loads(res.body)
        self.assertEqual({}, res_dict)
        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
    def test_delete_with_volume_in_use(self):
        """DELETE is refused (400) while volumes of the type exist, then
        succeeds once those volumes are destroyed.
        """
        # Create the volume type and volumes with the volume type.
        volume_type = {
            'id': 'fake_type_id',
            'name': 'fake_type',
        }
        db.volume_type_create(context.get_admin_context(), volume_type)
        db.volume_create(context.get_admin_context(),
                         {'id': 'fake_id',
                          'display_description': 'Test Desc',
                          'size': 20,
                          'status': 'creating',
                          'instance_uuid': None,
                          'host': 'dummy',
                          'volume_type_id': volume_type['id']})
        db.volume_create(context.get_admin_context(),
                         {'id': 'fake_id2',
                          'display_description': 'Test Desc2',
                          'size': 2,
                          'status': 'creating',
                          'instance_uuid': None,
                          'host': 'dummy',
                          'volume_type_id': volume_type['id']})
        body = {"encryption": {'cipher': 'cipher',
                               'key_size': 128,
                               'control_location': 'front-end',
                               'provider': 'fake_provider',
                               'volume_type_id': volume_type['id']}}
        # Create encryption with volume type, and test with GET
        res = self._get_response(volume_type, req_method='POST',
                                 req_body=json.dumps(body),
                                 req_headers='application/json')
        res = self._get_response(volume_type, req_method='GET',
                                 req_headers='application/json',
                                 url='/v2/fake/types/%s/encryption')
        self.assertEqual(200, res.status_code)
        res_dict = json.loads(res.body)
        self.assertEqual(volume_type['id'], res_dict['volume_type_id'])
        # Delete, and test that there is an error since volumes exist
        res = self._get_response(volume_type, req_method='DELETE',
                                 req_headers='application/json',
                                 url='/v2/fake/types/%s/encryption/provider')
        self.assertEqual(400, res.status_code)
        res_dict = json.loads(res.body)
        expected = {
            'badRequest': {
                'code': 400,
                'message': 'Cannot delete encryption specs. '
                           'Volume type in use.'
            }
        }
        self.assertEqual(expected, res_dict)
        # Delete the volumes
        db.volume_destroy(context.get_admin_context(), 'fake_id')
        db.volume_destroy(context.get_admin_context(), 'fake_id2')
        # Delete, and test that get returns nothing
        res = self._get_response(volume_type, req_method='DELETE',
                                 req_headers='application/json',
                                 url='/v2/fake/types/%s/encryption/provider')
        self.assertEqual(202, res.status_code)
        self.assertEqual(0, len(res.body))
        res = self._get_response(volume_type, req_method='GET',
                                 req_headers='application/json',
                                 url='/v2/fake/types/%s/encryption')
        self.assertEqual(200, res.status_code)
        res_dict = json.loads(res.body)
        self.assertEqual({}, res_dict)
        db.volume_type_destroy(context.get_admin_context(), volume_type['id'])
| apache-2.0 |
mdanielwork/intellij-community | xml/impl/src/com/intellij/psi/impl/source/html/HtmlScriptLanguageInjector.java | 2931 | /*
* Copyright 2000-2013 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.psi.impl.source.html;
import com.intellij.lang.Language;
import com.intellij.lang.LanguageUtil;
import com.intellij.lang.injection.MultiHostInjector;
import com.intellij.lang.injection.MultiHostRegistrar;
import com.intellij.openapi.util.TextRange;
import com.intellij.psi.PsiElement;
import com.intellij.psi.PsiLanguageInjectionHost;
import com.intellij.psi.xml.XmlTag;
import com.intellij.psi.xml.XmlText;
import com.intellij.xml.util.HtmlUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
public class HtmlScriptLanguageInjector implements MultiHostInjector {
  /**
   * Finds language to be injected into a &lt;script&gt; tag.
   *
   * @param xmlTag candidate tag
   * @return language to inject, {@link Language#ANY} when the MIME type is unknown,
   *         or {@code null} if the tag is not a script tag at all
   */
  @Nullable
  public static Language getScriptLanguageToInject(@NotNull XmlTag xmlTag) {
    if (!HtmlUtil.isScriptTag(xmlTag)) {
      return null;
    }
    String mimeType = xmlTag.getAttributeValue("type");
    // Structured-syntax suffix (e.g. "application/ld+json") is treated as plain JSON.
    if (mimeType != null && mimeType.endsWith("+json")) {
      mimeType = "application/json";
    }
    final Collection<Language> candidates = Language.findInstancesByMimeType(mimeType);
    if (candidates.isEmpty()) {
      return Language.ANY;
    }
    return candidates.iterator().next();
  }
  @Override
  public void getLanguagesToInject(@NotNull MultiHostRegistrar registrar, @NotNull PsiElement host) {
    // Only text inside an HTML file is a candidate for injection.
    if (!host.isValid() || !(host instanceof XmlText) || !HtmlUtil.isHtmlTagContainingFile(host)) {
      return;
    }
    final XmlTag enclosingTag = ((XmlText)host).getParentTag();
    if (enclosingTag == null) {
      return;
    }
    final Language scriptLanguage = getScriptLanguageToInject(enclosingTag);
    if (scriptLanguage == null) {
      return;
    }
    // An extension may explicitly block injection for this tag/language pair.
    if (HtmlScriptInjectionBlockerExtension.isInjectionBlocked(enclosingTag, scriptLanguage)) {
      return;
    }
    if (LanguageUtil.isInjectableLanguage(scriptLanguage)) {
      final TextRange wholeText = TextRange.create(0, host.getTextLength());
      registrar
        .startInjecting(scriptLanguage)
        .addPlace(null, null, (PsiLanguageInjectionHost)host, wholeText)
        .doneInjecting();
    }
  }
  @NotNull
  @Override
  public List<? extends Class<? extends PsiElement>> elementsToInjectIn() {
    // Injection is attempted only inside XmlText nodes.
    return Collections.singletonList(XmlText.class);
  }
}
| apache-2.0 |
halober/ovirt-engine | frontend/webadmin/modules/webadmin/src/main/java/org/ovirt/engine/ui/webadmin/uicommon/model/ClusterPolicyModelProvider.java | 3435 | package org.ovirt.engine.ui.webadmin.uicommon.model;
import org.ovirt.engine.core.common.scheduling.ClusterPolicy;
import org.ovirt.engine.ui.common.presenter.AbstractModelBoundPopupPresenterWidget;
import org.ovirt.engine.ui.common.presenter.popup.DefaultConfirmationPopupPresenterWidget;
import org.ovirt.engine.ui.common.presenter.popup.RemoveConfirmationPopupPresenterWidget;
import org.ovirt.engine.ui.common.uicommon.model.SearchableTabModelProvider;
import org.ovirt.engine.ui.uicommonweb.UICommand;
import org.ovirt.engine.ui.uicommonweb.models.ConfirmationModel;
import org.ovirt.engine.ui.uicommonweb.models.Model;
import org.ovirt.engine.ui.uicommonweb.models.configure.scheduling.ClusterPolicyListModel;
import org.ovirt.engine.ui.webadmin.section.main.presenter.popup.scheduling.ClusterPolicyPopupPresenterWidget;
import org.ovirt.engine.ui.webadmin.section.main.presenter.popup.scheduling.ManagePolicyUnitPopupPresenterWidget;
import com.google.gwt.event.shared.EventBus;
import com.google.inject.Inject;
import com.google.inject.Provider;
public class ClusterPolicyModelProvider extends SearchableTabModelProvider<ClusterPolicy, ClusterPolicyListModel> {
    // Popup for new/edit/clone of a cluster policy.
    private final Provider<ClusterPolicyPopupPresenterWidget> clusterPolicyPopupProvider;
    // Confirmation popup shown before removing a cluster policy.
    private final Provider<RemoveConfirmationPopupPresenterWidget> removeConfirmPopupProvider;
    // Popup for managing policy units.
    private final Provider<ManagePolicyUnitPopupPresenterWidget> policyUnitPopupProvider;

    @Inject
    public ClusterPolicyModelProvider(EventBus eventBus,
            Provider<DefaultConfirmationPopupPresenterWidget> defaultConfirmPopupProvider,
            final Provider<ClusterPolicyPopupPresenterWidget> clusterPolicyPopupProvider,
            final Provider<RemoveConfirmationPopupPresenterWidget> removeConfirmPopupProvider,
            final Provider<ManagePolicyUnitPopupPresenterWidget> policyUnitPopupProvider) {
        super(eventBus, defaultConfirmPopupProvider);
        this.clusterPolicyPopupProvider = clusterPolicyPopupProvider;
        this.removeConfirmPopupProvider = removeConfirmPopupProvider;
        this.policyUnitPopupProvider = policyUnitPopupProvider;
    }

    @Override
    public ClusterPolicyListModel getModel() {
        return getCommonModel().getClusterPolicyListModel();
    }

    @Override
    public AbstractModelBoundPopupPresenterWidget<? extends Model, ?> getModelPopup(ClusterPolicyListModel source,
            UICommand lastExecutedCommand, Model windowModel) {
        // New/edit/clone all share the same cluster-policy popup.
        final boolean isPolicyCrud = lastExecutedCommand.equals(getModel().getNewCommand())
                || lastExecutedCommand.equals(getModel().getEditCommand())
                || lastExecutedCommand.equals(getModel().getCloneCommand());
        if (isPolicyCrud) {
            return clusterPolicyPopupProvider.get();
        }
        if (lastExecutedCommand.equals(getModel().getManagePolicyUnitCommand())) {
            return policyUnitPopupProvider.get();
        }
        return super.getModelPopup(source, lastExecutedCommand, windowModel);
    }

    @Override
    public AbstractModelBoundPopupPresenterWidget<? extends ConfirmationModel, ?> getConfirmModelPopup(ClusterPolicyListModel source,
            UICommand lastExecutedCommand) {
        if (lastExecutedCommand.equals(getModel().getRemoveCommand())) {
            return removeConfirmPopupProvider.get();
        }
        return super.getConfirmModelPopup(source, lastExecutedCommand);
    }
}
| apache-2.0 |
badvision/acs-aem-commons | bundle/src/test/java/com/adobe/acs/commons/workflow/synthetic/impl/SyntheticGraniteWorkflowRunnerImplTest.java | 10359 | /*
* #%L
* ACS AEM Commons Bundle
* %%
* Copyright (C) 2016 Adobe
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package com.adobe.acs.commons.workflow.synthetic.impl;
import com.adobe.acs.commons.workflow.synthetic.SyntheticWorkflowRunner;
import com.adobe.acs.commons.workflow.synthetic.SyntheticWorkflowStep;
import com.adobe.acs.commons.workflow.synthetic.impl.granitetestprocesses.NoNextWorkflowProcess;
import com.adobe.acs.commons.workflow.synthetic.impl.granitetestprocesses.ReadDataWorkflowProcess;
import com.adobe.acs.commons.workflow.synthetic.impl.granitetestprocesses.RestartWorkflowProcess;
import com.adobe.acs.commons.workflow.synthetic.impl.granitetestprocesses.SetDataWorkflowProcess;
import com.adobe.acs.commons.workflow.synthetic.impl.granitetestprocesses.TerminateDataWorkflowProcess;
import com.adobe.acs.commons.workflow.synthetic.impl.granitetestprocesses.UpdateWorkflowDataWorkflowProcess;
import com.adobe.acs.commons.workflow.synthetic.impl.granitetestprocesses.WFArgsWorkflowProcess;
import com.adobe.acs.commons.workflow.synthetic.impl.granitetestprocesses.WFDataWorkflowProcess;
import com.adobe.granite.workflow.WorkflowSession;
import com.adobe.granite.workflow.exec.WorkItem;
import com.adobe.granite.workflow.metadata.MetaDataMap;
import org.apache.sling.api.resource.ResourceResolver;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import javax.jcr.Session;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@RunWith(MockitoJUnitRunner.class)
public class SyntheticGraniteWorkflowRunnerImplTest {
@Mock
ResourceResolver resourceResolver;
@Mock
Session session;
SyntheticWorkflowRunnerImpl swr = new SyntheticWorkflowRunnerImpl();
List<SyntheticWorkflowStep> workflowSteps;
    @Before
    public void setUp() {
        // Fresh step list per test; make the ResourceResolver mock adapt to a JCR Session.
        workflowSteps = new ArrayList<>();
        when(resourceResolver.adaptTo(Session.class)).thenReturn(session);
    }
    @Test
    public void testExecute_WFData() throws Exception {
        // Bind a single process under the label "test" and run it as one synthetic step.
        Map<Object, Object> map = new HashMap<Object, Object>();
        map.put("process.label", "test");
        swr.bindGraniteWorkflowProcesses(new WFDataWorkflowProcess(), map);
        workflowSteps.add(swr.getSyntheticWorkflowStep("test",
                SyntheticWorkflowRunner.WorkflowProcessIdType.PROCESS_LABEL));
        swr.execute(resourceResolver,
                "/content/test",
                workflowSteps, false, false);
    }
@Test
public void testExecute_PassingDataBetweenProcesses() throws Exception {
Map<Object, Object> map = new HashMap<Object, Object>();
map.put("process.label", "set");
swr.bindGraniteWorkflowProcesses(new SetDataWorkflowProcess(), map);
map.put("process.label", "read");
swr.bindGraniteWorkflowProcesses(new ReadDataWorkflowProcess(), map);
workflowSteps.add(swr.getSyntheticWorkflowStep("set",
SyntheticWorkflowRunner.WorkflowProcessIdType.PROCESS_LABEL));
workflowSteps.add(swr.getSyntheticWorkflowStep("read",
SyntheticWorkflowRunner.WorkflowProcessIdType.PROCESS_LABEL));
swr.execute(resourceResolver,
"/content/test",
workflowSteps, false, false);
}
@Test
public void testExecute_updateWorkflowData() throws Exception {
Map<Object, Object> map = new HashMap<Object, Object>();
map.put("process.label", "update");
swr.bindGraniteWorkflowProcesses(new UpdateWorkflowDataWorkflowProcess(), map);
map.put("process.label", "read");
swr.bindGraniteWorkflowProcesses(new ReadDataWorkflowProcess(), map);
workflowSteps.add(swr.getSyntheticWorkflowStep("update",
SyntheticWorkflowRunner.WorkflowProcessIdType.PROCESS_LABEL));
workflowSteps.add(swr.getSyntheticWorkflowStep("read",
SyntheticWorkflowRunner.WorkflowProcessIdType.PROCESS_LABEL));
swr.execute(resourceResolver,
"/content/test",
workflowSteps, false, false);
}
    // Verifies that per-step process arguments (metadata) are delivered to the
    // process: the same wfArgs map is handed to both the process (which checks
    // it) and the step definition.
    @Test
    public void testExecute_ProcessArgs() throws Exception {
        Map<String, Object> wfArgs = new HashMap<String, Object>();
        wfArgs.put("hello", "world");
        Map<Object, Object> map = new HashMap<Object, Object>();
        map.put("process.label", "wf-args");
        swr.bindGraniteWorkflowProcesses(new WFArgsWorkflowProcess(wfArgs), map);
        /** WF Process Metadata */
        workflowSteps.add(swr.getSyntheticWorkflowStep("wf-args",
                SyntheticWorkflowRunner.WorkflowProcessIdType.PROCESS_LABEL,
                wfArgs));
        swr.execute(resourceResolver,
                "/content/test",
                workflowSteps, false, false);
    }
    // A process that triggers workflow restarts; the Mockito spy verifies the
    // engine re-invokes it -- three executions total per the times(3) check.
    @Test
    public void testExecute_Restart() throws Exception {
        Map<Object, Object> map = new HashMap<Object, Object>();
        map.put("process.label", "restart");
        RestartWorkflowProcess restartWorkflowProcess = spy(new RestartWorkflowProcess());
        swr.bindGraniteWorkflowProcesses(restartWorkflowProcess, map);
        /** Restart */
        workflowSteps.add(swr.getSyntheticWorkflowStep("restart",
                SyntheticWorkflowRunner.WorkflowProcessIdType.PROCESS_LABEL));
        swr.execute(resourceResolver,
                "/content/test",
                workflowSteps, false, false);
        verify(restartWorkflowProcess, times(3)).execute(any(WorkItem.class), any(WorkflowSession.class),
                any(MetaDataMap.class));
    }
    // First step terminates the workflow; a second ("nonext") step is queued
    // after it. Unlike the other tests, execute is called with the first boolean
    // flag set to true. The session is stubbed to report pending changes once,
    // then none.
    // NOTE(review): presumably the "nonext" step should not run after the
    // terminate step -- confirm against TerminateDataWorkflowProcess semantics.
    @Test
    public void testExecute_Terminate() throws Exception {
        when(session.hasPendingChanges()).thenReturn(true).thenReturn(false);
        Map<Object, Object> map = new HashMap<Object, Object>();
        map.put("process.label", "terminate");
        TerminateDataWorkflowProcess terminateDataWorkflowProcess = spy(new TerminateDataWorkflowProcess());
        swr.bindGraniteWorkflowProcesses(terminateDataWorkflowProcess, map);
        map.put("process.label", "nonext");
        swr.bindGraniteWorkflowProcesses(new NoNextWorkflowProcess(), map);
        workflowSteps.add(swr.getSyntheticWorkflowStep("terminate",
                SyntheticWorkflowRunner.WorkflowProcessIdType.PROCESS_LABEL));
        workflowSteps.add(swr.getSyntheticWorkflowStep("nonext",
                SyntheticWorkflowRunner.WorkflowProcessIdType.PROCESS_LABEL));
        swr.execute(resourceResolver,
                "/content/test",
                workflowSteps, true, false);
    }
    // With the final boolean (save-at-end) set to true and pending session
    // changes stubbed, the run must invoke the process exactly once and call
    // session.save() exactly once.
    @Test
    public void testExecute_Terminate_autoSaveAtEnd() throws Exception {
        when(session.hasPendingChanges()).thenReturn(true).thenReturn(false);
        Map<Object, Object> map = new HashMap<Object, Object>();
        map.put("process.label", "terminate");
        TerminateDataWorkflowProcess terminateDataWorkflowProcess = spy(new TerminateDataWorkflowProcess());
        swr.bindGraniteWorkflowProcesses(terminateDataWorkflowProcess, map);
        workflowSteps.add(swr.getSyntheticWorkflowStep("terminate",
                SyntheticWorkflowRunner.WorkflowProcessIdType.PROCESS_LABEL));
        swr.execute(resourceResolver,
                "/content/test",
                workflowSteps, false, true);
        verify(terminateDataWorkflowProcess, times(1)).execute(any(WorkItem.class), any(WorkflowSession.class),
                any(MetaDataMap.class));
        verify(session, times(1)).save();
    }
    // Counterpart to the autoSaveAtEnd test: with both boolean flags false, the
    // process still runs once but session.save() must never be called, even
    // though the session reports pending changes.
    @Test
    public void testExecute_Complete_noSave() throws Exception {
        when(session.hasPendingChanges()).thenReturn(true).thenReturn(false);
        Map<Object, Object> map = new HashMap<Object, Object>();
        map.put("process.label", "terminate");
        TerminateDataWorkflowProcess terminateDataWorkflowProcess = spy(new TerminateDataWorkflowProcess());
        swr.bindGraniteWorkflowProcesses(terminateDataWorkflowProcess, map);
        workflowSteps.add(swr.getSyntheticWorkflowStep("terminate",
                SyntheticWorkflowRunner.WorkflowProcessIdType.PROCESS_LABEL));
        swr.execute(resourceResolver,
                "/content/test",
                workflowSteps, false, false);
        verify(terminateDataWorkflowProcess, times(1)).execute(any(WorkItem.class), any(WorkflowSession.class),
                any(MetaDataMap.class));
        verify(session, times(0)).save();
    }
    // Two bound processes, each constructed with its own expected argument map;
    // each step carries matching per-step args so the processes can verify they
    // received the right metadata.
    // NOTE(review): as in the other multi-bind tests, the shared metadata map is
    // mutated between bind calls -- assumes bind copies or reads it eagerly.
    @Test
    public void testExecute_MultipleProcesses() throws Exception {
        Map<String, Object> wfArgs1 = new HashMap<String, Object>();
        wfArgs1.put("hello", "world");
        Map<String, Object> wfArgs2 = new HashMap<String, Object>();
        wfArgs2.put("goodbye", "moon");
        final Map<Object, Object> map = new HashMap<Object, Object>();
        map.put("process.label", "multi1");
        swr.bindGraniteWorkflowProcesses(new WFArgsWorkflowProcess(wfArgs1), map);
        map.put("process.label", "multi2");
        swr.bindGraniteWorkflowProcesses(new WFArgsWorkflowProcess(wfArgs2), map);
        workflowSteps.add(swr.getSyntheticWorkflowStep("multi1",
                SyntheticWorkflowRunner.WorkflowProcessIdType.PROCESS_LABEL,
                wfArgs1));
        workflowSteps.add(swr.getSyntheticWorkflowStep("multi2",
                SyntheticWorkflowRunner.WorkflowProcessIdType.PROCESS_LABEL,
                wfArgs2));
        swr.execute(resourceResolver,
                "/content/test",
                workflowSteps,
                false,
                false);
    }
} | apache-2.0 |
philliprower/cas | support/cas-server-support-jms-ticket-registry/src/test/java/org/apereo/cas/ticket/registry/queue/AbstractTicketMessageQueueCommandTests.java | 2835 | package org.apereo.cas.ticket.registry.queue;
import org.apereo.cas.config.CasCoreAuthenticationConfiguration;
import org.apereo.cas.config.CasCoreAuthenticationHandlersConfiguration;
import org.apereo.cas.config.CasCoreAuthenticationMetadataConfiguration;
import org.apereo.cas.config.CasCoreAuthenticationPolicyConfiguration;
import org.apereo.cas.config.CasCoreAuthenticationPrincipalConfiguration;
import org.apereo.cas.config.CasCoreAuthenticationServiceSelectionStrategyConfiguration;
import org.apereo.cas.config.CasCoreAuthenticationSupportConfiguration;
import org.apereo.cas.config.CasCoreConfiguration;
import org.apereo.cas.config.CasCoreHttpConfiguration;
import org.apereo.cas.config.CasCoreServicesAuthenticationConfiguration;
import org.apereo.cas.config.CasCoreServicesConfiguration;
import org.apereo.cas.config.CasCoreTicketCatalogConfiguration;
import org.apereo.cas.config.CasCoreTicketsConfiguration;
import org.apereo.cas.config.CasCoreUtilConfiguration;
import org.apereo.cas.config.CasCoreWebConfiguration;
import org.apereo.cas.config.CasPersonDirectoryConfiguration;
import org.apereo.cas.config.support.CasWebApplicationServiceFactoryConfiguration;
import org.apereo.cas.logout.config.CasCoreLogoutConfiguration;
import org.apereo.cas.ticket.registry.TicketRegistry;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.autoconfigure.aop.AopAutoConfiguration;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.cloud.autoconfigure.RefreshAutoConfiguration;
/**
* This is {@link AbstractTicketMessageQueueCommandTests}.
*
* @author Misagh Moayyed
* @since 5.2.0
*/
// Boots a minimal CAS Spring context (ticketing, authn, services, web) so
// subclasses can exercise ticket message-queue commands against a real
// TicketRegistry bean.
@SpringBootTest(classes = {
    CasCoreTicketsConfiguration.class,
    CasCoreTicketCatalogConfiguration.class,
    CasCoreLogoutConfiguration.class,
    CasCoreHttpConfiguration.class,
    CasCoreAuthenticationConfiguration.class,
    CasCoreServicesAuthenticationConfiguration.class,
    CasCoreAuthenticationMetadataConfiguration.class,
    CasCoreAuthenticationPolicyConfiguration.class,
    CasCoreAuthenticationPrincipalConfiguration.class,
    CasCoreAuthenticationHandlersConfiguration.class,
    CasCoreAuthenticationSupportConfiguration.class,
    CasPersonDirectoryConfiguration.class,
    CasCoreServicesConfiguration.class,
    CasWebApplicationServiceFactoryConfiguration.class,
    CasCoreUtilConfiguration.class,
    CasCoreConfiguration.class,
    CasCoreAuthenticationServiceSelectionStrategyConfiguration.class,
    AopAutoConfiguration.class,
    RefreshAutoConfiguration.class,
    CasCoreWebConfiguration.class})
public abstract class AbstractTicketMessageQueueCommandTests {
    // Registry bean under test, injected from the context assembled above.
    @Autowired
    @Qualifier("ticketRegistry")
    protected TicketRegistry ticketRegistry;
}
| apache-2.0 |
apache/syncope | core/idrepo/logic/src/main/java/org/apache/syncope/core/logic/init/AuditAccessor.java | 2328 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.syncope.core.logic.init;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.LoggerConfig;
import org.apache.syncope.common.lib.types.AuditLoggerName;
import org.apache.syncope.core.spring.security.AuthContextUtils;
import org.springframework.transaction.annotation.Transactional;
import org.apache.syncope.core.persistence.api.entity.AuditConf;
import org.apache.syncope.core.persistence.api.dao.AuditConfDAO;
/**
 * Domain-sensitive (via {@code @Transactional}) access to audit data.
 *
 * @see AuditLoader
 */
public class AuditAccessor {
    // DAO providing the persisted audit configuration entries.
    protected final AuditConfDAO auditConfDAO;
    public AuditAccessor(final AuditConfDAO auditConfDAO) {
        this.auditConfDAO = auditConfDAO;
    }
    /**
     * Aligns Log4j logger levels with the persisted audit configuration for the
     * current domain: the audit-event logger of each active entry is set to
     * DEBUG, inactive ones to OFF, then the Log4j context is refreshed.
     *
     * @param ctx Log4j logger context whose logger configs are updated
     */
    @Transactional
    public void synchronizeLoggingWithAudit(final LoggerContext ctx) {
        // Key each audit entry by its domain-qualified audit-event logger name.
        // Note: Collectors.toMap throws on duplicate keys, so audit keys are
        // assumed unique per domain.
        Map<String, AuditConf> audits = auditConfDAO.findAll().stream().
                collect(Collectors.toMap(
                        audit -> AuditLoggerName.getAuditEventLoggerName(AuthContextUtils.getDomain(), audit.getKey()),
                        Function.identity()));
        audits.forEach((logger, audit) -> {
            LoggerConfig logConf = ctx.getConfiguration().getLoggerConfig(logger);
            logConf.setLevel(audit.isActive() ? Level.DEBUG : Level.OFF);
        });
        ctx.updateLoggers();
    }
}
| apache-2.0 |
qma/pants | tests/python/pants_test/cache/test_pinger.py | 2603 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import threading
import time
from six.moves import SimpleHTTPServer, socketserver
from pants.cache.pinger import Pinger
from pants_test.base_test import BaseTest
def get_delayed_handler(delay):
  """Returns a request-handler class whose HEAD responses are delayed.

  :param delay: Seconds to sleep before answering each HEAD request with a 200.
  """
  class DelayResponseHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    def do_HEAD(self):
      time.sleep(delay)
      self.send_response(200)
      self.end_headers()
  return DelayResponseHandler
class TestPinger(BaseTest):
  """Exercises Pinger timing and timeout behavior against local HTTP servers
  with controlled per-request delays."""
  # Artificial per-request delays (seconds) for the three local servers; the
  # Pinger timeouts below are chosen relative to these values.
  timeout_seconds = .6
  slow_seconds = .05
  fast_seconds = 0
  def setup_delayed_server(self, delay):
    """Starts a daemonized TCP server on an ephemeral port whose HEAD responses
    are delayed by `delay` seconds; returns the server."""
    server = socketserver.TCPServer(("", 0), get_delayed_handler(delay))
    thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True
    thread.start()
    return server
  def setUp(self):
    # One server per delay tier; netlocs are derived from the ephemeral ports.
    timeout = self.setup_delayed_server(self.timeout_seconds)
    slow = self.setup_delayed_server(self.slow_seconds)
    fast = self.setup_delayed_server(self.fast_seconds)
    self.servers = [timeout, slow, fast]
    self.fast_netloc = 'localhost:{}'.format(fast.socket.getsockname()[1])
    self.slow_netloc = 'localhost:{}'.format(slow.socket.getsockname()[1])
    self.timeout_netloc = 'localhost:{}'.format(timeout.socket.getsockname()[1])
  def test_pinger_times_correct(self):
    """Faster servers report lower ping times; a server slower than the pinger
    timeout is reported as UNREACHABLE."""
    test = Pinger(timeout=.5, tries=2)
    netlocs = [self.fast_netloc, self.slow_netloc, self.timeout_netloc]
    ping_results = dict(test.pings(netlocs))
    self.assertLess(ping_results[self.fast_netloc], ping_results[self.slow_netloc])
    self.assertEqual(ping_results[self.timeout_netloc], Pinger.UNREACHABLE)
  def test_pinger_timeout_config(self):
    """A timeout tighter than the slow server's delay marks it UNREACHABLE."""
    test = Pinger(timeout=self.slow_seconds - .01, tries=2)
    netlocs = [self.fast_netloc, self.slow_netloc]
    ping_results = dict(test.pings(netlocs))
    self.assertLess(ping_results[self.fast_netloc], 1)
    self.assertEqual(ping_results[self.slow_netloc], Pinger.UNREACHABLE)
  def test_global_pinger_memo(self):
    """Two Pinger instances with different timeouts disagree about the slow
    server, showing timeout config is honored per instance."""
    fast_pinger = Pinger(timeout=self.slow_seconds - .01, tries=2)
    slow_pinger = Pinger(timeout=self.timeout_seconds, tries=2)
    self.assertEqual(fast_pinger.pings([self.slow_netloc])[0][1], Pinger.UNREACHABLE)
    self.assertNotEqual(slow_pinger.pings([self.slow_netloc])[0][1], Pinger.UNREACHABLE)
  def tearDown(self):
    # Stop the background serve_forever loops started in setUp.
    for server in self.servers:
      server.shutdown()
| apache-2.0 |
patcadelina/rundeck | core/src/main/java/com/dtolabs/rundeck/core/execution/ExecArg.java | 3037 | package com.dtolabs.rundeck.core.execution;
import java.util.List;
/**
 * A single command-line argument, or a nested group of arguments, that may
 * need to be quoted/escaped before being handed to a shell or exec call.
 */
public abstract class ExecArg {
    /** Whether this argument should be quoted when rendered; defaults to true. */
    private boolean shouldQuote = true;

    /**
     * @return true when this argument wraps a sub-list of arguments
     *
     */
    public abstract boolean isList();

    /**
     * @return the nested arguments when {@link #isList()} is true, null otherwise
     */
    public abstract List<ExecArg> getList();

    /**
     * @return the raw string value when {@link #isList()} is false, null otherwise
     */
    public abstract String getString();

    /**
     * Dispatch this argument to the given visitor.
     *
     * @param converter visitor to receive this argument
     */
    public abstract void accept(Visitor converter);

    /**
     * @return true if this argument should be quoted
     */
    public boolean isQuoted() {
        return shouldQuote;
    }

    /**
     * Change whether this argument should be quoted.
     *
     * @param quoted new quoting flag
     */
    void setQuoted(boolean quoted) {
        shouldQuote = quoted;
    }

    /**
     * Callback interface for walking argument structures.
     */
    public static interface Visitor {
        public void visit(ExecArg arg);
    }

    /**
     * A plain string-valued argument.
     */
    static class StringArg extends ExecArg {
        final String arg;

        public StringArg(String arg, boolean quoted) {
            this.arg = arg;
            setQuoted(quoted);
        }

        @Override
        public boolean isList() {
            return false;
        }

        @Override
        public List<ExecArg> getList() {
            return null;
        }

        @Override
        public String getString() {
            return arg;
        }

        @Override
        public String toString() {
            return arg;
        }

        @Override
        public void accept(Visitor visitor) {
            visitor.visit(this);
        }
    }

    /**
     * A nested group of arguments, backed by an {@code ExecArgList}.
     */
    static class ListArg extends ExecArg {
        private ExecArgList delegate;

        ListArg() {
            delegate = null;
        }

        @Override
        public boolean isList() {
            return true;
        }

        @Override
        public String getString() {
            return null;
        }

        @Override
        public List<ExecArg> getList() {
            return getArgs().getList();
        }

        @Override
        public void accept(Visitor visitor) {
            visitor.visit(this);
        }

        protected ExecArgList getArgs() {
            return delegate;
        }

        protected void setArgs(ExecArgList list) {
            delegate = list;
        }

        @Override
        public String toString() {
            return "[" + delegate + "]";
        }
    }

    /**
     * Factory for a plain string argument.
     *
     * @param arg    the argument text
     * @param quoted whether the argument should be quoted
     *
     * @return a new string-valued ExecArg
     */
    public static ExecArg fromString(String arg, boolean quoted) {
        return new StringArg(arg, quoted);
    }
}
| apache-2.0 |
Deepnekroz/kaa | client/client-multi/client-cpp/kaa/context/SimpleExecutorContext.hpp | 1781 | /**
* Copyright 2014-2016 CyberVision, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SIMPLEEXECUTORCONTEXT_HPP_
#define SIMPLEEXECUTORCONTEXT_HPP_
#include <memory>
#include "kaa/context/AbstractExecutorContext.hpp"
namespace kaa {
class SimpleExecutorContext : public AbstractExecutorContext {
public:
    /**
     * Creates the context with three independent thread pools (life-cycle,
     * API and callback), each sized by its parameter; all sizes default to
     * DEFAULT_THREAD_COUNT (1).
     */
    SimpleExecutorContext(std::size_t lifeCycleThreadCount = DEFAULT_THREAD_COUNT
                        , std::size_t apiThreadCount = DEFAULT_THREAD_COUNT
                        , std::size_t callbackThreadCount = DEFAULT_THREAD_COUNT);
    // Accessors dereference the stored pool pointers; presumably only valid
    // between doInit() and doStop() -- confirm against the .cpp.
    virtual IThreadPool& getLifeCycleExecutor() { return *lifeCycleExecutor_; }
    virtual IThreadPool& getApiExecutor() { return *apiExecutor_; }
    virtual IThreadPool& getCallbackExecutor() { return *callbackExecutor_; }
public:
    static const std::size_t DEFAULT_THREAD_COUNT = 1;
protected:
    // Lifecycle hooks from AbstractExecutorContext; implementations live in
    // the corresponding .cpp file.
    virtual void doInit();
    virtual void doStop();
private:
    // Pool sizes captured at construction time.
    const std::size_t apiThreadCount_;
    const std::size_t callbackThreadCount_;
    const std::size_t lifeCycleThreadCount_;
    // One pool per executor category.
    IThreadPoolPtr apiExecutor_;
    IThreadPoolPtr callbackExecutor_;
    IThreadPoolPtr lifeCycleExecutor_;
};
} /* namespace kaa */
#endif /* SIMPLEEXECUTORCONTEXT_HPP_ */
| apache-2.0 |
susinda/devstudio-tooling-esb | plugins/org.wso2.developerstudio.eclipse.gmf.esb/src/org/wso2/developerstudio/eclipse/gmf/esb/TaskTriggerType.java | 4877 | /**
* <copyright>
* </copyright>
*
* $Id$
*/
package org.wso2.developerstudio.eclipse.gmf.esb;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.eclipse.emf.common.util.Enumerator;
/**
* <!-- begin-user-doc -->
* A representation of the literals of the enumeration '<em><b>Task Trigger Type</b></em>',
* and utility methods for working with them.
* <!-- end-user-doc -->
* @see org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage#getTaskTriggerType()
* @model
* @generated
*/
public enum TaskTriggerType implements Enumerator {
    // NOTE: EMF-generated enum (see the @generated tags); manual changes other
    // than comments will be lost when the model code is regenerated.
    /**
     * The '<em><b>Simple</b></em>' literal object.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @see #SIMPLE_VALUE
     * @generated
     * @ordered
     */
    SIMPLE(0, "Simple", "Simple"),
    /**
     * The '<em><b>Cron</b></em>' literal object.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @see #CRON_VALUE
     * @generated
     * @ordered
     */
    CRON(1, "Cron", "Cron");
    /**
     * The '<em><b>Simple</b></em>' literal value.
     * <!-- begin-user-doc -->
     * <p>
     * If the meaning of '<em><b>Simple</b></em>' literal object isn't clear,
     * there really should be more of a description here...
     * </p>
     * <!-- end-user-doc -->
     * @see #SIMPLE
     * @model name="Simple"
     * @generated
     * @ordered
     */
    public static final int SIMPLE_VALUE = 0;
    /**
     * The '<em><b>Cron</b></em>' literal value.
     * <!-- begin-user-doc -->
     * <p>
     * If the meaning of '<em><b>Cron</b></em>' literal object isn't clear,
     * there really should be more of a description here...
     * </p>
     * <!-- end-user-doc -->
     * @see #CRON
     * @model name="Cron"
     * @generated
     * @ordered
     */
    public static final int CRON_VALUE = 1;
    /**
     * An array of all the '<em><b>Task Trigger Type</b></em>' enumerators.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    private static final TaskTriggerType[] VALUES_ARRAY =
        new TaskTriggerType[] {
            SIMPLE,
            CRON,
        };
    /**
     * A public read-only list of all the '<em><b>Task Trigger Type</b></em>' enumerators.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public static final List<TaskTriggerType> VALUES = Collections.unmodifiableList(Arrays.asList(VALUES_ARRAY));
    /**
     * Returns the '<em><b>Task Trigger Type</b></em>' literal with the specified literal value.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @param literal the literal.
     * @return the matching enumerator or <code>null</code>.
     * @generated
     */
    public static TaskTriggerType get(String literal) {
        for (int i = 0; i < VALUES_ARRAY.length; ++i) {
            TaskTriggerType result = VALUES_ARRAY[i];
            if (result.toString().equals(literal)) {
                return result;
            }
        }
        return null;
    }
    /**
     * Returns the '<em><b>Task Trigger Type</b></em>' literal with the specified name.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @param name the name.
     * @return the matching enumerator or <code>null</code>.
     * @generated
     */
    public static TaskTriggerType getByName(String name) {
        for (int i = 0; i < VALUES_ARRAY.length; ++i) {
            TaskTriggerType result = VALUES_ARRAY[i];
            if (result.getName().equals(name)) {
                return result;
            }
        }
        return null;
    }
    /**
     * Returns the '<em><b>Task Trigger Type</b></em>' literal with the specified integer value.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @param value the integer value.
     * @return the matching enumerator or <code>null</code>.
     * @generated
     */
    public static TaskTriggerType get(int value) {
        switch (value) {
            case SIMPLE_VALUE: return SIMPLE;
            case CRON_VALUE: return CRON;
        }
        return null;
    }
    /**
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    private final int value;
    /**
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    private final String name;
    /**
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    private final String literal;
    /**
     * Only this class can construct instances.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    private TaskTriggerType(int value, String name, String literal) {
        this.value = value;
        this.name = name;
        this.literal = literal;
    }
    /**
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public int getValue() {
      return value;
    }
    /**
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public String getName() {
      return name;
    }
    /**
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public String getLiteral() {
      return literal;
    }
    /**
     * Returns the literal value of the enumerator, which is its string representation.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public String toString() {
        return literal;
    }
} //TaskTriggerType
| apache-2.0 |
tejal29/pants | tests/python/pants_test/option/test_custom_types.py | 2293 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pants.option.custom_types import dict_type, list_type
from pants.option.errors import ParseError
class CustomTypesTest(unittest.TestCase):
  """Tests for the dict_type and list_type option-value parsers."""
  def _do_test(self, expected_val, s):
    """Parses `s` with the parser implied by `expected_val`'s type and asserts equality.

    dict expectation -> dict_type; list/tuple expectation -> list_type.
    """
    if isinstance(expected_val, dict):
      val = dict_type(s)
    elif isinstance(expected_val, (list, tuple)):
      val = list_type(s)
    else:
      raise Exception('Expected value {0} is of unsupported type: {1}'.format(expected_val,
                                                                              type(expected_val)))
    self.assertEquals(expected_val, val)
  def _do_test_dict_error(self, s):
    """Asserts that dict_type rejects `s` with a ParseError."""
    with self.assertRaises(ParseError):
      self._do_test({}, s)
  def _do_test_list_error(self, s):
    """Asserts that list_type rejects `s` with a ParseError."""
    with self.assertRaises(ParseError):
      self._do_test([], s)
  def test_dict(self):
    # Accepts python-literal strings (including '+' concatenation) and
    # already-parsed dicts; rejects non-dict input.
    self._do_test({}, '{}')
    self._do_test({ 'a': 'b' }, '{ "a": "b" }')
    self._do_test({ 'a': 'b' }, "{ 'a': 'b' }")
    self._do_test({ 'a': [1, 2, 3] }, '{ "a": [1, 2, 3] }')
    self._do_test({ 'a': [1, 2, 3, 4] }, '{ "a": [1, 2] + [3, 4] }')
    self._do_test({}, {})
    self._do_test({ 'a': 'b' }, { 'a': 'b' })
    self._do_test({ 'a': [1, 2, 3] }, { 'a': [1, 2, 3] })
    self._do_test_dict_error('[]')
    self._do_test_dict_error('[1, 2, 3]')
    self._do_test_dict_error('1')
    self._do_test_dict_error('"a"')
  def test_list(self):
    # Bare comma-separated values parse as tuples; bracketed literals as lists.
    self._do_test([], '[]')
    self._do_test([1, 2, 3], '[1, 2, 3]')
    self._do_test((1, 2, 3), '1,2,3')
    self._do_test([1, 2, 3, 4], '[1, 2] + [3, 4]')
    self._do_test((1, 2, 3, 4), '(1, 2) + (3, 4)')
    self._do_test(['a', 'b', 'c'], '["a", "b", "c"]')
    self._do_test(['a', 'b', 'c'], "['a', 'b', 'c']")
    self._do_test([], [])
    self._do_test([1, 2, 3], [1, 2, 3])
    self._do_test((1, 2, 3), (1, 2, 3))
    self._do_test(['a', 'b', 'c'], ['a', 'b', 'c'])
    self._do_test_list_error('{}')
    self._do_test_list_error('{"a": "b"}')
    self._do_test_list_error('1')
    self._do_test_list_error('"a"')
| apache-2.0 |
crobby/oshinko-cli | vendor/github.com/openshift/origin/test/integration/newapp_test.go | 76636 | package integration
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"reflect"
"sort"
"strconv"
"strings"
"testing"
"time"
"github.com/AaronO/go-git-http"
"github.com/AaronO/go-git-http/auth"
"github.com/elazarl/goproxy"
docker "github.com/fsouza/go-dockerclient"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ktypes "k8s.io/apimachinery/pkg/types"
utilerrs "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
kwatch "k8s.io/apimachinery/pkg/watch"
krest "k8s.io/client-go/rest"
clientgotesting "k8s.io/client-go/testing"
"k8s.io/kubernetes/pkg/api/legacyscheme"
kapi "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/kubectl/categories"
"k8s.io/kubernetes/pkg/kubectl/resource"
appsapi "github.com/openshift/origin/pkg/apps/apis/apps"
buildapi "github.com/openshift/origin/pkg/build/apis/build"
"github.com/openshift/origin/pkg/generate"
"github.com/openshift/origin/pkg/generate/app"
apptest "github.com/openshift/origin/pkg/generate/app/test"
"github.com/openshift/origin/pkg/generate/dockerfile"
"github.com/openshift/origin/pkg/generate/git"
"github.com/openshift/origin/pkg/generate/jenkinsfile"
"github.com/openshift/origin/pkg/generate/source"
imageapi "github.com/openshift/origin/pkg/image/apis/image"
imagefake "github.com/openshift/origin/pkg/image/generated/internalclientset/fake"
imageinternalversion "github.com/openshift/origin/pkg/image/generated/internalclientset/typed/image/internalversion"
dockerregistry "github.com/openshift/origin/pkg/image/importer/dockerv1client"
clicmd "github.com/openshift/origin/pkg/oc/cli/cmd"
"github.com/openshift/origin/pkg/oc/generate/app/cmd"
routefake "github.com/openshift/origin/pkg/route/generated/internalclientset/fake"
templateapi "github.com/openshift/origin/pkg/template/apis/template"
templatefake "github.com/openshift/origin/pkg/template/generated/internalclientset/fake"
_ "github.com/openshift/origin/pkg/api/install"
"github.com/openshift/origin/test/util"
s2igit "github.com/openshift/source-to-image/pkg/scm/git"
)
// skipExternalGit skips the calling test when the SKIP_EXTERNAL_GIT
// environment variable is set to a non-empty value.
func skipExternalGit(t *testing.T) {
	if os.Getenv("SKIP_EXTERNAL_GIT") != "" {
		t.Skip("external Git tests are disabled")
	}
}
// TestNewAppAddArguments verifies that AppConfig.AddArguments classifies raw
// CLI arguments into environment pairs (k=v), source repositories
// (paths/URLs, optionally with a #ref) and component specs, returning
// anything it could not classify.
func TestNewAppAddArguments(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "test-newapp")
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	// A real directory so path arguments are recognized as local source repos.
	testDir := filepath.Join(tmpDir, "test/one/two/three")
	err = os.MkdirAll(testDir, 0777)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	tests := map[string]struct {
		args       []string
		env        []string
		parms      []string
		repos      []string
		components []string
		unknown    []string
	}{
		"components": {
			args:       []string{"one", "two+three", "four~five"},
			components: []string{"one", "two+three", "four~five"},
			unknown:    []string{},
		},
		"source": {
			args:    []string{".", testDir, "git://github.com/openshift/origin.git"},
			repos:   []string{".", testDir, "git://github.com/openshift/origin.git"},
			unknown: []string{},
		},
		"source custom ref": {
			args:    []string{"https://github.com/openshift/ruby-hello-world#beta4"},
			repos:   []string{"https://github.com/openshift/ruby-hello-world#beta4"},
			unknown: []string{},
		},
		"env": {
			args:    []string{"first=one", "second=two", "third=three"},
			env:     []string{"first=one", "second=two", "third=three"},
			unknown: []string{},
		},
		"mix 1": {
			args:       []string{"git://github.com/openshift/origin.git", "mysql+ruby~git@github.com/openshift/origin.git", "env1=test", "ruby-helloworld-sample"},
			repos:      []string{"git://github.com/openshift/origin.git"},
			components: []string{"mysql+ruby~git@github.com/openshift/origin.git", "ruby-helloworld-sample"},
			env:        []string{"env1=test"},
			unknown:    []string{},
		},
	}
	for n, c := range tests {
		a := &cmd.AppConfig{}
		unknown := a.AddArguments(c.args)
		if !reflect.DeepEqual(a.Environment, c.env) {
			t.Errorf("%s: Different env variables. Expected: %v, Actual: %v", n, c.env, a.Environment)
		}
		if !reflect.DeepEqual(a.SourceRepositories, c.repos) {
			t.Errorf("%s: Different source repos. Expected: %v, Actual: %v", n, c.repos, a.SourceRepositories)
		}
		if !reflect.DeepEqual(a.Components, c.components) {
			t.Errorf("%s: Different components. Expected: %v, Actual: %v", n, c.components, a.Components)
		}
		if !reflect.DeepEqual(unknown, c.unknown) {
			t.Errorf("%s: Different unknown result. Expected: %v, Actual: %v", n, c.unknown, unknown)
		}
	}
}
// TestNewAppResolve checks ComponentReferences.Resolve across: an
// unresolvable image tag (error), a pre-resolved match (no-op success), a
// build request with no resolver (error), and a docker-strategy build.
// NOTE(review): the cfg field is populated in the last case but never read by
// the assertion loop, so the docker-strategy config has no effect on
// Resolve() here -- confirm whether it was meant to be applied.
func TestNewAppResolve(t *testing.T) {
	tests := []struct {
		name        string
		cfg         cmd.AppConfig
		components  app.ComponentReferences
		expectedErr string
	}{
		{
			name: "Resolver error",
			components: app.ComponentReferences{
				app.ComponentReference(&app.ComponentInput{
					Value: "mysql:invalid",
					Resolver: app.UniqueExactOrInexactMatchResolver{
						Searcher: app.DockerRegistrySearcher{
							Client: dockerregistry.NewClient(10*time.Second, true),
						},
					},
				})},
			expectedErr: `no match for "mysql:invalid`,
		},
		{
			name: "Successful mysql builder",
			components: app.ComponentReferences{
				app.ComponentReference(&app.ComponentInput{
					Value: "mysql",
					ResolvedMatch: &app.ComponentMatch{
						Builder: true,
					},
				})},
			expectedErr: "",
		},
		{
			name: "Unable to build source code",
			components: app.ComponentReferences{
				app.ComponentReference(&app.ComponentInput{
					Value:         "mysql",
					ExpectToBuild: true,
				})},
			expectedErr: "no resolver",
		},
		{
			name: "Successful docker build",
			cfg: cmd.AppConfig{
				GenerationInputs: cmd.GenerationInputs{
					Strategy: generate.StrategyDocker,
				},
			},
			components: app.ComponentReferences{
				app.ComponentReference(&app.ComponentInput{
					Value:         "mysql",
					ExpectToBuild: true,
				})},
			expectedErr: "",
		},
	}
	for _, test := range tests {
		err := test.components.Resolve()
		if err != nil {
			if !strings.Contains(err.Error(), test.expectedErr) {
				t.Errorf("%s: Invalid error: Expected %s, got %v", test.name, test.expectedErr, err)
			}
		} else if len(test.expectedErr) != 0 {
			t.Errorf("%s: Expected %s error but got none", test.name, test.expectedErr)
		}
	}
}
// TestNewAppDetectSource runs cmd.DetectSource against a local mock git
// repository and asserts that the language detectors populate the repository
// info with the expected term ("ruby"). Skipped when SKIP_EXTERNAL_GIT is set.
func TestNewAppDetectSource(t *testing.T) {
	skipExternalGit(t)
	gitLocalDir, err := s2igit.CreateLocalGitDirectory()
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(gitLocalDir)
	dockerSearcher := app.DockerRegistrySearcher{
		Client: dockerregistry.NewClient(10*time.Second, true),
	}
	// Mock repositories seeded into the local git dir; mocks[0] is the ruby one.
	mocks := MockSourceRepositories(t, gitLocalDir)
	tests := []struct {
		name         string
		cfg          *cmd.AppConfig
		repositories []*app.SourceRepository
		expectedLang string
		expectedErr  string
	}{
		{
			name: "detect source - ruby",
			cfg: &cmd.AppConfig{
				Resolvers: cmd.Resolvers{
					Detector: app.SourceRepositoryEnumerator{
						Detectors:         source.DefaultDetectors,
						DockerfileTester:  dockerfile.NewTester(),
						JenkinsfileTester: jenkinsfile.NewTester(),
					},
					DockerSearcher: dockerSearcher,
				},
			},
			repositories: []*app.SourceRepository{mocks[0]},
			expectedLang: "ruby",
			expectedErr:  "",
		},
	}
	for _, test := range tests {
		err := cmd.DetectSource(test.repositories, test.cfg.Detector, &test.cfg.GenerationInputs)
		if err != nil {
			if !strings.Contains(err.Error(), test.expectedErr) {
				t.Errorf("%s: Invalid error: Expected %s, got %v", test.name, test.expectedErr, err)
			}
		} else if len(test.expectedErr) != 0 {
			t.Errorf("%s: Expected %s error but got none", test.name, test.expectedErr)
		}
		// Detection must populate each repository's info with the expected term.
		for _, repo := range test.repositories {
			info := repo.Info()
			if info == nil {
				t.Errorf("%s: expected repository info to be populated; it is nil", test.name)
				continue
			}
			if term := strings.Join(info.Terms(), ","); term != test.expectedLang {
				t.Errorf("%s: expected repository info term to be %s; got %s\n", test.name, test.expectedLang, term)
			}
		}
	}
}
func mapContains(a, b map[string]string) bool {
for k, v := range a {
if v2, exists := b[k]; !exists || v != v2 {
return false
}
}
return true
}
// ExactMatchDockerSearcher returns a match with the value that was passed in
// and a match score of 0.0 (exact). Any preset Errs are returned alongside.
type ExactMatchDockerSearcher struct {
	Errs []error
}
// Search always returns a match for every term passed in
func (r *ExactMatchDockerSearcher) Search(precise bool, terms ...string) (app.ComponentMatches, []error) {
	matches := app.ComponentMatches{}
	for _, value := range terms {
		matches = append(matches, &app.ComponentMatch{
			Value:       value,
			Name:        value,
			Argument:    fmt.Sprintf("--docker-image=%q", value),
			Description: fmt.Sprintf("Docker image %q", value),
			Score:       0.0,
		})
	}
	return matches, r.Errs
}
// Some circular reference detection requires ImageStreams to
// be created with Tag support. The ExactMatchDirectTagDockerSearcher
// creates a Matcher which triggers the logic to enable tag support.
type ExactMatchDirectTagDockerSearcher struct {
	Errs []error
}
// Search returns an exact (score 0.0) match for every term; each match also
// carries an empty DockerImage and a "direct-tag" meta entry so downstream
// ImageStream creation takes the tag-enabled code path.
func (r *ExactMatchDirectTagDockerSearcher) Search(precise bool, terms ...string) (app.ComponentMatches, []error) {
	matches := app.ComponentMatches{}
	for _, value := range terms {
		matches = append(matches, &app.ComponentMatch{
			Value:       value,
			Name:        value,
			Argument:    fmt.Sprintf("--docker-image=%q", value),
			Description: fmt.Sprintf("Docker image %q", value),
			Score:       0.0,
			Image:       &imageapi.DockerImage{},
			Meta:        map[string]string{"direct-tag": "1"},
		})
	}
	return matches, r.Errs
}
// TestNewAppRunAll runs AppConfig.Run against a table of configurations and
// verifies the generated resources (image streams, build/deployment configs,
// services, volumes), the resolved application name, expected errors, and
// insecure-registry annotations.
//
// Fixes in this revision:
//   - the volume-type mismatch message printed got/expected in swapped order
//   - the final name check printed the expected name instead of the actual one
func TestNewAppRunAll(t *testing.T) {
	skipExternalGit(t)
	dockerSearcher := app.DockerRegistrySearcher{
		Client: dockerregistry.NewClient(10*time.Second, true),
	}
	failImageClient := &imagefake.Clientset{}
	failImageClient.AddReactor("get", "images", func(action clientgotesting.Action) (handled bool, ret runtime.Object, err error) {
		return true, nil, errors.NewInternalError(fmt.Errorf(""))
	})
	okTemplateClient := &templatefake.Clientset{}
	okImageClient := &imagefake.Clientset{}
	okRouteClient := &routefake.Clientset{}
	tests := []struct {
		name            string
		config          *cmd.AppConfig
		expected        map[string][]string
		expectedName    string
		expectedErr     error
		errFn           func(error) bool
		expectInsecure  sets.String
		expectedVolumes map[string]string
		checkPort       string
	}{
		{
			name: "successful ruby app generation",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					SourceRepositories: []string{"https://github.com/openshift/ruby-hello-world"},
				},
				Resolvers: cmd.Resolvers{
					ImageStreamByAnnotationSearcher: app.NewImageStreamByAnnotationSearcher(okImageClient.Image(), okImageClient.Image(), []string{"default"}),
					TemplateSearcher: app.TemplateSearcher{
						Client:     okTemplateClient.Template(),
						Namespaces: []string{"openshift", "default"},
					},
					DockerSearcher: fakeDockerSearcher(),
					ImageStreamSearcher: app.ImageStreamSearcher{
						Client:            okImageClient.Image(),
						ImageStreamImages: okImageClient.Image(),
						Namespaces:        []string{"default"},
					},
					Detector: app.SourceRepositoryEnumerator{
						Detectors:         source.DefaultDetectors,
						DockerfileTester:  dockerfile.NewTester(),
						JenkinsfileTester: jenkinsfile.NewTester(),
					},
				},
				GenerationInputs: cmd.GenerationInputs{
					Strategy: generate.StrategySource,
				},
				Typer:           legacyscheme.Scheme,
				ImageClient:     okImageClient.Image(),
				TemplateClient:  okTemplateClient.Template(),
				RouteClient:     okRouteClient.Route(),
				OriginNamespace: "default",
			},
			expected: map[string][]string{
				"imageStream":      {"ruby-hello-world", "ruby"},
				"buildConfig":      {"ruby-hello-world"},
				"deploymentConfig": {"ruby-hello-world"},
				"service":          {"ruby-hello-world"},
			},
			expectedName:    "ruby-hello-world",
			expectedVolumes: nil,
			expectedErr:     nil,
		},
		{
			name: "successful ruby app generation with labels",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					SourceRepositories: []string{"https://github.com/openshift/ruby-hello-world"},
				},
				Resolvers: cmd.Resolvers{
					DockerSearcher: fakeDockerSearcher(),
					ImageStreamSearcher: app.ImageStreamSearcher{
						Client:            okImageClient.Image(),
						ImageStreamImages: okImageClient.Image(),
						Namespaces:        []string{"default"},
					},
					ImageStreamByAnnotationSearcher: app.NewImageStreamByAnnotationSearcher(okImageClient.Image(), okImageClient.Image(), []string{"default"}),
					TemplateSearcher: app.TemplateSearcher{
						Client:     okTemplateClient.Template(),
						Namespaces: []string{"openshift", "default"},
					},
					Detector: app.SourceRepositoryEnumerator{
						Detectors:         source.DefaultDetectors,
						DockerfileTester:  dockerfile.NewTester(),
						JenkinsfileTester: jenkinsfile.NewTester(),
					},
				},
				GenerationInputs: cmd.GenerationInputs{
					Strategy: generate.StrategySource,
					Labels:   map[string]string{"label1": "value1", "label2": "value2"},
				},
				Typer:           legacyscheme.Scheme,
				ImageClient:     okImageClient.Image(),
				TemplateClient:  okTemplateClient.Template(),
				RouteClient:     okRouteClient.Route(),
				OriginNamespace: "default",
			},
			expected: map[string][]string{
				"imageStream":      {"ruby-hello-world", "ruby"},
				"buildConfig":      {"ruby-hello-world"},
				"deploymentConfig": {"ruby-hello-world"},
				"service":          {"ruby-hello-world"},
			},
			expectedName:    "ruby-hello-world",
			expectedVolumes: nil,
			expectedErr:     nil,
		},
		{
			name: "successful docker app generation",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					SourceRepositories: []string{"https://github.com/openshift/ruby-hello-world"},
				},
				Resolvers: cmd.Resolvers{
					DockerSearcher: fakeSimpleDockerSearcher(),
					ImageStreamSearcher: app.ImageStreamSearcher{
						Client:            okImageClient.Image(),
						ImageStreamImages: okImageClient.Image(),
						Namespaces:        []string{"default"},
					},
					ImageStreamByAnnotationSearcher: app.NewImageStreamByAnnotationSearcher(okImageClient.Image(), okImageClient.Image(), []string{"default"}),
					TemplateSearcher: app.TemplateSearcher{
						Client:     okTemplateClient.Template(),
						Namespaces: []string{"openshift", "default"},
					},
					Detector: app.SourceRepositoryEnumerator{
						Detectors:         source.DefaultDetectors,
						DockerfileTester:  dockerfile.NewTester(),
						JenkinsfileTester: jenkinsfile.NewTester(),
					},
				},
				GenerationInputs: cmd.GenerationInputs{
					Strategy: generate.StrategyDocker,
				},
				Typer:           legacyscheme.Scheme,
				ImageClient:     okImageClient.Image(),
				TemplateClient:  okTemplateClient.Template(),
				RouteClient:     okRouteClient.Route(),
				OriginNamespace: "default",
			},
			checkPort: "8080",
			expected: map[string][]string{
				"imageStream":      {"ruby-hello-world", "ruby-22-centos7"},
				"buildConfig":      {"ruby-hello-world"},
				"deploymentConfig": {"ruby-hello-world"},
				"service":          {"ruby-hello-world"},
			},
			expectedName: "ruby-hello-world",
			expectedErr:  nil,
		},
		{
			name: "app generation using context dir",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					SourceRepositories: []string{"https://github.com/openshift/sti-ruby"},
				},
				GenerationInputs: cmd.GenerationInputs{
					ContextDir: "2.0/test/rack-test-app",
				},
				Resolvers: cmd.Resolvers{
					DockerSearcher:                  dockerSearcher,
					ImageStreamSearcher:             fakeImageStreamSearcher(),
					ImageStreamByAnnotationSearcher: app.NewImageStreamByAnnotationSearcher(okImageClient.Image(), okImageClient.Image(), []string{"default"}),
					TemplateSearcher: app.TemplateSearcher{
						Client:     okTemplateClient.Template(),
						Namespaces: []string{"openshift", "default"},
					},
					Detector: app.SourceRepositoryEnumerator{
						Detectors:         source.DefaultDetectors,
						DockerfileTester:  dockerfile.NewTester(),
						JenkinsfileTester: jenkinsfile.NewTester(),
					},
				},
				Typer:           legacyscheme.Scheme,
				ImageClient:     okImageClient.Image(),
				TemplateClient:  okTemplateClient.Template(),
				RouteClient:     okRouteClient.Route(),
				OriginNamespace: "default",
			},
			expected: map[string][]string{
				"imageStream":      {"sti-ruby"},
				"buildConfig":      {"sti-ruby"},
				"deploymentConfig": {"sti-ruby"},
				"service":          {"sti-ruby"},
			},
			expectedName:    "sti-ruby",
			expectedVolumes: nil,
			expectedErr:     nil,
		},
		{
			name: "failed app generation using missing context dir",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					SourceRepositories: []string{"https://github.com/openshift/sti-ruby"},
				},
				GenerationInputs: cmd.GenerationInputs{
					ContextDir: "2.0/test/missing-dir",
				},
				Resolvers: cmd.Resolvers{
					DockerSearcher:                  dockerSearcher,
					ImageStreamSearcher:             fakeImageStreamSearcher(),
					ImageStreamByAnnotationSearcher: app.NewImageStreamByAnnotationSearcher(okImageClient.Image(), okImageClient.Image(), []string{"default"}),
					TemplateSearcher: app.TemplateSearcher{
						Client:     okTemplateClient.Template(),
						Namespaces: []string{"openshift", "default"},
					},
					Detector: app.SourceRepositoryEnumerator{
						Detectors:         source.DefaultDetectors,
						DockerfileTester:  dockerfile.NewTester(),
						JenkinsfileTester: jenkinsfile.NewTester(),
					},
				},
				Typer:           legacyscheme.Scheme,
				ImageClient:     okImageClient.Image(),
				TemplateClient:  okTemplateClient.Template(),
				RouteClient:     okRouteClient.Route(),
				OriginNamespace: "default",
			},
			expected: map[string][]string{
				"imageStream":      {"sti-ruby"},
				"buildConfig":      {"sti-ruby"},
				"deploymentConfig": {"sti-ruby"},
				"service":          {"sti-ruby"},
			},
			expectedName:    "sti-ruby",
			expectedVolumes: nil,
			errFn: func(err error) bool {
				return err.Error() == "supplied context directory '2.0/test/missing-dir' does not exist in 'https://github.com/openshift/sti-ruby'"
			},
		},
		{
			name: "insecure registry generation",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					Components:         []string{"myrepo:5000/myco/example"},
					SourceRepositories: []string{"https://github.com/openshift/ruby-hello-world"},
				},
				GenerationInputs: cmd.GenerationInputs{
					Strategy:         generate.StrategySource,
					InsecureRegistry: true,
				},
				Resolvers: cmd.Resolvers{
					DockerSearcher: app.DockerClientSearcher{
						Client: &apptest.FakeDockerClient{
							Images: []docker.APIImages{{RepoTags: []string{"myrepo:5000/myco/example"}}},
							Image:  dockerBuilderImage(),
						},
						Insecure:         true,
						RegistrySearcher: &ExactMatchDockerSearcher{},
					},
					ImageStreamSearcher: app.ImageStreamSearcher{
						Client:            okImageClient.Image(),
						ImageStreamImages: okImageClient.Image(),
						Namespaces:        []string{"default"},
					},
					TemplateSearcher: app.TemplateSearcher{
						Client:     okTemplateClient.Template(),
						Namespaces: []string{},
					},
					TemplateFileSearcher: &app.TemplateFileSearcher{
						Builder: resource.NewBuilder(&resource.Mapper{
							RESTMapper:   legacyscheme.Registry.RESTMapper(),
							ObjectTyper:  legacyscheme.Scheme,
							ClientMapper: resource.DisabledClientForMapping{},
							Decoder:      legacyscheme.Codecs.UniversalDecoder(),
						}, nil, &categories.SimpleCategoryExpander{}),
					},
					Detector: app.SourceRepositoryEnumerator{
						Detectors:         source.DefaultDetectors,
						DockerfileTester:  dockerfile.NewTester(),
						JenkinsfileTester: jenkinsfile.NewTester(),
					},
				},
				Typer:           legacyscheme.Scheme,
				ImageClient:     okImageClient.Image(),
				TemplateClient:  okTemplateClient.Template(),
				RouteClient:     okRouteClient.Route(),
				OriginNamespace: "default",
			},
			expected: map[string][]string{
				"imageStream":      {"example", "ruby-hello-world"},
				"buildConfig":      {"ruby-hello-world"},
				"deploymentConfig": {"ruby-hello-world"},
				"service":          {"ruby-hello-world"},
			},
			expectedName:    "ruby-hello-world",
			expectedErr:     nil,
			expectedVolumes: nil,
			expectInsecure:  sets.NewString("example"),
		},
		{
			name: "emptyDir volumes",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					DockerImages: []string{"mysql"},
				},
				Resolvers: cmd.Resolvers{
					DockerSearcher: dockerSearcher,
					ImageStreamSearcher: app.ImageStreamSearcher{
						Client:            okImageClient.Image(),
						ImageStreamImages: okImageClient.Image(),
						Namespaces:        []string{"default"},
					},
					TemplateSearcher: app.TemplateSearcher{
						Client:     okTemplateClient.Template(),
						Namespaces: []string{"openshift", "default"},
					},
					Detector: app.SourceRepositoryEnumerator{
						Detectors:         source.DefaultDetectors,
						DockerfileTester:  dockerfile.NewTester(),
						JenkinsfileTester: jenkinsfile.NewTester(),
					},
				},
				Typer:           legacyscheme.Scheme,
				ImageClient:     okImageClient.Image(),
				TemplateClient:  okTemplateClient.Template(),
				RouteClient:     okRouteClient.Route(),
				OriginNamespace: "default",
			},
			expected: map[string][]string{
				"imageStream":      {"mysql"},
				"deploymentConfig": {"mysql"},
				"service":          {"mysql"},
				"volumeMounts":     {"mysql-volume-1"},
			},
			expectedName: "mysql",
			expectedVolumes: map[string]string{
				"mysql-volume-1": "EmptyDir",
			},
			expectedErr: nil,
		},
		{
			name: "Docker build",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					SourceRepositories: []string{"https://github.com/openshift/ruby-hello-world"},
				},
				Resolvers: cmd.Resolvers{
					DockerSearcher: app.DockerClientSearcher{
						Client: &apptest.FakeDockerClient{
							Images: []docker.APIImages{{RepoTags: []string{"centos/ruby-22-centos7"}}},
							Image:  dockerBuilderImage(),
						},
						Insecure:         true,
						RegistrySearcher: &ExactMatchDockerSearcher{},
					},
					ImageStreamSearcher: app.ImageStreamSearcher{
						Client:            okImageClient.Image(),
						ImageStreamImages: okImageClient.Image(),
						Namespaces:        []string{"default"},
					},
					ImageStreamByAnnotationSearcher: app.NewImageStreamByAnnotationSearcher(okImageClient.Image(), okImageClient.Image(), []string{"default"}),
					TemplateSearcher: app.TemplateSearcher{
						Client:     okTemplateClient.Template(),
						Namespaces: []string{"openshift", "default"},
					},
					Detector: app.SourceRepositoryEnumerator{
						Detectors:         source.DefaultDetectors,
						DockerfileTester:  dockerfile.NewTester(),
						JenkinsfileTester: jenkinsfile.NewTester(),
					},
				},
				Typer:           legacyscheme.Scheme,
				ImageClient:     okImageClient.Image(),
				TemplateClient:  okTemplateClient.Template(),
				RouteClient:     okRouteClient.Route(),
				OriginNamespace: "default",
			},
			expected: map[string][]string{
				"imageStream":      {"ruby-hello-world", "ruby-22-centos7"},
				"buildConfig":      {"ruby-hello-world"},
				"deploymentConfig": {"ruby-hello-world"},
				"service":          {"ruby-hello-world"},
			},
			expectedName: "ruby-hello-world",
			expectedErr:  nil,
		},
		{
			name: "Docker build with no registry image",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					SourceRepositories: []string{"https://github.com/openshift/ruby-hello-world"},
				},
				Resolvers: cmd.Resolvers{
					DockerSearcher: app.DockerClientSearcher{
						Client: &apptest.FakeDockerClient{
							Images: []docker.APIImages{{RepoTags: []string{"centos/ruby-22-centos7"}}},
							Image:  dockerBuilderImage(),
						},
						Insecure: true,
					},
					ImageStreamSearcher: app.ImageStreamSearcher{
						Client:            okImageClient.Image(),
						ImageStreamImages: okImageClient.Image(),
						Namespaces:        []string{"default"},
					},
					ImageStreamByAnnotationSearcher: app.NewImageStreamByAnnotationSearcher(okImageClient.Image(), okImageClient.Image(), []string{"default"}),
					TemplateSearcher: app.TemplateSearcher{
						Client:     okTemplateClient.Template(),
						Namespaces: []string{"openshift", "default"},
					},
					Detector: app.SourceRepositoryEnumerator{
						Detectors:         source.DefaultDetectors,
						DockerfileTester:  dockerfile.NewTester(),
						JenkinsfileTester: jenkinsfile.NewTester(),
					},
				},
				Typer:           legacyscheme.Scheme,
				ImageClient:     okImageClient.Image(),
				TemplateClient:  okTemplateClient.Template(),
				RouteClient:     okRouteClient.Route(),
				OriginNamespace: "default",
			},
			expected: map[string][]string{
				"imageStream":      {"ruby-hello-world"},
				"buildConfig":      {"ruby-hello-world"},
				"deploymentConfig": {"ruby-hello-world"},
				"service":          {"ruby-hello-world"},
			},
			expectedName: "ruby-hello-world",
			expectedErr:  nil,
		},
		{
			name: "custom name",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					DockerImages: []string{"mysql"},
				},
				GenerationInputs: cmd.GenerationInputs{
					Name: "custom",
				},
				Resolvers: cmd.Resolvers{
					DockerSearcher: app.DockerClientSearcher{
						Client: &apptest.FakeDockerClient{
							Images: []docker.APIImages{{RepoTags: []string{"mysql"}}},
							Image: &docker.Image{
								Config: &docker.Config{
									ExposedPorts: map[docker.Port]struct{}{
										"8080/tcp": {},
									},
								},
							},
						},
						RegistrySearcher: &ExactMatchDockerSearcher{},
					},
					ImageStreamSearcher: app.ImageStreamSearcher{
						Client:            okImageClient.Image(),
						ImageStreamImages: okImageClient.Image(),
						Namespaces:        []string{"default"},
					},
					TemplateSearcher: app.TemplateSearcher{
						Client:     okTemplateClient.Template(),
						Namespaces: []string{"openshift", "default"},
					},
				},
				Typer:           legacyscheme.Scheme,
				ImageClient:     okImageClient.Image(),
				TemplateClient:  okTemplateClient.Template(),
				RouteClient:     okRouteClient.Route(),
				OriginNamespace: "default",
			},
			expected: map[string][]string{
				"imageStream":      {"custom"},
				"deploymentConfig": {"custom"},
				"service":          {"custom"},
			},
			expectedName: "custom",
			expectedErr:  nil,
		},
		{
			name: "partial matches",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					DockerImages: []string{"mysql"},
				},
				GenerationInputs: cmd.GenerationInputs{
					Name: "custom",
				},
				Resolvers: cmd.Resolvers{
					DockerSearcher: app.DockerClientSearcher{
						RegistrySearcher: &ExactMatchDockerSearcher{Errs: []error{errors.NewInternalError(fmt.Errorf("test error"))}},
					},
					ImageStreamSearcher: app.ImageStreamSearcher{
						Client:            failImageClient.Image(),
						ImageStreamImages: okImageClient.Image(),
						Namespaces:        []string{"default"},
					},
					TemplateSearcher: app.TemplateSearcher{
						Client:     okTemplateClient.Template(),
						Namespaces: []string{"openshift", "default"},
					},
				},
				Typer:           legacyscheme.Scheme,
				ImageClient:     okImageClient.Image(),
				TemplateClient:  okTemplateClient.Template(),
				RouteClient:     okRouteClient.Route(),
				OriginNamespace: "default",
			},
			expected: map[string][]string{
				"imageStream":      {"custom"},
				"deploymentConfig": {"custom"},
				"service":          {"custom"},
			},
			expectedName: "custom",
			errFn: func(err error) bool {
				err = err.(utilerrs.Aggregate).Errors()[0]
				match, ok := err.(app.ErrNoMatch)
				if !ok {
					return false
				}
				if match.Value != "mysql" {
					return false
				}
				t.Logf("%#v", match.Errs[0])
				return len(match.Errs) == 1 && strings.Contains(match.Errs[0].Error(), "test error")
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			test.config.Out, test.config.ErrOut = os.Stdout, os.Stderr
			test.config.Deploy = true
			test.config.ImageClient = &NewAppFakeImageClient{
				proxy: test.config.ImageClient,
			}
			res, err := test.config.Run()
			if test.errFn != nil {
				if !test.errFn(err) {
					t.Errorf("%s: Error mismatch! Unexpected error: %#v", test.name, err)
					return
				}
			} else if err != test.expectedErr {
				t.Errorf("%s: Error mismatch! Expected %v, got %v", test.name, test.expectedErr, err)
				return
			}
			if err != nil {
				return
			}
			if res.Name != test.expectedName {
				t.Errorf("%s: Name was not correct: %v", test.name, res.Name)
				return
			}
			imageStreams := []*imageapi.ImageStream{}
			got := map[string][]string{}
			gotVolumes := map[string]string{}
			for _, obj := range res.List.Items {
				switch tp := obj.(type) {
				case *buildapi.BuildConfig:
					got["buildConfig"] = append(got["buildConfig"], tp.Name)
				case *kapi.Service:
					if test.checkPort != "" {
						if len(tp.Spec.Ports) == 0 {
							t.Errorf("%s: did not get any ports in service", test.name)
							break
						}
						expectedPort, _ := strconv.Atoi(test.checkPort)
						if tp.Spec.Ports[0].Port != int32(expectedPort) {
							t.Errorf("%s: did not get expected port in service. Expected: %d. Got %d\n",
								test.name, expectedPort, tp.Spec.Ports[0].Port)
						}
					}
					if test.config.Labels != nil {
						if !mapContains(test.config.Labels, tp.Spec.Selector) {
							t.Errorf("%s: did not get expected service selector. Expected: %v. Got: %v",
								test.name, test.config.Labels, tp.Spec.Selector)
						}
					}
					got["service"] = append(got["service"], tp.Name)
				case *imageapi.ImageStream:
					got["imageStream"] = append(got["imageStream"], tp.Name)
					imageStreams = append(imageStreams, tp)
				case *appsapi.DeploymentConfig:
					got["deploymentConfig"] = append(got["deploymentConfig"], tp.Name)
					if podTemplate := tp.Spec.Template; podTemplate != nil {
						for _, volume := range podTemplate.Spec.Volumes {
							if volume.VolumeSource.EmptyDir != nil {
								gotVolumes[volume.Name] = "EmptyDir"
							} else {
								gotVolumes[volume.Name] = "UNKNOWN"
							}
						}
						for _, container := range podTemplate.Spec.Containers {
							for _, volumeMount := range container.VolumeMounts {
								got["volumeMounts"] = append(got["volumeMounts"], volumeMount.Name)
							}
						}
					}
					if test.config.Labels != nil {
						if !mapContains(test.config.Labels, tp.Spec.Selector) {
							t.Errorf("%s: did not get expected deployment config rc selector. Expected: %v. Got: %v",
								test.name, test.config.Labels, tp.Spec.Selector)
						}
					}
				}
			}
			if len(test.expected) != len(got) {
				t.Errorf("%s: Resource kind size mismatch! Expected %d, got %d", test.name, len(test.expected), len(got))
				return
			}
			for k, exp := range test.expected {
				g, ok := got[k]
				if !ok {
					t.Errorf("%s: Didn't find expected kind %s", test.name, k)
				}
				sort.Strings(g)
				sort.Strings(exp)
				if !reflect.DeepEqual(g, exp) {
					t.Errorf("%s: %s resource names mismatch! Expected %v, got %v", test.name, k, exp, g)
					continue
				}
			}
			if len(test.expectedVolumes) != len(gotVolumes) {
				t.Errorf("%s: Volume count mismatch! Expected %d, got %d", test.name, len(test.expectedVolumes), len(gotVolumes))
				return
			}
			for k, exp := range test.expectedVolumes {
				g, ok := gotVolumes[k]
				if !ok {
					t.Errorf("%s: Didn't find expected volume %s", test.name, k)
				}
				// BUGFIX: expected/got were previously printed in swapped order.
				if g != exp {
					t.Errorf("%s: Expected volume of type %s, got %s", test.name, exp, g)
				}
			}
			// BUGFIX: this message previously printed the expected name only,
			// making mismatch reports useless; include the actual name too.
			if test.expectedName != res.Name {
				t.Errorf("%s: Unexpected name: got %s, expected %s", test.name, res.Name, test.expectedName)
			}
			if test.expectInsecure == nil {
				return
			}
			for _, stream := range imageStreams {
				_, hasAnnotation := stream.Annotations[imageapi.InsecureRepositoryAnnotation]
				if test.expectInsecure.Has(stream.Name) && !hasAnnotation {
					t.Errorf("%s: Expected insecure annotation for stream: %s, but did not get one.", test.name, stream.Name)
				}
				if !test.expectInsecure.Has(stream.Name) && hasAnnotation {
					t.Errorf("%s: Got insecure annotation for stream: %s, and was not expecting one.", test.name, stream.Name)
				}
			}
		})
	}
}
// TestNewAppRunBuilds runs AppConfig.Run against build-focused configurations
// (Dockerfile input, --to/--to-docker output, image-source inputs, pipeline
// strategy detection) and verifies the generated BuildConfigs/ImageStreams,
// expected errors, and per-case result invariants via checkResult/checkOutput.
//
// Fix in this revision: the pipeline-strategy source check's error message
// claimed to print bc.Spec.Source but actually formatted bc.Spec.Source.Git;
// it now prints bc.Spec.Source, consistent with the parallel check above it.
func TestNewAppRunBuilds(t *testing.T) {
	skipExternalGit(t)
	tests := []struct {
		name string

		config *cmd.AppConfig

		expected    map[string][]string
		expectedErr func(error) bool
		checkResult func(*cmd.AppResult) error
		checkOutput func(stdout, stderr io.Reader) error
	}{
		{
			name: "successful build from dockerfile",
			config: &cmd.AppConfig{
				GenerationInputs: cmd.GenerationInputs{
					Dockerfile: "FROM openshift/origin:v1.0.6\nUSER foo",
				},
			},
			expected: map[string][]string{
				"buildConfig": {"origin"},
				// There's a single image stream, but different tags: input from
				// openshift/origin:v1.0.6, output to openshift/origin:latest.
				"imageStream": {"origin"},
			},
		},
		{
			name: "successful ruby app generation",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					SourceRepositories: []string{"https://github.com/openshift/ruby-hello-world"},
					DockerImages:       []string{"centos/ruby-22-centos7", "openshift/nodejs-010-centos7"},
				},
				GenerationInputs: cmd.GenerationInputs{
					OutputDocker: true,
				},
			},
			expected: map[string][]string{
				// TODO: this test used to silently ignore components that were not builders (i.e. user input)
				//   That's bad, so the code should either error in this case or be a bit smarter.
				"buildConfig": {"ruby-hello-world", "ruby-hello-world-1"},
				"imageStream": {"nodejs-010-centos7", "ruby-22-centos7"},
			},
		},
		{
			name: "successful build with no output",
			config: &cmd.AppConfig{
				GenerationInputs: cmd.GenerationInputs{
					Dockerfile: "FROM centos",
					NoOutput:   true,
				},
			},
			expected: map[string][]string{
				"buildConfig": {"centos"},
				"imageStream": {"centos"},
			},
			checkResult: func(res *cmd.AppResult) error {
				for _, item := range res.List.Items {
					switch t := item.(type) {
					case *buildapi.BuildConfig:
						got := t.Spec.Output.To
						want := (*kapi.ObjectReference)(nil)
						if !reflect.DeepEqual(got, want) {
							return fmt.Errorf("build.Spec.Output.To = %v; want %v", got, want)
						}
						return nil
					}
				}
				return fmt.Errorf("BuildConfig not found; got %v", res.List.Items)
			},
		},
		{
			name: "successful build from dockerfile with custom name",
			config: &cmd.AppConfig{
				GenerationInputs: cmd.GenerationInputs{
					Dockerfile: "FROM openshift/origin-base\nUSER foo",
					Name:       "foobar",
				},
			},
			expected: map[string][]string{
				"buildConfig": {"foobar"},
				"imageStream": {"origin-base", "foobar"},
			},
		},
		{
			name: "successful build from dockerfile with --to",
			config: &cmd.AppConfig{
				GenerationInputs: cmd.GenerationInputs{
					Dockerfile: "FROM openshift/origin-base\nUSER foo",
					Name:       "foobar",
					To:         "destination/reference:tag",
				},
			},
			expected: map[string][]string{
				"buildConfig": {"foobar"},
				"imageStream": {"origin-base", "reference"},
			},
		},
		{
			name: "successful build from dockerfile with --to and --to-docker=true",
			config: &cmd.AppConfig{
				GenerationInputs: cmd.GenerationInputs{
					Dockerfile:   "FROM openshift/origin-base\nUSER foo",
					Name:         "foobar",
					To:           "destination/reference:tag",
					OutputDocker: true,
				},
			},
			expected: map[string][]string{
				"buildConfig": {"foobar"},
				"imageStream": {"origin-base"},
			},
			checkResult: func(res *cmd.AppResult) error {
				for _, item := range res.List.Items {
					switch t := item.(type) {
					case *buildapi.BuildConfig:
						got := t.Spec.Output.To
						want := &kapi.ObjectReference{
							Kind: "DockerImage",
							Name: "destination/reference:tag",
						}
						if !reflect.DeepEqual(got, want) {
							return fmt.Errorf("build.Spec.Output.To = %v; want %v", got, want)
						}
						return nil
					}
				}
				return fmt.Errorf("BuildConfig not found; got %v", res.List.Items)
			},
		},
		{
			name: "successful generation of BC with multiple sources: repo + Dockerfile",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					SourceRepositories: []string{"https://github.com/openshift/ruby-hello-world"},
				},
				GenerationInputs: cmd.GenerationInputs{
					Dockerfile: "FROM centos/ruby-22-centos7\nRUN false",
				},
			},
			expected: map[string][]string{
				"buildConfig": {"ruby-hello-world"},
				"imageStream": {"ruby-22-centos7", "ruby-hello-world"},
			},
			checkResult: func(res *cmd.AppResult) error {
				var bc *buildapi.BuildConfig
				for _, item := range res.List.Items {
					switch v := item.(type) {
					case *buildapi.BuildConfig:
						if bc != nil {
							return fmt.Errorf("want one BuildConfig got multiple: %#v", res.List.Items)
						}
						bc = v
					}
				}
				if bc == nil {
					return fmt.Errorf("want one BuildConfig got none: %#v", res.List.Items)
				}
				var got string
				if bc.Spec.Source.Dockerfile != nil {
					got = *bc.Spec.Source.Dockerfile
				}
				want := "FROM centos/ruby-22-centos7\nRUN false"
				if got != want {
					return fmt.Errorf("bc.Spec.Source.Dockerfile = %q; want %q", got, want)
				}
				return nil
			},
		},
		{
			name: "unsuccessful build from dockerfile due to strategy conflict",
			config: &cmd.AppConfig{
				GenerationInputs: cmd.GenerationInputs{
					Dockerfile: "FROM openshift/origin-base\nUSER foo",
					Strategy:   generate.StrategySource,
				},
			},
			expectedErr: func(err error) bool {
				return err.Error() == "when directly referencing a Dockerfile, the strategy must must be 'docker'"
			},
		},
		{
			name: "unsuccessful build from dockerfile due to missing FROM instruction",
			config: &cmd.AppConfig{
				GenerationInputs: cmd.GenerationInputs{
					Dockerfile: "USER foo",
					Strategy:   generate.StrategyDocker,
				},
			},
			expectedErr: func(err error) bool {
				return err.Error() == "the Dockerfile in the repository \"\" has no FROM instruction"
			},
		},
		{
			name: "unsuccessful generation of BC with multiple repos and Dockerfile",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					SourceRepositories: []string{
						"https://github.com/openshift/ruby-hello-world",
						"https://github.com/openshift/django-ex",
					},
				},
				GenerationInputs: cmd.GenerationInputs{
					Dockerfile: "FROM centos/ruby-22-centos7\nRUN false",
				},
			},
			expectedErr: func(err error) bool {
				return err.Error() == "--dockerfile cannot be used with multiple source repositories"
			},
		},
		{
			name: "successful input image source build with a repository",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					SourceRepositories: []string{
						"https://github.com/openshift/ruby-hello-world",
					},
				},
				GenerationInputs: cmd.GenerationInputs{
					SourceImage:     "centos/mongodb-26-centos7",
					SourceImagePath: "/src:dst",
				},
			},
			expected: map[string][]string{
				"buildConfig": {"ruby-hello-world"},
				"imageStream": {"mongodb-26-centos7", "ruby-22-centos7", "ruby-hello-world"},
			},
			checkResult: func(res *cmd.AppResult) error {
				var bc *buildapi.BuildConfig
				for _, item := range res.List.Items {
					switch v := item.(type) {
					case *buildapi.BuildConfig:
						if bc != nil {
							return fmt.Errorf("want one BuildConfig got multiple: %#v", res.List.Items)
						}
						bc = v
					}
				}
				if bc == nil {
					return fmt.Errorf("want one BuildConfig got none: %#v", res.List.Items)
				}
				var got string

				want := "mongodb-26-centos7:latest"
				got = bc.Spec.Source.Images[0].From.Name
				if got != want {
					return fmt.Errorf("bc.Spec.Source.Image.From.Name = %q; want %q", got, want)
				}

				want = "ImageStreamTag"
				got = bc.Spec.Source.Images[0].From.Kind
				if got != want {
					return fmt.Errorf("bc.Spec.Source.Image.From.Kind = %q; want %q", got, want)
				}

				want = "/src"
				got = bc.Spec.Source.Images[0].Paths[0].SourcePath
				if got != want {
					return fmt.Errorf("bc.Spec.Source.Image.Paths[0].SourcePath = %q; want %q", got, want)
				}

				want = "dst"
				got = bc.Spec.Source.Images[0].Paths[0].DestinationDir
				if got != want {
					return fmt.Errorf("bc.Spec.Source.Image.Paths[0].DestinationDir = %q; want %q", got, want)
				}
				return nil
			},
		},
		{
			name: "successful input image source build with no repository",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					Components: []string{"openshift/nodejs-010-centos7"},
				},
				GenerationInputs: cmd.GenerationInputs{
					To:              "outputimage",
					SourceImage:     "centos/mongodb-26-centos7",
					SourceImagePath: "/src:dst",
				},
			},
			expected: map[string][]string{
				"buildConfig": {"outputimage"},
				"imageStream": {"mongodb-26-centos7", "nodejs-010-centos7", "outputimage"},
			},
			checkResult: func(res *cmd.AppResult) error {
				var bc *buildapi.BuildConfig
				for _, item := range res.List.Items {
					switch v := item.(type) {
					case *buildapi.BuildConfig:
						if bc != nil {
							return fmt.Errorf("want one BuildConfig got multiple: %#v", res.List.Items)
						}
						bc = v
					}
				}
				if bc == nil {
					return fmt.Errorf("want one BuildConfig got none: %#v", res.List.Items)
				}
				var got string

				want := "mongodb-26-centos7:latest"
				got = bc.Spec.Source.Images[0].From.Name
				if got != want {
					return fmt.Errorf("bc.Spec.Source.Image.From.Name = %q; want %q", got, want)
				}

				want = "ImageStreamTag"
				got = bc.Spec.Source.Images[0].From.Kind
				if got != want {
					return fmt.Errorf("bc.Spec.Source.Image.From.Kind = %q; want %q", got, want)
				}

				want = "/src"
				got = bc.Spec.Source.Images[0].Paths[0].SourcePath
				if got != want {
					return fmt.Errorf("bc.Spec.Source.Image.Paths[0].SourcePath = %q; want %q", got, want)
				}

				want = "dst"
				got = bc.Spec.Source.Images[0].Paths[0].DestinationDir
				if got != want {
					return fmt.Errorf("bc.Spec.Source.Image.Paths[0].DestinationDir = %q; want %q", got, want)
				}
				return nil
			},
		},
		{
			name: "successful build from source with autodetected jenkinsfile",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					SourceRepositories: []string{
						"https://github.com/openshift/nodejs-ex",
					},
				},
				GenerationInputs: cmd.GenerationInputs{
					ContextDir: "openshift/pipeline",
				},
			},
			expected: map[string][]string{
				"buildConfig": {"nodejs-ex"},
			},
			checkResult: func(res *cmd.AppResult) error {
				if len(res.List.Items) != 1 {
					return fmt.Errorf("expected one Item returned")
				}
				bc, ok := res.List.Items[0].(*buildapi.BuildConfig)
				if !ok {
					return fmt.Errorf("expected Item of type *buildapi.BuildConfig")
				}
				if !reflect.DeepEqual(bc.Spec.Output, buildapi.BuildOutput{}) {
					return fmt.Errorf("invalid bc.Spec.Output, got %#v", bc.Spec.Output)
				}
				if !reflect.DeepEqual(bc.Spec.Source, buildapi.BuildSource{
					ContextDir: "openshift/pipeline",
					Git:        &buildapi.GitBuildSource{URI: "https://github.com/openshift/nodejs-ex"},
					Secrets:    []buildapi.SecretBuildSource{},
				}) {
					return fmt.Errorf("invalid bc.Spec.Source, got %#v", bc.Spec.Source)
				}
				if !reflect.DeepEqual(bc.Spec.Strategy, buildapi.BuildStrategy{JenkinsPipelineStrategy: &buildapi.JenkinsPipelineBuildStrategy{}}) {
					return fmt.Errorf("invalid bc.Spec.Strategy, got %#v", bc.Spec.Strategy)
				}
				return nil
			},
		},
		{
			name: "successful build from component with source with pipeline strategy",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					Components: []string{
						"centos/nodejs-4-centos7~https://github.com/openshift/nodejs-ex",
					},
				},
				GenerationInputs: cmd.GenerationInputs{
					ContextDir: "openshift/pipeline",
					Strategy:   generate.StrategyPipeline,
				},
			},
			expected: map[string][]string{
				"buildConfig": {"nodejs-ex"},
			},
			checkResult: func(res *cmd.AppResult) error {
				if len(res.List.Items) != 1 {
					return fmt.Errorf("expected one Item returned")
				}
				bc, ok := res.List.Items[0].(*buildapi.BuildConfig)
				if !ok {
					return fmt.Errorf("expected Item of type *buildapi.BuildConfig")
				}
				if !reflect.DeepEqual(bc.Spec.Output, buildapi.BuildOutput{}) {
					return fmt.Errorf("invalid bc.Spec.Output, got %#v", bc.Spec.Output)
				}
				// BUGFIX: the message claims to show bc.Spec.Source, but the
				// original formatted bc.Spec.Source.Git; print the full source
				// for consistency with the autodetected-jenkinsfile case above.
				if !reflect.DeepEqual(bc.Spec.Source, buildapi.BuildSource{
					ContextDir: "openshift/pipeline",
					Git:        &buildapi.GitBuildSource{URI: "https://github.com/openshift/nodejs-ex"},
					Secrets:    []buildapi.SecretBuildSource{},
				}) {
					return fmt.Errorf("invalid bc.Spec.Source, got %#v", bc.Spec.Source)
				}
				if !reflect.DeepEqual(bc.Spec.Strategy, buildapi.BuildStrategy{JenkinsPipelineStrategy: &buildapi.JenkinsPipelineBuildStrategy{}}) {
					return fmt.Errorf("invalid bc.Spec.Strategy, got %#v", bc.Spec.Strategy)
				}
				return nil
			},
		},
		{
			name: "successful build from source with jenkinsfile with pipeline strategy",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					SourceRepositories: []string{
						"https://github.com/openshift/nodejs-ex",
					},
				},
				GenerationInputs: cmd.GenerationInputs{
					ContextDir: "openshift/pipeline",
					Strategy:   generate.StrategyPipeline,
				},
			},
			expected: map[string][]string{
				"buildConfig": {"nodejs-ex"},
			},
		},
		{
			name: "failed build from source with jenkinsfile with docker strategy",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					SourceRepositories: []string{
						"https://github.com/openshift/nodejs-ex",
					},
				},
				GenerationInputs: cmd.GenerationInputs{
					ContextDir: "openshift/pipeline",
					Strategy:   generate.StrategyDocker,
				},
			},
			expectedErr: func(err error) bool {
				return strings.HasPrefix(err.Error(), "No Dockerfile was found in the repository")
			},
		},
		{
			name: "failed build from source without jenkinsfile with pipeline strategy",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					SourceRepositories: []string{
						"https://github.com/openshift/nodejs-ex",
					},
				},
				GenerationInputs: cmd.GenerationInputs{
					Strategy: generate.StrategyPipeline,
				},
			},
			expectedErr: func(err error) bool {
				return strings.HasPrefix(err.Error(), "No Jenkinsfile was found in the repository")
			},
		},
	}
	for _, test := range tests {
		stdout, stderr := PrepareAppConfig(test.config)
		test.config.ImageClient = &NewAppFakeImageClient{
			proxy: test.config.ImageClient,
		}
		res, err := test.config.Run()
		if (test.expectedErr == nil && err != nil) || (test.expectedErr != nil && !test.expectedErr(err)) {
			t.Errorf("%s: unexpected error: %v", test.name, err)
			continue
		}
		if err != nil {
			continue
		}
		if test.checkOutput != nil {
			if err := test.checkOutput(stdout, stderr); err != nil {
				t.Error(err)
				continue
			}
		}
		got := map[string][]string{}
		for _, obj := range res.List.Items {
			switch tp := obj.(type) {
			case *buildapi.BuildConfig:
				got["buildConfig"] = append(got["buildConfig"], tp.Name)
			case *imageapi.ImageStream:
				got["imageStream"] = append(got["imageStream"], tp.Name)
			}
		}
		if len(test.expected) != len(got) {
			t.Errorf("%s: Resource kind size mismatch! Expected %d, got %d", test.name, len(test.expected), len(got))
			continue
		}
		for k, exp := range test.expected {
			g, ok := got[k]
			if !ok {
				t.Errorf("%s: Didn't find expected kind %s", test.name, k)
			}
			sort.Strings(g)
			sort.Strings(exp)
			if !reflect.DeepEqual(g, exp) {
				t.Errorf("%s: Resource names mismatch! Expected %v, got %v", test.name, exp, g)
				continue
			}
		}
		if test.checkResult != nil {
			if err := test.checkResult(res); err != nil {
				t.Errorf("%s: unexpected result: %v", test.name, err)
			}
		}
	}
}
// TestNewAppBuildOutputCycleDetection verifies how new-app handles a build
// whose output image reference resolves to the same reference as its input
// image. Depending on the case, the generator must either fail with a
// CircularOutputReferenceError or succeed while printing a WARNING on stderr.
func TestNewAppBuildOutputCycleDetection(t *testing.T) {
	skipExternalGit(t)
	tests := []struct {
		name        string
		config      *cmd.AppConfig
		expected    map[string][]string                   // expected resource names keyed by kind ("buildConfig"/"imageStream")
		expectedErr func(error) bool                      // predicate for the expected error; nil means success expected
		checkOutput func(stdout, stderr io.Reader) error  // optional check on captured output streams
	}{
		{
			name: "successful build with warning that output docker-image may trigger input ImageStream change; legacy ImageStream without tags",
			config: &cmd.AppConfig{
				GenerationInputs: cmd.GenerationInputs{
					OutputDocker: true,
					To:           "centos/ruby-22-centos7",
					Dockerfile:   "FROM centos/ruby-22-centos7:latest",
				},
			},
			expected: map[string][]string{
				"buildConfig": {"ruby-22-centos7"},
				"imageStream": {"ruby-22-centos7"},
			},
			checkOutput: func(stdout, stderr io.Reader) error {
				got, err := ioutil.ReadAll(stderr)
				if err != nil {
					return err
				}
				// Input and output both resolve to centos/ruby-22-centos7:latest,
				// so a cycle warning is expected.
				want := "--> WARNING: output image of \"centos/ruby-22-centos7:latest\" should be different than input\n"
				if string(got) != want {
					return fmt.Errorf("stderr: got %q; want %q", got, want)
				}
				return nil
			},
		},
		{
			name: "successful build from dockerfile with identical input and output image references with warning(1)",
			config: &cmd.AppConfig{
				GenerationInputs: cmd.GenerationInputs{
					Dockerfile: "FROM centos\nRUN yum install -y httpd",
					To:         "centos",
				},
			},
			expected: map[string][]string{
				"buildConfig": {"centos"},
				"imageStream": {"centos"},
			},
			checkOutput: func(stdout, stderr io.Reader) error {
				got, err := ioutil.ReadAll(stderr)
				if err != nil {
					return err
				}
				want := "--> WARNING: output image of \"centos:latest\" should be different than input\n"
				if string(got) != want {
					return fmt.Errorf("stderr: got %q; want %q", got, want)
				}
				return nil
			},
		},
		{
			name: "successful build from dockerfile with identical input and output image references with warning(2)",
			config: &cmd.AppConfig{
				GenerationInputs: cmd.GenerationInputs{
					Dockerfile: "FROM openshift/ruby-22-centos7\nRUN yum install -y httpd",
					To:         "ruby-22-centos7",
				},
			},
			expected: map[string][]string{
				"buildConfig": {"ruby-22-centos7"},
				"imageStream": {"ruby-22-centos7"},
			},
			checkOutput: func(stdout, stderr io.Reader) error {
				got, err := ioutil.ReadAll(stderr)
				if err != nil {
					return err
				}
				want := "--> WARNING: output image of \"openshift/ruby-22-centos7:latest\" should be different than input\n"
				if string(got) != want {
					return fmt.Errorf("stderr: got %q; want %q", got, want)
				}
				return nil
			},
		},
		{
			// No --to given: output defaults to the input reference, which is a
			// hard error rather than a warning.
			name: "unsuccessful build from dockerfile due to identical input and output image references(1)",
			config: &cmd.AppConfig{
				GenerationInputs: cmd.GenerationInputs{
					Dockerfile: "FROM centos\nRUN yum install -y httpd",
				},
			},
			expectedErr: func(err error) bool {
				e := app.CircularOutputReferenceError{
					Reference: "centos:latest",
				}
				return err.Error() == fmt.Errorf("%v, set a different tag with --to", e).Error()
			},
		},
		{
			name: "unsuccessful build from dockerfile due to identical input and output image references(2)",
			config: &cmd.AppConfig{
				GenerationInputs: cmd.GenerationInputs{
					Dockerfile: "FROM openshift/ruby-22-centos7\nRUN yum install -y httpd",
				},
			},
			expectedErr: func(err error) bool {
				e := app.CircularOutputReferenceError{
					Reference: "openshift/ruby-22-centos7:latest",
				}
				return err.Error() == fmt.Errorf("%v, set a different tag with --to", e).Error()
			},
		},
		{
			// Same scenario but with a fake registry searcher resolving the
			// image by exact tag.
			name: "successful build with warning that output docker-image may trigger input ImageStream change",
			config: &cmd.AppConfig{
				GenerationInputs: cmd.GenerationInputs{
					OutputDocker: true,
					To:           "centos/ruby-22-centos7",
					Dockerfile:   "FROM centos/ruby-22-centos7",
				},
				Resolvers: cmd.Resolvers{
					DockerSearcher: app.DockerClientSearcher{
						Client:           &apptest.FakeDockerClient{},
						Insecure:         true,
						RegistrySearcher: &ExactMatchDirectTagDockerSearcher{},
					},
				},
			},
			expected: map[string][]string{
				"buildConfig": {"ruby-22-centos7"},
				"imageStream": {"ruby-22-centos7"},
			},
			checkOutput: func(stdout, stderr io.Reader) error {
				got, err := ioutil.ReadAll(stderr)
				if err != nil {
					return err
				}
				want := "--> WARNING: output image of \"centos/ruby-22-centos7:latest\" should be different than input\n"
				if string(got) != want {
					return fmt.Errorf("stderr: got %q; want %q", got, want)
				}
				return nil
			},
		},
		{
			name: "successful build with warning that output docker-image may trigger input ImageStream change; latest variation",
			config: &cmd.AppConfig{
				GenerationInputs: cmd.GenerationInputs{
					OutputDocker: true,
					To:           "centos/ruby-22-centos7",
					Dockerfile:   "FROM centos/ruby-22-centos7:latest",
				},
				Resolvers: cmd.Resolvers{
					DockerSearcher: app.DockerClientSearcher{
						Client:           &apptest.FakeDockerClient{},
						Insecure:         true,
						RegistrySearcher: &ExactMatchDirectTagDockerSearcher{},
					},
				},
			},
			expected: map[string][]string{
				"buildConfig": {"ruby-22-centos7"},
				"imageStream": {"ruby-22-centos7"},
			},
			checkOutput: func(stdout, stderr io.Reader) error {
				got, err := ioutil.ReadAll(stderr)
				if err != nil {
					return err
				}
				want := "--> WARNING: output image of \"centos/ruby-22-centos7:latest\" should be different than input\n"
				if string(got) != want {
					return fmt.Errorf("stderr: got %q; want %q", got, want)
				}
				return nil
			},
		},
	}
	for _, test := range tests {
		// PrepareAppConfig wires fake clients and captures stdout/stderr.
		stdout, stderr := PrepareAppConfig(test.config)
		test.config.ImageClient = &NewAppFakeImageClient{
			proxy: test.config.ImageClient,
		}
		res, err := test.config.Run()
		// Fail when we expected success but got an error, or when the error
		// does not satisfy the case's predicate.
		if (test.expectedErr == nil && err != nil) || (test.expectedErr != nil && !test.expectedErr(err)) {
			t.Errorf("%s: unexpected error: %v", test.name, err)
			continue
		}
		if err != nil {
			continue
		}
		if test.checkOutput != nil {
			if err := test.checkOutput(stdout, stderr); err != nil {
				t.Errorf("Error during test %q: %v", test.name, err)
				continue
			}
		}
		// Collect generated resource names by kind for comparison.
		got := map[string][]string{}
		for _, obj := range res.List.Items {
			switch tp := obj.(type) {
			case *buildapi.BuildConfig:
				got["buildConfig"] = append(got["buildConfig"], tp.Name)
			case *imageapi.ImageStream:
				got["imageStream"] = append(got["imageStream"], tp.Name)
			}
		}
		if len(test.expected) != len(got) {
			t.Errorf("%s: Resource kind size mismatch! Expected %d, got %d", test.name, len(test.expected), len(got))
			continue
		}
		for k, exp := range test.expected {
			g, ok := got[k]
			if !ok {
				t.Errorf("%s: Didn't find expected kind %s", test.name, k)
			}
			// Sort both sides so the comparison is order-independent.
			sort.Strings(g)
			sort.Strings(exp)
			if !reflect.DeepEqual(g, exp) {
				t.Errorf("%s: Resource names mismatch! Expected %v, got %v", test.name, exp, g)
				continue
			}
		}
	}
}
// TestNewAppNewBuildEnvVars verifies that --build-env values supplied through
// GenerationInputs.BuildEnvironment are propagated onto the generated
// BuildConfig's source strategy environment.
func TestNewAppNewBuildEnvVars(t *testing.T) {
	skipExternalGit(t)
	dockerSearcher := app.DockerRegistrySearcher{
		Client: dockerregistry.NewClient(10*time.Second, true),
	}
	okTemplateClient := &templatefake.Clientset{}
	okImageClient := &imagefake.Clientset{}
	okRouteClient := &routefake.Clientset{}

	tests := []struct {
		name        string
		config      *cmd.AppConfig
		expected    []kapi.EnvVar // env vars expected on the BuildConfig's source strategy
		expectedErr error
	}{
		{
			name: "explicit environment variables for buildConfig and deploymentConfig",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					SourceRepositories: []string{"https://github.com/openshift/ruby-hello-world"},
					DockerImages:       []string{"centos/ruby-22-centos7", "openshift/nodejs-010-centos7"},
				},
				GenerationInputs: cmd.GenerationInputs{
					OutputDocker:     true,
					BuildEnvironment: []string{"BUILD_ENV_1=env_value_1", "BUILD_ENV_2=env_value_2"},
				},
				Resolvers: cmd.Resolvers{
					DockerSearcher: dockerSearcher,
					Detector: app.SourceRepositoryEnumerator{
						Detectors:         source.DefaultDetectors,
						DockerfileTester:  dockerfile.NewTester(),
						JenkinsfileTester: jenkinsfile.NewTester(),
					},
				},
				Typer:           legacyscheme.Scheme,
				ImageClient:     okImageClient.Image(),
				TemplateClient:  okTemplateClient.Template(),
				RouteClient:     okRouteClient.Route(),
				OriginNamespace: "default",
			},
			expected: []kapi.EnvVar{
				{Name: "BUILD_ENV_1", Value: "env_value_1"},
				{Name: "BUILD_ENV_2", Value: "env_value_2"},
			},
			expectedErr: nil,
		},
	}

	for _, test := range tests {
		test.config.Out, test.config.ErrOut = os.Stdout, os.Stderr
		test.config.ExpectToBuild = true
		res, err := test.config.Run()
		if err != test.expectedErr {
			t.Errorf("%s: Error mismatch! Expected %v, got %v", test.name, test.expectedErr, err)
			continue
		}
		got := []kapi.EnvVar{}
		// Pick up the strategy env from the generated BuildConfig(s); if more
		// than one is present the last one wins. NOTE: the original code had a
		// `break` inside the switch case, which in Go only exits the switch
		// (a no-op here), so it has been removed rather than kept misleadingly.
		for _, obj := range res.List.Items {
			switch tp := obj.(type) {
			case *buildapi.BuildConfig:
				got = tp.Spec.Strategy.SourceStrategy.Env
			}
		}
		if !reflect.DeepEqual(test.expected, got) {
			t.Errorf("%s: unexpected output. Expected: %#v, Got: %#v", test.name, test.expected, got)
			continue
		}
	}
}
// TestNewAppBuildConfigEnvVarsAndSecrets verifies that --env values and build
// secrets end up in the right places on the generated BuildConfig: plain
// Environment entries must NOT leak into the build strategy env (the expected
// strategy env is empty), and each secret must be mounted at its requested
// destination directory ("." when none was specified).
func TestNewAppBuildConfigEnvVarsAndSecrets(t *testing.T) {
	skipExternalGit(t)
	dockerSearcher := app.DockerRegistrySearcher{
		Client: dockerregistry.NewClient(10*time.Second, true),
	}
	okTemplateClient := &templatefake.Clientset{}
	okImageClient := &imagefake.Clientset{}
	okRouteClient := &routefake.Clientset{}

	tests := []struct {
		name            string
		config          *cmd.AppConfig
		expected        []kapi.EnvVar     // env expected on the build strategy (empty: env goes to deployment, not build)
		expectedSecrets map[string]string // secret name -> expected destination dir
		expectedErr     error
	}{
		{
			name: "explicit environment variables for buildConfig and deploymentConfig",
			config: &cmd.AppConfig{
				ComponentInputs: cmd.ComponentInputs{
					SourceRepositories: []string{"https://github.com/openshift/ruby-hello-world"},
					DockerImages:       []string{"centos/ruby-22-centos7", "centos/mongodb-26-centos7"},
				},
				GenerationInputs: cmd.GenerationInputs{
					OutputDocker: true,
					Environment:  []string{"BUILD_ENV_1=env_value_1", "BUILD_ENV_2=env_value_2"},
					Secrets:      []string{"foo:/var", "bar"},
				},
				Resolvers: cmd.Resolvers{
					DockerSearcher: dockerSearcher,
					Detector: app.SourceRepositoryEnumerator{
						Detectors:         source.DefaultDetectors,
						DockerfileTester:  dockerfile.NewTester(),
						JenkinsfileTester: jenkinsfile.NewTester(),
					},
				},
				Typer:           legacyscheme.Scheme,
				ImageClient:     okImageClient.Image(),
				TemplateClient:  okTemplateClient.Template(),
				RouteClient:     okRouteClient.Route(),
				OriginNamespace: "default",
			},
			expected:        []kapi.EnvVar{},
			expectedSecrets: map[string]string{"foo": "/var", "bar": "."},
			expectedErr:     nil,
		},
	}

	for _, test := range tests {
		test.config.Out, test.config.ErrOut = os.Stdout, os.Stderr
		test.config.Deploy = true
		res, err := test.config.Run()
		if err != test.expectedErr {
			t.Errorf("%s: Error mismatch! Expected %v, got %v", test.name, test.expectedErr, err)
			continue
		}
		got := []kapi.EnvVar{}
		gotSecrets := []buildapi.SecretBuildSource{}
		// Collect strategy env and secrets from the generated BuildConfig(s);
		// if several exist, the last one wins. The `break` that used to sit in
		// the switch case only exited the switch (a no-op in Go) and has been
		// removed.
		for _, obj := range res.List.Items {
			switch tp := obj.(type) {
			case *buildapi.BuildConfig:
				got = tp.Spec.Strategy.SourceStrategy.Env
				gotSecrets = tp.Spec.Source.Secrets
			}
		}
		for secretName, destDir := range test.expectedSecrets {
			found := false
			// Renamed from `got` to avoid shadowing the outer env slice, and
			// the loop now breaks as soon as a match is found.
			for _, secret := range gotSecrets {
				if secret.Secret.Name == secretName && secret.DestinationDir == destDir {
					found = true
					break
				}
			}
			if !found {
				t.Errorf("expected secret %q and destination %q, got %#v", secretName, destDir, gotSecrets)
				continue
			}
		}
		if !reflect.DeepEqual(test.expected, got) {
			t.Errorf("%s: unexpected output. Expected: %#v, Got: %#v", test.name, test.expected, got)
			continue
		}
	}
}
// TestNewAppSourceAuthRequired verifies that detection of a source repository
// correctly flags RequiresAuth when the git server demands basic auth and/or
// is only reachable through an HTTP proxy.
func TestNewAppSourceAuthRequired(t *testing.T) {
	tests := []struct {
		name               string
		passwordProtected  bool // serve the repo behind basic auth
		useProxy           bool // serve the repo only through an HTTP proxy
		expectAuthRequired bool // expected SourceRef.RequiresAuth
	}{
		{
			name:               "no auth",
			passwordProtected:  false,
			useProxy:           false,
			expectAuthRequired: false,
		},
		{
			name:               "basic auth",
			passwordProtected:  true,
			useProxy:           false,
			expectAuthRequired: true,
		},
		{
			name:               "proxy required",
			passwordProtected:  false,
			useProxy:           true,
			expectAuthRequired: true,
		},
		{
			name:               "basic auth and proxy required",
			passwordProtected:  true,
			useProxy:           true,
			expectAuthRequired: true,
		},
	}
	for _, test := range tests {
		// Spin up a throw-away local git server configured per the case.
		url, tempRepoDir := setupLocalGitRepo(t, test.passwordProtected, test.useProxy)

		sourceRepo, err := app.NewSourceRepository(url, generate.StrategySource)
		if err != nil {
			t.Fatalf("%v", err)
		}

		detector := app.SourceRepositoryEnumerator{
			Detectors:         source.DefaultDetectors,
			DockerfileTester:  dockerfile.NewTester(),
			JenkinsfileTester: jenkinsfile.NewTester(),
		}
		if err = sourceRepo.Detect(detector, true); err != nil {
			t.Fatalf("%v", err)
		}

		_, sourceRef, err := app.StrategyAndSourceForRepository(sourceRepo, nil)
		if err != nil {
			t.Fatalf("%v", err)
		}
		if test.expectAuthRequired != sourceRef.RequiresAuth {
			t.Errorf("%s: unexpected auth required result. Expected: %v. Actual: %v", test.name, test.expectAuthRequired, sourceRef.RequiresAuth)
		}
		// Clean up the temporary repo tree for this case (skipped when an
		// earlier Fatalf aborts the test).
		os.RemoveAll(tempRepoDir)
	}
}
// TestNewAppListAndSearch verifies the output of `oc new-app` in --search and
// --list modes against the fake image stream fixtures (the hidden
// "oldversion" tag must not appear in either listing).
func TestNewAppListAndSearch(t *testing.T) {
	tests := []struct {
		name           string
		options        clicmd.NewAppOptions
		expectedOutput string // exact expected stdout
	}{
		{
			name: "search, no oldversion",
			options: clicmd.NewAppOptions{
				ObjectGeneratorOptions: &clicmd.ObjectGeneratorOptions{
					Config: &cmd.AppConfig{
						ComponentInputs: cmd.ComponentInputs{
							ImageStreams: []string{"ruby"},
						},
						AsSearch: true,
					}},
			},
			expectedOutput: "Image streams (oc new-app --image-stream=<image-stream> [--code=<source>])\n-----\nruby\n  Project: default\n  Tags:    latest\n\n",
		},
		{
			name: "list, no oldversion",
			options: clicmd.NewAppOptions{
				ObjectGeneratorOptions: &clicmd.ObjectGeneratorOptions{
					Config: &cmd.AppConfig{
						AsList: true,
					}},
			},
			expectedOutput: "Image streams (oc new-app --image-stream=<image-stream> [--code=<source>])\n-----\nruby\n  Project: default\n  Tags:    latest\n\n",
		},
	}
	for _, test := range tests {
		stdout, stderr := PrepareAppConfig(test.options.Config)
		test.options.Action.Out, test.options.ErrOut = stdout, stderr
		test.options.BaseName = "oc"
		test.options.CommandName = "new-app"

		err := test.options.RunNewApp()
		if err != nil {
			t.Errorf("expected err == nil, got err == %v", err)
		}
		if stderr.Len() > 0 {
			t.Errorf("expected stderr == %q, got stderr == %q", "", stderr.Bytes())
		}
		// bytes.Buffer.String() is the idiomatic form of string(buf.Bytes()).
		if stdout.String() != test.expectedOutput {
			t.Errorf("expected stdout == %q, got stdout == %q", test.expectedOutput, stdout.String())
		}
	}
}
// setupLocalGitRepo creates a local bare git repository served over HTTP by a
// test server, optionally protected with basic auth and/or reachable only via
// an HTTP proxy. It returns the clone URL and the temp directory to remove
// when done. It also points HOME at a generated .gitconfig and sets
// GIT_ASKPASS so git never prompts interactively.
func setupLocalGitRepo(t *testing.T, passwordProtected bool, requireProxy bool) (string, string) {
	// Create test directories
	testDir, err := ioutil.TempDir(util.GetBaseDir(), "gitauth")
	if err != nil {
		t.Fatalf("%v", err)
	}
	initialRepoDir := filepath.Join(testDir, "initial-repo")
	if err = os.Mkdir(initialRepoDir, 0755); err != nil {
		t.Fatalf("%v", err)
	}
	gitHomeDir := filepath.Join(testDir, "git-home")
	if err = os.Mkdir(gitHomeDir, 0755); err != nil {
		t.Fatalf("%v", err)
	}
	testRepoDir := filepath.Join(gitHomeDir, "test-repo")
	if err = os.Mkdir(testRepoDir, 0755); err != nil {
		t.Fatalf("%v", err)
	}
	userHomeDir := filepath.Join(testDir, "user-home")
	if err = os.Mkdir(userHomeDir, 0755); err != nil {
		t.Fatalf("%v", err)
	}

	// Set initial repo contents
	gitRepo := git.NewRepositoryWithEnv([]string{
		"GIT_AUTHOR_NAME=developer",
		"GIT_AUTHOR_EMAIL=developer@example.com",
		"GIT_COMMITTER_NAME=developer",
		"GIT_COMMITTER_EMAIL=developer@example.com",
	})
	if err = gitRepo.Init(initialRepoDir, false); err != nil {
		t.Fatalf("%v", err)
	}
	if err = ioutil.WriteFile(filepath.Join(initialRepoDir, "Dockerfile"), []byte("FROM mysql\nLABEL mylabel=myvalue\n"), 0644); err != nil {
		t.Fatalf("%v", err)
	}
	if err = gitRepo.Add(initialRepoDir, "."); err != nil {
		t.Fatalf("%v", err)
	}
	if err = gitRepo.Commit(initialRepoDir, "initial commit"); err != nil {
		t.Fatalf("%v", err)
	}

	// Clone to repository inside gitHomeDir
	if err = gitRepo.CloneBare(testRepoDir, initialRepoDir); err != nil {
		t.Fatalf("%v", err)
	}

	// Initialize test git server
	var gitHandler http.Handler
	gitHandler = githttp.New(gitHomeDir)

	// If password protected, set handler to require password
	user := "gituser"
	password := "gitpass"
	if passwordProtected {
		authenticator := auth.Authenticator(func(info auth.AuthInfo) (bool, error) {
			// BUG FIX: was `!=  &&  !=`, which accepted requests where only
			// ONE of the credentials matched. Reject unless BOTH match.
			if info.Username != user || info.Password != password {
				return false, nil
			}
			return true, nil
		})
		gitHandler = authenticator(gitHandler)
	}
	gitServer := httptest.NewServer(gitHandler)
	gitURLString := fmt.Sprintf("%s/%s", gitServer.URL, "test-repo")

	var proxyServer *httptest.Server
	// If proxy required, create a simple proxy server that will forward any host to the git server
	if requireProxy {
		gitURL, err := url.Parse(gitURLString)
		if err != nil {
			t.Fatalf("%v", err)
		}
		proxy := goproxy.NewProxyHttpServer()
		proxy.OnRequest().DoFunc(
			func(r *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) {
				// Rewrite every request to target the local git server.
				r.URL.Host = gitURL.Host
				return r, nil
			})
		// Any hostname works: the proxy rewrites the host anyway.
		gitURLString = "http://example.com/test-repo"
		proxyServer = httptest.NewServer(proxy)
	}

	// Build a user-level .gitconfig that injects credentials (via insteadOf)
	// and/or the proxy address, so git picks them up non-interactively.
	gitConfig := `
[user]
name = developer
email = developer@org.org
`
	if passwordProtected {
		authSection := `
[url %q]
insteadOf = %s
`
		urlWithAuth, err := url.Parse(gitURLString)
		if err != nil {
			t.Fatalf("%v", err)
		}
		urlWithAuth.User = url.UserPassword(user, password)
		authSection = fmt.Sprintf(authSection, urlWithAuth.String(), gitURLString)
		gitConfig += authSection
	}
	if requireProxy {
		proxySection := `
[http]
proxy = %s
`
		proxySection = fmt.Sprintf(proxySection, proxyServer.URL)
		gitConfig += proxySection
	}
	if err = ioutil.WriteFile(filepath.Join(userHomeDir, ".gitconfig"), []byte(gitConfig), 0644); err != nil {
		t.Fatalf("%v", err)
	}
	os.Setenv("HOME", userHomeDir)
	os.Setenv("GIT_ASKPASS", "true")
	return gitURLString, testDir
}
// builderImageStream returns a fake "ruby" image stream in the "default"
// namespace. Its spec carries a single "oldversion" tag annotated as hidden,
// while its status reports populated "latest" and "oldversion" tags.
func builderImageStream() *imageapi.ImageStream {
	specTags := map[string]imageapi.TagReference{
		"oldversion": {
			Annotations: map[string]string{"tags": "hidden"},
		},
	}
	statusTags := map[string]imageapi.TagEventList{
		"latest": {
			Items: []imageapi.TagEvent{{Image: "the-image-id"}},
		},
		"oldversion": {
			Items: []imageapi.TagEvent{{Image: "the-image-id"}},
		},
	}
	return &imageapi.ImageStream{
		ObjectMeta: metav1.ObjectMeta{
			Name:            "ruby",
			Namespace:       "default",
			ResourceVersion: "1",
		},
		Spec: imageapi.ImageStreamSpec{Tags: specTags},
		Status: imageapi.ImageStreamStatus{
			Tags:                  statusTags,
			DockerImageRepository: "example/ruby:latest",
		},
	}
}
// builderImageStreams wraps the single fake builder stream in a list.
func builderImageStreams() *imageapi.ImageStreamList {
	list := &imageapi.ImageStreamList{}
	list.Items = append(list.Items, *builderImageStream())
	return list
}
// builderImage returns a fake ImageStreamImage for the ruby builder,
// advertising the S2I scripts URL and an exposed 8080/tcp port.
func builderImage() *imageapi.ImageStreamImage {
	config := &imageapi.DockerConfig{
		Env:          []string{"STI_SCRIPTS_URL=http://repo/git/ruby"},
		ExposedPorts: map[string]struct{}{"8080/tcp": {}},
	}
	image := imageapi.Image{
		DockerImageReference: "example/ruby:latest",
		DockerImageMetadata:  imageapi.DockerImage{Config: config},
	}
	return &imageapi.ImageStreamImage{Image: image}
}
// dockerBuilderImage returns the docker-client view of the ruby builder
// image, matching the metadata of builderImage.
func dockerBuilderImage() *docker.Image {
	cfg := &docker.Config{
		Env:          []string{"STI_SCRIPTS_URL=http://repo/git/ruby"},
		ExposedPorts: map[docker.Port]struct{}{"8080/tcp": {}},
	}
	return &docker.Image{ID: "ruby", Config: cfg}
}
// fakeImageStreamSearcher returns an ImageStreamSearcher backed by a fake
// clientset whose reactors serve the canned ruby builder fixtures.
func fakeImageStreamSearcher() app.Searcher {
	getStream := func(action clientgotesting.Action) (bool, runtime.Object, error) {
		return true, builderImageStream(), nil
	}
	listStreams := func(action clientgotesting.Action) (bool, runtime.Object, error) {
		return true, builderImageStreams(), nil
	}
	getImage := func(action clientgotesting.Action) (bool, runtime.Object, error) {
		return true, builderImage(), nil
	}

	client := &imagefake.Clientset{}
	client.AddReactor("get", "imagestreams", getStream)
	client.AddReactor("list", "imagestreams", listStreams)
	client.AddReactor("get", "imagestreamimages", getImage)

	return app.ImageStreamSearcher{
		Client:            client.Image(),
		ImageStreamImages: client.Image(),
		Namespaces:        []string{"default"},
	}
}
// fakeTemplateSearcher returns a TemplateSearcher whose fake client lists the
// canned template fixtures for the "default" namespace.
func fakeTemplateSearcher() app.Searcher {
	fake := &templatefake.Clientset{}
	listTemplates := func(action clientgotesting.Action) (bool, runtime.Object, error) {
		return true, templateList(), nil
	}
	fake.AddReactor("list", "templates", listTemplates)
	return app.TemplateSearcher{
		Client:     fake.Template(),
		Namespaces: []string{"default"},
	}
}
// templateList returns a list holding one empty stored template fixture.
func templateList() *templateapi.TemplateList {
	stored := templateapi.Template{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "first-stored-template",
			Namespace: "default",
		},
		Objects: []runtime.Object{},
	}
	return &templateapi.TemplateList{
		Items: []templateapi.Template{stored},
	}
}
// fakeDockerSearcher returns a docker searcher whose fake client knows the
// library/ruby image and serves the canned builder metadata.
func fakeDockerSearcher() app.Searcher {
	fakeClient := &apptest.FakeDockerClient{
		Images: []docker.APIImages{{RepoTags: []string{"library/ruby:latest"}}},
		Image:  dockerBuilderImage(),
	}
	return app.DockerClientSearcher{
		Client:           fakeClient,
		Insecure:         true,
		RegistrySearcher: &ExactMatchDockerSearcher{},
	}
}
// fakeSimpleDockerSearcher returns a docker searcher for a bare ruby image
// with no environment metadata.
func fakeSimpleDockerSearcher() app.Searcher {
	bareImage := &docker.Image{
		ID:     "ruby",
		Config: &docker.Config{Env: []string{}},
	}
	fakeClient := &apptest.FakeDockerClient{
		Images: []docker.APIImages{{RepoTags: []string{"centos/ruby-22-centos7"}}},
		Image:  bareImage,
	}
	return app.DockerClientSearcher{
		Client:           fakeClient,
		RegistrySearcher: &ExactMatchDockerSearcher{},
	}
}
// MockSourceRepositories is a set of mocked source repositories used for
// testing: the canonical ruby-hello-world repo plus the caller-supplied
// location. Construction failures abort the test.
func MockSourceRepositories(t *testing.T, file string) []*app.SourceRepository {
	locations := []string{
		"https://github.com/openshift/ruby-hello-world.git",
		file,
	}
	repos := make([]*app.SourceRepository, 0, len(locations))
	for _, location := range locations {
		repo, err := app.NewSourceRepository(location, generate.StrategySource)
		if err != nil {
			t.Fatal(err)
		}
		repos = append(repos, repo)
	}
	return repos
}
// PrepareAppConfig sets fields in config appropriate for running tests. It
// returns two buffers bound to stdout and stderr.
func PrepareAppConfig(config *cmd.AppConfig) (stdout, stderr *bytes.Buffer) {
	outBuf := new(bytes.Buffer)
	errBuf := new(bytes.Buffer)

	config.ExpectToBuild = true
	config.Out, config.ErrOut = outBuf, errBuf

	templateClient := &templatefake.Clientset{}
	imageClient := &imagefake.Clientset{}
	routeClient := &routefake.Clientset{}

	// Source detection uses the real detectors/testers.
	config.Detector = app.SourceRepositoryEnumerator{
		Detectors:         source.DefaultDetectors,
		DockerfileTester:  dockerfile.NewTester(),
		JenkinsfileTester: jenkinsfile.NewTester(),
	}
	// Only install the default registry searcher when the test did not
	// provide its own.
	if config.DockerSearcher == nil {
		config.DockerSearcher = app.DockerRegistrySearcher{
			Client: dockerregistry.NewClient(10*time.Second, true),
		}
	}
	config.ImageStreamByAnnotationSearcher = fakeImageStreamSearcher()
	config.ImageStreamSearcher = fakeImageStreamSearcher()
	config.OriginNamespace = "default"

	config.ImageClient = imageClient.Image()
	config.TemplateClient = templateClient.Template()
	config.RouteClient = routeClient.Route()
	config.TemplateSearcher = app.TemplateSearcher{
		Client:     templateClient.Template(),
		Namespaces: []string{"openshift", "default"},
	}
	config.Typer = legacyscheme.Scheme

	return outBuf, errBuf
}
// NewAppFakeImageClient implements ImageClient interface and overrides some of
// the default fake client behavior around default, empty imagestreams
type NewAppFakeImageClient struct {
	// proxy is the wrapped client that all calls are delegated to.
	proxy imageinternalversion.ImageInterface
}

// Images delegates to the wrapped client.
func (c *NewAppFakeImageClient) Images() imageinternalversion.ImageResourceInterface {
	return c.proxy.Images()
}

// ImageSignatures delegates to the wrapped client.
func (c *NewAppFakeImageClient) ImageSignatures() imageinternalversion.ImageSignatureInterface {
	return c.proxy.ImageSignatures()
}

// ImageStreams wraps the proxied interface in NewAppFakeImageStreams so that
// Get returns nil (like the real client) instead of an empty struct.
func (c *NewAppFakeImageClient) ImageStreams(namespace string) imageinternalversion.ImageStreamInterface {
	return &NewAppFakeImageStreams{
		proxy: c.proxy.ImageStreams(namespace),
	}
}

// ImageStreamImages delegates to the wrapped client.
func (c *NewAppFakeImageClient) ImageStreamImages(namespace string) imageinternalversion.ImageStreamImageInterface {
	return c.proxy.ImageStreamImages(namespace)
}

// ImageStreamImports delegates to the wrapped client.
func (c *NewAppFakeImageClient) ImageStreamImports(namespace string) imageinternalversion.ImageStreamImportInterface {
	return c.proxy.ImageStreamImports(namespace)
}

// ImageStreamMappings delegates to the wrapped client.
func (c *NewAppFakeImageClient) ImageStreamMappings(namespace string) imageinternalversion.ImageStreamMappingInterface {
	return c.proxy.ImageStreamMappings(namespace)
}

// ImageStreamTags delegates to the wrapped client.
func (c *NewAppFakeImageClient) ImageStreamTags(namespace string) imageinternalversion.ImageStreamTagInterface {
	return c.proxy.ImageStreamTags(namespace)
}

// RESTClient delegates to the wrapped client.
func (c *NewAppFakeImageClient) RESTClient() krest.Interface {
	return c.proxy.RESTClient()
}
// NewAppFakeImageStreams implements the ImageStreamInterface and overrides some of the
// default fake client behavior round default, empty imagestreams
type NewAppFakeImageStreams struct {
	// proxy is the wrapped interface that all calls are delegated to.
	proxy imageinternalversion.ImageStreamInterface
}

// Get delegates to the proxy but converts the fake client's "empty struct for
// missing entry" behavior into a nil result, matching the real client.
func (c *NewAppFakeImageStreams) Get(name string, options metav1.GetOptions) (result *imageapi.ImageStream, err error) {
	result, err = c.proxy.Get(name, options)
	if err != nil {
		return nil, err
	}
	if len(result.Name) == 0 {
		// the default faker will return an empty image stream struct if it
		// cannot find an entry for the given name ... we want nil for our tests,
		// just like the real client
		return nil, nil
	}
	return result, nil
}

// List delegates to the wrapped interface.
func (c *NewAppFakeImageStreams) List(opts metav1.ListOptions) (result *imageapi.ImageStreamList, err error) {
	return c.proxy.List(opts)
}

// Watch delegates to the wrapped interface.
func (c *NewAppFakeImageStreams) Watch(opts metav1.ListOptions) (kwatch.Interface, error) {
	return c.proxy.Watch(opts)
}

// Create delegates to the wrapped interface.
func (c *NewAppFakeImageStreams) Create(imageStream *imageapi.ImageStream) (result *imageapi.ImageStream, err error) {
	return c.proxy.Create(imageStream)
}

// Update delegates to the wrapped interface.
func (c *NewAppFakeImageStreams) Update(imageStream *imageapi.ImageStream) (result *imageapi.ImageStream, err error) {
	return c.proxy.Update(imageStream)
}

// UpdateStatus delegates to the wrapped interface.
func (c *NewAppFakeImageStreams) UpdateStatus(imageStream *imageapi.ImageStream) (*imageapi.ImageStream, error) {
	return c.proxy.UpdateStatus(imageStream)
}

// Delete delegates to the wrapped interface.
func (c *NewAppFakeImageStreams) Delete(name string, options *metav1.DeleteOptions) error {
	return c.proxy.Delete(name, options)
}

// DeleteCollection delegates to the wrapped interface.
func (c *NewAppFakeImageStreams) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
	return c.proxy.DeleteCollection(options, listOptions)
}

// Patch delegates to the wrapped interface.
func (c *NewAppFakeImageStreams) Patch(name string, pt ktypes.PatchType, data []byte, subresources ...string) (result *imageapi.ImageStream, err error) {
	return c.proxy.Patch(name, pt, data, subresources...)
}

// Secrets delegates to the wrapped interface.
func (c *NewAppFakeImageStreams) Secrets(imageStreamName string, opts metav1.ListOptions) (result *kapi.SecretList, err error) {
	return c.proxy.Secrets(imageStreamName, opts)
}
| apache-2.0 |
RachelTucker/ds3_net_sdk | Ds3/ResponseParsers/CancelFormatTapeSpectraS3ResponseParser.cs | 1723 | /*
* ******************************************************************************
* Copyright 2014-2017 Spectra Logic Corporation. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use
* this file except in compliance with the License. A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file.
* This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
* ****************************************************************************
*/
// This code is auto-generated, do not modify
using Ds3.Calls;
using Ds3.Models;
using Ds3.Runtime;
using System.Linq;
using System.Net;
using System.Xml.Linq;
namespace Ds3.ResponseParsers
{
/// <summary>
/// Parses the HTTP response of a CancelFormatTapeSpectraS3Request.
/// NOTE: this file is auto-generated (see header); comments here will be lost
/// on regeneration.
/// </summary>
internal class CancelFormatTapeSpectraS3ResponseParser : IResponseParser<CancelFormatTapeSpectraS3Request, CancelFormatTapeSpectraS3Response>
{
    /// <summary>
    /// Validates the response status (expects 200), then deserializes the
    /// "Data" element of the XML body into a Tape model.
    /// </summary>
    public CancelFormatTapeSpectraS3Response Parse(CancelFormatTapeSpectraS3Request request, IWebResponse response)
    {
        using (response)
        {
            // Throws if the server did not return HTTP 200.
            ResponseParseUtilities.HandleStatusCode(response, (HttpStatusCode)200);
            using (var stream = response.GetResponseStream())
            {
                return new CancelFormatTapeSpectraS3Response(
                    ModelParsers.ParseTape(
                        XmlExtensions.ReadDocument(stream).ElementOrThrow("Data"))
                );
            }
        }
    }
}
} | apache-2.0 |
odracci/tomighty | src/main/java/org/tomighty/ui/about/AboutDialog.java | 3585 | /*
* Copyright (c) 2010 Célio Cidral Junior
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.tomighty.ui.about;
import java.awt.BorderLayout;
import java.awt.Color;
import java.awt.Component;
import java.awt.FlowLayout;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import javax.swing.BorderFactory;
import javax.swing.JButton;
import javax.swing.JDialog;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JTextArea;
import javax.swing.border.Border;
import org.tomighty.i18n.Messages;
import org.tomighty.ioc.Initializable;
import org.tomighty.ioc.Inject;
import org.tomighty.resources.Images;
import org.tomighty.resources.Text;
/**
 * "About" dialog for Tomighty: shows the project title with its version, the
 * project home page, the license text inside a bordered text area, and a
 * single close button. Text and icons are filled in by {@link #initialize()}
 * after the injected collaborators are available.
 */
@SuppressWarnings("serial")
public class AboutDialog extends JDialog implements Initializable {

    private static final int MARGIN = 10;

    @Inject private Messages messages;
    @Inject private Text text;
    @Inject private Images images;

    private JPanel panel;
    private JLabel title;
    private JTextArea license;
    private JButton closeButton;

    public AboutDialog() {
        createPanel();
        configureDialog();
    }

    /**
     * Populates all texts and icons from the injected resources, then sizes
     * and centers the dialog. Called by the IoC container after injection.
     */
    @Override
    public void initialize() {
        title.setText("Tomighty " + text.projectVersion());
        license.setText(text.license());
        closeButton.setText(messages.get("Close"));
        setTitle(messages.get("About Tomighty"));
        setIconImages(images.tomatoes());
        pack();
        setLocationRelativeTo(null);
    }

    /** Makes the dialog visible. */
    public void showDialog() {
        setVisible(true);
    }

    private void configureDialog() {
        setAlwaysOnTop(true);
        setContentPane(panel);
        setResizable(false);
    }

    /** Lays out header, license text and button bar on the content panel. */
    private void createPanel() {
        panel = new JPanel(new BorderLayout(0, MARGIN));
        panel.setBorder(emptyBorder());
        panel.add(title(), BorderLayout.NORTH);
        panel.add(text(), BorderLayout.CENTER);
        panel.add(closeButton(), BorderLayout.SOUTH);
    }

    /** Builds the header: big title label above the project URL. */
    private Component title() {
        title = new JLabel();
        title.setHorizontalAlignment(JLabel.CENTER);
        title.setFont(title.getFont().deriveFont(25f));
        JLabel homepage = new JLabel("http://www.tomighty.org", JLabel.CENTER);
        JPanel header = new JPanel(new BorderLayout());
        header.add(title, BorderLayout.NORTH);
        header.add(homepage, BorderLayout.SOUTH);
        return header;
    }

    /** Builds the read-only license text area with a gray framed border. */
    private Component text() {
        license = new JTextArea();
        license.setEditable(false);
        license.setFont(getFont());
        license.setBackground(getBackground());
        Border framed = BorderFactory.createCompoundBorder(
                BorderFactory.createLineBorder(Color.GRAY),
                emptyBorder());
        license.setBorder(framed);
        return license;
    }

    /** Builds the footer panel holding the close button, which hides the dialog. */
    private Component closeButton() {
        closeButton = new JButton();
        closeButton.addActionListener(new ActionListener() {
            @Override
            public void actionPerformed(ActionEvent event) {
                setVisible(false);
            }
        });
        JPanel footer = new JPanel(new FlowLayout());
        footer.add(closeButton);
        return footer;
    }

    /** Uniform empty border used as outer margin and inner padding. */
    private Border emptyBorder() {
        return BorderFactory.createEmptyBorder(MARGIN, MARGIN, MARGIN, MARGIN);
    }
}
| apache-2.0 |
grdryn/origin | pkg/security/admission/admission_test.go | 19692 | package admission
import (
"reflect"
"strings"
"testing"
kadmission "github.com/GoogleCloudPlatform/kubernetes/pkg/admission"
kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/auth/user"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient"
kscc "github.com/GoogleCloudPlatform/kubernetes/pkg/securitycontextconstraints"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
allocator "github.com/openshift/origin/pkg/security"
)
// NewTestAdmission returns an SCC admission plugin wired to the supplied SCC
// store and kube client, registered for create operations only.
func NewTestAdmission(store cache.Store, kclient client.Interface) kadmission.Interface {
	plugin := &constraint{}
	plugin.Handler = kadmission.NewHandler(kadmission.Create)
	plugin.client = kclient
	plugin.store = store
	return plugin
}
// TestAdmit runs pods through the SCC admission plugin. Phase 1: with two SCCs
// granted to "system:serviceaccounts" (one range/MCS-allocating, one with exact
// non-matching requirements), pods violating the allocated UID range / MCS label /
// privilege must be rejected, while conforming pods are admitted and mutated with
// defaults from the namespace annotations. Phase 2: after adding an escalated SCC,
// the previously rejected pods must be admitted and validated against it.
func TestAdmit(t *testing.T) {
	// create the annotated namespace and add it to the fake client
	// UIDRangeAnnotation "1/3" and MCSAnnotation "s0:c1,c0" drive the defaults below.
	namespace := &kapi.Namespace{
		ObjectMeta: kapi.ObjectMeta{
			Name: "default",
			Annotations: map[string]string{
				allocator.UIDRangeAnnotation: "1/3",
				allocator.MCSAnnotation:      "s0:c1,c0",
			},
		},
	}
	serviceAccount := &kapi.ServiceAccount{
		ObjectMeta: kapi.ObjectMeta{
			Name: "default",
		},
	}

	tc := testclient.NewSimpleFake(namespace, serviceAccount)

	// create scc that requires allocation retrieval
	saSCC := &kapi.SecurityContextConstraints{
		ObjectMeta: kapi.ObjectMeta{
			Name: "scc-sa",
		},
		RunAsUser: kapi.RunAsUserStrategyOptions{
			Type: kapi.RunAsUserStrategyMustRunAsRange,
		},
		SELinuxContext: kapi.SELinuxContextStrategyOptions{
			Type: kapi.SELinuxStrategyMustRunAs,
		},
		Groups: []string{"system:serviceaccounts"},
	}
	// create scc that has specific requirements that shouldn't match but is permissioned to
	// service accounts to test exact matches
	var exactUID int64 = 999
	saExactSCC := &kapi.SecurityContextConstraints{
		ObjectMeta: kapi.ObjectMeta{
			Name: "scc-sa-exact",
		},
		RunAsUser: kapi.RunAsUserStrategyOptions{
			Type: kapi.RunAsUserStrategyMustRunAs,
			UID:  &exactUID,
		},
		SELinuxContext: kapi.SELinuxContextStrategyOptions{
			Type: kapi.SELinuxStrategyMustRunAs,
			SELinuxOptions: &kapi.SELinuxOptions{
				Level: "s9:z0,z1",
			},
		},
		Groups: []string{"system:serviceaccounts"},
	}

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	store.Add(saExactSCC)
	store.Add(saSCC)

	// create the admission plugin
	p := NewTestAdmission(store, tc)

	// setup test data
	// goodPod is empty and should not be used directly for testing since we're providing
	// two different SCCs.  Since no values are specified it would be allowed to match either
	// SCC when defaults are filled in.
	goodPod := func() *kapi.Pod {
		return &kapi.Pod{
			Spec: kapi.PodSpec{
				ServiceAccountName: "default",
				Containers: []kapi.Container{
					{
						SecurityContext: &kapi.SecurityContext{},
					},
				},
			},
		}
	}

	// 1001 is outside the "1/3" range allocated to the namespace.
	uidNotInRange := goodPod()
	var uid int64 = 1001
	uidNotInRange.Spec.Containers[0].SecurityContext.RunAsUser = &uid

	// level differs from the namespace's pre-allocated MCS label.
	invalidMCSLabels := goodPod()
	invalidMCSLabels.Spec.Containers[0].SecurityContext.SELinuxOptions = &kapi.SELinuxOptions{
		Level: "s1:q0,q1",
	}

	// neither non-escalated SCC allows privileged containers.
	disallowedPriv := goodPod()
	var priv bool = true
	disallowedPriv.Spec.Containers[0].SecurityContext.Privileged = &priv

	// specifies a UID in the range of the preallocated UID annotation
	specifyUIDInRange := goodPod()
	var goodUID int64 = 3
	specifyUIDInRange.Spec.Containers[0].SecurityContext.RunAsUser = &goodUID

	// specifies an mcs label that matches the preallocated mcs annotation
	specifyLabels := goodPod()
	specifyLabels.Spec.Containers[0].SecurityContext.SELinuxOptions = &kapi.SELinuxOptions{
		Level: "s0:c1,c0",
	}

	testCases := map[string]struct {
		pod           *kapi.Pod
		shouldAdmit   bool
		expectedUID   int64
		expectedLevel string
		expectedPriv  bool
	}{
		"uidNotInRange": {
			pod:         uidNotInRange,
			shouldAdmit: false,
		},
		"invalidMCSLabels": {
			pod:         invalidMCSLabels,
			shouldAdmit: false,
		},
		"disallowedPriv": {
			pod:         disallowedPriv,
			shouldAdmit: false,
		},
		"specifyUIDInRange": {
			pod:           specifyUIDInRange,
			shouldAdmit:   true,
			expectedUID:   *specifyUIDInRange.Spec.Containers[0].SecurityContext.RunAsUser,
			expectedLevel: "s0:c1,c0",
		},
		"specifyLabels": {
			pod:         specifyLabels,
			shouldAdmit: true,
			// UID is defaulted to the start of the namespace's "1/3" range.
			expectedUID:   1,
			expectedLevel: specifyLabels.Spec.Containers[0].SecurityContext.SELinuxOptions.Level,
		},
	}

	for k, v := range testCases {
		attrs := kadmission.NewAttributesRecord(v.pod, "Pod", "namespace", "", string(kapi.ResourcePods), "", kadmission.Create, &user.DefaultInfo{})
		err := p.Admit(attrs)

		if v.shouldAdmit && err != nil {
			t.Errorf("%s expected no errors but received %v", k, err)
		}
		if !v.shouldAdmit && err == nil {
			t.Errorf("%s expected errors but received none", k)
		}

		if v.shouldAdmit {
			// admitted pods must be annotated with the SCC they validated against
			// and must carry the expected defaulted/kept UID and SELinux level.
			validatedSCC, ok := v.pod.Annotations[allocator.ValidatedSCCAnnotation]
			if !ok {
				t.Errorf("%s expected to find the validated annotation on the pod for the scc but found none", k)
			}
			if validatedSCC != saSCC.Name {
				t.Errorf("%s should have validated against %s but found %s", k, saSCC.Name, validatedSCC)
			}
			if *v.pod.Spec.Containers[0].SecurityContext.RunAsUser != v.expectedUID {
				t.Errorf("%s expected UID %d but found %d", k, v.expectedUID, *v.pod.Spec.Containers[0].SecurityContext.RunAsUser)
			}
			if v.pod.Spec.Containers[0].SecurityContext.SELinuxOptions.Level != v.expectedLevel {
				t.Errorf("%s expected Level %s but found %s", k, v.expectedLevel, v.pod.Spec.Containers[0].SecurityContext.SELinuxOptions.Level)
			}
		}
	}

	// now add an escalated scc to the group and re-run the cases that expected failure, they should
	// now pass by validating against the escalated scc.
	adminSCC := &kapi.SecurityContextConstraints{
		ObjectMeta: kapi.ObjectMeta{
			Name: "scc-admin",
		},
		AllowPrivilegedContainer: true,
		RunAsUser: kapi.RunAsUserStrategyOptions{
			Type: kapi.RunAsUserStrategyRunAsAny,
		},
		SELinuxContext: kapi.SELinuxContextStrategyOptions{
			Type: kapi.SELinuxStrategyRunAsAny,
		},
		Groups: []string{"system:serviceaccounts"},
	}
	store.Add(adminSCC)

	for k, v := range testCases {
		if !v.shouldAdmit {
			attrs := kadmission.NewAttributesRecord(v.pod, "Pod", "namespace", "", string(kapi.ResourcePods), "", kadmission.Create, &user.DefaultInfo{})
			err := p.Admit(attrs)
			if err != nil {
				t.Errorf("Expected %s to pass with escalated scc but got error %v", k, err)
			}
			validatedSCC, ok := v.pod.Annotations[allocator.ValidatedSCCAnnotation]
			if !ok {
				t.Errorf("%s expected to find the validated annotation on the pod for the scc but found none", k)
			}
			if validatedSCC != adminSCC.Name {
				t.Errorf("%s should have validated against %s but found %s", k, adminSCC.Name, validatedSCC)
			}
		}
	}
}
// TestAssignSecurityContext verifies that assignSecurityContext only mutates the
// pod's containers when the entire pod validates against the provider: invalid
// pods keep their original (nil) UID, valid pods get the SCC's default UID.
//
// Fixes: the "expected validation errors" t.Errorf call was missing its `k`
// argument, so it printed a literal "%s" instead of the case name (go vet flags
// this as a format error); also corrected the "dont'" typo in an error message.
func TestAssignSecurityContext(t *testing.T) {
	// set up test data
	// scc that will deny privileged container requests and has a default value for a field (uid)
	var uid int64 = 9999
	scc := &kapi.SecurityContextConstraints{
		ObjectMeta: kapi.ObjectMeta{
			Name: "test scc",
		},
		SELinuxContext: kapi.SELinuxContextStrategyOptions{
			Type: kapi.SELinuxStrategyRunAsAny,
		},
		RunAsUser: kapi.RunAsUserStrategyOptions{
			Type: kapi.RunAsUserStrategyMustRunAs,
			UID:  &uid,
		},
	}
	provider, err := kscc.NewSimpleProvider(scc)
	if err != nil {
		t.Fatalf("failed to create provider: %v", err)
	}

	createContainer := func(priv bool) kapi.Container {
		return kapi.Container{
			SecurityContext: &kapi.SecurityContext{
				Privileged: &priv,
			},
		}
	}

	// these are set up such that the containers always have a nil uid.  If the case should not
	// validate then the uids should not have been updated by the strategy.  If the case should
	// validate then uids should be set.  This is ensuring that we're hanging on to the old SC
	// as we generate/validate and only updating the original container if the entire pod validates
	testCases := map[string]struct {
		pod            *kapi.Pod
		shouldValidate bool
		expectedUID    *int64
	}{
		"container SC is not changed when invalid": {
			pod: &kapi.Pod{
				Spec: kapi.PodSpec{
					Containers: []kapi.Container{createContainer(true)},
				},
			},
			shouldValidate: false,
		},
		"must validate all containers": {
			pod: &kapi.Pod{
				Spec: kapi.PodSpec{
					// good pod and bad pod
					Containers: []kapi.Container{createContainer(false), createContainer(true)},
				},
			},
			shouldValidate: false,
		},
		"pod validates": {
			pod: &kapi.Pod{
				Spec: kapi.PodSpec{
					Containers: []kapi.Container{createContainer(false)},
				},
			},
			shouldValidate: true,
		},
	}

	for k, v := range testCases {
		errs := assignSecurityContext(provider, v.pod)
		if v.shouldValidate && len(errs) > 0 {
			t.Errorf("%s expected to validate but received errors %v", k, errs)
			continue
		}
		if !v.shouldValidate && len(errs) == 0 {
			// BUG FIX: the case name argument `k` was missing, leaving "%s" unformatted.
			t.Errorf("%s expected validation errors but received none", k)
			continue
		}

		// if we shouldn't have validated ensure that uid is not set on the containers
		if !v.shouldValidate {
			for _, c := range v.pod.Spec.Containers {
				if c.SecurityContext.RunAsUser != nil {
					t.Errorf("%s had non-nil UID %d.  UID should not be set on test cases that don't validate", k, *c.SecurityContext.RunAsUser)
				}
			}
		}

		// if we validated then the pod sc should be updated now with the defaults from the SCC
		if v.shouldValidate {
			for _, c := range v.pod.Spec.Containers {
				if *c.SecurityContext.RunAsUser != uid {
					t.Errorf("%s expected uid to be defaulted to %d but found %v", k, uid, c.SecurityContext.RunAsUser)
				}
			}
		}
	}
}
// TestCreateProvidersFromConstraints checks provider creation from SCCs against
// namespaces with/without pre-allocated UID range and MCS annotations, that the
// input SCC is never mutated, and that expected error strings are produced.
//
// Fixes: the "expected a single error" t.Errorf call had three format verbs but
// only two arguments — `v.expectedErr` was missing (go vet: wrong number of args).
func TestCreateProvidersFromConstraints(t *testing.T) {
	namespaceValid := &kapi.Namespace{
		ObjectMeta: kapi.ObjectMeta{
			Name: "default",
			Annotations: map[string]string{
				allocator.UIDRangeAnnotation: "1/3",
				allocator.MCSAnnotation:      "s0:c1,c0",
			},
		},
	}
	namespaceNoUID := &kapi.Namespace{
		ObjectMeta: kapi.ObjectMeta{
			Name: "default",
			Annotations: map[string]string{
				allocator.MCSAnnotation: "s0:c1,c0",
			},
		},
	}
	namespaceNoMCS := &kapi.Namespace{
		ObjectMeta: kapi.ObjectMeta{
			Name: "default",
			Annotations: map[string]string{
				allocator.UIDRangeAnnotation: "1/3",
			},
		},
	}

	testCases := map[string]struct {
		// use a generating function so we can test for non-mutation
		scc         func() *kapi.SecurityContextConstraints
		namespace   *kapi.Namespace
		expectedErr string
	}{
		"valid non-preallocated scc": {
			scc: func() *kapi.SecurityContextConstraints {
				return &kapi.SecurityContextConstraints{
					ObjectMeta: kapi.ObjectMeta{
						Name: "valid non-preallocated scc",
					},
					SELinuxContext: kapi.SELinuxContextStrategyOptions{
						Type: kapi.SELinuxStrategyRunAsAny,
					},
					RunAsUser: kapi.RunAsUserStrategyOptions{
						Type: kapi.RunAsUserStrategyRunAsAny,
					},
				}
			},
			namespace: namespaceValid,
		},
		"valid pre-allocated scc": {
			scc: func() *kapi.SecurityContextConstraints {
				return &kapi.SecurityContextConstraints{
					ObjectMeta: kapi.ObjectMeta{
						Name: "valid pre-allocated scc",
					},
					SELinuxContext: kapi.SELinuxContextStrategyOptions{
						Type:           kapi.SELinuxStrategyMustRunAs,
						SELinuxOptions: &kapi.SELinuxOptions{User: "myuser"},
					},
					RunAsUser: kapi.RunAsUserStrategyOptions{
						Type: kapi.RunAsUserStrategyMustRunAsRange,
					},
				}
			},
			namespace: namespaceValid,
		},
		"pre-allocated no uid annotation": {
			scc: func() *kapi.SecurityContextConstraints {
				return &kapi.SecurityContextConstraints{
					ObjectMeta: kapi.ObjectMeta{
						Name: "pre-allocated no uid annotation",
					},
					SELinuxContext: kapi.SELinuxContextStrategyOptions{
						Type: kapi.SELinuxStrategyMustRunAs,
					},
					RunAsUser: kapi.RunAsUserStrategyOptions{
						Type: kapi.RunAsUserStrategyMustRunAsRange,
					},
				}
			},
			namespace:   namespaceNoUID,
			expectedErr: "unable to find pre-allocated uid annotation",
		},
		"pre-allocated no mcs annotation": {
			scc: func() *kapi.SecurityContextConstraints {
				return &kapi.SecurityContextConstraints{
					ObjectMeta: kapi.ObjectMeta{
						Name: "pre-allocated no mcs annotation",
					},
					SELinuxContext: kapi.SELinuxContextStrategyOptions{
						Type: kapi.SELinuxStrategyMustRunAs,
					},
					RunAsUser: kapi.RunAsUserStrategyOptions{
						Type: kapi.RunAsUserStrategyMustRunAsRange,
					},
				}
			},
			namespace:   namespaceNoMCS,
			expectedErr: "unable to find pre-allocated mcs annotation",
		},
		"bad scc strategy options": {
			scc: func() *kapi.SecurityContextConstraints {
				return &kapi.SecurityContextConstraints{
					ObjectMeta: kapi.ObjectMeta{
						Name: "bad scc user options",
					},
					SELinuxContext: kapi.SELinuxContextStrategyOptions{
						Type: kapi.SELinuxStrategyRunAsAny,
					},
					RunAsUser: kapi.RunAsUserStrategyOptions{
						// MustRunAs requires an explicit UID, which is intentionally absent.
						Type: kapi.RunAsUserStrategyMustRunAs,
					},
				}
			},
			namespace:   namespaceValid,
			expectedErr: "MustRunAs requires a UID",
		},
	}

	for k, v := range testCases {
		store := cache.NewStore(cache.MetaNamespaceKeyFunc)

		// create the admission handler
		tc := testclient.NewSimpleFake(v.namespace)
		admit := &constraint{
			Handler: kadmission.NewHandler(kadmission.Create),
			client:  tc,
			store:   store,
		}

		scc := v.scc()

		// create the providers, this method only needs the namespace
		attributes := kadmission.NewAttributesRecord(nil, "", v.namespace.Name, "", "", "", kadmission.Create, nil)
		_, errs := admit.createProvidersFromConstraints(attributes.GetNamespace(), []*kapi.SecurityContextConstraints{scc})

		if !reflect.DeepEqual(scc, v.scc()) {
			diff := util.ObjectDiff(scc, v.scc())
			t.Errorf("%s createProvidersFromConstraints mutated constraints. diff:\n%s", k, diff)
		}
		if len(v.expectedErr) > 0 && len(errs) != 1 {
			// BUG FIX: the expected-error argument was missing, so the verbs and
			// arguments were misaligned and `errs` rendered under the '%s' verb.
			t.Errorf("%s expected a single error '%s' but received %v", k, v.expectedErr, errs)
			continue
		}
		if len(v.expectedErr) == 0 && len(errs) != 0 {
			t.Errorf("%s did not expect an error but received %v", k, errs)
			continue
		}

		// check that we got the error we expected
		if len(v.expectedErr) > 0 {
			if !strings.Contains(errs[0].Error(), v.expectedErr) {
				t.Errorf("%s expected error '%s' but received %v", k, v.expectedErr, errs[0])
			}
		}
	}
}
// TestMatchingSecurityContextConstraints verifies SCC matching by user name and
// by group: single-match cases first, then a user that matches both SCCs at once.
func TestMatchingSecurityContextConstraints(t *testing.T) {
	sccs := []*kapi.SecurityContextConstraints{
		{
			ObjectMeta: kapi.ObjectMeta{
				Name: "match group",
			},
			Groups: []string{"group"},
		},
		{
			ObjectMeta: kapi.ObjectMeta{
				Name: "match user",
			},
			Users: []string{"user"},
		},
	}

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	for _, v := range sccs {
		store.Add(v)
	}

	// single match cases
	testCases := map[string]struct {
		userInfo    user.Info
		expectedSCC string
	}{
		// empty expectedSCC means no SCC should match.
		"find none": {
			userInfo: &user.DefaultInfo{
				Name:   "foo",
				Groups: []string{"bar"},
			},
		},
		"find user": {
			userInfo: &user.DefaultInfo{
				Name:   "user",
				Groups: []string{"bar"},
			},
			expectedSCC: "match user",
		},
		"find group": {
			userInfo: &user.DefaultInfo{
				Name:   "foo",
				Groups: []string{"group"},
			},
			expectedSCC: "match group",
		},
	}

	for k, v := range testCases {
		sccs, err := getMatchingSecurityContextConstraints(store, v.userInfo)
		if err != nil {
			t.Errorf("%s received error %v", k, err)
			continue
		}
		if v.expectedSCC == "" {
			if len(sccs) > 0 {
				t.Errorf("%s expected to match 0 sccs but found %d: %#v", k, len(sccs), sccs)
			}
		}
		if v.expectedSCC != "" {
			if len(sccs) != 1 {
				t.Errorf("%s returned more than one scc, use case can not validate: %#v", k, sccs)
				continue
			}
			if v.expectedSCC != sccs[0].Name {
				t.Errorf("%s expected to match %s but found %s", k, v.expectedSCC, sccs[0].Name)
			}
		}
	}

	// check that we can match many at once
	// this user matches the first SCC by group and the second by name.
	userInfo := &user.DefaultInfo{
		Name:   "user",
		Groups: []string{"group"},
	}
	sccs, err := getMatchingSecurityContextConstraints(store, userInfo)
	if err != nil {
		t.Fatalf("matching many sccs returned error %v", err)
	}
	if len(sccs) != 2 {
		t.Errorf("matching many sccs expected to match 2 sccs but found %d: %#v", len(sccs), sccs)
	}
}
// TestRequiresPreAllocatedUIDRange checks that only a MustRunAsRange run-as-user
// strategy WITHOUT explicit min/max parameters requires a pre-allocated UID range.
func TestRequiresPreAllocatedUIDRange(t *testing.T) {
	var uid int64 = 1

	testCases := map[string]struct {
		scc      *kapi.SecurityContextConstraints
		requires bool
	}{
		"must run as": {
			scc: &kapi.SecurityContextConstraints{
				RunAsUser: kapi.RunAsUserStrategyOptions{
					Type: kapi.RunAsUserStrategyMustRunAs,
				},
			},
		},
		"run as any": {
			scc: &kapi.SecurityContextConstraints{
				RunAsUser: kapi.RunAsUserStrategyOptions{
					Type: kapi.RunAsUserStrategyRunAsAny,
				},
			},
		},
		"run as non-root": {
			scc: &kapi.SecurityContextConstraints{
				RunAsUser: kapi.RunAsUserStrategyOptions{
					Type: kapi.RunAsUserStrategyMustRunAsNonRoot,
				},
			},
		},
		"run as range": {
			scc: &kapi.SecurityContextConstraints{
				RunAsUser: kapi.RunAsUserStrategyOptions{
					Type: kapi.RunAsUserStrategyMustRunAsRange,
				},
			},
			requires: true,
		},
		"run as range with specified params": {
			scc: &kapi.SecurityContextConstraints{
				RunAsUser: kapi.RunAsUserStrategyOptions{
					Type:        kapi.RunAsUserStrategyMustRunAsRange,
					UIDRangeMin: &uid,
					UIDRangeMax: &uid,
				},
			},
		},
	}

	for name, tc := range testCases {
		if got := requiresPreAllocatedUIDRange(tc.scc); got != tc.requires {
			t.Errorf("%s expected result %t but got %t", name, tc.requires, got)
		}
	}
}
// TestRequiresPreAllocatedSELinuxLevel checks that only a MustRunAs SELinux
// strategy WITHOUT an explicit level requires a pre-allocated MCS level.
func TestRequiresPreAllocatedSELinuxLevel(t *testing.T) {
	testCases := map[string]struct {
		scc      *kapi.SecurityContextConstraints
		requires bool
	}{
		"must run as": {
			scc: &kapi.SecurityContextConstraints{
				SELinuxContext: kapi.SELinuxContextStrategyOptions{
					Type: kapi.SELinuxStrategyMustRunAs,
				},
			},
			requires: true,
		},
		"must with level specified": {
			scc: &kapi.SecurityContextConstraints{
				SELinuxContext: kapi.SELinuxContextStrategyOptions{
					Type: kapi.SELinuxStrategyMustRunAs,
					SELinuxOptions: &kapi.SELinuxOptions{
						Level: "foo",
					},
				},
			},
		},
		"run as any": {
			scc: &kapi.SecurityContextConstraints{
				SELinuxContext: kapi.SELinuxContextStrategyOptions{
					Type: kapi.SELinuxStrategyRunAsAny,
				},
			},
		},
	}

	for name, tc := range testCases {
		if got := requiresPreAllocatedSELinuxLevel(tc.scc); got != tc.requires {
			t.Errorf("%s expected result %t but got %t", name, tc.requires, got)
		}
	}
}
// TestDeduplicateSecurityContextConstraints ensures that de-duplication by name
// collapses the duplicated entries (a, b, e) down to one occurrence each.
//
// Improvement: the redundant existence pre-check before incrementing the count
// map is removed — Go map indexing yields the zero value for absent keys, so
// `constraintCounts[name]++` is the idiomatic equivalent.
func TestDeduplicateSecurityContextConstraints(t *testing.T) {
	duped := []*kapi.SecurityContextConstraints{
		{ObjectMeta: kapi.ObjectMeta{Name: "a"}},
		{ObjectMeta: kapi.ObjectMeta{Name: "a"}},
		{ObjectMeta: kapi.ObjectMeta{Name: "b"}},
		{ObjectMeta: kapi.ObjectMeta{Name: "b"}},
		{ObjectMeta: kapi.ObjectMeta{Name: "c"}},
		{ObjectMeta: kapi.ObjectMeta{Name: "d"}},
		{ObjectMeta: kapi.ObjectMeta{Name: "e"}},
		{ObjectMeta: kapi.ObjectMeta{Name: "e"}},
	}

	deduped := deduplicateSecurityContextConstraints(duped)

	if len(deduped) != 5 {
		t.Fatalf("expected to have 5 remaining sccs but found %d: %v", len(deduped), deduped)
	}

	constraintCounts := map[string]int{}
	for _, scc := range deduped {
		constraintCounts[scc.Name]++
	}

	for k, v := range constraintCounts {
		if v > 1 {
			t.Errorf("%s was found %d times after de-duping", k, v)
		}
	}
}
| apache-2.0 |
nestor-by/akka-persistence-eventsourcing | src/main/scala/io/scalac/seed/route/UserRoute.scala | 1292 | package io.scalac.seed.route
import akka.actor._
import io.scalac.seed.domain.UserAggregate
import io.scalac.seed.service._
import spray.httpx.Json4sSupport
import spray.routing._
import spray.routing.authentication.BasicAuth
object UserRoute {
  /** Request body for POST /user/password: the user's new password. */
  case class ChangePasswordRequest(pass: String)
}
trait UserRoute extends HttpService with Json4sSupport with RequestHandlerCreator with UserAuthenticator {
import UserAggregateManager._
val userAggregateManager: ActorRef
  // Routes exposed under the "user" prefix:
  //   POST /user          -> register a new user (no authentication)
  //   POST /user/password -> change the password of the HTTP-Basic-authenticated user
  val userRoute =
    pathPrefix("user") {
      pathEndOrSingleSlash {
        post {
          entity(as[RegisterUser]) { cmd =>
            serveRegister(cmd)
          }
        }
      } ~
      path("password") {
        post {
          authenticate(BasicAuth(userAuthenticator _, realm = "secure site")) { user =>
            entity(as[UserRoute.ChangePasswordRequest]) { cmd =>
              serveUpdate(ChangeUserPassword(user.id, cmd.pass))
            }
          }
        }
      }
    }
  // Delegates a registration command to handleRegister, which drives the
  // userAggregateManager and completes the request context.
  private def serveRegister(message : AggregateManager.Command): Route =
    ctx => handleRegister[UserAggregate.User](ctx, userAggregateManager, message)
  // Delegates an update command to handleUpdate, which drives the
  // userAggregateManager and completes the request context.
  private def serveUpdate(message : AggregateManager.Command): Route =
    ctx => handleUpdate[UserAggregate.User](ctx, userAggregateManager, message)
} | apache-2.0 |
icexelloss/arrow | js/src/factories.ts | 12287 | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
import { Field } from './schema.js';
import * as dtypes from './type.js';
import { Data, DataProps } from './data.js';
import { BuilderType } from './interfaces.js';
import { Vector, makeVector } from './vector.js';
import { Builder, BuilderOptions } from './builder.js';
import { instance as getBuilderConstructor } from './visitor/builderctor.js';
import { ArrayDataType, BigIntArray, JavaScriptArrayDataType, TypedArray, TypedArrayDataType } from './interfaces.js';
/**
 * Creates a {@link Builder} for the supplied options' data type, recursively
 * constructing child builders for nested types. Child builder options come from
 * `options.children` (by index or by field name), falling back to the parent's
 * `nullValues` when none are provided for a field.
 */
export function makeBuilder<T extends dtypes.DataType = any, TNull = any>(options: BuilderOptions<T, TNull>): BuilderType<T, TNull> {
    const { type } = options;
    const BuilderCtor = getBuilderConstructor.getVisitFn<T>(type)();
    const builder = new BuilderCtor(options) as Builder<T, TNull>;
    if (type.children && type.children.length > 0) {
        const childOptions = options['children'] || [] as BuilderOptions[];
        const fallbackOptions = { 'nullValues': options['nullValues'] };
        const optionsForField = Array.isArray(childOptions)
            ? ((_: Field, i: number) => childOptions[i] || fallbackOptions)
            : (({ name }: Field) => childOptions[name] || fallbackOptions);
        let index = 0;
        for (const field of type.children) {
            const opts = optionsForField(field, index++);
            builder.children.push(makeBuilder({ ...opts, type: field.type }));
        }
    }
    return builder as BuilderType<T, TNull>;
}
/**
* Creates a Vector from a JavaScript array via a {@link Builder}.
* Use {@link makeVector} if you only want to create a vector from a typed array.
*
* @example
* ```ts
* const vf64 = vectorFromArray([1, 2, 3]);
* const vi8 = vectorFromArray([1, 2, 3], new Int8);
* const vdict = vectorFromArray(['foo', 'bar']);
* ```
*/
export function vectorFromArray(values: readonly (null | undefined)[], type?: dtypes.Null): Vector<dtypes.Null>;
export function vectorFromArray(values: readonly (null | undefined | boolean)[], type?: dtypes.Bool): Vector<dtypes.Bool>;
export function vectorFromArray<T extends dtypes.Utf8 | dtypes.Dictionary<dtypes.Utf8> = dtypes.Dictionary<dtypes.Utf8, dtypes.Int32>>(values: readonly (null | undefined | string)[], type?: T): Vector<T>;
export function vectorFromArray<T extends dtypes.Date_>(values: readonly (null | undefined | Date)[], type?: T): Vector<T>;
export function vectorFromArray<T extends dtypes.Int>(values: readonly (null | undefined | number)[], type: T): Vector<T>;
export function vectorFromArray<T extends dtypes.Int64 | dtypes.Uint64 = dtypes.Int64>(values: readonly (null | undefined | bigint)[], type?: T): Vector<T>;
export function vectorFromArray<T extends dtypes.Float = dtypes.Float64>(values: readonly (null | undefined | number)[], type?: T): Vector<T>;
export function vectorFromArray<T extends dtypes.DataType>(values: readonly (unknown)[], type: T): Vector<T>;
export function vectorFromArray<T extends readonly unknown[]>(values: T): Vector<JavaScriptArrayDataType<T>>;
/** Creates a Vector from a typed array via {@link makeVector}. */
export function vectorFromArray<T extends TypedArray | BigIntArray>(data: T): Vector<TypedArrayDataType<T>>;
export function vectorFromArray<T extends dtypes.DataType>(data: Data<T>): Vector<T>;
export function vectorFromArray<T extends dtypes.DataType>(data: Vector<T>): Vector<T>;
export function vectorFromArray<T extends dtypes.DataType>(data: DataProps<T>): Vector<T>;
export function vectorFromArray<T extends TypedArray | BigIntArray | readonly unknown[]>(data: T): Vector<ArrayDataType<T>>;
/** Implementation signature: dispatches on the runtime shape of `init`. */
export function vectorFromArray(init: any, type?: dtypes.DataType) {
    // Data/Vector/DataProps/typed-array inputs need no builder — hand off to makeVector.
    if (init instanceof Data || init instanceof Vector || init.type instanceof dtypes.DataType || ArrayBuffer.isView(init)) {
        return makeVector(init as any);
    }
    // Otherwise build chunk-by-chunk, inferring the type when none was given;
    // `null` is the only value treated as null by the builder here.
    const options: IterableBuilderOptions = { type: type ?? inferType(init), nullValues: [null] };
    const chunks = [...builderThroughIterable(options)(init)];
    const vector = chunks.length === 1 ? chunks[0] : chunks.reduce((a, b) => a.concat(b));
    // Dictionary-encoded vectors are memoized for faster repeated value access.
    if (dtypes.DataType.isDictionary(vector.type)) {
        return vector.memoize();
    }
    return vector;
}
/** @ignore */
// Infers an Arrow DataType for a JavaScript array by tallying the runtime type
// of every element. A type is chosen only when all non-null elements agree;
// mixed or unsupported inputs raise a TypeError asking for an explicit type.
function inferType(value: readonly unknown[]): dtypes.DataType {
    if (value.length === 0) { return new dtypes.Null; }
    let nulls = 0;
    // @ts-ignore
    let arrays = 0;
    // @ts-ignore
    let objects = 0;
    let numbers = 0;
    let strings = 0;
    let bigints = 0;
    let booleans = 0;
    let dates = 0;
    for (const val of value) {
        if (val == null) { ++nulls; continue; }
        const kind = typeof val;
        if (kind === 'bigint') {
            ++bigints;
        } else if (kind === 'boolean') {
            ++booleans;
        } else if (kind === 'number') {
            ++numbers;
        } else if (kind === 'string') {
            ++strings;
        } else if (kind === 'object') {
            if (Array.isArray(val)) {
                ++arrays;
            } else if (Object.prototype.toString.call(val) === '[object Date]') {
                ++dates;
            } else {
                ++objects;
            }
        } else {
            // functions, symbols, etc. are unsupported
            throw new TypeError('Unable to infer Vector type from input values, explicit type declaration expected');
        }
    }
    if (numbers + nulls === value.length) {
        return new dtypes.Float64;
    } else if (strings + nulls === value.length) {
        return new dtypes.Dictionary(new dtypes.Utf8, new dtypes.Int32);
    } else if (bigints + nulls === value.length) {
        return new dtypes.Int64;
    } else if (booleans + nulls === value.length) {
        return new dtypes.Bool;
    } else if (dates + nulls === value.length) {
        return new dtypes.DateMillisecond;
    }
    // TODO: add more types to infererence
    throw new TypeError('Unable to infer Vector type from input values, explicit type declaration expected');
}
/**
* A set of options to create an Iterable or AsyncIterable `Builder` transform function.
* @see {@link builderThroughIterable}
* @see {@link builderThroughAsyncIterable}
*/
export interface IterableBuilderOptions<T extends dtypes.DataType = any, TNull = any> extends BuilderOptions<T, TNull> {
    /** Threshold at which the builder is flushed and a Vector is yielded. */
    highWaterMark?: number;
    /** Whether `highWaterMark` is measured in element count or in bytes. */
    queueingStrategy?: 'bytes' | 'count';
    /** Custom hash for dictionary-encoded values. */
    dictionaryHashFunction?: (value: any) => string | number;
    /** Maps a value to the child type id when building union vectors. */
    valueToChildTypeId?: (builder: Builder<T, TNull>, value: any, offset: number) => number;
}
/** @ignore */
type ThroughIterable<T extends dtypes.DataType = any, TNull = any> = (source: Iterable<T['TValue'] | TNull>) => IterableIterator<Vector<T>>;
/**
* Transform a synchronous `Iterable` of arbitrary JavaScript values into a
* sequence of Arrow Vector<T> following the chunking semantics defined in
* the supplied `options` argument.
*
* This function returns a function that accepts an `Iterable` of values to
* transform. When called, this function returns an Iterator of `Vector<T>`.
*
* The resulting `Iterator<Vector<T>>` yields Vectors based on the
* `queueingStrategy` and `highWaterMark` specified in the `options` argument.
*
* * If `queueingStrategy` is `"count"` (or omitted), The `Iterator<Vector<T>>`
* will flush the underlying `Builder` (and yield a new `Vector<T>`) once the
* Builder's `length` reaches or exceeds the supplied `highWaterMark`.
* * If `queueingStrategy` is `"bytes"`, the `Iterator<Vector<T>>` will flush
* the underlying `Builder` (and yield a new `Vector<T>`) once its `byteLength`
* reaches or exceeds the supplied `highWaterMark`.
*
* @param {IterableBuilderOptions<T, TNull>} options An object of properties which determine the `Builder` to create and the chunking semantics to use.
* @returns A function which accepts a JavaScript `Iterable` of values to
* write, and returns an `Iterator` that yields Vectors according
* to the chunking semantics defined in the `options` argument.
* @nocollapse
*/
/* eslint-disable jsdoc/require-jsdoc */
// Returns a transform that consumes a synchronous Iterable of values and yields
// Vector chunks, flushing the underlying Builder whenever its length (or
// byteLength, per `queueingStrategy`) reaches `highWaterMark`. Always yields at
// least one Vector, even for an empty source.
export function builderThroughIterable<T extends dtypes.DataType = any, TNull = any>(options: IterableBuilderOptions<T, TNull>) {
    const { queueingStrategy = 'count' } = options;
    const { highWaterMark = queueingStrategy !== 'bytes' ? Number.POSITIVE_INFINITY : 2 ** 14 } = options;
    const sizeProperty: 'length' | 'byteLength' = queueingStrategy !== 'bytes' ? 'length' : 'byteLength';
    return function* (source: Iterable<T['TValue'] | TNull>) {
        let chunksYielded = 0;
        const builder = makeBuilder(options);
        for (const value of source) {
            if (builder.append(value)[sizeProperty] >= highWaterMark) {
                ++chunksYielded;
                yield builder.toVector();
            }
        }
        if (builder.finish().length > 0 || chunksYielded === 0) {
            yield builder.toVector();
        }
    } as ThroughIterable<T, TNull>;
}
/** @ignore */
type ThroughAsyncIterable<T extends dtypes.DataType = any, TNull = any> = (source: Iterable<T['TValue'] | TNull> | AsyncIterable<T['TValue'] | TNull>) => AsyncIterableIterator<Vector<T>>;
/**
* Transform an `AsyncIterable` of arbitrary JavaScript values into a
* sequence of Arrow Vector<T> following the chunking semantics defined in
* the supplied `options` argument.
*
* This function returns a function that accepts an `AsyncIterable` of values to
* transform. When called, this function returns an AsyncIterator of `Vector<T>`.
*
* The resulting `AsyncIterator<Vector<T>>` yields Vectors based on the
* `queueingStrategy` and `highWaterMark` specified in the `options` argument.
*
* * If `queueingStrategy` is `"count"` (or omitted), The `AsyncIterator<Vector<T>>`
* will flush the underlying `Builder` (and yield a new `Vector<T>`) once the
* Builder's `length` reaches or exceeds the supplied `highWaterMark`.
* * If `queueingStrategy` is `"bytes"`, the `AsyncIterator<Vector<T>>` will flush
* the underlying `Builder` (and yield a new `Vector<T>`) once its `byteLength`
* reaches or exceeds the supplied `highWaterMark`.
*
* @param {IterableBuilderOptions<T, TNull>} options An object of properties which determine the `Builder` to create and the chunking semantics to use.
* @returns A function which accepts a JavaScript `AsyncIterable` of values
* to write, and returns an `AsyncIterator` that yields Vectors
* according to the chunking semantics defined in the `options`
* argument.
* @nocollapse
*/
/* eslint-disable jsdoc/require-jsdoc */
// Async counterpart of builderThroughIterable: consumes a sync or async
// iterable and yields Vector chunks, flushing the Builder whenever its length
// (or byteLength, per `queueingStrategy`) reaches `highWaterMark`. Always
// yields at least one Vector, even for an empty source.
export function builderThroughAsyncIterable<T extends dtypes.DataType = any, TNull = any>(options: IterableBuilderOptions<T, TNull>) {
    const { queueingStrategy = 'count' } = options;
    const { highWaterMark = queueingStrategy !== 'bytes' ? Number.POSITIVE_INFINITY : 2 ** 14 } = options;
    const sizeProperty: 'length' | 'byteLength' = queueingStrategy !== 'bytes' ? 'length' : 'byteLength';
    return async function* (source: Iterable<T['TValue'] | TNull> | AsyncIterable<T['TValue'] | TNull>) {
        let chunksYielded = 0;
        const builder = makeBuilder(options);
        for await (const value of source) {
            if (builder.append(value)[sizeProperty] >= highWaterMark) {
                ++chunksYielded;
                yield builder.toVector();
            }
        }
        if (builder.finish().length > 0 || chunksYielded === 0) {
            yield builder.toVector();
        }
    } as ThroughAsyncIterable<T, TNull>;
}
| apache-2.0 |
Medusar/rocketmq-commet | rocketmq-broker/src/main/java/com/alibaba/rocketmq/broker/offset/ConsumerOffsetManager.java | 9141 | /**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.rocketmq.broker.offset;
import com.alibaba.rocketmq.broker.BrokerController;
import com.alibaba.rocketmq.broker.BrokerPathConfigHelper;
import com.alibaba.rocketmq.common.ConfigManager;
import com.alibaba.rocketmq.common.UtilAll;
import com.alibaba.rocketmq.common.constant.LoggerName;
import com.alibaba.rocketmq.remoting.protocol.RemotingSerializable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
/**
*
* @author shijia.wxr
*/
public class ConsumerOffsetManager extends ConfigManager {
private static final Logger log = LoggerFactory.getLogger(LoggerName.BrokerLoggerName);
private static final String TOPIC_GROUP_SEPARATOR = "@";
private ConcurrentHashMap<String/* topic@group */, ConcurrentHashMap<Integer, Long>> offsetTable =
new ConcurrentHashMap<String, ConcurrentHashMap<Integer, Long>>(512);
private transient BrokerController brokerController;
/** No-arg constructor used by the config (de)serialization machinery. */
public ConsumerOffsetManager() {
}
/**
 * Constructor wiring the owning broker controller, needed for scanning
 * unsubscribed topics and for reading store offsets.
 */
public ConsumerOffsetManager(BrokerController brokerController) {
    this.brokerController = brokerController;
}
/**
 * Removes offset entries whose consumer group no longer subscribes to the topic
 * AND whose persisted offsets are all at or below the store's minimum queue
 * offsets (i.e. the tracked positions no longer reference retained data).
 */
public void scanUnsubscribedTopic() {
    Iterator<Entry<String, ConcurrentHashMap<Integer, Long>>> it = this.offsetTable.entrySet().iterator();
    while (it.hasNext()) {
        Entry<String, ConcurrentHashMap<Integer, Long>> next = it.next();
        // keys have the form "topic@group"
        String topicAtGroup = next.getKey();
        String[] arrays = topicAtGroup.split(TOPIC_GROUP_SEPARATOR);
        if (arrays != null && arrays.length == 2) {
            String topic = arrays[0];
            String group = arrays[1];

            // no live subscription for this group/topic and offsets are stale -> drop the entry
            if (null == brokerController.getConsumerManager().findSubscriptionData(group, topic)
                    && this.offsetBehindMuchThanData(topic, next.getValue())) {
                it.remove();
                log.warn("remove topic offset, {}", topicAtGroup);
            }
        }
    }
}
/**
 * Returns true when the table is non-empty and every persisted offset is at or
 * below the minimum offset still kept in the store for its queue — i.e. the
 * consumed positions lag behind all retained data. Stops early (returns false)
 * as soon as one offset is ahead of the store minimum.
 *
 * Improvement: the verbose if/else that set {@code result} to a constant in
 * each branch is collapsed into a direct boolean assignment.
 */
private boolean offsetBehindMuchThanData(final String topic, ConcurrentHashMap<Integer, Long> table) {
    Iterator<Entry<Integer, Long>> it = table.entrySet().iterator();
    boolean result = !table.isEmpty();

    while (it.hasNext() && result) {
        Entry<Integer, Long> next = it.next();
        long minOffsetInStore = this.brokerController.getMessageStore().getMinOffsetInQuque(topic, next.getKey());
        long offsetInPersist = next.getValue();
        result = offsetInPersist <= minOffsetInStore;
    }

    return result;
}
public Set<String> whichTopicByConsumer(final String group) {
Set<String> topics = new HashSet<String>();
Iterator<Entry<String, ConcurrentHashMap<Integer, Long>>> it = this.offsetTable.entrySet().iterator();
while (it.hasNext()) {
Entry<String, ConcurrentHashMap<Integer, Long>> next = it.next();
String topicAtGroup = next.getKey();
String[] arrays = topicAtGroup.split(TOPIC_GROUP_SEPARATOR);
if (arrays != null && arrays.length == 2) {
if (group.equals(arrays[1])) {
topics.add(arrays[0]);
}
}
}
return topics;
}
public Set<String> whichGroupByTopic(final String topic) {
Set<String> groups = new HashSet<String>();
Iterator<Entry<String, ConcurrentHashMap<Integer, Long>>> it = this.offsetTable.entrySet().iterator();
while (it.hasNext()) {
Entry<String, ConcurrentHashMap<Integer, Long>> next = it.next();
String topicAtGroup = next.getKey();
String[] arrays = topicAtGroup.split(TOPIC_GROUP_SEPARATOR);
if (arrays != null && arrays.length == 2) {
if (topic.equals(arrays[0])) {
groups.add(arrays[1]);
}
}
}
return groups;
}
public void commitOffset(final String group, final String topic, final int queueId, final long offset) {
// topic@group
String key = topic + TOPIC_GROUP_SEPARATOR + group;
this.commitOffset(key, queueId, offset);
}
public long queryOffset(final String group, final String topic, final int queueId) {
// topic@group
String key = topic + TOPIC_GROUP_SEPARATOR + group;
ConcurrentHashMap<Integer, Long> map = this.offsetTable.get(key);
if (null != map) {
Long offset = map.get(queueId);
if (offset != null)
return offset;
}
return -1;
}
private void commitOffset(final String key, final int queueId, final long offset) {
ConcurrentHashMap<Integer, Long> map = this.offsetTable.get(key);
if (null == map) {
map = new ConcurrentHashMap<Integer, Long>(32);
map.put(queueId, offset);
this.offsetTable.put(key, map);
}
else {
map.put(queueId, offset);
}
}
public String encode() {
return this.encode(false);
}
public String encode(final boolean prettyFormat) {
return RemotingSerializable.toJson(this, prettyFormat);
}
@Override
public void decode(String jsonString) {
if (jsonString != null) {
ConsumerOffsetManager obj = RemotingSerializable.fromJson(jsonString, ConsumerOffsetManager.class);
if (obj != null) {
this.offsetTable = obj.offsetTable;
}
}
}
@Override
public String configFilePath() {
return BrokerPathConfigHelper.getConsumerOffsetPath(this.brokerController.getMessageStoreConfig().getStorePathRootDir());
}
public ConcurrentHashMap<String, ConcurrentHashMap<Integer, Long>> getOffsetTable() {
return offsetTable;
}
public void setOffsetTable(ConcurrentHashMap<String, ConcurrentHashMap<Integer, Long>> offsetTable) {
this.offsetTable = offsetTable;
}
public Map<Integer, Long> queryMinOffsetInAllGroup(final String topic, final String filterGroups) {
Map<Integer, Long> queueMinOffset = new HashMap<Integer, Long>();
Set<String> topicGroups = this.offsetTable.keySet();
if (!UtilAll.isBlank(filterGroups)) {
for (String group : filterGroups.split(",")) {
Iterator<String> it = topicGroups.iterator();
while (it.hasNext()) {
if (group.equals(it.next().split(TOPIC_GROUP_SEPARATOR)[1])) {
it.remove();
}
}
}
}
for (String topicGroup : topicGroups) {
String[] topicGroupArr = topicGroup.split(TOPIC_GROUP_SEPARATOR);
if (topic.equals(topicGroupArr[0])) {
for (Entry<Integer, Long> entry : this.offsetTable.get(topicGroup).entrySet()) {
long minOffset = this.brokerController.getMessageStore().getMinOffsetInQuque(topic, entry.getKey());
if (entry.getValue() >= minOffset) {
Long offset = queueMinOffset.get(entry.getKey());
if (offset == null) {
queueMinOffset.put(entry.getKey(), Math.min(Long.MAX_VALUE, entry.getValue()));
}
else {
queueMinOffset.put(entry.getKey(), Math.min(entry.getValue(), offset));
}
}
}
}
}
return queueMinOffset;
}
public Map<Integer, Long> queryOffset(final String group, final String topic) {
// topic@group
String key = topic + TOPIC_GROUP_SEPARATOR + group;
return this.offsetTable.get(key);
}
public void cloneOffset(final String srcGroup, final String destGroup, final String topic) {
ConcurrentHashMap<Integer, Long> offsets = this.offsetTable.get(topic + TOPIC_GROUP_SEPARATOR + srcGroup);
if (offsets != null) {
this.offsetTable.put(topic + TOPIC_GROUP_SEPARATOR + destGroup, offsets);
}
}
}
| apache-2.0 |
electrum/drift | drift-transport-apache/src/test/java/io/airlift/drift/transport/apache/scribe/apache/ResultCode.java | 1442 | /*
* Copyright (C) 2012 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.airlift.drift.transport.apache.scribe.apache;
import org.apache.thrift.TEnum;
public enum ResultCode
        implements TEnum
{
    OK(0),
    TRY_LATER(1);

    /** Integer wire value assigned to this constant in the Thrift IDL. */
    private final int value;

    ResultCode(int value)
    {
        this.value = value;
    }

    /**
     * Returns the integer value of this enum value, as defined in the Thrift IDL.
     */
    @Override
    public int getValue()
    {
        return value;
    }

    /**
     * Looks up the enum constant matching a Thrift IDL integer value.
     *
     * @return null if the value is not found.
     */
    public static ResultCode findByValue(int value)
    {
        for (ResultCode code : values()) {
            if (code.value == value) {
                return code;
            }
        }
        return null;
    }
}
| apache-2.0 |
nathanchen/netty-netty-5.0.0.Alpha1 | example/src/main/java/io/netty/example/socksproxy/SocksServerConnectHandler.java | 4277 | /*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.example.socksproxy;
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOption;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.handler.codec.socks.SocksCmdRequest;
import io.netty.handler.codec.socks.SocksCmdResponse;
import io.netty.handler.codec.socks.SocksCmdStatus;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.GenericFutureListener;
import io.netty.util.concurrent.Promise;
@ChannelHandler.Sharable
public final class SocksServerConnectHandler extends SimpleChannelInboundHandler<SocksCmdRequest> {

    // Pipeline handler name, used to remove this handler once relaying starts.
    private static final String name = "SOCKS_SERVER_CONNECT_HANDLER";

    public static String getName() {
        return name;
    }

    private final Bootstrap b = new Bootstrap();

    /**
     * Handles a SOCKS CONNECT request: opens an outbound connection to the requested
     * host/port. When the outbound channel is ready (the promise is completed by
     * DirectClientInitializer), a SUCCESS response is flushed to the client, this handler
     * removes itself and a {@link RelayHandler} is installed in each direction so bytes
     * flow transparently between client and destination. On any failure a FAILURE
     * response is sent and the client connection is flushed and closed.
     */
    @Override
    public void messageReceived(final ChannelHandlerContext ctx, final SocksCmdRequest request) throws Exception {
        // Completed with the outbound Channel once its pipeline is initialized.
        Promise<Channel> promise = ctx.executor().newPromise();
        promise.addListener(
                new GenericFutureListener<Future<Channel>>() {
                    @Override
                    public void operationComplete(final Future<Channel> future) throws Exception {
                        final Channel outboundChannel = future.getNow();
                        if (future.isSuccess()) {
                            // Tell the client the tunnel is up, then switch both channels to relay mode.
                            ctx.channel().writeAndFlush(new SocksCmdResponse(SocksCmdStatus.SUCCESS, request.addressType()))
                                    .addListener(new ChannelFutureListener() {
                                        @Override
                                        public void operationComplete(ChannelFuture channelFuture) throws Exception {
                                            ctx.pipeline().remove(getName());
                                            outboundChannel.pipeline().addLast(new RelayHandler(ctx.channel()));
                                            ctx.channel().pipeline().addLast(new RelayHandler(outboundChannel));
                                        }
                                    });
                        } else {
                            ctx.channel().writeAndFlush(new SocksCmdResponse(SocksCmdStatus.FAILURE, request.addressType()));
                            SocksServerUtils.closeOnFlush(ctx.channel());
                        }
                    }
                });

        // Reuse the inbound channel's event loop for the outbound connection.
        final Channel inboundChannel = ctx.channel();
        b.group(inboundChannel.eventLoop())
                .channel(NioSocketChannel.class)
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000)
                .option(ChannelOption.SO_KEEPALIVE, true)
                .handler(new DirectClientInitializer(promise));
        b.connect(request.host(), request.port()).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    // Connection established use handler provided results
                } else {
                    // Close the connection if the connection attempt has failed.
                    ctx.channel().writeAndFlush(
                            new SocksCmdResponse(SocksCmdStatus.FAILURE, request.addressType()));
                    SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
        // On any unexpected error, flush pending writes and close the client connection.
        SocksServerUtils.closeOnFlush(ctx.channel());
    }
}
| apache-2.0 |
aws/aws-sdk-cpp | aws-cpp-sdk-cognito-idp/source/model/UpdateUserPoolClientResult.cpp | 1027 | /**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/cognito-idp/model/UpdateUserPoolClientResult.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/UnreferencedParam.h>
#include <utility>
using namespace Aws::CognitoIdentityProvider::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
using namespace Aws;
// Default-constructed result; m_userPoolClient is left default-initialized.
UpdateUserPoolClientResult::UpdateUserPoolClientResult()
{
}

// Constructs the result from a raw service response by delegating to operator=.
UpdateUserPoolClientResult::UpdateUserPoolClientResult(const Aws::AmazonWebServiceResult<JsonValue>& result)
{
  *this = result;
}

// Populates the result from the JSON payload of the UpdateUserPoolClient response.
UpdateUserPoolClientResult& UpdateUserPoolClientResult::operator =(const Aws::AmazonWebServiceResult<JsonValue>& result)
{
  JsonView jsonValue = result.GetPayload().View();
  // Only set the member when the service actually returned the field.
  if(jsonValue.ValueExists("UserPoolClient"))
  {
    m_userPoolClient = jsonValue.GetObject("UserPoolClient");
  }

  return *this;
}
| apache-2.0 |
vmlinz/ruby-china-android | src/org/rubychina/android/fragment/UserRecentlyCreatedTopicListFragment.java | 2071 | /*Copyright (C) 2012 Longerian (http://www.longerian.me)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.*/
package org.rubychina.android.fragment;
import org.rubychina.android.api.request.UserRecentlyCreatedTopicsRequest;
import org.rubychina.android.type.User;
import android.os.Bundle;
/**
 * Fragment listing the topics most recently created by a given user.
 * The request lifecycle is tied to the fragment's view lifecycle: issued in
 * onActivityCreated, cancelled in onDestroyView.
 */
public class UserRecentlyCreatedTopicListFragment extends
        UserRelativeTopicListFragment {

    // In-flight (or reusable) API request; lazily created in startTopicsRequest.
    private UserRecentlyCreatedTopicsRequest request;

    /** Factory method: packs the target user into the fragment's arguments bundle. */
    public static UserRecentlyCreatedTopicListFragment newInstance(User user) {
        UserRecentlyCreatedTopicListFragment f = new UserRecentlyCreatedTopicListFragment();
        Bundle bundle = new Bundle();
        bundle.putParcelable(USER, user);
        f.setArguments(bundle);
        return f;
    }

    @Override
    public void onActivityCreated(Bundle savedInstanceState) {
        super.onActivityCreated(savedInstanceState);
        // `user` is populated by the superclass from the arguments bundle.
        startTopicsRequest(user);
        isActive = true;
    }

    @Override
    public void onDestroyView() {
        super.onDestroyView();
        isActive = false;
        // Avoid delivering results to a torn-down view.
        cancelTopicsRequest();
    }

    /** Issues (or re-issues) the topics request for the user and shows the progress bar. */
    public void startTopicsRequest(User user) {
        if(request == null) {
            request = new UserRecentlyCreatedTopicsRequest();
        }
        request.setLogin(user.getLogin());
        request.setSize(rubyChina.getApp().getPageSize());
        rubyChina.getClient().request(request, new UserRelativeTopicsCallback());
        rubyChina.showIndeterminateProgressBar();
    }

    /** Cancels the pending request, if any, and hides the progress bar. */
    private void cancelTopicsRequest() {
        if(request != null) {
            rubyChina.getClient().cancel(request);
            rubyChina.hideIndeterminateProgressBar();
        }
    }
}
| apache-2.0 |
ccri/geomesa | geomesa-features/geomesa-feature-kryo/src/main/scala/org/locationtech/geomesa/features/kryo/json/JsonPathParser.scala | 5359 | /***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.features.kryo.json
import org.locationtech.geomesa.features.kryo.json.JsonPathParser.JsonPathFunction.JsonPathFunction
import org.locationtech.geomesa.features.kryo.json.JsonPathParser._
import org.locationtech.geomesa.utils.text.BasicParser
import org.parboiled.Context
import org.parboiled.errors.{ErrorUtils, ParsingException}
import org.parboiled.scala._
/**
* Parses a json path string into a sequence of selectors. See https://github.com/jayway/JsonPath for examples.
*
* Does not support filter predicates.
*/
object JsonPathParser {

  private val Parser = new JsonPathParser()

  /**
   * Parses a json path string into its path elements.
   *
   * @param path   json path; a leading '$' is optional (dot notation is normalized)
   * @param report if true, use a reporting parse runner so errors carry details (slower)
   * @throws ParsingException if the path is not a valid json path
   */
  @throws(classOf[ParsingException])
  def parse(path: String, report: Boolean = true): Seq[PathElement] = {
    if (path == null) {
      throw new IllegalArgumentException("Path must not be null")
    }
    val runner = if (report) { ReportingParseRunner(Parser.Path) } else { BasicParseRunner(Parser.Path) }
    // normalize dot notation ("foo.bar") to full syntax ("$.foo.bar")
    val fixedPath = if (path.startsWith("$")) { path } else s"$$.$path"
    val parsing = runner.run(fixedPath)
    parsing.result.getOrElse {
      throw new ParsingException(s"Invalid json path: ${ErrorUtils.printParseErrors(parsing)}")
    }
  }

  /**
   * Renders path elements back into a json path string.
   *
   * @param dollar if true, prefix with '$'; otherwise emit dot-notation style
   */
  def print(path: Seq[PathElement], dollar: Boolean = true): String = {
    require(path.nonEmpty, "Path must be non-empty")
    if (dollar) {
      path.mkString("$", "", "")
    } else {
      val string = path.mkString
      if (string.charAt(0) == '.') {
        // trim off leading self-selector to correspond to dot notation selection
        string.substring(1)
      } else {
        string
      }
    }
  }

  sealed trait PathElement

  // attribute: .foo or ['foo']
  case class PathAttribute(name: String, bracketed: Boolean = false) extends PathElement {
    override def toString: String = if (bracketed) { s"['$name']" } else { s".$name" }
  }

  // enumerated index: [1]
  case class PathIndex(index: Int) extends PathElement {
    override def toString: String = s"[$index]"
  }

  // enumerated indices: [1,2,5]
  case class PathIndices(indices: Seq[Int]) extends PathElement {
    override def toString: String = indices.mkString("[", ",", "]")
  }

  // any attribute: .*
  case object PathAttributeWildCard extends PathElement {
    override val toString: String = ".*"
  }

  // any index: [*]
  case object PathIndexWildCard extends PathElement {
    override val toString: String = "[*]"
  }

  // deep scan: ..
  case object PathDeepScan extends PathElement {
    override val toString: String = ".."
  }

  // path function: .min(), .max(), .avg(), .length()
  // not implemented: stddev
  case class PathFunction(function: JsonPathFunction) extends PathElement {
    override def toString: String = s".$function()"
  }

  object JsonPathFunction extends Enumeration {
    type JsonPathFunction = Value
    val min, max, avg, length = Value
  }
}
// Parboiled grammar for json paths; rules build PathElement values on the parser value stack.
private class JsonPathParser extends BasicParser {

  // main parsing rule
  def Path: Rule1[Seq[PathElement]] =
    rule { "$" ~ zeroOrMore(Element) ~ optional(Function) ~~> ((e, f) => e ++ f.toSeq) ~ EOI }

  def Element: Rule1[PathElement] = rule {
    Attribute | ArrayIndex | ArrayIndices | ArrayIndexRange | BracketedAttribute | AttributeWildCard | IndexWildCard | DeepScan
  }

  def IndexWildCard: Rule1[PathElement] = rule { "[*]" ~ push(PathIndexWildCard) }

  def AttributeWildCard: Rule1[PathElement] = rule { ".*" ~ push(PathAttributeWildCard) }

  // we have to push the deep scan directly onto the stack as there is no forward matching and
  // it's ridiculous trying to combine Rule1's and Rule2's
  def DeepScan: Rule1[PathElement] = rule { "." ~ toRunAction(pushDeepScan) ~ (Attribute | BracketedAttribute | AttributeWildCard) }

  // note: this assumes that we are inside a zeroOrMore, which is currently the case
  // the zeroOrMore will have pushed a single list onto the value stack - we append our value to that
  private def pushDeepScan(context: Context[Any]): Unit =
    context.getValueStack.push(PathDeepScan :: context.getValueStack.pop.asInstanceOf[List[_]])

  // single enumerated index, e.g. [3]
  def ArrayIndex: Rule1[PathIndex] = rule { "[" ~ int ~ "]" ~~> PathIndex }

  // comma-separated index list, e.g. [1,2,5]
  def ArrayIndices: Rule1[PathIndices] =
    rule { "[" ~ int ~ "," ~ oneOrMore(int, ",") ~ "]" ~~> ((n0, n) => PathIndices(n.+:(n0))) }

  // index range [start:end] expanded to explicit indices (end exclusive, via `until`)
  def ArrayIndexRange: Rule1[PathIndices] =
    rule { "[" ~ int ~ ":" ~ int ~ "]" ~~> ((n0, n1) => PathIndices(n0 until n1)) }

  // dot-notation attribute; the !"()" lookahead prevents function calls from matching here
  def Attribute: Rule1[PathAttribute] = rule { "." ~ oneOrMore(char) ~> { (s) => PathAttribute(s) } ~ !"()" }

  def BracketedAttribute: Rule1[PathAttribute] =
    rule { "[" ~ bracketedString ~~> { (s) => PathAttribute(s, bracketed = true) } ~ "]" }

  private def bracketedString: Rule1[String] = unquotedString | singleQuotedString

  def Function: Rule1[PathFunction] = rule {
    "." ~ ("min" | "max" | "avg" | "length") ~> ((f) => PathFunction(JsonPathFunction.withName(f))) ~ "()"
  }
}
| apache-2.0 |
sergecodd/FireFox-OS | B2G/gecko/js/src/tests/js1_7/regress/regress-416705.js | 1076 | // |reftest| pref(javascript.options.xml.content,true)
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//-----------------------------------------------------------------------------
var BUGNUMBER = 416705;
var summary = 'throw from xml filter crashes';
var actual = 'No Crash';
var expect = 6;
//-----------------------------------------------------------------------------
test();
//-----------------------------------------------------------------------------
function test()
{
enterFunc ('test');
printBugNumber(BUGNUMBER);
printStatus (summary);
var g;
function f()
{
try {
<><a/><b/></>.(let (a=1, b = 2, c = 3)
(g = function() { a += b+c; return a; }, xxx));
} catch (e) {
}
}
f();
var actual = g();
reportCompare(expect, actual, summary);
exitFunc ('test');
}
| apache-2.0 |
keith-turner/accumulo | server/monitor/src/main/java/org/apache/accumulo/monitor/rest/zk/ZookeeperResource.java | 1707 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.accumulo.monitor.rest.zk;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
import org.apache.accumulo.monitor.ZooKeeperStatus;
import org.apache.accumulo.monitor.ZooKeeperStatus.ZooKeeperState;
/**
 * REST endpoint that reports the state of each ZooKeeper server as JSON or XML.
 *
 * @since 2.0.0
 */
@Path("/zk")
@Produces({MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML})
public class ZookeeperResource {

  /**
   * Collects the current status of every ZooKeeper server.
   *
   * @return ZooKeeper information for all servers reporting a non-negative client count
   */
  @GET
  public ZKInformation getZKInformation() {
    ZKInformation info = new ZKInformation();
    for (ZooKeeperState state : ZooKeeperStatus.getZooKeeperStatus()) {
      // A negative client count means the server's status is unavailable; skip it.
      if (state.clients < 0) {
        continue;
      }
      info.addZK(new ZooKeeper(state.keeper, state.mode, state.clients));
    }
    return info;
  }
}
| apache-2.0 |
weswigham/TypeScript | tests/cases/fourslash/unusedParameterInLambda1.ts | 336 | /// <reference path='fourslash.ts' />
// @noUnusedLocals: true
// @noUnusedParameters: true
////[|/*~a*/(/*~b*/x/*~c*/:/*~d*/number/*~e*/)/*~f*/ => /*~g*/{/*~h*/}/*~i*/|]
verify.codeFix({
description: "Remove declaration for: 'x'",
index: 0,
newRangeContent: "/*~a*/(/*~e*/)/*~f*/ => /*~g*/{/*~h*/}/*~i*/",
});
| apache-2.0 |
AsynkronIT/gam | router/routeractor_pool_test.go | 2251 | package router
import (
"testing"
"github.com/AsynkronIT/protoactor-go/actor"
"github.com/stretchr/testify/mock"
)
func TestPoolRouterActor_Receive_AddRoute(t *testing.T) {
	// An AddRoutee message should watch the new PID and add it to the routee set.
	state := new(testRouterState)
	a := poolRouterActor{state: state}
	p1 := system.NewLocalPID("p1")
	c := new(mockContext)
	c.On("Message").Return(&AddRoutee{p1})
	c.On("Watch", p1).Once()
	state.On("GetRoutees").Return(&actor.PIDSet{})
	state.On("SetRoutees", actor.NewPIDSet(p1)).Once()
	a.Receive(c)
	mock.AssertExpectationsForObjects(t, state, c)
}
func TestPoolRouterActor_Receive_AddRoute_NoDuplicates(t *testing.T) {
	// Adding a routee that is already present must be a no-op:
	// no Watch call and no SetRoutees call are expected on the mocks.
	state := new(testRouterState)
	a := poolRouterActor{state: state}
	p1 := system.NewLocalPID("p1")
	c := new(mockContext)
	c.On("Message").Return(&AddRoutee{p1})
	state.On("GetRoutees").Return(actor.NewPIDSet(p1))
	a.Receive(c)
	mock.AssertExpectationsForObjects(t, state, c)
}
func TestPoolRouterActor_Receive_RemoveRoute(t *testing.T) {
	// Removing a routee should unwatch it, poison it, and persist the
	// remaining set (here: only p2 is left).
	state := new(testRouterState)
	a := poolRouterActor{state: state}
	p1, pr1 := spawnMockProcess("p1")
	defer removeMockProcess(p1)
	// The removed routee receives a PoisonPill exactly once.
	pr1.On("SendUserMessage", p1, &actor.PoisonPill{}).Once()
	p2 := system.NewLocalPID("p2")
	c := new(mockContext)
	c.On("Message").Return(&RemoveRoutee{p1})
	c.On("Unwatch", p1).Once()
	c.On("Send")
	state.On("GetRoutees").Return(actor.NewPIDSet(p1, p2))
	state.On("SetRoutees", actor.NewPIDSet(p2)).Once()
	a.Receive(c)
	mock.AssertExpectationsForObjects(t, state, c)
}
func TestPoolRouterActor_Receive_BroadcastMessage(t *testing.T) {
	// A BroadcastMessage should be forwarded to every routee; both PIDs share
	// one mock process, so it must see exactly two user messages.
	state := new(testRouterState)
	a := poolRouterActor{state: state}
	p1 := system.NewLocalPID("p1")
	p2 := system.NewLocalPID("p2")
	child := new(mockProcess)
	child.On("SendUserMessage", mock.Anything, mock.Anything).Times(2)
	system.ProcessRegistry.Add(child, "p1")
	system.ProcessRegistry.Add(child, "p2")
	// Clean the registry so other tests see a fresh process table.
	defer func() {
		system.ProcessRegistry.Remove(&actor.PID{Id: "p1"})
		system.ProcessRegistry.Remove(&actor.PID{Id: "p2"})
	}()
	c := new(mockContext)
	c.On("Message").Return(&BroadcastMessage{"hi"})
	c.On("Sender").Return((*actor.PID)(nil))
	c.On("RequestWithCustomSender").Twice()
	state.On("GetRoutees").Return(actor.NewPIDSet(p1, p2))
	a.Receive(c)
	mock.AssertExpectationsForObjects(t, state, c, child)
}
| apache-2.0 |
ehlerst/hound-testing | resources/exchange.rb | 981 | # resources/exchange.rb
#
# Author: Simple Finance <ops@simple.com>
# License: Apache License, Version 2.0
#
# Copyright 2013 Simple Finance Technology Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Declare, manage, and delete RabbitMQ exchanges.
# Supported actions; :declare is the default.
actions(:declare, :delete)
default_action(:declare)

# Exchange name (defaults to the resource's name attribute).
attribute(:exchange, kind_of: String, name_attribute: true)
# RabbitMQ virtual host the exchange belongs to.
attribute(:vhost, kind_of: String, required: true)
# Extra exchange attributes passed through to RabbitMQ (e.g. type, durable).
attribute(:attrs, kind_of: Hash, default: {})
| apache-2.0 |
messaoudiDEV/citypocketBackoffice | Symfony/vendor/giggsey/libphonenumber-for-php/src/geocoding/data/fr/230.php | 227 | <?php
/**
* This file is automatically @generated by {@link GeneratePhonePrefixData}.
* Please don't modify it directly.
*/
// Geographical descriptions (French) for phone-number prefixes of country code 230.
return array (
    2302 => 'Région Nord',
    2304 => 'Région Centrale',
    2306 => 'Région Sud',
);
| apache-2.0 |
cushon/error-prone | core/src/test/java/com/google/errorprone/testdata/ExtendedMultipleTopLevelClassesWithNoErrors.java | 830 | /*
* Copyright 2012 The Error Prone Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.errorprone.testdata;
/**
 * Test data: a subclass of {@code MultipleTopLevelClassesWithNoErrors} used to
 * exercise checks across compilation units; it should produce no diagnostics.
 */
public class ExtendedMultipleTopLevelClassesWithNoErrors
    extends MultipleTopLevelClassesWithNoErrors {
  ExtendedMultipleTopLevelClassesWithNoErrors() {
    super(0, 0); // delegate to the two-argument superclass constructor
  }
}
| apache-2.0 |
jaltekruse/incubator-drill | exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/local/LocalPStoreProvider.java | 2530 | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.drill.exec.store.sys.local;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.ConcurrentMap;
import org.apache.drill.common.config.DrillConfig;
import org.apache.drill.exec.ExecConstants;
import org.apache.drill.exec.store.sys.PStore;
import org.apache.drill.exec.store.sys.PStoreConfig;
import org.apache.drill.exec.store.sys.PStoreProvider;
import org.apache.drill.exec.store.sys.PStoreRegistry;
import com.google.common.collect.Maps;
/**
* A really simple provider that stores data in the local file system, one value per file.
*/
/**
 * A really simple provider that stores data in the local file system, one value per file.
 *
 * <p>When local writes are disabled (see {@code ExecConstants.SYS_STORE_PROVIDER_LOCAL_ENABLE_WRITE}),
 * stores are kept purely in memory and shared through {@link #pstores}.
 */
public class LocalPStoreProvider implements PStoreProvider {
  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(LocalPStoreProvider.class);

  /** Root directory under which each value is written as its own file. */
  private final File path;
  /** Whether values may be written to disk; when false, in-memory stores are used instead. */
  private final boolean enableWrite;
  /** Cache of shared in-memory stores, keyed by config; only created when writes are disabled. */
  private ConcurrentMap<PStoreConfig<?>, PStore<?>> pstores;

  public LocalPStoreProvider(DrillConfig config) {
    path = new File(config.getString(ExecConstants.SYS_STORE_PROVIDER_LOCAL_PATH));
    enableWrite = config.getBoolean(ExecConstants.SYS_STORE_PROVIDER_LOCAL_ENABLE_WRITE);
    if (!enableWrite) {
      pstores = Maps.newConcurrentMap();
    }
  }

  public LocalPStoreProvider(PStoreRegistry registry) {
    this(registry.getConfig());
  }

  @Override
  public void close() {
    // Nothing to release: file handles are managed per-store.
  }

  /**
   * Returns a store for the given config: a fresh file-backed store when writes are
   * enabled, otherwise a shared in-memory store (one per config).
   */
  @Override
  @SuppressWarnings("unchecked")
  public <V> PStore<V> getPStore(PStoreConfig<V> storeConfig) throws IOException {
    if (enableWrite) {
      return new LocalPStore<V>(path, storeConfig);
    }
    // In-memory mode: putIfAbsent makes concurrent callers converge on one instance.
    // The unchecked cast is safe because a PStoreConfig<V> key is only ever mapped to a
    // PStore<V> value by this method.
    PStore<V> p = new NoWriteLocalPStore<V>();
    PStore<?> existing = pstores.putIfAbsent(storeConfig, p);
    if (existing != null) {
      return (PStore<V>) existing;
    }
    return p;
  }

  @Override
  public void start() {
    // No background work required for the local provider.
  }
}
| apache-2.0 |
caskdata/coopr-provisioner | lib/provisioner/worker/plugins/automators/chef_solo_automator/resources/cookbooks/dcos/resources/dcos_user.rb | 1601 | #
# Cookbook Name:: dcos
# Recipe:: default
#
# Copyright 2018, Chef Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
resource_name :dcos_user

# Comma-separated ZooKeeper connection string for the DC/OS ensemble.
property :zk_host, String,
         default: 'zk-1.zk:2181,zk-2.zk:2181,zk-3.zk:2181,zk-4.zk:2181,'\
                  'zk-5.zk:2181',
         required: true
# Email address identifying the DC/OS user account (also used as the ZK node name).
property :email, String, required: false

load_current_value do
  require 'zookeeper'
  include Zookeeper::Constants
  z = Zookeeper.new(zk_host)
  begin
    user_node = z.get(path: "/dcos/users/#{email}")
    # Only report a current value when the node actually exists.
    email user_node[:data] if user_node[:rc] == ZOK
  ensure
    # Close the session; the original leaked one ZK connection per resource evaluation.
    z.close
  end
end

action :create do
  # If there is a change, remove and replace the current data
  converge_if_changed :email do
    require 'zookeeper'
    include Zookeeper::Constants
    z = Zookeeper.new(zk_host)
    begin
      z.delete(path: "/dcos/users/#{email}") # Fails cleanly if it doesn't exist.
      z.create(path: "/dcos/users/#{email}", data: email)
    ensure
      z.close
    end
  end
end

action :delete do
  require 'zookeeper'
  include Zookeeper::Constants
  # Remove the user node from Zookeeper
  z = Zookeeper.new(zk_host)
  begin
    z.delete(path: "/dcos/users/#{email}")
  ensure
    z.close
  end
end
| apache-2.0 |
Asdafers/MyAL | app/Lib/TestUtils.php | 18969 | <?php
// Fixture team ids used across the test suite.
define('TEAM_ID_1', 1);
define('TEAM_ID_2', 2);
define('TEAM_ID_EMPTY', 3);
// Fixture player ids; ids 5 and 6 are game masters.
define('PLAYER_ID_1', 1);
define('PLAYER_ID_2', 2);
define('PLAYER_ID_3', 3);
define('PLAYER_ID_4', 4);
define('GAME_MASTER_ID_1', 5);
define('GAME_MASTER_ID_2', 6);
// XP thresholds — presumably the cumulative XP needed to reach these levels; confirm against game config.
define('XP_TO_REACH_LEVEL_10', 2200);
define('XP_TO_REACH_LEVEL_20', 8000);
class TestUtils {
    // Writable model classes managed by the utilities. NOTE(review): the order looks
    // chosen so dependent tables come before the tables they reference (relevant for
    // clearDatabase) — confirm against the schema's foreign keys.
    private $models = array(
        'ActivityRequisiteSummary',
        'Tag',
        'Configuration',
        'XpLog',
        'Notification',
        'Timeline',
        'LogVote',
        'Log',
        'BadgeLog',
        'BadgeRequisite',
        'ActivityRequisite',
        'Badge',
        'Activity',
        'Domain',
        'Team',
        'Player',
        'PlayerType',
        'LogTag'
    );
    // Read-only models backed by database views; instantiated but never cleared.
    private $views = array(
        'PlayerActivitySummary',
        'LastWeekLog',
        'BadgeClaimed',
        'BadgeActivityProgress'
    );
    // Instantiates every model and view through CakePHP's ClassRegistry and exposes
    // each instance as a dynamic property, e.g. $this->Player.
    public function __construct() {
        foreach ($this->models as $model) {
            $this->$model = ClassRegistry::init($model);
        }
        foreach ($this->views as $model) {
            $this->$model = ClassRegistry::init($model);
        }
    }
public function clearDatabase() {
foreach ($this->models as $model) {
$deleted = $this->$model->deleteAll(array($model . '.id <>' => 0), false);
$table = $this->$model->table;
}
}
    // Creates and saves one player. The email is derived from md5($name) so distinct
    // names yield distinct (fake) addresses; password fields are fixed at 123456 and
    // no team is assigned. Returns whatever Model::save() returns.
    public function generatePlayer($name = 'Player', $type = PLAYER_TYPE_PLAYER) {
        $this->generatePlayerTypes();
        $email = md5($name) . '@email.com';
        $saved = $this->Player->save(array('Player' => array(
            'name' => $name,
            'player_type_id' => $type,
            'email' => $email,
            'password' => 123456,
            'repeat_password' => 123456,
            'team_id' => null
        )));
        return $saved;
    }
    // Seeds the two player types (Player, Game Master). Idempotent: does nothing if
    // any player type rows already exist.
    public function generatePlayerTypes() {
        if ($this->PlayerType->find('count') === 0) {
            $this->PlayerType->saveMany(array(
                array('id' => PLAYER_TYPE_PLAYER, 'name' => 'Player'),
                array('id' => PLAYER_TYPE_GAME_MASTER, 'name' => 'Game Master')
            ));
        }
    }
    // Seeds one log-to-tag association per fixture row, pairing log N with tag N
    // for N = 1..8. Assumes the corresponding logs and tags are seeded separately.
    public function generateLogTags() {
        $this->LogTag->saveMany(array(
            array('log_id' => 1, 'tag_id' => 1),
            array('log_id' => 2, 'tag_id' => 2),
            array('log_id' => 3, 'tag_id' => 3),
            array('log_id' => 4, 'tag_id' => 4),
            array('log_id' => 5, 'tag_id' => 5),
            array('log_id' => 6, 'tag_id' => 6),
            array('log_id' => 7, 'tag_id' => 7),
            array('log_id' => 8, 'tag_id' => 8)
        ));
    }
public function generateTags() {
$this->Tag->saveMany(array(
array('id' => 1, 'Tag 1', 'color' => '#000000', 'bonus_type' => '+', 'bonus_value' => 20, 'player_id_owner' => GAME_MASTER_ID_1, 'new' => 1, 'inactive' => 0),
array('id' => 2, 'Tag 2', 'color' => '#000000', 'bonus_type' => '%', 'bonus_value' => 20, 'player_id_owner' => GAME_MASTER_ID_1, 'new' => 1, 'inactive' => 0),
array('id' => 3, 'Tag 3', 'color' => '#000000', 'bonus_type' => '+', 'bonus_value' => 20, 'player_id_owner' => GAME_MASTER_ID_1, 'new' => 0, 'inactive' => 0),
array('id' => 4, 'Tag 4', 'color' => '#000000', 'bonus_type' => '%', 'bonus_value' => 20, 'player_id_owner' => GAME_MASTER_ID_1, 'new' => 0, 'inactive' => 0),
array('id' => 5, 'Tag 5', 'color' => '#000000', 'bonus_type' => '+', 'bonus_value' => 20, 'player_id_owner' => GAME_MASTER_ID_1, 'new' => 0, 'inactive' => 1),
array('id' => 6, 'Tag 6', 'color' => '#000000', 'bonus_type' => '%', 'bonus_value' => 20, 'player_id_owner' => GAME_MASTER_ID_1, 'new' => 0, 'inactive' => 1),
array('id' => 7, 'Tag 7', 'color' => '#000000', 'bonus_type' => '+', 'bonus_value' => 20, 'player_id_owner' => GAME_MASTER_ID_1, 'new' => 0, 'inactive' => 1),
array('id' => 8, 'Tag 8', 'color' => '#000000', 'bonus_type' => '%', 'bonus_value' => 20, 'player_id_owner' => GAME_MASTER_ID_1, 'new' => 0, 'inactive' => 1)
));
}
public function generatePlayers() {
$this->generatePlayerTypes();
$this->Player->saveMany(array(
array('id' => PLAYER_ID_1, 'player_type_id' => PLAYER_TYPE_PLAYER, 'name' => 'Player 1', 'email' => 'email1@email.com', 'password' => '123456', 'repeat_password' => '123456', 'xp' => 500, 'credly_id' => null, 'credly_email' => null, 'verified_in' => date('Y-m-d H:i:s')),
array('id' => PLAYER_ID_2, 'player_type_id' => PLAYER_TYPE_PLAYER, 'name' => 'Player 2', 'email' => 'email2@email.com', 'password' => '123456', 'repeat_password' => '123456', 'xp' => 100, 'credly_id' => null, 'credly_email' => null, 'verified_in' => date('Y-m-d H:i:s')),
array('id' => PLAYER_ID_3, 'player_type_id' => PLAYER_TYPE_PLAYER, 'name' => 'Player 3', 'email' => 'email3@email.com', 'password' => '123456', 'repeat_password' => '123456', 'xp' => 100, 'credly_id' => null, 'credly_email' => null, 'verified_in' => date('Y-m-d H:i:s')),
// Account not verified
array('id' => PLAYER_ID_4, 'player_type_id' => PLAYER_TYPE_PLAYER, 'name' => 'Player 4', 'email' => 'email3@email.com', 'password' => '123456', 'repeat_password' => '123456', 'xp' => 100, 'credly_id' => null, 'credly_email' => null, 'verified_in' => null),
array('id' => GAME_MASTER_ID_1, 'player_type_id' => PLAYER_TYPE_GAME_MASTER, 'name' => 'GameMaster 1', 'email' => 'scrummaster1@email.com', 'password' => '123456', 'repeat_password' => '123456', 'xp' => 999, 'credly_id' => null, 'credly_email' => null, 'verified_in' => date('Y-m-d H:i:s')),
array('id' => GAME_MASTER_ID_2, 'player_type_id' => PLAYER_TYPE_GAME_MASTER, 'name' => 'GameMaster 2', 'email' => 'scrummaster2@email.com', 'password' => '123456', 'repeat_password' => '123456', 'xp' => 999, 'credly_id' => null, 'credly_email' => null, 'verified_in' => date('Y-m-d H:i:s'))
));
$this->Team->updateAll(
array('Team.player_id_owner' => GAME_MASTER_ID_1),
array('Team.id' => array(TEAM_ID_1, TEAM_ID_2))
);
$this->Player->updateAll(
array('team_id' => TEAM_ID_1),
array('Player.id' => array(PLAYER_ID_1, PLAYER_ID_2))
);
$this->Player->updateAll(
array('team_id' => TEAM_ID_2),
array('Player.id' => array(PLAYER_ID_3, PLAYER_ID_4))
);
}
public function generateTeams() {
$this->Team->saveMany(array(
array('id' => TEAM_ID_1, 'name' => 'Team 1'),
array('id' => TEAM_ID_2, 'name' => 'Team 2'),
array('id' => TEAM_ID_EMPTY, 'name' => 'Team Empty'),
));
}
public function generateDomains() {
$this->Domain->saveMany(array(
array('id' => 1, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Domain 1', 'description' => 'Domain description...', 'abbr' => 'DM1', 'color' => '#aaaaaa', 'player_type_id' => PLAYER_TYPE_PLAYER),
array('id' => 2, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Domain 2', 'description' => 'Domain description...', 'abbr' => 'DM2', 'color' => '#bbbbbb', 'player_type_id' => PLAYER_TYPE_PLAYER),
array('id' => 3, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'GM Domain', 'description' => 'SM Domain description...', 'abbr' => 'SM', 'color' => '#cccccc', 'player_type_id' => PLAYER_TYPE_GAME_MASTER)
));
}
public function generateActivities() {
$this->Activity->saveMany(array(
array('id' => 1, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Activity 1', 'reported' => 1, 'acceptance_votes' => 1, 'rejection_votes' => 2, 'domain_id' => 1, 'xp' => rand(5, 100)),
array('id' => 2, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Activity 2', 'reported' => 10, 'acceptance_votes' => 2, 'rejection_votes' => 1, 'domain_id' => 1, 'xp' => rand(5, 100)),
array('id' => 3, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Activity 3', 'reported' => 100, 'acceptance_votes' => 1, 'rejection_votes' => 2, 'domain_id' => 1, 'xp' => rand(5, 100)),
array('id' => 4, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Activity 4', 'reported' => 1000, 'acceptance_votes' => 2, 'rejection_votes' => 1, 'domain_id' => 1, 'xp' => rand(5, 100)),
array('id' => 5, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Activity 5', 'reported' => 10000, 'acceptance_votes' => 1, 'rejection_votes' => 2, 'domain_id' => 2, 'xp' => rand(5, 100)),
array('id' => 6, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Activity 6', 'reported' => 100000, 'acceptance_votes' => 2, 'rejection_votes' => 1, 'domain_id' => 2, 'xp' => rand(5, 100)),
array('id' => 7, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Activity 7', 'reported' => 1000000, 'acceptance_votes' => 1, 'rejection_votes' => 2, 'domain_id' => 2, 'xp' => rand(5, 100)),
array('id' => 8, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Activity 8', 'reported' => 10000000, 'acceptance_votes' => 2, 'rejection_votes' => 1, 'domain_id' => 2, 'xp' => XP_TO_REACH_LEVEL_10),
array('id' => 9, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Activity 9', 'reported' => 100000000, 'acceptance_votes' => 1, 'rejection_votes' => 2, 'domain_id' => 2, 'xp' => XP_TO_REACH_LEVEL_20),
array('id' => 10, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Activity 10', 'reported' => 0, 'acceptance_votes' => 2, 'rejection_votes' => 1, 'domain_id' => 2, 'xp' => 1000),
array('id' => 11, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'SM Activity 1', 'reported' => 0, 'acceptance_votes' => 1, 'rejection_votes' => 2, 'domain_id' => 3, 'xp' => 1000),
array('id' => 12, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'SM Activity 2', 'reported' => 0, 'acceptance_votes' => 2, 'rejection_votes' => 1, 'domain_id' => 3, 'xp' => 1000)
));
}
public function generateInactiveActivities() {
$this->Activity->saveMany(array(
array('id' => 15, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Inactive Activity 1', 'domain_id' => 1, 'inactive' => 1),
array('id' => 16, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Inactive Activity 2', 'domain_id' => 1, 'inactive' => 1),
array('id' => 17, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Inactive Activity 3', 'domain_id' => 1, 'inactive' => 1),
array('id' => 18, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Inactive Activity 4', 'domain_id' => 1, 'inactive' => 1),
array('id' => 19, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Inactive Activity 5', 'domain_id' => 2, 'inactive' => 1),
array('id' => 20, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Inactive Activity 6', 'domain_id' => 2, 'inactive' => 1),
array('id' => 21, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Inactive Activity 7', 'domain_id' => 2, 'inactive' => 1),
array('id' => 22, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Inactive Activity 8', 'domain_id' => 2, 'inactive' => 1),
));
}
public function generateBadges() {
$this->Badge->saveMany(array(
array('id' => 1, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Badge 1', 'domain_id' => 1, 'abbr' => 'BG1'),
array('id' => 2, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Badge 2', 'domain_id' => 1, 'abbr' => 'BG2'),
array('id' => 3, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Badge 3', 'domain_id' => 2, 'abbr' => 'BG3'),
array('id' => 4, 'player_id_owner' => GAME_MASTER_ID_1, 'name' => 'Badge 4', 'domain_id' => 2, 'abbr' => 'BG4'),
));
}
public function generateBadgeRequisites() {
$this->BadgeRequisite->saveMany(array(
array('badge_id' => 4, 'badge_id_requisite' => 3),
array('badge_id' => 3, 'badge_id_requisite' => 2),
array('badge_id' => 2, 'badge_id_requisite' => 1)
));
}
public function generateActivityRequisites() {
$this->ActivityRequisite->saveMany(array(
array('id' => 1, 'badge_id' => 1, 'activity_id' => 1, 'count' => 1),
array('id' => 2, 'badge_id' => 2, 'activity_id' => 2, 'count' => 1),
array('id' => 3, 'badge_id' => 3, 'activity_id' => 3, 'count' => 1),
array('id' => 4, 'badge_id' => 4, 'activity_id' => 4, 'count' => 1)
));
$this->ActivityRequisiteSummary->saveMany(array(
array('id' => 1, 'badge_id' => 1, 'activity_requisite_id' => 1, 'times' => 0, 'player_id_owner' => GAME_MASTER_ID_1),
array('id' => 2, 'badge_id' => 2, 'activity_requisite_id' => 2, 'times' => 0, 'player_id_owner' => GAME_MASTER_ID_1),
array('id' => 3, 'badge_id' => 3, 'activity_requisite_id' => 3, 'times' => 0, 'player_id_owner' => GAME_MASTER_ID_1),
array('id' => 4, 'badge_id' => 4, 'activity_requisite_id' => 4, 'times' => 0, 'player_id_owner' => GAME_MASTER_ID_1)
));
}
public function generateNotifications() {
$this->Notification->saveMany(array(
array('id' => 1, 'title' => 'Notification title', 'player_id' => PLAYER_ID_1, 'type' => 'success', 'read' => 0),
array('id' => 2, 'title' => 'Notification title', 'player_id' => PLAYER_ID_1, 'type' => 'success', 'read' => 0),
array('id' => 3, 'title' => 'Notification title', 'player_id' => PLAYER_ID_2, 'type' => 'success', 'read' => 0),
array('id' => 4, 'title' => 'Notification title', 'player_id' => PLAYER_ID_2, 'type' => 'success', 'read' => 0),
array('id' => 5, 'title' => 'Notification title', 'player_id' => PLAYER_ID_2, 'type' => 'success', 'read' => 1),
array('id' => 6, 'title' => 'Notification title', 'player_id' => PLAYER_ID_2, 'type' => 'success', 'read' => 1),
array('id' => 7, 'title' => 'Notification title', 'player_id' => PLAYER_ID_1, 'type' => 'success', 'read' => 1),
array('id' => 8, 'title' => 'Notification title', 'player_id' => PLAYER_ID_1, 'type' => 'success', 'read' => 1),
));
}
public function generateBadgeLogs() {
$this->BadgeLog->saveMany(array(
array('badge_id' => 1, 'player_id' => PLAYER_ID_1),
array('badge_id' => 2, 'player_id' => PLAYER_ID_1),
array('badge_id' => 3, 'player_id' => PLAYER_ID_1),
array('badge_id' => 4, 'player_id' => PLAYER_ID_1),
array('badge_id' => 1, 'player_id' => PLAYER_ID_2),
array('badge_id' => 2, 'player_id' => PLAYER_ID_2),
array('badge_id' => 3, 'player_id' => PLAYER_ID_2),
array('badge_id' => 4, 'player_id' => PLAYER_ID_2),
));
}
public function generateLogs() {
$currentDate = (new DateTime())->format('Y-m-d');
$lastWeek = new DateTime();
$lastWeek->modify('-7 day');
$lastWeek = $lastWeek->format('Y-m-d');
$lastMonth = new DateTime();
$lastMonth->modify('-1 month - 1 day');
$lastMonth = $lastMonth->format('Y-m-d');
$this->Log->saveMany(array(
array('id' => 1, 'description' => 'random description ' . md5(rand()), 'domain_id' => 1, 'activity_id' => 1, 'player_id' => PLAYER_ID_1, 'player_id_owner' => GAME_MASTER_ID_1, 'acquired' => $currentDate),
array('id' => 2, 'description' => 'random description ' . md5(rand()), 'domain_id' => 1, 'activity_id' => 2, 'player_id' => PLAYER_ID_1, 'player_id_owner' => GAME_MASTER_ID_1, 'acquired' => $currentDate),
array('id' => 3, 'description' => 'random description ' . md5(rand()), 'domain_id' => 1, 'activity_id' => 3, 'player_id' => PLAYER_ID_1, 'player_id_owner' => GAME_MASTER_ID_1, 'acquired' => $lastWeek),
array('id' => 4, 'description' => 'random description ' . md5(rand()), 'domain_id' => 1, 'activity_id' => 4, 'player_id' => PLAYER_ID_1, 'player_id_owner' => GAME_MASTER_ID_1, 'acquired' => $lastMonth),
array('id' => 5, 'description' => 'random description ' . md5(rand()), 'domain_id' => 2, 'activity_id' => 5, 'player_id' => PLAYER_ID_2, 'player_id_owner' => GAME_MASTER_ID_1, 'acquired' => $currentDate),
array('id' => 6, 'description' => 'random description ' . md5(rand()), 'domain_id' => 2, 'activity_id' => 6, 'player_id' => PLAYER_ID_2, 'player_id_owner' => GAME_MASTER_ID_1, 'acquired' => $currentDate),
array('id' => 7, 'description' => 'random description ' . md5(rand()), 'domain_id' => 2, 'activity_id' => 7, 'player_id' => PLAYER_ID_2, 'player_id_owner' => GAME_MASTER_ID_1, 'acquired' => $lastWeek),
array('id' => 8, 'description' => 'random description ' . md5(rand()), 'domain_id' => 2, 'activity_id' => 8, 'player_id' => PLAYER_ID_2, 'player_id_owner' => GAME_MASTER_ID_1, 'acquired' => $lastMonth),
), array('validate' => false));
$result = $this->Log->query('UPDATE log SET reviewed = NOW(), accepted = NOW()');
}
public function generateLogs2() {
$this->Log->saveMany(array(
array('activity_id' => 8, 'player_id' => PLAYER_ID_1, 'player_id_owner' => GAME_MASTER_ID_1, 'xp' => XP_TO_REACH_LEVEL_10, 'acquired' => date('Y-m-d H:i:s')),
array('activity_id' => 9, 'player_id' => PLAYER_ID_1, 'player_id_owner' => GAME_MASTER_ID_1, 'xp' => XP_TO_REACH_LEVEL_20, 'acquired' => date('Y-m-d H:i:s')),
), array('validate' => false));
$result = $this->Log->query('UPDATE log SET reviewed = NOW()');
}
public function generateLogsNotReviewed() {
$this->Log->saveMany(array(
array('activity_id' => 1, 'player_id' => PLAYER_ID_1, 'player_id_owner' => GAME_MASTER_ID_1, 'acquired' => '2014-01-01'),
array('activity_id' => 2, 'player_id' => PLAYER_ID_1, 'player_id_owner' => GAME_MASTER_ID_1, 'acquired' => '2014-01-01'),
array('activity_id' => 3, 'player_id' => PLAYER_ID_1, 'player_id_owner' => GAME_MASTER_ID_1, 'acquired' => '2014-01-01'),
array('activity_id' => 4, 'player_id' => PLAYER_ID_1, 'player_id_owner' => GAME_MASTER_ID_1, 'acquired' => '2014-01-01'),
array('activity_id' => 5, 'player_id' => PLAYER_ID_2, 'player_id_owner' => GAME_MASTER_ID_1, 'acquired' => '2014-01-01'),
array('activity_id' => 6, 'player_id' => PLAYER_ID_2, 'player_id_owner' => GAME_MASTER_ID_1, 'acquired' => '2014-01-01'),
array('activity_id' => 7, 'player_id' => PLAYER_ID_2, 'player_id_owner' => GAME_MASTER_ID_1, 'acquired' => '2014-01-01'),
array('activity_id' => 8, 'player_id' => PLAYER_ID_2, 'player_id_owner' => GAME_MASTER_ID_1, 'acquired' => '2014-01-01'),
), array('validate' => false));
}
} | apache-2.0 |
michalszynkiewicz/pnc | ui/app/common/restclient/_restclient.js | 914 | /*
* JBoss, Home of Professional Open Source.
* Copyright 2014 Red Hat, Inc., and individual contributors
* as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* jshint unused: false */
'use strict';
(function() {
var module = angular.module('pnc.common.restclient', [
'ngResource',
'pnc.util'
]);
module.value('REST_BASE_URL', '/pnc-rest/rest');
})();
| apache-2.0 |
ytfei/guava | guava-tests/test/com/google/common/base/PredicatesTest.java | 32535 | /*
* Copyright (C) 2005 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.base;
import static com.google.common.base.CharMatcher.WHITESPACE;
import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.collect.ImmutableSet;
import com.google.common.testing.ClassSanityTester;
import com.google.common.testing.EqualsTester;
import com.google.common.testing.NullPointerTester;
import com.google.common.testing.SerializableTester;
import junit.framework.TestCase;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.regex.Pattern;
/**
* Unit test for {@link Predicates}.
*
* @author Kevin Bourrillion
*/
@GwtCompatible(emulated = true)
public class PredicatesTest extends TestCase {
private static final Predicate<Integer> TRUE = Predicates.alwaysTrue();
private static final Predicate<Integer> FALSE = Predicates.alwaysFalse();
private static final Predicate<Integer> NEVER_REACHED =
new Predicate<Integer>() {
@Override
public boolean apply(Integer i) {
fail("This predicate should never have been evaluated");
return false;
}
};
/** Instantiable predicate with reasonable hashCode() and equals() methods. */
static class IsOdd implements Predicate<Integer>, Serializable {
private static final long serialVersionUID = 0x150ddL;
@Override
public boolean apply(Integer i) {
return (i.intValue() & 1) == 1;
}
@Override public int hashCode() {
return 0x150dd;
}
@Override public boolean equals(Object obj) {
return obj instanceof IsOdd;
}
@Override public String toString() {
return "IsOdd";
}
}
/**
* Generates a new Predicate per call.
*
* <p>Creating a new Predicate each time helps catch cases where code is
* using {@code x == y} instead of {@code x.equals(y)}.
*/
private static IsOdd isOdd() {
return new IsOdd();
}
/*
* Tests for Predicates.alwaysTrue().
*/
public void testAlwaysTrue_apply() {
assertEvalsToTrue(Predicates.alwaysTrue());
}
public void testAlwaysTrue_equality() throws Exception {
new EqualsTester()
.addEqualityGroup(TRUE, Predicates.alwaysTrue())
.addEqualityGroup(isOdd())
.addEqualityGroup(Predicates.alwaysFalse())
.testEquals();
}
@GwtIncompatible("SerializableTester")
public void testAlwaysTrue_serialization() {
checkSerialization(Predicates.alwaysTrue());
}
/*
* Tests for Predicates.alwaysFalse().
*/
public void testAlwaysFalse_apply() throws Exception {
assertEvalsToFalse(Predicates.alwaysFalse());
}
public void testAlwaysFalse_equality() throws Exception {
new EqualsTester()
.addEqualityGroup(FALSE, Predicates.alwaysFalse())
.addEqualityGroup(isOdd())
.addEqualityGroup(Predicates.alwaysTrue())
.testEquals();
}
@GwtIncompatible("SerializableTester")
public void testAlwaysFalse_serialization() {
checkSerialization(Predicates.alwaysFalse());
}
/*
* Tests for Predicates.not(predicate).
*/
public void testNot_apply() {
assertEvalsToTrue(Predicates.not(FALSE));
assertEvalsToFalse(Predicates.not(TRUE));
assertEvalsLikeOdd(Predicates.not(Predicates.not(isOdd())));
}
public void testNot_equality() {
new EqualsTester()
.addEqualityGroup(Predicates.not(isOdd()), Predicates.not(isOdd()))
.addEqualityGroup(Predicates.not(TRUE))
.addEqualityGroup(isOdd())
.testEquals();
}
public void testNot_equalityForNotOfKnownValues() {
new EqualsTester()
.addEqualityGroup(TRUE, Predicates.alwaysTrue())
.addEqualityGroup(FALSE)
.addEqualityGroup(Predicates.not(TRUE))
.testEquals();
new EqualsTester()
.addEqualityGroup(FALSE, Predicates.alwaysFalse())
.addEqualityGroup(TRUE)
.addEqualityGroup(Predicates.not(FALSE))
.testEquals();
new EqualsTester()
.addEqualityGroup(Predicates.isNull(), Predicates.isNull())
.addEqualityGroup(Predicates.notNull())
.addEqualityGroup(Predicates.not(Predicates.isNull()))
.testEquals();
new EqualsTester()
.addEqualityGroup(Predicates.notNull(), Predicates.notNull())
.addEqualityGroup(Predicates.isNull())
.addEqualityGroup(Predicates.not(Predicates.notNull()))
.testEquals();
}
@GwtIncompatible("SerializableTester")
public void testNot_serialization() {
checkSerialization(Predicates.not(isOdd()));
}
/*
* Tests for all the different flavors of Predicates.and().
*/
@SuppressWarnings("unchecked")
public void testAnd_applyNoArgs() {
assertEvalsToTrue(Predicates.and());
}
@SuppressWarnings("unchecked")
public void testAnd_equalityNoArgs() {
new EqualsTester()
.addEqualityGroup(Predicates.and(), Predicates.and())
.addEqualityGroup(Predicates.and(FALSE))
.addEqualityGroup(Predicates.or())
.testEquals();
}
@GwtIncompatible("SerializableTester")
@SuppressWarnings("unchecked")
public void testAnd_serializationNoArgs() {
checkSerialization(Predicates.and());
}
@SuppressWarnings("unchecked")
public void testAnd_applyOneArg() {
assertEvalsLikeOdd(Predicates.and(isOdd()));
}
@SuppressWarnings("unchecked")
public void testAnd_equalityOneArg() {
Object[] notEqualObjects = {Predicates.and(NEVER_REACHED, FALSE)};
new EqualsTester()
.addEqualityGroup(
Predicates.and(NEVER_REACHED), Predicates.and(NEVER_REACHED))
.addEqualityGroup(notEqualObjects)
.addEqualityGroup(Predicates.and(isOdd()))
.addEqualityGroup(Predicates.and())
.addEqualityGroup(Predicates.or(NEVER_REACHED))
.testEquals();
}
@GwtIncompatible("SerializableTester")
@SuppressWarnings("unchecked")
public void testAnd_serializationOneArg() {
checkSerialization(Predicates.and(isOdd()));
}
public void testAnd_applyBinary() {
assertEvalsLikeOdd(Predicates.and(isOdd(), TRUE));
assertEvalsLikeOdd(Predicates.and(TRUE, isOdd()));
assertEvalsToFalse(Predicates.and(FALSE, NEVER_REACHED));
}
@SuppressWarnings("unchecked")
public void testAnd_equalityBinary() {
new EqualsTester()
.addEqualityGroup(
Predicates.and(TRUE, NEVER_REACHED),
Predicates.and(TRUE, NEVER_REACHED))
.addEqualityGroup(Predicates.and(NEVER_REACHED, TRUE))
.addEqualityGroup(Predicates.and(TRUE))
.addEqualityGroup(Predicates.or(TRUE, NEVER_REACHED))
.testEquals();
}
@GwtIncompatible("SerializableTester")
public void testAnd_serializationBinary() {
checkSerialization(Predicates.and(TRUE, isOdd()));
}
@SuppressWarnings("unchecked")
public void testAnd_applyTernary() {
assertEvalsLikeOdd(Predicates.and(isOdd(), TRUE, TRUE));
assertEvalsLikeOdd(Predicates.and(TRUE, isOdd(), TRUE));
assertEvalsLikeOdd(Predicates.and(TRUE, TRUE, isOdd()));
assertEvalsToFalse(Predicates.and(TRUE, FALSE, NEVER_REACHED));
}
@SuppressWarnings("unchecked")
public void testAnd_equalityTernary() {
new EqualsTester()
.addEqualityGroup(
Predicates.and(TRUE, isOdd(), NEVER_REACHED),
Predicates.and(TRUE, isOdd(), NEVER_REACHED))
.addEqualityGroup(Predicates.and(isOdd(), NEVER_REACHED, TRUE))
.addEqualityGroup(Predicates.and(TRUE))
.addEqualityGroup(Predicates.or(TRUE, isOdd(), NEVER_REACHED))
.testEquals();
}
@GwtIncompatible("SerializableTester")
@SuppressWarnings("unchecked")
public void testAnd_serializationTernary() {
checkSerialization(Predicates.and(TRUE, isOdd(), FALSE));
}
@SuppressWarnings("unchecked")
public void testAnd_applyIterable() {
Collection<Predicate<Integer>> empty = Arrays.asList();
assertEvalsToTrue(Predicates.and(empty));
assertEvalsLikeOdd(Predicates.and(Arrays.asList(isOdd())));
assertEvalsLikeOdd(Predicates.and(Arrays.asList(TRUE, isOdd())));
assertEvalsToFalse(Predicates.and(Arrays.asList(FALSE, NEVER_REACHED)));
}
@SuppressWarnings("unchecked")
public void testAnd_equalityIterable() {
new EqualsTester()
.addEqualityGroup(
Predicates.and(Arrays.asList(TRUE, NEVER_REACHED)),
Predicates.and(Arrays.asList(TRUE, NEVER_REACHED)),
Predicates.and(TRUE, NEVER_REACHED))
.addEqualityGroup(Predicates.and(FALSE, NEVER_REACHED))
.addEqualityGroup(Predicates.or(TRUE, NEVER_REACHED))
.testEquals();
}
@GwtIncompatible("SerializableTester")
@SuppressWarnings("unchecked")
public void testAnd_serializationIterable() {
checkSerialization(Predicates.and(Arrays.asList(TRUE, FALSE)));
}
@SuppressWarnings("unchecked")
public void testAnd_arrayDefensivelyCopied() {
Predicate[] array = {Predicates.alwaysFalse()};
Predicate<Object> predicate = Predicates.and(array);
assertFalse(predicate.apply(1));
array[0] = Predicates.alwaysTrue();
assertFalse(predicate.apply(1));
}
@SuppressWarnings("unchecked")
public void testAnd_listDefensivelyCopied() {
List list = new ArrayList<Predicate>();
Predicate<Object> predicate = Predicates.and(list);
assertTrue(predicate.apply(1));
list.add(Predicates.alwaysFalse());
assertTrue(predicate.apply(1));
}
@SuppressWarnings("unchecked")
public void testAnd_iterableDefensivelyCopied() {
final List list = new ArrayList<Predicate>();
Iterable iterable = new Iterable<Predicate>() {
@Override
public Iterator<Predicate> iterator() {
return list.iterator();
}
};
Predicate<Object> predicate = Predicates.and(iterable);
assertTrue(predicate.apply(1));
list.add(Predicates.alwaysFalse());
assertTrue(predicate.apply(1));
}
/*
* Tests for all the different flavors of Predicates.or().
*/
@SuppressWarnings("unchecked")
public void testOr_applyNoArgs() {
assertEvalsToFalse(Predicates.or());
}
@SuppressWarnings("unchecked")
public void testOr_equalityNoArgs() {
new EqualsTester()
.addEqualityGroup(Predicates.or(), Predicates.or())
.addEqualityGroup(Predicates.or(TRUE))
.addEqualityGroup(Predicates.and())
.testEquals();
}
@GwtIncompatible("SerializableTester")
@SuppressWarnings("unchecked")
public void testOr_serializationNoArgs() {
checkSerialization(Predicates.or());
}
@SuppressWarnings("unchecked")
public void testOr_applyOneArg() {
assertEvalsToTrue(Predicates.or(TRUE));
assertEvalsToFalse(Predicates.or(FALSE));
}
@SuppressWarnings("unchecked")
public void testOr_equalityOneArg() {
new EqualsTester()
.addEqualityGroup(
Predicates.or(NEVER_REACHED), Predicates.or(NEVER_REACHED))
.addEqualityGroup(Predicates.or(NEVER_REACHED, TRUE))
.addEqualityGroup(Predicates.or(TRUE))
.addEqualityGroup(Predicates.or())
.addEqualityGroup(Predicates.and(NEVER_REACHED))
.testEquals();
}
@GwtIncompatible("SerializableTester")
@SuppressWarnings("unchecked")
public void testOr_serializationOneArg() {
checkSerialization(Predicates.or(isOdd()));
}
public void testOr_applyBinary() {
Predicate<Integer> falseOrFalse = Predicates.or(FALSE, FALSE);
Predicate<Integer> falseOrTrue = Predicates.or(FALSE, TRUE);
Predicate<Integer> trueOrAnything = Predicates.or(TRUE, NEVER_REACHED);
assertEvalsToFalse(falseOrFalse);
assertEvalsToTrue(falseOrTrue);
assertEvalsToTrue(trueOrAnything);
}
@SuppressWarnings("unchecked")
public void testOr_equalityBinary() {
new EqualsTester()
.addEqualityGroup(
Predicates.or(FALSE, NEVER_REACHED),
Predicates.or(FALSE, NEVER_REACHED))
.addEqualityGroup(Predicates.or(NEVER_REACHED, FALSE))
.addEqualityGroup(Predicates.or(TRUE))
.addEqualityGroup(Predicates.and(FALSE, NEVER_REACHED))
.testEquals();
}
@GwtIncompatible("SerializableTester")
public void testOr_serializationBinary() {
checkSerialization(Predicates.or(isOdd(), TRUE));
}
@SuppressWarnings("unchecked")
public void testOr_applyTernary() {
assertEvalsLikeOdd(Predicates.or(isOdd(), FALSE, FALSE));
assertEvalsLikeOdd(Predicates.or(FALSE, isOdd(), FALSE));
assertEvalsLikeOdd(Predicates.or(FALSE, FALSE, isOdd()));
assertEvalsToTrue(Predicates.or(FALSE, TRUE, NEVER_REACHED));
}
@SuppressWarnings("unchecked")
public void testOr_equalityTernary() {
new EqualsTester()
.addEqualityGroup(
Predicates.or(FALSE, NEVER_REACHED, TRUE),
Predicates.or(FALSE, NEVER_REACHED, TRUE))
.addEqualityGroup(Predicates.or(TRUE, NEVER_REACHED, FALSE))
.addEqualityGroup(Predicates.or(TRUE))
.addEqualityGroup(Predicates.and(FALSE, NEVER_REACHED, TRUE))
.testEquals();
}
@GwtIncompatible("SerializableTester")
@SuppressWarnings("unchecked")
public void testOr_serializationTernary() {
checkSerialization(Predicates.or(FALSE, isOdd(), TRUE));
}
@SuppressWarnings("unchecked")
public void testOr_applyIterable() {
Predicate<Integer> vacuouslyFalse =
Predicates.or(Collections.<Predicate<Integer>>emptyList());
Predicate<Integer> troo = Predicates.or(Collections.singletonList(TRUE));
/*
* newLinkedList() takes varargs. TRUE and FALSE are both instances of
* Predicate<Integer>, so the call is safe.
*/
Predicate<Integer> trueAndFalse = Predicates.or(Arrays.asList(TRUE, FALSE));
assertEvalsToFalse(vacuouslyFalse);
assertEvalsToTrue(troo);
assertEvalsToTrue(trueAndFalse);
}
@SuppressWarnings("unchecked")
public void testOr_equalityIterable() {
new EqualsTester()
.addEqualityGroup(
Predicates.or(Arrays.asList(FALSE, NEVER_REACHED)),
Predicates.or(Arrays.asList(FALSE, NEVER_REACHED)),
Predicates.or(FALSE, NEVER_REACHED))
.addEqualityGroup(Predicates.or(TRUE, NEVER_REACHED))
.addEqualityGroup(Predicates.and(FALSE, NEVER_REACHED))
.testEquals();
}
@GwtIncompatible("SerializableTester")
@SuppressWarnings("unchecked")
public void testOr_serializationIterable() {
Predicate<Integer> pre = Predicates.or(Arrays.asList(TRUE, FALSE));
Predicate<Integer> post = SerializableTester.reserializeAndAssert(pre);
assertEquals(pre.apply(0), post.apply(0));
}
@SuppressWarnings("unchecked")
public void testOr_arrayDefensivelyCopied() {
Predicate[] array = {Predicates.alwaysFalse()};
Predicate<Object> predicate = Predicates.or(array);
assertFalse(predicate.apply(1));
array[0] = Predicates.alwaysTrue();
assertFalse(predicate.apply(1));
}
@SuppressWarnings("unchecked")
public void testOr_listDefensivelyCopied() {
List list = new ArrayList<Predicate>();
Predicate<Object> predicate = Predicates.or(list);
assertFalse(predicate.apply(1));
list.add(Predicates.alwaysTrue());
assertFalse(predicate.apply(1));
}
@SuppressWarnings("unchecked")
public void testOr_iterableDefensivelyCopied() {
final List list = new ArrayList<Predicate>();
Iterable iterable = new Iterable<Predicate>() {
@Override
public Iterator<Predicate> iterator() {
return list.iterator();
}
};
Predicate<Object> predicate = Predicates.or(iterable);
assertFalse(predicate.apply(1));
list.add(Predicates.alwaysTrue());
assertFalse(predicate.apply(1));
}
/*
* Tests for Predicates.equalTo(x).
*/
public void testIsEqualTo_apply() {
Predicate<Integer> isOne = Predicates.equalTo(1);
assertTrue(isOne.apply(1));
assertFalse(isOne.apply(2));
assertFalse(isOne.apply(null));
}
public void testIsEqualTo_equality() {
new EqualsTester()
.addEqualityGroup(Predicates.equalTo(1), Predicates.equalTo(1))
.addEqualityGroup(Predicates.equalTo(2))
.addEqualityGroup(Predicates.equalTo(null))
.testEquals();
}
@GwtIncompatible("SerializableTester")
public void testIsEqualTo_serialization() {
checkSerialization(Predicates.equalTo(1));
}
public void testIsEqualToNull_apply() {
Predicate<Integer> isNull = Predicates.equalTo(null);
assertTrue(isNull.apply(null));
assertFalse(isNull.apply(1));
}
public void testIsEqualToNull_equality() {
new EqualsTester()
.addEqualityGroup(Predicates.equalTo(null), Predicates.equalTo(null))
.addEqualityGroup(Predicates.equalTo(1))
.addEqualityGroup(Predicates.equalTo("null"))
.testEquals();
}
@GwtIncompatible("SerializableTester")
public void testIsEqualToNull_serialization() {
checkSerialization(Predicates.equalTo(null));
}
/**
* Tests for Predicates.instanceOf(x).
* TODO: Fix the comment style after fixing annotation stripper to remove
* comments properly. Currently, all tests before the comments are removed
* as well.
*/
@GwtIncompatible("Predicates.instanceOf")
public void testIsInstanceOf_apply() {
Predicate<Object> isInteger = Predicates.instanceOf(Integer.class);
assertTrue(isInteger.apply(1));
assertFalse(isInteger.apply(2.0f));
assertFalse(isInteger.apply(""));
assertFalse(isInteger.apply(null));
}
@GwtIncompatible("Predicates.instanceOf")
public void testIsInstanceOf_subclass() {
Predicate<Object> isNumber = Predicates.instanceOf(Number.class);
assertTrue(isNumber.apply(1));
assertTrue(isNumber.apply(2.0f));
assertFalse(isNumber.apply(""));
assertFalse(isNumber.apply(null));
}
@GwtIncompatible("Predicates.instanceOf")
public void testIsInstanceOf_interface() {
  // Interface targets match all implementors; null still never matches.
  Predicate<Object> comparableOnly = Predicates.instanceOf(Comparable.class);
  assertTrue(comparableOnly.apply(1));
  assertTrue(comparableOnly.apply(2.0f));
  assertTrue(comparableOnly.apply(""));
  assertFalse(comparableOnly.apply(null));
}
@GwtIncompatible("Predicates.instanceOf")
public void testIsInstanceOf_equality() {
// Equality of instanceOf predicates is keyed on the target class.
new EqualsTester()
.addEqualityGroup(
Predicates.instanceOf(Integer.class),
Predicates.instanceOf(Integer.class))
.addEqualityGroup(Predicates.instanceOf(Number.class))
.addEqualityGroup(Predicates.instanceOf(Float.class))
.testEquals();
}
@GwtIncompatible("Predicates.instanceOf, SerializableTester")
public void testIsInstanceOf_serialization() {
// instanceOf predicates must evaluate identically after serialization.
checkSerialization(Predicates.instanceOf(Integer.class));
}
@GwtIncompatible("Predicates.assignableFrom")
public void testIsAssignableFrom_apply() {
  // assignableFrom(Integer.class) accepts Integer.class itself and rejects
  // unrelated classes; applying it to null is a programmer error (NPE).
  Predicate<Class<?>> acceptsInteger = Predicates.assignableFrom(Integer.class);
  assertTrue(acceptsInteger.apply(Integer.class));
  assertFalse(acceptsInteger.apply(Float.class));
  try {
    acceptsInteger.apply(null);
    fail();
  } catch (NullPointerException expected) {
  }
}
@GwtIncompatible("Predicates.assignableFrom")
public void testIsAssignableFrom_subclass() {
  // Both Integer and Float are assignable to Number.
  Predicate<Class<?>> acceptsNumber = Predicates.assignableFrom(Number.class);
  assertTrue(acceptsNumber.apply(Integer.class));
  assertTrue(acceptsNumber.apply(Float.class));
}
@GwtIncompatible("Predicates.assignableFrom")
public void testIsAssignableFrom_interface() {
  // Interface targets accept any class that implements them.
  Predicate<Class<?>> acceptsComparable =
      Predicates.assignableFrom(Comparable.class);
  assertTrue(acceptsComparable.apply(Integer.class));
  assertTrue(acceptsComparable.apply(Float.class));
}
@GwtIncompatible("Predicates.assignableFrom")
public void testIsAssignableFrom_equality() {
// Equality of assignableFrom predicates is keyed on the target class.
new EqualsTester()
.addEqualityGroup(
Predicates.assignableFrom(Integer.class),
Predicates.assignableFrom(Integer.class))
.addEqualityGroup(Predicates.assignableFrom(Number.class))
.addEqualityGroup(Predicates.assignableFrom(Float.class))
.testEquals();
}
@GwtIncompatible("Predicates.assignableFrom, SerializableTester")
public void testIsAssignableFrom_serialization() {
  // The deserialized copy must evaluate exactly like the original,
  // including the NPE thrown for a null argument.
  Predicate<Class<?>> original = Predicates.assignableFrom(Integer.class);
  Predicate<Class<?>> copy = SerializableTester.reserializeAndAssert(original);
  assertEvalsLike(original, copy, Integer.class);
  assertEvalsLike(original, copy, Float.class);
  assertEvalsLike(original, copy, null);
}
/*
* Tests for Predicates.isNull()
*/
public void testIsNull_apply() {
  // isNull() accepts exactly the null reference.
  Predicate<Integer> nullChecker = Predicates.isNull();
  assertTrue(nullChecker.apply(null));
  assertFalse(nullChecker.apply(1));
}
public void testIsNull_equality() {
// isNull() is a singleton-style predicate; it must not equal notNull().
new EqualsTester()
.addEqualityGroup(Predicates.isNull(), Predicates.isNull())
.addEqualityGroup(Predicates.notNull())
.testEquals();
}
@GwtIncompatible("SerializableTester")
public void testIsNull_serialization() {
  // Serialization must preserve evaluation for both null and non-null input.
  Predicate<String> original = Predicates.isNull();
  Predicate<String> copy = SerializableTester.reserializeAndAssert(original);
  assertEquals(original.apply("foo"), copy.apply("foo"));
  assertEquals(original.apply(null), copy.apply(null));
}
public void testNotNull_apply() {
  // notNull() is the exact complement of isNull().
  Predicate<Integer> nonNullChecker = Predicates.notNull();
  assertFalse(nonNullChecker.apply(null));
  assertTrue(nonNullChecker.apply(1));
}
public void testNotNull_equality() {
// notNull() instances are interchangeable but distinct from isNull().
new EqualsTester()
.addEqualityGroup(Predicates.notNull(), Predicates.notNull())
.addEqualityGroup(Predicates.isNull())
.testEquals();
}
@GwtIncompatible("SerializableTester")
public void testNotNull_serialization() {
// notNull() must evaluate identically after a serialization round trip.
checkSerialization(Predicates.notNull());
}
public void testIn_apply() {
  // in(collection) matches exactly the members of the collection;
  // null is absent here, so it must be rejected rather than throw.
  Predicate<Integer> memberOfOneOrFive = Predicates.in(Arrays.asList(1, 5));
  assertTrue(memberOfOneOrFive.apply(1));
  assertTrue(memberOfOneOrFive.apply(5));
  assertFalse(memberOfOneOrFive.apply(3));
  assertFalse(memberOfOneOrFive.apply(null));
}
public void testIn_equality() {
// Equality is based on the backing collection's equals(); two ImmutableSets
// with the same elements are equal regardless of construction order.
Collection<Integer> nums = ImmutableSet.of(1, 5);
Collection<Integer> sameOrder = ImmutableSet.of(1, 5);
Collection<Integer> differentOrder = ImmutableSet.of(5, 1);
Collection<Integer> differentNums = ImmutableSet.of(1, 3, 5);
new EqualsTester()
.addEqualityGroup(Predicates.in(nums), Predicates.in(nums),
Predicates.in(sameOrder), Predicates.in(differentOrder))
.addEqualityGroup(Predicates.in(differentNums))
.testEquals();
}
@GwtIncompatible("SerializableTester")
public void testIn_serialization() {
// The backing collection — including a null element — must serialize.
checkSerialization(Predicates.in(Arrays.asList(1, 2, 3, null)));
}
public void testIn_handlesNullPointerException() {
// A null-hostile backing collection throws NPE from contains(); the in()
// predicate must swallow that and report false instead of propagating.
class CollectionThatThrowsNPE<T> extends ArrayList<T> {
private static final long serialVersionUID = 1L;
@Override public boolean contains(Object element) {
Preconditions.checkNotNull(element);
return super.contains(element);
}
}
Collection<Integer> nums = new CollectionThatThrowsNPE<Integer>();
Predicate<Integer> isFalse = Predicates.in(nums);
assertFalse(isFalse.apply(null));
}
public void testIn_handlesClassCastException() {
// Some collections (e.g. sorted sets) throw CCE for incompatible elements;
// the in() predicate must treat that as "not contained" and return false.
class CollectionThatThrowsCCE<T> extends ArrayList<T> {
private static final long serialVersionUID = 1L;
@Override public boolean contains(Object element) {
throw new ClassCastException("");
}
}
Collection<Integer> nums = new CollectionThatThrowsCCE<Integer>();
nums.add(3);
Predicate<Integer> isThree = Predicates.in(nums);
assertFalse(isThree.apply(3));
}
/*
* Tests that compilation will work when applying explicit types.
*/
@SuppressWarnings("unused")
public void testIn_compilesWithExplicitSupertype() {
// Compile-time-only test: the locals are intentionally unused; this method
// just proves which generic instantiations of in() typecheck.
Collection<Number> nums = ImmutableSet.of();
Predicate<Number> p1 = Predicates.in(nums);
Predicate<Object> p2 = Predicates.<Object>in(nums);
// The next two lines are not expected to compile.
// Predicate<Integer> p3 = Predicates.in(nums);
// Predicate<Integer> p4 = Predicates.<Integer>in(nums);
}
@GwtIncompatible("NullPointerTester")
public void testNullPointerExceptions() {
  // Every public static factory must reject nulls it does not document.
  new NullPointerTester().testAllPublicStaticMethods(Predicates.class);
}
@SuppressWarnings("unchecked") // varargs
@GwtIncompatible("SerializableTester") // fixed typo: was "SerializbleTester"
public void testCascadingSerialization() throws Exception {
  // Builds a deeply nested not(and(or(...))) predicate so that every kind of
  // composite predicate appears in one object graph, then verifies both the
  // original and the reserialized copy still evaluate to false everywhere.
  // Eclipse says Predicate<Integer>; javac says Predicate<Object>.
  Predicate<? super Integer> nasty = Predicates.not(Predicates.and(
      Predicates.or(
          Predicates.equalTo((Object) 1), Predicates.equalTo(null),
          Predicates.alwaysFalse(), Predicates.alwaysTrue(),
          Predicates.isNull(), Predicates.notNull(),
          Predicates.in(Arrays.asList(1)))));
  assertEvalsToFalse(nasty);
  Predicate<? super Integer> stillNasty =
      SerializableTester.reserializeAndAssert(nasty);
  assertEvalsToFalse(stillNasty);
}
// enum singleton pattern
private enum TrimStringFunction implements Function<String, String> {
INSTANCE;
// Strips leading/trailing whitespace as defined by the WHITESPACE matcher.
@Override
public String apply(String string) {
return WHITESPACE.trimFrom(string);
}
}
public void testCompose() {
// compose(p, f) applies f first, then p — so trimming makes " Foo " match.
Function<String, String> trim = TrimStringFunction.INSTANCE;
Predicate<String> equalsFoo = Predicates.equalTo("Foo");
Predicate<String> equalsBar = Predicates.equalTo("Bar");
Predicate<String> trimEqualsFoo = Predicates.compose(equalsFoo, trim);
Function<String, String> identity = Functions.identity();
assertTrue(trimEqualsFoo.apply("Foo"));
assertTrue(trimEqualsFoo.apply(" Foo "));
assertFalse(trimEqualsFoo.apply("Foo-b-que"));
// Equality of composed predicates is keyed on both components.
new EqualsTester()
.addEqualityGroup(trimEqualsFoo, Predicates.compose(equalsFoo, trim))
.addEqualityGroup(equalsFoo)
.addEqualityGroup(trim)
.addEqualityGroup(Predicates.compose(equalsFoo, identity))
.addEqualityGroup(Predicates.compose(equalsBar, trim))
.testEquals();
}
@GwtIncompatible("SerializableTester")
public void testComposeSerialization() {
  // A composed predicate must survive a serialization round trip.
  Predicate<String> trimEqualsFoo =
      Predicates.compose(Predicates.equalTo("Foo"), TrimStringFunction.INSTANCE);
  SerializableTester.reserializeAndAssert(trimEqualsFoo);
}
/**
* Tests for Predicates.contains(Pattern) and .containsPattern(String).
* We assume the regex level works, so there are only trivial tests of that
* aspect.
* TODO: Fix comment style once annotation stripper is fixed.
*/
@GwtIncompatible("Predicates.containsPattern")
public void testContainsPattern_apply() {
  // containsPattern does a regex find; the ^...$ anchors make it a full match.
  Predicate<CharSequence> matchesFoobar =
      Predicates.containsPattern("^Fo.*o.*bar$");
  assertTrue(matchesFoobar.apply("Foxyzoabcbar"));
  assertFalse(matchesFoobar.apply("Foobarx"));
}
@GwtIncompatible("Predicates.containsPattern")
public void testContains_apply() {
  // contains(Pattern) behaves like containsPattern(String) for the same regex.
  Predicate<CharSequence> matchesFoobar =
      Predicates.contains(Pattern.compile("^Fo.*o.*bar$"));
  assertTrue(matchesFoobar.apply("Foxyzoabcbar"));
  assertFalse(matchesFoobar.apply("Foobarx"));
}
@GwtIncompatible("NullPointerTester")
public void testContainsPattern_nulls() throws Exception {
  // Public instance methods of the predicate must be null-hostile.
  Predicate<CharSequence> containsWoo = Predicates.containsPattern("Woo");
  new NullPointerTester().testAllPublicInstanceMethods(containsWoo);
}
@GwtIncompatible("NullPointerTester")
public void testContains_nulls() throws Exception {
  // Same null-hostility check for the Pattern-based variant.
  Predicate<CharSequence> containsWoo =
      Predicates.contains(Pattern.compile("Woo"));
  new NullPointerTester().testAllPublicInstanceMethods(containsWoo);
}
@GwtIncompatible("SerializableTester")
public void testContainsPattern_serialization() {
// Pattern itself is not serializable, so the predicate must reconstruct it;
// evaluation before and after the round trip must agree.
Predicate<CharSequence> pre = Predicates.containsPattern("foo");
Predicate<CharSequence> post = SerializableTester.reserializeAndAssert(pre);
assertEquals(pre.apply("foo"), post.apply("foo"));
}
@GwtIncompatible("java.util.regex.Pattern")
public void testContains_equals() {
// contains(Pattern) and containsPattern(String) with the same regex and
// flags are equal; differing flags or regexes are not.
new EqualsTester()
.addEqualityGroup(
Predicates.contains(Pattern.compile("foo")),
Predicates.containsPattern("foo"))
.addEqualityGroup(
Predicates.contains(
Pattern.compile("foo", Pattern.CASE_INSENSITIVE)))
.addEqualityGroup(
Predicates.containsPattern("bar"))
.testEquals();
}
public void assertEqualHashCode(
Predicate<? super Integer> expected, Predicate<? super Integer> actual) {
// The failure message names both predicates to make mismatches diagnosable.
assertEquals(actual.toString() + " should hash like " + expected.toString(),
expected.hashCode(), actual.hashCode());
}
public void testHashCodeForBooleanOperations() {
// Two composites built from the same operands must hash identically.
Predicate<Integer> p1 = Predicates.isNull();
Predicate<Integer> p2 = isOdd();
// Make sure that hash codes are not computed per-instance.
assertEqualHashCode(
Predicates.not(p1),
Predicates.not(p1));
assertEqualHashCode(
Predicates.and(p1, p2),
Predicates.and(p1, p2));
assertEqualHashCode(
Predicates.or(p1, p2),
Predicates.or(p1, p2));
// While not a contractual requirement, we'd like the hash codes for ands
// & ors of the same predicates to not collide.
assertTrue(Predicates.and(p1, p2).hashCode() != Predicates.or(p1, p2).hashCode());
}
@GwtIncompatible("reflection")
public void testNulls() throws Exception {
// Reflectively null-checks every public static factory in Predicates.
new ClassSanityTester().forAllPublicStaticMethods(Predicates.class).testNulls();
}
@GwtIncompatible("reflection")
public void testEqualsAndSerializable() throws Exception {
// Reflectively verifies equals/hashCode and serializability of factories.
new ClassSanityTester().forAllPublicStaticMethods(Predicates.class).testEqualsAndSerializable();
}
private static void assertEvalsToTrue(Predicate<? super Integer> predicate) {
  // The predicate must accept all three canonical sample inputs: 0, 1, null.
  for (Integer input : new Integer[] {0, 1, null}) {
    assertTrue(predicate.apply(input));
  }
}
private static void assertEvalsToFalse(Predicate<? super Integer> predicate) {
  // The predicate must reject all three canonical sample inputs: 0, 1, null.
  for (Integer input : new Integer[] {0, 1, null}) {
    assertFalse(predicate.apply(input));
  }
}
private static void assertEvalsLikeOdd(Predicate<? super Integer> predicate) {
// Shorthand: the given predicate must behave exactly like isOdd().
assertEvalsLike(isOdd(), predicate);
}
private static void assertEvalsLike(
    Predicate<? super Integer> expected,
    Predicate<? super Integer> actual) {
  // Compare behavior on the canonical sample inputs: 0, 1 and null.
  for (Integer input : new Integer[] {0, 1, null}) {
    assertEvalsLike(expected, actual, input);
  }
}
private static <T> void assertEvalsLike(
Predicate<? super T> expected,
Predicate<? super T> actual,
T input) {
// Applies both predicates to the same input and requires them to agree:
// either both return the same Boolean, or both throw a RuntimeException of
// the same class. A result stays null only if that side threw.
Boolean expectedResult = null;
RuntimeException expectedRuntimeException = null;
try {
expectedResult = expected.apply(input);
} catch (RuntimeException e) {
expectedRuntimeException = e;
}
Boolean actualResult = null;
RuntimeException actualRuntimeException = null;
try {
actualResult = actual.apply(input);
} catch (RuntimeException e) {
actualRuntimeException = e;
}
// If one side threw and the other returned, the null/non-null results differ.
assertEquals(expectedResult, actualResult);
if (expectedRuntimeException != null) {
assertNotNull(actualRuntimeException);
assertEquals(
expectedRuntimeException.getClass(),
actualRuntimeException.getClass());
}
}
@GwtIncompatible("SerializableTester")
private static void checkSerialization(Predicate<? super Integer> predicate) {
// Round-trips the predicate and verifies the copy evaluates identically.
Predicate<? super Integer> reserialized =
SerializableTester.reserializeAndAssert(predicate);
assertEvalsLike(predicate, reserialized);
}
}
| apache-2.0 |
jtulach/RxJava | src/test/java/rx/plugins/RxJavaPluginsTest.java | 5521 | /**
* Copyright 2014 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.plugins;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import rx.Observable;
import rx.Subscriber;
// Verifies plugin discovery for RxJavaPlugins: defaults, explicit
// registration, and system-property-based lookup, plus error-handler hooks.
public class RxJavaPluginsTest {
// The plugin registry is global state; reset around every test so one
// test's registration cannot leak into another.
@Before
public void resetBefore() {
RxJavaPlugins.getInstance().reset();
}
@After
public void resetAfter() {
RxJavaPlugins.getInstance().reset();
}
@Test
public void testErrorHandlerDefaultImpl() {
// With nothing registered, the shared default handler instance is returned.
RxJavaErrorHandler impl = new RxJavaPlugins().getErrorHandler();
assertSame(RxJavaPlugins.DEFAULT_ERROR_HANDLER, impl);
}
@Test
public void testErrorHandlerViaRegisterMethod() {
RxJavaPlugins p = new RxJavaPlugins();
p.registerErrorHandler(new RxJavaErrorHandlerTestImpl());
RxJavaErrorHandler impl = p.getErrorHandler();
assertTrue(impl instanceof RxJavaErrorHandlerTestImpl);
}
@Test
public void testErrorHandlerViaProperty() {
// The handler can also be chosen by class name via a system property;
// clear it in finally so other tests are unaffected.
try {
RxJavaPlugins p = new RxJavaPlugins();
String fullClass = getFullClassNameForTestClass(RxJavaErrorHandlerTestImpl.class);
System.setProperty("rxjava.plugin.RxJavaErrorHandler.implementation", fullClass);
RxJavaErrorHandler impl = p.getErrorHandler();
assertTrue(impl instanceof RxJavaErrorHandlerTestImpl);
} finally {
System.clearProperty("rxjava.plugin.RxJavaErrorHandler.implementation");
}
}
// inside test so it is stripped from Javadocs
public static class RxJavaErrorHandlerTestImpl extends RxJavaErrorHandler {
// Records the last error seen and how many times handleError ran.
private volatile Throwable e;
private volatile int count = 0;
@Override
public void handleError(Throwable e) {
e.printStackTrace();
this.e = e;
count++;
}
}
@Test
public void testObservableExecutionHookDefaultImpl() {
RxJavaPlugins p = new RxJavaPlugins();
RxJavaObservableExecutionHook impl = p.getObservableExecutionHook();
assertTrue(impl instanceof RxJavaObservableExecutionHookDefault);
}
@Test
public void testObservableExecutionHookViaRegisterMethod() {
RxJavaPlugins p = new RxJavaPlugins();
p.registerObservableExecutionHook(new RxJavaObservableExecutionHookTestImpl());
RxJavaObservableExecutionHook impl = p.getObservableExecutionHook();
assertTrue(impl instanceof RxJavaObservableExecutionHookTestImpl);
}
@Test
public void testObservableExecutionHookViaProperty() {
// Same property-based lookup mechanism as the error handler above.
try {
RxJavaPlugins p = new RxJavaPlugins();
String fullClass = getFullClassNameForTestClass(RxJavaObservableExecutionHookTestImpl.class);
System.setProperty("rxjava.plugin.RxJavaObservableExecutionHook.implementation", fullClass);
RxJavaObservableExecutionHook impl = p.getObservableExecutionHook();
assertTrue(impl instanceof RxJavaObservableExecutionHookTestImpl);
} finally {
System.clearProperty("rxjava.plugin.RxJavaObservableExecutionHook.implementation");
}
}
@Test
public void testOnErrorWhenImplementedViaSubscribe() {
// When the subscriber implements onError, the handler is still notified.
RxJavaErrorHandlerTestImpl errorHandler = new RxJavaErrorHandlerTestImpl();
RxJavaPlugins.getInstance().registerErrorHandler(errorHandler);
RuntimeException re = new RuntimeException("test onError");
Observable.error(re).subscribe(new Subscriber<Object>() {
@Override
public void onCompleted() {
}
@Override
public void onError(Throwable e) {
}
@Override
public void onNext(Object args) {
}
});
assertEquals(re, errorHandler.e);
assertEquals(1, errorHandler.count);
}
@Test
public void testOnErrorWhenNotImplemented() {
// With no onError implementation the error is rethrown to the caller,
// but the registered handler must still have seen it exactly once.
RxJavaErrorHandlerTestImpl errorHandler = new RxJavaErrorHandlerTestImpl();
RxJavaPlugins.getInstance().registerErrorHandler(errorHandler);
RuntimeException re = new RuntimeException("test onError");
try {
Observable.error(re).subscribe();
fail("should fail");
} catch (Throwable e) {
// ignore as we expect it to throw
}
assertEquals(re, errorHandler.e);
assertEquals(1, errorHandler.count);
}
// inside test so it is stripped from Javadocs
public static class RxJavaObservableExecutionHookTestImpl extends RxJavaObservableExecutionHook {
// just use defaults
}
// Builds the binary name of a nested test class (Outer$Inner) for the
// property-based plugin lookup above.
private static String getFullClassNameForTestClass(Class<?> cls) {
return RxJavaPlugins.class.getPackage().getName() + "." + RxJavaPluginsTest.class.getSimpleName() + "$" + cls.getSimpleName();
}
}
| apache-2.0 |
SangKa/RxJS-Docs-CN | spec/operators/sequenceEqual-spec.ts | 11473 | import * as _ from 'lodash';
import marbleTestingSignature = require('../helpers/marble-testing'); // tslint:disable-line:no-require-imports
declare const { asDiagram, rxTestScheduler, time, type };
declare const hot: typeof marbleTestingSignature.hot;
declare const cold: typeof marbleTestingSignature.cold;
declare const expectObservable: typeof marbleTestingSignature.expectObservable;
declare const expectSubscriptions: typeof marbleTestingSignature.expectSubscriptions;
// Marble-diagram value lookup: 'T'/'F' notifications map to booleans.
const booleans = { T: true, F: false };
/** @test {sequenceEqual} */
describe('Observable.prototype.sequenceEqual', () => {
asDiagram('sequenceEqual(observable)')('should return true for two equal sequences', () => {
const s1 = hot('--a--^--b--c--d--e--f--g--|');
const s1subs = '^ !';
const s2 = hot('-----^-----b--c--d-e-f------g-|');
const s2subs = '^ !';
const expected = '-------------------------(T|)';
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected, booleans);
expectSubscriptions(s1.subscriptions).toBe(s1subs);
expectSubscriptions(s2.subscriptions).toBe(s2subs);
});
// Synchronous-length mismatches, same-frame completion, and error
// propagation from either side of the comparison.
it('should return false for two sync observables that are unequal in length', () => {
const s1 = cold('(abcdefg|)');
const s2 = cold('(abc|)');
const expected = '(F|)';
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected, booleans);
});
it('should return true for two sync observables that match', () => {
const s1 = cold('(abcdefg|)');
const s2 = cold('(abcdefg|)');
const expected = '(T|)';
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected, booleans);
});
it('should return true for two observables that match when the last one emits and completes in the same frame', () => {
const s1 = hot('--a--^--b--c--d--e--f--g--|');
const s1subs = '^ !';
const s2 = hot('-----^--b--c--d--e--f--g------|');
const s2subs = '^ !';
const expected = '-------------------------(T|)';
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected, booleans);
expectSubscriptions(s1.subscriptions).toBe(s1subs);
expectSubscriptions(s2.subscriptions).toBe(s2subs);
});
it('should return true for two observables that match when the last one emits and completes in the same frame', () => {
const s1 = hot('--a--^--b--c--d--e--f--g--|');
const s1subs = '^ !';
const s2 = hot('-----^--b--c--d--e--f---------(g|)');
const s2subs = '^ !';
const expected = '-------------------------(T|)';
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected, booleans);
expectSubscriptions(s1.subscriptions).toBe(s1subs);
expectSubscriptions(s2.subscriptions).toBe(s2subs);
});
it('should error with an errored source', () => {
const s1 = hot('--a--^--b---c---#');
const s2 = hot('--a--^--b---c-----|');
const expected = '-----------#';
const sub = '^ !';
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected, booleans);
expectSubscriptions(s1.subscriptions).toBe(sub);
expectSubscriptions(s2.subscriptions).toBe(sub);
});
it('should error with an errored compareTo', () => {
const s1 = hot('--a--^--b---c-----|');
const s2 = hot('--a--^--b---c---#');
const expected = '-----------#';
const sub = '^ !';
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected, booleans);
expectSubscriptions(s1.subscriptions).toBe(sub);
expectSubscriptions(s2.subscriptions).toBe(sub);
});
// Edge sources: throw, never, and empty on either side of the comparison.
it('should error if the source is a throw', () => {
const s1 = cold('#'); // throw
const s2 = cold('---a--b--c--|');
const expected = '#'; // throw
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected);
});
it('should never return if source is a never', () => {
const s1 = cold('------------'); // never
const s2 = cold('--a--b--c--|');
const expected = '------------'; // never
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected);
});
it('should never return if compareTo is a never', () => {
const s1 = cold('--a--b--c--|');
const s2 = cold('------------'); // never
const expected = '------------'; // never
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected);
});
it('should return false if source is empty and compareTo is not', () => {
const s1 = cold('|'); // empty
const s2 = cold('------a------');
const expected = '------(F|)';
const subs = '^ !';
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected, booleans);
expectSubscriptions(s1.subscriptions).toBe(subs);
expectSubscriptions(s2.subscriptions).toBe(subs);
});
it('should return false if compareTo is empty and source is not', () => {
const s1 = cold('------a------');
const s2 = cold('|'); // empty
const expected = '------(F|)';
const subs = '^ !';
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected, booleans);
expectSubscriptions(s1.subscriptions).toBe(subs);
expectSubscriptions(s2.subscriptions).toBe(subs);
});
it('should return never if compareTo is empty and source is never', () => {
const s1 = cold('-');
const s2 = cold('|');
const expected = '-';
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected);
});
it('should return never if source is empty and compareTo is never', () => {
const s1 = cold('|');
const s2 = cold('-');
const expected = '-';
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected);
});
// Custom comparator behavior: an exception thrown by the comparator must be
// forwarded as an error; otherwise the comparator decides element equality.
it('should error if the comparor errors', () => {
const s1 = hot('--a--^--b-----c------d--|');
const s1subs = '^ !';
const s2 = hot('-----^--------x---y---z-------|');
const s2subs = '^ !';
const expected = '-------------#';
let i = 0;
const source = s1.sequenceEqual(s2, (a: any, b: any) => {
if (++i === 2) {
throw new Error('shazbot');
}
return a.value === b.value;
});
const values = {
a: null,
b: { value: 'bees knees' },
c: { value: 'carpy dumb' },
d: { value: 'derp' },
x: { value: 'bees knees', foo: 'lol' },
y: { value: 'carpy dumb', scooby: 'doo' },
z: { value: 'derp', weCouldBe: 'dancin, yeah' }
};
expectObservable(source).toBe(expected, _.assign(booleans, values), new Error('shazbot'));
expectSubscriptions(s1.subscriptions).toBe(s1subs);
expectSubscriptions(s2.subscriptions).toBe(s2subs);
});
it('should use the provided comparor', () => {
const s1 = hot('--a--^--b-----c------d--|');
const s1subs = '^ !';
const s2 = hot('-----^--------x---y---z-------|');
const s2subs = '^ !';
const expected = '-------------------------(T|)';
const source = s1.sequenceEqual(s2, (a: any, b: any) => a.value === b.value);
const values = {
a: null,
b: { value: 'bees knees' },
c: { value: 'carpy dumb' },
d: { value: 'derp' },
x: { value: 'bees knees', foo: 'lol' },
y: { value: 'carpy dumb', scooby: 'doo' },
z: { value: 'derp', weCouldBe: 'dancin, yeah' }
};
expectObservable(source).toBe(expected, _.assign(booleans, values));
expectSubscriptions(s1.subscriptions).toBe(s1subs);
expectSubscriptions(s2.subscriptions).toBe(s2subs);
});
// Unequal sequences: mismatched values, and extra values after either side
// completes, must produce false as soon as the mismatch is decidable.
it('should return false for two unequal sequences, compareTo finishing last', () => {
const s1 = hot('--a--^--b--c--d--e--f--g--|');
const s1subs = '^ !';
const s2 = hot('-----^-----b--c--d-e-f------z-|');
const s2subs = '^ !';
const expected = '-----------------------(F|)';
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected, booleans);
expectSubscriptions(s1.subscriptions).toBe(s1subs);
expectSubscriptions(s2.subscriptions).toBe(s2subs);
});
it('should return false for two unequal sequences, early wrong value from source', () => {
const s1 = hot('--a--^--b--c---x-----------|');
const s1subs = '^ !';
const s2 = hot('-----^--b--c--d--e--f--|');
const s2subs = '^ !';
const expected = '----------(F|)';
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected, booleans);
expectSubscriptions(s1.subscriptions).toBe(s1subs);
expectSubscriptions(s2.subscriptions).toBe(s2subs);
});
it('should return false when the source emits an extra value after the compareTo completes', () => {
const s1 = hot('--a--^--b--c--d--e--f--g--h--|');
const s1subs = '^ !';
const s2 = hot('-----^--b--c--d-|');
const s2subs = '^ !';
const expected = '------------(F|)';
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected, booleans);
expectSubscriptions(s1.subscriptions).toBe(s1subs);
expectSubscriptions(s2.subscriptions).toBe(s2subs);
});
it('should return false when the compareTo emits an extra value after the source completes', () => {
const s1 = hot('--a--^--b--c--d-|');
const s1subs = '^ !';
const s2 = hot('-----^--b--c--d--e--f--g--h--|');
const s2subs = '^ !';
const expected = '------------(F|)';
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected, booleans);
expectSubscriptions(s1.subscriptions).toBe(s1subs);
expectSubscriptions(s2.subscriptions).toBe(s2subs);
});
it('should return true for two empty observables', () => {
const s1 = cold('|');
const s2 = cold('|');
const expected = '(T|)';
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected, booleans);
});
// Mixing hot and cold sources: subscription timing decides which portion of
// the hot stream is compared against the cold one.
it('should return false for an empty observable and an observable that emits', () => {
const s1 = cold('|');
const s2 = cold('---a--|');
const expected = '---(F|)';
const source = s1.sequenceEqual(s2);
expectObservable(source).toBe(expected, booleans);
});
it('should return compare hot and cold observables', () => {
const s1 = hot('---a--^---b---c---d---e---f---g---h---i---j---|');
const s2 = cold( '----b---c-|');
const expected1 = '------------(F|)';
const subs1 = '^ !';
const delay = '-------------------|';
const s3 = cold( '-f---g---h---i---j---|');
const expected2 = ' ---------------------(T|)';
const subs2 = ' ^ !';
const test1 = s1.sequenceEqual(s2);
const test2 = s1.sequenceEqual(s3);
expectObservable(test1).toBe(expected1, booleans);
rxTestScheduler.schedule(() => expectObservable(test2).toBe(expected2, booleans), time(delay));
expectSubscriptions(s2.subscriptions).toBe(subs1);
expectSubscriptions(s3.subscriptions).toBe(subs2);
});
});
| apache-2.0 |
Harmiox/harmiox.github.io | harmiox/tuffy/old/admin/skins/touch/_item_block.inc.php | 3619 | <?php
/**
* This is the template that displays the item block
*
* This file is not meant to be called directly.
* It is meant to be called by an include in the main.page.php template (or other templates)
*
* b2evolution - {@link http://b2evolution.net/}
* Released under GNU GPL License - {@link http://b2evolution.net/about/gnu-gpl-license}
* @copyright (c)2003-2015 by Francois Planque - {@link http://fplanque.com/}
*
* @package evoskins
* @subpackage touch
*/
if( !defined('EVO_MAIN_INIT') ) die( 'Please, do not access this page directly.' );
// $Item is the post being rendered; it is set by the including template.
global $Item;
// Default params:
// Callers may override any of these via the $params array they pass in.
$params = array_merge( array(
'feature_block' => false,
'content_mode' => 'auto', // 'auto' will auto select depending on $disp-detail
'item_class' => 'post',
'image_size' => 'fit-400x320',
), $params );
?>
<div class="post" id="<?php $Item->anchor_id() ?>" lang="<?php $Item->lang() ?>">
<?php if( $Item->is_intro() ) { ?>
<div class="sticky-icon"></div>
<?php } ?>
<?php
if( ! $Item->is_intro() )
{ // Link to comments, trackbacks, etc.:
$Item->feedback_link( array(
'type' => 'feedbacks',
'link_before' => '<div class="comment-bubble">',
'link_after' => '</div>',
'link_text_zero' => '',
'link_text_one' => '1',
'link_text_more' => '%d',
'link_title' => '',
) );
}
?>
<a class="post-arrow" id="arrow-<?php echo $Item->ID; ?>" href="javascript:"></a>
<div class="calendar">
<?php // Calendar badge: month abbreviation (CSS class keyed on numeric month) and day. ?>
<div class="cal-month month-<?php $Item->issue_time( array( 'time_format' => 'm', 'before' => '', 'after' => '' ) ); ?>"><?php $Item->issue_time( array( 'time_format' => 'M', 'before' => '', 'after' => '' ) ); ?></div>
<div class="cal-date"><?php $Item->issue_time( array( 'time_format' => 'j', 'before' => '', 'after' => '' ) ); ?></div>
</div>
<?php
$Item->locale_temp_switch(); // Temporarily switch to post locale (useful for multilingual blogs)
?>
<?php
if( ! $Item->is_intro() )
{ // Display only if we're not displaying an intro post:
$Item->edit_link( array( // Link to backoffice for editing
'before' => '<div class="post-actions">',
'after' => '</div>',
) );
}
?>
<?php
$Item->title( array(
'before' => '<h2 class="post-title">',
'after' => '</h2>',
) );
?>
<div class="post-author">
<span class="lead">By</span> <?php $Item->author( array( 'link_text' => 'preferredname' ) ) ?><br />
<?php
if( ! $Item->is_intro() )
{ // Display only if we're not displaying an intro post:
$Item->categories( array(
'before' => '<span class="lead">'.T_('Categories').'</span> ',
'after' => '<br />',
'include_main' => true,
'include_other' => true,
'include_external'=> true,
'link_categories' => true,
) );
}
?>
<?php
// List all tags attached to this post:
$Item->tags( array(
'before' => '<span class="lead">'.T_('Tags').':</span>',
'after' => '',
'separator' => ', ',
) );
?>
</div>
<div class="clearer"></div>
<?php // Collapsed content panel toggled by the .post-arrow link above. ?>
<div id="entry-<?php echo $Item->ID ?>" style="display:none" class="mainentry left-justified">
<?php
// ---------------------- POST CONTENT INCLUDED HERE ----------------------
skin_include( '_item_content.inc.php', $params );
// Note: You can customize the default item content by copying the generic
// /skins/_item_content.inc.php file into the current skin folder.
// -------------------------- END OF POST CONTENT -------------------------
?>
</div>
<div class="clearer"></div>
</div>
<?php
locale_restore_previous(); // Restore previous locale (Blog locale)
?> | apache-2.0 |
BigDataehealthTools/GNOME_Viewer | Web/models.py | 215 | # from __future__ import unicode_literals
from django.db import models
# Create your models here.
class User(models.Model):
    """Minimal user record with a display name and an email address."""

    # Display name shown in the UI.
    name = models.CharField(max_length=100)
    # Contact e-mail address; length 150 — presumably not enforced unique here.
    email = models.CharField(max_length=150)

    def __str__(self):
        # Human-readable representation used by the Django admin and shell.
        return self.name
weikipeng/RxJava | src/main/java/rx/internal/operators/SingleDoOnSubscribe.java | 1398 | /**
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.internal.operators;
import rx.*;
import rx.exceptions.Exceptions;
import rx.functions.Action0;
/**
* Call an Action0 when the subscription happens to the source.
*
* @param <T> the value type
*/
public final class SingleDoOnSubscribe<T> implements Single.OnSubscribe<T> {
// The upstream Single's subscription logic.
final Single.OnSubscribe<T> source;
// Side-effect action invoked before subscribing to the source.
final Action0 onSubscribe;
/**
* @param source the upstream subscription logic to delegate to
* @param onSubscribe the action to run before each subscription
*/
public SingleDoOnSubscribe(Single.OnSubscribe<T> source, Action0 onSubscribe) {
this.source = source;
this.onSubscribe = onSubscribe;
}
@Override
public void call(SingleSubscriber<? super T> t) {
// Run the action first; if it throws a non-fatal error, report the error
// to the subscriber and never subscribe to the source.
try {
onSubscribe.call();
} catch (Throwable ex) {
Exceptions.throwIfFatal(ex);
t.onError(ex);
return;
}
source.call(t);
}
}
| apache-2.0 |
alisonbnt/home-shell | src/entities/hsextra.py | 647 | __author__ = 'alisonbento'
import src.base.arrayparsableentity as parsable
class HomeShellExtra(parsable.ArrayParsableEntity):
    """A key/value "extra" attached to an appliance, serializable to a dict."""

    def __init__(self, extra_id=0, appliance_id=0, extra_key=None, extra_value=None, extra_date=None, created=None):
        # Identity and owning appliance.
        self.id = extra_id
        self.appliance_id = appliance_id
        # The extra's payload: a key, its value, and its timestamps.
        self.extra_key = extra_key
        self.extra_value = extra_value
        self.extra_date = extra_date
        self.created = created

    def to_array(self):
        """Serialize the public fields (appliance_id/created are omitted)."""
        return dict(
            id=self.id,
            key=self.extra_key,
            value=self.extra_value,
            date=self.extra_date,
        )
contentbox-modules/contentbox-s3-filebrowser | modules/contentbox-admin/modules/contentbox-filebrowser/includes/js/src/fbSelectCallbacks.js | 1585 | /**
*********************************************************************************
* Copyright since 2005 ColdBox Framework by Luis Majano and Ortus Solutions, Corp
* www.coldbox.org | www.luismajano.com | www.ortussolutions.com
*********************************************************************************
* This callbacks js is used to place common editor, OS software callbacks so they
* can be reused. You can also modify it to add your own.
*/
/**
 * Selection callback used when the file browser was opened from CKEditor.
 * Hands the selected file URL back to the opener window through CKEditor's
 * numbered-callback mechanism, then closes the browser popup.
 *
 * @param sPath the selected item's path
 * @param sURL  the selected item's public URL
 * @param sType the selected item's type ("dir" for directories)
 */
function fbCKSelect( sPath, sURL, sType ){
	// Directories (or an empty selection) cannot be handed back to the editor.
	if( !sPath.length || sType === "dir" ){
		alert( "Please select a file first." );
		return;
	}
	// Pull a single query-string parameter out of this window's URL.
	function getURLParam( paramName ){
		var pattern = new RegExp( '(?:[\?&]|&)' + paramName + '=([^&]+)', 'i' );
		var hit = window.location.search.match( pattern );
		return ( hit && hit.length > 1 ) ? hit[ 1 ] : '' ;
	}
	// CKEditor passes the id of the callback to invoke in the popup's URL.
	var callbackId = getURLParam( 'CKEditorFuncNum' );
	window.opener.CKEDITOR.tools.callFunction( callbackId, sURL );
	window.close();
}
/**
 * Generic close callback: simply closes the file-browser popup window
 * without reporting any selection back to the opener.
 */
function fbGenericClose(){
	window.close();
}
/**
 * Testing select callback: surfaces the chosen item's path, URL and type in
 * an alert so integrators can verify the file-browser wiring.
 *
 * @param path  selected item's path
 * @param rPath selected item's resolved URL
 * @param type  selected item's type
 */
function fbTestChoose(path, rPath, type){
	var message = "Path: " + path + '\n URL: ' + rPath + '\n Type: ' + type;
	alert( message );
}
/**
 * Testing cancel callback: notifies the tester that the file browser's
 * cancel action fired.
 */
function fbTestCancel(){
	alert('Cancel Called');
}
hooman/swift | utils/swift_build_support/swift_build_support/targets.py | 13954 | # swift_build_support/targets.py - Build target helpers -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import os
import platform
from . import shell
try:
from build_swift.build_swift.wrappers import xcrun
except ImportError:
from build_swift.wrappers import xcrun
class Platform(object):
    """
    Abstract representation of a platform Swift can run on.

    Each instance exposes one ``Target`` per architecture, both through
    ``self.targets`` and as attributes named after the architecture
    (e.g. ``platform.x86_64``).
    """

    def __init__(self, name, archs, sdk_name=None):
        """
        Create a platform with the given name and list of architectures.
        """
        self.name = name
        self.targets = [Target(self, arch) for arch in archs]
        # FIXME: Eliminate this argument; apparently the SDK names are
        # internally a private implementation detail of the build script, so we
        # should just make them the same as the platform name.
        if sdk_name is None:
            self.sdk_name = name.upper()
        else:
            self.sdk_name = sdk_name
        # Make every target reachable as an attribute named after its arch.
        for target in self.targets:
            setattr(self, target.arch, target)

    @property
    def is_darwin(self):
        """Convenience function for checking if this is a Darwin platform."""
        return isinstance(self, DarwinPlatform)

    @property
    def supports_benchmark(self):
        # Benchmarks are opt-in; platforms that support them override this.
        return False

    @property
    def uses_host_tests(self):
        """
        Check if this platform needs a connected device for tests.
        Most platforms run tests on the host itself.
        """
        return False

    def contains(self, target_name):
        """
        Returns True if the given target name belongs to one of this
        platform's targets.
        """
        return any(target.name == target_name for target in self.targets)

    def swift_flags(self, args):
        """
        Swift compiler flags for a platform, useful for cross-compiling
        """
        return ''

    def cmake_options(self, args):
        """
        CMake flags to build for a platform, useful for cross-compiling
        """
        return ''
class DarwinPlatform(Platform):
    """A Darwin (Apple OS) platform, optionally a simulator variant."""

    def __init__(self, name, archs, sdk_name=None, is_simulator=False):
        # Whether this platform is a simulator (e.g. iphonesimulator).
        self.is_simulator = is_simulator
        super(DarwinPlatform, self).__init__(name, archs, sdk_name)

    @property
    def is_embedded(self):
        """Check if this is a Darwin platform for embedded devices."""
        return self.name != "macosx" and self.name != "maccatalyst"

    @property
    def supports_benchmark(self):
        # By default, on Darwin we support benchmarks on all non-simulator
        # platforms.
        return not self.is_simulator

    @property
    def uses_host_tests(self):
        """
        Check if this is a Darwin platform that needs a connected device
        for tests (embedded hardware, i.e. not macOS and not a simulator).
        """
        return self.is_embedded and not self.is_simulator

    def sdk_supports_architecture(self, arch, toolchain):
        """
        Convenience function for checking whether the SDK supports the
        target architecture, by querying the SDK's SDKSettings.plist
        through PlistBuddy.
        """
        # The names match up with the xcrun SDK names.
        xcrun_sdk_name = self.name
        # 32-bit iOS and iOS simulator are supported, but are not covered
        # by the SDK settings. Handle this special case here.
        if (xcrun_sdk_name == 'iphoneos' and
                (arch == 'armv7' or arch == 'armv7s')):
            return True
        if (xcrun_sdk_name == 'iphonesimulator' and arch == 'i386'):
            return True
        sdk_path = xcrun.sdk_path(sdk=xcrun_sdk_name, toolchain=toolchain)
        if not sdk_path:
            raise RuntimeError('Cannot find SDK path for %s' % xcrun_sdk_name)
        # Find the SDKSettings.plist for this SDK and read its supported archs.
        plistCommand = [
            '/usr/libexec/PlistBuddy',
            '-c',
            'Print :SupportedTargets:%s:Archs' % (self.name),
            '%s/SDKSettings.plist' % (sdk_path)
        ]
        sdk_archs = shell.capture(plistCommand, dry_run=False, echo=True)
        # NOTE(review): this is a substring test on PlistBuddy's raw output,
        # so e.g. arch 'arm64' would also match an SDK listing only 'arm64e'
        # -- confirm that this looseness is intended.
        return arch in sdk_archs
class AndroidPlatform(Platform):
    """An Android platform, always cross-compiled via the NDK."""

    @property
    def uses_host_tests(self):
        """
        Android tests always run on a connected device rather than on the
        build host.
        """
        return True

    def swift_flags(self, args):
        """Swift compiler flags needed to cross-compile for Android."""
        toolchain = '%s/toolchains/llvm/prebuilt/%s' % (
            args.android_ndk, StdlibDeploymentTarget.host_target().name)
        pieces = [
            '-target %s-unknown-linux-android%s ' % (args.android_arch,
                                                     args.android_api_level),
            '-resource-dir %s/swift-%s-%s/lib/swift ' % (
                args.build_root, self.name, args.android_arch),
            '-sdk %s/sysroot ' % (toolchain),
            '-tools-directory %s/bin' % (toolchain),
        ]
        return ''.join(pieces)

    def cmake_options(self, args):
        """CMake cache entries required for an Android cross build."""
        # CMake spells the 32-bit ARM processor 'armv7-a' rather than 'armv7'.
        if args.android_arch == 'armv7':
            processor = 'armv7-a'
        else:
            processor = args.android_arch
        return ('-DCMAKE_SYSTEM_NAME=Android '
                '-DCMAKE_SYSTEM_VERSION=%s '
                '-DCMAKE_SYSTEM_PROCESSOR=%s '
                '-DCMAKE_ANDROID_NDK:PATH=%s' % (args.android_api_level,
                                                 processor,
                                                 args.android_ndk))
class Target(object):
    """
    A single (platform, architecture) pair that Swift can be built for.
    """

    def __init__(self, platform, arch):
        self.platform = platform
        self.arch = arch
        # Benchmark support is a platform-wide property; cache it per target.
        self.supports_benchmark = platform.supports_benchmark

    @property
    def name(self):
        """The canonical '<platform>-<arch>' spelling of this target."""
        return "%s-%s" % (self.platform.name, self.arch)
class StdlibDeploymentTarget(object):
    """Registry of every platform/target the Swift stdlib can deploy to.

    Note: the class body below is order-dependent -- the platform instances
    are created first, tweaked in place (e.g. disabling armv7s benchmarks),
    and only afterwards aggregated into `known_platforms` and the lookup
    tables, all at class-definition time.
    """
    OSX = DarwinPlatform("macosx", archs=["x86_64", "arm64"],
                         sdk_name="OSX")
    iOS = DarwinPlatform("iphoneos", archs=["armv7", "armv7s", "arm64", "arm64e"],
                         sdk_name="IOS")
    iOSSimulator = DarwinPlatform("iphonesimulator", archs=["i386", "x86_64", "arm64"],
                                  sdk_name="IOS_SIMULATOR",
                                  is_simulator=True)
    # Never build/test benchmarks on iOS armv7s.
    iOS.armv7s.supports_benchmark = False
    AppleTV = DarwinPlatform("appletvos", archs=["arm64"],
                             sdk_name="TVOS")
    AppleTVSimulator = DarwinPlatform("appletvsimulator", archs=["x86_64", "arm64"],
                                      sdk_name="TVOS_SIMULATOR",
                                      is_simulator=True)
    AppleWatch = DarwinPlatform("watchos", archs=["armv7k", "arm64_32"],
                                sdk_name="WATCHOS")
    AppleWatchSimulator = DarwinPlatform("watchsimulator",
                                         archs=["i386", "x86_64", "arm64"],
                                         sdk_name="WATCHOS_SIMULATOR",
                                         is_simulator=True)
    # A platform that's not tied to any particular OS, and it meant to be used
    # to build the stdlib as standalone and/or statically linked.
    Freestanding = Platform("freestanding",
                            archs=["i386", "x86_64", "armv7", "armv7s", "armv7k",
                                   "arm64", "arm64e"])
    Linux = Platform("linux", archs=[
        "x86_64",
        "i686",
        "armv6",
        "armv7",
        "aarch64",
        "powerpc64",
        "powerpc64le",
        "s390x"])
    FreeBSD = Platform("freebsd", archs=["x86_64"])
    OpenBSD = Platform("openbsd", archs=["amd64"])
    Cygwin = Platform("cygwin", archs=["x86_64"])
    Android = AndroidPlatform("android", archs=["armv7", "aarch64", "x86_64"])
    Windows = Platform("windows", archs=["x86_64"])
    Haiku = Platform("haiku", archs=["x86_64"])
    # The list of known platforms.
    known_platforms = [
        OSX,
        iOS, iOSSimulator,
        AppleTV, AppleTVSimulator,
        AppleWatch, AppleWatchSimulator,
        Freestanding,
        Linux,
        FreeBSD,
        OpenBSD,
        Cygwin,
        Android,
        Windows,
        Haiku]
    # Cache of targets by name, e.g. 'linux-x86_64' -> Target.
    _targets_by_name = dict((target.name, target)
                            for platform in known_platforms
                            for target in platform.targets)
    # Targets grouped by the build script's internal SDK names.
    _sdk_targets = {
        'OSX': OSX.targets,
        'IOS': iOS.targets,
        'IOS_SIMULATOR': iOSSimulator.targets,
        'TVOS': AppleTV.targets,
        'TVOS_SIMULATOR': AppleTVSimulator.targets,
        'WATCHOS': AppleWatch.targets,
        'WATCHOS_SIMULATOR': AppleWatchSimulator.targets,
    }
    @staticmethod
    def host_target():
        """
        Return the host target for the build machine, if it is one of
        the recognized targets. Otherwise, throw a NotImplementedError.
        """
        system = platform.system()
        machine = platform.machine()
        if system == 'Linux':
            # ANDROID_DATA in the environment means we are running under
            # an Android userland, not a regular Linux distribution.
            if 'ANDROID_DATA' in os.environ:
                if machine.startswith('armv7'):
                    return StdlibDeploymentTarget.Android.armv7
                elif machine == 'aarch64':
                    return StdlibDeploymentTarget.Android.aarch64
                raise NotImplementedError('Android System with architecture '
                                          '"%s" is not supported' % machine)
            if machine == 'x86_64':
                return StdlibDeploymentTarget.Linux.x86_64
            elif machine == 'i686':
                return StdlibDeploymentTarget.Linux.i686
            elif machine.startswith('armv7'):
                # linux-armv7* is canonicalized to 'linux-armv7'
                return StdlibDeploymentTarget.Linux.armv7
            elif machine.startswith('armv6'):
                # linux-armv6* is canonicalized to 'linux-armv6'
                return StdlibDeploymentTarget.Linux.armv6
            elif machine == 'aarch64':
                return StdlibDeploymentTarget.Linux.aarch64
            elif machine == 'ppc64':
                return StdlibDeploymentTarget.Linux.powerpc64
            elif machine == 'ppc64le':
                return StdlibDeploymentTarget.Linux.powerpc64le
            elif machine == 's390x':
                return StdlibDeploymentTarget.Linux.s390x
        elif system == 'Darwin':
            if machine == 'x86_64':
                return StdlibDeploymentTarget.OSX.x86_64
            elif machine == 'arm64':
                return StdlibDeploymentTarget.OSX.arm64
            elif machine == 'arm64e':
                return StdlibDeploymentTarget.OSX.arm64e
        elif system == 'FreeBSD':
            if machine == 'amd64':
                return StdlibDeploymentTarget.FreeBSD.x86_64
        elif system == 'OpenBSD':
            if machine == 'amd64':
                return StdlibDeploymentTarget.OpenBSD.amd64
        elif system == 'CYGWIN_NT-10.0':
            if machine == 'x86_64':
                return StdlibDeploymentTarget.Cygwin.x86_64
        elif system == 'Windows':
            if machine == "AMD64":
                return StdlibDeploymentTarget.Windows.x86_64
        elif system == 'Haiku':
            if machine == 'x86_64':
                return StdlibDeploymentTarget.Haiku.x86_64
        raise NotImplementedError('System "%s" with architecture "%s" is not '
                                  'supported' % (system, machine))
    @classmethod
    def get_target_for_name(cls, name):
        """Return the Target named '<platform>-<arch>', or None if unknown."""
        return cls._targets_by_name.get(name)
    @classmethod
    def get_targets_by_name(cls, names):
        """Map a list of target names to Targets (None for unknown names)."""
        return [cls.get_target_for_name(name) for name in names]
    @classmethod
    def get_target_names(cls):
        """Return all known target names, sorted alphabetically."""
        return sorted([name for (name, target) in
                       cls._targets_by_name.items()])
    @classmethod
    def get_migrated_targets_for_sdk(cls, sdk_name):
        """Return the targets for an internal SDK name, or None if unknown."""
        return cls._sdk_targets.get(sdk_name, None)
    @classmethod
    def get_all_migrated_sdks(cls):
        """Return the internal SDK names that have migrated target lists."""
        return cls._sdk_targets.keys()
def install_prefix():
    """
    Return the default installation prefix for built Swift products
    (bin, lib, include, ...), chosen based on the host operating system.
    """
    if platform.system() != 'Darwin':
        return '/usr'
    # On Darwin, products are installed into the default Xcode toolchain.
    return ('/Applications/Xcode.app/Contents/Developer/Toolchains/'
            'XcodeDefault.xctoolchain/usr')
def darwin_toolchain_prefix(darwin_install_prefix):
    """
    Strip the trailing path component from a Darwin install prefix.

    The prefix is assumed to point inside a ``.xctoolchain`` directory
    (e.g. ``.../XcodeDefault.xctoolchain/usr``); the returned path is the
    ``.xctoolchain`` directory itself.
    """
    # os.path.dirname(p) is defined as os.path.split(p)[0].
    return os.path.dirname(darwin_install_prefix)
def toolchain_path(install_destdir, install_prefix):
    """
    Return the path of the toolchain being populated inside the given
    install directory.

    On Darwin this is the ``.xctoolchain``'s ``usr`` directory derived from
    the install prefix; elsewhere the prefix is appended verbatim. This
    toolchain is populated during the build-script invocation, and
    downstream products can use products previously installed into it.
    """
    # The prefix is an absolute path, so concatenate without os.path.
    if platform.system() == 'Darwin':
        suffix = darwin_toolchain_prefix(install_prefix) + "/usr"
    else:
        suffix = install_prefix
    return install_destdir + suffix
| apache-2.0 |
avdi/rust | src/test/run-pass/option-unwrap.rs | 940 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(unsafe_destructor)]
use std::cell::Cell;
// Test helper: a value whose destructor decrements a shared counter,
// letting the test observe exactly when it is dropped.
struct dtor<'a> {
    x: &'a Cell<isize>,
}
#[unsafe_destructor]
impl<'a> Drop for dtor<'a> {
    fn drop(&mut self) {
        // Record the drop by decrementing the borrowed counter cell.
        self.x.set(self.x.get() - 1);
    }
}
/// Moves the value out of `o`, panicking if it is `None`.
fn unwrap<T>(o: Option<T>) -> T {
    match o {
        None => panic!(),
        Some(value) => value,
    }
}
pub fn main() {
    let x = &Cell::new(1);
    {
        // `unwrap` moves the dtor value out of the Option; `_c` then owns
        // it and drops it (decrementing the counter) at the end of this
        // inner scope.
        let b = Some(dtor { x:x });
        let _c = unwrap(b);
    }
    // Exactly one dtor was created and dropped: 1 - 1 == 0.
    assert_eq!(x.get(), 0);
}
| apache-2.0 |
avano/fabric8 | fabric/fabric-zookeeper/src/test/java/io/fabric8/zookeeper/utils/ZookeeperServerTestSupport.java | 3487 | /*
* Copyright 2005-2017 Red Hat, Inc.
*
* Red Hat licenses this file to you under the Apache License, version
* 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package io.fabric8.zookeeper.utils;
import java.io.File;
import java.net.ServerSocket;
import java.util.Date;
import java.util.List;
import org.apache.commons.codec.binary.Hex;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryOneTime;
import org.apache.zookeeper.server.NIOServerCnxnFactory;
import org.apache.zookeeper.server.ServerConfig;
import org.apache.zookeeper.server.ZooKeeperServer;
import org.apache.zookeeper.server.persistence.FileTxnSnapLog;
import org.junit.After;
import org.junit.Before;
/**
 * Base class for tests that need a live, embedded ZooKeeper server.
 *
 * <p>Before each test an in-process ZooKeeper server is started on a free
 * port and a Curator client ({@link #curator}) is connected to it; both are
 * torn down after the test.</p>
 */
public abstract class ZookeeperServerTestSupport {
    /** Curator client connected to the embedded server; usable by subclasses. */
    protected CuratorFramework curator;
    /** Connection factory hosting the embedded ZooKeeper server. */
    protected NIOServerCnxnFactory cnxnFactory;
    @Before
    public void init() throws Exception {
        int port = findFreePort();
        curator = CuratorFrameworkFactory.builder()
                .connectString("localhost:" + port)
                .retryPolicy(new RetryOneTime(1000))
                .build();
        curator.start();
        cnxnFactory = startZooKeeper(port);
        // Block until the client is actually connected so tests can use it
        // immediately.
        curator.getZookeeperClient().blockUntilConnectedOrTimedOut();
    }
    @After
    public void cleanup() throws Exception {
        curator.close();
        cnxnFactory.shutdown();
    }
    /**
     * Returns a TCP port that was free at the moment of the call.
     *
     * <p>Note: the port is released before returning, so a (rare) race with
     * another process grabbing it is still possible.</p>
     */
    protected int findFreePort() throws Exception {
        // try-with-resources guarantees the probe socket is closed even if
        // getLocalPort() were to throw (the original leaked it in that case).
        try (ServerSocket ss = new ServerSocket(0)) {
            return ss.getLocalPort();
        }
    }
    /**
     * Starts an embedded ZooKeeper server listening on the given port, with
     * a fresh, timestamped data directory under {@code target/zk}.
     */
    protected NIOServerCnxnFactory startZooKeeper(int port) throws Exception {
        ServerConfig cfg = new ServerConfig();
        // %015d (zero-padded) rather than the original %15d: space-padding
        // produced data-directory names containing literal spaces.
        cfg.parse(new String[] { Integer.toString(port), "target/zk/data-" + String.format("%015d", new Date().getTime()) });
        ZooKeeperServer zkServer = new ZooKeeperServer();
        FileTxnSnapLog ftxn = new FileTxnSnapLog(new File(cfg.getDataLogDir()), new File(cfg.getDataDir()));
        zkServer.setTxnLogFactory(ftxn);
        zkServer.setTickTime(cfg.getTickTime());
        zkServer.setMinSessionTimeout(cfg.getMinSessionTimeout());
        zkServer.setMaxSessionTimeout(cfg.getMaxSessionTimeout());
        NIOServerCnxnFactory cnxnFactory = new NIOServerCnxnFactory();
        cnxnFactory.configure(cfg.getClientPortAddress(), cfg.getMaxClientCnxns());
        cnxnFactory.startup(zkServer);
        return cnxnFactory;
    }
    /**
     * Dumps the content of the given ZK path and, recursively, all of its
     * children to stdout; node data is rendered as uppercase hex.
     *
     * @param path the ZooKeeper path to dump
     */
    protected void dump(String path) throws Exception {
        byte[] data = curator.getData().forPath(path);
        System.out.printf("%s%s\n", path, (data == null ? "" : " # " + new String(Hex.encodeHex(data, false))));
        List<String> children = curator.getChildren().forPath(path);
        for (String child : children) {
            String fp = path + "/" + child;
            dump(fp);
        }
    }
}
| apache-2.0 |
danilovalente/spring-richclient | spring-richclient-core/src/test/java/org/springframework/binding/value/support/TypeConverterTests.java | 2052 | /*
* Copyright 2002-2004 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.springframework.binding.value.support;
import org.springframework.binding.value.ValueModel;
import org.springframework.rules.closure.Closure;
import org.springframework.richclient.test.SpringRichTestCase;
/**
* Tests class {@link TypeConverter}.
*
* @author Oliver Hutchison
*/
public class TypeConverterTests extends SpringRichTestCase {
    // Wrapped model holding the "raw" (lower-case) value.
    private ValueModel vm = new ValueHolder("whatever!");
    // Converter under test: upper-cases on the way out, lower-cases on the way in.
    private TypeConverter tc = new TypeConverter(vm, new UpperConverter(), new LowerConverter());
    public void testConvertsTo() {
        // Reading through the converter applies the upper-casing closure;
        // the wrapped value stays untouched.
        vm.setValue("test");
        assertEquals("TEST", tc.getValue());
        assertEquals("test", vm.getValue());
    }
    public void testConvertsFrom() {
        // Writing through the converter applies the lower-casing closure
        // before storing into the wrapped model.
        tc.setValue("TEST");
        assertEquals("TEST", tc.getValue());
        assertEquals("test", vm.getValue());
    }
    public void testDoesNotSetWrappedValueWhenConvertedValueHasNotChanged() {
        // Setting a value whose converted form equals the current converted
        // value ("TEST" in both cases) must leave the wrapped model as-is.
        vm.setValue("tEsT");
        tc.setValue("TEST");
        assertEquals("TEST", tc.getValue());
        assertEquals("tEsT", vm.getValue());
    }
    // Test closure: upper-cases its String argument.
    // NOTE(review): uses default-locale toUpperCase(); fine for these ASCII
    // fixtures, but locale-sensitive in general (e.g. Turkish dotless i).
    private static class UpperConverter implements Closure {
        public Object call(Object argument) {
            return ((String)argument).toUpperCase();
        }
    }
    // Test closure: lower-cases its String argument.
    private static class LowerConverter implements Closure {
        public Object call(Object argument) {
            return ((String)argument).toLowerCase();
        }
    }
}
| apache-2.0 |
asimshankar/tensorflow | tensorflow/contrib/checkpoint/python/containers.py | 3049 | """Checkpointable data structures."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training.checkpointable import base as checkpointable_lib
from tensorflow.python.training.checkpointable import data_structures
class UniqueNameTracker(data_structures.CheckpointableDataStructure):
  """Tracks checkpoint dependencies under locally-unique, hint-based names.

  Each call to `track` registers a dependency whose name is derived from a
  caller-supplied hint; repeated hints are disambiguated with a numeric
  suffix ("x", "x_1", "x_2", ...).

  Example usage:

  ```python
  class SlotManager(tf.contrib.checkpoint.Checkpointable):

    def __init__(self):
      # Create a dependency named "slotdeps" on the container.
      self.slotdeps = tf.contrib.checkpoint.UniqueNameTracker()
      slotdeps = self.slotdeps
      slots = []
      slots.append(slotdeps.track(tf.Variable(3.), "x"))  # Named "x"
      slots.append(slotdeps.track(tf.Variable(4.), "y"))
      slots.append(slotdeps.track(tf.Variable(5.), "x"))  # Named "x_1"
  ```
  """

  def __init__(self):
    super(UniqueNameTracker, self).__init__()
    self._maybe_initialize_checkpointable()
    # Maps each base name to the next numeric suffix to try for it.
    self._name_counts = {}

  @property
  def _values(self):
    # The tracked objects, in dependency order.
    return [dependency.ref for dependency in self._checkpoint_dependencies]

  def track(self, checkpointable, base_name):
    """Add a dependency on `checkpointable`.

    Args:
      checkpointable: An object to add a checkpoint dependency on.
      base_name: A name hint, which is uniquified to determine the dependency
        name.
    Returns:
      `checkpointable`, for chaining.
    Raises:
      ValueError: If `checkpointable` is not a checkpointable object.
    """
    if not isinstance(checkpointable, checkpointable_lib.CheckpointableBase):
      raise ValueError(
          ("Expected a checkpointable value, got %s which does not inherit "
           "from CheckpointableBase.") % (checkpointable,))

    def _candidate(prefix, number):
      # Suffix zero is spelled as the bare prefix.
      return prefix if number <= 0 else "%s_%d" % (prefix, number)

    suffix = self._name_counts.get(base_name, 0)
    # Skip over names already claimed by existing dependencies.
    while self._lookup_dependency(_candidate(base_name, suffix)) is not None:
      suffix += 1
    name = _candidate(base_name, suffix)
    self._name_counts[base_name] = suffix + 1
    self._track_value(checkpointable, name=name)
    return checkpointable
| apache-2.0 |
rednaxelafx/apache-spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala | 71897 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.catalog
import scala.concurrent.duration._
import org.scalatest.concurrent.Eventually
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.{FunctionIdentifier, QualifiedTableName, TableIdentifier}
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.catalyst.plans.logical.{Command, Range, SubqueryAlias, View}
import org.apache.spark.sql.connector.catalog.CatalogManager
import org.apache.spark.sql.connector.catalog.SupportsNamespaces.PROP_OWNER
import org.apache.spark.sql.internal.{SQLConf, StaticSQLConf}
import org.apache.spark.sql.types._
class InMemorySessionCatalogSuite extends SessionCatalogSuite {
protected val utils = new CatalogTestUtils {
override val tableInputFormat: String = "com.fruit.eyephone.CameraInputFormat"
override val tableOutputFormat: String = "com.fruit.eyephone.CameraOutputFormat"
override val defaultProvider: String = "parquet"
override def newEmptyCatalog(): ExternalCatalog = new InMemoryCatalog
}
}
/**
* Tests for [[SessionCatalog]]
*
* Note: many of the methods here are very similar to the ones in [[ExternalCatalogSuite]].
* This is because [[SessionCatalog]] and [[ExternalCatalog]] share many similar method
* signatures but do not extend a common parent. This is largely by design but
* unfortunately leads to very similar test code in two places.
*/
abstract class SessionCatalogSuite extends AnalysisTest with Eventually {
protected val utils: CatalogTestUtils
protected val isHiveExternalCatalog = false
import utils._
  /** Runs `f` against a catalog pre-populated with the standard test databases and tables. */
  private def withBasicCatalog(f: SessionCatalog => Unit): Unit = {
    val catalog = new SessionCatalog(newBasicCatalog())
    try {
      f(catalog)
    } finally {
      // Always reset so catalog state never leaks between tests.
      catalog.reset()
    }
  }
  /** Runs `f` against a catalog containing only the "default" database. */
  private def withEmptyCatalog(f: SessionCatalog => Unit): Unit = {
    val catalog = new SessionCatalog(newEmptyCatalog())
    catalog.createDatabase(newDb("default"), ignoreIfExists = true)
    try {
      f(catalog)
    } finally {
      catalog.reset()
    }
  }
  /** Like `withEmptyCatalog`, but constructs the catalog with the supplied `SQLConf`. */
  private def withConfAndEmptyCatalog(conf: SQLConf)(f: SessionCatalog => Unit): Unit = {
    val catalog = new SessionCatalog(newEmptyCatalog(), new SimpleFunctionRegistry(), conf)
    catalog.createDatabase(newDb("default"), ignoreIfExists = true)
    try {
      f(catalog)
    } finally {
      catalog.reset()
    }
  }
// --------------------------------------------------------------------------
// Databases
// --------------------------------------------------------------------------
  test("basic create and list databases") {
    withEmptyCatalog { catalog =>
      assert(catalog.databaseExists("default"))
      assert(!catalog.databaseExists("testing"))
      assert(!catalog.databaseExists("testing2"))
      catalog.createDatabase(newDb("testing"), ignoreIfExists = false)
      assert(catalog.databaseExists("testing"))
      assert(catalog.listDatabases().toSet == Set("default", "testing"))
      catalog.createDatabase(newDb("testing2"), ignoreIfExists = false)
      assert(catalog.listDatabases().toSet == Set("default", "testing", "testing2"))
      assert(catalog.databaseExists("testing2"))
      assert(!catalog.databaseExists("does_not_exist"))
    }
  }
  /** Asserts that `func`, applied to an invalid identifier, fails with the expected message. */
  def testInvalidName(func: (String) => Unit): Unit = {
    // scalastyle:off
    // non ascii characters are not allowed in the source code, so we disable the scalastyle.
    val name = "砖"
    // scalastyle:on
    val e = intercept[AnalysisException] {
      func(name)
    }.getMessage
    assert(e.contains(s"`$name` is not a valid name for tables/databases."))
  }
  test("create databases using invalid names") {
    withEmptyCatalog { catalog =>
      testInvalidName(
        name => catalog.createDatabase(newDb(name), ignoreIfExists = true))
    }
  }
  test("get database when a database exists") {
    withBasicCatalog { catalog =>
      val db1 = catalog.getDatabaseMetadata("db1")
      assert(db1.name == "db1")
      assert(db1.description.contains("db1"))
    }
  }
  test("get database should throw exception when the database does not exist") {
    withBasicCatalog { catalog =>
      intercept[NoSuchDatabaseException] {
        catalog.getDatabaseMetadata("db_that_does_not_exist")
      }
    }
  }
  test("list databases without pattern") {
    withBasicCatalog { catalog =>
      assert(catalog.listDatabases().toSet == Set("default", "db1", "db2", "db3"))
    }
  }
  test("list databases with pattern") {
    withBasicCatalog { catalog =>
      // Patterns follow SHOW DATABASES globbing: '*' matches any sequence.
      assert(catalog.listDatabases("db").toSet == Set.empty)
      assert(catalog.listDatabases("db*").toSet == Set("db1", "db2", "db3"))
      assert(catalog.listDatabases("*1").toSet == Set("db1"))
      assert(catalog.listDatabases("db2").toSet == Set("db2"))
    }
  }
  test("drop database") {
    withBasicCatalog { catalog =>
      catalog.dropDatabase("db1", ignoreIfNotExists = false, cascade = false)
      assert(catalog.listDatabases().toSet == Set("default", "db2", "db3"))
    }
  }
  test("drop database when the database is not empty") {
    // Throw exception if there are functions left
    withBasicCatalog { catalog =>
      catalog.externalCatalog.dropTable("db2", "tbl1", ignoreIfNotExists = false, purge = false)
      catalog.externalCatalog.dropTable("db2", "tbl2", ignoreIfNotExists = false, purge = false)
      intercept[AnalysisException] {
        catalog.dropDatabase("db2", ignoreIfNotExists = false, cascade = false)
      }
    }
    withBasicCatalog { catalog =>
      // Throw exception if there are tables left
      catalog.externalCatalog.dropFunction("db2", "func1")
      intercept[AnalysisException] {
        catalog.dropDatabase("db2", ignoreIfNotExists = false, cascade = false)
      }
    }
    withBasicCatalog { catalog =>
      // When cascade is true, it should drop them
      catalog.externalCatalog.dropDatabase("db2", ignoreIfNotExists = false, cascade = true)
      assert(catalog.listDatabases().toSet == Set("default", "db1", "db3"))
    }
  }
  test("drop database when the database does not exist") {
    withBasicCatalog { catalog =>
      // TODO: fix this inconsistent between HiveExternalCatalog and InMemoryCatalog
      if (isHiveExternalCatalog) {
        val e = intercept[AnalysisException] {
          catalog.dropDatabase("db_that_does_not_exist", ignoreIfNotExists = false, cascade = false)
        }.getMessage
        assert(e.contains(
          "org.apache.hadoop.hive.metastore.api.NoSuchObjectException: db_that_does_not_exist"))
      } else {
        intercept[NoSuchDatabaseException] {
          catalog.dropDatabase("db_that_does_not_exist", ignoreIfNotExists = false, cascade = false)
        }
      }
      // With ignoreIfNotExists = true the call must be a silent no-op.
      catalog.dropDatabase("db_that_does_not_exist", ignoreIfNotExists = true, cascade = false)
    }
  }
  test("drop current database and drop default database") {
    withBasicCatalog { catalog =>
      catalog.setCurrentDatabase("db1")
      assert(catalog.getCurrentDatabase == "db1")
      catalog.dropDatabase("db1", ignoreIfNotExists = false, cascade = true)
      intercept[NoSuchDatabaseException] {
        catalog.createTable(newTable("tbl1", "db1"), ignoreIfExists = false)
      }
      catalog.setCurrentDatabase("default")
      assert(catalog.getCurrentDatabase == "default")
      // Dropping the default database is never allowed.
      intercept[AnalysisException] {
        catalog.dropDatabase("default", ignoreIfNotExists = false, cascade = true)
      }
    }
  }
  test("alter database") {
    withBasicCatalog { catalog =>
      val db1 = catalog.getDatabaseMetadata("db1")
      // Note: alter properties here because Hive does not support altering other fields
      catalog.alterDatabase(db1.copy(properties = Map("k" -> "v3", "good" -> "true")))
      val newDb1 = catalog.getDatabaseMetadata("db1")
      assert((db1.properties -- Seq(PROP_OWNER)).isEmpty)
      assert((newDb1.properties -- Seq(PROP_OWNER)).size == 2)
      assert(newDb1.properties.get("k") == Some("v3"))
      assert(newDb1.properties.get("good") == Some("true"))
    }
  }
  test("alter database should throw exception when the database does not exist") {
    withBasicCatalog { catalog =>
      intercept[NoSuchDatabaseException] {
        catalog.alterDatabase(newDb("unknown_db"))
      }
    }
  }
  test("get/set current database") {
    withBasicCatalog { catalog =>
      assert(catalog.getCurrentDatabase == "default")
      catalog.setCurrentDatabase("db2")
      assert(catalog.getCurrentDatabase == "db2")
      // Switching to a non-existent database must fail without changing state.
      intercept[NoSuchDatabaseException] {
        catalog.setCurrentDatabase("deebo")
      }
      catalog.createDatabase(newDb("deebo"), ignoreIfExists = false)
      catalog.setCurrentDatabase("deebo")
      assert(catalog.getCurrentDatabase == "deebo")
    }
  }
// --------------------------------------------------------------------------
// Tables
// --------------------------------------------------------------------------
  // createTable should register the table in the external catalog, resolving
  // an unqualified table name against the current database.
  test("create table") {
    withBasicCatalog { catalog =>
      assert(catalog.externalCatalog.listTables("db1").isEmpty)
      assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2"))
      catalog.createTable(newTable("tbl3", "db1"), ignoreIfExists = false)
      catalog.createTable(newTable("tbl3", "db2"), ignoreIfExists = false)
      assert(catalog.externalCatalog.listTables("db1").toSet == Set("tbl3"))
      assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2", "tbl3"))
      // Create table without explicitly specifying database
      catalog.setCurrentDatabase("db1")
      catalog.createTable(newTable("tbl4"), ignoreIfExists = false)
      assert(catalog.externalCatalog.listTables("db1").toSet == Set("tbl3", "tbl4"))
      // db2 is untouched by the creation in the current database (db1).
      assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2", "tbl3"))
    }
  }
test("create tables using invalid names") {
withEmptyCatalog { catalog =>
testInvalidName(name => catalog.createTable(newTable(name, "db1"), ignoreIfExists = false))
}
}
  // Missing databases always fail regardless of ignoreIfExists; duplicate
  // tables fail only when ignoreIfExists = false.
  test("create table when database does not exist") {
    withBasicCatalog { catalog =>
      // Creating table in non-existent database should always fail
      intercept[NoSuchDatabaseException] {
        catalog.createTable(newTable("tbl1", "does_not_exist"), ignoreIfExists = false)
      }
      intercept[NoSuchDatabaseException] {
        catalog.createTable(newTable("tbl1", "does_not_exist"), ignoreIfExists = true)
      }
      // Table already exists
      intercept[TableAlreadyExistsException] {
        catalog.createTable(newTable("tbl1", "db2"), ignoreIfExists = false)
      }
      // With ignoreIfExists = true the duplicate is silently tolerated.
      catalog.createTable(newTable("tbl1", "db2"), ignoreIfExists = true)
    }
  }
test("create temp view") {
withBasicCatalog { catalog =>
val tempTable1 = Range(1, 10, 1, 10)
val tempTable2 = Range(1, 20, 2, 10)
catalog.createTempView("tbl1", tempTable1, overrideIfExists = false)
catalog.createTempView("tbl2", tempTable2, overrideIfExists = false)
assert(catalog.getTempView("tbl1") == Option(tempTable1))
assert(catalog.getTempView("tbl2") == Option(tempTable2))
assert(catalog.getTempView("tbl3").isEmpty)
// Temporary view already exists
intercept[TempTableAlreadyExistsException] {
catalog.createTempView("tbl1", tempTable1, overrideIfExists = false)
}
// Temporary view already exists but we override it
catalog.createTempView("tbl1", tempTable2, overrideIfExists = true)
assert(catalog.getTempView("tbl1") == Option(tempTable2))
}
}
  // dropTable removes the table from the external catalog; unqualified names
  // resolve against the current database.
  test("drop table") {
    withBasicCatalog { catalog =>
      assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2"))
      catalog.dropTable(TableIdentifier("tbl1", Some("db2")), ignoreIfNotExists = false,
        purge = false)
      assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl2"))
      // Drop table without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      catalog.dropTable(TableIdentifier("tbl2"), ignoreIfNotExists = false, purge = false)
      assert(catalog.externalCatalog.listTables("db2").isEmpty)
    }
  }
  // A missing database is an error even with ignoreIfNotExists = true; a
  // missing table is only an error when ignoreIfNotExists = false.
  test("drop table when database/table does not exist") {
    withBasicCatalog { catalog =>
      // Should always throw exception when the database does not exist
      intercept[NoSuchDatabaseException] {
        catalog.dropTable(TableIdentifier("tbl1", Some("unknown_db")), ignoreIfNotExists = false,
          purge = false)
      }
      intercept[NoSuchDatabaseException] {
        catalog.dropTable(TableIdentifier("tbl1", Some("unknown_db")), ignoreIfNotExists = true,
          purge = false)
      }
      intercept[NoSuchTableException] {
        catalog.dropTable(TableIdentifier("unknown_table", Some("db2")), ignoreIfNotExists = false,
          purge = false)
      }
      // Missing table is tolerated when ignoreIfNotExists = true.
      catalog.dropTable(TableIdentifier("unknown_table", Some("db2")), ignoreIfNotExists = true,
        purge = false)
    }
  }
  // Drop resolution order: an unqualified name drops the temp view first, then
  // the table in the current database; a qualified name never touches temp views.
  test("drop temp table") {
    withBasicCatalog { catalog =>
      val tempTable = Range(1, 10, 2, 10)
      catalog.createTempView("tbl1", tempTable, overrideIfExists = false)
      catalog.setCurrentDatabase("db2")
      assert(catalog.getTempView("tbl1") == Some(tempTable))
      assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2"))
      // If database is not specified, temp table should be dropped first
      catalog.dropTable(TableIdentifier("tbl1"), ignoreIfNotExists = false, purge = false)
      assert(catalog.getTempView("tbl1") == None)
      // The metastore table of the same name survives the first drop.
      assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2"))
      // If temp table does not exist, the table in the current database should be dropped
      catalog.dropTable(TableIdentifier("tbl1"), ignoreIfNotExists = false, purge = false)
      assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl2"))
      // If database is specified, temp tables are never dropped
      catalog.createTempView("tbl1", tempTable, overrideIfExists = false)
      catalog.createTable(newTable("tbl1", "db2"), ignoreIfExists = false)
      catalog.dropTable(TableIdentifier("tbl1", Some("db2")), ignoreIfNotExists = false,
        purge = false)
      assert(catalog.getTempView("tbl1") == Some(tempTable))
      assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl2"))
    }
  }
  // renameTable works with qualified and unqualified source names, rejects
  // cross-database renames, and rejects an existing destination name.
  test("rename table") {
    withBasicCatalog { catalog =>
      assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2"))
      catalog.renameTable(TableIdentifier("tbl1", Some("db2")), TableIdentifier("tblone"))
      assert(catalog.externalCatalog.listTables("db2").toSet == Set("tblone", "tbl2"))
      catalog.renameTable(TableIdentifier("tbl2", Some("db2")), TableIdentifier("tbltwo"))
      assert(catalog.externalCatalog.listTables("db2").toSet == Set("tblone", "tbltwo"))
      // Rename table without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      catalog.renameTable(TableIdentifier("tbltwo"), TableIdentifier("table_two"))
      assert(catalog.externalCatalog.listTables("db2").toSet == Set("tblone", "table_two"))
      // Renaming "db2.tblone" to "db1.tblones" should fail because databases don't match
      intercept[AnalysisException] {
        catalog.renameTable(
          TableIdentifier("tblone", Some("db2")), TableIdentifier("tblones", Some("db1")))
      }
      // The new table already exists
      intercept[TableAlreadyExistsException] {
        catalog.renameTable(
          TableIdentifier("tblone", Some("db2")),
          TableIdentifier("table_two"))
      }
    }
  }
test("rename tables to an invalid name") {
withBasicCatalog { catalog =>
testInvalidName(
name => catalog.renameTable(TableIdentifier("tbl1", Some("db2")), TableIdentifier(name)))
}
}
  // Renaming fails with the appropriate exception when the source database or
  // source table is missing.
  test("rename table when database/table does not exist") {
    withBasicCatalog { catalog =>
      intercept[NoSuchDatabaseException] {
        catalog.renameTable(TableIdentifier("tbl1", Some("unknown_db")), TableIdentifier("tbl2"))
      }
      intercept[NoSuchTableException] {
        catalog.renameTable(TableIdentifier("unknown_table", Some("db2")), TableIdentifier("tbl2"))
      }
    }
  }
  // Rename resolution order mirrors drop: unqualified names rename the temp
  // view first; qualified names only ever touch metastore tables.
  test("rename temp table") {
    withBasicCatalog { catalog =>
      val tempTable = Range(1, 10, 2, 10)
      catalog.createTempView("tbl1", tempTable, overrideIfExists = false)
      catalog.setCurrentDatabase("db2")
      assert(catalog.getTempView("tbl1") == Option(tempTable))
      assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2"))
      // If database is not specified, temp table should be renamed first
      catalog.renameTable(TableIdentifier("tbl1"), TableIdentifier("tbl3"))
      assert(catalog.getTempView("tbl1").isEmpty)
      assert(catalog.getTempView("tbl3") == Option(tempTable))
      // The metastore "tbl1" is untouched by the temp-view rename.
      assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl2"))
      // If database is specified, temp tables are never renamed
      catalog.renameTable(TableIdentifier("tbl2", Some("db2")), TableIdentifier("tbl4"))
      assert(catalog.getTempView("tbl3") == Option(tempTable))
      assert(catalog.getTempView("tbl4").isEmpty)
      assert(catalog.externalCatalog.listTables("db2").toSet == Set("tbl1", "tbl4"))
    }
  }
  // alterTable merges new properties into the stored table metadata; the
  // unqualified-identifier form resolves against the current database.
  test("alter table") {
    withBasicCatalog { catalog =>
      val tbl1 = catalog.externalCatalog.getTable("db2", "tbl1")
      catalog.alterTable(tbl1.copy(properties = Map("toh" -> "frem")))
      val newTbl1 = catalog.externalCatalog.getTable("db2", "tbl1")
      assert(!tbl1.properties.contains("toh"))
      assert(newTbl1.properties.size == tbl1.properties.size + 1)
      assert(newTbl1.properties.get("toh") == Some("frem"))
      // Alter table without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      catalog.alterTable(tbl1.copy(identifier = TableIdentifier("tbl1")))
      val newestTbl1 = catalog.externalCatalog.getTable("db2", "tbl1")
      // For hive serde table, hive metastore will set transient_lastDdlTime in table's properties,
      // and its value will be modified, here we ignore it when comparing the two tables.
      assert(newestTbl1.copy(properties = Map.empty) == tbl1.copy(properties = Map.empty))
    }
  }
  // alterTable fails with the right exception for missing databases/tables.
  test("alter table when database/table does not exist") {
    withBasicCatalog { catalog =>
      intercept[NoSuchDatabaseException] {
        catalog.alterTable(newTable("tbl1", "unknown_db"))
      }
      intercept[NoSuchTableException] {
        catalog.alterTable(newTable("unknown_table", "db2"))
      }
    }
  }
test("alter table stats") {
withBasicCatalog { catalog =>
val tableId = TableIdentifier("tbl1", Some("db2"))
val oldTableStats = catalog.getTableMetadata(tableId).stats
assert(oldTableStats.isEmpty)
val newStats = CatalogStatistics(sizeInBytes = 1)
catalog.alterTableStats(tableId, Some(newStats))
val newTableStats = catalog.getTableMetadata(tableId).stats
assert(newTableStats.get == newStats)
}
}
  // alterTableDataSchema can append a data column; the resulting full schema is
  // data columns (old + new) followed by the partition columns.
  test("alter table add columns") {
    withBasicCatalog { sessionCatalog =>
      sessionCatalog.createTable(newTable("t1", "default"), ignoreIfExists = false)
      val oldTab = sessionCatalog.externalCatalog.getTable("default", "t1")
      sessionCatalog.alterTableDataSchema(
        TableIdentifier("t1", Some("default")),
        StructType(oldTab.dataSchema.add("c3", IntegerType)))
      val newTab = sessionCatalog.externalCatalog.getTable("default", "t1")
      // construct the expected table schema
      val expectedTableSchema = StructType(oldTab.dataSchema.fields ++
        Seq(StructField("c3", IntegerType)) ++ oldTab.partitionSchema)
      assert(newTab.schema == expectedTableSchema)
    }
  }
  // Dropping columns through alterTableDataSchema is not supported and must
  // fail with an explanatory message.
  test("alter table drop columns") {
    withBasicCatalog { sessionCatalog =>
      sessionCatalog.createTable(newTable("t1", "default"), ignoreIfExists = false)
      val oldTab = sessionCatalog.externalCatalog.getTable("default", "t1")
      val e = intercept[AnalysisException] {
        sessionCatalog.alterTableDataSchema(
          TableIdentifier("t1", Some("default")), StructType(oldTab.dataSchema.drop(1)))
      }.getMessage
      assert(e.contains("We don't support dropping columns yet."))
    }
  }
  // getTableMetadata returns exactly what the external catalog stores, with
  // unqualified names resolved against the current database.
  test("get table") {
    withBasicCatalog { catalog =>
      assert(catalog.getTableMetadata(TableIdentifier("tbl1", Some("db2")))
        == catalog.externalCatalog.getTable("db2", "tbl1"))
      // Get table without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      assert(catalog.getTableMetadata(TableIdentifier("tbl1"))
        == catalog.externalCatalog.getTable("db2", "tbl1"))
    }
  }
  // Missing database/table yields the corresponding typed exception.
  test("get table when database/table does not exist") {
    withBasicCatalog { catalog =>
      intercept[NoSuchDatabaseException] {
        catalog.getTableMetadata(TableIdentifier("tbl1", Some("unknown_db")))
      }
      intercept[NoSuchTableException] {
        catalog.getTableMetadata(TableIdentifier("unknown_table", Some("db2")))
      }
    }
  }
test("get tables by name") {
withBasicCatalog { catalog =>
assert(catalog.getTablesByName(
Seq(
TableIdentifier("tbl1", Some("db2")),
TableIdentifier("tbl2", Some("db2"))
)
) == catalog.externalCatalog.getTablesByName("db2", Seq("tbl1", "tbl2")))
// Get table without explicitly specifying database
catalog.setCurrentDatabase("db2")
assert(catalog.getTablesByName(
Seq(
TableIdentifier("tbl1"),
TableIdentifier("tbl2")
)
) == catalog.externalCatalog.getTablesByName("db2", Seq("tbl1", "tbl2")))
}
}
  // Nonexistent tables are silently dropped from the batch-lookup result
  // rather than raising an exception.
  test("get tables by name when some tables do not exist") {
    withBasicCatalog { catalog =>
      assert(catalog.getTablesByName(
        Seq(
          TableIdentifier("tbl1", Some("db2")),
          TableIdentifier("tblnotexit", Some("db2"))
        )
      ) == catalog.externalCatalog.getTablesByName("db2", Seq("tbl1")))
      // Get table without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      assert(catalog.getTablesByName(
        Seq(
          TableIdentifier("tbl1"),
          TableIdentifier("tblnotexit")
        )
      ) == catalog.getTablesByName(Seq(TableIdentifier("tbl1", Some("db2")))))
    }
  }
  // A syntactically invalid name behaves like a missing table: it is simply
  // excluded from the result.
  test("get tables by name when contains invalid name") {
    // scalastyle:off
    val name = "砖"
    // scalastyle:on
    withBasicCatalog { catalog =>
      assert(catalog.getTablesByName(
        Seq(
          TableIdentifier("tbl1", Some("db2")),
          TableIdentifier(name, Some("db2"))
        )
      ) == catalog.externalCatalog.getTablesByName("db2", Seq("tbl1")))
      // Get table without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      assert(catalog.getTablesByName(
        Seq(
          TableIdentifier("tbl1"),
          TableIdentifier(name)
        )
      ) == catalog.externalCatalog.getTablesByName("db2", Seq("tbl1")))
    }
  }
  // An empty identifier list returns an empty result, matching the external
  // catalog's behavior for an empty batch.
  test("get tables by name when empty") {
    withBasicCatalog { catalog =>
      assert(catalog.getTablesByName(Seq.empty)
        == catalog.externalCatalog.getTablesByName("db2", Seq.empty))
      // Get table without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      assert(catalog.getTablesByName(Seq.empty)
        == catalog.externalCatalog.getTablesByName("db2", Seq.empty))
    }
  }
  // A single batch lookup cannot span multiple databases; mixing databases
  // (explicitly or via current-database resolution) is rejected.
  test("get tables by name when tables belong to different databases") {
    withBasicCatalog { catalog =>
      intercept[AnalysisException](catalog.getTablesByName(
        Seq(
          TableIdentifier("tbl1", Some("db1")),
          TableIdentifier("tbl2", Some("db2"))
        )
      ))
      // Get table without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      intercept[AnalysisException](catalog.getTablesByName(
        Seq(
          TableIdentifier("tbl1", Some("db1")),
          TableIdentifier("tbl2")
        )
      ))
    }
  }
  // lookupRelation resolution order: a qualified name goes straight to the
  // metastore; an unqualified name checks temp views first, then the current
  // database.
  test("lookup table relation") {
    withBasicCatalog { catalog =>
      val tempTable1 = Range(1, 10, 1, 10)
      val metastoreTable1 = catalog.externalCatalog.getTable("db2", "tbl1")
      catalog.createTempView("tbl1", tempTable1, overrideIfExists = false)
      catalog.setCurrentDatabase("db2")
      // If we explicitly specify the database, we'll look up the relation in that database
      assert(catalog.lookupRelation(TableIdentifier("tbl1", Some("db2"))).children.head
        .asInstanceOf[UnresolvedCatalogRelation].tableMeta == metastoreTable1)
      // Otherwise, we'll first look up a temporary table with the same name
      assert(catalog.lookupRelation(TableIdentifier("tbl1"))
        == SubqueryAlias("tbl1", tempTable1))
      // Then, if that does not exist, look up the relation in the current database
      catalog.dropTable(TableIdentifier("tbl1"), ignoreIfNotExists = false, purge = false)
      assert(catalog.lookupRelation(TableIdentifier("tbl1")).children.head
        .asInstanceOf[UnresolvedCatalogRelation].tableMeta == metastoreTable1)
    }
  }
  // Looking up a view wraps its parsed text in a View node under a fully
  // qualified SubqueryAlias (session catalog name, database, view name).
  test("look up view relation") {
    withBasicCatalog { catalog =>
      val props = CatalogTable.catalogAndNamespaceToProps("cat1", Seq("ns1"))
      catalog.createTable(
        newView("db3", "view1", props),
        ignoreIfExists = false)
      val metadata = catalog.externalCatalog.getTable("db3", "view1")
      assert(metadata.viewText.isDefined)
      assert(metadata.viewCatalogAndNamespace == Seq("cat1", "ns1"))
      // Look up a view.
      catalog.setCurrentDatabase("default")
      val view = View(desc = metadata, output = metadata.schema.toAttributes,
        child = CatalystSqlParser.parsePlan(metadata.viewText.get))
      comparePlans(catalog.lookupRelation(TableIdentifier("view1", Some("db3"))),
        SubqueryAlias(Seq(CatalogManager.SESSION_CATALOG_NAME, "db3", "view1"), view))
      // Look up a view using current database of the session catalog.
      catalog.setCurrentDatabase("db3")
      comparePlans(catalog.lookupRelation(TableIdentifier("view1")),
        SubqueryAlias(Seq(CatalogManager.SESSION_CATALOG_NAME, "db3", "view1"), view))
    }
  }
  // Views written before Spark 3.0 only recorded a default database; the
  // catalog must normalize that to (session catalog, database) on read.
  test("look up view created before Spark 3.0") {
    withBasicCatalog { catalog =>
      val oldView = newView("db3", "view2", Map(CatalogTable.VIEW_DEFAULT_DATABASE -> "db2"))
      catalog.createTable(oldView, ignoreIfExists = false)
      val metadata = catalog.externalCatalog.getTable("db3", "view2")
      assert(metadata.viewText.isDefined)
      assert(metadata.viewCatalogAndNamespace == Seq(CatalogManager.SESSION_CATALOG_NAME, "db2"))
      val view = View(desc = metadata, output = metadata.schema.toAttributes,
        child = CatalystSqlParser.parsePlan(metadata.viewText.get))
      comparePlans(catalog.lookupRelation(TableIdentifier("view2", Some("db3"))),
        SubqueryAlias(Seq(CatalogManager.SESSION_CATALOG_NAME, "db3", "view2"), view))
    }
  }
  // tableExists only consults the metastore — temp views are never considered,
  // whether or not the identifier carries a database.
  test("table exists") {
    withBasicCatalog { catalog =>
      assert(catalog.tableExists(TableIdentifier("tbl1", Some("db2"))))
      assert(catalog.tableExists(TableIdentifier("tbl2", Some("db2"))))
      assert(!catalog.tableExists(TableIdentifier("tbl3", Some("db2"))))
      assert(!catalog.tableExists(TableIdentifier("tbl1", Some("db1"))))
      assert(!catalog.tableExists(TableIdentifier("tbl2", Some("db1"))))
      // If database is explicitly specified, do not check temporary tables
      val tempTable = Range(1, 10, 1, 10)
      assert(!catalog.tableExists(TableIdentifier("tbl3", Some("db2"))))
      // If database is not explicitly specified, check the current database
      catalog.setCurrentDatabase("db2")
      assert(catalog.tableExists(TableIdentifier("tbl1")))
      assert(catalog.tableExists(TableIdentifier("tbl2")))
      catalog.createTempView("tbl3", tempTable, overrideIfExists = false)
      // tableExists should not check temp view.
      assert(!catalog.tableExists(TableIdentifier("tbl3")))
    }
  }
  // getTempViewOrPermanentTableMetadata resolves an unqualified name to a temp
  // view, but a database-qualified name bypasses temp views entirely.
  test("getTempViewOrPermanentTableMetadata on temporary views") {
    withBasicCatalog { catalog =>
      val tempTable = Range(1, 10, 2, 10)
      // Nothing registered yet: both forms fail.
      intercept[NoSuchTableException] {
        catalog.getTempViewOrPermanentTableMetadata(TableIdentifier("view1"))
      }.getMessage
      intercept[NoSuchTableException] {
        catalog.getTempViewOrPermanentTableMetadata(TableIdentifier("view1", Some("default")))
      }.getMessage
      catalog.createTempView("view1", tempTable, overrideIfExists = false)
      assert(catalog.getTempViewOrPermanentTableMetadata(
        TableIdentifier("view1")).identifier.table == "view1")
      assert(catalog.getTempViewOrPermanentTableMetadata(
        TableIdentifier("view1")).schema(0).name == "id")
      // Qualified lookup still fails: the temp view is not in "default".
      intercept[NoSuchTableException] {
        catalog.getTempViewOrPermanentTableMetadata(TableIdentifier("view1", Some("default")))
      }.getMessage
    }
  }
  // listTables returns metastore tables of the given database plus all local
  // temp views (the latter as unqualified identifiers).
  test("list tables without pattern") {
    withBasicCatalog { catalog =>
      val tempTable = Range(1, 10, 2, 10)
      catalog.createTempView("tbl1", tempTable, overrideIfExists = false)
      catalog.createTempView("tbl4", tempTable, overrideIfExists = false)
      // db1 has no metastore tables, so only the temp views show up.
      assert(catalog.listTables("db1").toSet ==
        Set(TableIdentifier("tbl1"), TableIdentifier("tbl4")))
      assert(catalog.listTables("db2").toSet ==
        Set(TableIdentifier("tbl1"),
          TableIdentifier("tbl4"),
          TableIdentifier("tbl1", Some("db2")),
          TableIdentifier("tbl2", Some("db2"))))
      intercept[NoSuchDatabaseException] {
        catalog.listTables("unknown_db")
      }
    }
  }
  // Glob-style patterns filter both metastore tables and temp views.
  test("list tables with pattern") {
    withBasicCatalog { catalog =>
      val tempTable = Range(1, 10, 2, 10)
      catalog.createTempView("tbl1", tempTable, overrideIfExists = false)
      catalog.createTempView("tbl4", tempTable, overrideIfExists = false)
      // "*" is equivalent to the pattern-less overload.
      assert(catalog.listTables("db1", "*").toSet == catalog.listTables("db1").toSet)
      assert(catalog.listTables("db2", "*").toSet == catalog.listTables("db2").toSet)
      assert(catalog.listTables("db2", "tbl*").toSet ==
        Set(TableIdentifier("tbl1"),
          TableIdentifier("tbl4"),
          TableIdentifier("tbl1", Some("db2")),
          TableIdentifier("tbl2", Some("db2"))))
      assert(catalog.listTables("db2", "*1").toSet ==
        Set(TableIdentifier("tbl1"), TableIdentifier("tbl1", Some("db2"))))
      intercept[NoSuchDatabaseException] {
        catalog.listTables("unknown_db", "*")
      }
    }
  }
  // The includeLocalTempViews flag controls whether local temp views are part
  // of the listTables result; true is the default behavior.
  test("list tables with pattern and includeLocalTempViews") {
    withEmptyCatalog { catalog =>
      catalog.createDatabase(newDb("mydb"), ignoreIfExists = false)
      catalog.createTable(newTable("tbl1", "mydb"), ignoreIfExists = false)
      catalog.createTable(newTable("tbl2", "mydb"), ignoreIfExists = false)
      val tempTable = Range(1, 10, 2, 10)
      catalog.createTempView("temp_view1", tempTable, overrideIfExists = false)
      catalog.createTempView("temp_view4", tempTable, overrideIfExists = false)
      assert(catalog.listTables("mydb").toSet == catalog.listTables("mydb", "*").toSet)
      assert(catalog.listTables("mydb").toSet == catalog.listTables("mydb", "*", true).toSet)
      // Excluding temp views and re-adding them reproduces the full listing.
      assert(catalog.listTables("mydb").toSet ==
        catalog.listTables("mydb", "*", false).toSet ++ catalog.listLocalTempViews("*"))
      assert(catalog.listTables("mydb", "*", true).toSet ==
        Set(TableIdentifier("tbl1", Some("mydb")),
          TableIdentifier("tbl2", Some("mydb")),
          TableIdentifier("temp_view1"),
          TableIdentifier("temp_view4")))
      assert(catalog.listTables("mydb", "*", false).toSet ==
        Set(TableIdentifier("tbl1", Some("mydb")), TableIdentifier("tbl2", Some("mydb"))))
      assert(catalog.listTables("mydb", "tbl*", true).toSet ==
        Set(TableIdentifier("tbl1", Some("mydb")), TableIdentifier("tbl2", Some("mydb"))))
      assert(catalog.listTables("mydb", "tbl*", false).toSet ==
        Set(TableIdentifier("tbl1", Some("mydb")), TableIdentifier("tbl2", Some("mydb"))))
      assert(catalog.listTables("mydb", "temp_view*", true).toSet ==
        Set(TableIdentifier("temp_view1"), TableIdentifier("temp_view4")))
      assert(catalog.listTables("mydb", "temp_view*", false).toSet == Set.empty)
    }
  }
test("list temporary view with pattern") {
withBasicCatalog { catalog =>
val tempTable = Range(1, 10, 2, 10)
catalog.createTempView("temp_view1", tempTable, overrideIfExists = false)
catalog.createTempView("temp_view4", tempTable, overrideIfExists = false)
assert(catalog.listLocalTempViews("*").toSet ==
Set(TableIdentifier("temp_view1"), TableIdentifier("temp_view4")))
assert(catalog.listLocalTempViews("temp_view*").toSet ==
Set(TableIdentifier("temp_view1"), TableIdentifier("temp_view4")))
assert(catalog.listLocalTempViews("*1").toSet == Set(TableIdentifier("temp_view1")))
assert(catalog.listLocalTempViews("does_not_exist").toSet == Set.empty)
}
}
  // Listing against the global-temp database returns both global temp views
  // (qualified with that database) and local temp views (unqualified).
  test("list global temporary view and local temporary view with pattern") {
    withBasicCatalog { catalog =>
      val tempTable = Range(1, 10, 2, 10)
      catalog.createTempView("temp_view1", tempTable, overrideIfExists = false)
      catalog.createTempView("temp_view4", tempTable, overrideIfExists = false)
      catalog.globalTempViewManager.create("global_temp_view1", tempTable, overrideIfExists = false)
      catalog.globalTempViewManager.create("global_temp_view2", tempTable, overrideIfExists = false)
      assert(catalog.listTables(catalog.globalTempViewManager.database, "*").toSet ==
        Set(TableIdentifier("temp_view1"),
          TableIdentifier("temp_view4"),
          TableIdentifier("global_temp_view1", Some(catalog.globalTempViewManager.database)),
          TableIdentifier("global_temp_view2", Some(catalog.globalTempViewManager.database))))
      // Patterns apply uniformly across both kinds of temp view.
      assert(catalog.listTables(catalog.globalTempViewManager.database, "*temp_view1").toSet ==
        Set(TableIdentifier("temp_view1"),
          TableIdentifier("global_temp_view1", Some(catalog.globalTempViewManager.database))))
      assert(catalog.listTables(catalog.globalTempViewManager.database, "global*").toSet ==
        Set(TableIdentifier("global_temp_view1", Some(catalog.globalTempViewManager.database)),
          TableIdentifier("global_temp_view2", Some(catalog.globalTempViewManager.database))))
    }
  }
// --------------------------------------------------------------------------
// Partitions
// --------------------------------------------------------------------------
  // createPartitions registers partitions in the external catalog; unqualified
  // table names resolve against the current database, and partition specs with
  // mixed column order are normalized (partWithMixedOrder).
  test("basic create and list partitions") {
    withEmptyCatalog { catalog =>
      catalog.createDatabase(newDb("mydb"), ignoreIfExists = false)
      catalog.createTable(newTable("tbl", "mydb"), ignoreIfExists = false)
      catalog.createPartitions(
        TableIdentifier("tbl", Some("mydb")), Seq(part1, part2), ignoreIfExists = false)
      assert(catalogPartitionsEqual(
        catalog.externalCatalog.listPartitions("mydb", "tbl"), part1, part2))
      // Create partitions without explicitly specifying database
      catalog.setCurrentDatabase("mydb")
      catalog.createPartitions(
        TableIdentifier("tbl"), Seq(partWithMixedOrder), ignoreIfExists = false)
      assert(catalogPartitionsEqual(
        catalog.externalCatalog.listPartitions("mydb", "tbl"), part1, part2, partWithMixedOrder))
    }
  }
  // Missing database/table yields the corresponding typed exception.
  test("create partitions when database/table does not exist") {
    withBasicCatalog { catalog =>
      intercept[NoSuchDatabaseException] {
        catalog.createPartitions(
          TableIdentifier("tbl1", Some("unknown_db")), Seq(), ignoreIfExists = false)
      }
      intercept[NoSuchTableException] {
        catalog.createPartitions(
          TableIdentifier("does_not_exist", Some("db2")), Seq(), ignoreIfExists = false)
      }
    }
  }
  // Duplicate partitions fail unless ignoreIfExists = true.
  test("create partitions that already exist") {
    withBasicCatalog { catalog =>
      intercept[AnalysisException] {
        catalog.createPartitions(
          TableIdentifier("tbl2", Some("db2")), Seq(part1), ignoreIfExists = false)
      }
      catalog.createPartitions(
        TableIdentifier("tbl2", Some("db2")), Seq(part1), ignoreIfExists = true)
    }
  }
  // A create-partition spec must match the table's partition columns exactly:
  // too few, too many, unknown, or empty-valued columns are all rejected.
  test("create partitions with invalid part spec") {
    withBasicCatalog { catalog =>
      var e = intercept[AnalysisException] {
        catalog.createPartitions(
          TableIdentifier("tbl2", Some("db2")),
          Seq(part1, partWithLessColumns), ignoreIfExists = false)
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a) must match " +
        "the partition spec (a, b) defined in table '`db2`.`tbl2`'"))
      e = intercept[AnalysisException] {
        catalog.createPartitions(
          TableIdentifier("tbl2", Some("db2")),
          Seq(part1, partWithMoreColumns), ignoreIfExists = true)
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a, b, c) must match " +
        "the partition spec (a, b) defined in table '`db2`.`tbl2`'"))
      e = intercept[AnalysisException] {
        catalog.createPartitions(
          TableIdentifier("tbl2", Some("db2")),
          Seq(partWithUnknownColumns, part1), ignoreIfExists = true)
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a, unknown) must match " +
        "the partition spec (a, b) defined in table '`db2`.`tbl2`'"))
      e = intercept[AnalysisException] {
        catalog.createPartitions(
          TableIdentifier("tbl2", Some("db2")),
          Seq(partWithEmptyValue, part1), ignoreIfExists = true)
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec ([a=3, b=]) contains an " +
        "empty partition column value"))
    }
  }
  // dropPartitions removes partitions singly or in batches; unqualified table
  // names resolve against the current database.
  test("drop partitions") {
    withBasicCatalog { catalog =>
      assert(catalogPartitionsEqual(
        catalog.externalCatalog.listPartitions("db2", "tbl2"), part1, part2))
      catalog.dropPartitions(
        TableIdentifier("tbl2", Some("db2")),
        Seq(part1.spec),
        ignoreIfNotExists = false,
        purge = false,
        retainData = false)
      assert(catalogPartitionsEqual(
        catalog.externalCatalog.listPartitions("db2", "tbl2"), part2))
      // Drop partitions without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      catalog.dropPartitions(
        TableIdentifier("tbl2"),
        Seq(part2.spec),
        ignoreIfNotExists = false,
        purge = false,
        retainData = false)
      assert(catalog.externalCatalog.listPartitions("db2", "tbl2").isEmpty)
      // Drop multiple partitions at once
      catalog.createPartitions(
        TableIdentifier("tbl2", Some("db2")), Seq(part1, part2), ignoreIfExists = false)
      assert(catalogPartitionsEqual(
        catalog.externalCatalog.listPartitions("db2", "tbl2"), part1, part2))
      catalog.dropPartitions(
        TableIdentifier("tbl2", Some("db2")),
        Seq(part1.spec, part2.spec),
        ignoreIfNotExists = false,
        purge = false,
        retainData = false)
      assert(catalog.externalCatalog.listPartitions("db2", "tbl2").isEmpty)
    }
  }
  // Missing database/table yields the corresponding typed exception.
  test("drop partitions when database/table does not exist") {
    withBasicCatalog { catalog =>
      intercept[NoSuchDatabaseException] {
        catalog.dropPartitions(
          TableIdentifier("tbl1", Some("unknown_db")),
          Seq(),
          ignoreIfNotExists = false,
          purge = false,
          retainData = false)
      }
      intercept[NoSuchTableException] {
        catalog.dropPartitions(
          TableIdentifier("does_not_exist", Some("db2")),
          Seq(),
          ignoreIfNotExists = false,
          purge = false,
          retainData = false)
      }
    }
  }
  // A nonexistent partition fails unless ignoreIfNotExists = true.
  test("drop partitions that do not exist") {
    withBasicCatalog { catalog =>
      intercept[AnalysisException] {
        catalog.dropPartitions(
          TableIdentifier("tbl2", Some("db2")),
          Seq(part3.spec),
          ignoreIfNotExists = false,
          purge = false,
          retainData = false)
      }
      catalog.dropPartitions(
        TableIdentifier("tbl2", Some("db2")),
        Seq(part3.spec),
        ignoreIfNotExists = true,
        purge = false,
        retainData = false)
    }
  }
  // Unlike create, a drop spec may be partial, but it must be *contained
  // within* the table's partition columns; unknown or empty-valued columns
  // are rejected.
  test("drop partitions with invalid partition spec") {
    withBasicCatalog { catalog =>
      var e = intercept[AnalysisException] {
        catalog.dropPartitions(
          TableIdentifier("tbl2", Some("db2")),
          Seq(partWithMoreColumns.spec),
          ignoreIfNotExists = false,
          purge = false,
          retainData = false)
      }
      assert(e.getMessage.contains(
        "Partition spec is invalid. The spec (a, b, c) must be contained within " +
          "the partition spec (a, b) defined in table '`db2`.`tbl2`'"))
      e = intercept[AnalysisException] {
        catalog.dropPartitions(
          TableIdentifier("tbl2", Some("db2")),
          Seq(partWithUnknownColumns.spec),
          ignoreIfNotExists = false,
          purge = false,
          retainData = false)
      }
      assert(e.getMessage.contains(
        "Partition spec is invalid. The spec (a, unknown) must be contained within " +
          "the partition spec (a, b) defined in table '`db2`.`tbl2`'"))
      e = intercept[AnalysisException] {
        catalog.dropPartitions(
          TableIdentifier("tbl2", Some("db2")),
          Seq(partWithEmptyValue.spec, part1.spec),
          ignoreIfNotExists = false,
          purge = false,
          retainData = false)
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec ([a=3, b=]) contains an " +
        "empty partition column value"))
    }
  }
  // getPartition returns a partition by exact spec; unqualified table names
  // resolve against the current database, and unknown specs fail.
  test("get partition") {
    withBasicCatalog { catalog =>
      assert(catalog.getPartition(
        TableIdentifier("tbl2", Some("db2")), part1.spec).spec == part1.spec)
      assert(catalog.getPartition(
        TableIdentifier("tbl2", Some("db2")), part2.spec).spec == part2.spec)
      // Get partition without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      assert(catalog.getPartition(TableIdentifier("tbl2"), part1.spec).spec == part1.spec)
      assert(catalog.getPartition(TableIdentifier("tbl2"), part2.spec).spec == part2.spec)
      // Get non-existent partition
      intercept[AnalysisException] {
        catalog.getPartition(TableIdentifier("tbl2"), part3.spec)
      }
    }
  }
  // Missing database/table yields the corresponding typed exception.
  test("get partition when database/table does not exist") {
    withBasicCatalog { catalog =>
      intercept[NoSuchDatabaseException] {
        catalog.getPartition(TableIdentifier("tbl1", Some("unknown_db")), part1.spec)
      }
      intercept[NoSuchTableException] {
        catalog.getPartition(TableIdentifier("does_not_exist", Some("db2")), part1.spec)
      }
    }
  }
  // A getPartition spec must match the table's partition columns exactly:
  // too few, too many, unknown, or empty-valued columns are all rejected.
  test("get partition with invalid partition spec") {
    withBasicCatalog { catalog =>
      var e = intercept[AnalysisException] {
        catalog.getPartition(TableIdentifier("tbl1", Some("db2")), partWithLessColumns.spec)
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a) must match " +
        "the partition spec (a, b) defined in table '`db2`.`tbl1`'"))
      e = intercept[AnalysisException] {
        catalog.getPartition(TableIdentifier("tbl1", Some("db2")), partWithMoreColumns.spec)
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a, b, c) must match " +
        "the partition spec (a, b) defined in table '`db2`.`tbl1`'"))
      e = intercept[AnalysisException] {
        catalog.getPartition(TableIdentifier("tbl1", Some("db2")), partWithUnknownColumns.spec)
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a, unknown) must match " +
        "the partition spec (a, b) defined in table '`db2`.`tbl1`'"))
      e = intercept[AnalysisException] {
        catalog.getPartition(TableIdentifier("tbl1", Some("db2")), partWithEmptyValue.spec)
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec ([a=3, b=]) contains an " +
        "empty partition column value"))
    }
  }
  // renamePartitions replaces old specs with new ones pairwise; renaming back
  // (without a database qualifier) restores the original specs.
  test("rename partitions") {
    withBasicCatalog { catalog =>
      val newPart1 = part1.copy(spec = Map("a" -> "100", "b" -> "101"))
      val newPart2 = part2.copy(spec = Map("a" -> "200", "b" -> "201"))
      val newSpecs = Seq(newPart1.spec, newPart2.spec)
      catalog.renamePartitions(
        TableIdentifier("tbl2", Some("db2")), Seq(part1.spec, part2.spec), newSpecs)
      assert(catalog.getPartition(
        TableIdentifier("tbl2", Some("db2")), newPart1.spec).spec === newPart1.spec)
      assert(catalog.getPartition(
        TableIdentifier("tbl2", Some("db2")), newPart2.spec).spec === newPart2.spec)
      // The old specs are gone after the rename.
      intercept[AnalysisException] {
        catalog.getPartition(TableIdentifier("tbl2", Some("db2")), part1.spec)
      }
      intercept[AnalysisException] {
        catalog.getPartition(TableIdentifier("tbl2", Some("db2")), part2.spec)
      }
      // Rename partitions without explicitly specifying database
      catalog.setCurrentDatabase("db2")
      catalog.renamePartitions(TableIdentifier("tbl2"), newSpecs, Seq(part1.spec, part2.spec))
      assert(catalog.getPartition(TableIdentifier("tbl2"), part1.spec).spec === part1.spec)
      assert(catalog.getPartition(TableIdentifier("tbl2"), part2.spec).spec === part2.spec)
      intercept[AnalysisException] {
        catalog.getPartition(TableIdentifier("tbl2"), newPart1.spec)
      }
      intercept[AnalysisException] {
        catalog.getPartition(TableIdentifier("tbl2"), newPart2.spec)
      }
    }
  }
  // Missing database/table yields the corresponding typed exception.
  test("rename partitions when database/table does not exist") {
    withBasicCatalog { catalog =>
      intercept[NoSuchDatabaseException] {
        catalog.renamePartitions(
          TableIdentifier("tbl1", Some("unknown_db")), Seq(part1.spec), Seq(part2.spec))
      }
      intercept[NoSuchTableException] {
        catalog.renamePartitions(
          TableIdentifier("does_not_exist", Some("db2")), Seq(part1.spec), Seq(part2.spec))
      }
    }
  }
  // The destination spec of a rename must match the table's partition columns
  // exactly: too few, too many, unknown, or empty-valued columns are rejected.
  test("rename partition with invalid partition spec") {
    withBasicCatalog { catalog =>
      var e = intercept[AnalysisException] {
        catalog.renamePartitions(
          TableIdentifier("tbl1", Some("db2")),
          Seq(part1.spec), Seq(partWithLessColumns.spec))
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a) must match " +
        "the partition spec (a, b) defined in table '`db2`.`tbl1`'"))
      e = intercept[AnalysisException] {
        catalog.renamePartitions(
          TableIdentifier("tbl1", Some("db2")),
          Seq(part1.spec), Seq(partWithMoreColumns.spec))
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a, b, c) must match " +
        "the partition spec (a, b) defined in table '`db2`.`tbl1`'"))
      e = intercept[AnalysisException] {
        catalog.renamePartitions(
          TableIdentifier("tbl1", Some("db2")),
          Seq(part1.spec), Seq(partWithUnknownColumns.spec))
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec (a, unknown) must match " +
        "the partition spec (a, b) defined in table '`db2`.`tbl1`'"))
      e = intercept[AnalysisException] {
        catalog.renamePartitions(
          TableIdentifier("tbl1", Some("db2")),
          Seq(part1.spec), Seq(partWithEmptyValue.spec))
      }
      assert(e.getMessage.contains("Partition spec is invalid. The spec ([a=3, b=]) contains an " +
        "empty partition column value"))
    }
  }
// Altering partitions should update mutable metadata (here: the storage
// location) in place, both with and without an explicit database.
test("alter partitions") {
  withBasicCatalog { catalog =>
    val newLocation = newUriForDatabase()
    // Alter but keep spec the same
    val oldPart1 = catalog.getPartition(TableIdentifier("tbl2", Some("db2")), part1.spec)
    val oldPart2 = catalog.getPartition(TableIdentifier("tbl2", Some("db2")), part2.spec)
    catalog.alterPartitions(TableIdentifier("tbl2", Some("db2")), Seq(
      oldPart1.copy(storage = storageFormat.copy(locationUri = Some(newLocation))),
      oldPart2.copy(storage = storageFormat.copy(locationUri = Some(newLocation)))))
    val newPart1 = catalog.getPartition(TableIdentifier("tbl2", Some("db2")), part1.spec)
    val newPart2 = catalog.getPartition(TableIdentifier("tbl2", Some("db2")), part2.spec)
    assert(newPart1.storage.locationUri == Some(newLocation))
    assert(newPart2.storage.locationUri == Some(newLocation))
    // The snapshots taken before the alter should be untouched.
    assert(oldPart1.storage.locationUri != Some(newLocation))
    assert(oldPart2.storage.locationUri != Some(newLocation))
    // Alter partitions without explicitly specifying database (restores the
    // original storage descriptors).
    catalog.setCurrentDatabase("db2")
    catalog.alterPartitions(TableIdentifier("tbl2"), Seq(oldPart1, oldPart2))
    val newerPart1 = catalog.getPartition(TableIdentifier("tbl2"), part1.spec)
    val newerPart2 = catalog.getPartition(TableIdentifier("tbl2"), part2.spec)
    assert(oldPart1.storage.locationUri == newerPart1.storage.locationUri)
    assert(oldPart2.storage.locationUri == newerPart2.storage.locationUri)
    // Alter but change spec, should fail because new partition specs do not exist yet
    val badPart1 = part1.copy(spec = Map("a" -> "v1", "b" -> "v2"))
    val badPart2 = part2.copy(spec = Map("a" -> "v3", "b" -> "v4"))
    intercept[AnalysisException] {
      catalog.alterPartitions(TableIdentifier("tbl2", Some("db2")), Seq(badPart1, badPart2))
    }
  }
}

// Missing database/table must surface as the dedicated NoSuch* exceptions.
test("alter partitions when database/table does not exist") {
  withBasicCatalog { catalog =>
    intercept[NoSuchDatabaseException] {
      catalog.alterPartitions(TableIdentifier("tbl1", Some("unknown_db")), Seq(part1))
    }
    intercept[NoSuchTableException] {
      catalog.alterPartitions(TableIdentifier("does_not_exist", Some("db2")), Seq(part1))
    }
  }
}

// Invalid partition specs (wrong arity, unknown columns, empty values) must
// be rejected with a descriptive AnalysisException.
test("alter partition with invalid partition spec") {
  withBasicCatalog { catalog =>
    var e = intercept[AnalysisException] {
      catalog.alterPartitions(TableIdentifier("tbl1", Some("db2")), Seq(partWithLessColumns))
    }
    assert(e.getMessage.contains("Partition spec is invalid. The spec (a) must match " +
      "the partition spec (a, b) defined in table '`db2`.`tbl1`'"))
    e = intercept[AnalysisException] {
      catalog.alterPartitions(TableIdentifier("tbl1", Some("db2")), Seq(partWithMoreColumns))
    }
    assert(e.getMessage.contains("Partition spec is invalid. The spec (a, b, c) must match " +
      "the partition spec (a, b) defined in table '`db2`.`tbl1`'"))
    e = intercept[AnalysisException] {
      catalog.alterPartitions(TableIdentifier("tbl1", Some("db2")), Seq(partWithUnknownColumns))
    }
    assert(e.getMessage.contains("Partition spec is invalid. The spec (a, unknown) must match " +
      "the partition spec (a, b) defined in table '`db2`.`tbl1`'"))
    e = intercept[AnalysisException] {
      catalog.alterPartitions(TableIdentifier("tbl1", Some("db2")), Seq(partWithEmptyValue))
    }
    assert(e.getMessage.contains("Partition spec is invalid. The spec ([a=3, b=]) contains an " +
      "empty partition column value"))
  }
}
// Partition names are rendered as "col=value" path segments joined by '/'.
test("list partition names") {
  withBasicCatalog { catalog =>
    val expectedPartitionNames = Seq("a=1/b=2", "a=3/b=4")
    assert(catalog.listPartitionNames(TableIdentifier("tbl2", Some("db2"))) ==
      expectedPartitionNames)
    // List partition names without explicitly specifying database
    catalog.setCurrentDatabase("db2")
    assert(catalog.listPartitionNames(TableIdentifier("tbl2")) == expectedPartitionNames)
  }
}

// A partial spec acts as a filter: only partitions matching the given
// column values are returned.
test("list partition names with partial partition spec") {
  withBasicCatalog { catalog =>
    assert(
      catalog.listPartitionNames(TableIdentifier("tbl2", Some("db2")), Some(Map("a" -> "1"))) ==
        Seq("a=1/b=2"))
  }
}

// Partial specs must still be a subset of the table's partition columns and
// must not contain empty values.
test("list partition names with invalid partial partition spec") {
  withBasicCatalog { catalog =>
    var e = intercept[AnalysisException] {
      catalog.listPartitionNames(TableIdentifier("tbl2", Some("db2")),
        Some(partWithMoreColumns.spec))
    }
    assert(e.getMessage.contains("Partition spec is invalid. The spec (a, b, c) must be " +
      "contained within the partition spec (a, b) defined in table '`db2`.`tbl2`'"))
    e = intercept[AnalysisException] {
      catalog.listPartitionNames(TableIdentifier("tbl2", Some("db2")),
        Some(partWithUnknownColumns.spec))
    }
    assert(e.getMessage.contains("Partition spec is invalid. The spec (a, unknown) must be " +
      "contained within the partition spec (a, b) defined in table '`db2`.`tbl2`'"))
    e = intercept[AnalysisException] {
      catalog.listPartitionNames(TableIdentifier("tbl2", Some("db2")),
        Some(partWithEmptyValue.spec))
    }
    assert(e.getMessage.contains("Partition spec is invalid. The spec ([a=3, b=]) contains an " +
      "empty partition column value"))
  }
}

// Listing full partition metadata; compared via catalogPartitionsEqual, which
// ignores catalog-populated fields such as location and DDL timestamps.
test("list partitions") {
  withBasicCatalog { catalog =>
    assert(catalogPartitionsEqual(
      catalog.listPartitions(TableIdentifier("tbl2", Some("db2"))), part1, part2))
    // List partitions without explicitly specifying database
    catalog.setCurrentDatabase("db2")
    assert(catalogPartitionsEqual(catalog.listPartitions(TableIdentifier("tbl2")), part1, part2))
  }
}

test("list partitions with partial partition spec") {
  withBasicCatalog { catalog =>
    assert(catalogPartitionsEqual(
      catalog.listPartitions(TableIdentifier("tbl2", Some("db2")), Some(Map("a" -> "1"))), part1))
  }
}

// Same partial-spec validation rules as listPartitionNames.
test("list partitions with invalid partial partition spec") {
  withBasicCatalog { catalog =>
    var e = intercept[AnalysisException] {
      catalog.listPartitions(TableIdentifier("tbl2", Some("db2")), Some(partWithMoreColumns.spec))
    }
    assert(e.getMessage.contains("Partition spec is invalid. The spec (a, b, c) must be " +
      "contained within the partition spec (a, b) defined in table '`db2`.`tbl2`'"))
    e = intercept[AnalysisException] {
      catalog.listPartitions(TableIdentifier("tbl2", Some("db2")),
        Some(partWithUnknownColumns.spec))
    }
    assert(e.getMessage.contains("Partition spec is invalid. The spec (a, unknown) must be " +
      "contained within the partition spec (a, b) defined in table '`db2`.`tbl2`'"))
    e = intercept[AnalysisException] {
      catalog.listPartitions(TableIdentifier("tbl2", Some("db2")), Some(partWithEmptyValue.spec))
    }
    assert(e.getMessage.contains("Partition spec is invalid. The spec ([a=3, b=]) contains an " +
      "empty partition column value"))
  }
}

// Missing database/table must surface as the dedicated NoSuch* exceptions.
test("list partitions when database/table does not exist") {
  withBasicCatalog { catalog =>
    intercept[NoSuchDatabaseException] {
      catalog.listPartitions(TableIdentifier("tbl1", Some("unknown_db")))
    }
    intercept[NoSuchTableException] {
      catalog.listPartitions(TableIdentifier("does_not_exist", Some("db2")))
    }
  }
}
/**
 * Compares two sets of partitions while ignoring fields the catalog may
 * populate on its own.
 *
 * ExternalCatalog may set a default location for partitions, so the partition
 * location is ignored when comparing them. For hive serde tables, the hive
 * metastore also sets values (e.g. transient_lastDdlTime) in the table's
 * parameters and storage properties, so those are ignored as well.
 *
 * @param actualParts partitions returned by the catalog under test
 * @param expectedParts the partitions the test expects
 * @return true iff both sides are equal after normalization (order-insensitive)
 */
private def catalogPartitionsEqual(
    actualParts: Seq[CatalogTablePartition],
    expectedParts: CatalogTablePartition*): Boolean = {
  // Blank out every catalog-populated field; used for both sides so the
  // comparison only sees the spec and the remaining user-provided metadata.
  def normalize(parts: Seq[CatalogTablePartition]): Set[CatalogTablePartition] =
    parts.map { p =>
      p.copy(parameters = Map.empty, createTime = -1, lastAccessTime = -1,
        storage = p.storage.copy(
          properties = Map.empty, locationUri = None, serde = None))
    }.toSet
  normalize(actualParts) == normalize(expectedParts)
}
// --------------------------------------------------------------------------
// Functions
// --------------------------------------------------------------------------
// Creating permanent functions should make them visible through the external
// catalog, with and without an explicit database.
test("basic create and list functions") {
  withEmptyCatalog { catalog =>
    catalog.createDatabase(newDb("mydb"), ignoreIfExists = false)
    catalog.createFunction(newFunc("myfunc", Some("mydb")), ignoreIfExists = false)
    assert(catalog.externalCatalog.listFunctions("mydb", "*").toSet == Set("myfunc"))
    // Create function without explicitly specifying database
    catalog.setCurrentDatabase("mydb")
    catalog.createFunction(newFunc("myfunc2"), ignoreIfExists = false)
    assert(catalog.externalCatalog.listFunctions("mydb", "*").toSet == Set("myfunc", "myfunc2"))
  }
}

test("create function when database does not exist") {
  withBasicCatalog { catalog =>
    intercept[NoSuchDatabaseException] {
      catalog.createFunction(
        newFunc("func5", Some("does_not_exist")), ignoreIfExists = false)
    }
  }
}

// ignoreIfExists controls whether re-creating an existing function throws.
test("create function that already exists") {
  withBasicCatalog { catalog =>
    intercept[FunctionAlreadyExistsException] {
      catalog.createFunction(newFunc("func1", Some("db2")), ignoreIfExists = false)
    }
    catalog.createFunction(newFunc("func1", Some("db2")), ignoreIfExists = true)
  }
}

// Temporary functions live in the function registry, not the external
// catalog; overrideIfExists controls replacement semantics.
test("create temp function") {
  withBasicCatalog { catalog =>
    val tempFunc1 = (e: Seq[Expression]) => e.head
    val tempFunc2 = (e: Seq[Expression]) => e.last
    catalog.registerFunction(
      newFunc("temp1", None), overrideIfExists = false, functionBuilder = Some(tempFunc1))
    catalog.registerFunction(
      newFunc("temp2", None), overrideIfExists = false, functionBuilder = Some(tempFunc2))
    val arguments = Seq(Literal(1), Literal(2), Literal(3))
    assert(catalog.lookupFunction(FunctionIdentifier("temp1"), arguments) === Literal(1))
    assert(catalog.lookupFunction(FunctionIdentifier("temp2"), arguments) === Literal(3))
    // Temporary function does not exist.
    intercept[NoSuchFunctionException] {
      catalog.lookupFunction(FunctionIdentifier("temp3"), arguments)
    }
    val tempFunc3 = (e: Seq[Expression]) => Literal(e.size)
    // Temporary function already exists
    val e = intercept[AnalysisException] {
      catalog.registerFunction(
        newFunc("temp1", None), overrideIfExists = false, functionBuilder = Some(tempFunc3))
    }.getMessage
    assert(e.contains("Function temp1 already exists"))
    // Temporary function is overridden
    catalog.registerFunction(
      newFunc("temp1", None), overrideIfExists = true, functionBuilder = Some(tempFunc3))
    assert(
      catalog.lookupFunction(
        FunctionIdentifier("temp1"), arguments) === Literal(arguments.length))
  }
}
// isTemporaryFunction: true only for registry-backed temp functions; false
// for nonexistent, permanent, built-in, and hive functions.
test("isTemporaryFunction") {
  withBasicCatalog { catalog =>
    // Returns false when the function does not exist
    assert(!catalog.isTemporaryFunction(FunctionIdentifier("temp1")))
    val tempFunc1 = (e: Seq[Expression]) => e.head
    catalog.registerFunction(
      newFunc("temp1", None), overrideIfExists = false, functionBuilder = Some(tempFunc1))
    // Returns true when the function is temporary
    assert(catalog.isTemporaryFunction(FunctionIdentifier("temp1")))
    // Returns false when the function is permanent
    assert(catalog.externalCatalog.listFunctions("db2", "*").toSet == Set("func1"))
    assert(!catalog.isTemporaryFunction(FunctionIdentifier("func1", Some("db2"))))
    assert(!catalog.isTemporaryFunction(FunctionIdentifier("db2.func1")))
    catalog.setCurrentDatabase("db2")
    assert(!catalog.isTemporaryFunction(FunctionIdentifier("func1")))
    // Returns false when the function is built-in or hive
    assert(FunctionRegistry.builtin.functionExists(FunctionIdentifier("sum")))
    assert(!catalog.isTemporaryFunction(FunctionIdentifier("sum")))
    assert(!catalog.isTemporaryFunction(FunctionIdentifier("histogram_numeric")))
  }
}

// isRegisteredFunction: reflects registry membership only; createFunction
// (a permanent function) must not count as registered.
test("isRegisteredFunction") {
  withBasicCatalog { catalog =>
    // Returns false when the function does not register
    assert(!catalog.isRegisteredFunction(FunctionIdentifier("temp1")))
    // Returns true when the function does register
    val tempFunc1 = (e: Seq[Expression]) => e.head
    catalog.registerFunction(newFunc("iff", None), overrideIfExists = false,
      functionBuilder = Some(tempFunc1))
    assert(catalog.isRegisteredFunction(FunctionIdentifier("iff")))
    // Returns false when using the createFunction
    catalog.createFunction(newFunc("sum", Some("db2")), ignoreIfExists = false)
    assert(!catalog.isRegisteredFunction(FunctionIdentifier("sum")))
    assert(!catalog.isRegisteredFunction(FunctionIdentifier("sum", Some("db2"))))
  }
}

// isPersistentFunction: the mirror image — true only for catalog-backed
// functions, and only when resolved with a proper database qualifier.
test("isPersistentFunction") {
  withBasicCatalog { catalog =>
    // Returns false when the function does not register
    assert(!catalog.isPersistentFunction(FunctionIdentifier("temp2")))
    // Returns false when the function does register
    val tempFunc2 = (e: Seq[Expression]) => e.head
    catalog.registerFunction(newFunc("iff", None), overrideIfExists = false,
      functionBuilder = Some(tempFunc2))
    assert(!catalog.isPersistentFunction(FunctionIdentifier("iff")))
    // Return true when using the createFunction
    catalog.createFunction(newFunc("sum", Some("db2")), ignoreIfExists = false)
    assert(catalog.isPersistentFunction(FunctionIdentifier("sum", Some("db2"))))
    // "db2.sum" here is a single unqualified name, not a qualified identifier.
    assert(!catalog.isPersistentFunction(FunctionIdentifier("db2.sum")))
  }
}
// Dropping a permanent function removes it from the external catalog, with
// and without an explicit database.
test("drop function") {
  withBasicCatalog { catalog =>
    assert(catalog.externalCatalog.listFunctions("db2", "*").toSet == Set("func1"))
    catalog.dropFunction(
      FunctionIdentifier("func1", Some("db2")), ignoreIfNotExists = false)
    assert(catalog.externalCatalog.listFunctions("db2", "*").isEmpty)
    // Drop function without explicitly specifying database
    catalog.setCurrentDatabase("db2")
    catalog.createFunction(newFunc("func2", Some("db2")), ignoreIfExists = false)
    assert(catalog.externalCatalog.listFunctions("db2", "*").toSet == Set("func2"))
    catalog.dropFunction(FunctionIdentifier("func2"), ignoreIfNotExists = false)
    assert(catalog.externalCatalog.listFunctions("db2", "*").isEmpty)
  }
}

// ignoreIfNotExists controls whether dropping a missing function throws.
test("drop function when database/function does not exist") {
  withBasicCatalog { catalog =>
    intercept[NoSuchDatabaseException] {
      catalog.dropFunction(
        FunctionIdentifier("something", Some("unknown_db")), ignoreIfNotExists = false)
    }
    intercept[NoSuchPermanentFunctionException] {
      catalog.dropFunction(FunctionIdentifier("does_not_exist"), ignoreIfNotExists = false)
    }
    catalog.dropFunction(FunctionIdentifier("does_not_exist"), ignoreIfNotExists = true)
  }
}

// Temp functions are dropped via dropTempFunction; a second drop throws
// unless ignoreIfNotExists is set.
test("drop temp function") {
  withBasicCatalog { catalog =>
    val tempFunc = (e: Seq[Expression]) => e.head
    catalog.registerFunction(
      newFunc("func1", None), overrideIfExists = false, functionBuilder = Some(tempFunc))
    val arguments = Seq(Literal(1), Literal(2), Literal(3))
    assert(catalog.lookupFunction(FunctionIdentifier("func1"), arguments) === Literal(1))
    catalog.dropTempFunction("func1", ignoreIfNotExists = false)
    intercept[NoSuchFunctionException] {
      catalog.lookupFunction(FunctionIdentifier("func1"), arguments)
    }
    intercept[NoSuchTempFunctionException] {
      catalog.dropTempFunction("func1", ignoreIfNotExists = false)
    }
    catalog.dropTempFunction("func1", ignoreIfNotExists = true)
  }
}

// getFunctionMetadata resolves the database from the identifier or, when
// absent, from the current database.
test("get function") {
  withBasicCatalog { catalog =>
    val expected =
      CatalogFunction(FunctionIdentifier("func1", Some("db2")), funcClass,
        Seq.empty[FunctionResource])
    assert(catalog.getFunctionMetadata(FunctionIdentifier("func1", Some("db2"))) == expected)
    // Get function without explicitly specifying database
    catalog.setCurrentDatabase("db2")
    assert(catalog.getFunctionMetadata(FunctionIdentifier("func1")) == expected)
  }
}

test("get function when database/function does not exist") {
  withBasicCatalog { catalog =>
    intercept[NoSuchDatabaseException] {
      catalog.getFunctionMetadata(FunctionIdentifier("func1", Some("unknown_db")))
    }
    intercept[NoSuchFunctionException] {
      catalog.getFunctionMetadata(FunctionIdentifier("does_not_exist", Some("db2")))
    }
  }
}

// lookupFunction resolves and invokes a registered temp function; after the
// function is dropped, lookup must fail.
test("lookup temp function") {
  withBasicCatalog { catalog =>
    val tempFunc1 = (e: Seq[Expression]) => e.head
    catalog.registerFunction(
      newFunc("func1", None), overrideIfExists = false, functionBuilder = Some(tempFunc1))
    assert(catalog.lookupFunction(
      FunctionIdentifier("func1"), Seq(Literal(1), Literal(2), Literal(3))) == Literal(1))
    catalog.dropTempFunction("func1", ignoreIfNotExists = false)
    intercept[NoSuchFunctionException] {
      catalog.lookupFunction(FunctionIdentifier("func1"), Seq(Literal(1), Literal(2), Literal(3)))
    }
  }
}
// listFunctions returns temp functions (unqualified identifiers) plus the
// database's permanent functions (qualified), filtered by the given pattern.
test("list functions") {
  withBasicCatalog { catalog =>
    val funcMeta1 = newFunc("func1", None)
    val funcMeta2 = newFunc("yes_me", None)
    val tempFunc1 = (e: Seq[Expression]) => e.head
    val tempFunc2 = (e: Seq[Expression]) => e.last
    catalog.createFunction(newFunc("func2", Some("db2")), ignoreIfExists = false)
    catalog.createFunction(newFunc("not_me", Some("db2")), ignoreIfExists = false)
    catalog.registerFunction(
      funcMeta1, overrideIfExists = false, functionBuilder = Some(tempFunc1))
    catalog.registerFunction(
      funcMeta2, overrideIfExists = false, functionBuilder = Some(tempFunc2))
    // db1 has no permanent functions, so only the temp ones are visible.
    assert(catalog.listFunctions("db1", "*").map(_._1).toSet ==
      Set(FunctionIdentifier("func1"),
        FunctionIdentifier("yes_me")))
    assert(catalog.listFunctions("db2", "*").map(_._1).toSet ==
      Set(FunctionIdentifier("func1"),
        FunctionIdentifier("yes_me"),
        FunctionIdentifier("func1", Some("db2")),
        FunctionIdentifier("func2", Some("db2")),
        FunctionIdentifier("not_me", Some("db2"))))
    // The pattern applies to both temp and permanent functions.
    assert(catalog.listFunctions("db2", "func*").map(_._1).toSet ==
      Set(FunctionIdentifier("func1"),
        FunctionIdentifier("func1", Some("db2")),
        FunctionIdentifier("func2", Some("db2"))))
  }
}

test("list functions when database does not exist") {
  withBasicCatalog { catalog =>
    intercept[NoSuchDatabaseException] {
      catalog.listFunctions("unknown_db", "func*")
    }
  }
}
// copyStateTo must copy temp views into the clone, after which the clone and
// the original evolve independently.
test("copy SessionCatalog state - temp views") {
  withEmptyCatalog { original =>
    val tempTable1 = Range(1, 10, 1, 10)
    original.createTempView("copytest1", tempTable1, overrideIfExists = false)
    // check if tables copied over
    val clone = new SessionCatalog(original.externalCatalog)
    original.copyStateTo(clone)
    assert(original ne clone)
    assert(clone.getTempView("copytest1") == Some(tempTable1))
    // check if clone and original independent
    clone.dropTable(TableIdentifier("copytest1"), ignoreIfNotExists = false, purge = false)
    assert(original.getTempView("copytest1") == Some(tempTable1))
    val tempTable2 = Range(1, 20, 2, 10)
    original.createTempView("copytest2", tempTable2, overrideIfExists = false)
    assert(clone.getTempView("copytest2").isEmpty)
  }
}

// The current database is part of the copied state and is likewise
// independent after the copy.
test("copy SessionCatalog state - current db") {
  withEmptyCatalog { original =>
    val db1 = "db1"
    val db2 = "db2"
    val db3 = "db3"
    original.externalCatalog.createDatabase(newDb(db1), ignoreIfExists = true)
    original.externalCatalog.createDatabase(newDb(db2), ignoreIfExists = true)
    original.externalCatalog.createDatabase(newDb(db3), ignoreIfExists = true)
    original.setCurrentDatabase(db1)
    // check if current db copied over
    val clone = new SessionCatalog(original.externalCatalog)
    original.copyStateTo(clone)
    assert(original ne clone)
    assert(clone.getCurrentDatabase == db1)
    // check if clone and original independent
    clone.setCurrentDatabase(db2)
    assert(original.getCurrentDatabase == db1)
    original.setCurrentDatabase(db3)
    assert(clone.getCurrentDatabase == db2)
  }
}
// Regression test: resolving an undefined function must not first force
// resolution of the (also undefined) relation it is applied to.
test("SPARK-19737: detect undefined functions without triggering relation resolution") {
  import org.apache.spark.sql.catalyst.dsl.plans._

  Seq(true, false) foreach { caseSensitive =>
    val conf = new SQLConf().copy(SQLConf.CASE_SENSITIVE -> caseSensitive)
    val catalog = new SessionCatalog(newBasicCatalog(), new SimpleFunctionRegistry, conf)
    catalog.setCurrentDatabase("db1")
    try {
      val analyzer = new Analyzer(catalog, conf)
      // The analyzer should report the undefined function rather than the undefined table first.
      val cause = intercept[AnalysisException] {
        analyzer.execute(
          UnresolvedRelation(TableIdentifier("undefined_table")).select(
            UnresolvedFunction("undefined_fn", Nil, isDistinct = false)
          )
        )
      }
      assert(cause.getMessage.contains("Undefined function: 'undefined_fn'"))
      // SPARK-21318: the error message should contain the current database name
      assert(cause.getMessage.contains("db1"))
    } finally {
      catalog.reset()
    }
  }
}

// Regression test: a failed function lookup must preserve its root cause.
test("SPARK-24544: test print actual failure cause when look up function failed") {
  withBasicCatalog { catalog =>
    val cause = intercept[NoSuchFunctionException] {
      catalog.failFunctionLookup(FunctionIdentifier("failureFunc"),
        Some(new Exception("Actual error")))
    }
    // The full stack trace will be printed, but `cause.getMessage` has been
    // overridden in `AnalysisException`, so we check the root cause's
    // exception message instead.
    assert(cause.cause.get.getMessage.contains("Actual error"))
  }
}

// With METADATA_CACHE_TTL_SECONDS set, cached table relations must expire
// after the configured TTL.
test("expire table relation cache if TTL is configured") {
  case class TestCommand() extends Command

  val conf = new SQLConf()
  conf.setConf(StaticSQLConf.METADATA_CACHE_TTL_SECONDS, 1L)

  withConfAndEmptyCatalog(conf) { catalog =>
    val table = QualifiedTableName(catalog.getCurrentDatabase, "test")

    // First, make sure the test table is not cached.
    assert(catalog.getCachedTable(table) === null)
    catalog.cacheTable(table, TestCommand())
    assert(catalog.getCachedTable(table) !== null)
    // Wait until the cache expiration.
    eventually(timeout(3.seconds)) {
      // And the cache is gone.
      assert(catalog.getCachedTable(table) === null)
    }
  }
}
}
| apache-2.0 |
peridotperiod/isis | mothballed/component/viewer/bdd/common/src/main/java/org/apache/isis/viewer/bdd/common/fixtures/DebugObjectStorePeer.java | 1592 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.isis.viewer.bdd.common.fixtures;
import org.apache.isis.core.commons.debug.DebugString;
import org.apache.isis.core.runtime.system.persistence.ObjectStore;
import org.apache.isis.viewer.bdd.common.AliasRegistry;
import org.apache.isis.viewer.bdd.common.CellBinding;
public class DebugObjectStorePeer extends AbstractFixturePeer {
public DebugObjectStorePeer(final AliasRegistry aliasesRegistry, final CellBinding... cellBindings) {
super(aliasesRegistry, cellBindings);
}
public String debugObjectStore() {
final ObjectStore objectStore = getObjectStore();
final DebugString debug = new DebugString();
objectStore.debugData(debug);
return debug.toString().replaceAll("\n", "<br>");
}
}
| apache-2.0 |
itaiin/arrow | java/vector/src/main/java/org/apache/arrow/vector/types/pojo/DictionaryEncoding.java | 2271 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.arrow.vector.types.pojo;
import java.util.Objects;
import org.apache.arrow.vector.types.pojo.ArrowType.Int;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonGetter;
import com.fasterxml.jackson.annotation.JsonProperty;
public class DictionaryEncoding {
private final long id;
private final boolean ordered;
private final Int indexType;
@JsonCreator
public DictionaryEncoding(
@JsonProperty("id") long id,
@JsonProperty("isOrdered") boolean ordered,
@JsonProperty("indexType") Int indexType) {
this.id = id;
this.ordered = ordered;
this.indexType = indexType == null ? new Int(32, true) : indexType;
}
public long getId() {
return id;
}
@JsonGetter("isOrdered")
public boolean isOrdered() {
return ordered;
}
public Int getIndexType() {
return indexType;
}
@Override
public String toString() {
return "DictionaryEncoding[id=" + id + ",ordered=" + ordered + ",indexType=" + indexType + "]";
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
} else if (o == null || getClass() != o.getClass()) {
return false;
}
DictionaryEncoding that = (DictionaryEncoding) o;
return id == that.id && ordered == that.ordered && Objects.equals(indexType, that.indexType);
}
@Override
public int hashCode() {
return Objects.hash(id, ordered, indexType);
}
}
| apache-2.0 |
aveshagarwal/kubernetes | staging/src/k8s.io/apiserver/pkg/server/options/server_run_options.go | 8863 | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"net"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apiserver/pkg/server"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"github.com/spf13/pflag"
)
// ServerRunOptions contains the options while running a generic api server.
type ServerRunOptions struct {
	// AdvertiseAddress is the IP address on which to advertise the apiserver
	// to members of the cluster; it must be reachable by the rest of the cluster.
	AdvertiseAddress net.IP

	// CorsAllowedOriginList lists allowed CORS origins (regular expressions
	// are supported); an empty list disables CORS.
	CorsAllowedOriginList []string
	// ExternalHost is the hostname used when generating externalized URLs.
	ExternalHost string
	// MaxRequestsInFlight caps concurrent non-mutating requests; zero means no limit.
	MaxRequestsInFlight int
	// MaxMutatingRequestsInFlight caps concurrent mutating requests; zero means no limit.
	MaxMutatingRequestsInFlight int
	// RequestTimeout is the default duration a handler must keep a request
	// open before timing it out.
	RequestTimeout time.Duration
	// LivezGracePeriod is copied to server.Config.LivezGracePeriod; presumably
	// the grace period for the livez health endpoint — confirm in server package.
	LivezGracePeriod time.Duration
	// MinRequestTimeout is a lower bound (in seconds) on request timeouts;
	// NOTE(review): exact semantics defined by server.Config — confirm there.
	MinRequestTimeout int
	// ShutdownDelayDuration is copied to server.Config.ShutdownDelayDuration;
	// the delay applied before shutting the server down.
	ShutdownDelayDuration time.Duration

	// We intentionally did not add a flag for this option. Users of the
	// apiserver library can wire it to a flag.
	JSONPatchMaxCopyBytes int64
	// The limit on the request body size that would be accepted and
	// decoded in a write request. 0 means no limit.
	// We intentionally did not add a flag for this option. Users of the
	// apiserver library can wire it to a flag.
	MaxRequestBodyBytes int64
	// TargetRAMMB is the memory limit for the apiserver in MB, used to size caches.
	TargetRAMMB int
	// EnableInflightQuotaHandler toggles the inflight-quota request handler.
	EnableInflightQuotaHandler bool
}
// NewServerRunOptions returns a ServerRunOptions populated with the generic
// server defaults taken from server.NewConfig.
func NewServerRunOptions() *ServerRunOptions {
	defaults := server.NewConfig(serializer.CodecFactory{})
	return &ServerRunOptions{
		MaxRequestsInFlight:         defaults.MaxRequestsInFlight,
		MaxMutatingRequestsInFlight: defaults.MaxMutatingRequestsInFlight,
		RequestTimeout:              defaults.RequestTimeout,
		LivezGracePeriod:            defaults.LivezGracePeriod,
		MinRequestTimeout:           defaults.MinRequestTimeout,
		ShutdownDelayDuration:       defaults.ShutdownDelayDuration,
		JSONPatchMaxCopyBytes:       defaults.JSONPatchMaxCopyBytes,
		MaxRequestBodyBytes:         defaults.MaxRequestBodyBytes,
		EnableInflightQuotaHandler:  true,
	}
}
// ApplyTo copies the run options onto the given server.Config and returns an
// error if any option cannot be applied (currently always nil).
func (s *ServerRunOptions) ApplyTo(c *server.Config) error {
	c.CorsAllowedOriginList = s.CorsAllowedOriginList
	c.ExternalAddress = s.ExternalHost
	c.MaxRequestsInFlight = s.MaxRequestsInFlight
	c.MaxMutatingRequestsInFlight = s.MaxMutatingRequestsInFlight
	c.LivezGracePeriod = s.LivezGracePeriod
	c.RequestTimeout = s.RequestTimeout
	c.MinRequestTimeout = s.MinRequestTimeout
	c.ShutdownDelayDuration = s.ShutdownDelayDuration
	c.JSONPatchMaxCopyBytes = s.JSONPatchMaxCopyBytes
	c.MaxRequestBodyBytes = s.MaxRequestBodyBytes
	c.PublicAddress = s.AdvertiseAddress

	return nil
}
// DefaultAdvertiseAddress sets the field AdvertiseAddress if unset. The field
// will be set based on the SecureServingOptions. A nil secure option is a
// no-op; an error is returned only when no suitable address can be derived.
func (s *ServerRunOptions) DefaultAdvertiseAddress(secure *SecureServingOptions) error {
	if secure == nil {
		return nil
	}

	if s.AdvertiseAddress == nil || s.AdvertiseAddress.IsUnspecified() {
		hostIP, err := secure.DefaultExternalAddress()
		if err != nil {
			// Error strings are lowercase, unpunctuated, and wrap the cause
			// per Go conventions (the original read "address.error='%v'.").
			return fmt.Errorf("unable to find suitable network address: %v; "+
				"try to set the AdvertiseAddress directly or provide a valid BindAddress to fix this", err)
		}
		s.AdvertiseAddress = hostIP
	}

	return nil
}
// Validate checks validation of ServerRunOptions, returning one error per
// option that is set to a (disallowed) negative value. An empty slice means
// the options are valid.
func (s *ServerRunOptions) Validate() []error {
	errors := []error{}
	if s.TargetRAMMB < 0 {
		errors = append(errors, fmt.Errorf("--target-ram-mb can not be negative value"))
	}

	if s.LivezGracePeriod < 0 {
		errors = append(errors, fmt.Errorf("--livez-grace-period can not be a negative value"))
	}

	if s.MaxRequestsInFlight < 0 {
		errors = append(errors, fmt.Errorf("--max-requests-inflight can not be negative value"))
	}

	if s.MaxMutatingRequestsInFlight < 0 {
		errors = append(errors, fmt.Errorf("--max-mutating-requests-inflight can not be negative value"))
	}

	// time.Duration is a signed integer nanosecond count, so it can be
	// compared against zero directly (the .Nanoseconds() detour is redundant).
	if s.RequestTimeout < 0 {
		errors = append(errors, fmt.Errorf("--request-timeout can not be negative value"))
	}

	if s.MinRequestTimeout < 0 {
		errors = append(errors, fmt.Errorf("--min-request-timeout can not be negative value"))
	}

	if s.ShutdownDelayDuration < 0 {
		errors = append(errors, fmt.Errorf("--shutdown-delay-duration can not be negative value"))
	}

	if s.JSONPatchMaxCopyBytes < 0 {
		errors = append(errors, fmt.Errorf("--json-patch-max-copy-bytes can not be negative value"))
	}

	if s.MaxRequestBodyBytes < 0 {
		errors = append(errors, fmt.Errorf("--max-resource-write-bytes can not be negative value"))
	}

	return errors
}
// AddUniversalFlags adds flags for a specific APIServer to the specified FlagSet
func (s *ServerRunOptions) AddUniversalFlags(fs *pflag.FlagSet) {
	// Note: the weird ""+ in below lines seems to be the only way to get gofmt to
	// arrange these text blocks sensibly. Grrr.

	// Addressing / CORS / sizing flags bound directly to fields on s.
	fs.IPVar(&s.AdvertiseAddress, "advertise-address", s.AdvertiseAddress, ""+
		"The IP address on which to advertise the apiserver to members of the cluster. This "+
		"address must be reachable by the rest of the cluster. If blank, the --bind-address "+
		"will be used. If --bind-address is unspecified, the host's default interface will "+
		"be used.")

	fs.StringSliceVar(&s.CorsAllowedOriginList, "cors-allowed-origins", s.CorsAllowedOriginList, ""+
		"List of allowed origins for CORS, comma separated.  An allowed origin can be a regular "+
		"expression to support subdomain matching. If this list is empty CORS will not be enabled.")

	fs.IntVar(&s.TargetRAMMB, "target-ram-mb", s.TargetRAMMB,
		"Memory limit for apiserver in MB (used to configure sizes of caches, etc.)")

	fs.StringVar(&s.ExternalHost, "external-hostname", s.ExternalHost,
		"The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs or OpenID Discovery).")

	// Deprecated flag, kept so existing command lines still parse. The value
	// is read into a local and intentionally never stored on s.
	deprecatedMasterServiceNamespace := metav1.NamespaceDefault
	fs.StringVar(&deprecatedMasterServiceNamespace, "master-service-namespace", deprecatedMasterServiceNamespace, ""+
		"DEPRECATED: the namespace from which the Kubernetes master services should be injected into pods.")

	// Request concurrency and timeout limits; all are validated to be
	// non-negative by Validate().
	fs.IntVar(&s.MaxRequestsInFlight, "max-requests-inflight", s.MaxRequestsInFlight, ""+
		"The maximum number of non-mutating requests in flight at a given time. When the server exceeds this, "+
		"it rejects requests. Zero for no limit.")

	fs.IntVar(&s.MaxMutatingRequestsInFlight, "max-mutating-requests-inflight", s.MaxMutatingRequestsInFlight, ""+
		"The maximum number of mutating requests in flight at a given time. When the server exceeds this, "+
		"it rejects requests. Zero for no limit.")

	fs.DurationVar(&s.RequestTimeout, "request-timeout", s.RequestTimeout, ""+
		"An optional field indicating the duration a handler must keep a request open before timing "+
		"it out. This is the default request timeout for requests but may be overridden by flags such as "+
		"--min-request-timeout for specific types of requests.")

	fs.DurationVar(&s.LivezGracePeriod, "livez-grace-period", s.LivezGracePeriod, ""+
		"This option represents the maximum amount of time it should take for apiserver to complete its startup sequence "+
		"and become live. From apiserver's start time to when this amount of time has elapsed, /livez will assume "+
		"that unfinished post-start hooks will complete successfully and therefore return true.")

	fs.IntVar(&s.MinRequestTimeout, "min-request-timeout", s.MinRequestTimeout, ""+
		"An optional field indicating the minimum number of seconds a handler must keep "+
		"a request open before timing it out. Currently only honored by the watch request "+
		"handler, which picks a randomized value above this number as the connection timeout, "+
		"to spread out load.")

	fs.BoolVar(&s.EnableInflightQuotaHandler, "enable-inflight-quota-handler", s.EnableInflightQuotaHandler, ""+
		"If true and the APIPriorityAndFairness feature gate is enabled, replace the max-in-flight handler with an enhanced one that queues and dispatches with priority and fairness")

	fs.DurationVar(&s.ShutdownDelayDuration, "shutdown-delay-duration", s.ShutdownDelayDuration, ""+
		"Time to delay the termination. During that time the server keeps serving requests normally and /healthz "+
		"returns success, but /readyz immediately returns failure. Graceful termination starts after this delay "+
		"has elapsed. This can be used to allow load balancer to stop sending traffic to this server.")

	// Finally expose the (mutable) feature-gate flag on the same FlagSet.
	utilfeature.DefaultMutableFeatureGate.AddFlag(fs)
}
| apache-2.0 |
bodaodev-billydai/felix | shell/commands/src/main/java/org/apache/felix/karaf/shell/commands/CatAction.java | 3728 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.felix.karaf.shell.commands;
import java.io.IOException;
import java.io.BufferedReader;
import java.io.InputStream;
import java.io.FileReader;
import java.io.File;
import java.io.InputStreamReader;
import java.net.URL;
import java.net.URI;
import java.net.MalformedURLException;
import java.util.List;
import org.apache.felix.karaf.shell.console.OsgiCommandSupport;
import org.apache.felix.gogo.commands.Argument;
import org.apache.felix.gogo.commands.Option;
import org.apache.felix.gogo.commands.Command;
/**
 * Concatenate and print files and/or URLs.
 *
 * @version $Rev: 593392 $ $Date: 2007-11-09 03:14:15 +0100 (Fri, 09 Nov 2007) $
 */
@Command(scope = "shell", name = "cat", description = "Displays the content of a file or url")
public class CatAction extends OsgiCommandSupport {

    /** When set via -n, each printed line is prefixed with its 1-based line number. */
    @Option(name = "-n", aliases = {}, description = "The number the output lines, starting at 1.", required = false, multiValued = false)
    private boolean displayLineNumbers;

    /** Sources to print: file paths, URLs, or "-" for STDIN. */
    @Argument(index = 0, name = "paths or urls", description = "A list of file paths or urls to display separated by whitespaces (use - for STDIN)", required = true, multiValued = true)
    private List<String> paths;

    /**
     * Prints every requested source to System.out in argument order.
     *
     * @return always null; all output goes to the console
     * @throws Exception if a source cannot be opened or read
     */
    protected Object doExecute() throws Exception {
        //
        // Support "-" if length is one, and read from io.in
        // This will help test command pipelines.
        //
        if (paths.size() == 1 && "-".equals(paths.get(0))) {
            log.info("Printing STDIN");
            cat(new BufferedReader(new InputStreamReader(System.in)));
        }
        else {
            for (String filename : paths) {
                BufferedReader reader;

                // First try a URL
                try {
                    URL url = new URL(filename);
                    log.info("Printing URL: " + url);
                    reader = new BufferedReader(new InputStreamReader(url.openStream()));
                }
                catch (MalformedURLException ignore) {
                    // Then try a file (comment typo fixed: was "They try a file").
                    // NOTE(review): FileReader/InputStreamReader use the platform
                    // default charset here - confirm whether UTF-8 should be forced.
                    File file = new File(filename);
                    log.info("Printing file: " + file);
                    reader = new BufferedReader(new FileReader(file));
                }

                try {
                    cat(reader);
                }
                finally {
                    // Close quietly so a close failure cannot mask an exception
                    // thrown by cat() or hide output already produced.
                    try {
                        reader.close();
                    } catch (IOException e) {
                        // Ignore
                    }
                }
            }
        }

        return null;
    }

    /**
     * Copies the reader to System.out line by line, optionally numbering lines.
     *
     * @param reader the source to print; not closed by this method
     * @throws IOException if reading fails
     */
    private void cat(final BufferedReader reader) throws IOException
    {
        String line;
        int lineno = 1;

        while ((line = reader.readLine()) != null) {
            if (displayLineNumbers) {
                // printf is the idiomatic, equivalent form of
                // print(String.format(...)); same format string.
                System.out.printf("%6d ", lineno++);
            }
            System.out.println(line);
        }
    }
}
| apache-2.0 |
dangrozasv/google-api-ads-ruby | ads_common/lib/ads_common/auth/client_login_handler.rb | 6800 | # Encoding: utf-8
#
# Authors:: api.dklimkin@gmail.com (Danial Klimkin)
#
# Copyright:: Copyright 2010, Google Inc. All Rights Reserved.
#
# License:: Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This module manages ClientLogin authentication. It either uses a user-provided
# auth token, or automatically connects to Google's ClientLogin service and
# generates an auth token that can be used to login to an API.
require 'cgi'
require 'ads_common/http'
require 'ads_common/auth/base_handler'
require 'ads_common/errors'
module AdsCommon
  module Auth

    # Authentication handler for Google's ClientLogin protocol. It either
    # adopts a caller-supplied auth token or exchanges an email / password
    # pair for a fresh token against the ClientLogin endpoint.
    class ClientLoginHandler < AdsCommon::Auth::BaseHandler
      ACCOUNT_TYPE = 'GOOGLE'
      AUTH_PATH = '/accounts/ClientLogin'
      AUTH_PREFIX = 'GoogleLogin auth='
      CAPTCHA_PATH = '/accounts/'

      # Remembers the auth server and API service name used whenever a
      # token has to be generated.
      def initialize(config, auth_server, service_name)
        super(config)
        @server = auth_server
        @service_name = service_name
      end

      # Reacts to credential changes: a new email or password invalidates
      # any cached token, while a directly supplied auth token replaces it.
      def property_changed(prop, value)
        @token = nil if [:email, :password].include?(prop)
        @token = create_token_from_string(value) if :auth_token.eql?(prop)
      end

      # Handles ClientLogin-specific errors by logging and re-raising.
      def handle_error(error)
        # TODO: Add support for automatically regenerating auth tokens when
        # they expire.
        get_logger().error(error)
        raise error
      end

      # Produces the value for the HTTP Authorization header.
      def auth_string(credentials)
        [AUTH_PREFIX, get_token(credentials)].join
      end

      private

      # Checks that the credentials hash is usable: either an auth token is
      # present, or both email and password are.
      #
      # Raises:
      # - AdsCommon::Errors::AuthError if validation fails
      #
      def validate_credentials(credentials)
        if credentials.nil?
          raise AdsCommon::Errors::AuthError, 'No credentials supplied.'
        end

        unless credentials[:auth_token].nil?
          # A supplied token takes precedence; warn when login data is also
          # present so the user knows which one is in effect.
          if credentials[:email] && credentials[:password]
            get_logger().warn('Both auth_token and login credentials present' +
                ', preferring auth_token.')
          end
          return
        end

        if credentials[:email].nil?
          raise AdsCommon::Errors::AuthError,
              'Email address not included in credentials.'
        end
        if credentials[:password].nil?
          raise AdsCommon::Errors::AuthError,
              'Password not included in credentials.'
        end
      end

      # Returns an auth token for the account: the caller-supplied one when
      # present, otherwise a freshly generated one.
      #
      # Raises:
      # - AdsCommon::Errors::AuthError if authentication fails
      #
      def create_token(credentials)
        if credentials.include?(:auth_token)
          create_token_from_string(credentials[:auth_token])
        else
          generate_token(credentials)
        end
      end

      # A caller-supplied token string is already usable as-is.
      def create_token_from_string(token_string)
        token_string
      end

      # Assembles the URL-encoded POST body for the ClientLogin request,
      # including the optional captcha fields when both are present.
      def get_login_data(credentials)
        escaped_email = CGI.escape(credentials[:email])
        escaped_password = CGI.escape(credentials[:password])
        data = "accountType=#{ACCOUNT_TYPE}&Email=#{escaped_email}" +
            "&Passwd=#{escaped_password}&service=#{@service_name}"
        if credentials[:logintoken] && credentials[:logincaptcha]
          data += "&logintoken=#{CGI.escape(credentials[:logintoken])}" +
              "&logincaptcha=#{CGI.escape(credentials[:logincaptcha])}"
        end
        data
      end

      # Calls the ClientLogin endpoint and extracts the auth token from the
      # response; every failure is delegated to handle_login_error, which
      # always raises.
      def generate_token(credentials)
        validate_credentials(credentials)

        auth_url = @server + AUTH_PATH
        post_body = get_login_data(credentials)
        headers = {'Content-Type' => 'application/x-www-form-urlencoded'}
        response = AdsCommon::Http.post_response(auth_url, post_body,
            @config, headers)
        results = parse_token_text(response.body)

        if response.code == 200 && results.include?('Auth')
          results['Auth']
        else
          handle_login_error(credentials, response, results)
        end
      end

      # Raises the error matching the failed login response: a dedicated
      # captcha error when the server demands one, a generic AuthError with
      # all available detail otherwise.
      def handle_login_error(credentials, response, results)
        if 'CaptchaRequired'.eql?(results['Error'])
          captcha_url = @server + CAPTCHA_PATH + results['CaptchaUrl']
          raise AdsCommon::Errors::CaptchaRequiredError.new(results['Error'],
              results['CaptchaToken'], captcha_url, results['Url'])
        end

        error_message = "ClientLogin failed for email '%s': HTTP code %d." %
            [credentials[:email], response.code]
        error_str = results['Error'] || response.body
        error_message += " Error: %s." % error_str if error_str
        error_message += " Info: %s." % results['Info'] if
            results.include?('Info')
        raise AdsCommon::Errors::AuthError.new(error_message, error_str,
            results['Info'])
      end

      # Turns the newline-separated "key=value" response body into a Hash.
      # Values may themselves contain '=' characters, so everything after
      # the first '=' is kept verbatim.
      def parse_token_text(text)
        text.split("\n").each_with_object({}) do |line, result|
          key, *values = line.split('=')
          result[key] = values.join('=')
        end
      end
    end
  end
end
| apache-2.0 |
jeremyepling/TypeScript | tests/baselines/reference/subtypingWithNumericIndexer.js | 2509 | //// [subtypingWithNumericIndexer.ts]
// NOTE(review): This is a generated TypeScript compiler baseline (test input
// followed by the expected emitted JavaScript). Baselines are regenerated by
// the test harness; do not hand-edit the code below.
// Derived type indexer must be subtype of base type indexer
interface Base { foo: string; }
interface Derived extends Base { bar: string; }
interface Derived2 extends Derived { baz: string; }

class A {
    [x: number]: Base;
}

class B extends A {
    [x: number]: Derived; // ok
}

class B2 extends A {
    [x: number]: Derived2; // ok
}

module Generics {
    class A<T extends Base> {
        [x: number]: T;
    }

    class B extends A<Base> {
        [x: number]: Derived; // ok
    }

    class B2 extends A<Base> {
        [x: number]: Derived2; // ok
    }

    class B3<T extends Base> extends A<T> {
        [x: number]: Derived; // error, BUG?
    }

    class B4<T extends Base> extends A<T> {
        [x: number]: Derived2; // error, BUG?
    }
}

//// [subtypingWithNumericIndexer.js]
// Derived type indexer must be subtype of base type indexer
var __extends = (this && this.__extends) || function (d, b) {
    for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p];
    function __() { this.constructor = d; }
    d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
};
var A = (function () {
    function A() {
    }
    return A;
}());
var B = (function (_super) {
    __extends(B, _super);
    function B() {
        return _super.apply(this, arguments) || this;
    }
    return B;
}(A));
var B2 = (function (_super) {
    __extends(B2, _super);
    function B2() {
        return _super.apply(this, arguments) || this;
    }
    return B2;
}(A));
var Generics;
(function (Generics) {
    var A = (function () {
        function A() {
        }
        return A;
    }());
    var B = (function (_super) {
        __extends(B, _super);
        function B() {
            return _super.apply(this, arguments) || this;
        }
        return B;
    }(A));
    var B2 = (function (_super) {
        __extends(B2, _super);
        function B2() {
            return _super.apply(this, arguments) || this;
        }
        return B2;
    }(A));
    var B3 = (function (_super) {
        __extends(B3, _super);
        function B3() {
            return _super.apply(this, arguments) || this;
        }
        return B3;
    }(A));
    var B4 = (function (_super) {
        __extends(B4, _super);
        function B4() {
            return _super.apply(this, arguments) || this;
        }
        return B4;
    }(A));
})(Generics || (Generics = {}));