code stringlengths 3 1.05M | repo_name stringlengths 4 116 | path stringlengths 4 991 | language stringclasses 9 values | license stringclasses 15 values | size int32 3 1.05M |
|---|---|---|---|---|---|
import { IServerGroup } from '@spinnaker/core';
import { ICloudFoundrySpace, ICloudFoundryDroplet } from 'cloudfoundry/domain';
import { ICloudFoundryInstance } from 'cloudfoundry/domain/ICloudFoundryInstance';
export interface ICloudFoundryServerGroup extends IServerGroup {
appsManagerUri?: string;
diskQuota: number;
healthCheckType: string;
healthCheckHttpEndpoint: string;
state: 'STARTED' | 'STOPPED';
instances: ICloudFoundryInstance[];
metricsUri?: string;
memory: number;
space: ICloudFoundrySpace;
droplet?: ICloudFoundryDroplet;
serviceInstances: ICloudFoundryServiceInstance[];
env: ICloudFoundryEnvVar[];
ciBuild: ICloudFoundryBuildInfo;
appArtifact: ICloudFoundryArtifactInfo;
pipelineId: string;
}
/** A Cloud Foundry service instance bound to a server group. */
export interface ICloudFoundryServiceInstance {
  /** Instance name. */
  name: string;
  /** Service plan the instance was created from. */
  plan: string;
  /** Name of the service offering. */
  service: string;
  /** Optional user-supplied tags. */
  tags?: string[];
}
/** A single application environment variable as a key/value pair. */
export interface ICloudFoundryEnvVar {
  key: string;
  value: string;
}
/** Metadata about the CI job that built the deployed artifact. */
export interface ICloudFoundryBuildInfo {
  jobName: string;
  jobNumber: string;
  /** Link back to the CI job. */
  jobUrl: string;
}
/** Metadata about the artifact deployed by a server group. */
export interface ICloudFoundryArtifactInfo {
  name: string;
  version: string;
  /** Link to the artifact. */
  url: string;
}
| sgarlick987/deck | app/scripts/modules/cloudfoundry/src/domain/ICloudFoundryServerGroup.ts | TypeScript | apache-2.0 | 1,141 |
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"bytes"
"fmt"
"os"
"path"
"strings"
"github.com/ghodss/yaml"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
api "k8s.io/client-go/pkg/api/v1"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/images"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
)
// Static pod definitions in golang form are included below so that `kubeadm init` can get going.
const (
	// DefaultClusterName is passed to the controller-manager via --cluster-name.
	DefaultClusterName = "kubernetes"
	// DefaultCloudConfigPath is the host path probed for an optional cloud-provider config file.
	DefaultCloudConfigPath = "/etc/kubernetes/cloud-config"

	// Short component identifiers (used e.g. to select a base command).
	etcd              = "etcd"
	apiServer         = "apiserver"
	controllerManager = "controller-manager"
	scheduler         = "scheduler"
	proxy             = "proxy"

	// Full component names, used as pod/container names and manifest file names.
	kubeAPIServer         = "kube-apiserver"
	kubeControllerManager = "kube-controller-manager"
	kubeScheduler         = "kube-scheduler"
	kubeProxy             = "kube-proxy"
)
// WriteStaticPodManifests builds manifest objects based on user provided configuration and then dumps it to disk
// where kubelet will pick and schedule them.
func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error {
	// Every control plane pod mounts the Kubernetes config directory; host
	// certificate directories are added only when needed on this host.
	volumes := []api.Volume{k8sVolume(cfg)}
	volumeMounts := []api.VolumeMount{k8sVolumeMount()}

	if isCertsVolumeMountNeeded() {
		volumes = append(volumes, certsVolume(cfg))
		volumeMounts = append(volumeMounts, certsVolumeMount())
	}

	if isPkiVolumeMountNeeded() {
		volumes = append(volumes, pkiVolume(cfg))
		volumeMounts = append(volumeMounts, pkiVolumeMount())
	}

	// Prepare static pod specs
	staticPodSpecs := map[string]api.Pod{
		kubeAPIServer: componentPod(api.Container{
			Name:          kubeAPIServer,
			Image:         images.GetCoreImage(images.KubeAPIServerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
			Command:       getAPIServerCommand(cfg, false),
			VolumeMounts:  volumeMounts,
			LivenessProbe: componentProbe(8080, "/healthz"),
			Resources:     componentResources("250m"),
			Env:           getProxyEnvVars(),
		}, volumes...),
		kubeControllerManager: componentPod(api.Container{
			Name:          kubeControllerManager,
			Image:         images.GetCoreImage(images.KubeControllerManagerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
			Command:       getControllerManagerCommand(cfg, false),
			VolumeMounts:  volumeMounts,
			LivenessProbe: componentProbe(10252, "/healthz"),
			Resources:     componentResources("200m"),
			Env:           getProxyEnvVars(),
		}, volumes...),
		// Note: the scheduler pod gets no volumes/mounts — it only talks to the apiserver.
		kubeScheduler: componentPod(api.Container{
			Name:          kubeScheduler,
			Image:         images.GetCoreImage(images.KubeSchedulerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
			Command:       getSchedulerCommand(cfg, false),
			LivenessProbe: componentProbe(10251, "/healthz"),
			Resources:     componentResources("100m"),
			Env:           getProxyEnvVars(),
		}),
	}

	// Add etcd static pod spec only if external etcd is not configured
	if len(cfg.Etcd.Endpoints) == 0 {
		etcdPod := componentPod(api.Container{
			Name: etcd,
			Command: []string{
				"etcd",
				"--listen-client-urls=http://127.0.0.1:2379",
				"--advertise-client-urls=http://127.0.0.1:2379",
				"--data-dir=/var/lib/etcd",
			},
			VolumeMounts:  []api.VolumeMount{certsVolumeMount(), etcdVolumeMount(), k8sVolumeMount()},
			Image:         images.GetCoreImage(images.KubeEtcdImage, cfg, kubeadmapi.GlobalEnvParams.EtcdImage),
			LivenessProbe: componentProbe(2379, "/health"),
			Resources:     componentResources("200m"),
		}, certsVolume(cfg), etcdVolume(cfg), k8sVolume(cfg))

		etcdPod.Spec.SecurityContext = &api.PodSecurityContext{
			SELinuxOptions: &api.SELinuxOptions{
				// Unconfine the etcd container so it can write to /var/lib/etcd with SELinux enforcing:
				Type: "spc_t",
			},
		}

		staticPodSpecs[etcd] = etcdPod
	}

	// Serialize each spec to YAML and write it into the manifests directory,
	// failing fast with a contextual error on the first problem.
	manifestsPath := path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "manifests")
	if err := os.MkdirAll(manifestsPath, 0700); err != nil {
		return fmt.Errorf("failed to create directory %q [%v]", manifestsPath, err)
	}
	for name, spec := range staticPodSpecs {
		filename := path.Join(manifestsPath, name+".yaml")
		serialized, err := yaml.Marshal(spec)
		if err != nil {
			return fmt.Errorf("failed to marshal manifest for %q to YAML [%v]", name, err)
		}
		if err := cmdutil.DumpReaderToFile(bytes.NewReader(serialized), filename); err != nil {
			return fmt.Errorf("failed to create static pod manifest file for %q (%q) [%v]", name, filename, err)
		}
	}
	return nil
}
// etcdVolume exposes a path on the host in order to guarantee data survival during reboot.
func etcdVolume(cfg *kubeadmapi.MasterConfiguration) api.Volume {
	hostPath := &api.HostPathVolumeSource{Path: kubeadmapi.GlobalEnvParams.HostEtcdPath}
	vol := api.Volume{Name: "etcd"}
	vol.VolumeSource = api.VolumeSource{HostPath: hostPath}
	return vol
}
// etcdVolumeMount mounts the etcd data volume at etcd's default data directory.
func etcdVolumeMount() api.VolumeMount {
	mount := api.VolumeMount{Name: "etcd"}
	mount.MountPath = "/var/lib/etcd"
	return mount
}
// isCertsVolumeMountNeeded reports whether host SSL certificates must be mounted
// into the control plane containers.
func isCertsVolumeMountNeeded() bool {
	// Always return true for now. We may add conditional logic here for images which do not require host mounting /etc/ssl
	// hyperkube for example already has valid ca-certificates installed
	return true
}
// certsVolume exposes host SSL certificates to pod containers.
func certsVolume(cfg *kubeadmapi.MasterConfiguration) api.Volume {
	// TODO(phase1+) make path configurable
	source := api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/etc/ssl/certs"}}
	return api.Volume{Name: "certs", VolumeSource: source}
}
// certsVolumeMount mirrors certsVolume: the host certificate bundle appears at
// the same path inside the container.
func certsVolumeMount() api.VolumeMount {
	mount := api.VolumeMount{Name: "certs"}
	mount.MountPath = "/etc/ssl/certs"
	return mount
}
// isPkiVolumeMountNeeded reports whether /etc/pki exists on this host and
// therefore must be mounted alongside /etc/ssl/certs.
func isPkiVolumeMountNeeded() bool {
	// On some systems where we host-mount /etc/ssl/certs, it is also required to
	// mount /etc/pki, because files in /etc/ssl/certs may be symlinks into /etc/pki/.
	_, err := os.Stat("/etc/pki")
	return err == nil
}
// pkiVolume exposes the host's /etc/pki directory to pod containers.
func pkiVolume(cfg *kubeadmapi.MasterConfiguration) api.Volume {
	// TODO(phase1+) make path configurable
	pkiSource := &api.HostPathVolumeSource{Path: "/etc/pki"}
	return api.Volume{
		Name:         "pki",
		VolumeSource: api.VolumeSource{HostPath: pkiSource},
	}
}
// pkiVolumeMount mounts /etc/pki at its host location (see isPkiVolumeMountNeeded).
func pkiVolumeMount() api.VolumeMount {
	mount := api.VolumeMount{Name: "pki"}
	mount.MountPath = "/etc/pki"
	return mount
}
// flockVolume exposes the host's /var/lock directory so self-hosted components
// can serialize startup via flock (see the self-hosted command builders).
func flockVolume() api.Volume {
	lockSource := &api.HostPathVolumeSource{Path: "/var/lock"}
	return api.Volume{
		Name:         "var-lock",
		VolumeSource: api.VolumeSource{HostPath: lockSource},
	}
}
// flockVolumeMount mounts /var/lock writable so lock files can be created there.
func flockVolumeMount() api.VolumeMount {
	mount := api.VolumeMount{Name: "var-lock", MountPath: "/var/lock"}
	mount.ReadOnly = false
	return mount
}
// k8sVolume exposes the Kubernetes configuration directory on the host to the pod.
func k8sVolume(cfg *kubeadmapi.MasterConfiguration) api.Volume {
	k8sDir := &api.HostPathVolumeSource{Path: kubeadmapi.GlobalEnvParams.KubernetesDir}
	return api.Volume{
		Name:         "k8s",
		VolumeSource: api.VolumeSource{HostPath: k8sDir},
	}
}
// k8sVolumeMount mounts the Kubernetes config directory read-only at /etc/kubernetes/.
func k8sVolumeMount() api.VolumeMount {
	mount := api.VolumeMount{Name: "k8s", ReadOnly: true}
	mount.MountPath = "/etc/kubernetes/"
	return mount
}
// componentResources builds a resource-requirements spec with only a CPU request.
func componentResources(cpu string) api.ResourceRequirements {
	requests := api.ResourceList{
		api.ResourceName(api.ResourceCPU): resource.MustParse(cpu),
	}
	return api.ResourceRequirements{Requests: requests}
}
// componentProbe builds an HTTP liveness probe against 127.0.0.1:port at the given path.
func componentProbe(port int, path string) *api.Probe {
	httpGet := &api.HTTPGetAction{
		Host: "127.0.0.1",
		Path: path,
		Port: intstr.FromInt(port),
	}
	probe := &api.Probe{Handler: api.Handler{HTTPGet: httpGet}}
	probe.InitialDelaySeconds = 15
	probe.TimeoutSeconds = 15
	probe.FailureThreshold = 8
	return probe
}
// componentPod wraps a single container (plus any volumes) in a host-networked
// static pod in the kube-system namespace, named after the container.
func componentPod(container api.Container, volumes ...api.Volume) api.Pod {
	meta := metav1.ObjectMeta{
		Name:      container.Name,
		Namespace: "kube-system",
		Labels:    map[string]string{"component": container.Name, "tier": "control-plane"},
	}
	spec := api.PodSpec{
		Containers:  []api.Container{container},
		HostNetwork: true,
		Volumes:     volumes,
	}
	return api.Pod{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"},
		ObjectMeta: meta,
		Spec:       spec,
	}
}
// getComponentBaseCommand returns the base invocation for a control plane component:
// an argument to the single /hyperkube binary when one is configured, otherwise the
// component's own kube-* binary.
func getComponentBaseCommand(component string) []string {
	if kubeadmapi.GlobalEnvParams.HyperkubeImage == "" {
		return []string{"kube-" + component}
	}
	return []string{"/hyperkube", component}
}
// getCertFilePath resolves a certificate name to its path under the host PKI directory.
func getCertFilePath(certName string) string {
	pkiDir := kubeadmapi.GlobalEnvParams.HostPKIPath
	return path.Join(pkiDir, certName)
}
// getAPIServerCommand builds the command line for the kube-apiserver static pod.
// When selfHosted is true, the command is prefixed with an exclusive flock so
// only one apiserver instance starts at a time.
func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool) []string {
	var command []string

	// self-hosted apiserver needs to wait on a lock
	if selfHosted {
		command = []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/api-server.lock"}
	}

	// Append to `command` (not to a fresh slice) so the flock prefix above is
	// preserved in the self-hosted case; the previous code appended onto
	// getComponentBaseCommand's result and silently discarded the prefix.
	command = append(command, getComponentBaseCommand(apiServer)...)
	command = append(command,
		"--insecure-bind-address=127.0.0.1",
		"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
		"--service-cluster-ip-range="+cfg.Networking.ServiceSubnet,
		"--service-account-key-file="+getCertFilePath(kubeadmconstants.ServiceAccountPublicKeyName),
		"--client-ca-file="+getCertFilePath(kubeadmconstants.CACertName),
		"--tls-cert-file="+getCertFilePath(kubeadmconstants.APIServerCertName),
		"--tls-private-key-file="+getCertFilePath(kubeadmconstants.APIServerKeyName),
		"--kubelet-client-certificate="+getCertFilePath(kubeadmconstants.APIServerKubeletClientCertName),
		"--kubelet-client-key="+getCertFilePath(kubeadmconstants.APIServerKubeletClientKeyName),
		"--token-auth-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/tokens.csv",
		fmt.Sprintf("--secure-port=%d", cfg.API.Port),
		"--allow-privileged",
		"--storage-backend=etcd3",
		"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
		// add options to configure the front proxy. Without the generated client cert, this will never be useable
		// so add it unconditionally with recommended values
		"--requestheader-username-headers=X-Remote-User",
		"--requestheader-group-headers=X-Remote-Group",
		"--requestheader-extra-headers-prefix=X-Remote-Extra-",
		"--requestheader-client-ca-file="+getCertFilePath(kubeadmconstants.FrontProxyCACertName),
		"--requestheader-allowed-names=front-proxy-client",
	)

	// Authorization mode plus any mode-specific policy/config files.
	if cfg.AuthorizationMode != "" {
		command = append(command, "--authorization-mode="+cfg.AuthorizationMode)
		switch cfg.AuthorizationMode {
		case kubeadmconstants.AuthzModeABAC:
			command = append(command, "--authorization-policy-file="+path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeadmconstants.AuthorizationPolicyFile))
		case kubeadmconstants.AuthzModeWebhook:
			command = append(command, "--authorization-webhook-config-file="+path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeadmconstants.AuthorizationWebhookConfigFile))
		}
	}

	// Use first address we are given
	if len(cfg.API.AdvertiseAddresses) > 0 {
		if selfHosted {
			// The self-hosted pod resolves its own IP at runtime via the POD_IP env var
			// (see getSelfHostedAPIServerEnv).
			command = append(command, "--advertise-address=$(POD_IP)")
		} else {
			command = append(command, fmt.Sprintf("--advertise-address=%s", cfg.API.AdvertiseAddresses[0]))
		}
	}

	// Check if the user decided to use an external etcd cluster
	if len(cfg.Etcd.Endpoints) > 0 {
		command = append(command, fmt.Sprintf("--etcd-servers=%s", strings.Join(cfg.Etcd.Endpoints, ",")))
	} else {
		command = append(command, "--etcd-servers=http://127.0.0.1:2379")
	}

	// Is etcd secured?
	if cfg.Etcd.CAFile != "" {
		command = append(command, fmt.Sprintf("--etcd-cafile=%s", cfg.Etcd.CAFile))
	}
	if cfg.Etcd.CertFile != "" && cfg.Etcd.KeyFile != "" {
		etcdClientFileArg := fmt.Sprintf("--etcd-certfile=%s", cfg.Etcd.CertFile)
		etcdKeyFileArg := fmt.Sprintf("--etcd-keyfile=%s", cfg.Etcd.KeyFile)
		command = append(command, etcdClientFileArg, etcdKeyFileArg)
	}

	if cfg.CloudProvider != "" {
		command = append(command, "--cloud-provider="+cfg.CloudProvider)

		// Only append the --cloud-config option if there's a such file
		if _, err := os.Stat(DefaultCloudConfigPath); err == nil {
			command = append(command, "--cloud-config="+DefaultCloudConfigPath)
		}
	}

	return command
}
// getControllerManagerCommand builds the command line for the kube-controller-manager
// static pod. When selfHosted is true, the command is prefixed with an exclusive
// flock so only one controller-manager instance starts at a time.
func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool) []string {
	var command []string

	// self-hosted controller-manager needs to wait on a lock
	if selfHosted {
		command = []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/controller-manager.lock"}
	}

	// Append to `command` (not to a fresh slice) so the flock prefix above is
	// preserved in the self-hosted case; the previous code appended onto
	// getComponentBaseCommand's result and silently discarded the prefix.
	command = append(command, getComponentBaseCommand(controllerManager)...)
	command = append(command,
		"--address=127.0.0.1",
		"--leader-elect",
		"--master=127.0.0.1:8080",
		"--cluster-name="+DefaultClusterName,
		"--root-ca-file="+getCertFilePath(kubeadmconstants.CACertName),
		"--service-account-private-key-file="+getCertFilePath(kubeadmconstants.ServiceAccountPrivateKeyName),
		"--cluster-signing-cert-file="+getCertFilePath(kubeadmconstants.CACertName),
		"--cluster-signing-key-file="+getCertFilePath(kubeadmconstants.CAKeyName),
		"--insecure-experimental-approve-all-kubelet-csrs-for-group="+kubeadmconstants.CSVTokenBootstrapGroup,
	)

	if cfg.CloudProvider != "" {
		command = append(command, "--cloud-provider="+cfg.CloudProvider)

		// Only append the --cloud-config option if there's a such file
		if _, err := os.Stat(DefaultCloudConfigPath); err == nil {
			command = append(command, "--cloud-config="+DefaultCloudConfigPath)
		}
	}

	// Let the controller-manager allocate Node CIDRs for the Pod network.
	// Each node will get a subspace of the address CIDR provided with --pod-network-cidr.
	if cfg.Networking.PodSubnet != "" {
		command = append(command, "--allocate-node-cidrs=true", "--cluster-cidr="+cfg.Networking.PodSubnet)
	}

	return command
}
// getSchedulerCommand builds the command line for the kube-scheduler static pod.
// When selfHosted is true, the command is prefixed with an exclusive flock so
// only one scheduler instance starts at a time.
func getSchedulerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool) []string {
	var command []string

	// self-hosted scheduler needs to wait on its own lock. The previous code used
	// /var/lock/api-server.lock (a copy-paste from getAPIServerCommand), which would
	// have made the scheduler contend with the apiserver for the same lock.
	if selfHosted {
		command = []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/kube-scheduler.lock"}
	}

	// Append to `command` (not to a fresh slice) so the flock prefix above is
	// preserved in the self-hosted case; the previous code appended onto
	// getComponentBaseCommand's result and silently discarded the prefix.
	command = append(command, getComponentBaseCommand(scheduler)...)
	command = append(command,
		"--address=127.0.0.1",
		"--leader-elect",
		"--master=127.0.0.1:8080",
	)

	return command
}
// getProxyEnvVars collects every non-empty *_proxy/*_PROXY variable from the
// current process environment so it can be forwarded to the control plane pods.
func getProxyEnvVars() []api.EnvVar {
	proxyEnvs := []api.EnvVar{}
	for _, kv := range os.Environ() {
		parts := strings.SplitN(kv, "=", 2)
		if len(parts) != 2 {
			// malformed environment variable, skip it.
			continue
		}
		name, value := parts[0], parts[1]
		if value == "" || !strings.HasSuffix(strings.ToLower(name), "_proxy") {
			continue
		}
		proxyEnvs = append(proxyEnvs, api.EnvVar{Name: name, Value: value})
	}
	return proxyEnvs
}
// getSelfHostedAPIServerEnv returns the proxy env vars plus POD_IP, which is
// populated from the downward API so the self-hosted apiserver can advertise
// its own address (see --advertise-address=$(POD_IP) in getAPIServerCommand).
func getSelfHostedAPIServerEnv() []api.EnvVar {
	fieldRef := &api.ObjectFieldSelector{FieldPath: "status.podIP"}
	podIPEnvVar := api.EnvVar{
		Name:      "POD_IP",
		ValueFrom: &api.EnvVarSource{FieldRef: fieldRef},
	}
	return append(getProxyEnvVars(), podIPEnvVar)
}
| shashidharatd/kubernetes | cmd/kubeadm/app/master/manifests.go | GO | apache-2.0 | 15,281 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.tamaya;
import static org.assertj.core.api.Assertions.fail;

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
public class InvocationRecorder {
private List<Invocation> invocations = new ArrayList<>();
private Object record(Object instance, Method method, Object[] args) throws Throwable {
Invocation invocation = new Invocation(method.getName(), args);
this.invocations.add(invocation);
return method.invoke(instance, args);
}
public <T> T createProxy(Object instance, Class<T>... types) {
return (T) Proxy.newProxyInstance(
getClass().getClassLoader(), types,
(proxy,method,params) -> this.record(instance, method, params));
}
public void recordMethodCall(Object... params) {
Exception e = new Exception();
String methodName = e.getStackTrace()[1].getMethodName();
invocations.add(new Invocation(methodName, params));
}
public static final class Invocation{
public String methodName;
public Object[] params;
public Invocation(String methodName, Object[] params) {
this.methodName = methodName;
this.params = params;
}
}
public List<Invocation> getInvocations(){
return invocations;
}
public void assertInvocation(String method, Object... params){
for(Invocation invocation:invocations){
if(invocation.methodName.equals(method)){
if(Arrays.equals(invocation.params, params)){
return;
}
}
}
fail("No such invocation: "+method + Arrays.toString(params));
}
}
| apache/incubator-tamaya | code/api/src/test/java/org/apache/tamaya/InvocationRecorder.java | Java | apache-2.0 | 2,591 |
// Package pb provides underlying implementation for qy and mp
package pb
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
)
// AccessTokenResponse stores the normal result of access token fetching.
type AccessTokenResponse struct {
	AccessToken string  `json:"access_token"` // the token itself
	ExpiresIn   float64 `json:"expires_in"`   // token lifetime in seconds, per the "expires_in" field
}
// AccessTokenErrorResponse stores the error result of access token fetching.
// The untagged fields are matched case-insensitively against the "errcode"/"errmsg"
// JSON keys by encoding/json.
// NOTE(review): if the remote API returns errcode as a JSON number (as WeChat does),
// unmarshalling into a string field will fail — confirm against the live API and
// consider changing Errcode to an int (kept as-is here to preserve the exported type).
type AccessTokenErrorResponse struct {
	Errcode string
	Errmsg  string
}
// FetchAccessToken provides underlying access token fetching implementation.
// It GETs requestLine, decodes the JSON response, and returns the access token
// and its lifetime in seconds. An error payload or a non-200 status yields a
// non-nil error.
func FetchAccessToken(requestLine string) (string, float64, error) {
	resp, err := http.Get(requestLine)
	if err != nil {
		return "", 0.0, err
	}
	// Close the body on every path. The previous code leaked the body on non-200
	// responses and returned a nil error there, which callers saw as success.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", 0.0, fmt.Errorf("access token request failed with status %q", resp.Status)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", 0.0, err
	}
	// Json Decoding: a successful payload contains an "access_token" field.
	if bytes.Contains(body, []byte("access_token")) {
		atr := AccessTokenResponse{}
		if err = json.Unmarshal(body, &atr); err != nil {
			return "", 0.0, err
		}
		return atr.AccessToken, atr.ExpiresIn, nil
	}
	// Otherwise decode the error payload and surface its message.
	ater := AccessTokenErrorResponse{}
	if err = json.Unmarshal(body, &ater); err != nil {
		return "", 0.0, err
	}
	return "", 0.0, fmt.Errorf("%s", ater.Errmsg)
}
| wmydz1/gowechat | pb/accesstoken.go | GO | apache-2.0 | 1,252 |
/*
* Copyright 2016 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.screens.projecteditor.backend.server;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.guvnor.common.services.backend.metadata.MetadataServerSideService;
import org.guvnor.common.services.backend.util.CommentedOptionFactory;
import org.guvnor.common.services.project.backend.server.utils.POMContentHandler;
import org.guvnor.common.services.project.model.GAV;
import org.guvnor.common.services.project.model.MavenRepositoryMetadata;
import org.guvnor.common.services.project.model.MavenRepositorySource;
import org.guvnor.common.services.project.model.POM;
import org.guvnor.common.services.project.model.ProjectRepositories;
import org.guvnor.common.services.project.service.DeploymentMode;
import org.guvnor.common.services.project.service.GAVAlreadyExistsException;
import org.guvnor.common.services.project.service.ProjectRepositoriesService;
import org.guvnor.common.services.project.service.ProjectRepositoryResolver;
import org.guvnor.common.services.shared.metadata.model.Metadata;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.kie.workbench.common.screens.defaulteditor.service.DefaultEditorContent;
import org.kie.workbench.common.screens.defaulteditor.service.DefaultEditorService;
import org.kie.workbench.common.screens.projecteditor.service.PomEditorService;
import org.kie.workbench.common.services.shared.project.KieProject;
import org.kie.workbench.common.services.shared.project.KieProjectService;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import org.uberfire.backend.vfs.Path;
import org.uberfire.io.IOService;
import org.uberfire.java.nio.base.options.CommentedOption;
import org.uberfire.java.nio.file.FileSystem;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
@RunWith(MockitoJUnitRunner.class)
public class PomEditorServiceImplTest {
// Mocked collaborators injected into the service under test.
@Mock
private IOService ioService;

@Mock
private DefaultEditorService defaultEditorService;

@Mock
private MetadataServerSideService metadataService;

@Mock
private CommentedOptionFactory commentedOptionFactory;

@Mock
private KieProjectService projectService;

@Mock
private ProjectRepositoryResolver repositoryResolver;

@Mock
private ProjectRepositoriesService projectRepositoriesService;

// Mocked domain objects shared by the tests.
@Mock
private Path pomPath;

@Mock
private Metadata metaData;

@Mock
private KieProject project;

@Mock
private POM pom;

@Mock
private Path projectRepositoriesPath;

// The service under test; wired in setup().
private PomEditorService service;

// Fixture data.
private String pomPathUri = "default://p0/pom.xml";

private Map<String, Object> attributes = new HashMap<String, Object>();

private DefaultEditorContent content = new DefaultEditorContent();

private POMContentHandler pomContentHandler = new POMContentHandler();

// A pom.xml with GAV groupId:artifactId:0.0.1; the tests vary the project's
// current GAV relative to this to simulate "changed" vs "unchanged" saves.
private String pomXml = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
        "<project xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\" xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n" +
        "<modelVersion>4.0.0</modelVersion>\n" +
        "<groupId>groupId</groupId>\n" +
        "<artifactId>artifactId</artifactId>\n" +
        "<version>0.0.1</version>\n" +
        "<name>name</name>\n" +
        "<description>description</description>\n" +
        "</project>";

private String comment = "comment";
// Disables the git daemon/ssh and the system repo monitor before the class runs.
@BeforeClass
public static void setupSystemProperties() {
    //These are not needed for the tests
    System.setProperty( "org.uberfire.nio.git.daemon.enabled",
                        "false" );
    System.setProperty( "org.uberfire.nio.git.ssh.enabled",
                        "false" );
    System.setProperty( "org.uberfire.sys.repo.monitor.disabled",
                        "true" );
}
// Wires the service with the mocked collaborators and stubs the interactions
// shared by every test.
@Before
public void setup() {
    service = new PomEditorServiceImpl( ioService,
                                        defaultEditorService,
                                        metadataService,
                                        commentedOptionFactory,
                                        projectService,
                                        pomContentHandler,
                                        repositoryResolver,
                                        projectRepositoriesService );

    when( pomPath.toURI() ).thenReturn( pomPathUri );
    when( defaultEditorService.loadContent( pomPath ) ).thenReturn( content );
    when( metadataService.setUpAttributes( eq( pomPath ),
                                           any( Metadata.class ) ) ).thenReturn( attributes );
    when( projectService.resolveProject( pomPath ) ).thenReturn( project );
    when( project.getRepositoriesPath() ).thenReturn( projectRepositoriesPath );
    when( project.getPom() ).thenReturn( pom );
}
// Loading content delegates to the DefaultEditorService stub.
@Test
public void testLoad() {
    final DefaultEditorContent content = service.loadContent( pomPath );

    assertNotNull( content );
    assertEquals( this.content,
                  content );
}
// GAV changed (0.0.1 -> 0.0.2) with no project repositories and no repository
// resolving the artifact: the save validates the GAV and then writes the pom.
@Test
public void testSaveNonClashingGAVChangeToGAV() {
    final Set<ProjectRepositories.ProjectRepository> projectRepositoriesMetadata = new HashSet<ProjectRepositories.ProjectRepository>();
    final ProjectRepositories projectRepositories = new ProjectRepositories( projectRepositoriesMetadata );
    when( projectRepositoriesService.load( projectRepositoriesPath ) ).thenReturn( projectRepositories );

    final ArgumentCaptor<MavenRepositoryMetadata> resolvedRepositoriesCaptor = ArgumentCaptor.forClass( MavenRepositoryMetadata.class );
    when( repositoryResolver.getRepositoriesResolvingArtifact( eq( pomXml ),
                                                               resolvedRepositoriesCaptor.capture() ) ).thenReturn( Collections.EMPTY_SET );
    when( pom.getGav() ).thenReturn( new GAV( "groupId",
                                              "artifactId",
                                              "0.0.2" ) );

    service.save( pomPath,
                  pomXml,
                  metaData,
                  comment,
                  DeploymentMode.VALIDATED );

    verify( projectService,
            times( 1 ) ).resolveProject( pomPath );
    verify( projectRepositoriesService,
            times( 1 ) ).load( projectRepositoriesPath );
    verify( repositoryResolver,
            times( 1 ) ).getRepositoriesResolvingArtifact( eq( pomXml ) );

    // No repository metadata was passed to the resolver (no project repositories).
    final List<MavenRepositoryMetadata> resolvedRepositories = resolvedRepositoriesCaptor.getAllValues();
    assertNotNull( resolvedRepositories );
    assertEquals( 0,
                  resolvedRepositories.size() );

    // The pom is written inside a single filesystem batch.
    verify( ioService,
            times( 1 ) ).startBatch( any( FileSystem.class ) );
    verify( ioService,
            times( 1 ) ).write( any( org.uberfire.java.nio.file.Path.class ),
                                eq( pomXml ),
                                eq( attributes ),
                                any( CommentedOption.class ) );
    verify( ioService,
            times( 1 ) ).endBatch();
}
// GAV unchanged (matches the 0.0.1 in pomXml): no repository lookup happens
// and the pom is written directly.
@Test
public void testSaveNonClashingGAVNoChangeToGAV() {
    when( pom.getGav() ).thenReturn( new GAV( "groupId",
                                              "artifactId",
                                              "0.0.1" ) );

    service.save( pomPath,
                  pomXml,
                  metaData,
                  comment,
                  DeploymentMode.VALIDATED );

    verify( projectService,
            times( 1 ) ).resolveProject( pomPath );
    verify( projectRepositoriesService,
            never() ).load( projectRepositoriesPath );
    verify( repositoryResolver,
            never() ).getRepositoriesResolvingArtifact( eq( pomXml ) );

    verify( ioService,
            times( 1 ) ).startBatch( any( FileSystem.class ) );
    verify( ioService,
            times( 1 ) ).write( any( org.uberfire.java.nio.file.Path.class ),
                                eq( pomXml ),
                                eq( attributes ),
                                any( CommentedOption.class ) );
    verify( ioService,
            times( 1 ) ).endBatch();
}
// GAV changed and the project declares one (local) repository: that repository
// is passed to the resolver as a filter; nothing resolves the artifact, so the
// pom is written.
@Test
public void testSaveNonClashingGAVFilteredChangeToGAV() {
    final Set<ProjectRepositories.ProjectRepository> projectRepositoriesMetadata = new HashSet<ProjectRepositories.ProjectRepository>() {{
        add( new ProjectRepositories.ProjectRepository( true,
                                                        new MavenRepositoryMetadata( "local-id",
                                                                                     "local-url",
                                                                                     MavenRepositorySource.LOCAL ) ) );
    }};
    final ProjectRepositories projectRepositories = new ProjectRepositories( projectRepositoriesMetadata );
    when( projectRepositoriesService.load( projectRepositoriesPath ) ).thenReturn( projectRepositories );

    final ArgumentCaptor<MavenRepositoryMetadata> resolvedRepositoriesCaptor = ArgumentCaptor.forClass( MavenRepositoryMetadata.class );
    when( repositoryResolver.getRepositoriesResolvingArtifact( eq( pomXml ),
                                                               resolvedRepositoriesCaptor.capture() ) ).thenReturn( Collections.EMPTY_SET );
    when( pom.getGav() ).thenReturn( new GAV( "groupId",
                                              "artifactId",
                                              "0.0.2" ) );

    service.save( pomPath,
                  pomXml,
                  metaData,
                  comment,
                  DeploymentMode.VALIDATED );

    verify( projectService,
            times( 1 ) ).resolveProject( pomPath );
    verify( projectRepositoriesService,
            times( 1 ) ).load( projectRepositoriesPath );
    verify( repositoryResolver,
            times( 1 ) ).getRepositoriesResolvingArtifact( eq( pomXml ),
                                                           any( MavenRepositoryMetadata.class ) );

    // The project's single repository was used as the resolver filter.
    final List<MavenRepositoryMetadata> resolvedRepositories = resolvedRepositoriesCaptor.getAllValues();
    assertNotNull( resolvedRepositories );
    assertEquals( 1,
                  resolvedRepositories.size() );
    final MavenRepositoryMetadata repositoryMetadata = resolvedRepositories.get( 0 );
    assertEquals( "local-id",
                  repositoryMetadata.getId() );
    assertEquals( "local-url",
                  repositoryMetadata.getUrl() );
    assertEquals( MavenRepositorySource.LOCAL,
                  repositoryMetadata.getSource() );

    verify( ioService,
            times( 1 ) ).startBatch( any( FileSystem.class ) );
    verify( ioService,
            times( 1 ) ).write( any( org.uberfire.java.nio.file.Path.class ),
                                eq( pomXml ),
                                eq( attributes ),
                                any( CommentedOption.class ) );
    verify( ioService,
            times( 1 ) ).endBatch();
}
// GAV unchanged: even with project repositories configured, no resolver
// filtering occurs and the pom is written directly.
@Test
public void testSaveNonClashingGAVFilteredNoChangeToGAV() {
    when( pom.getGav() ).thenReturn( new GAV( "groupId",
                                              "artifactId",
                                              "0.0.1" ) );

    service.save( pomPath,
                  pomXml,
                  metaData,
                  comment,
                  DeploymentMode.VALIDATED );

    verify( projectService,
            times( 1 ) ).resolveProject( pomPath );
    verify( projectRepositoriesService,
            never() ).load( projectRepositoriesPath );
    verify( repositoryResolver,
            never() ).getRepositoriesResolvingArtifact( eq( pomXml ),
                                                        any( MavenRepositoryMetadata.class ) );

    verify( ioService,
            times( 1 ) ).startBatch( any( FileSystem.class ) );
    verify( ioService,
            times( 1 ) ).write( any( org.uberfire.java.nio.file.Path.class ),
                                eq( pomXml ),
                                eq( attributes ),
                                any( CommentedOption.class ) );
    verify( ioService,
            times( 1 ) ).endBatch();
}
// GAV changed and a repository already resolves the artifact: the save must
// throw GAVAlreadyExistsException and nothing may be written.
@Test
public void testSaveClashingGAVChangeToGAV() {
    final Set<ProjectRepositories.ProjectRepository> projectRepositoriesMetadata = new HashSet<ProjectRepositories.ProjectRepository>() {{
        add( new ProjectRepositories.ProjectRepository( true,
                                                        new MavenRepositoryMetadata( "local-id",
                                                                                     "local-url",
                                                                                     MavenRepositorySource.LOCAL ) ) );
    }};
    final ProjectRepositories projectRepositories = new ProjectRepositories( projectRepositoriesMetadata );
    when( projectRepositoriesService.load( projectRepositoriesPath ) ).thenReturn( projectRepositories );

    final Set<MavenRepositoryMetadata> clashingRepositories = new HashSet<MavenRepositoryMetadata>() {{
        add( new MavenRepositoryMetadata( "local-id",
                                          "local-url",
                                          MavenRepositorySource.LOCAL ) );
    }};
    final ArgumentCaptor<MavenRepositoryMetadata> resolvedRepositoriesCaptor = ArgumentCaptor.forClass( MavenRepositoryMetadata.class );
    when( repositoryResolver.getRepositoriesResolvingArtifact( eq( pomXml ),
                                                               resolvedRepositoriesCaptor.capture() ) ).thenReturn( clashingRepositories );
    when( pom.getGav() ).thenReturn( new GAV( "groupId",
                                              "artifactId",
                                              "0.0.2" ) );

    try {
        service.save( pomPath,
                      pomXml,
                      metaData,
                      comment,
                      DeploymentMode.VALIDATED );
    } catch ( GAVAlreadyExistsException e ) {
        // This is expected! We catch here rather than let JUnit handle it with
        // @Test(expected = GAVAlreadyExistsException.class) so we can verify
        // that only the expected methods have been invoked.
    } catch ( Exception e ) {
        fail( e.getMessage() );
    }

    verify( projectService,
            times( 1 ) ).resolveProject( pomPath );
    verify( projectRepositoriesService,
            times( 1 ) ).load( projectRepositoriesPath );
    verify( repositoryResolver,
            times( 1 ) ).getRepositoriesResolvingArtifact( eq( pomXml ),
                                                           any( MavenRepositoryMetadata.class ) );

    final List<MavenRepositoryMetadata> resolvedRepositories = resolvedRepositoriesCaptor.getAllValues();
    assertNotNull( resolvedRepositories );
    assertEquals( 1,
                  resolvedRepositories.size() );
    final MavenRepositoryMetadata repositoryMetadata = resolvedRepositories.get( 0 );
    assertEquals( "local-id",
                  repositoryMetadata.getId() );
    assertEquals( "local-url",
                  repositoryMetadata.getUrl() );
    assertEquals( MavenRepositorySource.LOCAL,
                  repositoryMetadata.getSource() );

    // The clash aborted the save: no batch, no write.
    verify( ioService,
            never() ).startBatch( any( FileSystem.class ) );
    verify( ioService,
            never() ).write( any( org.uberfire.java.nio.file.Path.class ),
                             eq( pomXml ),
                             eq( attributes ),
                             any( CommentedOption.class ) );
    verify( ioService,
            never() ).endBatch();
}
@Test
public void testSaveClashingGAVNoChangeToGAV() {
    // The POM's GAV matches the version already on disk, so saving must not
    // trigger any GAV-clash resolution even in VALIDATED mode.
    when(pom.getGav()).thenReturn(new GAV("groupId", "artifactId", "0.0.1"));

    try {
        service.save(pomPath, pomXml, metaData, comment, DeploymentMode.VALIDATED);
    } catch (GAVAlreadyExistsException e) {
        // Must not be thrown when the GAV is unchanged.
        fail(e.getMessage());
    }

    // The project is resolved, but no repository metadata is loaded and no
    // repository resolution is attempted for an unchanged GAV.
    verify(projectService, times(1)).resolveProject(pomPath);
    verify(projectRepositoriesService, never()).load(projectRepositoriesPath);
    verify(repositoryResolver, never()).getRepositoriesResolvingArtifact(eq(pomXml),
                                                                         any(MavenRepositoryMetadata.class));

    // The save itself proceeds normally: one batched write of the POM content.
    verify(ioService, times(1)).startBatch(any(FileSystem.class));
    verify(ioService, times(1)).write(any(org.uberfire.java.nio.file.Path.class),
                                      eq(pomXml),
                                      eq(attributes),
                                      any(CommentedOption.class));
    verify(ioService, times(1)).endBatch();
}
@Test
public void testSaveClashingGAVForced() {
    // Even though the GAV clashes with a repository entry, FORCED mode must
    // skip validation entirely and write the POM.
    final Set<ProjectRepositories.ProjectRepository> projectRepositoriesMetadata = new HashSet<ProjectRepositories.ProjectRepository>() {{
        add( new ProjectRepositories.ProjectRepository( true,
                                                        new MavenRepositoryMetadata( "local-id",
                                                                                     "local-url",
                                                                                     MavenRepositorySource.LOCAL ) ) );
    }};
    final ProjectRepositories projectRepositories = new ProjectRepositories( projectRepositoriesMetadata );
    when( projectRepositoriesService.load( projectRepositoriesPath ) ).thenReturn( projectRepositories );

    final Set<MavenRepositoryMetadata> clashingRepositories = new HashSet<MavenRepositoryMetadata>() {{
        add( new MavenRepositoryMetadata( "local-id",
                                          "local-url",
                                          MavenRepositorySource.LOCAL ) );
    }};
    when( repositoryResolver.getRepositoriesResolvingArtifact( eq( pomXml ),
                                                               any( MavenRepositoryMetadata.class ) ) ).thenReturn( clashingRepositories );
    when( pom.getGav() ).thenReturn( new GAV( "groupId",
                                              "artifactId",
                                              "0.0.1" ) );

    try {
        service.save( pomPath,
                      pomXml,
                      metaData,
                      comment,
                      DeploymentMode.FORCED );
    } catch ( GAVAlreadyExistsException e ) {
        // FORCED mode must never raise a clash.
        fail( e.getMessage() );
    }

    // No validation machinery is touched in FORCED mode.
    verify( projectService,
            never() ).resolveProject( pomPath );
    // BUGFIX: previously verified load(pomPath), which is never the argument
    // used by the service and so passed vacuously; the repositories are
    // loaded (if at all) with projectRepositoriesPath.
    verify( projectRepositoriesService,
            never() ).load( projectRepositoriesPath );
    verify( repositoryResolver,
            never() ).getRepositoriesResolvingArtifact( eq( pomXml ),
                                                        any( MavenRepositoryMetadata.class ) );

    // The save is performed as a single batched write.
    verify( ioService,
            times( 1 ) ).startBatch( any( FileSystem.class ) );
    verify( ioService,
            times( 1 ) ).write( any( org.uberfire.java.nio.file.Path.class ),
                                eq( pomXml ),
                                eq( attributes ),
                                any( CommentedOption.class ) );
    verify( ioService,
            times( 1 ) ).endBatch();
}
}
| dgutierr/kie-wb-common | kie-wb-common-screens/kie-wb-common-project-editor/kie-wb-common-project-editor-backend/src/test/java/org/kie/workbench/common/screens/projecteditor/backend/server/PomEditorServiceImplTest.java | Java | apache-2.0 | 21,647 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.replication.regionserver;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.RegionAdminServiceCallable;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.client.RetryingCallable;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
import org.apache.hadoop.hbase.wal.WAL.Entry;
import org.apache.hadoop.hbase.wal.WALSplitter.EntryBuffers;
import org.apache.hadoop.hbase.wal.WALSplitter.OutputSink;
import org.apache.hadoop.hbase.wal.WALSplitter.PipelineController;
import org.apache.hadoop.hbase.wal.WALSplitter.RegionEntryBuffer;
import org.apache.hadoop.hbase.wal.WALSplitter.SinkWriter;
import org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint;
import org.apache.hadoop.hbase.replication.WALEntryFilter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.util.StringUtils;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.protobuf.ServiceException;
/**
* A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} endpoint
* which receives the WAL edits from the WAL, and sends the edits to replicas
* of regions.
*/
@InterfaceAudience.Private
public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
private static final Log LOG = LogFactory.getLog(RegionReplicaReplicationEndpoint.class);
private Configuration conf;
private ClusterConnection connection;
// Reuse WALSplitter constructs as a WAL pipe
private PipelineController controller;
private RegionReplicaOutputSink outputSink;
private EntryBuffers entryBuffers;
// Number of writer threads
private int numWriterThreads;
private int operationTimeout;
private ExecutorService pool;
/**
 * Initializes this endpoint: copies the context configuration, forces the WAL
 * cell codec as the RPC codec, and sets up the pipeline controller and entry
 * buffers used to split incoming edits per region.
 *
 * @param context replication endpoint context supplied by the framework
 * @throws IOException if superclass initialization fails
 */
@Override
public void init(Context context) throws IOException {
    super.init(context);
    this.conf = HBaseConfiguration.create(context.getConfiguration());

    // The WAL cell codec is used as the RPC codec so that WAL cells can be
    // shipped to replicas without re-encoding.
    String codecClassName = conf
        .get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
    conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName);

    // Number of writer threads draining the per-region entry buffers.
    this.numWriterThreads = this.conf.getInt(
        "hbase.region.replica.replication.writer.threads", 3);
    controller = new PipelineController();
    // Blocking buffer (default 128MB) that groups edits by region.
    entryBuffers = new EntryBuffers(controller,
        this.conf.getInt("hbase.region.replica.replication.buffersize",
            128*1024*1024));

    // use the regular RPC timeout for replica replication RPC's
    this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
}
/**
 * Starts the endpoint: opens a cluster connection, builds the shared RPC
 * thread pool and the output sink, and starts the sink's writer threads.
 * On connection failure the endpoint is marked failed instead of started.
 */
@Override
protected void doStart() {
    try {
        connection = (ClusterConnection) HConnectionManager.createConnection(ctx.getConfiguration());
        this.pool = getDefaultThreadPool(conf);
        outputSink = new RegionReplicaOutputSink(controller, entryBuffers, connection, pool,
            numWriterThreads, operationTimeout);
        outputSink.startWriterThreads();
        super.doStart();
    } catch (IOException ex) {
        LOG.warn("Received exception while creating connection :" + ex);
        // Signal the service framework that startup failed.
        notifyFailed(ex);
    }
}
/**
 * Stops the endpoint, best-effort: closes the output sink, shuts down the
 * RPC thread pool (waiting up to 10s), and closes the cluster connection.
 * Failures at each step are logged but do not abort the remaining cleanup.
 */
@Override
protected void doStop() {
    if (outputSink != null) {
        try {
            outputSink.finishWritingAndClose();
        } catch (IOException ex) {
            LOG.warn("Got exception while trying to close OutputSink");
            LOG.warn(ex);
        }
    }
    if (this.pool != null) {
        this.pool.shutdownNow();
        try {
            // wait for 10 sec
            boolean shutdown = this.pool.awaitTermination(10000, TimeUnit.MILLISECONDS);
            if (!shutdown) {
                LOG.warn("Failed to shutdown the thread pool after 10 seconds");
            }
        } catch (InterruptedException e) {
            LOG.warn("Got interrupted while waiting for the thread pool to shut down" + e);
        }
    }
    if (connection != null) {
        try {
            connection.close();
        } catch (IOException ex) {
            LOG.warn("Got exception closing connection :" + ex);
        }
    }
    super.doStop();
}
/**
 * Builds the shared daemon thread pool used for replica replay RPCs,
 * mirroring the sizing scheme of Connection's own pool. Core and max sizes
 * come from configuration; a configured value of 0 means "8 x CPU count".
 * Core threads are allowed to time out so an idle endpoint holds no threads.
 */
private ExecutorService getDefaultThreadPool(Configuration conf) {
    // 0 in either setting means "derive from the number of processors".
    final int derivedThreads = Runtime.getRuntime().availableProcessors() * 8;
    int maxThreads = conf.getInt("hbase.region.replica.replication.threads.max", 256);
    if (maxThreads == 0) {
        maxThreads = derivedThreads;
    }
    int coreThreads = conf.getInt("hbase.region.replica.replication.threads.core", 16);
    if (coreThreads == 0) {
        coreThreads = derivedThreads;
    }
    final long keepAliveSeconds =
        conf.getLong("hbase.region.replica.replication.threads.keepalivetime", 60);

    // Bound the queue by max threads times the per-connection task cap.
    final int queueCapacity = maxThreads *
        conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
            HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS);

    final ThreadPoolExecutor executor = new ThreadPoolExecutor(
        coreThreads,
        maxThreads,
        keepAliveSeconds,
        TimeUnit.SECONDS,
        new LinkedBlockingQueue<Runnable>(queueCapacity),
        Threads.newDaemonThreadFactory(this.getClass().toString() + "-rpc-shared-"));
    executor.allowCoreThreadTimeOut(true);
    return executor;
}
/**
 * Accepts a batch of WAL entries from the replication source, feeds them into
 * the per-region entry buffers, and blocks until the output sink has flushed
 * (and thus persisted) everything. Retries the whole batch on IOException
 * while the endpoint is running.
 *
 * @return true once all entries are flushed; false if stopped or interrupted
 */
@Override
public boolean replicate(ReplicateContext replicateContext) {
    /* A note on batching in RegionReplicaReplicationEndpoint (RRRE):
     *
     * RRRE relies on batching from two different mechanisms. The first is the batching from
     * ReplicationSource since RRRE is a ReplicationEndpoint driven by RS. RS reads from a single
     * WAL file filling up a buffer of heap size "replication.source.size.capacity"(64MB) or at most
     * "replication.source.nb.capacity" entries or until it sees the end of file (in live tailing).
     * Then RS passes all the buffered edits in this replicate() call context. RRRE puts the edits
     * to the WALSplitter.EntryBuffers which is a blocking buffer space of up to
     * "hbase.region.replica.replication.buffersize" (128MB) in size. This buffer splits the edits
     * based on regions.
     *
     * There are "hbase.region.replica.replication.writer.threads"(default 3) writer threads which
     * pick largest per-region buffer and send it to the SinkWriter (see RegionReplicaOutputSink).
     * The SinkWriter in this case will send the wal edits to all secondary region replicas in
     * parallel via a retrying rpc call. EntryBuffers guarantees that while a buffer is
     * being written to the sink, another buffer for the same region will not be made available to
     * writers ensuring regions edits are not replayed out of order.
     *
     * The replicate() call won't return until all the buffers are sent and ack'd by the sinks so
     * that the replication can assume all edits are persisted. We may be able to do a better
     * pipelining between the replication thread and output sinks later if it becomes a bottleneck.
     */
    while (this.isRunning()) {
        try {
            // appendEntry may block when the buffer space is full.
            for (Entry entry: replicateContext.getEntries()) {
                entryBuffers.appendEntry(entry);
            }
            outputSink.flush(); // make sure everything is flushed
            return true;
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return false;
        } catch (IOException e) {
            // Log and loop: the whole batch is retried while still running.
            LOG.warn("Received IOException while trying to replicate"
                + StringUtils.stringifyException(e));
        }
    }
    return false;
}
/**
 * Region replicas live in the same cluster as the primary, so this endpoint
 * explicitly allows replicating within its own cluster.
 */
@Override
public boolean canReplicateToSameCluster() {
    return true;
}
/**
 * Returns no scope filter: every WAL entry is replicated to region replicas
 * regardless of replication scope.
 */
@Override
protected WALEntryFilter getScopeWALEntryFilter() {
    // we do not care about scope. We replicate everything.
    return null;
}
/**
 * OutputSink that forwards per-region entry buffers to a
 * {@link RegionReplicaSinkWriter}, which replays them on secondary replicas.
 * Reuses the WALSplitter pipeline machinery as a WAL pipe.
 */
static class RegionReplicaOutputSink extends OutputSink {
    private RegionReplicaSinkWriter sinkWriter;

    public RegionReplicaOutputSink(PipelineController controller, EntryBuffers entryBuffers,
        ClusterConnection connection, ExecutorService pool, int numWriters, int operationTimeout) {
        super(controller, entryBuffers, numWriters);
        this.sinkWriter = new RegionReplicaSinkWriter(this, connection, pool, operationTimeout);
    }

    @Override
    public void append(RegionEntryBuffer buffer) throws IOException {
        List<Entry> entries = buffer.getEntryBuffer();

        // Nothing to replay for empty buffers or edits without cells.
        if (entries.isEmpty() || entries.get(0).getEdit().getCells().isEmpty()) {
            return;
        }

        // The first cell's row is used to locate the region's replicas.
        sinkWriter.append(buffer.getTableName(), buffer.getEncodedRegionName(),
            entries.get(0).getEdit().getCells().get(0).getRow(), entries);
    }

    @Override
    public boolean flush() throws IOException {
        // nothing much to do for now. Wait for the Writer threads to finish up
        // append()'ing the data.
        entryBuffers.waitUntilDrained();
        return super.flush();
    }

    @Override
    public List<Path> finishWritingAndClose() throws IOException {
        finishWriting();
        return null;
    }

    @Override
    public Map<byte[], Long> getOutputCounts() {
        return null; // only used in tests
    }

    @Override
    public int getNumberOfRecoveredRegions() {
        // This sink does not recover regions.
        return 0;
    }

    AtomicLong getSkippedEditsCounter() {
        return skippedEdits;
    }
}
/**
 * SinkWriter that replays a region's WAL entries to all of its secondary
 * replicas in parallel via retrying RPCs. Keeps a short-lived cache of
 * tables known to be disabled or dropped so that edits for them are counted
 * as skipped instead of hammering meta on every batch.
 */
static class RegionReplicaSinkWriter extends SinkWriter {
    RegionReplicaOutputSink sink;
    ClusterConnection connection;
    RpcControllerFactory rpcControllerFactory;
    RpcRetryingCallerFactory rpcRetryingCallerFactory;
    int operationTimeout;
    ExecutorService pool;
    // Tables recently seen as disabled/dropped; entries expire (default 5s).
    Cache<TableName, Boolean> disabledAndDroppedTables;

    public RegionReplicaSinkWriter(RegionReplicaOutputSink sink, ClusterConnection connection,
        ExecutorService pool, int operationTimeout) {
        this.sink = sink;
        this.connection = connection;
        this.operationTimeout = operationTimeout;
        this.rpcRetryingCallerFactory
            = RpcRetryingCallerFactory.instantiate(connection.getConfiguration());
        this.rpcControllerFactory = RpcControllerFactory.instantiate(connection.getConfiguration());
        this.pool = pool;

        int nonExistentTableCacheExpiryMs = connection.getConfiguration()
            .getInt("hbase.region.replica.replication.cache.disabledAndDroppedTables.expiryMs", 5000);
        // A cache for non existing tables that have a default expiry of 5 sec. This means that if the
        // table is created again with the same name, we might miss to replicate for that amount of
        // time. But this cache prevents overloading meta requests for every edit from a deleted file.
        disabledAndDroppedTables = CacheBuilder.newBuilder()
            .expireAfterWrite(nonExistentTableCacheExpiryMs, TimeUnit.MILLISECONDS)
            .initialCapacity(10)
            .maximumSize(1000)
            .build();
    }

    /**
     * Replays {@code entries} (all from one primary region) on every
     * secondary replica of the region containing {@code row}. Entries are
     * counted as skipped when the table is dropped/disabled or the region
     * layout has changed since the edits were written.
     *
     * @param tableName         table the entries belong to
     * @param encodedRegionName encoded name of the originating primary region
     * @param row               a row key used to locate the region's replicas
     * @param entries           WAL entries to replay
     */
    public void append(TableName tableName, byte[] encodedRegionName, byte[] row,
        List<Entry> entries) throws IOException {
        // Fast path: table recently seen as disabled or dropped.
        if (disabledAndDroppedTables.getIfPresent(tableName) != null) {
            sink.getSkippedEditsCounter().incrementAndGet();
            return;
        }

        // get the replicas of the primary region
        RegionLocations locations = null;
        try {
            locations = getRegionLocations(connection, tableName, row, true, 0);

            if (locations == null) {
                throw new HBaseIOException("Cannot locate locations for "
                    + tableName + ", row:" + Bytes.toStringBinary(row));
            }
        } catch (TableNotFoundException e) {
            disabledAndDroppedTables.put(tableName, Boolean.TRUE); // put to cache. Value ignored
            // skip this entry
            sink.getSkippedEditsCounter().addAndGet(entries.size());
            return;
        }

        // Only the primary location exists: no replicas to replay to.
        if (locations.size() == 1) {
            return;
        }

        ArrayList<Future<ReplicateWALEntryResponse>> tasks
            = new ArrayList<Future<ReplicateWALEntryResponse>>(2);

        // check whether we should still replay this entry. If the regions are changed, or the
        // entry is not coming form the primary region, filter it out.
        HRegionLocation primaryLocation = locations.getDefaultRegionLocation();
        if (!Bytes.equals(primaryLocation.getRegionInfo().getEncodedNameAsBytes(),
            encodedRegionName)) {
            sink.getSkippedEditsCounter().addAndGet(entries.size());
            return;
        }

        // All passed entries should belong to one region because it is coming from the EntryBuffers
        // split per region. But the regions might split and merge (unlike log recovery case).
        for (int replicaId = 0; replicaId < locations.size(); replicaId++) {
            HRegionLocation location = locations.getRegionLocation(replicaId);
            if (!RegionReplicaUtil.isDefaultReplica(replicaId)) {
                // Synthesize the replica's HRegionInfo when its location is unknown.
                HRegionInfo regionInfo = location == null
                    ? RegionReplicaUtil.getRegionInfoForReplica(
                        locations.getDefaultRegionLocation().getRegionInfo(), replicaId)
                    : location.getRegionInfo();
                RegionReplicaReplayCallable callable = new RegionReplicaReplayCallable(connection,
                    rpcControllerFactory, tableName, location, regionInfo, row, entries,
                    sink.getSkippedEditsCounter());
                Future<ReplicateWALEntryResponse> task = pool.submit(
                    new RetryingRpcCallable<ReplicateWALEntryResponse>(rpcRetryingCallerFactory,
                        callable, operationTimeout));
                tasks.add(task);
            }
        }

        // Wait for every replica's replay; tolerate table-gone failures.
        boolean tasksCancelled = false;
        for (Future<ReplicateWALEntryResponse> task : tasks) {
            try {
                task.get();
            } catch (InterruptedException e) {
                throw new InterruptedIOException(e.getMessage());
            } catch (ExecutionException e) {
                Throwable cause = e.getCause();
                if (cause instanceof IOException) {
                    // The table can be disabled or dropped at this time. For disabled tables, we have no
                    // cheap mechanism to detect this case because meta does not contain this information.
                    // HConnection.isTableDisabled() is a zk call which we cannot do for every replay RPC.
                    // So instead we start the replay RPC with retries and
                    // check whether the table is dropped or disabled which might cause
                    // SocketTimeoutException, or RetriesExhaustedException or similar if we get IOE.
                    if (cause instanceof TableNotFoundException || connection.isTableDisabled(tableName)) {
                        disabledAndDroppedTables.put(tableName, Boolean.TRUE); // put to cache for later.
                        if (!tasksCancelled) {
                            sink.getSkippedEditsCounter().addAndGet(entries.size());
                            tasksCancelled = true; // so that we do not add to skipped counter again
                        }
                        continue;
                    }
                    // otherwise rethrow
                    throw (IOException)cause;
                }
                // unexpected exception
                throw new IOException(cause);
            }
        }
    }
}
/**
 * Adapts a {@link RetryingCallable} into a plain {@link Callable} so it can
 * be submitted to an ExecutorService; each call() runs the callable through
 * a fresh retrying caller with the configured timeout.
 *
 * @param <V> result type of the wrapped callable
 */
static class RetryingRpcCallable<V> implements Callable<V> {
    RpcRetryingCallerFactory factory;
    RetryingCallable<V> callable;
    int timeout;
    public RetryingRpcCallable(RpcRetryingCallerFactory factory, RetryingCallable<V> callable,
        int timeout) {
        this.factory = factory;
        this.callable = callable;
        this.timeout = timeout;
    }
    @Override
    public V call() throws Exception {
        // A new caller per invocation; retries are handled inside callWithRetries.
        return factory.<V>newCaller().callWithRetries(callable, timeout);
    }
}
/**
 * Calls replay on the passed edits for the given set of entries belonging to the region. It skips
 * the entry if the region boundaries have changed or the region is gone.
 */
static class RegionReplicaReplayCallable
    extends RegionAdminServiceCallable<ReplicateWALEntryResponse> {
    // replicaId of the region replica that we want to replicate to
    private final int replicaId;

    private final List<Entry> entries;
    // Encoded region name captured at construction; used to detect
    // split/merge/recreate between buffering and replay.
    private final byte[] initialEncodedRegionName;
    private final AtomicLong skippedEntries;
    private final RpcControllerFactory rpcControllerFactory;
    // Set by getLocation() when the region no longer matches; replay becomes a no-op.
    private boolean skip;

    public RegionReplicaReplayCallable(ClusterConnection connection,
        RpcControllerFactory rpcControllerFactory, TableName tableName,
        HRegionLocation location, HRegionInfo regionInfo, byte[] row,List<Entry> entries,
        AtomicLong skippedEntries) {
        super(connection, location, tableName, row);
        this.replicaId = regionInfo.getReplicaId();
        this.entries = entries;
        this.rpcControllerFactory = rpcControllerFactory;
        this.skippedEntries = skippedEntries;
        this.initialEncodedRegionName = regionInfo.getEncodedNameAsBytes();
    }

    @Override
    public HRegionLocation getLocation(boolean useCache) throws IOException {
        RegionLocations rl = getRegionLocations(connection, tableName, row, useCache, replicaId);
        if (rl == null) {
            throw new HBaseIOException(getExceptionMessage());
        }
        location = rl.getRegionLocation(replicaId);
        if (location == null) {
            throw new HBaseIOException(getExceptionMessage());
        }

        // check whether we should still replay this entry. If the regions are changed, or the
        // entry is not coming form the primary region, filter it out because we do not need it.
        // Regions can change because of (1) region split (2) region merge (3) table recreated
        if (!Bytes.equals(location.getRegionInfo().getEncodedNameAsBytes(),
            initialEncodedRegionName)) {
            skip = true;
            return null;
        }

        return location;
    }

    @Override
    public ReplicateWALEntryResponse call(int timeout) throws IOException {
        return replayToServer(this.entries, timeout);
    }

    private ReplicateWALEntryResponse replayToServer(List<Entry> entries, int timeout)
        throws IOException {
        // Nothing to send, or the region changed underneath us: count as skipped.
        if (entries.isEmpty() || skip) {
            skippedEntries.incrementAndGet();
            return ReplicateWALEntryResponse.newBuilder().build();
        }

        Entry[] entriesArray = new Entry[entries.size()];
        entriesArray = entries.toArray(entriesArray);

        // set the region name for the target region replica
        Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p =
            ReplicationProtbufUtil.buildReplicateWALEntryRequest(
                entriesArray, location.getRegionInfo().getEncodedNameAsBytes());
        try {
            PayloadCarryingRpcController controller = rpcControllerFactory.newController(p.getSecond());
            controller.setCallTimeout(timeout);
            controller.setPriority(tableName);
            return stub.replay(controller, p.getFirst());
        } catch (ServiceException se) {
            // Unwrap the remote exception into its IOException form.
            throw ProtobufUtil.getRemoteException(se);
        }
    }

    @Override
    protected String getExceptionMessage() {
        return super.getExceptionMessage() + " table=" + tableName
            + " ,replica=" + replicaId + ", row=" + Bytes.toStringBinary(row);
    }
}
/**
 * Locates all replica locations of the region containing {@code row}.
 * Exceptions already meaningful to callers are propagated as-is; any other
 * IOException (and a null result) is normalized to RetriesExhaustedException.
 */
private static RegionLocations getRegionLocations(
    ClusterConnection connection, TableName tableName, byte[] row,
    boolean useCache, int replicaId)
    throws RetriesExhaustedException, DoNotRetryIOException, InterruptedIOException {
    RegionLocations locations;
    try {
        locations = connection.locateRegion(tableName, row, useCache, true, replicaId);
    } catch (InterruptedIOException e) {
        throw e;
    } catch (DoNotRetryIOException e) {
        throw e;
    } catch (RetriesExhaustedException e) {
        throw e;
    } catch (IOException e) {
        // Anything else is treated as a lookup failure worth retrying later.
        throw new RetriesExhaustedException("Can't get the location", e);
    }
    if (locations == null) {
        throw new RetriesExhaustedException("Can't get the locations");
    }
    return locations;
}
}
| drewpope/hbase | hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java | Java | apache-2.0 | 22,360 |
/*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.spring.initializr.actuate.stat;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import io.spring.initializr.actuate.stat.ProjectRequestDocument.ClientInformation;
import io.spring.initializr.actuate.stat.ProjectRequestDocument.DependencyInformation;
import io.spring.initializr.actuate.stat.ProjectRequestDocument.ErrorStateInformation;
import io.spring.initializr.actuate.stat.ProjectRequestDocument.VersionInformation;
import io.spring.initializr.generator.version.Version;
import io.spring.initializr.metadata.InitializrMetadata;
import io.spring.initializr.web.project.ProjectFailedEvent;
import io.spring.initializr.web.project.ProjectRequest;
import io.spring.initializr.web.project.ProjectRequestEvent;
import io.spring.initializr.web.project.WebProjectRequest;
import io.spring.initializr.web.support.Agent;
import org.springframework.util.StringUtils;
/**
* Create {@link ProjectRequestDocument} instances.
*
* @author Stephane Nicoll
*/
public class ProjectRequestDocumentFactory {
/**
 * Builds a flat {@link ProjectRequestDocument} for statistics from a project
 * generation event. Request values that do not exist in the metadata are not
 * dropped; instead the document's error state is flagged so invalid requests
 * can be analyzed later.
 *
 * @param event the generation (or failure) event to convert
 * @return a populated document, with error state set when applicable
 */
public ProjectRequestDocument createDocument(ProjectRequestEvent event) {
	InitializrMetadata metadata = event.getMetadata();
	ProjectRequest request = event.getProjectRequest();
	ProjectRequestDocument document = new ProjectRequestDocument();
	document.setGenerationTimestamp(event.getTimestamp());
	document.setGroupId(request.getGroupId());
	document.setArtifactId(request.getArtifactId());
	document.setPackageName(request.getPackageName());
	document.setVersion(determineVersionInformation(request));
	document.setClient(determineClientInformation(request));

	// Each facet below is recorded as-is, and flagged as an error when the
	// value is present but unknown to the metadata.
	document.setJavaVersion(request.getJavaVersion());
	if (StringUtils.hasText(request.getJavaVersion())
			&& metadata.getJavaVersions().get(request.getJavaVersion()) == null) {
		document.triggerError().setJavaVersion(true);
	}

	document.setLanguage(request.getLanguage());
	if (StringUtils.hasText(request.getLanguage()) && metadata.getLanguages().get(request.getLanguage()) == null) {
		document.triggerError().setLanguage(true);
	}

	document.setPackaging(request.getPackaging());
	if (StringUtils.hasText(request.getPackaging())
			&& metadata.getPackagings().get(request.getPackaging()) == null) {
		document.triggerError().setPackaging(true);
	}

	document.setType(request.getType());
	document.setBuildSystem(determineBuildSystem(request));
	if (StringUtils.hasText(request.getType()) && metadata.getTypes().get(request.getType()) == null) {
		document.triggerError().setType(true);
	}

	// Let's not rely on the resolved dependencies here
	List<String> dependencies = new ArrayList<>(request.getDependencies());
	List<String> validDependencies = dependencies.stream()
			.filter((id) -> metadata.getDependencies().get(id) != null).collect(Collectors.toList());
	document.setDependencies(new DependencyInformation(validDependencies));
	List<String> invalidDependencies = dependencies.stream().filter((id) -> (!validDependencies.contains(id)))
			.collect(Collectors.toList());
	if (!invalidDependencies.isEmpty()) {
		document.triggerError().triggerInvalidDependencies(invalidDependencies);
	}

	// Let's make sure that the document is flagged as invalid no matter what
	if (event instanceof ProjectFailedEvent) {
		ErrorStateInformation errorState = document.triggerError();
		ProjectFailedEvent failed = (ProjectFailedEvent) event;
		if (failed.getCause() != null) {
			errorState.setMessage(failed.getCause().getMessage());
		}
	}
	return document;
}
/**
 * Derives the build system from the request type, which is expected to be of
 * the form {@code "<build>-<dialect>"} (e.g. {@code "maven-project"}).
 *
 * @return the build system id, or {@code null} if the type is absent or not
 * in the two-part form
 */
private String determineBuildSystem(ProjectRequest request) {
	String type = request.getType();
	// BUGFIX: createDocument() invokes this unconditionally and only checks
	// hasText(type) afterwards, so a null type would previously NPE here.
	if (type == null) {
		return null;
	}
	String[] elements = type.split("-");
	return (elements.length == 2) ? elements[0] : null;
}
/**
 * Parses the requested Spring Boot version into structured version
 * information; returns {@code null} when the version cannot be parsed into
 * at least a major component.
 */
private VersionInformation determineVersionInformation(ProjectRequest request) {
	// safeParse returns null instead of throwing on malformed input.
	Version version = Version.safeParse(request.getBootVersion());
	if (version != null && version.getMajor() != null) {
		return new VersionInformation(version);
	}
	return null;
}
/**
 * Extracts client details (user agent, IP, country) from a web-originated
 * request. Returns {@code null} for non-web requests or when none of the
 * three pieces of information is available.
 */
private ClientInformation determineClientInformation(ProjectRequest request) {
	// Only WebProjectRequest carries the raw HTTP parameters/headers.
	if (request instanceof WebProjectRequest) {
		WebProjectRequest webProjectRequest = (WebProjectRequest) request;
		Agent agent = determineAgent(webProjectRequest);
		String ip = determineIp(webProjectRequest);
		String country = determineCountry(webProjectRequest);
		if (agent != null || ip != null || country != null) {
			return new ClientInformation(agent, ip, country);
		}
	}
	return null;
}
/**
 * Resolves the client agent from the {@code user-agent} request parameter,
 * or {@code null} when no user agent was supplied.
 */
private Agent determineAgent(WebProjectRequest request) {
	String userAgent = (String) request.getParameters().get("user-agent");
	return StringUtils.hasText(userAgent) ? Agent.fromUserAgent(userAgent) : null;
}
/**
 * Determines the client IP, preferring Cloudflare's {@code cf-connecting-ip}
 * parameter and falling back to {@code x-forwarded-for}.
 */
private String determineIp(WebProjectRequest request) {
	String candidate = (String) request.getParameters().get("cf-connecting-ip");
	if (StringUtils.hasText(candidate)) {
		return candidate;
	}
	return (String) request.getParameters().get("x-forwarded-for");
}
/**
 * Determines the client country from Cloudflare's {@code cf-ipcountry}
 * parameter; the placeholder value {@code "xx"} (unknown) is treated as
 * absent and yields {@code null}.
 */
private String determineCountry(WebProjectRequest request) {
	String country = (String) request.getParameters().get("cf-ipcountry");
	boolean known = StringUtils.hasText(country) && !"xx".equalsIgnoreCase(country);
	return known ? country : null;
}
}
| snicoll/initializr | initializr-actuator/src/main/java/io/spring/initializr/actuate/stat/ProjectRequestDocumentFactory.java | Java | apache-2.0 | 5,855 |
// HTMLParser Library - A java-based parser for HTML
// http://htmlparser.org
// Copyright (C) 2006 Claude Duguay
//
// Revision Control Information
//
// $URL: https://svn.sourceforge.net/svnroot/htmlparser/trunk/lexer/src/main/java/org/htmlparser/util/ParserException.java $
// $Author: derrickoswald $
// $Date: 2006-09-16 10:44:17 -0400 (Sat, 16 Sep 2006) $
// $Revision: 4 $
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the Common Public License; either
// version 1.0 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// Common Public License for more details.
//
// You should have received a copy of the Common Public License
// along with this library; if not, the license is available from
// the Open Source Initiative (OSI) website:
// http://opensource.org/licenses/cpl1.0.php
package org.htmlparser.util;
/**
 * Library-specific support for chained exceptions: a thin subclass of
 * {@link ChainedException} mirroring its constructors so parser code can
 * throw a parser-branded exception with an optional message and/or cause.
 *
 * @see ChainedException
 **/
public class ParserException
    extends ChainedException
{
    // Explicit serialization id: exceptions are Serializable via Throwable,
    // and omitting this makes the serialized form compiler-dependent.
    private static final long serialVersionUID = 1L;

    /** Create an exception with no detail message or cause. */
    public ParserException() {}

    /**
     * Create an exception with a detail message.
     * @param message The message describing the error.
     */
    public ParserException(String message)
    {
        super(message);
    }

    /**
     * Create an exception wrapping an underlying cause.
     * @param throwable The underlying cause.
     */
    public ParserException(Throwable throwable)
    {
        super(throwable);
    }

    /**
     * Create an exception with a detail message and an underlying cause.
     * @param message The message describing the error.
     * @param throwable The underlying cause.
     */
    public ParserException(String message, Throwable throwable)
    {
        super(message, throwable);
    }
}
| patrickfav/tuwien | master/swt workspace/HTMLParser/src/org/htmlparser/util/ParserException.java | Java | apache-2.0 | 1,512 |
package at.jku.sea.cloud.exceptions;
/**
 * Raised when an artifact cannot be pushed or pulled because it references
 * entities (type, package, project) that exist only in the workspace.
 */
public class ArtifactNotPushOrPullableException extends RuntimeException {

    private static final long serialVersionUID = 1L;

    /**
     * @param aid id of the artifact that cannot be pushed or pulled
     */
    public ArtifactNotPushOrPullableException(final long aid) {
        super(String.format(
            "artifact (id=%d) references (type, package, project) existing only in WS", aid));
    }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.security.PrivilegedAction;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
import org.apache.hadoop.hdfs.server.namenode.FSImage;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.NNUpgradeUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.tools.DFSHAAdmin;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
/**
* Tool which allows the standby node's storage directories to be bootstrapped
* by copying the latest namespace snapshot from the active namenode. This is
* used when first configuring an HA cluster.
*/
@InterfaceAudience.Private
public class BootstrapStandby implements Tool, Configurable {
  private static final Log LOG = LogFactory.getLog(BootstrapStandby.class);
  // Nameservice id and NameNode id of the local (standby) node, resolved
  // from configuration in parseConfAndFindOtherNN().
  private String nsId;
  private String nnId;
  // Identity and addresses of the remote (active) NameNode to bootstrap from.
  private String otherNNId;
  private URL otherHttpAddr;
  private InetSocketAddress otherIpcAddr;
  // Local storage locations that will be formatted and populated.
  private Collection<URI> dirsToFormat;
  private List<URI> editUrisToFormat;
  private List<URI> sharedEditsUris;
  private Configuration conf;
  // Command-line flags; see parseArgs().
  private boolean force = false;
  private boolean interactive = true;
  private boolean skipSharedEditsCheck = false;
  // Exit/return codes.
  static final int ERR_CODE_FAILED_CONNECT = 2;
  static final int ERR_CODE_INVALID_VERSION = 3;
  // Skip 4 - was used in previous versions, but no longer returned.
  static final int ERR_CODE_ALREADY_FORMATTED = 5;
  static final int ERR_CODE_LOGS_UNAVAILABLE = 6;
  /**
   * Parses flags and configuration, logs in via the configured keytab (when
   * security is enabled) and runs the bootstrap as the login user.
   * @return 0 on success, otherwise one of the ERR_CODE_* values
   */
  @Override
  public int run(String[] args) throws Exception {
    parseArgs(args);
    parseConfAndFindOtherNN();
    NameNode.checkAllowFormat(conf);
    InetSocketAddress myAddr = NameNode.getAddress(conf);
    SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
        DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, myAddr.getHostName());
    return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction<Integer>() {
      @Override
      public Integer run() {
        try {
          return doRun();
        } catch (IOException e) {
          // PrivilegedAction cannot throw checked exceptions; wrap it.
          throw new RuntimeException(e);
        }
      }
    });
  }
  // Recognizes -force, -nonInteractive and -skipSharedEditsCheck; any other
  // argument prints usage and aborts.
  private void parseArgs(String[] args) {
    for (String arg : args) {
      if ("-force".equals(arg)) {
        force = true;
      } else if ("-nonInteractive".equals(arg)) {
        interactive = false;
      } else if ("-skipSharedEditsCheck".equals(arg)) {
        skipSharedEditsCheck = true;
      } else {
        printUsage();
        throw new HadoopIllegalArgumentException(
          "Illegal argument: " + arg);
      }
    }
  }
  private void printUsage() {
    System.err.println("Usage: " + this.getClass().getSimpleName() +
        " [-force] [-nonInteractive] [-skipSharedEditsCheck]");
  }
  // Creates a non-HA RPC proxy to the active NameNode's IPC address,
  // authenticated as the current login user.
  private NamenodeProtocol createNNProtocolProxy()
      throws IOException {
    return NameNodeProxies.createNonHAProxy(getConf(),
        otherIpcAddr, NamenodeProtocol.class,
        UserGroupInformation.getLoginUser(), true)
        .getProxy();
  }
  // Core bootstrap sequence: fetch namespace info from the active NN, verify
  // layout version, format (or pre-upgrade) local storage, download the
  // latest checkpoint, and finish the upgrade rename if one is in progress.
  // Returns 0 on success or an ERR_CODE_* value.
  private int doRun() throws IOException {
    NamenodeProtocol proxy = createNNProtocolProxy();
    NamespaceInfo nsInfo;
    boolean isUpgradeFinalized;
    try {
      nsInfo = proxy.versionRequest();
      isUpgradeFinalized = proxy.isUpgradeFinalized();
    } catch (IOException ioe) {
      LOG.fatal("Unable to fetch namespace information from active NN at " +
          otherIpcAddr + ": " + ioe.getMessage());
      if (LOG.isDebugEnabled()) {
        LOG.debug("Full exception trace", ioe);
      }
      return ERR_CODE_FAILED_CONNECT;
    }
    if (!checkLayoutVersion(nsInfo)) {
      LOG.fatal("Layout version on remote node (" + nsInfo.getLayoutVersion()
          + ") does not match " + "this node's layout version ("
          + HdfsConstants.NAMENODE_LAYOUT_VERSION + ")");
      return ERR_CODE_INVALID_VERSION;
    }
    System.out.println(
        "=====================================================\n" +
        "About to bootstrap Standby ID " + nnId + " from:\n" +
        "  Nameservice ID: " + nsId + "\n" +
        "  Other Namenode ID: " + otherNNId + "\n" +
        "  Other NN's HTTP address: " + otherHttpAddr + "\n" +
        "  Other NN's IPC address: " + otherIpcAddr + "\n" +
        "  Namespace ID: " + nsInfo.getNamespaceID() + "\n" +
        "  Block pool ID: " + nsInfo.getBlockPoolID() + "\n" +
        "  Cluster ID: " + nsInfo.getClusterID() + "\n" +
        "  Layout version: " + nsInfo.getLayoutVersion() + "\n" +
        "  isUpgradeFinalized: " + isUpgradeFinalized + "\n" +
        "=====================================================");
    NNStorage storage = new NNStorage(conf, dirsToFormat, editUrisToFormat);
    if (!isUpgradeFinalized) {
      // the remote NameNode is in upgrade state, this NameNode should also
      // create the previous directory. First prepare the upgrade and rename
      // the current dir to previous.tmp.
      LOG.info("The active NameNode is in Upgrade. " +
          "Prepare the upgrade for the standby NameNode as well.");
      if (!doPreUpgrade(storage, nsInfo)) {
        return ERR_CODE_ALREADY_FORMATTED;
      }
    } else if (!format(storage, nsInfo)) { // prompt the user to format storage
      return ERR_CODE_ALREADY_FORMATTED;
    }
    // download the fsimage from active namenode
    int download = downloadImage(storage, proxy);
    if (download != 0) {
      return download;
    }
    // finish the upgrade: rename previous.tmp to previous
    if (!isUpgradeFinalized) {
      doUpgrade(storage);
    }
    return 0;
  }
  /**
   * Iterate over all the storage directories, checking if it should be
   * formatted. Format the storage if necessary and allowed by the user.
   * @return True if formatting is processed
   */
  private boolean format(NNStorage storage, NamespaceInfo nsInfo)
      throws IOException {
    // Check with the user before blowing away data.
    if (!Storage.confirmFormat(storage.dirIterable(null), force, interactive)) {
      storage.close();
      return false;
    } else {
      // Format the storage (writes VERSION file)
      storage.format(nsInfo);
      return true;
    }
  }
  /**
   * This is called when using bootstrapStandby for HA upgrade. The SBN should
   * also create previous directory so that later when it starts, it understands
   * that the cluster is in the upgrade state. This function renames the old
   * current directory to previous.tmp.
   * @return false if the user declined to format un-formatted storage
   */
  private boolean doPreUpgrade(NNStorage storage, NamespaceInfo nsInfo)
      throws IOException {
    boolean isFormatted = false;
    Map<StorageDirectory, StorageState> dataDirStates =
        new HashMap<StorageDirectory, StorageState>();
    try {
      isFormatted = FSImage.recoverStorageDirs(StartupOption.UPGRADE, storage,
          dataDirStates);
      if (dataDirStates.values().contains(StorageState.NOT_FORMATTED)) {
        // recoverStorageDirs returns true if there is a formatted directory
        isFormatted = false;
        System.err.println("The original storage directory is not formatted.");
      }
    } catch (InconsistentFSStateException e) {
      // if the storage is in a bad state,
      LOG.warn("The storage directory is in an inconsistent state", e);
    } finally {
      storage.unlockAll();
    }
    // if there is InconsistentFSStateException or the storage is not formatted,
    // format the storage. Although this format is done through the new
    // software, since in HA setup the SBN is rolled back through
    // "-bootstrapStandby", we should still be fine.
    if (!isFormatted && !format(storage, nsInfo)) {
      return false;
    }
    // make sure there is no previous directory
    FSImage.checkUpgrade(storage);
    // Do preUpgrade for each directory
    for (Iterator<StorageDirectory> it = storage.dirIterator(false);
        it.hasNext();) {
      StorageDirectory sd = it.next();
      try {
        NNUpgradeUtil.renameCurToTmp(sd);
      } catch (IOException e) {
        LOG.error("Failed to move aside pre-upgrade storage " +
            "in image directory " + sd.getRoot(), e);
        throw e;
      }
    }
    storage.setStorageInfo(nsInfo);
    storage.setBlockPoolID(nsInfo.getBlockPoolID());
    return true;
  }
  // Completes the upgrade in every storage directory (renames previous.tmp
  // to previous).
  private void doUpgrade(NNStorage storage) throws IOException {
    for (Iterator<StorageDirectory> it = storage.dirIterator(false);
        it.hasNext();) {
      StorageDirectory sd = it.next();
      NNUpgradeUtil.doUpgrade(sd, storage);
    }
  }
  // Downloads the most recent checkpoint image from the active NN into local
  // storage. Returns 0 on success or ERR_CODE_LOGS_UNAVAILABLE when the
  // shared edits do not cover the gap between the checkpoint and the active
  // NN's current transaction.
  private int downloadImage(NNStorage storage, NamenodeProtocol proxy)
      throws IOException {
    // Load the newly formatted image, using all of the directories
    // (including shared edits)
    final long imageTxId = proxy.getMostRecentCheckpointTxId();
    final long curTxId = proxy.getTransactionID();
    FSImage image = new FSImage(conf);
    try {
      image.getStorage().setStorageInfo(storage);
      image.initEditLog(StartupOption.REGULAR);
      assert image.getEditLog().isOpenForRead() :
        "Expected edit log to be open for read";
      // Ensure that we have enough edits already in the shared directory to
      // start up from the last checkpoint on the active.
      if (!skipSharedEditsCheck &&
          !checkLogsAvailableForRead(image, imageTxId, curTxId)) {
        return ERR_CODE_LOGS_UNAVAILABLE;
      }
      image.getStorage().writeTransactionIdFileToStorage(curTxId);
      // Download that checkpoint into our storage directories.
      MD5Hash hash = TransferFsImage.downloadImageToStorage(
          otherHttpAddr, imageTxId, storage, true);
      image.saveDigestAndRenameCheckpointImage(NameNodeFile.IMAGE, imageTxId,
          hash);
    } catch (IOException ioe) {
      image.close();
      throw ioe;
    }
    return 0;
  }
  // Verifies that the shared edits storage contains a contiguous stream of
  // transactions (imageTxId+1 .. curTxIdOnOtherNode); streams are opened to
  // prove availability and then closed immediately.
  private boolean checkLogsAvailableForRead(FSImage image, long imageTxId,
      long curTxIdOnOtherNode) {
    if (imageTxId == curTxIdOnOtherNode) {
      // The other node hasn't written any logs since the last checkpoint.
      // This can be the case if the NN was freshly formatted as HA, and
      // then started in standby mode, so it has no edit logs at all.
      return true;
    }
    long firstTxIdInLogs = imageTxId + 1;
    assert curTxIdOnOtherNode >= firstTxIdInLogs :
      "first=" + firstTxIdInLogs + " onOtherNode=" + curTxIdOnOtherNode;
    try {
      Collection<EditLogInputStream> streams =
        image.getEditLog().selectInputStreams(
          firstTxIdInLogs, curTxIdOnOtherNode, null, true);
      for (EditLogInputStream stream : streams) {
        IOUtils.closeStream(stream);
      }
      return true;
    } catch (IOException e) {
      String msg = "Unable to read transaction ids " +
          firstTxIdInLogs + "-" + curTxIdOnOtherNode +
          " from the configured shared edits storage " +
          Joiner.on(",").join(sharedEditsUris) + ". " +
          "Please copy these logs into the shared edits storage " +
          "or call saveNamespace on the active node.\n" +
          "Error: " + e.getLocalizedMessage();
      if (LOG.isDebugEnabled()) {
        LOG.fatal(msg, e);
      } else {
        LOG.fatal(msg);
      }
      return false;
    }
  }
  private boolean checkLayoutVersion(NamespaceInfo nsInfo) throws IOException {
    return (nsInfo.getLayoutVersion() == HdfsConstants.NAMENODE_LAYOUT_VERSION);
  }
  // Resolves the local and remote NameNode identities/addresses from the HA
  // configuration and validates that HA plus shared edits are enabled.
  private void parseConfAndFindOtherNN() throws IOException {
    Configuration conf = getConf();
    nsId = DFSUtil.getNamenodeNameServiceId(conf);
    if (!HAUtil.isHAEnabled(conf, nsId)) {
      throw new HadoopIllegalArgumentException(
          "HA is not enabled for this namenode.");
    }
    nnId = HAUtil.getNameNodeId(conf, nsId);
    NameNode.initializeGenericKeys(conf, nsId, nnId);
    if (!HAUtil.usesSharedEditsDir(conf)) {
      throw new HadoopIllegalArgumentException(
        "Shared edits storage is not enabled for this namenode.");
    }
    Configuration otherNode = HAUtil.getConfForOtherNode(conf);
    otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
    otherIpcAddr = NameNode.getServiceAddress(otherNode, true);
    Preconditions.checkArgument(otherIpcAddr.getPort() != 0 &&
        !otherIpcAddr.getAddress().isAnyLocalAddress(),
        "Could not determine valid IPC address for other NameNode (%s)" +
        ", got: %s", otherNNId, otherIpcAddr);
    final String scheme = DFSUtil.getHttpClientScheme(conf);
    otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
        otherIpcAddr.getHostName(), otherNode, scheme).toURL();
    dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
    editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(
        conf, false);
    sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
  }
  @Override
  public void setConf(Configuration conf) {
    // Adds security-related config resources before storing.
    this.conf = DFSHAAdmin.addSecurityConfiguration(conf);
  }
  @Override
  public Configuration getConf() {
    return conf;
  }
  // Entry point used by the NameNode command-line; rethrows any failure as
  // an IOException for the caller.
  public static int run(String[] argv, Configuration conf) throws IOException {
    BootstrapStandby bs = new BootstrapStandby();
    bs.setConf(conf);
    try {
      return ToolRunner.run(bs, argv);
    } catch (Exception e) {
      if (e instanceof IOException) {
        throw (IOException)e;
      } else {
        throw new IOException(e);
      }
    }
  }
}
| tecknowledgeable/hadoop | hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java | Java | apache-2.0 | 16,024 |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from ec2api.tests.functional import base
from ec2api.tests.functional import config
CONF = config.CONF
class SnapshotTest(base.EC2TestCase):
    """Functional tests for the EC2 snapshot API.

    Each test creates a 1 GiB volume in the configured availability zone,
    registers cleanups so resources are released even on failure, and
    cancels those cleanups after explicit deletion succeeds.
    """
    def test_create_delete_snapshot(self):
        """Snapshot a fresh volume, verify its attributes, then delete both."""
        kwargs = {
            'Size': 1,
            'AvailabilityZone': CONF.aws.aws_zone
        }
        data = self.client.create_volume(*[], **kwargs)
        volume_id = data['VolumeId']
        clean_vol = self.addResourceCleanUp(self.client.delete_volume,
                                            VolumeId=volume_id)
        self.get_volume_waiter().wait_available(volume_id)
        desc = 'test snapshot'
        kwargs = {
            'VolumeId': volume_id,
            'Description': desc
        }
        data = self.client.create_snapshot(*[], **kwargs)
        snapshot_id = data['SnapshotId']
        res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
                                            SnapshotId=snapshot_id)
        # NOTE(review): ('completed') is a plain string, not a 1-tuple --
        # works only if the waiter accepts a string final set; confirm.
        self.get_snapshot_waiter().wait_available(snapshot_id,
                                                  final_set=('completed'))
        self.assertEqual(desc, data['Description'])
        self.assertEqual(volume_id, data['VolumeId'])
        self.assertEqual(1, data['VolumeSize'])
        self.assertNotEmpty(data.get('State', ''))
        if 'Encrypted' in data:
            self.assertFalse(data['Encrypted'])
        self.assertIsNotNone(data['StartTime'])
        # Explicit deletion: cancel the registered cleanup so it does not
        # run again at teardown.
        data = self.client.delete_snapshot(SnapshotId=snapshot_id)
        self.cancelResourceCleanUp(res_clean)
        self.get_snapshot_waiter().wait_delete(snapshot_id)
        data = self.client.delete_volume(VolumeId=volume_id)
        self.cancelResourceCleanUp(clean_vol)
        self.get_volume_waiter().wait_delete(volume_id)
    def test_describe_snapshots(self):
        """Check describe_snapshots by snapshot id and by owner id, and that
        a deleted snapshot is no longer found."""
        kwargs = {
            'Size': 1,
            'AvailabilityZone': CONF.aws.aws_zone
        }
        data = self.client.create_volume(*[], **kwargs)
        volume_id = data['VolumeId']
        clean_vol = self.addResourceCleanUp(self.client.delete_volume,
                                            VolumeId=volume_id)
        self.get_volume_waiter().wait_available(volume_id)
        desc = 'test snapshot'
        kwargs = {
            'VolumeId': volume_id,
            'Description': desc
        }
        data = self.client.create_snapshot(*[], **kwargs)
        snapshot_id = data['SnapshotId']
        ownerId = data['OwnerId']
        res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
                                            SnapshotId=snapshot_id)
        self.get_snapshot_waiter().wait_available(snapshot_id,
                                                  final_set=('completed'))
        self.assertEqual(desc, data['Description'])
        self.assertEqual(volume_id, data['VolumeId'])
        self.assertEqual(1, data['VolumeSize'])
        self.assertNotEmpty(data.get('State', ''))
        if 'Encrypted' in data:
            self.assertFalse(data['Encrypted'])
        self.assertIsNotNone(data['StartTime'])
        # Describe by explicit snapshot id: exactly one match expected.
        data = self.client.describe_snapshots(SnapshotIds=[snapshot_id])
        self.assertEqual(1, len(data['Snapshots']))
        data = data['Snapshots'][0]
        self.assertEqual(snapshot_id, data['SnapshotId'])
        self.assertEqual(desc, data['Description'])
        self.assertEqual(volume_id, data['VolumeId'])
        self.assertEqual(1, data['VolumeSize'])
        self.assertNotEmpty(data.get('State', ''))
        if 'Encrypted' in data:
            self.assertFalse(data['Encrypted'])
        self.assertIsNotNone(data['StartTime'])
        # Describe by owner: our snapshot must be among the owner's snapshots.
        data = self.client.describe_snapshots(OwnerIds=[ownerId])
        data = [s for s in data['Snapshots'] if s['SnapshotId'] == snapshot_id]
        self.assertEqual(1, len(data))
        data = self.client.delete_snapshot(SnapshotId=snapshot_id)
        self.cancelResourceCleanUp(res_clean)
        self.get_snapshot_waiter().wait_delete(snapshot_id)
        # After deletion, describing the snapshot by id must fail.
        self.assertRaises('InvalidSnapshot.NotFound',
                          self.client.describe_snapshots,
                          SnapshotIds=[snapshot_id])
        data = self.client.delete_volume(VolumeId=volume_id)
        self.cancelResourceCleanUp(clean_vol)
        self.get_volume_waiter().wait_delete(volume_id)
    def test_create_volume_from_snapshot(self):
        """Create a second volume from a snapshot and verify its size,
        snapshot linkage, and that it is discoverable via a snapshot-id
        filter."""
        kwargs = {
            'Size': 1,
            'AvailabilityZone': CONF.aws.aws_zone
        }
        data = self.client.create_volume(*[], **kwargs)
        volume_id = data['VolumeId']
        clean_vol = self.addResourceCleanUp(self.client.delete_volume,
                                            VolumeId=volume_id)
        self.get_volume_waiter().wait_available(volume_id)
        vol1 = data
        desc = 'test snapshot'
        kwargs = {
            'VolumeId': volume_id,
            'Description': desc
        }
        data = self.client.create_snapshot(*[], **kwargs)
        snapshot_id = data['SnapshotId']
        res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
                                            SnapshotId=snapshot_id)
        self.get_snapshot_waiter().wait_available(snapshot_id,
                                                  final_set=('completed'))
        # No explicit Size: the new volume inherits the snapshot's size.
        kwargs = {
            'SnapshotId': snapshot_id,
            'AvailabilityZone': CONF.aws.aws_zone
        }
        data = self.client.create_volume(*[], **kwargs)
        volume_id2 = data['VolumeId']
        clean_vol2 = self.addResourceCleanUp(self.client.delete_volume,
                                             VolumeId=volume_id2)
        self.get_volume_waiter().wait_available(volume_id2)
        self.assertNotEqual(volume_id, volume_id2)
        self.assertEqual(vol1['Size'], data['Size'])
        self.assertEqual(snapshot_id, data['SnapshotId'])
        data = self.client.describe_volumes(
            Filters=[{'Name': 'snapshot-id', 'Values': [snapshot_id]}])
        self.assertEqual(1, len(data['Volumes']))
        self.assertEqual(volume_id2, data['Volumes'][0]['VolumeId'])
        data = self.client.delete_snapshot(SnapshotId=snapshot_id)
        self.cancelResourceCleanUp(res_clean)
        self.get_snapshot_waiter().wait_delete(snapshot_id)
        data = self.client.delete_volume(VolumeId=volume_id)
        self.cancelResourceCleanUp(clean_vol)
        self.get_volume_waiter().wait_delete(volume_id)
        data = self.client.delete_volume(VolumeId=volume_id2)
        self.cancelResourceCleanUp(clean_vol2)
        self.get_volume_waiter().wait_delete(volume_id2)
    def test_create_increased_volume_from_snapshot(self):
        """Create a volume from a snapshot with an explicitly larger size and
        verify the requested size wins."""
        kwargs = {
            'Size': 1,
            'AvailabilityZone': CONF.aws.aws_zone
        }
        data = self.client.create_volume(*[], **kwargs)
        volume_id = data['VolumeId']
        clean_vol = self.addResourceCleanUp(self.client.delete_volume,
                                            VolumeId=volume_id)
        self.get_volume_waiter().wait_available(volume_id)
        desc = 'test snapshot'
        kwargs = {
            'VolumeId': volume_id,
            'Description': desc
        }
        data = self.client.create_snapshot(*[], **kwargs)
        snapshot_id = data['SnapshotId']
        res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
                                            SnapshotId=snapshot_id)
        self.get_snapshot_waiter().wait_available(snapshot_id,
                                                  final_set=('completed'))
        kwargs = {
            'Size': 2,
            'SnapshotId': snapshot_id,
            'AvailabilityZone': CONF.aws.aws_zone
        }
        data = self.client.create_volume(*[], **kwargs)
        volume_id2 = data['VolumeId']
        clean_vol2 = self.addResourceCleanUp(self.client.delete_volume,
                                             VolumeId=volume_id2)
        self.get_volume_waiter().wait_available(volume_id2)
        self.assertNotEqual(volume_id, volume_id2)
        self.assertEqual(2, data['Size'])
        self.assertEqual(snapshot_id, data['SnapshotId'])
        data = self.client.delete_snapshot(SnapshotId=snapshot_id)
        self.cancelResourceCleanUp(res_clean)
        self.get_snapshot_waiter().wait_delete(snapshot_id)
        data = self.client.delete_volume(VolumeId=volume_id)
        self.cancelResourceCleanUp(clean_vol)
        self.get_volume_waiter().wait_delete(volume_id)
        data = self.client.delete_volume(VolumeId=volume_id2)
        self.cancelResourceCleanUp(clean_vol2)
        self.get_volume_waiter().wait_delete(volume_id2)
    @testtools.skipUnless(CONF.aws.run_incompatible_tests,
                          "Openstack can't delete volume with snapshots")
    def test_delete_volume_with_snapshots(self):
        """AWS-only: a volume can be deleted while its snapshot still exists;
        skipped on OpenStack, which forbids this."""
        kwargs = {
            'Size': 1,
            'AvailabilityZone': CONF.aws.aws_zone
        }
        data = self.client.create_volume(*[], **kwargs)
        volume_id = data['VolumeId']
        clean_vol = self.addResourceCleanUp(self.client.delete_volume,
                                            VolumeId=volume_id)
        self.get_volume_waiter().wait_available(volume_id)
        desc = 'test snapshot'
        kwargs = {
            'VolumeId': volume_id,
            'Description': desc
        }
        data = self.client.create_snapshot(*[], **kwargs)
        snapshot_id = data['SnapshotId']
        res_clean = self.addResourceCleanUp(self.client.delete_snapshot,
                                            SnapshotId=snapshot_id)
        self.get_snapshot_waiter().wait_available(snapshot_id,
                                                  final_set=('completed'))
        # Delete the volume first, while the snapshot still exists.
        data = self.client.delete_volume(VolumeId=volume_id)
        self.cancelResourceCleanUp(clean_vol)
        self.get_volume_waiter().wait_delete(volume_id)
        data = self.client.delete_snapshot(SnapshotId=snapshot_id)
        self.cancelResourceCleanUp(res_clean)
        self.get_snapshot_waiter().wait_delete(snapshot_id)
| vishnu-kumar/ec2-api | ec2api/tests/functional/api/test_snapshots.py | Python | apache-2.0 | 10,696 |
#include <stdio.h>
#include "ali_api_core.h"
#include "ali_string_utils.h"
#include "ali_rds.h"
#include "json/value.h"
#include "json/reader.h"
using namespace aliyun;
namespace {
// Overloads that map a parsed Json::Value onto the given output object.
void Json2Type(const Json::Value& value, std::string* item);
void Json2Type(const Json::Value& value, RdsReleaseInstancePublicConnectionResponseType* item);
// Helper whose constructor deserializes every element of a JSON array into T
// and appends it to *vec. Non-array input leaves *vec untouched.
template<typename T>
class Json2Array {
 public:
  Json2Array(const Json::Value& value, std::vector<T>* vec) {
    if(!value.isArray()) {
      return;
    }
    // Json::Value::size() returns the unsigned ArrayIndex type; use the same
    // type for the counter to avoid a signed/unsigned comparison (and an
    // overflow for arrays larger than INT_MAX).
    for(Json::Value::ArrayIndex i = 0; i < value.size(); i++) {
      T val;
      Json2Type(value[i], &val);
      vec->push_back(val);
    }
  }
};
void Json2Type(const Json::Value& value, std::string* item) {
  *item = value.asString();
}
// The response type carries no payload fields; nothing to deserialize.
void Json2Type(const Json::Value& value, RdsReleaseInstancePublicConnectionResponseType* item) {
}
}
// Invokes the RDS "ReleaseInstancePublicConnection" API action.
// Builds the RPC query from the non-empty fields of |req|, performs the
// HTTP(S) request, and parses the JSON response body.
// Returns the HTTP status code when a response was parsed, or -1 on
// connect/parse failure. On non-200 status |error_info| (if non-NULL) is
// filled from the error payload; on 200 |response| (if non-NULL) is filled.
int Rds::ReleaseInstancePublicConnection(const RdsReleaseInstancePublicConnectionRequestType& req,
                      RdsReleaseInstancePublicConnectionResponseType* response,
                       RdsErrorInfo* error_info) {
  std::string str_response;
  int status_code;
  int ret = 0;
  bool parse_success = false;
  // Scheme follows the client-wide TLS switch.
  std::string secheme = this->use_tls_ ? "https" : "http";
  AliRpcRequest* req_rpc = new AliRpcRequest(version_,
                         appid_,
                         secret_,
                         secheme + "://" + host_);
  // The HTTP proxy is only honored for plain-HTTP requests.
  if((!this->use_tls_) && this->proxy_host_ && this->proxy_host_[0]) {
    req_rpc->SetHttpProxy( this->proxy_host_);
  }
  Json::Value val;
  Json::Reader reader;
  // Mandatory action name, then every optional request field that is set.
  req_rpc->AddRequestQuery("Action","ReleaseInstancePublicConnection");
  if(!req.owner_id.empty()) {
    req_rpc->AddRequestQuery("OwnerId", req.owner_id);
  }
  if(!req.resource_owner_account.empty()) {
    req_rpc->AddRequestQuery("ResourceOwnerAccount", req.resource_owner_account);
  }
  if(!req.resource_owner_id.empty()) {
    req_rpc->AddRequestQuery("ResourceOwnerId", req.resource_owner_id);
  }
  if(!req.db_instance_id.empty()) {
    req_rpc->AddRequestQuery("DBInstanceId", req.db_instance_id);
  }
  if(!req.current_connection_string.empty()) {
    req_rpc->AddRequestQuery("CurrentConnectionString", req.current_connection_string);
  }
  if(!req.owner_account.empty()) {
    req_rpc->AddRequestQuery("OwnerAccount", req.owner_account);
  }
  if(this->region_id_ && this->region_id_[0]) {
    req_rpc->AddRequestQuery("RegionId", this->region_id_);
  }
  // goto-based cleanup: every exit path below funnels through "out" so that
  // req_rpc is always deleted exactly once.
  if(req_rpc->CommitRequest() != 0) {
    if(error_info) {
      error_info->code = "connect to host failed";
    }
    ret = -1;
    goto out;
  }
  status_code = req_rpc->WaitResponseHeaderComplete();
  req_rpc->ReadResponseBody(str_response);
  if(status_code > 0 && !str_response.empty()){
    parse_success = reader.parse(str_response, val);
  }
  if(!parse_success) {
    if(error_info) {
      error_info->code = "parse response failed";
    }
    ret = -1;
    goto out;
  }
  // Error responses carry RequestId/Code/HostId/Message fields.
  if(status_code!= 200 && error_info && parse_success) {
    error_info->request_id = val.isMember("RequestId") ? val["RequestId"].asString(): "";
    error_info->code = val.isMember("Code") ? val["Code"].asString(): "";
    error_info->host_id = val.isMember("HostId") ? val["HostId"].asString(): "";
    error_info->message = val.isMember("Message") ? val["Message"].asString(): "";
  }
  if(status_code== 200 && response) {
    Json2Type(val, response);
  }
  ret = status_code;
out:
  delete req_rpc;
  return ret;
}
| zcy421593/aliyun-openapi-cpp-sdk | aliyun-api-rds/2014-08-15/src/ali_rds_release_instance_public_connection.cc | C++ | apache-2.0 | 3,520 |
/**
* This package contains classes for mapping between Particles and Tuples.
*/
package nl.tno.sensorstorm.particlemapper; | sensorstorm/SensorStorm | SensorStorm/src/nl/tno/sensorstorm/particlemapper/package-info.java | Java | apache-2.0 | 128 |
/******************************************************************************
* Copyright 2017 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "modules/localization/msf/common/io/velodyne_utility.h"
#include <pcl/io/pcd_io.h>
#include <yaml-cpp/yaml.h>
#include "cyber/common/log.h"
#include "modules/localization/msf/common/io/pcl_point_types.h"
namespace apollo {
namespace localization {
namespace msf {
namespace velodyne {
// Loads one PCD file into |velodyne_frame|, stamping it with the frame index
// and pose before delegating to the vector-based overload.
void LoadPcds(const std::string& file_path, const unsigned int frame_index,
              const Eigen::Affine3d& pose, VelodyneFrame* velodyne_frame,
              bool is_global) {
  VelodyneFrame& frame = *velodyne_frame;
  frame.pose = pose;
  frame.frame_index = frame_index;
  LoadPcds(file_path, frame_index, pose, &frame.pt3ds, &frame.intensities,
           is_global);
}
// Reads the PCD file |file_path| and appends its valid points and intensity
// values to |pt3ds| / |intensities|.
// If |is_global| is true, the stored points are treated as global-frame
// coordinates and are transformed into the local frame via pose.inverse();
// otherwise they are copied unchanged. |frame_index| is unused in this
// overload; it is consumed by the VelodyneFrame overload above.
void LoadPcds(const std::string& file_path, const unsigned int frame_index,
              const Eigen::Affine3d& pose, std::vector<Eigen::Vector3d>* pt3ds,
              std::vector<unsigned char>* intensities, bool is_global) {
  Eigen::Affine3d pose_inv = pose.inverse();
  pcl::PointCloud<PointXYZIT>::Ptr cloud(new pcl::PointCloud<PointXYZIT>);
  if (pcl::io::loadPCDFile(file_path, *cloud) >= 0) {
    // height == 1 or width == 1 marks an unorganized cloud: iterate linearly.
    if (cloud->height == 1 || cloud->width == 1) {
      AERROR << "Un-organized-point-cloud";
      for (unsigned int i = 0; i < cloud->size(); ++i) {
        Eigen::Vector3d pt3d;
        pt3d[0] = (*cloud)[i].x;
        pt3d[1] = (*cloud)[i].y;
        pt3d[2] = (*cloud)[i].z;
        // x == x is false only for NaN: this filters out invalid points.
        if (pt3d[0] == pt3d[0] && pt3d[1] == pt3d[1] && pt3d[2] == pt3d[2]) {
          Eigen::Vector3d pt3d_local;
          if (is_global) {
            pt3d_local = pose_inv * pt3d;
          } else {
            pt3d_local = pt3d;
          }
          unsigned char intensity =
              static_cast<unsigned char>((*cloud)[i].intensity);
          pt3ds->push_back(pt3d_local);
          intensities->push_back(intensity);
        }
      }
    } else {
      // Organized cloud: traverse row-by-row through the height x width grid.
      for (unsigned int h = 0; h < cloud->height; ++h) {
        for (unsigned int w = 0; w < cloud->width; ++w) {
          double x = cloud->at(w, h).x;
          double y = cloud->at(w, h).y;
          double z = cloud->at(w, h).z;
          Eigen::Vector3d pt3d(x, y, z);
          // Same NaN filter as above.
          if (pt3d[0] == pt3d[0] && pt3d[1] == pt3d[1] && pt3d[2] == pt3d[2]) {
            Eigen::Vector3d pt3d_local;
            if (is_global) {
              pt3d_local = pose_inv * pt3d;
            } else {
              pt3d_local = pt3d;
            }
            unsigned char intensity =
                static_cast<unsigned char>(cloud->at(w, h).intensity);
            pt3ds->push_back(pt3d_local);
            intensities->push_back(intensity);
          }
        }
      }
    }
  } else {
    AERROR << "Failed to load PCD file: " << file_path;
  }
}
// Convenience overload: reads poses and timestamps from the pose file while
// discarding the per-line PCD indices.
void LoadPcdPoses(const std::string& file_path,
                  std::vector<Eigen::Affine3d>* poses,
                  std::vector<double>* timestamps) {
  std::vector<unsigned int> discarded_indices;
  LoadPcdPoses(file_path, poses, timestamps, &discarded_indices);
}
// Parses a pose file in which each line has the form
//   index timestamp x y z qx qy qz qr
// appending one entry per line to |poses| (translation * quaternion),
// |timestamps| and |pcd_indices|. All three output vectors are cleared
// first. Parsing stops at the first malformed line; a missing file only
// logs an error and leaves the outputs empty.
void LoadPcdPoses(const std::string& file_path,
                  std::vector<Eigen::Affine3d>* poses,
                  std::vector<double>* timestamps,
                  std::vector<unsigned int>* pcd_indices) {
  poses->clear();
  timestamps->clear();
  pcd_indices->clear();
  FILE* file = fopen(file_path.c_str(), "r");
  if (file) {
    unsigned int index;
    double timestamp;
    double x, y, z;
    double qx, qy, qz, qr;
    // kSize is the number of fields fscanf must match per line.
    constexpr int kSize = 9;
    while (fscanf(file, "%u %lf %lf %lf %lf %lf %lf %lf %lf\n", &index,
                  &timestamp, &x, &y, &z, &qx, &qy, &qz, &qr) == kSize) {
      Eigen::Translation3d trans(Eigen::Vector3d(x, y, z));
      // Eigen's Quaterniond constructor takes (w, x, y, z).
      Eigen::Quaterniond quat(qr, qx, qy, qz);
      poses->push_back(trans * quat);
      timestamps->push_back(timestamp);
      pcd_indices->push_back(index);
    }
    fclose(file);
  } else {
    AERROR << "Can't open file to read: " << file_path;
  }
}
// Parses a pose file in which each line has the form
//   index timestamp x y z qx qy qz qr std_x std_y std_z
// appending one entry per line to |poses|, |stds| (per-axis standard
// deviations) and |timestamps|. All outputs are cleared first; the per-line
// index is consumed but not returned. Parsing stops at the first malformed
// line; a missing file only logs an error.
void LoadPosesAndStds(const std::string& file_path,
                      std::vector<Eigen::Affine3d>* poses,
                      std::vector<Eigen::Vector3d>* stds,
                      std::vector<double>* timestamps) {
  poses->clear();
  stds->clear();
  timestamps->clear();
  FILE* file = fopen(file_path.c_str(), "r");
  if (file) {
    unsigned int index;
    double timestamp;
    double x, y, z;
    double qx, qy, qz, qr;
    double std_x, std_y, std_z;
    // kSize is the number of fields fscanf must match per line.
    constexpr int kSize = 12;
    while (fscanf(file, "%u %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n",
                  &index, &timestamp, &x, &y, &z, &qx, &qy, &qz, &qr, &std_x,
                  &std_y, &std_z) == kSize) {
      Eigen::Translation3d trans(Eigen::Vector3d(x, y, z));
      // Eigen's Quaterniond constructor takes (w, x, y, z).
      Eigen::Quaterniond quat(qr, qx, qy, qz);
      poses->push_back(trans * quat);
      timestamps->push_back(timestamp);
      Eigen::Vector3d std;
      std << std_x, std_y, std_z;
      stds->push_back(std);
    }
    fclose(file);
  } else {
    AERROR << "Can't open file to read: " << file_path;
  }
}
// Reads a sensor extrinsic from a YAML file shaped as
//   transform:
//     translation: {x, y, z}
//     rotation: {x, y, z, w}
// Returns true only when both translation and rotation are present.
// NOTE(review): when translation exists but rotation does not, *extrinsic is
// left with its translation partially written while false is returned --
// confirm callers treat a false return as "extrinsic unusable".
// NOTE(review): YAML::LoadFile throws on a missing or unparsable file rather
// than returning false -- confirm callers handle the exception.
bool LoadExtrinsic(const std::string& file_path, Eigen::Affine3d* extrinsic) {
  YAML::Node config = YAML::LoadFile(file_path);
  if (config["transform"]) {
    if (config["transform"]["translation"]) {
      extrinsic->translation()(0) =
          config["transform"]["translation"]["x"].as<double>();
      extrinsic->translation()(1) =
          config["transform"]["translation"]["y"].as<double>();
      extrinsic->translation()(2) =
          config["transform"]["translation"]["z"].as<double>();
      if (config["transform"]["rotation"]) {
        double qx = config["transform"]["rotation"]["x"].as<double>();
        double qy = config["transform"]["rotation"]["y"].as<double>();
        double qz = config["transform"]["rotation"]["z"].as<double>();
        double qw = config["transform"]["rotation"]["w"].as<double>();
        extrinsic->linear() =
            Eigen::Quaterniond(qw, qx, qy, qz).toRotationMatrix();
        return true;
      }
    }
  }
  return false;
}
} // namespace velodyne
} // namespace msf
} // namespace localization
} // namespace apollo
| msbeta/apollo | modules/localization/msf/common/io/velodyne_utility.cc | C++ | apache-2.0 | 6,790 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hive.ql.udf.generic;
import java.sql.Timestamp;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import org.apache.hadoop.hive.ql.exec.Description;
import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException;
import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
import org.apache.hadoop.hive.serde2.io.DateWritable;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorConverter.TimestampConverter;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
/**
 * GenericUDFAddMonths.
 *
 * Add a number of months to the date. The time part of the string will be
 * ignored.
 *
 */
@Description(name = "add_months",
    value = "_FUNC_(start_date, num_months) - Returns the date that is num_months after start_date.",
    extended = "start_date is a string in the format 'yyyy-MM-dd HH:mm:ss' or"
    + " 'yyyy-MM-dd'. num_months is a number. The time part of start_date is "
    + "ignored.\n"
    + "Example:\n " + " > SELECT _FUNC_('2009-08-31', 1) FROM src LIMIT 1;\n" + " '2009-09-30'")
public class GenericUDFAddMonths extends GenericUDF {
  // Parses and formats dates as yyyy-MM-dd; any time-of-day portion of the
  // input is dropped from the formatted result.
  private transient SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd");
  // Exactly one of the three date converters below is non-null after
  // initialize(), chosen from the first argument's primitive category.
  private transient TimestampConverter timestampConverter;
  private transient Converter textConverter;
  private transient Converter dateWritableConverter;
  private transient Converter intWritableConverter;
  private transient PrimitiveCategory inputType1;
  private transient PrimitiveCategory inputType2;
  // Scratch state reused across evaluate() calls to avoid per-row allocation;
  // a UDF instance is used by a single thread, so the shared Calendar is safe.
  private final Calendar calendar = Calendar.getInstance();
  private final Text output = new Text();

  /**
   * Validates that the UDF is invoked as add_months(start_date, num_months)
   * and sets up the converters used by {@link #evaluate(DeferredObject[])}.
   *
   * @param arguments inspectors for (start_date, num_months)
   * @return a writable string object inspector for the formatted result
   * @throws UDFArgumentException if the argument count or types are wrong
   */
  @Override
  public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
    if (arguments.length != 2) {
      throw new UDFArgumentLengthException("add_months() requires 2 argument, got "
          + arguments.length);
    }
    if (arguments[0].getCategory() != ObjectInspector.Category.PRIMITIVE) {
      throw new UDFArgumentTypeException(0, "Only primitive type arguments are accepted but "
          + arguments[0].getTypeName() + " is passed as first arguments");
    }
    if (arguments[1].getCategory() != ObjectInspector.Category.PRIMITIVE) {
      throw new UDFArgumentTypeException(1, "Only primitive type arguments are accepted but "
          + arguments[1].getTypeName() + " is passed as second arguments");
    }
    inputType1 = ((PrimitiveObjectInspector) arguments[0]).getPrimitiveCategory();
    ObjectInspector outputOI = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
    switch (inputType1) {
    case STRING:
    case VARCHAR:
    case CHAR:
      // All character types are handled through a single string converter.
      inputType1 = PrimitiveCategory.STRING;
      textConverter = ObjectInspectorConverters.getConverter(
        (PrimitiveObjectInspector) arguments[0],
        PrimitiveObjectInspectorFactory.writableStringObjectInspector);
      break;
    case TIMESTAMP:
      timestampConverter = new TimestampConverter((PrimitiveObjectInspector) arguments[0],
          PrimitiveObjectInspectorFactory.writableTimestampObjectInspector);
      break;
    case DATE:
      dateWritableConverter = ObjectInspectorConverters.getConverter(
          (PrimitiveObjectInspector) arguments[0],
          PrimitiveObjectInspectorFactory.writableDateObjectInspector);
      break;
    default:
      throw new UDFArgumentTypeException(0,
          "ADD_MONTHS() only takes STRING/TIMESTAMP/DATEWRITABLE types as first argument, got "
              + inputType1);
    }
    inputType2 = ((PrimitiveObjectInspector) arguments[1]).getPrimitiveCategory();
    if (inputType2 != PrimitiveCategory.INT) {
      throw new UDFArgumentTypeException(1,
          "ADD_MONTHS() only takes INT types as second argument, got " + inputType2);
    }
    intWritableConverter = ObjectInspectorConverters.getConverter(
        (PrimitiveObjectInspector) arguments[1],
        PrimitiveObjectInspectorFactory.writableIntObjectInspector);
    return outputOI;
  }

  /**
   * Computes start_date + num_months and returns it formatted as yyyy-MM-dd.
   * Returns NULL when either argument is NULL or the date cannot be parsed.
   */
  @Override
  public Object evaluate(DeferredObject[] arguments) throws HiveException {
    if (arguments[0].get() == null) {
      return null;
    }
    IntWritable toBeAdded = (IntWritable) intWritableConverter.convert(arguments[1].get());
    if (toBeAdded == null) {
      return null;
    }
    Date date;
    switch (inputType1) {
    case STRING:
      // dateString is already a String; the original redundantly called
      // toString() on it again when parsing.
      String dateString = textConverter.convert(arguments[0].get()).toString();
      try {
        date = formatter.parse(dateString);
      } catch (ParseException e) {
        // Unparseable input yields NULL, matching Hive's date UDF convention.
        return null;
      }
      break;
    case TIMESTAMP:
      Timestamp ts = ((TimestampWritable) timestampConverter.convert(arguments[0].get()))
          .getTimestamp();
      date = ts;
      break;
    case DATE:
      DateWritable dw = (DateWritable) dateWritableConverter.convert(arguments[0].get());
      date = dw.get();
      break;
    default:
      throw new UDFArgumentTypeException(0,
          "ADD_MONTHS() only takes STRING/TIMESTAMP/DATEWRITABLE types, got " + inputType1);
    }
    int numMonth = toBeAdded.get();
    // addMonth mutates the shared calendar; read the shifted date back out.
    addMonth(date, numMonth);
    Date newDate = calendar.getTime();
    output.set(formatter.format(newDate));
    return output;
  }

  @Override
  public String getDisplayString(String[] children) {
    return getStandardDisplayString("add_months", children);
  }

  /**
   * Shifts the shared calendar to d + numMonths. If d is the last day of its
   * month, the result is clamped to the last day of the target month
   * (e.g. 2009-08-31 + 1 month = 2009-09-30).
   */
  protected Calendar addMonth(Date d, int numMonths) {
    calendar.setTime(d);
    boolean lastDayOfMonth = isLastDayOfMonth(calendar);
    calendar.add(Calendar.MONTH, numMonths);
    if (lastDayOfMonth) {
      int maxDd = calendar.getActualMaximum(Calendar.DAY_OF_MONTH);
      calendar.set(Calendar.DAY_OF_MONTH, maxDd);
    }
    return calendar;
  }

  /** Returns true when cal's day-of-month is the final day of that month. */
  protected boolean isLastDayOfMonth(Calendar cal) {
    int maxDd = cal.getActualMaximum(Calendar.DAY_OF_MONTH);
    int dd = cal.get(Calendar.DAY_OF_MONTH);
    return dd == maxDd;
  }
}
| WANdisco/amplab-hive | ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFAddMonths.java | Java | apache-2.0 | 7,518 |
using System;
using ProtoBuf.Services.Serialization;
namespace ProtoBuf.Services.WebAPI
{
    /// <summary>
    /// Supplies protobuf type metadata for the WebAPI layer.
    /// </summary>
    public interface IProtoMetaProvider
    {
        /// <summary>
        /// Produces the metadata describing <paramref name="type"/> as a string.
        /// NOTE(review): presumably the JSON form consumed by <see cref="FromJson"/> —
        /// confirm against the implementation.
        /// </summary>
        string GetMetaData(Type type);
        /// <summary>
        /// Reconstructs a <see cref="TypeMetaData"/> from its JSON byte representation.
        /// </summary>
        TypeMetaData FromJson(byte[] json);
    }
} | maingi4/ProtoBuf.Services | ProtoBuf.Services.WebAPI/IProtoMetaProvider.cs | C# | apache-2.0 | 227 |
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta3
import (
"fmt"
"reflect"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
)
// addConversionFuncs registers the hand-written conversion functions and the
// per-kind field label conversion functions for the v1beta3 API version.
func addConversionFuncs() {
	// A malformed conversion function is a programmer error; surface it
	// immediately at registration time.
	must := func(err error) {
		if err != nil {
			panic(err)
		}
	}

	// allowLabels builds a field label conversion func that passes the listed
	// labels (and their values) through unchanged and rejects everything else.
	allowLabels := func(labels ...string) func(string, string) (string, string, error) {
		allowed := make(map[string]bool, len(labels))
		for _, l := range labels {
			allowed[l] = true
		}
		return func(label, value string) (string, string, error) {
			if allowed[label] {
				return label, value, nil
			}
			return "", "", fmt.Errorf("field label not supported: %s", label)
		}
	}

	// Add non-generated conversion functions.
	must(api.Scheme.AddConversionFuncs(
		convert_v1beta3_Container_To_api_Container,
		convert_api_Container_To_v1beta3_Container,
	))

	// Add field conversion funcs.
	must(api.Scheme.AddFieldLabelConversionFunc("v1beta3", "Pod",
		allowLabels("metadata.name", "metadata.namespace", "status.phase", "spec.host")))
	must(api.Scheme.AddFieldLabelConversionFunc("v1beta3", "Node",
		allowLabels("metadata.name", "spec.unschedulable")))
	must(api.Scheme.AddFieldLabelConversionFunc("v1beta3", "ReplicationController",
		allowLabels("metadata.name", "status.replicas")))
	must(api.Scheme.AddFieldLabelConversionFunc("v1beta3", "Event",
		allowLabels(
			"involvedObject.kind",
			"involvedObject.namespace",
			"involvedObject.name",
			"involvedObject.uid",
			"involvedObject.apiVersion",
			"involvedObject.resourceVersion",
			"involvedObject.fieldPath",
			"reason",
			"source")))
	must(api.Scheme.AddFieldLabelConversionFunc("v1beta3", "Namespace",
		allowLabels("status.phase")))
	must(api.Scheme.AddFieldLabelConversionFunc("v1beta3", "Secret",
		allowLabels("type")))
	must(api.Scheme.AddFieldLabelConversionFunc("v1beta3", "ServiceAccount",
		allowLabels("metadata.name")))
}
// convert_v1beta3_Container_To_api_Container converts a versioned v1beta3
// Container to the internal api.Container type. Slice fields are deep-copied
// so the output never aliases the input, and nested types are delegated to
// their own conversion functions.
func convert_v1beta3_Container_To_api_Container(in *Container, out *api.Container, s conversion.Scope) error {
	// Run any registered defaulting function for this type before converting.
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*Container))(in)
	}
	out.Name = in.Name
	out.Image = in.Image
	if in.Command != nil {
		out.Command = make([]string, len(in.Command))
		for i := range in.Command {
			out.Command[i] = in.Command[i]
		}
	}
	if in.Args != nil {
		out.Args = make([]string, len(in.Args))
		for i := range in.Args {
			out.Args[i] = in.Args[i]
		}
	}
	out.WorkingDir = in.WorkingDir
	if in.Ports != nil {
		out.Ports = make([]api.ContainerPort, len(in.Ports))
		for i := range in.Ports {
			if err := convert_v1beta3_ContainerPort_To_api_ContainerPort(&in.Ports[i], &out.Ports[i], s); err != nil {
				return err
			}
		}
	}
	if in.Env != nil {
		out.Env = make([]api.EnvVar, len(in.Env))
		for i := range in.Env {
			if err := convert_v1beta3_EnvVar_To_api_EnvVar(&in.Env[i], &out.Env[i], s); err != nil {
				return err
			}
		}
	}
	// Resources has no dedicated function here; use the scope's generic conversion.
	if err := s.Convert(&in.Resources, &out.Resources, 0); err != nil {
		return err
	}
	if in.VolumeMounts != nil {
		out.VolumeMounts = make([]api.VolumeMount, len(in.VolumeMounts))
		for i := range in.VolumeMounts {
			if err := convert_v1beta3_VolumeMount_To_api_VolumeMount(&in.VolumeMounts[i], &out.VolumeMounts[i], s); err != nil {
				return err
			}
		}
	}
	// Pointer fields: allocate and convert when set, otherwise explicitly nil
	// the output so a reused 'out' never keeps a stale value.
	if in.LivenessProbe != nil {
		out.LivenessProbe = new(api.Probe)
		if err := convert_v1beta3_Probe_To_api_Probe(in.LivenessProbe, out.LivenessProbe, s); err != nil {
			return err
		}
	} else {
		out.LivenessProbe = nil
	}
	if in.ReadinessProbe != nil {
		out.ReadinessProbe = new(api.Probe)
		if err := convert_v1beta3_Probe_To_api_Probe(in.ReadinessProbe, out.ReadinessProbe, s); err != nil {
			return err
		}
	} else {
		out.ReadinessProbe = nil
	}
	if in.Lifecycle != nil {
		out.Lifecycle = new(api.Lifecycle)
		if err := convert_v1beta3_Lifecycle_To_api_Lifecycle(in.Lifecycle, out.Lifecycle, s); err != nil {
			return err
		}
	} else {
		out.Lifecycle = nil
	}
	out.TerminationMessagePath = in.TerminationMessagePath
	out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy)
	// v1beta3 carries privilege/capability settings both at the container top
	// level and inside SecurityContext; refuse to convert if they disagree.
	if in.SecurityContext != nil {
		if in.SecurityContext.Capabilities != nil {
			if !reflect.DeepEqual(in.SecurityContext.Capabilities.Add, in.Capabilities.Add) ||
				!reflect.DeepEqual(in.SecurityContext.Capabilities.Drop, in.Capabilities.Drop) {
				return fmt.Errorf("container capability settings do not match security context settings, cannot convert")
			}
		}
		if in.SecurityContext.Privileged != nil {
			if in.Privileged != *in.SecurityContext.Privileged {
				return fmt.Errorf("container privileged settings do not match security context settings, cannot convert")
			}
		}
	}
	if in.SecurityContext != nil {
		out.SecurityContext = new(api.SecurityContext)
		if err := convert_v1beta3_SecurityContext_To_api_SecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil {
			return err
		}
	} else {
		out.SecurityContext = nil
	}
	return nil
}
// convert_api_Container_To_v1beta3_Container converts an internal
// api.Container to the versioned v1beta3 Container type, then back-fills the
// top-level Privileged/Capabilities fields from the converted SecurityContext
// so both representations of those settings stay in sync.
func convert_api_Container_To_v1beta3_Container(in *api.Container, out *Container, s conversion.Scope) error {
	// Run any registered defaulting function for this type before converting.
	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
		defaulting.(func(*api.Container))(in)
	}
	out.Name = in.Name
	out.Image = in.Image
	if in.Command != nil {
		out.Command = make([]string, len(in.Command))
		for i := range in.Command {
			out.Command[i] = in.Command[i]
		}
	}
	if in.Args != nil {
		out.Args = make([]string, len(in.Args))
		for i := range in.Args {
			out.Args[i] = in.Args[i]
		}
	}
	out.WorkingDir = in.WorkingDir
	if in.Ports != nil {
		out.Ports = make([]ContainerPort, len(in.Ports))
		for i := range in.Ports {
			if err := convert_api_ContainerPort_To_v1beta3_ContainerPort(&in.Ports[i], &out.Ports[i], s); err != nil {
				return err
			}
		}
	}
	if in.Env != nil {
		out.Env = make([]EnvVar, len(in.Env))
		for i := range in.Env {
			if err := convert_api_EnvVar_To_v1beta3_EnvVar(&in.Env[i], &out.Env[i], s); err != nil {
				return err
			}
		}
	}
	// Resources has no dedicated function here; use the scope's generic conversion.
	if err := s.Convert(&in.Resources, &out.Resources, 0); err != nil {
		return err
	}
	if in.VolumeMounts != nil {
		out.VolumeMounts = make([]VolumeMount, len(in.VolumeMounts))
		for i := range in.VolumeMounts {
			if err := convert_api_VolumeMount_To_v1beta3_VolumeMount(&in.VolumeMounts[i], &out.VolumeMounts[i], s); err != nil {
				return err
			}
		}
	}
	// Pointer fields: allocate and convert when set, otherwise explicitly nil
	// the output so a reused 'out' never keeps a stale value.
	if in.LivenessProbe != nil {
		out.LivenessProbe = new(Probe)
		if err := convert_api_Probe_To_v1beta3_Probe(in.LivenessProbe, out.LivenessProbe, s); err != nil {
			return err
		}
	} else {
		out.LivenessProbe = nil
	}
	if in.ReadinessProbe != nil {
		out.ReadinessProbe = new(Probe)
		if err := convert_api_Probe_To_v1beta3_Probe(in.ReadinessProbe, out.ReadinessProbe, s); err != nil {
			return err
		}
	} else {
		out.ReadinessProbe = nil
	}
	if in.Lifecycle != nil {
		out.Lifecycle = new(Lifecycle)
		if err := convert_api_Lifecycle_To_v1beta3_Lifecycle(in.Lifecycle, out.Lifecycle, s); err != nil {
			return err
		}
	} else {
		out.Lifecycle = nil
	}
	out.TerminationMessagePath = in.TerminationMessagePath
	out.ImagePullPolicy = PullPolicy(in.ImagePullPolicy)
	if in.SecurityContext != nil {
		out.SecurityContext = new(SecurityContext)
		if err := convert_api_SecurityContext_To_v1beta3_SecurityContext(in.SecurityContext, out.SecurityContext, s); err != nil {
			return err
		}
	} else {
		out.SecurityContext = nil
	}
	// now that we've converted set the container field from security context
	if out.SecurityContext != nil && out.SecurityContext.Privileged != nil {
		out.Privileged = *out.SecurityContext.Privileged
	}
	// now that we've converted set the container field from security context
	if out.SecurityContext != nil && out.SecurityContext.Capabilities != nil {
		out.Capabilities = *out.SecurityContext.Capabilities
	}
	return nil
}
| bcbroussard/kubernetes | pkg/api/v1beta3/conversion.go | GO | apache-2.0 | 10,116 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.weex.ui.action;
import android.support.annotation.NonNull;
import android.support.annotation.RestrictTo;
import android.support.annotation.RestrictTo.Scope;
import android.support.annotation.WorkerThread;
import android.support.v4.util.ArrayMap;
import android.text.TextUtils;
import android.util.Log;
import org.apache.weex.BuildConfig;
import org.apache.weex.WXSDKInstance;
import org.apache.weex.WXSDKManager;
import org.apache.weex.common.WXErrorCode;
import org.apache.weex.dom.transition.WXTransition;
import org.apache.weex.performance.WXAnalyzerDataTransfer;
import org.apache.weex.performance.WXStateRecord;
import org.apache.weex.ui.component.WXComponent;
import org.apache.weex.ui.component.WXVContainer;
import org.apache.weex.utils.WXExceptionUtils;
import org.apache.weex.utils.WXLogUtils;
import org.apache.weex.utils.WXUtils;
import java.util.Arrays;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
/**
 * Graphic action that builds a component from render data (in the
 * constructor, off the UI thread) and, when executed, attaches it to its
 * parent container in the native view tree.
 */
public class GraphicActionAddElement extends GraphicActionAbstractAddElement {

  private WXVContainer parent;
  private WXComponent child;
  private GraphicPosition layoutPosition;
  private GraphicSize layoutSize;
  private boolean isLayoutRTL;

  public GraphicActionAddElement(@NonNull WXSDKInstance instance, String ref,
                                 String componentType, String parentRef,
                                 int index,
                                 Map<String, String> style,
                                 Map<String, String> attributes,
                                 Set<String> events,
                                 float[] margins,
                                 float[] paddings,
                                 float[] borders) {
    super(instance, ref);
    this.mComponentType = componentType;
    this.mParentRef = parentRef;
    this.mIndex = index;
    this.mStyle = style;
    this.mAttributes = attributes;
    this.mEvents = events;
    this.mPaddings = paddings;
    this.mMargins = margins;
    this.mBorders = borders;

    if (instance.getContext() == null) {
      return;
    }

    if (WXAnalyzerDataTransfer.isInteractionLogOpen()){
      Log.d(WXAnalyzerDataTransfer.INTERACTION_TAG, "[client][addelementStart]"+instance.getInstanceId()+","+componentType+","+ref);
    }
    try {
      parent = (WXVContainer) WXSDKManager.getInstance().getWXRenderManager()
          .getWXComponent(getPageId(), mParentRef);
      long start = WXUtils.getFixUnixTime();
      BasicComponentData basicComponentData = new BasicComponentData(ref, mComponentType,
          mParentRef);
      child = createComponent(instance, parent, basicComponentData);
      child.setTransition(WXTransition.fromMap(child.getStyles(), child));
      long diff = WXUtils.getFixUnixTime()-start;
      instance.getApmForInstance().componentCreateTime += diff;

      // Interaction-tracking exemption is inherited from the parent, or set
      // explicitly by the component's "ignoreInteraction" attribute.
      if (null != parent && parent.isIgnoreInteraction){
        child.isIgnoreInteraction = true;
      }
      if (!child.isIgnoreInteraction ){
        Object flag = null;
        if (null != child.getAttrs()){
          flag = child.getAttrs().get("ignoreInteraction");
        }
        if ("false".equals(flag) || "0".equals(flag)){
          child.isIgnoreInteraction = false;
        }else if ("1".equals(flag) || "true".equals(flag) || child.isFixed()){
          child.isIgnoreInteraction = true;
        }
      }
      WXStateRecord.getInstance().recordAction(instance.getInstanceId(),"addElement");
    } catch (ClassCastException e) {
      // The referenced parent is not a container. Collect as much diagnostic
      // context as possible before reporting.
      Map<String, String> ext = new ArrayMap<>();
      WXComponent parent = WXSDKManager.getInstance().getWXRenderManager()
          .getWXComponent(getPageId(), mParentRef);
      if (mStyle != null && !mStyle.isEmpty()) {
        ext.put("child.style", mStyle.toString());
      }
      if (parent != null && parent.getStyles() != null && !parent.getStyles().isEmpty()) {
        ext.put("parent.style", parent.getStyles().toString());
      }
      if (mAttributes != null && !mAttributes.isEmpty()) {
        ext.put("child.attr", mAttributes.toString());
      }
      if (parent != null && parent.getAttrs() != null && !parent.getAttrs().isEmpty()) {
        ext.put("parent.attr", parent.getAttrs().toString());
      }
      if (mEvents != null && !mEvents.isEmpty()) {
        ext.put("child.event", mEvents.toString());
      }
      if (parent != null && parent.getEvents() != null && !parent.getEvents().isEmpty()) {
        ext.put("parent.event", parent.getEvents().toString());
      }
      if (mMargins != null && mMargins.length > 0) {
        ext.put("child.margin", Arrays.toString(mMargins));
      }
      if (parent != null && parent.getMargin() != null) {
        ext.put("parent.margin", parent.getMargin().toString());
      }
      if (mPaddings != null && mPaddings.length > 0) {
        ext.put("child.padding", Arrays.toString(mPaddings));
      }
      if (parent != null && parent.getPadding() != null) {
        ext.put("parent.padding", parent.getPadding().toString());
      }
      if (mBorders != null && mBorders.length > 0) {
        ext.put("child.border", Arrays.toString(mBorders));
      }
      if (parent != null && parent.getBorder() != null) {
        ext.put("parent.border", parent.getBorder().toString());
      }
      // Fix: the original re-fetched the component here and called
      // getComponentType() without a null check, which could throw an NPE
      // inside this error handler. Reuse the null-checked local instead.
      String parentType = parent == null ? "unknown" : parent.getComponentType();
      WXExceptionUtils.commitCriticalExceptionRT(instance.getInstanceId(),
          WXErrorCode.WX_RENDER_ERR_CONTAINER_TYPE,
          "GraphicActionAddElement",
          String.format(Locale.ENGLISH,"You are trying to add a %s to a %2$s, which is illegal as %2$s is not a container",
              componentType,
              parentType),
          ext);
    }
  }

  /** Records whether the subtree should lay out right-to-left. */
  @RestrictTo(Scope.LIBRARY)
  @WorkerThread
  public void setRTL(boolean isRTL){
    this.isLayoutRTL = isRTL;
  }

  /** Records the precomputed layout size applied in executeAction(). */
  @RestrictTo(Scope.LIBRARY)
  @WorkerThread
  public void setSize(GraphicSize graphicSize){
    this.layoutSize = graphicSize;
  }

  /** Records the precomputed layout position applied in executeAction(). */
  @RestrictTo(Scope.LIBRARY)
  @WorkerThread
  public void setPosition(GraphicPosition position){
    this.layoutPosition = position;
  }

  /** Updates the insertion index within the parent container. */
  @RestrictTo(Scope.LIBRARY)
  @WorkerThread
  public void setIndex(int index){
    mIndex = index;
  }

  /**
   * Mounts the component built in the constructor: adds it to the parent,
   * creates the native view, applies layout/events, and binds data.
   * Failures are logged rather than crashing the render pipeline.
   */
  @Override
  public void executeAction() {
    super.executeAction();
    if (parent == null || child == null) {
      // Component construction failed (or the parent ref was invalid); the
      // error was already reported in the constructor, so skip mounting.
      WXLogUtils.e("GraphicActionAddElement", "parent or child is null, skipping executeAction");
      return;
    }
    try {
      if (!TextUtils.equals(mComponentType, "video") && !TextUtils.equals(mComponentType, "videoplus"))
        child.mIsAddElementToTree = true;

      long start = WXUtils.getFixUnixTime();
      parent.addChild(child, mIndex);
      parent.createChildViewAt(mIndex);
      child.setIsLayoutRTL(isLayoutRTL);
      if(layoutPosition !=null && layoutSize != null) {
        child.setDemission(layoutSize, layoutPosition);
      }
      child.applyLayoutAndEvent(child);
      child.bindData(child);
      long diff = WXUtils.getFixUnixTime() - start;
      if (null != getWXSDKIntance()){
        getWXSDKIntance().getApmForInstance().viewCreateTime +=diff;
      }
    } catch (Exception e) {
      WXLogUtils.e("add component failed.", e);
    }
  }
}
| alibaba/weex | android/sdk/src/main/java/org/apache/weex/ui/action/GraphicActionAddElement.java | Java | apache-2.0 | 7,884 |
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Generated code. DO NOT EDIT!
namespace Google.Cloud.ResourceManager.V3.Snippets
{
// [START cloudresourcemanager_v3_generated_TagKeys_TestIamPermissions_sync]
using Google.Api.Gax;
using Google.Cloud.Iam.V1;
using Google.Cloud.ResourceManager.V3;
    public sealed partial class GeneratedTagKeysClientSnippets
    {
        /// <summary>Snippet for TestIamPermissions</summary>
        /// <remarks>
        /// This snippet has been automatically generated for illustrative purposes only.
        /// It may require modifications to work in your environment.
        /// </remarks>
        public void TestIamPermissionsRequestObject()
        {
            // Create client
            TagKeysClient tagKeysClient = TagKeysClient.Create();
            // Initialize request argument(s)
            // NOTE: placeholder values below ("a/wildcard/resource", the empty
            // permission string) must be replaced with a real TagKey resource
            // name and concrete permission strings before running.
            TestIamPermissionsRequest request = new TestIamPermissionsRequest
            {
                ResourceAsResourceName = new UnparsedResourceName("a/wildcard/resource"),
                Permissions = { "", },
            };
            // Make the request
            TestIamPermissionsResponse response = tagKeysClient.TestIamPermissions(request);
        }
    }
// [END cloudresourcemanager_v3_generated_TagKeys_TestIamPermissions_sync]
}
| googleapis/google-cloud-dotnet | apis/Google.Cloud.ResourceManager.V3/Google.Cloud.ResourceManager.V3.GeneratedSnippets/TagKeysClient.TestIamPermissionsRequestObjectSnippet.g.cs | C# | apache-2.0 | 1,856 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.mallet;
import java.util.logging.Logger;
import java.util.Properties;
import java.io.*;
/**
 * Configuration parameters for a benchmark run.
 *
 * <p>Values are read once, at class load time, from
 * {@code <working dir>/conf/conf.properties}; command-line flags are applied
 * afterwards via {@link #parseCommandLine(String[])}. Access the singleton
 * through {@link #getConf()}.
 */
public class Conf {
  private static final Logger logger = Logger.getLogger(Conf.class.getName());
  private static Conf conf = new Conf();
  private final String baseDirectory; //Base directory of this benchmark
  private final String hiveServerHost;
  private final String hiveServerPort;
  private final int numberOfStreams;
  private final String tpcDsToolDirectory;
  private final String tempDirectory;
  private final String malletDbDir;
  private final int scale;
  private final String user;
  // Flags below are mutated by parseCommandLine() after construction.
  private boolean quickRunMode = false;
  private boolean powerTestOnly = false;
  private boolean singleQueryMode = false;
  private int queryId;
  private String dbSettings; // lazily loaded and cached by getDbSettings()

  // Reads a required property, failing class initialization if it is absent.
  private String getProperty(Properties prop, String key) {
    String value = prop.getProperty(key);
    if (value == null) {
      throw new ExceptionInInitializerError(key + " in conf file not found!");
    }
    return value;
  }

  private Conf() {
    baseDirectory = System.getProperty("user.dir");
    tempDirectory = System.getProperty("java.io.tmpdir");
    tpcDsToolDirectory = baseDirectory + "/tools";
    String confFile = baseDirectory + "/conf/conf.properties";
    Properties prop = new Properties();
    // try-with-resources closes the stream even when load() fails (the
    // original leaked the FileInputStream). FileNotFoundException is an
    // IOException, so a single catch clause covers both failure modes.
    try (FileInputStream in = new FileInputStream(confFile)) {
      prop.load(in);
    } catch (IOException e) {
      throw new ExceptionInInitializerError(e);
    }
    hiveServerHost = getProperty(prop, "hiveServerHost");
    hiveServerPort = getProperty(prop, "hiveServerPort");
    numberOfStreams = Integer.parseInt(getProperty(prop, "numberOfStreams"));
    // Multiple query streams are concurrently executed in a Throughput Test.
    // The number of streams is any even number larger or equal to 4.
    if (!(numberOfStreams >= 4 && ((numberOfStreams % 2) == 0))) {
      throw new ExceptionInInitializerError("Number of streams for Throughput Test must be any even number larger or equal to 4.");
    }
    scale = Integer.parseInt(getProperty(prop, "scaleFactor"));
    // Valid scale factors are 1,100,300,1000,3000,10000,30000,100000
    int[] scaleFactors = {1, 100, 300, 1000, 3000, 10000, 30000, 100000};
    boolean validScale = false;
    for (int factor : scaleFactors) {
      if (scale == factor) {
        validScale = true;
        break;
      }
    }
    if (!validScale) {
      throw new ExceptionInInitializerError("Invalid scale factor.");
    }
    user = getProperty(prop, "user");
    malletDbDir = getProperty(prop, "malletDbDir") + "/mallet/DATA";
  }

  /**
   * Applies command-line flags: --quickrun, --powertest, and
   * {@code --query <id>} (which implies a power test over a single query,
   * id in [1, 99]).
   *
   * @throws MalletException on any unrecognized or malformed argument
   */
  public void parseCommandLine(String[] args) throws MalletException {
    boolean argError = false;
    for (int i = 0; i < args.length; i++) {
      String arg = args[i];
      if (arg.equalsIgnoreCase("--quickrun")) {
        quickRunMode = true;
      } else if (arg.equalsIgnoreCase("--powertest")) {
        powerTestOnly = true;
      } else if (arg.equalsIgnoreCase("--query")) {
        powerTestOnly = true;
        singleQueryMode = true;
        if ((i + 1) >= args.length) {
          argError = true;
          break;
        }
        arg = args[i + 1];
        try {
          queryId = Integer.parseInt(arg);
        } catch (NumberFormatException e) {
          argError = true;
          break;
        }
        if (queryId < 1 || queryId > 99) {
          argError = true;
          break;
        }
        i++; // consumed the query id argument
      } else {
        argError = true;
        break;
      }
    }
    if (argError) {
      throw new MalletException("Invalid command line arguments.");
    }
  }

  /** Returns the process-wide configuration singleton. */
  public static Conf getConf() {
    return conf;
  }

  public String getBaseDirectory() {
    return baseDirectory;
  }

  public String getHiveServerHost() {
    return hiveServerHost;
  }

  public String getHiveServerPort() {
    return hiveServerPort;
  }

  public int getNumberOfStreams() {
    return numberOfStreams;
  }

  public String getTpcDsToolDirectory() {
    return tpcDsToolDirectory;
  }

  public String getTempDirectory() {
    return tempDirectory;
  }

  public String getMalletDbDirectory() {
    return malletDbDir;
  }

  public int getScale() {
    return scale;
  }

  public String getUser() {
    return user;
  }

  public boolean isQuickRunMode() {
    return quickRunMode;
  }

  public boolean isPowerTestOnly() {
    return powerTestOnly;
  }

  public boolean isSingleQueryMode() {
    return singleQueryMode;
  }

  public int getQueryId() {
    return queryId;
  }

  /**
   * Lazily loads and caches the Hive session settings from
   * conf/hive_settings.hql; returns null if the file cannot be read (callers
   * treat that as "no settings").
   */
  public String getDbSettings() {
    if (dbSettings != null) {
      return dbSettings;
    }
    String dbSettingsFile = getBaseDirectory() + "/conf/hive_settings.hql";
    try {
      dbSettings = Utility.readHqlFile(dbSettingsFile);
      return dbSettings;
    } catch (MalletException e) {
      return null;
    }
  }
}
| wyg1990/Mallet | src/main/java/com/intel/mallet/Conf.java | Java | apache-2.0 | 5,783 |
package com.pacoapp.paco.ui;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.android.apps.paco.AccountChooser;
import com.google.android.gms.auth.GooglePlayServicesAvailabilityException;
import com.google.android.gms.auth.UserRecoverableAuthException;
import com.google.android.gms.common.ConnectionResult;
import com.google.android.gms.common.GooglePlayServicesUtil;
import com.pacoapp.paco.R;
import com.pacoapp.paco.UserPreferences;
import com.pacoapp.paco.net.AbstractAuthTokenTask;
import com.pacoapp.paco.net.GetAuthTokenInForeground;
import com.pacoapp.paco.net.NetworkClient;
import android.accounts.Account;
import android.accounts.AccountManager;
import android.accounts.AccountManagerCallback;
import android.accounts.AccountManagerFuture;
import android.accounts.OperationCanceledException;
import android.annotation.SuppressLint;
import android.app.Activity;
import android.app.Dialog;
import android.content.Context;
import android.content.Intent;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
import android.os.Build;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.Toast;
public class SplashActivity extends Activity implements NetworkClient {
private static Logger Log = LoggerFactory.getLogger(SplashActivity.class);
public static final String EXTRA_ACCOUNTNAME = "extra_accountname";
public static final String EXTRA_CHANGING_EXISTING_ACCOUNT = "extra_changing_existing_account";
public static final int REQUEST_CODE_PICK_ACCOUNT = 1000;
public static final int REQUEST_CODE_RECOVER_FROM_AUTH_ERROR = 1001;
public static final int REQUEST_CODE_RECOVER_FROM_PLAY_SERVICES_ERROR = 1002;
protected static final int ACCOUNT_CHOOSER_REQUEST_CODE = 55;
private UserPreferences userPrefs;
private boolean changingExistingAccount;
  /**
   * Inflates the splash screen and wires the login button to start the Google
   * account authentication flow.
   */
  @Override
  protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.splash_screen);
    Log.debug("SplashActivity onCreate()");
    // True when the user is switching accounts rather than logging in fresh.
    changingExistingAccount = getIntent().getBooleanExtra(EXTRA_CHANGING_EXISTING_ACCOUNT, false);
    userPrefs = new UserPreferences(getApplicationContext());
    Button loginButton = (Button)findViewById(R.id.loginButton);
    loginButton.setOnClickListener(new View.OnClickListener() {
      @SuppressLint("NewApi")
      @Override
      public void onClick(View v) {
        authenticateUser();
      }
    });
  }
  /**
   * Routes results from the sub-flows this activity launches: the account
   * picker, auth-error recovery, and Play Services error recovery.
   */
  @Override
  protected void onActivityResult(int requestCode, int resultCode, Intent data) {
    if (requestCode == REQUEST_CODE_PICK_ACCOUNT) {
      if (resultCode == RESULT_OK) {
        // Persist the chosen account, then restart authentication with it.
        userPrefs.saveSelectedAccount(data.getStringExtra(AccountManager.KEY_ACCOUNT_NAME));
        changingExistingAccount = false; // unset so that we don't loop in the picker forever
        authenticateUser();
      } else if (resultCode == RESULT_CANCELED) {
        Toast.makeText(this, R.string.you_must_pick_an_account, Toast.LENGTH_SHORT).show();
      }
    } else if ((requestCode == REQUEST_CODE_RECOVER_FROM_AUTH_ERROR ||
        requestCode == REQUEST_CODE_RECOVER_FROM_PLAY_SERVICES_ERROR)
        && resultCode == RESULT_OK) {
      // Recovery succeeded; retry the token fetch and skip the super call.
      handleAuthorizeResult(resultCode, data);
      return;
    }
    super.onActivityResult(requestCode, resultCode, data);
  }
private void handleAuthorizeResult(int resultCode, Intent data) {
if (data == null) {
show("Unknown error, click the button again");
return;
}
if (resultCode == RESULT_OK) {
Log.info("Retrying");
getTask(this).execute();
return;
}
if (resultCode == RESULT_CANCELED) {
show("User rejected authorization.");
return;
}
show("Unknown error, click the button again");
}
protected void oldonActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
if (requestCode == ACCOUNT_CHOOSER_REQUEST_CODE && resultCode == Activity.RESULT_OK) {
String accountName = null;
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH) {
accountName = data.getStringExtra(AccountManager.KEY_ACCOUNT_NAME);
} else {
accountName = data.getStringExtra(AccountChooser.ACCOUNT_NAME);
}
if (accountName != null) {
userPrefs.saveSelectedAccount(accountName);
getAuthAccessToken(accountName);
// String token = GoogleAuthUtil.getToken(this, accountName, PacoService.AUTH_TOKEN_TYPE_USERINFO_EMAIL);
// finish();
} else {
finish(); // TODO handler errors
}
} else {
Toast.makeText(this, R.string.you_must_pick_an_account, Toast.LENGTH_SHORT).show();
}
}
private void getAuthAccessToken(final String accountName) {
AccountManager accountManager = AccountManager.get(this);
Account[] accounts = accountManager.getAccountsByType("com.google");
Account account = null;
for (Account currentAccount : accounts) {
if (currentAccount.name.equals(accountName)) {
account = currentAccount;
break;
}
}
String accessToken = getAccessToken();
if (accessToken != null) {
Log.info("Invalidating previous OAuth2 access token: " + accessToken);
accountManager.invalidateAuthToken(account.type, accessToken);
setAccessToken(null);
}
String authTokenType = AbstractAuthTokenTask.AUTH_TOKEN_TYPE_USERINFO_EMAIL;
Log.info("Get access token for " + accountName + " using authTokenType " + authTokenType);
accountManager.getAuthToken(account, authTokenType, null, this,
new AccountManagerCallback<Bundle>() {
@Override
public void run(AccountManagerFuture<Bundle> future) {
try {
String accessToken = future.getResult().getString(AccountManager.KEY_AUTHTOKEN);
Log.info("Got OAuth2 access token: " + accessToken);
setAccessToken(accessToken);
//
// Intent result = new Intent();
// result.putExtra(AccountChooser.ACCOUNT_NAME, accountName);
// SplashActivity.this.setResult(0, result);
SplashActivity.this.finish();
// finish();
} catch (OperationCanceledException e) {
Log.error("The user has denied you access to the API");
} catch (Exception e) {
Log.error(e.getMessage());
Log.error("Exception: ", e);
}
}
}, null);
}
private void setAccessToken(String token) {
userPrefs.setAccessToken(token);
}
private String getAccessToken() {
return userPrefs.getAccessToken();
}
@Override
protected void onResume() {
super.onResume();
//handle case of broken Google Play Services
// TODO remove when we get a build that properly incorporates Google Play Services and resources
// and can build an apk with < 64k methods for Android < 5.0 phones
int resultCode = GooglePlayServicesUtil.isGooglePlayServicesAvailable(getApplicationContext());
if (resultCode != ConnectionResult.SUCCESS) {
try {
// if the class that Paco doesn't provide is not on the system, don't
// use it to show an error dialog. Instead make a toast or dialog.
SplashActivity.this.getClassLoader().loadClass("com.google.android.gms.common.R$string");
Dialog dialog = GooglePlayServicesUtil.getErrorDialog(resultCode,
SplashActivity.this,
REQUEST_CODE_RECOVER_FROM_PLAY_SERVICES_ERROR);
dialog.show();
} catch (ClassNotFoundException e) {
Toast.makeText(getApplicationContext(),
"GooglePlayServices " + getString(R.string.are_not_available_) + " " + getString(R.string.error) + ":\n" + getGooglePlayConnectionErrorString(resultCode),
Toast.LENGTH_LONG).show();
}
} else {
if (changingExistingAccount) {
authenticateUser();
}
}
}
public void authenticateUser() {
if (userPrefs.getSelectedAccount() == null || changingExistingAccount) {
pickUserAccount();
} else {
if (isDeviceOnline()) {
getTask(this).execute();
} else {
Toast.makeText(this, getString(R.string.network_required), Toast.LENGTH_LONG).show();
}
}
}
private AbstractAuthTokenTask getTask(SplashActivity activity) {
return new GetAuthTokenInForeground(activity);
}
@SuppressLint("NewApi")
public void pickUserAccount() {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH) {
Account account = null;
if (userPrefs.getSelectedAccount() != null) {
account = getAccountFor(userPrefs.getSelectedAccount());
}
Intent intent = AccountManager.newChooseAccountIntent(account, null,
new String[]{"com.google"},
changingExistingAccount,
null,
AbstractAuthTokenTask.AUTH_TOKEN_TYPE_USERINFO_EMAIL,
null, null);
startActivityForResult(intent, REQUEST_CODE_PICK_ACCOUNT);
} else {
Intent intent = new Intent(SplashActivity.this, AccountChooser.class);
startActivityForResult(intent, REQUEST_CODE_PICK_ACCOUNT);
}
}
private Account getAccountFor(String selectedAccount) {
AccountManager am = AccountManager.get(this);
Account[] accounts = am.getAccountsByType("com.google");
for (Account account : accounts) {
if (account.name.equals(selectedAccount)) {
return account;
}
}
return null;
}
/** Checks whether the device currently has a network connection */
private boolean isDeviceOnline() {
ConnectivityManager connMgr = (ConnectivityManager) getSystemService(Context.CONNECTIVITY_SERVICE);
NetworkInfo networkInfo = connMgr.getActiveNetworkInfo();
if (networkInfo != null && networkInfo.isConnected()) {
return true;
}
return false;
}
public void show(final String message) {
runOnUiThread(new Runnable() {
@Override
public void run() {
Toast.makeText(SplashActivity.this, message, Toast.LENGTH_LONG);
}
});
}
@Override
public void handleException(final Exception e) {
runOnUiThread(new Runnable() {
@Override
public void run() {
if (e instanceof GooglePlayServicesAvailabilityException) {
// The Google Play services APK is old, disabled, or not present.
// Show a dialog created by Google Play services that allows
// the user to update the APK
int statusCode = ((GooglePlayServicesAvailabilityException)e)
.getConnectionStatusCode();
try {
// TODO remove this when we can build Google Play Services in properly
// if the class that Paco doesn't provide is not on the system, don't
// use it to show an error dialog. Instead make a toast or dialog.
SplashActivity.this.getClassLoader().loadClass("com.google.android.gms.common.R$string");
Dialog dialog = GooglePlayServicesUtil.getErrorDialog(statusCode,
SplashActivity.this,
REQUEST_CODE_RECOVER_FROM_PLAY_SERVICES_ERROR);
dialog.show();
} catch (ClassNotFoundException e) {
String gpsError = getGooglePlayConnectionErrorString(statusCode);
Toast.makeText(getApplicationContext(),
getString(R.string.error) + ": " + gpsError,
Toast.LENGTH_LONG).show();
}
} else if (e instanceof UserRecoverableAuthException) {
// Unable to authenticate, such as when the user has not yet granted
// the app access to the account, but the user can fix this.
// Forward the user to an activity in Google Play services.
Intent intent = ((UserRecoverableAuthException)e).getIntent();
startActivityForResult(intent,
REQUEST_CODE_RECOVER_FROM_PLAY_SERVICES_ERROR);
}
}
});
}
public String getGooglePlayConnectionErrorString(int statusCode) {
String gpsError = "unknown";
switch(statusCode) {
case ConnectionResult.API_UNAVAILABLE:
gpsError = "API Unavailable";
break;
case ConnectionResult.CANCELED:
gpsError = "Canceled";
break;
case ConnectionResult.DEVELOPER_ERROR:
gpsError = "Developer Error";
break;
case ConnectionResult.INTERNAL_ERROR:
gpsError = "Internal error";
break;
case ConnectionResult.INTERRUPTED:
gpsError = "Interrupted";
break;
case ConnectionResult.INVALID_ACCOUNT:
gpsError = "Invalid Account";
break;
case ConnectionResult.LICENSE_CHECK_FAILED:
gpsError = "License Check Failed";
break;
case ConnectionResult.NETWORK_ERROR:
gpsError = "Network Error";
break;
case ConnectionResult.RESOLUTION_REQUIRED:
gpsError = "Resolution Required";
break;
case ConnectionResult.SERVICE_DISABLED:
gpsError = "Service Disabled";
break;
case ConnectionResult.SERVICE_INVALID:
gpsError = "Service Invalid";
break;
case ConnectionResult.SERVICE_MISSING:
gpsError = "Service Missing";
break;
case ConnectionResult.SERVICE_VERSION_UPDATE_REQUIRED:
gpsError = "Service version update required";
break;
case ConnectionResult.SIGN_IN_FAILED:
gpsError = "Sign in failed";
break;
case ConnectionResult.SIGN_IN_REQUIRED:
gpsError = "Sign in required";
break;
case ConnectionResult.SUCCESS:
gpsError = "Success";
break;
case ConnectionResult.TIMEOUT:
gpsError = "Timeout";
break;
default:
break;
}
return gpsError;
}
public void showAndFinish(String string) {
show(string);
finish();
}
@Override
public Context getContext() {
return this.getApplicationContext();
}
}
| google/paco | Paco/src/com/pacoapp/paco/ui/SplashActivity.java | Java | apache-2.0 | 14,782 |
/**
*
* Process Editor - Animation Package
*
* (C) 2009, 2010 inubit AG
* (C) 2014 the authors
*
*/
package com.inubit.research.animation;
import java.awt.Point;
import java.util.ArrayList;
import java.util.List;
import net.frapu.code.visualization.Configuration;
import net.frapu.code.visualization.LayoutUtils;
import net.frapu.code.visualization.ProcessEdge;
import net.frapu.code.visualization.ProcessEditor;
import net.frapu.code.visualization.ProcessModel;
import net.frapu.code.visualization.ProcessNode;
import net.frapu.code.visualization.ProcessObject;
import com.inubit.research.layouter.LayoutHelper;
import com.inubit.research.layouter.ProcessLayouter;
import com.inubit.research.layouter.WorkBenchSpecific.WorkbenchHandler;
import com.inubit.research.layouter.adapter.ProcessNodeAdapter;
import com.inubit.research.layouter.interfaces.AbstractModelAdapter;
/**
* @author ff
*
*/
/**
 * Wraps a {@link ProcessLayouter} so that the layout it computes is applied to
 * the editor's model as a smooth animation rather than an instant jump.
 *
 * Strategy: the model is cloned, the clone is laid out off-screen, and the
 * resulting coordinates/sizes are written into animator wrappers that tween
 * the on-screen nodes and edges to their new positions.
 *
 * @author ff
 */
public class LayoutingAnimator implements IAnimationListener {
/**
 * Configuration Key values
 */
public static final String CONF_ANIMATION_SPEED = "LayouterAnimationSpeed";
// System.nanoTime() captured when the animation starts; used only for the duration log.
private long start;
// The layouter whose computed positions are animated.
private ProcessLayouter f_layouter;
// Override for the animation duration in ms; -1 means "use the configured default".
private int f_animationTime = -1;
private Animator animator;
private ProcessEditor f_editor;
// Saved value of the editor's "layout edges" flag, restored when the animation ends.
private boolean f_layoutEdgesValue;
/**
 * Creates an animating wrapper around the given layouter.
 */
public LayoutingAnimator(ProcessLayouter layouter) {
f_layouter = layouter;
}
/** Returns the wrapped layouter. */
public ProcessLayouter getLayouter() {
return f_layouter;
}
/**
 * Animates the layout of the model.
 *
 * Clones the editor's model, applies the layouter to the clone (honoring any
 * target sizes from {@code animList} and a partial-layout region when
 * multiple nodes are selected), then builds node/edge animator wrappers that
 * tween the real model objects to the clone's laid-out geometry.
 *
 * @param editor the editor whose model is laid out
 * @param animList optional extra animators whose target sizes are applied
 *                 before layouting (may be null)
 * @param xstart x origin passed to the layouter
 * @param ystart y origin passed to the layouter
 * @param direction layout direction hint (currently passed as 0 to the layouter)
 * @throws Exception
 */
public void layoutModelWithAnimation(ProcessEditor editor, List<NodeAnimator> animList, int xstart, int ystart, int direction)
throws Exception {
// Animator orgAnimator = editor.getAnimator().getAnimator();
// if (orgAnimator != null) {
// orgAnimator.setRunning(false);
// }
// animator = new Animator(null, 60);
// animator.start();
// animator.setParent(editor);
f_editor = editor;
animator = editor.getAnimator().getAnimator();
ProcessModel model = editor.getModel();
// Work on a clone so the on-screen model keeps its old geometry until the tween runs.
ProcessModel copy = model.clone();
ProcessNode _selNode = findNode(editor.getSelectionHandler().getLastSelectedNode(), copy);
if (_selNode != null) {
ProcessNodeAdapter selectedNode = new ProcessNodeAdapter(_selNode);
f_layouter.setSelectedNode(selectedNode);
} else {
f_layouter.setSelectedNode(null);
}
// Fix all sizes to final size
if (animList != null) {
for (NodeAnimator a : animList) {
if (a instanceof DefaultNodeAnimator) {
DefaultNodeAnimator defA = (DefaultNodeAnimator) a;
// Check if node is contained in copy
if (model.getNodes().contains(defA.getNode())) {
// If found, set target size for layouting
findNode(defA.getNode(), copy).setSize(defA.getNewSize().width, defA.getNewSize().height);
}
}
}
}
// When several nodes are selected only that region is laid out; _offset is
// the region's top-left corner (or MAX_VALUE when laying out the whole model).
Point _offset = determinePartialLayoutingRegion(editor, copy);
AbstractModelAdapter modelI = LayoutUtils.getAdapter(copy);
f_layouter.layoutModel(modelI, xstart, ystart, 0);
WorkbenchHandler.postProcess(f_layouter, copy);
int _animationTime = f_animationTime;
if (_animationTime == -1) {
_animationTime = LayoutHelper.toInt(Configuration.getInstance().getProperty(CONF_ANIMATION_SPEED, "6000"), 6000);
}
//writing back coords to wrappers
ArrayList<NodeAnimator> wrappers = new ArrayList<NodeAnimator>();
for (ProcessNode n : editor.getModel().getNodes()) {
DefaultNodeAnimator w = new DefaultNodeAnimator(n, animator);
w.setAnimationTime(_animationTime);
ProcessNode dup = findNode(n, copy);
if (dup != null) {
Point _pos = applyPartialLayoutingOffsetToNode(_offset, dup);
w.setNewCoords(_pos);
w.setNewSize(dup.getSize());
wrappers.add(w);
}
}
for (ProcessEdge edge : editor.getModel().getEdges()) {
DefaultEdgeAnimator w = new DefaultEdgeAnimator(edge, animator);
w.setAnimationTime(_animationTime);
ProcessEdge _e = (ProcessEdge) copy.getObjectById(edge.getId());
if (copy.getEdges().contains(_e)) {
applyPartialLayoutingOffsetToEdge(_offset, _e);
w.transformTo(_e);
wrappers.add(w);
}
}
// Check if additional animation list @todo Refactor :-)
if (animList != null) {
for (NodeAnimator a : animList) {
if (wrappers.contains(a)) {
//Already contained, modify
NodeAnimator org = wrappers.get(wrappers.indexOf(a));
if (org instanceof DefaultNodeAnimator) {
DefaultNodeAnimator defOrg = (DefaultNodeAnimator) org;
defOrg.setNewSize(((DefaultNodeAnimator) a).getNewSize());
}
}
}
}
// Listen on the first wrapper so we are told when the whole batch finishes.
if (wrappers.size() > 0) {
wrappers.get(0).addListener(this);
start = System.nanoTime();
}
// Disable edge layouting while tweening; restored in animationFinished().
f_layoutEdgesValue = editor.isLayoutEdges();
editor.setLayoutEdges(false);
animator.setAnimationObjects(wrappers);
}
/**
 * Shifts an edge's interior routing points by the partial-layout offset.
 * No-op when the whole model was laid out (_offset.x == Integer.MAX_VALUE)
 * or when the edge has no interior routing points.
 */
private void applyPartialLayoutingOffsetToEdge(Point _offset, ProcessEdge _e) {
if (_offset.x != Integer.MAX_VALUE) {
List<Point> _rps = _e.getRoutingPoints();
if (_rps.size() > 2) {
// First and last routing points are the endpoints; only shift the interior ones.
_rps.remove(0);
_rps.remove(_rps.size() - 1);
for (Point p : _rps) {
p.x += _offset.x;
p.y += _offset.y;
}
//setting new routing points
_e.clearRoutingPoints();
for (int i = 0; i < _rps.size(); i++) {
_e.addRoutingPoint(i, _rps.get(i));
}
}
}
}
/**
 * Returns the node's laid-out position, shifted by the partial-layout offset
 * when one is in effect (offset.x != Integer.MAX_VALUE).
 */
private Point applyPartialLayoutingOffsetToNode(Point _offset, ProcessNode dup) {
Point _pos = dup.getPos();
if (_offset.x != Integer.MAX_VALUE) {
_pos.x += _offset.x;
_pos.y += _offset.y;
}
return _pos;
}
/**
 * used for partial layouting (if just some node are selected)
 *
 * If more than one object is selected, all unselected nodes/edges are removed
 * from the clone so only the selection is laid out, and the selection's
 * top-left corner is returned as the offset to re-anchor the result.
 * Otherwise returns (Integer.MAX_VALUE, Integer.MAX_VALUE) meaning "no offset".
 *
 * @param editor
 * @param copy
 * @return
 */
private Point determinePartialLayoutingRegion(ProcessEditor editor,
ProcessModel copy) {
List<ProcessObject> _selectedNodes = editor.getSelectionHandler().getSelection();
Point _offset = new Point(Integer.MAX_VALUE, Integer.MAX_VALUE);
if (_selectedNodes.size() > 1) {
for (ProcessObject o : _selectedNodes) {
if (o instanceof ProcessNode) {
ProcessNode _n = (ProcessNode) o;
// Track the top-left corner of the selection's bounding box.
_offset.x = Math.min(_offset.x, _n.getPos().x - _n.getSize().width / 2);
_offset.y = Math.min(_offset.y, _n.getPos().y - _n.getSize().height / 2);
}
}
for (ProcessNode n : new ArrayList<ProcessNode>(copy.getNodes())) {
if (!_selectedNodes.contains(n)) {
copy.removeNode(n);
}
}
for (ProcessEdge e : new ArrayList<ProcessEdge>(copy.getEdges())) {
if (!_selectedNodes.contains(e)) {
copy.removeEdge(e);
}
}
}
return _offset;
}
/**
 * Called when the first wrapper's animation completes: logs the duration and
 * restores the editor's edge-layouting flag.
 */
@Override
public void animationFinished(NodeAnimator node) {
node.removeListener(this);
System.out.println("Animation took: " + (System.nanoTime() - start) / 1000000 + " ms");
f_editor.setLayoutEdges(f_layoutEdgesValue);
// Kill Animator thread
//animator.setRunning(false);
}
/**
 * Finds the node in {@code copy} with the same PROP_ID as {@code original};
 * returns null when original is null or no match exists.
 */
private ProcessNode findNode(ProcessNode original, ProcessModel copy) {
if (original != null) {
String _id = original.getProperty(ProcessNode.PROP_ID);
for (ProcessNode n : copy.getNodes()) {
if (n.getProperty(ProcessNode.PROP_ID).equals(_id)) {
return n;
}
}
}
return null;
}
/**
 * can be used to override the user set animation time for special occassions
 * @param time animation duration in milliseconds; pass -1 to fall back to the configured default
 */
public void setCustomAnimationTime(int time) {
f_animationTime = time;
}
}
| bptlab/processeditor | src/com/inubit/research/animation/LayoutingAnimator.java | Java | apache-2.0 | 9,066 |
//CHECKSTYLE:FileLength:OFF
/*! ******************************************************************************
*
* Pentaho Data Integration
*
* Copyright (C) 2002-2013 by Pentaho : http://www.pentaho.com
*
*******************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
******************************************************************************/
package org.pentaho.di.trans;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.Deque;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.vfs.FileName;
import org.apache.commons.vfs.FileObject;
import org.pentaho.di.cluster.SlaveServer;
import org.pentaho.di.core.BlockingBatchingRowSet;
import org.pentaho.di.core.BlockingRowSet;
import org.pentaho.di.core.Const;
import org.pentaho.di.core.Counter;
import org.pentaho.di.core.ExecutorInterface;
import org.pentaho.di.core.ExtensionDataInterface;
import org.pentaho.di.core.KettleEnvironment;
import org.pentaho.di.core.QueueRowSet;
import org.pentaho.di.core.Result;
import org.pentaho.di.core.ResultFile;
import org.pentaho.di.core.RowMetaAndData;
import org.pentaho.di.core.RowSet;
import org.pentaho.di.core.SingleRowRowSet;
import org.pentaho.di.core.database.Database;
import org.pentaho.di.core.database.DatabaseMeta;
import org.pentaho.di.core.database.DatabaseTransactionListener;
import org.pentaho.di.core.database.map.DatabaseConnectionMap;
import org.pentaho.di.core.exception.KettleDatabaseException;
import org.pentaho.di.core.exception.KettleException;
import org.pentaho.di.core.exception.KettleFileException;
import org.pentaho.di.core.exception.KettleTransException;
import org.pentaho.di.core.exception.KettleValueException;
import org.pentaho.di.core.extension.ExtensionPointHandler;
import org.pentaho.di.core.extension.KettleExtensionPoint;
import org.pentaho.di.core.logging.ChannelLogTable;
import org.pentaho.di.core.logging.HasLogChannelInterface;
import org.pentaho.di.core.logging.KettleLogStore;
import org.pentaho.di.core.logging.LogChannel;
import org.pentaho.di.core.logging.LogChannelInterface;
import org.pentaho.di.core.logging.LogLevel;
import org.pentaho.di.core.logging.LogStatus;
import org.pentaho.di.core.logging.LoggingHierarchy;
import org.pentaho.di.core.logging.LoggingMetric;
import org.pentaho.di.core.logging.LoggingObjectInterface;
import org.pentaho.di.core.logging.LoggingObjectType;
import org.pentaho.di.core.logging.LoggingRegistry;
import org.pentaho.di.core.logging.Metrics;
import org.pentaho.di.core.logging.MetricsLogTable;
import org.pentaho.di.core.logging.MetricsRegistry;
import org.pentaho.di.core.logging.PerformanceLogTable;
import org.pentaho.di.core.logging.StepLogTable;
import org.pentaho.di.core.logging.TransLogTable;
import org.pentaho.di.core.metrics.MetricsDuration;
import org.pentaho.di.core.metrics.MetricsSnapshotInterface;
import org.pentaho.di.core.metrics.MetricsUtil;
import org.pentaho.di.core.parameters.DuplicateParamException;
import org.pentaho.di.core.parameters.NamedParams;
import org.pentaho.di.core.parameters.NamedParamsDefault;
import org.pentaho.di.core.parameters.UnknownParamException;
import org.pentaho.di.core.row.RowMetaInterface;
import org.pentaho.di.core.row.ValueMeta;
import org.pentaho.di.core.util.EnvUtil;
import org.pentaho.di.core.variables.VariableSpace;
import org.pentaho.di.core.variables.Variables;
import org.pentaho.di.core.vfs.KettleVFS;
import org.pentaho.di.core.xml.XMLHandler;
import org.pentaho.di.i18n.BaseMessages;
import org.pentaho.di.job.DelegationListener;
import org.pentaho.di.job.Job;
import org.pentaho.di.partition.PartitionSchema;
import org.pentaho.di.repository.ObjectId;
import org.pentaho.di.repository.ObjectRevision;
import org.pentaho.di.repository.Repository;
import org.pentaho.di.repository.RepositoryDirectoryInterface;
import org.pentaho.di.resource.ResourceUtil;
import org.pentaho.di.resource.TopLevelResource;
import org.pentaho.di.trans.cluster.TransSplitter;
import org.pentaho.di.trans.performance.StepPerformanceSnapShot;
import org.pentaho.di.trans.step.BaseStep;
import org.pentaho.di.trans.step.BaseStepData.StepExecutionStatus;
import org.pentaho.di.trans.step.RunThread;
import org.pentaho.di.trans.step.StepAdapter;
import org.pentaho.di.trans.step.StepDataInterface;
import org.pentaho.di.trans.step.StepInitThread;
import org.pentaho.di.trans.step.StepInterface;
import org.pentaho.di.trans.step.StepListener;
import org.pentaho.di.trans.step.StepMeta;
import org.pentaho.di.trans.step.StepMetaDataCombi;
import org.pentaho.di.trans.step.StepPartitioningMeta;
import org.pentaho.di.trans.steps.mappinginput.MappingInput;
import org.pentaho.di.trans.steps.mappingoutput.MappingOutput;
import org.pentaho.di.www.AddExportServlet;
import org.pentaho.di.www.AddTransServlet;
import org.pentaho.di.www.PrepareExecutionTransServlet;
import org.pentaho.di.www.SlaveServerTransStatus;
import org.pentaho.di.www.SocketRepository;
import org.pentaho.di.www.StartExecutionTransServlet;
import org.pentaho.di.www.WebResult;
import org.pentaho.metastore.api.IMetaStore;
/**
* This class represents the information and operations associated with the concept of a Transformation. It loads,
* instantiates, initializes, runs, and monitors the execution of the transformation contained in the specified
* TransInfo object.
*
* @author Matt
* @since 07-04-2003
*
*/
public class Trans implements VariableSpace, NamedParams, HasLogChannelInterface, LoggingObjectInterface,
ExecutorInterface, ExtensionDataInterface {
/** The package name, used for internationalization of messages. */
private static Class<?> PKG = Trans.class; // for i18n purposes, needed by Translator2!!
/** The replay date format. */
public static final String REPLAY_DATE_FORMAT = "yyyy/MM/dd HH:mm:ss";
/** The log channel interface. */
protected LogChannelInterface log;
/** The log level. */
protected LogLevel logLevel = LogLevel.BASIC;
/** The container object id. */
protected String containerObjectId;
/** The log commit size. */
protected int logCommitSize = 10;
/** The transformation metadata to execute. */
protected TransMeta transMeta;
/**
* The repository we are referencing.
*/
protected Repository repository;
/**
* The MetaStore to use
*/
protected IMetaStore metaStore;
/**
* The job that's launching this transformation. This gives us access to the whole chain, including the parent
* variables, etc.
*/
private Job parentJob;
/**
* The transformation that is executing this transformation in case of mappings.
*/
private Trans parentTrans;
/** The parent logging object interface (this could be a transformation or a job). */
private LoggingObjectInterface parent;
/** The name of the mapping step that executes this transformation in case this is a mapping. */
private String mappingStepName;
/** Indicates that we want to monitor the running transformation in a GUI. */
private boolean monitored;
/**
* Indicates that we are running in preview mode...
*/
private boolean preview;
/** The date objects for logging information about the transformation such as start and end time, etc. */
private Date startDate, endDate, currentDate, logDate, depDate;
/** The job start and end date. */
private Date jobStartDate, jobEndDate;
/** The batch id. */
private long batchId;
/**
* This is the batch ID that is passed from job to job to transformation, if nothing is passed, it's the
* transformation's batch id.
*/
private long passedBatchId;
/** The variable bindings for the transformation. */
private VariableSpace variables = new Variables();
/** A list of all the row sets. */
private List<RowSet> rowsets;
/** A list of all the steps. */
private List<StepMetaDataCombi> steps;
/** The class number. */
public int class_nr;
/**
* The replayDate indicates that this transformation is a replay transformation for a transformation executed on
* replayDate. If replayDate is null, the transformation is not a replay.
*/
private Date replayDate;
/** Constant indicating a dispatch type of 1-to-1. */
public static final int TYPE_DISP_1_1 = 1;
/** Constant indicating a dispatch type of 1-to-N. */
public static final int TYPE_DISP_1_N = 2;
/** Constant indicating a dispatch type of N-to-1. */
public static final int TYPE_DISP_N_1 = 3;
/** Constant indicating a dispatch type of N-to-N. */
public static final int TYPE_DISP_N_N = 4;
/** Constant indicating a dispatch type of N-to-M. */
public static final int TYPE_DISP_N_M = 5;
/** Constant indicating a transformation status of Finished. */
public static final String STRING_FINISHED = "Finished";
/** Constant indicating a transformation status of Finished (with errors). */
public static final String STRING_FINISHED_WITH_ERRORS = "Finished (with errors)";
/** Constant indicating a transformation status of Running. */
public static final String STRING_RUNNING = "Running";
/** Constant indicating a transformation status of Paused. */
public static final String STRING_PAUSED = "Paused";
/** Constant indicating a transformation status of Preparing for execution. */
public static final String STRING_PREPARING = "Preparing executing";
/** Constant indicating a transformation status of Initializing. */
public static final String STRING_INITIALIZING = "Initializing";
/** Constant indicating a transformation status of Waiting. */
public static final String STRING_WAITING = "Waiting";
/** Constant indicating a transformation status of Stopped. */
public static final String STRING_STOPPED = "Stopped";
/** Constant indicating a transformation status of Halting. */
public static final String STRING_HALTING = "Halting";
/** Constant specifying a filename containing XML to inject into a ZIP file created during resource export. */
public static final String CONFIGURATION_IN_EXPORT_FILENAME = "__job_execution_configuration__.xml";
/** Whether safe mode is enabled. */
private boolean safeModeEnabled;
/** The thread name. */
@Deprecated
private String threadName;
/** The transaction ID */
private String transactionId;
/** Whether the transformation is preparing for execution. */
private volatile boolean preparing;
/** Whether the transformation is initializing. */
private boolean initializing;
/** Whether the transformation is running. */
private boolean running;
/** Whether the transformation is finished. */
private final AtomicBoolean finished;
/** Whether the transformation is paused. */
private AtomicBoolean paused;
/** Whether the transformation is stopped. */
private AtomicBoolean stopped;
/** The number of errors that have occurred during execution of the transformation. */
private AtomicInteger errors;
/** Whether the transformation is ready to start. */
private boolean readyToStart;
/** Step performance snapshots. */
private Map<String, List<StepPerformanceSnapShot>> stepPerformanceSnapShots;
/** The step performance snapshot timer. */
private Timer stepPerformanceSnapShotTimer;
/** A list of listeners attached to the transformation. */
private List<TransListener> transListeners;
/** A list of stop-event listeners attached to the transformation. */
private List<TransStoppedListener> transStoppedListeners;
/** In case this transformation starts to delegate work to a local transformation or job */
private List<DelegationListener> delegationListeners;
/** The number of finished steps. */
private int nrOfFinishedSteps;
/** The number of active steps. */
private int nrOfActiveSteps;
/** The named parameters. */
private NamedParams namedParams = new NamedParamsDefault();
/** The socket repository. */
private SocketRepository socketRepository;
/** The transformation log table database connection. */
private Database transLogTableDatabaseConnection;
/** The step performance snapshot sequence number. */
private AtomicInteger stepPerformanceSnapshotSeqNr;
/** The last written step performance sequence number. */
private int lastWrittenStepPerformanceSequenceNr;
/** The last step performance snapshot sequence number added. */
private int lastStepPerformanceSnapshotSeqNrAdded;
/** The active subtransformations. */
private Map<String, Trans> activeSubtransformations;
/** The active subjobs */
private Map<String, Job> activeSubjobs;
/** The step performance snapshot size limit. */
private int stepPerformanceSnapshotSizeLimit;
/** The servlet print writer. */
private PrintWriter servletPrintWriter;
/** The trans finished blocking queue. */
private ArrayBlockingQueue<Object> transFinishedBlockingQueue;
/** The name of the executing server */
private String executingServer;
/** The name of the executing user */
private String executingUser;
private Result previousResult;
protected List<RowMetaAndData> resultRows;
protected List<ResultFile> resultFiles;
/** The command line arguments for the transformation. */
protected String[] arguments;
/**
* A table of named counters.
*/
protected Hashtable<String, Counter> counters;
private HttpServletResponse servletResponse;
private HttpServletRequest servletRequest;
private Map<String, Object> extensionDataMap;
/**
* Instantiates a new transformation.
*/
public Trans() {
  // Execution-state flags; atomics because steps update them from worker threads.
  finished = new AtomicBoolean( false );
  paused = new AtomicBoolean( false );
  stopped = new AtomicBoolean( false );
  // Listener lists are synchronized: listeners may be added while the trans runs.
  transListeners = Collections.synchronizedList( new ArrayList<TransListener>() );
  transStoppedListeners = Collections.synchronizedList( new ArrayList<TransStoppedListener>() );
  delegationListeners = new ArrayList<DelegationListener>();
  // Get a valid transactionId in case we run database transactional.
  transactionId = calculateTransactionId();
  threadName = transactionId; // / backward compatibility but deprecated!
  // Error count and performance-snapshot sequence start at zero.
  errors = new AtomicInteger( 0 );
  stepPerformanceSnapshotSeqNr = new AtomicInteger( 0 );
  lastWrittenStepPerformanceSequenceNr = 0;
  // Registries for sub-transformations/jobs launched by this transformation.
  activeSubtransformations = new HashMap<String, Trans>();
  activeSubjobs = new HashMap<String, Job>();
  resultRows = new ArrayList<RowMetaAndData>();
  resultFiles = new ArrayList<ResultFile>();
  counters = new Hashtable<String, Counter>();
  extensionDataMap = new HashMap<String, Object>();
}
/**
 * Initializes a transformation from transformation meta-data defined in memory.
 * <p>Convenience overload: delegates to {@link #Trans(TransMeta, LoggingObjectInterface)} with a null parent.</p>
 *
 * @param transMeta
 *          the transformation meta-data to use.
 */
public Trans( TransMeta transMeta ) {
  this( transMeta, null );
}
/**
 * Initializes a transformation from transformation meta-data defined in memory. Also take into account the parent log
 * channel interface (job or transformation) for logging lineage purposes.
 *
 * @param transMeta
 *          the transformation meta-data to use.
 * @param parent
 *          the parent job that is executing this transformation
 */
public Trans( TransMeta transMeta, LoggingObjectInterface parent ) {
  this();
  this.transMeta = transMeta;
  setParent( parent );
  // Inherit variables and named parameters from the meta-data, then activate the parameters.
  initializeVariablesFrom( transMeta );
  copyParametersFrom( transMeta );
  transMeta.activateParameters();
  // Get a valid transactionId in case we run database transactional.
  // NOTE(review): recomputed after variable initialization — presumably calculateTransactionId() depends on
  // variables set above; confirm before reordering.
  transactionId = calculateTransactionId();
  threadName = transactionId; // / backward compatibility but deprecated!
}
/**
 * Sets the parent logging object.
 * <p>Side effects: creates a fresh LogChannel tied to the parent, and refreshes this transformation's log level and
 * container object id from that new channel.</p>
 *
 * @param parent
 *          the new parent
 */
public void setParent( LoggingObjectInterface parent ) {
  this.parent = parent;
  this.log = new LogChannel( this, parent );
  this.logLevel = log.getLogLevel();
  this.containerObjectId = log.getContainerObjectId();
  if ( log.isDetailed() ) {
    log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationIsPreloaded" ) );
  }
  if ( log.isDebug() ) {
    log.logDebug( BaseMessages.getString( PKG, "Trans.Log.NumberOfStepsToRun", String.valueOf( transMeta
      .nrSteps() ), String.valueOf( transMeta.nrTransHops() ) ) );
  }
}
/**
 * Sets the default log commit size from the "pentaho.log.commit.size" variable, when that variable is set.
 * A value that cannot be parsed as an integer falls back to 10; when the variable is absent, logCommitSize is left
 * untouched.
 */
private void setDefaultLogCommitSize() {
  String commitSizeVariable = this.getVariable( "pentaho.log.commit.size" );
  if ( commitSizeVariable != null ) {
    // Override the logCommit variable; Const.toInt() returns the default (10) on any parse failure,
    // matching the previous parse-and-catch behavior.
    logCommitSize = Const.toInt( commitSizeVariable, 10 );
  }
}
/**
 * Gets the log channel interface for the transformation.
 *
 * @return the log channel (as created in setParent() or assigned by setLog())
 * @see org.pentaho.di.core.logging.HasLogChannelInterface#getLogChannel()
 */
public LogChannelInterface getLogChannel() {
  return log;
}
/**
 * Sets the log channel interface for the transformation.
 * <p>Replaces the channel created by setParent(); note that logLevel and containerObjectId are NOT refreshed here.</p>
 *
 * @param log
 *          the new log channel interface
 */
public void setLog( LogChannelInterface log ) {
  this.log = log;
}
/**
 * Gets the name of the transformation.
 *
 * @return the transformation name, or null when no meta-data is attached
 */
public String getName() {
  return transMeta == null ? null : transMeta.getName();
}
/**
 * Instantiates a new transformation using any of the provided parameters including the variable bindings, a
 * repository, a name, a repository directory name, and a filename. This is a multi-purpose method that supports
 * loading a transformation from a file (if the filename is provided but not a repository object) or from a repository
 * (if the repository object, repository directory name, and transformation name are specified).
 *
 * @param parent
 *          the parent variable space and named params
 * @param rep
 *          the repository
 * @param name
 *          the name of the transformation
 * @param dirname
 *          the dirname the repository directory name
 * @param filename
 *          the filename containing the transformation definition
 * @throws KettleException
 *           if any error occurs during loading, parsing, or creation of the transformation
 */
public <Parent extends VariableSpace & NamedParams> Trans( Parent parent, Repository rep, String name,
  String dirname, String filename ) throws KettleException {
  this();
  try {
    if ( rep != null ) {
      // Repository load: resolve the directory first, then read the latest version of the transformation.
      RepositoryDirectoryInterface repdir = rep.findDirectory( dirname );
      if ( repdir != null ) {
        this.transMeta = rep.loadTransformation( name, repdir, null, false, null ); // reads last version
      } else {
        throw new KettleException( BaseMessages.getString(
          PKG, "Trans.Exception.UnableToLoadTransformation", name, dirname ) );
      }
    } else {
      // No repository: load the transformation definition from the given file.
      transMeta = new TransMeta( filename, false );
    }
    this.log = LogChannel.GENERAL;
    transMeta.initializeVariablesFrom( parent );
    initializeVariablesFrom( parent );
    // PDI-3064 do not erase parameters from meta!
    // instead of this - copy parameters to actual transformation
    this.copyParametersFrom( parent );
    this.activateParameters();
    this.setDefaultLogCommitSize();
    // Get a valid transactionId in case we run database transactional.
    transactionId = calculateTransactionId();
    threadName = transactionId; // / backward compatibility but deprecated!
  } catch ( KettleException e ) {
    // Wrap any load failure with the transformation name for context.
    throw new KettleException(
      BaseMessages.getString( PKG, "Trans.Exception.UnableToOpenTransformation", name ), e );
  }
}
/**
 * Executes the transformation. This method will prepare the transformation for execution and then start all the
 * threads associated with the transformation and its steps.
 * <p>Convenience wrapper: equivalent to calling prepareExecution(arguments) followed by startThreads(). Returns as
 * soon as the threads are launched; use waitUntilFinished() to block until completion.</p>
 *
 * @param arguments
 *          the arguments
 * @throws KettleException
 *           if the transformation could not be prepared (initialized)
 */
public void execute( String[] arguments ) throws KettleException {
  prepareExecution( arguments );
  startThreads();
}
/**
 * Prepares the transformation for execution. This includes setting the arguments and parameters as well as preparing
 * and tracking the steps and hops in the transformation.
 *
 * <p>In order, this method: fires the TransformationPrepareExecution extension point, back-ports deprecated TransMeta
 * state, activates arguments/parameters, allocates one RowSet per hop (honoring step copies, dispatch type and
 * repartitioning), instantiates every step copy with its own variable space, wires error-handling outputs, writes the
 * start log record (synchronized with parent job/trans to avoid duplicate batch ids), runs all step init() calls on
 * parallel threads, and finally flags the transformation ready to start. When one or more steps fail to initialize,
 * every step is disposed and a KettleException is thrown.</p>
 *
 * @param arguments
 *          the arguments to use for this transformation
 * @throws KettleException
 *           in case the transformation could not be prepared (initialized)
 */
public void prepareExecution( String[] arguments ) throws KettleException {
  preparing = true;
  startDate = null;
  running = false;
  log.snap( Metrics.METRIC_TRANSFORMATION_EXECUTION_START );
  log.snap( Metrics.METRIC_TRANSFORMATION_INIT_START );
  ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationPrepareExecution.id, this );
  // Back-port previous result / arguments set on TransMeta through deprecated methods.
  checkCompatibility();
  // Set the arguments on the transformation...
  //
  if ( arguments != null ) {
    setArguments( arguments );
  }
  activateParameters();
  transMeta.activateParameters();
  if ( transMeta.getName() == null ) {
    if ( transMeta.getFilename() != null ) {
      log.logBasic( BaseMessages.getString( PKG, "Trans.Log.DispacthingStartedForFilename", transMeta
        .getFilename() ) );
    }
  } else {
    log.logBasic( BaseMessages.getString( PKG, "Trans.Log.DispacthingStartedForTransformation", transMeta
      .getName() ) );
  }
  if ( getArguments() != null ) {
    if ( log.isDetailed() ) {
      log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.NumberOfArgumentsDetected", String
        .valueOf( getArguments().length ) ) );
    }
  }
  if ( isSafeModeEnabled() ) {
    if ( log.isDetailed() ) {
      log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.SafeModeIsEnabled", transMeta.getName() ) );
    }
  }
  if ( getReplayDate() != null ) {
    SimpleDateFormat df = new SimpleDateFormat( REPLAY_DATE_FORMAT );
    log.logBasic( BaseMessages.getString( PKG, "Trans.Log.ThisIsAReplayTransformation" )
      + df.format( getReplayDate() ) );
  } else {
    if ( log.isDetailed() ) {
      log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.ThisIsNotAReplayTransformation" ) );
    }
  }
  // setInternalKettleVariables(this); --> Let's not do this, when running
  // without file, for example remote, it spoils the fun
  // extra check to see if the servlet print writer has some value in case
  // folks want to test it locally...
  //
  if ( servletPrintWriter == null ) {
    String encoding = System.getProperty( "KETTLE_DEFAULT_SERVLET_ENCODING", null );
    if ( encoding == null ) {
      servletPrintWriter = new PrintWriter( new OutputStreamWriter( System.out ) );
    } else {
      try {
        servletPrintWriter = new PrintWriter( new OutputStreamWriter( System.out, encoding ) );
      } catch ( UnsupportedEncodingException ex ) {
        // Fall back to the platform default encoding when the configured one is unsupported.
        servletPrintWriter = new PrintWriter( new OutputStreamWriter( System.out ) );
      }
    }
  }
  // Keep track of all the row sets and allocated steps
  //
  steps = new ArrayList<StepMetaDataCombi>();
  rowsets = new ArrayList<RowSet>();
  List<StepMeta> hopsteps = transMeta.getTransHopSteps( false );
  if ( log.isDetailed() ) {
    log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.FoundDefferentSteps", String.valueOf( hopsteps
      .size() ) ) );
    log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.AllocatingRowsets" ) );
  }
  // First allocate all the rowsets required!
  // Note that a mapping doesn't receive ANY input or output rowsets...
  //
  for ( int i = 0; i < hopsteps.size(); i++ ) {
    StepMeta thisStep = hopsteps.get( i );
    if ( thisStep.isMapping() ) {
      continue; // handled and allocated by the mapping step itself.
    }
    if ( log.isDetailed() ) {
      log.logDetailed( BaseMessages.getString(
        PKG, "Trans.Log.AllocateingRowsetsForStep", String.valueOf( i ), thisStep.getName() ) );
    }
    List<StepMeta> nextSteps = transMeta.findNextSteps( thisStep );
    int nrTargets = nextSteps.size();
    for ( int n = 0; n < nrTargets; n++ ) {
      // What's the next step?
      StepMeta nextStep = nextSteps.get( n );
      if ( nextStep.isMapping() ) {
        continue; // handled and allocated by the mapping step itself.
      }
      // How many times do we start the source step?
      int thisCopies = thisStep.getCopies();
      if ( thisCopies < 0 ) {
        // This can only happen if a variable is used that didn't resolve to a positive integer value
        //
        throw new KettleException( BaseMessages.getString(
          PKG, "Trans.Log.StepCopiesNotCorrectlyDefined", thisStep.getName() ) );
      }
      // How many times do we start the target step?
      int nextCopies = nextStep.getCopies();
      // Are we re-partitioning?
      boolean repartitioning;
      if ( thisStep.isPartitioned() ) {
        repartitioning = !thisStep.getStepPartitioningMeta()
          .equals( nextStep.getStepPartitioningMeta() );
      } else {
        repartitioning = nextStep.isPartitioned();
      }
      int nrCopies;
      if ( log.isDetailed() ) {
        log.logDetailed( BaseMessages.getString(
          PKG, "Trans.Log.copiesInfo", String.valueOf( thisCopies ), String.valueOf( nextCopies ) ) );
      }
      // Classify the hop by source/target copy counts; the N-M case (different copy counts or repartitioning)
      // gets a full N x M grid of row sets allocated in the else-branch below.
      int dispatchType;
      if ( thisCopies == 1 && nextCopies == 1 ) {
        dispatchType = TYPE_DISP_1_1;
        nrCopies = 1;
      } else if ( thisCopies == 1 && nextCopies > 1 ) {
        dispatchType = TYPE_DISP_1_N;
        nrCopies = nextCopies;
      } else if ( thisCopies > 1 && nextCopies == 1 ) {
        dispatchType = TYPE_DISP_N_1;
        nrCopies = thisCopies;
      } else if ( thisCopies == nextCopies && !repartitioning ) {
        dispatchType = TYPE_DISP_N_N;
        nrCopies = nextCopies;
      } else {
        // > 1!
        dispatchType = TYPE_DISP_N_M;
        nrCopies = nextCopies;
      } // Allocate a rowset for each destination step
      // Allocate the rowsets
      //
      if ( dispatchType != TYPE_DISP_N_M ) {
        for ( int c = 0; c < nrCopies; c++ ) {
          RowSet rowSet;
          switch ( transMeta.getTransformationType() ) {
            case Normal:
              // This is a temporary patch until the batching rowset has proven
              // to be working in all situations.
              // Currently there are stalling problems when dealing with small
              // amounts of rows.
              //
              Boolean batchingRowSet =
                ValueMeta.convertStringToBoolean( System.getProperty( Const.KETTLE_BATCHING_ROWSET ) );
              if ( batchingRowSet != null && batchingRowSet.booleanValue() ) {
                rowSet = new BlockingBatchingRowSet( transMeta.getSizeRowset() );
              } else {
                rowSet = new BlockingRowSet( transMeta.getSizeRowset() );
              }
              break;
            case SerialSingleThreaded:
              rowSet = new SingleRowRowSet();
              break;
            case SingleThreaded:
              rowSet = new QueueRowSet();
              break;
            default:
              throw new KettleException( "Unhandled transformation type: " + transMeta.getTransformationType() );
          }
          switch ( dispatchType ) {
            case TYPE_DISP_1_1:
              rowSet.setThreadNameFromToCopy( thisStep.getName(), 0, nextStep.getName(), 0 );
              break;
            case TYPE_DISP_1_N:
              rowSet.setThreadNameFromToCopy( thisStep.getName(), 0, nextStep.getName(), c );
              break;
            case TYPE_DISP_N_1:
              rowSet.setThreadNameFromToCopy( thisStep.getName(), c, nextStep.getName(), 0 );
              break;
            case TYPE_DISP_N_N:
              rowSet.setThreadNameFromToCopy( thisStep.getName(), c, nextStep.getName(), c );
              break;
            default:
              break;
          }
          rowsets.add( rowSet );
          if ( log.isDetailed() ) {
            log.logDetailed( BaseMessages.getString( PKG, "Trans.TransformationAllocatedNewRowset", rowSet
              .toString() ) );
          }
        }
      } else {
        // For each N source steps we have M target steps
        //
        // From each input step we go to all output steps.
        // This allows maximum flexibility for re-partitioning,
        // distribution...
        for ( int s = 0; s < thisCopies; s++ ) {
          for ( int t = 0; t < nextCopies; t++ ) {
            BlockingRowSet rowSet = new BlockingRowSet( transMeta.getSizeRowset() );
            rowSet.setThreadNameFromToCopy( thisStep.getName(), s, nextStep.getName(), t );
            rowsets.add( rowSet );
            if ( log.isDetailed() ) {
              log.logDetailed( BaseMessages.getString( PKG, "Trans.TransformationAllocatedNewRowset", rowSet
                .toString() ) );
            }
          }
        }
      }
    }
    log
      .logDetailed( BaseMessages
        .getString(
          PKG,
          "Trans.Log.AllocatedRowsets", String.valueOf( rowsets.size() ), String.valueOf( i ), thisStep
            .getName() )
        + " " );
  }
  if ( log.isDetailed() ) {
    log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.AllocatingStepsAndStepData" ) );
  }
  // Allocate the steps & the data...
  //
  for ( int i = 0; i < hopsteps.size(); i++ ) {
    StepMeta stepMeta = hopsteps.get( i );
    String stepid = stepMeta.getStepID();
    if ( log.isDetailed() ) {
      log.logDetailed( BaseMessages.getString(
        PKG, "Trans.Log.TransformationIsToAllocateStep", stepMeta.getName(), stepid ) );
    }
    // How many copies are launched of this step?
    int nrCopies = stepMeta.getCopies();
    if ( log.isDebug() ) {
      log
        .logDebug( BaseMessages
          .getString( PKG, "Trans.Log.StepHasNumberRowCopies", String.valueOf( nrCopies ) ) );
    }
    // At least run once...
    for ( int c = 0; c < nrCopies; c++ ) {
      // Make sure we haven't started it yet!
      if ( !hasStepStarted( stepMeta.getName(), c ) ) {
        StepMetaDataCombi combi = new StepMetaDataCombi();
        combi.stepname = stepMeta.getName();
        combi.copy = c;
        // The meta-data
        combi.stepMeta = stepMeta;
        combi.meta = stepMeta.getStepMetaInterface();
        // Allocate the step data
        StepDataInterface data = combi.meta.getStepData();
        combi.data = data;
        // Allocate the step
        StepInterface step = combi.meta.getStep( stepMeta, data, c, transMeta, this );
        // Copy the variables of the transformation to the step...
        // don't share. Each copy of the step has its own variables.
        //
        step.initializeVariablesFrom( this );
        step.setUsingThreadPriorityManagment( transMeta.isUsingThreadPriorityManagment() );
        // Pass the connected repository & metaStore to the steps runtime
        //
        step.setRepository( repository );
        step.setMetaStore( metaStore );
        // If the step is partitioned, set the partitioning ID and some other
        // things as well...
        if ( stepMeta.isPartitioned() ) {
          List<String> partitionIDs = stepMeta.getStepPartitioningMeta().getPartitionSchema().getPartitionIDs();
          if ( partitionIDs != null && partitionIDs.size() > 0 ) {
            step.setPartitionID( partitionIDs.get( c ) ); // Pass the partition ID
            // to the step
          }
        }
        // Save the step too
        combi.step = step;
        // Pass logging level and metrics gathering down to the step level.
        // /
        if ( combi.step instanceof LoggingObjectInterface ) {
          LogChannelInterface logChannel = combi.step.getLogChannel();
          logChannel.setLogLevel( logLevel );
          logChannel.setGatheringMetrics( log.isGatheringMetrics() );
        }
        // Add to the bunch...
        steps.add( combi );
        if ( log.isDetailed() ) {
          log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationHasAllocatedANewStep", stepMeta
            .getName(), String.valueOf( c ) ) );
        }
      }
    }
  }
  // Now we need to verify if certain rowsets are not meant to be for error
  // handling...
  // Loop over the steps and for every step verify the output rowsets
  // If a rowset is going to a target step in the steps error handling
  // metadata, set it to the errorRowSet.
  // The input rowsets are already in place, so the next step just accepts the
  // rows.
  // Metadata wise we need to do the same trick in TransMeta
  //
  for ( int s = 0; s < steps.size(); s++ ) {
    StepMetaDataCombi combi = steps.get( s );
    if ( combi.stepMeta.isDoingErrorHandling() ) {
      combi.step.identifyErrorOutput();
    }
  }
  // Now (optionally) write start log record!
  // Make sure we synchronize appropriately to avoid duplicate batch IDs.
  //
  Object syncObject = this;
  if ( parentJob != null ) {
    syncObject = parentJob; // parallel execution in a job
  }
  if ( parentTrans != null ) {
    syncObject = parentTrans; // multiple sub-transformations
  }
  synchronized ( syncObject ) {
    calculateBatchIdAndDateRange();
    beginProcessing();
  }
  // Set the partition-to-rowset mapping
  //
  for ( int i = 0; i < steps.size(); i++ ) {
    StepMetaDataCombi sid = steps.get( i );
    StepMeta stepMeta = sid.stepMeta;
    StepInterface baseStep = sid.step;
    baseStep.setPartitioned( stepMeta.isPartitioned() );
    // Now let's take a look at the source and target relation
    //
    // If this source step is not partitioned, and the target step is: it
    // means we need to re-partition the incoming data.
    // If both steps are partitioned on the same method and schema, we don't
    // need to re-partition
    // If both steps are partitioned on a different method or schema, we need
    // to re-partition as well.
    // If both steps are not partitioned, we don't need to re-partition
    //
    boolean isThisPartitioned = stepMeta.isPartitioned();
    PartitionSchema thisPartitionSchema = null;
    if ( isThisPartitioned ) {
      thisPartitionSchema = stepMeta.getStepPartitioningMeta().getPartitionSchema();
    }
    boolean isNextPartitioned = false;
    StepPartitioningMeta nextStepPartitioningMeta = null;
    PartitionSchema nextPartitionSchema = null;
    List<StepMeta> nextSteps = transMeta.findNextSteps( stepMeta );
    int nrNext = nextSteps.size();
    for ( int p = 0; p < nrNext; p++ ) {
      StepMeta nextStep = nextSteps.get( p );
      if ( nextStep.isPartitioned() ) {
        isNextPartitioned = true;
        nextStepPartitioningMeta = nextStep.getStepPartitioningMeta();
        nextPartitionSchema = nextStepPartitioningMeta.getPartitionSchema();
      }
    }
    baseStep.setRepartitioning( StepPartitioningMeta.PARTITIONING_METHOD_NONE );
    // If the next step is partitioned differently, set re-partitioning, when
    // running locally.
    //
    if ( ( !isThisPartitioned && isNextPartitioned )
      || ( isThisPartitioned && isNextPartitioned && !thisPartitionSchema.equals( nextPartitionSchema ) ) ) {
      baseStep.setRepartitioning( nextStepPartitioningMeta.getMethodType() );
    }
    // For partitioning to a set of remove steps (repartitioning from a master
    // to a set or remote output steps)
    //
    StepPartitioningMeta targetStepPartitioningMeta = baseStep.getStepMeta().getTargetStepPartitioningMeta();
    if ( targetStepPartitioningMeta != null ) {
      baseStep.setRepartitioning( targetStepPartitioningMeta.getMethodType() );
    }
  }
  preparing = false;
  initializing = true;
  // Do a topology sort... Over 150 step (copies) things might be slowing down too much.
  //
  if ( isMonitored() && steps.size() < 150 ) {
    doTopologySortOfSteps();
  }
  if ( log.isDetailed() ) {
    log
      .logDetailed( BaseMessages
        .getString( PKG, "Trans.Log.InitialisingSteps", String.valueOf( steps.size() ) ) );
  }
  StepInitThread[] initThreads = new StepInitThread[steps.size()];
  Thread[] threads = new Thread[steps.size()];
  // Initialize all the threads...
  //
  for ( int i = 0; i < steps.size(); i++ ) {
    final StepMetaDataCombi sid = steps.get( i );
    // Do the init code in the background!
    // Init all steps at once, but ALL steps need to finish before we can
    // continue properly!
    //
    initThreads[i] = new StepInitThread( sid, log );
    // Put it in a separate thread!
    //
    threads[i] = new Thread( initThreads[i] );
    threads[i].setName( "init of " + sid.stepname + "." + sid.copy + " (" + threads[i].getName() + ")" );
    ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepBeforeInitialize.id, initThreads[i] );
    threads[i].start();
  }
  // Wait for all init threads to complete before inspecting their results.
  for ( int i = 0; i < threads.length; i++ ) {
    try {
      threads[i].join();
      ExtensionPointHandler
        .callExtensionPoint( log, KettleExtensionPoint.StepAfterInitialize.id, initThreads[i] );
    } catch ( Exception ex ) {
      log.logError( "Error with init thread: " + ex.getMessage(), ex.getMessage() );
      log.logError( Const.getStackTracker( ex ) );
    }
  }
  initializing = false;
  boolean ok = true;
  // All step are initialized now: see if there was one that didn't do it
  // correctly!
  //
  for ( int i = 0; i < initThreads.length; i++ ) {
    StepMetaDataCombi combi = initThreads[i].getCombi();
    if ( !initThreads[i].isOk() ) {
      log.logError( BaseMessages
        .getString( PKG, "Trans.Log.StepFailedToInit", combi.stepname + "." + combi.copy ) );
      combi.data.setStatus( StepExecutionStatus.STATUS_STOPPED );
      ok = false;
    } else {
      combi.data.setStatus( StepExecutionStatus.STATUS_IDLE );
      if ( log.isDetailed() ) {
        log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.StepInitialized", combi.stepname
          + "." + combi.copy ) );
      }
    }
  }
  if ( !ok ) {
    // Halt the other threads as well, signal end-of-the line to the outside
    // world...
    // Also explicitly call dispose() to clean up resources opened during
    // init();
    //
    for ( int i = 0; i < initThreads.length; i++ ) {
      StepMetaDataCombi combi = initThreads[i].getCombi();
      // Dispose will overwrite the status, but we set it back right after
      // this.
      combi.step.dispose( combi.meta, combi.data );
      if ( initThreads[i].isOk() ) {
        combi.data.setStatus( StepExecutionStatus.STATUS_HALTED );
      } else {
        combi.data.setStatus( StepExecutionStatus.STATUS_STOPPED );
      }
    }
    // Just for safety, fire the trans finished listeners...
    try {
      fireTransFinishedListeners();
    } catch ( KettleException e ) {
      // listeners produces errors
      log.logError( BaseMessages.getString( PKG, "Trans.FinishListeners.Exception" ) );
      // we will not pass this exception up to prepareExecuton() entry point.
    } finally {
      // Flag the transformation as finished even if exception was thrown
      setFinished( true );
    }
    // Pass along the log during preview. Otherwise it becomes hard to see
    // what went wrong.
    //
    if ( preview ) {
      String logText = KettleLogStore.getAppender().getBuffer( getLogChannelId(), true ).toString();
      throw new KettleException(
        BaseMessages.getString( PKG, "Trans.Log.FailToInitializeAtLeastOneStep" ) + Const.CR + logText );
    } else {
      throw new KettleException(
        BaseMessages.getString( PKG, "Trans.Log.FailToInitializeAtLeastOneStep" ) + Const.CR );
    }
  }
  log.snap( Metrics.METRIC_TRANSFORMATION_INIT_STOP );
  KettleEnvironment.setExecutionInformation( this, repository );
  readyToStart = true;
}
/**
 * Back-ports state set on {@link TransMeta} through deprecated methods onto this Trans: the previous Result and the
 * command-line arguments are copied over when they were set on the meta-data but not on this object, so legacy
 * callers keep working.
 */
@SuppressWarnings( "deprecation" )
private void checkCompatibility() {
  // If we don't have a previous result and transMeta does have one, someone has been using a deprecated method.
  //
  if ( transMeta.getPreviousResult() != null && getPreviousResult() == null ) {
    setPreviousResult( transMeta.getPreviousResult() );
  }
  // If we don't have arguments set and TransMeta has, someone has been using a deprecated method.
  //
  if ( transMeta.getArguments() != null && getArguments() == null ) {
    setArguments( transMeta.getArguments() );
  }
}
/**
 * Starts the threads prepared by prepareThreads(). Before you start the threads, you can add RowListeners to them.
 *
 * <p>This registers per-step listeners that track the active/finished step counts (killing all steps as soon as one
 * reports errors), optionally schedules the step-performance snapshot timer, installs a first-position TransListener
 * that performs end-of-transformation housekeeping (extension point, timer cancel, finished flag, metrics logging,
 * unique-connection close), and then launches the run threads according to the transformation type (Normal,
 * SerialSingleThreaded or SingleThreaded).</p>
 *
 * @throws KettleException
 *           if there is a communication error with a remote output socket.
 */
public void startThreads() throws KettleException {
  // Now prepare to start all the threads...
  //
  nrOfFinishedSteps = 0;
  nrOfActiveSteps = 0;
  ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationStartThreads.id, this );
  fireTransStartedListeners();
  for ( int i = 0; i < steps.size(); i++ ) {
    final StepMetaDataCombi sid = steps.get( i );
    sid.step.markStart();
    sid.step.initBeforeStart();
    // also attach a Step Listener to detect when we're done...
    //
    StepListener stepListener = new StepListener() {
      public void stepActive( Trans trans, StepMeta stepMeta, StepInterface step ) {
        nrOfActiveSteps++;
        if ( nrOfActiveSteps == 1 ) {
          // Transformation goes from in-active to active...
          // PDI-5229 sync added
          synchronized ( transListeners ) {
            for ( TransListener listener : transListeners ) {
              listener.transActive( Trans.this );
            }
          }
        }
      }

      public void stepFinished( Trans trans, StepMeta stepMeta, StepInterface step ) {
        synchronized ( Trans.this ) {
          nrOfFinishedSteps++;
          if ( nrOfFinishedSteps >= steps.size() ) {
            // Set the finished flag
            //
            setFinished( true );
            // Grab the performance statistics one last time (if enabled)
            //
            addStepPerformanceSnapShot();
            try {
              fireTransFinishedListeners();
            } catch ( Exception e ) {
              step.setErrors( step.getErrors() + 1L );
              log.logError( getName()
                + " : " + BaseMessages.getString( PKG, "Trans.Log.UnexpectedErrorAtTransformationEnd" ), e );
            }
          }
          // If a step fails with an error, we want to kill/stop the others
          // too...
          //
          if ( step.getErrors() > 0 ) {
            log.logMinimal( BaseMessages.getString( PKG, "Trans.Log.TransformationDetectedErrors" ) );
            log.logMinimal( BaseMessages.getString(
              PKG, "Trans.Log.TransformationIsKillingTheOtherSteps" ) );
            killAllNoWait();
          }
        }
      }
    };
    // Make sure this is called first!
    //
    if ( sid.step instanceof BaseStep ) {
      ( (BaseStep) sid.step ).getStepListeners().add( 0, stepListener );
    } else {
      sid.step.addStepListener( stepListener );
    }
  }
  if ( transMeta.isCapturingStepPerformanceSnapShots() ) {
    stepPerformanceSnapshotSeqNr = new AtomicInteger( 0 );
    stepPerformanceSnapShots = new ConcurrentHashMap<String, List<StepPerformanceSnapShot>>();
    // Calculate the maximum number of snapshots to be kept in memory
    //
    String limitString = environmentSubstitute( transMeta.getStepPerformanceCapturingSizeLimit() );
    if ( Const.isEmpty( limitString ) ) {
      limitString = EnvUtil.getSystemProperty( Const.KETTLE_STEP_PERFORMANCE_SNAPSHOT_LIMIT );
    }
    stepPerformanceSnapshotSizeLimit = Const.toInt( limitString, 0 );
    // Set a timer to collect the performance data from the running threads...
    //
    stepPerformanceSnapShotTimer = new Timer( "stepPerformanceSnapShot Timer: " + transMeta.getName() );
    TimerTask timerTask = new TimerTask() {
      public void run() {
        if ( !isFinished() ) {
          addStepPerformanceSnapShot();
        }
      }
    };
    stepPerformanceSnapShotTimer.schedule( timerTask, 100, transMeta.getStepPerformanceCapturingDelay() );
  }
  // Now start a thread to monitor the running transformation...
  //
  setFinished( false );
  paused.set( false );
  stopped.set( false );
  transFinishedBlockingQueue = new ArrayBlockingQueue<Object>( 10 );
  TransListener transListener = new TransAdapter() {
    public void transFinished( Trans trans ) {
      try {
        ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationFinish.id, trans );
      } catch ( KettleException e ) {
        throw new RuntimeException( "Error calling extension point at end of transformation", e );
      }
      // First of all, stop the performance snapshot timer if there is is
      // one...
      //
      if ( transMeta.isCapturingStepPerformanceSnapShots() && stepPerformanceSnapShotTimer != null ) {
        stepPerformanceSnapShotTimer.cancel();
      }
      setFinished( true );
      running = false; // no longer running
      log.snap( Metrics.METRIC_TRANSFORMATION_EXECUTION_STOP );
      // If the user ran with metrics gathering enabled and a metrics logging table is configured, add another
      // listener...
      //
      MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable();
      if ( metricsLogTable.isDefined() ) {
        try {
          writeMetricsInformation();
        } catch ( Exception e ) {
          log.logError( "Error writing metrics information", e );
          errors.incrementAndGet();
        }
      }
      // Close the unique connections when running database transactionally.
      // This will commit or roll back the transaction based on the result of this transformation.
      //
      if ( transMeta.isUsingUniqueConnections() ) {
        trans.closeUniqueDatabaseConnections( getResult() );
      }
    }
  };
  // This should always be done first so that the other listeners achieve a clean state to start from (setFinished and
  // so on)
  //
  transListeners.add( 0, transListener );
  running = true;
  switch ( transMeta.getTransformationType() ) {
    case Normal:
      // Now start all the threads...
      //
      for ( int i = 0; i < steps.size(); i++ ) {
        final StepMetaDataCombi combi = steps.get( i );
        RunThread runThread = new RunThread( combi );
        Thread thread = new Thread( runThread );
        thread.setName( getName() + " - " + combi.stepname );
        ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepBeforeStart.id, combi );
        // Call an extension point at the end of the step
        //
        combi.step.addStepListener( new StepAdapter() {
          @Override
          public void stepFinished( Trans trans, StepMeta stepMeta, StepInterface step ) {
            try {
              ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.StepFinished.id, combi );
            } catch ( KettleException e ) {
              throw new RuntimeException( "Unexpected error in calling extension point upon step finish", e );
            }
          }
        } );
        thread.start();
      }
      break;
    case SerialSingleThreaded:
      new Thread( new Runnable() {
        public void run() {
          try {
            // Always disable thread priority management, it will always slow us
            // down...
            //
            for ( StepMetaDataCombi combi : steps ) {
              combi.step.setUsingThreadPriorityManagment( false );
            }
            //
            // This is a single threaded version...
            //
            // Sort the steps from start to finish...
            //
            Collections.sort( steps, new Comparator<StepMetaDataCombi>() {
              public int compare( StepMetaDataCombi c1, StepMetaDataCombi c2 ) {
                boolean c1BeforeC2 = transMeta.findPrevious( c2.stepMeta, c1.stepMeta );
                if ( c1BeforeC2 ) {
                  return -1;
                } else {
                  return 1;
                }
              }
            } );
            boolean[] stepDone = new boolean[steps.size()];
            int nrDone = 0;
            // Round-robin over the sorted steps, processing one row per step per pass, until all are done.
            while ( nrDone < steps.size() && !isStopped() ) {
              for ( int i = 0; i < steps.size() && !isStopped(); i++ ) {
                StepMetaDataCombi combi = steps.get( i );
                if ( !stepDone[i] ) {
                  // if (combi.step.canProcessOneRow() ||
                  // !combi.step.isRunning()) {
                  boolean cont = combi.step.processRow( combi.meta, combi.data );
                  if ( !cont ) {
                    stepDone[i] = true;
                    nrDone++;
                  }
                  // }
                }
              }
            }
          } catch ( Exception e ) {
            errors.addAndGet( 1 );
            log.logError( "Error executing single threaded", e );
          } finally {
            for ( int i = 0; i < steps.size(); i++ ) {
              StepMetaDataCombi combi = steps.get( i );
              combi.step.dispose( combi.meta, combi.data );
              combi.step.markStop();
            }
          }
        }
      } ).start();
      break;
    case SingleThreaded:
      // Don't do anything, this needs to be handled by the transformation
      // executor!
      //
      break;
    default:
      break;
  }
  ExtensionPointHandler.callExtensionPoint( log, KettleExtensionPoint.TransformationStart.id, this );
  if ( log.isDetailed() ) {
    log
      .logDetailed( BaseMessages
        .getString(
          PKG,
          "Trans.Log.TransformationHasAllocated", String.valueOf( steps.size() ), String
            .valueOf( rowsets.size() ) ) );
  }
}
/**
 * Make attempt to fire all registered listeners if possible.
 *
 * <p>Every listener is notified even when an earlier one throws; the first collected exception (FIFO) is re-thrown
 * after all listeners ran. Also posts a token on the trans-finished blocking queue to unblock any thread waiting in
 * waitUntilFinished().</p>
 *
 * @throws KettleException
 *           if any errors occur during notification
 */
protected void fireTransFinishedListeners() throws KettleException {
  // PDI-5229 sync added
  synchronized ( transListeners ) {
    if ( transListeners.size() == 0 ) {
      return;
    }
    // prevent Exception from one listener to block others execution
    List<KettleException> badGuys = new ArrayList<KettleException>( transListeners.size() );
    for ( TransListener transListener : transListeners ) {
      try {
        transListener.transFinished( this );
      } catch ( KettleException e ) {
        badGuys.add( e );
      }
    }
    // Signal for the the waitUntilFinished blocker...
    // The queue is only allocated in startThreads(); this method is also reached from the failure path of
    // prepareExecution() before that happens, so guard against a NullPointerException here.
    if ( transFinishedBlockingQueue != null ) {
      transFinishedBlockingQueue.add( new Object() );
    }
    if ( !badGuys.isEmpty() ) {
      // FIFO
      throw new KettleException( badGuys.get( 0 ) );
    }
  }
}
/**
 * Fires the start-event listeners (if any are registered).
 * <p>Unlike fireTransFinishedListeners(), an exception from one listener aborts notification of the remaining
 * listeners and propagates directly to the caller.</p>
 *
 * @throws KettleException
 *           if any errors occur during notification
 */
protected void fireTransStartedListeners() throws KettleException {
  // PDI-5229 sync added
  synchronized ( transListeners ) {
    for ( TransListener transListener : transListeners ) {
      transListener.transStarted( this );
    }
  }
}
/**
 * Adds a step performance snapshot.
 * <p>Takes one snapshot per step copy (lines read/written/input/output/updated/rejected and errors), diffs it
 * against the previous snapshot for that step, and appends it to the per-step snapshot list, trimming the oldest
 * entry when stepPerformanceSnapshotSizeLimit &gt; 0 is exceeded. Does nothing when the snapshot map is null (race
 * with startup/shutdown) or when the transformation is paused/stopped and data was already collected.</p>
 */
protected void addStepPerformanceSnapShot() {
  if ( stepPerformanceSnapShots == null ) {
    return; // Race condition somewhere?
  }
  boolean pausedAndNotEmpty = isPaused() && !stepPerformanceSnapShots.isEmpty();
  boolean stoppedAndNotEmpty = isStopped() && !stepPerformanceSnapShots.isEmpty();
  if ( transMeta.isCapturingStepPerformanceSnapShots() && !pausedAndNotEmpty && !stoppedAndNotEmpty ) {
    // get the statistics from the steps and keep them...
    //
    int seqNr = stepPerformanceSnapshotSeqNr.incrementAndGet();
    for ( int i = 0; i < steps.size(); i++ ) {
      StepMeta stepMeta = steps.get( i ).stepMeta;
      StepInterface step = steps.get( i ).step;
      StepPerformanceSnapShot snapShot =
        new StepPerformanceSnapShot( seqNr, getBatchId(), new Date(), getName(), stepMeta.getName(), step
          .getCopy(), step.getLinesRead(), step.getLinesWritten(), step.getLinesInput(), step
          .getLinesOutput(), step.getLinesUpdated(), step.getLinesRejected(), step.getErrors() );
      List<StepPerformanceSnapShot> snapShotList = stepPerformanceSnapShots.get( step.toString() );
      StepPerformanceSnapShot previous;
      if ( snapShotList == null ) {
        snapShotList = new ArrayList<StepPerformanceSnapShot>();
        stepPerformanceSnapShots.put( step.toString(), snapShotList );
        previous = null;
      } else {
        previous = snapShotList.get( snapShotList.size() - 1 ); // the last one...
      }
      // Make the difference...
      //
      snapShot.diff( previous, step.rowsetInputSize(), step.rowsetOutputSize() );
      synchronized ( stepPerformanceSnapShots ) {
        snapShotList.add( snapShot );
        // Keep the list bounded: drop the oldest snapshot once the configured limit is exceeded.
        if ( stepPerformanceSnapshotSizeLimit > 0 && snapShotList.size() > stepPerformanceSnapshotSizeLimit ) {
          snapShotList.remove( 0 );
        }
      }
    }
    lastStepPerformanceSnapshotSeqNrAdded = stepPerformanceSnapshotSeqNr.get();
  }
}
/**
* This method performs any cleanup operations, typically called after the transformation has finished. Specifically,
* after ALL the slave transformations in a clustered run have finished.
*/
public void cleanup() {
// Close all open server sockets.
// We can only close these after all processing has been confirmed to be finished.
//
if ( steps == null ) {
return;
}
for ( StepMetaDataCombi combi : steps ) {
combi.step.cleanup();
}
}
/**
* Logs a summary message for the specified step.
*
* @param si
* the step interface
*/
public void logSummary( StepInterface si ) {
log
.logBasic(
si.getStepname(),
BaseMessages
.getString(
PKG,
"Trans.Log.FinishedProcessing", String.valueOf( si.getLinesInput() ), String.valueOf( si
.getLinesOutput() ), String.valueOf( si.getLinesRead() ) )
+ BaseMessages.getString(
PKG, "Trans.Log.FinishedProcessing2", String.valueOf( si.getLinesWritten() ), String.valueOf( si
.getLinesUpdated() ), String.valueOf( si.getErrors() ) ) );
}
/**
* Waits until all RunThreads have finished.
*/
public void waitUntilFinished() {
try {
if ( transFinishedBlockingQueue == null ) {
return;
}
boolean wait = true;
while ( wait ) {
wait = transFinishedBlockingQueue.poll( 1, TimeUnit.DAYS ) == null;
}
} catch ( InterruptedException e ) {
throw new RuntimeException( "Waiting for transformation to be finished interrupted!", e );
}
}
/**
* Gets the number of errors that have occurred during execution of the transformation.
*
* @return the number of errors
*/
public int getErrors() {
int nrErrors = errors.get();
if ( steps == null ) {
return nrErrors;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
if ( sid.step.getErrors() != 0L ) {
nrErrors += sid.step.getErrors();
}
}
if ( nrErrors > 0 ) {
log.logError( BaseMessages.getString( PKG, "Trans.Log.TransformationErrorsDetected" ) );
}
return nrErrors;
}
/**
* Gets the number of steps in the transformation that are in an end state, such as Finished, Halted, or Stopped.
*
* @return the number of ended steps
*/
public int getEnded() {
int nrEnded = 0;
if ( steps == null ) {
return 0;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepDataInterface data = sid.data;
if ( ( sid.step != null && !sid.step.isRunning() ) || // Should normally not be needed anymore, status is kept in
// data.
data.getStatus() == StepExecutionStatus.STATUS_FINISHED || // Finished processing
data.getStatus() == StepExecutionStatus.STATUS_HALTED || // Not launching because of init error
data.getStatus() == StepExecutionStatus.STATUS_STOPPED // Stopped because of an error
) {
nrEnded++;
}
}
return nrEnded;
}
/**
   * Checks if the transformation is finished.
*
* @return true if the transformation is finished, false otherwise
*/
  public boolean isFinished() {
    // Backed by an atomic flag; set via setFinished() once all run threads are done.
    return finished.get();
  }
  /**
   * Marks the transformation as finished (or not). Internal use only.
   *
   * @param newValue
   *          the new finished state
   */
  private void setFinished( boolean newValue ) {
    finished.set( newValue );
  }
  /**
   * @return true if the transformation has finished normally or has been stopped
   */
  public boolean isFinishedOrStopped() {
    return isFinished() || isStopped();
  }
/**
* Attempts to stops all running steps and subtransformations. If all steps have finished, the transformation is
* marked as Finished.
*/
  public void killAll() {
    if ( steps == null ) {
      return;
    }
    int nrStepsFinished = 0;
    for ( int i = 0; i < steps.size(); i++ ) {
      StepMetaDataCombi sid = steps.get( i );
      if ( log.isDebug() ) {
        log.logDebug( BaseMessages.getString( PKG, "Trans.Log.LookingAtStep" ) + sid.step.getStepname() );
      }
      // NOTE(review): if the step is a mapping (sub-transformation), this busy-wait
      // can loop endlessly — killAllNoWait() exists for that case.
      //
      while ( sid.step.isRunning() ) {
        sid.step.stopAll();
        try {
          Thread.sleep( 20 );
        } catch ( Exception e ) {
          log.logError( BaseMessages.getString( PKG, "Trans.Log.TransformationErrors" ) + e.toString() );
          return;
        }
      }
      if ( !sid.step.isRunning() ) {
        nrStepsFinished++;
      }
    }
    // Only mark the whole transformation finished once every step stopped running.
    if ( nrStepsFinished == steps.size() ) {
      setFinished( true );
    }
  }
/**
* Asks all steps to stop but doesn't wait around for it to happen. This is a special method for use with mappings.
*/
private void killAllNoWait() {
if ( steps == null ) {
return;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface step = sid.step;
if ( log.isDebug() ) {
log.logDebug( BaseMessages.getString( PKG, "Trans.Log.LookingAtStep" ) + step.getStepname() );
}
step.stopAll();
try {
Thread.sleep( 20 );
} catch ( Exception e ) {
log.logError( BaseMessages.getString( PKG, "Trans.Log.TransformationErrors" ) + e.toString() );
return;
}
}
}
/**
* Logs the execution statistics for the transformation for the specified time interval. If the total length of
* execution is supplied as the interval, then the statistics represent the average throughput (lines
* read/written/updated/rejected/etc. per second) for the entire execution.
*
* @param seconds
* the time interval (in seconds)
*/
public void printStats( int seconds ) {
log.logBasic( " " );
if ( steps == null ) {
return;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface step = sid.step;
long proc = step.getProcessed();
if ( seconds != 0 ) {
if ( step.getErrors() == 0 ) {
log
.logBasic( BaseMessages
.getString(
PKG,
"Trans.Log.ProcessSuccessfullyInfo", step.getStepname(), "." + step.getCopy(), String
.valueOf( proc ), String.valueOf( ( proc / seconds ) ) ) );
} else {
log
.logError( BaseMessages
.getString(
PKG,
"Trans.Log.ProcessErrorInfo", step.getStepname(), "." + step.getCopy(), String.valueOf( step
.getErrors() ), String.valueOf( proc ), String.valueOf( proc / seconds ) ) );
}
} else {
if ( step.getErrors() == 0 ) {
log
.logBasic( BaseMessages
.getString(
PKG,
"Trans.Log.ProcessSuccessfullyInfo", step.getStepname(), "." + step.getCopy(), String
.valueOf( proc ), seconds != 0 ? String.valueOf( ( proc / seconds ) ) : "-" ) );
} else {
log
.logError( BaseMessages
.getString(
PKG,
"Trans.Log.ProcessErrorInfo2", step.getStepname(), "." + step.getCopy(), String.valueOf( step
.getErrors() ), String.valueOf( proc ), String.valueOf( seconds ) ) );
}
}
}
}
/**
* Gets a representable metric of the "processed" lines of the last step.
*
* @return the number of lines processed by the last step
*/
public long getLastProcessed() {
if ( steps == null || steps.size() == 0 ) {
return 0L;
}
StepMetaDataCombi sid = steps.get( steps.size() - 1 );
return sid.step.getProcessed();
}
/**
* Finds the RowSet with the specified name.
*
* @param rowsetname
* the rowsetname
* @return the row set, or null if none found
*/
public RowSet findRowSet( String rowsetname ) {
// Start with the transformation.
for ( int i = 0; i < rowsets.size(); i++ ) {
// log.logDetailed("DIS: looking for RowSet ["+rowsetname+"] in nr "+i+" of "+threads.size()+" threads...");
RowSet rs = rowsets.get( i );
if ( rs.getName().equalsIgnoreCase( rowsetname ) ) {
return rs;
}
}
return null;
}
/**
* Finds the RowSet between two steps (or copies of steps).
*
* @param from
* the name of the "from" step
* @param fromcopy
* the copy number of the "from" step
* @param to
* the name of the "to" step
* @param tocopy
* the copy number of the "to" step
* @return the row set, or null if none found
*/
public RowSet findRowSet( String from, int fromcopy, String to, int tocopy ) {
// Start with the transformation.
for ( int i = 0; i < rowsets.size(); i++ ) {
RowSet rs = rowsets.get( i );
if ( rs.getOriginStepName().equalsIgnoreCase( from )
&& rs.getDestinationStepName().equalsIgnoreCase( to ) && rs.getOriginStepCopy() == fromcopy
&& rs.getDestinationStepCopy() == tocopy ) {
return rs;
}
}
return null;
}
/**
* Checks whether the specified step (or step copy) has started.
*
* @param sname
* the step name
* @param copy
* the copy number
* @return true the specified step (or step copy) has started, false otherwise
*/
public boolean hasStepStarted( String sname, int copy ) {
// log.logDetailed("DIS: Checking wether of not ["+sname+"]."+cnr+" has started!");
// log.logDetailed("DIS: hasStepStarted() looking in "+threads.size()+" threads");
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
boolean started = ( sid.stepname != null && sid.stepname.equalsIgnoreCase( sname ) ) && sid.copy == copy;
if ( started ) {
return true;
}
}
return false;
}
/**
* Stops all steps from running, and alerts any registered listeners.
*/
  public void stopAll() {
    if ( steps == null ) {
      return;
    }
    // Flag every step as stopped and wake up paused steps so they can observe the flag.
    for ( int i = 0; i < steps.size(); i++ ) {
      StepMetaDataCombi sid = steps.get( i );
      StepInterface rt = sid.step;
      rt.setStopped( true );
      rt.resumeRunning();
      // Cancel queries etc. by force...
      StepInterface si = rt;
      try {
        si.stopRunning( sid.meta, sid.data );
      } catch ( Exception e ) {
        // Best-effort: log and continue stopping the remaining steps.
        log.logError( "Something went wrong while trying to stop the transformation: " + e.toString() );
        log.logError( Const.getStackTracker( e ) );
      }
      sid.data.setStatus( StepExecutionStatus.STATUS_STOPPED );
    }
    // if it is stopped it is not paused
    paused.set( false );
    stopped.set( true );
    // Fire the stopped listener...
    //
    synchronized ( transStoppedListeners ) {
      for ( TransStoppedListener listener : transStoppedListeners ) {
        listener.transStopped( this );
      }
    }
  }
/**
* Gets the number of steps in this transformation.
*
* @return the number of steps
*/
public int nrSteps() {
if ( steps == null ) {
return 0;
}
return steps.size();
}
/**
* Gets the number of active (i.e. not finished) steps in this transformation
*
* @return the number of active steps
*/
public int nrActiveSteps() {
if ( steps == null ) {
return 0;
}
int nr = 0;
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
// without also considering a step status of not finished,
// the step execution results grid shows empty while
// the transformation has steps still running.
// if ( sid.step.isRunning() ) nr++;
if ( sid.step.isRunning() || sid.step.getStatus() != StepExecutionStatus.STATUS_FINISHED ) {
nr++;
}
}
return nr;
}
/**
* Checks whether the transformation steps are running lookup.
*
* @return a boolean array associated with the step list, indicating whether that step is running a lookup.
*/
public boolean[] getTransStepIsRunningLookup() {
if ( steps == null ) {
return null;
}
boolean[] tResult = new boolean[steps.size()];
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
tResult[i] = ( sid.step.isRunning() || sid.step.getStatus() != StepExecutionStatus.STATUS_FINISHED );
}
return tResult;
}
/**
* Checks the execution status of each step in the transformations.
*
* @return an array associated with the step list, indicating the status of that step.
*/
public StepExecutionStatus[] getTransStepExecutionStatusLookup() {
if ( steps == null ) {
return null;
}
// we need this snapshot for the TransGridDelegate refresh method to handle the
// difference between a timed refresh and continual step status updates
int totalSteps = steps.size();
StepExecutionStatus[] tList = new StepExecutionStatus[totalSteps];
for ( int i = 0; i < totalSteps; i++ ) {
StepMetaDataCombi sid = steps.get( i );
tList[i] = sid.step.getStatus();
}
return tList;
}
/**
* Gets the run thread for the step at the specified index.
*
* @param i
* the index of the desired step
* @return a StepInterface object corresponding to the run thread for the specified step
*/
public StepInterface getRunThread( int i ) {
if ( steps == null ) {
return null;
}
return steps.get( i ).step;
}
/**
* Gets the run thread for the step with the specified name and copy number.
*
* @param name
* the step name
* @param copy
* the copy number
* @return a StepInterface object corresponding to the run thread for the specified step
*/
public StepInterface getRunThread( String name, int copy ) {
if ( steps == null ) {
return null;
}
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface step = sid.step;
if ( step.getStepname().equalsIgnoreCase( name ) && step.getCopy() == copy ) {
return step;
}
}
return null;
}
/**
* Calculate the batch id and date range for the transformation.
*
* @throws KettleTransException
* if there are any errors during calculation
*/
  public void calculateBatchIdAndDateRange() throws KettleTransException {
    TransLogTable transLogTable = transMeta.getTransLogTable();
    // Default date window: everything from MIN_DATE up to "now".
    currentDate = new Date();
    logDate = new Date();
    startDate = Const.MIN_DATE;
    endDate = currentDate;
    DatabaseMeta logConnection = transLogTable.getDatabaseMeta();
    String logTable = environmentSubstitute( transLogTable.getActualTableName() );
    String logSchema = environmentSubstitute( transLogTable.getActualSchemaName() );
    try {
      if ( logConnection != null ) {
        String logSchemaAndTable = logConnection.getQuotedSchemaTableCombination( logSchema, logTable );
        if ( Const.isEmpty( logTable ) ) {
          // It doesn't make sense to start database logging without a table
          // to log to.
          throw new KettleTransException( BaseMessages.getString( PKG, "Trans.Exception.NoLogTableDefined" ) );
        }
        // A transformation name is required to look up / write log records.
        if ( Const.isEmpty( transMeta.getName() ) && logConnection != null && logTable != null ) {
          throw new KettleException( BaseMessages
            .getString( PKG, "Trans.Exception.NoTransnameAvailableForLogging" ) );
        }
        transLogTableDatabaseConnection = new Database( this, logConnection );
        transLogTableDatabaseConnection.shareVariablesWith( this );
        if ( log.isDetailed() ) {
          log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.OpeningLogConnection", "" + logConnection ) );
        }
        transLogTableDatabaseConnection.connect();
        transLogTableDatabaseConnection.setCommit( logCommitSize );
        // See if we have to add a batch id...
        // Do this first, before anything else to lock the complete table exclusively
        //
        if ( transLogTable.isBatchIdUsed() ) {
          Long id_batch =
            logConnection.getNextBatchId( transLogTableDatabaseConnection, logSchema, logTable, transLogTable
              .getKeyField().getFieldName() );
          setBatchId( id_batch.longValue() );
        }
        //
        // Get the date range from the logging table: from the last end_date to now. (currentDate)
        //
        Object[] lastr =
          transLogTableDatabaseConnection.getLastLogDate(
            logSchemaAndTable, transMeta.getName(), false, LogStatus.END );
        if ( lastr != null && lastr.length > 0 ) {
          startDate = (Date) lastr[0];
          if ( log.isDetailed() ) {
            log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.StartDateFound" ) + startDate );
          }
        }
        //
        // OK, we have a date-range.
        // However, perhaps we need to look at a table before we make a final judgment?
        //
        if ( transMeta.getMaxDateConnection() != null
          && transMeta.getMaxDateTable() != null && transMeta.getMaxDateTable().length() > 0
          && transMeta.getMaxDateField() != null && transMeta.getMaxDateField().length() > 0 ) {
          if ( log.isDetailed() ) {
            log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.LookingForMaxdateConnection", ""
              + transMeta.getMaxDateConnection() ) );
          }
          DatabaseMeta maxcon = transMeta.getMaxDateConnection();
          if ( maxcon != null ) {
            Database maxdb = new Database( this, maxcon );
            maxdb.shareVariablesWith( this );
            try {
              if ( log.isDetailed() ) {
                log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.OpeningMaximumDateConnection" ) );
              }
              maxdb.connect();
              maxdb.setCommit( logCommitSize );
              //
              // Determine the endDate by looking at a field in a table...
              //
              String sql = "SELECT MAX(" + transMeta.getMaxDateField() + ") FROM " + transMeta.getMaxDateTable();
              RowMetaAndData r1 = maxdb.getOneRow( sql );
              if ( r1 != null ) {
                // OK, we have a value, what's the offset?
                Date maxvalue = r1.getRowMeta().getDate( r1.getData(), 0 );
                if ( maxvalue != null ) {
                  if ( log.isDetailed() ) {
                    log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.LastDateFoundOnTheMaxdateConnection" )
                      + r1 );
                  }
                  // Shift the end of the window by the configured offset (seconds -> ms).
                  endDate.setTime( (long) ( maxvalue.getTime() + ( transMeta.getMaxDateOffset() * 1000 ) ) );
                }
              } else {
                if ( log.isDetailed() ) {
                  log
                    .logDetailed( BaseMessages
                      .getString( PKG, "Trans.Log.NoLastDateFoundOnTheMaxdateConnection" ) );
                }
              }
            } catch ( KettleException e ) {
              throw new KettleTransException( BaseMessages.getString(
                PKG, "Trans.Exception.ErrorConnectingToDatabase", "" + transMeta.getMaxDateConnection() ), e );
            } finally {
              maxdb.disconnect();
            }
          } else {
            throw new KettleTransException( BaseMessages
              .getString( PKG, "Trans.Exception.MaximumDateConnectionCouldNotBeFound", ""
                + transMeta.getMaxDateConnection() ) );
          }
        }
        // Determine the last date of all dependent tables...
        // Get the maximum in depdate...
        if ( transMeta.nrDependencies() > 0 ) {
          if ( log.isDetailed() ) {
            log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.CheckingForMaxDependencyDate" ) );
          }
          //
          // Maybe one of the tables where this transformation is dependent on has changed?
          // If so we need to change the start-date!
          //
          depDate = Const.MIN_DATE;
          Date maxdepdate = Const.MIN_DATE;
          if ( lastr != null && lastr.length > 0 ) {
            Date dep = (Date) lastr[1]; // #1: last depdate
            if ( dep != null ) {
              maxdepdate = dep;
              depDate = dep;
            }
          }
          for ( int i = 0; i < transMeta.nrDependencies(); i++ ) {
            TransDependency td = transMeta.getDependency( i );
            DatabaseMeta depcon = td.getDatabase();
            if ( depcon != null ) {
              Database depdb = new Database( this, depcon );
              try {
                depdb.connect();
                depdb.setCommit( logCommitSize );
                String sql = "SELECT MAX(" + td.getFieldname() + ") FROM " + td.getTablename();
                RowMetaAndData r1 = depdb.getOneRow( sql );
                if ( r1 != null ) {
                  // OK, we have a row, get the result!
                  Date maxvalue = (Date) r1.getData()[0];
                  if ( maxvalue != null ) {
                    if ( log.isDetailed() ) {
                      log
                        .logDetailed( BaseMessages
                          .getString(
                            PKG,
                            "Trans.Log.FoundDateFromTable", td.getTablename(), "." + td.getFieldname(), " = "
                              + maxvalue.toString() ) );
                    }
                    if ( maxvalue.getTime() > maxdepdate.getTime() ) {
                      maxdepdate = maxvalue;
                    }
                  } else {
                    throw new KettleTransException(
                      BaseMessages
                        .getString(
                          PKG,
                          "Trans.Exception.UnableToGetDependencyInfoFromDB", td.getDatabase().getName() + ".", td
                            .getTablename()
                            + ".", td.getFieldname() ) );
                  }
                } else {
                  throw new KettleTransException(
                    BaseMessages
                      .getString(
                        PKG,
                        "Trans.Exception.UnableToGetDependencyInfoFromDB", td.getDatabase().getName() + ".", td
                          .getTablename()
                          + ".", td.getFieldname() ) );
                }
              } catch ( KettleException e ) {
                throw new KettleTransException( BaseMessages.getString( PKG, "Trans.Exception.ErrorInDatabase", ""
                  + td.getDatabase() ), e );
              } finally {
                depdb.disconnect();
              }
            } else {
              throw new KettleTransException( BaseMessages.getString(
                PKG, "Trans.Exception.ConnectionCouldNotBeFound", "" + td.getDatabase() ) );
            }
            if ( log.isDetailed() ) {
              log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.Maxdepdate" )
                + ( XMLHandler.date2string( maxdepdate ) ) );
            }
          }
          // OK, so we now have the maximum depdate;
          // If it is larger, it means we have to read everything back in again.
          // Maybe something has changed that we need!
          //
          if ( maxdepdate.getTime() > depDate.getTime() ) {
            depDate = maxdepdate;
            startDate = Const.MIN_DATE;
          }
        } else {
          depDate = currentDate;
        }
      }
      // OK, now we have a date-range. See if we need to set a maximum!
      if ( transMeta.getMaxDateDifference() > 0.0 && // Do we have a difference specified?
        startDate.getTime() > Const.MIN_DATE.getTime() // Is the startdate > Minimum?
      ) {
        // See if the end-date is larger then Start_date + DIFF?
        Date maxdesired = new Date( startDate.getTime() + ( (long) transMeta.getMaxDateDifference() * 1000 ) );
        // If this is the case: lower the end-date. Pick up the next 'region' next time around.
        // We do this to limit the workload in a single update session (e.g. for large fact tables)
        //
        if ( endDate.compareTo( maxdesired ) > 0 ) {
          endDate = maxdesired;
        }
      }
    } catch ( KettleException e ) {
      throw new KettleTransException( BaseMessages.getString(
        PKG, "Trans.Exception.ErrorCalculatingDateRange", logTable ), e );
    }
    // Be careful, We DO NOT close the trans log table database connection!!!
    // It's closed later in beginProcessing() to prevent excessive connect/disconnect repetitions.
  }
/**
* Begin processing. Also handle logging operations related to the start of the transformation
*
* @throws KettleTransException
* the kettle trans exception
*/
  public void beginProcessing() throws KettleTransException {
    TransLogTable transLogTable = transMeta.getTransLogTable();
    // Interval logging period in seconds; -1 disables interval logging.
    int intervalInSeconds = Const.toInt( environmentSubstitute( transLogTable.getLogInterval() ), -1 );
    try {
      String logTable = transLogTable.getActualTableName();
      SimpleDateFormat df = new SimpleDateFormat( REPLAY_DATE_FORMAT );
      log.logDetailed( BaseMessages.getString( PKG, "Trans.Log.TransformationCanBeReplayed" )
        + df.format( currentDate ) );
      try {
        // The log connection was opened earlier by calculateBatchIdAndDateRange().
        if ( transLogTableDatabaseConnection != null
          && !Const.isEmpty( logTable ) && !Const.isEmpty( transMeta.getName() ) ) {
          transLogTableDatabaseConnection.writeLogRecord( transLogTable, LogStatus.START, this, null );
          // Pass in a commit to release transaction locks and to allow a user to actually see the log record.
          //
          if ( !transLogTableDatabaseConnection.isAutoCommit() ) {
            transLogTableDatabaseConnection.commitLog( true, transLogTable );
          }
          // If we need to do periodic logging, make sure to install a timer for this...
          //
          if ( intervalInSeconds > 0 ) {
            final Timer timer = new Timer( getName() + " - interval logging timer" );
            TimerTask timerTask = new TimerTask() {
              public void run() {
                try {
                  endProcessing();
                } catch ( Exception e ) {
                  log
                    .logError(
                      BaseMessages.getString( PKG, "Trans.Exception.UnableToPerformIntervalLogging" ), e );
                  // Also stop the show...
                  //
                  errors.incrementAndGet();
                  stopAll();
                }
              }
            };
            timer.schedule( timerTask, intervalInSeconds * 1000, intervalInSeconds * 1000 );
            // Cancel the interval timer once the transformation finishes.
            addTransListener( new TransAdapter() {
              public void transFinished( Trans trans ) {
                timer.cancel();
              }
            } );
          }
          // Add a listener to make sure that the last record is also written when transformation finishes...
          //
          addTransListener( new TransAdapter() {
            public void transFinished( Trans trans ) throws KettleException {
              try {
                endProcessing();
                lastWrittenStepPerformanceSequenceNr =
                  writeStepPerformanceLogRecords( lastWrittenStepPerformanceSequenceNr, LogStatus.END );
              } catch ( KettleException e ) {
                throw new KettleException( BaseMessages.getString(
                  PKG, "Trans.Exception.UnableToPerformLoggingAtTransEnd" ), e );
              }
            }
          } );
        }
        // If we need to write out the step logging information, do so at the end of the transformation too...
        //
        StepLogTable stepLogTable = transMeta.getStepLogTable();
        if ( stepLogTable.isDefined() ) {
          addTransListener( new TransAdapter() {
            public void transFinished( Trans trans ) throws KettleException {
              try {
                writeStepLogInformation();
              } catch ( KettleException e ) {
                throw new KettleException( BaseMessages.getString(
                  PKG, "Trans.Exception.UnableToPerformLoggingAtTransEnd" ), e );
              }
            }
          } );
        }
        // If we need to write the log channel hierarchy and lineage information, add a listener for that too...
        //
        ChannelLogTable channelLogTable = transMeta.getChannelLogTable();
        if ( channelLogTable.isDefined() ) {
          addTransListener( new TransAdapter() {
            public void transFinished( Trans trans ) throws KettleException {
              try {
                writeLogChannelInformation();
              } catch ( KettleException e ) {
                throw new KettleException( BaseMessages.getString(
                  PKG, "Trans.Exception.UnableToPerformLoggingAtTransEnd" ), e );
              }
            }
          } );
        }
        // See if we need to write the step performance records at intervals too...
        //
        PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable();
        int perfLogInterval = Const.toInt( environmentSubstitute( performanceLogTable.getLogInterval() ), -1 );
        if ( performanceLogTable.isDefined() && perfLogInterval > 0 ) {
          final Timer timer = new Timer( getName() + " - step performance log interval timer" );
          TimerTask timerTask = new TimerTask() {
            public void run() {
              try {
                lastWrittenStepPerformanceSequenceNr =
                  writeStepPerformanceLogRecords( lastWrittenStepPerformanceSequenceNr, LogStatus.RUNNING );
              } catch ( Exception e ) {
                log.logError( BaseMessages.getString(
                  PKG, "Trans.Exception.UnableToPerformIntervalPerformanceLogging" ), e );
                // Also stop the show...
                //
                errors.incrementAndGet();
                stopAll();
              }
            }
          };
          timer.schedule( timerTask, perfLogInterval * 1000, perfLogInterval * 1000 );
          // Cancel the performance timer once the transformation finishes.
          addTransListener( new TransAdapter() {
            public void transFinished( Trans trans ) {
              timer.cancel();
            }
          } );
        }
      } catch ( KettleException e ) {
        throw new KettleTransException( BaseMessages.getString(
          PKG, "Trans.Exception.ErrorWritingLogRecordToTable", logTable ), e );
      } finally {
        // If we use interval logging, we keep the connection open for performance reasons...
        //
        if ( transLogTableDatabaseConnection != null && ( intervalInSeconds <= 0 ) ) {
          transLogTableDatabaseConnection.disconnect();
          transLogTableDatabaseConnection = null;
        }
      }
    } catch ( KettleException e ) {
      throw new KettleTransException( BaseMessages.getString(
        PKG, "Trans.Exception.UnableToBeginProcessingTransformation" ), e );
    }
  }
/**
* Writes log channel information to a channel logging table (if one has been configured).
*
* @throws KettleException
* if any errors occur during logging
*/
protected void writeLogChannelInformation() throws KettleException {
Database db = null;
ChannelLogTable channelLogTable = transMeta.getChannelLogTable();
// PDI-7070: If parent trans or job has the same channel logging info, don't duplicate log entries
Trans t = getParentTrans();
if ( t != null ) {
if ( channelLogTable.equals( t.getTransMeta().getChannelLogTable() ) ) {
return;
}
}
Job j = getParentJob();
if ( j != null ) {
if ( channelLogTable.equals( j.getJobMeta().getChannelLogTable() ) ) {
return;
}
}
// end PDI-7070
try {
db = new Database( this, channelLogTable.getDatabaseMeta() );
db.shareVariablesWith( this );
db.connect();
db.setCommit( logCommitSize );
List<LoggingHierarchy> loggingHierarchyList = getLoggingHierarchy();
for ( LoggingHierarchy loggingHierarchy : loggingHierarchyList ) {
db.writeLogRecord( channelLogTable, LogStatus.START, loggingHierarchy, null );
}
// Also time-out the log records in here...
//
db.cleanupLogRecords( channelLogTable );
} catch ( Exception e ) {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToWriteLogChannelInformationToLogTable" ), e );
} finally {
if ( !db.isAutoCommit() ) {
db.commit( true );
}
db.disconnect();
}
}
/**
* Writes step information to a step logging table (if one has been configured).
*
* @throws KettleException
* if any errors occur during logging
*/
protected void writeStepLogInformation() throws KettleException {
Database db = null;
StepLogTable stepLogTable = transMeta.getStepLogTable();
try {
db = new Database( this, stepLogTable.getDatabaseMeta() );
db.shareVariablesWith( this );
db.connect();
db.setCommit( logCommitSize );
for ( StepMetaDataCombi combi : steps ) {
db.writeLogRecord( stepLogTable, LogStatus.START, combi, null );
}
} catch ( Exception e ) {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToWriteStepInformationToLogTable" ), e );
} finally {
if ( !db.isAutoCommit() ) {
db.commit( true );
}
db.disconnect();
}
}
protected synchronized void writeMetricsInformation() throws KettleException {
//
List<MetricsDuration> metricsList =
MetricsUtil.getDuration( log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_REGISTER_EXTENSIONS_START );
if ( !metricsList.isEmpty() ) {
System.out.println( metricsList.get( 0 ) );
}
metricsList =
MetricsUtil.getDuration( log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_PLUGIN_REGISTRATION_START );
if ( !metricsList.isEmpty() ) {
System.out.println( metricsList.get( 0 ) );
}
long total = 0;
metricsList =
MetricsUtil.getDuration(
log.getLogChannelId(), Metrics.METRIC_PLUGIN_REGISTRY_PLUGIN_TYPE_REGISTRATION_START );
if ( metricsList != null ) {
for ( MetricsDuration duration : metricsList ) {
total += duration.getDuration();
System.out.println( " - " + duration.toString() + " Total=" + total );
}
}
Database db = null;
MetricsLogTable metricsLogTable = transMeta.getMetricsLogTable();
try {
db = new Database( this, metricsLogTable.getDatabaseMeta() );
db.shareVariablesWith( this );
db.connect();
db.setCommit( logCommitSize );
List<String> logChannelIds = LoggingRegistry.getInstance().getLogChannelChildren( getLogChannelId() );
for ( String logChannelId : logChannelIds ) {
Deque<MetricsSnapshotInterface> snapshotList =
MetricsRegistry.getInstance().getSnapshotLists().get( logChannelId );
if ( snapshotList != null ) {
Iterator<MetricsSnapshotInterface> iterator = snapshotList.iterator();
while ( iterator.hasNext() ) {
MetricsSnapshotInterface snapshot = iterator.next();
db.writeLogRecord( metricsLogTable, LogStatus.START, new LoggingMetric( batchId, snapshot ), null );
}
}
Map<String, MetricsSnapshotInterface> snapshotMap =
MetricsRegistry.getInstance().getSnapshotMaps().get( logChannelId );
if ( snapshotMap != null ) {
synchronized ( snapshotMap ) {
Iterator<MetricsSnapshotInterface> iterator = snapshotMap.values().iterator();
while ( iterator.hasNext() ) {
MetricsSnapshotInterface snapshot = iterator.next();
db.writeLogRecord( metricsLogTable, LogStatus.START, new LoggingMetric( batchId, snapshot ), null );
}
}
}
}
// Also time-out the log records in here...
//
db.cleanupLogRecords( metricsLogTable );
} catch ( Exception e ) {
throw new KettleException( BaseMessages.getString(
PKG, "Trans.Exception.UnableToWriteMetricsInformationToLogTable" ), e );
} finally {
if ( !db.isAutoCommit() ) {
db.commit( true );
}
db.disconnect();
}
}
/**
* Gets the result of the transformation. The Result object contains such measures as the number of errors, number of
* lines read/written/input/output/updated/rejected, etc.
*
* @return the Result object containing resulting measures from execution of the transformation
*/
public Result getResult() {
if ( steps == null ) {
return null;
}
Result result = new Result();
result.setNrErrors( errors.longValue() );
result.setResult( errors.longValue() == 0 );
TransLogTable transLogTable = transMeta.getTransLogTable();
for ( int i = 0; i < steps.size(); i++ ) {
StepMetaDataCombi sid = steps.get( i );
StepInterface step = sid.step;
result.setNrErrors( result.getNrErrors() + sid.step.getErrors() );
result.getResultFiles().putAll( step.getResultFiles() );
if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_READ ) ) ) {
result.setNrLinesRead( result.getNrLinesRead() + step.getLinesRead() );
}
if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_INPUT ) ) ) {
result.setNrLinesInput( result.getNrLinesInput() + step.getLinesInput() );
}
if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_WRITTEN ) ) ) {
result.setNrLinesWritten( result.getNrLinesWritten() + step.getLinesWritten() );
}
if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_OUTPUT ) ) ) {
result.setNrLinesOutput( result.getNrLinesOutput() + step.getLinesOutput() );
}
if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_UPDATED ) ) ) {
result.setNrLinesUpdated( result.getNrLinesUpdated() + step.getLinesUpdated() );
}
if ( step.getStepname().equals( transLogTable.getSubjectString( TransLogTable.ID.LINES_REJECTED ) ) ) {
result.setNrLinesRejected( result.getNrLinesRejected() + step.getLinesRejected() );
}
}
result.setRows( resultRows );
if ( !Const.isEmpty( resultFiles ) ) {
result.setResultFiles( new HashMap<String, ResultFile>() );
for ( ResultFile resultFile : resultFiles ) {
result.getResultFiles().put( resultFile.toString(), resultFile );
}
}
result.setStopped( isStopped() );
result.setLogChannelId( log.getLogChannelId() );
return result;
}
/**
* End processing. Also handle any logging operations associated with the end of a transformation
*
* @return true if all end processing is successful, false otherwise
* @throws KettleException
* if any errors occur during processing
*/
/**
 * Writes the end-of-run (or interval) record to the transformation log table, reusing a cached
 * connection between interval writes and only disconnecting on END/STOP or when no interval is set.
 */
private synchronized boolean endProcessing() throws KettleException {
  // Derive the log status from the current life-cycle state of the transformation.
  LogStatus status;
  if ( isFinished() ) {
    if ( isStopped() ) {
      status = LogStatus.STOP;
    } else {
      status = LogStatus.END;
    }
  } else if ( isPaused() ) {
    status = LogStatus.PAUSED;
  } else {
    status = LogStatus.RUNNING;
  }
  TransLogTable transLogTable = transMeta.getTransLogTable();
  // -1 means "no interval logging configured".
  int intervalInSeconds = Const.toInt( environmentSubstitute( transLogTable.getLogInterval() ), -1 );
  logDate = new Date();
  // OK, we have some logging to do...
  //
  DatabaseMeta logcon = transMeta.getTransLogTable().getDatabaseMeta();
  String logTable = transMeta.getTransLogTable().getActualTableName();
  if ( logcon != null ) {
    Database ldb = null;
    try {
      // Let's not reconnect/disconnect all the time for performance reasons!
      //
      if ( transLogTableDatabaseConnection == null ) {
        ldb = new Database( this, logcon );
        ldb.shareVariablesWith( this );
        ldb.connect();
        ldb.setCommit( logCommitSize );
        transLogTableDatabaseConnection = ldb; // cache for subsequent interval writes
      } else {
        ldb = transLogTableDatabaseConnection;
      }
      // Write to the standard transformation log table...
      //
      if ( !Const.isEmpty( logTable ) ) {
        ldb.writeLogRecord( transLogTable, status, this, null );
      }
      // Also time-out the log records in here...
      //
      if ( status.equals( LogStatus.END ) || status.equals( LogStatus.STOP ) ) {
        ldb.cleanupLogRecords( transLogTable );
      }
      // Commit the operations to prevent locking issues
      //
      if ( !ldb.isAutoCommit() ) {
        ldb.commitLog( true, transMeta.getTransLogTable() );
      }
    } catch ( KettleDatabaseException e ) {
      // PDI-9790 error write to log db is transaction error
      // A log-table write failure is recorded as a transformation error rather than rethrown.
      log.logError( BaseMessages.getString( PKG, "Database.Error.WriteLogTable", logTable ), e );
      errors.incrementAndGet();
      //end PDI-9790
    } catch ( Exception e ) {
      throw new KettleException( BaseMessages
        .getString( PKG, "Trans.Exception.ErrorWritingLogRecordToTable", transMeta
          .getTransLogTable().getActualTableName() ), e );
    } finally {
      // Keep the connection open between interval writes; release it once the run ends.
      if ( intervalInSeconds <= 0 || ( status.equals( LogStatus.END ) || status.equals( LogStatus.STOP ) ) ) {
        ldb.disconnect();
        transLogTableDatabaseConnection = null; // disconnected
      }
    }
  }
  return true;
}
/**
* Write step performance log records.
*
* @param startSequenceNr
* the start sequence number
* @param status
* the logging status. If this is End, perform cleanup
* @return the new sequence number
* @throws KettleException
* if any errors occur during logging
*/
private int writeStepPerformanceLogRecords( int startSequenceNr, LogStatus status ) throws KettleException {
  int lastSeqNr = 0;
  Database ldb = null;
  PerformanceLogTable performanceLogTable = transMeta.getPerformanceLogTable();
  // Only log when a table is defined, snapshot capture is enabled, and snapshots exist.
  if ( !performanceLogTable.isDefined()
    || !transMeta.isCapturingStepPerformanceSnapShots() || stepPerformanceSnapShots == null
    || stepPerformanceSnapShots.isEmpty() ) {
    return 0; // nothing to do here!
  }
  try {
    ldb = new Database( this, performanceLogTable.getDatabaseMeta() );
    ldb.shareVariablesWith( this );
    ldb.connect();
    ldb.setCommit( logCommitSize );
    // Write to the step performance log table...
    //
    // A dummy START record supplies the row layout for the prepared insert.
    RowMetaInterface rowMeta = performanceLogTable.getLogRecord( LogStatus.START, null, null ).getRowMeta();
    ldb.prepareInsert( rowMeta, performanceLogTable.getActualSchemaName(), performanceLogTable
      .getActualTableName() );
    // Lock the snapshot map (and each per-step list) against concurrent snapshot capture.
    synchronized ( stepPerformanceSnapShots ) {
      Iterator<List<StepPerformanceSnapShot>> iterator = stepPerformanceSnapShots.values().iterator();
      while ( iterator.hasNext() ) {
        List<StepPerformanceSnapShot> snapshots = iterator.next();
        synchronized ( snapshots ) {
          Iterator<StepPerformanceSnapShot> snapshotsIterator = snapshots.iterator();
          while ( snapshotsIterator.hasNext() ) {
            StepPerformanceSnapShot snapshot = snapshotsIterator.next();
            // Only write the window [startSequenceNr, lastStepPerformanceSnapshotSeqNrAdded].
            if ( snapshot.getSeqNr() >= startSequenceNr
              && snapshot.getSeqNr() <= lastStepPerformanceSnapshotSeqNrAdded ) {
              RowMetaAndData row = performanceLogTable.getLogRecord( LogStatus.START, snapshot, null );
              ldb.setValuesInsert( row.getRowMeta(), row.getData() );
              ldb.insertRow( true );
            }
            lastSeqNr = snapshot.getSeqNr();
          }
        }
      }
    }
    ldb.insertFinished( true );
    // Finally, see if the log table needs cleaning up...
    //
    if ( status.equals( LogStatus.END ) ) {
      ldb.cleanupLogRecords( performanceLogTable );
    }
  } catch ( Exception e ) {
    throw new KettleException( BaseMessages.getString(
      PKG, "Trans.Exception.ErrorWritingStepPerformanceLogRecordToTable" ), e );
  } finally {
    if ( ldb != null ) {
      ldb.disconnect();
    }
  }
  // Next call should start one past the last sequence number we saw.
  return lastSeqNr + 1;
}
/**
* Close unique database connections. If there are errors in the Result, perform a rollback
*
* @param result
* the result of the transformation execution
*/
private void closeUniqueDatabaseConnections( Result result ) {
  // Don't close any connections if the parent job is using the same transaction
  //
  if ( parentJob != null
    && transactionId != null && parentJob.getTransactionId() != null
    && transactionId.equals( parentJob.getTransactionId() ) ) {
    return;
  }
  // Don't close any connections if the parent transformation is using the same transaction
  //
  if ( parentTrans != null
    && parentTrans.getTransMeta().isUsingUniqueConnections() && transactionId != null
    && parentTrans.getTransactionId() != null && transactionId.equals( parentTrans.getTransactionId() ) ) {
    return;
  }
  // First we get all the database connections ...
  //
  DatabaseConnectionMap map = DatabaseConnectionMap.getInstance();
  synchronized ( map ) {
    // Snapshot the values so we can remove entries from the map while iterating.
    List<Database> databaseList = new ArrayList<Database>( map.getMap().values() );
    for ( Database database : databaseList ) {
      // Only touch connections that belong to this transformation's transaction group.
      if ( database.getConnectionGroup().equals( getTransactionId() ) ) {
        try {
          // This database connection belongs to this transformation.
          // Let's roll it back if there is an error...
          //
          if ( result.getNrErrors() > 0 ) {
            try {
              database.rollback( true );
              log.logBasic( BaseMessages.getString(
                PKG, "Trans.Exception.TransactionsRolledBackOnConnection", database.toString() ) );
            } catch ( Exception e ) {
              throw new KettleDatabaseException( BaseMessages.getString(
                PKG, "Trans.Exception.ErrorRollingBackUniqueConnection", database.toString() ), e );
            }
          } else {
            try {
              database.commit( true );
              log.logBasic( BaseMessages.getString(
                PKG, "Trans.Exception.TransactionsCommittedOnConnection", database.toString() ) );
            } catch ( Exception e ) {
              throw new KettleDatabaseException( BaseMessages.getString(
                PKG, "Trans.Exception.ErrorCommittingUniqueConnection", database.toString() ), e );
            }
          }
        } catch ( Exception e ) {
          // Commit/rollback failures are logged and counted, but never abort the cleanup loop.
          log.logError( BaseMessages.getString(
            PKG, "Trans.Exception.ErrorHandlingTransformationTransaction", database.toString() ), e );
          result.setNrErrors( result.getNrErrors() + 1 );
        } finally {
          try {
            // This database connection belongs to this transformation.
            database.closeConnectionOnly();
          } catch ( Exception e ) {
            log.logError( BaseMessages.getString(
              PKG, "Trans.Exception.ErrorHandlingTransformationTransaction", database.toString() ), e );
            result.setNrErrors( result.getNrErrors() + 1 );
          } finally {
            // Remove the database from the list...
            //
            map.removeConnection( database.getConnectionGroup(), database.getPartitionId(), database );
          }
        }
      }
    }
    // Who else needs to be informed of the rollback or commit?
    //
    List<DatabaseTransactionListener> transactionListeners = map.getTransactionListeners( getTransactionId() );
    if ( result.getNrErrors() > 0 ) {
      for ( DatabaseTransactionListener listener : transactionListeners ) {
        try {
          listener.rollback();
        } catch ( Exception e ) {
          log.logError(
            BaseMessages.getString( PKG, "Trans.Exception.ErrorHandlingTransactionListenerRollback" ), e );
          result.setNrErrors( result.getNrErrors() + 1 );
        }
      }
    } else {
      for ( DatabaseTransactionListener listener : transactionListeners ) {
        try {
          listener.commit();
        } catch ( Exception e ) {
          log.logError(
            BaseMessages.getString( PKG, "Trans.Exception.ErrorHandlingTransactionListenerCommit" ), e );
          result.setNrErrors( result.getNrErrors() + 1 );
        }
      }
    }
  }
}
/**
* Find the run thread for the step with the specified name.
*
* @param stepname
* the step name
* @return a StepInterface object corresponding to the run thread for the specified step
*/
public StepInterface findRunThread( String stepname ) {
  if ( steps == null ) {
    return null;
  }
  // Scan the executing step combinations for the first case-insensitive name match.
  for ( StepMetaDataCombi combi : steps ) {
    if ( combi.step.getStepname().equalsIgnoreCase( stepname ) ) {
      return combi.step;
    }
  }
  return null;
}
/**
* Find the base steps for the step with the specified name.
*
* @param stepname
* the step name
* @return the list of base steps for the specified step
*/
public List<StepInterface> findBaseSteps( String stepname ) {
  List<StepInterface> baseSteps = new ArrayList<StepInterface>();
  if ( steps == null ) {
    return baseSteps;
  }
  // Every copy of the named step contributes one entry (case-insensitive match).
  for ( StepMetaDataCombi combi : steps ) {
    if ( combi.step.getStepname().equalsIgnoreCase( stepname ) ) {
      baseSteps.add( combi.step );
    }
  }
  return baseSteps;
}
/**
* Find the executing step copy for the step with the specified name and copy number
*
* @param stepname
* the step name
* @param copyNr the copy number of the step to look for
* @return the executing step found or null if no copy could be found.
*/
public StepInterface findStepInterface( String stepname, int copyNr ) {
  if ( steps == null ) {
    return null;
  }
  // Both the (case-insensitive) name and the copy number must match.
  for ( StepMetaDataCombi combi : steps ) {
    if ( combi.copy == copyNr && combi.step.getStepname().equalsIgnoreCase( stepname ) ) {
      return combi.step;
    }
  }
  return null;
}
/**
* Find the available executing step copies for the step with the specified name
*
* @param stepname
* the step name
* @return the list of executing step copies found or null if no steps are available yet (incorrect usage)
*/
public List<StepInterface> findStepInterfaces( String stepname ) {
  if ( steps == null ) {
    return null;
  }
  // Collect every running copy of the named step (case-insensitive match).
  List<StepInterface> matches = new ArrayList<StepInterface>();
  for ( StepMetaDataCombi combi : steps ) {
    if ( combi.step.getStepname().equalsIgnoreCase( stepname ) ) {
      matches.add( combi.step );
    }
  }
  return matches;
}
/**
* Find the data interface for the step with the specified name.
*
* @param name
* the step name
* @return the step data interface
*/
public StepDataInterface findDataInterface( String name ) {
  if ( steps == null ) {
    return null;
  }
  // Return the data object of the first step whose name matches, ignoring case.
  for ( StepMetaDataCombi combi : steps ) {
    if ( combi.step.getStepname().equalsIgnoreCase( name ) ) {
      return combi.data;
    }
  }
  return null;
}
/**
* Gets the start date/time object for the transformation.
*
* @return Returns the startDate.
*/
public Date getStartDate() {
  // Timestamp recorded when the transformation run began.
  return this.startDate;
}
/**
* Gets the end date/time object for the transformation.
*
* @return Returns the endDate.
*/
public Date getEndDate() {
  // Timestamp recorded when the transformation run ended.
  return this.endDate;
}
/**
* Checks whether the running transformation is being monitored.
*
* @return true the running transformation is being monitored, false otherwise
*/
public boolean isMonitored() {
  // Simple flag accessor; set via setMonitored().
  return this.monitored;
}
/**
* Sets whether the running transformation should be monitored.
*
* @param monitored
* true if the running transformation should be monitored, false otherwise
*/
public void setMonitored( boolean monitored ) {
  // Plain flag mutator; no side effects.
  this.monitored = monitored;
}
/**
* Gets the meta-data for the transformation.
*
* @return Returns the transformation meta-data
*/
public TransMeta getTransMeta() {
  // The static definition (steps, hops, settings) this execution was built from.
  return this.transMeta;
}
/**
* Sets the meta-data for the transformation.
*
* @param transMeta
* The transformation meta-data to set.
*/
public void setTransMeta( TransMeta transMeta ) {
  // Replaces the transformation definition; callers are responsible for doing this
  // only when the transformation is not running.
  this.transMeta = transMeta;
}
/**
* Gets the current date/time object.
*
* @return the current date
*/
public Date getCurrentDate() {
  // "Current" date captured for this run (used in date-range logic).
  return this.currentDate;
}
/**
* Gets the dependency date for the transformation. A transformation can have a list of dependency fields. If any of
* these fields have a maximum date higher than the dependency date of the last run, the date range is set to to (-oo,
* now). The use-case is the incremental population of Slowly Changing Dimensions (SCD).
*
* @return Returns the dependency date
*/
public Date getDepDate() {
  // Dependency date used for incremental (SCD-style) date-range calculations.
  return this.depDate;
}
/**
* Gets the date the transformation was logged.
*
* @return the log date
*/
public Date getLogDate() {
  // Timestamp of the most recent log-table write.
  return this.logDate;
}
/**
* Gets the rowsets for the transformation.
*
* @return a list of rowsets
*/
public List<RowSet> getRowsets() {
  // Live list of row buffers connecting the step threads; not a copy.
  return this.rowsets;
}
/**
* Gets a list of steps in the transformation.
*
* @return a list of the steps in the transformation
*/
public List<StepMetaDataCombi> getSteps() {
  // Live list of step/meta/data combinations; not a defensive copy.
  return this.steps;
}
/**
* Gets a string representation of the transformation.
*
* @return the string representation of the transformation
* @see java.lang.Object#toString()
*/
/**
 * Gets a string representation of the transformation: the optional parent transformation and
 * mapping step name in brackets, followed by the transformation name. Falls back to the simple
 * class name when no metadata is available.
 *
 * @return the string representation of the transformation
 * @see java.lang.Object#toString()
 */
@Override
public String toString() {
  if ( transMeta == null || transMeta.getName() == null ) {
    return getClass().getSimpleName();
  }
  // See if there is a parent transformation. If so, print the name of the parent here as well...
  // StringBuilder is preferred over StringBuffer: the buffer is method-local, so the
  // synchronization StringBuffer provides is pure overhead.
  StringBuilder string = new StringBuilder();
  // If we're running as a mapping, we get a reference to the calling (parent) transformation as well...
  //
  if ( getParentTrans() != null ) {
    string.append( '[' ).append( getParentTrans().toString() ).append( ']' ).append( '.' );
  }
  // When we run a mapping we also set a mapping step name in there...
  //
  if ( !Const.isEmpty( mappingStepName ) ) {
    string.append( '[' ).append( mappingStepName ).append( ']' ).append( '.' );
  }
  string.append( transMeta.getName() );
  return string.toString();
}
/**
* Gets the mapping inputs for each step in the transformation.
*
* @return an array of MappingInputs
*/
public MappingInput[] findMappingInput() {
  if ( steps == null ) {
    return null;
  }
  // Collect every running step whose plugin ID identifies it as a MappingInput.
  List<MappingInput> found = new ArrayList<MappingInput>();
  for ( StepMetaDataCombi combi : steps ) {
    if ( combi.step.getStepID().equalsIgnoreCase( "MappingInput" ) ) {
      found.add( (MappingInput) combi.step );
    }
  }
  return found.toArray( new MappingInput[found.size()] );
}
/**
* Gets the mapping outputs for each step in the transformation.
*
* @return an array of MappingOutputs
*/
public MappingOutput[] findMappingOutput() {
  // Unlike findMappingInput(), a missing step list yields an empty array, not null.
  List<MappingOutput> found = new ArrayList<MappingOutput>();
  if ( steps != null ) {
    for ( StepMetaDataCombi combi : steps ) {
      if ( combi.step.getStepID().equalsIgnoreCase( "MappingOutput" ) ) {
        found.add( (MappingOutput) combi.step );
      }
    }
  }
  return found.toArray( new MappingOutput[found.size()] );
}
/**
* Find the StepInterface (thread) by looking it up using the name.
*
* @param stepname
* The name of the step to look for
* @param copy
* the copy number of the step to look for
* @return the StepInterface or null if nothing was found.
*/
public StepInterface getStepInterface( String stepname, int copy ) {
  if ( steps == null ) {
    return null;
  }
  // Match on the combi's recorded step name (case-insensitive) and copy number.
  for ( StepMetaDataCombi combi : steps ) {
    if ( combi.copy == copy && combi.stepname.equalsIgnoreCase( stepname ) ) {
      return combi.step;
    }
  }
  return null;
}
/**
* Gets the replay date. The replay date is used to indicate that the transformation was replayed (re-tried, run
* again) with that particular replay date. You can use this in Text File/Excel Input to allow you to save error line
* numbers into a file (SOURCE_FILE.line for example) During replay, only the lines that have errors in them are
* passed to the next steps, the other lines are ignored. This is for the use case: if the document contained errors
(bad dates, chars in numbers, etc), you simply send the document back to the source (the user/department that
* created it probably) and when you get it back, re-run the last transformation.
*
* @return the replay date
*/
public Date getReplayDate() {
  // Date of the replayed (re-tried) run, or null when this is not a replay.
  return this.replayDate;
}
/**
* Sets the replay date. The replay date is used to indicate that the transformation was replayed (re-tried, run
* again) with that particular replay date. You can use this in Text File/Excel Input to allow you to save error line
* numbers into a file (SOURCE_FILE.line for example) During replay, only the lines that have errors in them are
* passed to the next steps, the other lines are ignored. This is for the use case: if the document contained errors
(bad dates, chars in numbers, etc), you simply send the document back to the source (the user/department that
* created it probably) and when you get it back, re-run the last transformation.
*
* @param replayDate
* the new replay date
*/
public void setReplayDate( Date replayDate ) {
  // Marks this execution as a replay of the run identified by the given date.
  this.replayDate = replayDate;
}
/**
* Turn on safe mode during running: the transformation will run slower but with more checking enabled.
*
* @param safeModeEnabled
* true for safe mode
*/
public void setSafeModeEnabled( boolean safeModeEnabled ) {
  // Safe mode trades speed for extra runtime checking.
  this.safeModeEnabled = safeModeEnabled;
}
/**
* Checks whether safe mode is enabled.
*
* @return Returns true if the safe mode is enabled: the transformation will run slower but with more checking enabled
*/
public boolean isSafeModeEnabled() {
  // True when extra runtime checking (slower execution) is switched on.
  return this.safeModeEnabled;
}
/**
* This adds a row producer to the transformation that just got set up. It is preferable to run this BEFORE execute()
* but after prepareExecution()
*
* @param stepname
* The step to produce rows for
* @param copynr
* The copynr of the step to produce row for (normally 0 unless you have multiple copies running)
* @return the row producer
* @throws KettleException
* in case the thread/step to produce rows for could not be found.
* @see Trans#execute(String[])
* @see Trans#prepareExecution(String[])
*/
public RowProducer addRowProducer( String stepname, int copynr ) throws KettleException {
  StepInterface stepInterface = getStepInterface( stepname, copynr );
  if ( stepInterface == null ) {
    throw new KettleException( "Unable to find thread with name " + stepname + " and copy number " + copynr );
  }
  // We are going to add an extra RowSet to this stepInterface.
  // The row-set flavor must match the engine mode chosen for this transformation.
  RowSet rowSet;
  switch ( transMeta.getTransformationType() ) {
    case Normal:
      // Bounded blocking buffer for the multi-threaded engine.
      rowSet = new BlockingRowSet( transMeta.getSizeRowset() );
      break;
    case SerialSingleThreaded:
      // One-row hand-off for serial single-threaded execution.
      rowSet = new SingleRowRowSet();
      break;
    case SingleThreaded:
      // Unbounded queue driven by the single-threaded executor.
      rowSet = new QueueRowSet();
      break;
    default:
      throw new KettleException( "Unhandled transformation type: " + transMeta.getTransformationType() );
  }
  // Add this rowset to the list of active rowsets for the selected step
  stepInterface.getInputRowSets().add( rowSet );
  return new RowProducer( stepInterface, rowSet );
}
/**
* Gets the parent job, or null if there is no parent.
*
* @return the parent job, or null if there is no parent
*/
public Job getParentJob() {
  // Null when this transformation is not executed from within a job.
  return this.parentJob;
}
/**
* Sets the parent job for the transformation.
*
* @param parentJob
* The parent job to set
*/
public void setParentJob( Job parentJob ) {
  // Inherit the parent job's log level so child logging stays consistent.
  // NOTE(review): dereferences parentJob immediately — assumes callers never pass null; confirm.
  this.logLevel = parentJob.getLogLevel();
  this.log.setLogLevel( logLevel );
  this.parentJob = parentJob;
  // The transaction ID depends on the parent, so it must be recomputed here.
  transactionId = calculateTransactionId();
}
/**
* Finds the StepDataInterface (currently) associated with the specified step.
*
* @param stepname
* The name of the step to look for
* @param stepcopy
* The copy number (0 based) of the step
* @return The StepDataInterface or null if non found.
*/
public StepDataInterface getStepDataInterface( String stepname, int stepcopy ) {
  if ( steps == null ) {
    return null;
  }
  // Deliberately case-SENSITIVE name comparison, unlike findStepInterface().
  for ( StepMetaDataCombi combi : steps ) {
    if ( combi.copy == stepcopy && combi.stepname.equals( stepname ) ) {
      return combi.data;
    }
  }
  return null;
}
/**
* Checks whether the transformation has any steps that are halted.
*
* @return true if one or more steps are halted, false otherwise
*/
public boolean hasHaltedSteps() {
  // not yet 100% sure of this, if there are no steps... or none halted?
  if ( steps == null ) {
    return false;
  }
  for ( StepMetaDataCombi combi : steps ) {
    if ( combi.data.getStatus() == StepExecutionStatus.STATUS_HALTED ) {
      return true;
    }
  }
  return false;
}
/**
* Gets the job start date.
*
* @return the job start date
*/
public Date getJobStartDate() {
  // Start date passed down from the enclosing job, if any.
  return this.jobStartDate;
}
/**
* Gets the job end date.
*
* @return the job end date
*/
public Date getJobEndDate() {
  // End date passed down from the enclosing job, if any.
  return this.jobEndDate;
}
/**
* Sets the job end date.
*
* @param jobEndDate
* the jobEndDate to set
*/
public void setJobEndDate( Date jobEndDate ) {
  // Recorded by the enclosing job; no validation performed here.
  this.jobEndDate = jobEndDate;
}
/**
* Sets the job start date.
*
* @param jobStartDate
* the jobStartDate to set
*/
public void setJobStartDate( Date jobStartDate ) {
  // Recorded by the enclosing job; no validation performed here.
  this.jobStartDate = jobStartDate;
}
/**
* Get the batch ID that is passed from the parent job to the transformation. If nothing is passed, it's the
* transformation's batch ID
*
* @return the parent job's batch ID, or the transformation's batch ID if there is no parent job
*/
public long getPassedBatchId() {
  // Batch ID handed down by the parent job (or this transformation's own when standalone).
  return this.passedBatchId;
}
/**
* Sets the passed batch ID of the transformation from the batch ID of the parent job.
*
* @param jobBatchId
* the jobBatchId to set
*/
public void setPassedBatchId( long jobBatchId ) {
  // Propagates the parent job's batch ID into this transformation.
  this.passedBatchId = jobBatchId;
}
/**
* Gets the batch ID of the transformation.
*
* @return the batch ID of the transformation
*/
public long getBatchId() {
  // This transformation's own batch ID.
  return this.batchId;
}
/**
* Sets the batch ID of the transformation.
*
* @param batchId
* the batch ID to set
*/
public void setBatchId( long batchId ) {
  // Plain mutator; the ID is typically assigned by logging/repository code.
  this.batchId = batchId;
}
/**
* Gets the name of the thread that contains the transformation.
*
* @deprecated please use getTransactionId() instead
* @return the thread name
*/
@Deprecated
public String getThreadName() {
  // Deprecated alias kept for backward compatibility; use getTransactionId() instead.
  return this.threadName;
}
/**
* Sets the thread name for the transformation.
*
* @deprecated please use setTransactionId() instead
* @param threadName
* the thread name
*/
@Deprecated
public void setThreadName( String threadName ) {
  // Deprecated alias kept for backward compatibility; use setTransactionId() instead.
  this.threadName = threadName;
}
/**
* Gets the status of the transformation (Halting, Finished, Paused, etc.)
*
* @return the status of the transformation
*/
public String getStatus() {
  // A running transformation reports its live state first; the remaining checks
  // describe a transformation that has not (yet) been started.
  if ( running ) {
    if ( isStopped() ) {
      return STRING_HALTING;
    }
    if ( isFinished() ) {
      // Append an explicit marker when the run finished with errors.
      return getResult().getNrErrors() > 0 ? STRING_FINISHED + " (with errors)" : STRING_FINISHED;
    }
    return isPaused() ? STRING_PAUSED : STRING_RUNNING;
  }
  if ( isStopped() ) {
    return STRING_STOPPED;
  }
  if ( preparing ) {
    return STRING_PREPARING;
  }
  if ( initializing ) {
    return STRING_INITIALIZING;
  }
  return STRING_WAITING;
}
/**
* Checks whether the transformation is initializing.
*
* @return true if the transformation is initializing, false otherwise
*/
public boolean isInitializing() {
  // Life-cycle flag consulted by getStatus().
  return this.initializing;
}
/**
* Sets whether the transformation is initializing.
*
* @param initializing
* true if the transformation is initializing, false otherwise
*/
public void setInitializing( boolean initializing ) {
  // Life-cycle flag set by the execution engine.
  this.initializing = initializing;
}
/**
* Checks whether the transformation is preparing for execution.
*
* @return true if the transformation is preparing for execution, false otherwise
*/
public boolean isPreparing() {
  // Life-cycle flag consulted by getStatus().
  return this.preparing;
}
/**
* Sets whether the transformation is preparing for execution.
*
* @param preparing
* true if the transformation is preparing for execution, false otherwise
*/
public void setPreparing( boolean preparing ) {
  // Life-cycle flag set by the execution engine.
  this.preparing = preparing;
}
/**
* Checks whether the transformation is running.
*
* @return true if the transformation is running, false otherwise
*/
public boolean isRunning() {
  // Life-cycle flag consulted by getStatus().
  return this.running;
}
/**
* Sets whether the transformation is running.
*
* @param running
* true if the transformation is running, false otherwise
*/
public void setRunning( boolean running ) {
  // Life-cycle flag set by the execution engine.
  this.running = running;
}
/**
* Execute the transformation in a clustered fashion. The transformation steps are split and collected in a
* TransSplitter object
*
* @param transMeta
* the transformation's meta-data
* @param executionConfiguration
* the execution configuration
* @return the transformation splitter object
* @throws KettleException
* the kettle exception
*/
public static final TransSplitter executeClustered( final TransMeta transMeta,
  final TransExecutionConfiguration executionConfiguration ) throws KettleException {
  // The name is the remote identifier, so it is mandatory for clustered runs.
  if ( Const.isEmpty( transMeta.getName() ) ) {
    throw new KettleException(
      "The transformation needs a name to uniquely identify it by on the remote server." );
  }
  // Split the transformation into a master part and per-slave parts.
  TransSplitter transSplitter = new TransSplitter( transMeta );
  transSplitter.splitOriginalTransformation();
  // Pass the clustered run ID to allow for parallel execution of clustered transformations
  //
  executionConfiguration.getVariables().put(
    Const.INTERNAL_VARIABLE_CLUSTER_RUN_ID, transSplitter.getClusteredRunId() );
  executeClustered( transSplitter, executionConfiguration );
  return transSplitter;
}
/**
* Executes an existing TransSplitter, with the transformation already split.
*
* @param transSplitter
* the trans splitter
* @param executionConfiguration
* the execution configuration
* @throws KettleException
* the kettle exception
* @see org.pentaho.di.ui.spoon.delegates.SpoonTransformationDelegate
*/
public static final void executeClustered( final TransSplitter transSplitter,
  final TransExecutionConfiguration executionConfiguration ) throws KettleException {
  try {
    // Send the transformations to the servers...
    //
    // First the master and the slaves...
    //
    TransMeta master = transSplitter.getMaster();
    final SlaveServer[] slaves = transSplitter.getSlaveTargets();
    final Thread[] threads = new Thread[slaves.length];
    final Throwable[] errors = new Throwable[slaves.length];
    // Keep track of the various Carte object IDs
    //
    final Map<TransMeta, String> carteObjectMap = transSplitter.getCarteObjectMap();
    //
    // Send them all on their way...
    //
    SlaveServer masterServer = null;
    List<StepMeta> masterSteps = master.getTransHopSteps( false );
    if ( masterSteps.size() > 0 ) // If there is something that needs to be done on the master...
    {
      masterServer = transSplitter.getMasterServer();
      if ( executionConfiguration.isClusterPosting() ) {
        TransConfiguration transConfiguration = new TransConfiguration( master, executionConfiguration );
        Map<String, String> variables = transConfiguration.getTransExecutionConfiguration().getVariables();
        variables.put( Const.INTERNAL_VARIABLE_CLUSTER_SIZE, Integer.toString( slaves.length ) );
        variables.put( Const.INTERNAL_VARIABLE_CLUSTER_MASTER, "Y" );
        // Parameters override the variables but they need to pass over the configuration too...
        //
        Map<String, String> params = transConfiguration.getTransExecutionConfiguration().getParams();
        TransMeta ot = transSplitter.getOriginalTransformation();
        for ( String param : ot.listParameters() ) {
          // Resolution order: explicit parameter value, then parameter default, then variable.
          String value =
            Const.NVL( ot.getParameterValue( param ), Const.NVL( ot.getParameterDefault( param ), ot
              .getVariable( param ) ) );
          params.put( param, value );
        }
        String masterReply =
          masterServer.sendXML( transConfiguration.getXML(), AddTransServlet.CONTEXT_PATH + "/?xml=Y" );
        WebResult webResult = WebResult.fromXMLString( masterReply );
        if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
          throw new KettleException( "An error occurred sending the master transformation: "
            + webResult.getMessage() );
        }
        carteObjectMap.put( master, webResult.getId() );
      }
    }
    // Then the slaves...
    // These are started in a background thread.
    //
    for ( int i = 0; i < slaves.length; i++ ) {
      final int index = i;
      final TransMeta slaveTrans = transSplitter.getSlaveTransMap().get( slaves[i] );
      if ( executionConfiguration.isClusterPosting() ) {
        Runnable runnable = new Runnable() {
          public void run() {
            try {
              // Create a copy for local use... We get race-conditions otherwise...
              //
              TransExecutionConfiguration slaveTransExecutionConfiguration =
                (TransExecutionConfiguration) executionConfiguration.clone();
              TransConfiguration transConfiguration =
                new TransConfiguration( slaveTrans, slaveTransExecutionConfiguration );
              Map<String, String> variables = slaveTransExecutionConfiguration.getVariables();
              variables.put( Const.INTERNAL_VARIABLE_SLAVE_SERVER_NUMBER, Integer.toString( index ) );
              variables.put( Const.INTERNAL_VARIABLE_SLAVE_SERVER_NAME, slaves[index].getName() );
              variables.put( Const.INTERNAL_VARIABLE_CLUSTER_SIZE, Integer.toString( slaves.length ) );
              variables.put( Const.INTERNAL_VARIABLE_CLUSTER_MASTER, "N" );
              // Parameters override the variables but they need to pass over the configuration too...
              //
              Map<String, String> params = slaveTransExecutionConfiguration.getParams();
              TransMeta ot = transSplitter.getOriginalTransformation();
              for ( String param : ot.listParameters() ) {
                String value =
                  Const.NVL( ot.getParameterValue( param ), Const.NVL( ot.getParameterDefault( param ), ot
                    .getVariable( param ) ) );
                params.put( param, value );
              }
              String slaveReply =
                slaves[index].sendXML( transConfiguration.getXML(), AddTransServlet.CONTEXT_PATH + "/?xml=Y" );
              WebResult webResult = WebResult.fromXMLString( slaveReply );
              if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
                throw new KettleException( "An error occurred sending a slave transformation: "
                  + webResult.getMessage() );
              }
              carteObjectMap.put( slaveTrans, webResult.getId() );
            } catch ( Throwable t ) {
              // Capture per-slave failures; they are rethrown after all threads join.
              errors[index] = t;
            }
          }
        };
        threads[i] = new Thread( runnable );
      }
    }
    // Start the slaves
    for ( int i = 0; i < threads.length; i++ ) {
      if ( threads[i] != null ) {
        threads[i].start();
      }
    }
    // Wait until the slaves report back...
    // Sending the XML over is the heaviest part
    // Later we can do the others as well...
    //
    for ( int i = 0; i < threads.length; i++ ) {
      if ( threads[i] != null ) {
        threads[i].join();
        if ( errors[i] != null ) {
          throw new KettleException( errors[i] );
        }
      }
    }
    if ( executionConfiguration.isClusterPosting() ) {
      if ( executionConfiguration.isClusterPreparing() ) {
        // Prepare the master...
        if ( masterSteps.size() > 0 ) // If there is something that needs to be done on the master...
        {
          String carteObjectId = carteObjectMap.get( master );
          String masterReply =
            masterServer.execService( PrepareExecutionTransServlet.CONTEXT_PATH
              + "/?name=" + URLEncoder.encode( master.getName(), "UTF-8" ) + "&id="
              + URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" );
          WebResult webResult = WebResult.fromXMLString( masterReply );
          if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
            throw new KettleException(
              "An error occurred while preparing the execution of the master transformation: "
                + webResult.getMessage() );
          }
        }
        // Prepare the slaves
        // WG: Should these be threaded like the above initialization?
        for ( int i = 0; i < slaves.length; i++ ) {
          TransMeta slaveTrans = transSplitter.getSlaveTransMap().get( slaves[i] );
          String carteObjectId = carteObjectMap.get( slaveTrans );
          String slaveReply =
            slaves[i].execService( PrepareExecutionTransServlet.CONTEXT_PATH
              + "/?name=" + URLEncoder.encode( slaveTrans.getName(), "UTF-8" ) + "&id="
              + URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" );
          WebResult webResult = WebResult.fromXMLString( slaveReply );
          if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
            throw new KettleException(
              "An error occurred while preparing the execution of a slave transformation: "
                + webResult.getMessage() );
          }
        }
      }
      if ( executionConfiguration.isClusterStarting() ) {
        // Start the master...
        if ( masterSteps.size() > 0 ) // If there is something that needs to be done on the master...
        {
          String carteObjectId = carteObjectMap.get( master );
          String masterReply =
            masterServer.execService( StartExecutionTransServlet.CONTEXT_PATH
              + "/?name=" + URLEncoder.encode( master.getName(), "UTF-8" ) + "&id="
              + URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" );
          WebResult webResult = WebResult.fromXMLString( masterReply );
          if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
            throw new KettleException(
              "An error occurred while starting the execution of the master transformation: "
                + webResult.getMessage() );
          }
        }
        // Start the slaves
        // WG: Should these be threaded like the above initialization?
        for ( int i = 0; i < slaves.length; i++ ) {
          TransMeta slaveTrans = transSplitter.getSlaveTransMap().get( slaves[i] );
          String carteObjectId = carteObjectMap.get( slaveTrans );
          String slaveReply =
            slaves[i].execService( StartExecutionTransServlet.CONTEXT_PATH
              + "/?name=" + URLEncoder.encode( slaveTrans.getName(), "UTF-8" ) + "&id="
              + URLEncoder.encode( carteObjectId, "UTF-8" ) + "&xml=Y" );
          WebResult webResult = WebResult.fromXMLString( slaveReply );
          if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
            throw new KettleException(
              "An error occurred while starting the execution of a slave transformation: "
                + webResult.getMessage() );
          }
        }
      }
    }
  } catch ( KettleException ke ) {
    throw ke;
  } catch ( Exception e ) {
    throw new KettleException( "There was an error during transformation split", e );
  }
}
/**
* Monitors a clustered transformation every second,
* after all the transformations in a cluster schema are running.<br>
* Now we should verify that they are all running as they should.<br>
* If a transformation has an error, we should kill them all.<br>
* This should happen in a separate thread to prevent blocking of the UI.<br>
* <br>
* When the master and slave transformations have all finished, we should also run<br>
* a cleanup on those transformations to release sockets, etc.<br>
* <br>
*
* @param log
* the log interface channel
* @param transSplitter
* the transformation splitter object
* @param parentJob
* the parent job when executed in a job, otherwise just set to null
* @return the number of errors encountered
*/
public static final long monitorClusteredTransformation( LogChannelInterface log, TransSplitter transSplitter,
Job parentJob ) {
return monitorClusteredTransformation( log, transSplitter, parentJob, 1 ); // monitor every 1 seconds
}
/**
* Monitors a clustered transformation every second,
* after all the transformations in a cluster schema are running.<br>
* Now we should verify that they are all running as they should.<br>
* If a transformation has an error, we should kill them all.<br>
* This should happen in a separate thread to prevent blocking of the UI.<br>
* <br>
* When the master and slave transformations have all finished, we should also run<br>
* a cleanup on those transformations to release sockets, etc.<br>
* <br>
*
* @param log
* the subject to use for logging
* @param transSplitter
* the transformation splitter object
* @param parentJob
* the parent job when executed in a job, otherwise just set to null
* @param sleepTimeSeconds
* the sleep time in seconds in between slave transformation status polling
* @return the number of errors encountered
*/
public static final long monitorClusteredTransformation( LogChannelInterface log, TransSplitter transSplitter,
Job parentJob, int sleepTimeSeconds ) {
long errors = 0L;
//
// See if the remote transformations have finished.
// We could just look at the master, but I doubt that that is enough in all
// situations.
//
SlaveServer[] slaveServers = transSplitter.getSlaveTargets(); // <-- ask
// these guys
TransMeta[] slaves = transSplitter.getSlaves();
Map<TransMeta, String> carteObjectMap = transSplitter.getCarteObjectMap();
SlaveServer masterServer;
try {
masterServer = transSplitter.getMasterServer();
} catch ( KettleException e ) {
log.logError( "Error getting the master server", e );
masterServer = null;
errors++;
}
TransMeta masterTransMeta = transSplitter.getMaster();
boolean allFinished = false;
while ( !allFinished && errors == 0 && ( parentJob == null || !parentJob.isStopped() ) ) {
allFinished = true;
errors = 0L;
// Slaves first...
//
for ( int s = 0; s < slaveServers.length && allFinished && errors == 0; s++ ) {
try {
String carteObjectId = carteObjectMap.get( slaves[s] );
SlaveServerTransStatus transStatus =
slaveServers[s].getTransStatus( slaves[s].getName(), carteObjectId, 0 );
if ( transStatus.isRunning() ) {
if ( log.isDetailed() ) {
log.logDetailed( "Slave transformation on '" + slaveServers[s] + "' is still running." );
}
allFinished = false;
} else {
if ( log.isDetailed() ) {
log.logDetailed( "Slave transformation on '" + slaveServers[s] + "' has finished." );
}
}
errors += transStatus.getNrStepErrors();
} catch ( Exception e ) {
errors += 1;
log.logError( "Unable to contact slave server '"
+ slaveServers[s].getName() + "' to check slave transformation : " + e.toString() );
}
}
// Check the master too
if ( allFinished && errors == 0 && masterTransMeta != null && masterTransMeta.nrSteps() > 0 ) {
try {
String carteObjectId = carteObjectMap.get( masterTransMeta );
SlaveServerTransStatus transStatus =
masterServer.getTransStatus( masterTransMeta.getName(), carteObjectId, 0 );
if ( transStatus.isRunning() ) {
if ( log.isDetailed() ) {
log.logDetailed( "Master transformation is still running." );
}
allFinished = false;
} else {
if ( log.isDetailed() ) {
log.logDetailed( "Master transformation has finished." );
}
}
Result result = transStatus.getResult( transSplitter.getOriginalTransformation() );
errors += result.getNrErrors();
} catch ( Exception e ) {
errors += 1;
log.logError( "Unable to contact master server '"
+ masterServer.getName() + "' to check master transformation : " + e.toString() );
}
}
if ( ( parentJob != null && parentJob.isStopped() ) || errors != 0 ) {
//
// Stop all slaves and the master on the slave servers
//
for ( int s = 0; s < slaveServers.length && allFinished && errors == 0; s++ ) {
try {
String carteObjectId = carteObjectMap.get( slaves[s] );
WebResult webResult = slaveServers[s].stopTransformation( slaves[s].getName(), carteObjectId );
if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) {
log.logError( "Unable to stop slave transformation '"
+ slaves[s].getName() + "' : " + webResult.getMessage() );
}
} catch ( Exception e ) {
errors += 1;
log.logError( "Unable to contact slave server '"
+ slaveServers[s].getName() + "' to stop transformation : " + e.toString() );
}
}
try {
String carteObjectId = carteObjectMap.get( masterTransMeta );
WebResult webResult = masterServer.stopTransformation( masterTransMeta.getName(), carteObjectId );
if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) {
log.logError( "Unable to stop master transformation '"
+ masterServer.getName() + "' : " + webResult.getMessage() );
}
} catch ( Exception e ) {
errors += 1;
log.logError( "Unable to contact master server '"
+ masterServer.getName() + "' to stop the master : " + e.toString() );
}
}
//
// Keep waiting until all transformations have finished
// If needed, we stop them again and again until they yield.
//
if ( !allFinished ) {
// Not finished or error: wait a bit longer
if ( log.isDetailed() ) {
log.logDetailed( "Clustered transformation is still running, waiting a few seconds..." );
}
try {
Thread.sleep( sleepTimeSeconds * 2000 );
} catch ( Exception e ) {
// Ignore errors
} // Check all slaves every x seconds.
}
}
log.logBasic( "All transformations in the cluster have finished." );
errors += cleanupCluster( log, transSplitter );
return errors;
}
/**
* Cleanup the cluster, including the master and all slaves, and return the number of errors that occurred.
*
* @param log
* the log channel interface
* @param transSplitter
* the TransSplitter object
* @return the number of errors that occurred in the clustered transformation
*/
public static int cleanupCluster( LogChannelInterface log, TransSplitter transSplitter ) {
SlaveServer[] slaveServers = transSplitter.getSlaveTargets();
TransMeta[] slaves = transSplitter.getSlaves();
SlaveServer masterServer;
try {
masterServer = transSplitter.getMasterServer();
} catch ( KettleException e ) {
log.logError( "Unable to obtain the master server from the cluster", e );
return 1;
}
TransMeta masterTransMeta = transSplitter.getMaster();
int errors = 0;
// All transformations have finished, with or without error.
// Now run a cleanup on all the transformation on the master and the slaves.
//
// Slaves first...
//
for ( int s = 0; s < slaveServers.length; s++ ) {
try {
cleanupSlaveServer( transSplitter, slaveServers[s], slaves[s] );
} catch ( Exception e ) {
errors++;
log.logError( "Unable to contact slave server '"
+ slaveServers[s].getName() + "' to clean up slave transformation", e );
}
}
// Clean up the master too
//
if ( masterTransMeta != null && masterTransMeta.nrSteps() > 0 ) {
try {
cleanupSlaveServer( transSplitter, masterServer, masterTransMeta );
} catch ( Exception e ) {
errors++;
log.logError( "Unable to contact master server '"
+ masterServer.getName() + "' to clean up master transformation", e );
}
// Also de-allocate all ports used for this clustered transformation on the master.
//
try {
// Deallocate all ports belonging to this clustered run, not anything else
//
masterServer.deAllocateServerSockets( transSplitter.getOriginalTransformation().getName(), transSplitter
.getClusteredRunId() );
} catch ( Exception e ) {
errors++;
log.logError( "Unable to contact master server '"
+ masterServer.getName() + "' to clean up port sockets for transformation'"
+ transSplitter.getOriginalTransformation().getName() + "'", e );
}
}
return errors;
}
/**
* Cleanup the slave server as part of a clustered transformation.
*
* @param transSplitter
* the TransSplitter object
* @param slaveServer
* the slave server
* @param slaveTransMeta
* the slave transformation meta-data
* @throws KettleException
* if any errors occur during cleanup
*/
public static void cleanupSlaveServer( TransSplitter transSplitter, SlaveServer slaveServer,
TransMeta slaveTransMeta ) throws KettleException {
String transName = slaveTransMeta.getName();
try {
String carteObjectId = transSplitter.getCarteObjectMap().get( slaveTransMeta );
WebResult webResult = slaveServer.cleanupTransformation( transName, carteObjectId );
if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) {
throw new KettleException( "Unable to run clean-up on slave server '"
+ slaveServer + "' for transformation '" + transName + "' : " + webResult.getMessage() );
}
} catch ( Exception e ) {
throw new KettleException( "Unexpected error contacting slave server '"
+ slaveServer + "' to clear up transformation '" + transName + "'", e );
}
}
/**
* Gets the clustered transformation result.
*
* @param log
* the log channel interface
* @param transSplitter
* the TransSplitter object
* @param parentJob
* the parent job
* @return the clustered transformation result
*/
public static final Result getClusteredTransformationResult( LogChannelInterface log,
TransSplitter transSplitter, Job parentJob ) {
return getClusteredTransformationResult( log, transSplitter, parentJob, false );
}
  /**
   * Gets the clustered transformation result: the aggregated Result of the master transformation and
   * every slave transformation, as reported by the participating slave servers.
   *
   * @param log
   *          the log channel interface
   * @param transSplitter
   *          the TransSplitter object
   * @param parentJob
   *          the parent job
   * @param loggingRemoteWork
   *          when true, copy the remote execution logs into the local log
   * @return the clustered transformation result
   */
  public static final Result getClusteredTransformationResult( LogChannelInterface log,
    TransSplitter transSplitter, Job parentJob, boolean loggingRemoteWork ) {
    Result result = new Result();
    //
    // See if the remote transformations have finished.
    // We could just look at the master, but I doubt that that is enough in all situations.
    //
    SlaveServer[] slaveServers = transSplitter.getSlaveTargets(); // <-- ask these guys
    TransMeta[] slaves = transSplitter.getSlaves();
    SlaveServer masterServer;
    try {
      masterServer = transSplitter.getMasterServer();
    } catch ( KettleException e ) {
      // NOTE(review): masterServer stays null after this; if the master block below still executes,
      // masterServer.getTransStatus throws NPE and the catch then NPEs again on
      // masterServer.getName() — confirm callers never reach that combination.
      log.logError( "Error getting the master server", e );
      masterServer = null;
      result.setNrErrors( result.getNrErrors() + 1 );
    }
    TransMeta master = transSplitter.getMaster();
    // Collect the slave results first...
    //
    for ( int s = 0; s < slaveServers.length; s++ ) {
      try {
        // Get the detailed status of the slave transformation...
        //
        // NOTE(review): an empty string is passed as the carte object id here, unlike
        // monitorClusteredTransformation which looks it up in the carte object map — confirm this
        // is intentional (e.g. "latest by name" semantics on the server side).
        SlaveServerTransStatus transStatus = slaveServers[s].getTransStatus( slaves[s].getName(), "", 0 );
        Result transResult = transStatus.getResult( slaves[s] );
        result.add( transResult );
        if ( loggingRemoteWork ) {
          log.logBasic( "-- Slave : " + slaveServers[s].getName() );
          log.logBasic( transStatus.getLoggingString() );
        }
      } catch ( Exception e ) {
        result.setNrErrors( result.getNrErrors() + 1 );
        log.logError( "Unable to contact slave server '"
          + slaveServers[s].getName() + "' to get result of slave transformation : " + e.toString() );
      }
    }
    // Collect the master result too, but only when it runs steps of its own.
    //
    if ( master != null && master.nrSteps() > 0 ) {
      try {
        // Get the detailed status of the master transformation...
        //
        SlaveServerTransStatus transStatus = masterServer.getTransStatus( master.getName(), "", 0 );
        Result transResult = transStatus.getResult( master );
        result.add( transResult );
        if ( loggingRemoteWork ) {
          log.logBasic( "-- Master : " + masterServer.getName() );
          log.logBasic( transStatus.getLoggingString() );
        }
      } catch ( Exception e ) {
        result.setNrErrors( result.getNrErrors() + 1 );
        log.logError( "Unable to contact master server '"
          + masterServer.getName() + "' to get result of master transformation : " + e.toString() );
      }
    }
    return result;
  }
/**
* Send the transformation for execution to a Carte slave server.
*
* @param transMeta
* the transformation meta-data
* @param executionConfiguration
* the transformation execution configuration
* @param repository
* the repository
* @return The Carte object ID on the server.
* @throws KettleException
* if any errors occur during the dispatch to the slave server
*/
public static String sendToSlaveServer( TransMeta transMeta, TransExecutionConfiguration executionConfiguration,
Repository repository, IMetaStore metaStore ) throws KettleException {
String carteObjectId;
SlaveServer slaveServer = executionConfiguration.getRemoteServer();
if ( slaveServer == null ) {
throw new KettleException( "No slave server specified" );
}
if ( Const.isEmpty( transMeta.getName() ) ) {
throw new KettleException(
"The transformation needs a name to uniquely identify it by on the remote server." );
}
try {
// Inject certain internal variables to make it more intuitive.
//
Map<String, String> vars = new HashMap<String, String>();
for ( String var : Const.INTERNAL_TRANS_VARIABLES ) {
vars.put( var, transMeta.getVariable( var ) );
}
for ( String var : Const.INTERNAL_JOB_VARIABLES ) {
vars.put( var, transMeta.getVariable( var ) );
}
executionConfiguration.getVariables().putAll( vars );
slaveServer.injectVariables( executionConfiguration.getVariables() );
slaveServer.getLogChannel().setLogLevel( executionConfiguration.getLogLevel() );
if ( executionConfiguration.isPassingExport() ) {
// First export the job...
//
FileObject tempFile =
KettleVFS.createTempFile( "transExport", ".zip", System.getProperty( "java.io.tmpdir" ), transMeta );
TopLevelResource topLevelResource =
ResourceUtil.serializeResourceExportInterface(
tempFile.getName().toString(), transMeta, transMeta, repository, metaStore, executionConfiguration
.getXML(), CONFIGURATION_IN_EXPORT_FILENAME );
// Send the zip file over to the slave server...
//
String result =
slaveServer.sendExport(
topLevelResource.getArchiveName(), AddExportServlet.TYPE_TRANS, topLevelResource
.getBaseResourceName() );
WebResult webResult = WebResult.fromXMLString( result );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException(
"There was an error passing the exported transformation to the remote server: "
+ Const.CR + webResult.getMessage() );
}
carteObjectId = webResult.getId();
} else {
// Now send it off to the remote server...
//
String xml = new TransConfiguration( transMeta, executionConfiguration ).getXML();
String reply = slaveServer.sendXML( xml, AddTransServlet.CONTEXT_PATH + "/?xml=Y" );
WebResult webResult = WebResult.fromXMLString( reply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException( "There was an error posting the transformation on the remote server: "
+ Const.CR + webResult.getMessage() );
}
carteObjectId = webResult.getId();
}
// Prepare the transformation
//
String reply =
slaveServer.execService( PrepareExecutionTransServlet.CONTEXT_PATH
+ "/?name=" + URLEncoder.encode( transMeta.getName(), "UTF-8" ) + "&xml=Y&id=" + carteObjectId );
WebResult webResult = WebResult.fromXMLString( reply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException(
"There was an error preparing the transformation for excution on the remote server: "
+ Const.CR + webResult.getMessage() );
}
// Start the transformation
//
reply =
slaveServer.execService( StartExecutionTransServlet.CONTEXT_PATH
+ "/?name=" + URLEncoder.encode( transMeta.getName(), "UTF-8" ) + "&xml=Y&id=" + carteObjectId );
webResult = WebResult.fromXMLString( reply );
if ( !webResult.getResult().equalsIgnoreCase( WebResult.STRING_OK ) ) {
throw new KettleException( "There was an error starting the transformation on the remote server: "
+ Const.CR + webResult.getMessage() );
}
return carteObjectId;
} catch ( KettleException ke ) {
throw ke;
} catch ( Exception e ) {
throw new KettleException( e );
}
}
/**
* Checks whether the transformation is ready to start (i.e. execution preparation was successful)
*
* @return true if the transformation was prepared for execution successfully, false otherwise
* @see org.pentaho.di.trans.Trans#prepareExecution(String[])
*/
public boolean isReadyToStart() {
return readyToStart;
}
  /**
   * Sets the internal kettle variables (transformation filename, directory, name and repository
   * directory) on this transformation's variable space.
   *
   * @param var
   *          the variable space used to resolve the transformation's filename
   */
  public void setInternalKettleVariables( VariableSpace var ) {
    if ( transMeta != null && !Const.isEmpty( transMeta.getFilename() ) ) // we have a filename that's defined.
    {
      try {
        FileObject fileObject = KettleVFS.getFileObject( transMeta.getFilename(), var );
        FileName fileName = fileObject.getName();
        // The filename of the transformation
        variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, fileName.getBaseName() );
        // The directory of the transformation
        FileName fileDir = fileName.getParent();
        variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, fileDir.getURI() );
      } catch ( KettleFileException e ) {
        // VFS could not resolve the filename: blank out both variables rather than failing.
        variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, "" );
        variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, "" );
      }
    } else {
      variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, "" );
      variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_NAME, "" );
    }
    // The name of the transformation
    // NOTE(review): transMeta is dereferenced unconditionally here; the null guard above only
    // covers the filename handling, so a null transMeta would throw NPE — confirm callers.
    variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_NAME, Const.NVL( transMeta.getName(), "" ) );
    // TODO PUT THIS INSIDE OF THE "IF"
    // The name of the directory in the repository
    variables.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY, transMeta
      .getRepositoryDirectory() != null ? transMeta.getRepositoryDirectory().getPath() : "" );
    // Here we don't clear the definition of the job specific parameters, as they may come in handy.
    // A transformation can be called from a job and may inherit the job internal variables
    // but the other around is not possible.
  }
  /**
   * Copies variables from a given variable space to this transformation.
   *
   * @param space
   *          the variable space
   * @see org.pentaho.di.core.variables.VariableSpace#copyVariablesFrom(org.pentaho.di.core.variables.VariableSpace)
   */
  public void copyVariablesFrom( VariableSpace space ) {
    variables.copyVariablesFrom( space );
  }
  /**
   * Substitutes any variable values into the given string, and returns the resolved string.
   *
   * @param aString
   *          the string to resolve against environment variables
   * @return the string after variables have been resolved/substituted
   * @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String)
   */
  public String environmentSubstitute( String aString ) {
    return variables.environmentSubstitute( aString );
  }
  /**
   * Substitutes any variable values into each of the given strings, and returns an array containing the resolved
   * string(s).
   *
   * @param aString
   *          an array of strings to resolve against environment variables
   * @return the array of strings after variables have been resolved/substituted
   * @see org.pentaho.di.core.variables.VariableSpace#environmentSubstitute(java.lang.String[])
   */
  public String[] environmentSubstitute( String[] aString ) {
    return variables.environmentSubstitute( aString );
  }
  /**
   * Substitutes variable and field values into the given string, resolving field references against
   * the supplied row. Delegates to the underlying variable space.
   *
   * @param aString
   *          the string to resolve
   * @param rowMeta
   *          the metadata of the row used for field substitution
   * @param rowData
   *          the data of the row used for field substitution
   * @return the resolved string
   * @throws KettleValueException
   *           if a field value cannot be resolved
   */
  public String fieldSubstitute( String aString, RowMetaInterface rowMeta, Object[] rowData ) throws KettleValueException {
    return variables.fieldSubstitute( aString, rowMeta, rowData );
  }
  /**
   * Gets the parent variable space.
   *
   * @return the parent variable space
   * @see org.pentaho.di.core.variables.VariableSpace#getParentVariableSpace()
   */
  public VariableSpace getParentVariableSpace() {
    return variables.getParentVariableSpace();
  }
  /**
   * Sets the parent variable space.
   *
   * @param parent
   *          the new parent variable space
   * @see org.pentaho.di.core.variables.VariableSpace#setParentVariableSpace(
   *      org.pentaho.di.core.variables.VariableSpace)
   */
  public void setParentVariableSpace( VariableSpace parent ) {
    variables.setParentVariableSpace( parent );
  }
  /**
   * Gets the value of the specified variable, or returns a default value if no such variable exists.
   *
   * @param variableName
   *          the variable name
   * @param defaultValue
   *          the default value
   * @return the value of the specified variable, or returns a default value if no such variable exists
   * @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String, java.lang.String)
   */
  public String getVariable( String variableName, String defaultValue ) {
    return variables.getVariable( variableName, defaultValue );
  }
  /**
   * Gets the value of the specified variable from the underlying variable space.
   * Unlike {@link #getVariable(String, String)}, no default value can be supplied here; the result
   * for an undefined variable is whatever the underlying variable space returns.
   *
   * @param variableName
   *          the variable name
   * @return the value of the specified variable
   * @see org.pentaho.di.core.variables.VariableSpace#getVariable(java.lang.String)
   */
  public String getVariable( String variableName ) {
    return variables.getVariable( variableName );
  }
/**
* Returns a boolean representation of the specified variable after performing any necessary substitution. Truth
* values include case-insensitive versions of "Y", "YES", "TRUE" or "1".
*
* @param variableName
* the variable name
* @param defaultValue
* the default value
* @return a boolean representation of the specified variable after performing any necessary substitution
* @see org.pentaho.di.core.variables.VariableSpace#getBooleanValueOfVariable(java.lang.String, boolean)
*/
public boolean getBooleanValueOfVariable( String variableName, boolean defaultValue ) {
if ( !Const.isEmpty( variableName ) ) {
String value = environmentSubstitute( variableName );
if ( !Const.isEmpty( value ) ) {
return ValueMeta.convertStringToBoolean( value );
}
}
return defaultValue;
}
  /**
   * Sets the values of the transformation's variables to the values from the parent variables.
   *
   * @param parent
   *          the parent
   * @see org.pentaho.di.core.variables.VariableSpace#initializeVariablesFrom(
   *      org.pentaho.di.core.variables.VariableSpace)
   */
  public void initializeVariablesFrom( VariableSpace parent ) {
    variables.initializeVariablesFrom( parent );
  }
  /**
   * Gets a list of variable names for the transformation.
   *
   * @return a list of variable names
   * @see org.pentaho.di.core.variables.VariableSpace#listVariables()
   */
  public String[] listVariables() {
    return variables.listVariables();
  }
  /**
   * Sets the value of the specified variable to the specified value.
   *
   * @param variableName
   *          the variable name
   * @param variableValue
   *          the variable value
   * @see org.pentaho.di.core.variables.VariableSpace#setVariable(java.lang.String, java.lang.String)
   */
  public void setVariable( String variableName, String variableValue ) {
    variables.setVariable( variableName, variableValue );
  }
  /**
   * Shares a variable space from another variable space. This means that the object should take over the space used as
   * argument. Note that the given space replaces this transformation's variable space by reference,
   * so subsequent changes are visible to both holders.
   *
   * @param space
   *          the variable space
   * @see org.pentaho.di.core.variables.VariableSpace#shareVariablesWith(org.pentaho.di.core.variables.VariableSpace)
   */
  public void shareVariablesWith( VariableSpace space ) {
    variables = space;
  }
  /**
   * Injects variables using the given Map. The behavior should be that the properties object will be stored and at the
   * time the VariableSpace is initialized (or upon calling this method if the space is already initialized). After
   * injecting the link of the properties object should be removed.
   *
   * @param prop
   *          the property map
   * @see org.pentaho.di.core.variables.VariableSpace#injectVariables(java.util.Map)
   */
  public void injectVariables( Map<String, String> prop ) {
    variables.injectVariables( prop );
  }
  /**
   * Pauses the transformation (pause all steps).
   */
  public void pauseRunning() {
    // Raise the paused flag before pausing the individual steps.
    paused.set( true );
    for ( StepMetaDataCombi combi : steps ) {
      combi.step.pauseRunning();
    }
  }
  /**
   * Resumes running the transformation after a pause (resume all steps).
   */
  public void resumeRunning() {
    // Resume the individual steps first, then lower the paused flag.
    for ( StepMetaDataCombi combi : steps ) {
      combi.step.resumeRunning();
    }
    paused.set( false );
  }
/**
* Checks whether the transformation is being previewed.
*
* @return true if the transformation is being previewed, false otherwise
*/
public boolean isPreview() {
return preview;
}
/**
* Sets whether the transformation is being previewed.
*
* @param preview
* true if the transformation is being previewed, false otherwise
*/
public void setPreview( boolean preview ) {
this.preview = preview;
}
/**
* Gets the repository object for the transformation.
*
* @return the repository
*/
public Repository getRepository() {
if ( repository == null ) {
// Does the transmeta have a repo?
// This is a valid case, when a non-repo trans is attempting to retrieve
// a transformation in the repository.
if ( transMeta != null ) {
return transMeta.getRepository();
}
}
return repository;
}
  /**
   * Sets the repository object for the transformation.
   *
   * @param repository
   *          the repository object to set
   */
  public void setRepository( Repository repository ) {
    this.repository = repository;
    // Keep the transformation meta-data pointing at the same repository.
    if ( transMeta != null ) {
      transMeta.setRepository( repository );
    }
  }
  /**
   * Gets a named list (map) of step performance snapshots.
   * Presumably keyed by step name, each value holding the snapshots collected for that step — confirm
   * against the code that populates the map.
   *
   * @return a named list (map) of step performance snapshots
   */
  public Map<String, List<StepPerformanceSnapShot>> getStepPerformanceSnapShots() {
    return stepPerformanceSnapShots;
  }
  /**
   * Sets the named list (map) of step performance snapshots.
   *
   * @param stepPerformanceSnapShots
   *          a named list (map) of step performance snapshots to set
   */
  public void setStepPerformanceSnapShots( Map<String, List<StepPerformanceSnapShot>> stepPerformanceSnapShots ) {
    this.stepPerformanceSnapShots = stepPerformanceSnapShots;
  }
  /**
   * Gets a list of the transformation listeners.
   * Please do not attempt to modify this list externally.
   * Returned list is mutable only for backward compatibility purposes.
   *
   * @return the transListeners
   */
  public List<TransListener> getTransListeners() {
    return transListeners;
  }
  /**
   * Sets the list of transformation listeners.
   * The supplied list is wrapped in a synchronized list, so individual add/remove operations are
   * thread-safe.
   *
   * @param transListeners
   *          the transListeners to set
   */
  public void setTransListeners( List<TransListener> transListeners ) {
    this.transListeners = Collections.synchronizedList( transListeners );
  }
  /**
   * Adds a transformation listener.
   *
   * @param transListener
   *          the trans listener
   */
  public void addTransListener( TransListener transListener ) {
    // PDI-5229 sync added (presumably to guard against concurrent compound operations on the
    // listener list elsewhere — see the JIRA issue for details)
    synchronized ( transListeners ) {
      transListeners.add( transListener );
    }
  }
  /**
   * Sets the list of stop-event listeners for the transformation.
   * The supplied list is wrapped in a synchronized list.
   *
   * @param transStoppedListeners
   *          the list of stop-event listeners to set
   */
  public void setTransStoppedListeners( List<TransStoppedListener> transStoppedListeners ) {
    this.transStoppedListeners = Collections.synchronizedList( transStoppedListeners );
  }
  /**
   * Gets the list of stop-event listeners for the transformation. This is not concurrent safe.
   * Please note this is mutable implementation only for backward compatibility reasons.
   *
   * @return the list of stop-event listeners
   */
  public List<TransStoppedListener> getTransStoppedListeners() {
    return transStoppedListeners;
  }
  /**
   * Adds a stop-event listener to the transformation.
   *
   * @param transStoppedListener
   *          the stop-event listener to add
   */
  public void addTransStoppedListener( TransStoppedListener transStoppedListener ) {
    // NOTE(review): unlike addTransListener, this add is not wrapped in an explicit synchronized
    // block. The backing list is a synchronized list (see setTransStoppedListeners), so a single
    // add is still safe — confirm there are no unsynchronized compound operations on it.
    transStoppedListeners.add( transStoppedListener );
  }
  /**
   * Checks if the transformation is paused.
   *
   * @return true if the transformation is paused, false otherwise
   */
  public boolean isPaused() {
    return paused.get();
  }
  /**
   * Checks if the transformation is stopped.
   *
   * @return true if the transformation is stopped, false otherwise
   */
  public boolean isStopped() {
    return stopped.get();
  }
/**
* Monitors a remote transformation every 5 seconds.
*
* @param log
* the log channel interface
* @param carteObjectId
* the Carte object ID
* @param transName
* the transformation name
* @param remoteSlaveServer
* the remote slave server
*/
public static void monitorRemoteTransformation( LogChannelInterface log, String carteObjectId, String transName,
SlaveServer remoteSlaveServer ) {
monitorRemoteTransformation( log, carteObjectId, transName, remoteSlaveServer, 5 );
}
  /**
   * Monitors a remote transformation at the specified interval until it finishes or reports errors,
   * then runs a clean-up of the transformation on the remote server.
   *
   * @param log
   *          the log channel interface
   * @param carteObjectId
   *          the Carte object ID
   * @param transName
   *          the transformation name
   * @param remoteSlaveServer
   *          the remote slave server
   * @param sleepTimeSeconds
   *          the sleep time (in seconds) between status polls
   */
  public static void monitorRemoteTransformation( LogChannelInterface log, String carteObjectId, String transName,
    SlaveServer remoteSlaveServer, int sleepTimeSeconds ) {
    long errors = 0;
    boolean allFinished = false;
    while ( !allFinished && errors == 0 ) {
      allFinished = true;
      errors = 0L;
      // Check the remote server
      // (both conditions were just re-initialized above, so this guard is always true here)
      if ( allFinished && errors == 0 ) {
        try {
          SlaveServerTransStatus transStatus = remoteSlaveServer.getTransStatus( transName, carteObjectId, 0 );
          if ( transStatus.isRunning() ) {
            if ( log.isDetailed() ) {
              log.logDetailed( transName, "Remote transformation is still running." );
            }
            allFinished = false;
          } else {
            if ( log.isDetailed() ) {
              log.logDetailed( transName, "Remote transformation has finished." );
            }
          }
          // Step errors reported by the remote server terminate the polling loop.
          Result result = transStatus.getResult();
          errors += result.getNrErrors();
        } catch ( Exception e ) {
          errors += 1;
          log.logError( transName, "Unable to contact remote slave server '"
            + remoteSlaveServer.getName() + "' to check transformation status : " + e.toString() );
        }
      }
      //
      // Keep waiting until all transformations have finished
      // If needed, we stop them again and again until they yield.
      //
      if ( !allFinished ) {
        // Not finished or error: wait a bit longer
        if ( log.isDetailed() ) {
          log.logDetailed( transName, "The remote transformation is still running, waiting a few seconds..." );
        }
        try {
          Thread.sleep( sleepTimeSeconds * 1000 );
        } catch ( Exception e ) {
          // Ignore errors
        } // Check all slaves every x seconds.
      }
    }
    log.logMinimal( transName, "The remote transformation has finished." );
    // Clean up the remote transformation
    //
    // NOTE(review): errors is still incremented below, but the method returns void, so cleanup
    // failures are only visible through the log — confirm that is intentional.
    try {
      WebResult webResult = remoteSlaveServer.cleanupTransformation( transName, carteObjectId );
      if ( !WebResult.STRING_OK.equals( webResult.getResult() ) ) {
        log.logError( transName, "Unable to run clean-up on remote transformation '"
          + transName + "' : " + webResult.getMessage() );
        errors += 1;
      }
    } catch ( Exception e ) {
      errors += 1;
      log.logError( transName, "Unable to contact slave server '"
        + remoteSlaveServer.getName() + "' to clean up transformation : " + e.toString() );
    }
  }
  /**
   * Adds a parameter definition to this transformation. Delegates to the backing NamedParams store.
   *
   * @param key the name of the parameter
   * @param defValue the default value for the parameter
   * @param description the description of the parameter
   * @throws DuplicateParamException if a parameter with this name is already defined
   * @see org.pentaho.di.core.parameters.NamedParams#addParameterDefinition(java.lang.String, java.lang.String,
   *      java.lang.String)
   */
  public void addParameterDefinition( String key, String defValue, String description ) throws DuplicateParamException {
    namedParams.addParameterDefinition( key, defValue, description );
  }
  /**
   * Gets the default value of the specified parameter. Delegates to the backing NamedParams store.
   *
   * @param key the name of the parameter
   * @return the default value of the parameter
   * @throws UnknownParamException if the parameter does not exist
   * @see org.pentaho.di.core.parameters.NamedParams#getParameterDefault(java.lang.String)
   */
  public String getParameterDefault( String key ) throws UnknownParamException {
    return namedParams.getParameterDefault( key );
  }
  /**
   * Gets the description of the specified parameter. Delegates to the backing NamedParams store.
   *
   * @param key the name of the parameter
   * @return the parameter description
   * @throws UnknownParamException if the parameter does not exist
   * @see org.pentaho.di.core.parameters.NamedParams#getParameterDescription(java.lang.String)
   */
  public String getParameterDescription( String key ) throws UnknownParamException {
    return namedParams.getParameterDescription( key );
  }
  /**
   * Gets the value of the specified parameter. Delegates to the backing NamedParams store.
   *
   * @param key the name of the parameter
   * @return the parameter value
   * @throws UnknownParamException if the parameter does not exist
   * @see org.pentaho.di.core.parameters.NamedParams#getParameterValue(java.lang.String)
   */
  public String getParameterValue( String key ) throws UnknownParamException {
    return namedParams.getParameterValue( key );
  }
  /**
   * Gets a list of the parameters for the transformation. Delegates to the backing NamedParams store.
   *
   * @return an array of strings containing the names of all parameters for the transformation
   * @see org.pentaho.di.core.parameters.NamedParams#listParameters()
   */
  public String[] listParameters() {
    return namedParams.listParameters();
  }
  /**
   * Sets the value for the specified parameter. Delegates to the backing NamedParams store.
   *
   * @param key the name of the parameter
   * @param value the value to assign to the parameter
   * @throws UnknownParamException if the parameter does not exist
   * @see org.pentaho.di.core.parameters.NamedParams#setParameterValue(java.lang.String, java.lang.String)
   */
  public void setParameterValue( String key, String value ) throws UnknownParamException {
    namedParams.setParameterValue( key, value );
  }
  /**
   * Removes all parameter definitions from this transformation. Delegates to the backing NamedParams store.
   *
   * @see org.pentaho.di.core.parameters.NamedParams#eraseParameters()
   */
  public void eraseParameters() {
    namedParams.eraseParameters();
  }
  /**
   * Clears the values of all parameters, keeping their definitions. Delegates to the backing NamedParams store.
   *
   * @see org.pentaho.di.core.parameters.NamedParams#clearParameters()
   */
  public void clearParameters() {
    namedParams.clearParameters();
  }
/**
* Activates all parameters by setting their values. If no values already exist, the method will attempt to set the
* parameter to the default value. If no default value exists, the method will set the value of the parameter to the
* empty string ("").
*
* @see org.pentaho.di.core.parameters.NamedParams#activateParameters()
*/
public void activateParameters() {
String[] keys = listParameters();
for ( String key : keys ) {
String value;
try {
value = getParameterValue( key );
} catch ( UnknownParamException e ) {
value = "";
}
String defValue;
try {
defValue = getParameterDefault( key );
} catch ( UnknownParamException e ) {
defValue = "";
}
if ( Const.isEmpty( value ) ) {
setVariable( key, Const.NVL( defValue, "" ) );
} else {
setVariable( key, Const.NVL( value, "" ) );
}
}
}
  /**
   * Copies parameters from a NamedParams object into this transformation's parameter store.
   *
   * @param params the NamedParams object from which to copy the parameters
   * @see org.pentaho.di.core.parameters.NamedParams#copyParametersFrom(org.pentaho.di.core.parameters.NamedParams)
   */
  public void copyParametersFrom( NamedParams params ) {
    namedParams.copyParametersFrom( params );
  }
  /**
   * Gets the parent transformation, which is null if no parent transformation exists.
   *
   * @return a reference to the parent transformation's Trans object, or null if no parent transformation exists
   */
  public Trans getParentTrans() {
    return parentTrans;
  }
/**
* Sets the parent transformation.
*
* @param parentTrans
* the parentTrans to set
*/
public void setParentTrans( Trans parentTrans ) {
this.logLevel = parentTrans.getLogLevel();
this.log.setLogLevel( logLevel );
this.parentTrans = parentTrans;
transactionId = calculateTransactionId();
}
  /**
   * Gets the mapping step name.
   *
   * @return the name of the mapping step that created this transformation, or null when it was not created by one
   */
  public String getMappingStepName() {
    return mappingStepName;
  }
  /**
   * Sets the mapping step name.
   *
   * @param mappingStepName the name of the mapping step that created this transformation
   */
  public void setMappingStepName( String mappingStepName ) {
    this.mappingStepName = mappingStepName;
  }
  /**
   * Sets the socket repository.
   *
   * @param socketRepository the new socket repository
   */
  public void setSocketRepository( SocketRepository socketRepository ) {
    this.socketRepository = socketRepository;
  }
  /**
   * Gets the socket repository.
   *
   * @return the socket repository
   */
  public SocketRepository getSocketRepository() {
    return socketRepository;
  }
  /**
   * Gets the object name for logging purposes; identical to the transformation name.
   *
   * @return the object name
   * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectName()
   */
  public String getObjectName() {
    return getName();
  }
  /**
   * Gets the object copy. For Trans, this always returns null (copies only apply to steps).
   *
   * @return null
   * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectCopy()
   */
  public String getObjectCopy() {
    return null;
  }
/**
* Gets the filename of the transformation, or null if no filename exists
*
* @return the filename
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getFilename()
*/
public String getFilename() {
if ( transMeta == null ) {
return null;
}
return transMeta.getFilename();
}
  /**
   * Gets the log channel ID of this transformation's log channel.
   *
   * @return the log channel ID
   * @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogChannelId()
   */
  public String getLogChannelId() {
    return log.getLogChannelId();
  }
/**
* Gets the object ID.
*
* @return the object ID
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectId()
*/
public ObjectId getObjectId() {
if ( transMeta == null ) {
return null;
}
return transMeta.getObjectId();
}
/**
* Gets the object revision.
*
* @return the object revision
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectRevision()
*/
public ObjectRevision getObjectRevision() {
if ( transMeta == null ) {
return null;
}
return transMeta.getObjectRevision();
}
  /**
   * Gets the object type. For Trans, this always returns LoggingObjectType.TRANS.
   *
   * @return the object type
   * @see org.pentaho.di.core.logging.LoggingObjectInterface#getObjectType()
   */
  public LoggingObjectType getObjectType() {
    return LoggingObjectType.TRANS;
  }
  /**
   * Gets the parent logging object interface.
   *
   * @return the parent logging object
   * @see org.pentaho.di.core.logging.LoggingObjectInterface#getParent()
   */
  public LoggingObjectInterface getParent() {
    return parent;
  }
/**
* Gets the repository directory.
*
* @return the repository directory
* @see org.pentaho.di.core.logging.LoggingObjectInterface#getRepositoryDirectory()
*/
public RepositoryDirectoryInterface getRepositoryDirectory() {
if ( transMeta == null ) {
return null;
}
return transMeta.getRepositoryDirectory();
}
  /**
   * Gets the log level of this transformation.
   *
   * @return the log level
   * @see org.pentaho.di.core.logging.LoggingObjectInterface#getLogLevel()
   */
  public LogLevel getLogLevel() {
    return logLevel;
  }
  /**
   * Sets the log level on both this transformation and its log channel.
   *
   * @param logLevel the new log level
   */
  public void setLogLevel( LogLevel logLevel ) {
    this.logLevel = logLevel;
    log.setLogLevel( logLevel );
  }
/**
* Gets the logging hierarchy.
*
* @return the logging hierarchy
*/
public List<LoggingHierarchy> getLoggingHierarchy() {
List<LoggingHierarchy> hierarchy = new ArrayList<LoggingHierarchy>();
List<String> childIds = LoggingRegistry.getInstance().getLogChannelChildren( getLogChannelId() );
for ( String childId : childIds ) {
LoggingObjectInterface loggingObject = LoggingRegistry.getInstance().getLoggingObject( childId );
if ( loggingObject != null ) {
hierarchy.add( new LoggingHierarchy( getLogChannelId(), batchId, loggingObject ) );
}
}
return hierarchy;
}
  /**
   * Gets the active sub-transformations.
   *
   * @return a map (by name) of the active sub-transformations
   */
  public Map<String, Trans> getActiveSubtransformations() {
    return activeSubtransformations;
  }
  /**
   * Gets the active sub-jobs.
   *
   * @return a map (by name) of the active sub-jobs
   */
  public Map<String, Job> getActiveSubjobs() {
    return activeSubjobs;
  }
  /**
   * Gets the container object ID.
   *
   * @return the Carte object ID
   */
  public String getContainerObjectId() {
    return containerObjectId;
  }
  /**
   * Sets the container object ID.
   *
   * @param containerObjectId the Carte object ID to set
   */
  public void setContainerObjectId( String containerObjectId ) {
    this.containerObjectId = containerObjectId;
  }
  /**
   * Gets the registration date. For Trans, this always returns null.
   *
   * @return null
   */
  public Date getRegistrationDate() {
    return null;
  }
  /**
   * Sets the servlet print writer.
   *
   * @param servletPrintWriter the new servlet print writer
   */
  public void setServletPrintWriter( PrintWriter servletPrintWriter ) {
    this.servletPrintWriter = servletPrintWriter;
  }
  /**
   * Gets the servlet print writer.
   *
   * @return the servlet print writer
   */
  public PrintWriter getServletPrintWriter() {
    return servletPrintWriter;
  }
  /**
   * Gets the name of the executing server.
   *
   * @return the executingServer
   */
  public String getExecutingServer() {
    return executingServer;
  }
  /**
   * Sets the name of the executing server.
   *
   * @param executingServer the executingServer to set
   */
  public void setExecutingServer( String executingServer ) {
    this.executingServer = executingServer;
  }
  /**
   * Gets the name of the executing user.
   *
   * @return the executingUser
   */
  public String getExecutingUser() {
    return executingUser;
  }
  /**
   * Sets the name of the executing user.
   *
   * @param executingUser the executingUser to set
   */
  public void setExecutingUser( String executingUser ) {
    this.executingUser = executingUser;
  }
  /** @return true if a log channel exists and it is gathering metrics */
  @Override
  public boolean isGatheringMetrics() {
    return log != null && log.isGatheringMetrics();
  }
  /** Enables or disables metrics gathering on the log channel; a no-op when no channel exists. */
  @Override
  public void setGatheringMetrics( boolean gatheringMetrics ) {
    if ( log != null ) {
      log.setGatheringMetrics( gatheringMetrics );
    }
  }
  /** @return true if a log channel exists and it forces separate logging */
  @Override
  public boolean isForcingSeparateLogging() {
    return log != null && log.isForcingSeparateLogging();
  }
  /** Enables or disables forced separate logging on the log channel; a no-op when no channel exists. */
  @Override
  public void setForcingSeparateLogging( boolean forcingSeparateLogging ) {
    if ( log != null ) {
      log.setForcingSeparateLogging( forcingSeparateLogging );
    }
  }
  /** @return the list of result files */
  public List<ResultFile> getResultFiles() {
    return resultFiles;
  }
  /** @param resultFiles the list of result files to set */
  public void setResultFiles( List<ResultFile> resultFiles ) {
    this.resultFiles = resultFiles;
  }
  /** @return the list of result rows */
  public List<RowMetaAndData> getResultRows() {
    return resultRows;
  }
  /** @param resultRows the list of result rows to set */
  public void setResultRows( List<RowMetaAndData> resultRows ) {
    this.resultRows = resultRows;
  }
  /** @return the previous execution result */
  public Result getPreviousResult() {
    return previousResult;
  }
  /** @param previousResult the previous execution result to set */
  public void setPreviousResult( Result previousResult ) {
    this.previousResult = previousResult;
  }
  /** @return the counters, keyed by name */
  public Hashtable<String, Counter> getCounters() {
    return counters;
  }
  /** @param counters the counters (keyed by name) to set */
  public void setCounters( Hashtable<String, Counter> counters ) {
    this.counters = counters;
  }
  /** @return the command-line style arguments */
  public String[] getArguments() {
    return arguments;
  }
  /** @param arguments the arguments to set */
  public void setArguments( String[] arguments ) {
    this.arguments = arguments;
  }
  /**
   * Clear the error in the transformation, clear all the rows from all the row sets, to make sure the transformation
   * can continue with other data. This is intended for use when running single threaded.
   */
  public void clearError() {
    stopped.set( false );
    errors.set( 0 );
    setFinished( false );
    for ( StepMetaDataCombi combi : steps ) {
      StepInterface step = combi.step;
      // Drop any rows still buffered on the step's inputs so stale data cannot leak into the next run.
      for ( RowSet rowSet : step.getInputRowSets() ) {
        rowSet.clear();
      }
      step.setStopped( false );
    }
  }
  /**
   * Gets the transaction ID for the transformation.
   *
   * @return the transactionId
   */
  public String getTransactionId() {
    return transactionId;
  }
  /**
   * Sets the transaction ID for the transformation.
   *
   * @param transactionId the transactionId to set
   */
  public void setTransactionId( String transactionId ) {
    this.transactionId = transactionId;
  }
  /**
   * Calculates the transaction ID for the transformation.
   * <p>
   * When the transformation metadata requests unique (shared) database connections, the ID is inherited from the
   * parent job's transaction when available, otherwise from a parent transformation that also uses unique
   * connections; failing both, a fresh ID is requested from the DatabaseConnectionMap. Without unique connections the
   * current thread name is used as the ID.
   *
   * @return the calculated transaction ID for the transformation.
   */
  public String calculateTransactionId() {
    if ( getTransMeta() != null && getTransMeta().isUsingUniqueConnections() ) {
      if ( parentJob != null && parentJob.getTransactionId() != null ) {
        return parentJob.getTransactionId();
      } else if ( parentTrans != null && parentTrans.getTransMeta().isUsingUniqueConnections() ) {
        return parentTrans.getTransactionId();
      } else {
        return DatabaseConnectionMap.getInstance().getNextTransactionId();
      }
    } else {
      return Thread.currentThread().getName();
    }
  }
  /** @return the metastore used by this transformation */
  public IMetaStore getMetaStore() {
    return metaStore;
  }
  /** Sets the metastore on this transformation and, when present, on its metadata as well. */
  public void setMetaStore( IMetaStore metaStore ) {
    this.metaStore = metaStore;
    if ( transMeta != null ) {
      transMeta.setMetaStore( metaStore );
    }
  }
  /**
   * Stores the HttpServletResponse, first applying the character encoding configured through the
   * KETTLE_DEFAULT_SERVLET_ENCODING system property when one is set. Encoding failures are logged, not propagated.
   * <p>
   * NOTE(review): the method name "setServletReponse" (missing 's') is kept as-is since external callers may depend
   * on it.
   *
   * @param response
   *          the HttpServletResponse to set encoding on, may not be null
   * @throws IllegalArgumentException
   *           if the response is null
   */
  public void setServletReponse( HttpServletResponse response ) {
    if ( response == null ) {
      throw new IllegalArgumentException( "Response is not valid: " + response );
    }
    String encoding = System.getProperty( "KETTLE_DEFAULT_SERVLET_ENCODING", null );
    // Only apply an encoding when the property holds a non-blank value (null, "" and "   " are all skipped).
    if ( !StringUtils.isBlank( encoding ) ) {
      try {
        response.setCharacterEncoding( encoding.trim() );
        response.setContentType( "text/html; charset=" + encoding );
      } catch ( Exception ex ) {
        LogChannel.GENERAL.logError( "Unable to encode data with encoding : '" + encoding + "'", ex );
      }
    }
    this.servletResponse = response;
  }
  /** @return the servlet response set via setServletReponse, or null */
  public HttpServletResponse getServletResponse() {
    return servletResponse;
  }
  /** @param request the servlet request to store */
  public void setServletRequest( HttpServletRequest request ) {
    this.servletRequest = request;
  }
  /** @return the stored servlet request, or null */
  public HttpServletRequest getServletRequest() {
    return servletRequest;
  }
  /** @return the delegation listeners */
  public List<DelegationListener> getDelegationListeners() {
    return delegationListeners;
  }
  /** @param delegationListeners the delegation listeners to set */
  public void setDelegationListeners( List<DelegationListener> delegationListeners ) {
    this.delegationListeners = delegationListeners;
  }
  /** Registers an additional delegation listener. */
  public void addDelegationListener( DelegationListener delegationListener ) {
    delegationListeners.add( delegationListener );
  }
  /**
   * Sorts the steps of this transformation into execution (topological) order, using a cocktail sort
   * (a bi-directional bubble sort) driven by TransMeta.findPrevious() relationships. Copies of the same step are
   * ordered by copy number. The statement order below is intentional; behavior depends on it.
   */
  public synchronized void doTopologySortOfSteps() {
    // The bubble sort algorithm in contrast to the QuickSort or MergeSort
    // algorithms
    // does indeed cover all possibilities.
    // Sorting larger transformations with hundreds of steps might be too slow
    // though.
    // We should consider caching TransMeta.findPrevious() results in that case.
    //
    transMeta.clearCaches();
    //
    // Cocktail sort (bi-directional bubble sort)
    //
    // Original sort was taking 3ms for 30 steps
    // cocktail sort takes about 8ms for the same 30, but it works :)
    //
    int stepsMinSize = 0;
    int stepsSize = steps.size();
    // Noticed a problem with an immediate shrinking iteration window
    // trapping rows that need to be sorted.
    // This threshold buys us some time to get the sorting close before
    // starting to decrease the window size.
    //
    // TODO: this could become much smarter by tracking row movement
    // and reacting to that each outer iteration verses
    // using a threshold.
    //
    // After this many iterations enable trimming inner iteration
    // window on no change being detected.
    //
    int windowShrinkThreshold = (int) Math.round( stepsSize * 0.75 );
    // give ourselves some room to sort big lists. the window threshold should
    // stop us before reaching this anyway.
    //
    int totalIterations = stepsSize * 2;
    boolean isBefore = false;
    boolean forwardChange = false;
    boolean backwardChange = false;
    boolean lastForwardChange = true;
    boolean keepSortingForward = true;
    StepMetaDataCombi one = null;
    StepMetaDataCombi two = null;
    for ( int x = 0; x < totalIterations; x++ ) {
      // Go forward through the list
      //
      if ( keepSortingForward ) {
        for ( int y = stepsMinSize; y < stepsSize - 1; y++ ) {
          one = steps.get( y );
          two = steps.get( y + 1 );
          if ( one.stepMeta.equals( two.stepMeta ) ) {
            // Same step, several copies: order by copy number.
            isBefore = one.copy > two.copy;
          } else {
            isBefore = transMeta.findPrevious( one.stepMeta, two.stepMeta );
          }
          if ( isBefore ) {
            // two was found to be positioned BEFORE one so we need to
            // switch them...
            //
            steps.set( y, two );
            steps.set( y + 1, one );
            forwardChange = true;
          }
        }
      }
      // Go backward through the list
      //
      for ( int z = stepsSize - 1; z > stepsMinSize; z-- ) {
        one = steps.get( z );
        two = steps.get( z - 1 );
        if ( one.stepMeta.equals( two.stepMeta ) ) {
          isBefore = one.copy > two.copy;
        } else {
          isBefore = transMeta.findPrevious( one.stepMeta, two.stepMeta );
        }
        if ( !isBefore ) {
          // two was found NOT to be positioned BEFORE one so we need to
          // switch them...
          //
          steps.set( z, two );
          steps.set( z - 1, one );
          backwardChange = true;
        }
      }
      // Shrink stepsSize(max) if there was no forward change
      //
      if ( x > windowShrinkThreshold && !forwardChange ) {
        // should we keep going? check the window size
        //
        stepsSize--;
        if ( stepsSize <= stepsMinSize ) {
          break;
        }
      }
      // shrink stepsMinSize(min) if there was no backward change
      //
      if ( x > windowShrinkThreshold && !backwardChange ) {
        // should we keep going? check the window size
        //
        stepsMinSize++;
        if ( stepsMinSize >= stepsSize ) {
          break;
        }
      }
      // End of both forward and backward traversal.
      // Time to see if we should keep going.
      //
      if ( !forwardChange && !backwardChange ) {
        break;
      }
      //
      // if we are past the first iteration and there has been no change twice,
      // quit doing it!
      //
      if ( keepSortingForward && x > 0 && !lastForwardChange && !forwardChange ) {
        keepSortingForward = false;
      }
      lastForwardChange = forwardChange;
      forwardChange = false;
      backwardChange = false;
    } // finished sorting
  }
  /** @return the extension data map used to attach arbitrary data to this transformation */
  @Override
  public Map<String, Object> getExtensionDataMap() {
    return extensionDataMap;
  }
}
| MikhailHubanau/pentaho-kettle | engine/src/org/pentaho/di/trans/Trans.java | Java | apache-2.0 | 191,825 |
// Copyright 2007-2016 Chris Patterson, Dru Sellers, Travis Smith, et. al.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
namespace MassTransit.RabbitMqTransport.Tests
{
using System.Threading;
using System.Threading.Tasks;
using NUnit.Framework;
    [TestFixture]
    public class Using_a_consumer_concurrency_limit :
        RabbitMqTestFixture
    {
        [Test]
        public async Task Should_limit_the_consumer()
        {
            _complete = GetTask<bool>();
            // Publish a burst of messages; the consumer is registered with UseConcurrencyLimit(1).
            for (var i = 0; i < _messageCount; i++)
            {
                Bus.Publish(new A());
            }
            // Completed by the consumer once every published message has been consumed.
            await _complete.Task;
            // With a concurrency limit of 1, the consumer must never see more than one in-flight message.
            Assert.AreEqual(1, _consumer.MaxDeliveryCount);
        }
        Consumer _consumer;
        static int _messageCount = 100;
        static TaskCompletionSource<bool> _complete;
        protected override void ConfigureInputQueueEndpoint(IRabbitMqReceiveEndpointConfigurator configurator)
        {
            base.ConfigureInputQueueEndpoint(configurator);
            _consumer = new Consumer();
            // The concurrency-limit filter under test is applied here.
            configurator.Consumer(() => _consumer, x => x.UseConcurrencyLimit(1));
        }
        // Tracks the number of concurrently pending deliveries and records the maximum observed.
        class Consumer :
            IConsumer<A>
        {
            int _currentPendingDeliveryCount;
            long _deliveryCount;
            int _maxPendingDeliveryCount;
            public int MaxDeliveryCount
            {
                get { return _maxPendingDeliveryCount; }
            }
            public async Task Consume(ConsumeContext<A> context)
            {
                Interlocked.Increment(ref _deliveryCount);
                var current = Interlocked.Increment(ref _currentPendingDeliveryCount);
                // CAS loop so the maximum is updated race-free under concurrent deliveries.
                while (current > _maxPendingDeliveryCount)
                    Interlocked.CompareExchange(ref _maxPendingDeliveryCount, current, _maxPendingDeliveryCount);
                // Keep the message "in flight" long enough for overlap to be observable.
                await Task.Delay(100);
                Interlocked.Decrement(ref _currentPendingDeliveryCount);
                if (_deliveryCount == _messageCount)
                    _complete.TrySetResult(true);
            }
        }
        class A
        {
        }
    }
} | jsmale/MassTransit | src/MassTransit.RabbitMqTransport.Tests/ConcurrencyFilter_Specs.cs | C# | apache-2.0 | 2,753 |
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
eventlet.monkey_patch()
import contextlib
import sys
from oslo.config import cfg
from keystone.openstack.common import log as logging
from keystone.openstack.common import rpc
from keystone.openstack.common.rpc import impl_zmq
CONF = cfg.CONF
CONF.register_opts(rpc.rpc_opts)
CONF.register_opts(impl_zmq.zmq_opts)
def main():
    """Parse CLI options, configure logging, and run the ZeroMQ RPC proxy until it exits."""
    CONF(sys.argv[1:], project='oslo')
    logging.setup("oslo")

    reactor = impl_zmq.ZmqProxy(CONF)
    # closing() guarantees the proxy is shut down even if consume/wait raises.
    with contextlib.closing(reactor):
        reactor.consume_in_thread()
        reactor.wait()
| derekchiang/keystone | keystone/openstack/common/rpc/zmq_receiver.py | Python | apache-2.0 | 1,154 |
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
package org.deeplearning4j.arbiter.dropout;
import lombok.AllArgsConstructor;
import org.deeplearning4j.arbiter.optimize.api.ParameterSpace;
import org.deeplearning4j.arbiter.optimize.parameter.FixedValue;
import org.deeplearning4j.nn.conf.dropout.GaussianNoise;
import org.deeplearning4j.nn.conf.dropout.IDropout;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
 * Hyperparameter search space that produces {@link GaussianNoise} dropout instances whose standard deviation is drawn
 * from a nested {@link ParameterSpace}.
 */
@AllArgsConstructor
public class GaussianNoiseSpace implements ParameterSpace<IDropout> {

    // Search space for the noise standard deviation; may itself be fixed or continuous.
    private ParameterSpace<Double> stddev;

    /**
     * Creates a space with a fixed standard deviation.
     *
     * @param stddev the fixed standard deviation for the Gaussian noise
     */
    public GaussianNoiseSpace(double stddev){
        this(new FixedValue<>(stddev));
    }

    /** Resolves the nested stddev space and wraps the result in a GaussianNoise instance. */
    @Override
    public IDropout getValue(double[] parameterValues) {
        return new GaussianNoise(stddev.getValue(parameterValues));
    }

    /** Number of degrees of freedom equals that of the nested stddev space. */
    @Override
    public int numParameters() {
        return stddev.numParameters();
    }

    @Override
    public List<ParameterSpace> collectLeaves() {
        return Collections.<ParameterSpace>singletonList(stddev);
    }

    @Override
    public Map<String, ParameterSpace> getNestedSpaces() {
        return Collections.<String,ParameterSpace>singletonMap("stddev", stddev);
    }

    /** Not a leaf: this space wraps the nested stddev space. */
    @Override
    public boolean isLeaf() {
        return false;
    }

    @Override
    public void setIndices(int... indices) {
        stddev.setIndices(indices);
    }
}
| deeplearning4j/deeplearning4j | arbiter/arbiter-deeplearning4j/src/main/java/org/deeplearning4j/arbiter/dropout/GaussianNoiseSpace.java | Java | apache-2.0 | 2,098 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.commons.compress.utils;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.ServiceConfigurationError;
import java.util.ServiceLoader;
/**
* Iterates all services for a given class through the standard
* {@link ServiceLoader} mechanism.
*
* @param <E>
* The service to load
* @since 1.13
*/
public class ServiceLoaderIterator<E> implements Iterator<E> {
private E nextServiceLoader;
private final Class<E> service;
private final Iterator<E> serviceLoaderIterator;
public ServiceLoaderIterator(final Class<E> service) {
this(service, ClassLoader.getSystemClassLoader());
}
public ServiceLoaderIterator(final Class<E> service, final ClassLoader classLoader) {
this.service = service;
this.serviceLoaderIterator = ServiceLoader.load(service, classLoader).iterator();
}
@Override
public boolean hasNext() {
while (nextServiceLoader == null) {
try {
if (!serviceLoaderIterator.hasNext()) {
return false;
}
nextServiceLoader = serviceLoaderIterator.next();
} catch (final ServiceConfigurationError e) {
if (e.getCause() instanceof SecurityException) {
// Ignore security exceptions
// TODO Log?
continue;
}
throw e;
}
}
return true;
}
@Override
public E next() {
if (!hasNext()) {
throw new NoSuchElementException("No more elements for service " + service.getName());
}
final E tempNext = nextServiceLoader;
nextServiceLoader = null;
return tempNext;
}
@Override
public void remove() {
throw new UnsupportedOperationException("service=" + service.getName());
}
}
| apache/commons-compress | src/main/java/org/apache/commons/compress/utils/ServiceLoaderIterator.java | Java | apache-2.0 | 2,727 |
package answers.chapter3;
import java.util.stream.IntStream;
/**
 * Demonstrates four equivalent ways of printing the even numbers in [0, 20): a classic for loop, a plain
 * IntStream.forEach, a filtered stream, and a filtered stream with a method reference.
 */
public class ForEach06 {

    public ForEach06() {
        // Plain for loop.
        printEvens();
        // IntStream#range with the even check inside forEach.
        printEvensStream1();
        // Even check moved into a filter stage.
        printEvensStream2();
        // Same pipeline, terminal step written as a method reference.
        printEvensStream3();
    }

    private void printEvens() {
        for (int value = 0; value < 20; value++) {
            if (value % 2 == 0) {
                System.out.println(value);
            }
        }
    }

    private void printEvensStream1() {
        IntStream.range(0, 20).forEach(value -> {
            if (value % 2 == 0) {
                System.out.println(value);
            }
        });
    }

    private void printEvensStream2() {
        IntStream.range(0, 20)
                 .filter(value -> value % 2 == 0)
                 .forEach(value -> System.out.println(value));
    }

    private void printEvensStream3() {
        IntStream.range(0, 20)
                 .filter(value -> value % 2 == 0)
                 .forEach(System.out::println);
    }

    public static void main(String... args) {
        new ForEach06();
    }
}
| gobjapan/LambdaOkeiko | src/answers/chapter3/ForEach06.java | Java | apache-2.0 | 1,304 |
# frozen_string_literal: true
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
require_relative 'spec_helper'
# Integration specs for Selenium::WebDriver::Element: interaction (click,
# submit, send_keys, file upload), attribute vs. property resolution,
# geometry (location/size/rect), equality/hashing, and drag-and-drop.
# Browser-specific exclusions are expressed via the except:/only: guards.
module Selenium
  module WebDriver
    describe Element do
      it 'should click' do
        driver.navigate.to url_for('formPage.html')
        expect { driver.find_element(id: 'imageButton').click }.not_to raise_error
        reset_driver!(1) if %i[safari safari_preview].include? GlobalTestEnv.browser
      end
      # Safari returns "click intercepted" error instead of "element click intercepted"
      it 'should raise if different element receives click', except: {browser: %i[safari safari_preview]} do
        driver.navigate.to url_for('click_tests/overlapping_elements.html')
        expect { driver.find_element(id: 'contents').click }.to raise_error(Error::ElementClickInterceptedError)
      end
      # Safari returns "click intercepted" error instead of "element click intercepted"
      it 'should raise if element is partially covered', except: {browser: %i[safari safari_preview]} do
        driver.navigate.to url_for('click_tests/overlapping_elements.html')
        expect { driver.find_element(id: 'other_contents').click }.to raise_error(Error::ElementClickInterceptedError)
      end
      it 'should submit' do
        driver.navigate.to url_for('formPage.html')
        wait_for_element(id: 'submitButton')
        expect { driver.find_element(id: 'submitButton').submit }.not_to raise_error
        reset_driver!
      end
      it 'should send string keys' do
        driver.navigate.to url_for('formPage.html')
        wait_for_element(id: 'working')
        expect { driver.find_element(id: 'working').send_keys('foo', 'bar') }.not_to raise_error
      end
      it 'should send key presses' do
        driver.navigate.to url_for('javascriptPage.html')
        key_reporter = driver.find_element(id: 'keyReporter')
        key_reporter.send_keys('Tet', :arrow_left, 's')
        expect(key_reporter.attribute('value')).to eq('Test')
      end
      # https://github.com/mozilla/geckodriver/issues/245
      it 'should send key presses chords', except: {browser: %i[firefox firefox_nightly safari safari_preview]} do
        driver.navigate.to url_for('javascriptPage.html')
        key_reporter = driver.find_element(id: 'keyReporter')
        key_reporter.send_keys([:shift, 'h'], 'ello')
        expect(key_reporter.attribute('value')).to eq('Hello')
      end
      it 'should handle file uploads' do
        driver.navigate.to url_for('formPage.html')
        element = driver.find_element(id: 'upload')
        expect(element.attribute('value')).to be_empty
        # send_keys on a file input expects an absolute, OS-native path.
        path = WebDriver::Platform.windows? ? WebDriver::Platform.windows_path(__FILE__) : __FILE__
        element.send_keys path
        expect(element.attribute('value')).to include(File.basename(path))
      end
      # Exercises the three accessors side by side: #dom_attribute (raw DOM
      # attribute), #property (live DOM property), and #attribute (legacy
      # mixed behavior) for various attribute/property shapes.
      describe 'properties and attributes' do
        before { driver.navigate.to url_for('formPage.html') }
        context 'string type' do
          let(:element) { driver.find_element(id: 'checky') }
          let(:prop_or_attr) { 'type' }
          it '#dom_attribute returns attribute value' do
            expect(element.dom_attribute(prop_or_attr)).to eq 'checkbox'
          end
          it '#property returns property value' do
            expect(element.property(prop_or_attr)).to eq 'checkbox'
          end
          it '#attribute returns value' do
            expect(element.attribute(prop_or_attr)).to eq 'checkbox'
          end
        end
        context 'numeric type' do
          let(:element) { driver.find_element(id: 'withText') }
          let(:prop_or_attr) { 'rows' }
          it '#dom_attribute String' do
            expect(element.dom_attribute(prop_or_attr)).to eq '5'
          end
          it '#property returns Number' do
            expect(element.property(prop_or_attr)).to eq 5
          end
          it '#attribute returns String' do
            expect(element.attribute(prop_or_attr)).to eq '5'
          end
        end
        context 'boolean type of true' do
          let(:element) { driver.find_element(id: 'checkedchecky') }
          let(:prop_or_attr) { 'checked' }
          it '#dom_attribute returns String', except: {browser: :safari} do
            expect(element.dom_attribute(prop_or_attr)).to eq 'true'
          end
          it '#property returns true' do
            expect(element.property(prop_or_attr)).to eq true
          end
          it '#attribute returns String' do
            expect(element.attribute(prop_or_attr)).to eq 'true'
          end
          # Clicking toggles the live property but not the DOM attribute.
          it '#dom_attribute does not update after click', except: {browser: :safari} do
            element.click
            expect(element.dom_attribute(prop_or_attr)).to eq 'true'
          end
          it '#property updates to false after click' do
            element.click
            expect(element.property(prop_or_attr)).to eq false
          end
          it '#attribute updates to nil after click' do
            element.click
            expect(element.attribute(prop_or_attr)).to eq nil
          end
        end
        context 'boolean type of false' do
          let(:element) { driver.find_element(id: 'checky') }
          let(:prop_or_attr) { 'checked' }
          it '#dom_attribute returns nil' do
            expect(element.dom_attribute(prop_or_attr)).to be_nil
          end
          it '#property returns false' do
            expect(element.property(prop_or_attr)).to eq false
          end
          it '#attribute returns nil' do
            expect(element.attribute(prop_or_attr)).to be_nil
          end
          it '#dom_attribute does not update after click' do
            element.click
            expect(element.dom_attribute(prop_or_attr)).to eq nil
          end
          it '#property updates to true after click' do
            element.click
            expect(element.property(prop_or_attr)).to eq true
          end
          it '#attribute updates to String after click' do
            element.click
            expect(element.attribute(prop_or_attr)).to eq 'true'
          end
        end
        context 'property exists but attribute does not' do
          let(:element) { driver.find_element(id: 'withText') }
          let(:prop_or_attr) { 'value' }
          it '#dom_attribute returns nil' do
            expect(element.dom_attribute(prop_or_attr)).to be_nil
          end
          it '#property returns default property' do
            expect(element.property(prop_or_attr)).to eq 'Example text'
          end
          it '#attribute returns default property' do
            expect(element.attribute(prop_or_attr)).to eq 'Example text'
          end
          it '#property returns updated property' do
            element.clear
            expect(element.property(prop_or_attr)).to be_empty
          end
          it '#attribute returns updated property' do
            element.clear
            expect(element.attribute(prop_or_attr)).to be_empty
          end
        end
        context 'attribute exists but property does not' do
          let(:element) { driver.find_element(id: 'vsearchGadget') }
          let(:prop_or_attr) { 'accesskey' }
          it '#dom_attribute returns attribute' do
            expect(element.dom_attribute(prop_or_attr)).to eq '4'
          end
          it '#property returns nil' do
            expect(element.property(prop_or_attr)).to be_nil
          end
          it '#attribute returns attribute' do
            expect(element.attribute(prop_or_attr)).to eq '4'
          end
        end
        context 'neither attribute nor property exists' do
          let(:element) { driver.find_element(id: 'checky') }
          let(:prop_or_attr) { 'nonexistent' }
          it '#dom_attribute returns nil' do
            expect(element.dom_attribute(prop_or_attr)).to be_nil
          end
          it '#property returns nil' do
            expect(element.property(prop_or_attr)).to be_nil
          end
          it '#attribute returns nil' do
            expect(element.attribute(prop_or_attr)).to be_nil
          end
        end
        context 'style' do
          before { driver.navigate.to url_for('clickEventPage.html') }
          let(:element) { driver.find_element(id: 'result') }
          let(:prop_or_attr) { 'style' }
          it '#dom_attribute attribute with no formatting' do
            expect(element.dom_attribute(prop_or_attr)).to eq 'width:300;height:60'
          end
          # TODO: This might not be correct behavior
          it '#property returns object',
             except: [{browser: :firefox,
                       reason: 'https://github.com/mozilla/geckodriver/issues/1846'},
                      {browser: :safari}] do
            expect(element.property(prop_or_attr)).to eq %w[width height]
          end
          it '#attribute returns attribute with formatting' do
            expect(element.attribute(prop_or_attr)).to eq 'width: 300px; height: 60px;'
          end
        end
        context 'incorrect casing' do
          let(:element) { driver.find_element(id: 'checky') }
          let(:prop_or_attr) { 'nAme' }
          it '#dom_attribute returns correctly cased attribute' do
            expect(element.dom_attribute(prop_or_attr)).to eq 'checky'
          end
          it '#property returns nil' do
            expect(element.property(prop_or_attr)).to be_nil
          end
          it '#attribute returns correctly cased attribute' do
            expect(element.attribute(prop_or_attr)).to eq 'checky'
          end
        end
        context 'property attribute case difference with attribute casing' do
          let(:element) { driver.find_element(name: 'readonly') }
          let(:prop_or_attr) { 'readonly' }
          it '#dom_attribute returns a String', except: {browser: :safari} do
            expect(element.dom_attribute(prop_or_attr)).to eq 'true'
          end
          it '#property returns nil' do
            expect(element.property(prop_or_attr)).to be_nil
          end
          it '#attribute returns a String' do
            expect(element.attribute(prop_or_attr)).to eq 'true'
          end
        end
        context 'property attribute case difference with property casing' do
          let(:element) { driver.find_element(name: 'readonly') }
          let(:prop_or_attr) { 'readOnly' }
          it '#dom_attribute returns a String',
             except: [{browser: :firefox,
                       reason: 'https://github.com/mozilla/geckodriver/issues/1850'},
                      {browser: :safari}] do
            expect(element.dom_attribute(prop_or_attr)).to eq 'true'
          end
          it '#property returns property as true' do
            expect(element.property(prop_or_attr)).to eq true
          end
          it '#attribute returns property as String' do
            expect(element.attribute(prop_or_attr)).to eq 'true'
          end
        end
        context 'property attribute name difference with attribute naming' do
          let(:element) { driver.find_element(id: 'wallace') }
          let(:prop_or_attr) { 'class' }
          it '#dom_attribute returns attribute value' do
            expect(element.dom_attribute(prop_or_attr)).to eq 'gromit'
          end
          it '#property returns nil' do
            expect(element.property(prop_or_attr)).to be_nil
          end
          it '#attribute returns attribute value' do
            expect(element.attribute(prop_or_attr)).to eq 'gromit'
          end
        end
        context 'property attribute name difference with property naming' do
          let(:element) { driver.find_element(id: 'wallace') }
          let(:prop_or_attr) { 'className' }
          it '#dom_attribute returns nil' do
            expect(element.dom_attribute(prop_or_attr)).to be_nil
          end
          it '#property returns property value' do
            expect(element.property(prop_or_attr)).to eq 'gromit'
          end
          it '#attribute returns property value' do
            expect(element.attribute(prop_or_attr)).to eq 'gromit'
          end
        end
        context 'property attribute value difference' do
          let(:element) { driver.find_element(tag_name: 'form') }
          let(:prop_or_attr) { 'action' }
          it '#dom_attribute returns attribute value' do
            expect(element.dom_attribute(prop_or_attr)).to eq 'resultPage.html'
          end
          it '#property returns property value' do
            expect(element.property(prop_or_attr)).to match(%r{http://(.+)/resultPage\.html})
          end
          it '#attribute returns property value' do
            expect(element.attribute(prop_or_attr)).to match(%r{http://(.+)/resultPage\.html})
          end
        end
      end
      it 'returns ARIA role', only: {browser: %i[chrome edge]} do
        driver.navigate.to "data:text/html," \
                           "<div role='heading' aria-level='1'>Level 1 Header</div>" \
                           "<h1>Level 1 Header</h1>" \
                           "<h2 role='alert'>Level 2 Header</h2>"
        expect(driver.find_element(tag_name: 'div').aria_role).to eq('heading')
        expect(driver.find_element(tag_name: 'h1').aria_role).to eq('heading')
        expect(driver.find_element(tag_name: 'h2').aria_role).to eq('alert')
      end
      it 'returns accessible name', only: {browser: %i[chrome edge]} do
        driver.navigate.to "data:text/html,<h1>Level 1 Header</h1>"
        expect(driver.find_element(tag_name: 'h1').accessible_name).to eq('Level 1 Header')
      end
      it 'should clear' do
        driver.navigate.to url_for('formPage.html')
        expect { driver.find_element(id: 'withText').clear }.not_to raise_error
      end
      it 'should get and set selected' do
        driver.navigate.to url_for('formPage.html')
        cheese = driver.find_element(id: 'cheese')
        peas = driver.find_element(id: 'peas')
        cheese.click
        expect(cheese).to be_selected
        expect(peas).not_to be_selected
        peas.click
        expect(peas).to be_selected
        expect(cheese).not_to be_selected
      end
      it 'should get enabled' do
        driver.navigate.to url_for('formPage.html')
        expect(driver.find_element(id: 'notWorking')).not_to be_enabled
      end
      it 'should get text' do
        driver.navigate.to url_for('xhtmlTest.html')
        expect(driver.find_element(class: 'header').text).to eq('XHTML Might Be The Future')
      end
      it 'should get displayed' do
        driver.navigate.to url_for('xhtmlTest.html')
        expect(driver.find_element(class: 'header')).to be_displayed
      end
      context 'size and location' do
        it 'should get current location' do
          driver.navigate.to url_for('xhtmlTest.html')
          loc = driver.find_element(class: 'header').location
          expect(loc.x).to be >= 1
          expect(loc.y).to be >= 1
        end
        it 'should get location once scrolled into view' do
          driver.navigate.to url_for('javascriptPage.html')
          loc = driver.find_element(id: 'keyUp').location_once_scrolled_into_view
          expect(loc.x).to be >= 1
          expect(loc.y).to be >= 0 # can be 0 if scrolled to the top
        end
        it 'should get size' do
          driver.navigate.to url_for('xhtmlTest.html')
          size = driver.find_element(class: 'header').size
          expect(size.width).to be_positive
          expect(size.height).to be_positive
        end
        it 'should get rect' do
          driver.navigate.to url_for('xhtmlTest.html')
          rect = driver.find_element(class: 'header').rect
          expect(rect.x).to be_positive
          expect(rect.y).to be_positive
          expect(rect.width).to be_positive
          expect(rect.height).to be_positive
        end
      end
      # IE - https://github.com/SeleniumHQ/selenium/pull/4043
      it 'should drag and drop', except: {browser: :ie} do
        driver.navigate.to url_for('dragAndDropTest.html')
        img1 = driver.find_element(id: 'test1')
        img2 = driver.find_element(id: 'test2')
        driver.action.drag_and_drop_by(img1, 100, 100)
              .drag_and_drop(img2, img1)
              .perform
        expect(img1.location).to eq(img2.location)
      end
      it 'should get css property' do
        driver.navigate.to url_for('javascriptPage.html')
        element = driver.find_element(id: 'green-parent')
        style1 = element.css_value('background-color')
        style2 = element.style('background-color') # backwards compatibility
        # Browsers report computed colors in different but equivalent formats.
        acceptable = ['rgb(0, 128, 0)', '#008000', 'rgba(0,128,0,1)', 'rgba(0, 128, 0, 1)']
        expect(acceptable).to include(style1, style2)
      end
      it 'should know when two elements are equal' do
        driver.navigate.to url_for('simpleTest.html')
        body = driver.find_element(tag_name: 'body')
        xbody = driver.find_element(xpath: '//body')
        jsbody = driver.execute_script('return document.getElementsByTagName("body")[0]')
        expect(body).to eq(xbody)
        expect(body).to eq(jsbody)
        expect(body).to eql(xbody)
        expect(body).to eql(jsbody)
      end
      it 'should know when element arrays are equal' do
        driver.navigate.to url_for('simpleTest.html')
        tags = driver.find_elements(tag_name: 'div')
        jstags = driver.execute_script('return document.getElementsByTagName("div")')
        expect(tags).to eq(jstags)
      end
      it 'should know when two elements are not equal' do
        driver.navigate.to url_for('simpleTest.html')
        elements = driver.find_elements(tag_name: 'p')
        p1 = elements.fetch(0)
        p2 = elements.fetch(1)
        expect(p1).not_to eq(p2)
        expect(p1).not_to eql(p2)
      end
      it 'should return the same #hash for equal elements when found by Driver#find_element' do
        driver.navigate.to url_for('simpleTest.html')
        body = driver.find_element(tag_name: 'body')
        xbody = driver.find_element(xpath: '//body')
        expect(body.hash).to eq(xbody.hash)
      end
      it 'should return the same #hash for equal elements when found by Driver#find_elements' do
        driver.navigate.to url_for('simpleTest.html')
        body = driver.find_elements(tag_name: 'body').fetch(0)
        xbody = driver.find_elements(xpath: '//body').fetch(0)
        expect(body.hash).to eq(xbody.hash)
      end
    end
  end # WebDriver
end # Selenium
| SeleniumHQ/selenium | rb/spec/integration/selenium/webdriver/element_spec.rb | Ruby | apache-2.0 | 19,292 |
"""The Hunter Douglas PowerView integration."""
import asyncio
from datetime import timedelta
import logging
from aiopvapi.helpers.aiorequest import AioRequest
from aiopvapi.helpers.constants import ATTR_ID
from aiopvapi.helpers.tools import base64_to_unicode
from aiopvapi.rooms import Rooms
from aiopvapi.scenes import Scenes
from aiopvapi.shades import Shades
from aiopvapi.userdata import UserData
import async_timeout
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
COORDINATOR,
DEVICE_FIRMWARE,
DEVICE_INFO,
DEVICE_MAC_ADDRESS,
DEVICE_MODEL,
DEVICE_NAME,
DEVICE_REVISION,
DEVICE_SERIAL_NUMBER,
DOMAIN,
FIRMWARE_BUILD,
FIRMWARE_IN_USERDATA,
FIRMWARE_SUB_REVISION,
HUB_EXCEPTIONS,
HUB_NAME,
LEGACY_DEVICE_BUILD,
LEGACY_DEVICE_MODEL,
LEGACY_DEVICE_REVISION,
LEGACY_DEVICE_SUB_REVISION,
MAC_ADDRESS_IN_USERDATA,
MAINPROCESSOR_IN_USERDATA_FIRMWARE,
MODEL_IN_MAINPROCESSOR,
PV_API,
PV_ROOM_DATA,
PV_SCENE_DATA,
PV_SHADE_DATA,
PV_SHADES,
REVISION_IN_MAINPROCESSOR,
ROOM_DATA,
SCENE_DATA,
SERIAL_NUMBER_IN_USERDATA,
SHADE_DATA,
USER_DATA,
)
# NOTE(review): Home Assistant convention — limits concurrent entity
# updates for this integration to one at a time; confirm the hub cannot
# handle parallel requests before raising it.
PARALLEL_UPDATES = 1
# YAML configuration is deprecated for this integration; setup happens
# through config entries instead.
CONFIG_SCHEMA = cv.deprecated(DOMAIN)
# Entity platforms forwarded during config-entry setup/unload.
PLATFORMS = ["cover", "scene", "sensor"]
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, hass_config: dict):
    """Initialize shared storage for the Hunter Douglas PowerView component.

    Creates the integration's bucket in ``hass.data`` if it does not exist
    yet; per-entry setup happens in ``async_setup_entry``.
    """
    if DOMAIN not in hass.data:
        hass.data[DOMAIN] = {}
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up Hunter Douglas PowerView from a config entry.

    Connects to the hub named in the entry, fetches device info and the
    room/scene/shade resources, creates a DataUpdateCoordinator that polls
    shade state every 60 seconds, and stores everything in ``hass.data``
    for the platforms to consume.
    """
    config = entry.data
    hub_address = config.get(CONF_HOST)
    websession = async_get_clientsession(hass)
    pv_request = AioRequest(hub_address, loop=hass.loop, websession=websession)
    try:
        # Each hub call gets its own 10-second budget rather than one
        # shared timeout for the whole setup.
        async with async_timeout.timeout(10):
            device_info = await async_get_device_info(pv_request)
        async with async_timeout.timeout(10):
            rooms = Rooms(pv_request)
            room_data = _async_map_data_by_id((await rooms.get_resources())[ROOM_DATA])
        async with async_timeout.timeout(10):
            scenes = Scenes(pv_request)
            scene_data = _async_map_data_by_id(
                (await scenes.get_resources())[SCENE_DATA]
            )
        async with async_timeout.timeout(10):
            shades = Shades(pv_request)
            shade_data = _async_map_data_by_id(
                (await shades.get_resources())[SHADE_DATA]
            )
    except HUB_EXCEPTIONS as err:
        _LOGGER.error("Connection error to PowerView hub: %s", hub_address)
        # ConfigEntryNotReady makes Home Assistant retry setup later.
        raise ConfigEntryNotReady from err
    if not device_info:
        _LOGGER.error("Unable to initialize PowerView hub: %s", hub_address)
        raise ConfigEntryNotReady
    async def async_update_data():
        """Fetch data from shade endpoint."""
        async with async_timeout.timeout(10):
            shade_entries = await shades.get_resources()
        if not shade_entries:
            raise UpdateFailed("Failed to fetch new shade data.")
        return _async_map_data_by_id(shade_entries[SHADE_DATA])
    coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name="powerview hub",
        update_method=async_update_data,
        update_interval=timedelta(seconds=60),
    )
    # Shared state for this entry, keyed by entry_id so multiple hubs can coexist.
    hass.data[DOMAIN][entry.entry_id] = {
        PV_API: pv_request,
        PV_ROOM_DATA: room_data,
        PV_SCENE_DATA: scene_data,
        PV_SHADES: shades,
        PV_SHADE_DATA: shade_data,
        COORDINATOR: coordinator,
        DEVICE_INFO: device_info,
    }
    for component in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )
    return True
async def async_get_device_info(pv_request):
    """Query the hub's userdata endpoint and normalize it into a device-info dict."""
    userdata_data = (await UserData(pv_request).get_resources())[USER_DATA]
    if FIRMWARE_IN_USERDATA not in userdata_data:
        # Legacy hubs do not report firmware details; substitute fixed values.
        main_processor_info = {
            REVISION_IN_MAINPROCESSOR: LEGACY_DEVICE_REVISION,
            FIRMWARE_SUB_REVISION: LEGACY_DEVICE_SUB_REVISION,
            FIRMWARE_BUILD: LEGACY_DEVICE_BUILD,
            MODEL_IN_MAINPROCESSOR: LEGACY_DEVICE_MODEL,
        }
    else:
        main_processor_info = userdata_data[FIRMWARE_IN_USERDATA][
            MAINPROCESSOR_IN_USERDATA_FIRMWARE
        ]
    device_info = {
        DEVICE_NAME: base64_to_unicode(userdata_data[HUB_NAME]),
        DEVICE_MAC_ADDRESS: userdata_data[MAC_ADDRESS_IN_USERDATA],
        DEVICE_SERIAL_NUMBER: userdata_data[SERIAL_NUMBER_IN_USERDATA],
        DEVICE_REVISION: main_processor_info[REVISION_IN_MAINPROCESSOR],
        DEVICE_FIRMWARE: main_processor_info,
        DEVICE_MODEL: main_processor_info[MODEL_IN_MAINPROCESSOR],
    }
    return device_info
@callback
def _async_map_data_by_id(data):
    """Index a list of API entries by their id field (last duplicate wins)."""
    mapped = {}
    for item in data:
        mapped[item[ATTR_ID]] = item
    return mapped
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry.

    Tears down every forwarded platform in parallel and, if all succeed,
    drops the entry's shared state from ``hass.data``.
    """
    unload_tasks = [
        hass.config_entries.async_forward_entry_unload(entry, component)
        for component in PLATFORMS
    ]
    results = await asyncio.gather(*unload_tasks)
    unload_ok = all(results)
    if unload_ok:
        hass.data[DOMAIN].pop(entry.entry_id)
    return unload_ok
// View that asks for an employer address and determines whether it lies
// inside Seattle city limits, by geocoding the address with the Google
// geocoder and running a Wherewolf point-in-polygon lookup against
// data/city-limits.json. Saves the answer via the injected answerService.
var AddressInSeattleView = function (answerService) {
  this.initialize = function () {
    // Define a div wrapper for the view (used to attach events)
    this.$el = $('<div/>');

    var queryAddress = function (evt) {
      // Geocode the user-supplied address.
      var geocodeDeferred = $.Deferred();
      var geocoder = new google.maps.Geocoder();
      geocoder.geocode({ address: $("#employer-address").val() }, function (results, status) {
        // Guard against a null results array as well as an empty one: the
        // geocoder passes null for some error statuses.
        if (!results || results.length === 0) {
          geocodeDeferred.reject("Error geocoding");
        } else {
          geocodeDeferred.resolve(results);
        }
      });

      // Load the Seattle city-limits polygon in parallel.
      var loadCityLimitsDeferred = $.Deferred();
      $.ajax({
        dataType: "json",
        url: "data/city-limits.json",
        success: function (cityLimits) {
          loadCityLimitsDeferred.resolve(cityLimits);
        },
        error: function (response, status, errorThrown) {
          loadCityLimitsDeferred.reject("Error loading city limits");
        }
      });

      var onGeocodeAndLoad = function (results, cityLimits) {
        var ww = Wherewolf();
        ww.add("Seattle", cityLimits);

        var lngLat, inSeattle;
        // For each geocoder result
        for (var i = 0; i < results.length; i++) {
          // BUG FIX: previously read results[0] on every iteration, so only
          // the first candidate address was ever tested against the polygon.
          lngLat = {
            lng: results[i].geometry.location.lng(),
            lat: results[i].geometry.location.lat()
          };
          inSeattle = ww.find(lngLat, {
            layer: "Seattle",
            wholeFeature: true
          });
          // If it's a match, stop
          if (inSeattle) {
            answerService.saveAnswer("work-seattle", "yes");
            var resultDiv = $(this.$el.find(".result")).html("In Seattle");
            var continueButton = $(this.$el.find("a.btn"));
            continueButton.attr("href", "#question/number-employees");
            continueButton.removeClass("hidden");
            return;
          }
        }
        // No candidate fell inside the city limits.
        answerService.saveAnswer("work-seattle", "no");
        var resultDiv = $(this.$el.find(".result")).html("Not In Seattle");
        var continueButton = $(this.$el.find("a.btn"));
        continueButton.attr("href", "#results");
        continueButton.removeClass("hidden");
      }

      var onFailedGeocodeOrLoad = function (err1, err2) {
        $(this.$el.find(".result")).html("Unable to Determine");
      };

      // Wait for both the geocode and the polygon load before deciding.
      $.when(geocodeDeferred, loadCityLimitsDeferred).done(onGeocodeAndLoad.bind(this)).fail(onFailedGeocodeOrLoad.bind(this));
    };

    this.$el.on("click", ".query", queryAddress.bind(this));
    this.render();
  };

  this.render = function () {
    this.$el.html(this.template());
    return this;
  };

  this.initialize();
}
| working-wa/whats-my-wage-app | www/js/AddressInSeattleView.js | JavaScript | apache-2.0 | 2,780 |
/*
* Copyright 2014-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.facebook.buck.parser;
import static org.junit.Assert.assertEquals;
import com.facebook.buck.config.Config;
import com.facebook.buck.config.ConfigBuilder;
import com.facebook.buck.io.FakeWatchmanClient;
import com.facebook.buck.io.ProjectFilesystem;
import com.facebook.buck.io.ProjectWatch;
import com.facebook.buck.io.Watchman;
import com.facebook.buck.rules.Cell;
import com.facebook.buck.rules.TestCellBuilder;
import com.facebook.buck.testutil.FakeProjectFilesystem;
import com.facebook.buck.util.HumanReadableException;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.rules.TemporaryFolder;
/**
 * Tests for BuildFileSpec: locating BUCK files either non-recursively or
 * recursively, honoring project ignore paths, and finding files via
 * Watchman (including error propagation and the filesystem-crawl fallback
 * when a Watchman query times out).
 */
public class BuildFileSpecTest {
  @Rule public ExpectedException thrown = ExpectedException.none();
  @Rule public TemporaryFolder tmp = new TemporaryFolder();

  // A non-recursive spec should find only the BUCK file in its own
  // directory; a recursive spec should also find nested ones.
  @Test
  public void recursiveVsNonRecursive() throws IOException, InterruptedException {
    FakeProjectFilesystem filesystem = new FakeProjectFilesystem();
    Path buildFile = Paths.get("a", "BUCK");
    filesystem.mkdirs(buildFile.getParent());
    filesystem.touch(buildFile);
    Path nestedBuildFile = Paths.get("a", "b", "BUCK");
    filesystem.mkdirs(nestedBuildFile.getParent());
    filesystem.touch(nestedBuildFile);
    // Test a non-recursive spec.
    BuildFileSpec nonRecursiveSpec =
        BuildFileSpec.fromPath(buildFile.getParent(), filesystem.getRootPath());
    ImmutableSet<Path> expectedBuildFiles = ImmutableSet.of(filesystem.resolve(buildFile));
    Cell cell = new TestCellBuilder().setFilesystem(filesystem).build();
    ImmutableSet<Path> actualBuildFiles =
        nonRecursiveSpec.findBuildFiles(cell, ParserConfig.BuildFileSearchMethod.FILESYSTEM_CRAWL);
    assertEquals(expectedBuildFiles, actualBuildFiles);
    // Test a recursive spec.
    BuildFileSpec recursiveSpec =
        BuildFileSpec.fromRecursivePath(buildFile.getParent(), filesystem.getRootPath());
    expectedBuildFiles =
        ImmutableSet.of(filesystem.resolve(buildFile), filesystem.resolve(nestedBuildFile));
    actualBuildFiles =
        recursiveSpec.findBuildFiles(cell, ParserConfig.BuildFileSearchMethod.FILESYSTEM_CRAWL);
    assertEquals(expectedBuildFiles, actualBuildFiles);
  }

  // BUCK files under a [project] ignore path must be excluded from a
  // recursive search.
  @Test
  public void recursiveIgnorePaths() throws IOException, InterruptedException {
    Path ignoredBuildFile = Paths.get("a", "b", "BUCK");
    Config config = ConfigBuilder.createFromText("[project]", "ignore = a/b");
    ProjectFilesystem filesystem = new ProjectFilesystem(tmp.getRoot().toPath(), config);
    Path buildFile = Paths.get("a", "BUCK");
    filesystem.mkdirs(buildFile.getParent());
    filesystem.writeContentsToPath("", buildFile);
    filesystem.mkdirs(ignoredBuildFile.getParent());
    filesystem.writeContentsToPath("", ignoredBuildFile);
    // Test a recursive spec with an ignored dir.
    BuildFileSpec recursiveSpec =
        BuildFileSpec.fromRecursivePath(buildFile.getParent(), filesystem.getRootPath());
    ImmutableSet<Path> expectedBuildFiles = ImmutableSet.of(filesystem.resolve(buildFile));
    Cell cell = new TestCellBuilder().setFilesystem(filesystem).build();
    ImmutableSet<Path> actualBuildFiles =
        recursiveSpec.findBuildFiles(cell, ParserConfig.BuildFileSearchMethod.FILESYSTEM_CRAWL);
    assertEquals(expectedBuildFiles, actualBuildFiles);
  }

  // A canned Watchman response should be translated into the expected set
  // of build-file paths.
  @Test
  public void findWithWatchmanSucceeds() throws IOException, InterruptedException {
    Path watchRoot = Paths.get(".").toAbsolutePath().normalize();
    FakeProjectFilesystem filesystem = new FakeProjectFilesystem(watchRoot.resolve("project-name"));
    Path buildFile = Paths.get("a", "BUCK");
    BuildFileSpec recursiveSpec =
        BuildFileSpec.fromRecursivePath(buildFile.getParent(), filesystem.getRootPath());
    ImmutableSet<Path> expectedBuildFiles = ImmutableSet.of(filesystem.resolve(buildFile));
    FakeWatchmanClient fakeWatchmanClient =
        new FakeWatchmanClient(
            0,
            ImmutableMap.of(
                ImmutableList.of(
                    "query",
                    watchRoot.toString(),
                    ImmutableMap.of(
                        "relative_root", "project-name",
                        "sync_timeout", 0,
                        "path", ImmutableList.of("a"),
                        "fields", ImmutableList.of("name"),
                        "expression",
                        ImmutableList.of(
                            "allof",
                            "exists",
                            ImmutableList.of("name", "BUCK"),
                            ImmutableList.of("type", "f")))),
                ImmutableMap.of("files", ImmutableList.of("a/BUCK"))));
    Cell cell =
        new TestCellBuilder()
            .setFilesystem(filesystem)
            .setWatchman(
                new Watchman(
                    ImmutableMap.of(
                        filesystem.getRootPath(),
                        ProjectWatch.of(watchRoot.toString(), Optional.of("project-name"))),
                    ImmutableSet.of(
                        Watchman.Capability.SUPPORTS_PROJECT_WATCH,
                        Watchman.Capability.DIRNAME,
                        Watchman.Capability.WILDMATCH_GLOB),
                    ImmutableMap.of(),
                    Optional.of(Paths.get(".watchman-sock")),
                    Optional.of(fakeWatchmanClient)))
            .build();
    ImmutableSet<Path> actualBuildFiles =
        recursiveSpec.findBuildFiles(cell, ParserConfig.BuildFileSearchMethod.WATCHMAN);
    assertEquals(expectedBuildFiles, actualBuildFiles);
  }

  // An IOException raised by the Watchman client must propagate out of
  // findBuildFiles unchanged.
  @Test
  public void findWithWatchmanThrowsOnFailure() throws IOException, InterruptedException {
    Path watchRoot = Paths.get(".").toAbsolutePath().normalize();
    FakeProjectFilesystem filesystem = new FakeProjectFilesystem(watchRoot.resolve("project-name"));
    Path buildFile = Paths.get("a", "BUCK");
    BuildFileSpec recursiveSpec =
        BuildFileSpec.fromRecursivePath(buildFile.getParent(), filesystem.getRootPath());
    FakeWatchmanClient fakeWatchmanClient =
        new FakeWatchmanClient(
            0,
            ImmutableMap.of(
                ImmutableList.of(
                    "query",
                    watchRoot.toString(),
                    ImmutableMap.of(
                        "relative_root", "project-name",
                        "sync_timeout", 0,
                        "path", ImmutableList.of("a"),
                        "fields", ImmutableList.of("name"),
                        "expression",
                        ImmutableList.of(
                            "allof",
                            "exists",
                            ImmutableList.of("name", "BUCK"),
                            ImmutableList.of("type", "f")))),
                ImmutableMap.of("files", ImmutableList.of("a/BUCK"))),
            new IOException("Whoopsie!"));
    Cell cell =
        new TestCellBuilder()
            .setFilesystem(filesystem)
            .setWatchman(
                new Watchman(
                    ImmutableMap.of(
                        filesystem.getRootPath(),
                        ProjectWatch.of(watchRoot.toString(), Optional.of("project-name"))),
                    ImmutableSet.of(
                        Watchman.Capability.SUPPORTS_PROJECT_WATCH,
                        Watchman.Capability.DIRNAME,
                        Watchman.Capability.WILDMATCH_GLOB),
                    ImmutableMap.of(),
                    Optional.of(Paths.get(".watchman-sock")),
                    Optional.of(fakeWatchmanClient)))
            .build();
    thrown.expect(IOException.class);
    thrown.expectMessage("Whoopsie!");
    recursiveSpec.findBuildFiles(cell, ParserConfig.BuildFileSearchMethod.WATCHMAN);
  }

  // If the Watchman query is too slow, findBuildFiles should fall back to
  // crawling the filesystem and still return every BUCK file.
  @Test
  public void findWithWatchmanFallsBackToFilesystemOnTimeout()
      throws IOException, InterruptedException {
    Path watchRoot = Paths.get(".").toAbsolutePath().normalize();
    FakeProjectFilesystem filesystem = new FakeProjectFilesystem(watchRoot.resolve("project-name"));
    Path buildFile = Paths.get("a", "BUCK");
    filesystem.mkdirs(buildFile.getParent());
    filesystem.touch(buildFile);
    Path nestedBuildFile = Paths.get("a", "b", "BUCK");
    filesystem.mkdirs(nestedBuildFile.getParent());
    filesystem.touch(nestedBuildFile);
    BuildFileSpec recursiveSpec =
        BuildFileSpec.fromRecursivePath(buildFile.getParent(), filesystem.getRootPath());
    FakeWatchmanClient timingOutWatchmanClient =
        new FakeWatchmanClient(
            // Pretend the query takes a very very long time.
            TimeUnit.SECONDS.toNanos(Long.MAX_VALUE),
            ImmutableMap.of(
                ImmutableList.of(
                    "query",
                    watchRoot.toString(),
                    ImmutableMap.of(
                        "relative_root", "project-name",
                        "sync_timeout", 0,
                        "path", ImmutableList.of("a"),
                        "fields", ImmutableList.of("name"),
                        "expression",
                        ImmutableList.of(
                            "allof",
                            "exists",
                            ImmutableList.of("name", "BUCK"),
                            ImmutableList.of("type", "f")))),
                ImmutableMap.of("files", ImmutableList.of("a/BUCK", "a/b/BUCK"))));
    Cell cell =
        new TestCellBuilder()
            .setFilesystem(filesystem)
            .setWatchman(
                new Watchman(
                    ImmutableMap.of(
                        filesystem.getRootPath(),
                        ProjectWatch.of(watchRoot.toString(), Optional.of("project-name"))),
                    ImmutableSet.of(
                        Watchman.Capability.SUPPORTS_PROJECT_WATCH,
                        Watchman.Capability.DIRNAME,
                        Watchman.Capability.WILDMATCH_GLOB),
                    ImmutableMap.of(),
                    Optional.of(Paths.get(".watchman-sock")),
                    Optional.of(timingOutWatchmanClient)))
            .build();
    ImmutableSet<Path> expectedBuildFiles =
        ImmutableSet.of(filesystem.resolve(buildFile), filesystem.resolve(nestedBuildFile));
    ImmutableSet<Path> actualBuildFiles =
        recursiveSpec.findBuildFiles(cell, ParserConfig.BuildFileSearchMethod.WATCHMAN);
    assertEquals(expectedBuildFiles, actualBuildFiles);
  }

  // A recursive spec rooted at a directory that does not exist should fail
  // with a human-readable error.
  @Test
  public void testWildcardFolderNotFound() throws IOException, InterruptedException {
    FakeProjectFilesystem filesystem = new FakeProjectFilesystem();
    Cell cell = new TestCellBuilder().setFilesystem(filesystem).build();
    BuildFileSpec recursiveSpec =
        BuildFileSpec.fromRecursivePath(filesystem.resolve("foo/bar"), filesystem.getRootPath());
    thrown.expect(HumanReadableException.class);
    thrown.expectMessage("could not be found");
    recursiveSpec.findBuildFiles(cell, ParserConfig.BuildFileSearchMethod.FILESYSTEM_CRAWL);
  }
}
| dsyang/buck | test/com/facebook/buck/parser/BuildFileSpecTest.java | Java | apache-2.0 | 11,992 |
using System.Collections.Generic;
using System.Runtime.CompilerServices;
namespace System.Html {
	// Saltarelle (Script#) binding for the browser's DOMRectList interface;
	// no C# code is emitted for this type — calls map onto the native object.
	[IgnoreNamespace, Imported(ObeysTypeSystem = true)]
	public partial class DOMRectList {
		// Instances are created by the browser; scripts never construct one.
		internal DOMRectList() {
		}
		// Maps to the native indexed accessor: rects[index].
		[IndexerName("__Item"), IntrinsicProperty]
		public DOMRect this[int index] {
			get {
				return default(DOMRect);
			}
		}
		// Allows foreach over the list; compiled to an ArrayEnumerator.
		[EnumerateAsArray, InlineCode("new {$System.ArrayEnumerator}({this})")]
		public IEnumerator<DOMRect> GetEnumerator() {
			return null;
		}
		// Maps to the native item(index) method.
		public DOMRect Item(int index) {
			return default(DOMRect);
		}
		// Maps to the native length property.
		[IntrinsicProperty]
		public int Length {
			get {
				return 0;
			}
		}
	}
}
| n9/SaltarelleWeb | Web/Generated/Html/DOMRectList.cs | C# | apache-2.0 | 642 |
/*
* Copyright 2000-2017 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.execution.runners;
import com.intellij.execution.*;
import com.intellij.execution.configurations.ConfigurationPerRunnerSettings;
import com.intellij.execution.configurations.RunConfiguration;
import com.intellij.execution.configurations.RunProfile;
import com.intellij.execution.configurations.RunnerSettings;
import com.intellij.execution.ui.RunContentDescriptor;
import com.intellij.openapi.actionSystem.DataContext;
import com.intellij.openapi.components.ServiceManager;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.util.UserDataHolderBase;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
/**
 * Fluent builder that assembles an {@link ExecutionEnvironment} — the bundle of
 * run profile, executor, target, runner and settings that the platform needs to
 * launch a run configuration. Obtain one via the static {@code create*} factories
 * (which also resolve a suitable {@link ProgramRunner}), configure it with the
 * chained setters, then call {@link #build()} or {@link #buildAndExecute()}.
 */
public final class ExecutionEnvironmentBuilder {
  @NotNull private RunProfile myRunProfile;
  @NotNull private ExecutionTarget myTarget = DefaultExecutionTarget.INSTANCE;
  @NotNull private final Project myProject;
  @Nullable private RunnerSettings myRunnerSettings;
  @Nullable private ConfigurationPerRunnerSettings myConfigurationSettings;
  // Existing run tab content to reuse instead of opening a new one.
  @Nullable private RunContentDescriptor myContentToReuse;
  @Nullable private RunnerAndConfigurationSettings myRunnerAndConfigurationSettings;
  // NOTE(review): no setter in this class ever assigns myRunnerId, so the
  // findRunnerById branch in build() looks unreachable from here — confirm
  // against external/reflective usages before relying on or removing it.
  @Nullable private String myRunnerId;
  private ProgramRunner<?> myRunner;
  private boolean myAssignNewId;
  @NotNull private Executor myExecutor;
  @Nullable private DataContext myDataContext;
  private final UserDataHolderBase myUserData = new UserDataHolderBase();

  public ExecutionEnvironmentBuilder(@NotNull Project project, @NotNull Executor executor) {
    myProject = project;
    myExecutor = executor;
  }

  /**
   * Creates a builder for the given profile, resolving a runner for the executor.
   *
   * @throws ExecutionException if no runner can handle {@code runProfile}.
   */
  @NotNull
  public static ExecutionEnvironmentBuilder create(@NotNull Project project, @NotNull Executor executor, @NotNull RunProfile runProfile) throws ExecutionException {
    ExecutionEnvironmentBuilder builder = createOrNull(project, executor, runProfile);
    if (builder == null) {
      throw new ExecutionException("Cannot find runner for " + runProfile.getName());
    }
    return builder;
  }

  /** Same as {@link #create(Project, Executor, RunProfile)} but returns null when no runner matches. */
  @Nullable
  public static ExecutionEnvironmentBuilder createOrNull(@NotNull Project project, @NotNull Executor executor, @NotNull RunProfile runProfile) {
    ProgramRunner runner = RunnerRegistry.getInstance().getRunner(executor.getId(), runProfile);
    if (runner == null) {
      return null;
    }
    return new ExecutionEnvironmentBuilder(project, executor).runner(runner).runProfile(runProfile);
  }

  /** Null-returning variant initialized from saved runner-and-configuration settings. */
  @Nullable
  public static ExecutionEnvironmentBuilder createOrNull(@NotNull Executor executor, @NotNull RunnerAndConfigurationSettings settings) {
    ExecutionEnvironmentBuilder builder = createOrNull(settings.getConfiguration().getProject(), executor, settings.getConfiguration());
    return builder == null ? null : builder.runnerAndSettings(builder.myRunner, settings);
  }

  /** Throwing variant initialized from saved runner-and-configuration settings. */
  @NotNull
  public static ExecutionEnvironmentBuilder create(@NotNull Executor executor, @NotNull RunnerAndConfigurationSettings settings) throws ExecutionException {
    RunConfiguration configuration = settings.getConfiguration();
    ExecutionEnvironmentBuilder builder = create(configuration.getProject(), executor, configuration);
    return builder.runnerAndSettings(builder.myRunner, settings);
  }

  /** Creates a builder for a bare configuration; the runner is resolved lazily in {@link #build()}. */
  @NotNull
  public static ExecutionEnvironmentBuilder create(@NotNull Executor executor, @NotNull RunConfiguration configuration) {
    return new ExecutionEnvironmentBuilder(configuration.getProject(), executor).runProfile(configuration);
  }

  @NotNull
  Executor getExecutor() {
    return myExecutor;
  }

  /**
   * Creates an execution environment builder initialized with a copy of the specified environment.
   *
   * @param copySource the environment to copy from.
   */
  public ExecutionEnvironmentBuilder(@NotNull ExecutionEnvironment copySource) {
    myTarget = copySource.getExecutionTarget();
    myProject = copySource.getProject();
    myRunnerAndConfigurationSettings = copySource.getRunnerAndConfigurationSettings();
    myRunProfile = copySource.getRunProfile();
    myRunnerSettings = copySource.getRunnerSettings();
    myConfigurationSettings = copySource.getConfigurationSettings();
    myRunner = copySource.getRunner();
    myContentToReuse = copySource.getContentToReuse();
    myExecutor = copySource.getExecutor();
    // Carry over arbitrary user data attached to the source environment.
    copySource.copyUserDataTo(myUserData);
  }

  /** Sets the execution target; a null argument is ignored (keeps the current target). */
  public ExecutionEnvironmentBuilder target(@Nullable ExecutionTarget target) {
    if (target != null) {
      myTarget = target;
    }
    return this;
  }

  /** Uses the project's currently active execution target. */
  public ExecutionEnvironmentBuilder activeTarget() {
    myTarget = ExecutionTargetManager.getActiveTarget(myProject);
    return this;
  }

  /**
   * Initializes runner, profile and per-runner settings from saved settings in one call.
   */
  public ExecutionEnvironmentBuilder runnerAndSettings(@NotNull ProgramRunner runner,
                                                      @NotNull RunnerAndConfigurationSettings settings) {
    myRunnerAndConfigurationSettings = settings;
    myRunProfile = settings.getConfiguration();
    myRunnerSettings = settings.getRunnerSettings(runner);
    myConfigurationSettings = settings.getConfigurationSettings(runner);
    myRunner = runner;
    return this;
  }

  public ExecutionEnvironmentBuilder runnerSettings(@Nullable RunnerSettings runnerSettings) {
    myRunnerSettings = runnerSettings;
    return this;
  }

  public ExecutionEnvironmentBuilder contentToReuse(@Nullable RunContentDescriptor contentToReuse) {
    myContentToReuse = contentToReuse;
    return this;
  }

  public ExecutionEnvironmentBuilder runProfile(@NotNull RunProfile runProfile) {
    myRunProfile = runProfile;
    return this;
  }

  public ExecutionEnvironmentBuilder runner(@NotNull ProgramRunner<?> runner) {
    myRunner = runner;
    return this;
  }

  public ExecutionEnvironmentBuilder dataContext(@Nullable DataContext dataContext) {
    myDataContext = dataContext;
    return this;
  }

  public ExecutionEnvironmentBuilder executor(@NotNull Executor executor) {
    myExecutor = executor;
    return this;
  }

  /**
   * Assembles the {@link ExecutionEnvironment}.
   *
   * <p>A project-level {@link ExecutionEnvironmentProvider} service, if present, gets
   * first chance to supply the environment; otherwise the runner is resolved (if not
   * set explicitly) and a fresh environment is constructed from the builder state.
   *
   * @throws IllegalStateException if no runner was set and none could be resolved.
   */
  @NotNull
  public ExecutionEnvironment build() {
    ExecutionEnvironment environment = null;
    ExecutionEnvironmentProvider environmentProvider = ServiceManager.getService(myProject, ExecutionEnvironmentProvider.class);
    if (environmentProvider != null) {
      environment = environmentProvider.createExecutionEnvironment(
        myProject, myRunProfile, myExecutor, myTarget, myRunnerSettings, myConfigurationSettings, myRunnerAndConfigurationSettings);
    }
    // Resolve the runner lazily when the provider did not supply an environment.
    if (environment == null && myRunner == null) {
      if (myRunnerId == null) {
        myRunner = RunnerRegistry.getInstance().getRunner(myExecutor.getId(), myRunProfile);
      }
      else {
        myRunner = RunnerRegistry.getInstance().findRunnerById(myRunnerId);
      }
    }
    // Same condition re-checked: resolution above may still have produced no runner.
    if (environment == null && myRunner == null) {
      throw new IllegalStateException("Runner must be specified");
    }
    if (environment == null) {
      environment = new ExecutionEnvironment(myRunProfile, myExecutor, myTarget, myProject, myRunnerSettings,
                                             myConfigurationSettings, myContentToReuse, myRunnerAndConfigurationSettings, myRunner);
    }
    if (myAssignNewId) {
      environment.assignNewExecutionId();
    }
    if (myDataContext != null) {
      environment.setDataContext(myDataContext);
    }
    myUserData.copyUserDataTo(environment);
    return environment;
  }

  /** Builds the environment and immediately submits it to the resolved runner. */
  public void buildAndExecute() throws ExecutionException {
    ExecutionEnvironment environment = build();
    myRunner.execute(environment);
  }
}
| jk1/intellij-community | platform/lang-api/src/com/intellij/execution/runners/ExecutionEnvironmentBuilder.java | Java | apache-2.0 | 8,057 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import itertools
import json
import re
from itertools import imap
from operator import itemgetter
from django.utils.translation import ugettext as _
from desktop.lib import thrift_util
from desktop.conf import DEFAULT_USER
from hadoop import cluster
from TCLIService import TCLIService
from TCLIService.ttypes import TOpenSessionReq, TGetTablesReq, TFetchResultsReq,\
TStatusCode, TGetResultSetMetadataReq, TGetColumnsReq, TTypeId,\
TExecuteStatementReq, TGetOperationStatusReq, TFetchOrientation,\
TCloseSessionReq, TGetSchemasReq, TGetLogReq, TCancelOperationReq,\
TCloseOperationReq, TFetchResultsResp, TRowSet, TProtocolVersion
from beeswax import conf as beeswax_conf
from beeswax import hive_site
from beeswax.hive_site import hiveserver2_use_ssl
from beeswax.models import Session, HiveServerQueryHandle, HiveServerQueryHistory
from beeswax.server.dbms import Table, NoSuchObjectException, DataTable,\
QueryServerException
LOG = logging.getLogger(__name__)

# Impala query option controlling how many result rows the server may cache
# so that fetches can be restarted; set per-query in execute_async_query().
IMPALA_RESULTSET_CACHE_SIZE = 'impala.resultset.cache.size'

# Resolve the configured default user once at import time (shadows the imported
# conf property on purpose). Used as the session owner when HS2 impersonation
# is enabled: Hue authenticates as this user and proxies as the end user.
DEFAULT_USER = DEFAULT_USER.get()
class HiveServerTable(Table):
  """
  Hive/Impala table metadata, assembled from a Thrift GetTables row plus the
  output of a DESCRIBE FORMATTED statement.

  ``self.describe`` holds the raw DESCRIBE FORMATTED rows as dicts keyed by
  'col_name', 'data_type' and 'comment'; most properties below are positional
  parses of that output, which is why several of them use broad best-effort
  exception handling (the layout differs between Hive and Impala).
  """

  def __init__(self, table_results, table_schema, desc_results, desc_schema):
    # Protocol v7+ returns column-oriented results; older protocols row-oriented.
    if beeswax_conf.THRIFT_VERSION.get() >= 7:
      if not table_results.columns:
        raise NoSuchObjectException()
      self.table = table_results.columns
    else: # Deprecated. To remove in Hue 4.
      if not table_results.rows:
        raise NoSuchObjectException()
      self.table = table_results.rows and table_results.rows[0] or ''

    self.table_schema = table_schema
    self.desc_results = desc_results
    self.desc_schema = desc_schema

    # Normalized DESCRIBE FORMATTED rows: list of dicts with
    # 'col_name'/'data_type'/'comment' keys.
    self.describe = HiveServerTTableSchema(self.desc_results, self.desc_schema).cols()

  @property
  def name(self):
    return HiveServerTRow(self.table, self.table_schema).col('TABLE_NAME')

  @property
  def is_view(self):
    return HiveServerTRow(self.table, self.table_schema).col('TABLE_TYPE') == 'VIEW'

  @property
  def partition_keys(self):
    # Best effort: Impala's DESCRIBE output lacks the partition section.
    try:
      return [PartitionKeyCompatible(row['col_name'], row['data_type'], row['comment']) for row in self._get_partition_column()]
    except:
      LOG.exception('failed to get partition keys')
      return []

  @property
  def path_location(self):
    # HDFS location of the table, parsed from the 'Location:' DESCRIBE row.
    try:
      rows = self.describe
      rows = [row for row in rows if row['col_name'].startswith('Location:')]
      if rows:
        return rows[0]['data_type']
    except:
      LOG.exception('failed to get path location')
    return None

  @property
  def cols(self):
    # Column rows start after the two DESCRIBE FORMATTED header rows and end at
    # the first blank 'col_name'; partition columns are appended when present.
    rows = self.describe
    col_row_index = 2
    try:
      end_cols_index = map(itemgetter('col_name'), rows[col_row_index:]).index('')
      return rows[col_row_index:][:end_cols_index] + self._get_partition_column()
    except ValueError: # DESCRIBE on columns and nested columns does not contain add'l rows beyond cols
      return rows[col_row_index:]
    except:
      # Impala does not have it
      return rows

  def _get_partition_column(self):
    # Locate the '# Partition Information' section; data rows start 3 rows
    # below the header (skipping the section's own header/format rows) and end
    # at the next blank 'col_name'.
    rows = self.describe
    try:
      col_row_index = map(itemgetter('col_name'), rows).index('# Partition Information') + 3
      end_cols_index = map(itemgetter('col_name'), rows[col_row_index:]).index('')
      return rows[col_row_index:][:end_cols_index]
    except:
      # Impala does not have it
      return []

  @property
  def comment(self):
    return HiveServerTRow(self.table, self.table_schema).col('REMARKS')

  @property
  def properties(self):
    # Everything after the column section (detailed table info), whitespace-trimmed.
    rows = self.describe
    col_row_index = 2
    end_cols_index = map(itemgetter('col_name'), rows[col_row_index:]).index('')
    return [{
          'col_name': prop['col_name'].strip() if prop['col_name'] else prop['col_name'],
          'data_type': prop['data_type'].strip() if prop['data_type'] else prop['data_type'],
          'comment': prop['comment'].strip() if prop['comment'] else prop['comment']
        } for prop in rows[col_row_index + end_cols_index + 1:]
    ]

  @property
  def stats(self):
    # The 'Table Parameters:' sub-section of properties, up to the first row
    # with an empty data_type.
    rows = self.properties
    col_row_index = map(itemgetter('col_name'), rows).index('Table Parameters:') + 1
    end_cols_index = map(itemgetter('data_type'), rows[col_row_index:]).index(None)
    return rows[col_row_index:][:end_cols_index]

  @property
  def has_complex(self):
    # True if any column's type matches a complex type (struct/array/map/union).
    has_complex = False
    complex_types = ["struct", "array", "map", "uniontype"]
    patterns = [re.compile(typ) for typ in complex_types]

    for column in self.cols:
      if isinstance(column, dict) and 'data_type' in column:
        column_type = column['data_type']
      else: # Col object
        column_type = column.type
      if column_type and any(p.match(column_type.lower()) for p in patterns):
        has_complex = True
        break

    return has_complex
class HiveServerTRowSet2:
  """Wrapper over a column-oriented Thrift TRowSet (HS2 protocol v7+).

  Aliased to ``HiveServerTRowSet`` at module level when THRIFT_VERSION >= 7.
  """

  def __init__(self, row_set, schema):
    self.row_set = row_set
    self.rows = row_set.rows
    self.schema = schema
    self.startRowOffset = row_set.startRowOffset

  def is_empty(self):
    # Empty when there are no columns at all, or the first column has no values.
    return not self.row_set.columns or not HiveServerTColumnValue2(self.row_set.columns[0]).val

  def cols(self, col_names):
    # Project the row set onto the named columns, returning one dict per row.
    cols_rows = []

    rs = HiveServerTRow2(self.row_set.columns, self.schema)
    cols = [rs.full_col(name) for name in col_names]

    for cols_row in itertools.izip(*cols):
      cols_rows.append(dict(itertools.izip(col_names, cols_row)))

    return cols_rows

  def __iter__(self):
    return self

  def next(self):
    # Python 2 iterator protocol. Note: termination actually happens inside
    # HiveServerTRow2.fields(), which raises StopIteration once the
    # destructively-popped column values run dry.
    if self.row_set.columns:
      return HiveServerTRow2(self.row_set.columns, self.schema)
    else:
      raise StopIteration
class HiveServerTRow2:
  """Row facade over column-oriented TRowSet data (HS2 protocol v7+)."""

  def __init__(self, cols, schema):
    self.cols = cols      # TRowSet.columns, parallel to schema.columns
    self.schema = schema

  def col(self, colName):
    # First value of the named column only.
    pos = self._get_col_position(colName)
    return HiveServerTColumnValue2(self.cols[pos]).val[0] # Return only first element

  def full_col(self, colName):
    # All values of the named column.
    pos = self._get_col_position(colName)
    return HiveServerTColumnValue2(self.cols[pos]).val # Return the full column and its values

  def _get_col_position(self, column_name):
    # Index of the first schema column whose columnName matches.
    # (Python 2 tuple-parameter lambda; raises IndexError if absent.)
    return filter(lambda (i, col): col.columnName == column_name, enumerate(self.schema.columns))[0][0]

  def fields(self):
    # Pop the next value off every column (destructive). Raising StopIteration
    # here is what terminates iteration over HiveServerTRowSet2.
    try:
      return [HiveServerTColumnValue2(field).val.pop(0) for field in self.cols]
    except IndexError:
      raise StopIteration
class HiveServerTColumnValue2:
  """Unwraps a Thrift TColumn union and lazily applies its null bitmask."""

  def __init__(self, tcolumn_value):
    self.column_value = tcolumn_value

  @property
  def val(self):
    # Probe the union members in turn and return the (null-substituted) values
    # of whichever one is populated.
    # NOTE(review): stringVal is tested for truthiness while the others test
    # `is not None` — an empty stringVal wrapper would fall through to the
    # numeric branches; confirm this asymmetry is intended.
    # Could directly get index from schema but would need to cache the schema
    if self.column_value.stringVal:
      return self._get_val(self.column_value.stringVal)
    elif self.column_value.i16Val is not None:
      return self._get_val(self.column_value.i16Val)
    elif self.column_value.i32Val is not None:
      return self._get_val(self.column_value.i32Val)
    elif self.column_value.i64Val is not None:
      return self._get_val(self.column_value.i64Val)
    elif self.column_value.doubleVal is not None:
      return self._get_val(self.column_value.doubleVal)
    elif self.column_value.boolVal is not None:
      return self._get_val(self.column_value.boolVal)
    elif self.column_value.byteVal is not None:
      return self._get_val(self.column_value.byteVal)
    elif self.column_value.binaryVal is not None:
      return self._get_val(self.column_value.binaryVal)

  @classmethod
  def _get_val(cls, column):
    # Mutates the Thrift column in place: substitutes None for nulls once, then
    # clears the mask so repeated access does not re-apply it.
    column.values = cls.set_nulls(column.values, column.nulls)
    column.nulls = '' # Clear the null values for not re-marking again the column with nulls at the next call
    return column.values

  @classmethod
  def mark_nulls(cls, values, bytestring):
    # Yield one flag per value, decoding the bitmask LSB-first: each byte of
    # ``bytestring`` covers eight values and a set bit marks a NULL.
    # (``values`` itself is unused; only the mask drives the generator.)
    mask = bytearray(bytestring)

    for n in mask:
      yield n & 0x01
      yield n & 0x02
      yield n & 0x04
      yield n & 0x08
      yield n & 0x10
      yield n & 0x20
      yield n & 0x40
      yield n & 0x80

  @classmethod
  def set_nulls(cls, values, bytestring):
    # Return ``values`` with None substituted wherever the mask marks a null.
    if bytestring == '' or re.match('^(\x00)+$', bytestring): # HS2 has just \x00 or '', Impala can have \x00\x00...
      return values
    else:
      _values = [None if is_null else value for value, is_null in itertools.izip(values, cls.mark_nulls(values, bytestring))]
      if len(values) != len(_values): # HS2 can have just \x00\x01 instead of \x00\x01\x00...
        # Mask was shorter than the value list: the remaining values are non-null.
        _values.extend(values[len(_values):])
      return _values
class HiveServerDataTable(DataTable):
  """DataTable adapter over one fetched page of HS2 results.

  ``rows()`` is destructive: it pops values out of the underlying row set.
  """

  def __init__(self, results, schema, operation_handle, query_server):
    self.schema = schema and schema.schema
    self.row_set = HiveServerTRowSet(results.results, schema)
    self.operation_handle = operation_handle
    if query_server['server_name'] == 'impala':
      self.has_more = results.hasMoreRows
    else:
      self.has_more = not self.row_set.is_empty() # Should be results.hasMoreRows but always True in HS2
    self.startRowOffset = self.row_set.startRowOffset # Always 0 in HS2

  @property
  def ready(self):
    # Data is already fetched by the time this object exists.
    return True

  def cols(self):
    if self.schema:
      return [HiveServerTColumnDesc(col) for col in self.schema.columns]
    else:
      return []

  def rows(self):
    # Yields one list of field values per row; consumes the row set.
    for row in self.row_set:
      yield row.fields()
class HiveServerTTableSchema:
  """Normalizes DESCRIBE result rows to dicts keyed 'col_name'/'data_type'/'comment'."""

  def __init__(self, columns, schema):
    self.columns = columns
    self.schema = schema

  def cols(self):
    try:
      return HiveServerTRowSet(self.columns, self.schema).cols(('col_name', 'data_type', 'comment'))
    except:
      # Impala API is different: it names the columns 'name'/'type'; rename the
      # keys so callers see a single shape.
      cols = HiveServerTRowSet(self.columns, self.schema).cols(('name', 'type', 'comment'))
      for col in cols:
        col['col_name'] = col.pop('name')
        col['data_type'] = col.pop('type')
      return cols

  def col(self, colName):
    # NOTE(review): HiveServerTColumnDesc defines no `val` attribute, so this
    # path would raise AttributeError if exercised — confirm whether it is dead.
    pos = self._get_col_position(colName)
    return HiveServerTColumnDesc(self.columns[pos]).val

  def _get_col_position(self, column_name):
    # Index of the first schema column matching by name (py2 tuple-param lambda).
    return filter(lambda (i, col): col.columnName == column_name, enumerate(self.schema.columns))[0][0]
# Pick row/row-set implementations matching the configured HS2 protocol:
# v7+ uses the column-oriented wrappers defined above; older protocols fall
# back to the deprecated row-oriented classes defined below.
if beeswax_conf.THRIFT_VERSION.get() >= 7:
  HiveServerTRow = HiveServerTRow2
  HiveServerTRowSet = HiveServerTRowSet2
else:
  # Deprecated. To remove in Hue 4.
  class HiveServerTRow:
    # Row facade over a row-oriented Thrift TRow (HS2 protocol < 7).
    def __init__(self, row, schema):
      self.row = row
      self.schema = schema

    def col(self, colName):
      pos = self._get_col_position(colName)
      return HiveServerTColumnValue(self.row.colVals[pos]).val

    def _get_col_position(self, column_name):
      # Index of the first schema column matching by name (py2 tuple-param lambda).
      return filter(lambda (i, col): col.columnName == column_name, enumerate(self.schema.columns))[0][0]

    def fields(self):
      return [HiveServerTColumnValue(field).val for field in self.row.colVals]

  class HiveServerTRowSet:
    # Row-set wrapper over a row-oriented TRowSet (HS2 protocol < 7).
    def __init__(self, row_set, schema):
      self.row_set = row_set
      self.rows = row_set.rows
      self.schema = schema
      self.startRowOffset = row_set.startRowOffset

    def is_empty(self):
      return len(self.rows) == 0

    def cols(self, col_names):
      # One dict per row, restricted to the named columns.
      cols_rows = []
      for row in self.rows:
        row = HiveServerTRow(row, self.schema)
        cols = {}
        for col_name in col_names:
          cols[col_name] = row.col(col_name)
        cols_rows.append(cols)
      return cols_rows

    def __iter__(self):
      return self

    def next(self):
      # Python 2 iterator protocol; consumes self.rows destructively.
      if self.rows:
        return HiveServerTRow(self.rows.pop(0), self.schema)
      else:
        raise StopIteration

  class HiveServerTColumnValue:
    # Unwraps a row-oriented TColumnValue union (HS2 protocol < 7).
    def __init__(self, tcolumn_value):
      self.column_value = tcolumn_value

    @property
    def val(self):
      # Return the value of whichever union member is populated.
      if self.column_value.boolVal is not None:
        return self.column_value.boolVal.value
      elif self.column_value.byteVal is not None:
        return self.column_value.byteVal.value
      elif self.column_value.i16Val is not None:
        return self.column_value.i16Val.value
      elif self.column_value.i32Val is not None:
        return self.column_value.i32Val.value
      elif self.column_value.i64Val is not None:
        return self.column_value.i64Val.value
      elif self.column_value.doubleVal is not None:
        return self.column_value.doubleVal.value
      elif self.column_value.stringVal is not None:
        return self.column_value.stringVal.value
class HiveServerTColumnDesc:
  """Read-only view over a Thrift TColumnDesc (name, comment, type)."""

  def __init__(self, column):
    self.column = column

  @property
  def name(self):
    return self.column.columnName

  @property
  def comment(self):
    return self.column.comment

  @property
  def type(self):
    return self.get_type(self.column.typeDesc)

  # Note: declared @classmethod but the first parameter is named `self`;
  # it behaves as a classmethod (callable as HiveServerTColumnDesc.get_type(td)).
  @classmethod
  def get_type(self, typeDesc):
    # Return the primitive type name, or the raw Thrift entry for complex types.
    for ttype in typeDesc.types:
      if ttype.primitiveEntry is not None:
        return TTypeId._VALUES_TO_NAMES[ttype.primitiveEntry.type]
      elif ttype.mapEntry is not None:
        return ttype.mapEntry
      elif ttype.unionEntry is not None:
        return ttype.unionEntry
      elif ttype.arrayEntry is not None:
        return ttype.arrayEntry
      elif ttype.structEntry is not None:
        return ttype.structEntry
      elif ttype.userDefinedTypeEntry is not None:
        return ttype.userDefinedTypeEntry
class HiveServerClient:
  """Thrift client for one HiveServer2-protocol server (Hive, Impala, Spark SQL).

  Wraps a TCLIService.Client with transport/security setup (see __init__ and
  get_security) plus per-user session management (see call/open_session), and
  exposes helpers mirroring common metadata and query operations.
  """

  # Map of hive.server2.authentication values to the SASL mechanism used on
  # the wire. LDAP and PAM authenticate with username/password over PLAIN.
  HS2_MECHANISMS = {
      'KERBEROS': 'GSSAPI',
      'NONE': 'PLAIN',
      'NOSASL': 'NOSASL',
      'LDAP': 'PLAIN',
      'PAM': 'PLAIN'
  }
  def __init__(self, query_server, user):
    """Build the underlying Thrift client from a query_server config dict.

    ``query_server`` carries host/port, server_name ('impala', 'beeswax', ...),
    security principal and pass-through credentials; ``user`` is the Django user
    on whose behalf requests are made.
    """
    self.query_server = query_server
    self.user = user

    use_sasl, mechanism, kerberos_principal_short_name, impersonation_enabled, auth_username, auth_password = self.get_security()
    LOG.info('use_sasl=%s, mechanism=%s, kerberos_principal_short_name=%s, impersonation_enabled=%s, auth_username=%s' % (
             use_sasl, mechanism, kerberos_principal_short_name, impersonation_enabled, auth_username))

    self.use_sasl = use_sasl
    self.kerberos_principal_short_name = kerberos_principal_short_name
    self.impersonation_enabled = impersonation_enabled

    # SSL and timeout settings come from the app matching the target server.
    # The impala conf is imported lazily to avoid a hard dependency when the
    # Impala app is not in use.
    if self.query_server['server_name'] == 'impala':
      from impala import conf as impala_conf

      ssl_enabled = impala_conf.SSL.ENABLED.get()
      ca_certs = impala_conf.SSL.CACERTS.get()
      keyfile = impala_conf.SSL.KEY.get()
      certfile = impala_conf.SSL.CERT.get()
      validate = impala_conf.SSL.VALIDATE.get()
      timeout = impala_conf.SERVER_CONN_TIMEOUT.get()
    else:
      ssl_enabled = hiveserver2_use_ssl()
      ca_certs = beeswax_conf.SSL.CACERTS.get()
      keyfile = beeswax_conf.SSL.KEY.get()
      certfile = beeswax_conf.SSL.CERT.get()
      validate = beeswax_conf.SSL.VALIDATE.get()
      timeout = beeswax_conf.SERVER_CONN_TIMEOUT.get()

    # Pass-through (LDAP/PAM) credentials win over the Django username.
    if auth_username:
      username = auth_username
      password = auth_password
    else:
      username = user.username
      password = None

    self._client = thrift_util.get_client(TCLIService.Client,
                                          query_server['server_host'],
                                          query_server['server_port'],
                                          service_name=query_server['server_name'],
                                          kerberos_principal=kerberos_principal_short_name,
                                          use_sasl=use_sasl,
                                          mechanism=mechanism,
                                          username=username,
                                          password=password,
                                          timeout_seconds=timeout,
                                          use_ssl=ssl_enabled,
                                          ca_certs=ca_certs,
                                          keyfile=keyfile,
                                          certfile=certfile,
                                          validate=validate,
                                          transport_mode=query_server.get('transport_mode', 'socket'),
                                          http_url=query_server.get('http_url', '')
    )
  def get_security(self):
    """Determine the transport security settings for this server.

    Returns a 6-tuple: (use_sasl, mechanism, kerberos_principal_short_name,
    impersonation_enabled, auth_username, auth_password).
    """
    principal = self.query_server['principal']
    impersonation_enabled = False
    auth_username = self.query_server['auth_username'] # Pass-through LDAP/PAM authentication
    auth_password = self.query_server['auth_password']

    if principal:
      # 'primary/instance@REALM' -> 'primary'
      kerberos_principal_short_name = principal.split('/', 1)[0]
    else:
      kerberos_principal_short_name = None

    if self.query_server['server_name'] == 'impala':
      if auth_password: # Force LDAP/PAM.. auth if auth_password is provided
        use_sasl = True
        mechanism = HiveServerClient.HS2_MECHANISMS['NONE']
      else:
        # Otherwise fall back to cluster security: Kerberos when enabled.
        cluster_conf = cluster.get_cluster_conf_for_job_submission()
        use_sasl = cluster_conf is not None and cluster_conf.SECURITY_ENABLED.get()
        mechanism = HiveServerClient.HS2_MECHANISMS['KERBEROS']
      impersonation_enabled = self.query_server['impersonation_enabled']
    else:
      # Hive: trust hive-site.xml's authentication setting.
      hive_mechanism = hive_site.get_hiveserver2_authentication()
      if hive_mechanism not in HiveServerClient.HS2_MECHANISMS:
        raise Exception(_('%s server authentication not supported. Valid are %s.') % (hive_mechanism, HiveServerClient.HS2_MECHANISMS.keys()))
      use_sasl = hive_mechanism in ('KERBEROS', 'NONE', 'LDAP', 'PAM')
      mechanism = HiveServerClient.HS2_MECHANISMS[hive_mechanism]
      impersonation_enabled = hive_site.hiveserver2_impersonation_enabled()

    return use_sasl, mechanism, kerberos_principal_short_name, impersonation_enabled, auth_username, auth_password
  def open_session(self, user):
    """Open a new HS2 session for ``user`` and persist it as a Session model.

    When impersonation is enabled, Hue authenticates as DEFAULT_USER and passes
    the real username through the appropriate proxy/doas configuration key.
    Raises QueryServerException if the server rejects the OpenSession request.
    """
    kwargs = {
        # Thrift enum is 0-based while our setting is 1-based.
        'client_protocol': beeswax_conf.THRIFT_VERSION.get() - 1,
        'username': user.username, # With SASL/LDAP the effective username comes from the authentication mechanism itself.
        'configuration': {},
    }

    if self.impersonation_enabled:
      kwargs.update({'username': DEFAULT_USER})

      if self.query_server['server_name'] == 'impala': # Only when Impala accepts it
        kwargs['configuration'].update({'impala.doas.user': user.username})

    if self.query_server['server_name'] == 'beeswax': # All the time
      kwargs['configuration'].update({'hive.server2.proxy.user': user.username})

    if self.query_server['server_name'] == 'sparksql': # All the time
      kwargs['configuration'].update({'hive.server2.proxy.user': user.username})

    req = TOpenSessionReq(**kwargs)
    res = self._client.OpenSession(req)

    if res.status is not None and res.status.statusCode not in (TStatusCode.SUCCESS_STATUS,):
      if hasattr(res.status, 'errorMessage') and res.status.errorMessage:
        message = res.status.errorMessage
      else:
        message = ''
      raise QueryServerException(Exception('Bad status for request %s:\n%s' % (req, res)), message=message)

    sessionId = res.sessionHandle.sessionId
    LOG.info('Opening session %s' % sessionId)

    # Store the session handle (secret/guid) encoded so it can be rebuilt later.
    encoded_status, encoded_guid = HiveServerQueryHandle(secret=sessionId.secret, guid=sessionId.guid).get()
    properties = json.dumps(res.configuration)

    return Session.objects.create(owner=user,
                                  application=self.query_server['server_name'],
                                  status_code=res.status.statusCode,
                                  secret=encoded_status,
                                  guid=encoded_guid,
                                  server_protocol_version=res.serverProtocolVersion,
                                  properties=properties)
  def call(self, fn, req, status=TStatusCode.SUCCESS_STATUS):
    """Invoke a Thrift RPC with session management and error translation.

    Ensures a session exists (opening one if needed), injects the session
    handle into ``req`` when it has an unset ``sessionHandle``, retries once
    with a fresh session if the server reports the session as invalid/expired,
    and raises QueryServerException on any non-success status. Pass
    ``status=None`` to skip the status check entirely.
    """
    session = Session.objects.get_session(self.user, self.query_server['server_name'])

    if session is None:
      session = self.open_session(self.user)

    if hasattr(req, 'sessionHandle') and req.sessionHandle is None:
      req.sessionHandle = session.get_handle()

    res = fn(req)

    # Not supported currently in HS2 and Impala: TStatusCode.INVALID_HANDLE_STATUS
    if res.status.statusCode == TStatusCode.ERROR_STATUS and \
        re.search('Invalid SessionHandle|Invalid session|Client session expired', res.status.errorMessage or '', re.I):
      LOG.info('Retrying with a new session because for %s of %s' % (self.user, res))

      session = self.open_session(self.user)
      req.sessionHandle = session.get_handle()

      # Get back the name of the function to call
      # (fn.attr is presumably set by thrift_util's client wrapper — confirm.)
      res = getattr(self._client, fn.attr)(req)

    if status is not None and res.status.statusCode not in (
      TStatusCode.SUCCESS_STATUS, TStatusCode.SUCCESS_WITH_INFO_STATUS, TStatusCode.STILL_EXECUTING_STATUS):
      if hasattr(res.status, 'errorMessage') and res.status.errorMessage:
        message = res.status.errorMessage
      else:
        message = ''
      raise QueryServerException(Exception('Bad status for request %s:\n%s' % (req, res)), message=message)
    else:
      return res
def close_session(self, sessionHandle):
req = TCloseSessionReq(sessionHandle=sessionHandle)
return self._client.CloseSession(req)
  def get_databases(self):
    """List databases via GetSchemas, as dicts with a 'TABLE_SCHEM' key."""
    # GetCatalogs() is not implemented in HS2
    req = TGetSchemasReq()
    res = self.call(self._client.GetSchemas, req)

    results, schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=5000)
    self.close_operation(res.operationHandle)

    col = 'TABLE_SCHEM'
    return HiveServerTRowSet(results.results, schema.schema).cols((col,))
  def get_database(self, database):
    """Return one dict of ('db_name', 'comment', 'location') for ``database``.

    Hive only — raises NotImplementedError for Impala, which lacks the
    DESCRIBE DATABASE command (see linked issue).
    """
    if self.query_server['server_name'] == 'impala':
      raise NotImplementedError(_("Impala has not implemented the 'DESCRIBE DATABASE' command: %(issue_ref)s") % {
        'issue_ref': "https://issues.cloudera.org/browse/IMPALA-2196"
      })

    # The database name is interpolated into the SQL text (backquoted).
    query = 'DESCRIBE DATABASE EXTENDED `%s`' % (database)
    (desc_results, desc_schema), operation_handle = self.execute_statement(query, max_rows=5000, orientation=TFetchOrientation.FETCH_NEXT)
    self.close_operation(operation_handle)

    cols = ('db_name', 'comment', 'location')

    if len(HiveServerTRowSet(desc_results.results, desc_schema.schema).cols(cols)) != 1:
      raise ValueError(_("%(query)s returned more than 1 row") % {'query': query})

    return HiveServerTRowSet(desc_results.results, desc_schema.schema).cols(cols)[0]  # Should only contain one row
  def get_tables_meta(self, database, table_names):
    """List tables matching ``table_names`` (a GetTables pattern) in ``database``.

    Returns dicts keyed 'TABLE_NAME', 'TABLE_TYPE' and 'REMARKS'.
    """
    req = TGetTablesReq(schemaName=database, tableName=table_names)
    res = self.call(self._client.GetTables, req)

    results, schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=5000)
    self.close_operation(res.operationHandle)

    cols = ('TABLE_NAME', 'TABLE_TYPE', 'REMARKS')
    return HiveServerTRowSet(results.results, schema.schema).cols(cols)
  def get_tables(self, database, table_names):
    """Like get_tables_meta(), but returns only the 'TABLE_NAME' column."""
    req = TGetTablesReq(schemaName=database, tableName=table_names)
    res = self.call(self._client.GetTables, req)

    results, schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=5000)
    self.close_operation(res.operationHandle)

    return HiveServerTRowSet(results.results, schema.schema).cols(('TABLE_NAME',))
  def get_table(self, database, table_name, partition_spec=None):
    """Build a HiveServerTable from GetTables plus DESCRIBE FORMATTED output.

    ``partition_spec``, when given, is spliced verbatim into the DESCRIBE
    statement's PARTITION(...) clause — callers must supply a well-formed spec.
    """
    req = TGetTablesReq(schemaName=database, tableName=table_name)
    res = self.call(self._client.GetTables, req)

    table_results, table_schema = self.fetch_result(res.operationHandle, orientation=TFetchOrientation.FETCH_NEXT)
    self.close_operation(res.operationHandle)

    if partition_spec:
      query = 'DESCRIBE FORMATTED `%s`.`%s` PARTITION(%s)' % (database, table_name, partition_spec)
    else:
      query = 'DESCRIBE FORMATTED `%s`.`%s`' % (database, table_name)

    (desc_results, desc_schema), operation_handle = self.execute_statement(query, max_rows=5000, orientation=TFetchOrientation.FETCH_NEXT)
    self.close_operation(operation_handle)

    return HiveServerTable(table_results.results, table_schema.schema, desc_results.results, desc_schema.schema)
def execute_query(self, query, max_rows=1000):
configuration = self._get_query_configuration(query)
return self.execute_query_statement(statement=query.query['query'], max_rows=max_rows, configuration=configuration)
def execute_query_statement(self, statement, max_rows=1000, configuration={}, orientation=TFetchOrientation.FETCH_FIRST):
(results, schema), operation_handle = self.execute_statement(statement=statement, max_rows=max_rows, configuration=configuration, orientation=orientation)
return HiveServerDataTable(results, schema, operation_handle, self.query_server)
  def execute_async_query(self, query, statement=0):
    """Kick off statement number ``statement`` of a multi-statement query
    asynchronously; returns a HiveServerQueryHandle (see execute_async_statement).
    """
    if statement == 0:
      # Configuration statements (SET/ADD JAR...) run synchronously first,
      # before the initial statement of the query.
      # Impala just has settings currently
      if self.query_server['server_name'] == 'beeswax':
        for resource in query.get_configuration_statements():
          self.execute_statement(resource.strip())

    configuration = {}

    # Ask Impala to cache result rows server-side so fetches can restart.
    if self.query_server['server_name'] == 'impala' and self.query_server['querycache_rows'] > 0:
      configuration[IMPALA_RESULTSET_CACHE_SIZE] = str(self.query_server['querycache_rows'])

    # The query can override the default configuration
    configuration.update(self._get_query_configuration(query))

    query_statement = query.get_query_statement(statement)

    return self.execute_async_statement(statement=query_statement, confOverlay=configuration)
def execute_statement(self, statement, max_rows=1000, configuration={}, orientation=TFetchOrientation.FETCH_NEXT):
if self.query_server['server_name'] == 'impala' and self.query_server['QUERY_TIMEOUT_S'] > 0:
configuration['QUERY_TIMEOUT_S'] = str(self.query_server['QUERY_TIMEOUT_S'])
req = TExecuteStatementReq(statement=statement.encode('utf-8'), confOverlay=configuration)
res = self.call(self._client.ExecuteStatement, req)
return self.fetch_result(res.operationHandle, max_rows=max_rows, orientation=orientation), res.operationHandle
  def execute_async_statement(self, statement, confOverlay):
    """Execute ``statement`` with runAsync=True and return a persistent-friendly
    HiveServerQueryHandle identifying the server-side operation.

    Note: mutates the caller's ``confOverlay`` dict (adds QUERY_TIMEOUT_S for
    Impala when configured).
    """
    if self.query_server['server_name'] == 'impala' and self.query_server['QUERY_TIMEOUT_S'] > 0:
      confOverlay['QUERY_TIMEOUT_S'] = str(self.query_server['QUERY_TIMEOUT_S'])

    req = TExecuteStatementReq(statement=statement.encode('utf-8'), confOverlay=confOverlay, runAsync=True)
    res = self.call(self._client.ExecuteStatement, req)

    return HiveServerQueryHandle(secret=res.operationHandle.operationId.secret,
                                 guid=res.operationHandle.operationId.guid,
                                 operation_type=res.operationHandle.operationType,
                                 has_result_set=res.operationHandle.hasResultSet,
                                 modified_row_count=res.operationHandle.modifiedRowCount)
  def fetch_data(self, operation_handle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=1000):
    """Fetch one page for an existing operation and wrap it as a HiveServerDataTable."""
    # Fetch until the result is empty dues to a HS2 bug instead of looking at hasMoreRows
    results, schema = self.fetch_result(operation_handle, orientation, max_rows)
    return HiveServerDataTable(results, schema, operation_handle, self.query_server)
def cancel_operation(self, operation_handle):
  """Ask HiveServer2 to cancel the running operation."""
  req = TCancelOperationReq(operationHandle=operation_handle)
  return self.call(self._client.CancelOperation, req)
def close_operation(self, operation_handle):
  """Release the server-side resources held by the operation."""
  req = TCloseOperationReq(operationHandle=operation_handle)
  return self.call(self._client.CloseOperation, req)
def get_columns(self, database, table):
  """Fetch column metadata for a table via the HS2 GetColumns call.

  Returns a (results, schema) pair from the metadata operation.

  Fix: the original rebound ``res`` to the fetch response before calling
  ``close_operation(res.operationHandle)``, so the original operation
  handle was no longer what got closed. Keep the handle in a local and
  close that one.
  """
  req = TGetColumnsReq(schemaName=database, tableName=table)
  res = self.call(self._client.GetColumns, req)

  operation_handle = res.operationHandle
  res, schema = self.fetch_result(operation_handle, orientation=TFetchOrientation.FETCH_NEXT)
  self.close_operation(operation_handle)

  return res, schema
def fetch_result(self, operation_handle, orientation=TFetchOrientation.FETCH_FIRST, max_rows=1000):
  """Fetch a batch of rows for the operation plus the result-set schema.

  Returns a (TFetchResultsResp, schema) pair; schema is None when no
  result set exists.
  """
  if operation_handle.hasResultSet:
    fetch_req = TFetchResultsReq(operationHandle=operation_handle, orientation=orientation, maxRows=max_rows)
    res = self.call(self._client.FetchResults, fetch_req)
  else:
    # No result set: synthesize an empty response so callers get a uniform shape.
    res = TFetchResultsResp(results=TRowSet(startRowOffset=0, rows=[], columns=[]))

  # Only fetch for the first call that should be with start_over
  # NOTE(review): 'TFetchOrientation.FETCH_FIRST' here is a bare enum
  # constant, which is always truthy, so the schema is actually fetched on
  # EVERY call that has a result set. The intent was probably
  # 'orientation == TFetchOrientation.FETCH_FIRST' -- but callers such as
  # fetch_data() rely on schema being populated for FETCH_NEXT, so confirm
  # before changing.
  if operation_handle.hasResultSet and TFetchOrientation.FETCH_FIRST:
    meta_req = TGetResultSetMetadataReq(operationHandle=operation_handle)
    schema = self.call(self._client.GetResultSetMetadata, meta_req)
  else:
    schema = None

  return res, schema
def fetch_log(self, operation_handle, orientation=TFetchOrientation.FETCH_NEXT, max_rows=1000):
  """Fetch operation log lines through FetchResults and join them into one string."""
  # fetchType=1 selects the log stream rather than the result set.
  req = TFetchResultsReq(operationHandle=operation_handle, orientation=orientation, maxRows=max_rows, fetchType=1)
  res = self.call(self._client.FetchResults, req)

  if beeswax_conf.THRIFT_VERSION.get() >= 7:
    lines = res.results.columns[0].stringVal.values
  else:
    # 'imap' -- presumably itertools.imap imported at module level (Python 2); verify.
    lines = imap(lambda r: r.colVals[0].stringVal.value, res.results.rows)

  return '\n'.join(lines)
def get_operation_status(self, operation_handle):
  """Return the server-side status response for the operation."""
  req = TGetOperationStatusReq(operationHandle=operation_handle)
  return self.call(self._client.GetOperationStatus, req)
def explain(self, query):
  """Run EXPLAIN on the query's first statement and return its result table."""
  query_statement = query.get_query_statement(0)
  configuration = self._get_query_configuration(query)
  return self.execute_query_statement(statement='EXPLAIN %s' % query_statement, configuration=configuration, orientation=TFetchOrientation.FETCH_NEXT)
def get_log(self, operation_handle):
  """Fetch the operation log via the (optional) GetLog RPC.

  Returns a placeholder string when the server does not implement GetLog.

  Fix: the bare ``except:`` also swallowed SystemExit/KeyboardInterrupt;
  narrowed to ``except Exception``.
  """
  try:
    req = TGetLogReq(operationHandle=operation_handle)
    res = self.call(self._client.GetLog, req)
    return res.log
  except Exception:
    LOG.exception('server does not support GetLog')
    return 'Server does not support GetLog()'
def get_partitions(self, database, table_name, partition_spec=None, max_parts=None, reverse_sort=True):
  """List a table's partitions via SHOW PARTITIONS, newest first by default.

  max_parts bounds the number of partitions returned; when None or
  non-positive, a large default row cap is used instead.
  """
  table = self.get_table(database, table_name)

  if max_parts is None or max_parts <= 0:
    max_rows = 10000
  else:
    # Fetch at least 1000 rows even for small max_parts; the final list is
    # trimmed with the slice below.
    max_rows = 1000 if max_parts <= 250 else max_parts

  query = 'SHOW PARTITIONS `%s`.`%s`' % (database, table_name)
  if partition_spec:
    query += ' PARTITION(%s)' % partition_spec

  partition_table = self.execute_query_statement(query, max_rows=max_rows)
  partitions = [PartitionValueCompatible(partition, table) for partition in partition_table.rows()]

  if reverse_sort:
    # SHOW PARTITIONS returns oldest first; reverse so newest come first.
    partitions.reverse()

  return partitions[:max_parts]
def _get_query_configuration(self, query):
return dict([(setting['key'], setting['value']) for setting in query.settings])
class HiveServerTableCompatible(HiveServerTable):
  """Same API as Beeswax.

  Adapts a HiveServerTable so legacy Beeswax callers can read its
  description and columns.
  """

  def __init__(self, hive_table):
    self.table = hive_table.table
    self.table_schema = hive_table.table_schema
    self.desc_results = hive_table.desc_results
    self.desc_schema = hive_table.desc_schema

    # Pre-compute the DESCRIBE output in Beeswax's column format.
    self.describe = HiveServerTTableSchema(self.desc_results, self.desc_schema).cols()

  @property
  def cols(self):
    # Expose each column as a lightweight object with stripped
    # name/type/comment attributes; missing values become ''.
    return [
      type('Col', (object,), {
        'name': col.get('col_name', '').strip() if col.get('col_name') else '',
        'type': col.get('data_type', '').strip() if col.get('data_type') else '',
        'comment': col.get('comment', '').strip() if col.get('comment') else ''
      }) for col in HiveServerTable.cols.fget(self)
    ]
class ResultCompatible:
  """Presents a HiveServerDataTable through the Beeswax result interface."""

  def __init__(self, data_table):
    self.data_table = data_table
    self.ready = True
    self.has_more = data_table.has_more
    self.start_row = data_table.startRowOffset
    self.rows = data_table.rows

  @property
  def columns(self):
    # Beeswax exposes column names as an attribute; delegate to cols().
    return self.cols()

  def cols(self):
    names = []
    for col in self.data_table.cols():
      names.append(col.name)
    return names
class PartitionKeyCompatible:
  """Value object describing one partition key (name, type, comment).

  Fix: the class defined __eq__ without __hash__, which makes instances
  unhashable under Python 3 and breaks the eq/hash contract; __hash__ and
  an explicit __ne__ (needed on Python 2) are now provided.
  """

  def __init__(self, name, type, comment):
    self.name = name
    self.type = type
    self.comment = comment

  def __eq__(self, other):
    return isinstance(other, PartitionKeyCompatible) and \
        self.name == other.name and \
        self.type == other.type and \
        self.comment == other.comment

  def __ne__(self, other):
    return not self.__eq__(other)

  def __hash__(self):
    # Consistent with __eq__: equal objects hash equally.
    return hash((self.name, self.type, self.comment))

  def __repr__(self):
    return 'PartitionKey(name:%s, type:%s, comment:%s)' % (self.name, self.type, self.comment)
class PartitionValueCompatible:
  """Parses one SHOW PARTITIONS row into spec string and value list.

  Fix: splitting each 'key=value' pair with split('=') truncated any value
  that itself contained '='; split('=', 1) keeps the full value.
  """

  def __init__(self, partition_row, table, properties=None):
    if properties is None:
      properties = {}
    # Parses: ['datehour=2013022516'] or ['month=2011-07/dt=2011-07-01/hr=12']
    partition = partition_row[0]
    parts = [part.split('=', 1) for part in partition.split('/')]
    self.partition_spec = ','.join(["%s='%s'" % (name, value) for name, value in parts])
    self.values = [value for _, value in parts]
    # Mimics the thrift storage-descriptor object Beeswax callers expect.
    self.sd = type('Sd', (object,), properties,)
class ExplainCompatible:
  """Adapts a data table to Beeswax's explain result: one text blob."""

  def __init__(self, data_table):
    first_cells = [row[0] for row in data_table.rows()]
    self.textual = '\n'.join(first_cells)
class ResultMetaCompatible:
  """Minimal stand-in for Beeswax result metadata."""

  def __init__(self):
    # The only flag legacy callers read from this object.
    self.in_tablename = True
class HiveServerClientCompatible(object):
  """Same API as Beeswax.

  Wraps a HiveServer2 client and translates Beeswax-style handle objects
  into raw thrift operation handles before delegating. Methods that raise
  NotImplementedError existed only in the old Beeswax/metastore API.
  """

  def __init__(self, client):
    self._client = client
    self.user = client.user
    self.query_server = client.query_server

  def query(self, query, statement=0):
    return self._client.execute_async_query(query, statement)

  def get_state(self, handle):
    # Map the HS2 operation state onto Beeswax's state enum.
    operationHandle = handle.get_rpc_handle()
    res = self._client.get_operation_status(operationHandle)
    return HiveServerQueryHistory.STATE_MAP[res.operationState]

  def get_operation_status(self, handle):
    operationHandle = handle.get_rpc_handle()
    return self._client.get_operation_status(operationHandle)

  def use(self, query):
    # Run a USE statement and immediately release its operation.
    data = self._client.execute_query(query)
    self._client.close_operation(data.operation_handle)
    return data

  def explain(self, query):
    data_table = self._client.explain(query)
    data = ExplainCompatible(data_table)
    self._client.close_operation(data_table.operation_handle)
    return data

  def fetch(self, handle, start_over=False, max_rows=None):
    """Fetch a page of results; start_over restarts from the first row."""
    operationHandle = handle.get_rpc_handle()
    if max_rows is None:
      max_rows = 1000

    if start_over and not (self.query_server['server_name'] == 'impala' and self.query_server['querycache_rows'] == 0): # Backward compatibility for impala
      orientation = TFetchOrientation.FETCH_FIRST
    else:
      orientation = TFetchOrientation.FETCH_NEXT

    data_table = self._client.fetch_data(operationHandle, orientation=orientation, max_rows=max_rows)

    return ResultCompatible(data_table)

  def cancel_operation(self, handle):
    operationHandle = handle.get_rpc_handle()
    return self._client.cancel_operation(operationHandle)

  def close(self, handle):
    return self.close_operation(handle)

  def close_operation(self, handle):
    operationHandle = handle.get_rpc_handle()
    return self._client.close_operation(operationHandle)

  def close_session(self, session):
    operationHandle = session.get_handle()
    return self._client.close_session(operationHandle)

  def dump_config(self):
    return 'Does not exist in HS2'

  def get_log(self, handle, start_over=True):
    """Return the operation log, via GetLog or the FetchResults log stream."""
    operationHandle = handle.get_rpc_handle()

    if beeswax_conf.USE_GET_LOG_API.get() or self.query_server['server_name'] == 'impala':
      return self._client.get_log(operationHandle)
    else:
      if start_over:
        orientation = TFetchOrientation.FETCH_FIRST
      else:
        orientation = TFetchOrientation.FETCH_NEXT

      return self._client.fetch_log(operationHandle, orientation=orientation, max_rows=-1)

  def get_databases(self):
    # 'TABLE_SCHEM' is the column name in the thrift metadata result set.
    col = 'TABLE_SCHEM'
    return [table[col] for table in self._client.get_databases()]

  def get_database(self, database):
    return self._client.get_database(database)

  def get_tables_meta(self, database, table_names):
    # Reshape the thrift rows into plain dicts for the UI.
    tables = self._client.get_tables_meta(database, table_names)
    massaged_tables = []
    for table in tables:
      massaged_tables.append({
        'name': table['TABLE_NAME'],
        'comment': table['REMARKS'],
        'type': table['TABLE_TYPE'].capitalize()}
      )
    return massaged_tables

  def get_tables(self, database, table_names):
    tables = [table['TABLE_NAME'] for table in self._client.get_tables(database, table_names)]
    tables.sort()
    return tables

  def get_table(self, database, table_name, partition_spec=None):
    table = self._client.get_table(database, table_name, partition_spec)
    return HiveServerTableCompatible(table)

  def get_columns(self, database, table):
    return self._client.get_columns(database, table)

  def get_default_configuration(self, *args, **kwargs):
    return {}

  def get_results_metadata(self, handle):
    # We just need to mock
    return ResultMetaCompatible()

  def create_database(self, name, description): raise NotImplementedError()

  def alter_table(self, dbname, tbl_name, new_tbl): raise NotImplementedError()

  def open_session(self, user):
    return self._client.open_session(user)

  def add_partition(self, new_part): raise NotImplementedError()

  def get_partition(self, *args, **kwargs): raise NotImplementedError()

  def get_partitions(self, database, table_name, partition_spec, max_parts, reverse_sort=True):
    return self._client.get_partitions(database, table_name, partition_spec, max_parts, reverse_sort)

  def alter_partition(self, db_name, tbl_name, new_part): raise NotImplementedError()
| ahmed-mahran/hue | apps/beeswax/src/beeswax/server/hive_server2_lib.py | Python | apache-2.0 | 38,037 |
"use strict";
var CONST = require('../persistence/sqlconst');
var util = require("util");
var DEFAULT_LIMIT = 50;
var MAX_THRESHOLD = 500;
var knexModule = require("knex");
/**
 * Data-access object for blog posts, backed by knex.
 *
 * @param {Object} knexConfig knex configuration object passed straight to knex()
 * @constructor
 */
function Blog(knexConfig) {
    this.knex = knexModule(knexConfig);
}
//DDL Functions

/**
 * Drops the post table if it exists.
 * @returns {Promise} knex schema promise
 */
Blog.prototype.dropPostTable = function () {
    console.info("Dropping table if exist");
    return this.knex.schema.dropTableIfExists(CONST.POST.TABLE);
};

/**
 * Creates the post table (PK, unique guid/title, content, indexed pub date).
 * @returns {Promise} knex schema promise
 */
Blog.prototype.createPostTable = function () {
    console.info("Creating %s table if exist", CONST.POST.TABLE);
    return this.knex.schema.createTable(CONST.POST.TABLE, function (table) {
        table.increments(CONST.POST.PK);
        table.string(CONST.POST.GUID).unique();
        table.string(CONST.POST.TITLE).unique()
            .notNullable();
        table.binary(CONST.POST.CONTENT)
            .notNullable();
        table.datetime(CONST.POST.PUB_DATE).index(CONST.POST.IDX_PUBDATE)
            .notNullable();
    });
};

/**
 * Releases the underlying knex connection pool.
 */
Blog.prototype.cleanUp = function () {
    console.log("Cleaning up Knex");
    this.knex.destroy();
};
/**
 * Inserts a post row.
 * NOTE(review): the literal column names below ("title", "content", "guid",
 * "publication_date") must match the CONST.POST column constants used by the
 * schema; verify against sqlconst.
 * @param {Object} post post with title, content, guid, publicationDate
 * @returns {Promise} knex insert promise
 */
Blog.prototype.savePost = function (post) {
    var record = {
        "title": post.title,
        "content": post.content,
        "guid": post.guid,
        "publication_date": post.publicationDate
    };
    return this.knex.insert(record).into(CONST.POST.TABLE);
};

/**
 * Deletes a post by primary key.
 * @param {number} postId primary key of the post
 * @returns {Promise} knex delete promise
 */
Blog.prototype.deletePost = function (postId) {
    console.info("Deleting post :%d", postId);
    return this.knex(CONST.POST.TABLE).where(CONST.POST.PK, postId).del();
};
//Limit Helper functions

/**
 * Applies the default when no usable limit was supplied.
 * @param {?number} limit requested row limit
 * @returns {number} DEFAULT_LIMIT when limit is null/undefined/0, otherwise limit
 */
function checkLowerBoundLimit(limit) {
    // '== null' matches both null and undefined; replaces the deprecated
    // util.isNullOrUndefined() (Node DEP0051).
    if (limit == null || limit === 0) {
        return DEFAULT_LIMIT;
    }
    return limit;
}
/**
 * Caps a limit at the maximum threshold.
 * @param {?number} value candidate limit
 * @returns {?number} MAX_THRESHOLD when value >= MAX_THRESHOLD, otherwise value unchanged
 */
function checkUpperBoundLimit(value) {
    // '!= null' filters out null/undefined before comparing; replaces the
    // deprecated util.isNullOrUndefined() (Node DEP0051).
    if (value != null && value >= MAX_THRESHOLD) {
        return MAX_THRESHOLD;
    }
    return value;
}
/**
 * Normalizes a caller-supplied limit: default when missing/zero, then
 * clamped to the maximum threshold.
 * @param {?number} limit requested row limit
 * @returns {number} effective limit
 */
Blog.prototype._determineDefaultLimit = function (limit) {
    return checkUpperBoundLimit(checkLowerBoundLimit(limit));
};
// Query functions

/**
 * Builds the base SELECT over every post column.
 * @param {Object} knex knex instance
 * @returns {Object} knex query builder
 */
function selectAllColumns(knex) {
    return knex.
        select(CONST.POST.PK, CONST.POST.TITLE, CONST.POST.CONTENT,CONST.POST.PUB_DATE,CONST.POST.GUID).
        from(CONST.POST.TABLE);
}
/**
 * Finds one post (all columns) by primary key.
 * @param {number} postId primary key
 * @returns {Promise} knex select promise
 */
Blog.prototype.findPostById = function (postId) {
    return selectAllColumns(this.knex).
        where(CONST.POST.PK, postId);
};

/**
 * Lists posts (summary columns only, no content), bounded by the
 * normalized limit.
 * @param {?number} limit maximum number of rows
 * @returns {Promise} knex select promise
 */
Blog.prototype.getAllPosts = function (limit) {
    return this.knex.select(CONST.POST.PK,CONST.POST.TITLE,CONST.POST.GUID,CONST.POST.PUB_DATE).
        from(CONST.POST.TABLE).limit(this._determineDefaultLimit(limit));
};

/**
 * Finds one post (all columns) by exact title.
 * @param {string} title post title
 * @returns {Promise} knex select promise
 */
Blog.prototype.findPostByTitle = function (title) {
    return selectAllColumns(this.knex).
        where(CONST.POST.TITLE, title);
};

module.exports = Blog;
| mohan82/myblog-api | persistence/blog.js | JavaScript | apache-2.0 | 2,864 |
<?php
final class DiffusionRepositoryEditEngine
extends PhabricatorEditEngine {
const ENGINECONST = 'diffusion.repository';
private $versionControlSystem;
/**
 * Set the version control system newly created repositories will use.
 *
 * @param const VCS type constant.
 * @return this
 */
public function setVersionControlSystem($version_control_system) {
  $this->versionControlSystem = $version_control_system;
  return $this;
}

public function getVersionControlSystem() {
  return $this->versionControlSystem;
}
// This engine's forms are not user-configurable.
public function isEngineConfigurable() {
  return false;
}

// Offer "Repository" in the global quick-create menu.
public function isDefaultQuickCreateEngine() {
  return true;
}

// Sort order of this entry within the quick-create menu.
public function getQuickCreateOrderVector() {
  return id(new PhutilSortVector())->addInt(300);
}
// Display strings and owning application for this edit engine.
public function getEngineName() {
  return pht('Repositories');
}

public function getSummaryHeader() {
  return pht('Edit Repositories');
}

public function getSummaryText() {
  return pht('Creates and edits repositories.');
}

public function getEngineApplicationClass() {
  return 'PhabricatorDiffusionApplication';
}
/**
 * Build a new, unsaved repository template for the create form, and pick
 * a cluster service for it when Almanac cluster services exist.
 */
protected function newEditableObject() {
  $viewer = $this->getViewer();
  $repository = PhabricatorRepository::initializeNewRepository($viewer);

  $repository->setDetail('newly-initialized', true);

  $vcs = $this->getVersionControlSystem();
  if ($vcs) {
    $repository->setVersionControlSystem($vcs);
  }

  // Pick a random open service to allocate this repository on, if any exist.
  // If there are no services, we aren't in cluster mode and will allocate
  // locally. If there are services but none permit allocations, we fail.
  // Eventually we can make this more flexible, but this rule is a reasonable
  // starting point as we begin to deploy cluster services.
  $services = id(new AlmanacServiceQuery())
    ->setViewer(PhabricatorUser::getOmnipotentUser())
    ->withServiceTypes(
      array(
        AlmanacClusterRepositoryServiceType::SERVICETYPE,
      ))
    ->needProperties(true)
    ->execute();
  if ($services) {
    // Filter out services which do not permit new allocations.
    foreach ($services as $key => $possible_service) {
      if ($possible_service->getAlmanacPropertyValue('closed')) {
        unset($services[$key]);
      }
    }

    if (!empty($services)) {
      // Choose one of the remaining open services at random.
      shuffle($services);
      $service = head($services);
      $repository->setAlmanacServicePHID($service->getPHID());
    }
  }

  return $repository;
}
// Standard EditEngine plumbing: query, titles, and create policy.
protected function newObjectQuery() {
  return new PhabricatorRepositoryQuery();
}

protected function getObjectCreateTitleText($object) {
  return pht('Create Repository');
}

protected function getObjectCreateButtonText($object) {
  return pht('Create Repository');
}

protected function getObjectEditTitleText($object) {
  return pht('Edit Repository: %s', $object->getName());
}

protected function getObjectEditShortText($object) {
  return $object->getDisplayName();
}

protected function getObjectCreateShortText() {
  return pht('Create Repository');
}

protected function getObjectName() {
  return pht('Repository');
}

// Editing lands on the repository's "Manage" screen.
protected function getObjectViewURI($object) {
  return $object->getPathURI('manage/');
}

// Creation is gated by the application's "Can Create Repositories" capability.
protected function getCreateNewObjectPolicy() {
  return $this->getApplication()->getPolicy(
    DiffusionCreateRepositoriesCapability::CAPABILITY);
}
/**
 * Build the edit form pages from the repository management panels, plus a
 * few extra pages (text encoding, catch-all extensions page).
 */
protected function newPages($object) {
  $panels = DiffusionRepositoryManagementPanel::getAllPanels();

  $pages = array();
  $uris = array();
  foreach ($panels as $panel_key => $panel) {
    $panel->setRepository($object);

    $uris[$panel_key] = $panel->getPanelURI();

    // Not every panel contributes an edit page.
    $page = $panel->newEditEnginePage();
    if (!$page) {
      continue;
    }
    $pages[] = $page;
  }

  $basics_key = DiffusionRepositoryBasicsManagementPanel::PANELKEY;
  $basics_uri = $uris[$basics_key];

  $more_pages = array(
    id(new PhabricatorEditPage())
      ->setKey('encoding')
      ->setLabel(pht('Text Encoding'))
      ->setViewURI($basics_uri)
      ->setFieldKeys(
        array(
          'encoding',
        )),
    // Default page collects any fields not claimed by another page.
    id(new PhabricatorEditPage())
      ->setKey('extensions')
      ->setLabel(pht('Extensions'))
      ->setIsDefault(true),
  );

  foreach ($more_pages as $page) {
    $pages[] = $page;
  }

  return $pages;
}
/**
 * Reorder fields so related fields appear next to each other: each key in
 * $after lists fields that should immediately follow it.
 */
protected function willConfigureFields($object, array $fields) {
  // Change the default field order so related fields are adjacent.
  $after = array(
    'policy.edit' => array('policy.push'),
  );

  $result = array();
  foreach ($fields as $key => $value) {
    $result[$key] = $value;

    if (!isset($after[$key])) {
      continue;
    }

    foreach ($after[$key] as $next_key) {
      if (!isset($fields[$next_key])) {
        continue;
      }

      // Move the follower directly behind its anchor; removing it from
      // $fields prevents it from being appended again later.
      unset($result[$next_key]);
      $result[$next_key] = $fields[$next_key];
      unset($fields[$next_key]);
    }
  }

  return $result;
}
/**
 * Declare every editable repository field (name, callsign, refs, policies,
 * limits, ...) with its transaction type and Conduit documentation.
 */
protected function buildCustomEditFields($object) {
  $viewer = $this->getViewer();

  $policies = id(new PhabricatorPolicyQuery())
    ->setViewer($viewer)
    ->setObject($object)
    ->execute();

  $fetch_value = $object->getFetchRules();
  $track_value = $object->getTrackOnlyRules();
  $permanent_value = $object->getPermanentRefRules();

  $automation_instructions = pht(
    "Configure **Repository Automation** to allow Phabricator to ".
    "write to this repository.".
    "\n\n".
    "IMPORTANT: This feature is new, experimental, and not supported. ".
    "Use it at your own risk.");

  $staging_instructions = pht(
    "To make it easier to run integration tests and builds on code ".
    "under review, you can configure a **Staging Area**. When `arc` ".
    "creates a diff, it will push a copy of the changes to the ".
    "configured staging area with a corresponding tag.".
    "\n\n".
    "IMPORTANT: This feature is new, experimental, and not supported. ".
    "Use it at your own risk.");

  $subpath_instructions = pht(
    'If you want to import only part of a repository, like `trunk/`, '.
    'you can set a path in **Import Only**. Phabricator will ignore '.
    'commits which do not affect this path.');

  // Enforcing filesize limits requires "git cat-file --batch-check=<format>";
  // warn when the installed git is too old to support it.
  $filesize_warning = null;
  if ($object->isGit()) {
    $git_binary = PhutilBinaryAnalyzer::getForBinary('git');
    $git_version = $git_binary->getBinaryVersion();
    $filesize_version = '1.8.4';
    if (version_compare($git_version, $filesize_version, '<')) {
      $filesize_warning = pht(
        '(WARNING) {icon exclamation-triangle} The version of "git" ("%s") '.
        'installed on this server does not support '.
        '"--batch-check=<format>", a feature required to enforce filesize '.
        'limits. Upgrade to "git" %s or newer to use this feature.',
        $git_version,
        $filesize_version);
    }
  }

  $track_instructions = pht(
    'WARNING: The "Track Only" feature is deprecated. Use "Fetch Refs" '.
    'and "Permanent Refs" instead. This feature will be removed in a '.
    'future version of Phabricator.');

  return array(
    id(new PhabricatorSelectEditField())
      ->setKey('vcs')
      ->setLabel(pht('Version Control System'))
      ->setTransactionType(
        PhabricatorRepositoryVCSTransaction::TRANSACTIONTYPE)
      ->setIsFormField(false)
      ->setIsCopyable(true)
      ->setOptions(PhabricatorRepositoryType::getAllRepositoryTypes())
      ->setDescription(pht('Underlying repository version control system.'))
      ->setConduitDescription(
        pht(
          'Choose which version control system to use when creating a '.
          'repository.'))
      ->setConduitTypeDescription(pht('Version control system selection.'))
      ->setValue($object->getVersionControlSystem()),
    id(new PhabricatorTextEditField())
      ->setKey('name')
      ->setLabel(pht('Name'))
      ->setIsRequired(true)
      ->setTransactionType(
        PhabricatorRepositoryNameTransaction::TRANSACTIONTYPE)
      ->setDescription(pht('The repository name.'))
      ->setConduitDescription(pht('Rename the repository.'))
      ->setConduitTypeDescription(pht('New repository name.'))
      ->setValue($object->getName()),
    id(new PhabricatorTextEditField())
      ->setKey('callsign')
      ->setLabel(pht('Callsign'))
      ->setTransactionType(
        PhabricatorRepositoryCallsignTransaction::TRANSACTIONTYPE)
      ->setDescription(pht('The repository callsign.'))
      ->setConduitDescription(pht('Change the repository callsign.'))
      ->setConduitTypeDescription(pht('New repository callsign.'))
      ->setValue($object->getCallsign()),
    id(new PhabricatorTextEditField())
      ->setKey('shortName')
      ->setLabel(pht('Short Name'))
      ->setTransactionType(
        PhabricatorRepositorySlugTransaction::TRANSACTIONTYPE)
      ->setDescription(pht('Short, unique repository name.'))
      ->setConduitDescription(pht('Change the repository short name.'))
      ->setConduitTypeDescription(pht('New short name for the repository.'))
      ->setValue($object->getRepositorySlug()),
    id(new PhabricatorRemarkupEditField())
      ->setKey('description')
      ->setLabel(pht('Description'))
      ->setTransactionType(
        PhabricatorRepositoryDescriptionTransaction::TRANSACTIONTYPE)
      ->setDescription(pht('Repository description.'))
      ->setConduitDescription(pht('Change the repository description.'))
      ->setConduitTypeDescription(pht('New repository description.'))
      ->setValue($object->getDetail('description')),
    id(new PhabricatorTextEditField())
      ->setKey('encoding')
      ->setLabel(pht('Text Encoding'))
      ->setIsCopyable(true)
      ->setTransactionType(
        PhabricatorRepositoryEncodingTransaction::TRANSACTIONTYPE)
      ->setDescription(pht('Default text encoding.'))
      ->setConduitDescription(pht('Change the default text encoding.'))
      ->setConduitTypeDescription(pht('New text encoding.'))
      ->setValue($object->getDetail('encoding')),
    id(new PhabricatorBoolEditField())
      ->setKey('allowDangerousChanges')
      ->setLabel(pht('Allow Dangerous Changes'))
      ->setIsCopyable(true)
      ->setIsFormField(false)
      ->setOptions(
        pht('Prevent Dangerous Changes'),
        pht('Allow Dangerous Changes'))
      ->setTransactionType(
        PhabricatorRepositoryDangerousTransaction::TRANSACTIONTYPE)
      ->setDescription(pht('Permit dangerous changes to be made.'))
      ->setConduitDescription(pht('Allow or prevent dangerous changes.'))
      ->setConduitTypeDescription(pht('New protection setting.'))
      ->setValue($object->shouldAllowDangerousChanges()),
    id(new PhabricatorBoolEditField())
      ->setKey('allowEnormousChanges')
      ->setLabel(pht('Allow Enormous Changes'))
      ->setIsCopyable(true)
      ->setIsFormField(false)
      ->setOptions(
        pht('Prevent Enormous Changes'),
        pht('Allow Enormous Changes'))
      ->setTransactionType(
        PhabricatorRepositoryEnormousTransaction::TRANSACTIONTYPE)
      ->setDescription(pht('Permit enormous changes to be made.'))
      ->setConduitDescription(pht('Allow or prevent enormous changes.'))
      ->setConduitTypeDescription(pht('New protection setting.'))
      ->setValue($object->shouldAllowEnormousChanges()),
    id(new PhabricatorSelectEditField())
      ->setKey('status')
      ->setLabel(pht('Status'))
      ->setTransactionType(
        PhabricatorRepositoryActivateTransaction::TRANSACTIONTYPE)
      ->setIsFormField(false)
      ->setOptions(PhabricatorRepository::getStatusNameMap())
      ->setDescription(pht('Active or inactive status.'))
      ->setConduitDescription(pht('Active or deactivate the repository.'))
      ->setConduitTypeDescription(pht('New repository status.'))
      ->setValue($object->getStatus()),
    id(new PhabricatorTextEditField())
      ->setKey('defaultBranch')
      ->setLabel(pht('Default Branch'))
      ->setTransactionType(
        PhabricatorRepositoryDefaultBranchTransaction::TRANSACTIONTYPE)
      ->setIsCopyable(true)
      ->setDescription(pht('Default branch name.'))
      ->setConduitDescription(pht('Set the default branch name.'))
      ->setConduitTypeDescription(pht('New default branch name.'))
      ->setValue($object->getDetail('default-branch')),
    id(new PhabricatorTextAreaEditField())
      ->setIsStringList(true)
      ->setKey('fetchRefs')
      ->setLabel(pht('Fetch Refs'))
      ->setTransactionType(
        PhabricatorRepositoryFetchRefsTransaction::TRANSACTIONTYPE)
      ->setIsCopyable(true)
      ->setDescription(pht('Fetch only these refs.'))
      ->setConduitDescription(pht('Set the fetched refs.'))
      ->setConduitTypeDescription(pht('New fetched refs.'))
      ->setValue($fetch_value),
    id(new PhabricatorTextAreaEditField())
      ->setIsStringList(true)
      ->setKey('permanentRefs')
      ->setLabel(pht('Permanent Refs'))
      ->setTransactionType(
        PhabricatorRepositoryPermanentRefsTransaction::TRANSACTIONTYPE)
      ->setIsCopyable(true)
      ->setDescription(pht('Only these refs are considered permanent.'))
      ->setConduitDescription(pht('Set the permanent refs.'))
      ->setConduitTypeDescription(pht('New permanent ref rules.'))
      ->setValue($permanent_value),
    id(new PhabricatorTextAreaEditField())
      ->setIsStringList(true)
      ->setKey('trackOnly')
      ->setLabel(pht('Track Only'))
      ->setTransactionType(
        PhabricatorRepositoryTrackOnlyTransaction::TRANSACTIONTYPE)
      ->setIsCopyable(true)
      ->setControlInstructions($track_instructions)
      ->setDescription(pht('Track only these branches.'))
      ->setConduitDescription(pht('Set the tracked branches.'))
      ->setConduitTypeDescription(pht('New tracked branches.'))
      ->setValue($track_value),
    id(new PhabricatorTextEditField())
      ->setKey('importOnly')
      ->setLabel(pht('Import Only'))
      ->setTransactionType(
        PhabricatorRepositorySVNSubpathTransaction::TRANSACTIONTYPE)
      ->setIsCopyable(true)
      ->setDescription(pht('Subpath to selectively import.'))
      ->setConduitDescription(pht('Set the subpath to import.'))
      ->setConduitTypeDescription(pht('New subpath to import.'))
      ->setValue($object->getDetail('svn-subpath'))
      ->setControlInstructions($subpath_instructions),
    id(new PhabricatorTextEditField())
      ->setKey('stagingAreaURI')
      ->setLabel(pht('Staging Area URI'))
      ->setTransactionType(
        PhabricatorRepositoryStagingURITransaction::TRANSACTIONTYPE)
      ->setIsCopyable(true)
      ->setDescription(pht('Staging area URI.'))
      ->setConduitDescription(pht('Set the staging area URI.'))
      ->setConduitTypeDescription(pht('New staging area URI.'))
      ->setValue($object->getStagingURI())
      ->setControlInstructions($staging_instructions),
    id(new PhabricatorDatasourceEditField())
      ->setKey('automationBlueprintPHIDs')
      ->setLabel(pht('Use Blueprints'))
      ->setTransactionType(
        PhabricatorRepositoryBlueprintsTransaction::TRANSACTIONTYPE)
      ->setIsCopyable(true)
      ->setDatasource(new DrydockBlueprintDatasource())
      ->setDescription(pht('Automation blueprints.'))
      ->setConduitDescription(pht('Change automation blueprints.'))
      ->setConduitTypeDescription(pht('New blueprint PHIDs.'))
      ->setValue($object->getAutomationBlueprintPHIDs())
      ->setControlInstructions($automation_instructions),
    id(new PhabricatorStringListEditField())
      ->setKey('symbolLanguages')
      ->setLabel(pht('Languages'))
      ->setTransactionType(
        PhabricatorRepositorySymbolLanguagesTransaction::TRANSACTIONTYPE)
      ->setIsCopyable(true)
      ->setDescription(
        pht('Languages which define symbols in this repository.'))
      ->setConduitDescription(
        pht('Change symbol languages for this repository.'))
      ->setConduitTypeDescription(
        pht('New symbol languages.'))
      ->setValue($object->getSymbolLanguages()),
    id(new PhabricatorDatasourceEditField())
      ->setKey('symbolRepositoryPHIDs')
      ->setLabel(pht('Uses Symbols From'))
      ->setTransactionType(
        PhabricatorRepositorySymbolSourcesTransaction::TRANSACTIONTYPE)
      ->setIsCopyable(true)
      ->setDatasource(new DiffusionRepositoryDatasource())
      ->setDescription(pht('Repositories to link symbols from.'))
      ->setConduitDescription(pht('Change symbol source repositories.'))
      ->setConduitTypeDescription(pht('New symbol repositories.'))
      ->setValue($object->getSymbolSources()),
    id(new PhabricatorBoolEditField())
      ->setKey('publish')
      ->setLabel(pht('Publish/Notify'))
      ->setTransactionType(
        PhabricatorRepositoryNotifyTransaction::TRANSACTIONTYPE)
      ->setIsCopyable(true)
      ->setOptions(
        pht('Disable Notifications, Feed, and Herald'),
        pht('Enable Notifications, Feed, and Herald'))
      ->setDescription(pht('Configure how changes are published.'))
      ->setConduitDescription(pht('Change publishing options.'))
      ->setConduitTypeDescription(pht('New notification setting.'))
      ->setValue(!$object->isPublishingDisabled()),
    id(new PhabricatorPolicyEditField())
      ->setKey('policy.push')
      ->setLabel(pht('Push Policy'))
      ->setAliases(array('push'))
      ->setIsCopyable(true)
      ->setCapability(DiffusionPushCapability::CAPABILITY)
      ->setPolicies($policies)
      ->setTransactionType(
        PhabricatorRepositoryPushPolicyTransaction::TRANSACTIONTYPE)
      ->setDescription(
        pht('Controls who can push changes to the repository.'))
      ->setConduitDescription(
        pht('Change the push policy of the repository.'))
      ->setConduitTypeDescription(pht('New policy PHID or constant.'))
      ->setValue($object->getPolicy(DiffusionPushCapability::CAPABILITY)),
    id(new PhabricatorTextEditField())
      ->setKey('filesizeLimit')
      ->setLabel(pht('Filesize Limit'))
      ->setTransactionType(
        PhabricatorRepositoryFilesizeLimitTransaction::TRANSACTIONTYPE)
      ->setDescription(pht('Maximum permitted file size.'))
      ->setConduitDescription(pht('Change the filesize limit.'))
      ->setConduitTypeDescription(pht('New repository filesize limit.'))
      ->setControlInstructions($filesize_warning)
      ->setValue($object->getFilesizeLimit()),
    id(new PhabricatorTextEditField())
      ->setKey('copyTimeLimit')
      ->setLabel(pht('Clone/Fetch Timeout'))
      ->setTransactionType(
        PhabricatorRepositoryCopyTimeLimitTransaction::TRANSACTIONTYPE)
      ->setDescription(
        pht('Maximum permitted duration of internal clone/fetch.'))
      ->setConduitDescription(pht('Change the copy time limit.'))
      ->setConduitTypeDescription(pht('New repository copy time limit.'))
      ->setValue($object->getCopyTimeLimit()),
    id(new PhabricatorTextEditField())
      ->setKey('touchLimit')
      ->setLabel(pht('Touched Paths Limit'))
      ->setTransactionType(
        PhabricatorRepositoryTouchLimitTransaction::TRANSACTIONTYPE)
      ->setDescription(pht('Maximum permitted paths touched per commit.'))
      ->setConduitDescription(pht('Change the touch limit.'))
      ->setConduitTypeDescription(pht('New repository touch limit.'))
      ->setValue($object->getTouchLimit()),
  );
}
}
| wikimedia/phabricator | src/applications/diffusion/editor/DiffusionRepositoryEditEngine.php | PHP | apache-2.0 | 19,968 |
/*
* Created on 21.07.2015
*/
package com.github.dockerjava.core.command;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.github.dockerjava.api.model.Frame;
import com.github.dockerjava.core.async.ResultCallbackTemplate;
/**
*
* @author Marcus Linke
*
* @deprecated use {@link com.github.dockerjava.api.async.ResultCallback.Adapter}
*/
@Deprecated
public class AttachContainerResultCallback extends ResultCallbackTemplate<AttachContainerResultCallback, Frame> {

    private static final Logger LOGGER = LoggerFactory.getLogger(AttachContainerResultCallback.class);

    /**
     * Logs each received frame at debug level; subclasses typically override
     * this to consume the attached container's output.
     */
    @Override
    public void onNext(Frame item) {
        LOGGER.debug(item.toString());
    }
}
| tejksat/docker-java | docker-java-core/src/main/java/com/github/dockerjava/core/command/AttachContainerResultCallback.java | Java | apache-2.0 | 695 |
package com.orhanobut.wasp;
import android.graphics.Bitmap;
import android.os.Looper;
import android.text.TextUtils;
import android.view.ViewGroup;
import android.widget.ImageView;
import com.orhanobut.wasp.utils.StringUtils;
/**
* This class is responsible of the loading image. It automatically handles the canceling and
* loading images for the recycled view as well.
*
* @author Orhan Obut
*/
final class InternalImageHandler implements ImageHandler {
/**
* It is used to determine which url is current for the ImageView
*/
private static final int KEY_TAG = 0x7f070006;
/**
* Stores the cached images
*/
private final ImageCache imageCache;
/**
* It is used to create network request for the bitmap
*/
private final ImageNetworkHandler imageNetworkHandler;
/**
 * @param cache   bitmap cache used before hitting the network
 * @param handler network layer that fetches bitmaps and cancels requests
 */
InternalImageHandler(ImageCache cache, ImageNetworkHandler handler) {
    this.imageCache = cache;
    this.imageNetworkHandler = handler;
}
/**
 * Entry point: verifies the caller is on the main thread, then delegates
 * to {@link #loadImage}.
 */
@Override
public void load(ImageCreator imageCreator) {
    checkMain();
    loadImage(imageCreator);
}
/**
 * Resolves the image for the target ImageView: cancels any in-flight
 * request for the view, serves from cache when possible, otherwise issues
 * a network request and binds the result only if the view still shows the
 * same URL (recycled-view safety via the KEY_TAG tag).
 */
private void loadImage(final ImageCreator imageCreator) {
    final String url = imageCreator.getUrl();
    final ImageView imageView = imageCreator.getImageView();

    // clear the target
    initImageView(imageCreator);

    // if there is any old request. cancel it
    String tag = (String) imageView.getTag(KEY_TAG);
    if (tag != null) {
        imageNetworkHandler.cancelRequest(tag);
    }

    // update the current url
    imageView.setTag(KEY_TAG, url);

    int width = imageView.getWidth();
    int height = imageView.getHeight();

    boolean wrapWidth = false;
    boolean wrapHeight = false;
    if (imageView.getLayoutParams() != null) {
        ViewGroup.LayoutParams params = imageView.getLayoutParams();
        wrapWidth = params.width == ViewGroup.LayoutParams.WRAP_CONTENT;
        wrapHeight = params.height == ViewGroup.LayoutParams.WRAP_CONTENT;
    }

    // if the view's bounds aren't known yet, and this is not a wrap-content/wrap-content
    // view, hold off on loading the image.
    boolean isFullyWrapContent = wrapWidth && wrapHeight;
    if (width == 0 && height == 0 && !isFullyWrapContent) {
        Logger.d("ImageHandler : width == 0 && height == 0 && !isFullyWrapContent");
        // NOTE(review): the early return for unmeasured views is disabled;
        // the request proceeds with 0x0 max dimensions. Confirm intentional.
        // return;
    }

    // Calculate the max image width / height to use while ignoring WRAP_CONTENT dimens.
    int maxWidth = wrapWidth ? 0 : width;
    int maxHeight = wrapHeight ? 0 : height;

    // check if it is already in cache
    final String cacheKey = StringUtils.getCacheKey(url, maxWidth, maxHeight);
    final Bitmap bitmap = imageCache.getBitmap(cacheKey);
    if (bitmap != null) {
        imageView.setImageBitmap(bitmap);
        Logger.d("CACHE IMAGE : " + url);
        return;
    }

    // make a new request
    imageNetworkHandler.requestImage(imageCreator, maxWidth, maxHeight, new InternalCallback<Container>() {
        @Override
        public void onSuccess(final Container container) {
            Bitmap bitmap = container.bitmap;
            if (bitmap == null) {
                return;
            }
            container.waspImageCreator.logSuccess(bitmap);

            // cache the image
            imageCache.putBitmap(container.cacheKey, container.bitmap);

            ImageView imageView = container.waspImageCreator.getImageView();

            // if it is the current url, set the image
            String tag = (String) imageView.getTag(KEY_TAG);
            if (TextUtils.equals(tag, container.waspImageCreator.getUrl())) {
                imageView.setImageBitmap(container.bitmap);
                imageView.setTag(KEY_TAG, null);
            }
        }

        @Override
        public void onError(WaspError error) {
            // fall back to the configured error drawable, if any
            int errorImage = imageCreator.getErrorImage();
            if (errorImage != 0) {
                imageView.setImageResource(errorImage);
            }
            error.log();
        }
    });
    imageCreator.logRequest();
}
// clear the target by setting null or default placeholder
private void initImageView(ImageCreator waspImageCreator) {
int defaultImage = waspImageCreator.getDefaultImage();
ImageView imageView = waspImageCreator.getImageView();
if (defaultImage != 0) {
imageView.setImageResource(defaultImage);
return;
}
imageView.setImageBitmap(null);
}
@Override
public void clearCache() {
if (imageCache == null) {
return;
}
imageCache.clearCache();
}
// the call should be done in main thread
private void checkMain() {
if (Looper.myLooper() != Looper.getMainLooper()) {
throw new IllegalStateException("Wasp.Image.load() must be invoked from the main thread.");
}
}
/**
* Simple cache adapter interface.
*/
interface ImageCache {
Bitmap getBitmap(String url);
void putBitmap(String url, Bitmap bitmap);
void clearCache();
}
interface ImageNetworkHandler {
void requestImage(ImageCreator waspImageCreator, int maxWidth, int maxHeight, InternalCallback<Container> waspCallback);
void cancelRequest(String tag);
}
static class Container {
String cacheKey;
Bitmap bitmap;
ImageCreator waspImageCreator;
}
}
| imdatcandan/wasp | wasp/src/main/java/com/orhanobut/wasp/InternalImageHandler.java | Java | apache-2.0 | 5,127 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <thrift/thrift-config.h>
#include <cstring>
#include <sstream>
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef HAVE_SYS_UN_H
#include <sys/un.h>
#endif
#ifdef HAVE_SYS_POLL_H
#include <sys/poll.h>
#endif
#include <sys/types.h>
#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#include <netinet/tcp.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <fcntl.h>
#include <thrift/concurrency/Monitor.h>
#include <thrift/transport/TSocket.h>
#include <thrift/transport/TTransportException.h>
#include <thrift/transport/PlatformSocket.h>
#ifndef SOCKOPT_CAST_T
# ifndef _WIN32
# define SOCKOPT_CAST_T void
# else
# define SOCKOPT_CAST_T char
# endif // _WIN32
#endif
// Casts a typed const pointer to the platform-specific buffer type expected by
// setsockopt()/send() (void* on POSIX, char* on Windows; see SOCKOPT_CAST_T above).
template<class T>
inline const SOCKOPT_CAST_T* const_cast_sockopt(const T* v) {
  return reinterpret_cast<const SOCKOPT_CAST_T*>(v);
}

// Mutable-pointer counterpart of const_cast_sockopt(), used for recv()/getsockopt().
template<class T>
inline SOCKOPT_CAST_T* cast_sockopt(T* v) {
  return reinterpret_cast<SOCKOPT_CAST_T*>(v);
}
namespace apache { namespace thrift { namespace transport {
using namespace std;
// Global counter of socket system calls (recv/send) issued by TSocket.
// NOTE(review): incremented without synchronization, so the count is
// best-effort under concurrent use.
uint32_t g_socket_syscalls = 0;
/**
* TSocket implementation.
*
*/
/**
 * Constructs a TCP client socket for the given host/port. No OS socket is
 * created until open() is called.
 */
TSocket::TSocket(string host, int port) :
  host_(host),
  port_(port),
  path_(""),
  socket_(THRIFT_INVALID_SOCKET),
  connTimeout_(0),
  sendTimeout_(0),
  recvTimeout_(0),
  keepAlive_(false),
  lingerOn_(1),
  lingerVal_(0),
  noDelay_(1),
  maxRecvRetries_(5) {
  // Bug fix: mark the cached peer address as unset, exactly as every other
  // constructor does. Without this, getCachedAddress() could read an
  // uninitialized sin_family and hand back garbage as a peer address.
  cachedPeerAddr_.ipv4.sin_family = AF_UNSPEC;
}
/**
 * Constructs a Unix-domain client socket bound to the given filesystem path.
 * No OS socket is created until open() is called.
 */
TSocket::TSocket(string path) :
  host_(""),
  port_(0),
  path_(path),
  socket_(THRIFT_INVALID_SOCKET),
  connTimeout_(0),
  sendTimeout_(0),
  recvTimeout_(0),
  keepAlive_(false),
  lingerOn_(1),
  lingerVal_(0),
  noDelay_(1),
  maxRecvRetries_(5) {
  // No peer address has been cached yet.
  cachedPeerAddr_.ipv4.sin_family = AF_UNSPEC;
}

/**
 * Default constructor; host/port or path must be set before open().
 */
TSocket::TSocket() :
  host_(""),
  port_(0),
  path_(""),
  socket_(THRIFT_INVALID_SOCKET),
  connTimeout_(0),
  sendTimeout_(0),
  recvTimeout_(0),
  keepAlive_(false),
  lingerOn_(1),
  lingerVal_(0),
  noDelay_(1),
  maxRecvRetries_(5) {
  cachedPeerAddr_.ipv4.sin_family = AF_UNSPEC;
}

/**
 * Wraps an already-connected descriptor (e.g. one returned by accept()).
 */
TSocket::TSocket(THRIFT_SOCKET socket) :
  host_(""),
  port_(0),
  path_(""),
  socket_(socket),
  connTimeout_(0),
  sendTimeout_(0),
  recvTimeout_(0),
  keepAlive_(false),
  lingerOn_(1),
  lingerVal_(0),
  noDelay_(1),
  maxRecvRetries_(5) {
  cachedPeerAddr_.ipv4.sin_family = AF_UNSPEC;
#ifdef SO_NOSIGPIPE
  {
    // Suppress SIGPIPE at the socket level where supported (BSD/macOS);
    // other platforms rely on MSG_NOSIGNAL per send() call instead.
    int one = 1;
    setsockopt(socket_, SOL_SOCKET, SO_NOSIGPIPE, &one, sizeof(one));
  }
#endif
}
TSocket::~TSocket() {
  // Best-effort release of the descriptor; close() errors are ignored here.
  close();
}

// True as soon as a valid descriptor is held; does not probe whether the peer
// is still connected (see peek() for that).
bool TSocket::isOpen() {
  return (socket_ != THRIFT_INVALID_SOCKET);
}

/**
 * Non-destructively checks whether at least one byte can be read
 * (recv with MSG_PEEK). Returns false for a closed/never-opened socket.
 * @throws TTransportException on unexpected recv() failures.
 */
bool TSocket::peek() {
  if (!isOpen()) {
    return false;
  }
  uint8_t buf;
  int r = static_cast<int>(recv(socket_, cast_sockopt(&buf), 1, MSG_PEEK));
  if (r == -1) {
    int errno_copy = THRIFT_GET_SOCKET_ERROR;
#if defined __FreeBSD__ || defined __MACH__
    /* shigin:
     * freebsd returns -1 and THRIFT_ECONNRESET if socket was closed by
     * the other side
     */
    if (errno_copy == THRIFT_ECONNRESET)
    {
      close();
      return false;
    }
#endif
    GlobalOutput.perror("TSocket::peek() recv() " + getSocketInfo(), errno_copy);
    throw TTransportException(TTransportException::UNKNOWN, "recv()", errno_copy);
  }
  return (r > 0);
}
/**
 * Creates the descriptor and connects it. When connTimeout_ is set the
 * connect is performed non-blocking and waited on with poll(); the socket is
 * restored to blocking mode afterwards. 'res' is the resolved address to
 * connect to, or NULL for a Unix-domain (path_) connection.
 *
 * @throws TTransportException (NOT_OPEN) on any failure.
 */
void TSocket::openConnection(struct addrinfo *res) {
  if (isOpen()) {
    return;
  }

  if (! path_.empty()) {
    socket_ = socket(PF_UNIX, SOCK_STREAM, IPPROTO_IP);
  } else {
    socket_ = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
  }

  if (socket_ == THRIFT_INVALID_SOCKET) {
    int errno_copy = THRIFT_GET_SOCKET_ERROR;
    GlobalOutput.perror("TSocket::open() socket() " + getSocketInfo(), errno_copy);
    throw TTransportException(TTransportException::NOT_OPEN, "socket()", errno_copy);
  }

  // Re-apply any options configured before the descriptor existed.
  // Send timeout
  if (sendTimeout_ > 0) {
    setSendTimeout(sendTimeout_);
  }

  // Recv timeout
  if (recvTimeout_ > 0) {
    setRecvTimeout(recvTimeout_);
  }

  if(keepAlive_) {
    setKeepAlive(keepAlive_);
  }

  // Linger
  setLinger(lingerOn_, lingerVal_);

  // No delay
  setNoDelay(noDelay_);

#ifdef SO_NOSIGPIPE
  {
    // Suppress SIGPIPE at the socket level where supported (BSD/macOS).
    int one = 1;
    setsockopt(socket_, SOL_SOCKET, SO_NOSIGPIPE, &one, sizeof(one));
  }
#endif

  // Uses a low min RTO if asked to.
#ifdef TCP_LOW_MIN_RTO
  if (getUseLowMinRto()) {
    int one = 1;
    setsockopt(socket_, IPPROTO_TCP, TCP_LOW_MIN_RTO, &one, sizeof(one));
  }
#endif

  // Set the socket to be non blocking for connect if a timeout exists
  int flags = THRIFT_FCNTL(socket_, THRIFT_F_GETFL, 0);
  if (connTimeout_ > 0) {
    if (-1 == THRIFT_FCNTL(socket_, THRIFT_F_SETFL, flags | THRIFT_O_NONBLOCK)) {
      int errno_copy = THRIFT_GET_SOCKET_ERROR;
      GlobalOutput.perror("TSocket::open() THRIFT_FCNTL() " + getSocketInfo(), errno_copy);
      throw TTransportException(TTransportException::NOT_OPEN, "THRIFT_FCNTL() failed", errno_copy);
    }
  } else {
    if (-1 == THRIFT_FCNTL(socket_, THRIFT_F_SETFL, flags & ~THRIFT_O_NONBLOCK)) {
      int errno_copy = THRIFT_GET_SOCKET_ERROR;
      GlobalOutput.perror("TSocket::open() THRIFT_FCNTL " + getSocketInfo(), errno_copy);
      throw TTransportException(TTransportException::NOT_OPEN, "THRIFT_FCNTL() failed", errno_copy);
    }
  }

  // Connect the socket
  int ret;
  if (! path_.empty()) {

#ifndef _WIN32

    // +1 so the terminating NUL is copied into sun_path as well.
    size_t len = path_.size() + 1;
    if (len > sizeof(sockaddr_un::sun_path)) {
      int errno_copy = THRIFT_GET_SOCKET_ERROR;
      GlobalOutput.perror("TSocket::open() Unix Domain socket path too long", errno_copy);
      throw TTransportException(TTransportException::NOT_OPEN, " Unix Domain socket path too long");
    }

    struct sockaddr_un address;
    address.sun_family = AF_UNIX;
    memcpy(address.sun_path, path_.c_str(), len);
    socklen_t structlen = static_cast<socklen_t>(sizeof(address));
    ret = connect(socket_, (struct sockaddr *) &address, structlen);

#else
    GlobalOutput.perror("TSocket::open() Unix Domain socket path not supported on windows", -99);
    throw TTransportException(TTransportException::NOT_OPEN, " Unix Domain socket path not supported");
#endif

  } else {
    ret = connect(socket_, res->ai_addr, static_cast<int>(res->ai_addrlen));
  }

  // success case
  if (ret == 0) {
    goto done;
  }

  // Anything other than "connect still in progress" is a hard failure.
  if ((THRIFT_GET_SOCKET_ERROR != THRIFT_EINPROGRESS) && (THRIFT_GET_SOCKET_ERROR != THRIFT_EWOULDBLOCK)) {
    int errno_copy = THRIFT_GET_SOCKET_ERROR;
    GlobalOutput.perror("TSocket::open() connect() " + getSocketInfo(), errno_copy);
    throw TTransportException(TTransportException::NOT_OPEN, "connect() failed", errno_copy);
  }

  // Non-blocking connect in progress: wait (up to connTimeout_ ms) for writability.
  struct THRIFT_POLLFD fds[1];
  std::memset(fds, 0 , sizeof(fds));
  fds[0].fd = socket_;
  fds[0].events = THRIFT_POLLOUT;
  ret = THRIFT_POLL(fds, 1, connTimeout_);

  if (ret > 0) {
    // Ensure the socket is connected and that there are no errors set
    int val;
    socklen_t lon;
    lon = sizeof(int);
    int ret2 = getsockopt(socket_, SOL_SOCKET, SO_ERROR, cast_sockopt(&val), &lon);
    if (ret2 == -1) {
      int errno_copy = THRIFT_GET_SOCKET_ERROR;
      GlobalOutput.perror("TSocket::open() getsockopt() " + getSocketInfo(), errno_copy);
      throw TTransportException(TTransportException::NOT_OPEN, "getsockopt()", errno_copy);
    }
    // no errors on socket, go to town
    if (val == 0) {
      goto done;
    }
    GlobalOutput.perror("TSocket::open() error on socket (after THRIFT_POLL) " + getSocketInfo(), val);
    throw TTransportException(TTransportException::NOT_OPEN, "socket open() error", val);
  } else if (ret == 0) {
    // socket timed out
    string errStr = "TSocket::open() timed out " + getSocketInfo();
    GlobalOutput(errStr.c_str());
    throw TTransportException(TTransportException::NOT_OPEN, "open() timed out");
  } else {
    // error on THRIFT_POLL()
    int errno_copy = THRIFT_GET_SOCKET_ERROR;
    GlobalOutput.perror("TSocket::open() THRIFT_POLL() " + getSocketInfo(), errno_copy);
    throw TTransportException(TTransportException::NOT_OPEN, "THRIFT_POLL() failed", errno_copy);
  }

 done:
  // Set socket back to normal mode (blocking)
  THRIFT_FCNTL(socket_, THRIFT_F_SETFL, flags);

  if (path_.empty()) {
    setCachedAddress(res->ai_addr, static_cast<socklen_t>(res->ai_addrlen));
  }
}
/**
 * Public open entry point: dispatches to the Unix-domain or TCP path.
 * No-op if already open.
 */
void TSocket::open() {
  if (isOpen()) {
    return;
  }
  if (! path_.empty()) {
    unix_open();
  } else {
    local_open();
  }
}

void TSocket::unix_open(){
  if (! path_.empty()) {
    // Unix Domain SOcket does not need addrinfo struct, so we pass NULL
    openConnection(NULL);
  }
}

/**
 * Resolves host_:port_ with getaddrinfo() and attempts each returned address
 * until one connects; rethrows the final failure if none do.
 */
void TSocket::local_open(){

#ifdef _WIN32
  TWinsockSingleton::create();
#endif // _WIN32

  if (isOpen()) {
    return;
  }

  // Validate port number
  if (port_ < 0 || port_ > 0xFFFF) {
    throw TTransportException(TTransportException::NOT_OPEN, "Specified port is invalid");
  }

  struct addrinfo hints, *res, *res0;
  res = NULL;
  res0 = NULL;
  int error;
  char port[sizeof("65535")];
  std::memset(&hints, 0, sizeof(hints));
  hints.ai_family = PF_UNSPEC;
  hints.ai_socktype = SOCK_STREAM;
  hints.ai_flags = AI_PASSIVE | AI_ADDRCONFIG;
  sprintf(port, "%d", port_);

  error = getaddrinfo(host_.c_str(), port, &hints, &res0);

#ifdef _WIN32
  // Retry without AI_ADDRCONFIG: Windows can return WSANO_DATA on
  // loopback-only configurations when that flag is set.
  if (error == WSANO_DATA) {
    hints.ai_flags &= ~AI_ADDRCONFIG;
    error = getaddrinfo(host_.c_str(), port, &hints, &res0);
  }
#endif

  if (error) {
    string errStr = "TSocket::open() getaddrinfo() " + getSocketInfo() + string(THRIFT_GAI_STRERROR(error));
    GlobalOutput(errStr.c_str());
    close();
    throw TTransportException(TTransportException::NOT_OPEN, "Could not resolve host for client socket.");
  }

  // Cycle through all the returned addresses until one
  // connects or push the exception up.
  for (res = res0; res; res = res->ai_next) {
    try {
      openConnection(res);
      break;
    } catch (TTransportException&) {
      if (res->ai_next) {
        close();
      } else {
        close();
        freeaddrinfo(res0); // cleanup on failure
        throw;
      }
    }
  }

  // Free address structure memory
  freeaddrinfo(res0);
}
// Shut down both directions and release the descriptor. Idempotent: calling
// close() on an already-closed socket is a no-op.
void TSocket::close() {
  if (socket_ == THRIFT_INVALID_SOCKET) {
    return;
  }
  shutdown(socket_, THRIFT_SHUT_RDWR);
  ::THRIFT_CLOSESOCKET(socket_);
  socket_ = THRIFT_INVALID_SOCKET;
}
// Adopt an externally created descriptor, first closing any socket we
// already own so the old descriptor is not leaked.
void TSocket::setSocketFD(THRIFT_SOCKET socket) {
  const bool ownsOpenSocket = (socket_ != THRIFT_INVALID_SOCKET);
  if (ownsOpenSocket) {
    close();
  }
  socket_ = socket;
}
/**
 * Reads up to 'len' bytes into 'buf' with a single recv(), retrying on
 * THRIFT_EINTR and on THRIFT_EAGAIN that looks like transient resource
 * exhaustion (up to maxRecvRetries_ times). Returns 0 on orderly peer close.
 *
 * @throws TTransportException NOT_OPEN / TIMED_OUT / UNKNOWN depending on the
 *         recv() failure.
 */
uint32_t TSocket::read(uint8_t* buf, uint32_t len) {
  if (socket_ == THRIFT_INVALID_SOCKET) {
    throw TTransportException(TTransportException::NOT_OPEN, "Called read on non-open socket");
  }

  int32_t retries = 0;

  // THRIFT_EAGAIN can be signalled both when a timeout has occurred and when
  // the system is out of resources (an awesome undocumented feature).
  // The following is an approximation of the time interval under which
  // THRIFT_EAGAIN is taken to indicate an out of resources error.
  uint32_t eagainThresholdMicros = 0;
  if (recvTimeout_) {
    // if a readTimeout is specified along with a max number of recv retries, then
    // the threshold will ensure that the read timeout is not exceeded even in the
    // case of resource errors
    eagainThresholdMicros = (recvTimeout_*1000)/ ((maxRecvRetries_>0) ? maxRecvRetries_ : 2);
  }

 try_again:
  // Read from the socket
  struct timeval begin;
  if (recvTimeout_ > 0) {
    THRIFT_GETTIMEOFDAY(&begin, NULL);
  } else {
    // if there is no read timeout we don't need the TOD to determine whether
    // an THRIFT_EAGAIN is due to a timeout or an out-of-resource condition.
    begin.tv_sec = begin.tv_usec = 0;
  }
  int got = static_cast<int>(recv(socket_, cast_sockopt(buf), len, 0));
  int errno_copy = THRIFT_GET_SOCKET_ERROR; //THRIFT_GETTIMEOFDAY can change THRIFT_GET_SOCKET_ERROR
  ++g_socket_syscalls;

  // Check for error on read
  if (got < 0) {
    if (errno_copy == THRIFT_EAGAIN) {
      // if no timeout we can assume that resource exhaustion has occurred.
      if (recvTimeout_ == 0) {
        throw TTransportException(TTransportException::TIMED_OUT,
                                  "THRIFT_EAGAIN (unavailable resources)");
      }
      // check if this is the lack of resources or timeout case
      struct timeval end;
      THRIFT_GETTIMEOFDAY(&end, NULL);
      uint32_t readElapsedMicros = static_cast<uint32_t>(
         ((end.tv_sec - begin.tv_sec) * 1000 * 1000)
         + (((uint64_t)(end.tv_usec - begin.tv_usec))));

      if (!eagainThresholdMicros || (readElapsedMicros < eagainThresholdMicros)) {
        // Returned "too quickly" to be a timeout: treat as resource
        // exhaustion, back off briefly and retry.
        if (retries++ < maxRecvRetries_) {
          THRIFT_SLEEP_USEC(50);
          goto try_again;
        } else {
          throw TTransportException(TTransportException::TIMED_OUT,
                                    "THRIFT_EAGAIN (unavailable resources)");
        }
      } else {
        // infer that timeout has been hit
        throw TTransportException(TTransportException::TIMED_OUT,
                                  "THRIFT_EAGAIN (timed out)");
      }
    }

    // If interrupted, try again
    if (errno_copy == THRIFT_EINTR && retries++ < maxRecvRetries_) {
      goto try_again;
    }

#if defined __FreeBSD__ || defined __MACH__
    if (errno_copy == THRIFT_ECONNRESET) {
      /* shigin: freebsd doesn't follow POSIX semantic of recv and fails with
       * THRIFT_ECONNRESET if peer performed shutdown
       * edhall: eliminated close() since we do that in the destructor.
       */
      return 0;
    }
#endif

#ifdef _WIN32
    if(errno_copy == WSAECONNRESET) {
      return 0; // EOF
    }
#endif

    // Now it's not a try again case, but a real probblez
    GlobalOutput.perror("TSocket::read() recv() " + getSocketInfo(), errno_copy);

    // If we disconnect with no linger time
    if (errno_copy == THRIFT_ECONNRESET) {
      throw TTransportException(TTransportException::NOT_OPEN, "THRIFT_ECONNRESET");
    }

    // This ish isn't open
    if (errno_copy == THRIFT_ENOTCONN) {
      throw TTransportException(TTransportException::NOT_OPEN, "THRIFT_ENOTCONN");
    }

    // Timed out!
    if (errno_copy == THRIFT_ETIMEDOUT) {
      throw TTransportException(TTransportException::TIMED_OUT, "THRIFT_ETIMEDOUT");
    }

    // Some other error, whatevz
    throw TTransportException(TTransportException::UNKNOWN, "Unknown", errno_copy);
  }

  // The remote host has closed the socket
  if (got == 0) {
    // edhall: we used to call close() here, but our caller may want to deal
    // with the socket fd and we'll close() in our destructor in any case.
    return 0;
  }

  // Pack data into string
  return got;
}
void TSocket::write(const uint8_t* buf, uint32_t len) {
uint32_t sent = 0;
while (sent < len) {
uint32_t b = write_partial(buf + sent, len - sent);
if (b == 0) {
// This should only happen if the timeout set with SO_SNDTIMEO expired.
// Raise an exception.
throw TTransportException(TTransportException::TIMED_OUT,
"send timeout expired");
}
sent += b;
}
}
/**
 * Attempts a single send() of up to 'len' bytes from 'buf'.
 *
 * @return the number of bytes actually written, or 0 if the send would block
 *         (THRIFT_EWOULDBLOCK / THRIFT_EAGAIN, e.g. the send timeout expired).
 * @throws TTransportException NOT_OPEN if the socket is not open or the peer
 *         is gone (EPIPE/ECONNRESET/ENOTCONN, which also closes the socket),
 *         UNKNOWN for any other send() failure.
 */
uint32_t TSocket::write_partial(const uint8_t* buf, uint32_t len) {
  if (socket_ == THRIFT_INVALID_SOCKET) {
    throw TTransportException(TTransportException::NOT_OPEN, "Called write on non-open socket");
  }

  int flags = 0;
#ifdef MSG_NOSIGNAL
  // Note the use of MSG_NOSIGNAL to suppress SIGPIPE errors, instead we
  // check for the THRIFT_EPIPE return condition and close the socket in that case
  flags |= MSG_NOSIGNAL;
#endif // ifdef MSG_NOSIGNAL

  // Cleanup: the previous code carried a local 'sent' that was always 0
  // (send(buf + sent, len - sent)); pass buf/len directly instead.
  int b = static_cast<int>(send(socket_, const_cast_sockopt(buf), len, flags));
  ++g_socket_syscalls;

  if (b < 0) {
    if (THRIFT_GET_SOCKET_ERROR == THRIFT_EWOULDBLOCK || THRIFT_GET_SOCKET_ERROR == THRIFT_EAGAIN) {
      return 0;
    }
    // Fail on a send error
    int errno_copy = THRIFT_GET_SOCKET_ERROR;
    GlobalOutput.perror("TSocket::write_partial() send() " + getSocketInfo(), errno_copy);

    if (errno_copy == THRIFT_EPIPE || errno_copy == THRIFT_ECONNRESET || errno_copy == THRIFT_ENOTCONN) {
      close();
      throw TTransportException(TTransportException::NOT_OPEN, "write() send()", errno_copy);
    }

    throw TTransportException(TTransportException::UNKNOWN, "write() send()", errno_copy);
  }

  // Fail on blocked send
  if (b == 0) {
    throw TTransportException(TTransportException::NOT_OPEN, "Socket send returned 0.");
  }
  return b;
}
// Host name this socket was configured to connect to ("" for Unix sockets).
std::string TSocket::getHost() {
  return host_;
}

// Configured TCP port (0 for Unix sockets).
int TSocket::getPort() {
  return port_;
}

// Takes effect on the next open(); does not touch an established connection.
void TSocket::setHost(string host) {
  host_ = host;
}

// Takes effect on the next open(); does not touch an established connection.
void TSocket::setPort(int port) {
  port_ = port;
}
/**
 * Configures SO_LINGER. The preference is stored so openConnection() can
 * re-apply it when the socket is (re)created; a missing socket is a no-op.
 */
void TSocket::setLinger(bool on, int linger) {
  lingerOn_ = on;
  lingerVal_ = linger;
  if (socket_ == THRIFT_INVALID_SOCKET) {
    return;
  }

#ifndef _WIN32
  struct linger l = {(lingerOn_ ? 1 : 0), lingerVal_};
#else
  // Winsock's linger struct uses u_short fields.
  struct linger l = {(lingerOn_ ? 1 : 0), static_cast<u_short>(lingerVal_)};
#endif

  int ret = setsockopt(socket_, SOL_SOCKET, SO_LINGER, cast_sockopt(&l), sizeof(l));
  if (ret == -1) {
    int errno_copy = THRIFT_GET_SOCKET_ERROR; // Copy THRIFT_GET_SOCKET_ERROR because we're allocating memory.
    GlobalOutput.perror("TSocket::setLinger() setsockopt() " + getSocketInfo(), errno_copy);
  }
}

/**
 * Enables/disables Nagle's algorithm (TCP_NODELAY). Skipped for Unix-domain
 * sockets, where the TCP-level option does not apply.
 */
void TSocket::setNoDelay(bool noDelay) {
  noDelay_ = noDelay;
  if (socket_ == THRIFT_INVALID_SOCKET || !path_.empty()) {
    return;
  }

  // Set socket to NODELAY
  int v = noDelay_ ? 1 : 0;
  int ret = setsockopt(socket_, IPPROTO_TCP, TCP_NODELAY, cast_sockopt(&v), sizeof(v));
  if (ret == -1) {
    int errno_copy = THRIFT_GET_SOCKET_ERROR; // Copy THRIFT_GET_SOCKET_ERROR because we're allocating memory.
    GlobalOutput.perror("TSocket::setNoDelay() setsockopt() " + getSocketInfo(), errno_copy);
  }
}
// Connect timeout (ms) used by openConnection()'s poll() wait; 0 = blocking.
void TSocket::setConnTimeout(int ms) {
  connTimeout_ = ms;
}

/**
 * Applies SO_RCVTIMEO or SO_SNDTIMEO (per 'optname') to descriptor 's'.
 * Negative timeouts are rejected with a log message; an invalid socket is a
 * silent no-op (the caller re-applies the value after the next open).
 */
void setGenericTimeout(THRIFT_SOCKET s, int timeout_ms, int optname)
{
  if (timeout_ms < 0) {
    char errBuf[512];
    sprintf(errBuf, "TSocket::setGenericTimeout with negative input: %d\n", timeout_ms);
    GlobalOutput(errBuf);
    return;
  }

  if (s == THRIFT_INVALID_SOCKET) {
    return;
  }

#ifdef _WIN32
  // Windows expects the timeout as a DWORD of milliseconds ...
  DWORD platform_time = static_cast<DWORD>(timeout_ms);
#else
  // ... POSIX expects a struct timeval.
  struct timeval platform_time = {
    (int)(timeout_ms/1000),
    (int)((timeout_ms%1000)*1000)};
#endif

  int ret = setsockopt(s, SOL_SOCKET, optname, cast_sockopt(&platform_time), sizeof(platform_time));
  if (ret == -1) {
    int errno_copy = THRIFT_GET_SOCKET_ERROR; // Copy THRIFT_GET_SOCKET_ERROR because we're allocating memory.
    GlobalOutput.perror("TSocket::setGenericTimeout() setsockopt() ", errno_copy);
  }
}
// Receive timeout (ms); stored so openConnection() re-applies it on reconnect.
void TSocket::setRecvTimeout(int ms) {
  setGenericTimeout(socket_, ms, SO_RCVTIMEO);
  recvTimeout_ = ms;
}

// Send timeout (ms); stored so openConnection() re-applies it on reconnect.
void TSocket::setSendTimeout(int ms) {
  setGenericTimeout(socket_, ms, SO_SNDTIMEO);
  sendTimeout_ = ms;
}
void TSocket::setKeepAlive(bool keepAlive) {
keepAlive_ = keepAlive;
if (socket_ == -1) {
return;
}
int value = keepAlive_;
int ret = setsockopt(socket_, SOL_SOCKET, SO_KEEPALIVE, const_cast_sockopt(&value), sizeof(value));
if (ret == -1) {
int errno_copy = THRIFT_GET_SOCKET_ERROR; // Copy THRIFT_GET_SOCKET_ERROR because we're allocating memory.
GlobalOutput.perror("TSocket::setKeepAlive() setsockopt() " + getSocketInfo(), errno_copy);
}
}
// How many times read() retries after THRIFT_EAGAIN/THRIFT_EINTR before
// giving up (see the eagainThresholdMicros logic in read()).
void TSocket::setMaxRecvRetries(int maxRecvRetries) {
  maxRecvRetries_ = maxRecvRetries;
}
// Human-readable "<Host: ... Port: ...>" string for log messages. Uses the
// configured host/port when available, otherwise the peer's address/port.
string TSocket::getSocketInfo() {
  std::ostringstream oss;
  const bool haveExplicitEndpoint = !host_.empty() && port_ != 0;
  if (haveExplicitEndpoint) {
    oss << "<Host: " << host_ << " Port: " << port_ << ">";
  } else {
    oss << "<Host: " << getPeerAddress();
    oss << " Port: " << getPeerPort() << ">";
  }
  return oss.str();
}
/**
 * Resolves and caches the peer's host name via getpeername()+getnameinfo().
 * Returns host_ when the socket is not open; stays empty for Unix sockets.
 */
std::string TSocket::getPeerHost() {
  if (peerHost_.empty() && path_.empty()) {
    struct sockaddr_storage addr;
    struct sockaddr* addrPtr;
    socklen_t addrLen;

    if (socket_ == THRIFT_INVALID_SOCKET) {
      return host_;
    }

    addrPtr = getCachedAddress(&addrLen);

    if (addrPtr == NULL) {
      addrLen = sizeof(addr);
      if (getpeername(socket_, (sockaddr*) &addr, &addrLen) != 0) {
        return peerHost_;
      }
      addrPtr = (sockaddr*)&addr;

      setCachedAddress(addrPtr, addrLen);
    }

    char clienthost[NI_MAXHOST];
    char clientservice[NI_MAXSERV];

    getnameinfo((sockaddr*) addrPtr, addrLen,
                clienthost, sizeof(clienthost),
                clientservice, sizeof(clientservice), 0);

    peerHost_ = clienthost;
  }
  return peerHost_;
}

/**
 * Resolves and caches the peer's numeric address, also populating peerPort_
 * (NI_NUMERICHOST|NI_NUMERICSERV). Empty for Unix sockets or when closed.
 */
std::string TSocket::getPeerAddress() {
  if (peerAddress_.empty() && path_.empty()) {
    struct sockaddr_storage addr;
    struct sockaddr* addrPtr;
    socklen_t addrLen;

    if (socket_ == THRIFT_INVALID_SOCKET) {
      return peerAddress_;
    }

    addrPtr = getCachedAddress(&addrLen);

    if (addrPtr == NULL) {
      addrLen = sizeof(addr);
      if (getpeername(socket_, (sockaddr*) &addr, &addrLen) != 0) {
        return peerAddress_;
      }
      addrPtr = (sockaddr*)&addr;

      setCachedAddress(addrPtr, addrLen);
    }

    char clienthost[NI_MAXHOST];
    char clientservice[NI_MAXSERV];

    getnameinfo(addrPtr, addrLen,
                clienthost, sizeof(clienthost),
                clientservice, sizeof(clientservice),
                NI_NUMERICHOST|NI_NUMERICSERV);

    peerAddress_ = clienthost;
    peerPort_ = std::atoi(clientservice);
  }
  return peerAddress_;
}

// peerPort_ is filled in as a side effect of getPeerAddress().
int TSocket::getPeerPort() {
  getPeerAddress();
  return peerPort_;
}
/**
 * Remembers the peer address (IPv4/IPv6 only) so later getPeerHost()/
 * getPeerAddress() calls can skip the getpeername() syscall. No-op for Unix
 * domain sockets and for addresses whose length doesn't match the family.
 */
void TSocket::setCachedAddress(const sockaddr* addr, socklen_t len) {
  if (!path_.empty()) {
    return;
  }

  switch (addr->sa_family) {
  case AF_INET:
    if (len == sizeof(sockaddr_in)) {
      memcpy((void*)&cachedPeerAddr_.ipv4, (void*)addr, len);
    }
    break;

  case AF_INET6:
    if (len == sizeof(sockaddr_in6)) {
      memcpy((void*)&cachedPeerAddr_.ipv6, (void*)addr, len);
    }
    break;
  }
}

/**
 * Returns a pointer to the cached peer address, storing its length in *len,
 * or NULL when nothing has been cached yet (sin_family still AF_UNSPEC).
 */
sockaddr* TSocket::getCachedAddress(socklen_t* len) const {
  switch (cachedPeerAddr_.ipv4.sin_family) {
  case AF_INET:
    *len = sizeof(sockaddr_in);
    return (sockaddr*) &cachedPeerAddr_.ipv4;

  case AF_INET6:
    *len = sizeof(sockaddr_in6);
    return (sockaddr*) &cachedPeerAddr_.ipv6;

  default:
    return NULL;
  }
}
// Process-wide preference; applied per socket in openConnection() only on
// platforms that define TCP_LOW_MIN_RTO.
bool TSocket::useLowMinRto_ = false;

void TSocket::setUseLowMinRto(bool useLowMinRto) {
  useLowMinRto_ = useLowMinRto;
}

bool TSocket::getUseLowMinRto() {
  return useLowMinRto_;
}
// "<peer-host>:<peer-port>" identifier used for logging/diagnostics.
const std::string TSocket::getOrigin() {
  std::ostringstream origin;
  origin << getPeerHost() << ":" << getPeerPort();
  return origin.str();
}
}}} // apache::thrift::transport
| rewardStyle/apache.thrift | lib/cpp/src/thrift/transport/TSocket.cpp | C++ | apache-2.0 | 23,335 |
/*-
*
* * Copyright 2016 Skymind,Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package org.deeplearning4j.nn.conf.graph;
import org.deeplearning4j.nn.conf.inputs.InputType;
import org.deeplearning4j.nn.conf.inputs.InvalidInputTypeException;
import org.deeplearning4j.nn.graph.ComputationGraph;
import org.nd4j.linalg.api.ndarray.INDArray;
/** A MergeVertex is used to combine the activations of two or more layers/GraphVertex by means of concatenation/merging.<br>
 * Exactly how this is done depends on the type of input.<br>
 * For 2d (feed forward layer) inputs: MergeVertex([numExamples,layerSize1],[numExamples,layerSize2]) -> [numExamples,layerSize1 + layerSize2]<br>
 * For 3d (time series) inputs: MergeVertex([numExamples,layerSize1,timeSeriesLength],[numExamples,layerSize2,timeSeriesLength])
 * -> [numExamples,layerSize1 + layerSize2,timeSeriesLength]<br>
 * For 4d (convolutional) inputs: MergeVertex([numExamples,depth1,width,height],[numExamples,depth2,width,height])
 * -> [numExamples,depth1 + depth2,width,height]<br>
 * @author Alex Black
 */
public class MergeVertex extends GraphVertex {

    @Override
    public MergeVertex clone() {
        // Stateless configuration vertex: a fresh instance is an exact copy.
        return new MergeVertex();
    }

    @Override
    public boolean equals(Object o) {
        // All MergeVertex instances are interchangeable (no configuration state).
        return o instanceof MergeVertex;
    }

    @Override
    public int hashCode() {
        // Arbitrary constant, consistent with the state-free equals() above.
        return 433682566;
    }

    @Override
    public int numParams(boolean backprop) {
        // Merging is parameter-free.
        return 0;
    }

    @Override
    public org.deeplearning4j.nn.graph.vertex.GraphVertex instantiate(ComputationGraph graph, String name, int idx,
                    INDArray paramsView, boolean initializeParams) {
        return new org.deeplearning4j.nn.graph.vertex.impl.MergeVertex(graph, name, idx);
    }

    /**
     * Computes the merged output type. FF/RNN inputs concatenate along the
     * size dimension; CNN inputs concatenate along depth and must agree on
     * width/height. Flattened CNN input is not supported.
     *
     * @throws InvalidInputTypeException if the input types are mixed,
     *         flattened CNN, or CNN activations with mismatched width/height
     */
    @Override
    public InputType getOutputType(int layerIndex, InputType... vertexInputs) throws InvalidInputTypeException {
        if (vertexInputs.length == 1)
            return vertexInputs[0];
        InputType first = vertexInputs[0];
        if (first.getType() == InputType.Type.CNNFlat) {
            //TODO
            //Merging flattened CNN format data could be messy?
            // Bug fix: previously concatenated the array reference itself, which
            // rendered as "[Lorg...InputType;@1a2b3c" in the message.
            throw new InvalidInputTypeException(
                            "Invalid input: MergeVertex cannot currently merge CNN data in flattened format. Got: "
                                            + java.util.Arrays.toString(vertexInputs));
        } else if (first.getType() != InputType.Type.CNN) {
            //FF or RNN data inputs
            int size = 0;
            InputType.Type type = null;
            for (int i = 0; i < vertexInputs.length; i++) {
                if (vertexInputs[i].getType() != first.getType()) {
                    throw new InvalidInputTypeException(
                                    "Invalid input: MergeVertex cannot merge activations of different types:"
                                                    + " first type = " + first.getType() + ", input type " + (i + 1)
                                                    + " = " + vertexInputs[i].getType());
                }

                int thisSize;
                switch (vertexInputs[i].getType()) {
                    case FF:
                        thisSize = ((InputType.InputTypeFeedForward) vertexInputs[i]).getSize();
                        type = InputType.Type.FF;
                        break;
                    case RNN:
                        thisSize = ((InputType.InputTypeRecurrent) vertexInputs[i]).getSize();
                        type = InputType.Type.RNN;
                        break;
                    default:
                        throw new IllegalStateException("Unknown input type: " + vertexInputs[i]); //Should never happen
                }
                if (thisSize <= 0) {//Size is not defined
                    size = -1;
                } else {
                    size += thisSize;
                }
            }

            if (size > 0) {
                //Size is specified
                if (type == InputType.Type.FF)
                    return InputType.feedForward(size);
                else
                    return InputType.recurrent(size);
            } else {
                //size is unknown
                if (type == InputType.Type.FF)
                    return InputType.feedForward(-1);
                else
                    return InputType.recurrent(-1);
            }
        } else {
            //CNN inputs... also check that the depth, width and heights match:
            InputType.InputTypeConvolutional firstConv = (InputType.InputTypeConvolutional) first;
            int fd = firstConv.getDepth();
            int fw = firstConv.getWidth();
            int fh = firstConv.getHeight();

            int depthSum = fd;

            for (int i = 1; i < vertexInputs.length; i++) {
                if (vertexInputs[i].getType() != InputType.Type.CNN) {
                    throw new InvalidInputTypeException(
                                    "Invalid input: MergeVertex cannot process activations of different types:"
                                                    + " first type = " + InputType.Type.CNN + ", input type " + (i + 1)
                                                    + " = " + vertexInputs[i].getType());
                }

                InputType.InputTypeConvolutional otherConv = (InputType.InputTypeConvolutional) vertexInputs[i];

                int od = otherConv.getDepth();
                int ow = otherConv.getWidth();
                int oh = otherConv.getHeight();

                if (fw != ow || fh != oh) {
                    throw new InvalidInputTypeException(
                                    "Invalid input: MergeVertex cannot merge CNN activations of different width/heights:"
                                                    + "first [depth,width,height] = [" + fd + "," + fw + "," + fh
                                                    + "], input " + i + " = [" + od + "," + ow + "," + oh + "]");
                }

                depthSum += od;
            }

            return InputType.convolutional(fh, fw, depthSum);
        }
    }
}
| dmmiller612/deeplearning4j | deeplearning4j-nn/src/main/java/org/deeplearning4j/nn/conf/graph/MergeVertex.java | Java | apache-2.0 | 6,727 |
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/codedeploy/model/ListApplicationRevisionsRequest.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::CodeDeploy::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
// All "HasBeenSet" flags start false so SerializePayload() only emits JSON
// members the caller explicitly provided; enum members also get an explicit
// NOT_SET sentinel.
ListApplicationRevisionsRequest::ListApplicationRevisionsRequest() :
    m_applicationNameHasBeenSet(false),
    m_sortBy(ApplicationRevisionSortBy::NOT_SET),
    m_sortByHasBeenSet(false),
    m_sortOrder(SortOrder::NOT_SET),
    m_sortOrderHasBeenSet(false),
    m_s3BucketHasBeenSet(false),
    m_s3KeyPrefixHasBeenSet(false),
    m_deployed(ListStateFilterAction::NOT_SET),
    m_deployedHasBeenSet(false),
    m_nextTokenHasBeenSet(false)
{
}

// Builds the JSON request body, including only members that were set.
// Enum values are serialized via their *Mapper name lookups.
Aws::String ListApplicationRevisionsRequest::SerializePayload() const
{
  JsonValue payload;

  if(m_applicationNameHasBeenSet)
  {
   payload.WithString("applicationName", m_applicationName);

  }

  if(m_sortByHasBeenSet)
  {
   payload.WithString("sortBy", ApplicationRevisionSortByMapper::GetNameForApplicationRevisionSortBy(m_sortBy));
  }

  if(m_sortOrderHasBeenSet)
  {
   payload.WithString("sortOrder", SortOrderMapper::GetNameForSortOrder(m_sortOrder));
  }

  if(m_s3BucketHasBeenSet)
  {
   payload.WithString("s3Bucket", m_s3Bucket);

  }

  if(m_s3KeyPrefixHasBeenSet)
  {
   payload.WithString("s3KeyPrefix", m_s3KeyPrefix);

  }

  if(m_deployedHasBeenSet)
  {
   payload.WithString("deployed", ListStateFilterActionMapper::GetNameForListStateFilterAction(m_deployed));
  }

  if(m_nextTokenHasBeenSet)
  {
   payload.WithString("nextToken", m_nextToken);

  }

  return payload.WriteReadable();
}

// The X-Amz-Target header routes the request to the CodeDeploy
// ListApplicationRevisions operation (JSON-RPC style dispatch).
Aws::Http::HeaderValueCollection ListApplicationRevisionsRequest::GetRequestSpecificHeaders() const
{
  Aws::Http::HeaderValueCollection headers;
  headers.insert(Aws::Http::HeaderValuePair("X-Amz-Target", "CodeDeploy_20141006.ListApplicationRevisions"));
  return headers;
}
| chiaming0914/awe-cpp-sdk | aws-cpp-sdk-codedeploy/source/model/ListApplicationRevisionsRequest.cpp | C++ | apache-2.0 | 2,461 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.builder.endpoint.dsl;
import java.util.Map;
import javax.annotation.Generated;
import org.apache.camel.ExchangePattern;
import org.apache.camel.builder.EndpointConsumerBuilder;
import org.apache.camel.builder.EndpointProducerBuilder;
import org.apache.camel.builder.endpoint.AbstractEndpointBuilder;
import org.apache.camel.spi.ExceptionHandler;
/**
* The rabbitmq component allows you produce and consume messages from RabbitMQ
* instances.
*
* Generated by camel-package-maven-plugin - do not edit this file!
*/
@Generated("org.apache.camel.maven.packaging.EndpointDslMojo")
public interface RabbitMQEndpointBuilderFactory {
/**
* Builder for endpoint consumers for the RabbitMQ component.
*/
    public interface RabbitMQEndpointConsumerBuilder
            extends
                EndpointConsumerBuilder {
        /**
         * Switches to the advanced builder, which exposes the less commonly
         * used options of this endpoint.
         */
        default AdvancedRabbitMQEndpointConsumerBuilder advanced() {
            return (AdvancedRabbitMQEndpointConsumerBuilder) this;
        }
        /**
         * If this option is set, camel-rabbitmq will try to create connection
         * based on the setting of option addresses. The addresses value is a
         * string which looks like server1:12345, server2:12345.
         * 
         * The option is a: <code>com.rabbitmq.client.Address[]</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder addresses(Object[] addresses) {
            setProperty("addresses", addresses);
            return this;
        }
        /**
         * If this option is set, camel-rabbitmq will try to create connection
         * based on the setting of option addresses. The addresses value is a
         * string which looks like server1:12345, server2:12345.
         * 
         * The option will be converted to a
         * <code>com.rabbitmq.client.Address[]</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder addresses(String addresses) {
            setProperty("addresses", addresses);
            return this;
        }
        /**
         * If it is true, the exchange will be deleted when it is no longer in
         * use.
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder autoDelete(boolean autoDelete) {
            setProperty("autoDelete", autoDelete);
            return this;
        }
        /**
         * If it is true, the exchange will be deleted when it is no longer in
         * use.
         * 
         * The option will be converted to a <code>boolean</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder autoDelete(String autoDelete) {
            setProperty("autoDelete", autoDelete);
            return this;
        }
        /**
         * To use a custom RabbitMQ connection factory. When this option is set,
         * all connection options (connectionTimeout, requestedChannelMax...)
         * set on URI are not used.
         * 
         * The option is a: <code>com.rabbitmq.client.ConnectionFactory</code>
         * type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder connectionFactory(
                Object connectionFactory) {
            setProperty("connectionFactory", connectionFactory);
            return this;
        }
        /**
         * To use a custom RabbitMQ connection factory. When this option is set,
         * all connection options (connectionTimeout, requestedChannelMax...)
         * set on URI are not used.
         * 
         * The option will be converted to a
         * <code>com.rabbitmq.client.ConnectionFactory</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder connectionFactory(
                String connectionFactory) {
            setProperty("connectionFactory", connectionFactory);
            return this;
        }
        /**
         * The name of the dead letter exchange.
         * 
         * The option is a: <code>java.lang.String</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder deadLetterExchange(
                String deadLetterExchange) {
            setProperty("deadLetterExchange", deadLetterExchange);
            return this;
        }
        /**
         * The type of the dead letter exchange.
         * 
         * The option is a: <code>java.lang.String</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder deadLetterExchangeType(
                String deadLetterExchangeType) {
            setProperty("deadLetterExchangeType", deadLetterExchangeType);
            return this;
        }
        /**
         * The name of the dead letter queue.
         * 
         * The option is a: <code>java.lang.String</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder deadLetterQueue(
                String deadLetterQueue) {
            setProperty("deadLetterQueue", deadLetterQueue);
            return this;
        }
        /**
         * The routing key for the dead letter exchange.
         * 
         * The option is a: <code>java.lang.String</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder deadLetterRoutingKey(
                String deadLetterRoutingKey) {
            setProperty("deadLetterRoutingKey", deadLetterRoutingKey);
            return this;
        }
        /**
         * If the option is true, camel declare the exchange and queue name and
         * bind them together. If the option is false, camel won't declare the
         * exchange and queue name on the server.
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder declare(boolean declare) {
            setProperty("declare", declare);
            return this;
        }
        /**
         * If the option is true, camel declare the exchange and queue name and
         * bind them together. If the option is false, camel won't declare the
         * exchange and queue name on the server.
         * 
         * The option will be converted to a <code>boolean</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder declare(String declare) {
            setProperty("declare", declare);
            return this;
        }
        /**
         * If we are declaring a durable exchange (the exchange will survive a
         * server restart).
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder durable(boolean durable) {
            setProperty("durable", durable);
            return this;
        }
        /**
         * If we are declaring a durable exchange (the exchange will survive a
         * server restart).
         * 
         * The option will be converted to a <code>boolean</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder durable(String durable) {
            setProperty("durable", durable);
            return this;
        }
        /**
         * The exchange type such as direct or topic.
         * 
         * The option is a: <code>java.lang.String</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder exchangeType(String exchangeType) {
            setProperty("exchangeType", exchangeType);
            return this;
        }
        /**
         * Exclusive queues may only be accessed by the current connection, and
         * are deleted when that connection closes.
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder exclusive(boolean exclusive) {
            setProperty("exclusive", exclusive);
            return this;
        }
        /**
         * Exclusive queues may only be accessed by the current connection, and
         * are deleted when that connection closes.
         * 
         * The option will be converted to a <code>boolean</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder exclusive(String exclusive) {
            setProperty("exclusive", exclusive);
            return this;
        }
        /**
         * The hostname of the running rabbitmq instance or cluster.
         * 
         * The option is a: <code>java.lang.String</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder hostname(String hostname) {
            setProperty("hostname", hostname);
            return this;
        }
        /**
         * Passive queues depend on the queue already to be available at
         * RabbitMQ.
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder passive(boolean passive) {
            setProperty("passive", passive);
            return this;
        }
        /**
         * Passive queues depend on the queue already to be available at
         * RabbitMQ.
         * 
         * The option will be converted to a <code>boolean</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder passive(String passive) {
            setProperty("passive", passive);
            return this;
        }
        /**
         * Port number for the host with the running rabbitmq instance or
         * cluster. Default value is 5672.
         * 
         * The option is a: <code>int</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder portNumber(int portNumber) {
            setProperty("portNumber", portNumber);
            return this;
        }
        /**
         * Port number for the host with the running rabbitmq instance or
         * cluster. Default value is 5672.
         * 
         * The option will be converted to a <code>int</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder portNumber(String portNumber) {
            setProperty("portNumber", portNumber);
            return this;
        }
        /**
         * The queue to receive messages from.
         * 
         * The option is a: <code>java.lang.String</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder queue(String queue) {
            setProperty("queue", queue);
            return this;
        }
        /**
         * The routing key to use when binding a consumer queue to the exchange.
         * For producer routing keys, you set the header rabbitmq.ROUTING_KEY.
         * 
         * The option is a: <code>java.lang.String</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder routingKey(String routingKey) {
            setProperty("routingKey", routingKey);
            return this;
        }
        /**
         * This can be used if we need to declare the queue but not the
         * exchange.
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder skipExchangeDeclare(
                boolean skipExchangeDeclare) {
            setProperty("skipExchangeDeclare", skipExchangeDeclare);
            return this;
        }
        /**
         * This can be used if we need to declare the queue but not the
         * exchange.
         * 
         * The option will be converted to a <code>boolean</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder skipExchangeDeclare(
                String skipExchangeDeclare) {
            setProperty("skipExchangeDeclare", skipExchangeDeclare);
            return this;
        }
        /**
         * If true the queue will not be bound to the exchange after declaring
         * it.
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder skipQueueBind(
                boolean skipQueueBind) {
            setProperty("skipQueueBind", skipQueueBind);
            return this;
        }
        /**
         * If true the queue will not be bound to the exchange after declaring
         * it.
         * 
         * The option will be converted to a <code>boolean</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder skipQueueBind(
                String skipQueueBind) {
            setProperty("skipQueueBind", skipQueueBind);
            return this;
        }
        /**
         * If true the producer will not declare and bind a queue. This can be
         * used for directing messages via an existing routing key.
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder skipQueueDeclare(
                boolean skipQueueDeclare) {
            setProperty("skipQueueDeclare", skipQueueDeclare);
            return this;
        }
        /**
         * If true the producer will not declare and bind a queue. This can be
         * used for directing messages via an existing routing key.
         * 
         * The option will be converted to a <code>boolean</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder skipQueueDeclare(
                String skipQueueDeclare) {
            setProperty("skipQueueDeclare", skipQueueDeclare);
            return this;
        }
        /**
         * The vhost for the channel.
         * 
         * The option is a: <code>java.lang.String</code> type.
         * 
         * Group: common
         */
        default RabbitMQEndpointConsumerBuilder vhost(String vhost) {
            setProperty("vhost", vhost);
            return this;
        }
        /**
         * If messages should be auto acknowledged.
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Group: consumer
         */
        default RabbitMQEndpointConsumerBuilder autoAck(boolean autoAck) {
            setProperty("autoAck", autoAck);
            return this;
        }
        /**
         * If messages should be auto acknowledged.
         * 
         * The option will be converted to a <code>boolean</code> type.
         * 
         * Group: consumer
         */
        default RabbitMQEndpointConsumerBuilder autoAck(String autoAck) {
            setProperty("autoAck", autoAck);
            return this;
        }
        /**
         * Allows for bridging the consumer to the Camel routing Error Handler,
         * which mean any exceptions occurred while the consumer is trying to
         * pickup incoming messages, or the likes, will now be processed as a
         * message and handled by the routing Error Handler. By default the
         * consumer will use the org.apache.camel.spi.ExceptionHandler to deal
         * with exceptions, that will be logged at WARN or ERROR level and
         * ignored.
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Group: consumer
         */
        default RabbitMQEndpointConsumerBuilder bridgeErrorHandler(
                boolean bridgeErrorHandler) {
            setProperty("bridgeErrorHandler", bridgeErrorHandler);
            return this;
        }
        /**
         * Allows for bridging the consumer to the Camel routing Error Handler,
         * which mean any exceptions occurred while the consumer is trying to
         * pickup incoming messages, or the likes, will now be processed as a
         * message and handled by the routing Error Handler. By default the
         * consumer will use the org.apache.camel.spi.ExceptionHandler to deal
         * with exceptions, that will be logged at WARN or ERROR level and
         * ignored.
         * 
         * The option will be converted to a <code>boolean</code> type.
         * 
         * Group: consumer
         */
        default RabbitMQEndpointConsumerBuilder bridgeErrorHandler(
                String bridgeErrorHandler) {
            setProperty("bridgeErrorHandler", bridgeErrorHandler);
            return this;
        }
        /**
         * Number of concurrent consumers when consuming from broker. (eg
         * similar as to the same option for the JMS component).
         * 
         * The option is a: <code>int</code> type.
         * 
         * Group: consumer
         */
        default RabbitMQEndpointConsumerBuilder concurrentConsumers(
                int concurrentConsumers) {
            setProperty("concurrentConsumers", concurrentConsumers);
            return this;
        }
        /**
         * Number of concurrent consumers when consuming from broker. (eg
         * similar as to the same option for the JMS component).
         * 
         * The option will be converted to a <code>int</code> type.
         * 
         * Group: consumer
         */
        default RabbitMQEndpointConsumerBuilder concurrentConsumers(
                String concurrentConsumers) {
            setProperty("concurrentConsumers", concurrentConsumers);
            return this;
        }
        /**
         * Request exclusive access to the queue (meaning only this consumer can
         * access the queue). This is useful when you want a long-lived shared
         * queue to be temporarily accessible by just one consumer.
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Group: consumer
         */
        default RabbitMQEndpointConsumerBuilder exclusiveConsumer(
                boolean exclusiveConsumer) {
            setProperty("exclusiveConsumer", exclusiveConsumer);
            return this;
        }
        /**
         * Request exclusive access to the queue (meaning only this consumer can
         * access the queue). This is useful when you want a long-lived shared
         * queue to be temporarily accessible by just one consumer.
         * 
         * The option will be converted to a <code>boolean</code> type.
         * 
         * Group: consumer
         */
        default RabbitMQEndpointConsumerBuilder exclusiveConsumer(
                String exclusiveConsumer) {
            setProperty("exclusiveConsumer", exclusiveConsumer);
            return this;
        }
        /**
         * The maximum number of messages that the server will deliver, 0 if
         * unlimited. You need to specify the option of prefetchSize,
         * prefetchCount, prefetchGlobal at the same time.
         * 
         * The option is a: <code>int</code> type.
         * 
         * Group: consumer
         */
        default RabbitMQEndpointConsumerBuilder prefetchCount(int prefetchCount) {
            setProperty("prefetchCount", prefetchCount);
            return this;
        }
        /**
         * The maximum number of messages that the server will deliver, 0 if
         * unlimited. You need to specify the option of prefetchSize,
         * prefetchCount, prefetchGlobal at the same time.
         * 
         * The option will be converted to a <code>int</code> type.
         * 
         * Group: consumer
         */
        default RabbitMQEndpointConsumerBuilder prefetchCount(
                String prefetchCount) {
            setProperty("prefetchCount", prefetchCount);
            return this;
        }
        /**
         * Enables the quality of service on the RabbitMQConsumer side. You need
         * to specify the option of prefetchSize, prefetchCount, prefetchGlobal
         * at the same time.
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Group: consumer
         */
        default RabbitMQEndpointConsumerBuilder prefetchEnabled(
                boolean prefetchEnabled) {
            setProperty("prefetchEnabled", prefetchEnabled);
            return this;
        }
        /**
         * Enables the quality of service on the RabbitMQConsumer side. You need
         * to specify the option of prefetchSize, prefetchCount, prefetchGlobal
         * at the same time.
         * 
         * The option will be converted to a <code>boolean</code> type.
         * 
         * Group: consumer
         */
        default RabbitMQEndpointConsumerBuilder prefetchEnabled(
                String prefetchEnabled) {
            setProperty("prefetchEnabled", prefetchEnabled);
            return this;
        }
        /**
         * If the settings should be applied to the entire channel rather than
         * each consumer. You need to specify the option of prefetchSize,
         * prefetchCount, prefetchGlobal at the same time.
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Group: consumer
         */
        default RabbitMQEndpointConsumerBuilder prefetchGlobal(
                boolean prefetchGlobal) {
            setProperty("prefetchGlobal", prefetchGlobal);
            return this;
        }
        /**
         * If the settings should be applied to the entire channel rather than
         * each consumer. You need to specify the option of prefetchSize,
         * prefetchCount, prefetchGlobal at the same time.
         * 
         * The option will be converted to a <code>boolean</code> type.
         * 
         * Group: consumer
         */
        default RabbitMQEndpointConsumerBuilder prefetchGlobal(
                String prefetchGlobal) {
            setProperty("prefetchGlobal", prefetchGlobal);
            return this;
        }
        /**
         * The maximum amount of content (measured in octets) that the server
         * will deliver, 0 if unlimited. You need to specify the option of
         * prefetchSize, prefetchCount, prefetchGlobal at the same time.
         * 
         * The option is a: <code>int</code> type.
         * 
         * Group: consumer
         */
        default RabbitMQEndpointConsumerBuilder prefetchSize(int prefetchSize) {
            setProperty("prefetchSize", prefetchSize);
            return this;
        }
        /**
         * The maximum amount of content (measured in octets) that the server
         * will deliver, 0 if unlimited. You need to specify the option of
         * prefetchSize, prefetchCount, prefetchGlobal at the same time.
         * 
         * The option will be converted to a <code>int</code> type.
         * 
         * Group: consumer
         */
        default RabbitMQEndpointConsumerBuilder prefetchSize(String prefetchSize) {
            setProperty("prefetchSize", prefetchSize);
            return this;
        }
        /**
         * Password for authenticated access.
         * 
         * The option is a: <code>java.lang.String</code> type.
         * 
         * Group: security
         */
        default RabbitMQEndpointConsumerBuilder password(String password) {
            setProperty("password", password);
            return this;
        }
        /**
         * Enables SSL on connection; accepted values are true, TLS and SSLv3.
         * 
         * The option is a: <code>java.lang.String</code> type.
         * 
         * Group: security
         */
        default RabbitMQEndpointConsumerBuilder sslProtocol(String sslProtocol) {
            setProperty("sslProtocol", sslProtocol);
            return this;
        }
        /**
         * Configure SSL trust manager, SSL should be enabled for this option to
         * be effective.
         * 
         * The option is a: <code>javax.net.ssl.TrustManager</code> type.
         * 
         * Group: security
         */
        default RabbitMQEndpointConsumerBuilder trustManager(Object trustManager) {
            setProperty("trustManager", trustManager);
            return this;
        }
        /**
         * Configure SSL trust manager, SSL should be enabled for this option to
         * be effective.
         * 
         * The option will be converted to a
         * <code>javax.net.ssl.TrustManager</code> type.
         * 
         * Group: security
         */
        default RabbitMQEndpointConsumerBuilder trustManager(String trustManager) {
            setProperty("trustManager", trustManager);
            return this;
        }
        /**
         * Username in case of authenticated access.
         * 
         * The option is a: <code>java.lang.String</code> type.
         * 
         * Group: security
         */
        default RabbitMQEndpointConsumerBuilder username(String username) {
            setProperty("username", username);
            return this;
        }
    }
/**
* Advanced builder for endpoint consumers for the RabbitMQ component.
*/
    public interface AdvancedRabbitMQEndpointConsumerBuilder
            extends
                EndpointConsumerBuilder {
        /**
         * Switches back to the basic builder, which exposes the commonly used
         * options of this endpoint.
         */
        default RabbitMQEndpointConsumerBuilder basic() {
            return (RabbitMQEndpointConsumerBuilder) this;
        }
        /**
         * To let the consumer use a custom ExceptionHandler. Notice if the
         * option bridgeErrorHandler is enabled then this option is not in use.
         * By default the consumer will deal with exceptions, that will be
         * logged at WARN or ERROR level and ignored.
         * 
         * The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
         * type.
         * 
         * Group: consumer (advanced)
         */
        default AdvancedRabbitMQEndpointConsumerBuilder exceptionHandler(
                ExceptionHandler exceptionHandler) {
            setProperty("exceptionHandler", exceptionHandler);
            return this;
        }
        /**
         * To let the consumer use a custom ExceptionHandler. Notice if the
         * option bridgeErrorHandler is enabled then this option is not in use.
         * By default the consumer will deal with exceptions, that will be
         * logged at WARN or ERROR level and ignored.
         * 
         * The option will be converted to a
         * <code>org.apache.camel.spi.ExceptionHandler</code> type.
         * 
         * Group: consumer (advanced)
         */
        default AdvancedRabbitMQEndpointConsumerBuilder exceptionHandler(
                String exceptionHandler) {
            setProperty("exceptionHandler", exceptionHandler);
            return this;
        }
        /**
         * Sets the exchange pattern when the consumer creates an exchange.
         * 
         * The option is a: <code>org.apache.camel.ExchangePattern</code> type.
         * 
         * Group: consumer (advanced)
         */
        default AdvancedRabbitMQEndpointConsumerBuilder exchangePattern(
                ExchangePattern exchangePattern) {
            setProperty("exchangePattern", exchangePattern);
            return this;
        }
        /**
         * Sets the exchange pattern when the consumer creates an exchange.
         * 
         * The option will be converted to a
         * <code>org.apache.camel.ExchangePattern</code> type.
         * 
         * Group: consumer (advanced)
         */
        default AdvancedRabbitMQEndpointConsumerBuilder exchangePattern(
                String exchangePattern) {
            setProperty("exchangePattern", exchangePattern);
            return this;
        }
        /**
         * The consumer uses a Thread Pool Executor with a fixed number of
         * threads. This setting allows you to set that number of threads.
         * 
         * The option is a: <code>int</code> type.
         * 
         * Group: consumer (advanced)
         */
        default AdvancedRabbitMQEndpointConsumerBuilder threadPoolSize(
                int threadPoolSize) {
            setProperty("threadPoolSize", threadPoolSize);
            return this;
        }
        /**
         * The consumer uses a Thread Pool Executor with a fixed number of
         * threads. This setting allows you to set that number of threads.
         * 
         * The option will be converted to a <code>int</code> type.
         * 
         * Group: consumer (advanced)
         */
        default AdvancedRabbitMQEndpointConsumerBuilder threadPoolSize(
                String threadPoolSize) {
            setProperty("threadPoolSize", threadPoolSize);
            return this;
        }
        /**
         * Specify arguments for configuring the different RabbitMQ concepts, a
         * different prefix is required for each: Exchange: arg.exchange. Queue:
         * arg.queue. Binding: arg.binding. For example to declare a queue with
         * message ttl argument:
         * http://localhost:5672/exchange/queueargs=arg.queue.x-message-ttl=60000.
         * 
         * The option is a: <code>java.util.Map&lt;java.lang.String,
         * java.lang.Object&gt;</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder args(
                Map<String, Object> args) {
            setProperty("args", args);
            return this;
        }
        /**
         * Specify arguments for configuring the different RabbitMQ concepts, a
         * different prefix is required for each: Exchange: arg.exchange. Queue:
         * arg.queue. Binding: arg.binding. For example to declare a queue with
         * message ttl argument:
         * http://localhost:5672/exchange/queueargs=arg.queue.x-message-ttl=60000.
         * 
         * The option will be converted to a
         * <code>java.util.Map&lt;java.lang.String, java.lang.Object&gt;</code>
         * type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder args(String args) {
            setProperty("args", args);
            return this;
        }
        /**
         * Enables connection automatic recovery (uses connection implementation
         * that performs automatic recovery when connection shutdown is not
         * initiated by the application).
         * 
         * The option is a: <code>java.lang.Boolean</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder automaticRecoveryEnabled(
                Boolean automaticRecoveryEnabled) {
            setProperty("automaticRecoveryEnabled", automaticRecoveryEnabled);
            return this;
        }
        /**
         * Enables connection automatic recovery (uses connection implementation
         * that performs automatic recovery when connection shutdown is not
         * initiated by the application).
         * 
         * The option will be converted to a <code>java.lang.Boolean</code>
         * type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder automaticRecoveryEnabled(
                String automaticRecoveryEnabled) {
            setProperty("automaticRecoveryEnabled", automaticRecoveryEnabled);
            return this;
        }
        /**
         * Whether the endpoint should use basic property binding (Camel 2.x) or
         * the newer property binding with additional capabilities.
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder basicPropertyBinding(
                boolean basicPropertyBinding) {
            setProperty("basicPropertyBinding", basicPropertyBinding);
            return this;
        }
        /**
         * Whether the endpoint should use basic property binding (Camel 2.x) or
         * the newer property binding with additional capabilities.
         * 
         * The option will be converted to a <code>boolean</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder basicPropertyBinding(
                String basicPropertyBinding) {
            setProperty("basicPropertyBinding", basicPropertyBinding);
            return this;
        }
        /**
         * Connection client properties (client info used in negotiating with
         * the server).
         * 
         * The option is a: <code>java.util.Map&lt;java.lang.String,
         * java.lang.Object&gt;</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder clientProperties(
                Map<String, Object> clientProperties) {
            setProperty("clientProperties", clientProperties);
            return this;
        }
        /**
         * Connection client properties (client info used in negotiating with
         * the server).
         * 
         * The option will be converted to a
         * <code>java.util.Map&lt;java.lang.String, java.lang.Object&gt;</code>
         * type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder clientProperties(
                String clientProperties) {
            setProperty("clientProperties", clientProperties);
            return this;
        }
        /**
         * Connection timeout.
         * 
         * The option is a: <code>int</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder connectionTimeout(
                int connectionTimeout) {
            setProperty("connectionTimeout", connectionTimeout);
            return this;
        }
        /**
         * Connection timeout.
         * 
         * The option will be converted to a <code>int</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder connectionTimeout(
                String connectionTimeout) {
            setProperty("connectionTimeout", connectionTimeout);
            return this;
        }
        /**
         * Network recovery interval in milliseconds (interval used when
         * recovering from network failure).
         * 
         * The option is a: <code>java.lang.Integer</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder networkRecoveryInterval(
                Integer networkRecoveryInterval) {
            setProperty("networkRecoveryInterval", networkRecoveryInterval);
            return this;
        }
        /**
         * Network recovery interval in milliseconds (interval used when
         * recovering from network failure).
         * 
         * The option will be converted to a <code>java.lang.Integer</code>
         * type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder networkRecoveryInterval(
                String networkRecoveryInterval) {
            setProperty("networkRecoveryInterval", networkRecoveryInterval);
            return this;
        }
        /**
         * Connection requested channel max (max number of channels offered).
         * 
         * The option is a: <code>int</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder requestedChannelMax(
                int requestedChannelMax) {
            setProperty("requestedChannelMax", requestedChannelMax);
            return this;
        }
        /**
         * Connection requested channel max (max number of channels offered).
         * 
         * The option will be converted to a <code>int</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder requestedChannelMax(
                String requestedChannelMax) {
            setProperty("requestedChannelMax", requestedChannelMax);
            return this;
        }
        /**
         * Connection requested frame max (max size of frame offered).
         * 
         * The option is a: <code>int</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder requestedFrameMax(
                int requestedFrameMax) {
            setProperty("requestedFrameMax", requestedFrameMax);
            return this;
        }
        /**
         * Connection requested frame max (max size of frame offered).
         * 
         * The option will be converted to a <code>int</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder requestedFrameMax(
                String requestedFrameMax) {
            setProperty("requestedFrameMax", requestedFrameMax);
            return this;
        }
        /**
         * Connection requested heartbeat (heart-beat in seconds offered).
         * 
         * The option is a: <code>int</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder requestedHeartbeat(
                int requestedHeartbeat) {
            setProperty("requestedHeartbeat", requestedHeartbeat);
            return this;
        }
        /**
         * Connection requested heartbeat (heart-beat in seconds offered).
         * 
         * The option will be converted to a <code>int</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder requestedHeartbeat(
                String requestedHeartbeat) {
            setProperty("requestedHeartbeat", requestedHeartbeat);
            return this;
        }
        /**
         * Set timeout for waiting for a reply when using the InOut Exchange
         * Pattern (in milliseconds).
         * 
         * The option is a: <code>long</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder requestTimeout(
                long requestTimeout) {
            setProperty("requestTimeout", requestTimeout);
            return this;
        }
        /**
         * Set timeout for waiting for a reply when using the InOut Exchange
         * Pattern (in milliseconds).
         * 
         * The option will be converted to a <code>long</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder requestTimeout(
                String requestTimeout) {
            setProperty("requestTimeout", requestTimeout);
            return this;
        }
        /**
         * Set requestTimeoutCheckerInterval for inOut exchange.
         * 
         * The option is a: <code>long</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder requestTimeoutCheckerInterval(
                long requestTimeoutCheckerInterval) {
            setProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
            return this;
        }
        /**
         * Set requestTimeoutCheckerInterval for inOut exchange.
         * 
         * The option will be converted to a <code>long</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder requestTimeoutCheckerInterval(
                String requestTimeoutCheckerInterval) {
            setProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
            return this;
        }
        /**
         * Sets whether synchronous processing should be strictly used, or Camel
         * is allowed to use asynchronous processing (if supported).
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder synchronous(
                boolean synchronous) {
            setProperty("synchronous", synchronous);
            return this;
        }
        /**
         * Sets whether synchronous processing should be strictly used, or Camel
         * is allowed to use asynchronous processing (if supported).
         * 
         * The option will be converted to a <code>boolean</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder synchronous(
                String synchronous) {
            setProperty("synchronous", synchronous);
            return this;
        }
        /**
         * Enables connection topology recovery (should topology recovery be
         * performed).
         * 
         * The option is a: <code>java.lang.Boolean</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder topologyRecoveryEnabled(
                Boolean topologyRecoveryEnabled) {
            setProperty("topologyRecoveryEnabled", topologyRecoveryEnabled);
            return this;
        }
        /**
         * Enables connection topology recovery (should topology recovery be
         * performed).
         * 
         * The option will be converted to a <code>java.lang.Boolean</code>
         * type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder topologyRecoveryEnabled(
                String topologyRecoveryEnabled) {
            setProperty("topologyRecoveryEnabled", topologyRecoveryEnabled);
            return this;
        }
        /**
         * When true and an inOut Exchange failed on the consumer side send the
         * caused Exception back in the response.
         * 
         * The option is a: <code>boolean</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder transferException(
                boolean transferException) {
            setProperty("transferException", transferException);
            return this;
        }
        /**
         * When true and an inOut Exchange failed on the consumer side send the
         * caused Exception back in the response.
         * 
         * The option will be converted to a <code>boolean</code> type.
         * 
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointConsumerBuilder transferException(
                String transferException) {
            setProperty("transferException", transferException);
            return this;
        }
    }
/**
 * Builder for endpoint producers for the RabbitMQ component.
 *
 * NOTE: generated endpoint-DSL code. Every setter below stores the raw
 * option value via setProperty and returns this builder for fluent
 * chaining; String overloads defer type conversion to endpoint creation.
 */
public interface RabbitMQEndpointProducerBuilder
        extends
            EndpointProducerBuilder {
    // Narrows this builder to the advanced-options view (same instance).
    default AdvancedRabbitMQEndpointProducerBuilder advanced() {
        return (AdvancedRabbitMQEndpointProducerBuilder) this;
    }
    /**
     * If this option is set, camel-rabbitmq will try to create connection
     * based on the setting of option addresses. The addresses value is a
     * string which looks like server1:12345, server2:12345.
     *
     * The option is a: <code>com.rabbitmq.client.Address[]</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder addresses(Object[] addresses) {
        setProperty("addresses", addresses);
        return this;
    }
    /**
     * If this option is set, camel-rabbitmq will try to create connection
     * based on the setting of option addresses. The addresses value is a
     * string which looks like server1:12345, server2:12345.
     *
     * The option will be converted to a
     * <code>com.rabbitmq.client.Address[]</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder addresses(String addresses) {
        setProperty("addresses", addresses);
        return this;
    }
    /**
     * If it is true, the exchange will be deleted when it is no longer in
     * use.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder autoDelete(boolean autoDelete) {
        setProperty("autoDelete", autoDelete);
        return this;
    }
    /**
     * If it is true, the exchange will be deleted when it is no longer in
     * use.
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder autoDelete(String autoDelete) {
        setProperty("autoDelete", autoDelete);
        return this;
    }
    /**
     * To use a custom RabbitMQ connection factory. When this option is set,
     * all connection options (connectionTimeout, requestedChannelMax...)
     * set on URI are not used.
     *
     * The option is a: <code>com.rabbitmq.client.ConnectionFactory</code>
     * type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder connectionFactory(
            Object connectionFactory) {
        setProperty("connectionFactory", connectionFactory);
        return this;
    }
    /**
     * To use a custom RabbitMQ connection factory. When this option is set,
     * all connection options (connectionTimeout, requestedChannelMax...)
     * set on URI are not used.
     *
     * The option will be converted to a
     * <code>com.rabbitmq.client.ConnectionFactory</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder connectionFactory(
            String connectionFactory) {
        setProperty("connectionFactory", connectionFactory);
        return this;
    }
    /**
     * The name of the dead letter exchange.
     *
     * The option is a: <code>java.lang.String</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder deadLetterExchange(
            String deadLetterExchange) {
        setProperty("deadLetterExchange", deadLetterExchange);
        return this;
    }
    /**
     * The type of the dead letter exchange.
     *
     * The option is a: <code>java.lang.String</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder deadLetterExchangeType(
            String deadLetterExchangeType) {
        setProperty("deadLetterExchangeType", deadLetterExchangeType);
        return this;
    }
    /**
     * The name of the dead letter queue.
     *
     * The option is a: <code>java.lang.String</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder deadLetterQueue(
            String deadLetterQueue) {
        setProperty("deadLetterQueue", deadLetterQueue);
        return this;
    }
    /**
     * The routing key for the dead letter exchange.
     *
     * The option is a: <code>java.lang.String</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder deadLetterRoutingKey(
            String deadLetterRoutingKey) {
        setProperty("deadLetterRoutingKey", deadLetterRoutingKey);
        return this;
    }
    /**
     * If the option is true, camel declare the exchange and queue name and
     * bind them together. If the option is false, camel won't declare the
     * exchange and queue name on the server.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder declare(boolean declare) {
        setProperty("declare", declare);
        return this;
    }
    /**
     * If the option is true, camel declare the exchange and queue name and
     * bind them together. If the option is false, camel won't declare the
     * exchange and queue name on the server.
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder declare(String declare) {
        setProperty("declare", declare);
        return this;
    }
    /**
     * If we are declaring a durable exchange (the exchange will survive a
     * server restart).
     *
     * The option is a: <code>boolean</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder durable(boolean durable) {
        setProperty("durable", durable);
        return this;
    }
    /**
     * If we are declaring a durable exchange (the exchange will survive a
     * server restart).
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder durable(String durable) {
        setProperty("durable", durable);
        return this;
    }
    /**
     * The exchange type such as direct or topic.
     *
     * The option is a: <code>java.lang.String</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder exchangeType(String exchangeType) {
        setProperty("exchangeType", exchangeType);
        return this;
    }
    /**
     * Exclusive queues may only be accessed by the current connection, and
     * are deleted when that connection closes.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder exclusive(boolean exclusive) {
        setProperty("exclusive", exclusive);
        return this;
    }
    /**
     * Exclusive queues may only be accessed by the current connection, and
     * are deleted when that connection closes.
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder exclusive(String exclusive) {
        setProperty("exclusive", exclusive);
        return this;
    }
    /**
     * The hostname of the running rabbitmq instance or cluster.
     *
     * The option is a: <code>java.lang.String</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder hostname(String hostname) {
        setProperty("hostname", hostname);
        return this;
    }
    /**
     * Passive queues depend on the queue already to be available at
     * RabbitMQ.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder passive(boolean passive) {
        setProperty("passive", passive);
        return this;
    }
    /**
     * Passive queues depend on the queue already to be available at
     * RabbitMQ.
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder passive(String passive) {
        setProperty("passive", passive);
        return this;
    }
    /**
     * Port number for the host with the running rabbitmq instance or
     * cluster. Default value is 5672.
     *
     * The option is a: <code>int</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder portNumber(int portNumber) {
        setProperty("portNumber", portNumber);
        return this;
    }
    /**
     * Port number for the host with the running rabbitmq instance or
     * cluster. Default value is 5672.
     *
     * The option will be converted to a <code>int</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder portNumber(String portNumber) {
        setProperty("portNumber", portNumber);
        return this;
    }
    /**
     * The queue to receive messages from.
     *
     * The option is a: <code>java.lang.String</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder queue(String queue) {
        setProperty("queue", queue);
        return this;
    }
    /**
     * The routing key to use when binding a consumer queue to the exchange.
     * For producer routing keys, you set the header rabbitmq.ROUTING_KEY.
     *
     * The option is a: <code>java.lang.String</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder routingKey(String routingKey) {
        setProperty("routingKey", routingKey);
        return this;
    }
    /**
     * This can be used if we need to declare the queue but not the
     * exchange.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder skipExchangeDeclare(
            boolean skipExchangeDeclare) {
        setProperty("skipExchangeDeclare", skipExchangeDeclare);
        return this;
    }
    /**
     * This can be used if we need to declare the queue but not the
     * exchange.
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder skipExchangeDeclare(
            String skipExchangeDeclare) {
        setProperty("skipExchangeDeclare", skipExchangeDeclare);
        return this;
    }
    /**
     * If true the queue will not be bound to the exchange after declaring
     * it.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder skipQueueBind(
            boolean skipQueueBind) {
        setProperty("skipQueueBind", skipQueueBind);
        return this;
    }
    /**
     * If true the queue will not be bound to the exchange after declaring
     * it.
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder skipQueueBind(
            String skipQueueBind) {
        setProperty("skipQueueBind", skipQueueBind);
        return this;
    }
    /**
     * If true the producer will not declare and bind a queue. This can be
     * used for directing messages via an existing routing key.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder skipQueueDeclare(
            boolean skipQueueDeclare) {
        setProperty("skipQueueDeclare", skipQueueDeclare);
        return this;
    }
    /**
     * If true the producer will not declare and bind a queue. This can be
     * used for directing messages via an existing routing key.
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder skipQueueDeclare(
            String skipQueueDeclare) {
        setProperty("skipQueueDeclare", skipQueueDeclare);
        return this;
    }
    /**
     * The vhost for the channel.
     *
     * The option is a: <code>java.lang.String</code> type.
     *
     * Group: common
     */
    default RabbitMQEndpointProducerBuilder vhost(String vhost) {
        setProperty("vhost", vhost);
        return this;
    }
    /**
     * Allow pass null values to header.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder allowNullHeaders(
            boolean allowNullHeaders) {
        setProperty("allowNullHeaders", allowNullHeaders);
        return this;
    }
    /**
     * Allow pass null values to header.
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder allowNullHeaders(
            String allowNullHeaders) {
        setProperty("allowNullHeaders", allowNullHeaders);
        return this;
    }
    /**
     * If the bridgeEndpoint is true, the producer will ignore the message
     * header of rabbitmq.EXCHANGE_NAME and rabbitmq.ROUTING_KEY.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder bridgeEndpoint(
            boolean bridgeEndpoint) {
        setProperty("bridgeEndpoint", bridgeEndpoint);
        return this;
    }
    /**
     * If the bridgeEndpoint is true, the producer will ignore the message
     * header of rabbitmq.EXCHANGE_NAME and rabbitmq.ROUTING_KEY.
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder bridgeEndpoint(
            String bridgeEndpoint) {
        setProperty("bridgeEndpoint", bridgeEndpoint);
        return this;
    }
    /**
     * Get maximum number of opened channel in pool.
     *
     * The option is a: <code>int</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder channelPoolMaxSize(
            int channelPoolMaxSize) {
        setProperty("channelPoolMaxSize", channelPoolMaxSize);
        return this;
    }
    /**
     * Get maximum number of opened channel in pool.
     *
     * The option will be converted to a <code>int</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder channelPoolMaxSize(
            String channelPoolMaxSize) {
        setProperty("channelPoolMaxSize", channelPoolMaxSize);
        return this;
    }
    /**
     * Set the maximum number of milliseconds to wait for a channel from the
     * pool.
     *
     * The option is a: <code>long</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder channelPoolMaxWait(
            long channelPoolMaxWait) {
        setProperty("channelPoolMaxWait", channelPoolMaxWait);
        return this;
    }
    /**
     * Set the maximum number of milliseconds to wait for a channel from the
     * pool.
     *
     * The option will be converted to a <code>long</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder channelPoolMaxWait(
            String channelPoolMaxWait) {
        setProperty("channelPoolMaxWait", channelPoolMaxWait);
        return this;
    }
    /**
     * When true, an exception will be thrown when the message cannot be
     * delivered (basic.return) and the message is marked as mandatory.
     * PublisherAcknowledgement will also be activated in this case. See
     * also publisher acknowledgements - When will messages be confirmed.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder guaranteedDeliveries(
            boolean guaranteedDeliveries) {
        setProperty("guaranteedDeliveries", guaranteedDeliveries);
        return this;
    }
    /**
     * When true, an exception will be thrown when the message cannot be
     * delivered (basic.return) and the message is marked as mandatory.
     * PublisherAcknowledgement will also be activated in this case. See
     * also publisher acknowledgements - When will messages be confirmed.
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder guaranteedDeliveries(
            String guaranteedDeliveries) {
        setProperty("guaranteedDeliveries", guaranteedDeliveries);
        return this;
    }
    /**
     * This flag tells the server how to react if the message cannot be
     * routed to a queue consumer immediately. If this flag is set, the
     * server will return an undeliverable message with a Return method. If
     * this flag is zero, the server will queue the message, but with no
     * guarantee that it will ever be consumed. If the header is present
     * rabbitmq.IMMEDIATE it will override this option.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder immediate(boolean immediate) {
        setProperty("immediate", immediate);
        return this;
    }
    /**
     * This flag tells the server how to react if the message cannot be
     * routed to a queue consumer immediately. If this flag is set, the
     * server will return an undeliverable message with a Return method. If
     * this flag is zero, the server will queue the message, but with no
     * guarantee that it will ever be consumed. If the header is present
     * rabbitmq.IMMEDIATE it will override this option.
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder immediate(String immediate) {
        setProperty("immediate", immediate);
        return this;
    }
    /**
     * Whether the producer should be started lazy (on the first message).
     * By starting lazy you can use this to allow CamelContext and routes to
     * startup in situations where a producer may otherwise fail during
     * starting and cause the route to fail being started. By deferring this
     * startup to be lazy then the startup failure can be handled during
     * routing messages via Camel's routing error handlers. Beware that when
     * the first message is processed then creating and starting the
     * producer may take a little time and prolong the total processing time
     * of the processing.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder lazyStartProducer(
            boolean lazyStartProducer) {
        setProperty("lazyStartProducer", lazyStartProducer);
        return this;
    }
    /**
     * Whether the producer should be started lazy (on the first message).
     * By starting lazy you can use this to allow CamelContext and routes to
     * startup in situations where a producer may otherwise fail during
     * starting and cause the route to fail being started. By deferring this
     * startup to be lazy then the startup failure can be handled during
     * routing messages via Camel's routing error handlers. Beware that when
     * the first message is processed then creating and starting the
     * producer may take a little time and prolong the total processing time
     * of the processing.
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder lazyStartProducer(
            String lazyStartProducer) {
        setProperty("lazyStartProducer", lazyStartProducer);
        return this;
    }
    /**
     * This flag tells the server how to react if the message cannot be
     * routed to a queue. If this flag is set, the server will return an
     * unroutable message with a Return method. If this flag is zero, the
     * server silently drops the message. If the header is present
     * rabbitmq.MANDATORY it will override this option.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder mandatory(boolean mandatory) {
        setProperty("mandatory", mandatory);
        return this;
    }
    /**
     * This flag tells the server how to react if the message cannot be
     * routed to a queue. If this flag is set, the server will return an
     * unroutable message with a Return method. If this flag is zero, the
     * server silently drops the message. If the header is present
     * rabbitmq.MANDATORY it will override this option.
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder mandatory(String mandatory) {
        setProperty("mandatory", mandatory);
        return this;
    }
    /**
     * When true, the message will be published with publisher
     * acknowledgements turned on.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder publisherAcknowledgements(
            boolean publisherAcknowledgements) {
        setProperty("publisherAcknowledgements", publisherAcknowledgements);
        return this;
    }
    /**
     * When true, the message will be published with publisher
     * acknowledgements turned on.
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder publisherAcknowledgements(
            String publisherAcknowledgements) {
        setProperty("publisherAcknowledgements", publisherAcknowledgements);
        return this;
    }
    /**
     * The amount of time in milliseconds to wait for a basic.ack response
     * from RabbitMQ server.
     *
     * The option is a: <code>long</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder publisherAcknowledgementsTimeout(
            long publisherAcknowledgementsTimeout) {
        setProperty("publisherAcknowledgementsTimeout", publisherAcknowledgementsTimeout);
        return this;
    }
    /**
     * The amount of time in milliseconds to wait for a basic.ack response
     * from RabbitMQ server.
     *
     * The option will be converted to a <code>long</code> type.
     *
     * Group: producer
     */
    default RabbitMQEndpointProducerBuilder publisherAcknowledgementsTimeout(
            String publisherAcknowledgementsTimeout) {
        setProperty("publisherAcknowledgementsTimeout", publisherAcknowledgementsTimeout);
        return this;
    }
    /**
     * Password for authenticated access.
     *
     * The option is a: <code>java.lang.String</code> type.
     *
     * Group: security
     */
    default RabbitMQEndpointProducerBuilder password(String password) {
        setProperty("password", password);
        return this;
    }
    /**
     * Enables SSL on connection; accepted values are true, TLS and 'SSLv3'.
     *
     * The option is a: <code>java.lang.String</code> type.
     *
     * Group: security
     */
    default RabbitMQEndpointProducerBuilder sslProtocol(String sslProtocol) {
        setProperty("sslProtocol", sslProtocol);
        return this;
    }
    /**
     * Configure SSL trust manager, SSL should be enabled for this option to
     * be effective.
     *
     * The option is a: <code>javax.net.ssl.TrustManager</code> type.
     *
     * Group: security
     */
    default RabbitMQEndpointProducerBuilder trustManager(Object trustManager) {
        setProperty("trustManager", trustManager);
        return this;
    }
    /**
     * Configure SSL trust manager, SSL should be enabled for this option to
     * be effective.
     *
     * The option will be converted to a
     * <code>javax.net.ssl.TrustManager</code> type.
     *
     * Group: security
     */
    default RabbitMQEndpointProducerBuilder trustManager(String trustManager) {
        setProperty("trustManager", trustManager);
        return this;
    }
    /**
     * Username in case of authenticated access.
     *
     * The option is a: <code>java.lang.String</code> type.
     *
     * Group: security
     */
    default RabbitMQEndpointProducerBuilder username(String username) {
        setProperty("username", username);
        return this;
    }
}
/**
 * Advanced builder for endpoint producers for the RabbitMQ component.
 *
 * NOTE: generated endpoint-DSL code. Every setter below stores the raw
 * option value via setProperty and returns this builder for fluent
 * chaining; String overloads defer type conversion to endpoint creation.
 */
public interface AdvancedRabbitMQEndpointProducerBuilder
        extends
            EndpointProducerBuilder {
    // Narrows this builder back to the basic-options view (same instance).
    default RabbitMQEndpointProducerBuilder basic() {
        return (RabbitMQEndpointProducerBuilder) this;
    }
    /**
     * Specify arguments for configuring the different RabbitMQ concepts, a
     * different prefix is required for each: Exchange: arg.exchange. Queue:
     * arg.queue. Binding: arg.binding. For example to declare a queue with
     * message ttl argument:
     * http://localhost:5672/exchange/queueargs=arg.queue.x-message-ttl=60000.
     *
     * The option is a: <code>java.util.Map&lt;java.lang.String,
     * java.lang.Object&gt;</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder args(
            Map<String, Object> args) {
        setProperty("args", args);
        return this;
    }
    /**
     * Specify arguments for configuring the different RabbitMQ concepts, a
     * different prefix is required for each: Exchange: arg.exchange. Queue:
     * arg.queue. Binding: arg.binding. For example to declare a queue with
     * message ttl argument:
     * http://localhost:5672/exchange/queueargs=arg.queue.x-message-ttl=60000.
     *
     * The option will be converted to a
     * <code>java.util.Map&lt;java.lang.String, java.lang.Object&gt;</code>
     * type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder args(String args) {
        setProperty("args", args);
        return this;
    }
    /**
     * Enables connection automatic recovery (uses connection implementation
     * that performs automatic recovery when connection shutdown is not
     * initiated by the application).
     *
     * The option is a: <code>java.lang.Boolean</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder automaticRecoveryEnabled(
            Boolean automaticRecoveryEnabled) {
        setProperty("automaticRecoveryEnabled", automaticRecoveryEnabled);
        return this;
    }
    /**
     * Enables connection automatic recovery (uses connection implementation
     * that performs automatic recovery when connection shutdown is not
     * initiated by the application).
     *
     * The option will be converted to a <code>java.lang.Boolean</code>
     * type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder automaticRecoveryEnabled(
            String automaticRecoveryEnabled) {
        setProperty("automaticRecoveryEnabled", automaticRecoveryEnabled);
        return this;
    }
    /**
     * Whether the endpoint should use basic property binding (Camel 2.x) or
     * the newer property binding with additional capabilities.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder basicPropertyBinding(
            boolean basicPropertyBinding) {
        setProperty("basicPropertyBinding", basicPropertyBinding);
        return this;
    }
    /**
     * Whether the endpoint should use basic property binding (Camel 2.x) or
     * the newer property binding with additional capabilities.
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder basicPropertyBinding(
            String basicPropertyBinding) {
        setProperty("basicPropertyBinding", basicPropertyBinding);
        return this;
    }
    /**
     * Connection client properties (client info used in negotiating with
     * the server).
     *
     * The option is a: <code>java.util.Map&lt;java.lang.String,
     * java.lang.Object&gt;</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder clientProperties(
            Map<String, Object> clientProperties) {
        setProperty("clientProperties", clientProperties);
        return this;
    }
    /**
     * Connection client properties (client info used in negotiating with
     * the server).
     *
     * The option will be converted to a
     * <code>java.util.Map&lt;java.lang.String, java.lang.Object&gt;</code>
     * type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder clientProperties(
            String clientProperties) {
        setProperty("clientProperties", clientProperties);
        return this;
    }
    /**
     * Connection timeout.
     *
     * The option is a: <code>int</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder connectionTimeout(
            int connectionTimeout) {
        setProperty("connectionTimeout", connectionTimeout);
        return this;
    }
    /**
     * Connection timeout.
     *
     * The option will be converted to a <code>int</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder connectionTimeout(
            String connectionTimeout) {
        setProperty("connectionTimeout", connectionTimeout);
        return this;
    }
    /**
     * Network recovery interval in milliseconds (interval used when
     * recovering from network failure).
     *
     * The option is a: <code>java.lang.Integer</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder networkRecoveryInterval(
            Integer networkRecoveryInterval) {
        setProperty("networkRecoveryInterval", networkRecoveryInterval);
        return this;
    }
    /**
     * Network recovery interval in milliseconds (interval used when
     * recovering from network failure).
     *
     * The option will be converted to a <code>java.lang.Integer</code>
     * type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder networkRecoveryInterval(
            String networkRecoveryInterval) {
        setProperty("networkRecoveryInterval", networkRecoveryInterval);
        return this;
    }
    /**
     * Connection requested channel max (max number of channels offered).
     *
     * The option is a: <code>int</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder requestedChannelMax(
            int requestedChannelMax) {
        setProperty("requestedChannelMax", requestedChannelMax);
        return this;
    }
    /**
     * Connection requested channel max (max number of channels offered).
     *
     * The option will be converted to a <code>int</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder requestedChannelMax(
            String requestedChannelMax) {
        setProperty("requestedChannelMax", requestedChannelMax);
        return this;
    }
    /**
     * Connection requested frame max (max size of frame offered).
     *
     * The option is a: <code>int</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder requestedFrameMax(
            int requestedFrameMax) {
        setProperty("requestedFrameMax", requestedFrameMax);
        return this;
    }
    /**
     * Connection requested frame max (max size of frame offered).
     *
     * The option will be converted to a <code>int</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder requestedFrameMax(
            String requestedFrameMax) {
        setProperty("requestedFrameMax", requestedFrameMax);
        return this;
    }
    /**
     * Connection requested heartbeat (heart-beat in seconds offered).
     *
     * The option is a: <code>int</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder requestedHeartbeat(
            int requestedHeartbeat) {
        setProperty("requestedHeartbeat", requestedHeartbeat);
        return this;
    }
    /**
     * Connection requested heartbeat (heart-beat in seconds offered).
     *
     * The option will be converted to a <code>int</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder requestedHeartbeat(
            String requestedHeartbeat) {
        setProperty("requestedHeartbeat", requestedHeartbeat);
        return this;
    }
    /**
     * Set timeout for waiting for a reply when using the InOut Exchange
     * Pattern (in milliseconds).
     *
     * The option is a: <code>long</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder requestTimeout(
            long requestTimeout) {
        setProperty("requestTimeout", requestTimeout);
        return this;
    }
    /**
     * Set timeout for waiting for a reply when using the InOut Exchange
     * Pattern (in milliseconds).
     *
     * The option will be converted to a <code>long</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder requestTimeout(
            String requestTimeout) {
        setProperty("requestTimeout", requestTimeout);
        return this;
    }
    /**
     * Set requestTimeoutCheckerInterval for inOut exchange.
     *
     * The option is a: <code>long</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder requestTimeoutCheckerInterval(
            long requestTimeoutCheckerInterval) {
        setProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
        return this;
    }
    /**
     * Set requestTimeoutCheckerInterval for inOut exchange.
     *
     * The option will be converted to a <code>long</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder requestTimeoutCheckerInterval(
            String requestTimeoutCheckerInterval) {
        setProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
        return this;
    }
    /**
     * Sets whether synchronous processing should be strictly used, or Camel
     * is allowed to use asynchronous processing (if supported).
     *
     * The option is a: <code>boolean</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder synchronous(
            boolean synchronous) {
        setProperty("synchronous", synchronous);
        return this;
    }
    /**
     * Sets whether synchronous processing should be strictly used, or Camel
     * is allowed to use asynchronous processing (if supported).
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder synchronous(
            String synchronous) {
        setProperty("synchronous", synchronous);
        return this;
    }
    /**
     * Enables connection topology recovery (should topology recovery be
     * performed).
     *
     * The option is a: <code>java.lang.Boolean</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder topologyRecoveryEnabled(
            Boolean topologyRecoveryEnabled) {
        setProperty("topologyRecoveryEnabled", topologyRecoveryEnabled);
        return this;
    }
    /**
     * Enables connection topology recovery (should topology recovery be
     * performed).
     *
     * The option will be converted to a <code>java.lang.Boolean</code>
     * type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder topologyRecoveryEnabled(
            String topologyRecoveryEnabled) {
        setProperty("topologyRecoveryEnabled", topologyRecoveryEnabled);
        return this;
    }
    /**
     * When true and an inOut Exchange failed on the consumer side send the
     * caused Exception back in the response.
     *
     * The option is a: <code>boolean</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder transferException(
            boolean transferException) {
        setProperty("transferException", transferException);
        return this;
    }
    /**
     * When true and an inOut Exchange failed on the consumer side send the
     * caused Exception back in the response.
     *
     * The option will be converted to a <code>boolean</code> type.
     *
     * Group: advanced
     */
    default AdvancedRabbitMQEndpointProducerBuilder transferException(
            String transferException) {
        setProperty("transferException", transferException);
        return this;
    }
}
    /**
     * Builder for endpoint for the RabbitMQ component.
     */
    // NOTE(review): looks machine-generated (Camel endpoint DSL); prefer
    // regenerating over hand-editing — TODO confirm with the build setup.
    public interface RabbitMQEndpointBuilder
            extends
                RabbitMQEndpointConsumerBuilder, RabbitMQEndpointProducerBuilder {
        // Same builder instance viewed through its advanced-options interface
        // (plain cast; the concrete impl implements both interfaces).
        default AdvancedRabbitMQEndpointBuilder advanced() {
            return (AdvancedRabbitMQEndpointBuilder) this;
        }
        /**
         * If this option is set, camel-rabbitmq will try to create connection
         * based on the setting of option addresses. The addresses value is a
         * string which looks like server1:12345, server2:12345.
         *
         * The option is a: <code>com.rabbitmq.client.Address[]</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder addresses(Object[] addresses) {
            setProperty("addresses", addresses);
            return this;
        }
        /**
         * If this option is set, camel-rabbitmq will try to create connection
         * based on the setting of option addresses. The addresses value is a
         * string which looks like server1:12345, server2:12345.
         *
         * The option will be converted to a
         * <code>com.rabbitmq.client.Address[]</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder addresses(String addresses) {
            setProperty("addresses", addresses);
            return this;
        }
        /**
         * If it is true, the exchange will be deleted when it is no longer in
         * use.
         *
         * The option is a: <code>boolean</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder autoDelete(boolean autoDelete) {
            setProperty("autoDelete", autoDelete);
            return this;
        }
        /**
         * If it is true, the exchange will be deleted when it is no longer in
         * use.
         *
         * The option will be converted to a <code>boolean</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder autoDelete(String autoDelete) {
            setProperty("autoDelete", autoDelete);
            return this;
        }
        /**
         * To use a custom RabbitMQ connection factory. When this option is set,
         * all connection options (connectionTimeout, requestedChannelMax...)
         * set on URI are not used.
         *
         * The option is a: <code>com.rabbitmq.client.ConnectionFactory</code>
         * type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder connectionFactory(
                Object connectionFactory) {
            setProperty("connectionFactory", connectionFactory);
            return this;
        }
        /**
         * To use a custom RabbitMQ connection factory. When this option is set,
         * all connection options (connectionTimeout, requestedChannelMax...)
         * set on URI are not used.
         *
         * The option will be converted to a
         * <code>com.rabbitmq.client.ConnectionFactory</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder connectionFactory(
                String connectionFactory) {
            setProperty("connectionFactory", connectionFactory);
            return this;
        }
        /**
         * The name of the dead letter exchange.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder deadLetterExchange(
                String deadLetterExchange) {
            setProperty("deadLetterExchange", deadLetterExchange);
            return this;
        }
        /**
         * The type of the dead letter exchange.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder deadLetterExchangeType(
                String deadLetterExchangeType) {
            setProperty("deadLetterExchangeType", deadLetterExchangeType);
            return this;
        }
        /**
         * The name of the dead letter queue.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder deadLetterQueue(String deadLetterQueue) {
            setProperty("deadLetterQueue", deadLetterQueue);
            return this;
        }
        /**
         * The routing key for the dead letter exchange.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder deadLetterRoutingKey(
                String deadLetterRoutingKey) {
            setProperty("deadLetterRoutingKey", deadLetterRoutingKey);
            return this;
        }
        /**
         * If the option is true, camel declare the exchange and queue name and
         * bind them together. If the option is false, camel won't declare the
         * exchange and queue name on the server.
         *
         * The option is a: <code>boolean</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder declare(boolean declare) {
            setProperty("declare", declare);
            return this;
        }
        /**
         * If the option is true, camel declare the exchange and queue name and
         * bind them together. If the option is false, camel won't declare the
         * exchange and queue name on the server.
         *
         * The option will be converted to a <code>boolean</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder declare(String declare) {
            setProperty("declare", declare);
            return this;
        }
        /**
         * If we are declaring a durable exchange (the exchange will survive a
         * server restart).
         *
         * The option is a: <code>boolean</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder durable(boolean durable) {
            setProperty("durable", durable);
            return this;
        }
        /**
         * If we are declaring a durable exchange (the exchange will survive a
         * server restart).
         *
         * The option will be converted to a <code>boolean</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder durable(String durable) {
            setProperty("durable", durable);
            return this;
        }
        /**
         * The exchange type such as direct or topic.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder exchangeType(String exchangeType) {
            setProperty("exchangeType", exchangeType);
            return this;
        }
        /**
         * Exclusive queues may only be accessed by the current connection, and
         * are deleted when that connection closes.
         *
         * The option is a: <code>boolean</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder exclusive(boolean exclusive) {
            setProperty("exclusive", exclusive);
            return this;
        }
        /**
         * Exclusive queues may only be accessed by the current connection, and
         * are deleted when that connection closes.
         *
         * The option will be converted to a <code>boolean</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder exclusive(String exclusive) {
            setProperty("exclusive", exclusive);
            return this;
        }
        /**
         * The hostname of the running rabbitmq instance or cluster.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder hostname(String hostname) {
            setProperty("hostname", hostname);
            return this;
        }
        /**
         * Passive queues depend on the queue already to be available at
         * RabbitMQ.
         *
         * The option is a: <code>boolean</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder passive(boolean passive) {
            setProperty("passive", passive);
            return this;
        }
        /**
         * Passive queues depend on the queue already to be available at
         * RabbitMQ.
         *
         * The option will be converted to a <code>boolean</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder passive(String passive) {
            setProperty("passive", passive);
            return this;
        }
        /**
         * Port number for the host with the running rabbitmq instance or
         * cluster. Default value is 5672.
         *
         * The option is a: <code>int</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder portNumber(int portNumber) {
            setProperty("portNumber", portNumber);
            return this;
        }
        /**
         * Port number for the host with the running rabbitmq instance or
         * cluster. Default value is 5672.
         *
         * The option will be converted to a <code>int</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder portNumber(String portNumber) {
            setProperty("portNumber", portNumber);
            return this;
        }
        /**
         * The queue to receive messages from.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder queue(String queue) {
            setProperty("queue", queue);
            return this;
        }
        /**
         * The routing key to use when binding a consumer queue to the exchange.
         * For producer routing keys, you set the header rabbitmq.ROUTING_KEY.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder routingKey(String routingKey) {
            setProperty("routingKey", routingKey);
            return this;
        }
        /**
         * This can be used if we need to declare the queue but not the
         * exchange.
         *
         * The option is a: <code>boolean</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder skipExchangeDeclare(
                boolean skipExchangeDeclare) {
            setProperty("skipExchangeDeclare", skipExchangeDeclare);
            return this;
        }
        /**
         * This can be used if we need to declare the queue but not the
         * exchange.
         *
         * The option will be converted to a <code>boolean</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder skipExchangeDeclare(
                String skipExchangeDeclare) {
            setProperty("skipExchangeDeclare", skipExchangeDeclare);
            return this;
        }
        /**
         * If true the queue will not be bound to the exchange after declaring
         * it.
         *
         * The option is a: <code>boolean</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder skipQueueBind(boolean skipQueueBind) {
            setProperty("skipQueueBind", skipQueueBind);
            return this;
        }
        /**
         * If true the queue will not be bound to the exchange after declaring
         * it.
         *
         * The option will be converted to a <code>boolean</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder skipQueueBind(String skipQueueBind) {
            setProperty("skipQueueBind", skipQueueBind);
            return this;
        }
        /**
         * If true the producer will not declare and bind a queue. This can be
         * used for directing messages via an existing routing key.
         *
         * The option is a: <code>boolean</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder skipQueueDeclare(
                boolean skipQueueDeclare) {
            setProperty("skipQueueDeclare", skipQueueDeclare);
            return this;
        }
        /**
         * If true the producer will not declare and bind a queue. This can be
         * used for directing messages via an existing routing key.
         *
         * The option will be converted to a <code>boolean</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder skipQueueDeclare(String skipQueueDeclare) {
            setProperty("skipQueueDeclare", skipQueueDeclare);
            return this;
        }
        /**
         * The vhost for the channel.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: common
         */
        default RabbitMQEndpointBuilder vhost(String vhost) {
            setProperty("vhost", vhost);
            return this;
        }
        /**
         * Password for authenticated access.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: security
         */
        default RabbitMQEndpointBuilder password(String password) {
            setProperty("password", password);
            return this;
        }
        /**
         * Enables SSL on connection, accepted value are true, TLS and 'SSLv3.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: security
         */
        default RabbitMQEndpointBuilder sslProtocol(String sslProtocol) {
            setProperty("sslProtocol", sslProtocol);
            return this;
        }
        /**
         * Configure SSL trust manager, SSL should be enabled for this option to
         * be effective.
         *
         * The option is a: <code>javax.net.ssl.TrustManager</code> type.
         *
         * Group: security
         */
        default RabbitMQEndpointBuilder trustManager(Object trustManager) {
            setProperty("trustManager", trustManager);
            return this;
        }
        /**
         * Configure SSL trust manager, SSL should be enabled for this option to
         * be effective.
         *
         * The option will be converted to a
         * <code>javax.net.ssl.TrustManager</code> type.
         *
         * Group: security
         */
        default RabbitMQEndpointBuilder trustManager(String trustManager) {
            setProperty("trustManager", trustManager);
            return this;
        }
        /**
         * Username in case of authenticated access.
         *
         * The option is a: <code>java.lang.String</code> type.
         *
         * Group: security
         */
        default RabbitMQEndpointBuilder username(String username) {
            setProperty("username", username);
            return this;
        }
    }
    /**
     * Advanced builder for endpoint for the RabbitMQ component.
     */
    // NOTE(review): looks machine-generated (Camel endpoint DSL); prefer
    // regenerating over hand-editing — TODO confirm with the build setup.
    public interface AdvancedRabbitMQEndpointBuilder
            extends
                AdvancedRabbitMQEndpointConsumerBuilder, AdvancedRabbitMQEndpointProducerBuilder {
        // Same builder instance viewed through its basic-options interface
        // (plain cast; the concrete impl implements both interfaces).
        default RabbitMQEndpointBuilder basic() {
            return (RabbitMQEndpointBuilder) this;
        }
        /**
         * Specify arguments for configuring the different RabbitMQ concepts, a
         * different prefix is required for each: Exchange: arg.exchange. Queue:
         * arg.queue. Binding: arg.binding. For example to declare a queue with
         * message ttl argument:
         * http://localhost:5672/exchange/queueargs=arg.queue.x-message-ttl=60000.
         *
         * The option is a: <code>java.util.Map&lt;java.lang.String,
         * java.lang.Object&gt;</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder args(Map<String, Object> args) {
            setProperty("args", args);
            return this;
        }
        /**
         * Specify arguments for configuring the different RabbitMQ concepts, a
         * different prefix is required for each: Exchange: arg.exchange. Queue:
         * arg.queue. Binding: arg.binding. For example to declare a queue with
         * message ttl argument:
         * http://localhost:5672/exchange/queueargs=arg.queue.x-message-ttl=60000.
         *
         * The option will be converted to a
         * <code>java.util.Map&lt;java.lang.String, java.lang.Object&gt;</code>
         * type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder args(String args) {
            setProperty("args", args);
            return this;
        }
        /**
         * Enables connection automatic recovery (uses connection implementation
         * that performs automatic recovery when connection shutdown is not
         * initiated by the application).
         *
         * The option is a: <code>java.lang.Boolean</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder automaticRecoveryEnabled(
                Boolean automaticRecoveryEnabled) {
            setProperty("automaticRecoveryEnabled", automaticRecoveryEnabled);
            return this;
        }
        /**
         * Enables connection automatic recovery (uses connection implementation
         * that performs automatic recovery when connection shutdown is not
         * initiated by the application).
         *
         * The option will be converted to a <code>java.lang.Boolean</code>
         * type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder automaticRecoveryEnabled(
                String automaticRecoveryEnabled) {
            setProperty("automaticRecoveryEnabled", automaticRecoveryEnabled);
            return this;
        }
        /**
         * Whether the endpoint should use basic property binding (Camel 2.x) or
         * the newer property binding with additional capabilities.
         *
         * The option is a: <code>boolean</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder basicPropertyBinding(
                boolean basicPropertyBinding) {
            setProperty("basicPropertyBinding", basicPropertyBinding);
            return this;
        }
        /**
         * Whether the endpoint should use basic property binding (Camel 2.x) or
         * the newer property binding with additional capabilities.
         *
         * The option will be converted to a <code>boolean</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder basicPropertyBinding(
                String basicPropertyBinding) {
            setProperty("basicPropertyBinding", basicPropertyBinding);
            return this;
        }
        /**
         * Connection client properties (client info used in negotiating with
         * the server).
         *
         * The option is a: <code>java.util.Map&lt;java.lang.String,
         * java.lang.Object&gt;</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder clientProperties(
                Map<String, Object> clientProperties) {
            setProperty("clientProperties", clientProperties);
            return this;
        }
        /**
         * Connection client properties (client info used in negotiating with
         * the server).
         *
         * The option will be converted to a
         * <code>java.util.Map&lt;java.lang.String, java.lang.Object&gt;</code>
         * type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder clientProperties(
                String clientProperties) {
            setProperty("clientProperties", clientProperties);
            return this;
        }
        /**
         * Connection timeout.
         *
         * The option is a: <code>int</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder connectionTimeout(
                int connectionTimeout) {
            setProperty("connectionTimeout", connectionTimeout);
            return this;
        }
        /**
         * Connection timeout.
         *
         * The option will be converted to a <code>int</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder connectionTimeout(
                String connectionTimeout) {
            setProperty("connectionTimeout", connectionTimeout);
            return this;
        }
        /**
         * Network recovery interval in milliseconds (interval used when
         * recovering from network failure).
         *
         * The option is a: <code>java.lang.Integer</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder networkRecoveryInterval(
                Integer networkRecoveryInterval) {
            setProperty("networkRecoveryInterval", networkRecoveryInterval);
            return this;
        }
        /**
         * Network recovery interval in milliseconds (interval used when
         * recovering from network failure).
         *
         * The option will be converted to a <code>java.lang.Integer</code>
         * type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder networkRecoveryInterval(
                String networkRecoveryInterval) {
            setProperty("networkRecoveryInterval", networkRecoveryInterval);
            return this;
        }
        /**
         * Connection requested channel max (max number of channels offered).
         *
         * The option is a: <code>int</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder requestedChannelMax(
                int requestedChannelMax) {
            setProperty("requestedChannelMax", requestedChannelMax);
            return this;
        }
        /**
         * Connection requested channel max (max number of channels offered).
         *
         * The option will be converted to a <code>int</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder requestedChannelMax(
                String requestedChannelMax) {
            setProperty("requestedChannelMax", requestedChannelMax);
            return this;
        }
        /**
         * Connection requested frame max (max size of frame offered).
         *
         * The option is a: <code>int</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder requestedFrameMax(
                int requestedFrameMax) {
            setProperty("requestedFrameMax", requestedFrameMax);
            return this;
        }
        /**
         * Connection requested frame max (max size of frame offered).
         *
         * The option will be converted to a <code>int</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder requestedFrameMax(
                String requestedFrameMax) {
            setProperty("requestedFrameMax", requestedFrameMax);
            return this;
        }
        /**
         * Connection requested heartbeat (heart-beat in seconds offered).
         *
         * The option is a: <code>int</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder requestedHeartbeat(
                int requestedHeartbeat) {
            setProperty("requestedHeartbeat", requestedHeartbeat);
            return this;
        }
        /**
         * Connection requested heartbeat (heart-beat in seconds offered).
         *
         * The option will be converted to a <code>int</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder requestedHeartbeat(
                String requestedHeartbeat) {
            setProperty("requestedHeartbeat", requestedHeartbeat);
            return this;
        }
        /**
         * Set timeout for waiting for a reply when using the InOut Exchange
         * Pattern (in milliseconds).
         *
         * The option is a: <code>long</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder requestTimeout(
                long requestTimeout) {
            setProperty("requestTimeout", requestTimeout);
            return this;
        }
        /**
         * Set timeout for waiting for a reply when using the InOut Exchange
         * Pattern (in milliseconds).
         *
         * The option will be converted to a <code>long</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder requestTimeout(
                String requestTimeout) {
            setProperty("requestTimeout", requestTimeout);
            return this;
        }
        /**
         * Set requestTimeoutCheckerInterval for inOut exchange.
         *
         * The option is a: <code>long</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder requestTimeoutCheckerInterval(
                long requestTimeoutCheckerInterval) {
            setProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
            return this;
        }
        /**
         * Set requestTimeoutCheckerInterval for inOut exchange.
         *
         * The option will be converted to a <code>long</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder requestTimeoutCheckerInterval(
                String requestTimeoutCheckerInterval) {
            setProperty("requestTimeoutCheckerInterval", requestTimeoutCheckerInterval);
            return this;
        }
        /**
         * Sets whether synchronous processing should be strictly used, or Camel
         * is allowed to use asynchronous processing (if supported).
         *
         * The option is a: <code>boolean</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder synchronous(boolean synchronous) {
            setProperty("synchronous", synchronous);
            return this;
        }
        /**
         * Sets whether synchronous processing should be strictly used, or Camel
         * is allowed to use asynchronous processing (if supported).
         *
         * The option will be converted to a <code>boolean</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder synchronous(String synchronous) {
            setProperty("synchronous", synchronous);
            return this;
        }
        /**
         * Enables connection topology recovery (should topology recovery be
         * performed).
         *
         * The option is a: <code>java.lang.Boolean</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder topologyRecoveryEnabled(
                Boolean topologyRecoveryEnabled) {
            setProperty("topologyRecoveryEnabled", topologyRecoveryEnabled);
            return this;
        }
        /**
         * Enables connection topology recovery (should topology recovery be
         * performed).
         *
         * The option will be converted to a <code>java.lang.Boolean</code>
         * type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder topologyRecoveryEnabled(
                String topologyRecoveryEnabled) {
            setProperty("topologyRecoveryEnabled", topologyRecoveryEnabled);
            return this;
        }
        /**
         * When true and an inOut Exchange failed on the consumer side send the
         * caused Exception back in the response.
         *
         * The option is a: <code>boolean</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder transferException(
                boolean transferException) {
            setProperty("transferException", transferException);
            return this;
        }
        /**
         * When true and an inOut Exchange failed on the consumer side send the
         * caused Exception back in the response.
         *
         * The option will be converted to a <code>boolean</code> type.
         *
         * Group: advanced
         */
        default AdvancedRabbitMQEndpointBuilder transferException(
                String transferException) {
            setProperty("transferException", transferException);
            return this;
        }
    }
    /**
     * RabbitMQ (camel-rabbitmq)
     * The rabbitmq component allows you produce and consume messages from
     * RabbitMQ instances.
     *
     * Category: messaging
     * Available as of version: 2.12
     * Maven coordinates: org.apache.camel:camel-rabbitmq
     *
     * Syntax: <code>rabbitmq:exchangeName</code>
     *
     * Path parameter: exchangeName (required)
     * The exchange name determines which exchange produced messages will sent
     * to. In the case of consumers, the exchange name determines which exchange
     * the queue will bind to.
     */
    default RabbitMQEndpointBuilder rabbitMQ(String path) {
        // Local class implements both the basic and the advanced builder
        // interfaces, so advanced()/basic() can cast the same instance.
        class RabbitMQEndpointBuilderImpl extends AbstractEndpointBuilder implements RabbitMQEndpointBuilder, AdvancedRabbitMQEndpointBuilder {
            public RabbitMQEndpointBuilderImpl(String path) {
                super("rabbitmq", path);
            }
        }
        return new RabbitMQEndpointBuilderImpl(path);
    }
} | Fabryprog/camel | core/camel-endpointdsl/src/main/java/org/apache/camel/builder/endpoint/dsl/RabbitMQEndpointBuilderFactory.java | Java | apache-2.0 | 115,871 |
from matplotlib.testing.decorators import cleanup
from unittest import TestCase
from nose_parameterized import parameterized
import os
import gzip
from pandas import read_csv
from pyfolio.utils import (to_utc, to_series)
from pyfolio.tears import (create_full_tear_sheet,
create_simple_tear_sheet,
create_returns_tear_sheet,
create_position_tear_sheet,
create_txn_tear_sheet,
create_round_trip_tear_sheet,
create_interesting_times_tear_sheet,)
class PositionsTestCase(TestCase):
    """Smoke tests for pyfolio tear sheets.

    Each test calls a create_*_tear_sheet function over a grid of
    keyword-argument combinations and passes if no exception is raised
    (plots are cleaned up by the @cleanup decorator).
    """
    # Directory of this test module; used to locate the fixture files.
    __location__ = os.path.realpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__)))
    # Returns fixture, loaded from a gzipped CSV and normalized to a
    # UTC-indexed Series via to_utc/to_series.
    test_returns = read_csv(
        gzip.open(
            __location__ + '/test_data/test_returns.csv.gz'),
        index_col=0, parse_dates=True)
    test_returns = to_series(to_utc(test_returns))
    # Transactions fixture (UTC-indexed DataFrame).
    test_txn = to_utc(read_csv(
        gzip.open(
            __location__ + '/test_data/test_txn.csv.gz'),
        index_col=0, parse_dates=True))
    # Positions fixture (UTC-indexed DataFrame).
    test_pos = to_utc(read_csv(
        gzip.open(__location__ + '/test_data/test_pos.csv.gz'),
        index_col=0, parse_dates=True))
    # Each tuple holds a kwargs dict forwarded verbatim to the tear sheet.
    @parameterized.expand([({},),
                           ({'slippage': 1},),
                           ({'live_start_date': test_returns.index[-20]},),
                           ({'round_trips': True},),
                           ({'hide_positions': True},),
                           ({'cone_std': 1},),
                           ({'bootstrap': True},),
                           ])
    @cleanup
    def test_create_full_tear_sheet_breakdown(self, kwargs):
        # Must not raise for any kwargs combination above.
        create_full_tear_sheet(self.test_returns,
                               positions=self.test_pos,
                               transactions=self.test_txn,
                               benchmark_rets=self.test_returns,
                               **kwargs
                               )
    @parameterized.expand([({},),
                           ({'slippage': 1},),
                           ({'live_start_date': test_returns.index[-20]},),
                           ])
    @cleanup
    def test_create_simple_tear_sheet_breakdown(self, kwargs):
        # Must not raise for any kwargs combination above.
        create_simple_tear_sheet(self.test_returns,
                                 positions=self.test_pos,
                                 transactions=self.test_txn,
                                 **kwargs
                                 )
    @parameterized.expand([({},),
                           ({'live_start_date':
                             test_returns.index[-20]},),
                           ({'cone_std': 1},),
                           ({'bootstrap': True},),
                           ])
    @cleanup
    def test_create_returns_tear_sheet_breakdown(self, kwargs):
        # Benchmark equals the strategy returns; only exercises plumbing.
        create_returns_tear_sheet(self.test_returns,
                                  benchmark_rets=self.test_returns,
                                  **kwargs
                                  )
    @parameterized.expand([({},),
                           ({'hide_positions': True},),
                           ({'show_and_plot_top_pos': 0},),
                           ({'show_and_plot_top_pos': 1},),
                           ])
    @cleanup
    def test_create_position_tear_sheet_breakdown(self, kwargs):
        # Must not raise for any kwargs combination above.
        create_position_tear_sheet(self.test_returns,
                                   self.test_pos,
                                   **kwargs
                                   )
    @parameterized.expand([({},),
                           ({'unadjusted_returns': test_returns},),
                           ])
    @cleanup
    def test_create_txn_tear_sheet_breakdown(self, kwargs):
        # Must not raise for any kwargs combination above.
        create_txn_tear_sheet(self.test_returns,
                              self.test_pos,
                              self.test_txn,
                              **kwargs
                              )
    @parameterized.expand([({},),
                           ({'sector_mappings': {}},),
                           ])
    @cleanup
    def test_create_round_trip_tear_sheet_breakdown(self, kwargs):
        # Must not raise for any kwargs combination above.
        create_round_trip_tear_sheet(self.test_returns,
                                     self.test_pos,
                                     self.test_txn,
                                     **kwargs
                                     )
    @parameterized.expand([({},),
                           ({'legend_loc': 1},),
                           ])
    @cleanup
    def test_create_interesting_times_tear_sheet_breakdown(self,
                                                           kwargs):
        # Must not raise for any kwargs combination above.
        create_interesting_times_tear_sheet(self.test_returns,
                                            self.test_returns,
                                            **kwargs
                                            )
| quantopian/pyfolio | pyfolio/tests/test_tears.py | Python | apache-2.0 | 4,911 |
/*
* Copyright (c) 2014-2015 University of Ulm
*
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership. Licensed under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package components.execution;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import java.util.Set;
import static com.google.common.base.Preconditions.checkNotNull;
/**
* Created by daniel on 24.07.15.
*/
@Singleton public class Init {

    /**
     * Eagerly submits all bound {@link Runnable}s and schedules all bound
     * {@link Schedulable}s on the given {@link ExecutionService}.
     *
     * @param executionService the service work is handed to; must not be null
     * @param runnables        tasks executed once; must not be null
     * @param schedulables     tasks registered for scheduling; must not be null
     */
    @Inject public Init(ExecutionService executionService, Set<Runnable> runnables,
        Set<Schedulable> schedulables) {
        checkNotNull(executionService);
        checkNotNull(runnables);
        checkNotNull(schedulables);
        // Hand every bound task over to the execution service.
        runnables.forEach(executionService::execute);
        schedulables.forEach(executionService::schedule);
    }
}
| cha87de/colosseum | app/components/execution/Init.java | Java | apache-2.0 | 1,451 |
/**
* copyright
* Inubit AG
* Schoeneberger Ufer 89
* 10785 Berlin
* Germany
*/
package net.frapu.code.visualization.twf;
import java.awt.Color;
import java.awt.Graphics;
import java.awt.Graphics2D;
import java.awt.Point;
import java.awt.Shape;
import java.awt.geom.Rectangle2D;
import java.util.HashSet;
import java.util.Set;
import net.frapu.code.visualization.ProcessModel;
import net.frapu.code.visualization.ProcessNode;
import net.frapu.code.visualization.ProcessUtils;
/**
* @author ff
*
*/
/**
 * Visual connector node that sits in the error area at the bottom of a
 * {@link Tool}. Its position and size are derived from the parent tool on
 * every paint, so it is never moved independently.
 *
 * @author ff
 */
public class ToolErrorConnector extends ProcessNode {

    // Layout constants for the connector buttons inside the tool's error area.
    private static final int DIST_X = 4;
    private static final int DIST_Y = 3;
    private static final int BUTTON_WIDTH = 25;
    public static final int AREA_HEIGHT = 20;
    /** Property key holding the id of the owning Tool (resolved in addContext). */
    public static String PROP_PARENT_ID = "#ParentToolID";
    private Tool f_parent;
    // Property key for this connector's slot index on the parent tool.
    // static final: it is a constant key, kept consistent with PROP_PARENT_ID.
    private static final String PROP_NUMBER = "#ConnectorNumber";

    /**
     * No-argument constructor required for serialization; the parent tool is
     * resolved later via {@link #addContext(ProcessModel)}.
     */
    public ToolErrorConnector() {
        f_parent = null;
        setNumber(0);
    }

    /** @return the owning Tool, or null if not yet resolved. */
    public Tool getParent() {
        return f_parent;
    }

    /**
     * Creates a connector attached to the given tool at the given slot.
     *
     * @param tool   owning tool, stored by id in PROP_PARENT_ID
     * @param number slot index within the tool's error area
     */
    public ToolErrorConnector(Tool tool, int number) {
        f_parent = tool;
        setProperty(PROP_PARENT_ID, f_parent.getId());
        setNumber(number);
    }

    @Override
    public void addContext(ProcessModel context) {
        super.addContext(context);
        if(f_parent == null) {
            // Late-bind the parent from the serialized id.
            f_parent = (Tool) context.getNodeById(getProperty(PROP_PARENT_ID));
            if(f_parent != null)//can happen with legacy models
                f_parent.setErrorConnector(this,getNumber());
        }
    }

    @Override
    protected Shape getOutlineShape() {
        // Axis-aligned rectangle centered on getPos().
        Rectangle2D outline = new Rectangle2D.Float(getPos().x - (getSize().width / 2),
                getPos().y - (getSize().height / 2), getSize().width, getSize().height);
        return outline;
    }

    @Override
    protected void paintInternal(Graphics g) {
        // Re-derive position/size from the parent before painting.
        updatePosAndSize();
        Graphics2D g2 = (Graphics2D) g;
        g2.setStroke(ProcessUtils.defaultStroke);
        g2.setColor(Color.WHITE);
        g2.fillRect(getPos().x-getSize().width/2, getPos().y-getSize().height/2, getSize().width, getSize().height);
        g2.setColor(Color.BLACK);
        g2.drawRect(getPos().x-getSize().width/2, getPos().y-getSize().height/2, getSize().width, getSize().height);
    }

    /** Stores the slot index in the PROP_NUMBER property. */
    public void setNumber(int number) {
        setProperty(PROP_NUMBER , ""+number);
    }

    /**
     * @return the slot index parsed from PROP_NUMBER, or 0 if the property is
     *         missing or malformed (the parse error is printed).
     */
    public int getNumber() {
        try {
            return Integer.parseInt(getProperty(PROP_NUMBER));
        } catch (NumberFormatException e) {
            e.printStackTrace();
            return 0;
        }
    }

    /**
     * Positions this connector inside the parent tool's bottom error area,
     * spaced by its slot number; no-op while the parent is unresolved.
     */
    private void updatePosAndSize() {
        if(f_parent != null) {
            Point _tlPos = new Point(f_parent.getPos().x-f_parent.getSize().width/2,
                    f_parent.getPos().y+f_parent.getSize().height/2-AREA_HEIGHT);
            _tlPos.x += ((getNumber()+0.5)*BUTTON_WIDTH) + (getNumber()+1)*DIST_X;
            _tlPos.y += AREA_HEIGHT/2;
            setPos(_tlPos);
            setSize(BUTTON_WIDTH, AREA_HEIGHT-2*DIST_Y);
        }
    }

    @Override
    public Set<Point> getDefaultConnectionPoints() {
        // Single connection point at the vertical bottom-center offset.
        HashSet<Point> cp = new HashSet<Point>();
        cp.add(new Point(0, (getSize().height/2)));
        return cp;
    }
}
| bptlab/processeditor | src/net/frapu/code/visualization/twf/ToolErrorConnector.java | Java | apache-2.0 | 3,182 |
/a/lib/tsc.js --w
//// [/user/username/projects/myproject/a.ts]
export interface Point {
name: string;
c: Coords;
}
export interface Coords {
x2: number;
y: number;
}
//// [/user/username/projects/myproject/b.ts]
import { Point } from "./a";
export interface PointWrapper extends Point {
}
//// [/user/username/projects/myproject/c.ts]
import { PointWrapper } from "./b";
export function getPoint(): PointWrapper {
return {
name: "test",
c: {
x: 1,
y: 2
}
}
};
//// [/user/username/projects/myproject/d.ts]
import { getPoint } from "./c";
getPoint().c.x;
//// [/user/username/projects/myproject/e.ts]
import "./d";
//// [/user/username/projects/myproject/tsconfig.json]
{}
//// [/a/lib/lib.d.ts]
/// <reference no-default-lib="true"/>
interface Boolean {}
interface Function {}
interface CallableFunction {}
interface NewableFunction {}
interface IArguments {}
interface Number { toExponential: any; }
interface Object {}
interface RegExp {}
interface String { charAt: any; }
interface Array<T> { length: number; [n: number]: T; }
//// [/user/username/projects/myproject/a.js]
"use strict";
exports.__esModule = true;
//// [/user/username/projects/myproject/b.js]
"use strict";
exports.__esModule = true;
//// [/user/username/projects/myproject/c.js]
"use strict";
exports.__esModule = true;
exports.getPoint = void 0;
function getPoint() {
return {
name: "test",
c: {
x: 1,
y: 2
}
};
}
exports.getPoint = getPoint;
;
//// [/user/username/projects/myproject/d.js]
"use strict";
exports.__esModule = true;
var c_1 = require("./c");
c_1.getPoint().c.x;
//// [/user/username/projects/myproject/e.js]
"use strict";
exports.__esModule = true;
require("./d");
Output::
>> Screen clear
[[90m12:00:29 AM[0m] Starting compilation in watch mode...
[96mc.ts[0m:[93m6[0m:[93m13[0m - [91merror[0m[90m TS2322: [0mType '{ x: number; y: number; }' is not assignable to type 'Coords'.
Object literal may only specify known properties, and 'x' does not exist in type 'Coords'.
[7m6[0m x: 1,
[7m [0m [91m ~~~~[0m
[96ma.ts[0m:[93m3[0m:[93m5[0m
[7m3[0m c: Coords;
[7m [0m [96m ~[0m
The expected type comes from property 'c' which is declared here on type 'PointWrapper'
[96md.ts[0m:[93m2[0m:[93m14[0m - [91merror[0m[90m TS2339: [0mProperty 'x' does not exist on type 'Coords'.
[7m2[0m getPoint().c.x;
[7m [0m [91m ~[0m
[[90m12:00:40 AM[0m] Found 2 errors. Watching for file changes.
Program root files: ["/user/username/projects/myproject/a.ts","/user/username/projects/myproject/b.ts","/user/username/projects/myproject/c.ts","/user/username/projects/myproject/d.ts","/user/username/projects/myproject/e.ts"]
Program options: {"watch":true,"configFilePath":"/user/username/projects/myproject/tsconfig.json"}
Program files::
/a/lib/lib.d.ts
/user/username/projects/myproject/a.ts
/user/username/projects/myproject/b.ts
/user/username/projects/myproject/c.ts
/user/username/projects/myproject/d.ts
/user/username/projects/myproject/e.ts
Semantic diagnostics in builder refreshed for::
/a/lib/lib.d.ts
/user/username/projects/myproject/a.ts
/user/username/projects/myproject/b.ts
/user/username/projects/myproject/c.ts
/user/username/projects/myproject/d.ts
/user/username/projects/myproject/e.ts
WatchedFiles::
/user/username/projects/myproject/tsconfig.json:
{"fileName":"/user/username/projects/myproject/tsconfig.json","pollingInterval":250}
/user/username/projects/myproject/a.ts:
{"fileName":"/user/username/projects/myproject/a.ts","pollingInterval":250}
/user/username/projects/myproject/b.ts:
{"fileName":"/user/username/projects/myproject/b.ts","pollingInterval":250}
/user/username/projects/myproject/c.ts:
{"fileName":"/user/username/projects/myproject/c.ts","pollingInterval":250}
/user/username/projects/myproject/d.ts:
{"fileName":"/user/username/projects/myproject/d.ts","pollingInterval":250}
/user/username/projects/myproject/e.ts:
{"fileName":"/user/username/projects/myproject/e.ts","pollingInterval":250}
/a/lib/lib.d.ts:
{"fileName":"/a/lib/lib.d.ts","pollingInterval":250}
FsWatches::
FsWatchesRecursive::
/user/username/projects/myproject/node_modules/@types:
{"directoryName":"/user/username/projects/myproject/node_modules/@types","fallbackPollingInterval":500,"fallbackOptions":{"watchFile":"PriorityPollingInterval"}}
/user/username/projects/myproject:
{"directoryName":"/user/username/projects/myproject","fallbackPollingInterval":500,"fallbackOptions":{"watchFile":"PriorityPollingInterval"}}
exitCode:: ExitStatus.undefined
Change:: Rename property x2 to x of interface Coords
//// [/user/username/projects/myproject/a.ts]
export interface Point {
name: string;
c: Coords;
}
export interface Coords {
x: number;
y: number;
}
//// [/user/username/projects/myproject/a.js] file written with same contents
//// [/user/username/projects/myproject/b.js] file written with same contents
Output::
>> Screen clear
[[90m12:00:44 AM[0m] File change detected. Starting incremental compilation...
[[90m12:00:51 AM[0m] Found 0 errors. Watching for file changes.
Program root files: ["/user/username/projects/myproject/a.ts","/user/username/projects/myproject/b.ts","/user/username/projects/myproject/c.ts","/user/username/projects/myproject/d.ts","/user/username/projects/myproject/e.ts"]
Program options: {"watch":true,"configFilePath":"/user/username/projects/myproject/tsconfig.json"}
Program files::
/a/lib/lib.d.ts
/user/username/projects/myproject/a.ts
/user/username/projects/myproject/b.ts
/user/username/projects/myproject/c.ts
/user/username/projects/myproject/d.ts
/user/username/projects/myproject/e.ts
Semantic diagnostics in builder refreshed for::
/user/username/projects/myproject/a.ts
/user/username/projects/myproject/b.ts
/user/username/projects/myproject/c.ts
/user/username/projects/myproject/d.ts
WatchedFiles::
/user/username/projects/myproject/tsconfig.json:
{"fileName":"/user/username/projects/myproject/tsconfig.json","pollingInterval":250}
/user/username/projects/myproject/a.ts:
{"fileName":"/user/username/projects/myproject/a.ts","pollingInterval":250}
/user/username/projects/myproject/b.ts:
{"fileName":"/user/username/projects/myproject/b.ts","pollingInterval":250}
/user/username/projects/myproject/c.ts:
{"fileName":"/user/username/projects/myproject/c.ts","pollingInterval":250}
/user/username/projects/myproject/d.ts:
{"fileName":"/user/username/projects/myproject/d.ts","pollingInterval":250}
/user/username/projects/myproject/e.ts:
{"fileName":"/user/username/projects/myproject/e.ts","pollingInterval":250}
/a/lib/lib.d.ts:
{"fileName":"/a/lib/lib.d.ts","pollingInterval":250}
FsWatches::
FsWatchesRecursive::
/user/username/projects/myproject/node_modules/@types:
{"directoryName":"/user/username/projects/myproject/node_modules/@types","fallbackPollingInterval":500,"fallbackOptions":{"watchFile":"PriorityPollingInterval"}}
/user/username/projects/myproject:
{"directoryName":"/user/username/projects/myproject","fallbackPollingInterval":500,"fallbackOptions":{"watchFile":"PriorityPollingInterval"}}
exitCode:: ExitStatus.undefined
| nojvek/TypeScript | tests/baselines/reference/tscWatch/emitAndErrorUpdates/default/file-not-exporting-a-deep-multilevel-import-that-changes.js | JavaScript | apache-2.0 | 7,454 |
package com.amqtech.opensource.appintroexample.util;
/**
* Created by andrew on 11/17/16.
*/
import android.app.Fragment;
import android.app.FragmentManager;
import android.app.FragmentTransaction;
import android.os.Build;
import android.os.Bundle;
import android.os.Parcelable;
import androidx.fragment.app.FragmentPagerAdapter;
import androidx.viewpager.widget.PagerAdapter;
import android.view.View;
import android.view.ViewGroup;
import java.util.ArrayList;
/**
* Implementation of {@link PagerAdapter} that
* uses a {@link Fragment} to manage each page. This class also handles
* saving and restoring of fragment's state.
* <p>
* <p>This version of the pager is more useful when there are a large number
* of pages, working more like a list view. When pages are not visible to
* the user, their entire fragment may be destroyed, only keeping the saved
* state of that fragment. This allows the pager to hold on to much less
* memory associated with each visited page as compared to
* {@link FragmentPagerAdapter} at the cost of potentially more overhead when
* switching between pages.
* <p>
* <p>When using FragmentPagerAdapter the host ViewPager must have a
* valid ID set.</p>
* <p>
* <p>Subclasses only need to implement {@link #getItem(int)}
* and {@link #getCount()} to have a working adapter.
* <p>
* <p>Here is an example implementation of a pager containing fragments of
* lists:
* <p>
* {@sample frameworks/support/samples/Support13Demos/src/com/example/android/supportv13/app/FragmentStatePagerSupport.java
* complete}
* <p>
* <p>The <code>R.layout.fragment_pager</code> resource of the top-level fragment is:
* <p>
* {@sample frameworks/support/samples/Support13Demos/res/layout/fragment_pager.xml
* complete}
* <p>
* <p>The <code>R.layout.fragment_pager_list</code> resource containing each
* individual fragment's layout is:
* <p>
* {@sample frameworks/support/samples/Support13Demos/res/layout/fragment_pager_list.xml
* complete}
*/
public abstract class FragmentStatePagerAdapter extends PagerAdapter {
    private static final String TAG = "FragmentStatePagerAdapter";
    private static final boolean DEBUG = false;

    private final FragmentManager mFragmentManager;
    private FragmentTransaction mCurTransaction = null;

    // Per-position saved state of pages that have been destroyed.
    private ArrayList<Fragment.SavedState> mSavedState = new ArrayList<Fragment.SavedState>();
    // Per-position live fragments; entries are null where the page was destroyed.
    private ArrayList<Fragment> mFragments = new ArrayList<Fragment>();
    private Fragment mCurrentPrimaryItem = null;

    public FragmentStatePagerAdapter(FragmentManager fm) {
        mFragmentManager = fm;
    }

    /**
     * Return the Fragment associated with a specified position.
     */
    public abstract Fragment getItem(int position);

    @Override
    public void startUpdate(ViewGroup container) {
        // The container must have an id: the fragment transactions below
        // attach fragments to container.getId().
        if (container.getId() == View.NO_ID) {
            throw new IllegalStateException("ViewPager with adapter " + this
                    + " requires a view id");
        }
    }

    @Override
    public Object instantiateItem(ViewGroup container, int position) {
        // If we already have this item instantiated, there is nothing
        // to do.  This can happen when we are restoring the entire pager
        // from its saved state, where the fragment manager has already
        // taken care of restoring the fragments we previously had instantiated.
        if (mFragments.size() > position) {
            Fragment f = mFragments.get(position);
            if (f != null) {
                return f;
            }
        }
        if (mCurTransaction == null) {
            mCurTransaction = mFragmentManager.beginTransaction();
        }
        Fragment fragment = getItem(position);
        // Restore any state saved when this page was previously destroyed.
        if (mSavedState.size() > position) {
            Fragment.SavedState fss = mSavedState.get(position);
            if (fss != null) {
                fragment.setInitialSavedState(fss);
            }
        }
        while (mFragments.size() <= position) {
            mFragments.add(null);
        }
        // Newly instantiated pages start hidden; setPrimaryItem() promotes
        // the visible one.
        fragment.setMenuVisibility(false);
        setFragmentUserVisibleHint(fragment);
        mFragments.set(position, fragment);
        mCurTransaction.add(container.getId(), fragment);
        return fragment;
    }

    /**
     * Hook that applies the user-visible hint to a page fragment.
     * NOTE(review): this always passes {@code false}, even for the fragment
     * being promoted in {@link #setPrimaryItem} — presumably deliberate so
     * subclasses control visibility hints themselves; confirm before changing.
     */
    public void setFragmentUserVisibleHint(Fragment fragment) {
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH_MR1) {
            fragment.setUserVisibleHint(false);
        }
    }

    @Override
    public void destroyItem(ViewGroup container, int position, Object object) {
        Fragment fragment = (Fragment) object;
        if (mCurTransaction == null) {
            mCurTransaction = mFragmentManager.beginTransaction();
        }
        while (mSavedState.size() <= position) {
            mSavedState.add(null);
        }
        // Keep the fragment's state so it can be rebuilt if the user pages back.
        mSavedState.set(position, fragment.isAdded()
                ? mFragmentManager.saveFragmentInstanceState(fragment) : null);
        mFragments.set(position, null);
        mCurTransaction.remove(fragment);
    }

    @Override
    public void setPrimaryItem(ViewGroup container, int position, Object object) {
        Fragment fragment = (Fragment) object;
        if (fragment != mCurrentPrimaryItem) {
            // Demote the old primary page and promote the new one.
            if (mCurrentPrimaryItem != null) {
                mCurrentPrimaryItem.setMenuVisibility(false);
                setFragmentUserVisibleHint(mCurrentPrimaryItem);
            }
            if (fragment != null) {
                fragment.setMenuVisibility(true);
                setFragmentUserVisibleHint(fragment);
            }
            mCurrentPrimaryItem = fragment;
        }
    }

    @Override
    public void finishUpdate(ViewGroup container) {
        if (mCurTransaction != null) {
            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
                mCurTransaction.commitNowAllowingStateLoss();
            } else {
                // Bug fix: previously the transaction was only committed on
                // API >= N; on older devices it was silently dropped, so the
                // add/remove operations queued above never took effect.
                // commitAllowingStateLoss() + executePendingTransactions()
                // is the pre-N equivalent of commitNowAllowingStateLoss().
                mCurTransaction.commitAllowingStateLoss();
                mFragmentManager.executePendingTransactions();
            }
            mCurTransaction = null;
        }
    }

    @Override
    public boolean isViewFromObject(View view, Object object) {
        return ((Fragment) object).getView() == view;
    }

    @Override
    public Parcelable saveState() {
        // Bundle layout: "states" = SavedState[] for destroyed pages,
        // "f<index>" = fragment reference for each live, added page.
        Bundle state = null;
        if (mSavedState.size() > 0) {
            state = new Bundle();
            Fragment.SavedState[] fss = new Fragment.SavedState[mSavedState.size()];
            mSavedState.toArray(fss);
            state.putParcelableArray("states", fss);
        }
        for (int i = 0; i < mFragments.size(); i++) {
            Fragment f = mFragments.get(i);
            if (f != null && f.isAdded()) {
                if (state == null) {
                    state = new Bundle();
                }
                String key = "f" + i;
                mFragmentManager.putFragment(state, key, f);
            }
        }
        return state;
    }

    @Override
    public void restoreState(Parcelable state, ClassLoader loader) {
        if (state != null) {
            Bundle bundle = (Bundle) state;
            bundle.setClassLoader(loader);
            Parcelable[] fss = bundle.getParcelableArray("states");
            mSavedState.clear();
            mFragments.clear();
            if (fss != null) {
                for (int i = 0; i < fss.length; i++) {
                    mSavedState.add((Fragment.SavedState) fss[i]);
                }
            }
            Iterable<String> keys = bundle.keySet();
            for (String key : keys) {
                if (key.startsWith("f")) {
                    int index = Integer.parseInt(key.substring(1));
                    Fragment f = mFragmentManager.getFragment(bundle, key);
                    if (f != null) {
                        while (mFragments.size() <= index) {
                            mFragments.add(null);
                        }
                        f.setMenuVisibility(false);
                        mFragments.set(index, f);
                    }
                }
            }
        }
    }
}
| PaoloRotolo/AppIntro | example/src/main/java/com/amqtech/opensource/appintroexample/util/FragmentStatePagerAdapter.java | Java | apache-2.0 | 7,938 |
package querqy.rewrite.commonrules.model;
import static java.util.Collections.singletonList;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
import querqy.model.*;
import querqy.rewrite.commonrules.AbstractCommonRulesTest;
import querqy.rewrite.commonrules.CommonRulesRewriter;
import querqy.model.Input;
import java.util.Collection;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static querqy.QuerqyMatchers.*;
import static querqy.QuerqyMatchers.dmq;
import static querqy.QuerqyMatchers.term;
import static querqy.rewrite.commonrules.select.SelectionStrategyFactory.DEFAULT_SELECTION_STRATEGY;
/**
* Created by rene on 08/12/2015.
*/
/**
 * Unit tests for {@link FilterInstruction}: verifies the structure and the
 * "generated" flag of filter queries produced by common-rules rewriting.
 *
 * Created by rene on 08/12/2015.
 */
public class FilterInstructionTest extends AbstractCommonRulesTest {

    // NOTE(review): the name says "BoostQueries" but the assertions are about
    // the filter query produced for input "x" — presumably a copy/paste name;
    // the behavior checked is that both filter terms become must-clauses.
    @Test
    public void testThatBoostQueriesWithMustClauseUseMM100ByDefault() {
        RulesCollectionBuilder builder = new TrieMapRulesCollectionBuilder(false);
        FilterInstruction filterInstruction = new FilterInstruction(makeQuery("a b").getUserQuery());
        builder.addRule(new Input.SimpleInput(singletonList(mkTerm("x")), false, false, "x"),
                new Instructions(1, "1", singletonList(filterInstruction)));
        RulesCollection rules = builder.build();
        CommonRulesRewriter rewriter = new CommonRulesRewriter(rules, DEFAULT_SELECTION_STRATEGY);
        ExpandedQuery query = makeQuery("x");
        Collection<QuerqyQuery<?>> filterQueries = rewriter.rewrite(query, new EmptySearchEngineRequestAdapter())
                .getFilterQueries();
        QuerqyQuery<?> qq = filterQueries.iterator().next();
        assertTrue(qq instanceof BooleanQuery);
        // Both terms of the filter rule must be mandatory (must-clauses).
        assertThat((BooleanQuery) qq,
                bq(
                        dmq(must(), term("a", true)),
                        dmq(must(), term("b", true))
                )
        );
    }

    // A filter rule consisting only of a negated term ("-ab") must produce a
    // boolean query with a must-not clause wrapped in a should-level query.
    @Test
    public void testPurelyNegativeFilterQuery() {
        RulesCollectionBuilder builder = new TrieMapRulesCollectionBuilder(true);
        FilterInstruction filterInstruction = new FilterInstruction(makeQuery("-ab").getUserQuery());
        builder.addRule(new Input.SimpleInput(singletonList(mkTerm("x")), false, false, "x"),
                new Instructions(1, "1", singletonList(filterInstruction)));
        RulesCollection rules = builder.build();
        CommonRulesRewriter rewriter = new CommonRulesRewriter(rules, DEFAULT_SELECTION_STRATEGY);
        ExpandedQuery query = makeQuery("x");
        Collection<QuerqyQuery<?>> filterQueries = rewriter.rewrite(query, new EmptySearchEngineRequestAdapter())
                .getFilterQueries();
        assertNotNull(filterQueries);
        assertEquals(1, filterQueries.size());
        QuerqyQuery<?> qq = filterQueries.iterator().next();
        assertTrue(qq instanceof BooleanQuery);
        assertThat((BooleanQuery) qq,
                bq(
                        should(),
                        dmq(
                                mustNot(),
                                term("ab", true)
                        )
                )
        );
    }

    // Filter queries are synthesized by the rewriter, so their terms must
    // carry the generated flag (the 'true' argument of term(..)).
    @Test
    public void testThatFilterQueriesAreMarkedAsGenerated() {
        RulesCollectionBuilder builder = new TrieMapRulesCollectionBuilder(false);
        FilterInstruction filterInstruction = new FilterInstruction(makeQuery("a").getUserQuery());
        builder.addRule(new Input.SimpleInput(singletonList(mkTerm("x")), false, false, "x"),
                new Instructions(1, "1", singletonList(filterInstruction)));
        RulesCollection rules = builder.build();
        CommonRulesRewriter rewriter = new CommonRulesRewriter(rules, DEFAULT_SELECTION_STRATEGY);
        ExpandedQuery query = makeQuery("x");
        Collection<QuerqyQuery<?>> filterQueries = rewriter.rewrite(query, new EmptySearchEngineRequestAdapter())
                .getFilterQueries();
        QuerqyQuery<?> qq = filterQueries.iterator().next();
        assertTrue(qq instanceof BooleanQuery);
        assertThat((BooleanQuery) qq,
                bq(
                        dmq(must(), term("a", true))
                )
        );
    }

    // The user's main query is typed by the user, not synthesized, so it must
    // NOT be marked as generated even when a filter rule matched.
    @Test
    public void testThatMainQueryIsNotMarkedAsGenerated() {
        RulesCollectionBuilder builder = new TrieMapRulesCollectionBuilder(false);
        FilterInstruction filterInstruction = new FilterInstruction(makeQuery("a").getUserQuery());
        builder.addRule(new Input.SimpleInput(singletonList(mkTerm("x")), false, false, "x"),
                new Instructions(1, "1", singletonList(filterInstruction)));
        RulesCollection rules = builder.build();
        CommonRulesRewriter rewriter = new CommonRulesRewriter(rules, DEFAULT_SELECTION_STRATEGY);
        ExpandedQuery query = makeQuery("x");
        QuerqyQuery<?> mainQuery = rewriter.rewrite(query, new EmptySearchEngineRequestAdapter()).getUserQuery();
        assertFalse(mainQuery.isGenerated());
    }
}
| renekrie/querqy | querqy-core/src/test/java/querqy/rewrite/commonrules/model/FilterInstructionTest.java | Java | apache-2.0 | 5,024 |
/*
Derby - Class org.apache.derby.iapi.sql.compile.OptimizerPlan
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to you under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.apache.derby.iapi.sql.compile;
import org.apache.derby.catalog.AliasInfo;
import org.apache.derby.shared.common.error.StandardException;
import org.apache.derby.shared.common.reference.SQLState;
import org.apache.derby.iapi.sql.StatementUtil;
import org.apache.derby.iapi.sql.compile.CompilerContext;
import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;
import org.apache.derby.iapi.sql.dictionary.AliasDescriptor;
import org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor;
import org.apache.derby.iapi.sql.dictionary.DataDictionary;
import org.apache.derby.iapi.sql.dictionary.SchemaDescriptor;
import org.apache.derby.iapi.sql.dictionary.UniqueTupleDescriptor;
import org.apache.derby.iapi.util.IdUtil;
/**
* <p>
* High level description of a plan for consideration by the Optimizer.
* This is used to specify a complete plan via optimizer overrides. A
* plan is a tree whose interior nodes are join operators and whose
* leaves are row sources (conglomerates or tableFunctions).
* </p>
*/
public abstract class OptimizerPlan
{
    ////////////////////////////////////////////////////////////////////////
    //
    // CONSTANTS
    //
    ////////////////////////////////////////////////////////////////////////

    ////////////////////////////////////////////////////////////////////////
    //
    // FACTORY METHODS
    //
    ////////////////////////////////////////////////////////////////////////

    /**
     * <p>
     * Make a RowSource corresponding to the given tuple descriptor.
     * </p>
     *
     * @return a ConglomerateRS or TableFunctionRS wrapper, or null when the
     *         descriptor is null or of an unrecognized type.
     */
    public static RowSource makeRowSource( UniqueTupleDescriptor utd, DataDictionary dd )
        throws StandardException
    {
        if ( utd == null ) { return null; }
        else if ( utd instanceof ConglomerateDescriptor )
        {
            return new ConglomerateRS( (ConglomerateDescriptor) utd, dd );
        }
        else if ( utd instanceof AliasDescriptor )
        {
            return new TableFunctionRS( (AliasDescriptor) utd );
        }
        else { return null; }
    }

    ////////////////////////////////////////////////////////////////////////
    //
    // ABSTRACT BEHAVIOR
    //
    ////////////////////////////////////////////////////////////////////////

    /**
     * <p>
     * Bind the conglomerate and table function names in this plan.
     * </p>
     *
     * @param dataDictionary DataDictionary to bind against.
     */
    public abstract void bind
        (
         DataDictionary dataDictionary,
         LanguageConnectionContext lcc,
         CompilerContext cc
         )
        throws StandardException;

    /**
     * <p>
     * Return true if this the schema and RowSource names have been resolved.
     * </p>
     */
    public abstract boolean isBound();

    /**
     * <p>
     * Count the number of leaf nodes under (and including) this node.
     * </p>
     */
    public abstract int countLeafNodes();

    /**
     * <p>
     * Get the leftmost leaf node in this plan.
     * </p>
     */
    public abstract OptimizerPlan leftmostLeaf();

    /**
     * <p>
     * Return true if this plan is a (left) leading prefix of the other plan.
     * </p>
     */
    public abstract boolean isLeftPrefixOf( OptimizerPlan that );

    ////////////////////////////////////////////////////////////////////////
    //
    // INNER CLASSES
    //
    ////////////////////////////////////////////////////////////////////////

    /** Interior node: a join of two sub-plans using a particular strategy. */
    public static final class Join extends OptimizerPlan
    {
        final JoinStrategy strategy;
        final OptimizerPlan leftChild;
        final OptimizerPlan rightChild;

        private boolean _isBound;
        private int _leafNodeCount = 0;

        public Join
            (
             JoinStrategy strategy,
             OptimizerPlan leftChild,
             OptimizerPlan rightChild
             )
        {
            this.strategy = strategy;
            this.leftChild = leftChild;
            this.rightChild = rightChild;
        }

        public void bind
            (
             DataDictionary dataDictionary,
             LanguageConnectionContext lcc,
             CompilerContext cc
             )
            throws StandardException
        {
            // only left-deep trees allowed at this time
            if ( !( rightChild instanceof RowSource ) )
            {
                throw StandardException.newException( SQLState.LANG_NOT_LEFT_DEEP );
            }

            leftChild.bind( dataDictionary, lcc, cc );
            rightChild.bind( dataDictionary, lcc, cc );

            _isBound = true;
        }

        public boolean isBound() { return _isBound; }

        public int countLeafNodes()
        {
            // lazily computed and cached
            if ( _leafNodeCount <= 0 ) { _leafNodeCount = leftChild.countLeafNodes() + rightChild.countLeafNodes(); }
            return _leafNodeCount;
        }

        public OptimizerPlan leftmostLeaf() { return leftChild.leftmostLeaf(); }

        public boolean isLeftPrefixOf( OptimizerPlan other )
        {
            if ( !(other instanceof Join) ) { return false; }
            Join that = (Join) other;

            int thisLeafCount = this.countLeafNodes();
            int thatLeafCount = that.countLeafNodes();

            if ( thisLeafCount > thatLeafCount ) { return false; }
            else if ( thisLeafCount < thatLeafCount ) { return isLeftPrefixOf( that.leftChild ); }
            else { return this.equals( that ); }
        }

        public String toString()
        {
            return
                "( " +
                leftChild.toString() +
                " " + strategy.getOperatorSymbol() + " " +
                rightChild.toString() +
                " )";
        }

        public boolean equals( Object other )
        {
            if ( other == null ) { return false; }
            if ( !(other instanceof Join) ) { return false; }

            Join that = (Join) other;

            if ( !this.strategy.getOperatorSymbol().equals( that.strategy.getOperatorSymbol() ) ) { return false; }

            return this.leftChild.equals( that.leftChild) && this.rightChild.equals( that.rightChild );
        }

        /**
         * Bug fix: equals() was overridden without hashCode(), violating the
         * Object contract. Combines the same fields that equals() compares.
         */
        public int hashCode()
        {
            int result = strategy.getOperatorSymbol().hashCode();
            result = 31 * result + leftChild.hashCode();
            result = 31 * result + rightChild.hashCode();
            return result;
        }
    }

    /** Generic plan for row sources we don't understand */
    public static class DeadEnd extends OptimizerPlan
    {
        private String _name;

        public DeadEnd( String name )
        {
            _name = name;
        }

        // A DeadEnd needs no binding and is always considered bound.
        public void bind
            (
             DataDictionary dataDictionary,
             LanguageConnectionContext lcc,
             CompilerContext cc
             )
            throws StandardException
        {}

        public boolean isBound() { return true; }

        public int countLeafNodes() { return 1; }

        public OptimizerPlan leftmostLeaf() { return this; }

        public boolean isLeftPrefixOf( OptimizerPlan that )
        {
            // DeadEnd does not override equals(), so this is identity
            // comparison against the other plan's leftmost leaf.
            return this.equals( that.leftmostLeaf() );
        }

        public String toString() { return _name; }
    }

    /** Leaf node: a schema-qualified row source (conglomerate or table function). */
    public abstract static class RowSource<D extends UniqueTupleDescriptor> extends OptimizerPlan
    {
        protected String _schemaName;
        protected String _rowSourceName;

        protected SchemaDescriptor _schema;
        protected D _descriptor;

        public RowSource( String schemaName, String rowSourceName )
        {
            _schemaName = schemaName;
            _rowSourceName = rowSourceName;
        }

        protected RowSource() {}

        /** Get the UniqueTupleDescriptor bound to this RowSource */
        public D getDescriptor() { return _descriptor; }

        public void bind
            (
             DataDictionary dataDictionary,
             LanguageConnectionContext lcc,
             CompilerContext cc
             )
            throws StandardException
        {
            // bind the schema name
            if ( _schema == null )
            {
                _schema = StatementUtil.getSchemaDescriptor( _schemaName, true, dataDictionary, lcc, cc );
                _schemaName = _schema.getSchemaName();
            }
        }

        public boolean isBound() { return (_descriptor != null); }

        public int countLeafNodes() { return 1; }

        public OptimizerPlan leftmostLeaf() { return this; }

        public boolean isLeftPrefixOf( OptimizerPlan that )
        {
            return this.equals( that.leftmostLeaf() );
        }

        public String toString()
        {
            return IdUtil.mkQualifiedName( _schemaName, _rowSourceName );
        }

        // NOTE(review): two unbound RowSources are never equal, not even to
        // themselves; kept as-is since callers may rely on it.
        public boolean equals( Object other )
        {
            if ( other == null ) { return false; }
            if ( other.getClass() != this.getClass() ) { return false; }

            RowSource that = (RowSource) other;

            if ( !( this.isBound() && that.isBound() ) ) { return false; }

            return this._schemaName.equals( that._schemaName ) && this._rowSourceName.equals( that._rowSourceName );
        }

        /**
         * Bug fix: equals() was overridden without hashCode(), violating the
         * Object contract. Hashes the class plus the names compared by
         * equals(), so equal objects always hash alike.
         */
        public int hashCode()
        {
            int result = getClass().hashCode();
            result = 31 * result + ( (_schemaName == null) ? 0 : _schemaName.hashCode() );
            result = 31 * result + ( (_rowSourceName == null) ? 0 : _rowSourceName.hashCode() );
            return result;
        }
    }

    /** Leaf node backed by a conglomerate (table or index). */
    public static final class ConglomerateRS extends RowSource<ConglomerateDescriptor>
    {
        public ConglomerateRS( String schemaName, String rowSourceName ) { super( schemaName, rowSourceName ); }

        public ConglomerateRS( ConglomerateDescriptor cd, DataDictionary dataDictionary )
            throws StandardException
        {
            _descriptor = cd;
            _schema = dataDictionary.getSchemaDescriptor( cd.getSchemaID(), null );
            _schemaName = _schema.getSchemaName();
            _rowSourceName = cd.getConglomerateName();
        }

        public void bind
            (
             DataDictionary dataDictionary,
             LanguageConnectionContext lcc,
             CompilerContext cc
             )
            throws StandardException
        {
            super.bind( dataDictionary, lcc, cc );

            if ( _descriptor == null )
            {
                _descriptor = dataDictionary.getConglomerateDescriptor( _rowSourceName, _schema, false );
            }

            if ( _descriptor == null )
            {
                throw StandardException.newException
                    ( SQLState.LANG_INDEX_NOT_FOUND, _schemaName + "." + _rowSourceName );
            }
        }
    }

    /** Leaf node backed by a table function. */
    public static final class TableFunctionRS extends RowSource<AliasDescriptor>
    {
        public TableFunctionRS( String schemaName, String rowSourceName ) { super( schemaName, rowSourceName ); }

        public TableFunctionRS( AliasDescriptor ad )
        {
            _descriptor = ad;
            _schemaName = ad.getSchemaName();
            _rowSourceName = ad.getName();
        }

        public void bind
            (
             DataDictionary dataDictionary,
             LanguageConnectionContext lcc,
             CompilerContext cc
             )
            throws StandardException
        {
            super.bind( dataDictionary, lcc, cc );

            if ( _descriptor == null )
            {
                _descriptor = dataDictionary.getAliasDescriptor
                    ( _schema.getUUID().toString(), _rowSourceName, AliasInfo.ALIAS_NAME_SPACE_FUNCTION_AS_CHAR );
            }

            if ( _descriptor == null )
            {
                throw StandardException.newException
                    (
                     SQLState.LANG_OBJECT_NOT_FOUND,
                     AliasDescriptor.getAliasType( AliasInfo.ALIAS_TYPE_FUNCTION_AS_CHAR ),
                     _schemaName + "." + _rowSourceName
                     );
            }
        }

        public String toString() { return super.toString() + "()"; }
    }
}
| apache/derby | java/org.apache.derby.engine/org/apache/derby/iapi/sql/compile/OptimizerPlan.java | Java | apache-2.0 | 12,716 |
# Copyright (c) 2011-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import fcntl
import os
import json
class NullResource(object):
    """No-op stand-in for a resource lock.

    Offers the same context-manager / acquire / remove interface as
    ResourceFile but never locks or owns anything: ``owned`` stays False
    and every method is a no-op.
    """

    def __init__(self, *args, **kwargs):
        # Accept (and ignore) whatever a real resource would take.
        self.owned = False

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, trace):
        pass

    def acquire(self, info):
        pass

    def remove(self):
        pass
class LockFile(object):
    """Manages locking and unlocking an open file handle.

    Accepts either an already-open file descriptor or a path (which is
    opened, creating parent directories as needed).  Can also be used as
    a context manager: entering flocks the fd, exiting unflocks it.
    """

    def __init__(self, fd, lock_operation=fcntl.LOCK_EX,
                 unlock_operation=fcntl.LOCK_UN):
        self.fd = fd
        self.file_name = None
        if not isinstance(fd, int):
            # 'fd' is a path, not a descriptor: open (and create) the file.
            self.fd = self.open(fd)
            self.file_name = fd
        self.lock_operation = lock_operation
        self.unlock_operation = unlock_operation

    def __enter__(self):
        self.lock(self.lock_operation)
        return self

    def __exit__(self, exc_type, exc_value, trace):
        self.unlock(self.unlock_operation)
        return False

    def lock(self, operation=fcntl.LOCK_EX):
        fcntl.flock(self.fd, operation)

    def unlock(self, operation=fcntl.LOCK_UN):
        fcntl.flock(self.fd, operation)

    def write(self, data):
        # Replace the entire file contents with 'data' (bytes) and flush
        # to disk so other lock holders see the update.
        os.lseek(self.fd, 0, os.SEEK_SET)
        os.ftruncate(self.fd, 0)
        os.write(self.fd, data)
        os.fsync(self.fd)

    def read(self):
        # Return the whole file contents as bytes.
        size = os.lseek(self.fd, 0, os.SEEK_END)
        os.lseek(self.fd, 0, os.SEEK_SET)
        return os.read(self.fd, size)

    def close(self):
        try:
            os.close(self.fd)
        except (TypeError, OSError):
            # Bug fix: the original 'except TypeError, OSError:' (Python 2
            # syntax) caught only TypeError and bound it to the name
            # OSError.  Both must be swallowed so close() is idempotent
            # (self.fd may already be None or closed).
            pass
        self.fd = None

    def unlink(self):
        # Close the fd and best-effort remove the backing file.
        # NOTE(review): file_name is None when constructed from a raw fd;
        # presumably unlink() is only called on path-constructed instances.
        self.close()
        try:
            os.unlink(self.file_name)
        except OSError:
            pass

    def _createdir(self, file_name):
        # Create the parent directory of 'file_name'; ok if it exists.
        try:
            dir_name = os.path.dirname(file_name)
            os.makedirs(dir_name)
        except OSError as e:
            # ignore if already exists
            if e.errno != errno.EEXIST:
                raise

    def open(self, file_name):
        # Open (creating if needed) 'file_name', retrying once after
        # creating its missing parent directory.
        for i in range(0, 2):
            try:
                # Attempt to create the file
                return os.open(file_name, os.O_RDWR | os.O_CREAT)
            except OSError as e:
                # No such file or directory
                if e.errno == errno.ENOENT:
                    # create the dir and try again
                    self._createdir(file_name)
                    continue
                # Unknown error
                raise
        raise RuntimeError("failed to create '%s'" % file_name)
class JsonLockFile(LockFile):
    """A LockFile whose contents are a single JSON object (a dict)."""

    def update(self, info):
        # Merge the dict 'info' into the stored JSON object.
        data = self.read()
        data.update(info)
        self.write(data)

    def get(self, key, default=None):
        # Return data[key], or 'default' when the key is absent.
        try:
            data = self.read()
            return data[key]
        except KeyError:
            return default

    def write(self, data):
        # Serialize to JSON; encode so the byte-oriented os.write() in
        # LockFile.write() also works on Python 3 (no-op on ASCII Python 2
        # strings, and json.dumps emits ASCII by default).
        super(JsonLockFile, self).write(json.dumps(data).encode('utf-8'))

    def read(self):
        try:
            return json.loads(super(JsonLockFile, self).read())
        except ValueError:
            # Bug fix: was 'except ValueError, e:' (Python 2-only syntax)
            # with 'e' unused.  Empty or corrupt files read as {}.
            return {}
class ResourceFile(JsonLockFile):
    """Manages ownership of a resource file.

    Context manager: entering opens and flocks the file, exiting unflocks
    and closes it.  Ownership is recorded as JSON pid information.
    """

    def __init__(self, file_name):
        self.file_name = file_name
        self.owned = False
        self.fd = None

    def __enter__(self):
        self.fd = self.open(self.file_name)
        super(ResourceFile, self).lock()
        return self

    def __exit__(self, exc_type, exc_value, trace):
        super(ResourceFile, self).unlock()
        self.close()
        return False

    def used(self):
        """ Returns true if the resource file is in use by someone """
        info = self.read()
        # If pid is alive, the volume is owned by someone else
        if 'pid' in info and self.alive(info['pid']):
            return info
        return False

    def alive(self, pid):
        # Signal 0 probes for process existence without delivering a signal.
        try:
            os.kill(pid, 0)
            return True
        except OSError:
            # Bug fix: was 'except OSError, e:' (Python 2-only syntax)
            # with 'e' unused.
            return False

    def acquire(self, info):
        """ Acquire ownership of the file by writing our pid information """
        self.update(info)
        if 'pid' in info:
            # We own the resource
            self.owned = True

    def remove(self):
        # Only the owner may delete the backing file.
        if self.owned:
            self.unlink()
| rackerlabs/lunr | lunr/common/lock.py | Python | apache-2.0 | 5,070 |
// Copyright (c) 2017, TIG All rights reserved.
// Use of this source code is governed by a Apache License 2.0 that can be found in the LICENSE file.
package main
import (
"flag"
"fmt"
"os"
"runtime"
"strings"
"github.com/tiglabs/containerfs/datanode"
"github.com/tiglabs/containerfs/logger"
"github.com/tiglabs/containerfs/utils"
)
// init parses command-line flags, validates the VolMgr host list,
// configures logging, and registers this data node with the VolMgr
// cluster before main() starts serving.
func init() {
	var loglevel string
	var volMgrHosts string

	flag.StringVar(&datanode.DtAddr.Host, "host", "127.0.0.1:8801", "ContainerFS DataNode Host")
	flag.StringVar(&datanode.DtAddr.Tier, "tier", "sas", "ContainerFS DataNode Storage Medium")
	flag.StringVar(&datanode.DtAddr.Path, "datapath", "", "ContainerFS DataNode Data Path")
	flag.StringVar(&datanode.DtAddr.Log, "logpath", "/export/Logs/containerfs/logs/", "ContainerFS Log Path")
	flag.StringVar(&loglevel, "loglevel", "error", "ContainerFS Log Level")
	flag.StringVar(&volMgrHosts, "volmgr", "10.8.64.216,10.8.64.217,10.8.64.218", "ContainerFS VolMgr hosts")
	flag.Parse()

	if len(os.Args) >= 2 && (os.Args[1] == "version") {
		fmt.Println(utils.Version())
		os.Exit(0)
	}

	tmp := strings.Split(volMgrHosts, ",")
	// Robustness fix: tmp[1]/tmp[2] were indexed blindly, so a -volmgr
	// value with fewer than three hosts crashed with an index-out-of-range
	// panic; exit with a clear message instead.
	if len(tmp) < 3 {
		fmt.Println("invalid -volmgr value, expected three comma-separated hosts:", volMgrHosts)
		os.Exit(1)
	}
	datanode.VolMgrHosts = make([]string, 3)
	datanode.VolMgrHosts[0] = tmp[0] + ":7703"
	datanode.VolMgrHosts[1] = tmp[1] + ":7713"
	datanode.VolMgrHosts[2] = tmp[2] + ":7723"

	// Marker file used to detect whether this node already registered.
	datanode.DtAddr.Flag = datanode.DtAddr.Path + "/.registryflag"

	logger.SetConsole(true)
	logger.SetRollingFile(datanode.DtAddr.Log, "datanode.log", 10, 100, logger.MB) //each 100M rolling

	switch loglevel {
	case "error":
		logger.SetLevel(logger.ERROR)
	case "debug":
		logger.SetLevel(logger.DEBUG)
	case "info":
		logger.SetLevel(logger.INFO)
	default:
		logger.SetLevel(logger.ERROR)
	}

	// The data path must already exist; refuse to start otherwise.
	_, err := os.Stat(datanode.DtAddr.Path)
	if err != nil {
		logger.Error("data node statup failed : datanode.DtAddr.Path not exist !")
		os.Exit(1)
	}

	datanode.RegistryToVolMgr()
}
// main pins GOMAXPROCS to the CPU count and runs the datanode service.
// All flag parsing, validation and VolMgr registration happen in init()
// before this function is entered.
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	datanode.StartDataService()
}
| ipdcode/containerfs | cmd/datanode/main.go | GO | apache-2.0 | 1,965 |
package leafnodes_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/internal/leafnodes"
. "github.com/onsi/gomega"
"reflect"
"runtime"
"time"
"github.com/onsi/ginkgo/internal/codelocation"
Failer "github.com/onsi/ginkgo/internal/failer"
"github.com/onsi/ginkgo/types"
)
// runnable abstracts the leaf-node types exercised by these shared behaviors
// (It, Measure, BeforeEach, ...): anything that can be executed once and
// reports the source location it was defined at.
type runnable interface {
	Run() (outcome types.SpecState, failure types.SpecFailure)
	CodeLocation() types.CodeLocation
}
// SynchronousSharedRunnerBehaviors registers the specs shared by every leaf
// node whose body is a plain (no-argument) function: a passing body, a body
// that fails through the Failer, and a body that panics. build constructs
// the node under test; componentType and componentIndex are the values the
// node is expected to propagate into any SpecFailure it reports.
func SynchronousSharedRunnerBehaviors(build func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable, componentType types.SpecComponentType, componentIndex int) {
	var (
		outcome types.SpecState
		failure types.SpecFailure

		failer                *Failer.Failer
		componentCodeLocation types.CodeLocation
		innerCodeLocation     types.CodeLocation

		didRun bool
	)

	BeforeEach(func() {
		failer = Failer.New()
		componentCodeLocation = codelocation.New(0)
		innerCodeLocation = codelocation.New(0)
		didRun = false
	})

	Describe("synchronous functions", func() {
		Context("when the function passes", func() {
			BeforeEach(func() {
				outcome, failure = build(func() {
					didRun = true
				}, 0, failer, componentCodeLocation).Run()
			})

			It("should have a successful outcome", func() {
				Ω(didRun).Should(BeTrue())

				Ω(outcome).Should(Equal(types.SpecStatePassed))
				Ω(failure).Should(BeZero())
			})
		})

		Context("when a failure occurs", func() {
			BeforeEach(func() {
				outcome, failure = build(func() {
					didRun = true
					failer.Fail("bam", innerCodeLocation)
					// The Failer's recorded failure must win over the panic.
					panic("should not matter")
				}, 0, failer, componentCodeLocation).Run()
			})

			It("should return the failure", func() {
				Ω(didRun).Should(BeTrue())

				Ω(outcome).Should(Equal(types.SpecStateFailed))
				Ω(failure).Should(Equal(types.SpecFailure{
					Message:               "bam",
					Location:              innerCodeLocation,
					ForwardedPanic:        nil,
					ComponentIndex:        componentIndex,
					ComponentType:         componentType,
					ComponentCodeLocation: componentCodeLocation,
				}))
			})
		})

		Context("when a panic occurs", func() {
			BeforeEach(func() {
				outcome, failure = build(func() {
					didRun = true
					// Capture the location just before panicking; the
					// expectation below bumps LineNumber to the panic line.
					innerCodeLocation = codelocation.New(0)
					panic("ack!")
				}, 0, failer, componentCodeLocation).Run()
			})

			It("should return the panic", func() {
				Ω(didRun).Should(BeTrue())

				Ω(outcome).Should(Equal(types.SpecStatePanicked))
				innerCodeLocation.LineNumber++
				Ω(failure).Should(Equal(types.SpecFailure{
					Message:               "Test Panicked",
					Location:              innerCodeLocation,
					ForwardedPanic:        "ack!",
					ComponentIndex:        componentIndex,
					ComponentType:         componentType,
					ComponentCodeLocation: componentCodeLocation,
				}))
			})
		})
	})
}
// AsynchronousSharedRunnerBehaviors registers the specs shared by every leaf
// node whose body takes a Done channel: the body must run in its own
// goroutine, and passing, failing, timing out, and panicking bodies must
// produce the corresponding outcome. build constructs the node under test;
// componentType and componentIndex are the values expected in any
// SpecFailure the node reports.
func AsynchronousSharedRunnerBehaviors(build func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable, componentType types.SpecComponentType, componentIndex int) {
	var (
		outcome types.SpecState
		failure types.SpecFailure

		failer                *Failer.Failer
		componentCodeLocation types.CodeLocation
		innerCodeLocation     types.CodeLocation

		didRun bool
	)

	BeforeEach(func() {
		failer = Failer.New()
		componentCodeLocation = codelocation.New(0)
		innerCodeLocation = codelocation.New(0)
		didRun = false
	})

	Describe("asynchronous functions", func() {
		var timeoutDuration time.Duration

		BeforeEach(func() {
			timeoutDuration = time.Duration(1 * float64(time.Second))
		})

		Context("when running", func() {
			It("should run the function as a goroutine, and block until it's done", func() {
				initialNumberOfGoRoutines := runtime.NumGoroutine()
				numberOfGoRoutines := 0

				build(func(done Done) {
					didRun = true
					numberOfGoRoutines = runtime.NumGoroutine()
					close(done)
				}, timeoutDuration, failer, componentCodeLocation).Run()

				Ω(didRun).Should(BeTrue())
				// At least one extra goroutine must exist while the body runs.
				Ω(numberOfGoRoutines).Should(BeNumerically(">=", initialNumberOfGoRoutines+1))
			})
		})

		Context("when the function passes", func() {
			BeforeEach(func() {
				outcome, failure = build(func(done Done) {
					didRun = true
					close(done)
				}, timeoutDuration, failer, componentCodeLocation).Run()
			})

			It("should have a successful outcome", func() {
				Ω(didRun).Should(BeTrue())
				Ω(outcome).Should(Equal(types.SpecStatePassed))
				Ω(failure).Should(BeZero())
			})
		})

		Context("when the function fails", func() {
			BeforeEach(func() {
				outcome, failure = build(func(done Done) {
					didRun = true
					failer.Fail("bam", innerCodeLocation)
					// Outlive the 10ms timeout; the recorded failure must
					// still win over both the timeout and the panic.
					time.Sleep(20 * time.Millisecond)
					panic("doesn't matter")
					close(done)
				}, 10*time.Millisecond, failer, componentCodeLocation).Run()
			})

			It("should return the failure", func() {
				Ω(didRun).Should(BeTrue())

				Ω(outcome).Should(Equal(types.SpecStateFailed))
				Ω(failure).Should(Equal(types.SpecFailure{
					Message:               "bam",
					Location:              innerCodeLocation,
					ForwardedPanic:        nil,
					ComponentIndex:        componentIndex,
					ComponentType:         componentType,
					ComponentCodeLocation: componentCodeLocation,
				}))
			})
		})

		Context("when the function times out", func() {
			BeforeEach(func() {
				outcome, failure = build(func(done Done) {
					didRun = true
					time.Sleep(20 * time.Millisecond)
					panic("doesn't matter")
					close(done)
				}, 10*time.Millisecond, failer, componentCodeLocation).Run()
			})

			It("should return the timeout", func() {
				Ω(didRun).Should(BeTrue())

				Ω(outcome).Should(Equal(types.SpecStateTimedOut))
				Ω(failure).Should(Equal(types.SpecFailure{
					Message:               "Timed out",
					Location:              componentCodeLocation,
					ForwardedPanic:        nil,
					ComponentIndex:        componentIndex,
					ComponentType:         componentType,
					ComponentCodeLocation: componentCodeLocation,
				}))
			})
		})

		Context("when the function panics", func() {
			BeforeEach(func() {
				outcome, failure = build(func(done Done) {
					didRun = true
					// Capture the location just before panicking; the
					// expectation below bumps LineNumber to the panic line.
					innerCodeLocation = codelocation.New(0)
					panic("ack!")
				}, 100*time.Millisecond, failer, componentCodeLocation).Run()
			})

			It("should return the panic", func() {
				Ω(didRun).Should(BeTrue())

				Ω(outcome).Should(Equal(types.SpecStatePanicked))
				innerCodeLocation.LineNumber++
				Ω(failure).Should(Equal(types.SpecFailure{
					Message:               "Test Panicked",
					Location:              innerCodeLocation,
					ForwardedPanic:        "ack!",
					ComponentIndex:        componentIndex,
					ComponentType:         componentType,
					ComponentCodeLocation: componentCodeLocation,
				}))
			})
		})
	})
}
// InvalidSharedRunnerBehaviors registers the specs shared by every leaf node
// constructor for invalid bodies: passing a non-function, a function with a
// wrongly-typed argument, or a function with too many arguments must panic
// at construction time. The unused innerCodeLocation variable present in the
// sibling behavior functions has been dropped here: nothing in these specs
// reads it.
func InvalidSharedRunnerBehaviors(build func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable, componentType types.SpecComponentType) {
	var (
		failer                *Failer.Failer
		componentCodeLocation types.CodeLocation
	)

	BeforeEach(func() {
		failer = Failer.New()
		componentCodeLocation = codelocation.New(0)
	})

	Describe("invalid functions", func() {
		Context("when passed something that's not a function", func() {
			It("should panic", func() {
				Ω(func() {
					build("not a function", 0, failer, componentCodeLocation)
				}).Should(Panic())
			})
		})

		Context("when the function takes the wrong kind of argument", func() {
			It("should panic", func() {
				Ω(func() {
					build(func(oops string) {}, 0, failer, componentCodeLocation)
				}).Should(Panic())
			})
		})

		Context("when the function takes more than one argument", func() {
			It("should panic", func() {
				Ω(func() {
					build(func(done Done, oops string) {}, 0, failer, componentCodeLocation)
				}).Should(Panic())
			})
		})
	})
}
// Exercise the shared behaviors against each concrete node constructor.
// Every node is built with component index 3 so the specs can assert that
// the index is propagated into failures.
var _ = Describe("Shared RunnableNode behavior", func() {
	Describe("It Nodes", func() {
		build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
			return NewItNode("", body, types.FlagTypeFocused, componentCodeLocation, timeout, failer, 3)
		}

		SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeIt, 3)
		AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeIt, 3)
		InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeIt)
	})

	Describe("Measure Nodes", func() {
		build := func(body interface{}, _ time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
			// Measure bodies take a Benchmarker, so wrap the plain test body
			// to match; the timeout argument is ignored (measure nodes only
			// run synchronously).
			return NewMeasureNode("", func(Benchmarker) {
				reflect.ValueOf(body).Call([]reflect.Value{})
			}, types.FlagTypeFocused, componentCodeLocation, 10, failer, 3)
		}

		SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeMeasure, 3)
	})

	Describe("BeforeEach Nodes", func() {
		build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
			return NewBeforeEachNode(body, componentCodeLocation, timeout, failer, 3)
		}

		SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeBeforeEach, 3)
		AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeBeforeEach, 3)
		InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeBeforeEach)
	})

	Describe("AfterEach Nodes", func() {
		build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
			return NewAfterEachNode(body, componentCodeLocation, timeout, failer, 3)
		}

		SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeAfterEach, 3)
		AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeAfterEach, 3)
		InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeAfterEach)
	})

	Describe("JustBeforeEach Nodes", func() {
		build := func(body interface{}, timeout time.Duration, failer *Failer.Failer, componentCodeLocation types.CodeLocation) runnable {
			return NewJustBeforeEachNode(body, componentCodeLocation, timeout, failer, 3)
		}

		SynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeJustBeforeEach, 3)
		AsynchronousSharedRunnerBehaviors(build, types.SpecComponentTypeJustBeforeEach, 3)
		InvalidSharedRunnerBehaviors(build, types.SpecComponentTypeJustBeforeEach)
	})
})
| starkandwayne/cf-cli | Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/shared_runner_test.go | GO | apache-2.0 | 10,464 |
// Copyright 2016-2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package policy
import (
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
)
var (
	log = logging.DefaultLogger.WithField(logfields.LogSubsys, "policy")

	mutex        lock.RWMutex // Protects enablePolicy
	enablePolicy string       // Policy enforcement mode string; see SetPolicyEnabled for the valid values.
)
// SetPolicyEnabled sets the policy enablement configuration. Valid values are:
// - endpoint.AlwaysEnforce
// - endpoint.NeverEnforce
// - endpoint.DefaultEnforcement
func SetPolicyEnabled(val string) {
	mutex.Lock()
	defer mutex.Unlock()
	enablePolicy = val
}
// GetPolicyEnabled returns the policy enablement configuration
func GetPolicyEnabled() string {
	mutex.RLock()
	defer mutex.RUnlock()
	return enablePolicy
}
// AddOptions are options which can be passed to PolicyAdd
type AddOptions struct {
	// Replace if true indicates that existing rules with identical labels should be replaced
	Replace bool
	// ReplaceWithLabels if present indicates that existing rules with the
	// given LabelArray should be deleted.
	ReplaceWithLabels labels.LabelArray
	// Generated should be set to true to signal that the policy being
	// inserted was generated by cilium-agent itself, e.g. by the DNS poller.
	Generated bool
	// The source of this policy, one of api, fqdn or k8s
	Source string
}
| tgraf/cilium | pkg/policy/config.go | GO | apache-2.0 | 1,981 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.sort;
import org.apache.lucene.spatial.util.GeoHashUtils;
import org.elasticsearch.Version;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.geo.GeoDistance;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.query.GeoDistanceQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
import org.elasticsearch.test.VersionUtils;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.geoDistanceQuery;
import static org.elasticsearch.index.query.QueryBuilders.geoDistanceRangeQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.equalTo;
public class GeoDistanceIT extends ESIntegTestCase {
    /**
     * Installs {@link InternalSettingsPlugin} on test nodes so the tests
     * below can set internal-only index settings (the index creation
     * version).
     */
    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return pluginList(InternalSettingsPlugin.class);
    }
    /**
     * Indexes seven New York landmarks as geo_points and checks geo_distance
     * queries, geo_distance_range queries (with and without the "indexed"
     * optimize_bbox), and ascending/descending geo distance sorting against
     * the real-world distances noted in km on each document.
     */
    public void testSimpleDistance() throws Exception {
        Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
        Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
        XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
                .startObject("location").field("type", "geo_point");
        if (version.before(Version.V_2_2_0)) {
            // Pre-2.2 mappings use the lat_lon sub-field option.
            xContentBuilder.field("lat_lon", true);
        }
        xContentBuilder.endObject().endObject().endObject().endObject();
        assertAcked(prepareCreate("test").setSettings(settings).addMapping("type1", xContentBuilder));
        ensureGreen();
        indexRandom(true,
                client().prepareIndex("test", "type1", "1")
                        .setSource(jsonBuilder().startObject().field("name", "New York").startObject("location").field("lat", 40.7143528)
                                .field("lon", -74.0059731).endObject().endObject()),
                // to NY: 5.286 km
                client().prepareIndex("test", "type1", "2")
                        .setSource(jsonBuilder().startObject().field("name", "Times Square").startObject("location").field("lat", 40.759011)
                                .field("lon", -73.9844722).endObject().endObject()),
                // to NY: 0.4621 km
                client().prepareIndex("test", "type1", "3")
                        .setSource(jsonBuilder().startObject().field("name", "Tribeca").startObject("location").field("lat", 40.718266)
                                .field("lon", -74.007819).endObject().endObject()),
                // to NY: 1.055 km
                client().prepareIndex("test", "type1", "4")
                        .setSource(jsonBuilder().startObject().field("name", "Wall Street").startObject("location").field("lat", 40.7051157)
                                .field("lon", -74.0088305).endObject().endObject()),
                // to NY: 1.258 km
                client().prepareIndex("test", "type1", "5")
                        .setSource(jsonBuilder().startObject().field("name", "Soho").startObject("location").field("lat", 40.7247222)
                                .field("lon", -74).endObject().endObject()),
                // to NY: 2.029 km
                client().prepareIndex("test", "type1", "6")
                        .setSource(jsonBuilder().startObject().field("name", "Greenwich Village").startObject("location")
                                .field("lat", 40.731033).field("lon", -73.9962255).endObject().endObject()),
                // to NY: 8.572 km
                client().prepareIndex("test", "type1", "7").setSource(jsonBuilder().startObject().field("name", "Brooklyn")
                        .startObject("location").field("lat", 40.65).field("lon", -73.95).endObject().endObject()));

        // 3km radius: everything except Times Square (5.286km) and Brooklyn (8.572km).
        SearchResponse searchResponse = client().prepareSearch() // from NY
                .setQuery(geoDistanceQuery("location").distance("3km").point(40.7143528, -74.0059731)).execute().actionGet();
        assertHitCount(searchResponse, 5);
        assertThat(searchResponse.getHits().hits().length, equalTo(5));
        for (SearchHit hit : searchResponse.getHits()) {
            assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5"), equalTo("6")));
        }
        // Same radius with the "indexed" bbox optimization must match the same docs.
        searchResponse = client().prepareSearch() // from NY
                .setQuery(geoDistanceQuery("location").distance("3km").point(40.7143528, -74.0059731).optimizeBbox("indexed")).execute()
                .actionGet();
        assertHitCount(searchResponse, 5);
        assertThat(searchResponse.getHits().hits().length, equalTo(5));
        for (SearchHit hit : searchResponse.getHits()) {
            assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5"), equalTo("6")));
        }
        // now with a PLANE type
        searchResponse = client().prepareSearch() // from NY
                .setQuery(geoDistanceQuery("location").distance("3km").geoDistance(GeoDistance.PLANE).point(40.7143528, -74.0059731))
                .execute().actionGet();
        assertHitCount(searchResponse, 5);
        assertThat(searchResponse.getHits().hits().length, equalTo(5));
        for (SearchHit hit : searchResponse.getHits()) {
            assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5"), equalTo("6")));
        }
        // factor type is really too small for this resolution

        // 2km radius additionally excludes Greenwich Village (2.029km).
        searchResponse = client().prepareSearch() // from NY
                .setQuery(geoDistanceQuery("location").distance("2km").point(40.7143528, -74.0059731)).execute().actionGet();
        assertHitCount(searchResponse, 4);
        assertThat(searchResponse.getHits().hits().length, equalTo(4));
        for (SearchHit hit : searchResponse.getHits()) {
            assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
        }
        searchResponse = client().prepareSearch() // from NY
                .setQuery(geoDistanceQuery("location").distance("2km").point(40.7143528, -74.0059731).optimizeBbox("indexed")).execute()
                .actionGet();
        assertHitCount(searchResponse, 4);
        assertThat(searchResponse.getHits().hits().length, equalTo(4));
        for (SearchHit hit : searchResponse.getHits()) {
            assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
        }

        // 1.242 miles is roughly 2km; same four documents expected.
        searchResponse = client().prepareSearch() // from NY
                .setQuery(geoDistanceQuery("location").distance("1.242mi").point(40.7143528, -74.0059731)).execute().actionGet();
        assertHitCount(searchResponse, 4);
        assertThat(searchResponse.getHits().hits().length, equalTo(4));
        for (SearchHit hit : searchResponse.getHits()) {
            assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
        }
        searchResponse = client().prepareSearch() // from NY
                .setQuery(geoDistanceQuery("location").distance("1.242mi").point(40.7143528, -74.0059731).optimizeBbox("indexed")).execute()
                .actionGet();
        assertHitCount(searchResponse, 4);
        assertThat(searchResponse.getHits().hits().length, equalTo(4));
        for (SearchHit hit : searchResponse.getHits()) {
            assertThat(hit.id(), anyOf(equalTo("1"), equalTo("3"), equalTo("4"), equalTo("5")));
        }

        // Range 1km-2km: only Wall Street (1.055km) and Soho (1.258km).
        searchResponse = client().prepareSearch() // from NY
                .setQuery(geoDistanceRangeQuery("location", 40.7143528, -74.0059731).from("1.0km").to("2.0km")).execute().actionGet();
        assertHitCount(searchResponse, 2);
        assertThat(searchResponse.getHits().hits().length, equalTo(2));
        for (SearchHit hit : searchResponse.getHits()) {
            assertThat(hit.id(), anyOf(equalTo("4"), equalTo("5")));
        }
        searchResponse = client().prepareSearch() // from NY
                .setQuery(geoDistanceRangeQuery("location", 40.7143528, -74.0059731).from("1.0km").to("2.0km").optimizeBbox("indexed"))
                .execute().actionGet();
        assertHitCount(searchResponse, 2);
        assertThat(searchResponse.getHits().hits().length, equalTo(2));
        for (SearchHit hit : searchResponse.getHits()) {
            assertThat(hit.id(), anyOf(equalTo("4"), equalTo("5")));
        }

        // Open-ended ranges: up to 2km (4 docs), from 2km outwards (3 docs).
        searchResponse = client().prepareSearch() // from NY
                .setQuery(geoDistanceRangeQuery("location", 40.7143528, -74.0059731).to("2.0km")).execute().actionGet();
        assertHitCount(searchResponse, 4);
        assertThat(searchResponse.getHits().hits().length, equalTo(4));

        searchResponse = client().prepareSearch() // from NY
                .setQuery(geoDistanceRangeQuery("location", 40.7143528, -74.0059731).from("2.0km")).execute().actionGet();
        assertHitCount(searchResponse, 3);
        assertThat(searchResponse.getHits().hits().length, equalTo(3));

        // SORTING

        // Ascending distance from NY, then descending (exact reverse order).
        searchResponse = client().prepareSearch().setQuery(matchAllQuery())
                .addSort(SortBuilders.geoDistanceSort("location", 40.7143528, -74.0059731).order(SortOrder.ASC)).execute()
                .actionGet();

        assertHitCount(searchResponse, 7);
        assertOrderedSearchHits(searchResponse, "1", "3", "4", "5", "6", "2", "7");

        searchResponse = client().prepareSearch().setQuery(matchAllQuery())
                .addSort(SortBuilders.geoDistanceSort("location", 40.7143528, -74.0059731).order(SortOrder.DESC)).execute()
                .actionGet();

        assertHitCount(searchResponse, 7);
        assertOrderedSearchHits(searchResponse, "7", "2", "6", "5", "4", "3", "1");
    }
    /**
     * Exercises geo distance sorting over multi-valued geo_point fields:
     * each sort mode (default/min, max, avg) must pick the right value per
     * document, in both sort orders, and the unsupported "sum" mode must be
     * rejected. Document 2 has out-of-range coordinates and relies on
     * coerce/ignore_malformed in the mapping.
     */
    public void testDistanceSortingMVFields() throws Exception {
        Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
        Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
        XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
                .startObject("locations").field("type", "geo_point");
        if (version.before(Version.V_2_2_0)) {
            // Pre-2.2 mappings use lat_lon and the coerce option.
            xContentBuilder.field("lat_lon", true).field("coerce", true);
        }
        xContentBuilder.field("ignore_malformed", true).endObject().endObject().endObject().endObject();
        assertAcked(prepareCreate("test").setSettings(settings).addMapping("type1", xContentBuilder));
        ensureGreen();

        client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("names", "New York")
                .startObject("locations").field("lat", 40.7143528).field("lon", -74.0059731).endObject().endObject()).execute().actionGet();

        // Deliberately out-of-range lat/lon; coercion wraps them near NY.
        client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("names", "New York 2")
                .startObject("locations").field("lat", 400.7143528).field("lon", 285.9990269).endObject().endObject()).execute()
                .actionGet();

        client().prepareIndex("test", "type1", "3")
                .setSource(jsonBuilder().startObject().field("names", "Times Square", "Tribeca").startArray("locations")
                        // to NY: 5.286 km
                        .startObject().field("lat", 40.759011).field("lon", -73.9844722).endObject()
                        // to NY: 0.4621 km
                        .startObject().field("lat", 40.718266).field("lon", -74.007819).endObject().endArray().endObject())
                .execute().actionGet();

        client().prepareIndex("test", "type1", "4")
                .setSource(jsonBuilder().startObject().field("names", "Wall Street", "Soho").startArray("locations")
                        // to NY: 1.055 km
                        .startObject().field("lat", 40.7051157).field("lon", -74.0088305).endObject()
                        // to NY: 1.258 km
                        .startObject().field("lat", 40.7247222).field("lon", -74).endObject().endArray().endObject())
                .execute().actionGet();

        client().prepareIndex("test", "type1", "5")
                .setSource(jsonBuilder().startObject().field("names", "Greenwich Village", "Brooklyn").startArray("locations")
                        // to NY: 2.029 km
                        .startObject().field("lat", 40.731033).field("lon", -73.9962255).endObject()
                        // to NY: 8.572 km
                        .startObject().field("lat", 40.65).field("lon", -73.95).endObject().endArray().endObject())
                .execute().actionGet();

        client().admin().indices().prepareRefresh().execute().actionGet();

        // Order: Asc (default mode takes the closest point per document)
        SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
                .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC)).execute()
                .actionGet();

        assertHitCount(searchResponse, 5);
        assertOrderedSearchHits(searchResponse, "1", "2", "3", "4", "5");
        assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));

        // Order: Asc, Mode: max (sorts by the farthest point per document)
        searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
                .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC).sortMode("max"))
                .execute().actionGet();

        assertHitCount(searchResponse, 5);
        assertOrderedSearchHits(searchResponse, "1", "2", "4", "3", "5");
        assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));

        // Order: Desc (farthest point per document, reversed order)
        searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
                .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)).execute()
                .actionGet();

        assertHitCount(searchResponse, 5);
        assertOrderedSearchHits(searchResponse, "5", "3", "4", "2", "1");
        assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(0d, 10d));

        // Order: Desc, Mode: min (closest point per document, reversed order)
        searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
                .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC).sortMode("min"))
                .execute().actionGet();

        assertHitCount(searchResponse, 5);
        assertOrderedSearchHits(searchResponse, "5", "4", "3", "2", "1");
        assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(0d, 10d));

        // Mode: avg, ascending then descending.
        searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
                .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC))
                .execute().actionGet();

        assertHitCount(searchResponse, 5);
        assertOrderedSearchHits(searchResponse, "1", "2", "4", "3", "5");
        assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1157d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(2874d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(5301d, 10d));

        searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
                .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode("avg").order(SortOrder.DESC))
                .execute().actionGet();

        assertHitCount(searchResponse, 5);
        assertOrderedSearchHits(searchResponse, "5", "3", "4", "2", "1");
        assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(2874.0d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1157.0d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(0d, 10d));

        // "sum" is not a meaningful aggregate for geo distances.
        try {
            client().prepareSearch("test").setQuery(matchAllQuery())
                    .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode("sum"));
            fail("sum should not be supported for sorting by geo distance");
        } catch (IllegalArgumentException e) {
            // expected
        }
    }
    // Regression bug:
    // https://github.com/elasticsearch/elasticsearch/issues/2851
    /**
     * Verifies that documents without any geo_point value sort last when
     * ascending (distance treated as {@code Double.MAX_VALUE}) and first
     * when descending, instead of failing the search.
     */
    public void testDistanceSortingWithMissingGeoPoint() throws Exception {
        Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
        Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
        XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
                .startObject("locations").field("type", "geo_point");
        if (version.before(Version.V_2_2_0)) {
            // Pre-2.2 mappings use the lat_lon sub-field option.
            xContentBuilder.field("lat_lon", true);
        }
        xContentBuilder.endObject().endObject().endObject().endObject();
        assertAcked(prepareCreate("test").setSettings(settings).addMapping("type1", xContentBuilder));
        ensureGreen();

        client().prepareIndex("test", "type1", "1")
                .setSource(jsonBuilder().startObject().field("names", "Times Square", "Tribeca").startArray("locations")
                        // to NY: 5.286 km
                        .startObject().field("lat", 40.759011).field("lon", -73.9844722).endObject()
                        // to NY: 0.4621 km
                        .startObject().field("lat", 40.718266).field("lon", -74.007819).endObject().endArray().endObject())
                .execute().actionGet();

        // Document 2 intentionally has no "locations" value at all.
        client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("names", "Wall Street", "Soho").endObject())
                .execute().actionGet();

        refresh();

        // Order: Asc
        SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
                .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC)).execute()
                .actionGet();

        assertHitCount(searchResponse, 2);
        assertOrderedSearchHits(searchResponse, "1", "2");
        assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));

        // Order: Desc
        searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
                .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)).execute()
                .actionGet();

        // Doc with missing geo point is first, is consistent with 0.20.x
        assertHitCount(searchResponse, 2);
        assertOrderedSearchHits(searchResponse, "2", "1");
        assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
        assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(5286d, 10d));
    }
/**
 * Geo-distance sorting on a geo_point inside a nested object: covers sort
 * modes min (ASC default), max, and avg in both orders, a nested filter that
 * excludes non-matching documents (they sort as Double.MAX_VALUE), and the
 * rejection of the "sum" sort mode.
 *
 * Fix: one sort-builder chain called setNestedPath("branches") twice; the
 * redundant first call has been removed (same value, no behavior change).
 */
public void testDistanceSortingNestedFields() throws Exception {
    Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
    Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
    XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("company").startObject("properties")
            .startObject("name").field("type", "text").endObject().startObject("branches").field("type", "nested")
            .startObject("properties").startObject("name").field("type", "text").endObject().startObject("location")
            .field("type", "geo_point");
    if (version.before(Version.V_2_2_0)) {
        xContentBuilder.field("lat_lon", true);
    }
    xContentBuilder.endObject().endObject().endObject().endObject().endObject().endObject();
    assertAcked(prepareCreate("companies").setSettings(settings).addMapping("company", xContentBuilder));
    ensureGreen();
    indexRandom(true,
            client().prepareIndex("companies", "company", "1")
                    .setSource(
                            jsonBuilder().startObject().field("name", "company 1").startArray("branches").startObject()
                                    .field("name", "New York").startObject("location").field("lat", 40.7143528)
                                    .field("lon", -74.0059731)
                                    .endObject().endObject().endArray().endObject()),
            client().prepareIndex("companies", "company", "2")
                    .setSource(jsonBuilder().startObject().field("name", "company 2").startArray("branches").startObject()
                            .field("name", "Times Square").startObject("location").field("lat", 40.759011).field("lon", -73.9844722)
                            .endObject() // to NY: 5.286 km
                            .endObject().startObject().field("name", "Tribeca").startObject("location").field("lat", 40.718266)
                            .field("lon", -74.007819).endObject() // to NY: 0.4621 km
                            .endObject().endArray().endObject()),
            client().prepareIndex("companies", "company", "3")
                    .setSource(jsonBuilder().startObject().field("name", "company 3").startArray("branches").startObject()
                            .field("name", "Wall Street").startObject("location").field("lat", 40.7051157).field("lon", -74.0088305)
                            .endObject() // to NY: 1.055 km
                            .endObject().startObject().field("name", "Soho").startObject("location").field("lat", 40.7247222)
                            .field("lon", -74).endObject() // to NY: 1.258 km
                            .endObject().endArray().endObject()),
            client().prepareIndex("companies", "company", "4")
                    .setSource(jsonBuilder().startObject().field("name", "company 4").startArray("branches").startObject()
                            .field("name", "Greenwich Village").startObject("location").field("lat", 40.731033)
                            .field("lon", -73.9962255).endObject() // to NY: 2.029 km
                            .endObject().startObject().field("name", "Brooklyn").startObject("location").field("lat", 40.65)
                            .field("lon", -73.95).endObject() // to NY: 8.572 km
                            .endObject().endArray().endObject()));
    // Order: Asc (default mode: closest branch per company)
    SearchResponse searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders
            .geoDistanceSort("branches.location", 40.7143528, -74.0059731).order(SortOrder.ASC).setNestedPath("branches"))
            .execute().actionGet();
    assertHitCount(searchResponse, 4);
    assertOrderedSearchHits(searchResponse, "1", "2", "3", "4");
    assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
    // Order: Asc, Mode: max (farthest branch per company)
    searchResponse = client()
            .prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location",
                    40.7143528, -74.0059731).order(SortOrder.ASC).sortMode("max").setNestedPath("branches"))
            .execute().actionGet();
    assertHitCount(searchResponse, 4);
    assertOrderedSearchHits(searchResponse, "1", "3", "2", "4");
    assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
    // Order: Desc (default mode: farthest branch per company)
    searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders
            .geoDistanceSort("branches.location", 40.7143528, -74.0059731).order(SortOrder.DESC).setNestedPath("branches"))
            .execute().actionGet();
    assertHitCount(searchResponse, 4);
    assertOrderedSearchHits(searchResponse, "4", "2", "3", "1");
    assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(5286.0d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1258.0d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
    // Order: Desc, Mode: min
    searchResponse = client()
            .prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location",
                    40.7143528, -74.0059731).order(SortOrder.DESC).sortMode("min").setNestedPath("branches"))
            .execute().actionGet();
    assertHitCount(searchResponse, 4);
    assertOrderedSearchHits(searchResponse, "4", "3", "2", "1");
    assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(2029.0d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1055.0d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(462.1d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
    // Mode: avg, Order: Asc
    searchResponse = client()
            .prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location",
                    40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC).setNestedPath("branches"))
            .execute().actionGet();
    assertHitCount(searchResponse, 4);
    assertOrderedSearchHits(searchResponse, "1", "3", "2", "4");
    assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(1157.0d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(2874.0d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
    // Mode: avg, Order: Desc (redundant duplicate setNestedPath removed here)
    searchResponse = client().prepareSearch("companies")
            .setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731)
                    .sortMode("avg").order(SortOrder.DESC).setNestedPath("branches"))
            .execute().actionGet();
    assertHitCount(searchResponse, 4);
    assertOrderedSearchHits(searchResponse, "4", "2", "3", "1");
    assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), closeTo(2874.0d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), closeTo(1157.0d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
    // Nested filter: only the "brooklyn" branch is considered; companies with
    // no matching nested object sort as Double.MAX_VALUE.
    searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
            .addSort(SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731)
                    .setNestedFilter(termQuery("branches.name", "brooklyn"))
                    .sortMode("avg").order(SortOrder.ASC).setNestedPath("branches"))
            .execute().actionGet();
    assertHitCount(searchResponse, 4);
    assertFirstHit(searchResponse, hasId("4"));
    assertSearchHits(searchResponse, "1", "2", "3", "4");
    assertThat(((Number) searchResponse.getHits().getAt(0).sortValues()[0]).doubleValue(), closeTo(8572.0d, 10d));
    assertThat(((Number) searchResponse.getHits().getAt(1).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
    assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
    assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
    // "sum" is not a valid geo-distance sort mode and must be rejected eagerly.
    try {
        client().prepareSearch("companies").setQuery(matchAllQuery())
                .addSort(SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731).sortMode("sum")
                        .setNestedPath("branches"));
        fail("Sum should not be allowed as sort mode");
    } catch (IllegalArgumentException e) {
        // expected
    }
}
/**
 * Regression test for issue 3073: a geo_distance filter must match a document
 * whose point was indexed as a geohash string at (almost exactly) the query
 * origin.
 */
public void testGeoDistanceFilter() throws IOException {
    Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
    Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
    double lat = 40.720611;
    double lon = -73.998776;
    XContentBuilder mapping = JsonXContent.contentBuilder().startObject().startObject("location").startObject("properties")
            .startObject("pin").field("type", "geo_point");
    if (version.before(Version.V_2_2_0)) {
        mapping.field("lat_lon", true);
    }
    mapping.endObject().endObject().endObject().endObject();
    // Index the point as a geohash string rather than a lat/lon object.
    XContentBuilder source = JsonXContent.contentBuilder().startObject().field("pin", GeoHashUtils.stringEncode(lon, lat)).endObject();
    assertAcked(prepareCreate("locations").setSettings(settings).addMapping("location", mapping));
    client().prepareIndex("locations", "location", "1").setCreate(true).setSource(source).execute().actionGet();
    refresh();
    client().prepareGet("locations", "location", "1").execute().actionGet();
    // A 1m radius around the original coordinates must still match the doc.
    SearchResponse result = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery())
            .setPostFilter(QueryBuilders.geoDistanceQuery("pin").geoDistance(GeoDistance.ARC).point(lat, lon).distance("1m")).execute()
            .actionGet();
    assertHitCount(result, 1);
}
/** @return a uniformly distributed random longitude in [-180, 180). */
private double randomLon() {
    final double unit = randomDouble(); // in [0, 1)
    return -180 + unit * 360;
}
/** @return a uniformly distributed random latitude in [-90, 90). */
private double randomLat() {
    final double unit = randomDouble(); // in [0, 1)
    return -90 + unit * 180;
}
/**
 * "Duel" test: runs the same random geo-distance queries through each
 * available optimization strategy (every pre-2.2 optimize_bbox value, or the
 * single post-2.2 code path) and checks each response via
 * {@link #assertDuelOptimization}.
 */
public void testDuelOptimizations() throws Exception {
    Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
    Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
    // lat_lon / optimize_bbox only exist for indices created before 2.2.
    if (version.before(Version.V_2_2_0)) {
        assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", "location", "type=geo_point,lat_lon=true"));
    } else {
        assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", "location", "type=geo_point"));
    }
    final int numDocs = scaledRandomIntBetween(3000, 10000);
    List<IndexRequestBuilder> docs = new ArrayList<>();
    for (int i = 0; i < numDocs; ++i) {
        docs.add(client().prepareIndex("index", "type").setSource(jsonBuilder().startObject().startObject("location")
                .field("lat", randomLat()).field("lon", randomLon()).endObject().endObject()));
    }
    indexRandom(true, docs);
    ensureSearchable();
    // 10 random origin/radius combinations, each tried with both distance computations.
    for (int i = 0; i < 10; ++i) {
        final double originLat = randomLat();
        final double originLon = randomLon();
        final String distance = DistanceUnit.KILOMETERS.toString(randomIntBetween(1, 10000));
        for (GeoDistance geoDistance : Arrays.asList(GeoDistance.ARC, GeoDistance.SLOPPY_ARC)) {
            logger.info("Now testing GeoDistance={}, distance={}, origin=({}, {})", geoDistance, distance, originLat, originLon);
            GeoDistanceQueryBuilder qb = QueryBuilders.geoDistanceQuery("location").point(originLat, originLon).distance(distance)
                    .geoDistance(geoDistance);
            long matches;
            if (version.before(Version.V_2_2_0)) {
                // Pre-2.2: exercise every optimize_bbox strategy with the same query.
                for (String optimizeBbox : Arrays.asList("none", "memory", "indexed")) {
                    qb.optimizeBbox(optimizeBbox);
                    SearchResponse resp = client().prepareSearch("index").setSize(0).setQuery(QueryBuilders.constantScoreQuery(qb))
                            .execute().actionGet();
                    matches = assertDuelOptimization(resp);
                    logger.info("{} -> {} hits", optimizeBbox, matches);
                }
            } else {
                SearchResponse resp = client().prepareSearch("index").setSize(0).setQuery(QueryBuilders.constantScoreQuery(qb))
                        .execute().actionGet();
                matches = assertDuelOptimization(resp);
                logger.info("{} hits", matches);
            }
        }
    }
}
/**
 * Sanity-checks a duel response and returns its total hit count.
 *
 * NOTE(review): {@code matches} is initialized to -1 immediately before the
 * {@code if (matches < 0)} test, so the condition is always true and the
 * {@code else} branch (which would compare against a previously seen hit
 * count) is dead code. Presumably the duel comparison was meant to persist
 * {@code matches} across the optimize_bbox variants — verify against upstream
 * history before relying on this method to actually compare hit counts.
 */
private long assertDuelOptimization(SearchResponse resp) {
    long matches = -1;
    assertSearchResponse(resp);
    if (matches < 0) {
        matches = resp.getHits().totalHits();
    } else {
        // Unreachable with the local initialization above (see NOTE).
        assertEquals(matches, matches = resp.getHits().totalHits());
    }
    return matches;
}
}
| jbertouch/elasticsearch | core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java | Java | apache-2.0 | 39,991 |
package org.wikipedia;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.pm.ApplicationInfo;
import android.content.pm.PackageManager;
import android.content.pm.ResolveInfo;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
import android.net.Uri;
import android.os.Build;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.os.Parcelable;
import android.preference.PreferenceManager;
import android.telephony.TelephonyManager;
import android.text.InputType;
import android.text.format.DateUtils;
import android.util.Base64;
import android.util.DisplayMetrics;
import android.util.Log;
import android.util.TypedValue;
import android.view.View;
import android.view.Window;
import android.view.inputmethod.InputMethodManager;
import android.widget.CheckBox;
import android.widget.CompoundButton;
import android.widget.EditText;
import android.widget.Toast;
import com.squareup.otto.Bus;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.mediawiki.api.json.ApiResult;
import org.wikipedia.bridge.CommunicationBridge;
import org.wikipedia.events.WikipediaZeroInterstitialEvent;
import org.wikipedia.events.WikipediaZeroStateChangeEvent;
import org.wikipedia.settings.PrefKeys;
import org.wikipedia.zero.WikipediaZeroTask;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.UnsupportedEncodingException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.*;
/**
* Contains utility methods that Java doesn't have because we can't make code look too good, can we?
*/
public final class Utils {
private static final int MCC_LENGTH = 3;
private static final int KB16 = 16 * 1024;
/**
 * Private constructor, so nobody can construct Utils.
 * (This class is a holder for static utility methods only.)
 *
 * THEIR EVIL PLANS HAVE BEEN THWARTED!!!1
 */
private Utils() { }
/**
 * Null-safe string equality check.
 *
 * @param str1 The first string, may be null
 * @param str2 The second string, may be null
 * @return true if both strings are equal, or both are null
 */
public static boolean compareStrings(String str1, String str2) {
    if (str1 == null) {
        return str2 == null;
    }
    return str1.equals(str2);
}
/**
 * Creates an MD5 hash of the provided string &amp; returns its base64 representation
 *
 * @param s String to hash
 * @return URL-safe, unwrapped Base64 encoding of the string's MD5 digest
 */
public static String md5base64(final String s) {
    try {
        MessageDigest digest = MessageDigest.getInstance("MD5");
        digest.update(s.getBytes("utf-8"));
        final byte[] hash = digest.digest();
        return Base64.encodeToString(hash, Base64.URL_SAFE | Base64.NO_WRAP);
    } catch (NoSuchAlgorithmException e) {
        // MD5 is available on every runtime; this cannot happen in practice.
        throw new RuntimeException(e);
    } catch (UnsupportedEncodingException e) {
        // utf-8 is always supported; this cannot happen in practice.
        throw new RuntimeException(e);
    }
}
/**
 * Creates an MD5 hash of the provided string and returns its ASCII representation
 *
 * @param s String to hash
 * @return Lowercase hex MD5 digest of the string (always 32 characters)
 */
public static String md5string(String s) {
    StringBuilder hexStr = new StringBuilder();
    try {
        // Create MD5 Hash
        MessageDigest digest = MessageDigest.getInstance("MD5");
        digest.update(s.getBytes("utf-8"));
        byte[] messageDigest = digest.digest();
        final int maxByteVal = 0xFF;
        for (byte b : messageDigest) {
            // Zero-pad each byte to two hex digits. Integer.toHexString alone
            // drops the leading zero for bytes < 0x10, which previously
            // produced malformed, variable-length digests.
            String hex = Integer.toHexString(maxByteVal & b);
            if (hex.length() < 2) {
                hexStr.append('0');
            }
            hexStr.append(hex);
        }
    } catch (NoSuchAlgorithmException e) {
        // MD5 is available on every runtime; this cannot happen in practice.
        throw new RuntimeException(e);
    } catch (UnsupportedEncodingException e) {
        // utf-8 is always supported; this cannot happen in practice.
        throw new RuntimeException(e);
    }
    return hexStr.toString();
}
/**
 * Deletes a file or directory, with optional recursion.
 *
 * Best-effort: failures to delete individual entries are ignored, matching
 * the previous behavior (File.delete's return value is not checked).
 *
 * @param path File or directory to delete.
 * @param recursive Whether to delete all subdirectories and files first.
 */
public static void delete(File path, boolean recursive) {
    if (recursive && path.isDirectory()) {
        String[] children = path.list();
        // list() returns null on I/O error (or a race where the directory
        // disappeared); previously that caused a NullPointerException.
        if (children != null) {
            for (String child : children) {
                delete(new File(path, child), recursive);
            }
        }
    }
    path.delete();
}
/**
 * Formats provided date relative to the current system time
 * @param date Date to format
 * @return String representing the relative time difference of the parameter from current time
 */
public static String formatDateRelative(Date date) {
    // Second-level resolution; 0 flags = default formatting behavior.
    return DateUtils.getRelativeTimeSpanString(date.getTime(), System.currentTimeMillis(), DateUtils.SECOND_IN_MILLIS, 0).toString();
}
/**
 * Ensures that the calling method is on the main thread.
 *
 * @throws IllegalStateException if called from any thread other than the main (UI) thread
 */
public static void ensureMainThread() {
    // The main looper's thread is, by definition, the UI thread.
    if (Looper.getMainLooper().getThread() != Thread.currentThread()) {
        throw new IllegalStateException("Method must be called from the Main Thread");
    }
}
/**
 * Attempt to hide the Android Keyboard.
 *
 * FIXME: This should not need to exist.
 * I do not know why Android does not handle this automatically.
 *
 * @param activity The current activity
 */
public static void hideSoftKeyboard(Activity activity) {
    InputMethodManager keyboard = (InputMethodManager)activity.getSystemService(Context.INPUT_METHOD_SERVICE);
    // Not using getCurrentFocus as that sometimes is null, but the keyboard is still up.
    // The decor view's window token identifies this activity's window to the IME.
    keyboard.hideSoftInputFromWindow(activity.getWindow().getDecorView().getWindowToken(), 0);
}
/**
 * Attempt to display the Android keyboard.
 *
 * FIXME: This should not need to exist.
 * Android should always show the keyboard at the appropriate time. This method allows you to display the keyboard
 * when Android fails to do so.
 *
 * @param activity The current activity
 * @param view The currently focused view that will receive the keyboard input
 */
public static void showSoftKeyboard(Activity activity, View view) {
    InputMethodManager keyboard = (InputMethodManager)activity.getSystemService(Context.INPUT_METHOD_SERVICE);
    // SHOW_FORCED requests the keyboard unconditionally for the given view.
    keyboard.showSoftInput(view, InputMethodManager.SHOW_FORCED);
}
/**
 * Same as showSoftKeyboard(), but posted to the message queue of the current thread, so that it's executed
 * after the current block of code is finished.
 * @param activity The current activity
 * @param view The currently focused view that will receive the keyboard input
 */
public static void showSoftKeyboardAsync(final Activity activity, final View view) {
    // post() defers the request until after the current UI pass completes,
    // by which time the view should be attached and able to take focus.
    view.post(new Runnable() {
        @Override
        public void run() {
            Utils.showSoftKeyboard(activity, view);
        }
    });
}
/**
 * Wires a "show password" checkbox to an EditText: checking it reveals the
 * text in the clear; unchecking it restores the password transformation.
 *
 * @param check Checkbox that controls visibility
 * @param edit Password field whose InputType is toggled
 */
public static void setupShowPasswordCheck(final CheckBox check, final EditText edit) {
    check.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() {
        @Override
        public void onCheckedChanged(CompoundButton compoundButton, boolean isChecked) {
            // EditText loses the cursor position when you change the InputType
            int curPos = edit.getSelectionStart();
            if (isChecked) {
                edit.setInputType(InputType.TYPE_CLASS_TEXT);
            } else {
                edit.setInputType(InputType.TYPE_CLASS_TEXT | InputType.TYPE_TEXT_VARIATION_PASSWORD);
            }
            // Restore the cursor to where it was before the InputType change.
            edit.setSelection(curPos);
        }
    });
}
/**
 * Inspect an API response, and fire an event to update the UI for Wikipedia Zero On/Off.
 *
 * Work is posted to the main looper — presumably so bus subscribers can touch
 * UI directly; verify against the subscribers before changing this.
 *
 * @param app The application object
 * @param result An API result to inspect for Wikipedia Zero headers
 */
public static void processHeadersForZero(final WikipediaApp app, final ApiResult result) {
    new Handler(Looper.getMainLooper()).post(new Runnable() {
        @Override
        public void run() {
            Map<String, List<String>> headers = result.getHeaders();
            // Presence of the X-CS header marks a response served via a
            // Wikipedia Zero partner network.
            boolean responseZeroState = headers.containsKey("X-CS");
            if (responseZeroState) {
                String xcs = headers.get("X-CS").get(0);
                // Only re-identify the carrier when the header value changed.
                if (!xcs.equals(WikipediaApp.getXcs())) {
                    identifyZeroCarrier(app, xcs);
                }
            } else if (WikipediaApp.getWikipediaZeroDisposition()) {
                // Header disappeared while Zero was on: clear the app-wide
                // Zero state and notify listeners.
                WikipediaApp.setXcs("");
                WikipediaApp.setCarrierMessage("");
                WikipediaApp.setWikipediaZeroDisposition(responseZeroState);
                app.getBus().post(new WikipediaZeroStateChangeEvent());
            }
        }
    });
}
// Message "what" id for the single Wikipedia Zero eligibility-check message.
private static final int MESSAGE_ZERO = 1;

/**
 * Kicks off an async Wikipedia Zero eligibility check for the given carrier
 * header value. On success, updates the app-wide Zero state (X-CS value,
 * carrier message, disposition) and posts a WikipediaZeroStateChangeEvent.
 *
 * @param app The application object
 * @param xcs Value of the X-CS header identifying the carrier
 */
public static void identifyZeroCarrier(final WikipediaApp app, final String xcs) {
    Handler wikipediaZeroHandler = new Handler(new Handler.Callback() {
        // Tracks the in-flight task so a hung request can be cancelled
        // before a new one starts.
        private WikipediaZeroTask curZeroTask;

        @Override
        public boolean handleMessage(Message msg) {
            WikipediaZeroTask zeroTask = new WikipediaZeroTask(app.getAPIForSite(app.getPrimarySite()), app) {
                @Override
                public void onFinish(String message) {
                    Log.d("Wikipedia", "Wikipedia Zero message: " + message);
                    // A null message means the check produced no carrier info;
                    // state is only updated on a real message.
                    if (message != null) {
                        WikipediaApp.setXcs(xcs);
                        WikipediaApp.setCarrierMessage(message);
                        WikipediaApp.setWikipediaZeroDisposition(true);
                        Bus bus = app.getBus();
                        bus.post(new WikipediaZeroStateChangeEvent());
                        curZeroTask = null;
                    }
                }

                @Override
                public void onCatch(Throwable caught) {
                    // oh snap
                    Log.d("Wikipedia", "Wikipedia Zero Eligibility Check Exception Caught");
                    curZeroTask = null;
                }
            };
            if (curZeroTask != null) {
                // if this connection was hung, clean up a bit
                curZeroTask.cancel();
            }
            curZeroTask = zeroTask;
            curZeroTask.execute();
            return true;
        }
    });
    // Coalesce pending checks: drop any queued MESSAGE_ZERO before enqueueing.
    wikipediaZeroHandler.removeMessages(MESSAGE_ZERO);
    Message zeroMessage = Message.obtain();
    zeroMessage.what = MESSAGE_ZERO;
    zeroMessage.obj = "zero_eligible_check";
    wikipediaZeroHandler.sendMessage(zeroMessage);
}
/**
 * Read the MCC-MNC (mobile operator code) if available and the cellular data connection is the active one.
 * http://lists.wikimedia.org/pipermail/wikimedia-l/2014-April/071131.html
 * @param ctx Application context.
 * @return "network,sim" pair of MCC-MNC codes (each typically ###-##), or null if unable
 *         to ascertain (e.g., no actively used cellular connection)
 */
public static String getMccMnc(Context ctx) {
    String mccMncNetwork;
    String mccMncSim;
    try {
        ConnectivityManager conn = (ConnectivityManager) ctx.getSystemService(Context.CONNECTIVITY_SERVICE);
        NetworkInfo networkInfo = conn.getActiveNetworkInfo();
        // Only report when the active connection is cellular (mobile or WiMAX).
        if (networkInfo != null && networkInfo.getState() == NetworkInfo.State.CONNECTED
                && (networkInfo.getType() == ConnectivityManager.TYPE_MOBILE || networkInfo.getType() == ConnectivityManager.TYPE_WIMAX))
        {
            TelephonyManager t = (TelephonyManager)ctx.getSystemService(WikipediaApp.TELEPHONY_SERVICE);
            if (t != null && t.getPhoneType() >= 0) {
                mccMncNetwork = t.getNetworkOperator();
                // "000-00" is the placeholder when the operator code is unavailable.
                if (mccMncNetwork != null) {
                    mccMncNetwork = mccMncNetwork.substring(0, MCC_LENGTH) + "-" + mccMncNetwork.substring(MCC_LENGTH);
                } else {
                    mccMncNetwork = "000-00";
                }
                // TelephonyManager documentation refers to MCC-MNC unreliability on CDMA,
                // and we actually see that network and SIM MCC-MNC don't always agree,
                // so let's check the SIM, too. Let's not worry if it's CDMA, as the def of CDMA is complex.
                mccMncSim = t.getSimOperator();
                if (mccMncSim != null) {
                    mccMncSim = mccMncSim.substring(0, MCC_LENGTH) + "-" + mccMncSim.substring(MCC_LENGTH);
                } else {
                    mccMncSim = "000-00";
                }
                return mccMncNetwork + "," + mccMncSim;
            }
        }
        return null;
    } catch (Throwable t) {
        // Because, despite best efforts, things can go wrong and we don't want to crash the app:
        return null;
    }
}
/**
 * Takes a language code (as returned by Android) and returns a wiki code, as used by wikipedia.
 *
 * @param langCode Language code (as returned by Android)
 * @return Wiki code, as used by wikipedia.
 */
public static String langCodeToWikiLang(String langCode) {
    // Android still reports some languages under deprecated ISO codes;
    // translate those to the modern codes Wikipedia uses.
    // See https://developer.android.com/reference/java/util/Locale.html
    final String[][] deprecatedToModern = {
            {"iw", "he"}, // Hebrew
            {"in", "id"}, // Indonesian
            {"ji", "yi"}, // Yiddish
    };
    for (String[] mapping : deprecatedToModern) {
        if (langCode.equals(mapping[0])) {
            return mapping[1];
        }
    }
    return langCode;
}
/**
 * List of wiki language codes for which the content is primarily RTL.
 *
 * Ensure that this is always sorted alphabetically.
 */
private static final String[] RTL_LANGS = {
        "ar", "arc", "arz", "bcc", "bqi", "ckb", "dv", "fa", "glk", "ha", "he",
        "khw", "ks", "mzn", "pnb", "ps", "sd", "ug", "ur", "yi"
};

/**
 * Returns true if the given wiki language is to be displayed RTL.
 *
 * @param lang Wiki code for the language to check for directionality
 * @return true if it is RTL, false if LTR
 */
public static boolean isLangRTL(String lang) {
    // RTL_LANGS is kept sorted, so a binary search under natural String
    // ordering is valid.
    return Arrays.binarySearch(RTL_LANGS, lang) >= 0;
}
/**
 * Setup directionality for both UI and content elements in a webview.
 *
 * @param contentLang The Content language to use to set directionality. Wiki Language code.
 * @param uiLang The UI language to use to set directionality. Java language code.
 * @param bridge The CommunicationBridge to use to communicate with the WebView
 */
public static void setupDirectionality(String contentLang, String uiLang, CommunicationBridge bridge) {
    JSONObject payload = new JSONObject();
    try {
        if (isLangRTL(contentLang)) {
            payload.put("contentDirection", "rtl");
        } else {
            payload.put("contentDirection", "ltr");
        }
        // uiLang is an Android/Java code and must be normalized (e.g. "iw" -> "he")
        // before the RTL lookup; contentLang is already a wiki code.
        if (isLangRTL(langCodeToWikiLang(uiLang))) {
            payload.put("uiDirection", "rtl");
        } else {
            payload.put("uiDirection", "ltr");
        }
    } catch (JSONException e) {
        // Keys are constant strings, so this cannot occur in practice.
        throw new RuntimeException(e);
    }
    bridge.sendMessage("setDirectionality", payload);
}
/**
 * Sets text direction (RTL / LTR) for given view based on given lang.
 *
 * Doesn't do anything on pre Android 4.2, since their RTL support is terrible.
 *
 * @param view View to set direction of
 * @param lang Wiki code for the language based on which to set direction
 */
public static void setTextDirection(View view, String lang) {
    // View#setTextDirection only exists from API 17 (4.2) onward.
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1) {
        view.setTextDirection(Utils.isLangRTL(lang) ? View.TEXT_DIRECTION_RTL : View.TEXT_DIRECTION_LTR);
    }
}
/**
 * Returns db name for given site
 *
 * WARNING: HARDCODED TO WORK FOR WIKIPEDIA ONLY
 *
 * @param site Site object to get dbname for
 * @return dbname for given site object
 */
public static String getDBNameForSite(Site site) {
    // Wikipedia db names are simply the language code plus "wiki".
    final String language = site.getLanguage();
    return language + "wiki";
}
/**
 * Routes an external link either straight to an external browser, or — when
 * Wikipedia Zero is active and the interstitial preference is enabled —
 * through a WikipediaZeroInterstitialEvent first (presumably to warn that
 * leaving the zero-rated domain may incur data charges — verify with the
 * event's subscriber).
 *
 * @param context Context of the calling app
 * @param uri External URI being opened
 */
public static void handleExternalLink(final Context context, final Uri uri) {
    if (WikipediaApp.isWikipediaZeroDevmodeOn() && WikipediaApp.getWikipediaZeroDisposition()) {
        SharedPreferences sharedPref = PreferenceManager.getDefaultSharedPreferences(context);
        // Interstitial defaults to on unless the user disabled it in settings.
        if (sharedPref.getBoolean(PrefKeys.getZeroInterstitial(), true)) {
            WikipediaApp.getInstance().getBus().post(new WikipediaZeroInterstitialEvent(uri));
        } else {
            Utils.visitInExternalBrowser(context, uri);
        }
    } else {
        Utils.visitInExternalBrowser(context, uri);
    }
}
/**
 * Open the specified URI in an external browser (even if our app's intent filter
 * matches the given URI)
 *
 * @param context Context of the calling app
 * @param uri URI to open in an external browser
 */
public static void visitInExternalBrowser(final Context context, Uri uri) {
    Intent intent = new Intent();
    intent.setAction(Intent.ACTION_VIEW);
    intent.setData(uri);
    List<ResolveInfo> resInfo = context.getPackageManager().queryIntentActivities(intent, 0);
    if (!resInfo.isEmpty()) {
        // Build one explicit VIEW intent per candidate handler, excluding ourselves.
        List<Intent> browserIntents = new ArrayList<Intent>();
        for (ResolveInfo resolveInfo : resInfo) {
            String packageName = resolveInfo.activityInfo.packageName;
            // remove our apps from the selection!
            // This ensures that all the variants of the Wiki app (Alpha, Beta, Stable) are never shown
            if (packageName.startsWith("org.wikipedia")) {
                continue;
            }
            Intent newIntent = new Intent(Intent.ACTION_VIEW);
            newIntent.setData(uri);
            newIntent.setPackage(packageName);
            browserIntents.add(newIntent);
        }
        if (browserIntents.size() > 0) {
            // initialize the chooser intent with one of the browserIntents, and remove that
            // intent from the list, since the chooser already has it, and we don't need to
            // add it again in putExtra. (initialize with the last item in the list, to preserve order)
            Intent chooserIntent = Intent.createChooser(browserIntents.remove(browserIntents.size() - 1), null);
            chooserIntent.putExtra(Intent.EXTRA_INITIAL_INTENTS, browserIntents.toArray(new Parcelable[]{}));
            context.startActivity(chooserIntent);
            return;
        }
    }
    // This means that there was no way to handle this link.
    // We will just show a toast now. FIXME: Make this more visible?
    Toast.makeText(context, R.string.error_can_not_process_link, Toast.LENGTH_LONG).show();
}
/**
 * Utility method to detect whether an Email app is installed,
 * for conditionally enabling/disabling email links.
 * @param context Context of the calling app.
 * @return True if an Email app exists, false otherwise.
 */
public static boolean mailAppExists(Context context) {
    // Probe with a representative mailto: intent; any resolving activity
    // means some mail client is installed.
    Intent probe = new Intent();
    probe.setAction(Intent.ACTION_SENDTO);
    probe.setData(Uri.parse("mailto:test@wikimedia.org"));
    List<ResolveInfo> handlers = context.getPackageManager().queryIntentActivities(probe, 0);
    return !handlers.isEmpty();
}
/**
 * Utility method to copy a stream into another stream.
 *
 * Uses a 16KB buffer. Neither stream is closed by this method.
 *
 * @param in Stream to copy from.
 * @param out Stream to copy to.
 * @throws IOException if reading or writing fails
 */
public static void copyStreams(InputStream in, OutputStream out) throws IOException {
    final byte[] buffer = new byte[16 * 1024]; // 16kb chunk size
    for (int read = in.read(buffer); read != -1; read = in.read(buffer)) {
        out.write(buffer, 0, read);
    }
}
/**
 * Write a JSON object to a file
 * @param file file to be written; any existing content is replaced
 * @param jsonObject content of file
 * @throws IOException when writing failed
 */
public static void writeToFile(File file, JSONObject jsonObject) throws IOException {
    OutputStreamWriter out = new OutputStreamWriter(new FileOutputStream(file));
    try {
        out.write(jsonObject.toString());
    } finally {
        // Always close, even when the write throws.
        out.close();
    }
}
/**
 * Reads the contents of this page from storage.
 * @param f File to read the JSON document from
 * @return JSONObject parsed from the file's contents
 * @throws IOException if the file cannot be read
 * @throws JSONException if the contents are not valid JSON
 */
public static JSONObject readJSONFile(File f) throws IOException, JSONException {
    BufferedReader reader = new BufferedReader(new InputStreamReader(new FileInputStream(f)));
    try {
        // Accumulate all lines; line terminators are dropped, which is
        // harmless for JSON.
        StringBuilder contents = new StringBuilder();
        for (String line = reader.readLine(); line != null; line = reader.readLine()) {
            contents.append(line);
        }
        return new JSONObject(contents.toString());
    } finally {
        reader.close();
    }
}
/**
 * Format for formatting/parsing dates to/from the ISO 8601 standard
 */
private static final String ISO8601_FORMAT_STRING = "yyyy-MM-dd'T'HH:mm:ss'Z'";

/**
 * Parse a date formatted in ISO8601 format.
 *
 * The trailing 'Z' is treated as a literal and the time is interpreted as UTC.
 *
 * @param dateString Date String to parse
 * @return Parsed Date object.
 * @throws ParseException if the string does not match the ISO8601 pattern
 */
public static Date parseISO8601(String dateString) throws ParseException {
    // Locale.ROOT keeps parsing locale-independent.
    SimpleDateFormat sdf = new SimpleDateFormat(ISO8601_FORMAT_STRING, Locale.ROOT);
    sdf.setTimeZone(TimeZone.getTimeZone("UTC"));
    // Return the parsed Date directly; the old copy through an intermediate
    // Date object added an allocation without changing the result.
    return sdf.parse(dateString);
}
/**
 * Format a date to an ISO8601 formatted string.
 *
 * @param date Date to format.
 * @return The given date formatted in ISO8601 format.
 */
public static String formatISO8601(Date date) {
    // Always format in UTC with the root locale so the output is
    // locale- and timezone-independent.
    SimpleDateFormat formatter = new SimpleDateFormat(ISO8601_FORMAT_STRING, Locale.ROOT);
    formatter.setTimeZone(TimeZone.getTimeZone("UTC"));
    return formatter.format(date);
}
/**
 * Convert a JSONArray object to a String Array.
 *
 * @param array a JSONArray containing only Strings, or null
 * @return a String[] with all the items in the JSONArray, or null if the input was null
 */
public static String[] jsonArrayToStringArray(JSONArray array) {
    if (array == null) {
        return null;
    }
    final int count = array.length();
    String[] result = new String[count];
    // optString (rather than getString) yields "" for missing/non-string entries
    // instead of throwing.
    for (int i = 0; i < count; i++) {
        result[i] = array.optString(i);
    }
    return result;
}
/**
 * Resolves a potentially protocol relative URL to a 'full' URL
 *
 * @param url Url to check for (and fix) protocol relativeness
 * @return A fully qualified, protocol specified URL
 */
public static String resolveProtocolRelativeUrl(String url) {
    if (url.startsWith("//")) {
        // Protocol-relative link: qualify it with the app's preferred protocol.
        return WikipediaApp.getInstance().getNetworkProtocol() + ":" + url;
    }
    return url;
}
/**
 * Ask user to try connecting again upon (hopefully) recoverable network failure.
 */
public static void toastFail() {
    // Uses the application context so the toast is not tied to any one activity.
    Toast.makeText(WikipediaApp.getInstance(), R.string.error_network_error_try_again, Toast.LENGTH_LONG).show();
}
/**
 * Walks a throwable's cause chain looking for an exact class match.
 *
 * @param actual The exception object (may be null).
 * @param expected The class you're trying to find, usually tossed by ExceptionImpl.class, for example.
 * @return boolean true if the Throwable type was found in the nested exception chain, else false.
 */
public static boolean throwableContainsSpecificType(Throwable actual, Class expected) {
    // Iterative walk of the cause chain; comparison is exact class identity
    // (not instanceof), matching the "specific type" contract of the name.
    for (Throwable current = actual; current != null; current = current.getCause()) {
        if (current.getClass() == expected) {
            return true;
        }
    }
    return false;
}
/**
 * Calculates the actual font size for the current device, based on an "sp" measurement.
 * @param window The window on which the font will be rendered.
 * @param fontSp Measurement in "sp" units of the font.
 * @return Actual font size for the given sp amount.
 */
public static float getFontSizeFromSp(Window window, float fontSp) {
    final DisplayMetrics metrics = new DisplayMetrics();
    window.getWindowManager().getDefaultDisplay().getMetrics(metrics);
    // NOTE(review): dividing by scaledDensity converts px -> sp; converting
    // sp -> px would multiply. Callers may depend on the current direction --
    // confirm intent before changing.
    return fontSp / metrics.scaledDensity;
}
/**
 * Resolves the resource ID of a theme-dependent attribute (for example, a color value
 * that changes based on the selected theme)
 * @param activity The activity whose theme contains the attribute.
 * @param id Theme-dependent attribute ID to be resolved.
 * @return The actual resource ID of the requested theme-dependent attribute.
 */
public static int getThemedAttributeId(Activity activity, int id) {
    TypedValue tv = new TypedValue();
    // resolveRefs=true follows reference chains until a concrete resource is found.
    activity.getTheme().resolveAttribute(id, tv, true);
    return tv.resourceId;
}
/**
 * Returns the distribution channel for the app from AndroidManifest.xml
 * @param ctx context used to reach the PackageManager
 * @return The channel (the empty string if not defined)
 */
public static String getChannelDescriptor(Context ctx) {
    try {
        ApplicationInfo a = ctx.getPackageManager().getApplicationInfo(ctx.getPackageName(), PackageManager.GET_META_DATA);
        String channel = a.metaData.getString(PrefKeys.getChannel());
        return channel != null ? channel : "";
    } catch (Throwable t) {
        // Deliberate best-effort: any failure (missing metadata, null metaData
        // bundle, package lookup error) degrades to "no channel" rather than crashing.
        return "";
    }
}
/**
 * Sets the distribution channel for the app into SharedPreferences
 * (reads the value from the manifest via getChannelDescriptor, which never
 * returns null, so a non-null value is always stored).
 * @param ctx context used for preferences and manifest lookup
 */
public static void setChannel(Context ctx) {
    SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(ctx);
    String channel = getChannelDescriptor(ctx);
    // commit() writes synchronously; NOTE(review): apply() would be the usual
    // async choice -- confirm whether a synchronous write is required here.
    prefs.edit().putString(PrefKeys.getChannel(), channel).commit();
}
/**
 * Gets the distribution channel for the app from SharedPreferences,
 * populating it from the manifest on first access.
 * @param ctx context used for preferences access
 */
public static String getChannel(Context ctx) {
    SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(ctx);
    String channel = prefs.getString(PrefKeys.getChannel(), null);
    if (channel != null) {
        return channel;
    } else {
        // First access: cache the manifest value, then recurse once. The recursion
        // terminates because setChannel always stores a non-null string ("" at worst).
        setChannel(ctx);
        return getChannel(ctx);
    }
}
}
| creaITve/apps-android-tbrc-works | wikipedia/src/main/java/org/wikipedia/Utils.java | Java | apache-2.0 | 28,052 |
#include <boost/test/unit_test.hpp>
#include <boost/lambda/lambda.hpp>
#include <boost/lambda/bind.hpp>
#include <iostream>
#include <sstream>
#include <3rdparty/zookeeper/ZooKeeper.hpp>
#include <3rdparty/zookeeper/ZooKeeperWatcher.hpp>
#include <3rdparty/zookeeper/ZooKeeperEvent.hpp>
using namespace std;
using namespace boost;
using namespace boost::lambda;
using namespace izenelib::zookeeper;
// Test double for a "search worker" event handler: logs each callback and
// records the path from the most recent node-created / data-changed
// notification in path_, so test cases can assert on which znode fired.
class WorkerSearch : public ZooKeeperEventHandler
{
public:
    virtual void onSessionConnected()
    {
    }
    virtual void onNodeCreated(const std::string& path)
    {
        cout << "[WorkerSearch] onNodeCreated " << path <<endl;
        path_ = path;
    }
    virtual void onNodeDeleted(const std::string& path)
    {
        cout << "[WorkerSearch] onNodeDeleted " << path <<endl;
    }
    virtual void onDataChanged(const std::string& path)
    {
        cout << "[WorkerSearch] onDataChanged " << path <<endl;
        path_ = path;
    }
    // Path delivered by the last onNodeCreated/onDataChanged notification.
    std::string path_;
};
// Second registered handler; only logs notifications. Its presence verifies
// that multiple handlers can be registered on one ZooKeeper client.
class WorkerMining : public ZooKeeperEventHandler
{
public:
    virtual void onSessionConnected()
    {
    }
    virtual void onNodeCreated(const std::string& path)
    {
        cout << "[WorkerMining] onNodeCreated " << path <<endl;
    }
    virtual void onNodeDeleted(const std::string& path)
    {
        cout << "[WorkerMining] onNodeDeleted " << path <<endl;
    }
    virtual void onDataChanged(const std::string& path)
    {
        cout << "[WorkerMining] onDataChanged " << path <<endl;
    }
};
// Connection string for the ZooKeeper ensemble used by all test cases below
// (single local server by default; a multi-host list is kept for reference).
static const std::string gHosts = "localhost:2181"; //"127.16.0.161:2181,127.16.0.162:2181,127.16.0.163:2181";
// Define ENABLE_ZK_TEST to run the cases below against a live ZooKeeper service;
// left undefined, each case returns immediately after printing its banner.
//#define ENABLE_ZK_TEST
BOOST_AUTO_TEST_SUITE( t_zookeeper )
// Not a real assertion case: prints a reminder that a live ZooKeeper service
// must be running at gHosts before the following cases can exercise anything.
BOOST_AUTO_TEST_CASE( check_zookeeper_service )
{
    std::cout << "---> Note: start ZooKeeper Service firstly before test." << std::endl;
    std::cout << " ZooKeeper Service: "<< gHosts << std::endl;
}
// Exercises the basic znode CRUD surface of the ZooKeeper client wrapper:
// create (normal/ephemeral/sequence), exists, get/set data, and children.
// Requires ENABLE_ZK_TEST and a live server; later cases depend on the
// "/SF1" subtree this case leaves behind.
BOOST_AUTO_TEST_CASE( zookeeper_client_basic )
{
    std::cout << "---> Test ZooKeeper Client basic functions" << std::endl;
#ifndef ENABLE_ZK_TEST
    return;
#endif
    std::string hosts = gHosts;
    int recvTimeout = 3000;
    // Zookeeper Client
    ZooKeeper cli(hosts, recvTimeout);
    // Connection is asynchronous; give the session time to establish.
    sleep(2);
    if (!cli.isConnected())
        return;
    // remove all: start from a clean slate (recursive delete of the test subtree)
    cli.deleteZNode("/SF1", true);
    // create
    std::string path = "/SF1";
    std::string data = "distributed search";
    cli.createZNode(path, data, ZooKeeper::ZNODE_NORMAL);
    BOOST_CHECK_EQUAL(cli.isZNodeExists(path), true);
    // Creating an already-existing node must fail.
    BOOST_CHECK_EQUAL(cli.createZNode(path, data, ZooKeeper::ZNODE_NORMAL), false);
    // create ephemeral node -- NOTE(review): intentionally disabled with if(false);
    // the existence check below only passes because the node was never created.
    if (false) // disable
    {
        ZooKeeper tmpCli(hosts, recvTimeout);
        tmpCli.createZNode("/SF1/ephemeral", "", ZooKeeper::ZNODE_EPHEMERAL);
        BOOST_CHECK_EQUAL(tmpCli.isZNodeExists("/SF1/ephemeral"), true);
    }
    //tmpCli exited... (ephemeral node would vanish with its session)
    sleep(1);
    BOOST_CHECK_EQUAL(cli.isZNodeExists("/SF1/ephemeral"), false);
    // create sequence node: server appends a monotonically increasing suffix,
    // so two creates with the same prefix yield two distinct paths.
    cli.createZNode("/SF1/sequence", "", ZooKeeper::ZNODE_SEQUENCE);
    string s1 = cli.getLastCreatedNodePath();
    BOOST_CHECK_EQUAL(cli.isZNodeExists(s1), true);
    cli.createZNode("/SF1/sequence", "", ZooKeeper::ZNODE_SEQUENCE);
    string s2 = cli.getLastCreatedNodePath();
    BOOST_CHECK_EQUAL(cli.isZNodeExists(s2), true);
    cli.deleteZNode(s1);
    cli.deleteZNode(s2);
    // get
    std::string data_get;
    cli.getZNodeData(path, data_get);
    BOOST_CHECK_EQUAL(data_get, data);
    // set
    std::string data2 = "distributed search (sf1-kite)";
    BOOST_CHECK_EQUAL(cli.setZNodeData(path, data2), true);
    cli.getZNodeData(path, data_get);
    BOOST_CHECK_EQUAL(data_get, data2);
    // children
    std::string master = "/SF1/Master";
    std::string worker1 = "/SF1/Worker1";
    std::string worker2 = "/SF1/Worker2";
    BOOST_CHECK_EQUAL(cli.createZNode(master, "this is master node"), true);
    BOOST_CHECK_EQUAL(cli.createZNode(worker1, "remote worker1"), true);
    BOOST_CHECK_EQUAL(cli.createZNode(worker2, "remote worker2"), true);
    std::vector<std::string> children;
    cli.getZNodeChildren("/SF1", children);
    // NOTE(review): this assumes getZNodeChildren returns fully-qualified paths
    // in a stable (sorted/creation) order; the raw ZooKeeper API returns bare,
    // unordered child names -- confirm the wrapper's contract.
    BOOST_CHECK_EQUAL(children.size(), 3);
    BOOST_CHECK_EQUAL(children[0], master);
    BOOST_CHECK_EQUAL(children[1], worker1);
    BOOST_CHECK_EQUAL(children[2], worker2);
    // display
    //cli.showZKNamespace("/SF1");
}
// Exercises watch notifications: data-change watches set via getZNodeData and
// creation watches set via isZNodeExists. Depends on zookeeper_client_basic
// having run first (it reads the "/SF1/Master" data that case wrote), and
// cleans up the whole "/SF1" subtree at the end.
BOOST_AUTO_TEST_CASE( zookeeper_watch )
{
    std::cout << "---> Test ZooKeeper Watcher" << std::endl;
#ifndef ENABLE_ZK_TEST
    return;
#endif
    // Client
    std::string hosts = gHosts;
    int recvTimeout = 2000;
    ZooKeeper cli(hosts, recvTimeout);
    sleep(1);
    if (!cli.isConnected())
        return;
    // set event handlers for watcher
    WorkerSearch wkSearch;
    WorkerMining wkMining;
    cli.registerEventHandler(&wkSearch);
    cli.registerEventHandler(&wkMining);
    // 1. get and watch znode for changes
    std::string path = "/SF1/Master";
    std::string data_get;
    cli.getZNodeData(path, data_get, ZooKeeper::WATCH);
    BOOST_CHECK_EQUAL(data_get, "this is master node"); // set in former test case
    cli.setZNodeData(path, "master data changed!");
    sleep(1); //ensure watcher notified
    // master was notified by watcher on znode changed
    BOOST_CHECK_EQUAL(wkSearch.path_, path);
    cli.getZNodeData(wkSearch.path_, data_get);
    BOOST_CHECK_EQUAL(data_get, "master data changed!");
    // 2. check exists and watch znode for creation
    std::string path2 = "/NotExistedNode";
    cli.deleteZNode(path2, true);
    BOOST_CHECK_EQUAL(cli.isZNodeExists(path2, ZooKeeper::WATCH), false);
    cli.createZNode(path2, "nodata");
    sleep(1); //ensure watcher notified
    // master was notified by watcher on znode created
    BOOST_CHECK_EQUAL(wkSearch.path_, path2);
    cli.getZNodeData(wkSearch.path_, data_get);
    BOOST_CHECK_EQUAL(data_get, "nodata");
    // clear test data from zookeeper servers
    cli.deleteZNode(path2, true);
    cli.deleteZNode("/SF1", true);
}
BOOST_AUTO_TEST_SUITE_END()
| izenecloud/izenelib | test/3rdparty/zookeeper/t_zookeeper.cpp | C++ | apache-2.0 | 6,019 |
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Generated code. DO NOT EDIT!
namespace Google.Cloud.Compute.V1.Snippets
{
// [START compute_v1_generated_RegionInstanceGroupManagers_ListPerInstanceConfigs_async]
using Google.Api.Gax;
using Google.Cloud.Compute.V1;
using System;
using System.Linq;
using System.Threading.Tasks;
public sealed partial class GeneratedRegionInstanceGroupManagersClientSnippets
{
    /// <summary>Snippet for ListPerInstanceConfigsAsync</summary>
    /// <remarks>
    /// This snippet has been automatically generated for illustrative purposes only.
    /// It may require modifications to work in your environment.
    /// </remarks>
    public async Task ListPerInstanceConfigsRequestObjectAsync()
    {
        // Create client
        RegionInstanceGroupManagersClient regionInstanceGroupManagersClient = await RegionInstanceGroupManagersClient.CreateAsync();
        // Initialize request argument(s)
        // NOTE: the empty strings below are placeholders; supply real project,
        // region, and instance-group-manager identifiers before running.
        ListPerInstanceConfigsRegionInstanceGroupManagersRequest request = new ListPerInstanceConfigsRegionInstanceGroupManagersRequest
        {
            Region = "",
            OrderBy = "",
            Project = "",
            InstanceGroupManager = "",
            Filter = "",
            ReturnPartialSuccess = false,
        };
        // Make the request
        PagedAsyncEnumerable<RegionInstanceGroupManagersListInstanceConfigsResp, PerInstanceConfig> response = regionInstanceGroupManagersClient.ListPerInstanceConfigsAsync(request);
        // Iterate over all response items, lazily performing RPCs as required
        await response.ForEachAsync((PerInstanceConfig item) =>
        {
            // Do something with each item
            Console.WriteLine(item);
        });
        // Or iterate over pages (of server-defined size), performing one RPC per page
        await response.AsRawResponses().ForEachAsync((RegionInstanceGroupManagersListInstanceConfigsResp page) =>
        {
            // Do something with each page of items
            Console.WriteLine("A page of results:");
            foreach (PerInstanceConfig item in page)
            {
                // Do something with each item
                Console.WriteLine(item);
            }
        });
        // Or retrieve a single page of known size (unless it's the final page), performing as many RPCs as required
        int pageSize = 10;
        Page<PerInstanceConfig> singlePage = await response.ReadPageAsync(pageSize);
        // Do something with the page of items
        Console.WriteLine($"A page of {pageSize} results (unless it's the final page):");
        foreach (PerInstanceConfig item in singlePage)
        {
            // Do something with each item
            Console.WriteLine(item);
        }
        // Store the pageToken, for when the next page is required.
        string nextPageToken = singlePage.NextPageToken;
    }
}
// [END compute_v1_generated_RegionInstanceGroupManagers_ListPerInstanceConfigs_async]
}
| googleapis/google-cloud-dotnet | apis/Google.Cloud.Compute.V1/Google.Cloud.Compute.V1.GeneratedSnippets/RegionInstanceGroupManagersClient.ListPerInstanceConfigsRequestObjectAsyncSnippet.g.cs | C# | apache-2.0 | 3,764 |
<?php
/*
* Copyright 2016 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
/**
 * The "sinks" collection of methods.
 * Typical usage is:
 *  <code>
 *   $loggingService = new Google_Service_Logging(...);
 *   $sinks = $loggingService->sinks;
 *  </code>
 */
class Google_Service_Logging_Resource_OrganizationsSinks extends Google_Service_Resource
{
  /**
   * Creates a sink. (sinks.create)
   *
   * @param string $parent Required. The resource in which to create the sink.
   * Example: `"projects/my-project-id"`. The new sink must be provided in the
   * request.
   * @param Google_Service_Logging_LogSink $postBody
   * @param array $optParams Optional parameters.
   *
   * @opt_param bool uniqueWriterIdentity Optional. Whether the sink will have a
   * dedicated service account returned in the sink's writer_identity. Set this
   * field to be true to export logs from one project to a different project. This
   * field is ignored for non-project sinks (e.g. organization sinks) because
   * those sinks are required to have dedicated service accounts.
   * @return Google_Service_Logging_LogSink
   */
  public function create($parent, Google_Service_Logging_LogSink $postBody, $optParams = array())
  {
    // Optional parameters are merged over the required ones before dispatch.
    $params = array('parent' => $parent, 'postBody' => $postBody);
    $params = array_merge($params, $optParams);
    return $this->call('create', array($params), "Google_Service_Logging_LogSink");
  }
  /**
   * Deletes a sink. (sinks.delete)
   *
   * @param string $sinkName Required. The resource name of the sink to delete,
   * including the parent resource and the sink identifier. Example: `"projects
   * /my-project-id/sinks/my-sink-id"`. It is an error if the sink does not
   * exist.
   * @param array $optParams Optional parameters.
   * @return Google_Service_Logging_LoggingEmpty
   */
  public function delete($sinkName, $optParams = array())
  {
    $params = array('sinkName' => $sinkName);
    $params = array_merge($params, $optParams);
    return $this->call('delete', array($params), "Google_Service_Logging_LoggingEmpty");
  }
  /**
   * Gets a sink. (sinks.get)
   *
   * @param string $sinkName Required. The resource name of the sink to return.
   * Example: `"projects/my-project-id/sinks/my-sink-id"`.
   * @param array $optParams Optional parameters.
   * @return Google_Service_Logging_LogSink
   */
  public function get($sinkName, $optParams = array())
  {
    $params = array('sinkName' => $sinkName);
    $params = array_merge($params, $optParams);
    return $this->call('get', array($params), "Google_Service_Logging_LogSink");
  }
  /**
   * Lists sinks. (sinks.listOrganizationsSinks)
   *
   * @param string $parent Required. The resource name where this sink was
   * created. Example: `"projects/my-logging-project"`.
   * @param array $optParams Optional parameters.
   *
   * @opt_param int pageSize Optional. The maximum number of results to return
   * from this request. Non-positive values are ignored. The presence of
   * `nextPageToken` in the response indicates that more results might be
   * available.
   * @opt_param string pageToken Optional. If present, then retrieve the next
   * batch of results from the preceding call to this method. `pageToken` must be
   * the value of `nextPageToken` from the previous response. The values of other
   * method parameters should be identical to those in the previous call.
   * @return Google_Service_Logging_ListSinksResponse
   */
  public function listOrganizationsSinks($parent, $optParams = array())
  {
    $params = array('parent' => $parent);
    $params = array_merge($params, $optParams);
    // The wire method is named 'list'; 'list' is reserved in PHP, hence the
    // longer PHP-side method name above.
    return $this->call('list', array($params), "Google_Service_Logging_ListSinksResponse");
  }
  /**
   * Updates or creates a sink. (sinks.update)
   *
   * @param string $sinkName Required. The resource name of the sink to update,
   * including the parent resource and the sink identifier. If the sink does not
   * exist, this method creates the sink. Example: `"projects/my-project-id/sinks
   * /my-sink-id"`.
   * @param Google_Service_Logging_LogSink $postBody
   * @param array $optParams Optional parameters.
   *
   * @opt_param bool uniqueWriterIdentity Optional. Whether the sink will have a
   * dedicated service account returned in the sink's writer_identity. Set this
   * field to be true to export logs from one project to a different project. This
   * field is ignored for non-project sinks (e.g. organization sinks) because
   * those sinks are required to have dedicated service accounts.
   * @return Google_Service_Logging_LogSink
   */
  public function update($sinkName, Google_Service_Logging_LogSink $postBody, $optParams = array())
  {
    $params = array('sinkName' => $sinkName, 'postBody' => $postBody);
    $params = array_merge($params, $optParams);
    return $this->call('update', array($params), "Google_Service_Logging_LogSink");
  }
}
| googleapis/discovery-artifact-manager | clients/php/google-api-php-client-services/src/Google/Service/Logging/Resource/OrganizationsSinks.php | PHP | apache-2.0 | 5,423 |
/***
Copyright (c) 2008-2012 CommonsWare, LLC
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy
of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless required
by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License.
Covered in detail in the book _The Busy Coder's Guide to Android Development_
https://commonsware.com/Android
*/
package com.commonsware.abj.interp;
import android.app.Activity;
import android.app.IntentService;
import android.app.PendingIntent;
import android.content.Intent;
import android.os.Bundle;
import android.util.Log;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.HashMap;
/**
 * IntentService that loads a script interpreter by class name (supplied as the
 * Intent action), runs a script bundle on the service's background thread, and
 * delivers the outcome either via an ordered broadcast or a PendingIntent.
 */
public class InterpreterService extends IntentService {
  public static final String SCRIPT="_script";
  public static final String BUNDLE="_bundle";
  public static final String RESULT="_result";
  public static final String BROADCAST_ACTION="com.commonsware.abj.interp.BROADCAST_ACTION";
  public static final String BROADCAST_PACKAGE="com.commonsware.abj.interp.BROADCAST_PACKAGE";
  public static final String PENDING_RESULT="com.commonsware.abj.interp.PENDING_RESULT";
  public static final String RESULT_CODE="com.commonsware.abj.interp.RESULT_CODE";
  public static final String ERROR="com.commonsware.abj.interp.ERROR";
  public static final String TRACE="com.commonsware.abj.interp.TRACE";
  public static final int SUCCESS=1337;
  public static final int FAILURE=-1;
  // Cache of interpreter instances keyed by fully-qualified class name, so each
  // interpreter is constructed at most once for the lifetime of this service.
  private HashMap<String, I_Interpreter> interpreters=new HashMap<String, I_Interpreter>();
  public InterpreterService() {
    super("InterpreterService");
  }
  /**
   * Resolves the interpreter named by the Intent action (reflectively, cached),
   * executes the script bundle, and reports success or failure to the caller.
   */
  @Override
  protected void onHandleIntent(Intent intent) {
    String action=intent.getAction();
    I_Interpreter interpreter=interpreters.get(action);
    if (interpreter==null) {
      try {
        // The action string is the interpreter's class name; instantiate via
        // reflection so interpreters can be plugged in without compile-time deps.
        interpreter=(I_Interpreter)Class.forName(action).newInstance();
        interpreters.put(action, interpreter);
      }
      catch (Throwable t) {
        Log.e("InterpreterService", "Error creating interpreter", t);
      }
    }
    if (interpreter==null) {
      failure(intent, "Could not create interpreter: "+intent.getAction());
    }
    else {
      try {
        success(intent, interpreter.executeScript(intent.getBundleExtra(BUNDLE)));
      }
      catch (Throwable t) {
        Log.e("InterpreterService", "Error executing script", t);
        try {
          failure(intent, t);
        }
        catch (Throwable t2) {
          // Even reporting the failure can fail (e.g. unparcelable extras);
          // log rather than crash the service thread.
          Log.e("InterpreterService",
                "Error returning exception to client",
                t2);
        }
      }
    }
  }
  // Packages a successful script result and delivers it to the caller.
  private void success(Intent intent, Bundle result) {
    Intent data=new Intent();
    data.putExtras(result);
    data.putExtra(RESULT_CODE, SUCCESS);
    send(intent, data);
  }
  // Delivers a failure described by a plain message (no stack trace available).
  private void failure(Intent intent, String message) {
    Intent data=new Intent();
    data.putExtra(ERROR, message);
    data.putExtra(RESULT_CODE, FAILURE);
    send(intent, data);
  }
  // Delivers a failure caused by a throwable, including its stack trace text.
  private void failure(Intent intent, Throwable t) {
    Intent data=new Intent();
    data.putExtra(ERROR, t.getMessage());
    data.putExtra(TRACE, getStackTrace(t));
    data.putExtra(RESULT_CODE, FAILURE);
    send(intent, data);
  }
  /**
   * Routes the result back to the client: if the request named a broadcast
   * action, send a (package-restricted) broadcast; otherwise fall back to the
   * request's PendingIntent, if any. A client that supplied neither gets nothing.
   */
  private void send(Intent intent, Intent data) {
    String broadcast=intent.getStringExtra(BROADCAST_ACTION);
    if (broadcast==null) {
      PendingIntent pi=(PendingIntent)intent.getParcelableExtra(PENDING_RESULT);
      if (pi!=null) {
        try {
          pi.send(this, Activity.RESULT_OK, data);
        }
        catch (PendingIntent.CanceledException e) {
          // no-op -- client must be gone
        }
      }
    }
    else {
      // Restrict delivery to the requesting package so results are not leaked
      // to arbitrary receivers.
      data.setPackage(intent.getStringExtra(BROADCAST_PACKAGE));
      data.setAction(broadcast);
      sendBroadcast(data);
    }
  }
  // Renders a throwable's stack trace to a single string for transport in an extra.
  private String getStackTrace(Throwable t) {
    final StringWriter result=new StringWriter();
    final PrintWriter printWriter=new PrintWriter(result);
    t.printStackTrace(printWriter);
    return(result.toString());
  }
}
} | alexsh/cw-omnibus | JVM/InterpreterService/src/com/commonsware/abj/interp/InterpreterService.java | Java | apache-2.0 | 4,412 |
package com.pacoapp.paco.os;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.pacoapp.paco.R;
import com.pacoapp.paco.UserPreferences;
import android.app.Activity;
import android.content.ContentResolver;
import android.content.ContentValues;
import android.content.Context;
import android.content.Intent;
import android.database.Cursor;
import android.media.Ringtone;
import android.media.RingtoneManager;
import android.net.Uri;
import android.os.Build;
import android.provider.MediaStore;
public class RingtoneUtil {
private static Logger Log = LoggerFactory.getLogger(RingtoneUtil.class);
private static final String RINGTONE_TITLE_COLUMN_NAME = "title";
private static final String PACO_BARK_RINGTONE_TITLE = "Paco Bark";
private static final String BARK_RINGTONE_FILENAME = "deepbark_trial.mp3";
public static final String ALTERNATE_RINGTONE_FILENAME = "PBSRingtone_2.mp3";
public static final String ALTERNATE_RINGTONE_TITLE = "Paco Alternate Alert";
public static final String ALTERNATE_RINGTONE_TITLE_V2 = "Paco Alternate Alert Tone";
public static final String ALTERNATE_RINGTONE_TITLE_V2_FULLPATH = "/assets/ringtone/Paco Alternate Alert Tone";
private Context context;
private UserPreferences userPreferences;
public static final int RINGTONE_REQUESTCODE = 945;
public RingtoneUtil(Context context) {
super();
this.context = context.getApplicationContext();
}
public void XXXinstallPacoBarkRingtone() {
userPreferences = new UserPreferences(context);
if (userPreferences.hasInstalledPacoBarkRingtone()) {
return;
}
File f = copyRingtoneFromAssetsToSdCard(BARK_RINGTONE_FILENAME);
if (f == null) {
return;
}
insertRingtoneFile(f);
}
public void installPacoBarkRingtone() {
UserPreferences userPreferences = new UserPreferences(context);
if (!userPreferences.hasInstalledAlternateRingtone()) {
installRingtone(userPreferences, ALTERNATE_RINGTONE_FILENAME, ALTERNATE_RINGTONE_TITLE, true);
}
// only try once
userPreferences.setAlternateRingtoneInstalled();
if (!userPreferences.hasInstalledPacoBarkRingtone()) {
installRingtone(userPreferences, BARK_RINGTONE_FILENAME, PACO_BARK_RINGTONE_TITLE, false);
}
// only try once
userPreferences.setPacoBarkRingtoneInstalled();
}
public void installRingtone(UserPreferences userPreferences, String ringtoneFilename, String ringtoneTitle, boolean altRingtone) {
File f = copyRingtoneFromAssetsToSdCard(ringtoneFilename);
if (f == null) {
return;
}
ContentValues values = createBarkRingtoneDatabaseEntry(f, ringtoneTitle);
Uri uri = MediaStore.Audio.Media.getContentUriForPath(f.getAbsolutePath());
ContentResolver mediaStoreContentProvider = context.getContentResolver();
Cursor existingRingtoneCursor = mediaStoreContentProvider.query(uri, null, null, null, null); // Note: i want to just retrieve MediaStore.MediaColumns.TITLE and to search on the match, but it is returning null for the TITLE value!!!
Cursor c = mediaStoreContentProvider.query(uri, null, null, null, null);
boolean alreadyInstalled = false;
while (c.moveToNext()) {
int titleColumnIndex = c.getColumnIndex(RINGTONE_TITLE_COLUMN_NAME);
String existingRingtoneTitle = c.getString(titleColumnIndex);
if (existingRingtoneTitle.equals(ringtoneTitle)) {
alreadyInstalled = true;
}
}
existingRingtoneCursor.close();
if (!alreadyInstalled) {
Uri newUri = mediaStoreContentProvider.insert(uri, values);
if (newUri != null) {
if (!altRingtone) {
userPreferences.setRingtoneUri(newUri.toString());
userPreferences.setRingtoneName(ringtoneTitle);
} else {
userPreferences.setAltRingtoneUri(newUri.toString());
userPreferences.setAltRingtoneName(ALTERNATE_RINGTONE_TITLE);
}
}
}
}
private File copyRingtoneFromAssetsToSdCard(String ringtoneFilename) {
InputStream fis = null;
OutputStream fos = null;
try {
fis = context.getAssets().open(ringtoneFilename);
if (fis == null) {
return null;
}
File path = new File(android.os.Environment.getExternalStorageDirectory().getAbsolutePath()
+ "/Android/data/" + context.getPackageName() + "/");
if (!path.exists()) {
path.mkdirs();
}
File f = new File(path, ringtoneFilename);
fos = new FileOutputStream(f);
byte[] buf = new byte[1024];
int len;
while ((len = fis.read(buf)) > 0) {
fos.write(buf, 0, len);
}
return f;
} catch (FileNotFoundException e) {
Log.error("Could not create ringtone file on sd card. Error = " + e.getMessage());
} catch (IOException e) {
Log.error("Either Could not open ringtone from assets. Or could not write to sd card. Error = " + e.getMessage());
return null;
} finally {
if (fos != null) {
try {
fos.close();
} catch (IOException e) {
Log.error("could not close sd card file handle. Error = " + e.getMessage());
}
}
if (fis != null) {
try {
fis.close();
} catch (IOException e) {
Log.error("could not close asset file handle. Error = " + e.getMessage());
}
}
}
return null;
}
private ContentValues createBarkRingtoneDatabaseEntry(File f, String ringtoneTitle) {
ContentValues values = new ContentValues();
values.put(MediaStore.MediaColumns.DATA, f.getAbsolutePath());
values.put(MediaStore.MediaColumns.TITLE, ringtoneTitle);
values.put(MediaStore.MediaColumns.SIZE, f.length());
values.put(MediaStore.MediaColumns.MIME_TYPE, "audio/mp3");
values.put(MediaStore.Audio.Media.ARTIST, "Paco");
// values.put(MediaStore.Audio.Media.DURATION, ""); This is not needed
values.put(MediaStore.Audio.Media.IS_RINGTONE, true);
values.put(MediaStore.Audio.Media.IS_NOTIFICATION, true);
values.put(MediaStore.Audio.Media.IS_ALARM, false);
values.put(MediaStore.Audio.Media.IS_MUSIC, false);
return values;
}
/**
* From Stackoverflow issue:
* http://stackoverflow.com/questions/22184729/sqliteconstraintexception-thrown-when-trying-to-insert
* @param filename
* @return
*/
Uri insertRingtoneFile(File filename) {
Uri toneUri = MediaStore.Audio.Media.getContentUriForPath(filename.getAbsolutePath());
// SDK 11+ has the Files store, which already indexed... everything
// We need the file's URI though, so we'll be forced to query
if (Build.VERSION.SDK_INT >= 11) {
Uri uri = null;
Uri filesUri = MediaStore.Files.getContentUri("external");
String[] projection = {MediaStore.MediaColumns._ID, MediaStore.MediaColumns.TITLE};
String selection = MediaStore.MediaColumns.DATA + " = ?";
String[] args = {filename.getAbsolutePath()};
Cursor c = context.getContentResolver().query(filesUri, projection, selection, args, null);
// We expect a single unique record to be returned, since _data is unique
if (c.getCount() == 1) {
c.moveToFirst();
long rowId = c.getLong(c.getColumnIndex(MediaStore.MediaColumns._ID));
String title = c.getString(c.getColumnIndex(MediaStore.MediaColumns.TITLE));
c.close();
uri = MediaStore.Files.getContentUri("external", rowId);
// Since all this stuff was added automatically, it might not have the metadata you want,
// like Title, or Artist, or IsRingtone
if (!title.equals(PACO_BARK_RINGTONE_TITLE)) {
ContentValues values = new ContentValues();
values.put(MediaStore.MediaColumns.TITLE, PACO_BARK_RINGTONE_TITLE);
if (context.getContentResolver().update(toneUri, values, null, null) < 1) {
Log.error("could not update ringtome metadata");
}
// Apparently this is best practice, although I have no idea what the Media Scanner
// does with the new data
context.sendBroadcast(new Intent(Intent.ACTION_MEDIA_SCANNER_SCAN_FILE, toneUri));
}
}
else if (c.getCount() == 0) {
// I suppose the MediaScanner hasn't run yet, we'll insert it
// ... ommitted
}
else {
throw new UnsupportedOperationException(); // it's expected to be unique!
}
return uri;
}
// For the legacy way, I'm assuming that the file we're working with is in a .nomedia
// folder, so we are the ones who created it in the MediaStore. If this isn't the case,
// consider querying for it and updating the existing record. You should store the URIs
// you create in case you need to delete them from the MediaStore, otherwise you're a
// litter bug :P
else {
ContentValues values = new ContentValues();
values.put(MediaStore.MediaColumns.DATA, filename.getAbsolutePath());
values.put(MediaStore.MediaColumns.SIZE, filename.length());
values.put(MediaStore.MediaColumns.DISPLAY_NAME, filename.getName());
values.put(MediaStore.MediaColumns.TITLE, PACO_BARK_RINGTONE_TITLE);
values.put(MediaStore.MediaColumns.MIME_TYPE, "audio/mpeg3");
values.put(MediaStore.Audio.Media.ARTIST, "Paco App");
values.put(MediaStore.Audio.Media.IS_RINGTONE, true);
values.put(MediaStore.Audio.Media.IS_NOTIFICATION, true);
values.put(MediaStore.Audio.Media.IS_ALARM, true);
values.put(MediaStore.Audio.Media.IS_MUSIC, false);
Uri newToneUri = context.getContentResolver().insert(MediaStore.Audio.Media.EXTERNAL_CONTENT_URI, values);
userPreferences.setRingtoneUri(newToneUri.toString());
userPreferences.setRingtoneName(PACO_BARK_RINGTONE_TITLE);
userPreferences.setPacoBarkRingtoneInstalled();
// Apparently this is best practice, although I have no idea what the Media Scanner
// does with the new data
context.sendBroadcast(new Intent(Intent.ACTION_MEDIA_SCANNER_SCAN_FILE, newToneUri));
return newToneUri;
}
}
public static boolean isOkRingtoneResult(int requestCode, int resultCode) {
return requestCode == RINGTONE_REQUESTCODE && resultCode == Activity.RESULT_OK;
}
public static void updateRingtone(Intent data, final Activity activity) {
Uri uri = data.getParcelableExtra(RingtoneManager.EXTRA_RINGTONE_PICKED_URI);
final UserPreferences userPreferences = new UserPreferences(activity);
if (uri != null) {
userPreferences.setRingtoneUri(uri.toString());
String name= getNameOfRingtone(activity, uri);
userPreferences.setRingtoneName(name);
} else {
userPreferences.clearRingtone();
}
}
public static void launchRingtoneChooserFor(final Activity activity) {
UserPreferences userPreferences = new UserPreferences(activity);
String uri = userPreferences.getRingtoneUri();
Intent intent = new Intent(RingtoneManager.ACTION_RINGTONE_PICKER);
intent.putExtra(RingtoneManager.EXTRA_RINGTONE_TYPE, RingtoneManager.TYPE_NOTIFICATION);
intent.putExtra(RingtoneManager.EXTRA_RINGTONE_TITLE, R.string.select_signal_tone);
intent.putExtra(RingtoneManager.EXTRA_RINGTONE_SHOW_SILENT, false);
intent.putExtra(RingtoneManager.EXTRA_RINGTONE_SHOW_DEFAULT, true);
if (uri != null) {
intent.putExtra(RingtoneManager.EXTRA_RINGTONE_EXISTING_URI, Uri.parse(uri));
} else {
intent.putExtra(RingtoneManager.EXTRA_RINGTONE_EXISTING_URI,
RingtoneManager.getDefaultUri(RingtoneManager.TYPE_NOTIFICATION));
}
activity.startActivityForResult(intent, RingtoneUtil.RINGTONE_REQUESTCODE);
}
public static String getNameOfRingtone(Context context, Uri uri) {
Ringtone ringtone = RingtoneManager.getRingtone(context, uri);
return ringtone.getTitle(context);
}
}
| hlecuanda/paco | Paco/src/com/pacoapp/paco/os/RingtoneUtil.java | Java | apache-2.0 | 12,217 |
#region
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Reflection;
using Tabster.Core.Plugins;
using Tabster.Utilities;
#endregion
namespace Tabster.Plugins
{
public class PluginInstance
{
private bool _enabled;
private List<Type> _types = new List<Type>();
public PluginInstance(Assembly assembly, ITabsterPlugin plugin, FileInfo fileInfo)
{
Assembly = assembly;
Plugin = plugin;
FileInfo = fileInfo;
}
public Assembly Assembly { get; private set; }
public ITabsterPlugin Plugin { get; private set; }
public FileInfo FileInfo { get; private set; }
public Boolean Enabled
{
get { return _enabled; }
set
{
if (value)
{
try
{
Plugin.Activate();
}
catch (Exception ex)
{
Logging.GetLogger().Error(string.Format("Error occured while activating plugin: {0}", FileInfo.FullName), ex);
}
}
else
{
try
{
Plugin.Deactivate();
}
catch (Exception ex)
{
Logging.GetLogger().Error(string.Format("Error occured while deactivating plugin: {0}", FileInfo.FullName), ex);
}
}
_enabled = value;
}
}
public IEnumerable<T> GetClassInstances<T>()
{
var instances = new List<T>();
var cType = typeof (T);
_types.Clear();
try
{
_types = Assembly.GetTypes().Where(x => x.IsPublic && !x.IsAbstract && !x.IsInterface).ToList();
}
catch (Exception ex)
{
Logging.GetLogger().Error(string.Format("Error occured while loading plugin types: {0}", FileInfo.FullName), ex);
}
foreach (var type in _types)
{
if (cType.IsAssignableFrom(type))
{
try
{
var instance = (T) Activator.CreateInstance(type);
instances.Add(instance);
}
catch (Exception ex)
{
Logging.GetLogger().Error(string.Format("Error occured while creating plugin type instance: '{0}' in {1}", type.FullName, FileInfo.FullName), ex);
}
}
}
return instances;
}
public bool Contains(Type type)
{
return _types.Contains(type);
}
}
} | GetTabster/Tabster | Tabster/Plugins/PluginInstance.cs | C# | apache-2.0 | 2,948 |
/*
* Copyright 2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.restassured.config;
import io.restassured.internal.common.assertion.AssertParameter;
import static io.restassured.config.ParamConfig.UpdateStrategy.MERGE;
import static io.restassured.config.ParamConfig.UpdateStrategy.REPLACE;
/**
* Param config determines how different parameter types in REST Assured should be updated when adding multiple parameters
* of the same type with the same name.
*/
public class ParamConfig implements Config {

    private final boolean userConfigured;
    private final UpdateStrategy queryParamStrategy;
    private final UpdateStrategy formParamStrategy;
    private final UpdateStrategy requestParamStrategy;

    /**
     * Creates a non-user-configured instance where every parameter type uses
     * {@link UpdateStrategy#MERGE}.
     */
    public ParamConfig() {
        this(MERGE, MERGE, MERGE, false);
    }

    /**
     * Creates a user-configured instance with one update strategy per parameter type.
     *
     * @param queryParamsUpdateStrategy      strategy applied to query parameters
     * @param formParamsUpdateStrategy       strategy applied to form parameters
     * @param requestParameterUpdateStrategy strategy applied to request parameters
     */
    public ParamConfig(UpdateStrategy queryParamsUpdateStrategy,
                       UpdateStrategy formParamsUpdateStrategy,
                       UpdateStrategy requestParameterUpdateStrategy) {
        this(queryParamsUpdateStrategy, formParamsUpdateStrategy, requestParameterUpdateStrategy, true);
    }

    private ParamConfig(UpdateStrategy queryParamStrategy, UpdateStrategy formParamStrategy,
                        UpdateStrategy requestParamStrategy, boolean userConfigured) {
        AssertParameter.notNull(queryParamStrategy, "Query param update strategy");
        AssertParameter.notNull(requestParamStrategy, "Request param update strategy");
        AssertParameter.notNull(formParamStrategy, "Form param update strategy");
        this.queryParamStrategy = queryParamStrategy;
        this.formParamStrategy = formParamStrategy;
        this.requestParamStrategy = requestParamStrategy;
        this.userConfigured = userConfigured;
    }

    /**
     * @return a user-configured copy where every parameter type merges values
     */
    public ParamConfig mergeAllParameters() {
        return new ParamConfig(MERGE, MERGE, MERGE, true);
    }

    /**
     * @return a user-configured copy where every parameter type replaces values
     */
    public ParamConfig replaceAllParameters() {
        return new ParamConfig(REPLACE, REPLACE, REPLACE, true);
    }

    /**
     * @param updateStrategy strategy to apply to form parameters
     * @return a copy with the form parameter strategy replaced
     */
    public ParamConfig formParamsUpdateStrategy(UpdateStrategy updateStrategy) {
        return new ParamConfig(queryParamStrategy, updateStrategy, requestParamStrategy, true);
    }

    /**
     * Sets the strategy for "request parameters" — parameters that become form
     * or query parameters depending on the HTTP method, e.g.
     * {@code given().param("name", "value").when().get("/x")}.
     *
     * @param updateStrategy strategy to apply to request parameters
     * @return a copy with the request parameter strategy replaced
     */
    public ParamConfig requestParamsUpdateStrategy(UpdateStrategy updateStrategy) {
        return new ParamConfig(queryParamStrategy, formParamStrategy, updateStrategy, true);
    }

    /**
     * @param updateStrategy strategy to apply to query parameters
     * @return a copy with the query parameter strategy replaced
     */
    public ParamConfig queryParamsUpdateStrategy(UpdateStrategy updateStrategy) {
        return new ParamConfig(updateStrategy, formParamStrategy, requestParamStrategy, true);
    }

    /**
     * @return the strategy currently applied to form parameters
     */
    public UpdateStrategy formParamsUpdateStrategy() {
        return formParamStrategy;
    }

    /**
     * @return the strategy currently applied to request parameters
     */
    public UpdateStrategy requestParamsUpdateStrategy() {
        return requestParamStrategy;
    }

    /**
     * @return the strategy currently applied to query parameters
     */
    public UpdateStrategy queryParamsUpdateStrategy() {
        return queryParamStrategy;
    }

    /**
     * {@inheritDoc}
     */
    public boolean isUserConfigured() {
        return userConfigured;
    }

    /**
     * Controls what happens when several parameters of the same type share a name.
     */
    public enum UpdateStrategy {
        /**
         * Values accumulate: parameters with the same name are merged.
         */
        MERGE,
        /**
         * The newest value wins: parameters with the same name are replaced.
         */
        REPLACE
    }

    /**
     * @return a fresh default instance; factory alternative to calling "new". Mainly for syntactic sugar.
     */
    public static ParamConfig paramConfig() {
        return new ParamConfig();
    }

    /**
     * Syntactic sugar.
     *
     * @return The same ParamConfig instance.
     */
    public ParamConfig and() {
        return this;
    }

    /**
     * Syntactic sugar.
     *
     * @return The same ParamConfig instance.
     */
    public ParamConfig with() {
        return this;
    }
}
| jayway/rest-assured | rest-assured/src/main/java/io/restassured/config/ParamConfig.java | Java | apache-2.0 | 6,343 |
package config
import (
"fmt"
"os"
"testing"
"github.com/jgsqware/clairctl/test"
"github.com/spf13/viper"
"gopkg.in/yaml.v2"
)
// defaultValues is the YAML form of the configuration expected from values()
// when Init runs without any clairctl.yml present on disk.
const defaultValues = `
clair:
  uri: http://localhost
  port: 6060
  healthport: 6061
  report:
    path: reports
    format: html
auth:
  insecureskipverify: true
clairctl:
  ip: ""
  tempfolder: /tmp/clairctl
  port: 0
`

// customValues is written to a temporary clairctl.yml by the TestInitCustom*
// tests and must round-trip unchanged through Init/values().
const customValues = `
clair:
  uri: http://clair
  port: 6061
  healthport: 6062
  report:
    path: reports/test
    format: json
auth:
  insecureskipverify: false
clairctl:
  ip: "localhost"
  tempfolder: /tmp/clairctl/test
  port: 64157
`
// TestInitDefault checks that Init without a config file yields the built-in defaults.
func TestInitDefault(t *testing.T) {
	Init("", "INFO")

	var want config
	if err := yaml.Unmarshal([]byte(defaultValues), &want); err != nil {
		t.Fatal(err)
	}

	if got := values(); got != want {
		t.Error("Default values are not correct")
	}
	viper.Reset()
}
// TestInitCustomLocal checks that a clairctl.yml in the working directory is picked up.
func TestInitCustomLocal(t *testing.T) {
	tmpfile := test.CreateConfigFile(customValues, "clairctl.yml", ".")
	defer os.Remove(tmpfile) // clean up
	fmt.Println(tmpfile)

	Init("", "INFO")

	var want config
	if err := yaml.Unmarshal([]byte(customValues), &want); err != nil {
		t.Fatal(err)
	}

	if got := values(); got != want {
		t.Error("values are not correct")
	}
	viper.Reset()
}
// TestInitCustomHome checks that a clairctl.yml in the clairctl home directory is picked up.
func TestInitCustomHome(t *testing.T) {
	tmpfile := test.CreateConfigFile(customValues, "clairctl.yml", ClairctlHome())
	defer os.Remove(tmpfile) // clean up
	fmt.Println(tmpfile)

	Init("", "INFO")

	var want config
	if err := yaml.Unmarshal([]byte(customValues), &want); err != nil {
		t.Fatal(err)
	}

	if got := values(); got != want {
		t.Error("values are not correct")
	}
	viper.Reset()
}
// TestInitCustom checks that an explicitly passed config file path is honored.
func TestInitCustom(t *testing.T) {
	tmpfile := test.CreateConfigFile(customValues, "clairctl.yml", "/tmp")
	defer os.Remove(tmpfile) // clean up
	fmt.Println(tmpfile)

	Init(tmpfile, "INFO")

	var want config
	if err := yaml.Unmarshal([]byte(customValues), &want); err != nil {
		t.Fatal(err)
	}

	if got := values(); got != want {
		t.Error("values are not correct")
	}
	viper.Reset()
}
| jdel/clairctl | config/config_test.go | GO | apache-2.0 | 2,090 |
package com.beecavegames.common.handlers.admin;
import java.io.Serializable;
import org.joda.time.DateTime;
import lombok.AllArgsConstructor;
/**
 * Serializable record of a player state backup: which bee (player) it belongs
 * to and when it was taken.
 */
public class PlayerBackupMetadata implements Serializable {
  private static final long serialVersionUID = -485633840234547452L;

  public long beeId;
  public DateTime timestamp;

  // Explicit constructor replacing Lombok's @AllArgsConstructor; parameter
  // order matches field declaration order, exactly as Lombok generated it.
  public PlayerBackupMetadata(long beeId, DateTime timestamp) {
    this.beeId = beeId;
    this.timestamp = timestamp;
  }
}
| sgmiller/hiveelements | core/src/main/java/common/handlers/admin/PlayerBackupMetadata.java | Java | apache-2.0 | 349 |
#ifndef AUTOBOOST_MPL_FOR_EACH_HPP_INCLUDED
#define AUTOBOOST_MPL_FOR_EACH_HPP_INCLUDED
// Copyright Aleksey Gurtovoy 2000-2008
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/mpl for documentation.
// $Id$
// $Date$
// $Revision$
#include <autoboost/mpl/is_sequence.hpp>
#include <autoboost/mpl/begin_end.hpp>
#include <autoboost/mpl/apply.hpp>
#include <autoboost/mpl/bool.hpp>
#include <autoboost/mpl/next_prior.hpp>
#include <autoboost/mpl/deref.hpp>
#include <autoboost/mpl/identity.hpp>
#include <autoboost/mpl/assert.hpp>
#include <autoboost/mpl/aux_/config/gpu.hpp>
#include <autoboost/mpl/aux_/unwrap.hpp>
#include <autoboost/type_traits/is_same.hpp>
#include <autoboost/utility/value_init.hpp>
namespace autoboost { namespace mpl {
namespace aux {
// Recursion terminator: chosen when `done == true`, i.e. the iterator has
// reached the end of the sequence, so execute() is a no-op.
template< bool done = true >
struct for_each_impl
{
    template<
          typename Iterator
        , typename LastIterator
        , typename TransformFunc
        , typename F
        >
    AUTOBOOST_MPL_CFG_GPU_ENABLED
    static void execute(
          Iterator*
        , LastIterator*
        , TransformFunc*
        , F
        )
    {
        // Base case: no elements left to visit.
    }
};
// Recursive case: visit the current element, then recurse on the next
// iterator position. Pointers are used instead of values so the iterator
// types are never instantiated at runtime.
template<>
struct for_each_impl<false>
{
    template<
          typename Iterator
        , typename LastIterator
        , typename TransformFunc
        , typename F
        >
    AUTOBOOST_MPL_CFG_GPU_ENABLED
    static void execute(
          Iterator*
        , LastIterator*
        , TransformFunc*
        , F f
        )
    {
        // Current element, transformed through the user-supplied metafunction.
        typedef typename deref<Iterator>::type item;
        typedef typename apply1<TransformFunc,item>::type arg;

        // dwa 2002/9/10 -- make sure not to invoke undefined behavior
        // when we pass arg.
        value_initialized<arg> x;
        aux::unwrap(f, 0)(autoboost::get(x));

        // Recurse; the <done> selector re-checks whether we reached the end.
        typedef typename mpl::next<Iterator>::type iter;
        for_each_impl<autoboost::is_same<iter,LastIterator>::value>
            ::execute( static_cast<iter*>(0), static_cast<LastIterator*>(0), static_cast<TransformFunc*>(0), f);
    }
};
} // namespace aux
// agurt, 17/mar/02: pointer default parameters are necessary to workaround
// MSVC 6.5 function template signature's mangling bug
// Applies `f` to each element of the compile-time Sequence after passing it
// through TransformOp. Dummy pointer parameters exist only so the compiler
// can deduce Sequence/TransformOp (MSVC 6.5 mangling workaround, see above).
template<
      typename Sequence
    , typename TransformOp
    , typename F
    >
AUTOBOOST_MPL_CFG_GPU_ENABLED
inline
void for_each(F f, Sequence* = 0, TransformOp* = 0)
{
    AUTOBOOST_MPL_ASSERT(( is_sequence<Sequence> ));

    typedef typename begin<Sequence>::type first;
    typedef typename end<Sequence>::type last;

    // Kick off the unrolled recursion; <done> is true for an empty sequence.
    aux::for_each_impl< autoboost::is_same<first,last>::value >
        ::execute(static_cast<first*>(0), static_cast<last*>(0), static_cast<TransformOp*>(0), f);
}
// Convenience overload: iterate the sequence without a transform (identity).
template<
      typename Sequence
    , typename F
    >
AUTOBOOST_MPL_CFG_GPU_ENABLED
inline
void for_each(F f, Sequence* = 0)
{
    // jfalcou: fully qualifying this call so it doesnt clash with autoboostphoenix::for_each
    // on some compilers -- done on 02/28/2011
    autoboost::mpl::for_each<Sequence, identity<> >(f);
}
}}
#endif // AUTOBOOST_MPL_FOR_EACH_HPP_INCLUDED
| codemercenary/autowiring | contrib/autoboost/autoboost/mpl/for_each.hpp | C++ | apache-2.0 | 3,170 |
(function() {
    // Filter factory: formats a seconds value as "m:ss".
    // Non-numeric input yields the placeholder '-:--'.
    function timecode() {
        return function(seconds) {
            var parsed = Number.parseFloat(seconds);
            if (Number.isNaN(parsed)) {
                return '-:--';
            }

            var totalSeconds = Math.floor(parsed);
            var minutes = Math.floor(totalSeconds / 60);
            var remainder = totalSeconds % 60;

            // Zero-pad the seconds component to two digits.
            var padded = remainder < 10 ? '0' + remainder : '' + remainder;
            return minutes + ':' + padded;
        };
    }

    angular
        .module('blocJams')
        .filter('timecode', timecode);
})();
| sjcliu/bloc-jams-angular | dist/scripts/filters/timecode.js | JavaScript | apache-2.0 | 692 |
module RSpec
module Matchers
module BuiltIn
class OperatorMatcher
class << self
def registry
@registry ||= {}
end
def register(klass, operator, matcher)
registry[klass] ||= {}
registry[klass][operator] = matcher
end
def unregister(klass, operator)
registry[klass] && registry[klass].delete(operator)
end
def get(klass, operator)
klass.ancestors.each { |ancestor|
matcher = registry[ancestor] && registry[ancestor][operator]
return matcher if matcher
}
nil
end
end
def initialize(actual)
@actual = actual
end
def self.use_custom_matcher_or_delegate(operator)
define_method(operator) do |expected|
if !has_non_generic_implementation_of?(operator) && matcher = OperatorMatcher.get(@actual.class, operator)
@actual.__send__(::RSpec::Matchers.last_should, matcher.new(expected))
else
eval_match(@actual, operator, expected)
end
end
negative_operator = operator.sub(/^=/, '!')
if negative_operator != operator && respond_to?(negative_operator)
define_method(negative_operator) do |expected|
opposite_should = ::RSpec::Matchers.last_should == :should ? :should_not : :should
raise "RSpec does not support `#{::RSpec::Matchers.last_should} #{negative_operator} expected`. " +
"Use `#{opposite_should} #{operator} expected` instead."
end
end
end
['==', '===', '=~', '>', '>=', '<', '<='].each do |operator|
use_custom_matcher_or_delegate operator
end
def fail_with_message(message)
RSpec::Expectations.fail_with(message, @expected, @actual)
end
def description
"#{@operator} #{@expected.inspect}"
end
private
if Method.method_defined?(:owner) # 1.8.6 lacks Method#owner :-(
def has_non_generic_implementation_of?(op)
Expectations.method_handle_for(@actual, op).owner != ::Kernel
rescue NameError
false
end
else
def has_non_generic_implementation_of?(op)
# This is a bit of a hack, but:
#
# {}.method(:=~).to_s # => "#<Method: Hash(Kernel)#=~>"
#
# In the absence of Method#owner, this is the best we
# can do to see if the method comes from Kernel.
!Expectations.method_handle_for(@actual, op).to_s.include?('(Kernel)')
rescue NameError
false
end
end
def eval_match(actual, operator, expected)
::RSpec::Matchers.last_matcher = self
@operator, @expected = operator, expected
__delegate_operator(actual, operator, expected)
end
end
class PositiveOperatorMatcher < OperatorMatcher
def __delegate_operator(actual, operator, expected)
if actual.__send__(operator, expected)
true
elsif ['==','===', '=~'].include?(operator)
fail_with_message("expected: #{expected.inspect}\n got: #{actual.inspect} (using #{operator})")
else
fail_with_message("expected: #{operator} #{expected.inspect}\n got: #{operator.gsub(/./, ' ')} #{actual.inspect}")
end
end
end
class NegativeOperatorMatcher < OperatorMatcher
def __delegate_operator(actual, operator, expected)
return false unless actual.__send__(operator, expected)
return fail_with_message("expected not: #{operator} #{expected.inspect}\n got: #{operator.gsub(/./, ' ')} #{actual.inspect}")
end
end
end
end
end
| sghill/gocd | server/webapp/WEB-INF/rails.new/vendor/bundle/jruby/1.9/gems/rspec-expectations-2.99.2/lib/rspec/matchers/operator_matcher.rb | Ruby | apache-2.0 | 3,913 |
/*
* Copyright 2010-2012 Luca Garulli (l.garulli--at--orientechnologies.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.orientechnologies.orient.core.sql.operator;
import com.orientechnologies.orient.core.command.OCommandContext;
import com.orientechnologies.orient.core.db.record.OIdentifiable;
import com.orientechnologies.orient.core.id.ORID;
import com.orientechnologies.orient.core.sql.filter.OSQLFilterCondition;
/**
* OR operator.
*
* @author Luca Garulli
*
*/
public class OQueryOperatorOr extends OQueryOperator {

  public OQueryOperatorOr() {
    // Keyword "OR", precedence 3, no right-hand expected-value of its own.
    super("OR", 3, false);
  }

  /**
   * Evaluates the logical OR of the two already-evaluated operands.
   * A null left operand short-circuits to {@code false} (legacy behavior kept
   * for compatibility). A null right operand now counts as {@code false}:
   * the previous implementation unboxed it unconditionally and threw a
   * NullPointerException whenever the left side was false and the right side
   * was null.
   */
  @Override
  public Object evaluateRecord(final OIdentifiable iRecord, final OSQLFilterCondition iCondition, final Object iLeft,
      final Object iRight, OCommandContext iContext) {
    if (iLeft == null)
      return false;
    if ((Boolean) iLeft)
      return true;
    return iRight != null && (Boolean) iRight;
  }

  /**
   * An OR over two indexable conditions can be answered by the union of the
   * two index lookups; with a missing operand no index can be used.
   */
  @Override
  public OIndexReuseType getIndexReuseType(final Object iLeft, final Object iRight) {
    if (iLeft == null || iRight == null)
      return OIndexReuseType.NO_INDEX;

    return OIndexReuseType.INDEX_UNION;
  }

  /**
   * The begin of the union's RID range is the smaller of the two operands'
   * begins; unknown (null) on either side makes the whole range unknown.
   */
  @Override
  public ORID getBeginRidRange(final Object iLeft, final Object iRight) {
    final ORID leftRange;
    final ORID rightRange;

    if (iLeft instanceof OSQLFilterCondition)
      leftRange = ((OSQLFilterCondition) iLeft).getBeginRidRange();
    else
      leftRange = null;

    if (iRight instanceof OSQLFilterCondition)
      rightRange = ((OSQLFilterCondition) iRight).getBeginRidRange();
    else
      rightRange = null;

    if (leftRange == null || rightRange == null)
      return null;
    else
      return leftRange.compareTo(rightRange) <= 0 ? leftRange : rightRange;
  }

  /**
   * The end of the union's RID range is the larger of the two operands' ends;
   * unknown (null) on either side makes the whole range unknown.
   */
  @Override
  public ORID getEndRidRange(final Object iLeft, final Object iRight) {
    final ORID leftRange;
    final ORID rightRange;

    if (iLeft instanceof OSQLFilterCondition)
      leftRange = ((OSQLFilterCondition) iLeft).getEndRidRange();
    else
      leftRange = null;

    if (iRight instanceof OSQLFilterCondition)
      rightRange = ((OSQLFilterCondition) iRight).getEndRidRange();
    else
      rightRange = null;

    if (leftRange == null || rightRange == null)
      return null;
    else
      return leftRange.compareTo(rightRange) >= 0 ? leftRange : rightRange;
  }
}
| redox/OrientDB | core/src/main/java/com/orientechnologies/orient/core/sql/operator/OQueryOperatorOr.java | Java | apache-2.0 | 2,880 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.artemis.tests.integration.management;
import org.apache.activemq.artemis.api.core.management.ActiveMQServerControl;
import org.apache.activemq.artemis.api.core.management.Parameter;
import org.apache.activemq.artemis.api.core.management.ResourceNames;
import java.util.Map;
public class ActiveMQServerControlUsingCoreTest extends ActiveMQServerControlTest {
// Constants -----------------------------------------------------
// Attributes ----------------------------------------------------
// Static --------------------------------------------------------
private static String[] toStringArray(final Object[] res) {
String[] names = new String[res.length];
for (int i = 0; i < res.length; i++) {
names[i] = res[i].toString();
}
return names;
}
// Constructors --------------------------------------------------
// Public --------------------------------------------------------
// ActiveMQServerControlTest overrides --------------------------
// the core messaging proxy doesn't work when the server is stopped so we cant run these 2 tests
   // Intentionally empty: this scenario stops the server, and the
   // core-protocol management proxy used by this subclass cannot talk to a
   // stopped server (see the comment above), so the inherited test is disabled.
   @Override
   public void testScaleDownWithOutConnector() throws Exception {
   }
   // Intentionally empty: like testScaleDownWithOutConnector, this scenario
   // requires a stopped server, which the core messaging proxy cannot reach,
   // so the inherited test is disabled for this subclass.
   @Override
   public void testScaleDownWithConnector() throws Exception {
   }
@Override
protected ActiveMQServerControl createManagementControl() throws Exception {
return new ActiveMQServerControl() {
@Override
public void updateDuplicateIdCache(String address, Object[] ids) {
}
@Override
public void scaleDown(String connector) throws Exception {
throw new UnsupportedOperationException();
}
private final CoreMessagingProxy proxy = new CoreMessagingProxy(addServerLocator(createInVMNonHALocator()), ResourceNames.CORE_SERVER);
@Override
public boolean isSharedStore() {
return (Boolean) proxy.retrieveAttributeValue("sharedStore");
}
@Override
public boolean closeConnectionsForAddress(final String ipAddress) throws Exception {
return (Boolean) proxy.invokeOperation("closeConnectionsForAddress", ipAddress);
}
@Override
public boolean closeConsumerConnectionsForAddress(final String address) throws Exception {
return (Boolean) proxy.invokeOperation("closeConsumerConnectionsForAddress", address);
}
@Override
public boolean closeConnectionsForUser(final String userName) throws Exception {
return (Boolean) proxy.invokeOperation("closeConnectionsForUser", userName);
}
@Override
public boolean commitPreparedTransaction(final String transactionAsBase64) throws Exception {
return (Boolean) proxy.invokeOperation("commitPreparedTransaction", transactionAsBase64);
}
@Override
public void createQueue(final String address, final String name) throws Exception {
proxy.invokeOperation("createQueue", address, name);
}
@Override
public void createQueue(final String address,
final String name,
final String filter,
final boolean durable) throws Exception {
proxy.invokeOperation("createQueue", address, name, filter, durable);
}
@Override
public void createQueue(final String address, final String name, final boolean durable) throws Exception {
proxy.invokeOperation("createQueue", address, name, durable);
}
@Override
public void deployQueue(final String address,
final String name,
final String filter,
final boolean durable) throws Exception {
proxy.invokeOperation("deployQueue", address, name, filter, durable);
}
@Override
public void deployQueue(final String address, final String name, final String filterString) throws Exception {
proxy.invokeOperation("deployQueue", address, name);
}
@Override
public void destroyQueue(final String name) throws Exception {
proxy.invokeOperation("destroyQueue", name);
}
@Override
public void destroyQueue(final String name, final boolean removeConsumers) throws Exception {
proxy.invokeOperation("destroyQueue", name, removeConsumers);
}
@Override
public void disableMessageCounters() throws Exception {
proxy.invokeOperation("disableMessageCounters");
}
@Override
public void enableMessageCounters() throws Exception {
proxy.invokeOperation("enableMessageCounters");
}
@Override
public String getBindingsDirectory() {
return (String) proxy.retrieveAttributeValue("bindingsDirectory");
}
@Override
public int getConnectionCount() {
return (Integer) proxy.retrieveAttributeValue("connectionCount", Integer.class);
}
@Override
public long getTotalConnectionCount() {
return (Long) proxy.retrieveAttributeValue("totalConnectionCount", Long.class);
}
@Override
public long getTotalMessageCount() {
return (Long) proxy.retrieveAttributeValue("totalMessageCount", Long.class);
}
@Override
public long getTotalMessagesAdded() {
return (Long) proxy.retrieveAttributeValue("totalMessagesAdded", Long.class);
}
@Override
public long getTotalMessagesAcknowledged() {
return (Long) proxy.retrieveAttributeValue("totalMessagesAcknowledged", Long.class);
}
@Override
public long getTotalConsumerCount() {
return (Long) proxy.retrieveAttributeValue("totalConsumerCount", Long.class);
}
@Override
public long getConnectionTTLOverride() {
return (Long) proxy.retrieveAttributeValue("connectionTTLOverride", Long.class);
}
@Override
public Object[] getConnectors() throws Exception {
return (Object[]) proxy.retrieveAttributeValue("connectors");
}
@Override
public String getConnectorsAsJSON() throws Exception {
return (String) proxy.retrieveAttributeValue("connectorsAsJSON");
}
@Override
public String[] getAddressNames() {
return ActiveMQServerControlUsingCoreTest.toStringArray((Object[]) proxy.retrieveAttributeValue("addressNames"));
}
@Override
public String[] getQueueNames() {
return ActiveMQServerControlUsingCoreTest.toStringArray((Object[]) proxy.retrieveAttributeValue("queueNames", String.class));
}
@Override
public String getUptime() {
return null;
}
@Override
public long getUptimeMillis() {
return 0;
}
@Override
public boolean isReplicaSync() {
return false;
}
@Override
public int getIDCacheSize() {
return (Integer) proxy.retrieveAttributeValue("IDCacheSize", Integer.class);
}
public String[] getInterceptorClassNames() {
return ActiveMQServerControlUsingCoreTest.toStringArray((Object[]) proxy.retrieveAttributeValue("incomingInterceptorClassNames"));
}
@Override
public String[] getIncomingInterceptorClassNames() {
return ActiveMQServerControlUsingCoreTest.toStringArray((Object[]) proxy.retrieveAttributeValue("incomingInterceptorClassNames"));
}
@Override
public String[] getOutgoingInterceptorClassNames() {
return ActiveMQServerControlUsingCoreTest.toStringArray((Object[]) proxy.retrieveAttributeValue("outgoingInterceptorClassNames"));
}
@Override
public String getJournalDirectory() {
return (String) proxy.retrieveAttributeValue("journalDirectory");
}
@Override
public int getJournalFileSize() {
return (Integer) proxy.retrieveAttributeValue("journalFileSize", Integer.class);
}
@Override
public int getJournalMaxIO() {
return (Integer) proxy.retrieveAttributeValue("journalMaxIO", Integer.class);
}
@Override
public int getJournalMinFiles() {
return (Integer) proxy.retrieveAttributeValue("journalMinFiles", Integer.class);
}
@Override
public String getJournalType() {
return (String) proxy.retrieveAttributeValue("journalType");
}
@Override
public String getLargeMessagesDirectory() {
return (String) proxy.retrieveAttributeValue("largeMessagesDirectory");
}
@Override
public String getManagementAddress() {
return (String) proxy.retrieveAttributeValue("managementAddress");
}
@Override
public String getManagementNotificationAddress() {
return (String) proxy.retrieveAttributeValue("managementNotificationAddress");
}
@Override
public int getMessageCounterMaxDayCount() {
return (Integer) proxy.retrieveAttributeValue("messageCounterMaxDayCount", Integer.class);
}
@Override
public long getMessageCounterSamplePeriod() {
return (Long) proxy.retrieveAttributeValue("messageCounterSamplePeriod", Long.class);
}
@Override
public long getMessageExpiryScanPeriod() {
return (Long) proxy.retrieveAttributeValue("messageExpiryScanPeriod", Long.class);
}
@Override
public long getMessageExpiryThreadPriority() {
return (Long) proxy.retrieveAttributeValue("messageExpiryThreadPriority", Long.class);
}
@Override
public String getPagingDirectory() {
return (String) proxy.retrieveAttributeValue("pagingDirectory");
}
@Override
public int getScheduledThreadPoolMaxSize() {
return (Integer) proxy.retrieveAttributeValue("scheduledThreadPoolMaxSize", Integer.class);
}
@Override
public int getThreadPoolMaxSize() {
return (Integer) proxy.retrieveAttributeValue("threadPoolMaxSize", Integer.class);
}
@Override
public long getSecurityInvalidationInterval() {
return (Long) proxy.retrieveAttributeValue("securityInvalidationInterval", Long.class);
}
@Override
public long getTransactionTimeout() {
return (Long) proxy.retrieveAttributeValue("transactionTimeout", Long.class);
}
@Override
public long getTransactionTimeoutScanPeriod() {
return (Long) proxy.retrieveAttributeValue("transactionTimeoutScanPeriod", Long.class);
}
@Override
public String getVersion() {
return proxy.retrieveAttributeValue("version").toString();
}
@Override
public boolean isBackup() {
return (Boolean) proxy.retrieveAttributeValue("backup");
}
@Override
public boolean isClustered() {
return (Boolean) proxy.retrieveAttributeValue("clustered");
}
@Override
public boolean isCreateBindingsDir() {
return (Boolean) proxy.retrieveAttributeValue("createBindingsDir");
}
@Override
public boolean isCreateJournalDir() {
return (Boolean) proxy.retrieveAttributeValue("createJournalDir");
}
@Override
public boolean isJournalSyncNonTransactional() {
return (Boolean) proxy.retrieveAttributeValue("journalSyncNonTransactional");
}
@Override
public boolean isJournalSyncTransactional() {
return (Boolean) proxy.retrieveAttributeValue("journalSyncTransactional");
}
@Override
public void setFailoverOnServerShutdown(boolean failoverOnServerShutdown) throws Exception {
proxy.invokeOperation("setFailoverOnServerShutdown", failoverOnServerShutdown);
}
@Override
public boolean isFailoverOnServerShutdown() {
return (Boolean) proxy.retrieveAttributeValue("failoverOnServerShutdown");
}
public void setScaleDown(boolean scaleDown) throws Exception {
proxy.invokeOperation("setEnabled", scaleDown);
}
public boolean isScaleDown() {
return (Boolean) proxy.retrieveAttributeValue("scaleDown");
}
@Override
public boolean isMessageCounterEnabled() {
return (Boolean) proxy.retrieveAttributeValue("messageCounterEnabled");
}
@Override
public boolean isPersistDeliveryCountBeforeDelivery() {
return (Boolean) proxy.retrieveAttributeValue("persistDeliveryCountBeforeDelivery");
}
@Override
public boolean isAsyncConnectionExecutionEnabled() {
return (Boolean) proxy.retrieveAttributeValue("asyncConnectionExecutionEnabled");
}
@Override
public boolean isPersistIDCache() {
return (Boolean) proxy.retrieveAttributeValue("persistIDCache");
}
@Override
public boolean isSecurityEnabled() {
return (Boolean) proxy.retrieveAttributeValue("securityEnabled");
}
@Override
public boolean isStarted() {
return (Boolean) proxy.retrieveAttributeValue("started");
}
@Override
public boolean isWildcardRoutingEnabled() {
return (Boolean) proxy.retrieveAttributeValue("wildcardRoutingEnabled");
}
@Override
public String[] listConnectionIDs() throws Exception {
return (String[]) proxy.invokeOperation("listConnectionIDs");
}
@Override
public String[] listPreparedTransactions() throws Exception {
return (String[]) proxy.invokeOperation("listPreparedTransactions");
}
@Override
public String listPreparedTransactionDetailsAsJSON() throws Exception {
return (String) proxy.invokeOperation("listPreparedTransactionDetailsAsJSON");
}
@Override
public String listPreparedTransactionDetailsAsHTML() throws Exception {
return (String) proxy.invokeOperation("listPreparedTransactionDetailsAsHTML");
}
@Override
public String[] listHeuristicCommittedTransactions() throws Exception {
return (String[]) proxy.invokeOperation("listHeuristicCommittedTransactions");
}
@Override
public String[] listHeuristicRolledBackTransactions() throws Exception {
return (String[]) proxy.invokeOperation("listHeuristicRolledBackTransactions");
}
@Override
public String[] listRemoteAddresses() throws Exception {
return (String[]) proxy.invokeOperation("listRemoteAddresses");
}
@Override
public String[] listRemoteAddresses(final String ipAddress) throws Exception {
return (String[]) proxy.invokeOperation("listRemoteAddresses", ipAddress);
}
@Override
public String[] listSessions(final String connectionID) throws Exception {
return (String[]) proxy.invokeOperation("listSessions", connectionID);
}
@Override
public void resetAllMessageCounterHistories() throws Exception {
proxy.invokeOperation("resetAllMessageCounterHistories");
}
@Override
public void resetAllMessageCounters() throws Exception {
proxy.invokeOperation("resetAllMessageCounters");
}
@Override
public boolean rollbackPreparedTransaction(final String transactionAsBase64) throws Exception {
return (Boolean) proxy.invokeOperation("rollbackPreparedTransaction", transactionAsBase64);
}
@Override
public void sendQueueInfoToQueue(final String queueName, final String address) throws Exception {
proxy.invokeOperation("sendQueueInfoToQueue", queueName, address);
}
@Override
public void setMessageCounterMaxDayCount(final int count) throws Exception {
proxy.invokeOperation("setMessageCounterMaxDayCount", count);
}
@Override
public void setMessageCounterSamplePeriod(final long newPeriod) throws Exception {
proxy.invokeOperation("setMessageCounterSamplePeriod", newPeriod);
}
@Override
public int getJournalBufferSize() {
return (Integer) proxy.retrieveAttributeValue("JournalBufferSize", Integer.class);
}
@Override
public int getJournalBufferTimeout() {
return (Integer) proxy.retrieveAttributeValue("JournalBufferTimeout", Integer.class);
}
@Override
public int getJournalCompactMinFiles() {
return (Integer) proxy.retrieveAttributeValue("JournalCompactMinFiles", Integer.class);
}
@Override
public int getJournalCompactPercentage() {
return (Integer) proxy.retrieveAttributeValue("JournalCompactPercentage", Integer.class);
}
@Override
public boolean isPersistenceEnabled() {
return (Boolean) proxy.retrieveAttributeValue("PersistenceEnabled");
}
@Override
public int getDiskScanPeriod() {
return (Integer) proxy.retrieveAttributeValue("DiskScanPeriod", Integer.class);
}
@Override
public int getMaxDiskUsage() {
return (Integer) proxy.retrieveAttributeValue("MaxDiskUsage", Integer.class);
}
@Override
public long getGlobalMaxSize() {
return (Long) proxy.retrieveAttributeValue("GlobalMaxSize", Long.class);
}
@Override
public void addSecuritySettings(String addressMatch,
String sendRoles,
String consumeRoles,
String createDurableQueueRoles,
String deleteDurableQueueRoles,
String createNonDurableQueueRoles,
String deleteNonDurableQueueRoles,
String manageRoles) throws Exception {
proxy.invokeOperation("addSecuritySettings", addressMatch, sendRoles, consumeRoles, createDurableQueueRoles, deleteDurableQueueRoles, createNonDurableQueueRoles, deleteNonDurableQueueRoles, manageRoles);
}
@Override
public void addSecuritySettings(String addressMatch,
String sendRoles,
String consumeRoles,
String createDurableQueueRoles,
String deleteDurableQueueRoles,
String createNonDurableQueueRoles,
String deleteNonDurableQueueRoles,
String manageRoles,
String browseRoles) throws Exception {
proxy.invokeOperation("addSecuritySettings", addressMatch, sendRoles, consumeRoles, createDurableQueueRoles, deleteDurableQueueRoles, createNonDurableQueueRoles, deleteNonDurableQueueRoles, manageRoles, browseRoles);
}
@Override
public void removeSecuritySettings(String addressMatch) throws Exception {
proxy.invokeOperation("removeSecuritySettings", addressMatch);
}
@Override
public Object[] getRoles(String addressMatch) throws Exception {
return (Object[]) proxy.invokeOperation("getRoles", addressMatch);
}
@Override
public String getRolesAsJSON(String addressMatch) throws Exception {
return (String) proxy.invokeOperation("getRolesAsJSON", addressMatch);
}
@Override
public void addAddressSettings(@Parameter(desc = "an address match", name = "addressMatch") String addressMatch,
@Parameter(desc = "the dead letter address setting", name = "DLA") String DLA,
@Parameter(desc = "the expiry address setting", name = "expiryAddress") String expiryAddress,
@Parameter(desc = "the expiry delay setting", name = "expiryDelay") long expiryDelay,
@Parameter(desc = "are any queues created for this address a last value queue", name = "lastValueQueue") boolean lastValueQueue,
@Parameter(desc = "the delivery attempts", name = "deliveryAttempts") int deliveryAttempts,
@Parameter(desc = "the max size in bytes", name = "maxSizeBytes") long maxSizeBytes,
@Parameter(desc = "the page size in bytes", name = "pageSizeBytes") int pageSizeBytes,
@Parameter(desc = "the max number of pages in the soft memory cache", name = "pageMaxCacheSize") int pageMaxCacheSize,
@Parameter(desc = "the redelivery delay", name = "redeliveryDelay") long redeliveryDelay,
@Parameter(desc = "the redelivery delay multiplier", name = "redeliveryMultiplier") double redeliveryMultiplier,
@Parameter(desc = "the maximum redelivery delay", name = "maxRedeliveryDelay") long maxRedeliveryDelay,
@Parameter(desc = "the redistribution delay", name = "redistributionDelay") long redistributionDelay,
@Parameter(desc = "do we send to the DLA when there is no where to route the message", name = "sendToDLAOnNoRoute") boolean sendToDLAOnNoRoute,
@Parameter(desc = "the policy to use when the address is full", name = "addressFullMessagePolicy") String addressFullMessagePolicy,
@Parameter(desc = "when a consumer falls below this threshold in terms of messages consumed per second it will be considered 'slow'", name = "slowConsumerThreshold") long slowConsumerThreshold,
@Parameter(desc = "how often (in seconds) to check for slow consumers", name = "slowConsumerCheckPeriod") long slowConsumerCheckPeriod,
@Parameter(desc = "the policy to use when a slow consumer is detected", name = "slowConsumerPolicy") String slowConsumerPolicy,
@Parameter(desc = "allow queues to be created automatically", name = "autoCreateJmsQueues") boolean autoCreateJmsQueues,
@Parameter(desc = "allow auto-created queues to be deleted automatically", name = "autoDeleteJmsQueues") boolean autoDeleteJmsQueues,
@Parameter(desc = "allow topics to be created automatically", name = "autoCreateJmsTopics") boolean autoCreateJmsTopics,
@Parameter(desc = "allow auto-created topics to be deleted automatically", name = "autoDeleteJmsTopics") boolean autoDeleteJmsTopics) throws Exception {
proxy.invokeOperation("addAddressSettings", addressMatch, DLA, expiryAddress, expiryDelay, lastValueQueue, deliveryAttempts, maxSizeBytes, pageSizeBytes, pageMaxCacheSize, redeliveryDelay, redeliveryMultiplier, maxRedeliveryDelay, redistributionDelay, sendToDLAOnNoRoute, addressFullMessagePolicy, slowConsumerThreshold, slowConsumerCheckPeriod, slowConsumerPolicy, autoCreateJmsQueues, autoDeleteJmsQueues, autoCreateJmsTopics, autoDeleteJmsTopics);
}
@Override
public void removeAddressSettings(String addressMatch) throws Exception {
proxy.invokeOperation("removeAddressSettings", addressMatch);
}
@Override
public void createDivert(String name,
String routingName,
String address,
String forwardingAddress,
boolean exclusive,
String filterString,
String transformerClassName) throws Exception {
proxy.invokeOperation("createDivert", name, routingName, address, forwardingAddress, exclusive, filterString, transformerClassName);
}
@Override
public void destroyDivert(String name) throws Exception {
proxy.invokeOperation("destroyDivert", name);
}
@Override
public String[] getBridgeNames() {
return ActiveMQServerControlUsingCoreTest.toStringArray((Object[]) proxy.retrieveAttributeValue("bridgeNames"));
}
@Override
public void destroyBridge(String name) throws Exception {
proxy.invokeOperation("destroyBridge", name);
}
@Override
public void createConnectorService(String name, String factoryClass, Map<String, Object> parameters) throws Exception {
proxy.invokeOperation("createConnectorService", name, factoryClass, parameters);
}
@Override
public void destroyConnectorService(String name) throws Exception {
proxy.invokeOperation("destroyConnectorService", name);
}
@Override
public String[] getConnectorServices() {
return ActiveMQServerControlUsingCoreTest.toStringArray((Object[]) proxy.retrieveAttributeValue("connectorServices"));
}
@Override
public void forceFailover() throws Exception {
proxy.invokeOperation("forceFailover");
}
public String getLiveConnectorName() throws Exception {
return (String) proxy.retrieveAttributeValue("liveConnectorName");
}
@Override
public String getAddressSettingsAsJSON(String addressMatch) throws Exception {
return (String) proxy.invokeOperation("getAddressSettingsAsJSON", addressMatch);
}
@Override
public String[] getDivertNames() {
return ActiveMQServerControlUsingCoreTest.toStringArray((Object[]) proxy.retrieveAttributeValue("divertNames"));
}
@Override
public void createBridge(String name,
String queueName,
String forwardingAddress,
String filterString,
String transformerClassName,
long retryInterval,
double retryIntervalMultiplier,
int initialConnectAttempts,
int reconnectAttempts,
boolean useDuplicateDetection,
int confirmationWindowSize,
int producerWindowSize,
long clientFailureCheckPeriod,
String connectorNames,
boolean useDiscovery,
boolean ha,
String user,
String password) throws Exception {
proxy.invokeOperation("createBridge", name, queueName, forwardingAddress, filterString, transformerClassName, retryInterval, retryIntervalMultiplier, initialConnectAttempts, reconnectAttempts, useDuplicateDetection, confirmationWindowSize, producerWindowSize, clientFailureCheckPeriod, connectorNames, useDiscovery, ha, user, password);
}
@Override
public void createBridge(String name,
String queueName,
String forwardingAddress,
String filterString,
String transformerClassName,
long retryInterval,
double retryIntervalMultiplier,
int initialConnectAttempts,
int reconnectAttempts,
boolean useDuplicateDetection,
int confirmationWindowSize,
long clientFailureCheckPeriod,
String connectorNames,
boolean useDiscovery,
boolean ha,
String user,
String password) throws Exception {
proxy.invokeOperation("createBridge", name, queueName, forwardingAddress, filterString, transformerClassName, retryInterval, retryIntervalMultiplier, initialConnectAttempts, reconnectAttempts, useDuplicateDetection, confirmationWindowSize, clientFailureCheckPeriod, connectorNames, useDiscovery, ha, user, password);
}
@Override
public String listProducersInfoAsJSON() throws Exception {
return (String) proxy.invokeOperation("listProducersInfoAsJSON");
}
@Override
public String listConsumersAsJSON(String connectionID) throws Exception {
return (String) proxy.invokeOperation("listConsumersAsJSON", connectionID);
}
@Override
public String listAllConsumersAsJSON() throws Exception {
return (String) proxy.invokeOperation("listAllConsumersAsJSON");
}
@Override
public String listConnectionsAsJSON() throws Exception {
return (String) proxy.invokeOperation("listConnectionsAsJSON");
}
@Override
public String listSessionsAsJSON(@Parameter(desc = "a connection ID", name = "connectionID") String connectionID) throws Exception {
return (String) proxy.invokeOperation("listSessionsAsJSON", connectionID);
}
};
}
   @Override
   public boolean usingCore() {
      // This test variant drives the server control through the core wire
      // protocol (the proxy-backed control defined above) instead of JMX.
      return true;
   }
// Package protected ---------------------------------------------
// Protected -----------------------------------------------------
// Private -------------------------------------------------------
// Inner classes -------------------------------------------------
}
| paulgallagher75/activemq-artemis | tests/integration-tests/src/test/java/org/apache/activemq/artemis/tests/integration/management/ActiveMQServerControlUsingCoreTest.java | Java | apache-2.0 | 32,136 |
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.base;
import com.google.common.annotations.GwtCompatible;
import com.google.common.annotations.GwtIncompatible;
import junit.framework.TestCase;
import java.nio.charset.Charset;
import java.util.Arrays;
/**
 * Unit test for {@link Charsets}: verifies that each constant is the exact
 * {@link Charset} instance the JDK returns for the corresponding canonical
 * name. Non-UTF-8 charsets are unavailable under GWT, hence the
 * {@code @GwtIncompatible} markers.
 *
 * @author Mike Bostock
 */
@GwtCompatible(emulated = true)
public class CharsetsTest extends TestCase {
  @GwtIncompatible // Non-UTF-8 Charset
  public void testUsAscii() {
    assertEquals(Charset.forName("US-ASCII"), Charsets.US_ASCII);
  }
  @GwtIncompatible // Non-UTF-8 Charset
  public void testIso88591() {
    assertEquals(Charset.forName("ISO-8859-1"), Charsets.ISO_8859_1);
  }
  public void testUtf8() {
    assertEquals(Charset.forName("UTF-8"), Charsets.UTF_8);
  }
  @GwtIncompatible // Non-UTF-8 Charset
  public void testUtf16be() {
    assertEquals(Charset.forName("UTF-16BE"), Charsets.UTF_16BE);
  }
  @GwtIncompatible // Non-UTF-8 Charset
  public void testUtf16le() {
    assertEquals(Charset.forName("UTF-16LE"), Charsets.UTF_16LE);
  }
  @GwtIncompatible // Non-UTF-8 Charset
  public void testUtf16() {
    assertEquals(Charset.forName("UTF-16"), Charsets.UTF_16);
  }
  @GwtIncompatible // Non-UTF-8 Charset
  public void testWhyUsAsciiIsDangerous() {
    // The assertions below show that four distinct non-ASCII strings all
    // encode to the same bytes as the literal "????" — i.e. US-ASCII
    // encoding silently destroys the information in unmappable characters.
    byte[] b1 = "朝日新聞".getBytes(Charsets.US_ASCII);
    byte[] b2 = "聞朝日新".getBytes(Charsets.US_ASCII);
    byte[] b3 = "????".getBytes(Charsets.US_ASCII);
    byte[] b4 = "ニュース".getBytes(Charsets.US_ASCII);
    byte[] b5 = "スューー".getBytes(Charsets.US_ASCII);
    // Assert they are all equal (using the transitive property)
    assertTrue(Arrays.equals(b1, b2));
    assertTrue(Arrays.equals(b2, b3));
    assertTrue(Arrays.equals(b3, b4));
    assertTrue(Arrays.equals(b4, b5));
  }
}
| aiyanbo/guava | guava-tests/test/com/google/common/base/CharsetsTest.java | Java | apache-2.0 | 2,382 |
/*
* Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
/*
* Do not modify this file. This file is generated from the directconnect-2012-10-25.normal.json service model.
*/
using System;
using System.Collections.Generic;
using System.Xml.Serialization;
using System.Text;
using System.IO;
using Amazon.Runtime;
using Amazon.Runtime.Internal;
namespace Amazon.DirectConnect.Model
{
    /// <summary>
    /// Container for the parameters to the DeleteInterconnect operation.
    /// Deletes the specified interconnect.
    /// </summary>
    /// <remarks>
    /// Generated from the directconnect-2012-10-25 service model — do not
    /// hand-edit the code; regenerate instead.
    /// </remarks>
    public partial class DeleteInterconnectRequest : AmazonDirectConnectRequest
    {
        private string _interconnectId;
        /// <summary>
        /// Gets and sets the property InterconnectId.
        /// The ID of the interconnect to delete.
        /// </summary>
        public string InterconnectId
        {
            get { return this._interconnectId; }
            set { this._interconnectId = value; }
        }
        // Check to see if InterconnectId property is set
        internal bool IsSetInterconnectId()
        {
            return this._interconnectId != null;
        }
    }
} | rafd123/aws-sdk-net | sdk/src/Services/DirectConnect/Generated/Model/DeleteInterconnectRequest.cs | C# | apache-2.0 | 1,636 |
/*
* Copyright 2016 Yahoo Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.yahoo.athenz.zts;
import static org.testng.Assert.*;
import java.util.ArrayList;
import java.util.List;
import org.testng.annotations.Test;
@SuppressWarnings({"EqualsWithItself", "EqualsBetweenInconvertibleTypes"})
public class HostServicesTest {
    @Test
    public void testHostService() {
        // Build one fully-populated instance and one with only the host set.
        List<String> serviceNames = new ArrayList<>();
        serviceNames.add("sample.service1");
        HostServices populated = new HostServices();
        populated.setNames(serviceNames);
        populated.setHost("sample.com");
        HostServices hostOnly = new HostServices();
        hostOnly.setHost("sample.com");
        // Getters reflect what was set.
        assertEquals(populated.getHost(), "sample.com");
        assertEquals(populated.getNames(), serviceNames);
        // equals(): reflexive, and sensitive to the service-name list,
        // to a null host, and to a foreign type.
        assertEquals(populated, populated);
        assertNotEquals(hostOnly, populated);
        hostOnly.setHost(null);
        assertNotEquals(hostOnly, populated);
        assertNotEquals("", populated);
    }
}
| yahoo/athenz | core/zts/src/test/java/com/yahoo/athenz/zts/HostServicesTest.java | Java | apache-2.0 | 1,478 |
package org.apache.pdfbox.pdmodel.graphics;
import org.apache.pdfbox.cos.COSName;
import org.apache.pdfbox.pdmodel.common.PDStream;
/**
 * A PostScript XObject.
 * Conforming readers may not be able to interpret the PostScript fragments.
 *
 * @author John Hewson
 */
public class PDPostScriptXObject extends PDXObject
{
    /**
     * Creates a PostScript XObject.
     * @param stream The XObject stream; registered with subtype
     *               {@link COSName#PS} via the {@link PDXObject} constructor.
     */
    public PDPostScriptXObject(PDStream stream)
    {
        super(stream, COSName.PS);
    }
}
| mdamt/PdfBox-Android | library/src/main/java/org/apache/pdfbox/pdmodel/graphics/PDPostScriptXObject.java | Java | apache-2.0 | 535 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.compute.drivers.hostvirtual import HostVirtualNodeDriver
from libcloud.compute.types import NodeState
from libcloud.compute.base import NodeAuthPassword
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.secrets import HOSTVIRTUAL_PARAMS
class HostVirtualTest(unittest.TestCase):
    # Exercises HostVirtualNodeDriver against canned JSON responses served by
    # HostVirtualMockHttp; the expected values below mirror the fixture files
    # under libcloud/test/compute/fixtures/hostvirtual/.
    def setUp(self):
        # Route all driver HTTP traffic through the mock transport.
        HostVirtualNodeDriver.connectionCls.conn_class = HostVirtualMockHttp
        self.driver = HostVirtualNodeDriver(*HOSTVIRTUAL_PARAMS)
    def test_list_nodes(self):
        nodes = self.driver.list_nodes()
        self.assertEqual(len(nodes), 4)
        self.assertEqual(len(nodes[0].public_ips), 1)
        self.assertEqual(len(nodes[1].public_ips), 1)
        self.assertEqual(len(nodes[0].private_ips), 0)
        self.assertEqual(len(nodes[1].private_ips), 0)
        self.assertTrue("208.111.39.118" in nodes[1].public_ips)
        self.assertTrue("208.111.45.250" in nodes[0].public_ips)
        self.assertEqual(nodes[3].state, NodeState.RUNNING)
        self.assertEqual(nodes[1].state, NodeState.TERMINATED)
    def test_list_sizes(self):
        sizes = self.driver.list_sizes()
        self.assertEqual(len(sizes), 14)
        self.assertEqual(sizes[0].id, "31")
        self.assertEqual(sizes[4].id, "71")
        self.assertEqual(sizes[2].ram, "512MB")
        self.assertEqual(sizes[2].disk, "20GB")
        self.assertEqual(sizes[3].bandwidth, "600GB")
        self.assertEqual(sizes[1].price, "15.00")
    def test_list_images(self):
        images = self.driver.list_images()
        self.assertEqual(len(images), 8)
        self.assertEqual(images[0].id, "1739")
        self.assertEqual(images[0].name, "Gentoo 2012 (0619) i386")
    def test_list_locations(self):
        locations = self.driver.list_locations()
        self.assertEqual(locations[0].id, "3")
        self.assertEqual(locations[0].name, "SJC - San Jose, CA")
        self.assertEqual(locations[1].id, "13")
        self.assertEqual(locations[1].name, "IAD - Reston, VA")
    def test_reboot_node(self):
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.reboot_node(node))
    def test_ex_get_node(self):
        node = self.driver.ex_get_node(node_id="62291")
        self.assertEqual(node.id, "62291")
        self.assertEqual(node.name, "server1.vr-cluster.org")
        self.assertEqual(node.state, NodeState.TERMINATED)
        self.assertTrue("208.111.45.250" in node.public_ips)
    def test_ex_list_packages(self):
        pkgs = self.driver.ex_list_packages()
        self.assertEqual(len(pkgs), 3)
        self.assertEqual(pkgs[1]["mbpkgid"], "176018")
        self.assertEqual(pkgs[2]["package_status"], "Suspended")
    def test_ex_order_package(self):
        sizes = self.driver.list_sizes()
        pkg = self.driver.ex_order_package(sizes[0])
        self.assertEqual(pkg["id"], "62291")
    def test_ex_cancel_package(self):
        node = self.driver.list_nodes()[0]
        result = self.driver.ex_cancel_package(node)
        self.assertEqual(result["status"], "success")
    def test_ex_unlink_package(self):
        node = self.driver.list_nodes()[0]
        result = self.driver.ex_unlink_package(node)
        self.assertEqual(result["status"], "success")
    def test_ex_stop_node(self):
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.ex_stop_node(node))
    def test_ex_start_node(self):
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.ex_start_node(node))
    def test_destroy_node(self):
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.destroy_node(node))
    def test_ex_delete_node(self):
        node = self.driver.list_nodes()[0]
        self.assertTrue(self.driver.ex_delete_node(node))
    def test_create_node(self):
        auth = NodeAuthPassword("vr!@#hosted#@!")
        size = self.driver.list_sizes()[0]
        image = self.driver.list_images()[0]
        node = self.driver.create_node(
            name="test.com", image=image, size=size, auth=auth
        )
        self.assertEqual("62291", node.id)
        self.assertEqual("server1.vr-cluster.org", node.name)
    def test_ex_provision_node(self):
        node = self.driver.list_nodes()[0]
        auth = NodeAuthPassword("vr!@#hosted#@!")
        self.assertTrue(self.driver.ex_provision_node(node=node, auth=auth))
    def test_create_node_in_location(self):
        # Same as test_create_node, but pins the node to an explicit location.
        auth = NodeAuthPassword("vr!@#hosted#@!")
        size = self.driver.list_sizes()[0]
        image = self.driver.list_images()[0]
        location = self.driver.list_locations()[1]
        node = self.driver.create_node(
            name="test.com", image=image, size=size, auth=auth, location=location
        )
        self.assertEqual("62291", node.id)
        self.assertEqual("server1.vr-cluster.org", node.name)
class HostVirtualMockHttp(MockHttp):
    # Canned HTTP transport for the HostVirtual API.
    # NOTE(review): MockHttp appears to dispatch each request to the handler
    # method derived from its URL path (e.g. /cloud/servers -> _cloud_servers),
    # so these method names are routing keys — do not rename them.
    fixtures = ComputeFileFixtures("hostvirtual")
    def _cloud_servers(self, method, url, body, headers):
        body = self.fixtures.load("list_nodes.json")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _cloud_server(self, method, url, body, headers):
        body = self.fixtures.load("get_node.json")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _cloud_packages(self, method, url, body, headers):
        body = self.fixtures.load("list_packages.json")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _cloud_sizes(self, method, url, body, headers):
        body = self.fixtures.load("list_sizes.json")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _cloud_images(self, method, url, body, headers):
        body = self.fixtures.load("list_images.json")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _cloud_locations(self, method, url, body, headers):
        body = self.fixtures.load("list_locations.json")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _cloud_server_delete(self, method, url, body, headers):
        body = self.fixtures.load("cancel_package.json")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _cloud_server_reboot(self, method, url, body, headers):
        body = self.fixtures.load("node_reboot.json")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _cloud_server_shutdown(self, method, url, body, headers):
        body = self.fixtures.load("node_stop.json")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _cloud_server_start(self, method, url, body, headers):
        body = self.fixtures.load("node_start.json")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _cloud_server_build(self, method, url, body, headers):
        body = self.fixtures.load("order_package.json")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _cloud_buy(self, method, url, body, headers):
        body = self.fixtures.load("order_package.json")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _cloud_cancel(self, method, url, body, headers):
        body = self.fixtures.load("cancel_package.json")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
    def _cloud_unlink(self, method, url, body, headers):
        body = self.fixtures.load("unlink_package.json")
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])
# Allow running this test module directly.
if __name__ == "__main__":
    sys.exit(unittest.main())
# vim:autoindent tabstop=4 shiftwidth=4 expandtab softtabstop=4 filetype=python
| apache/libcloud | libcloud/test/compute/test_hostvirtual.py | Python | apache-2.0 | 8,559 |
# Copyright 2021 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
# This test recipe runs under both Python 2 and Python 3.
PYTHON_VERSION_COMPATIBILITY = 'PY2+3'
# Recipe-engine modules this recipe depends on.
DEPS = [
    'cq',
    'properties',
    'step',
]
def RunSteps(api):
  """Runs one step whose log records the CQ run mode of this build."""
  api.step('show properties', [])
  # Attach the resolved mode to the step presentation so the expectation
  # files generated by GenTests capture it.
  api.step.active_result.presentation.logs['result'] = [
      'mode: %s' % (api.cq.run_mode,),
  ]
def GenTests(api):
  """Covers every CQ run mode, plus the legacy dry_run property form."""
  # Modern form: the run mode is supplied directly through the cq test api.
  for name, mode in (
      ('dry', api.cq.DRY_RUN),
      ('quick-dry', api.cq.QUICK_DRY_RUN),
      ('full', api.cq.FULL_RUN),
  ):
    yield api.test(name) + api.cq(run_mode=mode)
  # Legacy form: the mode is implied by the boolean 'dry_run' entry of the
  # '$recipe_engine/cq' property.
  for name, dry_run in (('legacy-full', False), ('legacy-dry', True)):
    yield api.test(name) + api.properties(**{
        '$recipe_engine/cq': {'dry_run': dry_run},
    })
| luci/recipes-py | recipe_modules/cq/tests/mode_of_run.py | Python | apache-2.0 | 825 |
// Copyright 2016-2017 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package addressing
import (
"encoding/json"
"fmt"
"net"
)
// CiliumIP abstracts over the two concrete address representations defined
// below (CiliumIPv4 and CiliumIPv6).
type CiliumIP interface {
	IPNet(ones int) *net.IPNet
	EndpointPrefix() *net.IPNet
	IP() net.IP
	String() string
	IsIPv6() bool
	GetFamilyString() string
	IsSet() bool
}
type CiliumIPv6 []byte
// NewCiliumIPv6 parses address — either a bare IPv6 address or a CIDR
// string — and returns it as a CiliumIPv6. IPv4 input is rejected.
func NewCiliumIPv6(address string) (CiliumIPv6, error) {
	ip, _, err := net.ParseCIDR(address)
	if err != nil {
		if ip = net.ParseIP(address); ip == nil {
			return nil, fmt.Errorf("Invalid IPv6 address: %s", address)
		}
	}
	// net.IP represents both families in up to 16 bytes, so the reliable
	// family test is whether the address fits in 4 bytes.
	if ip.To4() != nil {
		return nil, fmt.Errorf("Not an IPv6 address: %s", address)
	}
	return DeriveCiliumIPv6(ip.To16()), nil
}
// DeriveCiliumIPv6 copies src into a freshly allocated 16-byte CiliumIPv6.
func DeriveCiliumIPv6(src net.IP) CiliumIPv6 {
	out := make(CiliumIPv6, 16)
	copy(out, src.To16())
	return out
}
// IsSet returns true if the IP is set.
// It relies on String(), which returns "" for a nil receiver, so in
// practice this reports whether the underlying slice is non-nil.
func (ip CiliumIPv6) IsSet() bool {
	return ip.String() != ""
}
// IsIPv6 always returns true for this representation.
func (ip CiliumIPv6) IsIPv6() bool {
	return true
}
// IPNet returns ip as a *net.IPNet with a prefix of ones bits (out of 128).
func (ip CiliumIPv6) IPNet(ones int) *net.IPNet {
	return &net.IPNet{
		IP:   ip.IP(),
		Mask: net.CIDRMask(ones, 128),
	}
}
// EndpointPrefix returns the /128 network that addresses only this IP.
func (ip CiliumIPv6) EndpointPrefix() *net.IPNet {
	return ip.IPNet(128)
}
// IP converts the raw bytes to the standard-library net.IP form.
func (ip CiliumIPv6) IP() net.IP {
	return net.IP(ip)
}
// String returns the textual form of ip, or "" for a nil receiver.
func (ip CiliumIPv6) String() string {
	if ip == nil {
		return ""
	}
	return net.IP(ip).String()
}
// MarshalJSON implements json.Marshaler using net.IP's JSON encoding
// (a quoted address string).
func (ip CiliumIPv6) MarshalJSON() ([]byte, error) {
	return json.Marshal(net.IP(ip))
}
// UnmarshalJSON implements json.Unmarshaler. It accepts a JSON string
// containing an IPv6 address; an empty string leaves ip unset.
//
// The value is decoded with encoding/json rather than by slicing the raw
// bytes (the previous b[1:len(b)-1] approach never verified that b was
// actually a quoted string and misparsed escape sequences and non-string
// JSON values).
func (ip *CiliumIPv6) UnmarshalJSON(b []byte) error {
	var str string
	if err := json.Unmarshal(b, &str); err != nil {
		return fmt.Errorf("Invalid CiliumIPv6 '%s': %s", string(b), err)
	}
	if str == "" {
		return nil
	}
	c, err := NewCiliumIPv6(str)
	if err != nil {
		return fmt.Errorf("Invalid CiliumIPv6 '%s': %s", str, err)
	}
	*ip = c
	return nil
}
type CiliumIPv4 []byte
// NewCiliumIPv4 parses address — either a bare IPv4 address or a CIDR
// string — and returns it as a CiliumIPv4. IPv6 input is rejected.
func NewCiliumIPv4(address string) (CiliumIPv4, error) {
	ip, _, err := net.ParseCIDR(address)
	if err != nil {
		if ip = net.ParseIP(address); ip == nil {
			return nil, fmt.Errorf("Invalid IPv4 address: %s", address)
		}
	}
	v4 := ip.To4()
	if v4 == nil {
		return nil, fmt.Errorf("Not an IPv4 address")
	}
	return DeriveCiliumIPv4(v4), nil
}
// DeriveCiliumIPv4 copies src into a freshly allocated 4-byte CiliumIPv4.
func DeriveCiliumIPv4(src net.IP) CiliumIPv4 {
	out := make(CiliumIPv4, 4)
	copy(out, src.To4())
	return out
}
// IsSet returns true if the IP is set.
// It relies on String(), which returns "" for a nil receiver, so in
// practice this reports whether the underlying slice is non-nil.
func (ip CiliumIPv4) IsSet() bool {
	return ip.String() != ""
}
// IsIPv6 always returns false for this representation.
func (ip CiliumIPv4) IsIPv6() bool {
	return false
}
// IPNet returns ip as a *net.IPNet with a prefix of ones bits (out of 32).
func (ip CiliumIPv4) IPNet(ones int) *net.IPNet {
	return &net.IPNet{
		IP:   net.IP(ip),
		Mask: net.CIDRMask(ones, 32),
	}
}
// EndpointPrefix returns the /32 network that addresses only this IP.
func (ip CiliumIPv4) EndpointPrefix() *net.IPNet {
	return ip.IPNet(32)
}
// IP converts the raw bytes to the standard-library net.IP form.
func (ip CiliumIPv4) IP() net.IP {
	return net.IP(ip)
}
// String returns the textual form of ip, or "" for a nil receiver.
func (ip CiliumIPv4) String() string {
	if ip == nil {
		return ""
	}
	return net.IP(ip).String()
}
// MarshalJSON implements json.Marshaler using net.IP's JSON encoding
// (a quoted address string).
func (ip CiliumIPv4) MarshalJSON() ([]byte, error) {
	return json.Marshal(net.IP(ip))
}
// UnmarshalJSON implements json.Unmarshaler. It accepts a JSON string
// containing an IPv4 address; an empty string leaves ip unset.
//
// The value is decoded with encoding/json rather than by slicing the raw
// bytes (the previous b[1:len(b)-1] approach never verified that b was
// actually a quoted string and misparsed escape sequences and non-string
// JSON values).
func (ip *CiliumIPv4) UnmarshalJSON(b []byte) error {
	var str string
	if err := json.Unmarshal(b, &str); err != nil {
		return fmt.Errorf("Invalid CiliumIPv4 '%s': %s", string(b), err)
	}
	if str == "" {
		return nil
	}
	c, err := NewCiliumIPv4(str)
	if err != nil {
		return fmt.Errorf("Invalid CiliumIPv4 '%s': %s", str, err)
	}
	*ip = c
	return nil
}
// GetFamilyString returns the address family of ip as a string ("IPv4").
func (ip CiliumIPv4) GetFamilyString() string {
	return "IPv4"
}
// GetFamilyString returns the address family of ip as a string ("IPv6").
func (ip CiliumIPv6) GetFamilyString() string {
	return "IPv6"
}
| tgraf/cilium | pkg/addressing/ip.go | GO | apache-2.0 | 4,261 |
/*
* Copyright 2008 biaoping.yin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.frameworkset.spi.assemble;
import org.frameworkset.spi.assemble.BeanAccembleHelper.LoopObject;
/**
 * <p>Title: Context.java</p>
 *
 * <p>Description:
 * This context is used to defend against circular IoC (loop injection).
 * It records dependency-injection chain information: if the injecting bean
 * and the injected bean form a circular injection, the framework reports it
 * automatically. Whether a circular injection exists is determined by
 * walking this context chain.
 * </p>
 *
 * <p>Copyright: Copyright (c) 2007</p>
 *
 * <p>bboss workgroup</p>
 * @Date Aug 14, 2008 4:37:33 PM
 * @author biaoping.yin
 * @version 1.0
 */
public class Context {
	// Enclosing context (the bean whose injection triggered this one);
	// null when this context is the root.
	Context parent;
	// Id of the bean reference currently being resolved at this level.
	String refid;
	/**
	 * Holds the partially constructed bean instance for this reference, so a
	 * detected loop can hand the in-progress object back to the caller.
	 */
	private Object currentObj;
	// True when this context is the root of the resolution chain.
	boolean isroot = false;

	/** Creates the root context for a top-level bean resolution. */
	public Context(String refid)
	{
		isroot = true;
		this.refid = refid;
	}

	/** Creates a child context nested under {@code parent}. */
	public Context(Context parent,String refid)
	{
		this.parent = parent;
		this.refid = refid;
	}

	/**
	 * Returns true if {@code refid_} already appears somewhere on this
	 * context chain (i.e. a circular injection). On a hit, the partially
	 * built object of the matching level is exposed through {@code lo}.
	 */
	public boolean isLoopIOC(String refid_,LoopObject lo)
	{
		if(refid_.equals(this.refid))
		{
			lo.setObj(currentObj);
			return true;
		}
		if(this.isroot)
		{
			// Reached the root without a match: no loop.
			return false;
		}
		else if(parent != null)
		{
			// Recurse towards the root of the chain.
			return parent.isLoopIOC(refid_,lo);
		}
		return false;
	}

	/** Renders the chain as "root>child>...>refid" for diagnostics. */
	public String toString()
	{
		StringBuilder ret = new StringBuilder();
		if(this.isroot)
		{
			ret.append(refid);
		}
		else
		{
			ret.append(parent)
			   .append(">")
			   .append(refid);
		}
		return ret.toString();
	}

	public Object getCurrentObj() {
		return currentObj;
	}

	/** Records the in-progress bean and returns it for call chaining. */
	public Object setCurrentObj(Object currentObj) {
		this.currentObj = currentObj;
		return currentObj;
	}
}
| bbossgroups/bbossgroups-3.5 | bboss-core/src/org/frameworkset/spi/assemble/Context.java | Java | apache-2.0 | 2,300 |
// Copyright 2017 Monax Industries Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package vm
import (
"fmt"
"github.com/hyperledger/burrow/common/math/integral"
"github.com/hyperledger/burrow/common/sanity"
. "github.com/hyperledger/burrow/word256"
)
// Stack is the EVM data stack. Not goroutine safe.
type Stack struct {
	data []Word256 // backing storage; fixed capacity, never grown
	ptr  int       // index of the next free slot (== current depth)
	gas  *int64    // shared gas counter, decremented per stack operation
	err  *error    // shared error slot; only the first error sticks
}
// NewStack returns an empty stack with fixed capacity that draws gas from
// *gas and records its first failure into *err.
func NewStack(capacity int, gas *int64, err *error) *Stack {
	return &Stack{
		data: make([]Word256, capacity),
		ptr:  0,
		gas:  gas,
		err:  err,
	}
}
// useGas deducts gasToUse from the remaining gas, flagging
// ErrInsufficientGas when the balance does not cover it.
func (st *Stack) useGas(gasToUse int64) {
	// NOTE(review): a balance exactly equal to gasToUse is treated as
	// insufficient (strict '>') — confirm this boundary is intended.
	if *st.gas > gasToUse {
		*st.gas -= gasToUse
	} else {
		st.setErr(ErrInsufficientGas)
	}
}
// setErr records err unless an earlier error has already been recorded;
// the first failure wins.
func (st *Stack) setErr(err error) {
	if *st.err == nil {
		*st.err = err
	}
}
// Push places d on top of the stack, charging stack-op gas. On a full
// stack it flags ErrDataStackOverflow instead of growing the backing array.
func (st *Stack) Push(d Word256) {
	st.useGas(GasStackOp)
	if st.ptr == cap(st.data) {
		st.setErr(ErrDataStackOverflow)
		return
	}
	st.data[st.ptr] = d
	st.ptr++
}
// PushBytes pushes a 32-byte slice as a word (left-padded).
// Currently only called after Sha3; panics on any other length.
func (st *Stack) PushBytes(bz []byte) {
	if len(bz) != 32 {
		sanity.PanicSanity("Invalid bytes size: expected 32")
	}
	st.Push(LeftPadWord256(bz))
}
// Push64 pushes an int64 encoded as a word.
func (st *Stack) Push64(i int64) {
	st.Push(Int64ToWord256(i))
}
// Pop removes and returns the top word, charging stack-op gas. On an empty
// stack it flags ErrDataStackUnderflow and returns Zero256.
func (st *Stack) Pop() Word256 {
	st.useGas(GasStackOp)
	if st.ptr == 0 {
		st.setErr(ErrDataStackUnderflow)
		return Zero256
	}
	st.ptr--
	return st.data[st.ptr]
}
// PopBytes pops the top word and returns its raw 32-byte form.
func (st *Stack) PopBytes() []byte {
	return st.Pop().Bytes()
}
// Pop64 pops the top word and interprets it as an int64.
func (st *Stack) Pop64() int64 {
	return Int64FromWord256(st.Pop())
}
// Len returns the current stack depth.
func (st *Stack) Len() int {
	return st.ptr
}
// Swap exchanges the top of the stack with the n-th element from the top,
// charging stack-op gas. Flags ErrDataStackUnderflow if the stack holds
// fewer than n elements.
func (st *Stack) Swap(n int) {
	st.useGas(GasStackOp)
	if st.ptr < n {
		st.setErr(ErrDataStackUnderflow)
		return
	}
	st.data[st.ptr-n], st.data[st.ptr-1] = st.data[st.ptr-1], st.data[st.ptr-n]
}
// Dup pushes a copy of the n-th element from the top of the stack,
// charging stack-op gas. Flags ErrDataStackUnderflow if the stack holds
// fewer than n elements.
func (st *Stack) Dup(n int) {
	st.useGas(GasStackOp)
	if st.ptr < n {
		st.setErr(ErrDataStackUnderflow)
		return
	}
	st.Push(st.data[st.ptr-n])
}
// Peek returns the top word without popping it.
// Not an opcode, costs no gas; flags underflow on an empty stack.
func (st *Stack) Peek() Word256 {
	if st.ptr == 0 {
		st.setErr(ErrDataStackUnderflow)
		return Zero256
	}
	return st.data[st.ptr-1]
}
// Print dumps at most n of the topmost stack entries to stdout, top first,
// for debugging.
func (st *Stack) Print(n int) {
	fmt.Println("### stack ###")
	if st.ptr > 0 {
		// Walk downwards from the top of the stack.
		nn := integral.MinInt(n, st.ptr)
		for j, i := 0, st.ptr-1; i > st.ptr-1-nn; i-- {
			fmt.Printf("%-3d %X\n", j, st.data[i])
			j += 1
		}
	} else {
		fmt.Println("-- empty --")
	}
	fmt.Println("#############")
}
| benjaminbollen/eris-db | manager/burrow-mint/evm/stack.go | GO | apache-2.0 | 2,847 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sysds.test.functions.compress.instructionsSpark;
/**
 * Sparse-input variant of {@link CompressedSparkInstructionsTest}: reruns the
 * inherited test cases with 10% non-zero density.
 */
public class CompressedSparkInstructionsTestSparse extends CompressedSparkInstructionsTest {
	@Override
	public double getDensity() {
		return 0.1;
	}
}
| apache/incubator-systemml | src/test/java/org/apache/sysds/test/functions/compress/instructionsSpark/CompressedSparkInstructionsTestSparse.java | Java | apache-2.0 | 1,032 |
/**
* Copyright 2021 The AMP HTML Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import '../amp-iframely';
// Integration tests for <amp-iframely>: element rendering, required-attribute
// validation, iframe URL construction for both the data-id and
// data-key/data-url modes, and image-placeholder behaviour per layout.
describes.realWin(
  'amp-iframely',
  {
    amp: {
      extensions: ['amp-iframely'],
    },
  },
  (env) => {
    let win, doc;
    // Canonical Iframely media id used by the "data-id" test cases.
    const TestID = 'pHBVuFj';
    const paramsID = {
      'data-id': TestID,
      'width': '10',
      'height': '10',
      'layout': 'responsive',
    };
    const url = 'https//some.url/';
    const key = 'some-StRiNg-of-more-then-16';
    const paramsKU = {
      'data-key': key,
      'data-url': url,
      'layout': 'responsive',
      'width': '100',
      'height': '100',
    };

    beforeEach(() => {
      win = env.win;
      doc = win.document;
    });

    // Creates an <amp-iframely> with the given attributes, attaches it to the
    // document and drives the build + layout lifecycle.
    function renderIframely(params) {
      const iframely = doc.createElement('amp-iframely');
      for (const param in params) {
        iframely.setAttribute(param, params[param]);
      }
      doc.body.appendChild(iframely);
      return iframely.buildInternal().then(() => {
        iframely.layoutCallback();
        return iframely;
      });
    }

    // Shared assertions for a correctly constructed Iframely iframe.
    function testIframe(iframe, id) {
      expect(iframe).to.not.be.null;
      expect(iframe.src).to.equal('https://cdn.iframe.ly/' + id + '?amp=1');
      expect(iframe.className).to.match(/i-amphtml-fill-content/);
    }

    it('renders', () => {
      return renderIframely(paramsID).then((iframely) => {
        testIframe(iframely.querySelector('iframe'), TestID);
      });
    });

    it('does not render without required attributes', () => {
      return allowConsoleError(() => {
        return renderIframely({
          'layout': 'fill',
        }).should.eventually.be.rejectedWith(/<amp-iframely> requires either/);
      });
    });

    it('not renders with only URL parameter', () => {
      return allowConsoleError(() => {
        return renderIframely({
          'data-url': 'https//some.url/',
          'layout': 'fixed',
        }).should.eventually.be.rejectedWith(
          /Iframely data-key must also be set/
        );
      });
    });

    it('not renders with only KEY parameter', () => {
      return allowConsoleError(() => {
        return renderIframely({
          'data-key': 'some-StRiNg/',
          'layout': 'fixed',
        }).should.eventually.be.rejectedWith(
          /<amp-iframely> requires either "data-id" /
        );
      });
    });

    it('rejects with both data-url AND data-id parameters specified', () => {
      const params = {
        'data-id': TestID,
        'data-key': 'some-StRiNg/',
        'data-url': 'https//some.url/',
        'layout': 'fill',
      };
      return allowConsoleError(() => {
        return renderIframely(params).should.eventually.be.rejectedWith(
          /Only one way of setting either data-id or/
        );
      });
    });

    it('builds url for key-url pair properly', () => {
      return renderIframely(paramsKU).then((iframely) => {
        const iframe = iframely.querySelector('iframe');
        expect(iframe).to.not.be.null;
        expect(iframe.src).to.equal(
          `https://cdn.iframe.ly/api/iframe?url=${encodeURIComponent(
            url
          )}&key=${key}&=1`
        );
        expect(iframe.className).to.match(/i-amphtml-fill-content/);
      });
    });

    it('renders iframe properly', () => {
      return renderIframely(paramsID).then((iframely) => {
        const iframe = iframely.querySelector('iframe');
        testIframe(iframe, TestID);
        expect(iframe.tagName).to.equal('IFRAME');
        expect(iframe.className).to.match(/i-amphtml-fill-content/);
        // Border render cleared
        expect(iframe.getAttribute('style')).to.equal('border: 0px;');
      });
    });

    it('renders image placeholder', () => {
      return renderIframely(paramsID).then((iframely) => {
        const image = iframely.querySelector('img');
        expect(image).to.not.be.null;
        expect(image).to.have.class('i-amphtml-fill-content');
        expect(image.getAttribute('loading')).to.equal('lazy');
      });
    });

    it('renders image placeholder with proper URL for ID version', () => {
      return renderIframely(paramsID).then((iframely) => {
        const image = iframely.querySelector('img');
        expect(image).to.not.be.null;
        expect(image.getAttribute('src')).to.equal(
          `https://cdn.iframe.ly/${TestID}/thumbnail?amp=1`
        );
      });
    });

    it('renders image placeholder with proper URL for Key-URL version', () => {
      return renderIframely(paramsKU).then((iframely) => {
        const image = iframely.querySelector('img');
        expect(image).to.not.be.null;
        expect(image.getAttribute('src')).to.equal(
          `https://cdn.iframe.ly/api/thumbnail?url=${encodeURIComponent(
            url
          )}&key=${key}&=1`
        );
      });
    });

    it('does not render iframe and image placeholder with wrong domain', () => {
      // An unrecognized data-domain is ignored and the default CDN is used.
      const domain = 'mydomain.com';
      const properDomain = 'cdn.iframe.ly';
      const data = {
        'data-id': TestID,
        'data-domain': domain,
        'width': '100',
        'height': '100',
        'layout': 'responsive',
      };
      return renderIframely(data).then((iframely) => {
        const image = iframely.querySelector('img');
        expect(image).to.not.be.null;
        expect(image.getAttribute('src')).to.equal(
          `https://${properDomain}/${TestID}/thumbnail?amp=1`
        );
        const iframe = iframely.querySelector('iframe');
        expect(iframe.src).to.equal(`https://${properDomain}/${TestID}?amp=1`);
      });
    });

    it('renders iframe and image placeholder with proper domain', () => {
      const domain = 'iframe.ly';
      const data = {
        'data-id': TestID,
        'data-domain': domain,
        'width': '100',
        'height': '100',
        'layout': 'responsive',
      };
      return renderIframely(data).then((iframely) => {
        const image = iframely.querySelector('img');
        expect(image).to.not.be.null;
        expect(image.getAttribute('src')).to.equal(
          `https://${domain}/${TestID}/thumbnail?amp=1`
        );
        const iframe = iframely.querySelector('iframe');
        expect(iframe.src).to.equal(`https://${domain}/${TestID}?amp=1`);
      });
    });

    it('renders placeholder with data-img key set', () => {
      const data = {
        'data-id': TestID,
        'data-img': '',
        'layout': 'fill',
      };
      return renderIframely(data).then((iframely) => {
        const image = iframely.querySelector('img');
        expect(image).to.not.be.null;
        expect(iframely.querySelector('iframe')).to.not.be.null;
      });
    });

    it('does not render placeholder with resizable key set and responsive layout', () => {
      const data = {
        'data-id': TestID,
        'resizable': '',
        'height': '100',
        'width': '100',
        'layout': 'responsive',
      };
      return renderIframely(data).then((iframely) => {
        const image = iframely.querySelector('img');
        expect(image).to.be.null;
        expect(iframely.querySelector('iframe')).to.not.be.null;
      });
    });

    it('render placeholder with data-img and resizeable', () => {
      const data = {
        'data-id': TestID,
        'resizable': '',
        'data-img': '',
        'height': '100',
        'width': '100',
        'layout': 'responsive',
      };
      return renderIframely(data).then((iframely) => {
        const image = iframely.querySelector('img');
        expect(image).to.not.be.null;
        expect(iframely.querySelector('iframe')).to.not.be.null;
      });
    });

    it('does not render placeholder with fixed layout', () => {
      const data = {
        'data-id': TestID,
        'height': '100',
        'width': '100',
        'layout': 'fixed',
      };
      return renderIframely(data).then((iframely) => {
        const image = iframely.querySelector('img');
        expect(image).to.be.null;
        expect(iframely.querySelector('iframe')).to.not.be.null;
      });
    });

    it('does not render placeholder with fixed layout and resizable', () => {
      const data = {
        'data-id': TestID,
        'height': '100',
        'width': '100',
        'resizable': '',
        'layout': 'fixed',
      };
      return renderIframely(data).then((iframely) => {
        const image = iframely.querySelector('img');
        expect(image).to.be.null;
        expect(iframely.querySelector('iframe')).to.not.be.null;
      });
    });

    it('render placeholder with data-img responsive layout and resizable params', () => {
      const data = {
        'data-id': TestID,
        'height': '100',
        'width': '100',
        'data-img': '',
        'resizable': '',
        'layout': 'responsive',
      };
      return renderIframely(data).then((iframely) => {
        const image = iframely.querySelector('img');
        expect(image).to.not.be.null;
        expect(iframely.querySelector('iframe')).to.not.be.null;
      });
    });

    it('render placeholder with data-img fixed layout and resizable params', () => {
      const data = {
        'data-id': TestID,
        'height': '100',
        'width': '100',
        'data-img': '',
        'layout': 'fixed',
      };
      return renderIframely(data).then((iframely) => {
        const image = iframely.querySelector('img');
        expect(image).to.not.be.null;
        expect(iframely.querySelector('iframe')).to.not.be.null;
      });
    });

    it('does not render placeholder with fixed-height layout and resizable params', () => {
      const data = {
        'data-id': TestID,
        'height': '166',
        'layout': 'fixed-height',
        'resizable': '',
      };
      return renderIframely(data).then((iframely) => {
        const image = iframely.querySelector('img');
        expect(image).to.be.null;
        expect(iframely.querySelector('iframe')).to.not.be.null;
      });
    });

    it('does not render placeholder with resizable param set and layout===responsive', () => {
      const data = {
        'data-id': TestID,
        'height': '100',
        'width': '100',
        'resizable': '',
        'layout': 'responsive',
      };
      return renderIframely(data).then((iframely) => {
        const image = iframely.querySelector('img');
        expect(image).to.be.null;
        expect(iframely.querySelector('iframe')).to.not.be.null;
      });
    });

    it('does not render with invalid key length', () => {
      const data = {
        'data-url': 'https://some-url.com',
        'height': '100',
        'width': '100',
        'data-key': '',
        'layout': 'fixed',
      };
      return allowConsoleError(() => {
        return renderIframely(data).should.eventually.be.rejectedWith(
          /Iframely data-key must also be set when you specify data-url parameter/
        );
      });
    });

    it('render iframe options properly', () => {
      // data-* attributes (other than the reserved ones) are forwarded as
      // query options on the iframe URL.
      const data = {
        'data-id': TestID,
        'height': '100',
        'width': '100',
        'data-optionOne': 'value',
        'data-option-two': 'value',
        'data-img': 'something',
        'layout': 'responsive',
      };
      return renderIframely(data).then((iframely) => {
        const image = iframely.querySelector('img');
        const iframe = iframely.querySelector('iframe');
        expect(image).to.not.be.null;
        expect(iframe).to.not.be.null;
        expect(iframe.src.includes('&optionone=value&optionTwo=value')).to.be
          .true;
      });
    });
  }
);
| rsimha-amp/amphtml | extensions/amp-iframely/0.1/test/test-amp-iframely.js | JavaScript | apache-2.0 | 12,110 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.isis.viewer.wicket.ui.errors;
import java.util.Iterator;
import java.util.List;
import com.google.common.base.Throwables;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import org.apache.isis.applib.NonRecoverableException;
import org.apache.isis.applib.services.error.ErrorReportingService;
import org.apache.isis.applib.services.error.Ticket;
import org.apache.isis.core.metamodel.spec.feature.ObjectMember;
import org.apache.isis.viewer.wicket.model.models.ModelAbstract;
/**
 * Backing model for rendering an exception to the user: holds the main
 * message, whether the exception was recognized or caused by an
 * authorization failure, and the stack trace(s) of its causal chain.
 */
public class ExceptionModel extends ModelAbstract<List<StackTraceDetail>> {

    private static final long serialVersionUID = 1L;

    private static final String MAIN_MESSAGE_IF_NOT_RECOGNIZED = "Sorry, an unexpected error occurred.";

    // Flattened stack trace of the whole causal chain (with separator rows).
    private List<StackTraceDetail> stackTraceDetailList;
    // One stack trace per element of the causal chain.
    private List<List<StackTraceDetail>> stackTraceDetailLists;
    private boolean recognized;
    private boolean authorizationCause;

    private final String mainMessage;

    public static ExceptionModel create(String recognizedMessageIfAny, Exception ex) {
        return new ExceptionModel(recognizedMessageIfAny, ex);
    }

    /**
     * Three cases: authorization exception, else recognized, else not recognized.
     *
     * @param recognizedMessageIfAny message from an exception recognizer, or null
     * @param ex the exception being rendered
     */
    private ExceptionModel(String recognizedMessageIfAny, Exception ex) {
        final ObjectMember.AuthorizationException authorizationException = causalChainOf(ex, ObjectMember.AuthorizationException.class);
        if (authorizationException != null) {
            this.authorizationCause = true;
            this.mainMessage = authorizationException.getMessage();
        } else {
            this.authorizationCause = false;
            if (recognizedMessageIfAny != null) {
                this.recognized = true;
                this.mainMessage = recognizedMessageIfAny;
            } else {
                this.recognized = false;
                // see if we can find a NonRecoverableException in the stack trace
                Iterable<NonRecoverableException> appEx = Iterables.filter(Throwables.getCausalChain(ex), NonRecoverableException.class);
                Iterator<NonRecoverableException> iterator = appEx.iterator();
                NonRecoverableException nonRecoverableException = iterator.hasNext() ? iterator.next() : null;
                this.mainMessage = nonRecoverableException != null ? nonRecoverableException.getMessage() : MAIN_MESSAGE_IF_NOT_RECOGNIZED;
            }
        }
        stackTraceDetailList = asStackTrace(ex);
        stackTraceDetailLists = asStackTraces(ex);
    }

    @Override
    protected List<StackTraceDetail> load() {
        return stackTraceDetailList;
    }

    /**
     * Returns the first element of {@code ex}'s causal chain assignable to
     * {@code exType}, or null if there is none.
     */
    @SuppressWarnings("unchecked")
    private static <T extends Exception> T causalChainOf(Exception ex, Class<T> exType) {
        final List<Throwable> causalChain = Throwables.getCausalChain(ex);
        for (Throwable cause : causalChain) {
            if (exType.isAssignableFrom(cause.getClass())) {
                // Safe: isAssignableFrom guarantees the runtime type.
                return (T) cause;
            }
        }
        return null;
    }

    @Override
    public void setObject(List<StackTraceDetail> stackTraceDetail) {
        // Ignore null payloads rather than clearing the recorded trace.
        if (stackTraceDetail == null) {
            return;
        }
        this.stackTraceDetailList = stackTraceDetail;
    }

    private Ticket ticket;

    public Ticket getTicket() {
        return ticket;
    }

    /**
     * Optionally called if an {@link ErrorReportingService} has been configured and returns a <tt>non-null</tt> ticket
     * to represent the fact that the error has been recorded.
     */
    public void setTicket(final Ticket ticket) {
        this.ticket = ticket;
    }

    public boolean isRecognized() {
        return recognized;
    }

    public String getMainMessage() {
        return mainMessage;
    }

    /**
     * Whether this was an authorization exception (so UI can suppress information, eg stack trace).
     */
    public boolean isAuthorizationException() {
        return authorizationCause;
    }

    public List<StackTraceDetail> getStackTrace() {
        return stackTraceDetailList;
    }

    public List<List<StackTraceDetail>> getStackTraces() {
        return stackTraceDetailLists;
    }

    /**
     * Flattens the causal chain of {@code ex} into a single trace, with
     * spacer/"caused by" separator rows between the individual causes.
     */
    private static List<StackTraceDetail> asStackTrace(Throwable ex) {
        List<StackTraceDetail> stackTrace = Lists.newArrayList();
        List<Throwable> causalChain = Throwables.getCausalChain(ex);
        boolean firstTime = true;
        for (Throwable cause : causalChain) {
            if (!firstTime) {
                stackTrace.add(StackTraceDetail.spacer());
                stackTrace.add(StackTraceDetail.causedBy());
                stackTrace.add(StackTraceDetail.spacer());
            } else {
                firstTime = false;
            }
            append(cause, stackTrace);
        }
        return stackTrace;
    }

    /**
     * Produces one separate trace per element of the causal chain.
     */
    private static List<List<StackTraceDetail>> asStackTraces(Throwable ex) {
        List<List<StackTraceDetail>> stackTraces = Lists.newArrayList();
        List<Throwable> causalChain = Throwables.getCausalChain(ex);
        // (removed an unused 'firstTime' flag left over from asStackTrace)
        for (Throwable cause : causalChain) {
            List<StackTraceDetail> stackTrace = Lists.newArrayList();
            append(cause, stackTrace);
            stackTraces.add(stackTrace);
        }
        return stackTraces;
    }

    /** Appends class name, message and frames of {@code cause} to the trace. */
    private static void append(final Throwable cause, final List<StackTraceDetail> stackTrace) {
        stackTrace.add(StackTraceDetail.exceptionClassName(cause));
        stackTrace.add(StackTraceDetail.exceptionMessage(cause));
        for (StackTraceElement el : cause.getStackTrace()) {
            stackTrace.add(StackTraceDetail.element(el));
        }
    }
}
| incodehq/isis | core/viewer-wicket-ui/src/main/java/org/apache/isis/viewer/wicket/ui/errors/ExceptionModel.java | Java | apache-2.0 | 6,578 |
//
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Warning: This code was generated by a tool.
//
// Changes to this file may cause incorrect behavior and will be lost if the
// code is regenerated.
using Microsoft.Azure.Commands.Compute.Automation.Models;
using Microsoft.Azure.Commands.ResourceManager.Common.ArgumentCompleters;
using Microsoft.Azure.Management.Compute;
using Microsoft.Azure.Management.Compute.Models;
using System;
using System.Collections;
using System.Collections.Generic;
using System.Linq;
using System.Management.Automation;
namespace Microsoft.Azure.Commands.Compute.Automation
{
[Cmdlet(VerbsData.Update, ResourceManager.Common.AzureRMConstants.AzureRMPrefix + "Snapshot", DefaultParameterSetName = "DefaultParameter", SupportsShouldProcess = true)]
[OutputType(typeof(PSSnapshot))]
public partial class UpdateAzureRmSnapshot : ComputeAutomationBaseCmdlet
{
    /// <summary>
    /// Updates a managed snapshot. When a SnapshotUpdate object is supplied
    /// (DefaultParameter set) the patch-style Update API is used; when a full
    /// Snapshot object is supplied (FriendMethod set) the resource is
    /// replaced via CreateOrUpdate.
    /// </summary>
    public override void ExecuteCmdlet()
    {
        base.ExecuteCmdlet();
        ExecuteClientAction(() =>
        {
            if (ShouldProcess(this.SnapshotName, VerbsData.Update))
            {
                string resourceGroupName = this.ResourceGroupName;
                string snapshotName = this.SnapshotName;
                SnapshotUpdate snapshotupdate = new SnapshotUpdate();
                ComputeAutomationAutoMapperProfile.Mapper.Map<PSSnapshotUpdate, SnapshotUpdate>(this.SnapshotUpdate, snapshotupdate);
                Snapshot snapshot = new Snapshot();
                ComputeAutomationAutoMapperProfile.Mapper.Map<PSSnapshot, Snapshot>(this.Snapshot, snapshot);
                // Choose the SDK call based on which parameter set was bound.
                var result = (this.SnapshotUpdate == null)
                    ? SnapshotsClient.CreateOrUpdate(resourceGroupName, snapshotName, snapshot)
                    : SnapshotsClient.Update(resourceGroupName, snapshotName, snapshotupdate);
                var psObject = new PSSnapshot();
                ComputeAutomationAutoMapperProfile.Mapper.Map<Snapshot, PSSnapshot>(result, psObject);
                WriteObject(psObject);
            }
        });
    }

    /// <summary>Name of the resource group containing the snapshot.</summary>
    [Parameter(
        ParameterSetName = "DefaultParameter",
        Position = 1,
        Mandatory = true,
        ValueFromPipelineByPropertyName = true)]
    [Parameter(
        ParameterSetName = "FriendMethod",
        Position = 1,
        Mandatory = true,
        ValueFromPipelineByPropertyName = true)]
    [ResourceGroupCompleter]
    public string ResourceGroupName { get; set; }

    /// <summary>Name of the snapshot to update.</summary>
    [Parameter(
        ParameterSetName = "DefaultParameter",
        Position = 2,
        Mandatory = true,
        ValueFromPipelineByPropertyName = true)]
    [Parameter(
        ParameterSetName = "FriendMethod",
        Position = 2,
        Mandatory = true,
        ValueFromPipelineByPropertyName = true)]
    [ResourceNameCompleter("Microsoft.Compute/snapshots", "ResourceGroupName")]
    [Alias("Name")]
    public string SnapshotName { get; set; }

    /// <summary>Patch payload applied via the Update API.</summary>
    [Parameter(
        ParameterSetName = "DefaultParameter",
        Position = 3,
        Mandatory = true,
        ValueFromPipeline = true)]
    public PSSnapshotUpdate SnapshotUpdate { get; set; }

    /// <summary>Full snapshot object applied via CreateOrUpdate.</summary>
    [Parameter(
        ParameterSetName = "FriendMethod",
        Position = 4,
        Mandatory = true,
        ValueFromPipelineByPropertyName = false,
        ValueFromPipeline = true)]
    [AllowNull]
    public PSSnapshot Snapshot { get; set; }

    [Parameter(Mandatory = false, HelpMessage = "Run cmdlet in the background")]
    public SwitchParameter AsJob { get; set; }
}
}
| AzureAutomationTeam/azure-powershell | src/ResourceManager/Compute/Commands.Compute/Generated/Snapshot/SnapshotUpdateMethod.cs | C# | apache-2.0 | 4,375 |
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
from tempest import exceptions
from tempest import test
import time
CONF = config.CONF
class AttachInterfacesTestJSON(base.BaseV2ComputeTest):
    """Tests attaching/detaching network interfaces and fixed IPs on servers.

    Requires Neutron and the compute interface-attach feature flag.
    """

    @classmethod
    def resource_setup(cls):
        # Skip the whole class when the deployment cannot support it.
        if not CONF.service_available.neutron:
            raise cls.skipException("Neutron is required")
        if not CONF.compute_feature_enabled.interface_attach:
            raise cls.skipException("Interface attachment is not available.")
        # This test class requires network and subnet
        cls.set_network_resources(network=True, subnet=True)
        super(AttachInterfacesTestJSON, cls).resource_setup()
        cls.client = cls.os.interfaces_client

    def _check_interface(self, iface, port_id=None, network_id=None,
                         fixed_ip=None):
        # Only the fields the caller supplied are verified.
        self.assertIn('port_state', iface)
        if port_id:
            self.assertEqual(iface['port_id'], port_id)
        if network_id:
            self.assertEqual(iface['net_id'], network_id)
        if fixed_ip:
            self.assertEqual(iface['fixed_ips'][0]['ip_address'], fixed_ip)

    def _create_server_get_interfaces(self):
        """Boot a server and return it with its ACTIVE interface list."""
        resp, server = self.create_test_server(wait_until='ACTIVE')
        resp, ifs = self.client.list_interfaces(server['id'])
        self.assertEqual(200, resp.status)
        resp, body = self.client.wait_for_interface_status(
            server['id'], ifs[0]['port_id'], 'ACTIVE')
        ifs[0]['port_state'] = body['port_state']
        return server, ifs

    def _test_create_interface(self, server):
        # Attach a new interface with no explicit network (Nova picks one).
        resp, iface = self.client.create_interface(server['id'])
        self.assertEqual(200, resp.status)
        resp, iface = self.client.wait_for_interface_status(
            server['id'], iface['port_id'], 'ACTIVE')
        self._check_interface(iface)
        return iface

    def _test_create_interface_by_network_id(self, server, ifs):
        # Attach a new interface on the same network as the first one.
        network_id = ifs[0]['net_id']
        resp, iface = self.client.create_interface(server['id'],
                                                   network_id=network_id)
        self.assertEqual(200, resp.status)
        resp, iface = self.client.wait_for_interface_status(
            server['id'], iface['port_id'], 'ACTIVE')
        self._check_interface(iface, network_id=network_id)
        return iface

    def _test_show_interface(self, server, ifs):
        iface = ifs[0]
        resp, _iface = self.client.show_interface(server['id'],
                                                  iface['port_id'])
        self.assertEqual(200, resp.status)
        self.assertEqual(iface, _iface)

    def _test_delete_interface(self, server, ifs):
        # NOTE(danms): delete not the first or last, but one in the middle
        iface = ifs[1]
        resp, _ = self.client.delete_interface(server['id'], iface['port_id'])
        self.assertEqual(202, resp.status)
        _ifs = self.client.list_interfaces(server['id'])[1]

        # Poll until the interface disappears or the build timeout elapses.
        start = int(time.time())
        while len(ifs) == len(_ifs):
            time.sleep(self.build_interval)
            _ifs = self.client.list_interfaces(server['id'])[1]
            timed_out = int(time.time()) - start >= self.build_timeout
            if len(ifs) == len(_ifs) and timed_out:
                message = ('Failed to delete interface within '
                           'the required time: %s sec.' % self.build_timeout)
                raise exceptions.TimeoutException(message)

        self.assertNotIn(iface['port_id'], [i['port_id'] for i in _ifs])
        return _ifs

    def _compare_iface_list(self, list1, list2):
        # NOTE(danms): port_state will likely have changed, so just
        # confirm the port_ids are the same at least
        list1 = [x['port_id'] for x in list1]
        list2 = [x['port_id'] for x in list2]

        self.assertEqual(sorted(list1), sorted(list2))

    @test.attr(type='smoke')
    @test.services('network')
    def test_create_list_show_delete_interfaces(self):
        # End-to-end lifecycle: create (implicit + by network), list, show,
        # then delete one interface.
        server, ifs = self._create_server_get_interfaces()
        interface_count = len(ifs)
        self.assertTrue(interface_count > 0)
        self._check_interface(ifs[0])

        iface = self._test_create_interface(server)
        ifs.append(iface)

        iface = self._test_create_interface_by_network_id(server, ifs)
        ifs.append(iface)

        resp, _ifs = self.client.list_interfaces(server['id'])
        self._compare_iface_list(ifs, _ifs)

        self._test_show_interface(server, ifs)

        _ifs = self._test_delete_interface(server, ifs)
        self.assertEqual(len(ifs) - 1, len(_ifs))

    @test.attr(type='smoke')
    @test.services('network')
    def test_add_remove_fixed_ip(self):
        # Add and Remove the fixed IP to server.
        server, ifs = self._create_server_get_interfaces()
        interface_count = len(ifs)
        self.assertTrue(interface_count > 0)
        self._check_interface(ifs[0])
        network_id = ifs[0]['net_id']
        resp, body = self.client.add_fixed_ip(server['id'],
                                              network_id)
        self.assertEqual(202, resp.status)
        # Remove the fixed IP from server.
        server_resp, server_detail = self.os.servers_client.get_server(
            server['id'])
        # Get the Fixed IP from server.
        fixed_ip = None
        for ip_set in server_detail['addresses']:
            for ip in server_detail['addresses'][ip_set]:
                if ip['OS-EXT-IPS:type'] == 'fixed':
                    fixed_ip = ip['addr']
                    break
            if fixed_ip is not None:
                break
        resp, body = self.client.remove_fixed_ip(server['id'],
                                                 fixed_ip)
        self.assertEqual(202, resp.status)
class AttachInterfacesTestXML(AttachInterfacesTestJSON):
    # Rerun the same suite using the XML interface of the API client.
    _interface = 'xml'
| queria/my-tempest | tempest/api/compute/servers/test_attach_interfaces.py | Python | apache-2.0 | 6,514 |
import { get } from 'lodash';
class WizardHelper {
  constructor() {}

  // True when the schedule string follows the "every <n> <unit>" form.
  isScheduleModeEvery(scheduleString) {
    const everyPattern = /every\s(\d+)\s(seconds|minutes|hours|days|months|years)/;
    return Boolean(scheduleString.match(everyPattern));
  }

  // A wizard-created watcher carries chart query params in its config.
  isWizardWatcher(watcher) {
    return get(watcher, 'wizard.chart_query_params');
  }

  // Tag id is the name joined with the uuid, dashes stripped from the uuid.
  getUniqueTagId(name, uuid) {
    return `${name}_${uuid.replace(/-/g, '')}`;
  }
}
export default WizardHelper;
| elasticfence/kaae | public/pages/watcher_wizard/services/wizard_helper/wizard_helper.js | JavaScript | apache-2.0 | 414 |
// (C) Copyright 2014-2015 Hewlett Packard Enterprise Development LP
import React, { Component } from 'react';
import PropTypes from 'prop-types';
import classnames from 'classnames';
import CSSClassnames from '../../../utils/CSSClassnames';
import Intl from '../../../utils/Intl';
import Props from '../../../utils/Props';
const CLASS_ROOT = CSSClassnames.CONTROL_ICON;
const COLOR_INDEX = CSSClassnames.COLOR_INDEX;
// Grommet "DocumentConfig" SVG icon component.
export default class Icon extends Component {
  render () {
    const { className, colorIndex } = this.props;
    let { a11yTitle, size, responsive } = this.props;
    let { intl } = this.context;

    // Compose control-icon classes plus optional size/responsive/color
    // modifiers.
    const classes = classnames(
      CLASS_ROOT,
      `${CLASS_ROOT}-document-config`,
      className,
      {
        [`${CLASS_ROOT}--${size}`]: size,
        [`${CLASS_ROOT}--responsive`]: responsive,
        [`${COLOR_INDEX}-${colorIndex}`]: colorIndex
      }
    );

    // Fall back to the translated icon name for accessibility.
    a11yTitle = a11yTitle || Intl.getMessage(intl, 'document-config');

    // Forward any props that are not consumed by this component.
    const restProps = Props.omit(this.props, Object.keys(Icon.propTypes));
    return <svg {...restProps} version="1.1" viewBox="0 0 24 24" width="24px" height="24px" role="img" className={classes} aria-label={a11yTitle}><path fill="none" stroke="#000" strokeWidth="2" d="M4.99787498,8.99999999 L4.99787498,0.999999992 L19.4999998,0.999999992 L22.9999998,4.50000005 L23,23 L16,23 M18,1 L18,6 L23,6 M9,14 L9,11 M9,20 C10.6568542,20 12,18.6568542 12,17 C12,15.3431458 10.6568542,14 9,14 C7.34314575,14 6,15.3431458 6,17 C6,18.6568542 7.34314575,20 9,20 Z M9,23 L9,20 M12,17 L15,17 M3,17 L6,17 M5,13 L7,15 M11,19 L13,21 M13,13 L11,15 M7,19 L5,21"/></svg>;
  }
};
Icon.contextTypes = {
intl: PropTypes.object
};
Icon.defaultProps = {
responsive: true
};
Icon.displayName = 'DocumentConfig';
Icon.icon = true;
Icon.propTypes = {
a11yTitle: PropTypes.string,
colorIndex: PropTypes.string,
size: PropTypes.oneOf(['xsmall', 'small', 'medium', 'large', 'xlarge', 'huge']),
responsive: PropTypes.bool
};
| kylebyerly-hp/grommet | src/js/components/icons/base/DocumentConfig.js | JavaScript | apache-2.0 | 1,979 |
from django.conf import settings
from django.utils.module_loading import import_string
from .tracing import DjangoTracing
from .tracing import initialize_global_tracer
try:
# Django >= 1.10
from django.utils.deprecation import MiddlewareMixin
except ImportError:
# Not required for Django <= 1.9, see:
# https://docs.djangoproject.com/en/1.10/topics/http/middleware/#upgrading-pre-django-1-10-style-middleware
MiddlewareMixin = object
class OpenTracingMiddleware(MiddlewareMixin):
    '''
    Django middleware that applies OpenTracing instrumentation to incoming
    requests.

    __init__() is only called once, with no arguments, when the Web server
    responds to the first request.
    '''

    def __init__(self, get_response=None):
        '''
        Resolves the DjangoTracing instance from the Django settings and
        keeps the downstream ``get_response`` callable (Django >= 1.10).
        '''
        self._init_tracing()
        self._tracing = settings.OPENTRACING_TRACING
        self.get_response = get_response

    def _init_tracing(self):
        # Resolve the tracing object, in order of precedence:
        # 1. OPENTRACING_TRACER (deprecated, kept for backwards compatibility)
        # 2. OPENTRACING_TRACING (a DjangoTracing instance)
        # 3. OPENTRACING_TRACER_CALLABLE (+ optional parameters) that builds
        #    a tracer instance
        # 4. the global opentracing tracer
        if getattr(settings, 'OPENTRACING_TRACER', None) is not None:
            # Backwards compatibility.
            tracing = settings.OPENTRACING_TRACER
        elif getattr(settings, 'OPENTRACING_TRACING', None) is not None:
            tracing = settings.OPENTRACING_TRACING
        elif getattr(settings, 'OPENTRACING_TRACER_CALLABLE',
                     None) is not None:
            tracer_callable = settings.OPENTRACING_TRACER_CALLABLE
            tracer_parameters = getattr(settings,
                                        'OPENTRACING_TRACER_PARAMETERS',
                                        {})
            # The callable may be given as a dotted-path string.
            if not callable(tracer_callable):
                tracer_callable = import_string(tracer_callable)
            tracer = tracer_callable(**tracer_parameters)
            tracing = DjangoTracing(tracer)
        else:
            # Rely on the global Tracer.
            tracing = DjangoTracing()

        # trace_all defaults to True when used as middleware.
        tracing._trace_all = getattr(settings, 'OPENTRACING_TRACE_ALL', True)

        # set the start_span_cb hook, if any.
        tracing._start_span_cb = getattr(settings, 'OPENTRACING_START_SPAN_CB',
                                         None)

        # Normalize the tracing field in settings, including the old field.
        settings.OPENTRACING_TRACING = tracing
        settings.OPENTRACING_TRACER = tracing

        # Potentially set the global Tracer (unless we rely on it already).
        if getattr(settings, 'OPENTRACING_SET_GLOBAL_TRACER', False):
            initialize_global_tracer(tracing)

    def process_view(self, request, view_func, view_args, view_kwargs):
        # determine whether this middleware should be applied
        # NOTE: if tracing is on but not tracing all requests, then the tracing
        # occurs through decorator functions rather than middleware
        if not self._tracing._trace_all:
            return None

        # getattr with a default replaces the previous hasattr/getattr pair.
        traced_attributes = getattr(settings, 'OPENTRACING_TRACED_ATTRIBUTES',
                                    [])
        self._tracing._apply_tracing(request, view_func, traced_attributes)

    def process_exception(self, request, exception):
        # Finish the request span, tagging it with the raised exception.
        self._tracing._finish_tracing(request, error=exception)

    def process_response(self, request, response):
        # Finish the request span normally and pass the response through.
        self._tracing._finish_tracing(request, response=response)
        return response
| kawamon/hue | desktop/core/ext-py/django_opentracing-1.1.0/django_opentracing/middleware.py | Python | apache-2.0 | 3,646 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/appstream/model/CreateFleetRequest.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::AppStream::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
// Default-constructs the request with every field unset. The m_*HasBeenSet
// flags ensure that SerializePayload() emits only fields that were assigned
// explicitly; enum members start at NOT_SET and numerics at 0.
CreateFleetRequest::CreateFleetRequest() : 
    m_nameHasBeenSet(false),
    m_imageNameHasBeenSet(false),
    m_imageArnHasBeenSet(false),
    m_instanceTypeHasBeenSet(false),
    m_fleetType(FleetType::NOT_SET),
    m_fleetTypeHasBeenSet(false),
    m_computeCapacityHasBeenSet(false),
    m_vpcConfigHasBeenSet(false),
    m_maxUserDurationInSeconds(0),
    m_maxUserDurationInSecondsHasBeenSet(false),
    m_disconnectTimeoutInSeconds(0),
    m_disconnectTimeoutInSecondsHasBeenSet(false),
    m_descriptionHasBeenSet(false),
    m_displayNameHasBeenSet(false),
    m_enableDefaultInternetAccess(false),
    m_enableDefaultInternetAccessHasBeenSet(false),
    m_domainJoinInfoHasBeenSet(false),
    m_tagsHasBeenSet(false),
    m_idleDisconnectTimeoutInSeconds(0),
    m_idleDisconnectTimeoutInSecondsHasBeenSet(false),
    m_iamRoleArnHasBeenSet(false),
    m_streamView(StreamView::NOT_SET),
    m_streamViewHasBeenSet(false)
{
}
// Builds the JSON request body. Only fields whose HasBeenSet flag is true are
// serialized; enums are converted to their wire names and the tag map is
// emitted as a nested JSON object.
Aws::String CreateFleetRequest::SerializePayload() const
{
  JsonValue payload;

  if(m_nameHasBeenSet)
  {
   payload.WithString("Name", m_name);
  }

  if(m_imageNameHasBeenSet)
  {
   payload.WithString("ImageName", m_imageName);
  }

  if(m_imageArnHasBeenSet)
  {
   payload.WithString("ImageArn", m_imageArn);
  }

  if(m_instanceTypeHasBeenSet)
  {
   payload.WithString("InstanceType", m_instanceType);
  }

  if(m_fleetTypeHasBeenSet)
  {
   payload.WithString("FleetType", FleetTypeMapper::GetNameForFleetType(m_fleetType));
  }

  if(m_computeCapacityHasBeenSet)
  {
   payload.WithObject("ComputeCapacity", m_computeCapacity.Jsonize());
  }

  if(m_vpcConfigHasBeenSet)
  {
   payload.WithObject("VpcConfig", m_vpcConfig.Jsonize());
  }

  if(m_maxUserDurationInSecondsHasBeenSet)
  {
   payload.WithInteger("MaxUserDurationInSeconds", m_maxUserDurationInSeconds);
  }

  if(m_disconnectTimeoutInSecondsHasBeenSet)
  {
   payload.WithInteger("DisconnectTimeoutInSeconds", m_disconnectTimeoutInSeconds);
  }

  if(m_descriptionHasBeenSet)
  {
   payload.WithString("Description", m_description);
  }

  if(m_displayNameHasBeenSet)
  {
   payload.WithString("DisplayName", m_displayName);
  }

  if(m_enableDefaultInternetAccessHasBeenSet)
  {
   payload.WithBool("EnableDefaultInternetAccess", m_enableDefaultInternetAccess);
  }

  if(m_domainJoinInfoHasBeenSet)
  {
   payload.WithObject("DomainJoinInfo", m_domainJoinInfo.Jsonize());
  }

  if(m_tagsHasBeenSet)
  {
   // Tags are a string-to-string map serialized as one JSON object.
   JsonValue tagsJsonMap;
   for(auto& tagsItem : m_tags)
   {
     tagsJsonMap.WithString(tagsItem.first, tagsItem.second);
   }
   payload.WithObject("Tags", std::move(tagsJsonMap));
  }

  if(m_idleDisconnectTimeoutInSecondsHasBeenSet)
  {
   payload.WithInteger("IdleDisconnectTimeoutInSeconds", m_idleDisconnectTimeoutInSeconds);
  }

  if(m_iamRoleArnHasBeenSet)
  {
   payload.WithString("IamRoleArn", m_iamRoleArn);
  }

  if(m_streamViewHasBeenSet)
  {
   payload.WithString("StreamView", StreamViewMapper::GetNameForStreamView(m_streamView));
  }

  return payload.View().WriteReadable();
}
// Adds the X-Amz-Target header that routes this call to the CreateFleet
// operation of the AppStream (PhotonAdminProxyService) JSON-RPC endpoint.
Aws::Http::HeaderValueCollection CreateFleetRequest::GetRequestSpecificHeaders() const
{
  Aws::Http::HeaderValueCollection headers;
  headers.insert(Aws::Http::HeaderValuePair("X-Amz-Target", "PhotonAdminProxyService.CreateFleet"));
  return headers;
}
| jt70471/aws-sdk-cpp | aws-cpp-sdk-appstream/source/model/CreateFleetRequest.cpp | C++ | apache-2.0 | 3,638 |
/**
* Get more info at : www.jrebirth.org .
* Copyright JRebirth.org © 2011-2013
* Contact : sebastien.bordes@jrebirth.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jrebirth.af.core.resource.font;
import javafx.scene.text.Font;
import javafx.scene.text.FontPosture;
import javafx.scene.text.FontWeight;
import org.jrebirth.af.api.resource.builder.VariantResourceBuilder;
import org.jrebirth.af.api.resource.font.FontExtension;
import org.jrebirth.af.api.resource.font.FontItem;
import org.jrebirth.af.api.resource.font.FontName;
import org.jrebirth.af.api.resource.font.FontParams;
import org.jrebirth.af.core.resource.ResourceBuilders;
/**
 * The interface <strong>FontItemBase</strong> used to provided convenient shortcuts for initializing a {@link Font}.
 *
 * @author Sébastien Bordes
 */
public interface FontItemBase extends FontItem {

    /**
     * {@inheritDoc}
     *
     * @return the shared font resource builder used to build and cache fonts
     */
    @Override
    default VariantResourceBuilder<FontItem, FontParams, Font, Double> builder() {
        return ResourceBuilders.FONT_BUILDER;
    }

    /**
     * The interface <strong>Real</strong> provides shortcuts method used to build and register a {@link RealFont}.
     */
    interface Real extends FontItemBase {

        /**
         * Build and register a {@link RealFont} {@link FontParams}.
         *
         * @param name the name of the font
         * @param size the size of the font
         * @param extension the font extension
         */
        default void real(final FontName name, final double size, final FontExtension extension) {
            set(new RealFont(name, size, extension));
        }

        /**
         * Build and register a {@link RealFont} {@link FontParams}.
         *
         * @param name the name of the font
         * @param size the size of the font
         */
        default void real(final FontName name, final double size) {
            set(new RealFont(name, size));
        }
    }

    /**
     * The interface <strong>Family</strong> provides shortcuts method used to build and register a {@link FamilyFont}.
     */
    interface Family extends FontItemBase {

        /**
         * Build and register a {@link FamilyFont} {@link FontParams}.
         *
         * @param family the font family
         * @param size the font size
         * @param extension the font extension
         */
        default void family(final String family, final double size, final FontExtension extension) {
            set(new FamilyFont(family, size, extension));
        }

        /**
         * Build and register a {@link FamilyFont} {@link FontParams}.
         *
         * @param family the font family
         * @param size the font size
         * @param weight the font weight {@link FontWeight}
         * @param extension the font extension
         */
        default void family(final String family, final double size, final FontWeight weight, final FontExtension extension) {
            set(new FamilyFont(family, size, extension, weight));
        }

        /**
         * Build and register a {@link FamilyFont} {@link FontParams}.
         *
         * @param family the font family
         * @param size the font size
         * @param extension the font extension
         * @param posture the font posture {@link FontPosture}
         */
        default void family(final String family, final double size, final FontExtension extension, final FontPosture posture) {
            set(new FamilyFont(family, size, extension, posture));
        }

        /**
         * Build and register a {@link FamilyFont} {@link FontParams}.
         *
         * @param family the font family
         * @param size the font size
         * @param extension the font extension
         * @param weight the font weight {@link FontWeight}
         * @param posture the font posture {@link FontPosture}
         */
        default void family(final String family, final double size, final FontExtension extension, final FontWeight weight, final FontPosture posture) {
            set(new FamilyFont(family, size, extension, weight, posture));
        }

        /**
         * Build and register a {@link FamilyFont} {@link FontParams}.
         *
         * @param family the font family
         * @param size the font size
         */
        default void family(final String family, final double size) {
            set(new FamilyFont(family, size));
        }

        /**
         * Build and register a {@link FamilyFont} {@link FontParams}.
         *
         * @param family the font family
         * @param size the font size
         * @param weight the font weight {@link FontWeight}
         */
        default void family(final String family, final double size, final FontWeight weight) {
            set(new FamilyFont(family, size, weight));
        }

        /**
         * Build and register a {@link FamilyFont} {@link FontParams}.
         *
         * @param family the font family
         * @param size the font size
         * @param posture the font posture {@link FontPosture}
         */
        default void family(final String family, final double size, final FontPosture posture) {
            set(new FamilyFont(family, size, posture));
        }

        /**
         * Build and register a {@link FamilyFont} {@link FontParams}.
         *
         * @param family the font family
         * @param size the font size
         * @param weight the font weight {@link FontWeight}
         * @param posture the font posture {@link FontPosture}
         */
        default void family(final String family, final double size, final FontWeight weight, final FontPosture posture) {
            set(new FamilyFont(family, size, weight, posture));
        }
    }
}
| JRebirth/JRebirth | org.jrebirth.af/core/src/main/java/org/jrebirth/af/core/resource/font/FontItemBase.java | Java | apache-2.0 | 6,279 |
/*
* Copyright 2012 Martin Winandy
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package org.pmw.tinylog;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Locale;
import java.util.regex.Pattern;
import org.pmw.tinylog.writers.LogEntryValue;
/**
 * Converts a format pattern for log entries to a list of tokens.
 *
 * @see Logger#setLoggingFormat(String)
 */
final class Tokenizer {

	/** Fallback date pattern for "{date}" and for invalid user-supplied patterns */
	private static final String DEFAULT_DATE_FORMAT_PATTERN = "yyyy-MM-dd HH:mm:ss";

	private static final String NEW_LINE = EnvironmentHelper.getNewLine();
	// Matches real line breaks as well as the escaped sequences "\r\n", "\n" and "\r"
	private static final Pattern NEW_LINE_REPLACER = Pattern.compile("\r\n|\\\\r\\\\n|\n|\\\\n|\r|\\\\r");
	private static final String TAB = "\t";
	// Matches a real tab as well as the escaped sequence "\t"
	private static final Pattern TAB_REPLACER = Pattern.compile("\t|\\\\t");

	private final Locale locale;
	private final int maxStackTraceElements;
	// Current read position within the format pattern; only valid during parse()
	private int index;

	/**
	 * @param locale
	 *            Locale for formatting
	 * @param maxStackTraceElements
	 *            Limit of stack traces for exceptions
	 */
	Tokenizer(final Locale locale, final int maxStackTraceElements) {
		this.locale = locale;
		this.maxStackTraceElements = maxStackTraceElements;
	}

	/**
	 * Parse a format pattern.
	 *
	 * @param formatPattern
	 *            Format pattern for log entries
	 *
	 * @return List of tokens
	 */
	List<Token> parse(final String formatPattern) {
		List<Token> tokens = new ArrayList<>();
		index = 0;
		while (index < formatPattern.length()) {
			char c = formatPattern.charAt(index);
			int start = index;
			// Consume plain text until the next curly brace (or end of pattern)
			while (c != '{' && c != '}') {
				++index;
				if (index >= formatPattern.length()) {
					tokens.add(getPlainTextToken(formatPattern.substring(start, index)));
					return tokens;
				}
				c = formatPattern.charAt(index);
			}
			if (index > start) {
				tokens.add(getPlainTextToken(formatPattern.substring(start, index)));
			}
			if (c == '{') {
				Token token = parsePartly(formatPattern);
				if (token != null) {
					tokens.add(token);
				}
			} else if (c == '}') {
				// A closing brace without a matching opening brace is skipped with a warning
				InternalLogger.warn("Opening curly brace is missing for: \"{}\"", formatPattern.substring(0, index + 1));
				++index;
			}
		}
		return tokens;
	}

	// Parses one "{...}" placeholder starting at 'index'. Supports nested
	// placeholders and an optional "|min-size=..,indent=.." options section.
	// Returns the combined token, or null if the placeholder was empty.
	private Token parsePartly(final String formatPattern) {
		List<Token> tokens = new ArrayList<>();
		int[] options = new int[] { 0 /* minimum size */, 0 /* indent */};
		int offset = index;
		++index;
		while (index < formatPattern.length()) {
			char c = formatPattern.charAt(index);
			int start = index;
			// Consume the placeholder content up to a nested brace, option separator or end
			while (c != '{' && c != '|' && c != '}') {
				++index;
				if (index >= formatPattern.length()) {
					InternalLogger.warn("Closing curly brace is missing for: \"{}\"", formatPattern.substring(offset, index));
					tokens.add(getToken(formatPattern.substring(start, index)));
					return combine(tokens, options);
				}
				c = formatPattern.charAt(index);
			}
			if (index > start) {
				if (c == '{') {
					// Text before a nested placeholder is literal text
					tokens.add(getPlainTextToken(formatPattern.substring(start, index)));
				} else {
					tokens.add(getToken(formatPattern.substring(start, index)));
				}
			}
			if (c == '{') {
				Token token = parsePartly(formatPattern);
				if (token != null) {
					tokens.add(token);
				}
			} else if (c == '|') {
				// Everything after '|' up to the closing brace is the options section
				++index;
				start = index;
				while (c != '{' && c != '}') {
					++index;
					if (index >= formatPattern.length()) {
						InternalLogger.warn("Closing curly brace is missing for: \"{}\"", formatPattern.substring(offset, index));
						options = parseOptions(formatPattern.substring(start));
						return combine(tokens, options);
					}
					c = formatPattern.charAt(index);
				}
				if (index > start) {
					options = parseOptions(formatPattern.substring(start, index));
				}
			} else if (c == '}') {
				++index;
				return combine(tokens, options);
			}
		}
		InternalLogger.warn("Closing curly brace is missing for: \"{}\"", formatPattern.substring(offset, index));
		return combine(tokens, options);
	}

	// Maps a placeholder name (e.g. "date", "thread", "message") to its token.
	// Unknown names fall back to plain text; known names reject parameters
	// except "date:<pattern>".
	private Token getToken(final String text) {
		if (text.equals("date")) {
			return new DateToken(DateTimeFormatter.ofPattern(DEFAULT_DATE_FORMAT_PATTERN, locale));
		} else if (text.startsWith("date:")) {
			String dateFormatPattern = text.substring(5, text.length());
			try {
				return new DateToken(DateTimeFormatter.ofPattern(dateFormatPattern, locale));
			} catch (IllegalArgumentException ex) {
				// Invalid patterns fall back to the default date format
				InternalLogger.error(ex, "\"{}\" is an invalid date format pattern", dateFormatPattern);
				return new DateToken(DateTimeFormatter.ofPattern(DEFAULT_DATE_FORMAT_PATTERN, locale));
			}
		} else if ("pid".equals(text)) {
			return new PlainTextToken(EnvironmentHelper.getProcessId());
		} else if (text.startsWith("pid:")) {
			InternalLogger.warn("\"{pid}\" does not support parameters");
			return new PlainTextToken(EnvironmentHelper.getProcessId().toString());
		} else if ("thread".equals(text)) {
			return new ThreadNameToken();
		} else if (text.startsWith("thread:")) {
			InternalLogger.warn("\"{thread}\" does not support parameters");
			return new ThreadNameToken();
		} else if ("thread_id".equals(text)) {
			return new ThreadIdToken();
		} else if (text.startsWith("thread_id:")) {
			InternalLogger.warn("\"{thread_id}\" does not support parameters");
			return new ThreadIdToken();
		} else if ("class".equals(text)) {
			return new ClassToken();
		} else if (text.startsWith("class:")) {
			InternalLogger.warn("\"{class}\" does not support parameters");
			return new ClassToken();
		} else if ("class_name".equals(text)) {
			return new ClassNameToken();
		} else if (text.startsWith("class_name:")) {
			InternalLogger.warn("\"{class_name}\" does not support parameters");
			return new ClassNameToken();
		} else if ("package".equals(text)) {
			return new PackageToken();
		} else if (text.startsWith("package:")) {
			InternalLogger.warn("\"{package}\" does not support parameters");
			return new PackageToken();
		} else if ("method".equals(text)) {
			return new MethodToken();
		} else if (text.startsWith("method:")) {
			InternalLogger.warn("\"{method}\" does not support parameters");
			return new MethodToken();
		} else if ("file".equals(text)) {
			return new FileToken();
		} else if (text.startsWith("file:")) {
			InternalLogger.warn("\"{file}\" does not support parameters");
			return new FileToken();
		} else if ("line".equals(text)) {
			return new LineToken();
		} else if (text.startsWith("line:")) {
			InternalLogger.warn("\"{line}\" does not support parameters");
			return new LineToken();
		} else if ("level".equals(text)) {
			return new LevelToken();
		} else if (text.startsWith("level:")) {
			InternalLogger.warn("\"{level}\" does not support parameters");
			return new LevelToken();
		} else if ("message".equals(text)) {
			return new MessageToken(maxStackTraceElements);
		} else if (text.startsWith("message:")) {
			InternalLogger.warn("\"{message}\" does not support parameters");
			return new MessageToken(maxStackTraceElements);
		} else {
			return getPlainTextToken(text);
		}
	}

	// Creates a plain text token, replacing escaped "\n"/"\r\n" with the
	// platform line separator and escaped "\t" with a real tab
	private static Token getPlainTextToken(final String text) {
		String plainText = NEW_LINE_REPLACER.matcher(text).replaceAll(NEW_LINE);
		plainText = TAB_REPLACER.matcher(plainText).replaceAll(TAB);
		return new PlainTextToken(plainText);
	}

	// Parses a comma-separated options string such as "min-size=10,indent=2".
	// Unknown keys and invalid numbers are reported but ignored.
	// Returns { minSize, indent }.
	private static int[] parseOptions(final String text) {
		int minSize = 0;
		int indent = 0;
		int index = 0;
		while (index < text.length()) {
			char c = text.charAt(index);
			while (c == ',') {
				++index;
				if (index >= text.length()) {
					return new int[] { minSize, indent };
				}
				c = text.charAt(index);
			}
			int start = index;
			while (c != ',') {
				++index;
				if (index >= text.length()) {
					break;
				}
				c = text.charAt(index);
			}
			if (index > start) {
				String parameter = text.substring(start, index);
				int splitter = parameter.indexOf('=');
				if (splitter == -1) {
					parameter = parameter.trim();
					if ("min-size".equals(parameter)) {
						InternalLogger.warn("No value set for \"min-size\"");
					} else if ("indent".equals(parameter)) {
						InternalLogger.warn("No value set for \"indent\"");
					} else {
						InternalLogger.warn("Unknown option \"{}\"", parameter);
					}
				} else {
					String key = parameter.substring(0, splitter).trim();
					String value = parameter.substring(splitter + 1).trim();
					if ("min-size".equals(key)) {
						if (value.length() == 0) {
							InternalLogger.warn("No value set for \"min-size\"");
						} else {
							try {
								minSize = parsePositiveInt(value);
							} catch (NumberFormatException ex) {
								InternalLogger.warn("\"{}\" is an invalid number for \"min-size\"", value);
							}
						}
					} else if ("indent".equals(key)) {
						if (value.length() == 0) {
							InternalLogger.warn("No value set for \"indent\"");
						} else {
							try {
								indent = parsePositiveInt(value);
							} catch (NumberFormatException ex) {
								InternalLogger.warn("\"{}\" is an invalid number for \"indent\"", value);
							}
						}
					} else {
						InternalLogger.warn("Unknown option \"{}\"", key);
					}
				}
			}
		}
		return new int[] { minSize, indent };
	}

	// Like Integer.parseInt() but rejects negative numbers
	private static int parsePositiveInt(final String value) throws NumberFormatException {
		int number = Integer.parseInt(value);
		if (number >= 0) {
			return number;
		} else {
			throw new NumberFormatException();
		}
	}

	// Bundles the placeholder's tokens into a single token and applies the
	// "indent"/"min-size" options as wrapping decorators (indent wins when both
	// are set). Returns null when there are no tokens.
	private static Token combine(final List<Token> tokens, final int[] options) {
		int minSize = options[0];
		int indent = options[1];
		if (tokens.isEmpty()) {
			return null;
		} else if (tokens.size() == 1) {
			if (indent > 0) {
				return new IndentToken(tokens.get(0), indent);
			} else if (minSize > 0) {
				return new MinSizeToken(tokens.get(0), minSize);
			} else {
				return tokens.get(0);
			}
		} else {
			if (indent > 0) {
				return new IndentToken(new BundlerToken(tokens), indent);
			} else if (minSize > 0) {
				return new MinSizeToken(new BundlerToken(tokens), minSize);
			} else {
				return new BundlerToken(tokens);
			}
		}
	}

	// Renders several tokens in sequence as one token
	private static final class BundlerToken implements Token {

		private final List<Token> tokens;

		private BundlerToken(final List<Token> tokens) {
			this.tokens = tokens;
		}

		@Override
		public Collection<LogEntryValue> getRequiredLogEntryValues() {
			Collection<LogEntryValue> values = EnumSet.noneOf(LogEntryValue.class);
			for (Token token : tokens) {
				values.addAll(token.getRequiredLogEntryValues());
			}
			return values;
		}

		@Override
		public void render(final LogEntry logEntry, final StringBuilder builder) {
			for (Token token : tokens) {
				token.render(logEntry, builder);
			}
		}

	}

	// Pads the wrapped token's output with trailing spaces up to a minimum width
	private static final class MinSizeToken implements Token {

		private final Token token;
		private final int minSize;

		private MinSizeToken(final Token token, final int minSize) {
			this.token = token;
			this.minSize = minSize;
		}

		@Override
		public Collection<LogEntryValue> getRequiredLogEntryValues() {
			return token.getRequiredLogEntryValues();
		}

		@Override
		public void render(final LogEntry logEntry, final StringBuilder builder) {
			int offset = builder.length();
			token.render(logEntry, builder);
			int size = builder.length() - offset;
			if (size < minSize) {
				char[] spaces = new char[minSize - size];
				Arrays.fill(spaces, ' ');
				builder.append(spaces);
			}
		}

	}

	// Indents every line of the wrapped token's output, stripping pre-existing
	// leading spaces and tabs from continuation lines
	private static final class IndentToken implements Token {

		private final Token token;
		private final char[] spaces;

		private IndentToken(final Token token, final int indent) {
			this.token = token;
			this.spaces = new char[indent];
			Arrays.fill(spaces, ' ');
		}

		@Override
		public Collection<LogEntryValue> getRequiredLogEntryValues() {
			return token.getRequiredLogEntryValues();
		}

		@Override
		public void render(final LogEntry logEntry, final StringBuilder builder) {
			// Indent the first line only if rendering starts at a line break
			if (builder.length() == 0 || builder.charAt(builder.length() - 1) == '\n' || builder.charAt(builder.length() - 1) == '\r') {
				builder.append(spaces);
			}
			StringBuilder subBuilder = new StringBuilder(1024);
			token.render(logEntry, subBuilder);
			int head = 0;
			for (int i = head; i < subBuilder.length(); ++i) {
				char c = subBuilder.charAt(i);
				if (c == '\n') {
					builder.append(subBuilder, head, i + 1);
					builder.append(spaces);
					head = i + 1;
				} else if (c == '\r') {
					// Treat "\r\n" as a single line break
					if (i + 1 < subBuilder.length() && subBuilder.charAt(i + 1) == '\n') {
						++i;
					}
					builder.append(subBuilder, head, i + 1);
					builder.append(spaces);
					head = i + 1;
				} else if (head == i && (c == ' ' || c == '\t')) {
					// Skip original leading whitespace directly after a line break
					++head;
				}
			}
			if (head < subBuilder.length()) {
				builder.append(subBuilder, head, subBuilder.length());
			}
		}

	}

	// Renders the log entry's date with a fixed formatter
	private static final class DateToken implements Token {

		private final DateTimeFormatter formatter;

		private DateToken(final DateTimeFormatter formatter) {
			this.formatter = formatter;
		}

		@Override
		public Collection<LogEntryValue> getRequiredLogEntryValues() {
			return Collections.singletonList(LogEntryValue.DATE);
		}

		@Override
		public void render(final LogEntry logEntry, final StringBuilder builder) {
			builder.append(formatter.format(logEntry.getDate()));
		}

	}

	// Renders the name of the logging thread
	private static final class ThreadNameToken implements Token {

		private ThreadNameToken() {
		}

		@Override
		public Collection<LogEntryValue> getRequiredLogEntryValues() {
			return Collections.singletonList(LogEntryValue.THREAD);
		}

		@Override
		public void render(final LogEntry logEntry, final StringBuilder builder) {
			builder.append(logEntry.getThread().getName());
		}

	};

	// Renders the ID of the logging thread
	private static final class ThreadIdToken implements Token {

		public ThreadIdToken() {
		}

		@Override
		public Collection<LogEntryValue> getRequiredLogEntryValues() {
			return Collections.singletonList(LogEntryValue.THREAD);
		}

		@Override
		public void render(final LogEntry logEntry, final StringBuilder builder) {
			builder.append(logEntry.getThread().getId());
		}

	}

	// Renders the fully-qualified class name of the caller
	private static final class ClassToken implements Token {

		public ClassToken() {
		}

		@Override
		public Collection<LogEntryValue> getRequiredLogEntryValues() {
			return Collections.singletonList(LogEntryValue.CLASS);
		}

		@Override
		public void render(final LogEntry logEntry, final StringBuilder builder) {
			builder.append(logEntry.getClassName());
		}

	}

	// Renders the simple class name of the caller (without package)
	private static final class ClassNameToken implements Token {

		public ClassNameToken() {
		}

		@Override
		public Collection<LogEntryValue> getRequiredLogEntryValues() {
			return Collections.singletonList(LogEntryValue.CLASS);
		}

		@Override
		public void render(final LogEntry logEntry, final StringBuilder builder) {
			String fullyQualifiedClassName = logEntry.getClassName();
			int dotIndex = fullyQualifiedClassName.lastIndexOf('.');
			if (dotIndex < 0) {
				builder.append(fullyQualifiedClassName);
			} else {
				builder.append(fullyQualifiedClassName.substring(dotIndex + 1));
			}
		}

	}

	// Renders the package name of the caller (empty for the default package)
	private static final class PackageToken implements Token {

		private PackageToken() {
		}

		@Override
		public Collection<LogEntryValue> getRequiredLogEntryValues() {
			return Collections.singletonList(LogEntryValue.CLASS);
		}

		@Override
		public void render(final LogEntry logEntry, final StringBuilder builder) {
			String fullyQualifiedClassName = logEntry.getClassName();
			int dotIndex = fullyQualifiedClassName.lastIndexOf('.');
			if (dotIndex != -1) {
				builder.append(fullyQualifiedClassName.substring(0, dotIndex));
			}
		}

	}

	// Renders the method name of the caller
	private static final class MethodToken implements Token {

		private MethodToken() {
		}

		@Override
		public Collection<LogEntryValue> getRequiredLogEntryValues() {
			return Collections.singletonList(LogEntryValue.METHOD);
		}

		@Override
		public void render(final LogEntry logEntry, final StringBuilder builder) {
			builder.append(logEntry.getMethodName());
		}

	}

	// Renders the source file name of the caller
	private static final class FileToken implements Token {

		private FileToken() {
		}

		@Override
		public Collection<LogEntryValue> getRequiredLogEntryValues() {
			return Collections.singletonList(LogEntryValue.FILE);
		}

		@Override
		public void render(final LogEntry logEntry, final StringBuilder builder) {
			builder.append(logEntry.getFilename());
		}

	}

	// Renders the source line number of the logging statement
	private static final class LineToken implements Token {

		private LineToken() {
		}

		@Override
		public Collection<LogEntryValue> getRequiredLogEntryValues() {
			return Collections.singletonList(LogEntryValue.LINE);
		}

		@Override
		public void render(final LogEntry logEntry, final StringBuilder builder) {
			builder.append(logEntry.getLineNumber());
		}

	}

	// Renders the severity level of the log entry
	private static final class LevelToken implements Token {

		private LevelToken() {
		}

		@Override
		public Collection<LogEntryValue> getRequiredLogEntryValues() {
			return Collections.singletonList(LogEntryValue.LEVEL);
		}

		@Override
		public void render(final LogEntry logEntry, final StringBuilder builder) {
			builder.append(logEntry.getLevel());
		}

	}

	// Renders the log message and, if present, the attached exception with up
	// to maxStackTraceElements stack trace lines (including causes)
	private static final class MessageToken implements Token {

		private static final String NEW_LINE = EnvironmentHelper.getNewLine();

		private final int maxStackTraceElements;

		private MessageToken(final int maxStackTraceElements) {
			this.maxStackTraceElements = maxStackTraceElements;
		}

		@Override
		public Collection<LogEntryValue> getRequiredLogEntryValues() {
			return Collections.singletonList(LogEntryValue.MESSAGE);
		}

		@Override
		public void render(final LogEntry logEntry, final StringBuilder builder) {
			String message = logEntry.getMessage();
			if (message != null) {
				builder.append(message);
			}
			Throwable exception = logEntry.getException();
			if (exception != null) {
				if (message != null) {
					builder.append(": ");
				}
				formatException(builder, exception, maxStackTraceElements);
			}
		}

		// With maxStackTraceElements == 0 only "Class: message" is emitted
		private static void formatException(final StringBuilder builder, final Throwable exception, final int maxStackTraceElements) {
			if (maxStackTraceElements == 0) {
				builder.append(exception.getClass().getName());
				String exceptionMessage = exception.getMessage();
				if (exceptionMessage != null) {
					builder.append(": ");
					builder.append(exceptionMessage);
				}
			} else {
				formatExceptionWithStackTrace(builder, exception, maxStackTraceElements);
			}
		}

		// Recursively formats the exception and its causes; the stack trace
		// budget is shared across the cause chain and truncated with "..."
		private static void formatExceptionWithStackTrace(final StringBuilder builder, final Throwable exception, final int countStackTraceElements) {
			builder.append(exception.getClass().getName());

			String message = exception.getMessage();
			if (message != null) {
				builder.append(": ");
				builder.append(message);
			}

			StackTraceElement[] stackTrace = exception.getStackTrace();
			int length = Math.min(stackTrace.length, Math.max(1, countStackTraceElements));
			for (int i = 0; i < length; ++i) {
				builder.append(NEW_LINE);
				builder.append('\t');
				builder.append("at ");
				builder.append(stackTrace[i]);
			}

			if (stackTrace.length > length) {
				builder.append(NEW_LINE);
				builder.append('\t');
				builder.append("...");
			} else {
				Throwable cause = exception.getCause();
				if (cause != null) {
					builder.append(NEW_LINE);
					builder.append("Caused by: ");
					formatExceptionWithStackTrace(builder, cause, countStackTraceElements - length);
				}
			}
		}

	}

	// Renders a fixed text
	private static final class PlainTextToken implements Token {

		private final String text;

		private PlainTextToken(final String text) {
			this.text = text;
		}

		@Override
		public Collection<LogEntryValue> getRequiredLogEntryValues() {
			return Collections.emptyList();
		}

		@Override
		public void render(final LogEntry logEntry, final StringBuilder builder) {
			builder.append(text);
		}

	}

}
| yarish/tinylog | tinylog/src/main/java/org/pmw/tinylog/Tokenizer.java | Java | apache-2.0 | 21,096 |
//
// Copyright 2015 Blu Age Corporation - Plano, Texas
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System.Collections.Generic;
using Summer.Batch.Extra.Copybook;
namespace Summer.Batch.Extra.Ebcdic
{
/// <summary>
/// An EbcdicReaderMapper maps a list of fields, corresponding to an EBCDIC
/// record, to a business object.
/// </summary>
/// <typeparam name="TT">Type of the business object produced by the mapper.</typeparam>
public interface IEbcdicReaderMapper<out TT>
{
/// <summary>
/// Converts the content of a list of values into a business object.
/// </summary>
/// <param name="values">the list of values to map</param>
/// <param name="itemCount">the record line number, starting at 0.</param>
/// <returns>The mapped object</returns>
TT Map(IList<object> values, int itemCount);
/// <summary>
/// Sets the record format map to use for mapping.
/// </summary>
RecordFormatMap RecordFormatMap { set; }
/// <summary>
/// Sets the date parser.
/// </summary>
IDateParser DateParser { set; }
/// <summary>
/// The getter for the distinguished pattern.
/// </summary>
string DistinguishedPattern { get; }
}
} | SummerBatch/SummerBatch | Summer.Batch.Extra/Ebcdic/IEbcdicReaderMapper.cs | C# | apache-2.0 | 1,853 |
/*
* Copyright 2019 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thoughtworks.go.config.registry;
import java.util.ArrayList;
import java.util.List;
import com.thoughtworks.go.plugins.PluginExtensions;
import org.springframework.stereotype.Component;
/**
 * Null-object implementation of {@link PluginExtensions} used when no plugins are
 * available: it reports an empty list of configuration tag extensions.
 */
@Component
public class NoPluginsInstalled implements PluginExtensions {
    @Override
    public List<ConfigurationExtension> configTagImplementations() {
        // Return a fresh mutable list, matching the contract of the original implementation.
        final List<ConfigurationExtension> extensions = new ArrayList<>();
        return extensions;
    }
}
| kierarad/gocd | config/config-api/src/main/java/com/thoughtworks/go/config/registry/NoPluginsInstalled.java | Java | apache-2.0 | 1,001 |
package org.testng;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.text.CharacterIterator;
import java.text.StringCharacterIterator;
import java.util.Enumeration;
import java.util.List;
import java.util.Properties;
import java.util.StringTokenizer;
import org.apache.tools.ant.BuildException;
import org.apache.tools.ant.Project;
import org.apache.tools.ant.Target;
import org.apache.tools.ant.Task;
import org.apache.tools.ant.taskdefs.Execute;
import org.apache.tools.ant.taskdefs.ExecuteWatchdog;
import org.apache.tools.ant.taskdefs.LogOutputStream;
import org.apache.tools.ant.taskdefs.PumpStreamHandler;
import org.apache.tools.ant.types.Commandline;
import org.apache.tools.ant.types.CommandlineJava;
import org.apache.tools.ant.types.Environment;
import org.apache.tools.ant.types.FileSet;
import org.apache.tools.ant.types.Path;
import org.apache.tools.ant.types.PropertySet;
import org.apache.tools.ant.types.Reference;
import org.apache.tools.ant.types.Resource;
import org.apache.tools.ant.types.ResourceCollection;
import org.apache.tools.ant.types.resources.FileResource;
import org.apache.tools.ant.types.selectors.FilenameSelector;
import org.testng.collections.Lists;
import org.testng.internal.ExitCode;
import org.testng.internal.Utils;
import org.testng.log4testng.Logger;
import org.testng.reporters.VerboseReporter;
import static java.lang.Boolean.TRUE;
import static org.testng.internal.Utils.isStringNotBlank;
/**
* TestNG settings:
*
* <ul>
* <li>classfileset (inner)
* <li>classfilesetref (attribute)
* <li>xmlfileset (inner)
* <li>xmlfilesetref (attribute)
* <li>enableAssert (attribute)
* <li>excludedGroups (attribute)
* <li>groups (attribute)
* <li>junit (attribute)
* <li>listener (attribute)
* <li>outputdir (attribute)
* <li>parallel (attribute)
* <li>reporter (attribute)
* <li>sourcedir (attribute)
* <li>sourcedirref (attribute)
* <li>suitename (attribute)
* <li>suiterunnerclass (attribute)
* <li>target (attribute)
* <li>testjar (attribute)
* <li>testname (attribute)
* <li>threadcount (attribute)
* <li>dataproviderthreadcount (attribute)
* <li>verbose (attribute)
* <li>testrunfactory (attribute)
* <li>configFailurepolicy (attribute)
* <li>randomizeSuites (attribute)
* <li>methodselectors (attribute)
* </ul>
*
* Ant settings:
*
* <ul>
* <li>classpath (inner)
* <li>classpathref (attribute)
* <li>jvm (attribute)
* <li>workingDir (attribute)
* <li>env (inner)
* <li>sysproperty (inner)
* <li>propertyset (inner)
* <li>jvmarg (inner)
* <li>timeout (attribute)
* <li>haltonfailure (attribute)
* <li>onHaltTarget (attribute)
* <li>failureProperty (attribute)
* <li>haltonFSP (attribute)
* <li>FSPproperty (attribute)
* <li>haltonskipped (attribute)
* <li>skippedProperty (attribute)
* <li>testRunnerFactory (attribute)
* </ul>
*
* Debug information:
*
* <ul>
* <li>dumpCommand (boolean)
* <li>dumpEnv (boolean)
* <li>dumpSys (boolean)
* </ul>
*
* @author <a href="mailto:the_mindstorm@evolva.ro">Alexandru Popescu</a>
* @author Cedric Beust
* @author Lukas Jungmann
*/
public class TestNGAntTask extends Task {
protected CommandlineJava m_javaCommand;
protected List<ResourceCollection> m_xmlFilesets = Lists.newArrayList();
protected List<ResourceCollection> m_classFilesets = Lists.newArrayList();
protected File m_outputDir;
protected File m_testjar;
protected File m_workingDir;
private Integer m_timeout;
private List<String> m_listeners = Lists.newArrayList();
private List<String> m_methodselectors = Lists.newArrayList();
private String m_objectFactory;
protected String m_testRunnerFactory;
private boolean m_delegateCommandSystemProperties = false;
protected Environment m_environment = new Environment();
/** The suite runner name (defaults to TestNG.class.getName(). */
protected String m_mainClass = TestNG.class.getName();
/**
* True if the temporary file created by the Ant Task for command line parameters to TestNG should
* be preserved after execution.
*/
protected boolean m_dump;
private boolean m_dumpEnv;
private boolean m_dumpSys;
protected boolean m_assertEnabled = true;
protected boolean m_haltOnFailure;
protected String m_onHaltTarget;
protected String m_failurePropertyName;
protected boolean m_haltOnSkipped;
protected String m_skippedPropertyName;
protected boolean m_haltOnFSP;
protected String m_fspPropertyName;
protected String m_includedGroups;
protected String m_excludedGroups;
protected String m_parallelMode;
protected String m_threadCount;
protected String m_dataproviderthreadCount;
protected String m_configFailurePolicy;
protected Boolean m_randomizeSuites;
public String m_useDefaultListeners;
private String m_suiteName = "Ant suite";
private String m_testName = "Ant test";
private Boolean m_skipFailedInvocationCounts;
private String m_methods;
private Mode mode = Mode.testng;
public enum Mode {
// lower-case to better look in build scripts
testng,
junit,
mixed
}
private static final Logger LOGGER = Logger.getLogger(TestNGAntTask.class);
/** The list of report listeners added via <reporter> sub-element of the Ant task */
private List<ReporterConfig> reporterConfigs = Lists.newArrayList();
private String m_testNames = "";
public void setParallel(String parallel) {
m_parallelMode = parallel;
}
public void setThreadCount(String threadCount) {
m_threadCount = threadCount;
}
public void setDataProviderThreadCount(String dataproviderthreadCount) {
m_dataproviderthreadCount = dataproviderthreadCount;
}
public void setUseDefaultListeners(String f) {
m_useDefaultListeners = f;
}
// Ant task settings
public void setHaltonfailure(boolean value) {
m_haltOnFailure = value;
}
public void setOnHaltTarget(String targetName) {
m_onHaltTarget = targetName;
}
public void setFailureProperty(String propertyName) {
m_failurePropertyName = propertyName;
}
public void setHaltonskipped(boolean value) {
m_haltOnSkipped = value;
}
public void setSkippedProperty(String propertyName) {
m_skippedPropertyName = propertyName;
}
public void setHaltonFSP(boolean value) {
m_haltOnFSP = value;
}
public void setFSPProperty(String propertyName) {
m_fspPropertyName = propertyName;
}
public void setDelegateCommandSystemProperties(boolean value) {
m_delegateCommandSystemProperties = value;
}
/**
* Sets the flag to log the command line. When verbose is set to true the command line parameters
* are stored in a temporary file stored in the user's default temporary file directory. The file
* created is prefixed with "testng".
*/
public void setDumpCommand(boolean verbose) {
m_dump = verbose;
}
/**
* Sets the flag to write on <code>System.out</code> the Ant Environment properties.
*
* @param verbose <tt>true</tt> for printing
*/
public void setDumpEnv(boolean verbose) {
m_dumpEnv = verbose;
}
/**
* Sets te flag to write on <code>System.out</code> the system properties.
*
* @param verbose <tt>true</tt> for dumping the info
*/
public void setDumpSys(boolean verbose) {
m_dumpSys = verbose;
}
public void setEnableAssert(boolean flag) {
m_assertEnabled = flag;
}
/**
* The directory to invoke the VM in.
*
* @param workingDir the directory to invoke the JVM from.
*/
public void setWorkingDir(File workingDir) {
m_workingDir = workingDir;
}
/**
* Sets a particular JVM to be used. Default is 'java' and is solved by <code>Runtime.exec()
* </code>.
*
* @param jvm the new jvm
*/
public void setJvm(String jvm) {
getJavaCommand().setVm(jvm);
}
/**
* Set the timeout value (in milliseconds).
*
* <p>If the tests are running for more than this value, the tests will be canceled.
*
* @param value the maximum time (in milliseconds) allowed before declaring the test as
* 'timed-out'
*/
public void setTimeout(Integer value) {
m_timeout = value;
}
public Commandline.Argument createJvmarg() {
return getJavaCommand().createVmArgument();
}
public void addSysproperty(Environment.Variable sysp) {
getJavaCommand().addSysproperty(sysp);
}
/** Adds an environment variable; used when forking. */
public void addEnv(Environment.Variable var) {
m_environment.addVariable(var);
}
/**
* Adds path to classpath used for tests.
*
* @return reference to the classpath in the embedded java command line
*/
public Path createClasspath() {
return getJavaCommand().createClasspath(getProject()).createPath();
}
/**
* Adds a path to the bootclasspath.
*
* @return reference to the bootclasspath in the embedded java command line
*/
public Path createBootclasspath() {
return getJavaCommand().createBootclasspath(getProject()).createPath();
}
/**
* Set the classpath to be used when running the Java class
*
* @param s an Ant Path object containing the classpath.
*/
public void setClasspath(Path s) {
createClasspath().append(s);
}
/**
* Classpath to use, by reference.
*
* @param r a reference to an existing classpath
*/
public void setClasspathRef(Reference r) {
createClasspath().setRefid(r);
}
public void addXmlfileset(FileSet fs) {
m_xmlFilesets.add(fs);
}
public void setXmlfilesetRef(Reference ref) {
m_xmlFilesets.add(createResourceCollection(ref));
}
public void addClassfileset(FileSet fs) {
m_classFilesets.add(appendClassSelector(fs));
}
public void setClassfilesetRef(Reference ref) {
m_classFilesets.add(createResourceCollection(ref));
}
public void setTestNames(String testNames) {
m_testNames = testNames;
}
/**
* Sets the suite runner class to invoke
*
* @param s the name of the suite runner class
*/
public void setSuiteRunnerClass(String s) {
m_mainClass = s;
}
/**
* Sets the suite name
*
* @param s the name of the suite
*/
public void setSuiteName(String s) {
m_suiteName = s;
}
/**
* Sets the test name
*
* @param s the name of the test
*/
public void setTestName(String s) {
m_testName = s;
}
// TestNG settings
public void setJUnit(boolean value) {
mode = value ? Mode.junit : Mode.testng;
}
// TestNG settings
public void setMode(Mode mode) {
this.mode = mode;
}
/**
* Sets the test output directory
*
* @param dir the name of directory
*/
public void setOutputDir(File dir) {
m_outputDir = dir;
}
/**
* Sets the test jar
*
* @param s the name of test jar
*/
public void setTestJar(File s) {
m_testjar = s;
}
public void setGroups(String groups) {
m_includedGroups = groups;
}
public void setExcludedGroups(String groups) {
m_excludedGroups = groups;
}
private Integer m_verbose = null;
private Integer m_suiteThreadPoolSize;
private String m_xmlPathInJar;
public void setVerbose(Integer verbose) {
m_verbose = verbose;
}
public void setReporter(String listener) {
m_listeners.add(listener);
}
public void setObjectFactory(String className) {
m_objectFactory = className;
}
public void setTestRunnerFactory(String testRunnerFactory) {
m_testRunnerFactory = testRunnerFactory;
}
public void setSuiteThreadPoolSize(Integer n) {
m_suiteThreadPoolSize = n;
}
/** @deprecated Use "listeners" */
@Deprecated
public void setListener(String listener) {
m_listeners.add(listener);
}
public void setListeners(String listeners) {
StringTokenizer st = new StringTokenizer(listeners, " ,");
while (st.hasMoreTokens()) {
m_listeners.add(st.nextToken());
}
}
public void setMethodSelectors(String methodSelectors) {
StringTokenizer st = new StringTokenizer(methodSelectors, " ,");
while (st.hasMoreTokens()) {
m_methodselectors.add(st.nextToken());
}
}
public void setConfigFailurePolicy(String failurePolicy) {
m_configFailurePolicy = failurePolicy;
}
public void setRandomizeSuites(Boolean randomizeSuites) {
m_randomizeSuites = randomizeSuites;
}
public void setMethods(String methods) {
m_methods = methods;
}
/**
 * Launches TestNG in a new JVM.
 *
 * <p>The TestNG arguments are written, one per line, to a temporary file that is passed to
 * the forked JVM as {@code @file}; the file is deleted on JVM exit unless {@code dumpCommand}
 * was requested.
 *
 * <p>{@inheritDoc}
 */
@Override
public void execute() throws BuildException {
  validateOptions();
  CommandlineJava cmd = getJavaCommand();
  cmd.setClassname(m_mainClass);
  if (m_assertEnabled) {
    cmd.createVmArgument().setValue("-ea");
  }
  if (m_delegateCommandSystemProperties) {
    delegateCommandSystemProperties();
  }
  List<String> argv = createArguments();
  String fileName = "";
  try {
    File f = File.createTempFile("testng", "");
    fileName = f.getAbsolutePath();
    // If the user asked to see the command, preserve the file
    if (!m_dump) {
      f.deleteOnExit();
    }
    // try-with-resources guarantees the writer is closed (and flushed) even if a write fails,
    // replacing the previous hand-written try/finally close logic.
    try (BufferedWriter bw = new BufferedWriter(new FileWriter(f))) {
      for (String arg : argv) {
        bw.write(arg);
        bw.newLine();
      }
    }
  } catch (IOException e) {
    // Best effort: log and continue; the forked JVM will fail loudly if the file is unusable.
    LOGGER.error(e.getMessage(), e);
  }
  printDebugInfo(fileName);
  createClasspath().setLocation(findJar());
  cmd.createArgument().setValue("@" + fileName);
  ExecuteWatchdog watchdog = createWatchdog();
  boolean wasKilled = false;
  int exitValue = executeAsForked(cmd, watchdog);
  if (null != watchdog) {
    wasKilled = watchdog.killedProcess();
  }
  actOnResult(exitValue, wasKilled);
}
/**
 * Builds the TestNG command line from the task's attributes and nested elements.
 * Only options that were actually configured are emitted; XML suite files are
 * appended last as positional arguments.
 *
 * @return the argument list passed to the forked TestNG process via the @-file
 */
protected List<String> createArguments() {
List<String> argv = Lists.newArrayList();
addBooleanIfTrue(argv, CommandLineArgs.JUNIT, mode == Mode.junit);
addBooleanIfTrue(argv, CommandLineArgs.MIXED, mode == Mode.mixed);
addBooleanIfTrue(
argv, CommandLineArgs.SKIP_FAILED_INVOCATION_COUNTS, m_skipFailedInvocationCounts);
addIntegerIfNotNull(argv, CommandLineArgs.LOG, m_verbose);
addDefaultListeners(argv);
addOutputDir(argv);
addFileIfFile(argv, CommandLineArgs.TEST_JAR, m_testjar);
addStringIfNotBlank(argv, CommandLineArgs.GROUPS, m_includedGroups);
addStringIfNotBlank(argv, CommandLineArgs.EXCLUDED_GROUPS, m_excludedGroups);
addFilesOfRCollection(argv, CommandLineArgs.TEST_CLASS, m_classFilesets);
addListOfStringIfNotEmpty(argv, CommandLineArgs.LISTENER, m_listeners);
addListOfStringIfNotEmpty(argv, CommandLineArgs.METHOD_SELECTORS, m_methodselectors);
addStringIfNotNull(argv, CommandLineArgs.OBJECT_FACTORY, m_objectFactory);
addStringIfNotNull(argv, CommandLineArgs.TEST_RUNNER_FACTORY, m_testRunnerFactory);
addStringIfNotNull(argv, CommandLineArgs.PARALLEL, m_parallelMode);
addStringIfNotNull(argv, CommandLineArgs.CONFIG_FAILURE_POLICY, m_configFailurePolicy);
addBooleanIfTrue(argv, CommandLineArgs.RANDOMIZE_SUITES, m_randomizeSuites);
addStringIfNotNull(argv, CommandLineArgs.THREAD_COUNT, m_threadCount);
addStringIfNotNull(argv, CommandLineArgs.DATA_PROVIDER_THREAD_COUNT, m_dataproviderthreadCount);
addStringIfNotBlank(argv, CommandLineArgs.SUITE_NAME, m_suiteName);
addStringIfNotBlank(argv, CommandLineArgs.TEST_NAME, m_testName);
addStringIfNotBlank(argv, CommandLineArgs.TEST_NAMES, m_testNames);
addStringIfNotBlank(argv, CommandLineArgs.METHODS, m_methods);
addReporterConfigs(argv);
addIntegerIfNotNull(argv, CommandLineArgs.SUITE_THREAD_POOL_SIZE, m_suiteThreadPoolSize);
addStringIfNotNull(argv, CommandLineArgs.XML_PATH_IN_JAR, m_xmlPathInJar);
// Suite XML files go last: they are positional (not option-prefixed) arguments
addXmlFiles(argv);
return argv;
}
/**
 * Appends the use-default-listeners option when the attribute was configured,
 * normalising "yes"/"true" (case-insensitive) to "true" and anything else to "false".
 */
private void addDefaultListeners(List<String> argv) {
  if (m_useDefaultListeners != null) {
    boolean enabled =
        "yes".equalsIgnoreCase(m_useDefaultListeners)
            || "true".equalsIgnoreCase(m_useDefaultListeners);
    argv.add(CommandLineArgs.USE_DEFAULT_LISTENERS);
    argv.add(Boolean.toString(enabled));
  }
}
private void addOutputDir(List<String> argv) {
if (null != m_outputDir) {
if (!m_outputDir.exists()) {
m_outputDir.mkdirs();
}
if (m_outputDir.isDirectory()) {
argv.add(CommandLineArgs.OUTPUT_DIRECTORY);
argv.add(m_outputDir.getAbsolutePath());
} else {
throw new BuildException("Output directory is not a directory: " + m_outputDir);
}
}
}
private void addReporterConfigs(List<String> argv) {
for (ReporterConfig reporterConfig : reporterConfigs) {
argv.add(CommandLineArgs.REPORTER);
argv.add(reporterConfig.serialize());
}
}
// Joins the files of the given resource collections with ',' behind a single option name.
private void addFilesOfRCollection(
List<String> argv, String name, List<ResourceCollection> resources) {
addArgumentsIfNotEmpty(argv, name, getFiles(resources), ",");
}
// Joins the given strings with ';' behind a single option name.
private void addListOfStringIfNotEmpty(List<String> argv, String name, List<String> arguments) {
addArgumentsIfNotEmpty(argv, name, arguments, ";");
}
// Emits "name v1<sep>v2<sep>..." only when there is at least one value.
private void addArgumentsIfNotEmpty(
List<String> argv, String name, List<String> arguments, String separator) {
if (arguments != null && !arguments.isEmpty()) {
argv.add(name);
String value = Utils.join(arguments, separator);
argv.add(value);
}
}
// Emits "name <absolute path>" only when the file exists and is a regular file.
private void addFileIfFile(List<String> argv, String name, File file) {
if ((null != file) && file.isFile()) {
argv.add(name);
argv.add(file.getAbsolutePath());
}
}
// Emits a bare flag (no value) only when the Boolean is exactly TRUE (null-safe).
private void addBooleanIfTrue(List<String> argv, String name, Boolean value) {
if (TRUE.equals(value)) {
argv.add(name);
}
}
// Emits "name <int>" only when the value is set.
private void addIntegerIfNotNull(List<String> argv, String name, Integer value) {
if (value != null) {
argv.add(name);
argv.add(value.toString());
}
}
// Emits "name value" only when the value is non-null (empty strings are still emitted).
private void addStringIfNotNull(List<String> argv, String name, String value) {
if (value != null) {
argv.add(name);
argv.add(value);
}
}
// Emits "name value" only when the value is non-null, non-empty and not just whitespace.
private void addStringIfNotBlank(List<String> argv, String name, String value) {
if (isStringNotBlank(value)) {
argv.add(name);
argv.add(value);
}
}
// Appends all suite XML file names as positional arguments.
private void addXmlFiles(List<String> argv) {
for (String file : getSuiteFileNames()) {
argv.add(file);
}
}
/**
 * @return the list of the XML suite file names resolved from the nested xml filesets.
 *     Subclasses may override this to supply suites from another source.
 */
protected List<String> getSuiteFileNames() {
  List<String> result = Lists.newArrayList();
  result.addAll(getFiles(m_xmlFilesets));
  return result;
}
private void delegateCommandSystemProperties() {
// Iterate over command-line args and pass them through as sysproperty
// exclude any built-in properties that start with "ant."
for (Object propKey : getProject().getUserProperties().keySet()) {
String propName = (String) propKey;
String propVal = getProject().getUserProperty(propName);
if (propName.startsWith("ant.")) {
log("Excluding ant property: " + propName + ": " + propVal, Project.MSG_DEBUG);
} else {
log("Including user property: " + propName + ": " + propVal, Project.MSG_DEBUG);
Environment.Variable var = new Environment.Variable();
var.setKey(propName);
var.setValue(propVal);
addSysproperty(var);
}
}
}
private void printDebugInfo(String fileName) {
if (m_dumpSys) {
debug("* SYSTEM PROPERTIES *");
Properties props = System.getProperties();
Enumeration en = props.propertyNames();
while (en.hasMoreElements()) {
String key = (String) en.nextElement();
debug(key + ": " + props.getProperty(key));
}
debug("");
}
if (m_dumpEnv) {
String[] vars = m_environment.getVariables();
if (null != vars && vars.length > 0) {
debug("* ENVIRONMENT *");
for (String v : vars) {
debug(v);
}
debug("");
}
}
if (m_dump) {
dumpCommand(fileName);
}
}
private void debug(String message) {
log("[TestNGAntTask] " + message, Project.MSG_DEBUG);
}
/**
 * Inspects the forked JVM's exit status and either fails the build, runs the
 * configured halt target, or sets the configured result properties.
 *
 * @param exitValue exit status of the TestNG process (bit flags, see {@link ExitCode})
 * @param wasKilled true if the watchdog killed the process because of a timeout
 */
protected void actOnResult(int exitValue, boolean wasKilled) {
if (exitValue == -1) {
executeHaltTarget(exitValue);
throw new BuildException("an error occurred when running TestNG tests");
}
if ((exitValue & ExitCode.HAS_NO_TEST) == ExitCode.HAS_NO_TEST) {
if (m_haltOnFailure) {
executeHaltTarget(exitValue);
throw new BuildException("No tests were run");
} else {
// NOTE(review): "no tests found" sets the *failure* property rather than a
// dedicated one — looks intentional but worth confirming against callers.
if (null != m_failurePropertyName) {
getProject().setNewProperty(m_failurePropertyName, "true");
}
log("TestNG haven't found any tests to be run", Project.MSG_DEBUG);
}
}
// A watchdog kill counts as a failure even if the exit code itself is clean
boolean failed = (ExitCode.hasFailure(exitValue)) || wasKilled;
if (failed) {
final String msg = wasKilled ? "The tests timed out and were killed." : "The tests failed.";
if (m_haltOnFailure) {
executeHaltTarget(exitValue);
throw new BuildException(msg);
} else {
if (null != m_failurePropertyName) {
getProject().setNewProperty(m_failurePropertyName, "true");
}
log(msg, Project.MSG_INFO);
}
}
if (ExitCode.hasSkipped(exitValue)) {
if (m_haltOnSkipped) {
executeHaltTarget(exitValue);
throw new BuildException("There are TestNG SKIPPED tests");
} else {
if (null != m_skippedPropertyName) {
getProject().setNewProperty(m_skippedPropertyName, "true");
}
log("There are TestNG SKIPPED tests", Project.MSG_DEBUG);
}
}
if (ExitCode.hasFailureWithinSuccessPercentage(exitValue)) {
if (m_haltOnFSP) {
executeHaltTarget(exitValue);
throw new BuildException("There are TestNG FAILED WITHIN SUCCESS PERCENTAGE tests");
} else {
if (null != m_fspPropertyName) {
getProject().setNewProperty(m_fspPropertyName, "true");
}
log("There are TestNG FAILED WITHIN SUCCESS PERCENTAGE tests", Project.MSG_DEBUG);
}
}
}
/** Executes the target, if any, that user designates executing before failing the test */
private void executeHaltTarget(int exitValue) {
if (m_onHaltTarget != null) {
if (m_outputDir != null) {
getProject().setProperty("testng.outputdir", m_outputDir.getAbsolutePath());
}
getProject().setProperty("testng.returncode", String.valueOf(exitValue));
Target t = getProject().getTargets().get(m_onHaltTarget);
if (t != null) {
t.execute();
}
}
}
/**
 * Executes the command line as a new process.
 *
 * @param cmd the command to execute
 * @param watchdog - A {@link ExecuteWatchdog} object, or null when no timeout is configured.
 * @return the exit status of the subprocess or INVALID.
 * @throws BuildException if forking the process fails with an I/O error
 */
protected int executeAsForked(CommandlineJava cmd, ExecuteWatchdog watchdog) {
// The stream handler demotes VerboseReporter output to verbose level unless verbosity >= 5
Execute execute =
new Execute(
new TestNGLogSH(
this, Project.MSG_INFO, Project.MSG_WARN, (m_verbose == null || m_verbose < 5)),
watchdog);
execute.setCommandline(cmd.getCommandline());
execute.setAntRun(getProject());
if (m_workingDir != null) {
if (m_workingDir.exists() && m_workingDir.isDirectory()) {
execute.setWorkingDirectory(m_workingDir);
} else {
log("Ignoring invalid working directory : " + m_workingDir, Project.MSG_WARN);
}
}
String[] environment = m_environment.getVariables();
if (null != environment) {
for (String envEntry : environment) {
log("Setting environment variable: " + envEntry, Project.MSG_VERBOSE);
}
}
execute.setEnvironment(environment);
log(cmd.describeCommand(), Project.MSG_VERBOSE);
int retVal;
try {
retVal = execute.execute();
} catch (IOException e) {
throw new BuildException("Process fork failed.", e, getLocation());
}
return retVal;
}
/** Creates or returns the already created <CODE>CommandlineJava</CODE>. */
protected CommandlineJava getJavaCommand() {
if (null == m_javaCommand) {
m_javaCommand = new CommandlineJava();
}
return m_javaCommand;
}
/**
* @return <tt>null</tt> if there is no timeout value, otherwise the watchdog instance.
* @throws BuildException under unspecified circumstances
* @since Ant 1.2
*/
protected ExecuteWatchdog createWatchdog() /*throws BuildException*/ {
if (m_timeout == null) {
return null;
}
return new ExecuteWatchdog(m_timeout.longValue());
}
/**
 * Verifies that the task was given something to run (suites, classes, methods or a
 * test jar) and that group filtering and the halt target are consistently configured.
 *
 * @throws BuildException if the configuration is incomplete or inconsistent
 */
protected void validateOptions() throws BuildException {
int suiteCount = getSuiteFileNames().size();
if (suiteCount == 0
&& m_classFilesets.size() == 0
&& Utils.isStringEmpty(m_methods)
&& ((null == m_testjar) || !m_testjar.isFile())) {
throw new BuildException("No suites, classes, methods or jar file was specified.");
}
// Groups are meaningless without classes or suites to filter
if ((null != m_includedGroups) && (m_classFilesets.size() == 0 && suiteCount == 0)) {
throw new BuildException("No class filesets or xml file sets specified while using groups");
}
if (m_onHaltTarget != null) {
if (!getProject().getTargets().containsKey(m_onHaltTarget)) {
throw new BuildException("Target " + m_onHaltTarget + " not found in this project");
}
}
}
private ResourceCollection createResourceCollection(Reference ref) {
Object o = ref.getReferencedObject();
if (!(o instanceof ResourceCollection)) {
throw new BuildException("Only File based ResourceCollections are supported.");
}
ResourceCollection rc = (ResourceCollection) o;
if (!rc.isFilesystemOnly()) {
throw new BuildException("Only ResourceCollections from local file system are supported.");
}
return rc;
}
private FileSet appendClassSelector(FileSet fs) {
FilenameSelector selector = new FilenameSelector();
selector.setName("**/*.class");
selector.setProject(getProject());
fs.appendSelector(selector);
return fs;
}
/**
 * Locates the jar (or class-file root directory) this task class was loaded from,
 * so it can be appended to the forked JVM's classpath.
 *
 * @return the jar file or class directory, or {@code null} if it cannot be determined
 */
private File findJar() {
  // Parameterized Class<?> instead of the raw type to avoid an unchecked/raw-type warning
  Class<?> thisClass = getClass();
  String resource = thisClass.getName().replace('.', '/') + ".class";
  URL url = thisClass.getClassLoader().getResource(resource);
  if (null != url) {
    String u = url.toString();
    if (u.startsWith("jar:file:")) {
      // "jar:file:/path/to.jar!/pkg/Cls.class" -> keep "file:/path/to.jar"
      int pling = u.indexOf('!');
      String jarName = u.substring(4, pling);
      return new File(fromURI(jarName));
    } else if (u.startsWith("file:")) {
      // Plain class files on disk: strip the resource suffix to get the root directory
      int tail = u.indexOf(resource);
      String dirName = u.substring(0, tail);
      return new File(fromURI(dirName));
    }
  }
  return null;
}
/**
 * Converts a {@code file:} URL string into a platform file path, handling UNC
 * hosts, a Windows drive letter behind a leading backslash, and %XX escapes.
 *
 * @param uri the {@code file:} URL to convert
 * @return the decoded file system path
 * @throws IllegalArgumentException if the string is not a valid {@code file:} URL
 */
private String fromURI(String uri) {
URL url = null;
try {
url = new URL(uri);
} catch (MalformedURLException murle) {
// Gobble exceptions and do nothing.
}
if ((null == url) || !("file".equals(url.getProtocol()))) {
throw new IllegalArgumentException("Can only handle valid file: URIs");
}
// A non-empty host denotes a UNC path: prefix with two separators (\\host\...)
StringBuilder buf = new StringBuilder(url.getHost());
if (buf.length() > 0) {
buf.insert(0, File.separatorChar).insert(0, File.separatorChar);
}
// Drop any query part of the file component
String file = url.getFile();
int queryPos = file.indexOf('?');
buf.append((queryPos < 0) ? file : file.substring(0, queryPos));
uri = buf.toString().replace('/', File.separatorChar);
// On Windows (';' path separator) strip the leading '\' before a drive letter,
// e.g. "\C:\dir" -> "C:\dir"
if ((File.pathSeparatorChar == ';')
&& uri.startsWith("\\")
&& (uri.length() > 2)
&& Character.isLetter(uri.charAt(1))
&& (uri.lastIndexOf(':') > -1)) {
uri = uri.substring(1);
}
// Decode %XX escapes; truncated escapes at end-of-string are silently dropped
StringBuilder sb = new StringBuilder();
CharacterIterator iter = new StringCharacterIterator(uri);
for (char c = iter.first(); c != CharacterIterator.DONE; c = iter.next()) {
if (c == '%') {
char c1 = iter.next();
if (c1 != CharacterIterator.DONE) {
int i1 = Character.digit(c1, 16);
char c2 = iter.next();
if (c2 != CharacterIterator.DONE) {
int i2 = Character.digit(c2, 16);
sb.append((char) ((i1 << 4) + i2));
}
}
} else {
sb.append(c);
}
}
return sb.toString();
}
/**
* Returns the list of files corresponding to the resource collection
*
* @param resources - A list of {@link ResourceCollection}
* @return the list of files corresponding to the resource collection
* @throws BuildException
*/
private List<String> getFiles(List<ResourceCollection> resources) throws BuildException {
List<String> files = Lists.newArrayList();
for (ResourceCollection rc : resources) {
for (Resource o : rc) {
if (o instanceof FileResource) {
FileResource fr = ((FileResource) o);
if (fr.isDirectory()) {
throw new BuildException("Directory based FileResources are not supported.");
}
if (!fr.isExists()) {
log("'" + fr.toLongString() + "' does not exist", Project.MSG_VERBOSE);
}
files.add(fr.getFile().getAbsolutePath());
} else {
log("Unsupported Resource type: " + o.toString(), Project.MSG_VERBOSE);
}
}
}
return files;
}
private void dumpCommand(String fileName) {
log("TESTNG PASSED @" + fileName + " WHICH CONTAINS:", Project.MSG_INFO);
readAndPrintFile(fileName);
}
private void readAndPrintFile(String fileName) {
try {
Files.readAllLines(Paths.get(fileName)).forEach(line -> log(" " + line, Project.MSG_INFO));
} catch (IOException ex) {
LOGGER.error(ex.getMessage(), ex);
}
}
public void addConfiguredReporter(ReporterConfig reporterConfig) {
reporterConfigs.add(reporterConfig);
}
public void setSkipFailedInvocationCounts(boolean skip) {
m_skipFailedInvocationCounts = skip;
}
public void setXmlPathInJar(String path) {
m_xmlPathInJar = path;
}
/**
* Add the referenced property set as system properties for the TestNG JVM.
*
* @param sysPropertySet A PropertySet of system properties.
*/
public void addConfiguredPropertySet(PropertySet sysPropertySet) {
Properties properties = sysPropertySet.getProperties();
log(
properties.keySet().size() + " properties found in nested propertyset",
Project.MSG_VERBOSE);
for (Object propKeyObj : properties.keySet()) {
String propKey = (String) propKeyObj;
Environment.Variable sysProp = new Environment.Variable();
sysProp.setKey(propKey);
if (properties.get(propKey) instanceof String) {
String propVal = (String) properties.get(propKey);
sysProp.setValue(propVal);
getJavaCommand().addSysproperty(sysProp);
log("Added system property " + propKey + " with value " + propVal, Project.MSG_VERBOSE);
} else {
log("Ignoring non-String property " + propKey, Project.MSG_WARN);
}
}
}
/**
 * Routes output of the forked JVM to the Ant log. Lines produced by
 * {@link VerboseReporter} are demoted to verbose level unless verbosity is 5 or more.
 */
@Override
protected void handleOutput(String output) {
  if (output.startsWith(VerboseReporter.LISTENER_PREFIX)) {
    // Bug fix: m_verbose may be unset (null); unboxing it directly in "m_verbose < 5"
    // threw a NullPointerException. Treat "not set" as low verbosity, matching the
    // null guard used when constructing TestNGLogSH in executeAsForked().
    log(output, (m_verbose == null || m_verbose < 5) ? Project.MSG_VERBOSE : Project.MSG_INFO);
  } else {
    super.handleOutput(output);
  }
}
private static class TestNGLogOS extends LogOutputStream {
private Task task;
private boolean verbose;
public TestNGLogOS(Task task, int level, boolean verbose) {
super(task, level);
this.task = task;
this.verbose = verbose;
}
@Override
protected void processLine(String line, int level) {
if (line.startsWith(VerboseReporter.LISTENER_PREFIX)) {
task.log(line, verbose ? Project.MSG_VERBOSE : Project.MSG_INFO);
} else {
super.processLine(line, level);
}
}
}
protected static class TestNGLogSH extends PumpStreamHandler {
public TestNGLogSH(Task task, int outlevel, int errlevel, boolean verbose) {
super(new TestNGLogOS(task, outlevel, verbose), new LogOutputStream(task, errlevel));
}
}
}
| missedone/testng | src/main/java/org/testng/TestNGAntTask.java | Java | apache-2.0 | 32,705 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.translog;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.Term;
import org.apache.lucene.mockfile.FilterFileChannel;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.ByteArrayDataOutput;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LineFileDocs;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.ReleasableLock;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.Engine.Operation.Origin;
import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.seqno.LocalCheckpointTracker;
import org.elasticsearch.index.seqno.LocalCheckpointTrackerTests;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.Translog.Location;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import org.hamcrest.Matchers;
import org.junit.After;
import org.junit.Before;
import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.Charset;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.InvalidPathException;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
import java.util.stream.LongStream;
import static com.carrotsearch.randomizedtesting.RandomizedTest.randomLongBetween;
import static org.elasticsearch.common.util.BigArrays.NON_RECYCLING_INSTANCE;
import static org.elasticsearch.index.translog.TranslogDeletionPolicyTests.createTranslogDeletionPolicy;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.hasToString;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
@LuceneTestCase.SuppressFileSystems("ExtrasFS")
public class TranslogTests extends ESTestCase {
    // Fixed shard identity used for every translog created by these tests.
    protected final ShardId shardId = new ShardId("index", "_na_", 1);
    // Translog under test; recreated per test in setUp(), closed in tearDown().
    protected Translog translog;
    // Backing value for the translog's global checkpoint supplier; set in create().
    private AtomicLong globalCheckpoint;
    // Temporary directory holding the current test's translog files.
    protected Path translogDir;
    @Override
    protected void afterIfSuccessful() throws Exception {
        super.afterIfSuccessful();
        // Only on success: verify the translog cleans up after a final commit,
        // then remove all files. A failed test skips this so its state is preserved.
        if (translog.isOpen()) {
            if (translog.currentFileGeneration() > 1) {
                // Committing the current generation must make the previous one deletable.
                markCurrentGenAsCommitted(translog);
                translog.trimUnreferencedReaders();
                assertFileDeleted(translog, translog.currentFileGeneration() - 1);
            }
            translog.close();
        }
        // The current generation file must survive close().
        assertFileIsPresent(translog, translog.currentFileGeneration());
        IOUtils.rm(translog.location()); // delete all the locations
    }
protected Translog createTranslog(TranslogConfig config, String translogUUID) throws IOException {
return new Translog(config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()),
() -> SequenceNumbersService.UNASSIGNED_SEQ_NO);
}
    // Marks everything up to and including the current generation as committed,
    // making all older generations eligible for deletion.
    private void markCurrentGenAsCommitted(Translog translog) throws IOException {
        commit(translog, translog.currentFileGeneration());
    }
    // Rolls to a new generation and commits it, so only the freshly rolled
    // generation remains required for recovery.
    private void rollAndCommit(Translog translog) throws IOException {
        translog.rollGeneration();
        commit(translog, translog.currentFileGeneration());
    }
    // Simulates a Lucene commit: generations below genToCommit are no longer
    // needed for recovery, and trimming must honor the deletion policy.
    private void commit(Translog translog, long genToCommit) throws IOException {
        final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
        deletionPolicy.setMinTranslogGenerationForRecovery(genToCommit);
        // Capture the expected minimum BEFORE trimming so we can assert trimming honored it.
        long minGenRequired = deletionPolicy.minTranslogGenRequired(translog.getReaders(), translog.getCurrent());
        translog.trimUnreferencedReaders();
        assertThat(minGenRequired, equalTo(translog.getMinFileGeneration()));
        assertFilePresences(translog);
    }
    @Override
    @Before
    public void setUp() throws Exception {
        super.setUp();
        // if a previous test failed we clean up things here
        // Each test gets a fresh temp dir and a fresh translog in it.
        translogDir = createTempDir();
        translog = create(translogDir);
    }
    @Override
    @After
    public void tearDown() throws Exception {
        try {
            // Every view opened by a test must have been closed before teardown.
            assertEquals("there are still open views", 0, translog.getDeletionPolicy().pendingViewsCount());
            translog.close();
        } finally {
            super.tearDown();
        }
    }
private Translog create(Path path) throws IOException {
globalCheckpoint = new AtomicLong(SequenceNumbersService.UNASSIGNED_SEQ_NO);
final TranslogConfig translogConfig = getTranslogConfig(path);
final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings());
return new Translog(translogConfig, null, deletionPolicy, () -> globalCheckpoint.get());
}
private TranslogConfig getTranslogConfig(final Path path) {
final Settings settings = Settings
.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT)
// only randomize between nog age retention and a long one, so failures will have a chance of reproducing
.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomBoolean() ? "-1ms" : "1h")
.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), randomIntBetween(-1, 2048) + "b")
.build();
return getTranslogConfig(path, settings);
}
private TranslogConfig getTranslogConfig(final Path path, final Settings settings) {
final ByteSizeValue bufferSize;
if (randomBoolean()) {
bufferSize = TranslogConfig.DEFAULT_BUFFER_SIZE;
} else {
bufferSize = new ByteSizeValue(10 + randomInt(128 * 1024), ByteSizeUnit.BYTES);
}
final IndexSettings indexSettings =
IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings);
return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize);
}
    // Records the operation in both the expectation list and the translog so a
    // later snapshot can be compared against the list.
    private void addToTranslogAndList(Translog translog, ArrayList<Translog.Operation> list, Translog.Operation op) throws IOException {
        list.add(op);
        translog.add(op);
    }
public void testIdParsingFromFile() {
long id = randomIntBetween(0, Integer.MAX_VALUE);
Path file = translogDir.resolve(Translog.TRANSLOG_FILE_PREFIX + id + ".tlog");
assertThat(Translog.parseIdFromFileName(file), equalTo(id));
id = randomIntBetween(0, Integer.MAX_VALUE);
file = translogDir.resolve(Translog.TRANSLOG_FILE_PREFIX + id);
try {
Translog.parseIdFromFileName(file);
fail("invalid pattern");
} catch (IllegalArgumentException ex) {
// all good
}
file = translogDir.resolve(Translog.TRANSLOG_FILE_PREFIX + id + ".recovering");
try {
Translog.parseIdFromFileName(file);
fail("invalid pattern");
} catch (IllegalArgumentException ex) {
// all good
}
file = translogDir.resolve(Translog.TRANSLOG_FILE_PREFIX + randomNonTranslogPatternString(1, 10) + id);
try {
Translog.parseIdFromFileName(file);
fail("invalid pattern");
} catch (IllegalArgumentException ex) {
// all good
}
file = translogDir.resolve(randomNonTranslogPatternString(1, Translog.TRANSLOG_FILE_PREFIX.length() - 1));
try {
Translog.parseIdFromFileName(file);
fail("invalid pattern");
} catch (IllegalArgumentException ex) {
// all good
}
}
    // Produces a random string that does NOT match the strict translog file-name
    // pattern but DOES resolve to a usable file name; picks that are invalid paths
    // on the current file system (or resolve to no file name, like '/' or '..')
    // are retried.
    private String randomNonTranslogPatternString(int min, int max) {
        String string;
        boolean validPathString;
        do {
            validPathString = false;
            string = randomRealisticUnicodeOfCodepointLength(randomIntBetween(min, max));
            try {
                final Path resolved = translogDir.resolve(string);
                // some strings (like '/' , '..') do not resolve to a file name; those must be retried
                validPathString = resolved.getFileName() != null;
            } catch (InvalidPathException ex) {
                // some FS don't like our random file names -- let's just skip these random choices
            }
        } while (Translog.PARSE_STRICT_ID_PATTERN.matcher(string).matches() || validPathString == false);
        return string;
    }
    // End-to-end smoke test: add index/delete/no-op operations, verify snapshots
    // reflect them (also across a generation roll), and verify a snapshot taken
    // past the committed generation is empty.
    public void testSimpleOperations() throws IOException {
        ArrayList<Translog.Operation> ops = new ArrayList<>();
        Translog.Snapshot snapshot = translog.newSnapshot();
        assertThat(snapshot, SnapshotMatchers.size(0));
        addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, new byte[]{1}));
        snapshot = translog.newSnapshot();
        assertThat(snapshot, SnapshotMatchers.equalsTo(ops));
        assertThat(snapshot.totalOperations(), equalTo(ops.size()));
        addToTranslogAndList(translog, ops, new Translog.Delete("test", "2", 1, newUid("2")));
        snapshot = translog.newSnapshot();
        assertThat(snapshot, SnapshotMatchers.equalsTo(ops));
        assertThat(snapshot.totalOperations(), equalTo(ops.size()));
        final long seqNo = randomNonNegativeLong();
        final long primaryTerm = randomNonNegativeLong();
        final String reason = randomAlphaOfLength(16);
        addToTranslogAndList(translog, ops, new Translog.NoOp(seqNo, primaryTerm, reason));
        // Operations come back in insertion order with their payloads intact.
        snapshot = translog.newSnapshot();
        Translog.Index index = (Translog.Index) snapshot.next();
        assertNotNull(index);
        assertThat(BytesReference.toBytes(index.source()), equalTo(new byte[]{1}));
        Translog.Delete delete = (Translog.Delete) snapshot.next();
        assertNotNull(delete);
        assertThat(delete.uid(), equalTo(newUid("2")));
        Translog.NoOp noOp = (Translog.NoOp) snapshot.next();
        assertNotNull(noOp);
        assertThat(noOp.seqNo(), equalTo(seqNo));
        assertThat(noOp.primaryTerm(), equalTo(primaryTerm));
        assertThat(noOp.reason(), equalTo(reason));
        assertNull(snapshot.next());
        // Rolling the generation must not lose any operations from snapshots.
        long firstId = translog.currentFileGeneration();
        translog.rollGeneration();
        assertThat(translog.currentFileGeneration(), Matchers.not(equalTo(firstId)));
        snapshot = translog.newSnapshot();
        assertThat(snapshot, SnapshotMatchers.equalsTo(ops));
        assertThat(snapshot.totalOperations(), equalTo(ops.size()));
        // After committing, a snapshot starting past the old generation is empty.
        markCurrentGenAsCommitted(translog);
        snapshot = translog.newSnapshot(firstId + 1);
        assertThat(snapshot, SnapshotMatchers.size(0));
        assertThat(snapshot.totalOperations(), equalTo(0));
    }
    // Returns up-to-date stats; randomly round-trips them through serialization to
    // also exercise writeTo/readFrom.
    protected TranslogStats stats() throws IOException {
        // force flushing and updating of stats
        translog.sync();
        TranslogStats stats = translog.stats();
        if (randomBoolean()) {
            BytesStreamOutput out = new BytesStreamOutput();
            stats.writeTo(out);
            StreamInput in = out.bytes().streamInput();
            stats = new TranslogStats();
            stats.readFrom(in);
        }
        return stats;
    }
    // Verifies stats reporting at each step of adding operations, rolling a
    // generation, serializing, rendering to XContent, and committing. The exact
    // byte counts (97/146/195/237/280) are pinned to the current wire format.
    public void testStats() throws IOException {
        // self control cleaning for test
        translog.getDeletionPolicy().setRetentionSizeInBytes(1024 * 1024);
        translog.getDeletionPolicy().setRetentionAgeInMillis(3600 * 1000);
        final long firstOperationPosition = translog.getFirstOperationPosition();
        {
            final TranslogStats stats = stats();
            assertThat(stats.estimatedNumberOfOperations(), equalTo(0));
        }
        assertThat((int) firstOperationPosition, greaterThan(CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC)));
        translog.add(new Translog.Index("test", "1", 0, new byte[]{1}));
        {
            final TranslogStats stats = stats();
            assertThat(stats.estimatedNumberOfOperations(), equalTo(1));
            assertThat(stats.getTranslogSizeInBytes(), equalTo(97L));
            assertThat(stats.getUncommittedOperations(), equalTo(1));
            assertThat(stats.getUncommittedSizeInBytes(), equalTo(97L));
        }
        translog.add(new Translog.Delete("test", "2", 1, newUid("2")));
        {
            final TranslogStats stats = stats();
            assertThat(stats.estimatedNumberOfOperations(), equalTo(2));
            assertThat(stats.getTranslogSizeInBytes(), equalTo(146L));
            assertThat(stats.getUncommittedOperations(), equalTo(2));
            assertThat(stats.getUncommittedSizeInBytes(), equalTo(146L));
        }
        translog.add(new Translog.Delete("test", "3", 2, newUid("3")));
        {
            final TranslogStats stats = stats();
            assertThat(stats.estimatedNumberOfOperations(), equalTo(3));
            assertThat(stats.getTranslogSizeInBytes(), equalTo(195L));
            assertThat(stats.getUncommittedOperations(), equalTo(3));
            assertThat(stats.getUncommittedSizeInBytes(), equalTo(195L));
        }
        translog.add(new Translog.NoOp(3, 1, randomAlphaOfLength(16)));
        {
            final TranslogStats stats = stats();
            assertThat(stats.estimatedNumberOfOperations(), equalTo(4));
            assertThat(stats.getTranslogSizeInBytes(), equalTo(237L));
            assertThat(stats.getUncommittedOperations(), equalTo(4));
            assertThat(stats.getUncommittedSizeInBytes(), equalTo(237L));
        }
        // Rolling adds a new (empty) generation: size grows, operation count does not.
        final long expectedSizeInBytes = 280L;
        translog.rollGeneration();
        {
            final TranslogStats stats = stats();
            assertThat(stats.estimatedNumberOfOperations(), equalTo(4));
            assertThat(stats.getTranslogSizeInBytes(), equalTo(expectedSizeInBytes));
            assertThat(stats.getUncommittedOperations(), equalTo(4));
            assertThat(stats.getUncommittedSizeInBytes(), equalTo(expectedSizeInBytes));
        }
        // Serialization round-trip and XContent rendering must preserve the values.
        {
            final TranslogStats stats = stats();
            final BytesStreamOutput out = new BytesStreamOutput();
            stats.writeTo(out);
            final TranslogStats copy = new TranslogStats();
            copy.readFrom(out.bytes().streamInput());
            assertThat(copy.estimatedNumberOfOperations(), equalTo(4));
            assertThat(copy.getTranslogSizeInBytes(), equalTo(expectedSizeInBytes));
            try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
                builder.startObject();
                copy.toXContent(builder, ToXContent.EMPTY_PARAMS);
                builder.endObject();
                assertThat(builder.string(), equalTo("{\"translog\":{\"operations\":4,\"size_in_bytes\":" + expectedSizeInBytes
                    + ",\"uncommitted_operations\":4,\"uncommitted_size_in_bytes\":" + expectedSizeInBytes + "}}"));
            }
        }
        // After commit nothing is uncommitted except the empty header of the new generation.
        markCurrentGenAsCommitted(translog);
        {
            final TranslogStats stats = stats();
            assertThat(stats.estimatedNumberOfOperations(), equalTo(4));
            assertThat(stats.getTranslogSizeInBytes(), equalTo(expectedSizeInBytes));
            assertThat(stats.getUncommittedOperations(), equalTo(0));
            assertThat(stats.getUncommittedSizeInBytes(), equalTo(firstOperationPosition));
        }
    }
    // TranslogStats.add must accumulate each field as a plain sum over all added stats.
    public void testTotalTests() {
        final TranslogStats total = new TranslogStats();
        final int n = randomIntBetween(0, 16);
        final List<TranslogStats> statsList = new ArrayList<>(n);
        for (int i = 0; i < n; i++) {
            final TranslogStats stats = new TranslogStats(randomIntBetween(1, 4096), randomIntBetween(1, 1 << 20),
                randomIntBetween(1, 1 << 20), randomIntBetween(1, 4096));
            statsList.add(stats);
            total.add(stats);
        }
        assertThat(
            total.estimatedNumberOfOperations(),
            equalTo(statsList.stream().mapToInt(TranslogStats::estimatedNumberOfOperations).sum()));
        assertThat(
            total.getTranslogSizeInBytes(),
            equalTo(statsList.stream().mapToLong(TranslogStats::getTranslogSizeInBytes).sum()));
        assertThat(
            total.getUncommittedOperations(),
            equalTo(statsList.stream().mapToInt(TranslogStats::getUncommittedOperations).sum()));
        assertThat(
            total.getUncommittedSizeInBytes(),
            equalTo(statsList.stream().mapToLong(TranslogStats::getUncommittedSizeInBytes).sum()));
    }
public void testNegativeNumberOfOperations() {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(-1, 1, 1, 1));
assertThat(e, hasToString(containsString("numberOfOperations must be >= 0")));
e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, -1, 1));
assertThat(e, hasToString(containsString("uncommittedOperations must be >= 0")));
}
public void testNegativeSizeInBytes() {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, -1, 1, 1));
assertThat(e, hasToString(containsString("translogSizeInBytes must be >= 0")));
e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, 1, -1));
assertThat(e, hasToString(containsString("uncommittedSizeInBytes must be >= 0")));
}
    // Multiple snapshots taken at the same point must be independent and each see
    // the same operations.
    public void testSnapshot() throws IOException {
        ArrayList<Translog.Operation> ops = new ArrayList<>();
        Translog.Snapshot snapshot = translog.newSnapshot();
        assertThat(snapshot, SnapshotMatchers.size(0));
        addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, new byte[]{1}));
        snapshot = translog.newSnapshot();
        assertThat(snapshot, SnapshotMatchers.equalsTo(ops));
        assertThat(snapshot.totalOperations(), equalTo(1));
        snapshot = translog.newSnapshot();
        Translog.Snapshot snapshot1 = translog.newSnapshot();
        assertThat(snapshot, SnapshotMatchers.equalsTo(ops));
        assertThat(snapshot.totalOperations(), equalTo(1));
        assertThat(snapshot1, SnapshotMatchers.size(1));
        assertThat(snapshot1.totalOperations(), equalTo(1));
    }
    // A snapshot only sees operations added before it was taken, and a snapshot
    // taken under a view keeps seeing all operations even after a commit.
    public void testSnapshotWithNewTranslog() throws IOException {
        ArrayList<Translog.Operation> ops = new ArrayList<>();
        Translog.Snapshot snapshot = translog.newSnapshot();
        assertThat(snapshot, SnapshotMatchers.size(0));
        addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, new byte[]{1}));
        Translog.Snapshot snapshot1 = translog.newSnapshot();
        addToTranslogAndList(translog, ops, new Translog.Index("test", "2", 1, new byte[]{2}));
        // snapshot1 was taken after op 0 only; later ops are invisible to it.
        assertThat(snapshot1, SnapshotMatchers.equalsTo(ops.get(0)));
        translog.rollGeneration();
        addToTranslogAndList(translog, ops, new Translog.Index("test", "3", 2, new byte[]{3}));
        try (Translog.View view = translog.newView()) {
            // The view pins all generations, so committing must not hide any ops.
            Translog.Snapshot snapshot2 = translog.newSnapshot();
            markCurrentGenAsCommitted(translog);
            assertThat(snapshot2, SnapshotMatchers.equalsTo(ops));
            assertThat(snapshot2.totalOperations(), equalTo(ops.size()));
        }
    }
public void testSnapshotOnClosedTranslog() throws IOException {
assertTrue(Files.exists(translogDir.resolve(Translog.getFilename(1))));
translog.add(new Translog.Index("test", "1", 0, new byte[]{1}));
translog.close();
try {
Translog.Snapshot snapshot = translog.newSnapshot();
fail("translog is closed");
} catch (AlreadyClosedException ex) {
assertEquals(ex.getMessage(), "translog is already closed");
}
}
public void assertFileIsPresent(Translog translog, long id) {
if (Files.exists(translog.location().resolve(Translog.getFilename(id)))) {
return;
}
fail(Translog.getFilename(id) + " is not present in any location: " + translog.location());
}
    // Asserts the given generation's file has been removed from the translog location.
    public void assertFileDeleted(Translog translog, long id) {
        assertFalse("translog [" + id + "] still exists", Files.exists(translog.location().resolve(Translog.getFilename(id))));
    }
    // Every generation in [minFileGeneration, current) must exist on disk, and every
    // generation below the minimum must have been deleted.
    private void assertFilePresences(Translog translog) {
        for (long gen = translog.getMinFileGeneration(); gen < translog.currentFileGeneration(); gen++) {
            assertFileIsPresent(translog, gen);
        }
        for (long gen = 1; gen < translog.getMinFileGeneration(); gen++) {
            assertFileDeleted(translog, gen);
        }
    }
    // An operation paired with the location the translog reported for it; sorts by
    // location so concurrently-written operations can be replayed in translog order.
    static class LocationOperation implements Comparable<LocationOperation> {
        final Translog.Operation operation;
        final Translog.Location location;
        LocationOperation(Translog.Operation operation, Translog.Location location) {
            this.operation = operation;
            this.location = location;
        }
        @Override
        public int compareTo(LocationOperation o) {
            return location.compareTo(o.location);
        }
    }
public void testConcurrentWritesWithVaryingSize() throws Throwable {
final int opsPerThread = randomIntBetween(10, 200);
int threadCount = 2 + randomInt(5);
logger.info("testing with [{}] threads, each doing [{}] ops", threadCount, opsPerThread);
final BlockingQueue<LocationOperation> writtenOperations = new ArrayBlockingQueue<>(threadCount * opsPerThread);
Thread[] threads = new Thread[threadCount];
final Exception[] threadExceptions = new Exception[threadCount];
final AtomicLong seqNoGenerator = new AtomicLong();
final CountDownLatch downLatch = new CountDownLatch(1);
for (int i = 0; i < threadCount; i++) {
final int threadId = i;
threads[i] = new TranslogThread(translog, downLatch, opsPerThread, threadId, writtenOperations, seqNoGenerator, threadExceptions);
threads[i].setDaemon(true);
threads[i].start();
}
downLatch.countDown();
for (int i = 0; i < threadCount; i++) {
if (threadExceptions[i] != null) {
throw threadExceptions[i];
}
threads[i].join(60 * 1000);
}
List<LocationOperation> collect = new ArrayList<>(writtenOperations);
Collections.sort(collect);
Translog.Snapshot snapshot = translog.newSnapshot();
for (LocationOperation locationOperation : collect) {
Translog.Operation op = snapshot.next();
assertNotNull(op);
Translog.Operation expectedOp = locationOperation.operation;
assertEquals(expectedOp.opType(), op.opType());
switch (op.opType()) {
case INDEX:
Translog.Index indexOp = (Translog.Index) op;
Translog.Index expIndexOp = (Translog.Index) expectedOp;
assertEquals(expIndexOp.id(), indexOp.id());
assertEquals(expIndexOp.routing(), indexOp.routing());
assertEquals(expIndexOp.type(), indexOp.type());
assertEquals(expIndexOp.source(), indexOp.source());
assertEquals(expIndexOp.version(), indexOp.version());
assertEquals(expIndexOp.versionType(), indexOp.versionType());
break;
case DELETE:
Translog.Delete delOp = (Translog.Delete) op;
Translog.Delete expDelOp = (Translog.Delete) expectedOp;
assertEquals(expDelOp.uid(), delOp.uid());
assertEquals(expDelOp.version(), delOp.version());
assertEquals(expDelOp.versionType(), delOp.versionType());
break;
case NO_OP:
final Translog.NoOp noOp = (Translog.NoOp) op;
final Translog.NoOp expectedNoOp = (Translog.NoOp) expectedOp;
assertThat(noOp.seqNo(), equalTo(expectedNoOp.seqNo()));
assertThat(noOp.primaryTerm(), equalTo(expectedNoOp.primaryTerm()));
assertThat(noOp.reason(), equalTo(expectedNoOp.reason()));
break;
default:
throw new AssertionError("unsupported operation type [" + op.opType() + "]");
}
}
assertNull(snapshot.next());
}
    // Corrupting random bytes in the translog files must surface as
    // TranslogCorruptedException when reading, never as silently wrong data.
    public void testTranslogChecksums() throws Exception {
        List<Translog.Location> locations = new ArrayList<>();
        int translogOperations = randomIntBetween(10, 100);
        for (int op = 0; op < translogOperations; op++) {
            String ascii = randomAlphaOfLengthBetween(1, 50);
            locations.add(translog.add(new Translog.Index("test", "" + op, op, ascii.getBytes("UTF-8"))));
        }
        translog.sync();
        corruptTranslogs(translogDir);
        AtomicInteger corruptionsCaught = new AtomicInteger(0);
        Translog.Snapshot snapshot = translog.newSnapshot();
        for (Translog.Location location : locations) {
            try {
                Translog.Operation next = snapshot.next();
                assertNotNull(next);
            } catch (TranslogCorruptedException e) {
                corruptionsCaught.incrementAndGet();
            }
        }
        // Once corruption is detected the snapshot must keep failing.
        expectThrows(TranslogCorruptedException.class, snapshot::next);
        assertThat("at least one corruption was caused and caught", corruptionsCaught.get(), greaterThanOrEqualTo(1));
    }
    // Truncating the translog files must surface as EOFException when reading past
    // the truncation point, never as silently wrong data.
    public void testTruncatedTranslogs() throws Exception {
        List<Translog.Location> locations = new ArrayList<>();
        int translogOperations = randomIntBetween(10, 100);
        for (int op = 0; op < translogOperations; op++) {
            String ascii = randomAlphaOfLengthBetween(1, 50);
            locations.add(translog.add(new Translog.Index("test", "" + op, op, ascii.getBytes("UTF-8"))));
        }
        translog.sync();
        truncateTranslogs(translogDir);
        AtomicInteger truncations = new AtomicInteger(0);
        Translog.Snapshot snap = translog.newSnapshot();
        for (Translog.Location location : locations) {
            try {
                assertNotNull(snap.next());
            } catch (EOFException e) {
                truncations.incrementAndGet();
            }
        }
        assertThat("at least one truncation was caused and caught", truncations.get(), greaterThanOrEqualTo(1));
    }
    /**
     * Randomly truncate some bytes in the translog files
     */
    private void truncateTranslogs(Path directory) throws Exception {
        Path[] files = FileSystemUtils.files(directory, "translog-*");
        for (Path file : files) {
            try (FileChannel f = FileChannel.open(file, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
                long prevSize = f.size();
                // NOTE(review): assumes prevSize >= 2; randomIntBetween(1, 0) would be
                // invalid for an empty/1-byte file -- TODO confirm callers always sync first.
                long newSize = prevSize - randomIntBetween(1, (int) prevSize / 2);
                logger.info("--> truncating {}, prev: {}, now: {}", file, prevSize, newSize);
                f.truncate(newSize);
            }
        }
    }
/**
* Randomly overwrite some bytes in the translog files
*/
private void corruptTranslogs(Path directory) throws Exception {
Path[] files = FileSystemUtils.files(directory, "translog-*");
for (Path file : files) {
logger.info("--> corrupting {}...", file);
FileChannel f = FileChannel.open(file, StandardOpenOption.READ, StandardOpenOption.WRITE);
int corruptions = scaledRandomIntBetween(10, 50);
for (int i = 0; i < corruptions; i++) {
// note: with the current logic, this will sometimes be a no-op
long pos = randomIntBetween(0, (int) f.size());
ByteBuffer junk = ByteBuffer.wrap(new byte[]{randomByte()});
f.write(junk, pos);
}
f.close();
}
}
    // Builds the _uid term for a parsed document from its type and id.
    private Term newUid(ParsedDocument doc) {
        return new Term("_uid", Uid.createUidAsBytes(doc.type(), doc.id()));
    }
    // Builds a _uid term directly from an already-encoded uid string.
    private Term newUid(String uid) {
        return new Term("_uid", uid);
    }
    // The current generation file must survive both normal use and close() --
    // closing a translog never deletes files.
    public void testVerifyTranslogIsNotDeleted() throws IOException {
        assertFileIsPresent(translog, 1);
        translog.add(new Translog.Index("test", "1", 0, new byte[]{1}));
        Translog.Snapshot snapshot = translog.newSnapshot();
        assertThat(snapshot, SnapshotMatchers.size(1));
        assertFileIsPresent(translog, 1);
        assertThat(snapshot.totalOperations(), equalTo(1));
        translog.close();
        assertFileIsPresent(translog, 1);
    }
    /**
     * Tests that concurrent readers and writes maintain view and snapshot semantics:
     * writers append ops and periodically roll/commit; readers periodically open a
     * view, capture the last committed checkpoint, and assert a snapshot from that
     * view contains at least every op written above the captured checkpoint.
     */
    public void testConcurrentWriteViewsAndSnapshot() throws Throwable {
        final Thread[] writers = new Thread[randomIntBetween(1, 3)];
        final Thread[] readers = new Thread[randomIntBetween(1, 3)];
        final int flushEveryOps = randomIntBetween(5, 100);
        final int maxOps = randomIntBetween(200, 1000);
        // Monitor used by writers to wake up readers after some data was indexed.
        final Object signalReaderSomeDataWasIndexed = new Object();
        final AtomicLong idGenerator = new AtomicLong();
        final CyclicBarrier barrier = new CyclicBarrier(writers.length + readers.length + 1);
        // a map of all written ops and their returned location.
        final Map<Translog.Operation, Translog.Location> writtenOps = ConcurrentCollections.newConcurrentMap();
        // a signal for all threads to stop
        final AtomicBoolean run = new AtomicBoolean(true);
        // Serializes roll/commit/trim so the generation being committed is still present.
        final Object flushMutex = new Object();
        final AtomicLong lastCommittedLocalCheckpoint = new AtomicLong(SequenceNumbersService.NO_OPS_PERFORMED);
        final LocalCheckpointTracker tracker = LocalCheckpointTrackerTests.createEmptyTracker();
        final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
        // any errors on threads
        final List<Exception> errors = new CopyOnWriteArrayList<>();
        logger.debug("using [{}] readers. [{}] writers. flushing every ~[{}] ops.", readers.length, writers.length, flushEveryOps);
        for (int i = 0; i < writers.length; i++) {
            final String threadName = "writer_" + i;
            final int threadId = i;
            writers[i] = new Thread(new AbstractRunnable() {
                @Override
                public void doRun() throws BrokenBarrierException, InterruptedException, IOException {
                    barrier.await();
                    int counter = 0;
                    while (run.get() && idGenerator.get() < maxOps) {
                        long id = idGenerator.getAndIncrement();
                        final Translog.Operation op;
                        // Cycle deterministically through the operation types by id.
                        final Translog.Operation.Type type =
                            Translog.Operation.Type.values()[((int) (id % Translog.Operation.Type.values().length))];
                        switch (type) {
                            case CREATE:
                            case INDEX:
                                op = new Translog.Index("type", "" + id, id, new byte[]{(byte) id});
                                break;
                            case DELETE:
                                op = new Translog.Delete("test", Long.toString(id), id, newUid(Long.toString(id)));
                                break;
                            case NO_OP:
                                op = new Translog.NoOp(id, 1, Long.toString(id));
                                break;
                            default:
                                throw new AssertionError("unsupported operation type [" + type + "]");
                        }
                        Translog.Location location = translog.add(op);
                        tracker.markSeqNoAsCompleted(id);
                        Translog.Location existing = writtenOps.put(op, location);
                        if (existing != null) {
                            // NOTE(review): message prints the NEW location; 'existing' was likely intended -- verify.
                            fail("duplicate op [" + op + "], old entry at " + location);
                        }
                        // Each writer syncs its own share of ids to spread fsync load.
                        if (id % writers.length == threadId) {
                            translog.ensureSynced(location);
                        }
                        if (id % flushEveryOps == 0) {
                            synchronized (flushMutex) {
                                // we need not do this concurrently as we need to make sure that the generation
                                // we're committing - is still present when we're committing
                                long localCheckpoint = tracker.getCheckpoint() + 1;
                                translog.rollGeneration();
                                deletionPolicy.setMinTranslogGenerationForRecovery(
                                    translog.getMinGenerationForSeqNo(localCheckpoint + 1).translogFileGeneration);
                                translog.trimUnreferencedReaders();
                                lastCommittedLocalCheckpoint.set(localCheckpoint);
                            }
                        }
                        // Periodically wake readers so they re-check with fresh data.
                        if (id % 7 == 0) {
                            synchronized (signalReaderSomeDataWasIndexed) {
                                signalReaderSomeDataWasIndexed.notifyAll();
                            }
                        }
                        counter++;
                    }
                    logger.debug("--> [{}] done. wrote [{}] ops.", threadName, counter);
                }
                @Override
                public void onFailure(Exception e) {
                    logger.error((Supplier<?>) () -> new ParameterizedMessage("--> writer [{}] had an error", threadName), e);
                    errors.add(e);
                }
            }, threadName);
            writers[i].start();
        }
        for (int i = 0; i < readers.length; i++) {
            final String threadId = "reader_" + i;
            readers[i] = new Thread(new AbstractRunnable() {
                Translog.View view = null;
                long committedLocalCheckpointAtView;
                @Override
                public void onFailure(Exception e) {
                    logger.error((Supplier<?>) () -> new ParameterizedMessage("--> reader [{}] had an error", threadId), e);
                    errors.add(e);
                    try {
                        closeView();
                    } catch (IOException inner) {
                        inner.addSuppressed(e);
                        logger.error("unexpected error while closing view, after failure", inner);
                    }
                }
                void closeView() throws IOException {
                    if (view != null) {
                        view.close();
                    }
                }
                void newView() throws IOException {
                    closeView();
                    view = translog.newView();
                    // captures the last committed checkpoint, while holding the view, simulating
                    // recovery logic which captures a view and gets a lucene commit
                    committedLocalCheckpointAtView = lastCommittedLocalCheckpoint.get();
                    logger.debug("--> [{}] opened view from [{}]", threadId, view.viewGenToRelease);
                }
                @Override
                protected void doRun() throws Exception {
                    barrier.await();
                    int iter = 0;
                    while (idGenerator.get() < maxOps) {
                        if (iter++ % 10 == 0) {
                            newView();
                        }
                        // captures all ops that were written since the view was created;
                        // these are what we expect the snapshot to return (and potentially some more).
                        Set<Translog.Operation> expectedOps = new HashSet<>(writtenOps.keySet());
                        expectedOps.removeIf(op -> op.seqNo() <= committedLocalCheckpointAtView);
                        Translog.Snapshot snapshot = view.snapshot(committedLocalCheckpointAtView + 1L);
                        Translog.Operation op;
                        while ((op = snapshot.next()) != null) {
                            expectedOps.remove(op);
                        }
                        // Anything left over was written but not returned by the snapshot: a bug.
                        if (expectedOps.isEmpty() == false) {
                            StringBuilder missed = new StringBuilder("missed ").append(expectedOps.size())
                                .append(" operations from [").append(committedLocalCheckpointAtView + 1L).append("]");
                            boolean failed = false;
                            for (Translog.Operation expectedOp : expectedOps) {
                                final Translog.Location loc = writtenOps.get(expectedOp);
                                failed = true;
                                missed.append("\n --> [").append(expectedOp).append("] written at ").append(loc);
                            }
                            if (failed) {
                                fail(missed.toString());
                            }
                        }
                        // slow down things a bit and spread out testing..
                        synchronized (signalReaderSomeDataWasIndexed) {
                            if (idGenerator.get() < maxOps) {
                                signalReaderSomeDataWasIndexed.wait();
                            }
                        }
                    }
                    closeView();
                    logger.debug("--> [{}] done. tested [{}] snapshots", threadId, iter);
                }
            }, threadId);
            readers[i].start();
        }
        barrier.await();
        logger.debug("--> waiting for threads to stop");
        for (Thread thread : writers) {
            thread.join();
        }
        logger.debug("--> waiting for readers to stop");
        // force stopping, if all writers crashed
        synchronized (signalReaderSomeDataWasIndexed) {
            idGenerator.set(Long.MAX_VALUE);
            signalReaderSomeDataWasIndexed.notifyAll();
        }
        for (Thread thread : readers) {
            thread.join();
        }
        // Surface the first error with all others attached as suppressed.
        if (errors.size() > 0) {
            Throwable e = errors.get(0);
            for (Throwable suppress : errors.subList(1, errors.size())) {
                e.addSuppressed(suppress);
            }
            throw e;
        }
        logger.info("--> test done. total ops written [{}]", writtenOps.size());
    }
public void testSyncUpTo() throws IOException {
int translogOperations = randomIntBetween(10, 100);
int count = 0;
for (int op = 0; op < translogOperations; op++) {
int seqNo = ++count;
final Translog.Location location =
translog.add(new Translog.Index("test", "" + op, seqNo, Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))));
if (randomBoolean()) {
assertTrue("at least one operation pending", translog.syncNeeded());
assertTrue("this operation has not been synced", translog.ensureSynced(location));
assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); // we are the last location so everything should be synced
seqNo = ++count;
translog.add(new Translog.Index("test", "" + op, seqNo, Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))));
assertTrue("one pending operation", translog.syncNeeded());
assertFalse("this op has been synced before", translog.ensureSynced(location)); // not syncing now
assertTrue("we only synced a previous operation yet", translog.syncNeeded());
}
if (rarely()) {
rollAndCommit(translog);
assertFalse("location is from a previous translog - already synced", translog.ensureSynced(location)); // not syncing now
assertFalse("no sync needed since no operations in current translog", translog.syncNeeded());
}
if (randomBoolean()) {
translog.sync();
assertFalse("translog has been synced already", translog.ensureSynced(location));
}
}
}
public void testSyncUpToStream() throws IOException {
int iters = randomIntBetween(5, 10);
for (int i = 0; i < iters; i++) {
int translogOperations = randomIntBetween(10, 100);
int count = 0;
ArrayList<Location> locations = new ArrayList<>();
for (int op = 0; op < translogOperations; op++) {
if (rarely()) {
rollAndCommit(translog); // do this first so that there is at least one pending tlog entry
}
final Translog.Location location =
translog.add(new Translog.Index("test", "" + op, op, Integer.toString(++count).getBytes(Charset.forName("UTF-8"))));
locations.add(location);
}
Collections.shuffle(locations, random());
if (randomBoolean()) {
assertTrue("at least one operation pending", translog.syncNeeded());
assertTrue("this operation has not been synced", translog.ensureSynced(locations.stream()));
assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); // we are the last location so everything should be synced
} else if (rarely()) {
rollAndCommit(translog);
assertFalse("location is from a previous translog - already synced", translog.ensureSynced(locations.stream())); // not syncing now
assertFalse("no sync needed since no operations in current translog", translog.syncNeeded());
} else {
translog.sync();
assertFalse("translog has been synced already", translog.ensureSynced(locations.stream()));
}
for (Location location : locations) {
assertFalse("all of the locations should be synced: " + location, translog.ensureSynced(location));
}
}
}
    /**
     * Writes operations (occasionally rolling the generation), shuffles the resulting locations,
     * and checks that the maximum location according to {@link #max} lives in the current file
     * generation and that the last operation returned by a snapshot is the last one written.
     */
    public void testLocationComparison() throws IOException {
        List<Translog.Location> locations = new ArrayList<>();
        int translogOperations = randomIntBetween(10, 100);
        int count = 0;
        for (int op = 0; op < translogOperations; op++) {
            locations.add(
                translog.add(new Translog.Index("test", "" + op, op, Integer.toString(++count).getBytes(Charset.forName("UTF-8")))));
            // never roll after the last op, so the greatest location stays in the current generation
            if (rarely() && translogOperations > op + 1) {
                rollAndCommit(translog);
            }
        }
        Collections.shuffle(locations, random());
        Translog.Location max = locations.get(0);
        for (Translog.Location location : locations) {
            max = max(max, location);
        }
        assertEquals(max.generation, translog.currentFileGeneration());
        Translog.Snapshot snap = translog.newSnapshot();
        Translog.Operation next;
        Translog.Operation maxOp = null;
        while ((next = snap.next()) != null) {
            maxOp = next;
        }
        assertNotNull(maxOp);
        // snapshots iterate in write order, so the final operation carries the highest payload value
        assertEquals(maxOp.getSource().source.utf8ToString(), Integer.toString(count));
    }
public static Translog.Location max(Translog.Location a, Translog.Location b) {
if (a.compareTo(b) > 0) {
return a;
}
return b;
}
    /**
     * Checks that the checkpoint file only advertises operations (and the global checkpoint) as of
     * the last explicit sync: a reader opened against that checkpoint sees exactly the synced ops,
     * while the translog itself still counts the unsynced trailing operation.
     */
    public void testBasicCheckpoint() throws IOException {
        List<Translog.Location> locations = new ArrayList<>();
        int translogOperations = randomIntBetween(10, 100);
        int lastSynced = -1;
        long lastSyncedGlobalCheckpoint = globalCheckpoint.get();
        for (int op = 0; op < translogOperations; op++) {
            locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
            if (randomBoolean()) {
                globalCheckpoint.set(globalCheckpoint.get() + randomIntBetween(1, 16));
            }
            if (frequently()) {
                // remember how far we synced; the checkpoint should reflect exactly this state
                translog.sync();
                lastSynced = op;
                lastSyncedGlobalCheckpoint = globalCheckpoint.get();
            }
        }
        assertEquals(translogOperations, translog.totalOperations());
        // one extra, deliberately unsynced operation that the checkpoint must NOT include
        translog.add(new Translog.Index(
            "test", "" + translogOperations, translogOperations, Integer.toString(translogOperations).getBytes(Charset.forName("UTF-8"))));
        final Checkpoint checkpoint = Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME));
        try (TranslogReader reader = translog.openReader(translog.location().resolve(Translog.getFilename(translog.currentFileGeneration())), checkpoint)) {
            assertEquals(lastSynced + 1, reader.totalOperations());
            Translog.Snapshot snapshot = reader.newSnapshot();
            for (int op = 0; op < translogOperations; op++) {
                if (op <= lastSynced) {
                    final Translog.Operation read = snapshot.next();
                    assertEquals(Integer.toString(op), read.getSource().source.utf8ToString());
                } else {
                    // ops written after the last sync are invisible through the checkpoint
                    Translog.Operation next = snapshot.next();
                    assertNull(next);
                }
            }
            Translog.Operation next = snapshot.next();
            assertNull(next);
        }
        assertEquals(translogOperations + 1, translog.totalOperations());
        assertThat(checkpoint.globalCheckpoint, equalTo(lastSyncedGlobalCheckpoint));
        translog.close();
    }
    /**
     * Low-level writer test: adds raw 4-byte payloads with (mostly) unique sequence numbers,
     * verifies they can be read back byte-for-byte via either the live writer or a re-opened
     * reader, checks min/max seqNo bookkeeping in the checkpoint, and that a closed reader hits
     * EOF past the synced ops while the live writer can still see a freshly added (unsynced) op.
     */
    public void testTranslogWriter() throws IOException {
        final TranslogWriter writer = translog.createWriter(translog.currentFileGeneration() + 1);
        final int numOps = randomIntBetween(8, 128);
        byte[] bytes = new byte[4];
        ByteArrayDataOutput out = new ByteArrayDataOutput(bytes);
        final Set<Long> seenSeqNos = new HashSet<>();
        boolean opsHaveValidSequenceNumbers = randomBoolean();
        for (int i = 0; i < numOps; i++) {
            out.reset(bytes);
            out.writeInt(i);
            long seqNo;
            do {
                // occasionally emit UNASSIGNED_SEQ_NO at the start; once valid, stay valid
                seqNo = opsHaveValidSequenceNumbers ? randomNonNegativeLong() : SequenceNumbersService.UNASSIGNED_SEQ_NO;
                opsHaveValidSequenceNumbers = opsHaveValidSequenceNumbers || !rarely();
            } while (seenSeqNos.contains(seqNo));
            if (seqNo != SequenceNumbersService.UNASSIGNED_SEQ_NO) {
                seenSeqNos.add(seqNo);
            }
            writer.add(new BytesArray(bytes), seqNo);
        }
        writer.sync();
        // exercise both read paths: the live writer itself and a reader opened from the checkpoint
        final BaseTranslogReader reader = randomBoolean() ? writer : translog.openReader(writer.path(), Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME)));
        for (int i = 0; i < numOps; i++) {
            ByteBuffer buffer = ByteBuffer.allocate(4);
            reader.readBytes(buffer, reader.getFirstOperationOffset() + 4 * i);
            buffer.flip();
            final int value = buffer.getInt();
            assertEquals(i, value);
        }
        final long minSeqNo = seenSeqNos.stream().min(Long::compareTo).orElse(SequenceNumbersService.NO_OPS_PERFORMED);
        final long maxSeqNo = seenSeqNos.stream().max(Long::compareTo).orElse(SequenceNumbersService.NO_OPS_PERFORMED);
        assertThat(reader.getCheckpoint().minSeqNo, equalTo(minSeqNo));
        assertThat(reader.getCheckpoint().maxSeqNo, equalTo(maxSeqNo));
        // add one more op AFTER the sync; visibility now depends on which reader we hold
        out.reset(bytes);
        out.writeInt(2048);
        writer.add(new BytesArray(bytes), randomNonNegativeLong());
        if (reader instanceof TranslogReader) {
            // a snapshot reader is pinned to the synced checkpoint - reading past it is EOF
            ByteBuffer buffer = ByteBuffer.allocate(4);
            try {
                reader.readBytes(buffer, reader.getFirstOperationOffset() + 4 * numOps);
                fail("read past EOF?");
            } catch (EOFException ex) {
                // expected
            }
            ((TranslogReader) reader).close();
        } else {
            // live reader!
            ByteBuffer buffer = ByteBuffer.allocate(4);
            final long pos = reader.getFirstOperationOffset() + 4 * numOps;
            reader.readBytes(buffer, pos);
            buffer.flip();
            final int value = buffer.getInt();
            assertEquals(2048, value);
        }
        IOUtils.close(writer);
    }
    /**
     * Verifies that {@code TranslogWriter#closeIntoReader()} produces a reader that returns the
     * same bytes and the same checkpoint as the writer, and that the reader can also be closed and
     * re-opened from the on-disk file with identical results.
     */
    public void testCloseIntoReader() throws IOException {
        try (TranslogWriter writer = translog.createWriter(translog.currentFileGeneration() + 1)) {
            final int numOps = randomIntBetween(8, 128);
            final byte[] bytes = new byte[4];
            final ByteArrayDataOutput out = new ByteArrayDataOutput(bytes);
            for (int i = 0; i < numOps; i++) {
                out.reset(bytes);
                out.writeInt(i);
                writer.add(new BytesArray(bytes), randomNonNegativeLong());
            }
            writer.sync();
            final Checkpoint writerCheckpoint = writer.getCheckpoint();
            TranslogReader reader = writer.closeIntoReader();
            try {
                if (randomBoolean()) {
                    // also exercise the close + re-open-from-disk path
                    reader.close();
                    reader = translog.openReader(reader.path(), writerCheckpoint);
                }
                for (int i = 0; i < numOps; i++) {
                    final ByteBuffer buffer = ByteBuffer.allocate(4);
                    reader.readBytes(buffer, reader.getFirstOperationOffset() + 4 * i);
                    buffer.flip();
                    final int value = buffer.getInt();
                    assertEquals(i, value);
                }
                final Checkpoint readerCheckpoint = reader.getCheckpoint();
                assertThat(readerCheckpoint, equalTo(writerCheckpoint));
            } finally {
                IOUtils.close(reader);
            }
        }
    }
    /**
     * Closes a translog and re-opens it from its last generation, verifying that recovery replays
     * exactly the operations written after the last commit (or none, if no commit ever happened).
     */
    public void testBasicRecovery() throws IOException {
        List<Translog.Location> locations = new ArrayList<>();
        int translogOperations = randomIntBetween(10, 100);
        Translog.TranslogGeneration translogGeneration = null;
        int minUncommittedOp = -1;
        final boolean commitOften = randomBoolean();
        for (int op = 0; op < translogOperations; op++) {
            locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
            final boolean commit = commitOften ? frequently() : rarely();
            if (commit && op < translogOperations - 1) {
                rollAndCommit(translog);
                // everything from op+1 onwards is uncommitted and must survive recovery
                minUncommittedOp = op + 1;
                translogGeneration = translog.getGeneration();
            }
        }
        translog.sync();
        TranslogConfig config = translog.getConfig();
        translog.close();
        if (translogGeneration == null) {
            // no commit ever happened - recovery starts from scratch with an empty translog
            translog = createTranslog(config, null);
            assertEquals(0, translog.stats().estimatedNumberOfOperations());
            assertEquals(1, translog.currentFileGeneration());
            assertFalse(translog.syncNeeded());
            Translog.Snapshot snapshot = translog.newSnapshot();
            assertNull(snapshot.next());
        } else {
            translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO);
            assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration());
            assertFalse(translog.syncNeeded());
            Translog.Snapshot snapshot = translog.newSnapshot(translogGeneration.translogFileGeneration);
            for (int i = minUncommittedOp; i < translogOperations; i++) {
                assertEquals("expected operation" + i + " to be in the previous translog but wasn't", translog.currentFileGeneration() - 1, locations.get(i).generation);
                Translog.Operation next = snapshot.next();
                assertNotNull("operation " + i + " must be non-null", next);
                assertEquals(i, Integer.parseInt(next.getSource().source.utf8ToString()));
            }
        }
    }
    /**
     * Recovers a translog whose generation was rolled but never committed (the translog is left
     * open on purpose). Recovery must replay all synced operations, and each recovery pass bumps
     * the file generation by one more relative to the unfinished commit.
     */
    public void testRecoveryUncommitted() throws IOException {
        List<Translog.Location> locations = new ArrayList<>();
        int translogOperations = randomIntBetween(10, 100);
        final int prepareOp = randomIntBetween(0, translogOperations - 1);
        Translog.TranslogGeneration translogGeneration = null;
        final boolean sync = randomBoolean();
        for (int op = 0; op < translogOperations; op++) {
            locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
            if (op == prepareOp) {
                // roll the generation mid-stream, simulating a commit that never completes
                translogGeneration = translog.getGeneration();
                translog.rollGeneration();
                assertEquals("expected this to be the first commit", 1L, translogGeneration.translogFileGeneration);
                assertNotNull(translogGeneration.translogUUID);
            }
        }
        if (sync) {
            translog.sync();
        }
        // we intentionally don't close the tlog that is in the prepareCommit stage since we try to recovery the uncommitted
        // translog here as well.
        TranslogConfig config = translog.getConfig();
        final String translogUUID = translog.getTranslogUUID();
        final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
        try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) {
            assertNotNull(translogGeneration);
            assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
            assertFalse(translog.syncNeeded());
            Translog.Snapshot snapshot = translog.newSnapshot();
            // without an explicit sync only the ops up to the generation roll are durable
            int upTo = sync ? translogOperations : prepareOp;
            for (int i = 0; i < upTo; i++) {
                Translog.Operation next = snapshot.next();
                assertNotNull("operation " + i + " must be non-null synced: " + sync, next);
                assertEquals("payload mismatch, synced: " + sync, i, Integer.parseInt(next.getSource().source.utf8ToString()));
            }
        }
        if (randomBoolean()) { // recover twice
            try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) {
                assertNotNull(translogGeneration);
                assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration());
                assertFalse(translog.syncNeeded());
                Translog.Snapshot snapshot = translog.newSnapshot();
                int upTo = sync ? translogOperations : prepareOp;
                for (int i = 0; i < upTo; i++) {
                    Translog.Operation next = snapshot.next();
                    assertNotNull("operation " + i + " must be non-null synced: " + sync, next);
                    assertEquals("payload mismatch, synced: " + sync, i, Integer.parseInt(next.getSource().source.utf8ToString()));
                }
            }
        }
    }
    /**
     * Same scenario as {@link #testRecoveryUncommitted()}, but with the per-generation commit
     * checkpoint file already present on disk (copied from the live checkpoint) before recovery.
     * Recovery must tolerate the pre-existing, matching file and still replay the synced ops.
     */
    public void testRecoveryUncommittedFileExists() throws IOException {
        List<Translog.Location> locations = new ArrayList<>();
        int translogOperations = randomIntBetween(10, 100);
        final int prepareOp = randomIntBetween(0, translogOperations - 1);
        Translog.TranslogGeneration translogGeneration = null;
        final boolean sync = randomBoolean();
        for (int op = 0; op < translogOperations; op++) {
            locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
            if (op == prepareOp) {
                translogGeneration = translog.getGeneration();
                translog.rollGeneration();
                assertEquals("expected this to be the first commit", 1L, translogGeneration.translogFileGeneration);
                assertNotNull(translogGeneration.translogUUID);
            }
        }
        if (sync) {
            translog.sync();
        }
        // we intentionally don't close the tlog that is in the prepareCommit stage since we try to recovery the uncommitted
        // translog here as well.
        TranslogConfig config = translog.getConfig();
        Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME);
        Checkpoint read = Checkpoint.read(ckp);
        // pre-create the commit checkpoint file with content identical to the live checkpoint
        Files.copy(ckp, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)));
        final String translogUUID = translog.getTranslogUUID();
        final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
        try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) {
            assertNotNull(translogGeneration);
            assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
            assertFalse(translog.syncNeeded());
            Translog.Snapshot snapshot = translog.newSnapshot();
            int upTo = sync ? translogOperations : prepareOp;
            for (int i = 0; i < upTo; i++) {
                Translog.Operation next = snapshot.next();
                assertNotNull("operation " + i + " must be non-null synced: " + sync, next);
                assertEquals("payload mismatch, synced: " + sync, i, Integer.parseInt(next.getSource().source.utf8ToString()));
            }
        }
        if (randomBoolean()) { // recover twice
            try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) {
                assertNotNull(translogGeneration);
                assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration());
                assertFalse(translog.syncNeeded());
                Translog.Snapshot snapshot = translog.newSnapshot();
                int upTo = sync ? translogOperations : prepareOp;
                for (int i = 0; i < upTo; i++) {
                    Translog.Operation next = snapshot.next();
                    assertNotNull("operation " + i + " must be non-null synced: " + sync, next);
                    assertEquals("payload mismatch, synced: " + sync, i, Integer.parseInt(next.getSource().source.utf8ToString()));
                }
            }
        }
    }
    /**
     * Like {@link #testRecoveryUncommittedFileExists()} but the pre-existing commit checkpoint
     * file is written with CORRUPTED (empty) content. Recovery must fail with an
     * {@link IllegalStateException} carrying the exact expected/actual checkpoint description;
     * after restoring the correct content, recovery succeeds. Ops and prepare point are fixed
     * (100 / 44) because the exception message asserts exact offsets and seqNos.
     */
    public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException {
        List<Translog.Location> locations = new ArrayList<>();
        int translogOperations = 100;
        final int prepareOp = 44;
        Translog.TranslogGeneration translogGeneration = null;
        final boolean sync = randomBoolean();
        for (int op = 0; op < translogOperations; op++) {
            locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
            if (op == prepareOp) {
                translogGeneration = translog.getGeneration();
                translog.rollGeneration();
                assertEquals("expected this to be the first commit", 1L, translogGeneration.translogFileGeneration);
                assertNotNull(translogGeneration.translogUUID);
            }
        }
        translog.sync();
        // we intentionally don't close the tlog that is in the prepareCommit stage since we try to recovery the uncommitted
        // translog here as well.
        TranslogConfig config = translog.getConfig();
        Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME);
        Checkpoint read = Checkpoint.read(ckp);
        // plant a bogus (empty) commit checkpoint where recovery expects the real one
        Checkpoint corrupted = Checkpoint.emptyTranslogCheckpoint(0, 0, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0);
        Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), corrupted, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
        final String translogUUID = translog.getTranslogUUID();
        final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
        try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) {
            fail("corrupted");
        } catch (IllegalStateException ex) {
            assertEquals("Checkpoint file translog-2.ckp already exists but has corrupted content expected: Checkpoint{offset=3123, " +
                "numOps=55, generation=2, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-2, minTranslogGeneration=1} but got: Checkpoint{offset=0, numOps=0, " +
                "generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-2, minTranslogGeneration=0}", ex.getMessage());
        }
        // restore the correct checkpoint content; recovery should now proceed normally
        Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING);
        try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) {
            assertNotNull(translogGeneration);
            assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
            assertFalse(translog.syncNeeded());
            Translog.Snapshot snapshot = translog.newSnapshot();
            int upTo = sync ? translogOperations : prepareOp;
            for (int i = 0; i < upTo; i++) {
                Translog.Operation next = snapshot.next();
                assertNotNull("operation " + i + " must be non-null synced: " + sync, next);
                assertEquals("payload mismatch, synced: " + sync, i, Integer.parseInt(next.getSource().source.utf8ToString()));
            }
        }
    }
public void testSnapshotFromStreamInput() throws IOException {
BytesStreamOutput out = new BytesStreamOutput();
List<Translog.Operation> ops = new ArrayList<>();
int translogOperations = randomIntBetween(10, 100);
for (int op = 0; op < translogOperations; op++) {
Translog.Index test = new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")));
ops.add(test);
}
Translog.writeOperations(out, ops);
final List<Translog.Operation> readOperations = Translog.readOperations(out.bytes().streamInput());
assertEquals(ops.size(), readOperations.size());
assertEquals(ops, readOperations);
}
    /**
     * Checks the {@code equals}/{@code hashCode} contract of {@code Translog.Location}: identical
     * writes into two independent translogs yield equal locations with equal hash codes, while
     * distinct locations within one translog are never equal.
     */
    public void testLocationHashCodeEquals() throws IOException {
        List<Translog.Location> locations = new ArrayList<>();
        List<Translog.Location> locations2 = new ArrayList<>();
        int translogOperations = randomIntBetween(10, 100);
        try (Translog translog2 = create(createTempDir())) {
            for (int op = 0; op < translogOperations; op++) {
                // mirror the same sequence of writes into both translogs
                locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
                locations2.add(translog2.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
            }
            int iters = randomIntBetween(10, 100);
            for (int i = 0; i < iters; i++) {
                Translog.Location location = RandomPicks.randomFrom(random(), locations);
                for (Translog.Location loc : locations) {
                    if (loc == location) {
                        assertTrue(loc.equals(location));
                        assertEquals(loc.hashCode(), location.hashCode());
                    } else {
                        // different write positions must never compare equal
                        assertFalse(loc.equals(location));
                    }
                }
                for (int j = 0; j < translogOperations; j++) {
                    assertTrue(locations.get(j).equals(locations2.get(j)));
                    assertEquals(locations.get(j).hashCode(), locations2.get(j).hashCode());
                }
            }
        }
    }
    /**
     * Opening a translog directory with a UUID that doesn't match must fail with
     * {@code TranslogCorruptedException}; re-opening with the correct UUID must still recover all
     * uncommitted operations.
     */
    public void testOpenForeignTranslog() throws IOException {
        List<Translog.Location> locations = new ArrayList<>();
        int translogOperations = randomIntBetween(1, 10);
        int firstUncommitted = 0;
        for (int op = 0; op < translogOperations; op++) {
            locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
            if (randomBoolean()) {
                rollAndCommit(translog);
                firstUncommitted = op + 1;
            }
        }
        final TranslogConfig config = translog.getConfig();
        final String translogUUID = translog.getTranslogUUID();
        final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
        Translog.TranslogGeneration translogGeneration = translog.getGeneration();
        translog.close();
        // a random UUID (of at most the same length) that won't match the on-disk translog
        final String foreignTranslog = randomRealisticUnicodeOfCodepointLengthBetween(1,
            translogGeneration.translogUUID.length());
        try {
            new Translog(config, foreignTranslog, createTranslogDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO);
            fail("translog doesn't belong to this UUID");
        } catch (TranslogCorruptedException ex) {
            // expected: the UUID mismatch must be detected on open
        }
        this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO);
        Translog.Snapshot snapshot = this.translog.newSnapshot(translogGeneration.translogFileGeneration);
        for (int i = firstUncommitted; i < translogOperations; i++) {
            Translog.Operation next = snapshot.next();
            assertNotNull("" + i, next);
            assertEquals(Integer.parseInt(next.getSource().source.utf8ToString()), i);
        }
        assertNull(snapshot.next());
    }
public void testFailOnClosedWrite() throws IOException {
translog.add(new Translog.Index("test", "1", 0, Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
translog.close();
try {
translog.add(new Translog.Index("test", "1", 0, Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
fail("closed");
} catch (AlreadyClosedException ex) {
// all is well
}
}
    /**
     * Closes the translog while multiple writer threads are adding operations. The only acceptable
     * failure mode for the writers is {@code AlreadyClosedException}; any other exception is
     * rethrown and fails the test.
     */
    public void testCloseConcurrently() throws Throwable {
        final int opsPerThread = randomIntBetween(10, 200);
        int threadCount = 2 + randomInt(5);
        logger.info("testing with [{}] threads, each doing [{}] ops", threadCount, opsPerThread);
        final BlockingQueue<LocationOperation> writtenOperations = new ArrayBlockingQueue<>(threadCount * opsPerThread);
        Thread[] threads = new Thread[threadCount];
        final Exception[] threadExceptions = new Exception[threadCount];
        final CountDownLatch downLatch = new CountDownLatch(1);
        final AtomicLong seqNoGenerator = new AtomicLong();
        for (int i = 0; i < threadCount; i++) {
            final int threadId = i;
            threads[i] = new TranslogThread(translog, downLatch, opsPerThread, threadId, writtenOperations, seqNoGenerator, threadExceptions);
            threads[i].setDaemon(true);
            threads[i].start();
        }
        // release all writers, then immediately race the close against them
        downLatch.countDown();
        translog.close();
        for (int i = 0; i < threadCount; i++) {
            if (threadExceptions[i] != null) {
                // writers racing with close may legitimately hit AlreadyClosedException
                if ((threadExceptions[i] instanceof AlreadyClosedException) == false) {
                    throw threadExceptions[i];
                }
            }
            threads[i].join(60 * 1000);
        }
    }
    /**
     * Worker thread that waits on a latch, then writes {@code opsPerThread} random operations
     * (index/delete/no-op) into the shared translog, recording each written operation and its
     * location. Any exception is captured into the per-thread slot of {@code threadExceptions}
     * instead of propagating. Subclasses can override {@link #add} and {@link #afterAdd} to hook
     * into the write loop.
     */
    private static class TranslogThread extends Thread {
        private final CountDownLatch downLatch;
        private final int opsPerThread;
        private final int threadId;
        private final Collection<LocationOperation> writtenOperations;
        private final Exception[] threadExceptions;
        private final Translog translog;
        private final AtomicLong seqNoGenerator;
        TranslogThread(Translog translog, CountDownLatch downLatch, int opsPerThread, int threadId,
                       Collection<LocationOperation> writtenOperations, AtomicLong seqNoGenerator, Exception[] threadExceptions) {
            this.translog = translog;
            this.downLatch = downLatch;
            this.opsPerThread = opsPerThread;
            this.threadId = threadId;
            this.writtenOperations = writtenOperations;
            this.seqNoGenerator = seqNoGenerator;
            this.threadExceptions = threadExceptions;
        }
        @Override
        public void run() {
            try {
                // start all threads at (roughly) the same time
                downLatch.await();
                for (int opCount = 0; opCount < opsPerThread; opCount++) {
                    Translog.Operation op;
                    final Translog.Operation.Type type = randomFrom(Translog.Operation.Type.values());
                    switch (type) {
                        case CREATE:
                        case INDEX:
                            op = new Translog.Index("test", threadId + "_" + opCount, seqNoGenerator.getAndIncrement(),
                                randomUnicodeOfLengthBetween(1, 20 * 1024).getBytes("UTF-8"));
                            break;
                        case DELETE:
                            op = new Translog.Delete(
                                "test", threadId + "_" + opCount,
                                new Term("_uid", threadId + "_" + opCount),
                                seqNoGenerator.getAndIncrement(),
                                0,
                                1 + randomInt(100000),
                                randomFrom(VersionType.values()));
                            break;
                        case NO_OP:
                            op = new Translog.NoOp(seqNoGenerator.getAndIncrement(), randomNonNegativeLong(), randomAlphaOfLength(16));
                            break;
                        default:
                            throw new AssertionError("unsupported operation type [" + type + "]");
                    }
                    Translog.Location loc = add(op);
                    writtenOperations.add(new LocationOperation(op, loc));
                    afterAdd();
                }
            } catch (Exception t) {
                // stash the failure for the main test thread to inspect
                threadExceptions[threadId] = t;
            }
        }
        // override point: how an operation is written (default: plain translog.add)
        protected Translog.Location add(Translog.Operation op) throws IOException {
            return translog.add(op);
        }
        // override point: invoked after each successful add (default: no-op)
        protected void afterAdd() throws IOException {
        }
    }
    /**
     * Injects I/O failures during sync until the translog trips a tragic event, then verifies:
     * the translog is closed, subsequent adds/snapshots/commits fail with
     * {@code AlreadyClosedException} wrapping the tragic exception, and a fresh translog opened on
     * the same files recovers exactly the operations that were successfully synced.
     */
    public void testFailFlush() throws IOException {
        Path tempDir = createTempDir();
        final FailSwitch fail = new FailSwitch();
        TranslogConfig config = getTranslogConfig(tempDir);
        Translog translog = getFailableTranslog(fail, config);
        List<Translog.Location> locations = new ArrayList<>();
        int opsSynced = 0;
        boolean failed = false;
        while (failed == false) {
            try {
                locations.add(translog.add(
                    new Translog.Index("test", "" + opsSynced, opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
                translog.sync();
                opsSynced++;
            } catch (MockDirectoryWrapper.FakeIOException ex) {
                failed = true;
                assertFalse(translog.isOpen());
            } catch (IOException ex) {
                failed = true;
                assertFalse(translog.isOpen());
                assertEquals("__FAKE__ no space left on device", ex.getMessage());
            }
            // randomly toggle failure injection so some syncs succeed before the tragic event
            if (randomBoolean()) {
                fail.failAlways();
            } else {
                fail.failNever();
            }
        }
        fail.failNever();
        if (randomBoolean()) {
            try {
                locations.add(translog.add(
                    new Translog.Index("test", "" + opsSynced, opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
                fail("we are already closed");
            } catch (AlreadyClosedException ex) {
                // the tragic exception must be preserved as the cause
                assertNotNull(ex.getCause());
                if (ex.getCause() instanceof MockDirectoryWrapper.FakeIOException) {
                    assertNull(ex.getCause().getMessage());
                } else {
                    assertEquals(ex.getCause().getMessage(), "__FAKE__ no space left on device");
                }
            }
        }
        Translog.TranslogGeneration translogGeneration = translog.getGeneration();
        try {
            translog.newSnapshot();
            fail("already closed");
        } catch (AlreadyClosedException ex) {
            // all is well
            assertNotNull(ex.getCause());
            assertSame(translog.getTragicException(), ex.getCause());
        }
        try {
            rollAndCommit(translog);
            fail("already closed");
        } catch (AlreadyClosedException ex) {
            assertNotNull(ex.getCause());
            assertSame(translog.getTragicException(), ex.getCause());
        }
        assertFalse(translog.isOpen());
        translog.close(); // we are closed
        final String translogUUID = translog.getTranslogUUID();
        final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
        try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) {
            assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration());
            assertFalse(tlog.syncNeeded());
            Translog.Snapshot snapshot = tlog.newSnapshot();
            // only the ops whose sync completed before the tragic event are recoverable
            assertEquals(opsSynced, snapshot.totalOperations());
            for (int i = 0; i < opsSynced; i++) {
                assertEquals("expected operation" + i + " to be in the previous translog but wasn't", tlog.currentFileGeneration() - 1, locations.get(i).generation);
                Translog.Operation next = snapshot.next();
                assertNotNull("operation " + i + " must be non-null", next);
                assertEquals(i, Integer.parseInt(next.getSource().source.utf8ToString()));
            }
        }
    }
    /**
     * After every add, a fresh snapshot must report exactly the number of operations written so
     * far, and every previously written location must still be in the current generation.
     */
    public void testTranslogOpsCountIsCorrect() throws IOException {
        List<Translog.Location> locations = new ArrayList<>();
        int numOps = randomIntBetween(100, 200);
        LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer borders regularly
        for (int opsAdded = 0; opsAdded < numOps; opsAdded++) {
            locations.add(translog.add(
                new Translog.Index("test", "" + opsAdded, opsAdded, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))));
            Translog.Snapshot snapshot = this.translog.newSnapshot();
            assertEquals(opsAdded + 1, snapshot.totalOperations());
            for (int i = 0; i < opsAdded; i++) {
                assertEquals("expected operation" + i + " to be in the current translog but wasn't", translog.currentFileGeneration(), locations.get(i).generation);
                Translog.Operation next = snapshot.next();
                assertNotNull("operation " + i + " must be non-null", next);
            }
        }
    }
    /**
     * Verifies that a non-IOException thrown during a write/sync also counts as a tragic event:
     * the translog closes itself and {@code getTragicException()} exposes the original
     * {@code UnknownException} (possibly wrapped in a {@code TranslogException}).
     */
    public void testTragicEventCanBeAnyException() throws IOException {
        Path tempDir = createTempDir();
        final FailSwitch fail = new FailSwitch();
        TranslogConfig config = getTranslogConfig(tempDir);
        Translog translog = getFailableTranslog(fail, config, false, true, null, createTranslogDeletionPolicy());
        LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer borders regularly
        translog.add(new Translog.Index("test", "1", 0, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))));
        fail.failAlways();
        try {
            Translog.Location location = translog.add(
                new Translog.Index("test", "2", 1, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))));
            // either sync path must surface the injected failure
            if (randomBoolean()) {
                translog.ensureSynced(location);
            } else {
                translog.sync();
            }
            //TODO once we have a mock FS that can simulate we can also fail on plain sync
            fail("WTF");
        } catch (UnknownException ex) {
            // w00t
        } catch (TranslogException ex) {
            assertTrue(ex.getCause() instanceof UnknownException);
        }
        assertFalse(translog.isOpen());
        assertTrue(translog.getTragicException() instanceof UnknownException);
    }
public void testFatalIOExceptionsWhileWritingConcurrently() throws IOException, InterruptedException {
Path tempDir = createTempDir();
final FailSwitch fail = new FailSwitch();
TranslogConfig config = getTranslogConfig(tempDir);
Translog translog = getFailableTranslog(fail, config);
final String translogUUID = translog.getTranslogUUID();
final int threadCount = randomIntBetween(1, 5);
Thread[] threads = new Thread[threadCount];
final Exception[] threadExceptions = new Exception[threadCount];
final CountDownLatch downLatch = new CountDownLatch(1);
final CountDownLatch added = new CountDownLatch(randomIntBetween(10, 100));
final AtomicLong seqNoGenerator = new AtomicLong();
List<LocationOperation> writtenOperations = Collections.synchronizedList(new ArrayList<>());
for (int i = 0; i < threadCount; i++) {
final int threadId = i;
threads[i] = new TranslogThread(translog, downLatch, 200, threadId, writtenOperations, seqNoGenerator, threadExceptions) {
@Override
protected Translog.Location add(Translog.Operation op) throws IOException {
Translog.Location add = super.add(op);
added.countDown();
return add;
}
@Override
protected void afterAdd() throws IOException {
if (randomBoolean()) {
translog.sync();
}
}
};
threads[i].setDaemon(true);
threads[i].start();
}
downLatch.countDown();
added.await();
try (Translog.View view = translog.newView()) {
// this holds a reference to the current tlog channel such that it's not closed
// if we hit a tragic event. this is important to ensure that asserts inside the Translog#add doesn't trip
// otherwise our assertions here are off by one sometimes.
fail.failAlways();
for (int i = 0; i < threadCount; i++) {
threads[i].join();
}
boolean atLeastOneFailed = false;
for (Throwable ex : threadExceptions) {
if (ex != null) {
assertTrue(ex.toString(), ex instanceof IOException || ex instanceof AlreadyClosedException);
atLeastOneFailed = true;
}
}
if (atLeastOneFailed == false) {
try {
boolean syncNeeded = translog.syncNeeded();
translog.close();
assertFalse("should have failed if sync was needed", syncNeeded);
} catch (IOException ex) {
// boom now we failed
}
}
Collections.sort(writtenOperations, (a, b) -> a.location.compareTo(b.location));
assertFalse(translog.isOpen());
final Checkpoint checkpoint = Checkpoint.read(config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME));
Iterator<LocationOperation> iterator = writtenOperations.iterator();
while (iterator.hasNext()) {
LocationOperation next = iterator.next();
if (checkpoint.offset < (next.location.translogLocation + next.location.size)) {
// drop all that haven't been synced
iterator.remove();
}
}
try (Translog tlog = new Translog(config, translogUUID, createTranslogDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) {
Translog.Snapshot snapshot = tlog.newSnapshot();
if (writtenOperations.size() != snapshot.totalOperations()) {
for (int i = 0; i < threadCount; i++) {
if (threadExceptions[i] != null) {
logger.info("Translog exception", threadExceptions[i]);
}
}
}
assertEquals(writtenOperations.size(), snapshot.totalOperations());
for (int i = 0; i < writtenOperations.size(); i++) {
assertEquals("expected operation" + i + " to be in the previous translog but wasn't", tlog.currentFileGeneration() - 1, writtenOperations.get(i).location.generation);
Translog.Operation next = snapshot.next();
assertNotNull("operation " + i + " must be non-null", next);
assertEquals(next, writtenOperations.get(i).operation);
}
}
}
}
/**
* Tests the situation where the node crashes after a translog gen was committed to lucene, but before the translog had the chance
* to clean up its files.
*/
public void testRecoveryFromAFutureGenerationCleansUp() throws IOException {
int translogOperations = randomIntBetween(10, 100);
for (int op = 0; op < translogOperations / 2; op++) {
translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
if (rarely()) {
translog.rollGeneration();
}
}
translog.rollGeneration();
long comittedGeneration = randomLongBetween(2, translog.currentFileGeneration());
for (int op = translogOperations / 2; op < translogOperations; op++) {
translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
if (rarely()) {
translog.rollGeneration();
}
}
// engine blows up, after committing the above generation
translog.close();
TranslogConfig config = translog.getConfig();
final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1);
deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration);
translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO);
assertThat(translog.getMinFileGeneration(), equalTo(1L));
// no trimming done yet, just recovered
for (long gen = 1; gen < translog.currentFileGeneration(); gen++) {
assertFileIsPresent(translog, gen);
}
translog.trimUnreferencedReaders();
for (long gen = 1; gen < comittedGeneration; gen++) {
assertFileDeleted(translog, gen);
}
}
/**
* Tests the situation where the node crashes after a translog gen was committed to lucene, but before the translog had the chance
* to clean up its files.
*/
public void testRecoveryFromFailureOnTrimming() throws IOException {
Path tempDir = createTempDir();
final FailSwitch fail = new FailSwitch();
fail.failNever();
final TranslogConfig config = getTranslogConfig(tempDir);
final long comittedGeneration;
final String translogUUID;
try (Translog translog = getFailableTranslog(fail, config)) {
final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
// disable retention so we trim things
deletionPolicy.setRetentionSizeInBytes(-1);
deletionPolicy.setRetentionAgeInMillis(-1);
translogUUID = translog.getTranslogUUID();
int translogOperations = randomIntBetween(10, 100);
for (int op = 0; op < translogOperations / 2; op++) {
translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
if (rarely()) {
translog.rollGeneration();
}
}
translog.rollGeneration();
comittedGeneration = randomLongBetween(2, translog.currentFileGeneration());
for (int op = translogOperations / 2; op < translogOperations; op++) {
translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
if (rarely()) {
translog.rollGeneration();
}
}
deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration);
fail.failRandomly();
try {
translog.trimUnreferencedReaders();
} catch (Exception e) {
// expected...
}
}
final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1);
deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration);
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) {
// we don't know when things broke exactly
assertThat(translog.getMinFileGeneration(), greaterThanOrEqualTo(1L));
assertThat(translog.getMinFileGeneration(), lessThanOrEqualTo(comittedGeneration));
assertFilePresences(translog);
translog.trimUnreferencedReaders();
assertThat(translog.getMinFileGeneration(), equalTo(comittedGeneration));
assertFilePresences(translog);
}
}
private Translog getFailableTranslog(FailSwitch fail, final TranslogConfig config) throws IOException {
return getFailableTranslog(fail, config, randomBoolean(), false, null, createTranslogDeletionPolicy());
}
private static class FailSwitch {
private volatile int failRate;
private volatile boolean onceFailedFailAlways = false;
public boolean fail() {
boolean fail = randomIntBetween(1, 100) <= failRate;
if (fail && onceFailedFailAlways) {
failAlways();
}
return fail;
}
public void failNever() {
failRate = 0;
}
public void failAlways() {
failRate = 100;
}
public void failRandomly() {
failRate = randomIntBetween(1, 100);
}
public void onceFailedFailAlways() {
onceFailedFailAlways = true;
}
}
private Translog getFailableTranslog(final FailSwitch fail, final TranslogConfig config, final boolean partialWrites,
final boolean throwUnknownException, String translogUUID,
final TranslogDeletionPolicy deletionPolicy) throws IOException {
return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO) {
@Override
ChannelFactory getChannelFactory() {
final ChannelFactory factory = super.getChannelFactory();
return (file, openOption) -> {
FileChannel channel = factory.open(file, openOption);
boolean success = false;
try {
final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation
ThrowingFileChannel throwingFileChannel = new ThrowingFileChannel(fail, isCkpFile ? false : partialWrites, throwUnknownException, channel);
success = true;
return throwingFileChannel;
} finally {
if (success == false) {
IOUtils.closeWhileHandlingException(channel);
}
}
};
}
@Override
void deleteReaderFiles(TranslogReader reader) {
if (fail.fail()) {
// simulate going OOM and dieing just at the wrong moment.
throw new RuntimeException("simulated");
} else {
super.deleteReaderFiles(reader);
}
}
};
}
public static class ThrowingFileChannel extends FilterFileChannel {
private final FailSwitch fail;
private final boolean partialWrite;
private final boolean throwUnknownException;
public ThrowingFileChannel(FailSwitch fail, boolean partialWrite, boolean throwUnknownException, FileChannel delegate) throws MockDirectoryWrapper.FakeIOException {
super(delegate);
this.fail = fail;
this.partialWrite = partialWrite;
this.throwUnknownException = throwUnknownException;
if (fail.fail()) {
throw new MockDirectoryWrapper.FakeIOException();
}
}
@Override
public int read(ByteBuffer dst) throws IOException {
if (fail.fail()) {
throw new MockDirectoryWrapper.FakeIOException();
}
return super.read(dst);
}
@Override
public long read(ByteBuffer[] dsts, int offset, int length) throws IOException {
if (fail.fail()) {
throw new MockDirectoryWrapper.FakeIOException();
}
return super.read(dsts, offset, length);
}
@Override
public long write(ByteBuffer[] srcs, int offset, int length) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public int write(ByteBuffer src, long position) throws IOException {
throw new UnsupportedOperationException();
}
public int write(ByteBuffer src) throws IOException {
if (fail.fail()) {
if (partialWrite) {
if (src.hasRemaining()) {
final int pos = src.position();
final int limit = src.limit();
src.limit(randomIntBetween(pos, limit));
super.write(src);
src.limit(limit);
src.position(pos);
throw new IOException("__FAKE__ no space left on device");
}
}
if (throwUnknownException) {
throw new UnknownException();
} else {
throw new MockDirectoryWrapper.FakeIOException();
}
}
return super.write(src);
}
@Override
public void force(boolean metaData) throws IOException {
if (fail.fail()) {
throw new MockDirectoryWrapper.FakeIOException();
}
super.force(metaData);
}
@Override
public long position() throws IOException {
if (fail.fail()) {
throw new MockDirectoryWrapper.FakeIOException();
}
return super.position();
}
}
private static final class UnknownException extends RuntimeException {
}
// see https://github.com/elastic/elasticsearch/issues/15754
public void testFailWhileCreateWriteWithRecoveredTLogs() throws IOException {
Path tempDir = createTempDir();
TranslogConfig config = getTranslogConfig(tempDir);
Translog translog = createTranslog(config, null);
translog.add(new Translog.Index("test", "boom", 0, "boom".getBytes(Charset.forName("UTF-8"))));
translog.close();
try {
new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO) {
@Override
protected TranslogWriter createWriter(long fileGeneration) throws IOException {
throw new MockDirectoryWrapper.FakeIOException();
}
};
// if we have a LeakFS here we fail if not all resources are closed
fail("should have been failed");
} catch (MockDirectoryWrapper.FakeIOException ex) {
// all is well
}
}
public void testRecoverWithUnbackedNextGen() throws IOException {
translog.add(new Translog.Index("test", "" + 0, 0, Integer.toString(0).getBytes(Charset.forName("UTF-8"))));
translog.close();
TranslogConfig config = translog.getConfig();
Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME);
Checkpoint read = Checkpoint.read(ckp);
Files.copy(ckp, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)));
Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog"));
try (Translog tlog = createTranslog(config, translog.getTranslogUUID())) {
assertFalse(tlog.syncNeeded());
Translog.Snapshot snapshot = tlog.newSnapshot();
for (int i = 0; i < 1; i++) {
Translog.Operation next = snapshot.next();
assertNotNull("operation " + i + " must be non-null", next);
assertEquals("payload missmatch", i, Integer.parseInt(next.getSource().source.utf8ToString()));
}
tlog.add(new Translog.Index("test", "" + 1, 1, Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
}
try (Translog tlog = createTranslog(config, translog.getTranslogUUID())) {
assertFalse(tlog.syncNeeded());
Translog.Snapshot snapshot = tlog.newSnapshot();
for (int i = 0; i < 2; i++) {
Translog.Operation next = snapshot.next();
assertNotNull("operation " + i + " must be non-null", next);
assertEquals("payload missmatch", i, Integer.parseInt(next.getSource().source.utf8ToString()));
}
}
}
public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException {
translog.add(new Translog.Index("test", "" + 0, 0, Integer.toString(0).getBytes(Charset.forName("UTF-8"))));
translog.close();
TranslogConfig config = translog.getConfig();
Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME);
Checkpoint read = Checkpoint.read(ckp);
// don't copy the new file
Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog"));
try {
Translog tlog = new Translog(config, translog.getTranslogUUID(), translog.getDeletionPolicy(), () -> SequenceNumbersService.UNASSIGNED_SEQ_NO);
fail("file already exists?");
} catch (TranslogException ex) {
// all is well
assertEquals(ex.getMessage(), "failed to create new translog file");
assertEquals(ex.getCause().getClass(), FileAlreadyExistsException.class);
}
}
public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException {
translog.add(new Translog.Index("test", "" + 0, 0, Integer.toString(0).getBytes(Charset.forName("UTF-8"))));
translog.close();
TranslogConfig config = translog.getConfig();
final String translogUUID = translog.getTranslogUUID();
final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME);
Checkpoint read = Checkpoint.read(ckp);
Files.copy(ckp, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)));
Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog"));
// we add N+1 and N+2 to ensure we only delete the N+1 file and never jump ahead and wipe without the right condition
Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 2) + ".tlog"));
try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) {
assertFalse(tlog.syncNeeded());
Translog.Snapshot snapshot = tlog.newSnapshot();
for (int i = 0; i < 1; i++) {
Translog.Operation next = snapshot.next();
assertNotNull("operation " + i + " must be non-null", next);
assertEquals("payload missmatch", i, Integer.parseInt(next.getSource().source.utf8ToString()));
}
tlog.add(new Translog.Index("test", "" + 1, 1, Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
}
try {
Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO);
fail("file already exists?");
} catch (TranslogException ex) {
// all is well
assertEquals(ex.getMessage(), "failed to create new translog file");
assertEquals(ex.getCause().getClass(), FileAlreadyExistsException.class);
}
}
/**
* This test adds operations to the translog which might randomly throw an IOException. The only thing this test verifies is
* that we can, after we hit an exception, open and recover the translog successfully and retrieve all successfully synced operations
* from the transaction log.
*/
public void testWithRandomException() throws IOException {
final int runs = randomIntBetween(5, 10);
for (int run = 0; run < runs; run++) {
Path tempDir = createTempDir();
final FailSwitch fail = new FailSwitch();
fail.failRandomly();
TranslogConfig config = getTranslogConfig(tempDir);
final int numOps = randomIntBetween(100, 200);
long minGenForRecovery = 1;
List<String> syncedDocs = new ArrayList<>();
List<String> unsynced = new ArrayList<>();
if (randomBoolean()) {
fail.onceFailedFailAlways();
}
String generationUUID = null;
try {
boolean committing = false;
final Translog failableTLog = getFailableTranslog(fail, config, randomBoolean(), false, generationUUID, createTranslogDeletionPolicy());
try {
LineFileDocs lineFileDocs = new LineFileDocs(random()); //writes pretty big docs so we cross buffer boarders regularly
for (int opsAdded = 0; opsAdded < numOps; opsAdded++) {
String doc = lineFileDocs.nextDoc().toString();
failableTLog.add(new Translog.Index("test", "" + opsAdded, opsAdded, doc.getBytes(Charset.forName("UTF-8"))));
unsynced.add(doc);
if (randomBoolean()) {
failableTLog.sync();
syncedDocs.addAll(unsynced);
unsynced.clear();
}
if (randomFloat() < 0.1) {
failableTLog.sync(); // we have to sync here first otherwise we don't know if the sync succeeded if the commit fails
syncedDocs.addAll(unsynced);
unsynced.clear();
failableTLog.rollGeneration();
committing = true;
failableTLog.getDeletionPolicy().setMinTranslogGenerationForRecovery(failableTLog.currentFileGeneration());
failableTLog.trimUnreferencedReaders();
committing = false;
syncedDocs.clear();
}
}
// we survived all the randomness!!!
// lets close the translog and if it succeeds we are all synced again. If we don't do this we will close
// it in the finally block but miss to copy over unsynced docs to syncedDocs and fail the assertion down the road...
failableTLog.close();
syncedDocs.addAll(unsynced);
unsynced.clear();
} catch (TranslogException | MockDirectoryWrapper.FakeIOException ex) {
// fair enough
} catch (IOException ex) {
assertEquals(ex.getMessage(), "__FAKE__ no space left on device");
} catch (RuntimeException ex) {
assertEquals(ex.getMessage(), "simulated");
} finally {
Checkpoint checkpoint = Translog.readCheckpoint(config.getTranslogPath());
if (checkpoint.numOps == unsynced.size() + syncedDocs.size()) {
syncedDocs.addAll(unsynced); // failed in fsync but got fully written
unsynced.clear();
}
if (committing && checkpoint.minTranslogGeneration == checkpoint.generation) {
// we were committing and blew up in one of the syncs, but they made it through
syncedDocs.clear();
assertThat(unsynced, empty());
}
generationUUID = failableTLog.getTranslogUUID();
minGenForRecovery = failableTLog.getDeletionPolicy().getMinTranslogGenerationForRecovery();
IOUtils.closeWhileHandlingException(failableTLog);
}
} catch (TranslogException | MockDirectoryWrapper.FakeIOException ex) {
// failed - that's ok, we didn't even create it
} catch (IOException ex) {
assertEquals(ex.getMessage(), "__FAKE__ no space left on device");
}
// now randomly open this failing tlog again just to make sure we can also recover from failing during recovery
if (randomBoolean()) {
try {
TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy();
deletionPolicy.setMinTranslogGenerationForRecovery(minGenForRecovery);
IOUtils.close(getFailableTranslog(fail, config, randomBoolean(), false, generationUUID, deletionPolicy));
} catch (TranslogException | MockDirectoryWrapper.FakeIOException ex) {
// failed - that's ok, we didn't even create it
} catch (IOException ex) {
assertEquals(ex.getMessage(), "__FAKE__ no space left on device");
}
}
fail.failNever(); // we don't wanna fail here but we might since we write a new checkpoint and create a new tlog file
TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy();
deletionPolicy.setMinTranslogGenerationForRecovery(minGenForRecovery);
try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO)) {
Translog.Snapshot snapshot = translog.newSnapshot(minGenForRecovery);
assertEquals(syncedDocs.size(), snapshot.totalOperations());
for (int i = 0; i < syncedDocs.size(); i++) {
Translog.Operation next = snapshot.next();
assertEquals(syncedDocs.get(i), next.getSource().source.utf8ToString());
assertNotNull("operation " + i + " must be non-null", next);
}
}
}
}
private Checkpoint randomCheckpoint() {
final long a = randomNonNegativeLong();
final long b = randomNonNegativeLong();
final long minSeqNo;
final long maxSeqNo;
if (a <= b) {
minSeqNo = a;
maxSeqNo = b;
} else {
minSeqNo = b;
maxSeqNo = a;
}
final long generation = randomNonNegativeLong();
return new Checkpoint(randomLong(), randomInt(), generation, minSeqNo, maxSeqNo, randomNonNegativeLong(),
randomLongBetween(1, generation));
}
public void testCheckpointOnDiskFull() throws IOException {
final Checkpoint checkpoint = randomCheckpoint();
Path tempDir = createTempDir();
Checkpoint.write(FileChannel::open, tempDir.resolve("foo.cpk"), checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
final Checkpoint checkpoint2 = randomCheckpoint();
try {
Checkpoint.write((p, o) -> {
if (randomBoolean()) {
throw new MockDirectoryWrapper.FakeIOException();
}
FileChannel open = FileChannel.open(p, o);
FailSwitch failSwitch = new FailSwitch();
failSwitch.failNever(); // don't fail in the ctor
ThrowingFileChannel channel = new ThrowingFileChannel(failSwitch, false, false, open);
failSwitch.failAlways();
return channel;
}, tempDir.resolve("foo.cpk"), checkpoint2, StandardOpenOption.WRITE);
fail("should have failed earlier");
} catch (MockDirectoryWrapper.FakeIOException ex) {
//fine
}
Checkpoint read = Checkpoint.read(tempDir.resolve("foo.cpk"));
assertEquals(read, checkpoint);
}
/**
* Tests that closing views after the translog is fine and we can reopen the translog
*/
public void testPendingDelete() throws IOException {
translog.add(new Translog.Index("test", "1", 0, new byte[]{1}));
translog.rollGeneration();
TranslogConfig config = translog.getConfig();
final String translogUUID = translog.getTranslogUUID();
final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(config.getIndexSettings());
translog.close();
translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO);
translog.add(new Translog.Index("test", "2", 1, new byte[]{2}));
translog.rollGeneration();
Translog.View view = translog.newView();
translog.add(new Translog.Index("test", "3", 2, new byte[]{3}));
translog.close();
IOUtils.close(view);
translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbersService.UNASSIGNED_SEQ_NO);
}
public static Translog.Location randomTranslogLocation() {
return new Translog.Location(randomLong(), randomLong(), randomInt());
}
public void testTranslogOpSerialization() throws Exception {
BytesReference B_1 = new BytesArray(new byte[]{1});
SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
assert Version.CURRENT.major <= 6 : "Using UNASSIGNED_SEQ_NO can be removed in 7.0, because 6.0+ nodes have actual sequence numbers";
long randomSeqNum = randomBoolean() ? SequenceNumbersService.UNASSIGNED_SEQ_NO : randomNonNegativeLong();
long primaryTerm = randomSeqNum == SequenceNumbersService.UNASSIGNED_SEQ_NO ? 0 : randomIntBetween(1, 16);
long randomPrimaryTerm = randomBoolean() ? 0 : randomNonNegativeLong();
seqID.seqNo.setLongValue(randomSeqNum);
seqID.seqNoDocValue.setLongValue(randomSeqNum);
seqID.primaryTerm.setLongValue(randomPrimaryTerm);
Field uidField = new Field("_uid", Uid.createUid("test", "1"), UidFieldMapper.Defaults.FIELD_TYPE);
Field versionField = new NumericDocValuesField("_version", 1);
Document document = new Document();
document.add(new TextField("value", "test", Field.Store.YES));
document.add(uidField);
document.add(versionField);
document.add(seqID.seqNo);
document.add(seqID.seqNoDocValue);
document.add(seqID.primaryTerm);
ParsedDocument doc = new ParsedDocument(versionField, seqID, "1", "type", null, Arrays.asList(document), B_1, XContentType.JSON,
null);
Engine.Index eIndex = new Engine.Index(newUid(doc), doc, randomSeqNum, randomPrimaryTerm,
1, VersionType.INTERNAL, Origin.PRIMARY, 0, 0, false);
Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomSeqNum, true);
Translog.Index index = new Translog.Index(eIndex, eIndexResult);
BytesStreamOutput out = new BytesStreamOutput();
index.writeTo(out);
StreamInput in = out.bytes().streamInput();
Translog.Index serializedIndex = new Translog.Index(in);
assertEquals(index, serializedIndex);
Engine.Delete eDelete = new Engine.Delete(doc.type(), doc.id(), newUid(doc), randomSeqNum, randomPrimaryTerm,
2, VersionType.INTERNAL, Origin.PRIMARY, 0);
Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomSeqNum, true);
Translog.Delete delete = new Translog.Delete(eDelete, eDeleteResult);
out = new BytesStreamOutput();
delete.writeTo(out);
in = out.bytes().streamInput();
Translog.Delete serializedDelete = new Translog.Delete(in);
assertEquals(delete, serializedDelete);
// simulate legacy delete serialization
out = new BytesStreamOutput();
out.writeVInt(Translog.Delete.FORMAT_5_0);
out.writeString(UidFieldMapper.NAME);
out.writeString("my_type#my_id");
out.writeLong(3); // version
out.writeByte(VersionType.INTERNAL.getValue());
out.writeLong(2); // seq no
out.writeLong(0); // primary term
in = out.bytes().streamInput();
serializedDelete = new Translog.Delete(in);
assertEquals("my_type", serializedDelete.type());
assertEquals("my_id", serializedDelete.id());
}
public void testRollGeneration() throws Exception {
// make sure we keep some files around
final boolean longRetention = randomBoolean();
final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
if (longRetention) {
deletionPolicy.setRetentionAgeInMillis(3600 * 1000);
} else {
deletionPolicy.setRetentionAgeInMillis(-1);
}
// we control retention via time, disable size based calculations for simplicity
deletionPolicy.setRetentionSizeInBytes(-1);
final long generation = translog.currentFileGeneration();
final int rolls = randomIntBetween(1, 16);
int totalOperations = 0;
int seqNo = 0;
for (int i = 0; i < rolls; i++) {
final int operations = randomIntBetween(1, 128);
for (int j = 0; j < operations; j++) {
translog.add(new Translog.NoOp(seqNo++, 0, "test"));
totalOperations++;
}
try (ReleasableLock ignored = translog.writeLock.acquire()) {
translog.rollGeneration();
}
assertThat(translog.currentFileGeneration(), equalTo(generation + i + 1));
assertThat(translog.totalOperations(), equalTo(totalOperations));
}
for (int i = 0; i <= rolls; i++) {
assertFileIsPresent(translog, generation + i);
}
commit(translog, generation + rolls);
assertThat(translog.currentFileGeneration(), equalTo(generation + rolls ));
assertThat(translog.uncommittedOperations(), equalTo(0));
if (longRetention) {
for (int i = 0; i <= rolls; i++) {
assertFileIsPresent(translog, generation + i);
}
deletionPolicy.setRetentionAgeInMillis(randomBoolean() ? 100 : -1);
assertBusy(() -> {
translog.trimUnreferencedReaders();
for (int i = 0; i < rolls; i++) {
assertFileDeleted(translog, generation + i);
}
});
} else {
// immediate cleanup
for (int i = 0; i < rolls; i++) {
assertFileDeleted(translog, generation + i);
}
}
assertFileIsPresent(translog, generation + rolls);
}
public void testMinGenerationForSeqNo() throws IOException {
final int operations = randomIntBetween(1, 4096);
final List<Long> shuffledSeqNos = LongStream.range(0, operations).boxed().collect(Collectors.toList());
Randomness.shuffle(shuffledSeqNos);
final List<Tuple<Long, Long>> seqNos = new ArrayList<>();
final Map<Long, Long> terms = new HashMap<>();
for (final Long seqNo : shuffledSeqNos) {
seqNos.add(Tuple.tuple(seqNo, terms.computeIfAbsent(seqNo, k -> 0L)));
Long repeatingTermSeqNo = randomFrom(seqNos.stream().map(Tuple::v1).collect(Collectors.toList()));
seqNos.add(Tuple.tuple(repeatingTermSeqNo, terms.get(repeatingTermSeqNo)));
}
for (final Tuple<Long, Long> tuple : seqNos) {
translog.add(new Translog.NoOp(tuple.v1(), tuple.v2(), "test"));
if (rarely()) {
translog.rollGeneration();
}
}
Map<Long, Set<Tuple<Long, Long>>> generations = new HashMap<>();
// one extra roll to make sure that all ops so far are available via a reader and a translog-{gen}.ckp
// file in a consistent way, in order to simplify checking code.
translog.rollGeneration();
for (long seqNo = 0; seqNo < operations; seqNo++) {
final Set<Tuple<Long, Long>> seenSeqNos = new HashSet<>();
final long generation = translog.getMinGenerationForSeqNo(seqNo).translogFileGeneration;
for (long g = generation; g < translog.currentFileGeneration(); g++) {
if (!generations.containsKey(g)) {
final Set<Tuple<Long, Long>> generationSeenSeqNos = new HashSet<>();
final Checkpoint checkpoint = Checkpoint.read(translog.location().resolve(Translog.getCommitCheckpointFileName(g)));
try (TranslogReader reader = translog.openReader(translog.location().resolve(Translog.getFilename(g)), checkpoint)) {
Translog.Snapshot snapshot = reader.newSnapshot();
Translog.Operation operation;
while ((operation = snapshot.next()) != null) {
generationSeenSeqNos.add(Tuple.tuple(operation.seqNo(), operation.primaryTerm()));
}
}
generations.put(g, generationSeenSeqNos);
}
seenSeqNos.addAll(generations.get(g));
}
final long seqNoLowerBound = seqNo;
final Set<Tuple<Long, Long>> expected = seqNos.stream().filter(t -> t.v1() >= seqNoLowerBound).collect(Collectors.toSet());
seenSeqNos.retainAll(expected);
assertThat(seenSeqNos, equalTo(expected));
}
}
public void testSimpleCommit() throws IOException {
final int operations = randomIntBetween(1, 4096);
long seqNo = 0;
for (int i = 0; i < operations; i++) {
translog.add(new Translog.NoOp(seqNo++, 0, "test'"));
if (rarely()) {
translog.rollGeneration();
}
}
final long generation =
randomIntBetween(1, Math.toIntExact(translog.currentFileGeneration()));
commit(translog, generation);
}
public void testOpenViewIsPassToDeletionPolicy() throws IOException {
final int operations = randomIntBetween(1, 4096);
final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
for (int i = 0; i < operations; i++) {
translog.add(new Translog.NoOp(i, 0, "test"));
if (rarely()) {
translog.rollGeneration();
}
if (rarely()) {
commit(translog, randomLongBetween(deletionPolicy.getMinTranslogGenerationForRecovery(), translog.currentFileGeneration()));
}
if (frequently()) {
long viewGen;
try (Translog.View view = translog.newView()) {
viewGen = view.viewGenToRelease;
assertThat(deletionPolicy.getViewCount(view.viewGenToRelease), equalTo(1L));
}
assertThat(deletionPolicy.getViewCount(viewGen), equalTo(0L));
}
}
}
}
| naveenhooda2000/elasticsearch | core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java | Java | apache-2.0 | 121,631 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/es/model/AutoTune.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
namespace Aws
{
namespace ElasticsearchService
{
namespace Model
{
// Default constructor: the tune type starts at the NOT_SET sentinel and no
// field is marked as "has been set", so Jsonize() of a fresh object emits
// an empty payload.
AutoTune::AutoTune() : 
    m_autoTuneType(AutoTuneType::NOT_SET),
    m_autoTuneTypeHasBeenSet(false),
    m_autoTuneDetailsHasBeenSet(false)
{
}
// JSON constructor: delegate to the default constructor for the baseline
// (NOT_SET / nothing-set) state, then pull in whatever fields the document
// provides via operator=.
AutoTune::AutoTune(JsonView jsonValue) :
    AutoTune()
{
  *this = jsonValue;
}
// Overwrites only the fields present in the supplied JSON document; a field
// absent from the document keeps its current value and its "has been set"
// flag unchanged. (The two lookups are independent, so their order does not
// matter here.)
AutoTune& AutoTune::operator =(JsonView jsonValue)
{
  if(jsonValue.ValueExists("AutoTuneDetails"))
  {
    m_autoTuneDetails = jsonValue.GetObject("AutoTuneDetails");
    m_autoTuneDetailsHasBeenSet = true;
  }

  if(jsonValue.ValueExists("AutoTuneType"))
  {
    m_autoTuneType = AutoTuneTypeMapper::GetAutoTuneTypeForName(jsonValue.GetString("AutoTuneType"));
    m_autoTuneTypeHasBeenSet = true;
  }

  return *this;
}
// Serializes only the fields that were explicitly set. The emission order
// (AutoTuneType, then AutoTuneDetails) is kept identical to the original so
// the serialized bytes do not change.
JsonValue AutoTune::Jsonize() const
{
  JsonValue doc;

  if(m_autoTuneTypeHasBeenSet)
  {
    doc.WithString("AutoTuneType", AutoTuneTypeMapper::GetNameForAutoTuneType(m_autoTuneType));
  }

  if(m_autoTuneDetailsHasBeenSet)
  {
    doc.WithObject("AutoTuneDetails", m_autoTuneDetails.Jsonize());
  }

  return doc;
}
} // namespace Model
} // namespace ElasticsearchService
} // namespace Aws
| aws/aws-sdk-cpp | aws-cpp-sdk-es/source/model/AutoTune.cpp | C++ | apache-2.0 | 1,529 |
<?php
/************************************************************************/
/* PHP-NUKE: Advanced Content Management System */
/* ============================================ */
/* */
/* Copyright (c) 2006 by Francisco Burzi */
/* http://phpnuke.org */
/* */
/* This program is free software. You can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License. */
/************************************************************************/
# Bail out with a redirect if this include file is requested directly.
if (stristr(htmlentities($_SERVER['PHP_SELF']), "header.php")) {
Header("Location: index.php");
die();
}
$preloader = 1; // 1 = show the full-screen "loading" overlay while the page renders
define('NUKE_HEADER', true);
@require_once("mainfile.php");
global $db ,$prefix ,$gtset ,$admin ,$adminmail;
# NOTE(review): nextGenTap() is defined outside this file (presumably in
# mainfile.php); its purpose is not visible here — confirm before changing.
if ($gtset == "1") {
nextGenTap(1,0,0);
}
# Sanity check: the core settings table must be readable; otherwise send the
# visitor to the upgrade script.
if(!$row = $db->sql_fetchrow($db->sql_query("SELECT * from ".$prefix."_nukesql"))){
die(header("location: upgrade.php"));
}
##################################################
# Include some common header for HTML generation #
##################################################
global $pagetitle;
# Admin pages use their own header; everything below the else{} is the
# public-facing header path (the else is closed near the end of this file).
if(defined("ADMIN_FILE")){
adminheader($pagetitle);
}else{
global $name, $nukeurl, $xdemailset,$xtouch,$sitecookies;
// Remember the page the user is currently on (e.g. so they can be returned
// to it after logging in), except inside the Your_Account module itself.
if($name != "Your_Account"){
$currentpagelink = $_SERVER['REQUEST_URI'];
# Rebuild the site base URL (scheme + host) from $nukeurl by keeping only
# the first three path segments of the exploded URL.
$arr_nukeurl = explode("/",$nukeurl);
$arr_nukeurl = array_filter($arr_nukeurl);
foreach($arr_nukeurl as $key => $values){
if($key > 2){
unset($arr_nukeurl[$key]);
}
}
$arr_nukeurl = array_filter($arr_nukeurl);
$new_nukeurl = $arr_nukeurl[0]."//".$arr_nukeurl[2];
$currentpage = $new_nukeurl.$currentpagelink;
# Cookie lives for 30 minutes.
nuke_set_cookie("currentpage",$currentpage,time()+1800);
}
# Define the HTML <head>/header emitter unless a theme has already supplied one.
if(!function_exists('head')){
# Emits the doctype, the <head> section (title, meta, javascript, favicon),
# the optional preloader overlay, and finally the theme's visual header.
# Output is echoed directly to the browser; relies on many PHP-Nuke globals.
function head() {
global $slogan, $name, $sitename, $banners, $nukeurl, $Version_Num, $ab_config, $artpage, $topic, $hlpfile, $user, $hr, $theme, $cookie, $bgcolor1, $bgcolor2, $bgcolor3, $bgcolor4, $textcolor1, $textcolor2, $forumpage, $adminpage, $userpage, $pagetitle, $pagetags, $align, $preloader, $anonymous;
$ThemeSel = get_theme();
theme_lang();
echo "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n";
echo "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n";
echo "<head>\n";
echo"<title>$sitename $pagetitle</title>";
@include("includes/meta.php");
@include("includes/javascript.php");
# Per-theme favicon, emitted only when the file actually exists.
if (file_exists("themes/$ThemeSel/images/favicon.ico")) {
echo "<link rel=\"shortcut icon\" href=\"themes/$ThemeSel/images/favicon.ico\" type=\"image/x-icon\" />\n";
}
echo "</head>\n";
# Full-screen "please wait" overlay; hidden by showWaitm() once the page loads
# (or when the user clicks it).
if($preloader == 1){
echo "<div id=\"waitDiv\" onclick=\"this.style.visibility='hidden'\" style=\" direction:rtl; text-align:center; line-height:17px; position:fixed; z-index:1000; width:100%; height:100%; background-color:#000; color:#fff; font-size:11px; margin:0px auto;\"><div style=\"position:fixed; top:100px; left:100px \" id=\"loadingimage\" ><img src=\"images/loader.gif\" /></div></div>";
echo "<script>showWaitm('waitDiv', 1);</script>\n";
echo "</div>\n";
}
# NOTE(review): $u_agent is assigned but never used inside this function —
# looks like dead code; confirm before removing.
$u_agent = $_SERVER['HTTP_USER_AGENT'];
themeheader();
}
}
online();
# Site-disable ("xdisable") gate: admins (matched against the admin cookie)
# bypass it; everyone else may get the maintenance theme instead of the site.
# NOTE(review): $_COOKIE['admin'] is read without isset() — emits a notice
# when the cookie is absent; confirm intended.
if(isset($admin) && $admin == $_COOKIE['admin']){}else{
# Returns the value column of the xdisable row with the given id.
# $nuim is forced to int, so the interpolated SQL below is not injectable here.
function xdvs($nuim){
global $prefix, $db, $dbname;
$nuim=intval($nuim);
$result = $db->sql_query("SELECT * FROM `" . $prefix . "_xdisable` WHERE `xdid` =$nuim LIMIT 0 , 1");
while ($row = $db->sql_fetchrow($result)) {
mb_internal_encoding('UTF-8');
$xdid = intval($row['xdid']);
$xdname = $row['xdname'];
$xdvalue = $row['xdvalue'];
}
return $xdvalue;
}
# Stores a visitor e-mail address collected while the site is disabled.
# NOTE(review): $nuim is interpolated into SQL; the only caller validates it
# with FILTER_VALIDATE_EMAIL first, but a parameterized query would be safer.
function xduemails($nuim){
global $prefix, $db, $dbname;
$db->sql_query("INSERT INTO `$dbname`.`" . $prefix . "_xdisable` (`xdid`, `xdname`, `xdvalue`) VALUES (NULL, 'xduemail', '$nuim');");
}
# xdvs(1): 1 = site disabled. xdvs(2): maintenance theme name.
if(xdvs(1)==1){
# Save a submitted e-mail address (only if it validates as an e-mail).
if(isset($xdemailset)){
if (filter_var($xdemailset, FILTER_VALIDATE_EMAIL)) {
xduemails($xdemailset);
}else{
$xdemailset=0;
}
}
$xdtheme=xdvs(2);
if($xdtheme=="default"){
xdisable_theme();
}else{
@include("includes/xdisable/$xdtheme/xdtheme.php");
xdisable_theme();
}
}
}
# Sends a raw Set-Cookie header (HttpOnly, scoped to $sitecookies).
# A falsy $cookiedata means "delete": the expiry is back-dated one hour.
# A falsy $cookietime produces a session cookie (no expires attribute).
function xtscookie($name, $cookiedata, $cookietime){
    global $sitecookies;
    if($cookiedata == false){
        $cookietime = time()-3600;
    }
    $pair = rawurlencode($name) . '=' . rawurlencode($cookiedata);
    $when = gmdate('D, d-M-Y H:i:s \\G\\M\\T', $cookietime);
    $header = 'Set-Cookie: ' . $pair;
    if($cookietime){
        $header .= '; expires=' . $when;
    }
    $header .= '; path=' . $sitecookies . '; HttpOnly';
    # Second argument false: do not replace other Set-Cookie headers.
    @header($header, false);
}
# Persist the user's explicit mobile/desktop choice (?xtouch=...) for one
# week, then bounce back to the page they came from.
if(isset($xtouch)){
$expire = time()+60*60*24*7;
xtscookie("xtouch-set", $xtouch, $expire);
# NOTE(review): HTTP_REFERER may be unset (notice) and is client-controlled;
# redirecting to it is a potential open-redirect — confirm acceptable.
if($_SERVER['HTTP_REFERER']==""){
Header("Location: index.php");
} else {
header('Location:' . $_SERVER['HTTP_REFERER']);
}
}
// --- Mobile browser detection -------------------------------------------
// Scores several independent signals; any positive score (or an explicit
// "xtouch-set" cookie, checked just below this block) switches the site
// into its mobile (Xtouch) mode.
$mobile_browser = 0;

// Signal 1: well-known mobile tokens anywhere in the User-Agent.
if (@preg_match('/(up.browser|up.link|mmp|symbian|smartphone|midp|wap|phone)/i',
        strtolower($_SERVER['HTTP_USER_AGENT']))) {
    $mobile_browser++;
}

// Signal 2: client accepts WAP content, or sends a WAP profile header.
// (isset() guard avoids a notice when HTTP_ACCEPT is absent; strpos on an
// empty string can never be > 0, so the logic is unchanged.)
if ((isset($_SERVER['HTTP_ACCEPT'])
        && strpos(strtolower($_SERVER['HTTP_ACCEPT']), 'application/vnd.wap.xhtml+xml') > 0)
    or (isset($_SERVER['HTTP_X_WAP_PROFILE']) or isset($_SERVER['HTTP_PROFILE']))) {
    $mobile_browser++;
}

// Signal 3: the first four UA characters match a known mobile vendor prefix.
$mobile_ua = strtolower(substr($_SERVER['HTTP_USER_AGENT'], 0, 4));
$mobile_agents = array(
    'w3c ','acs-','alav','alca','amoi','audi','avan','benq','bird','blac',
    'blaz','brew','cell','cldc','cmd-','dang','doco','eric','hipt','inno',
    'ipaq','java','jigs','kddi','keji','leno','lg-c','lg-d','lg-g','lge-',
    'maui','maxo','midp','mits','mmef','mobi','mot-','moto','mwbp','nec-',
    'newt','noki','oper','palm','pana','pant','phil','play','port','prox',
    'qwap','sage','sams','sany','sch-','sec-','send','seri','sgh-','shar',
    'sie-','siem','smal','smar','sony','sph-','symb','t-mo','teli','tim-',
    'tosh','tsm-','upg1','upsi','vk-v','voda','wap-','wapa','wapi','wapp',
    'wapr','webc','winw','winw','xda','xda-'); // 'winw' duplicated upstream; harmless for in_array()
if (in_array($mobile_ua, $mobile_agents)) {
    $mobile_browser++;
}

// Signal 4: Opera Mini advertised in the raw header blob (ALL_HTTP is only
// populated on some servers, hence the isset guard).
// BUGFIX: the needle was 'OperaMini' (mixed case), which can never occur in
// a strtolower()'d haystack, so this check could never fire. Lower-cased so
// it can actually match.
if (isset($_SERVER['ALL_HTTP'])
        && strpos(strtolower($_SERVER['ALL_HTTP']), 'operamini') > 0) {
    $mobile_browser++;
}

// A desktop Windows UA overrides every mobile signal.
if (strpos(strtolower($_SERVER['HTTP_USER_AGENT']), 'windows') > 0) {
    $mobile_browser = 0;
}
# Mobile rendering path: enter Xtouch mode if any detection signal fired or
# the user opted in via the xtouch-set cookie.
if($mobile_browser>0 OR $_COOKIE['xtouch-set']==1){
require_once("XMSConfig.lib.php");
# Pull Xtouch configuration entries from the site map; each call returns an
# array of (?, control-type, label, value). The labels are the Persian admin
# setting names and must match the stored configuration exactly.
$xcall1=xsitemapitemcall("radius","حالت فعال بودن ماژول تاچ");
$xcallt=xsitemapitemcall("select","پوسته ماژول تاچ");
$xcallsm=xsitemapitemcall("select","منو در Xtouch");
$xcallst=xsitemapitemcall("text","عنوان سایت در حالت موبایل");
$xcallss=xsitemapitemcall("radius","نمایش لینک دسکتاپ");
$xcallsmm=xsitemapitemcall("checkbox","فعال بودن ماژول های تاچ");
# Proceed only when the "enabled" entry is well-formed and 0 or 1.
if($xcall1[1]=="radius" AND $xcall1[2]=="حالت فعال بودن ماژول تاچ" AND $xcall1[3]!==""){
if($xcall1[3]==0 OR $xcall1[3]==1){
# Unpack each setting, falling back to sensible defaults below.
if($xcallsmm[1]=="checkbox" AND $xcallsmm[2]=="فعال بودن ماژول های تاچ"){$xtmodules=$xcallsmm[3];}
if($xcallt[1]=="select" AND $xcallt[2]=="پوسته ماژول تاچ"){$xttheme=$xcallt[3];}
if($xcallt[3]==""){$xttheme="default";}
if($xcallsm[1]=="select" AND $xcallsm[2]=="منو در Xtouch" AND $xcallsm[3]!==""){$xtxmmenu=$xcallsm[3];}
if($xcallst[1]=="text" AND $xcallst[2]=="عنوان سایت در حالت موبایل" AND $xcallst[3]!==""){$xtstitle=$xcallst[3];}
if($xcallss[1]=="radius" AND $xcallss[2]=="نمایش لینک دسکتاپ" AND $xcallss[3]!==""){$swichers=$xcallss[3];}
if($xcallst[3]==""){$xtstitle=$sitename;}
# Mode 1 = admin-preview only; mode 0 = Xtouch for everyone.
if($xcall1[3]==1 AND is_admin($admin)){
if($xttheme=="default"){}else{include("modules/Xtouch/themes/$xttheme/xttheme.php");}
xttheme($xtstitle,$xtxmmenu,$swichers,$xtmodules);
}elseif($xcall1[3]==0){
if($xttheme=="default"){}else{include("modules/Xtouch/themes/$xttheme/xttheme.php");}
xttheme($xtstitle,$xtxmmenu,$swichers,$xtmodules);
}
}
}
}
# Emit the themed header, the hit counter, and — on the home page only —
# the message box and the centered blocks.
head();
@include("includes/counter.php");
if(defined('HOME_FILE')) {
message_box();
blocks("Center");
}
# Closes the non-admin else{} branch opened near the top of this file.
}
?> | xtoolkit/XMPN | Xtouch/header2.php | PHP | apache-2.0 | 8,225 |
// -*- mode: java; c-basic-offset: 2; -*-
// Copyright 2009-2011 Google, All Rights reserved
// Copyright 2011-2020 MIT, All rights reserved
// Released under the Apache License, Version 2.0
// http://www.apache.org/licenses/LICENSE-2.0
package com.google.appinventor.client.wizards;
import static com.google.appinventor.client.Ode.MESSAGES;
import com.allen_sauer.gwt.dnd.client.util.StringUtil;
import com.google.appinventor.client.Ode;
import com.google.appinventor.client.OdeAsyncCallback;
import com.google.appinventor.client.editor.simple.SimpleComponentDatabase;
import com.google.appinventor.client.editor.youngandroid.YaProjectEditor;
import com.google.appinventor.client.explorer.project.Project;
import com.google.appinventor.client.output.OdeLog;
import com.google.appinventor.common.utils.StringUtils;
import com.google.appinventor.client.utils.Uploader;
import com.google.appinventor.shared.rpc.ServerLayout;
import com.google.appinventor.shared.rpc.UploadResponse;
import com.google.appinventor.shared.rpc.component.Component;
import com.google.appinventor.shared.rpc.component.ComponentImportResponse;
import com.google.appinventor.shared.rpc.project.ProjectNode;
import com.google.appinventor.shared.rpc.project.youngandroid.YoungAndroidAssetsFolder;
import com.google.appinventor.shared.rpc.project.youngandroid.YoungAndroidComponentsFolder;
import com.google.appinventor.shared.rpc.project.youngandroid.YoungAndroidProjectNode;
import com.google.gwt.core.client.GWT;
import com.google.gwt.user.cellview.client.CellTable;
import com.google.gwt.user.cellview.client.Column;
import com.google.gwt.user.client.Command;
import com.google.gwt.cell.client.CheckboxCell;
import com.google.gwt.cell.client.NumberCell;
import com.google.gwt.cell.client.TextCell;
import com.google.gwt.user.client.Window;
import com.google.gwt.user.client.ui.FileUpload;
import com.google.gwt.user.client.ui.Grid;
import com.google.gwt.user.client.ui.Label;
import com.google.gwt.user.client.ui.TabPanel;
import com.google.gwt.user.client.ui.TextBox;
import com.google.gwt.user.client.ui.VerticalPanel;
import com.google.gwt.view.client.ListDataProvider;
import com.google.gwt.view.client.SingleSelectionModel;
import java.util.List;
/**
 * Wizard that lets the user import an external component (.aix) into the
 * current project, either by uploading a file from the local machine or by
 * supplying a URL.
 */
public class ComponentImportWizard extends Wizard {

  final static String external_components = "assets/external_comps/";

  /**
   * Callback invoked when the server finishes importing a component.
   * Reports failures/upgrades to the user and, on success, attaches the
   * imported component nodes to the destination project (if it is still the
   * open project).
   */
  public static class ImportComponentCallback extends OdeAsyncCallback<ComponentImportResponse> {
    @Override
    public void onSuccess(ComponentImportResponse response) {
      if (response.getStatus() == ComponentImportResponse.Status.FAILED) {
        Window.alert(MESSAGES.componentImportError() + "\n" + response.getMessage());
        return;
      }
      // BUGFIX: UNKNOWN_URL must be checked before the generic
      // "not imported/upgraded" catch-all below; previously the catch-all ran
      // first, so this specific message was unreachable dead code. Returning
      // here preserves the original effective behavior (abort on this status)
      // while showing the intended message.
      else if (response.getStatus() == ComponentImportResponse.Status.UNKNOWN_URL) {
        Window.alert(MESSAGES.componentImportUnknownURLError());
        return;
      }
      else if (response.getStatus() != ComponentImportResponse.Status.IMPORTED &&
          response.getStatus() != ComponentImportResponse.Status.UPGRADED) {
        Window.alert(MESSAGES.componentImportError());
        return;
      }
      else if (response.getStatus() == ComponentImportResponse.Status.UPGRADED) {
        // List the upgraded component types, then fall through to attach nodes.
        StringBuilder sb = new StringBuilder(MESSAGES.componentUpgradedAlert());
        for (String name : response.getComponentTypes().values()) {
          sb.append("\n");
          sb.append(name);
        }
        Window.alert(sb.toString());
      }

      List<ProjectNode> compNodes = response.getNodes();
      long destinationProjectId = response.getProjectId();
      long currentProjectId = ode.getCurrentYoungAndroidProjectId();
      if (currentProjectId != destinationProjectId) {
        return; // User switched project early!
      }
      Project project = ode.getProjectManager().getProject(destinationProjectId);
      if (project == null) {
        return; // Project does not exist!
      }
      if (response.getStatus() == ComponentImportResponse.Status.UPGRADED ||
          response.getStatus() == ComponentImportResponse.Status.IMPORTED) {
        YoungAndroidComponentsFolder componentsFolder = ((YoungAndroidProjectNode) project.getRootNode()).getComponentsFolder();
        YaProjectEditor projectEditor = (YaProjectEditor) ode.getEditorManager().getOpenProjectEditor(destinationProjectId);
        if (projectEditor == null) {
          return; // Project is not open!
        }
        for (ProjectNode node : compNodes) {
          project.addNode(componentsFolder, node);
          // Register the component descriptor: a component(s).json exactly three
          // path segments deep identifies the extension's descriptor file.
          if ((node.getName().equals("component.json") || node.getName().equals("components.json"))
              && StringUtils.countMatches(node.getFileId(), "/") == 3) {
            projectEditor.addComponent(node, null);
          }
        }
      }
    }
  }

  // Tab indices inside the wizard's TabPanel. Made final: they are constants
  // and were previously mutable statics.
  private static final int FROM_MY_COMPUTER_TAB = 0;
  private static final int URL_TAB = 1;

  private static final String COMPONENT_ARCHIVE_EXTENSION = ".aix";

  private static final Ode ode = Ode.getInstance();

  public ComponentImportWizard() {
    super(MESSAGES.componentImportWizardCaption(), true, false);

    // NOTE: a CellTable was previously created here (createCompTable()) but
    // never added to the UI; the dead local has been removed.
    final FileUpload fileUpload = createFileUpload();
    final Grid urlGrid = createUrlGrid();

    final TabPanel tabPanel = new TabPanel();
    tabPanel.add(fileUpload, MESSAGES.componentImportFromComputer());
    tabPanel.add(urlGrid, MESSAGES.componentImportFromURL());
    tabPanel.selectTab(FROM_MY_COMPUTER_TAB);
    tabPanel.addStyleName("ode-Tabpanel");

    VerticalPanel panel = new VerticalPanel();
    panel.add(tabPanel);

    addPage(panel);
    getConfirmButton().setText("Import");
    setPagePanelHeight(150);
    setPixelSize(200, 150);
    setStylePrimaryName("ode-DialogBox");

    initFinishCommand(new Command() {
      @Override
      public void execute() {
        final long projectId = ode.getCurrentYoungAndroidProjectId();
        final Project project = ode.getProjectManager().getProject(projectId);
        final YoungAndroidAssetsFolder assetsFolderNode =
            ((YoungAndroidProjectNode) project.getRootNode()).getAssetsFolder();
        if (tabPanel.getTabBar().getSelectedTab() == URL_TAB) {
          // Import by URL: the server fetches the archive itself.
          TextBox urlTextBox = (TextBox) urlGrid.getWidget(1, 0);
          String url = urlTextBox.getText();
          if (url.trim().isEmpty()) {
            Window.alert(MESSAGES.noUrlError());
            return;
          }
          ode.getComponentService().importComponentToProject(url, projectId,
              assetsFolderNode.getFileId(), new ImportComponentCallback());
        } else if (tabPanel.getTabBar().getSelectedTab() == FROM_MY_COMPUTER_TAB) {
          // Import by upload: push the .aix to the upload servlet first, then
          // ask the server to import the uploaded temp file.
          if (!fileUpload.getFilename().endsWith(COMPONENT_ARCHIVE_EXTENSION)) {
            Window.alert(MESSAGES.notComponentArchiveError());
            return;
          }
          String url = GWT.getModuleBaseURL() +
              ServerLayout.UPLOAD_SERVLET + "/" +
              ServerLayout.UPLOAD_COMPONENT + "/" +
              trimLeadingPath(fileUpload.getFilename());
          Uploader.getInstance().upload(fileUpload, url,
              new OdeAsyncCallback<UploadResponse>() {
                @Override
                public void onSuccess(UploadResponse uploadResponse) {
                  String toImport = uploadResponse.getInfo();
                  ode.getComponentService().importComponentToProject(toImport, projectId,
                      assetsFolderNode.getFileId(), new ImportComponentCallback());
                }
              });
          return;
        }
      }

      private String trimLeadingPath(String filename) {
        // Strip leading path off filename.
        // We need to support both Unix ('/') and Windows ('\\') separators.
        return filename.substring(Math.max(filename.lastIndexOf('/'), filename.lastIndexOf('\\')) + 1);
      }
    });
  }

  /**
   * Builds a (currently unused) selectable table of components with checkbox,
   * name and version columns. Return type parameterized (was raw CellTable).
   */
  private CellTable<Component> createCompTable() {
    final SingleSelectionModel<Component> selectionModel =
        new SingleSelectionModel<Component>();

    CellTable<Component> compTable = new CellTable<Component>();
    compTable.setSelectionModel(selectionModel);

    Column<Component, Boolean> checkColumn =
        new Column<Component, Boolean>(new CheckboxCell(true, false)) {
          @Override
          public Boolean getValue(Component comp) {
            return selectionModel.isSelected(comp);
          }
        };
    Column<Component, String> nameColumn =
        new Column<Component, String>(new TextCell()) {
          @Override
          public String getValue(Component comp) {
            return comp.getName();
          }
        };
    Column<Component, Number> versionColumn =
        new Column<Component, Number>(new NumberCell()) {
          @Override
          public Number getValue(Component comp) {
            return comp.getVersion();
          }
        };

    compTable.addColumn(checkColumn);
    compTable.addColumn(nameColumn, "Component");
    compTable.addColumn(versionColumn, "Version");

    return compTable;
  }

  /** Builds the "import from URL" tab: a label above a full-width text box. */
  private Grid createUrlGrid() {
    TextBox urlTextBox = new TextBox();
    urlTextBox.setWidth("100%");
    Grid grid = new Grid(2, 1);
    grid.setWidget(0, 0, new Label("Url:"));
    grid.setWidget(1, 0, urlTextBox);
    return grid;
  }

  /** Builds the file-upload widget, restricted to .aix archives. */
  private FileUpload createFileUpload() {
    FileUpload upload = new FileUpload();
    upload.setName(ServerLayout.UPLOAD_COMPONENT_ARCHIVE_FORM_ELEMENT);
    upload.getElement().setAttribute("accept", COMPONENT_ARCHIVE_EXTENSION);
    return upload;
  }
}
| halatmit/appinventor-sources | appinventor/appengine/src/com/google/appinventor/client/wizards/ComponentImportWizard.java | Java | apache-2.0 | 9,473 |
/*************************GO-LICENSE-START*********************************
* Copyright 2014 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*************************GO-LICENSE-END***********************************/
package com.thoughtworks.go.domain.materials;
import com.thoughtworks.go.config.materials.SubprocessExecutionContext;
import com.thoughtworks.go.remote.AgentIdentifier;
import com.thoughtworks.go.util.CachedDigestUtils;
import java.util.HashMap;
import java.util.Map;
/**
 * Agent-side {@link SubprocessExecutionContext}: namespaces subprocesses by
 * material fingerprint, agent UUID and working directory so concurrent
 * material updates on the same agent do not collide.
 */
class AgentSubprocessExecutionContext implements SubprocessExecutionContext {
    // Made final for consistency with workingDirectory: assigned once in the
    // constructor and never reassigned.
    private final AgentIdentifier agentIdentifier;
    private final String workingDirectory;

    AgentSubprocessExecutionContext(final AgentIdentifier agentIdentifier, String workingDirectory) {
        this.agentIdentifier = agentIdentifier;
        this.workingDirectory = workingDirectory;
    }

    // @Override added for consistency with getDefaultEnvironmentVariables():
    // both implement the SubprocessExecutionContext contract.
    @Override
    public String getProcessNamespace(String fingerprint) {
        // SHA-256 over fingerprint + agent UUID + working dir gives a stable,
        // collision-resistant namespace key.
        return CachedDigestUtils.sha256Hex(fingerprint + agentIdentifier.getUuid() + workingDirectory);
    }

    @Override
    public Map<String, String> getDefaultEnvironmentVariables() {
        // Agents contribute no extra environment variables by default.
        return new HashMap<>();
    }
}
| xli/gocd | common/src/com/thoughtworks/go/domain/materials/AgentSubprocessExecutionContext.java | Java | apache-2.0 | 1,684 |
package com.wangjie.rapidrouter;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* Example local unit test, which will execute on the development machine (host).
*
* @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
*/
public class ExampleUnitTest {
@Test
public void addition_isCorrect() throws Exception {
    // Trivial sanity check that the local JVM test harness runs at all.
    final int sum = 2 + 2;
    assertEquals(4, sum);
}
} | wangjiegulu/RapidRouter | library/src/test/java/com/wangjie/rapidrouter/ExampleUnitTest.java | Java | apache-2.0 | 401 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.beam.sdk.coders;
import static org.hamcrest.Matchers.equalTo;
import static org.junit.Assert.assertThat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.beam.sdk.testing.CoderProperties;
import org.apache.beam.sdk.transforms.windowing.GlobalWindow;
import org.apache.beam.sdk.util.CoderUtils;
import org.apache.beam.sdk.values.TypeDescriptor;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/** Unit tests for {@link ListCoder}. */
@RunWith(JUnit4.class)
public class ListCoderTest {

  // Coder under test: List<Integer> backed by variable-length int encoding.
  private static final Coder<List<Integer>> TEST_CODER = ListCoder.of(VarIntCoder.of());

  // Representative values: empty list, singleton, fixed-size list, mutable
  // ArrayList. Order matters: it must line up with TEST_ENCODINGS below.
  private static final List<List<Integer>> TEST_VALUES =
      Arrays.asList(
          Collections.emptyList(),
          Collections.singletonList(43),
          Arrays.asList(1, 2, 3, 4),
          new ArrayList<>(Arrays.asList(7, 6, 5)));

  @Test
  public void testCoderIsSerializableWithWellKnownCoderType() throws Exception {
    CoderProperties.coderSerializable(ListCoder.of(GlobalWindow.Coder.INSTANCE));
  }

  // Round-trip each test value and check element order is preserved.
  @Test
  public void testDecodeEncodeContentsInSameOrder() throws Exception {
    for (List<Integer> value : TEST_VALUES) {
      CoderProperties.coderDecodeEncodeContentsInSameOrder(TEST_CODER, value);
    }
  }

  @Test
  public void testEmptyList() throws Exception {
    List<Integer> list = Collections.emptyList();
    Coder<List<Integer>> coder = ListCoder.of(VarIntCoder.of());
    CoderProperties.coderDecodeEncodeEqual(coder, list);
  }

  @Test
  public void testCoderSerializable() throws Exception {
    CoderProperties.coderSerializable(TEST_CODER);
  }

  /**
   * Generated data to check that the wire format has not changed. To regenerate, see
   * {@link org.apache.beam.sdk.coders.PrintBase64Encodings}.
   */
  // One base64 string per entry of TEST_VALUES, in the same order.
  private static final List<String> TEST_ENCODINGS = Arrays.asList(
      "AAAAAA",
      "AAAAASs",
      "AAAABAECAwQ",
      "AAAAAwcGBQ");

  @Test
  public void testWireFormatEncode() throws Exception {
    CoderProperties.coderEncodesBase64(TEST_CODER, TEST_VALUES, TEST_ENCODINGS);
  }

  @Rule
  public ExpectedException thrown = ExpectedException.none();

  // A null List itself is not encodable.
  @Test
  public void encodeNullThrowsCoderException() throws Exception {
    thrown.expect(CoderException.class);
    thrown.expectMessage("cannot encode a null List");

    CoderUtils.encodeToBase64(TEST_CODER, null);
  }

  // Null elements fail with VarIntCoder (which cannot represent null)...
  @Test
  public void testListWithNullsAndVarIntCoderThrowsException() throws Exception {
    thrown.expect(CoderException.class);
    thrown.expectMessage("cannot encode a null Integer");

    List<Integer> list = Arrays.asList(1, 2, 3, null, 4);
    Coder<List<Integer>> coder = ListCoder.of(VarIntCoder.of());
    CoderProperties.coderDecodeEncodeEqual(coder, list);
  }

  // ...but succeed with SerializableCoder, which handles null via Java
  // serialization.
  @Test
  public void testListWithNullsAndSerializableCoder() throws Exception {
    List<Integer> list = Arrays.asList(1, 2, 3, null, 4);
    Coder<List<Integer>> coder = ListCoder.of(SerializableCoder.of(Integer.class));
    CoderProperties.coderDecodeEncodeEqual(coder, list);
  }

  @Test
  public void testEncodedTypeDescriptor() throws Exception {
    TypeDescriptor<List<Integer>> typeDescriptor = new TypeDescriptor<List<Integer>>() {};
    assertThat(TEST_CODER.getEncodedTypeDescriptor(), equalTo(typeDescriptor));
  }
}
| tgroh/incubator-beam | sdks/java/core/src/test/java/org/apache/beam/sdk/coders/ListCoderTest.java | Java | apache-2.0 | 4,233 |