code
stringlengths
3
1.05M
repo_name
stringlengths
4
116
path
stringlengths
4
991
language
stringclasses
9 values
license
stringclasses
15 values
size
int32
3
1.05M
/*
 * Copyright (C) 2011 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package com.googlecode.eyesfree.textdetect;

import android.os.Environment;

import com.googlecode.leptonica.android.Pix;
import com.googlecode.leptonica.android.Pixa;

/**
 * JNI wrapper around the native "hydrogen" text-detection library. A detector
 * instance owns a native peer object (created in the constructor, destroyed in
 * {@link #finalize()}); all detection work is delegated to native methods that
 * take the peer handle as their first argument.
 *
 * <p>Typical usage: {@link #setSourceImage(Pix)}, then {@link #detectText()},
 * then read results via {@link #getTextAreas()} / {@link #getTextConfs()}.
 *
 * <p>NOTE(review): this class is not synchronized; concurrent use of one
 * instance from multiple threads is presumably unsafe — confirm with the
 * native implementation.
 *
 * @author alanv@google.com (Alan Viverette)
 */
public class HydrogenTextDetector {
    // Opaque handle to the native peer. Stored as a 32-bit int, so this
    // binding assumes 32-bit native pointers (typical of older Android ABIs).
    private final int mNative;

    static {
        // Leptonica must be loaded before hydrogen, which links against it.
        System.loadLibrary("lept");
        System.loadLibrary("hydrogen");
    }

    // Last parameter set pushed to the native side via setParameters().
    private Parameters mParams;

    /**
     * Creates the native peer and pushes a default {@link Parameters} set to it.
     */
    public HydrogenTextDetector() {
        mNative = nativeConstructor();

        mParams = new Parameters();
        setParameters(mParams);
    }

    /**
     * Intended to size native buffers for the given image dimensions.
     * Currently a no-op.
     *
     * @param width source image width in pixels
     * @param height source image height in pixels
     */
    public void setSize(int width, int height) {
        // TODO(alanv): Set up native buffers
    }

    @Override
    protected void finalize() throws Throwable {
        // Release the native peer when this object is garbage collected.
        // Finalization order/timing is not guaranteed, so native resources may
        // outlive the last Java reference for a while.
        try {
            nativeDestructor(mNative);
        } finally {
            super.finalize();
        }
    }

    /**
     * Replaces the current tuning parameters and pushes them to the native
     * detector. The native side presumably reads the public fields of
     * {@link Parameters} by name via JNI — do not rename them.
     *
     * @param params the new parameter set (retained by this object)
     */
    public void setParameters(Parameters params) {
        mParams = params;
        nativeSetParameters(mNative, mParams);
    }

    /** Returns the parameter set most recently passed to {@link #setParameters}. */
    public Parameters getParameters() {
        return mParams;
    }

    /**
     * Returns the detected text regions as a {@link Pixa} sized to the source
     * image, or {@code null} if the native side has no result.
     */
    public Pixa getTextAreas() {
        int nativePixa = nativeGetTextAreas(mNative);

        if (nativePixa == 0) {
            return null;
        }

        int width = nativeGetSourceWidth(mNative);
        int height = nativeGetSourceHeight(mNative);

        return new Pixa(nativePixa, width, height);
    }

    /** Returns the skew angle estimated by the native detector. */
    public float getSkewAngle() {
        return nativeGetSkewAngle(mNative);
    }

    /** Returns per-region text confidence values from the native detector. */
    public float[] getTextConfs() {
        return nativeGetTextConfs(mNative);
    }

    /**
     * Returns the current source image as a {@link Pix}, or {@code null} if
     * none has been set on the native side.
     */
    public Pix getSourceImage() {
        int nativePix = nativeGetSourceImage(mNative);

        if (nativePix == 0) {
            return null;
        }

        return new Pix(nativePix);
    }

    /**
     * Sets the text detection source image to be a clone of the supplied source
     * image. The supplied image may be recycled after calling this method.
     *
     * @param pixs The source image on which to perform text detection.
     */
    public void setSourceImage(Pix pixs) {
        nativeSetSourceImage(mNative, pixs.getNativePix());
    }

    /** Runs text detection on the current source image (synchronous native call). */
    public void detectText() {
        nativeDetectText(mNative);
    }

    /** Clears the native detector's state (source image and results). */
    public void clear() {
        nativeClear(mNative);
    }

    // ******************
    // * PUBLIC CLASSES *
    // ******************

    /**
     * Tuning parameters for the native detector. All fields are public and
     * snake_case because the native code presumably resolves them by name via
     * JNI field lookups — renaming any field would silently break the binding.
     * Defaults are set in the constructor.
     */
    public class Parameters {
        // When true, the native side may write debug output to out_dir.
        public boolean debug;

        // Directory for debug output; defaults to external storage root.
        public String out_dir;

        // Edge-based thresholding
        public int edge_tile_x;

        public int edge_tile_y;

        public int edge_thresh;

        public int edge_avg_thresh;

        // Skew angle correction
        public boolean skew_enabled;

        public float skew_min_angle;

        public float skew_sweep_range;

        public float skew_sweep_delta;

        public int skew_sweep_reduction;

        public int skew_search_reduction;

        public float skew_search_min_delta;

        // Singleton filter
        public float single_min_aspect;

        public float single_max_aspect;

        public int single_min_area;

        public float single_min_density;

        // Quick pair filter
        public float pair_h_ratio;

        public float pair_d_ratio;

        public float pair_h_dist_ratio;

        public float pair_v_dist_ratio;

        public float pair_h_shared;

        // Cluster pair filter
        public int cluster_width_spacing;

        public float cluster_shared_edge;

        public float cluster_h_ratio;

        // Finalized cluster filter
        public int cluster_min_blobs;

        public float cluster_min_aspect;

        public float cluster_min_fdr;

        public int cluster_min_edge;

        public int cluster_min_edge_avg;

        public Parameters() {
            debug = false;
            out_dir = Environment.getExternalStorageDirectory().toString();

            // Edge-based thresholding
            edge_tile_x = 32;
            edge_tile_y = 64;
            edge_thresh = 64;
            edge_avg_thresh = 4;

            // Skew angle correction
            skew_enabled = true;
            skew_min_angle = 1.0f;
            skew_sweep_range = 30.0f;
            skew_sweep_delta = 5.0f;
            skew_sweep_reduction = 8;
            skew_search_reduction = 4;
            skew_search_min_delta = 0.01f;

            // Singleton filter
            single_min_aspect = 0.1f;
            single_max_aspect = 4.0f;
            single_min_area = 4;
            single_min_density = 0.2f;

            // Quick pair filter
            pair_h_ratio = 1.0f;
            pair_d_ratio = 1.5f;
            pair_h_dist_ratio = 2.0f;
            pair_v_dist_ratio = 0.25f;
            pair_h_shared = 0.25f;

            // Cluster pair filter
            cluster_width_spacing = 2;
            cluster_shared_edge = 0.5f;
            cluster_h_ratio = 1.0f;

            // Finalized cluster filter
            cluster_min_blobs = 5;
            cluster_min_aspect = 2;
            cluster_min_fdr = 2.5f;
            cluster_min_edge = 32;
            cluster_min_edge_avg = 1;
        }
    }

    // ******************
    // * NATIVE METHODS *
    // ******************

    private static native int nativeConstructor();

    private static native void nativeDestructor(int nativePtr);

    private static native void nativeSetParameters(int nativePtr, Parameters params);

    private static native int nativeGetTextAreas(int nativePtr);

    private static native float nativeGetSkewAngle(int nativePtr);

    private static native int nativeGetSourceWidth(int nativePtr);

    private static native int nativeGetSourceHeight(int nativePtr);

    private static native float[] nativeGetTextConfs(int nativePtr);

    private static native int nativeGetSourceImage(int nativePtr);

    private static native int nativeSetSourceImage(int nativePtr, int nativePix);

    private static native void nativeDetectText(int nativePtr);

    private static native void nativeClear(int nativePtr);
}
0359xiaodong/tess-two
eyes-two/src/com/googlecode/eyesfree/textdetect/HydrogenTextDetector.java
Java
apache-2.0
6,584
# Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Datatypes passed between Python and C code."""

import collections
import enum


@enum.unique
class Code(enum.IntEnum):
  """One Platform error codes (see status.h and codes.proto)."""

  OK = 0
  CANCELLED = 1
  UNKNOWN = 2
  INVALID_ARGUMENT = 3
  EXPIRED = 4
  NOT_FOUND = 5
  ALREADY_EXISTS = 6
  PERMISSION_DENIED = 7
  # Listed out of numeric order; presumably grouped next to PERMISSION_DENIED
  # for readability rather than by value — confirm against codes.proto.
  UNAUTHENTICATED = 16
  RESOURCE_EXHAUSTED = 8
  FAILED_PRECONDITION = 9
  ABORTED = 10
  OUT_OF_RANGE = 11
  UNIMPLEMENTED = 12
  INTERNAL_ERROR = 13
  UNAVAILABLE = 14
  DATA_LOSS = 15


class Status(collections.namedtuple('Status', ['code', 'details'])):
  """Describes an RPC's overall status."""


class ServiceAcceptance(
    collections.namedtuple(
        'ServiceAcceptance', ['call', 'method', 'host', 'deadline'])):
  """Describes an RPC on the service side at the start of service."""


class Event(
    collections.namedtuple(
        'Event',
        ['kind', 'tag', 'write_accepted', 'complete_accepted',
         'service_acceptance', 'bytes', 'status', 'metadata'])):
  """Describes an event emitted from a completion queue."""

  @enum.unique
  class Kind(enum.Enum):
    """Describes the kind of an event."""

    # Each member's value is a fresh object() sentinel: values are distinct by
    # identity only, so @enum.unique is trivially satisfied and no aliasing is
    # possible. Only identity comparison of members is meaningful.
    STOP = object()
    WRITE_ACCEPTED = object()
    COMPLETE_ACCEPTED = object()
    SERVICE_ACCEPTED = object()
    READ_ACCEPTED = object()
    METADATA_ACCEPTED = object()
    FINISH = object()
gameduell/kythe
third_party/grpc/src/python/src/grpc/_adapter/_datatypes.py
Python
apache-2.0
2,884
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
==============================================================================*/

#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM

#define EIGEN_USE_GPU

// We need to include gpu_kernel_helper.h before segment_reduction_ops.h
// See comment in segment_reduction_ops.h for more details.
// clang-format off
#include "tensorflow/core/util/gpu_kernel_helper.h"
// clang-format on

#include "tensorflow/core/kernels/segment_reduction_ops.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/util/gpu_device_functions.h"

namespace tensorflow {

using GPUDevice = Eigen::GpuDevice;

// SortedSegmentSumFunctor kernel reduces input data just as
// UnsortedSegmentSumCustomKernel does except that input data
// is partitioned along the outer reduction dimension. This is
// because consecutive rows (elements in a row share the same
// outer dimension index) in the flattened 2D input data likely
// belong to the same segment in sorted segment sum operation.
// Therefore such partitioning strategy has two advantages over
// the UnsortedSegmentSumFunctor kernel:
// 1. Each thread reduces across multiple rows before writing
// answers to the global memory, we can therefore
// write reduction results to global memory less often.
// 2. We may know that the current thread is the only contributor
// to an output element because of the increasing nature of segment
// ids. In such cases, we do not need to use atomic operations
// to write results to global memory.
// In the flattened view of input data (with only outer and inner
// dimension), every thread processes a strip of input data of
// size OuterDimTileSize x 1. This strip runs across multiple
// rows of input data and all reduction elements share one inner
// dimension index.
template <typename T, typename Index, int OuterDimTileSize>
__global__ void SortedSegmentSumCustomKernel(
    const Index input_outer_dim_size, const Index inner_dim_size,
    const Index output_outer_dim_size, const Index* __restrict__ segment_ids,
    const T* __restrict__ input, T* __restrict__ output,
    const Index total_stripe_count) {
  for (int stripe_index : GpuGridRangeX(total_stripe_count)) {
    // Each stripe covers one inner-dimension column across up to
    // OuterDimTileSize consecutive rows.
    const Index segment_offset = stripe_index % inner_dim_size;
    const Index input_outer_dim_index_base =
        stripe_index / inner_dim_size * Index(OuterDimTileSize);

    T sum = T(0);
    Index first_segment_id = segment_ids[input_outer_dim_index_base];
    // Initialized to an out-of-range sentinel so the first iteration never
    // triggers a write (valid segment ids are < output_outer_dim_size).
    Index last_output_segment_id = output_outer_dim_size;

    // The final stripe along the outer dimension may be shorter than a tile.
    const Index actual_stripe_height =
        min(Index(OuterDimTileSize),
            input_outer_dim_size - input_outer_dim_index_base);
    for (Index j = 0; j < actual_stripe_height; j++) {
      Index current_output_segment_id =
          segment_ids[input_outer_dim_index_base + j];
      // Decide whether to write result to global memory.
      // Result is only written to global memory if we move
      // to another segment. Otherwise we can keep accumulating
      // locally.
      if (current_output_segment_id > last_output_segment_id) {
        const Index output_index =
            last_output_segment_id * inner_dim_size + segment_offset;
        // decide whether to write result to global memory using atomic
        // operations
        if (last_output_segment_id == first_segment_id) {
          // The stripe's first segment may also be finished by the previous
          // stripe, so this write must be atomic.
          GpuAtomicAdd(output + output_index, sum);
        } else {
          // Interior segments are fully contained in this stripe; a plain
          // store is sufficient.
          *(output + output_index) = sum;
        }
        sum = T(0);
      }
      sum += ldg(input + (input_outer_dim_index_base + j) * inner_dim_size +
                 segment_offset);
      last_output_segment_id = current_output_segment_id;
    }
    // For the last result in a strip, always write using atomic operations
    // due to possible race conditions with threads computing
    // the following strip.
    const Index output_index =
        last_output_segment_id * inner_dim_size + segment_offset;
    GpuAtomicAdd(output + output_index, sum);
  }
}

// UnsortedSegmentSumKernel processes 'input_total_size' elements.
// Each element is mapped from input to output by a combination of its
// 'segment_ids' mapping and 'inner_dim_size'.
template <typename T, typename Index, typename KernelReductionFunctor>
__global__ void UnsortedSegmentCustomKernel(
    const int64 input_outer_dim_size, const int64 inner_dim_size,
    const int64 output_outer_dim_size, const Index* __restrict__ segment_ids,
    const T* __restrict__ input, T* __restrict__ output) {
  const int64 input_total_size = input_outer_dim_size * inner_dim_size;
  for (int64 input_index : GpuGridRangeX(input_total_size)) {
    const int64 input_segment_index = input_index / inner_dim_size;
    const int64 segment_offset = input_index % inner_dim_size;
    const Index output_segment_index = segment_ids[input_segment_index];
    // Segment ids outside [0, output_outer_dim_size) are silently skipped.
    if (output_segment_index < 0 ||
        output_segment_index >= output_outer_dim_size) {
      continue;
    }
    const int64 output_index =
        output_segment_index * inner_dim_size + segment_offset;
    // The functor performs the (atomic) combine of the input element into the
    // output slot.
    KernelReductionFunctor()(output + output_index, ldg(input + input_index));
  }
}

namespace functor {

// Sorted segment sum on GPU: zeroes the output, then launches
// SortedSegmentSumCustomKernel over stripes of the flattened input.
template <typename T, typename Index>
void SegmentSumFunctor<T, Index>::operator()(
    OpKernelContext* ctx, const GPUDevice& d, const Index output_rows,
    const TensorShape& segment_ids_shape,
    typename TTypes<Index>::ConstFlat segment_ids, const Index data_size,
    const T* data, typename TTypes<T, 2>::Tensor output) {
  if (output.size() == 0) {
    return;
  }
  // Set 'output' to zeros.
  GpuLaunchConfig config = GetGpuLaunchConfig(output.size(), d);
  TF_CHECK_OK(GpuLaunchKernel(SetZero<T>, config.block_count,
                              config.thread_per_block, 0, d.stream(),
                              output.size(), output.data()));
  if (data_size == 0 || segment_ids_shape.num_elements() == 0) {
    return;
  }

  // Launch kernel to compute sorted segment sum.
  // Notes:
  // *) 'input_total_size' is the total number of elements to process.
  // *) 'segment_ids.shape' is a prefix of data's shape.
  // *) 'input_outer_dim_size' is the total number of segments to process.
  const Index input_total_size = data_size;
  const Index input_outer_dim_size = segment_ids.dimension(0);
  const Index input_inner_dim_size = input_total_size / input_outer_dim_size;

  const int OuterDimTileSize = 8;

  const Index input_outer_dim_num_stripe =
      Eigen::divup(input_outer_dim_size, Index(OuterDimTileSize));

  const Index total_stripe_count =
      input_inner_dim_size * input_outer_dim_num_stripe;

  config = GetGpuLaunchConfig(total_stripe_count, d);
  TF_CHECK_OK(GpuLaunchKernel(
      SortedSegmentSumCustomKernel<T, Index, OuterDimTileSize>,
      config.block_count, config.thread_per_block, 0, d.stream(),
      input_outer_dim_size, input_inner_dim_size, output_rows,
      segment_ids.data(), data, output.data(), total_stripe_count));
}

// Unsorted segment reduction on GPU: fills the output with the reduction's
// initial value (InitialValueF), then scatters every input element into its
// segment slot via ReductionF.
template <typename T, typename Index, typename InitialValueF,
          typename ReductionF>
struct UnsortedSegmentFunctor<GPUDevice, T, Index, InitialValueF, ReductionF> {
  void operator()(OpKernelContext* ctx, const TensorShape& segment_ids_shape,
                  typename TTypes<Index>::ConstFlat segment_ids,
                  typename TTypes<T, 2>::ConstTensor data,
                  typename TTypes<T, 2>::Tensor output) {
    if (output.size() == 0) {
      return;
    }
    // Set 'output' to initial value.
    GPUDevice d = ctx->template eigen_device<GPUDevice>();
    GpuLaunchConfig config = GetGpuLaunchConfig(output.size(), d);
    TF_CHECK_OK(GpuLaunchKernel(
        SetToValue<T>, config.block_count, config.thread_per_block, 0,
        d.stream(), output.size(), output.data(), InitialValueF()()));
    const int64 data_size = data.size();
    if (data_size == 0 || segment_ids_shape.num_elements() == 0) {
      return;
    }
    // Launch kernel to compute unsorted segment reduction.
    // Notes:
    // *) 'data_size' is the total number of elements to process.
    // *) 'segment_ids.shape' is a prefix of data's shape.
    // *) 'input_outer_dim_size' is the total number of segments to process.
    const int64 input_outer_dim_size = segment_ids.dimension(0);
    const int64 input_inner_dim_size = data.dimension(1);
    const int64 output_outer_dim_size = output.dimension(0);
    config = GetGpuLaunchConfig(data_size, d);

    TF_CHECK_OK(GpuLaunchKernel(
        UnsortedSegmentCustomKernel<T, Index, ReductionF>, config.block_count,
        config.thread_per_block, 0, d.stream(), input_outer_dim_size,
        input_inner_dim_size, output_outer_dim_size, segment_ids.data(),
        data.data(), output.data()));
  }
};

// Explicit instantiations for the supported type/index combinations.
#define DEFINE_SORTED_GPU_SPECS_INDEX(T, Index) \
  template struct SegmentSumFunctor<T, Index>

#define DEFINE_SORTED_GPU_SPECS(T)   \
  DEFINE_SORTED_GPU_SPECS_INDEX(T, int32); \
  DEFINE_SORTED_GPU_SPECS_INDEX(T, int64);

TF_CALL_GPU_NUMBER_TYPES(DEFINE_SORTED_GPU_SPECS);

#define DEFINE_REAL_UNSORTED_GPU_SPECS_INDEX(T, Index)                         \
  template struct UnsortedSegmentFunctor<                                      \
      GPUDevice, T, Index, functor::Lowest<T>, functor::MaxOpGpu<T>>;          \
  template struct UnsortedSegmentFunctor<                                      \
      GPUDevice, T, Index, functor::Highest<T>, functor::MinOpGpu<T>>;         \
  template struct UnsortedSegmentFunctor<GPUDevice, T, Index, functor::One<T>, \
                                         functor::ProdOpGpu<T>>;

// sum is the only op that supports all input types currently
#define DEFINE_SUM_UNSORTED_GPU_SPECS_INDEX(T, Index) \
  template struct UnsortedSegmentFunctor<             \
      GPUDevice, T, Index, functor::Zero<T>, functor::SumOpGpu<T>>;

#define DEFINE_REAL_GPU_SPECS(T)                  \
  DEFINE_REAL_UNSORTED_GPU_SPECS_INDEX(T, int32); \
  DEFINE_REAL_UNSORTED_GPU_SPECS_INDEX(T, int64);

#define DEFINE_SUM_GPU_SPECS(T)                  \
  DEFINE_SUM_UNSORTED_GPU_SPECS_INDEX(T, int32); \
  DEFINE_SUM_UNSORTED_GPU_SPECS_INDEX(T, int64);

TF_CALL_GPU_NUMBER_TYPES(DEFINE_REAL_GPU_SPECS);
TF_CALL_int32(DEFINE_REAL_GPU_SPECS);
TF_CALL_GPU_NUMBER_TYPES(DEFINE_SUM_GPU_SPECS);
TF_CALL_int32(DEFINE_SUM_GPU_SPECS);

// TODO(rocm): support atomicAdd for complex numbers on ROCm
#if GOOGLE_CUDA
TF_CALL_COMPLEX_TYPES(DEFINE_SUM_GPU_SPECS);
#endif

#undef DEFINE_SORTED_GPU_SPECS_INDEX
#undef DEFINE_SORTED_GPU_SPECS
#undef DEFINE_REAL_UNSORTED_GPU_SPECS_INDEX
#undef DEFINE_SUM_UNSORTED_GPU_SPECS_INDEX
#undef DEFINE_REAL_GPU_SPECS
#undef DEFINE_SUM_GPU_SPECS

}  // namespace functor
}  // namespace tensorflow

#endif  // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
karllessard/tensorflow
tensorflow/core/kernels/segment_reduction_ops_gpu.cu.cc
C++
apache-2.0
11,228
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

using Google.Apis.Compute.v1.Data;
using GoogleCloudExtension.DataSources;
using GoogleCloudExtension.Utils;

namespace GoogleCloudExtension.CloudExplorerSources.Gce
{
    /// <summary>
    /// Cloud Explorer item for a Windows GCE instance. Extends the generic
    /// <see cref="GceInstanceItem"/> with Windows-specific properties derived
    /// from the instance's Windows info.
    /// </summary>
    public class WindowsInstanceItem : GceInstanceItem
    {
        // Windows-specific metadata extracted once from the instance.
        private readonly WindowsInstanceInfo _windowsInfo;

        public WindowsInstanceItem(Instance instance) : base(instance)
        {
            _windowsInfo = instance.GetWindowsInstanceInfo();
        }

        /// <summary>
        /// The display name of the Windows version, shown in the localized
        /// "Windows" category of the properties grid.
        /// </summary>
        [LocalizedCategory(nameof(Resources.CloudExplorerGceInstanceWindowsCategory))]
        [LocalizedDisplayName(nameof(Resources.CloudExplorerGceInstanceWindowsVersionDisplayName))]
        [LocalizedDescription(nameof(Resources.CloudExplorerGceInstanceWindowsVersionDescription))]
        public string WindowsDisplayName
        {
            get { return _windowsInfo.DisplayName; }
        }
    }
}
Deren-Liao/google-cloud-visualstudio
GoogleCloudExtension/GoogleCloudExtension/CloudExplorerSources/Gce/WindowsInstanceItem.cs
C#
apache-2.0
1,400
package com.bazaarvoice.emodb.sor.core;

import com.bazaarvoice.emodb.sor.api.Audit;
import com.bazaarvoice.emodb.sor.api.PurgeStatus;

/**
 * Interface responsible for asynchronous purge jobs for the DataStore.
 */
public interface DataStoreAsync {

    /**
     * Starts an asynchronous purge job on the given table.
     *
     * @param table the name of the table to purge
     * @param audit audit record describing who/why the purge was requested
     * @return an identifier for the started job — presumably the jobID
     *         accepted by {@link #getPurgeStatus}; confirm with implementations
     */
    String purgeTableAsync(String table, Audit audit);

    /**
     * Gets the status of the purge job with the given jobID.
     *
     * @param table the name of the table being purged
     * @param jobID the identifier of the purge job
     * @return the current status of the job
     */
    PurgeStatus getPurgeStatus(String table, String jobID);
}
billkalter/emodb
sor/src/main/java/com/bazaarvoice/emodb/sor/core/DataStoreAsync.java
Java
apache-2.0
469
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.hadoop.serialization; import org.apache.hadoop.io.BooleanWritable; import org.apache.hadoop.io.DoubleWritable; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.BytesWritable; import org.elasticsearch.hadoop.mr.WritableValueReader; import org.elasticsearch.hadoop.serialization.builder.ValueReader; import static org.junit.Assert.*; public class WritableValueReaderTest extends AbstractValueReaderTest { @Override public ValueReader createValueReader() { return new WritableValueReader(); } @Override public void checkNull(Object typeFromJson) { assertEquals(NullWritable.get(), typeFromJson); } @Override public void checkEmptyString(Object typeFromJson) { assertEquals(NullWritable.get(), typeFromJson); } @Override public void checkString(Object typeFromJson) { assertEquals(new Text("someText"), typeFromJson); } @Override public void checkInteger(Object typeFromJson) { assertEquals(new IntWritable(Integer.MAX_VALUE), typeFromJson); } @Override public void checkLong(Object typeFromJson) { assertEquals(new LongWritable(Long.MAX_VALUE), typeFromJson); } 
@Override public void checkDouble(Object typeFromJson) { assertEquals(new DoubleWritable(Double.MAX_VALUE), typeFromJson); } @Override public void checkFloat(Object typeFromJson) { assertEquals(Float.MAX_VALUE + "", typeFromJson + ""); } @Override public void checkBoolean(Object typeFromJson) { assertEquals(new BooleanWritable(Boolean.TRUE), typeFromJson); } @Override public void checkByteArray(Object typeFromJson, String encode) { assertEquals(new Text(encode), typeFromJson); } @Override public void checkBinary(Object typeFromJson, byte[] encode) { assertEquals(new BytesWritable(encode), typeFromJson); } }
elastic/elasticsearch-hadoop
mr/src/test/java/org/elasticsearch/hadoop/serialization/WritableValueReaderTest.java
Java
apache-2.0
2,836
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #include "statemgr/heron-zkstatemgr.h" #include <iostream> #include <string> #include <vector> #include "proto/messages.h" #include "basics/basics.h" #include "errors/errors.h" #include "threads/threads.h" #include "network/network.h" #include "zookeeper/zkclient.h" namespace heron { namespace common { HeronZKStateMgr::HeronZKStateMgr(const std::string& zkhostport, const std::string& topleveldir, EventLoop* eventLoop, bool exitOnSessionExpiry) : HeronStateMgr(topleveldir), zkhostport_(zkhostport), zkclient_(NULL), zkclient_factory_(new DefaultZKClientFactory()), eventLoop_(eventLoop), tmaster_location_watcher_info_(NULL), exitOnSessionExpiry_(exitOnSessionExpiry) { Init(); } HeronZKStateMgr::HeronZKStateMgr(const std::string& zkhostport, const std::string& topleveldir, EventLoop* eventLoop, ZKClientFactory* zkclient_factory, bool exitOnSessionExpiry) : HeronStateMgr(topleveldir), zkhostport_(zkhostport), zkclient_(NULL), zkclient_factory_(zkclient_factory), eventLoop_(eventLoop), tmaster_location_watcher_info_(NULL), exitOnSessionExpiry_(exitOnSessionExpiry) { Init(); } void HeronZKStateMgr::Init() { if (exitOnSessionExpiry_) { watch_event_cb_ = VCallback<ZKClient::ZkWatchEvent>(); } else { 
watch_event_cb_ = [this](ZKClient::ZkWatchEvent event) { this->GlobalWatchEventHandler(event); }; } // If watch_event_cb is empty, zkClient exits on session expired event zkclient_ = zkclient_factory_->create(zkhostport_, eventLoop_, watch_event_cb_); } HeronZKStateMgr::~HeronZKStateMgr() { delete zkclient_; delete zkclient_factory_; delete tmaster_location_watcher_info_; } void HeronZKStateMgr::InitTree() { // Needs to be implemented CHECK(false); } void HeronZKStateMgr::SetTMasterLocationWatch(const std::string& topology_name, VCallback<> watcher) { CHECK(watcher); CHECK(!topology_name.empty()); tmaster_location_watcher_info_ = new TMasterLocationWatchInfo(std::move(watcher), topology_name); SetTMasterLocationWatchInternal(); } void HeronZKStateMgr::SetMetricsCacheLocationWatch(const std::string& topology_name, VCallback<> watcher) { CHECK(watcher); CHECK(!topology_name.empty()); metricscache_location_watcher_info_ = new TMasterLocationWatchInfo( std::move(watcher), topology_name); SetMetricsCacheLocationWatchInternal(); } void HeronZKStateMgr::SetPackingPlanWatch(const std::string& topology_name, VCallback<> watcher) { CHECK(watcher); CHECK(!topology_name.empty()); packing_plan_watcher_info_ = new TMasterLocationWatchInfo(std::move(watcher), topology_name); SetPackingPlanWatchInternal(); } void HeronZKStateMgr::SetTMasterLocation(const proto::tmaster::TMasterLocation& _location, VCallback<proto::system::StatusCode> cb) { // Just try to create an ephimeral node std::string path = GetTMasterLocationPath(_location.topology_name()); std::string value; _location.SerializeToString(&value); auto wCb = [cb, this](sp_int32 rc) { this->SetTMasterLocationDone(std::move(cb), rc); }; zkclient_->CreateNode(path, value, true, std::move(wCb)); } void HeronZKStateMgr::SetMetricsCacheLocation(const proto::tmaster::MetricsCacheLocation& _location, VCallback<proto::system::StatusCode> cb) { // Just try to create an ephimeral node std::string path = 
GetMetricsCacheLocationPath(_location.topology_name()); std::string value; _location.SerializeToString(&value); auto wCb = [cb, this](sp_int32 rc) { this->SetMetricsCacheLocationDone(std::move(cb), rc); }; zkclient_->CreateNode(path, value, true, std::move(wCb)); } void HeronZKStateMgr::GetTMasterLocation(const std::string& _topology_name, proto::tmaster::TMasterLocation* _return, VCallback<proto::system::StatusCode> cb) { std::string path = GetTMasterLocationPath(_topology_name); std::string* contents = new std::string(); auto wCb = [contents, _return, cb, this](sp_int32 rc) { this->GetTMasterLocationDone(contents, _return, std::move(cb), rc); }; zkclient_->Get(path, contents, std::move(wCb)); } void HeronZKStateMgr::GetMetricsCacheLocation(const std::string& _topology_name, proto::tmaster::MetricsCacheLocation* _return, VCallback<proto::system::StatusCode> cb) { std::string path = GetMetricsCacheLocationPath(_topology_name); std::string* contents = new std::string(); auto wCb = [contents, _return, cb, this](sp_int32 rc) { this->GetMetricsCacheLocationDone(contents, _return, std::move(cb), rc); }; zkclient_->Get(path, contents, std::move(wCb)); } void HeronZKStateMgr::CreateTopology(const proto::api::Topology& _topology, VCallback<proto::system::StatusCode> cb) { std::string path = GetTopologyPath(_topology.name()); std::string value; _topology.SerializeToString(&value); auto wCb = [cb, this](sp_int32 rc) { this->CreateTopologyDone(std::move(cb), rc); }; zkclient_->CreateNode(path, value, false, std::move(wCb)); } void HeronZKStateMgr::DeleteTopology(const std::string& _topology_name, VCallback<proto::system::StatusCode> cb) { std::string path = GetTopologyPath(_topology_name); auto wCb = [cb, this](sp_int32 rc) { this->DeleteTopologyDone(std::move(cb), rc); }; zkclient_->DeleteNode(path, std::move(wCb)); } void HeronZKStateMgr::SetTopology(const proto::api::Topology& _topology, VCallback<proto::system::StatusCode> cb) { std::string path = 
GetTopologyPath(_topology.name()); std::string value; _topology.SerializeToString(&value); auto wCb = [cb, this](sp_int32 rc) { this->SetTopologyDone(std::move(cb), rc); }; zkclient_->Set(path, value, std::move(wCb)); } void HeronZKStateMgr::GetTopology(const std::string& _topology_name, proto::api::Topology* _return, VCallback<proto::system::StatusCode> cb) { std::string path = GetTopologyPath(_topology_name); std::string* contents = new std::string(); auto wCb = [contents, _return, cb, this](sp_int32 rc) { this->GetTopologyDone(contents, _return, std::move(cb), rc); }; zkclient_->Get(path, contents, std::move(wCb)); } void HeronZKStateMgr::CreatePhysicalPlan(const proto::system::PhysicalPlan& _pplan, VCallback<proto::system::StatusCode> cb) { std::string path = GetPhysicalPlanPath(_pplan.topology().name()); std::string contents; _pplan.SerializeToString(&contents); auto wCb = [cb, this](sp_int32 rc) { this->CreatePhysicalPlanDone(std::move(cb), rc); }; zkclient_->CreateNode(path, contents, false, std::move(wCb)); } void HeronZKStateMgr::DeletePhysicalPlan(const std::string& _topology_name, VCallback<proto::system::StatusCode> cb) { std::string path = GetPhysicalPlanPath(_topology_name); auto wCb = [cb, this](sp_int32 rc) { this->DeletePhysicalPlanDone(std::move(cb), rc); }; zkclient_->DeleteNode(path, std::move(wCb)); } void HeronZKStateMgr::SetPhysicalPlan(const proto::system::PhysicalPlan& _pplan, VCallback<proto::system::StatusCode> cb) { std::string path = GetPhysicalPlanPath(_pplan.topology().name()); std::string contents; _pplan.SerializeToString(&contents); auto wCb = [cb, this](sp_int32 rc) { this->SetPhysicalPlanDone(std::move(cb), rc); }; zkclient_->Set(path, contents, std::move(wCb)); } void HeronZKStateMgr::GetPhysicalPlan(const std::string& _topology_name, proto::system::PhysicalPlan* _return, VCallback<proto::system::StatusCode> cb) { std::string path = GetPhysicalPlanPath(_topology_name); std::string* contents = new std::string(); auto wCb = 
[contents, _return, cb, this](sp_int32 rc) { this->GetPhysicalPlanDone(contents, _return, std::move(cb), rc); }; zkclient_->Get(path, contents, std::move(wCb)); } void HeronZKStateMgr::GetPackingPlan(const std::string& _topology_name, proto::system::PackingPlan* _return, VCallback<proto::system::StatusCode> cb) { std::string path = GetPackingPlanPath(_topology_name); std::string* contents = new std::string(); auto wCb = [contents, _return, cb, this](sp_int32 rc) { this->GetPackingPlanDone(contents, _return, std::move(cb), rc); }; zkclient_->Get(path, contents, std::move(wCb)); } void HeronZKStateMgr::CreateExecutionState(const proto::system::ExecutionState& _state, VCallback<proto::system::StatusCode> cb) { std::string path = GetExecutionStatePath(_state.topology_name()); std::string contents; _state.SerializeToString(&contents); auto wCb = [cb, this](sp_int32 rc) { this->CreateExecutionStateDone(std::move(cb), rc); }; zkclient_->CreateNode(path, contents, false, std::move(wCb)); } void HeronZKStateMgr::DeleteExecutionState(const std::string& _topology_name, VCallback<proto::system::StatusCode> cb) { std::string path = GetExecutionStatePath(_topology_name); auto wCb = [cb, this](sp_int32 rc) { this->DeleteExecutionStateDone(std::move(cb), rc); }; zkclient_->DeleteNode(path, std::move(wCb)); } void HeronZKStateMgr::SetExecutionState(const proto::system::ExecutionState& _state, VCallback<proto::system::StatusCode> cb) { std::string path = GetExecutionStatePath(_state.topology_name()); std::string contents; _state.SerializeToString(&contents); auto wCb = [cb, this](sp_int32 rc) { this->SetExecutionStateDone(std::move(cb), rc); }; zkclient_->Set(path, contents, std::move(wCb)); } void HeronZKStateMgr::GetExecutionState(const std::string& _topology_name, proto::system::ExecutionState* _return, VCallback<proto::system::StatusCode> cb) { std::string path = GetExecutionStatePath(_topology_name); std::string* contents = new std::string(); auto wCb = [contents, _return, cb, 
this](sp_int32 rc) { this->GetExecutionStateDone(contents, _return, std::move(cb), rc); }; zkclient_->Get(path, contents, std::move(wCb)); } void HeronZKStateMgr::CreateStatefulCheckpoints(const std::string& _topology_name, const proto::ckptmgr::StatefulConsistentCheckpoints& _ckpt, VCallback<proto::system::StatusCode> cb) { std::string path = GetStatefulCheckpointsPath(_topology_name); std::string contents; _ckpt.SerializeToString(&contents); auto wCb = [cb, this](sp_int32 rc) { this->CreateStatefulCheckpointsDone(std::move(cb), rc); }; zkclient_->CreateNode(path, contents, false, std::move(wCb)); } void HeronZKStateMgr::DeleteStatefulCheckpoints(const std::string& _topology_name, VCallback<proto::system::StatusCode> cb) { std::string path = GetStatefulCheckpointsPath(_topology_name); auto wCb = [cb, this](sp_int32 rc) { this->DeleteStatefulCheckpointsDone(std::move(cb), rc); }; zkclient_->DeleteNode(path, std::move(wCb)); } void HeronZKStateMgr::SetStatefulCheckpoints(const std::string& _topology_name, const proto::ckptmgr::StatefulConsistentCheckpoints& _ckpt, VCallback<proto::system::StatusCode> cb) { std::string path = GetStatefulCheckpointsPath(_topology_name); std::string contents; _ckpt.SerializeToString(&contents); auto wCb = [cb, this](sp_int32 rc) { this->SetStatefulCheckpointsDone(std::move(cb), rc); }; zkclient_->Set(path, contents, std::move(wCb)); } void HeronZKStateMgr::GetStatefulCheckpoints(const std::string& _topology_name, proto::ckptmgr::StatefulConsistentCheckpoints* _return, VCallback<proto::system::StatusCode> cb) { std::string path = GetStatefulCheckpointsPath(_topology_name); std::string* contents = new std::string(); auto wCb = [contents, _return, cb, this](sp_int32 rc) { this->GetStatefulCheckpointsDone(contents, _return, std::move(cb), rc); }; zkclient_->Get(path, contents, std::move(wCb)); } void HeronZKStateMgr::ListTopologies(std::vector<sp_string>* _return, VCallback<proto::system::StatusCode> cb) { sp_string path = 
GetTopologyDir(); auto wCb = [cb, this](sp_int32 rc) { this->ListTopologiesDone(std::move(cb), rc); }; zkclient_->GetChildren(path, _return, wCb); } void HeronZKStateMgr::ListExecutionStateTopologies(std::vector<sp_string>* _return, VCallback<proto::system::StatusCode> cb) { sp_string path = GetExecutionStateDir(); auto wCb = [cb, this](sp_int32 rc) { this->ListExecutionStateTopologiesDone(std::move(cb), rc); }; zkclient_->GetChildren(path, _return, std::move(wCb)); } void HeronZKStateMgr::GlobalWatchEventHandler(const ZKClient::ZkWatchEvent event) { LOG(INFO) << "Received an event, Type: " << ZKClient::type2String(event.type) << ", State: " << ZKClient::state2String(event.state); if (event.type == ZOO_SESSION_EVENT && event.state == ZOO_EXPIRED_SESSION_STATE) { // TODO(kramasamy): The session expired event is only triggered after the client // is able to connect back to the zk server after a connection loss. But the // duration of the connection loss is indeterminate, so it is pointless to // wait for the entire duration. A better approach here is to timeout after // client is in connecting state for a duration greater than session timeout. LOG(INFO) << "Deleting current zk client... "; // This could be a blocking call since it flushes out all outstanding // requests. Hence adding logs before and after to track time consumed. // NOTE: Since this class is meant to be operate in single threaded mode, // this is a safe operation. delete zkclient_; LOG(INFO) << "Deleted current zk client, creating a new one..."; zkclient_ = zkclient_factory_->create(zkhostport_, eventLoop_, watch_event_cb_); LOG(INFO) << "New zk client created"; // set tmaster watch and notify the client watcher // NOTE: It isn't enough to just set the watch here, since we could // have lost a tmaster node change when the session expired. This is needed // since the current zkclient design notifies only the "Connected_State" events to // the individual node watchers. 
Session expired events need explicit notification. if (IsTmasterWatchDefined()) { TMasterLocationWatch(); } } else { LOG(WARNING) << "Events other than session expired event are not" << "expected, at least for now" << std::endl; } } void HeronZKStateMgr::SetTMasterLocationDone(VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZNODEEXISTS) { LOG(ERROR) << "Setting TMaster Location failed because another zmaster exists" << std::endl; code = proto::system::TMASTERLOCATION_ALREADY_EXISTS; } else if (_rc != ZOK) { LOG(ERROR) << "Setting TMaster Location failed with error " << _rc << std::endl; code = proto::system::STATE_WRITE_ERROR; } cb(code); } void HeronZKStateMgr::SetMetricsCacheLocationDone(VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZNODEEXISTS) { LOG(ERROR) << "Setting MetricsCache Location failed because another zmaster exists" << std::endl; code = proto::system::METRICSCACHELOCATION_ALREADY_EXISTS; } else if (_rc != ZOK) { LOG(ERROR) << "Setting MetricsCache Location failed with error " << _rc << std::endl; code = proto::system::STATE_WRITE_ERROR; } cb(code); } void HeronZKStateMgr::GetTMasterLocationDone(std::string* _contents, proto::tmaster::TMasterLocation* _return, VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZOK) { if (!_return->ParseFromString(*_contents)) { LOG(ERROR) << "Error parsing tmaster location" << std::endl; code = proto::system::STATE_CORRUPTED; } } else if (_rc == ZNONODE) { LOG(ERROR) << "Error getting tmaster location because the tmaster does not exist" << std::endl; code = proto::system::PATH_DOES_NOT_EXIST; } else { LOG(ERROR) << "Getting TMaster Location failed with error " << _rc << std::endl; code = proto::system::STATE_READ_ERROR; } delete _contents; cb(code); } void 
HeronZKStateMgr::GetMetricsCacheLocationDone(std::string* _contents, proto::tmaster::MetricsCacheLocation* _return, VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZOK) { if (!_return->ParseFromString(*_contents)) { LOG(ERROR) << "Error parsing metricscache location" << std::endl; code = proto::system::STATE_CORRUPTED; } } else if (_rc == ZNONODE) { LOG(ERROR) << "Error getting metricscache location because the metricscache does not exist" << std::endl; code = proto::system::PATH_DOES_NOT_EXIST; } else { LOG(ERROR) << "Getting MetricsCache Location failed with error " << _rc << std::endl; code = proto::system::STATE_READ_ERROR; } delete _contents; cb(code); } void HeronZKStateMgr::CreateTopologyDone(VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZNONODE) { LOG(ERROR) << "Setting Topology failed because zk is not setup properly" << std::endl; code = proto::system::PATH_DOES_NOT_EXIST; } else if (_rc != ZOK) { LOG(ERROR) << "Creating Topology failed with error " << _rc << std::endl; code = proto::system::STATE_WRITE_ERROR; } cb(code); } void HeronZKStateMgr::DeleteTopologyDone(VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZNONODE) { LOG(ERROR) << "Deleting Topology failed because there was no such node" << std::endl; code = proto::system::PATH_DOES_NOT_EXIST; } else if (_rc != ZOK) { LOG(ERROR) << "Setting Topology failed with error " << _rc << std::endl; code = proto::system::STATE_WRITE_ERROR; } cb(code); } void HeronZKStateMgr::SetTopologyDone(VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZNONODE) { LOG(ERROR) << "Setting Topology failed because topoloogy does not exist" << std::endl; code = proto::system::PATH_DOES_NOT_EXIST; } else if (_rc != ZOK) { LOG(ERROR) << "Setting 
Topology failed with error " << _rc << std::endl; code = proto::system::STATE_WRITE_ERROR; } cb(code); } void HeronZKStateMgr::GetTopologyDone(std::string* _contents, proto::api::Topology* _return, VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZOK) { if (!_return->ParseFromString(*_contents)) { LOG(ERROR) << "topology parsing failed; zk corruption?" << std::endl; code = proto::system::STATE_CORRUPTED; } } else if (_rc == ZNONODE) { LOG(ERROR) << "Error getting topology because the topology does not exist" << std::endl; code = proto::system::PATH_DOES_NOT_EXIST; } else { LOG(ERROR) << "Getting Topology failed with error " << _rc << std::endl; code = proto::system::STATE_READ_ERROR; } delete _contents; cb(code); } void HeronZKStateMgr::CreatePhysicalPlanDone(VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZNONODE) { LOG(ERROR) << "Creating Physical Plan failed because zk was not setup properly" << std::endl; code = proto::system::PATH_DOES_NOT_EXIST; } else if (_rc != ZOK) { LOG(ERROR) << "Setting Physical Plan failed with error " << _rc << std::endl; code = proto::system::STATE_WRITE_ERROR; } cb(code); } void HeronZKStateMgr::DeletePhysicalPlanDone(VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZNONODE) { LOG(ERROR) << "Deleting Physical Plan failed because there was no such node" << std::endl; code = proto::system::PATH_DOES_NOT_EXIST; } else if (_rc != ZOK) { LOG(ERROR) << "Deleting Physical Plan failed with error " << _rc << std::endl; code = proto::system::STATE_WRITE_ERROR; } cb(code); } void HeronZKStateMgr::SetPhysicalPlanDone(VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZNONODE) { LOG(ERROR) << "Setting Physical Plan failed because there was no such node" << std::endl; 
code = proto::system::PATH_DOES_NOT_EXIST; } else if (_rc != ZOK) { LOG(ERROR) << "Setting Assignment failed with error " << _rc << std::endl; code = proto::system::STATE_WRITE_ERROR; } cb(code); } void HeronZKStateMgr::GetPhysicalPlanDone(std::string* _contents, proto::system::PhysicalPlan* _return, VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZOK) { if (!_return->ParseFromString(*_contents)) { code = proto::system::STATE_CORRUPTED; } } else if (_rc == ZNONODE) { code = proto::system::PATH_DOES_NOT_EXIST; } else { LOG(ERROR) << "Getting PhysicalPlan failed with error " << _rc << std::endl; code = proto::system::STATE_READ_ERROR; } delete _contents; cb(code); } void HeronZKStateMgr::GetPackingPlanDone(std::string* _contents, proto::system::PackingPlan* _return, VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZOK) { if (!_return->ParseFromString(*_contents)) { code = proto::system::STATE_CORRUPTED; } } else if (_rc == ZNONODE) { code = proto::system::PATH_DOES_NOT_EXIST; } else { LOG(ERROR) << "Getting PackingPlan failed with error " << _rc << std::endl; code = proto::system::STATE_READ_ERROR; } delete _contents; cb(code); } void HeronZKStateMgr::CreateExecutionStateDone(VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZNONODE) { LOG(ERROR) << "Creating ExecutionState failed because zookeeper was not setup properly" << std::endl; code = proto::system::PATH_DOES_NOT_EXIST; } else if (_rc != ZOK) { LOG(ERROR) << "Creating ExecutionState failed with error " << _rc << std::endl; code = proto::system::STATE_WRITE_ERROR; } cb(code); } void HeronZKStateMgr::DeleteExecutionStateDone(VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZNONODE) { LOG(ERROR) << "Deleting ExecutionState failed 
because the node does not exists" << std::endl; code = proto::system::PATH_DOES_NOT_EXIST; } else if (_rc != ZOK) { LOG(ERROR) << "Deleting ExecutionState failed with error " << _rc << std::endl; code = proto::system::STATE_WRITE_ERROR; } else { LOG(ERROR) << "Deleted Exectution state" << std::endl; } cb(code); } void HeronZKStateMgr::SetExecutionStateDone(VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZNONODE) { LOG(ERROR) << "Setting Execution State failed because there was no such node" << std::endl; code = proto::system::PATH_DOES_NOT_EXIST; } else if (_rc != ZOK) { LOG(ERROR) << "Setting Execution state failed with error " << _rc << std::endl; code = proto::system::STATE_WRITE_ERROR; } cb(code); } void HeronZKStateMgr::GetExecutionStateDone(std::string* _contents, proto::system::ExecutionState* _return, VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZOK) { if (!_return->ParseFromString(*_contents)) { code = proto::system::STATE_CORRUPTED; } } else if (_rc == ZNONODE) { code = proto::system::PATH_DOES_NOT_EXIST; } else { LOG(ERROR) << "Getting ExecutionState failed with error " << _rc << std::endl; code = proto::system::STATE_READ_ERROR; } delete _contents; cb(code); } void HeronZKStateMgr::CreateStatefulCheckpointsDone(VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZNONODE) { LOG(ERROR) << "Creating StatefulCheckpoints failed because zookeeper was not setup properly" << std::endl; code = proto::system::PATH_DOES_NOT_EXIST; } else if (_rc != ZOK) { LOG(ERROR) << "Creating Stateful Checkpoints failed with error " << _rc; code = proto::system::STATE_WRITE_ERROR; } cb(code); } void HeronZKStateMgr::DeleteStatefulCheckpointsDone(VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == 
ZNONODE) { LOG(ERROR) << "Deleting StatefulCheckpoints failed because the node does not exists"; code = proto::system::PATH_DOES_NOT_EXIST; } else if (_rc != ZOK) { LOG(ERROR) << "Deleting StatefulCheckpoints failed with error " << _rc; code = proto::system::STATE_WRITE_ERROR; } else { LOG(ERROR) << "Deleted Exectution state"; } cb(code); } void HeronZKStateMgr::SetStatefulCheckpointsDone(VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZNONODE) { LOG(ERROR) << "Setting StatefulCheckpoints failed because there was no such node"; code = proto::system::PATH_DOES_NOT_EXIST; } else if (_rc != ZOK) { LOG(ERROR) << "Setting StatefulCheckpoints failed with error " << _rc; code = proto::system::STATE_WRITE_ERROR; } cb(code); } void HeronZKStateMgr::GetStatefulCheckpointsDone(std::string* _contents, proto::ckptmgr::StatefulConsistentCheckpoints* _return, VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc == ZOK) { if (!_return->ParseFromString(*_contents)) { code = proto::system::STATE_CORRUPTED; } } else if (_rc == ZNONODE) { code = proto::system::PATH_DOES_NOT_EXIST; } else { LOG(ERROR) << "Getting StatefulCheckpoints failed with error " << _rc; code = proto::system::STATE_READ_ERROR; } delete _contents; cb(code); } void HeronZKStateMgr::ListTopologiesDone(VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc != ZOK) { code = proto::system::NOTOK; } cb(code); } void HeronZKStateMgr::ListExecutionStateTopologiesDone(VCallback<proto::system::StatusCode> cb, sp_int32 _rc) { proto::system::StatusCode code = proto::system::OK; if (_rc != ZOK) { code = proto::system::NOTOK; } cb(code); } bool HeronZKStateMgr::IsTmasterWatchDefined() { return (tmaster_location_watcher_info_ != NULL && tmaster_location_watcher_info_->watcher_cb && 
!tmaster_location_watcher_info_->topology_name.empty()); } bool HeronZKStateMgr::IsMetricsCacheWatchDefined() { return (metricscache_location_watcher_info_ != NULL && metricscache_location_watcher_info_->watcher_cb && !metricscache_location_watcher_info_->topology_name.empty()); } bool HeronZKStateMgr::IsPackingPlanWatchDefined() { return (packing_plan_watcher_info_ != NULL && packing_plan_watcher_info_->watcher_cb && !packing_plan_watcher_info_->topology_name.empty()); } // 2 seconds const int HeronZKStateMgr::SET_WATCH_RETRY_INTERVAL_S = 2; bool HeronZKStateMgr::ShouldRetrySetWatch(sp_int32 rc) { switch (rc) { case ZCONNECTIONLOSS: case ZOPERATIONTIMEOUT: return true; default: // Shouldn't retry for any other return code return false; } } void HeronZKStateMgr::SetTMasterWatchCompletionHandler(sp_int32 rc) { if (rc == ZOK || rc == ZNONODE) { // NoNode is when there is no tmaster up yet, but the watch is set. LOG(INFO) << "Setting watch on tmaster location succeeded: " << zerror(rc) << std::endl; } else { // Any other return code should be treated as warning, since ideally // we shouldn't be in this state. LOG(WARNING) << "Setting watch on tmaster location returned: " << zerror(rc) << std::endl; if (ShouldRetrySetWatch(rc)) { LOG(INFO) << "Retrying after " << SET_WATCH_RETRY_INTERVAL_S << " seconds" << std::endl; auto cb = [this](EventLoop::Status status) { this->CallSetTMasterLocationWatch(status); }; eventLoop_->registerTimer(std::move(cb), false, SET_WATCH_RETRY_INTERVAL_S * 1000 * 1000); } } } void HeronZKStateMgr::SetMetricsCacheWatchCompletionHandler(sp_int32 rc) { if (rc == ZOK || rc == ZNONODE) { // NoNode is when there is no tmaster up yet, but the watch is set. LOG(INFO) << "Setting watch on metricscache location succeeded: " << zerror(rc) << std::endl; } else { // Any other return code should be treated as warning, since ideally // we shouldn't be in this state. 
LOG(WARNING) << "Setting watch on metricscache location returned: " << zerror(rc) << std::endl; if (ShouldRetrySetWatch(rc)) { LOG(INFO) << "Retrying after " << SET_WATCH_RETRY_INTERVAL_S << " seconds" << std::endl; auto cb = [this](EventLoop::Status status) { this->CallSetMetricsCacheLocationWatch(status);}; eventLoop_->registerTimer(std::move(cb), false, SET_WATCH_RETRY_INTERVAL_S * 1000 * 1000); } } } void HeronZKStateMgr::SetPackingPlanWatchCompletionHandler(sp_int32 rc) { if (rc == ZOK || rc == ZNONODE) { // NoNode is when there is no packingplan up yet, but the watch is set. LOG(INFO) << "Setting watch on packing plan succeeded: " << zerror(rc) << std::endl; } else { // Any other return code should be treated as warning, since ideally // we shouldn't be in this state. LOG(WARNING) << "Setting watch on packing plan returned: " << zerror(rc) << std::endl; if (ShouldRetrySetWatch(rc)) { LOG(INFO) << "Retrying after " << SET_WATCH_RETRY_INTERVAL_S << " seconds" << std::endl; auto cb = [this](EventLoop::Status status) { this->CallSetPackingPlanWatch(status);}; eventLoop_->registerTimer(std::move(cb), false, SET_WATCH_RETRY_INTERVAL_S * 1000 * 1000); } } } void HeronZKStateMgr::CallSetTMasterLocationWatch(EventLoop::Status) { SetTMasterLocationWatchInternal(); } void HeronZKStateMgr::CallSetMetricsCacheLocationWatch(EventLoop::Status) { SetMetricsCacheLocationWatchInternal(); } void HeronZKStateMgr::CallSetPackingPlanWatch(EventLoop::Status) { SetPackingPlanWatchInternal(); } void HeronZKStateMgr::SetTMasterLocationWatchInternal() { CHECK(IsTmasterWatchDefined()); LOG(INFO) << "Setting watch on tmaster location " << std::endl; std::string path = GetTMasterLocationPath(tmaster_location_watcher_info_->topology_name); zkclient_->Exists(path, [this]() { this->TMasterLocationWatch(); }, [this](sp_int32 rc) { this->SetTMasterWatchCompletionHandler(rc); }); } void HeronZKStateMgr::SetMetricsCacheLocationWatchInternal() { CHECK(IsMetricsCacheWatchDefined()); LOG(INFO) << 
"Setting watch on metricscache location " << std::endl; std::string path = GetMetricsCacheLocationPath( metricscache_location_watcher_info_->topology_name); zkclient_->Exists(path, [this]() { this->MetricsCacheLocationWatch(); }, [this](sp_int32 rc) { this->SetMetricsCacheWatchCompletionHandler(rc); }); } void HeronZKStateMgr::SetPackingPlanWatchInternal() { CHECK(IsPackingPlanWatchDefined()); LOG(INFO) << "Setting watch on packing plan " << std::endl; std::string path = GetPackingPlanPath(packing_plan_watcher_info_->topology_name); zkclient_->Exists(path, [this]() { this->PackingPlanWatch(); }, [this](sp_int32 rc) { this->SetPackingPlanWatchCompletionHandler(rc); }); } void HeronZKStateMgr::TMasterLocationWatch() { // First setup watch again SetTMasterLocationWatchInternal(); // Then run the watcher tmaster_location_watcher_info_->watcher_cb(); } void HeronZKStateMgr::MetricsCacheLocationWatch() { // First setup watch again SetMetricsCacheLocationWatchInternal(); // Then run the watcher metricscache_location_watcher_info_->watcher_cb(); } void HeronZKStateMgr::PackingPlanWatch() { // First setup watch again SetPackingPlanWatchInternal(); // Then run the watcher packing_plan_watcher_info_->watcher_cb(); } } // namespace common } // namespace heron
mycFelix/heron
heron/statemgrs/src/cpp/statemgr/heron-zkstatemgr.cpp
C++
apache-2.0
35,286
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Query display // ============= var GLYPHICON_DEFAULT = {color: '#1edcff'}; var GLYPHICON_HIGHLIGHT = {color: '#999999'}; var STATE_COLOR_MAP = { QUEUED: '#1b8f72', RUNNING: '#19874e', PLANNING: '#674f98', FINISHED: '#1a4629', BLOCKED: '#61003b', USER_ERROR: '#9a7d66', USER_CANCELED: '#858959', INSUFFICIENT_RESOURCES: '#7f5b72', EXTERNAL_ERROR: '#ca7640', UNKNOWN_ERROR: '#943524' }; function getQueryStateColor(query) { switch (query.state) { case "QUEUED": return STATE_COLOR_MAP.QUEUED; case "PLANNING": return STATE_COLOR_MAP.PLANNING; case "STARTING": case "FINISHING": case "RUNNING": if (query.queryStats && query.queryStats.fullyBlocked) { return STATE_COLOR_MAP.BLOCKED; } return STATE_COLOR_MAP.RUNNING; case "FAILED": switch (query.errorType) { case "USER_ERROR": if (query.errorCode.name === 'USER_CANCELED') { return STATE_COLOR_MAP.USER_CANCELED; } return STATE_COLOR_MAP.USER_ERROR; case "EXTERNAL": return STATE_COLOR_MAP.EXTERNAL_ERROR; case "INSUFFICIENT_RESOURCES": return STATE_COLOR_MAP.INSUFFICIENT_RESOURCES; default: return STATE_COLOR_MAP.UNKNOWN_ERROR; } case "FINISHED": return STATE_COLOR_MAP.FINISHED; default: return STATE_COLOR_MAP.QUEUED; } } function getStageStateColor(stage) { switch (stage.state) { case "PLANNED": return STATE_COLOR_MAP.QUEUED; case "SCHEDULING": case "SCHEDULING_SPLITS": case "SCHEDULED": return STATE_COLOR_MAP.PLANNING; case "RUNNING": if (stage.stageStats && stage.stageStats.fullyBlocked) 
{ return STATE_COLOR_MAP.BLOCKED; } return STATE_COLOR_MAP.RUNNING; case "FINISHED": return STATE_COLOR_MAP.FINISHED; case "CANCELED": case "ABORTED": case "FAILED": return STATE_COLOR_MAP.UNKNOWN_ERROR; default: return "#b5b5b5" } } // This relies on the fact that BasicQueryInfo and QueryInfo have all the fields // necessary to compute this string, and that these fields are consistently named. function getHumanReadableState(query) { if (query.state == "RUNNING") { let title = "RUNNING"; if (query.scheduled && query.queryStats.totalDrivers > 0 && query.queryStats.runningDrivers >= 0) { if (query.queryStats.fullyBlocked) { title = "BLOCKED"; if (query.queryStats.blockedReasons && query.queryStats.blockedReasons.length > 0) { title += " (" + query.queryStats.blockedReasons.join(", ") + ")"; } } if (query.memoryPool === "reserved") { title += " (RESERVED)" } return title; } } if (query.state == "FAILED") { switch (query.errorType) { case "USER_ERROR": if (query.errorCode.name === "USER_CANCELED") { return "USER CANCELED"; } return "USER ERROR"; case "INTERNAL_ERROR": return "INTERNAL ERROR"; case "INSUFFICIENT_RESOURCES": return "INSUFFICIENT RESOURCES"; case "EXTERNAL": return "EXTERNAL ERROR"; } } return query.state; } function isProgressMeaningful(query) { return query.scheduled && query.state == "RUNNING" && query.queryStats.totalDrivers > 0 && query.queryStats.completedDrivers > 0; } function getProgressBarPercentage(query) { if (isProgressMeaningful(query)) { return Math.round((query.queryStats.completedDrivers * 100.0) / query.queryStats.totalDrivers); } // progress bars should appear 'full' when query progress is not meaningful return 100; } function getProgressBarTitle(query) { if (isProgressMeaningful(query)) { return getHumanReadableState(query) + " (" + getProgressBarPercentage(query) + "%)" } return getHumanReadableState(query) } function isQueryComplete(query) { return ["FINISHED", "FAILED", "CANCELED"].indexOf(query.state) > -1; } // Sparkline-related 
functions // =========================== // display at most 5 minutes worth of data on the sparklines var MAX_HISTORY = 60 * 5; // alpha param of exponentially weighted moving average. picked arbitrarily - lower values means more smoothness var MOVING_AVERAGE_ALPHA = 0.2; function addToHistory (value, valuesArray) { if (valuesArray.length == 0) { return valuesArray.concat([value]); } return valuesArray.concat([value]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0)); } function addExponentiallyWeightedToHistory (value, valuesArray) { if (valuesArray.length == 0) { return valuesArray.concat([value]); } var movingAverage = (value * MOVING_AVERAGE_ALPHA) + (valuesArray[valuesArray.length - 1] * (1 - MOVING_AVERAGE_ALPHA)); if (value < 1) { movingAverage = 0; } return valuesArray.concat([movingAverage]).slice(Math.max(valuesArray.length - MAX_HISTORY, 0)); } // DagreD3 Graph-related functions // =============================== function initializeGraph() { return new dagreD3.graphlib.Graph({compound: true}) .setGraph({rankdir: 'BT'}) .setDefaultEdgeLabel(function () { return {}; }); } function initializeSvg(selector) { const svg = d3.select(selector); svg.append("g"); return svg; } function computeSources(nodeInfo) { let sources = []; let remoteSources = []; // TODO: put remoteSources in node-specific section switch (nodeInfo['@type']) { case 'output': case 'explainAnalyze': case 'project': case 'filter': case 'aggregation': case 'sort': case 'markDistinct': case 'window': case 'rowNumber': case 'topnRowNumber': case 'limit': case 'distinctlimit': case 'topn': case 'sample': case 'tablewriter': case 'delete': case 'metadatadelete': case 'tablecommit': case 'groupid': case 'unnest': case 'scalar': sources = [nodeInfo.source]; break; case 'join': sources = [nodeInfo.left, nodeInfo.right]; break; case 'semijoin': sources = [nodeInfo.source, nodeInfo.filteringSource]; break; case 'indexjoin': sources = [nodeInfo.probeSource, nodeInfo.filterSource]; break; case 'union': 
case 'exchange': sources = nodeInfo.sources; break; case 'remoteSource': remoteSources = nodeInfo.sourceFragmentIds; break; case 'tablescan': case 'values': case 'indexsource': break; default: console.log("NOTE: Unhandled PlanNode: " + nodeInfo['@type']); } return [sources, remoteSources]; } // Utility functions // ================= function truncateString(inputString, length) { if (inputString && inputString.length > length) { return inputString.substring(0, length) + "..."; } return inputString; } function getStageId(stageId) { return stageId.slice(stageId.indexOf('.') + 1, stageId.length) } function getTaskIdSuffix(taskId) { return taskId.slice(taskId.indexOf('.') + 1, taskId.length) } function getTaskIdInStage(taskId) { return Number.parseInt(getTaskIdSuffix(getTaskIdSuffix(taskId))); } function formatState(state, fullyBlocked) { if (fullyBlocked && state == "RUNNING") { return "BLOCKED"; } else { return state; } } function getHostname(url) { var hostname = new URL(url).hostname; if ((hostname.charAt(0) == '[') && (hostname.charAt(hostname.length - 1) == ']')) { hostname = hostname.substr(1, hostname.length - 2); } return hostname; } function getPort(url) { return new URL(url).port; } function getHostAndPort(url) { var url = new URL(url); return url.hostname + ":" + url.port; } function computeRate(count, ms) { if (ms == 0) { return 0; } return (count / ms) * 1000.0; } function precisionRound(n) { if (n < 10) { return n.toFixed(2); } if (n < 100) { return n.toFixed(1); } return Math.round(n); } function formatDuration(duration) { var unit = "ms"; if (duration > 1000) { duration /= 1000; unit = "s"; } if (unit == "s" && duration > 60) { duration /= 60; unit = "m"; } if (unit == "m" && duration > 60) { duration /= 60; unit = "h"; } if (unit == "h" && duration > 24) { duration /= 24; unit = "d"; } if (unit == "d" && duration > 7) { duration /= 7; unit = "w"; } return precisionRound(duration) + unit; } function formatCount(count) { var unit = ""; if (count > 1000) 
{ count /= 1000; unit = "K"; } if (count > 1000) { count /= 1000; unit = "M"; } if (count > 1000) { count /= 1000; unit = "B"; } if (count > 1000) { count /= 1000; unit = "T"; } if (count > 1000) { count /= 1000; unit = "Q"; } return precisionRound(count) + unit; } function formatDataSizeBytes(size) { return formatDataSizeMinUnit(size, ""); } function formatDataSize(size) { return formatDataSizeMinUnit(size, "B"); } function formatDataSizeMinUnit(size, minUnit) { var unit = minUnit; if (size == 0) { return "0" + unit; } if (size >= 1024) { size /= 1024; unit = "K" + minUnit; } if (size >= 1024) { size /= 1024; unit = "M" + minUnit; } if (size >= 1024) { size /= 1024; unit = "G" + minUnit; } if (size >= 1024) { size /= 1024; unit = "T" + minUnit; } if (size >= 1024) { size /= 1024; unit = "P" + minUnit; } return precisionRound(size) + unit; } function parseDataSize(value) { var DATA_SIZE_PATTERN = /^\s*(\d+(?:\.\d+)?)\s*([a-zA-Z]+)\s*$/ var match = DATA_SIZE_PATTERN.exec(value); if (match == null) { return null; } var number = parseFloat(match[1]); switch (match[2]) { case "B": return number; case "kB": return number * Math.pow(2, 10); case "MB": return number * Math.pow(2, 20); case "GB": return number * Math.pow(2, 30); case "TB": return number * Math.pow(2, 40); case "PB": return number * Math.pow(2, 50); default: return null; } } function parseDuration(value) { var DURATION_PATTERN = /^\s*(\d+(?:\.\d+)?)\s*([a-zA-Z]+)\s*$/ var match = DURATION_PATTERN.exec(value); if (match == null) { return null; } var number = parseFloat(match[1]); switch (match[2]) { case "ns": return number / 1000000.0; case "us": return number / 1000.0; case "ms": return number; case "s": return number * 1000; case "m": return number * 1000 * 60; case "h": return number * 1000 * 60 * 60; case "d": return number * 1000 * 60 * 60 * 24; default: return null; } } function formatStackTrace(info) { return doFormatStackTrace(info, [], "", ""); } function doFormatStackTrace(info, parentStack, 
prefix, linePrefix) { var s = linePrefix + prefix + failureInfoToString(info) + "\n"; if (info.stack != null) { var sharedStackFrames = 0; if (parentStack != null) { sharedStackFrames = countSharedStackFrames(info.stack, parentStack); } for (var i = 0; i < info.stack.length - sharedStackFrames; i++) { s += linePrefix + "\tat " + info.stack[i] + "\n"; } if (sharedStackFrames !== 0) { s += linePrefix + "\t... " + sharedStackFrames + " more" + "\n"; } } if (info.suppressed != null) { for (var i = 0; i < info.suppressed.length; i++) { s += doFormatStackTrace(info.suppressed[i], info.stack, "Suppressed: ", linePrefix + "\t"); } } if (info.cause != null) { s += doFormatStackTrace(info.cause, info.stack, "Caused by: ", linePrefix); } return s; } function countSharedStackFrames(stack, parentStack) { var n = 0; var minStackLength = Math.min(stack.length, parentStack.length); while (n < minStackLength && stack[stack.length - 1 - n] === parentStack[parentStack.length - 1 - n]) { n++; } return n; } function failureInfoToString(t) { return (t.message != null) ? (t.type + ": " + t.message) : t.type; } function formatShortTime(date) { var hours = (date.getHours() % 12) || 12; var minutes = (date.getMinutes() < 10 ? "0" : "") + date.getMinutes(); return hours + ":" + minutes + (date.getHours() >= 12 ? "pm" : "am"); } function formatShortDateTime(date) { var year = date.getFullYear(); var month = "" + (date.getMonth() + 1); var dayOfMonth = "" + date.getDate(); return year + "-" + (month[1] ? month : "0" + month[0]) + "-" + (dayOfMonth[1] ? dayOfMonth: "0" + dayOfMonth[0]) + " " + formatShortTime(date); } function removeQueryId(id) { var pos = id.indexOf('.'); if (pos != -1) { return id.substring(pos + 1); } return id; }
Jimexist/presto
presto-main/src/main/resources/webapp/assets/utils.js
JavaScript
apache-2.0
14,579
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Datasets with fake data for testing purposes.
"""

import logging

import numpy as np

from neon.datasets.dataset import Dataset
from neon.util.compat import range

logger = logging.getLogger(__name__)


class UniformRandom(Dataset):
    """
    Sets up a synthetic uniformly random dataset.

    Attributes:
        inputs (dict): structure housing the loaded train/test/validation
                       input data
        targets (dict): structure housing the loaded train/test/validation
                        target data
    """

    def __init__(self, ntrain, ntest, nin, nout, **kwargs):
        self.__dict__.update(kwargs)
        self.ntrain = ntrain
        self.ntest = ntest
        self.nin = nin
        self.nout = nout
        self.macro_batched = False
        # Deterministic data generation for reproducible tests.
        np.random.seed(0)

    def load_data(self, shape):
        """
        Generate uniformly random inputs of the given shape and random
        integer class labels, expanded to a one-hot encoding.

        Returns:
            tuple: (data, onehot) arrays.
        """
        data = np.random.uniform(low=0.0, high=1.0, size=shape)
        labels = np.random.randint(low=0, high=self.nout, size=shape[0])
        onehot = np.zeros((len(labels), self.nout), dtype='float32')
        for col in range(self.nout):
            onehot[:, col] = (labels == col)
        return (data, onehot)

    def load(self, backend=None, experiment=None):
        self.inputs['train'], self.targets['train'] = (
            self.load_data((self.ntrain, self.nin)))
        self.inputs['test'], self.targets['test'] = (
            self.load_data((self.ntest, self.nin)))
        self.format()


class ToyImages(Dataset):
    """
    Sets up a synthetic image classification dataset (circles vs ellipses).

    Attributes:
        inputs (dict): structure housing the loaded train/test/validation
                       input data
        targets (dict): structure housing the loaded train/test/validation
                        target data
    """

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
        self.macro_batched = False
        self.ntrain = 128
        self.ntest = 128
        self.ifmheight = 32
        self.ifmwidth = self.ifmheight
        # Floor division keeps these ints on Python 3 as well; true division
        # would yield floats and break np.random.randint / array indexing.
        self.maxrad = self.ifmwidth // 2
        self.minrad = self.ifmwidth // 8
        self.nifm = 3
        self.nin = self.nifm * self.ifmheight * self.ifmwidth
        self.nout = 2
        assert self.ifmheight % 2 == 0
        assert self.ifmwidth % 2 == 0
        self.center = (self.ifmwidth // 2, self.ifmheight // 2)
        # Deterministic data generation for reproducible tests.
        np.random.seed(0)

    def ellipse(self, canvas, xrad, yrad):
        """
        Draw an axis-aligned ellipse with the given radii, centered on the
        feature map, using a single random intensity across all channels.
        """
        rcanvas = canvas.reshape((self.nifm, self.ifmheight, self.ifmwidth))
        # Oversample the angle sweep so the outline has no gaps.
        smooth = 10
        angs = np.linspace(0, 2 * np.pi, smooth * 360)
        si = np.sin(angs)
        co = np.cos(angs)
        xvals = np.int32(xrad * co) + self.center[0]
        yvals = np.int32(yrad * si) + self.center[1]
        for fm in range(self.nifm):
            rcanvas[fm, xvals, yvals] = np.random.randint(256)

    def circle(self, canvas, rad):
        # A circle is an ellipse with equal radii.
        self.ellipse(canvas, rad, rad)

    def load_data(self, shape):
        """
        Generate images: the first half of the rows are circles (label 0),
        the second half proper ellipses (label 1).

        Returns:
            tuple: (data, onehot) arrays, with data scaled to [0, 1].
        """
        data = np.zeros(shape, dtype='float32')
        labels = np.zeros(shape[0], dtype='float32')
        # Floor division keeps the split point an int under Python 3
        # (range() rejects floats).
        ncircles = shape[0] // 2
        for row in range(0, ncircles):
            # Make circles.
            rad = np.random.randint(self.minrad, self.maxrad)
            self.circle(data[row], rad)
        for row in range(ncircles, shape[0]):
            # Make ellipses, rejecting degenerate (circular) radius pairs.
            while True:
                xrad, yrad = np.random.randint(self.minrad, self.maxrad, 2)
                if xrad != yrad:
                    break
            self.ellipse(data[row], xrad, yrad)
            labels[row] = 1
        data /= 255
        onehot = np.zeros((len(labels), self.nout), dtype='float32')
        for col in range(self.nout):
            onehot[:, col] = (labels == col)
        return (data, onehot)

    def load(self, backend=None, experiment=None):
        # Generate everything at once, then shuffle rows into train/test.
        ntotal = self.ntrain + self.ntest
        inds = np.arange(ntotal)
        np.random.shuffle(inds)
        data, targets = self.load_data((ntotal, self.nin))
        self.inputs['train'] = data[inds[:self.ntrain]]
        self.targets['train'] = targets[inds[:self.ntrain]]
        self.inputs['test'] = data[inds[self.ntrain:]]
        self.targets['test'] = targets[inds[self.ntrain:]]
        self.format()
ml-lab/neon
neon/datasets/synthetic.py
Python
apache-2.0
4,940
// ----------------------------------------------------------------------------------
//
// Copyright Microsoft Corporation
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------------

using System;
using System.Collections.Generic;
using Microsoft.Azure.Commands.RecoveryServices.Backup.Helpers;
using Microsoft.Azure.Management.RecoveryServices.Backup.Models;
using Microsoft.Rest.Azure.OData;
using RestAzureNS = Microsoft.Rest.Azure;

namespace Microsoft.Azure.Commands.RecoveryServices.Backup.Cmdlets.ServiceClientAdapterNS
{
    public partial class ServiceClientAdapter
    {
        /// <summary>
        /// Fetches protection containers in the vault according to the query params
        /// </summary>
        /// <param name="queryFilter">Query parameters</param>
        /// <param name="skipToken">Skip token for pagination</param>
        /// <returns>List of protection containers</returns>
        public IEnumerable<ProtectionContainerResource> ListContainers(
            ODataQuery<BMSContainerQueryObject> queryFilter,
            string skipToken = default(string))
        {
            // First page of results.
            Func<RestAzureNS.IPage<ProtectionContainerResource>> fetchFirstPage =
                () => BmsAdapter.Client.BackupProtectionContainers.ListWithHttpMessagesAsync(
                    BmsAdapter.GetResourceName(),
                    BmsAdapter.GetResourceGroupName(),
                    queryFilter,
                    cancellationToken: BmsAdapter.CmdletCancellationToken).Result.Body;

            // Subsequent pages, fetched by next-page link.
            Func<string, RestAzureNS.IPage<ProtectionContainerResource>> fetchNextPage =
                nextLink => BmsAdapter.Client.BackupProtectionContainers.ListNextWithHttpMessagesAsync(
                    nextLink,
                    cancellationToken: BmsAdapter.CmdletCancellationToken).Result.Body;

            return HelperUtils.GetPagedList(fetchFirstPage, fetchNextPage);
        }

        /// <summary>
        /// Fetches backup engines in the vault according to the query params
        /// </summary>
        /// <param name="queryParams">Query parameters</param>
        /// <returns>List of backup engines</returns>
        public IEnumerable<BackupEngineBaseResource> ListBackupEngines(
            ODataQuery<BMSBackupEnginesQueryObject> queryParams)
        {
            // Page size requested from the service.
            queryParams.Top = 200;

            Func<RestAzureNS.IPage<BackupEngineBaseResource>> fetchFirstPage =
                () => BmsAdapter.Client.BackupEngines.ListWithHttpMessagesAsync(
                    BmsAdapter.GetResourceName(),
                    BmsAdapter.GetResourceGroupName(),
                    queryParams,
                    cancellationToken: BmsAdapter.CmdletCancellationToken).Result.Body;

            Func<string, RestAzureNS.IPage<BackupEngineBaseResource>> fetchNextPage =
                nextLink => BmsAdapter.Client.BackupEngines.ListNextWithHttpMessagesAsync(
                    nextLink,
                    cancellationToken: BmsAdapter.CmdletCancellationToken).Result.Body;

            return HelperUtils.GetPagedList(fetchFirstPage, fetchNextPage);
        }

        /// <summary>
        /// Triggers refresh of container catalog in service
        /// </summary>
        /// <returns>Response of the job created in the service</returns>
        public RestAzureNS.AzureOperationResponse RefreshContainers()
        {
            string resourceName = BmsAdapter.GetResourceName();
            string resourceGroupName = BmsAdapter.GetResourceGroupName();

            return BmsAdapter.Client.ProtectionContainers.RefreshWithHttpMessagesAsync(
                resourceName,
                resourceGroupName,
                AzureFabricName,
                cancellationToken: BmsAdapter.CmdletCancellationToken).Result;
        }

        /// <summary>
        /// Triggers unregister of a container in service
        /// </summary>
        /// <param name="containerName">Name of the container to unregister</param>
        public RestAzureNS.AzureOperationResponse UnregisterContainers(string containerName)
        {
            string resourceName = BmsAdapter.GetResourceName();
            string resourceGroupName = BmsAdapter.GetResourceGroupName();

            return RSAdapter.Client.RegisteredIdentities.DeleteWithHttpMessagesAsync(
                resourceGroupName,
                resourceName,
                containerName,
                cancellationToken: BmsAdapter.CmdletCancellationToken).Result;
        }
    }
}
devigned/azure-powershell
src/ResourceManager/RecoveryServices.Backup/Commands.RecoveryServices.Backup.ServiceClientAdapter/BMSAPIs/ContainerAPIs.cs
C#
apache-2.0
5,146
/* ************************************************************************ * Copyright 2013 Advanced Micro Devices, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ************************************************************************/ #include <stdio.h> #include <string.h> #include <clBLAS.h> #include <devinfo.h> #include "clblas-internal.h" #include "solution_seq.h" #include <functor_xscal.h> static clblasStatus doScal( CLBlasKargs *kargs, size_t N, cl_mem X, size_t offx, int incx, cl_uint numCommandQueues, cl_command_queue *commandQueues, cl_uint numEventsInWaitList, const cl_event *eventWaitList, cl_event *events) { cl_int err; ListHead seq; clblasStatus retCode = clblasSuccess; if (!clblasInitialized) { return clblasNotInitialized; } /* Validate arguments */ retCode = checkMemObjects(X, X, X, false, X_VEC_ERRSET, X_VEC_ERRSET, X_VEC_ERRSET ); if (retCode) { printf("Invalid mem object..\n"); return retCode; } // Check wheather enough memory was allocated if ((retCode = checkVectorSizes(kargs->dtype, N, X, offx, incx, X_VEC_ERRSET))) { printf("Invalid Size for X\n"); return retCode; } /////////////////////////////////////////////////////////////// if ((commandQueues == NULL) || (numCommandQueues == 0)) { return clblasInvalidValue; } /* numCommandQueues will be hardcoded to 1 as of now. 
No multi-gpu support */ numCommandQueues = 1; if (commandQueues[0] == NULL) { return clblasInvalidCommandQueue; } if ((numEventsInWaitList !=0) && (eventWaitList == NULL)) { return clblasInvalidEventWaitList; } kargs->N = N; kargs->A = X; kargs->offBX = offx; kargs->ldb.Vector = incx; // Will be using this as incx if(incx < 0) { // According to Netlib - return for negative incx return clblasSuccess; } listInitHead(&seq); err = makeSolutionSeq(CLBLAS_SCAL, kargs, numCommandQueues, commandQueues, numEventsInWaitList, eventWaitList, events, &seq); if (err == CL_SUCCESS) { err = executeSolutionSeq(&seq); } freeSolutionSeq(&seq); return (clblasStatus)err; } // ================================================================================= // // class clblasSscalFunctorFallback // // ================================================================================= static clblasSscalFunctorFallback sscal_fallback; clblasStatus clblasSscalFunctorFallback::execute(Args & args) { CLBlasKargs kargs; memset(&kargs, 0, sizeof(kargs)); kargs.dtype = TYPE_FLOAT; kargs.alpha.argFloat = args.alpha; return doScal(&kargs, args.N, args.X, args.offx, args.incx, 1, &args.queue, args.numEventsInWaitList, args.eventWaitList, args.events); } clblasSscalFunctorFallback * clblasSscalFunctorFallback::provide () { static clblasSscalFunctorFallback sscal_fallback; return & sscal_fallback; } void clblasSscalFunctorFallback::retain() { // clblasSscalFunctorFallback has a single global instance // and shall never be freed } void clblasSscalFunctorFallback::release() { // clblasDscalFunctorFallback has a single global instance // and shall never be freed } // ================================================================================= // // class clblasDscalFunctorFallback // // ================================================================================= static clblasDscalFunctorFallback dscal_fallback; clblasStatus clblasDscalFunctorFallback::execute(Args & args) { CLBlasKargs kargs; 
memset(&kargs, 0, sizeof(kargs)); kargs.dtype = TYPE_DOUBLE; kargs.alpha.argDouble = args.alpha; return doScal(&kargs, args.N, args.X, args.offx, args.incx, 1, &args.queue, args.numEventsInWaitList, args.eventWaitList, args.events); } clblasDscalFunctorFallback * clblasDscalFunctorFallback::provide () { static clblasDscalFunctorFallback dscal_fallback; return & dscal_fallback; } void clblasDscalFunctorFallback::retain() { // clblasDscalFunctorFallback has a single global instance // and shall never be freed } void clblasDscalFunctorFallback::release() { // clblasDscalFunctorFallback has a single global instance // and shall never be freed } // ================================================================================= // // class clblasCscalFunctorFallback // // ================================================================================= static clblasCscalFunctorFallback cscal_fallback; clblasStatus clblasCscalFunctorFallback::execute(Args & args) { CLBlasKargs kargs; memset(&kargs, 0, sizeof(kargs)); kargs.dtype = TYPE_COMPLEX_FLOAT; kargs.alpha.argFloatComplex = args.alpha; return doScal(&kargs, args.N, args.X, args.offx, args.incx, 1, &args.queue, args.numEventsInWaitList, args.eventWaitList, args.events); } clblasCscalFunctorFallback * clblasCscalFunctorFallback::provide () { static clblasCscalFunctorFallback cscal_fallback; return & cscal_fallback; } void clblasCscalFunctorFallback::retain() { // clblasCscalFunctorFallback has a single global instance // and shall never be freed } void clblasCscalFunctorFallback::release() { // clblasCscalFunctorFallback has a single global instance // and shall never be freed } // ================================================================================= // // class clblasZscalFunctorFallback // // ================================================================================= static clblasZscalFunctorFallback zscal_fallback; clblasStatus clblasZscalFunctorFallback::execute(Args & args) { CLBlasKargs 
kargs; memset(&kargs, 0, sizeof(kargs)); kargs.dtype = TYPE_COMPLEX_DOUBLE; kargs.alpha.argDoubleComplex = args.alpha; return doScal(&kargs, args.N, args.X, args.offx, args.incx, 1, &args.queue, args.numEventsInWaitList, args.eventWaitList, args.events); } clblasZscalFunctorFallback * clblasZscalFunctorFallback::provide () { static clblasZscalFunctorFallback zscal_fallback; return & zscal_fallback; } void clblasZscalFunctorFallback::retain() { // clblasZscalFunctorFallback has a single global instance // and shall never be freed } void clblasZscalFunctorFallback::release() { // clblasZscalFunctorFallback has a single global instance // and shall never be freed } // ================================================================================= // // class clblasCsscalFunctorFallback // // ================================================================================= static clblasCsscalFunctorFallback csscal_fallback; clblasStatus clblasCsscalFunctorFallback::execute(Args & args) { CLBlasKargs kargs; FloatComplex fAlpha; CREAL(fAlpha) = args.alpha; CIMAG(fAlpha) = 0.0f; memset(&kargs, 0, sizeof(kargs)); kargs.alpha.argFloatComplex = fAlpha; kargs.dtype = TYPE_COMPLEX_FLOAT; return doScal(&kargs, args.N, args.X, args.offx, args.incx, 1, &args.queue, args.numEventsInWaitList, args.eventWaitList, args.events); } clblasCsscalFunctorFallback * clblasCsscalFunctorFallback::provide () { static clblasCsscalFunctorFallback csscal_fallback; return & csscal_fallback; } void clblasCsscalFunctorFallback::retain() { // clblasCsscalFunctorFallback has a single global instance // and shall never be freed } void clblasCsscalFunctorFallback::release() { // clblasCsscalFunctorFallback has a single global instance // and shall never be freed } // ================================================================================= // // class clblasZdscalFunctorFallback // // ================================================================================= static 
clblasZdscalFunctorFallback zdscal_fallback; clblasStatus clblasZdscalFunctorFallback::execute(Args & args) { CLBlasKargs kargs; DoubleComplex fAlpha; CREAL(fAlpha) = args.alpha; CIMAG(fAlpha) = 0.0f; memset(&kargs, 0, sizeof(kargs)); kargs.alpha.argDoubleComplex = fAlpha; kargs.dtype = TYPE_COMPLEX_DOUBLE; return doScal(&kargs, args.N, args.X, args.offx, args.incx, 1, &args.queue, args.numEventsInWaitList, args.eventWaitList, args.events); } clblasZdscalFunctorFallback * clblasZdscalFunctorFallback::provide () { static clblasZdscalFunctorFallback zdscal_fallback; return & zdscal_fallback; } void clblasZdscalFunctorFallback::retain() { // clblasZdscalFunctorFallback has a single global instance // and shall never be freed } void clblasZdscalFunctorFallback::release() { // clblasZdscalFunctorFallback has a single global instance // and shall never be freed }
kknox/clBLAS
src/library/blas/functor/functor_xscal.cc
C++
apache-2.0
10,020
//-----------------------------------------------------------------------
// <copyright file="RealMessageEnvelope.cs" company="Akka.NET Project">
//     Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com>
//     Copyright (C) 2013-2015 Akka.NET project <https://github.com/akkadotnet/akka.net>
// </copyright>
//-----------------------------------------------------------------------

using Akka.Actor;

namespace Akka.TestKit
{
    /// <summary>
    /// A <see cref="MessageEnvelope"/> that pairs an actual message with the
    /// <see cref="IActorRef"/> that sent it.
    /// </summary>
    public class RealMessageEnvelope : MessageEnvelope
    {
        private readonly object _message;
        private readonly IActorRef _sender;

        public RealMessageEnvelope(object message, IActorRef sender)
        {
            _message = message;
            _sender = sender;
        }

        /// <summary>The wrapped message.</summary>
        public override object Message
        {
            get { return _message; }
        }

        /// <summary>The sender of the wrapped message.</summary>
        public override IActorRef Sender
        {
            get { return _sender; }
        }

        public override string ToString()
        {
            return "<" + (Message ?? "null") + "> from " + (Sender ?? NoSender.Instance);
        }
    }
}
ashic/akka.net
src/core/Akka.TestKit/RealMessageEnvelope.cs
C#
apache-2.0
1,040
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. import { Data } from '../data.js'; import { Table } from '../table.js'; import { MAGIC } from './message.js'; import { Vector } from '../vector.js'; import { DataType, TypeMap } from '../type.js'; import { Schema, Field } from '../schema.js'; import { Message } from './metadata/message.js'; import * as metadata from './metadata/message.js'; import { FileBlock, Footer } from './metadata/file.js'; import { MessageHeader, MetadataVersion } from '../enum.js'; import { compareSchemas } from '../visitor/typecomparator.js'; import { WritableSink, AsyncByteQueue } from '../io/stream.js'; import { VectorAssembler } from '../visitor/vectorassembler.js'; import { JSONTypeAssembler } from '../visitor/jsontypeassembler.js'; import { JSONVectorAssembler } from '../visitor/jsonvectorassembler.js'; import { ArrayBufferViewInput, toUint8Array } from '../util/buffer.js'; import { RecordBatch, _InternalEmptyPlaceholderRecordBatch } from '../recordbatch.js'; import { Writable, ReadableInterop, ReadableDOMStreamOptions } from '../io/interfaces.js'; import { isPromise, isAsyncIterable, isWritableDOMStream, isWritableNodeStream, isIterable, isObject } from '../util/compat.js'; export interface 
RecordBatchStreamWriterOptions { /** * */ autoDestroy?: boolean; /** * A flag indicating whether the RecordBatchWriter should construct pre-0.15.0 * encapsulated IPC Messages, which reserves 4 bytes for the Message metadata * length instead of 8. * @see https://issues.apache.org/jira/browse/ARROW-6313 */ writeLegacyIpcFormat?: boolean; } export class RecordBatchWriter<T extends TypeMap = any> extends ReadableInterop<Uint8Array> implements Writable<RecordBatch<T>> { /** @nocollapse */ // @ts-ignore public static throughNode(options?: import('stream').DuplexOptions & { autoDestroy: boolean }): import('stream').Duplex { throw new Error(`"throughNode" not available in this environment`); } /** @nocollapse */ public static throughDOM<T extends TypeMap>( // @ts-ignore writableStrategy?: QueuingStrategy<RecordBatch<T>> & { autoDestroy: boolean }, // @ts-ignore readableStrategy?: { highWaterMark?: number; size?: any } ): { writable: WritableStream<Table<T> | RecordBatch<T>>; readable: ReadableStream<Uint8Array> } { throw new Error(`"throughDOM" not available in this environment`); } constructor(options?: RecordBatchStreamWriterOptions) { super(); isObject(options) || (options = { autoDestroy: true, writeLegacyIpcFormat: false }); this._autoDestroy = (typeof options.autoDestroy === 'boolean') ? options.autoDestroy : true; this._writeLegacyIpcFormat = (typeof options.writeLegacyIpcFormat === 'boolean') ? 
options.writeLegacyIpcFormat : false; } protected _position = 0; protected _started = false; protected _autoDestroy: boolean; protected _writeLegacyIpcFormat: boolean; // @ts-ignore protected _sink = new AsyncByteQueue(); protected _schema: Schema | null = null; protected _dictionaryBlocks: FileBlock[] = []; protected _recordBatchBlocks: FileBlock[] = []; protected _dictionaryDeltaOffsets = new Map<number, number>(); public toString(sync: true): string; public toString(sync?: false): Promise<string>; public toString(sync: any = false) { return this._sink.toString(sync) as Promise<string> | string; } public toUint8Array(sync: true): Uint8Array; public toUint8Array(sync?: false): Promise<Uint8Array>; public toUint8Array(sync: any = false) { return this._sink.toUint8Array(sync) as Promise<Uint8Array> | Uint8Array; } public writeAll(input: Table<T> | Iterable<RecordBatch<T>>): this; public writeAll(input: AsyncIterable<RecordBatch<T>>): Promise<this>; public writeAll(input: PromiseLike<AsyncIterable<RecordBatch<T>>>): Promise<this>; public writeAll(input: PromiseLike<Table<T> | Iterable<RecordBatch<T>>>): Promise<this>; public writeAll(input: PromiseLike<any> | Table<T> | Iterable<RecordBatch<T>> | AsyncIterable<RecordBatch<T>>) { if (isPromise<any>(input)) { return input.then((x) => this.writeAll(x)); } else if (isAsyncIterable<RecordBatch<T>>(input)) { return writeAllAsync(this, input); } return writeAll(this, <any>input); } public get closed() { return this._sink.closed; } public [Symbol.asyncIterator]() { return this._sink[Symbol.asyncIterator](); } public toDOMStream(options?: ReadableDOMStreamOptions) { return this._sink.toDOMStream(options); } public toNodeStream(options?: import('stream').ReadableOptions) { return this._sink.toNodeStream(options); } public close() { return this.reset()._sink.close(); } public abort(reason?: any) { return this.reset()._sink.abort(reason); } public finish() { this._autoDestroy ? 
this.close() : this.reset(this._sink, this._schema); return this; } public reset(sink: WritableSink<ArrayBufferViewInput> = this._sink, schema: Schema<T> | null = null) { if ((sink === this._sink) || (sink instanceof AsyncByteQueue)) { this._sink = sink as AsyncByteQueue; } else { this._sink = new AsyncByteQueue(); if (sink && isWritableDOMStream(sink)) { this.toDOMStream({ type: 'bytes' }).pipeTo(sink); } else if (sink && isWritableNodeStream(sink)) { this.toNodeStream({ objectMode: false }).pipe(sink); } } if (this._started && this._schema) { this._writeFooter(this._schema); } this._started = false; this._dictionaryBlocks = []; this._recordBatchBlocks = []; this._dictionaryDeltaOffsets = new Map(); if (!schema || !(compareSchemas(schema, this._schema))) { if (schema == null) { this._position = 0; this._schema = null; } else { this._started = true; this._schema = schema; this._writeSchema(schema); } } return this; } public write(payload?: Table<T> | RecordBatch<T> | Iterable<RecordBatch<T>> | null) { let schema: Schema<T> | null = null; if (!this._sink) { throw new Error(`RecordBatchWriter is closed`); } else if (payload == null) { return this.finish() && undefined; } else if (payload instanceof Table && !(schema = payload.schema)) { return this.finish() && undefined; } else if (payload instanceof RecordBatch && !(schema = payload.schema)) { return this.finish() && undefined; } if (schema && !compareSchemas(schema, this._schema)) { if (this._started && this._autoDestroy) { return this.close(); } this.reset(this._sink, schema); } if (payload instanceof RecordBatch) { if (!(payload instanceof _InternalEmptyPlaceholderRecordBatch)) { this._writeRecordBatch(payload); } } else if (payload instanceof Table) { this.writeAll(payload.batches); } else if (isIterable(payload)) { this.writeAll(payload); } } protected _writeMessage<T extends MessageHeader>(message: Message<T>, alignment = 8) { const a = alignment - 1; const buffer = Message.encode(message); const 
flatbufferSize = buffer.byteLength; const prefixSize = !this._writeLegacyIpcFormat ? 8 : 4; const alignedSize = (flatbufferSize + prefixSize + a) & ~a; const nPaddingBytes = alignedSize - flatbufferSize - prefixSize; if (message.headerType === MessageHeader.RecordBatch) { this._recordBatchBlocks.push(new FileBlock(alignedSize, message.bodyLength, this._position)); } else if (message.headerType === MessageHeader.DictionaryBatch) { this._dictionaryBlocks.push(new FileBlock(alignedSize, message.bodyLength, this._position)); } // If not in legacy pre-0.15.0 mode, write the stream continuation indicator if (!this._writeLegacyIpcFormat) { this._write(Int32Array.of(-1)); } // Write the flatbuffer size prefix including padding this._write(Int32Array.of(alignedSize - prefixSize)); // Write the flatbuffer if (flatbufferSize > 0) { this._write(buffer); } // Write any padding return this._writePadding(nPaddingBytes); } protected _write(chunk: ArrayBufferViewInput) { if (this._started) { const buffer = toUint8Array(chunk); if (buffer && buffer.byteLength > 0) { this._sink.write(buffer); this._position += buffer.byteLength; } } return this; } protected _writeSchema(schema: Schema<T>) { return this._writeMessage(Message.from(schema)); } // @ts-ignore protected _writeFooter(schema: Schema<T>) { // eos bytes return this._writeLegacyIpcFormat ? this._write(Int32Array.of(0)) : this._write(Int32Array.of(-1, 0)); } protected _writeMagic() { return this._write(MAGIC); } protected _writePadding(nBytes: number) { return nBytes > 0 ? 
this._write(new Uint8Array(nBytes)) : this; } protected _writeRecordBatch(batch: RecordBatch<T>) { const { byteLength, nodes, bufferRegions, buffers } = VectorAssembler.assemble(batch); const recordBatch = new metadata.RecordBatch(batch.numRows, nodes, bufferRegions); const message = Message.from(recordBatch, byteLength); return this ._writeDictionaries(batch) ._writeMessage(message) ._writeBodyBuffers(buffers); } protected _writeDictionaryBatch(dictionary: Data, id: number, isDelta = false) { this._dictionaryDeltaOffsets.set(id, dictionary.length + (this._dictionaryDeltaOffsets.get(id) || 0)); const { byteLength, nodes, bufferRegions, buffers } = VectorAssembler.assemble(new Vector([dictionary])); const recordBatch = new metadata.RecordBatch(dictionary.length, nodes, bufferRegions); const dictionaryBatch = new metadata.DictionaryBatch(recordBatch, id, isDelta); const message = Message.from(dictionaryBatch, byteLength); return this ._writeMessage(message) ._writeBodyBuffers(buffers); } protected _writeBodyBuffers(buffers: ArrayBufferView[]) { let buffer: ArrayBufferView; let size: number, padding: number; for (let i = -1, n = buffers.length; ++i < n;) { if ((buffer = buffers[i]) && (size = buffer.byteLength) > 0) { this._write(buffer); if ((padding = ((size + 7) & ~7) - size) > 0) { this._writePadding(padding); } } } return this; } protected _writeDictionaries(batch: RecordBatch<T>) { for (let [id, dictionary] of batch.dictionaries) { let offset = this._dictionaryDeltaOffsets.get(id) || 0; if (offset === 0 || (dictionary = dictionary?.slice(offset)).length > 0) { for (const data of dictionary.data) { this._writeDictionaryBatch(data, id, offset > 0); offset += data.length; } } } return this; } } /** @ignore */ export class RecordBatchStreamWriter<T extends TypeMap = any> extends RecordBatchWriter<T> { public static writeAll<T extends TypeMap = any>(input: Table<T> | Iterable<RecordBatch<T>>, options?: RecordBatchStreamWriterOptions): RecordBatchStreamWriter<T>; 
public static writeAll<T extends TypeMap = any>(input: AsyncIterable<RecordBatch<T>>, options?: RecordBatchStreamWriterOptions): Promise<RecordBatchStreamWriter<T>>; public static writeAll<T extends TypeMap = any>(input: PromiseLike<AsyncIterable<RecordBatch<T>>>, options?: RecordBatchStreamWriterOptions): Promise<RecordBatchStreamWriter<T>>; public static writeAll<T extends TypeMap = any>(input: PromiseLike<Table<T> | Iterable<RecordBatch<T>>>, options?: RecordBatchStreamWriterOptions): Promise<RecordBatchStreamWriter<T>>; /** @nocollapse */ public static writeAll<T extends TypeMap = any>(input: any, options?: RecordBatchStreamWriterOptions) { const writer = new RecordBatchStreamWriter<T>(options); if (isPromise<any>(input)) { return input.then((x) => writer.writeAll(x)); } else if (isAsyncIterable<RecordBatch<T>>(input)) { return writeAllAsync(writer, input); } return writeAll(writer, input); } } /** @ignore */ export class RecordBatchFileWriter<T extends TypeMap = any> extends RecordBatchWriter<T> { public static writeAll<T extends TypeMap = any>(input: Table<T> | Iterable<RecordBatch<T>>): RecordBatchFileWriter<T>; public static writeAll<T extends TypeMap = any>(input: AsyncIterable<RecordBatch<T>>): Promise<RecordBatchFileWriter<T>>; public static writeAll<T extends TypeMap = any>(input: PromiseLike<AsyncIterable<RecordBatch<T>>>): Promise<RecordBatchFileWriter<T>>; public static writeAll<T extends TypeMap = any>(input: PromiseLike<Table<T> | Iterable<RecordBatch<T>>>): Promise<RecordBatchFileWriter<T>>; /** @nocollapse */ public static writeAll<T extends TypeMap = any>(input: any) { const writer = new RecordBatchFileWriter<T>(); if (isPromise<any>(input)) { return input.then((x) => writer.writeAll(x)); } else if (isAsyncIterable<RecordBatch<T>>(input)) { return writeAllAsync(writer, input); } return writeAll(writer, input); } constructor() { super(); this._autoDestroy = true; } // @ts-ignore protected _writeSchema(schema: Schema<T>) { return 
this._writeMagic()._writePadding(2); } protected _writeFooter(schema: Schema<T>) { const buffer = Footer.encode(new Footer( schema, MetadataVersion.V4, this._recordBatchBlocks, this._dictionaryBlocks )); return super ._writeFooter(schema) // EOS bytes for sequential readers ._write(buffer) // Write the flatbuffer ._write(Int32Array.of(buffer.byteLength)) // then the footer size suffix ._writeMagic(); // then the magic suffix } } /** @ignore */ export class RecordBatchJSONWriter<T extends TypeMap = any> extends RecordBatchWriter<T> { public static writeAll<T extends TypeMap = any>(this: typeof RecordBatchWriter, input: Table<T> | Iterable<RecordBatch<T>>): RecordBatchJSONWriter<T>; // @ts-ignore public static writeAll<T extends TypeMap = any>(this: typeof RecordBatchWriter, input: AsyncIterable<RecordBatch<T>>): Promise<RecordBatchJSONWriter<T>>; public static writeAll<T extends TypeMap = any>(this: typeof RecordBatchWriter, input: PromiseLike<AsyncIterable<RecordBatch<T>>>): Promise<RecordBatchJSONWriter<T>>; public static writeAll<T extends TypeMap = any>(this: typeof RecordBatchWriter, input: PromiseLike<Table<T> | Iterable<RecordBatch<T>>>): Promise<RecordBatchJSONWriter<T>>; /** @nocollapse */ public static writeAll<T extends TypeMap = any>(this: typeof RecordBatchWriter, input: any) { return new RecordBatchJSONWriter<T>().writeAll(input as any); } private _recordBatches: RecordBatch[]; private _dictionaries: RecordBatch[]; constructor() { super(); this._autoDestroy = true; this._recordBatches = []; this._dictionaries = []; } protected _writeMessage() { return this; } // @ts-ignore protected _writeFooter(schema: Schema<T>) { return this; } protected _writeSchema(schema: Schema<T>) { return this._write(`{\n "schema": ${JSON.stringify({ fields: schema.fields.map(field => fieldToJSON(field)) }, null, 2)}`); } protected _writeDictionaries(batch: RecordBatch<T>) { if (batch.dictionaries.size > 0) { this._dictionaries.push(batch); } return this; } protected 
_writeDictionaryBatch(dictionary: Data, id: number, isDelta = false) { this._dictionaryDeltaOffsets.set(id, dictionary.length + (this._dictionaryDeltaOffsets.get(id) || 0)); this._write(this._dictionaryBlocks.length === 0 ? ` ` : `,\n `); this._write(`${dictionaryBatchToJSON(dictionary, id, isDelta)}`); this._dictionaryBlocks.push(new FileBlock(0, 0, 0)); return this; } protected _writeRecordBatch(batch: RecordBatch<T>) { this._writeDictionaries(batch); this._recordBatches.push(batch); return this; } public close() { if (this._dictionaries.length > 0) { this._write(`,\n "dictionaries": [\n`); for (const batch of this._dictionaries) { super._writeDictionaries(batch); } this._write(`\n ]`); } if (this._recordBatches.length > 0) { for (let i = -1, n = this._recordBatches.length; ++i < n;) { this._write(i === 0 ? `,\n "batches": [\n ` : `,\n `); this._write(`${recordBatchToJSON(this._recordBatches[i])}`); this._recordBatchBlocks.push(new FileBlock(0, 0, 0)); } this._write(`\n ]`); } if (this._schema) { this._write(`\n}`); } this._dictionaries = []; this._recordBatches = []; return super.close(); } } /** @ignore */ function writeAll<T extends TypeMap = any>(writer: RecordBatchWriter<T>, input: Table<T> | Iterable<RecordBatch<T>>) { let chunks = input as Iterable<RecordBatch<T>>; if (input instanceof Table) { chunks = input.batches; writer.reset(undefined, input.schema); } for (const batch of chunks) { writer.write(batch); } return writer.finish(); } /** @ignore */ async function writeAllAsync<T extends TypeMap = any>(writer: RecordBatchWriter<T>, batches: AsyncIterable<RecordBatch<T>>) { for await (const batch of batches) { writer.write(batch); } return writer.finish(); } /** @ignore */ function fieldToJSON({ name, type, nullable }: Field): Record<string, unknown> { const assembler = new JSONTypeAssembler(); return { 'name': name, 'nullable': nullable, 'type': assembler.visit(type), 'children': (type.children || []).map((field: any) => fieldToJSON(field)), 'dictionary': 
!DataType.isDictionary(type) ? undefined : { 'id': type.id, 'isOrdered': type.isOrdered, 'indexType': assembler.visit(type.indices) } }; } /** @ignore */ function dictionaryBatchToJSON(dictionary: Data, id: number, isDelta = false) { const [columns] = JSONVectorAssembler.assemble(new RecordBatch({ [id]: dictionary })); return JSON.stringify({ 'id': id, 'isDelta': isDelta, 'data': { 'count': dictionary.length, 'columns': columns } }, null, 2); } /** @ignore */ function recordBatchToJSON(records: RecordBatch) { const [columns] = JSONVectorAssembler.assemble(records); return JSON.stringify({ 'count': records.numRows, 'columns': columns }, null, 2); }
kou/arrow
js/src/ipc/writer.ts
TypeScript
apache-2.0
20,460
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.druid.indexing.overlord; import com.google.common.base.Optional; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; import com.google.inject.Inject; import org.apache.druid.concurrent.TaskThreadPriority; import org.apache.druid.guice.annotations.Self; import org.apache.druid.indexer.TaskLocation; import org.apache.druid.indexer.TaskStatus; import org.apache.druid.indexing.common.TaskToolbox; import org.apache.druid.indexing.common.TaskToolboxFactory; import org.apache.druid.indexing.common.config.TaskConfig; import org.apache.druid.indexing.common.task.Task; import org.apache.druid.indexing.overlord.autoscaling.ScalingStats; import org.apache.druid.java.util.common.DateTimes; import org.apache.druid.java.util.common.ISE; import org.apache.druid.java.util.common.Numbers; import org.apache.druid.java.util.common.Pair; import org.apache.druid.java.util.common.concurrent.Execs; import org.apache.druid.java.util.common.lifecycle.LifecycleStart; import 
org.apache.druid.java.util.common.lifecycle.LifecycleStop; import org.apache.druid.java.util.emitter.EmittingLogger; import org.apache.druid.java.util.emitter.service.ServiceEmitter; import org.apache.druid.java.util.emitter.service.ServiceMetricEvent; import org.apache.druid.query.NoopQueryRunner; import org.apache.druid.query.Query; import org.apache.druid.query.QueryRunner; import org.apache.druid.query.QuerySegmentWalker; import org.apache.druid.query.SegmentDescriptor; import org.apache.druid.query.planning.DataSourceAnalysis; import org.apache.druid.server.DruidNode; import org.apache.druid.server.SetAndVerifyContextQueryRunner; import org.apache.druid.server.initialization.ServerConfig; import org.joda.time.Interval; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.concurrent.Callable; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; /** * Runs a single task in a JVM thread using an ExecutorService. */ public class SingleTaskBackgroundRunner implements TaskRunner, QuerySegmentWalker { private static final EmittingLogger log = new EmittingLogger(SingleTaskBackgroundRunner.class); private final TaskToolboxFactory toolboxFactory; private final TaskConfig taskConfig; private final ServiceEmitter emitter; private final TaskLocation location; private final ServerConfig serverConfig; // Currently any listeners are registered in peons, but they might be used in the future. 
private final CopyOnWriteArrayList<Pair<TaskRunnerListener, Executor>> listeners = new CopyOnWriteArrayList<>(); private volatile ListeningExecutorService executorService; private volatile SingleTaskBackgroundRunnerWorkItem runningItem; private volatile boolean stopping; @Inject public SingleTaskBackgroundRunner( TaskToolboxFactory toolboxFactory, TaskConfig taskConfig, ServiceEmitter emitter, @Self DruidNode node, ServerConfig serverConfig ) { this.toolboxFactory = Preconditions.checkNotNull(toolboxFactory, "toolboxFactory"); this.taskConfig = taskConfig; this.emitter = Preconditions.checkNotNull(emitter, "emitter"); this.location = TaskLocation.create(node.getHost(), node.getPlaintextPort(), node.getTlsPort()); this.serverConfig = serverConfig; } @Override public List<Pair<Task, ListenableFuture<TaskStatus>>> restore() { return Collections.emptyList(); } @Override public void registerListener(TaskRunnerListener listener, Executor executor) { for (Pair<TaskRunnerListener, Executor> pair : listeners) { if (pair.lhs.getListenerId().equals(listener.getListenerId())) { throw new ISE("Listener [%s] already registered", listener.getListenerId()); } } final Pair<TaskRunnerListener, Executor> listenerPair = Pair.of(listener, executor); // Location never changes for an existing task, so it's ok to add the listener first and then issue bootstrap // callbacks without any special synchronization. 
listeners.add(listenerPair); log.info("Registered listener [%s]", listener.getListenerId()); if (runningItem != null) { TaskRunnerUtils.notifyLocationChanged( ImmutableList.of(listenerPair), runningItem.getTaskId(), runningItem.getLocation() ); } } @Override public void unregisterListener(String listenerId) { for (Pair<TaskRunnerListener, Executor> pair : listeners) { if (pair.lhs.getListenerId().equals(listenerId)) { listeners.remove(pair); log.info("Unregistered listener [%s]", listenerId); return; } } } private static ListeningExecutorService buildExecutorService(int priority) { return MoreExecutors.listeningDecorator( Execs.singleThreaded( "task-runner-%d-priority-" + priority, TaskThreadPriority.getThreadPriorityFromTaskPriority(priority) ) ); } @Override @LifecycleStart public void start() { // No state startup required } @Override @LifecycleStop public void stop() { stopping = true; if (executorService != null) { try { executorService.shutdown(); } catch (SecurityException ex) { log.error(ex, "I can't control my own threads!"); } } if (runningItem != null) { final Task task = runningItem.getTask(); final long start = System.currentTimeMillis(); final long elapsed; boolean error = false; // stopGracefully for resource cleaning log.info("Starting graceful shutdown of task[%s].", task.getId()); task.stopGracefully(taskConfig); if (taskConfig.isRestoreTasksOnRestart() && task.canRestore()) { try { final TaskStatus taskStatus = runningItem.getResult().get( new Interval(DateTimes.utc(start), taskConfig.getGracefulShutdownTimeout()).toDurationMillis(), TimeUnit.MILLISECONDS ); // Ignore status, it doesn't matter for graceful shutdowns. 
log.info( "Graceful shutdown of task[%s] finished in %,dms.", task.getId(), System.currentTimeMillis() - start ); TaskRunnerUtils.notifyStatusChanged(listeners, task.getId(), taskStatus); } catch (Exception e) { log.makeAlert(e, "Graceful task shutdown failed: %s", task.getDataSource()) .addData("taskId", task.getId()) .addData("dataSource", task.getDataSource()) .emit(); log.warn(e, "Graceful shutdown of task[%s] aborted with exception.", task.getId()); error = true; TaskRunnerUtils.notifyStatusChanged(listeners, task.getId(), TaskStatus.failure(task.getId())); } } else { TaskRunnerUtils.notifyStatusChanged(listeners, task.getId(), TaskStatus.failure(task.getId())); } elapsed = System.currentTimeMillis() - start; final ServiceMetricEvent.Builder metricBuilder = ServiceMetricEvent .builder() .setDimension("task", task.getId()) .setDimension("dataSource", task.getDataSource()) .setDimension("graceful", "true") // for backward compatibility .setDimension("error", String.valueOf(error)); emitter.emit(metricBuilder.build("task/interrupt/count", 1L)); emitter.emit(metricBuilder.build("task/interrupt/elapsed", elapsed)); } // Ok, now interrupt everything. if (executorService != null) { try { executorService.shutdownNow(); } catch (SecurityException ex) { log.error(ex, "I can't control my own threads!"); } } } @Override public ListenableFuture<TaskStatus> run(final Task task) { if (runningItem == null) { final TaskToolbox toolbox = toolboxFactory.build(task); final Object taskPriorityObj = task.getContextValue(TaskThreadPriority.CONTEXT_KEY); int taskPriority = 0; try { taskPriority = taskPriorityObj == null ? 
0 : Numbers.parseInt(taskPriorityObj); } catch (NumberFormatException e) { log.error(e, "Error parsing task priority [%s] for task [%s]", taskPriorityObj, task.getId()); } // Ensure an executor for that priority exists executorService = buildExecutorService(taskPriority); final ListenableFuture<TaskStatus> statusFuture = executorService.submit( new SingleTaskBackgroundRunnerCallable(task, location, toolbox) ); runningItem = new SingleTaskBackgroundRunnerWorkItem( task, location, statusFuture ); return statusFuture; } else { throw new ISE("Already running task[%s]", runningItem.getTask().getId()); } } /** * There might be a race between {@link #run(Task)} and this method, but it shouldn't happen in real applications * because this method is called only in unit tests. See TaskLifecycleTest. * * @param taskid task ID to clean up resources for */ @Override public void shutdown(final String taskid, String reason) { log.info("Shutdown [%s] because: [%s]", taskid, reason); if (runningItem != null && runningItem.getTask().getId().equals(taskid)) { runningItem.getResult().cancel(true); } } @Override public Collection<TaskRunnerWorkItem> getRunningTasks() { return runningItem == null ? Collections.emptyList() : Collections.singletonList(runningItem); } @Override public Collection<TaskRunnerWorkItem> getPendingTasks() { return Collections.emptyList(); } @Override public Collection<TaskRunnerWorkItem> getKnownTasks() { return runningItem == null ? Collections.emptyList() : Collections.singletonList(runningItem); } @Override public TaskLocation getTaskLocation(String taskId) { return location; } @Override public Optional<ScalingStats> getScalingStats() { return Optional.absent(); } @Override public long getTotalTaskSlotCount() { return 1; } @Override public long getIdleTaskSlotCount() { return runningItem == null ? 1 : 0; } @Override public long getUsedTaskSlotCount() { return runningItem == null ? 
0 : 1; } @Override public long getLazyTaskSlotCount() { return 0; } @Override public long getBlacklistedTaskSlotCount() { return 0; } @Override public <T> QueryRunner<T> getQueryRunnerForIntervals(Query<T> query, Iterable<Interval> intervals) { return getQueryRunnerImpl(query); } @Override public <T> QueryRunner<T> getQueryRunnerForSegments(Query<T> query, Iterable<SegmentDescriptor> specs) { return getQueryRunnerImpl(query); } private <T> QueryRunner<T> getQueryRunnerImpl(Query<T> query) { QueryRunner<T> queryRunner = null; if (runningItem != null) { final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource()); final Task task = runningItem.getTask(); if (analysis.getBaseTableDataSource().isPresent() && task.getDataSource().equals(analysis.getBaseTableDataSource().get().getName())) { final QueryRunner<T> taskQueryRunner = task.getQueryRunner(query); if (taskQueryRunner != null) { queryRunner = taskQueryRunner; } } } return new SetAndVerifyContextQueryRunner<>( serverConfig, queryRunner == null ? 
new NoopQueryRunner<>() : queryRunner ); } private static class SingleTaskBackgroundRunnerWorkItem extends TaskRunnerWorkItem { private final Task task; private final TaskLocation location; private SingleTaskBackgroundRunnerWorkItem( Task task, TaskLocation location, ListenableFuture<TaskStatus> result ) { super(task.getId(), result); this.task = task; this.location = location; } public Task getTask() { return task; } @Override public TaskLocation getLocation() { return location; } @Override public String getTaskType() { return task.getType(); } @Override public String getDataSource() { return task.getDataSource(); } } private class SingleTaskBackgroundRunnerCallable implements Callable<TaskStatus> { private final Task task; private final TaskLocation location; private final TaskToolbox toolbox; SingleTaskBackgroundRunnerCallable(Task task, TaskLocation location, TaskToolbox toolbox) { this.task = task; this.location = location; this.toolbox = toolbox; } @Override public TaskStatus call() { final long startTime = System.currentTimeMillis(); TaskStatus status; try { log.info("Running task: %s", task.getId()); TaskRunnerUtils.notifyLocationChanged( listeners, task.getId(), location ); TaskRunnerUtils.notifyStatusChanged(listeners, task.getId(), TaskStatus.running(task.getId())); status = task.run(toolbox); } catch (InterruptedException e) { // Don't reset the interrupt flag of the thread, as we do want to continue to the end of this callable. if (stopping) { // Tasks may interrupt their own run threads to stop themselves gracefully; don't be too scary about this. log.debug(e, "Interrupted while running task[%s] during graceful shutdown.", task); } else { // Not stopping, this is definitely unexpected. 
log.warn(e, "Interrupted while running task[%s]", task); } status = TaskStatus.failure(task.getId(), e.toString()); } catch (Exception e) { log.error(e, "Exception while running task[%s]", task); status = TaskStatus.failure(task.getId(), e.toString()); } catch (Throwable t) { log.error(t, "Uncaught Throwable while running task[%s]", task); throw t; } status = status.withDuration(System.currentTimeMillis() - startTime); TaskRunnerUtils.notifyStatusChanged(listeners, task.getId(), status); return status; } } }
gianm/druid
indexing-service/src/main/java/org/apache/druid/indexing/overlord/SingleTaskBackgroundRunner.java
Java
apache-2.0
15,280
"""Support for getting information from Arduino pins.""" import logging import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.components import arduino from homeassistant.const import CONF_NAME from homeassistant.helpers.entity import Entity import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) CONF_PINS = "pins" CONF_TYPE = "analog" PIN_SCHEMA = vol.Schema({vol.Required(CONF_NAME): cv.string}) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Required(CONF_PINS): vol.Schema({cv.positive_int: PIN_SCHEMA})} ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Arduino platform.""" if arduino.BOARD is None: _LOGGER.error("A connection has not been made to the Arduino board") return False pins = config.get(CONF_PINS) sensors = [] for pinnum, pin in pins.items(): sensors.append(ArduinoSensor(pin.get(CONF_NAME), pinnum, CONF_TYPE)) add_entities(sensors) class ArduinoSensor(Entity): """Representation of an Arduino Sensor.""" def __init__(self, name, pin, pin_type): """Initialize the sensor.""" self._pin = pin self._name = name self.pin_type = pin_type self.direction = "in" self._value = None arduino.BOARD.set_mode(self._pin, self.direction, self.pin_type) @property def state(self): """Return the state of the sensor.""" return self._value @property def name(self): """Get the name of the sensor.""" return self._name def update(self): """Get the latest value from the pin.""" self._value = arduino.BOARD.get_analog_inputs()[self._pin][1]
fbradyirl/home-assistant
homeassistant/components/arduino/sensor.py
Python
apache-2.0
1,767
/* * Copyright 2014 JBoss Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.drools.workbench.screens.guided.dtree.client.widget.factories; import org.drools.workbench.models.guided.dtree.shared.model.nodes.ActionUpdateNode; import org.uberfire.ext.wires.core.api.factories.FactoryHelper; public class ActionUpdateFactoryHelper implements FactoryHelper<ActionUpdateNode> { private ActionUpdateNode context; private boolean isReadOnly; public ActionUpdateFactoryHelper( final ActionUpdateNode context, final boolean isReadOnly ) { this.context = context; this.isReadOnly = isReadOnly; } @Override public ActionUpdateNode getContext() { return this.context; } @Override public void setContext( final ActionUpdateNode context ) { this.context = context; } public boolean isReadOnly() { return this.isReadOnly; } }
sdgdsffdsfff/drools-wb
drools-wb-screens/drools-wb-guided-dtree-editor/drools-wb-guided-dtree-editor-client/src/main/java/org/drools/workbench/screens/guided/dtree/client/widget/factories/ActionUpdateFactoryHelper.java
Java
apache-2.0
1,475
package org.batfish.representation.cisco;

import com.google.common.collect.ImmutableSet;
import java.util.LinkedList;
import java.util.List;
import org.batfish.datamodel.acl.AclLineMatchExpr;
import org.batfish.datamodel.acl.OrMatchExpr;

/** A Cisco {@code object-group icmp-type}: a named collection of ICMP-type lines. */
public class IcmpTypeObjectGroup extends ObjectGroup {

  // Holds lines in configuration order. Never reassigned, so it is final;
  // callers mutate the list contents via getLines().
  private final List<IcmpTypeObjectGroupLine> _lines;

  public IcmpTypeObjectGroup(String name) {
    super(name);
    _lines = new LinkedList<>();
  }

  /** Returns the mutable list of lines; parsing code appends to it directly. */
  public List<IcmpTypeObjectGroupLine> getLines() {
    return _lines;
  }

  /**
   * Converts this group to an ACL match expression: the disjunction of every line's
   * match expression. Note an empty group produces an {@link OrMatchExpr} over an
   * empty set (which matches nothing).
   */
  public AclLineMatchExpr toAclLineMatchExpr() {
    return new OrMatchExpr(
        _lines.stream()
            .map(IcmpTypeObjectGroupLine::toAclLineMatchExpr)
            .collect(ImmutableSet.toImmutableSet()));
  }
}
dhalperi/batfish
projects/batfish/src/main/java/org/batfish/representation/cisco/IcmpTypeObjectGroup.java
Java
apache-2.0
743
package com.intellij.plugin.buck.actions; import com.intellij.icons.AllIcons; import com.intellij.openapi.actionSystem.AnActionEvent; import com.intellij.openapi.project.DumbAwareAction; import com.intellij.openapi.project.Project; import com.intellij.plugin.buck.build.BuckBuildCommandHandler; import com.intellij.plugin.buck.build.BuckBuildManager; import com.intellij.plugin.buck.build.BuckCommand; /** * Run buck uninstall command */ public class BuckUninstallAction extends DumbAwareAction { public static final String ACTION_TITLE = "Run buck uninstall"; public static final String ACTION_DESCRIPTION = "Run buck uninstall command"; public BuckUninstallAction() { super(ACTION_TITLE, ACTION_DESCRIPTION, AllIcons.Actions.Delete); } @Override public void update(AnActionEvent e) { Project project = e.getProject(); if (project != null) { e.getPresentation().setEnabled(!BuckBuildManager.getInstance(project).isBuilding()); } } @Override public void actionPerformed(AnActionEvent e) { BuckBuildManager buildManager = BuckBuildManager.getInstance(e.getProject()); String target = buildManager.getCurrentSavedTarget(e.getProject()); if (target == null) { buildManager.showNoTargetMessage(e.getProject()); return; } BuckBuildCommandHandler handler = new BuckBuildCommandHandler( e.getProject(), e.getProject().getBaseDir(), BuckCommand.UNINSTALL); handler.command().addParameter(target); buildManager.runBuckCommand(handler, ACTION_TITLE); } }
edelron/buck_idea_plugin
src/com/intellij/plugin/buck/actions/BuckUninstallAction.java
Java
apache-2.0
1,560
/** * Copyright 2016 The AMP HTML Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS-IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { ADSENSE_AMP_AUTO_ADS_RESPONSIVE_EXPERIMENT_NAME, AdSenseAmpAutoAdsResponsiveBranches, } from '../../../../ads/google/adsense-amp-auto-ads-responsive'; import {Services} from '../../../../src/services'; import { forceExperimentBranch, toggleExperiment, } from '../../../../src/experiments'; import {getAdNetworkConfig} from '../ad-network-config'; describes.realWin('ad-network-config', { amp: { canonicalUrl: 'https://foo.bar/baz', runtimeOn: true, ampdoc: 'single', }, }, env => { let ampAutoAdsElem; let document; beforeEach(() => { document = env.win.document; ampAutoAdsElem = document.createElement('amp-auto-ads'); env.win.document.body.appendChild(ampAutoAdsElem); }); afterEach(() => { env.win.document.body.removeChild(ampAutoAdsElem); }); describe('AdSense', () => { const AD_CLIENT = 'ca-pub-1234'; beforeEach(() => { ampAutoAdsElem.setAttribute('data-ad-client', AD_CLIENT); }); it('should generate the config fetch URL', () => { const adNetwork = getAdNetworkConfig('adsense', ampAutoAdsElem); expect(adNetwork.getConfigUrl()).to.equal( '//pagead2.googlesyndication.com/getconfig/ama?client=' + AD_CLIENT + '&plah=foo.bar&ama_t=amp&' + 'url=https%3A%2F%2Ffoo.bar%2Fbaz'); }); it('should report responsive-enabled when responsive experiment not on', () => { toggleExperiment( env.win, ADSENSE_AMP_AUTO_ADS_RESPONSIVE_EXPERIMENT_NAME, false); const adNetwork = 
getAdNetworkConfig('adsense', ampAutoAdsElem); expect(adNetwork.isResponsiveEnabled(env.win)).to.equal(true); }); it('should report responsive-enabled when responsive experiment on and ' + 'control branch picked', () => { forceExperimentBranch(env.win, ADSENSE_AMP_AUTO_ADS_RESPONSIVE_EXPERIMENT_NAME, AdSenseAmpAutoAdsResponsiveBranches.CONTROL); const adNetwork = getAdNetworkConfig('adsense', ampAutoAdsElem); expect(adNetwork.isResponsiveEnabled(env.win)).to.equal(true); }); it('should report responsive-disabled when responsive experiment on ' + 'and experiment branch picked', () => { forceExperimentBranch(env.win, ADSENSE_AMP_AUTO_ADS_RESPONSIVE_EXPERIMENT_NAME, AdSenseAmpAutoAdsResponsiveBranches.EXPERIMENT); const adNetwork = getAdNetworkConfig('adsense', ampAutoAdsElem); expect(adNetwork.isResponsiveEnabled(env.win)).to.equal(false); }); // TODO(bradfrizzell, #12476): Make this test work with sinon 4.0. it.skip('should truncate the URL if it\'s too long', () => { const adNetwork = getAdNetworkConfig('adsense', ampAutoAdsElem); const canonicalUrl = 'http://foo.bar/' + 'a'.repeat(4050) + 'shouldnt_be_included'; const docInfo = Services.documentInfoForDoc(ampAutoAdsElem); sandbox.stub(docInfo, 'canonicalUrl').callsFake(canonicalUrl); const url = adNetwork.getConfigUrl(); expect(url).to.contain('ama_t=amp'); expect(url).to.contain('url=http%3A%2F%2Ffoo.bar'); expect(url).not.to.contain('shouldnt_be_included'); }); it('should generate the attributes', () => { const adNetwork = getAdNetworkConfig('adsense', ampAutoAdsElem); expect(adNetwork.getAttributes()).to.deep.equal({ 'type': 'adsense', 'data-ad-client': 'ca-pub-1234', }); }); it('should get the default ad constraints', () => { const viewportMock = sandbox.mock(Services.viewportForDoc(env.win.document)); viewportMock.expects('getSize').returns( {width: 320, height: 500}).atLeast(1); const adNetwork = getAdNetworkConfig('adsense', ampAutoAdsElem); expect(adNetwork.getDefaultAdConstraints()).to.deep.equal({ 
initialMinSpacing: 500, subsequentMinSpacing: [ {adCount: 3, spacing: 1000}, {adCount: 6, spacing: 1500}, ], maxAdCount: 8, }); }); }); describe('Doubleclick', () => { const AD_LEGACY_CLIENT = 'ca-pub-1234'; const TARGETING_JSON = {'Categories': 'A'}; const EXPERIMENT_SETTINGS = {'width': 300, 'height': 250}; const AD_SLOT = '1234/example.com/SLOT_1'; beforeEach(() => { ampAutoAdsElem.setAttribute('data-ad-legacy-client', AD_LEGACY_CLIENT); ampAutoAdsElem.setAttribute('data-experiment', JSON.stringify(EXPERIMENT_SETTINGS)); ampAutoAdsElem.setAttribute('data-json', JSON.stringify(TARGETING_JSON)); ampAutoAdsElem.setAttribute('data-slot', AD_SLOT); }); it('should report enabled always', () => { const adNetwork = getAdNetworkConfig('doubleclick', ampAutoAdsElem); expect(adNetwork.isEnabled(env.win)).to.equal(true); }); it('should generate the config fetch URL', () => { const adNetwork = getAdNetworkConfig('doubleclick', ampAutoAdsElem); expect(adNetwork.getConfigUrl()).to.equal( '//pagead2.googlesyndication.com/getconfig/ama?client=' + AD_LEGACY_CLIENT + '&plah=foo.bar&ama_t=amp&' + 'url=https%3A%2F%2Ffoo.bar%2Fbaz'); }); // TODO(bradfrizzell, #12476): Make this test work with sinon 4.0. 
it.skip('should truncate the URL if it\'s too long', () => { const adNetwork = getAdNetworkConfig('doubleclick', ampAutoAdsElem); const canonicalUrl = 'http://foo.bar/' + 'a'.repeat(4050) + 'shouldnt_be_included'; const docInfo = Services.documentInfoForDoc(ampAutoAdsElem); sandbox.stub(docInfo, 'canonicalUrl').callsFake(canonicalUrl); const url = adNetwork.getConfigUrl(); expect(url).to.contain('ama_t=amp'); expect(url).to.contain('url=http%3A%2F%2Ffoo.bar'); expect(url).not.to.contain('shouldnt_be_included'); }); it('should generate the attributes', () => { const adNetwork = getAdNetworkConfig('doubleclick', ampAutoAdsElem); expect(adNetwork.getAttributes()).to.deep.equal({ 'type': 'doubleclick', 'json': JSON.stringify(TARGETING_JSON), 'data-slot': AD_SLOT, }); }); it('should get the default ad constraints', () => { const viewportMock = sandbox.mock(Services.viewportForDoc(env.win.document)); viewportMock.expects('getSize').returns( {width: 320, height: 500}).atLeast(1); const adNetwork = getAdNetworkConfig('doubleclick', ampAutoAdsElem); expect(adNetwork.getDefaultAdConstraints()).to.deep.equal({ initialMinSpacing: 500, subsequentMinSpacing: [ {adCount: 3, spacing: 1000}, {adCount: 6, spacing: 1500}, ], maxAdCount: 8, }); }); it('should not be responsive-enabled', () => { const adNetwork = getAdNetworkConfig('doubleclick', ampAutoAdsElem); expect(adNetwork.isResponsiveEnabled(env.win)).to.be.false; }); }); it('should return null for unknown type', () => { expect(getAdNetworkConfig('unknowntype', ampAutoAdsElem)).to.be.null; }); });
techhtml/amphtml
extensions/amp-auto-ads/0.1/test/test-ad-network-config.js
JavaScript
apache-2.0
7,709
# Copyright 2008-2010 Amazon.com, Inc. or its affiliates. All Rights Reserved.

module Amazon
  module Coral

    # Encodes a delegate identity as an HTTP delegation token and attaches it
    # to a request identity.
    class HttpDelegationHelper

      # Serializes +delegate_identity+ (a hash of attributes) into a
      # "key=value" list joined by ';' and stores it on +request_identity+
      # under :http_delegation.  When +delegate_identity+ is empty the
      # request identity is left untouched.
      def self.add_delegation_token(delegate_identity, request_identity)
        token = delegate_identity.map { |key, value| "#{key}=#{value}" }.join(';')
        request_identity[:http_delegation] = token unless token.empty?
      end

    end
  end
end
bizo/aws-tools
emr/elastic-mapreduce-ruby-20130708/amazon/coral/httpdelegationhelper.rb
Ruby
apache-2.0
578
/*
 * handler_interface.cpp - handler interface
 *
 * Copyright (c) 2014-2015 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Wind Yuan <feng.yuan@intel.com>
 */

#include "handler_interface.h"

namespace XCam {

AeHandler::AeHandler()
{
    reset_parameters ();
}

// Restore every AE parameter to its documented default.
// Called from the constructor so a freshly created handler starts from a
// fully initialized state (xcam_mem_clear guards against fields missed below).
void
AeHandler::reset_parameters ()
{
    // in case missing any parameters
    xcam_mem_clear (&_params);
    _params.mode = XCAM_AE_MODE_AUTO;
    _params.metering_mode = XCAM_AE_METERING_MODE_AUTO;
    _params.flicker_mode = XCAM_AE_FLICKER_MODE_AUTO;
    _params.speed = 1.0;
    _params.exposure_time_min = UINT64_C(0);
    _params.exposure_time_max = UINT64_C(0);
    _params.max_analog_gain = 0.0;
    _params.manual_exposure_time = UINT64_C (0);
    _params.manual_analog_gain = 0.0;
    _params.aperture_fn = 0.0;
    _params.ev_shift = 0.0;
    _params.window.x_start = 0;
    _params.window.y_start = 0;
    _params.window.x_end = 0;
    _params.window.y_end = 0;
    _params.window.weight = 0;
}

bool
AeHandler::set_mode (XCamAeMode mode)
{
    AnalyzerHandler::HanlderLock lock(this);
    _params.mode = mode;

    XCAM_LOG_DEBUG ("ae set mode [%d]", mode);
    return true;
}

bool
AeHandler::set_metering_mode (XCamAeMeteringMode mode)
{
    AnalyzerHandler::HanlderLock lock(this);
    _params.metering_mode = mode;

    XCAM_LOG_DEBUG ("ae set metering mode [%d]", mode);
    return true;
}

// Set the metering window; the struct is copied, the caller keeps ownership.
bool
AeHandler::set_window (XCam3AWindow *window)
{
    AnalyzerHandler::HanlderLock lock(this);
    _params.window = *window;

    XCAM_LOG_DEBUG ("ae set metering mode window [x:%d, y:%d, x_end:%d, y_end:%d, weight:%d]",
                    window->x_start, window->y_start, window->x_end, window->y_end, window->weight);
    return true;
}

bool
AeHandler::set_ev_shift (double ev_shift)
{
    AnalyzerHandler::HanlderLock lock(this);
    _params.ev_shift = ev_shift;

    XCAM_LOG_DEBUG ("ae set ev shift:%.03f", ev_shift);
    return true;
}

bool
AeHandler::set_speed (double speed)
{
    AnalyzerHandler::HanlderLock lock(this);
    _params.speed = speed;

    XCAM_LOG_DEBUG ("ae set speed:%.03f", speed);
    return true;
}

bool
AeHandler::set_flicker_mode (XCamFlickerMode flicker)
{
    AnalyzerHandler::HanlderLock lock(this);
    _params.flicker_mode = flicker;

    XCAM_LOG_DEBUG ("ae set flicker:%d", flicker);
    return true;
}

XCamFlickerMode
AeHandler::get_flicker_mode ()
{
    AnalyzerHandler::HanlderLock lock(this);
    return _params.flicker_mode;
}

// Returns the manual exposure time in microseconds, or -1 when AE is not in
// manual mode (in auto mode the current exposure is decided by the algorithm).
int64_t
AeHandler::get_current_exposure_time ()
{
    AnalyzerHandler::HanlderLock lock(this);

    if (_params.mode == XCAM_AE_MODE_MANUAL)
        return _params.manual_exposure_time;
    return INT64_C(-1);
}

// Returns the manual analog gain, or 0.0 when AE is not in manual mode.
double
AeHandler::get_current_analog_gain ()
{
    AnalyzerHandler::HanlderLock lock(this);

    if (_params.mode == XCAM_AE_MODE_MANUAL)
        return _params.manual_analog_gain;
    return 0.0;
}

bool
AeHandler::set_manual_exposure_time (int64_t time_in_us)
{
    AnalyzerHandler::HanlderLock lock(this);
    _params.manual_exposure_time = time_in_us;

    XCAM_LOG_DEBUG ("ae set manual exposure time: %lldus", time_in_us);
    return true;
}

bool
AeHandler::set_manual_analog_gain (double gain)
{
    AnalyzerHandler::HanlderLock lock(this);
    _params.manual_analog_gain = gain;

    XCAM_LOG_DEBUG ("ae set manual analog gain: %.03f", gain);
    return true;
}

bool
AeHandler::set_aperture (double fn)
{
    AnalyzerHandler::HanlderLock lock(this);
    _params.aperture_fn = fn;

    XCAM_LOG_DEBUG ("ae set aperture fn: %.03f", fn);
    return true;
}

bool
AeHandler::set_max_analog_gain (double max_gain)
{
    AnalyzerHandler::HanlderLock lock(this);
    _params.max_analog_gain = max_gain;

    XCAM_LOG_DEBUG ("ae set max analog_gain: %.03f", max_gain);
    return true;
}

double
AeHandler::get_max_analog_gain ()
{
    AnalyzerHandler::HanlderLock lock(this);
    return _params.max_analog_gain;
}

// Note: the range is stored as-is; min/max consistency is not validated here.
bool
AeHandler::set_exposure_time_range (int64_t min_time_in_us, int64_t max_time_in_us)
{
    AnalyzerHandler::HanlderLock lock(this);
    _params.exposure_time_min = min_time_in_us;
    _params.exposure_time_max = max_time_in_us;

    // fixed typo: "exposrue" -> "exposure"
    XCAM_LOG_DEBUG ("ae set exposure range[%lldus, %lldus]", min_time_in_us, max_time_in_us);
    return true;
}

// Both output pointers must be non-NULL (asserted).
bool
AeHandler::get_exposure_time_range (int64_t *min_time_in_us, int64_t *max_time_in_us)
{
    XCAM_ASSERT (min_time_in_us && max_time_in_us);

    AnalyzerHandler::HanlderLock lock(this);
    *min_time_in_us = _params.exposure_time_min;
    *max_time_in_us = _params.exposure_time_max;

    return true;
}

AwbHandler::AwbHandler()
{
    reset_parameters ();
}

// Restore every AWB parameter to its default.
void
AwbHandler::reset_parameters ()
{
    xcam_mem_clear (&_params);
    _params.mode = XCAM_AWB_MODE_AUTO;
    _params.speed = 1.0;
    _params.cct_min = 0;
    _params.cct_max = 0;
    _params.gr_gain = 0.0;
    _params.r_gain = 0.0;
    _params.b_gain = 0.0;
    _params.gb_gain = 0.0;
    _params.window.x_start = 0;
    _params.window.y_start = 0;
    _params.window.x_end = 0;
    _params.window.y_end = 0;
    _params.window.weight = 0;
}

bool
AwbHandler::set_mode (XCamAwbMode mode)
{
    AnalyzerHandler::HanlderLock lock(this);
    _params.mode = mode;

    XCAM_LOG_DEBUG ("awb set mode [%d]", mode);
    return true;
}

// Valid speed range is (0.0, 1.0]; out-of-range values are rejected.
bool
AwbHandler::set_speed (double speed)
{
    XCAM_FAIL_RETURN (
        ERROR,
        (0.0 < speed) && (speed <= 1.0),
        false,
        "awb speed(%f) is out of range, suggest (0.0, 1.0]", speed);

    AnalyzerHandler::HanlderLock lock(this);
    _params.speed = speed;

    XCAM_LOG_DEBUG ("awb set speed [%f]", speed);
    return true;
}

// cct_min must not exceed cct_max; values are in Kelvin per the XCam API.
bool
AwbHandler::set_color_temperature_range (uint32_t cct_min, uint32_t cct_max)
{
    XCAM_FAIL_RETURN (
        ERROR,
        (cct_min <= cct_max),
        false,
        "awb set wrong cct(%u, %u) parameters", cct_min, cct_max);

    AnalyzerHandler::HanlderLock lock(this);
    _params.cct_min = cct_min;
    _params.cct_max = cct_max;

    XCAM_LOG_DEBUG ("awb set cct range [%u, %u]", cct_min, cct_max);
    return true;
}

// All four channel gains must be non-negative.
bool
AwbHandler::set_manual_gain (double gr, double r, double b, double gb)
{
    XCAM_FAIL_RETURN (
        ERROR,
        gr >= 0.0 && r >= 0.0 && b >= 0.0 && gb >= 0.0,
        false,
        "awb manual gain value must >= 0.0");

    AnalyzerHandler::HanlderLock lock(this);
    _params.gr_gain = gr;
    _params.r_gain = r;
    _params.b_gain = b;
    _params.gb_gain = gb;

    XCAM_LOG_DEBUG ("awb set manual gain value(gr:%.03f, r:%.03f, b:%.03f, gb:%.03f)", gr, r, b, gb);
    return true;
}

CommonHandler::CommonHandler()
{
    reset_parameters ();
}

// Restore every common-3A parameter to its default.
void
CommonHandler::reset_parameters ()
{
    xcam_mem_clear (&_params);

    _params.is_manual_gamma = false;
    _params.nr_level = 0.0;
    _params.tnr_level = 0.0;
    _params.brightness = 0.0;
    _params.contrast = 0.0;
    _params.hue = 0.0;
    _params.saturation = 0.0;
    _params.sharpness = 0.0;
    _params.enable_dvs = false;
    _params.enable_gbce = false;
    _params.enable_night_mode = false;
}

bool
CommonHandler::set_dvs (bool enable)
{
    AnalyzerHandler::HanlderLock lock(this);
    _params.enable_dvs = enable;

    XCAM_LOG_DEBUG ("common 3A enable dvs:%s", XCAM_BOOL2STR(enable));
    return true;
}

bool
CommonHandler::set_gbce (bool enable)
{
    AnalyzerHandler::HanlderLock lock(this);
    _params.enable_gbce = enable;

    XCAM_LOG_DEBUG ("common 3A enable gbce:%s", XCAM_BOOL2STR(enable));
    return true;
}

bool
CommonHandler::set_night_mode (bool enable)
{
    AnalyzerHandler::HanlderLock lock(this);
    _params.enable_night_mode = enable;

    XCAM_LOG_DEBUG ("common 3A enable night mode:%s", XCAM_BOOL2STR(enable));
    return true;
}

/* Picture quality */
// The accepted range for all level setters below is [-1.0, 1.0) — the upper
// bound is exclusive in the code; messages were corrected to match (they
// previously claimed an inclusive "[-1.0, 1.0]"), along with "levlel" typos.
bool
CommonHandler::set_noise_reduction_level (double level)
{
    XCAM_FAIL_RETURN (
        ERROR,
        level >= -1.0 && level < 1.0,
        false,
        "set NR level(%.03f) out of range[-1.0, 1.0)", level);

    AnalyzerHandler::HanlderLock lock(this);
    _params.nr_level = level;

    XCAM_LOG_DEBUG ("common 3A set NR level:%.03f", level);
    return true;
}

bool
CommonHandler::set_temporal_noise_reduction_level (double level)
{
    XCAM_FAIL_RETURN (
        ERROR,
        level >= -1.0 && level < 1.0,
        false,
        "set TNR level(%.03f) out of range[-1.0, 1.0)", level);

    AnalyzerHandler::HanlderLock lock(this);
    _params.tnr_level = level;

    XCAM_LOG_DEBUG ("common 3A set TNR level:%.03f", level);
    return true;
}

bool
CommonHandler::set_manual_brightness (double level)
{
    XCAM_FAIL_RETURN (
        ERROR,
        level >= -1.0 && level < 1.0,
        false,
        "set brightness level(%.03f) out of range[-1.0, 1.0)", level);

    AnalyzerHandler::HanlderLock lock(this);
    _params.brightness = level;

    XCAM_LOG_DEBUG ("common 3A set brightness level:%.03f", level);
    return true;
}

bool
CommonHandler::set_manual_contrast (double level)
{
    XCAM_FAIL_RETURN (
        ERROR,
        level >= -1.0 && level < 1.0,
        false,
        "set contrast level(%.03f) out of range[-1.0, 1.0)", level);

    AnalyzerHandler::HanlderLock lock(this);
    _params.contrast = level;

    XCAM_LOG_DEBUG ("common 3A set contrast level:%.03f", level);
    return true;
}

bool
CommonHandler::set_manual_hue (double level)
{
    XCAM_FAIL_RETURN (
        ERROR,
        level >= -1.0 && level < 1.0,
        false,
        "set hue level(%.03f) out of range[-1.0, 1.0)", level);

    AnalyzerHandler::HanlderLock lock(this);
    _params.hue = level;

    XCAM_LOG_DEBUG ("common 3A set hue level:%.03f", level);
    return true;
}

bool
CommonHandler::set_manual_saturation (double level)
{
    XCAM_FAIL_RETURN (
        ERROR,
        level >= -1.0 && level < 1.0,
        false,
        "set saturation level(%.03f) out of range[-1.0, 1.0)", level);

    AnalyzerHandler::HanlderLock lock(this);
    _params.saturation = level;

    XCAM_LOG_DEBUG ("common 3A set saturation level:%.03f", level);
    return true;
}

bool
CommonHandler::set_manual_sharpness (double level)
{
    XCAM_FAIL_RETURN (
        ERROR,
        level >= -1.0 && level < 1.0,
        false,
        "set sharpness level(%.03f) out of range[-1.0, 1.0)", level);

    AnalyzerHandler::HanlderLock lock(this);
    _params.sharpness = level;

    XCAM_LOG_DEBUG ("common 3A set sharpness level:%.03f", level);
    return true;
}

// Install (or clear) a manual RGB gamma table.
// - All three pointers NULL: disable manual gamma and return true.
// - Any (but not all) pointer NULL: log an error and return false.
// - Otherwise: copy XCAM_GAMMA_TABLE_SIZE entries per channel and enable.
bool
CommonHandler::set_gamma_table (double *r_table, double *g_table, double *b_table)
{
    AnalyzerHandler::HanlderLock lock(this);

    if (!r_table && !g_table && !b_table) {
        _params.is_manual_gamma = false;
        XCAM_LOG_DEBUG ("common 3A disabled gamma");
        return true;
    }

    if (!r_table || !g_table || !b_table) {
        XCAM_LOG_ERROR ("common 3A gamma table parameters wrong");
        return false;
    }

    for (uint32_t i = 0; i < XCAM_GAMMA_TABLE_SIZE; ++i) {
        _params.r_gamma [i] = r_table [i];
        _params.g_gamma [i] = g_table [i];
        _params.b_gamma [i] = b_table [i];
    }
    _params.is_manual_gamma = true;

    XCAM_LOG_DEBUG ("common 3A enabled RGB gamma");
    return true;
}

};
chivakker/libxcam
xcore/handler_interface.cpp
C++
apache-2.0
11,719
# Copyright 2014 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Shortcut methods for getting set up with Google Cloud Storage. You'll typically use these to get started with the API: .. literalinclude:: snippets.py :start-after: [START storage_get_started] :end-before: [END storage_get_started] The main concepts with this API are: - :class:`~google.cloud.storage.bucket.Bucket` which represents a particular bucket (akin to a mounted disk on a computer). - :class:`~google.cloud.storage.blob.Blob` which represents a pointer to a particular entity in Cloud Storage (akin to a file path on a remote machine). """ from pkg_resources import get_distribution __version__ = get_distribution("google-cloud-storage").version from google.cloud.storage.batch import Batch from google.cloud.storage.blob import Blob from google.cloud.storage.bucket import Bucket from google.cloud.storage.client import Client __all__ = ["__version__", "Batch", "Blob", "Bucket", "Client"]
dhermes/gcloud-python
storage/google/cloud/storage/__init__.py
Python
apache-2.0
1,504
/*
 * Copyright 2002-2005 Sascha Weinreuter
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.intellij.plugins.xpathView.support.jaxen;

import com.intellij.lang.xml.XMLLanguage;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.util.Ref;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.openapi.vfs.VfsUtilCore;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.psi.PsiElement;
import com.intellij.psi.PsiFile;
import com.intellij.psi.PsiWhiteSpace;
import com.intellij.psi.XmlRecursiveElementVisitor;
import com.intellij.psi.xml.*;
import com.intellij.xml.XmlAttributeDescriptor;
import org.intellij.plugins.xpathView.util.MyPsiUtil;
import org.jaxen.DefaultNavigator;
import org.jaxen.FunctionCallException;
import org.jaxen.UnsupportedAxisException;
import org.jaxen.XPath;
import org.jaxen.saxpath.SAXPathException;
import org.jetbrains.annotations.NotNull;

import java.util.Collections;
import java.util.Iterator;

/**
 * <p>Adapter class for IDEA's PSI-tree to Jaxen.</p>
 * Not all of the required functionality is implemented yet. See the TODO comments...
 */
public class PsiDocumentNavigator extends DefaultNavigator {

    private static final Logger LOG = Logger.getInstance("org.intellij.plugins.xpathView.support.jaxen.PsiDocumentNavigator");

    /** The XML file this navigator resolves relative documents against. */
    private final XmlFile file;

    public PsiDocumentNavigator(XmlFile file) {
        this.file = file;
    }

    /** Returns an iterator over the element children of {@code contextNode}; empty for non-XML nodes. */
    public Iterator getChildAxisIterator(Object contextNode) throws UnsupportedAxisException {
        if (!(contextNode instanceof XmlElement)) {
            return Collections.emptyList().iterator();
        }
        return new PsiChildAxisIterator(contextNode);
    }

    /** Returns an iterator yielding the nearest enclosing {@link XmlTag}, if any. */
    public Iterator getParentAxisIterator(Object contextNode) {
        if (!(contextNode instanceof XmlElement)) {
            return Collections.emptyList().iterator();
        }
        return new NodeIterator((XmlElement)contextNode) {
            protected PsiElement getFirstNode(PsiElement n) {
                // Walk up the PSI tree until the first XmlTag ancestor.
                while (n != null) {
                    n = n.getParent();
                    if (n instanceof XmlTag) {
                        return n;
                    }
                }
                return null;
            }

            protected PsiElement getNextNode(PsiElement n) {
                // The parent axis has at most one node.
                return null;
            }
        };
    }

    /** Walks up from {@code contextNode} to the containing {@link XmlDocument}, or null. */
    public Object getDocumentNode(Object contextNode) {
        if (contextNode instanceof XmlDocument) {
            return contextNode;
        }
        while (contextNode instanceof PsiElement) {
            if (contextNode instanceof XmlDocument) {
                return contextNode;
            }
            contextNode = ((PsiElement)contextNode).getParent();
        }
        return null;
    }

    public String translateNamespacePrefixToUri(String prefix, Object element) {
        if (isElement(element)) {
            return ((XmlTag)element).getNamespaceByPrefix(prefix);
        }
        return super.translateNamespacePrefixToUri(prefix, element);
    }

    public String getProcessingInstructionTarget(Object obj) {
        LOG.assertTrue(obj instanceof XmlProcessingInstruction);
        XmlProcessingInstruction pi = (XmlProcessingInstruction)obj;
        return getProcessingInstructionTarget(pi);
    }

    /**
     * Extracts the PI target name from the second child token of the PI,
     * trimming leading spaces and cutting at the first space after the name.
     */
    public static String getProcessingInstructionTarget(XmlProcessingInstruction pi) {
        final PsiElement[] children = pi.getChildren();
        LOG.assertTrue(children[1] instanceof XmlToken && ((XmlToken)children[1]).getTokenType() == XmlTokenType.XML_NAME, "Unknown PI structure");

        String text = children[1].getText();

        // skip leading spaces (was an empty-bodied for loop)
        int i = 0;
        while (i < text.length() && text.charAt(i) == ' ') {
            i++;
        }

        final int pos = text.indexOf(' ', i);
        if (pos != -1) {
            text = text.substring(i, pos);
        } else {
            text = text.substring(i);
        }
        return text;
    }

    /**
     * Returns the data portion of a PI: everything between "&lt;?target" and "?&gt;", trimmed.
     */
    @NotNull
    public String getProcessingInstructionData(Object obj) {
        LOG.assertTrue(obj instanceof XmlProcessingInstruction);
        XmlProcessingInstruction pi = (XmlProcessingInstruction)obj;

        int targetLength = getProcessingInstructionTarget(obj).length();
        // cache getText() instead of calling it three times
        final String text = pi.getText();
        // 2 = length of "<?", trailing 2 = length of "?>"
        return text.substring(2 + targetLength, text.length() - 2).trim();
    }

    public Object getParentNode(Object contextNode) throws UnsupportedAxisException {
        return ((PsiElement)contextNode).getParent();
    }

    /** Resolves {@code url} relative to this navigator's file and returns its XmlDocument, or null. */
    public Object getDocument(String url) throws FunctionCallException {
        final VirtualFile virtualFile = VfsUtilCore.findRelativeFile(url, file.getVirtualFile());
        if (virtualFile != null) {
            final PsiFile file = this.file.getManager().findFile(virtualFile);
            if (file instanceof XmlFile) {
                return ((XmlFile)file).getDocument();
            }
        }
        return null;
    }

    public Iterator getAttributeAxisIterator(Object contextNode) {
        if (isElement(contextNode)) {
            return new AttributeIterator((XmlElement)contextNode);
        } else {
            return Collections.emptyList().iterator();
        }
    }

    /**
     * Returns the element's namespace URI, or "" when the namespace is not
     * explicitly declared for the tag's prefix.
     */
    public String getElementNamespaceUri(Object element) {
        LOG.assertTrue(element instanceof XmlTag);

        final XmlTag context = (XmlTag)element;
        final String namespaceUri = context.getNamespace();
        if (!MyPsiUtil.isInDeclaredNamespace(context, namespaceUri, context.getNamespacePrefix())) {
            return "";
        }
        return namespaceUri;
    }

    public String getElementName(Object element) {
        LOG.assertTrue(element instanceof XmlTag);
        return ((XmlTag)element).getLocalName();
    }

    public String getElementQName(Object element) {
        LOG.assertTrue(element instanceof XmlTag);
        return ((XmlTag)element).getName();
    }

    /**
     * Returns the attribute's namespace URI; unprefixed attributes and
     * attributes whose namespace is not explicitly declared yield "".
     */
    public String getAttributeNamespaceUri(Object attr) {
        LOG.assertTrue(attr instanceof XmlAttribute);

        final XmlAttribute attribute = ((XmlAttribute)attr);
        final String name = attribute.getName();
        if (name.indexOf(':') == -1) return "";

        final String uri = attribute.getNamespace();
        if (!MyPsiUtil.isInDeclaredNamespace(attribute.getParent(), uri, MyPsiUtil.getAttributePrefix(attribute))) {
            LOG.info("getElementNamespaceUri: not returning implicit attribute-namespace uri: " + uri);
            return "";
        }
        return uri;
    }

    public String getAttributeName(Object attr) {
        LOG.assertTrue(attr instanceof XmlAttribute);
        return ((XmlAttribute)attr).getLocalName();
    }

    public String getAttributeQName(Object attr) {
        LOG.assertTrue(attr instanceof XmlAttribute);
        return ((XmlAttribute)attr).getName();
    }

    public boolean isDocument(Object object) {
        return object instanceof XmlDocument;
    }

    public boolean isElement(Object object) {
        return object instanceof XmlTag && isSupportedElement((XmlTag)object);
    }

    private static boolean isSupportedElement(XmlTag object) {
        // optimization: all tags from XML language are supported, but some from other languages (JSP, see IDEADEV-37939) are not
        return object.getLanguage() == XMLLanguage.INSTANCE || MyPsiUtil.findNameElement(object) != null;
    }

    public boolean isAttribute(Object object) {
        return object instanceof XmlAttribute;
    }

    public boolean isNamespace(Object object) {
        // TODO: implement when namespace axis is supported
        return false;
    }

    public boolean isComment(Object object) {
        return object instanceof XmlComment;
    }

    /** Whitespace counts as text only when it sits inside an XmlText node. */
    public boolean isText(Object object) {
        return object instanceof PsiWhiteSpace ? ((PsiWhiteSpace)object).getParent() instanceof XmlText : object instanceof XmlText;
    }

    public boolean isProcessingInstruction(Object object) {
        return object instanceof XmlProcessingInstruction;
    }

    /** Returns the character content of a comment node, or "" when absent. */
    @NotNull
    public String getCommentStringValue(Object comment) {
        LOG.assertTrue(comment instanceof XmlComment);

        PsiElement c = (PsiElement)comment;
        final PsiElement[] children = c.getChildren();
        for (PsiElement child : children) {
            if (child instanceof XmlToken && ((XmlToken)child).getTokenType() == XmlTokenType.XML_COMMENT_CHARACTERS) {
                return child.getText();
            }
        }
        return "";
    }

    /** Concatenates all descendant text of an element (XPath string-value). */
    @NotNull
    public String getElementStringValue(Object element) {
        LOG.assertTrue(element instanceof XmlTag);

        final TextCollector collector = new TextCollector();
        ((XmlTag)element).accept(collector);
        return collector.getText();
    }

    @NotNull
    public String getAttributeStringValue(Object attr) {
        LOG.assertTrue(attr instanceof XmlAttribute);
        return StringUtil.notNullize(((XmlAttribute)attr).getValue());
    }

    public String getNamespaceStringValue(Object ns) {
        // TODO: implement when namespace axis is supported
        return null;
    }

    public String getNamespacePrefix(Object ns) {
        // TODO: implement when namespace axis is supported
        return null;
    }

    @NotNull
    public String getTextStringValue(Object txt) {
        if (txt instanceof XmlText) {
            return ((XmlText)txt).getValue();
        }
        return txt instanceof PsiElement ? ((PsiElement)txt).getText() : txt.toString();
    }

    public XPath parseXPath(String xpath) throws SAXPathException {
        return new PsiXPath(file, xpath);
    }

    /**
     * Finds the first element in document order that carries an ID-typed
     * attribute equal to {@code elementId}; returns null when absent.
     */
    public Object getElementById(Object object, final String elementId) {
        final XmlTag rootTag = ((XmlFile)((XmlElement)object).getContainingFile()).getRootTag();
        if (rootTag == null) {
            return null;
        }

        final Ref<XmlTag> ref = new Ref<>();
        rootTag.accept(new XmlRecursiveElementVisitor() {
            @Override
            public void visitElement(PsiElement element) {
                // stop descending once a match has been recorded
                if (ref.get() == null) {
                    super.visitElement(element);
                }
            }

            @Override
            public void visitXmlAttribute(XmlAttribute attribute) {
                final XmlAttributeDescriptor descriptor = attribute.getDescriptor();
                final String value = attribute.getValue();
                if ((value != null && (descriptor != null && descriptor.hasIdType()))) {
                    if (elementId.equals(value)) {
                        ref.set(attribute.getParent());
                    }
                }
            }
        });
        return ref.get();
    }

    /** Accumulates the values of all visited XmlText nodes. */
    static class TextCollector extends XmlRecursiveElementVisitor {
        // StringBuilder: no synchronization needed for this single-threaded visitor
        private final StringBuilder builder = new StringBuilder();

        @Override
        public void visitXmlText(XmlText text) {
            builder.append(text.getValue());
        }

        public String getText() {
            return builder.toString();
        }
    }
}
jk1/intellij-community
plugins/xpath/xpath-view/src/org/intellij/plugins/xpathView/support/jaxen/PsiDocumentNavigator.java
Java
apache-2.0
11,462
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.mapper.internal;

import com.google.common.base.Objects;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedStreamInput;
import org.elasticsearch.common.compress.Compressor;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.*;
import org.elasticsearch.index.mapper.core.AbstractFieldMapper;

import java.io.IOException;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue;
import static org.elasticsearch.index.mapper.MapperBuilders.source;

/**
 * Mapper for the {@code _source} field.  Stores the raw document bytes as a
 * Lucene {@link StoredField}, optionally:
 * <ul>
 *   <li>filtered down to a subset of the document via include/exclude path patterns,</li>
 *   <li>compressed (above an optional size threshold),</li>
 *   <li>re-encoded into a fixed {@link XContentType} when a {@code format} is configured.</li>
 * </ul>
 * The field is stored only — never indexed (see {@link Defaults#FIELD_TYPE}).
 */
public class SourceFieldMapper extends AbstractFieldMapper<byte[]> implements InternalMapper, RootMapper {

    public static final String NAME = "_source";

    public static final String CONTENT_TYPE = "_source";

    /**
     * Default settings: enabled, uncompressed (threshold -1 means "no threshold"),
     * and a stored-only, non-indexed field type.
     */
    public static class Defaults extends AbstractFieldMapper.Defaults {
        public static final String NAME = SourceFieldMapper.NAME;
        public static final boolean ENABLED = true;
        public static final long COMPRESS_THRESHOLD = -1;
        public static final String FORMAT = null; // default format is to use the one provided

        public static final FieldType FIELD_TYPE = new FieldType(AbstractFieldMapper.Defaults.FIELD_TYPE);

        static {
            FIELD_TYPE.setIndexed(false);
            FIELD_TYPE.setStored(true);
            FIELD_TYPE.setOmitNorms(true);
            FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_ONLY);
            FIELD_TYPE.freeze();
        }
    }

    /**
     * Builder for {@link SourceFieldMapper}.  All options default to
     * {@link Defaults}; {@code compress} stays {@code null} (i.e. "not set")
     * unless explicitly configured.
     */
    public static class Builder extends Mapper.Builder<Builder, SourceFieldMapper> {

        private boolean enabled = Defaults.ENABLED;

        private long compressThreshold = Defaults.COMPRESS_THRESHOLD;

        private Boolean compress = null;

        private String format = Defaults.FORMAT;

        private String[] includes = null;

        private String[] excludes = null;

        public Builder() {
            super(Defaults.NAME);
        }

        public Builder enabled(boolean enabled) {
            this.enabled = enabled;
            return this;
        }

        public Builder compress(boolean compress) {
            this.compress = compress;
            return this;
        }

        public Builder compressThreshold(long compressThreshold) {
            this.compressThreshold = compressThreshold;
            return this;
        }

        public Builder format(String format) {
            this.format = format;
            return this;
        }

        public Builder includes(String[] includes) {
            this.includes = includes;
            return this;
        }

        public Builder excludes(String[] excludes) {
            this.excludes = excludes;
            return this;
        }

        @Override
        public SourceFieldMapper build(BuilderContext context) {
            return new SourceFieldMapper(name, enabled, format, compress, compressThreshold, includes, excludes);
        }
    }

    /**
     * Parses the {@code _source} mapping definition from a mapping node.
     * Note that setting {@code compress_threshold} implicitly enables compression.
     */
    public static class TypeParser implements Mapper.TypeParser {
        @Override
        public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
            SourceFieldMapper.Builder builder = source();

            for (Map.Entry<String, Object> entry : node.entrySet()) {
                String fieldName = Strings.toUnderscoreCase(entry.getKey());
                Object fieldNode = entry.getValue();
                if (fieldName.equals("enabled")) {
                    builder.enabled(nodeBooleanValue(fieldNode));
                } else if (fieldName.equals("compress") && fieldNode != null) {
                    builder.compress(nodeBooleanValue(fieldNode));
                } else if (fieldName.equals("compress_threshold") && fieldNode != null) {
                    // a threshold may be given as a number of bytes or a size string ("10kb")
                    if (fieldNode instanceof Number) {
                        builder.compressThreshold(((Number) fieldNode).longValue());
                        builder.compress(true);
                    } else {
                        builder.compressThreshold(ByteSizeValue.parseBytesSizeValue(fieldNode.toString()).bytes());
                        builder.compress(true);
                    }
                } else if ("format".equals(fieldName)) {
                    builder.format(nodeStringValue(fieldNode, null));
                } else if (fieldName.equals("includes")) {
                    List<Object> values = (List<Object>) fieldNode;
                    String[] includes = new String[values.size()];
                    for (int i = 0; i < includes.length; i++) {
                        includes[i] = values.get(i).toString();
                    }
                    builder.includes(includes);
                } else if (fieldName.equals("excludes")) {
                    List<Object> values = (List<Object>) fieldNode;
                    String[] excludes = new String[values.size()];
                    for (int i = 0; i < excludes.length; i++) {
                        excludes[i] = values.get(i).toString();
                    }
                    builder.excludes(excludes);
                }
            }
            return builder;
        }
    }

    private final boolean enabled;

    // non-final: merge(...) may overwrite these from another mapper
    private Boolean compress;

    private long compressThreshold;

    private String[] includes;

    private String[] excludes;

    private String format;

    // parsed form of `format`; null means "keep whatever format the document came in"
    private XContentType formatContentType;

    public SourceFieldMapper() {
        this(Defaults.NAME, Defaults.ENABLED, Defaults.FORMAT, null, -1, null, null);
    }

    protected SourceFieldMapper(String name, boolean enabled, String format, Boolean compress, long compressThreshold,
                                String[] includes, String[] excludes) {
        super(new Names(name, name, name, name), Defaults.BOOST, new FieldType(Defaults.FIELD_TYPE), null,
                Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER, null, null, null, null, null, null); // Only stored.
        this.enabled = enabled;
        this.compress = compress;
        this.compressThreshold = compressThreshold;
        this.includes = includes;
        this.excludes = excludes;
        this.format = format;
        this.formatContentType = format == null ? null : XContentType.fromRestContentType(format);
    }

    public boolean enabled() {
        return this.enabled;
    }

    /** @return configured exclude patterns, or an empty array when none are set */
    public String[] excludes() {
        return this.excludes != null ? this.excludes : Strings.EMPTY_ARRAY;
    }

    /** @return configured include patterns, or an empty array when none are set */
    public String[] includes() {
        return this.includes != null ? this.includes : Strings.EMPTY_ARRAY;
    }

    @Override
    public FieldType defaultFieldType() {
        return Defaults.FIELD_TYPE;
    }

    @Override
    public FieldDataType defaultFieldDataType() {
        return null;
    }

    @Override
    public boolean hasDocValues() {
        return false;
    }

    @Override
    public void preParse(ParseContext context) throws IOException {
        // _source is written before the rest of the document is parsed
        super.parse(context);
    }

    @Override
    public void postParse(ParseContext context) throws IOException {
    }

    @Override
    public void parse(ParseContext context) throws IOException {
        // nothing to do here, we will call it in pre parse
    }

    @Override
    public void validate(ParseContext context) throws MapperParsingException {
    }

    @Override
    public boolean includeInObject() {
        return false;
    }

    /**
     * Adds the (possibly filtered / compressed / re-encoded) source bytes as a
     * stored field.  Exactly one of three transformations applies:
     * <ol>
     *   <li>include/exclude filtering (source in the context is left untouched),</li>
     *   <li>compression of an uncompressed source above the threshold
     *       (context source IS updated so the translog stores it compressed),</li>
     *   <li>conversion to the configured {@code format} content type
     *       (context source IS updated as well).</li>
     * </ol>
     */
    @Override
    protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
        if (!enabled) {
            return;
        }
        if (!fieldType.stored()) {
            return;
        }
        if (context.flyweight()) {
            return;
        }
        BytesReference source = context.source();

        boolean filtered = (includes != null && includes.length > 0) || (excludes != null && excludes.length > 0);
        if (filtered) {
            // we don't update the context source if we filter, we want to keep it as is...

            Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(source, true);
            Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), includes, excludes);

            BytesStreamOutput bStream = new BytesStreamOutput();
            StreamOutput streamOutput = bStream;
            // optionally wrap the output in a compressor before serializing the filtered map
            if (compress != null && compress && (compressThreshold == -1 || source.length() > compressThreshold)) {
                streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream);
            }
            XContentType contentType = formatContentType;
            if (contentType == null) {
                contentType = mapTuple.v1();
            }
            XContentBuilder builder = XContentFactory.contentBuilder(contentType, streamOutput).map(filteredSource);
            builder.close();

            source = bStream.bytes();
        } else if (compress != null && compress && !CompressorFactory.isCompressed(source)) {
            if (compressThreshold == -1 || source.length() > compressThreshold) {
                BytesStreamOutput bStream = new BytesStreamOutput();
                XContentType contentType = XContentFactory.xContentType(source);
                if (formatContentType != null && formatContentType != contentType) {
                    // re-encode into the target format while compressing
                    XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, CompressorFactory.defaultCompressor().streamOutput(bStream));
                    builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source));
                    builder.close();
                } else {
                    // same format: just copy the raw bytes through the compressor
                    StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream);
                    source.writeTo(streamOutput);
                    streamOutput.close();
                }
                source = bStream.bytes();
                // update the data in the context, so it can be compressed and stored compressed outside...
                context.source(source);
            }
        } else if (formatContentType != null) {
            // see if we need to convert the content type
            Compressor compressor = CompressorFactory.compressor(source);
            if (compressor != null) {
                // source is already compressed: peek at its content type, then rewind
                CompressedStreamInput compressedStreamInput = compressor.streamInput(source.streamInput());
                XContentType contentType = XContentFactory.xContentType(compressedStreamInput);
                compressedStreamInput.resetToBufferStart();
                if (contentType != formatContentType) {
                    // we need to reread and store back, compressed....
                    BytesStreamOutput bStream = new BytesStreamOutput();
                    StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream);
                    XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, streamOutput);
                    builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(compressedStreamInput));
                    builder.close();
                    source = bStream.bytes();
                    // update the data in the context, so we store it in the translog in this format
                    context.source(source);
                } else {
                    compressedStreamInput.close();
                }
            } else {
                XContentType contentType = XContentFactory.xContentType(source);
                if (contentType != formatContentType) {
                    // we need to reread and store back (uncompressed this time)
                    BytesStreamOutput bStream = new BytesStreamOutput();
                    XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, bStream);
                    builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source));
                    builder.close();
                    source = bStream.bytes();
                    // update the data in the context, so we store it in the translog in this format
                    context.source(source);
                }
            }
        }
        assert source.hasArray();
        fields.add(new StoredField(names().indexName(), source.array(), source.arrayOffset(), source.length()));
    }

    /**
     * Returns the raw (decompressed) source bytes for a stored value, or
     * {@code null} when the value is absent.
     *
     * @throws ElasticsearchParseException if decompression fails
     */
    @Override
    public byte[] value(Object value) {
        if (value == null) {
            return null;
        }
        BytesReference bValue;
        if (value instanceof BytesRef) {
            bValue = new BytesArray((BytesRef) value);
        } else {
            bValue = (BytesReference) value;
        }
        try {
            return CompressorFactory.uncompressIfNeeded(bValue).toBytes();
        } catch (IOException e) {
            throw new ElasticsearchParseException("failed to decompress source", e);
        }
    }

    @Override
    protected String contentType() {
        return CONTENT_TYPE;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        boolean includeDefaults = params.paramAsBoolean("include_defaults", false);

        // all are defaults, no need to write it at all
        if (!includeDefaults && enabled == Defaults.ENABLED && compress == null && compressThreshold == -1 && includes == null && excludes == null) {
            return builder;
        }
        builder.startObject(contentType());
        if (includeDefaults || enabled != Defaults.ENABLED) {
            builder.field("enabled", enabled);
        }
        if (includeDefaults || !Objects.equal(format, Defaults.FORMAT)) {
            builder.field("format", format);
        }
        if (compress != null) {
            builder.field("compress", compress);
        } else if (includeDefaults) {
            builder.field("compress", false);
        }
        if (compressThreshold != -1) {
            builder.field("compress_threshold", new ByteSizeValue(compressThreshold).toString());
        } else if (includeDefaults) {
            builder.field("compress_threshold", -1);
        }
        if (includes != null) {
            builder.field("includes", includes);
        } else if (includeDefaults) {
            builder.field("includes", Strings.EMPTY_ARRAY);
        }
        if (excludes != null) {
            builder.field("excludes", excludes);
        } else if (includeDefaults) {
            builder.field("excludes", Strings.EMPTY_ARRAY);
        }
        builder.endObject();
        return builder;
    }

    /**
     * Merges settings from another {@code _source} mapper.  Only explicitly-set
     * values on {@code mergeWith} (non-null compress, threshold != -1, non-null
     * includes/excludes) overwrite this mapper's values, and only when the merge
     * is not a simulation.
     */
    @Override
    public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
        SourceFieldMapper sourceMergeWith = (SourceFieldMapper) mergeWith;
        if (!mergeContext.mergeFlags().simulate()) {
            if (sourceMergeWith.compress != null) {
                this.compress = sourceMergeWith.compress;
            }
            if (sourceMergeWith.compressThreshold != -1) {
                this.compressThreshold = sourceMergeWith.compressThreshold;
            }
            if (sourceMergeWith.includes != null) {
                this.includes = sourceMergeWith.includes;
            }
            if (sourceMergeWith.excludes != null) {
                this.excludes = sourceMergeWith.excludes;
            }
        }
    }
}
alexksikes/elasticsearch
src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java
Java
apache-2.0
17,336
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package legacyregistry

import (
	"fmt"

	apimachineryversion "k8s.io/apimachinery/pkg/version"
	"k8s.io/component-base/metrics"

	"sync"
)

// globalRegistryFactory holds the process-wide registry state.  Until a
// version is supplied via SetRegistryFactoryVersion, collectors handed to
// Register/MustRegister are queued here instead of being registered.
var globalRegistryFactory = metricsRegistryFactory{
	registerQueue:     make([]metrics.KubeCollector, 0),
	mustRegisterQueue: make([]metrics.KubeCollector, 0),
}

// metricsRegistryFactory pairs the global registry with the deferred
// registration queues.  All fields are guarded by registrationLock.
type metricsRegistryFactory struct {
	globalRegistry    metrics.KubeRegistry
	kubeVersion       *apimachineryversion.Info
	registrationLock  sync.Mutex
	registerQueue     []metrics.KubeCollector
	mustRegisterQueue []metrics.KubeCollector
}

// SetRegistryFactoryVersion sets the kubernetes version information for all
// subsequent metrics registry initializations. Only the first call has an effect:
// it creates the global registry and flushes both deferred-registration queues
// into it, returning any errors from the Register queue (MustRegister entries
// panic on failure inside MustRegister).  A repeated call with the SAME version
// is a no-op returning nil; a repeated call with a DIFFERENT version panics.
func SetRegistryFactoryVersion(ver apimachineryversion.Info) []error {
	globalRegistryFactory.registrationLock.Lock()
	defer globalRegistryFactory.registrationLock.Unlock()
	if globalRegistryFactory.kubeVersion != nil {
		if globalRegistryFactory.kubeVersion.String() != ver.String() {
			panic(fmt.Sprintf("Cannot load a global registry more than once, had %s tried to load %s",
				globalRegistryFactory.kubeVersion.String(),
				ver.String()))
		}
		return nil
	}
	registrationErrs := make([]error, 0)
	globalRegistryFactory.globalRegistry = metrics.NewKubeRegistry(ver)
	globalRegistryFactory.kubeVersion = &ver
	// drain the queues accumulated before the version was known
	for _, c := range globalRegistryFactory.registerQueue {
		err := globalRegistryFactory.globalRegistry.Register(c)
		if err != nil {
			registrationErrs = append(registrationErrs, err)
		}
	}
	for _, c := range globalRegistryFactory.mustRegisterQueue {
		globalRegistryFactory.globalRegistry.MustRegister(c)
	}
	return registrationErrs
}

// Register registers a collectable metric, but it uses a global registry. Registration is deferred
// until the global registry has a version to use; a queued collector always
// yields a nil error here, even if its eventual registration fails.
func Register(c metrics.KubeCollector) error {
	globalRegistryFactory.registrationLock.Lock()
	defer globalRegistryFactory.registrationLock.Unlock()

	if globalRegistryFactory.kubeVersion != nil {
		return globalRegistryFactory.globalRegistry.Register(c)
	}
	globalRegistryFactory.registerQueue = append(globalRegistryFactory.registerQueue, c)
	return nil
}

// MustRegister works like Register but registers any number of
// Collectors and panics upon the first registration that causes an
// error. Registration is deferred until the global registry has a version to use.
func MustRegister(cs ...metrics.KubeCollector) {
	globalRegistryFactory.registrationLock.Lock()
	defer globalRegistryFactory.registrationLock.Unlock()

	if globalRegistryFactory.kubeVersion != nil {
		globalRegistryFactory.globalRegistry.MustRegister(cs...)
		return
	}
	for _, c := range cs {
		globalRegistryFactory.mustRegisterQueue = append(globalRegistryFactory.mustRegisterQueue, c)
	}
}
enisoc/kubernetes
staging/src/k8s.io/component-base/metrics/legacyregistry/registry.go
GO
apache-2.0
3,420
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.trino.parquet.predicate;

import io.trino.parquet.ParquetCorruptionException;
import io.trino.parquet.ParquetDataSourceId;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.column.statistics.Statistics;
import org.apache.parquet.filter2.predicate.FilterPredicate;
import org.apache.parquet.internal.filter2.columnindex.ColumnIndexStore;
import org.joda.time.DateTimeZone;

import java.util.Map;
import java.util.Optional;

/**
 * Decides whether sections of a Parquet file can be skipped, based on
 * column statistics, dictionaries, or column indexes, and optionally
 * converts itself into a Parquet {@link FilterPredicate}.
 */
public interface Predicate
{
    /**
     * Should the Parquet Reader process a file section with the specified statistics.
     *
     * @param numberOfRows the number of rows in the segment; this can be used with
     * Statistics to determine if a column is only null
     * @param statistics column statistics
     * @param id Parquet file name
     */
    boolean matches(long numberOfRows, Map<ColumnDescriptor, Statistics<?>> statistics, ParquetDataSourceId id)
            throws ParquetCorruptionException;

    /**
     * Should the Parquet Reader process a file section with the specified dictionary based on that
     * single dictionary. This is safe to check repeatedly to avoid loading more parquet dictionaries
     * if the section can already be eliminated.
     *
     * @param dictionary The single column dictionary
     */
    boolean matches(DictionaryDescriptor dictionary);

    /**
     * Should the Parquet Reader process a file section with the specified statistics.
     *
     * @param numberOfRows the number of rows in the segment; this can be used with
     * Statistics to determine if a column is only null
     * @param columnIndex column index (statistics) store
     * @param id Parquet file name
     */
    boolean matches(long numberOfRows, ColumnIndexStore columnIndex, ParquetDataSourceId id)
            throws ParquetCorruptionException;

    /**
     * Convert Predicate to Parquet filter if possible.
     *
     * @param timeZone current Parquet timezone
     * @return Converted Parquet filter or null if conversion not possible
     */
    Optional<FilterPredicate> toParquetFilter(DateTimeZone timeZone);
}
ebyhr/presto
lib/trino-parquet/src/main/java/io/trino/parquet/predicate/Predicate.java
Java
apache-2.0
2,681
/*
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package pt.lighthouselabs.obd.reader.net;

import retrofit.client.Response;
import retrofit.http.Body;
import retrofit.http.PUT;

/**
 * Definition of REST service available in OBD Server.
 */
public interface ObdService {

  /**
   * Uploads a single OBD reading via an HTTP PUT to the service root ("/").
   *
   * @param reading the reading serialized as the request body
   * @return the raw Retrofit response (synchronous call)
   */
  @PUT("/")
  Response uploadReading(@Body ObdReading reading);
}
mateenc/pitstop_Android
src/main/java/pt/lighthouselabs/obd/reader/net/ObdService.java
Java
apache-2.0
850
package org.semanticweb.elk.reasoner.taxonomy;

/*
 * #%L
 * ELK Reasoner
 * $Id:$
 * $HeadURL:$
 * %%
 * Copyright (C) 2011 - 2013 Department of Computer Science, University of Oxford
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */

import java.util.Collections;
import java.util.Set;

import org.semanticweb.elk.owl.interfaces.ElkObject;
import org.semanticweb.elk.reasoner.taxonomy.model.InstanceNode;

/**
 * An {@link OrphanNode} of instances whose sole (direct and indirect) type is
 * a single fixed {@link OrphanTypeNode}.
 *
 * @author "Yevgeny Kazakov"
 *
 * @param <T>
 *            the type of objects in this node
 * @param <I>
 *            the type of instances of this node
 */
public class OrphanInstanceNode<T extends ElkObject, I extends ElkObject>
		extends OrphanNode<I> implements InstanceNode<T, I> {

	/** the single type node this instance node belongs to */
	final OrphanTypeNode<T, I> typeNode;

	public OrphanInstanceNode(Set<I> instances, I canonicalInstance,
			OrphanTypeNode<T, I> typeNode) {
		super(instances, canonicalInstance);
		this.typeNode = typeNode;
	}

	@Override
	public Set<? extends OrphanTypeNode<T, I>> getDirectTypeNodes() {
		// exactly one direct type node by construction
		return Collections.singleton(typeNode);
	}

	@Override
	public Set<? extends OrphanTypeNode<T, I>> getAllTypeNodes() {
		// there are no further ancestors: all types == direct types
		return getDirectTypeNodes();
	}

}
sesuncedu/elk-reasoner
elk-reasoner/src/main/java/org/semanticweb/elk/reasoner/taxonomy/OrphanInstanceNode.java
Java
apache-2.0
1,742
import Ember from 'ember';

const { Component, computed } = Ember;

// Site-wide footer component, rendered as a Semantic UI inverted footer
// segment.  Exposes the active locale and an action to switch it via the
// injected `l10n` service.
export default Component.extend({
  tagName      : 'footer',
  classNames   : ['ui', 'inverted', 'vertical', 'footer', 'segment'],

  // Locale currently reported by the l10n service.
  // NOTE(review): this computed property declares no dependent keys, so Ember
  // caches the first value and it will not recompute if the locale changes
  // later — confirm against the l10n service whether that is intended (and
  // which key, e.g. 'l10n.locale', it should depend on).
  currentLocale: computed(function() {
    return this.get('l10n').getLocale();
  }),

  actions: {
    // Switch the application language through the l10n service.
    switchLanguage(locale) {
      this.get('l10n').switchLanguage(locale);
    }
  }
});
sumedh123/open-event-frontend
app/components/footer-main.js
JavaScript
apache-2.0
386
/* * ======================================================================== * Copyright (c) 2002-2014 Pivotal Software, Inc. All Rights Reserved. * This product is protected by U.S. and international copyright * and intellectual property laws. Pivotal products are covered by * more patents listed at http://www.pivotal.io/patents. * ======================================================================== */ /** * Used to carry arguments between gfsh region command implementations and the functions * that do the work for those commands. * * @author Abhishek Chaudhari * @author David Hoots * @since 7.0 */ package com.gemstone.gemfire.management.internal.cli.functions; import java.io.Serializable; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashSet; import java.util.Set; import com.gemstone.gemfire.cache.ExpirationAction; import com.gemstone.gemfire.cache.ExpirationAttributes; import com.gemstone.gemfire.cache.RegionAttributes; import com.gemstone.gemfire.cache.RegionShortcut; import com.gemstone.gemfire.management.internal.cli.CliUtil; import com.gemstone.gemfire.management.internal.cli.i18n.CliStrings; public class RegionFunctionArgs implements Serializable { private static final long serialVersionUID = -5158224572470173267L; private final String regionPath; private final RegionShortcut regionShortcut; private final String useAttributesFrom; private final Boolean skipIfExists; private final String keyConstraint; private final String valueConstraint; private Boolean statisticsEnabled; private final boolean isSetStatisticsEnabled; private final RegionFunctionArgs.ExpirationAttrs entryExpirationIdleTime; private final RegionFunctionArgs.ExpirationAttrs entryExpirationTTL; private final RegionFunctionArgs.ExpirationAttrs regionExpirationIdleTime; private final RegionFunctionArgs.ExpirationAttrs regionExpirationTTL; private final String diskStore; private Boolean diskSynchronous; private final boolean 
isSetDiskSynchronous; private Boolean enableAsyncConflation; private final boolean isSetEnableAsyncConflation; private Boolean enableSubscriptionConflation; private final boolean isSetEnableSubscriptionConflation; private final Set<String> cacheListeners; private final String cacheLoader; private final String cacheWriter; private final Set<String> asyncEventQueueIds; private final Set<String> gatewaySenderIds; private Boolean concurrencyChecksEnabled; private final boolean isSetConcurrencyChecksEnabled; private Boolean cloningEnabled; private final boolean isSetCloningEnabled; private Integer concurrencyLevel; private final boolean isSetConcurrencyLevel; private final PartitionArgs partitionArgs; private final Integer evictionMax; private String compressor; private final boolean isSetCompressor; private RegionAttributes<?, ?> regionAttributes; public RegionFunctionArgs(String regionPath, RegionShortcut regionShortcut, String useAttributesFrom, boolean skipIfExists, String keyConstraint, String valueConstraint, Boolean statisticsEnabled, RegionFunctionArgs.ExpirationAttrs entryExpirationIdleTime, RegionFunctionArgs.ExpirationAttrs entryExpirationTTL, RegionFunctionArgs.ExpirationAttrs regionExpirationIdleTime, RegionFunctionArgs.ExpirationAttrs regionExpirationTTL, String diskStore, Boolean diskSynchronous, Boolean enableAsyncConflation, Boolean enableSubscriptionConflation, String[] cacheListeners, String cacheLoader, String cacheWriter, String[] asyncEventQueueIds, String[] gatewaySenderIds, Boolean concurrencyChecksEnabled, Boolean cloningEnabled, Integer concurrencyLevel, String prColocatedWith, Integer prLocalMaxMemory, Long prRecoveryDelay, Integer prRedundantCopies, Long prStartupRecoveryDelay, Long prTotalMaxMemory, Integer prTotalNumBuckets, Integer evictionMax, String compressor) { this.regionPath = regionPath; this.regionShortcut = regionShortcut; this.useAttributesFrom = useAttributesFrom; this.skipIfExists = skipIfExists; this.keyConstraint = 
keyConstraint; this.valueConstraint = valueConstraint; this.evictionMax = evictionMax; this.isSetStatisticsEnabled = statisticsEnabled != null; if (this.isSetStatisticsEnabled) { this.statisticsEnabled = statisticsEnabled; } this.entryExpirationIdleTime = entryExpirationIdleTime; this.entryExpirationTTL = entryExpirationTTL; this.regionExpirationIdleTime = regionExpirationIdleTime; this.regionExpirationTTL = regionExpirationTTL; this.diskStore = diskStore; this.isSetDiskSynchronous = diskSynchronous != null; if (this.isSetDiskSynchronous) { this.diskSynchronous = diskSynchronous; } this.isSetEnableAsyncConflation = enableAsyncConflation != null; if (this.isSetEnableAsyncConflation) { this.enableAsyncConflation = enableAsyncConflation; } this.isSetEnableSubscriptionConflation = enableSubscriptionConflation != null; if (this.isSetEnableSubscriptionConflation) { this.enableSubscriptionConflation = enableSubscriptionConflation; } if (cacheListeners != null) { this.cacheListeners = new LinkedHashSet<String>(); this.cacheListeners.addAll(Arrays.asList(cacheListeners)); } else { this.cacheListeners = null; } this.cacheLoader = cacheLoader; this.cacheWriter = cacheWriter; if (asyncEventQueueIds != null) { this.asyncEventQueueIds = new LinkedHashSet<String>(); this.asyncEventQueueIds.addAll(Arrays.asList(asyncEventQueueIds)); } else { this.asyncEventQueueIds = null; } if (gatewaySenderIds != null) { this.gatewaySenderIds = new LinkedHashSet<String>(); this.gatewaySenderIds.addAll(Arrays.asList(gatewaySenderIds)); } else { this.gatewaySenderIds = null; } this.isSetConcurrencyChecksEnabled = concurrencyChecksEnabled != null; if (this.isSetConcurrencyChecksEnabled) { this.concurrencyChecksEnabled = concurrencyChecksEnabled; } this.isSetCloningEnabled = cloningEnabled != null; if (this.isSetCloningEnabled) { this.cloningEnabled = cloningEnabled; } this.isSetConcurrencyLevel = concurrencyLevel != null; if (this.isSetConcurrencyLevel) { this.concurrencyLevel = concurrencyLevel; } 
this.partitionArgs = new PartitionArgs(prColocatedWith, prLocalMaxMemory, prRecoveryDelay, prRedundantCopies, prStartupRecoveryDelay, prTotalMaxMemory, prTotalNumBuckets); this.isSetCompressor = (compressor != null); if(this.isSetCompressor) { this.compressor = compressor; } } // Constructor to be used for supplied region attributes public RegionFunctionArgs(String regionPath, String useAttributesFrom, boolean skipIfExists, String keyConstraint, String valueConstraint, Boolean statisticsEnabled, RegionFunctionArgs.ExpirationAttrs entryExpirationIdleTime, RegionFunctionArgs.ExpirationAttrs entryExpirationTTL, RegionFunctionArgs.ExpirationAttrs regionExpirationIdleTime, RegionFunctionArgs.ExpirationAttrs regionExpirationTTL, String diskStore, Boolean diskSynchronous, Boolean enableAsyncConflation, Boolean enableSubscriptionConflation, String[] cacheListeners, String cacheLoader, String cacheWriter, String[] asyncEventQueueIds, String[] gatewaySenderIds, Boolean concurrencyChecksEnabled, Boolean cloningEnabled, Integer concurrencyLevel, String prColocatedWith, Integer prLocalMaxMemory, Long prRecoveryDelay, Integer prRedundantCopies, Long prStartupRecoveryDelay, Long prTotalMaxMemory, Integer prTotalNumBuckets, RegionAttributes<?, ?> regionAttributes) { this(regionPath, null, useAttributesFrom, skipIfExists, keyConstraint, valueConstraint, statisticsEnabled, entryExpirationIdleTime, entryExpirationTTL, regionExpirationIdleTime, regionExpirationTTL, diskStore, diskSynchronous, enableAsyncConflation, enableSubscriptionConflation, cacheListeners, cacheLoader, cacheWriter, asyncEventQueueIds, gatewaySenderIds, concurrencyChecksEnabled, cloningEnabled, concurrencyLevel, prColocatedWith, prLocalMaxMemory, prRecoveryDelay, prRedundantCopies, prStartupRecoveryDelay, prTotalMaxMemory, prTotalNumBuckets, null, null); this.regionAttributes = regionAttributes; } /** * @return the regionPath */ public String getRegionPath() { return this.regionPath; } /** * @return the 
regionShortcut */ public RegionShortcut getRegionShortcut() { return this.regionShortcut; } /** * @return the useAttributesFrom */ public String getUseAttributesFrom() { return this.useAttributesFrom; } /** * @return true if need to use specified region attributes */ public Boolean isSetUseAttributesFrom() { return this.regionShortcut == null && this.useAttributesFrom != null && this.regionAttributes != null; } /** * @return the skipIfExists */ public Boolean isSkipIfExists() { return this.skipIfExists; } /** * @return the keyConstraint */ public String getKeyConstraint() { return this.keyConstraint; } /** * @return the valueConstraint */ public String getValueConstraint() { return this.valueConstraint; } /** * @return the statisticsEnabled */ public Boolean isStatisticsEnabled() { return this.statisticsEnabled; } /** * @return the isSetStatisticsEnabled */ public Boolean isSetStatisticsEnabled() { return this.isSetStatisticsEnabled; } /** * @return the entryExpirationIdleTime */ public RegionFunctionArgs.ExpirationAttrs getEntryExpirationIdleTime() { return this.entryExpirationIdleTime; } /** * @return the entryExpirationTTL */ public RegionFunctionArgs.ExpirationAttrs getEntryExpirationTTL() { return this.entryExpirationTTL; } /** * @return the regionExpirationIdleTime */ public RegionFunctionArgs.ExpirationAttrs getRegionExpirationIdleTime() { return this.regionExpirationIdleTime; } /** * @return the regionExpirationTTL */ public RegionFunctionArgs.ExpirationAttrs getRegionExpirationTTL() { return this.regionExpirationTTL; } /** * @return the diskStore */ public String getDiskStore() { return this.diskStore; } /** * @return the diskSynchronous */ public Boolean isDiskSynchronous() { return this.diskSynchronous; } /** * @return the isSetDiskSynchronous */ public Boolean isSetDiskSynchronous() { return this.isSetDiskSynchronous; } /** * @return the enableAsyncConflation */ public Boolean isEnableAsyncConflation() { return this.enableAsyncConflation; } /** * 
@return the isSetEnableAsyncConflation */ public Boolean isSetEnableAsyncConflation() { return this.isSetEnableAsyncConflation; } /** * @return the enableSubscriptionConflation */ public Boolean isEnableSubscriptionConflation() { return this.enableSubscriptionConflation; } /** * @return the isSetEnableSubscriptionConflation */ public Boolean isSetEnableSubscriptionConflation() { return this.isSetEnableSubscriptionConflation; } /** * @return the cacheListeners */ public Set<String> getCacheListeners() { if (this.cacheListeners == null) { return null; } return Collections.unmodifiableSet(this.cacheListeners); } /** * @return the cacheLoader */ public String getCacheLoader() { return this.cacheLoader; } /** * @return the cacheWriter */ public String getCacheWriter() { return this.cacheWriter; } /** * @return the asyncEventQueueIds */ public Set<String> getAsyncEventQueueIds() { if (this.asyncEventQueueIds == null) { return null; } return Collections.unmodifiableSet(this.asyncEventQueueIds); } /** * @return the gatewaySenderIds */ public Set<String> getGatewaySenderIds() { if (this.gatewaySenderIds == null) { return null; } return Collections.unmodifiableSet(this.gatewaySenderIds); } /** * @return the concurrencyChecksEnabled */ public Boolean isConcurrencyChecksEnabled() { return this.concurrencyChecksEnabled; } /** * @return the isSetConcurrencyChecksEnabled */ public Boolean isSetConcurrencyChecksEnabled() { return this.isSetConcurrencyChecksEnabled; } /** * @return the cloningEnabled */ public Boolean isCloningEnabled() { return this.cloningEnabled; } /** * @return the isSetCloningEnabled */ public Boolean isSetCloningEnabled() { return this.isSetCloningEnabled; } /** * @return the concurrencyLevel */ public Integer getConcurrencyLevel() { return this.concurrencyLevel; } /** * @return the isSetConcurrencyLevel */ public Boolean isSetConcurrencyLevel() { return this.isSetConcurrencyLevel; } public boolean withPartitioning() { return hasPartitionAttributes() || 
(this.regionShortcut != null && this.regionShortcut.name().startsWith("PARTITION")); } /** * @return the partitionArgs */ public boolean hasPartitionAttributes() { return this.partitionArgs != null && this.partitionArgs.hasPartitionAttributes(); } /** * @return the partitionArgs */ public PartitionArgs getPartitionArgs() { return this.partitionArgs; } /** * @return the evictionMax */ public Integer getEvictionMax() { return this.evictionMax; } /** * @return the compressor. */ public String getCompressor() { return this.compressor; } /** * @return the isSetCompressor. */ public boolean isSetCompressor() { return this.isSetCompressor; } /** * @return the regionAttributes */ @SuppressWarnings("unchecked") public <K, V> RegionAttributes<K, V> getRegionAttributes() { return (RegionAttributes<K, V>) this.regionAttributes; } public static class ExpirationAttrs implements Serializable { private static final long serialVersionUID = 1474255033398008062L; private ExpirationFor type; private Integer time; private ExpirationAction action; public ExpirationAttrs(ExpirationFor type, Integer time, String action) { this.type = type; this.time = time; if (action != null) { this.action = getExpirationAction(action); } } public ExpirationAttributes convertToExpirationAttributes() { ExpirationAttributes expirationAttr = null; if (action != null) { expirationAttr = new ExpirationAttributes(time, action); } else { expirationAttr = new ExpirationAttributes(time); } return expirationAttr; } /** * @return the type */ public ExpirationFor getType() { return type; } /** * @return the time */ public Integer getTime() { return time; } /** * @return the action */ public ExpirationAction getAction() { return action; } @Override public String toString() { StringBuilder builder = new StringBuilder(); builder.append(ExpirationAttrs.class.getSimpleName() + " [type="); builder.append(type); builder.append(", time="); builder.append(time); builder.append(", action="); builder.append(action); 
builder.append("]"); return builder.toString(); } private static ExpirationAction getExpirationAction(String action) { if (action == null) { return ExpirationAttributes.DEFAULT.getAction(); } action = action.replace('-', '_'); if (action.equalsIgnoreCase(ExpirationAction.DESTROY.toString())) { return ExpirationAction.DESTROY; } else if (action.equalsIgnoreCase(ExpirationAction.INVALIDATE .toString())) { return ExpirationAction.INVALIDATE; } else if (action.equalsIgnoreCase(ExpirationAction.LOCAL_DESTROY .toString())) { return ExpirationAction.LOCAL_DESTROY; } else if (action.equalsIgnoreCase(ExpirationAction.LOCAL_INVALIDATE .toString())) { return ExpirationAction.LOCAL_INVALIDATE; } else { throw new IllegalArgumentException(CliStrings.format(CliStrings.CREATE_REGION__MSG__EXPIRATION_ACTION_0_IS_NOT_VALID, new Object[] {action})); } } public static enum ExpirationFor { REGION_IDLE, REGION_TTL, ENTRY_IDLE, ENTRY_TTL; } } public static class PartitionArgs implements Serializable { private static final long serialVersionUID = 5907052187323280919L; private final String prColocatedWith; private int prLocalMaxMemory; private final boolean isSetPRLocalMaxMemory; private long prRecoveryDelay; private final boolean isSetPRRecoveryDelay; private int prRedundantCopies; private final boolean isSetPRRedundantCopies; private long prStartupRecoveryDelay; private final boolean isSetPRStartupRecoveryDelay; private long prTotalMaxMemory; private final boolean isSetPRTotalMaxMemory; private int prTotalNumBuckets; private final boolean isSetPRTotalNumBuckets; private boolean hasPartitionAttributes; private final Set<String> userSpecifiedPartitionAttributes = new HashSet<String>(); public PartitionArgs(String prColocatedWith, Integer prLocalMaxMemory, Long prRecoveryDelay, Integer prRedundantCopies, Long prStartupRecoveryDelay, Long prTotalMaxMemory, Integer prTotalNumBuckets) { this.prColocatedWith = prColocatedWith; if (this.prColocatedWith != null) { this.hasPartitionAttributes = 
true; userSpecifiedPartitionAttributes.add(CliStrings.CREATE_REGION__COLOCATEDWITH); } this.isSetPRLocalMaxMemory = prLocalMaxMemory != null; if (this.isSetPRLocalMaxMemory) { this.prLocalMaxMemory = prLocalMaxMemory; this.hasPartitionAttributes = true; userSpecifiedPartitionAttributes.add(CliStrings.CREATE_REGION__LOCALMAXMEMORY); } this.isSetPRRecoveryDelay = prRecoveryDelay != null; if (this.isSetPRRecoveryDelay) { this.prRecoveryDelay = prRecoveryDelay; this.hasPartitionAttributes = true; userSpecifiedPartitionAttributes.add(CliStrings.CREATE_REGION__RECOVERYDELAY); } this.isSetPRRedundantCopies = prRedundantCopies != null; if (this.isSetPRRedundantCopies) { this.prRedundantCopies = prRedundantCopies; this.hasPartitionAttributes = true; userSpecifiedPartitionAttributes.add(CliStrings.CREATE_REGION__REDUNDANTCOPIES); } this.isSetPRStartupRecoveryDelay = prStartupRecoveryDelay != null; if (this.isSetPRStartupRecoveryDelay) { this.prStartupRecoveryDelay = prStartupRecoveryDelay; this.hasPartitionAttributes = true; userSpecifiedPartitionAttributes.add(CliStrings.CREATE_REGION__STARTUPRECOVERYDDELAY); } this.isSetPRTotalMaxMemory = prTotalMaxMemory != null; if (this.isSetPRTotalMaxMemory) { this.prTotalMaxMemory = prTotalMaxMemory; this.hasPartitionAttributes = true; userSpecifiedPartitionAttributes.add(CliStrings.CREATE_REGION__TOTALMAXMEMORY); } this.isSetPRTotalNumBuckets = prTotalNumBuckets != null; if (this.isSetPRTotalNumBuckets) { this.prTotalNumBuckets = prTotalNumBuckets; this.hasPartitionAttributes = true; userSpecifiedPartitionAttributes.add(CliStrings.CREATE_REGION__TOTALNUMBUCKETS); } } /** * @return the hasPartitionAttributes */ public Boolean hasPartitionAttributes() { return hasPartitionAttributes; } /** * @return the userSpecifiedPartitionAttributes */ public String getUserSpecifiedPartitionAttributes() { return CliUtil.collectionToString(userSpecifiedPartitionAttributes, -1); } /** * @return the prColocatedWith */ public String getPrColocatedWith() 
{ return prColocatedWith; } /** * @return the prLocalMaxMemory */ public Integer getPrLocalMaxMemory() { return prLocalMaxMemory; } /** * @return the isSetPRLocalMaxMemory */ public Boolean isSetPRLocalMaxMemory() { return isSetPRLocalMaxMemory; } /** * @return the prRecoveryDelay */ public Long getPrRecoveryDelay() { return prRecoveryDelay; } /** * @return the isSetPRRecoveryDelay */ public Boolean isSetPRRecoveryDelay() { return isSetPRRecoveryDelay; } /** * @return the prRedundantCopies */ public Integer getPrRedundantCopies() { return prRedundantCopies; } /** * @return the isSetPRRedundantCopies */ public Boolean isSetPRRedundantCopies() { return isSetPRRedundantCopies; } /** * @return the prStartupRecoveryDelay */ public Long getPrStartupRecoveryDelay() { return prStartupRecoveryDelay; } /** * @return the isSetPRStartupRecoveryDelay */ public Boolean isSetPRStartupRecoveryDelay() { return isSetPRStartupRecoveryDelay; } /** * @return the prTotalMaxMemory */ public Long getPrTotalMaxMemory() { return prTotalMaxMemory; } /** * @return the isSetPRTotalMaxMemory */ public Boolean isSetPRTotalMaxMemory() { return isSetPRTotalMaxMemory; } /** * @return the prTotalNumBuckets */ public Integer getPrTotalNumBuckets() { return prTotalNumBuckets; } /** * @return the isSetPRTotalNumBuckets */ public Boolean isSetPRTotalNumBuckets() { return isSetPRTotalNumBuckets; } } }
ameybarve15/incubator-geode
gemfire-core/src/main/java/com/gemstone/gemfire/management/internal/cli/functions/RegionFunctionArgs.java
Java
apache-2.0
22,013
/* * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights * Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.services.elasticloadbalancing.model; import java.io.Serializable; /** * <p> * Information about a listener. * </p> * <p> * For information about the protocols and the ports supported by Elastic Load * Balancing, see <a href= * "http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-listener-config.html" * >Listener Configurations for Elastic Load Balancing</a> in the <i>Elastic * Load Balancing Developer Guide</i>. * </p> */ public class Listener implements Serializable, Cloneable { /** * <p> * The load balancer transport protocol to use for routing: HTTP, HTTPS, * TCP, or SSL. * </p> */ private String protocol; /** * <p> * The port on which the load balancer is listening. On EC2-VPC, you can * specify any port from the range 1-65535. On EC2-Classic, you can specify * any port from the following list: 25, 80, 443, 465, 587, 1024-65535. * </p> */ private Integer loadBalancerPort; /** * <p> * The protocol to use for routing traffic to back-end instances: HTTP, * HTTPS, TCP, or SSL. * </p> * <p> * If the front-end protocol is HTTP, HTTPS, TCP, or SSL, * <code>InstanceProtocol</code> must be at the same protocol. * </p> * <p> * If there is another listener with the same <code>InstancePort</code> * whose <code>InstanceProtocol</code> is secure, (HTTPS or SSL), the * listener's <code>InstanceProtocol</code> must also be secure. 
* </p> * <p> * If there is another listener with the same <code>InstancePort</code> * whose <code>InstanceProtocol</code> is HTTP or TCP, the listener's * <code>InstanceProtocol</code> must be HTTP or TCP. * </p> */ private String instanceProtocol; /** * <p> * The port on which the instance is listening. * </p> */ private Integer instancePort; /** * <p> * The Amazon Resource Name (ARN) of the server certificate. * </p> */ private String sSLCertificateId; /** * Default constructor for Listener object. Callers should use the setter or * fluent setter (with...) methods to initialize the object after creating * it. */ public Listener() { } /** * Constructs a new Listener object. Callers should use the setter or fluent * setter (with...) methods to initialize any additional object members. * * @param protocol * The load balancer transport protocol to use for routing: HTTP, * HTTPS, TCP, or SSL. * @param loadBalancerPort * The port on which the load balancer is listening. On EC2-VPC, you * can specify any port from the range 1-65535. On EC2-Classic, you * can specify any port from the following list: 25, 80, 443, 465, * 587, 1024-65535. * @param instancePort * The port on which the instance is listening. */ public Listener(String protocol, Integer loadBalancerPort, Integer instancePort) { setProtocol(protocol); setLoadBalancerPort(loadBalancerPort); setInstancePort(instancePort); } /** * <p> * The load balancer transport protocol to use for routing: HTTP, HTTPS, * TCP, or SSL. * </p> * * @param protocol * The load balancer transport protocol to use for routing: HTTP, * HTTPS, TCP, or SSL. */ public void setProtocol(String protocol) { this.protocol = protocol; } /** * <p> * The load balancer transport protocol to use for routing: HTTP, HTTPS, * TCP, or SSL. * </p> * * @return The load balancer transport protocol to use for routing: HTTP, * HTTPS, TCP, or SSL. 
*/ public String getProtocol() { return this.protocol; } /** * <p> * The load balancer transport protocol to use for routing: HTTP, HTTPS, * TCP, or SSL. * </p> * * @param protocol * The load balancer transport protocol to use for routing: HTTP, * HTTPS, TCP, or SSL. * @return Returns a reference to this object so that method calls can be * chained together. */ public Listener withProtocol(String protocol) { setProtocol(protocol); return this; } /** * <p> * The port on which the load balancer is listening. On EC2-VPC, you can * specify any port from the range 1-65535. On EC2-Classic, you can specify * any port from the following list: 25, 80, 443, 465, 587, 1024-65535. * </p> * * @param loadBalancerPort * The port on which the load balancer is listening. On EC2-VPC, you * can specify any port from the range 1-65535. On EC2-Classic, you * can specify any port from the following list: 25, 80, 443, 465, * 587, 1024-65535. */ public void setLoadBalancerPort(Integer loadBalancerPort) { this.loadBalancerPort = loadBalancerPort; } /** * <p> * The port on which the load balancer is listening. On EC2-VPC, you can * specify any port from the range 1-65535. On EC2-Classic, you can specify * any port from the following list: 25, 80, 443, 465, 587, 1024-65535. * </p> * * @return The port on which the load balancer is listening. On EC2-VPC, you * can specify any port from the range 1-65535. On EC2-Classic, you * can specify any port from the following list: 25, 80, 443, 465, * 587, 1024-65535. */ public Integer getLoadBalancerPort() { return this.loadBalancerPort; } /** * <p> * The port on which the load balancer is listening. On EC2-VPC, you can * specify any port from the range 1-65535. On EC2-Classic, you can specify * any port from the following list: 25, 80, 443, 465, 587, 1024-65535. * </p> * * @param loadBalancerPort * The port on which the load balancer is listening. On EC2-VPC, you * can specify any port from the range 1-65535. 
On EC2-Classic, you * can specify any port from the following list: 25, 80, 443, 465, * 587, 1024-65535. * @return Returns a reference to this object so that method calls can be * chained together. */ public Listener withLoadBalancerPort(Integer loadBalancerPort) { setLoadBalancerPort(loadBalancerPort); return this; } /** * <p> * The protocol to use for routing traffic to back-end instances: HTTP, * HTTPS, TCP, or SSL. * </p> * <p> * If the front-end protocol is HTTP, HTTPS, TCP, or SSL, * <code>InstanceProtocol</code> must be at the same protocol. * </p> * <p> * If there is another listener with the same <code>InstancePort</code> * whose <code>InstanceProtocol</code> is secure, (HTTPS or SSL), the * listener's <code>InstanceProtocol</code> must also be secure. * </p> * <p> * If there is another listener with the same <code>InstancePort</code> * whose <code>InstanceProtocol</code> is HTTP or TCP, the listener's * <code>InstanceProtocol</code> must be HTTP or TCP. * </p> * * @param instanceProtocol * The protocol to use for routing traffic to back-end instances: * HTTP, HTTPS, TCP, or SSL.</p> * <p> * If the front-end protocol is HTTP, HTTPS, TCP, or SSL, * <code>InstanceProtocol</code> must be at the same protocol. * </p> * <p> * If there is another listener with the same * <code>InstancePort</code> whose <code>InstanceProtocol</code> is * secure, (HTTPS or SSL), the listener's * <code>InstanceProtocol</code> must also be secure. * </p> * <p> * If there is another listener with the same * <code>InstancePort</code> whose <code>InstanceProtocol</code> is * HTTP or TCP, the listener's <code>InstanceProtocol</code> must be * HTTP or TCP. */ public void setInstanceProtocol(String instanceProtocol) { this.instanceProtocol = instanceProtocol; } /** * <p> * The protocol to use for routing traffic to back-end instances: HTTP, * HTTPS, TCP, or SSL. 
* </p> * <p> * If the front-end protocol is HTTP, HTTPS, TCP, or SSL, * <code>InstanceProtocol</code> must be at the same protocol. * </p> * <p> * If there is another listener with the same <code>InstancePort</code> * whose <code>InstanceProtocol</code> is secure, (HTTPS or SSL), the * listener's <code>InstanceProtocol</code> must also be secure. * </p> * <p> * If there is another listener with the same <code>InstancePort</code> * whose <code>InstanceProtocol</code> is HTTP or TCP, the listener's * <code>InstanceProtocol</code> must be HTTP or TCP. * </p> * * @return The protocol to use for routing traffic to back-end instances: * HTTP, HTTPS, TCP, or SSL.</p> * <p> * If the front-end protocol is HTTP, HTTPS, TCP, or SSL, * <code>InstanceProtocol</code> must be at the same protocol. * </p> * <p> * If there is another listener with the same * <code>InstancePort</code> whose <code>InstanceProtocol</code> is * secure, (HTTPS or SSL), the listener's * <code>InstanceProtocol</code> must also be secure. * </p> * <p> * If there is another listener with the same * <code>InstancePort</code> whose <code>InstanceProtocol</code> is * HTTP or TCP, the listener's <code>InstanceProtocol</code> must be * HTTP or TCP. */ public String getInstanceProtocol() { return this.instanceProtocol; } /** * <p> * The protocol to use for routing traffic to back-end instances: HTTP, * HTTPS, TCP, or SSL. * </p> * <p> * If the front-end protocol is HTTP, HTTPS, TCP, or SSL, * <code>InstanceProtocol</code> must be at the same protocol. * </p> * <p> * If there is another listener with the same <code>InstancePort</code> * whose <code>InstanceProtocol</code> is secure, (HTTPS or SSL), the * listener's <code>InstanceProtocol</code> must also be secure. * </p> * <p> * If there is another listener with the same <code>InstancePort</code> * whose <code>InstanceProtocol</code> is HTTP or TCP, the listener's * <code>InstanceProtocol</code> must be HTTP or TCP. 
* </p> * * @param instanceProtocol * The protocol to use for routing traffic to back-end instances: * HTTP, HTTPS, TCP, or SSL.</p> * <p> * If the front-end protocol is HTTP, HTTPS, TCP, or SSL, * <code>InstanceProtocol</code> must be at the same protocol. * </p> * <p> * If there is another listener with the same * <code>InstancePort</code> whose <code>InstanceProtocol</code> is * secure, (HTTPS or SSL), the listener's * <code>InstanceProtocol</code> must also be secure. * </p> * <p> * If there is another listener with the same * <code>InstancePort</code> whose <code>InstanceProtocol</code> is * HTTP or TCP, the listener's <code>InstanceProtocol</code> must be * HTTP or TCP. * @return Returns a reference to this object so that method calls can be * chained together. */ public Listener withInstanceProtocol(String instanceProtocol) { setInstanceProtocol(instanceProtocol); return this; } /** * <p> * The port on which the instance is listening. * </p> * * @param instancePort * The port on which the instance is listening. */ public void setInstancePort(Integer instancePort) { this.instancePort = instancePort; } /** * <p> * The port on which the instance is listening. * </p> * * @return The port on which the instance is listening. */ public Integer getInstancePort() { return this.instancePort; } /** * <p> * The port on which the instance is listening. * </p> * * @param instancePort * The port on which the instance is listening. * @return Returns a reference to this object so that method calls can be * chained together. */ public Listener withInstancePort(Integer instancePort) { setInstancePort(instancePort); return this; } /** * <p> * The Amazon Resource Name (ARN) of the server certificate. * </p> * * @param sSLCertificateId * The Amazon Resource Name (ARN) of the server certificate. */ public void setSSLCertificateId(String sSLCertificateId) { this.sSLCertificateId = sSLCertificateId; } /** * <p> * The Amazon Resource Name (ARN) of the server certificate. 
* </p> * * @return The Amazon Resource Name (ARN) of the server certificate. */ public String getSSLCertificateId() { return this.sSLCertificateId; } /** * <p> * The Amazon Resource Name (ARN) of the server certificate. * </p> * * @param sSLCertificateId * The Amazon Resource Name (ARN) of the server certificate. * @return Returns a reference to this object so that method calls can be * chained together. */ public Listener withSSLCertificateId(String sSLCertificateId) { setSSLCertificateId(sSLCertificateId); return this; } /** * Returns a string representation of this object; useful for testing and * debugging. * * @return A string representation of this object. * * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); if (getProtocol() != null) sb.append("Protocol: " + getProtocol() + ","); if (getLoadBalancerPort() != null) sb.append("LoadBalancerPort: " + getLoadBalancerPort() + ","); if (getInstanceProtocol() != null) sb.append("InstanceProtocol: " + getInstanceProtocol() + ","); if (getInstancePort() != null) sb.append("InstancePort: " + getInstancePort() + ","); if (getSSLCertificateId() != null) sb.append("SSLCertificateId: " + getSSLCertificateId()); sb.append("}"); return sb.toString(); } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (obj instanceof Listener == false) return false; Listener other = (Listener) obj; if (other.getProtocol() == null ^ this.getProtocol() == null) return false; if (other.getProtocol() != null && other.getProtocol().equals(this.getProtocol()) == false) return false; if (other.getLoadBalancerPort() == null ^ this.getLoadBalancerPort() == null) return false; if (other.getLoadBalancerPort() != null && other.getLoadBalancerPort().equals( this.getLoadBalancerPort()) == false) return false; if (other.getInstanceProtocol() == null ^ this.getInstanceProtocol() == null) return false; if 
(other.getInstanceProtocol() != null && other.getInstanceProtocol().equals( this.getInstanceProtocol()) == false) return false; if (other.getInstancePort() == null ^ this.getInstancePort() == null) return false; if (other.getInstancePort() != null && other.getInstancePort().equals(this.getInstancePort()) == false) return false; if (other.getSSLCertificateId() == null ^ this.getSSLCertificateId() == null) return false; if (other.getSSLCertificateId() != null && other.getSSLCertificateId().equals( this.getSSLCertificateId()) == false) return false; return true; } @Override public int hashCode() { final int prime = 31; int hashCode = 1; hashCode = prime * hashCode + ((getProtocol() == null) ? 0 : getProtocol().hashCode()); hashCode = prime * hashCode + ((getLoadBalancerPort() == null) ? 0 : getLoadBalancerPort() .hashCode()); hashCode = prime * hashCode + ((getInstanceProtocol() == null) ? 0 : getInstanceProtocol() .hashCode()); hashCode = prime * hashCode + ((getInstancePort() == null) ? 0 : getInstancePort() .hashCode()); hashCode = prime * hashCode + ((getSSLCertificateId() == null) ? 0 : getSSLCertificateId() .hashCode()); return hashCode; } @Override public Listener clone() { try { return (Listener) super.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException( "Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e); } } }
mhurne/aws-sdk-java
aws-java-sdk-elasticloadbalancing/src/main/java/com/amazonaws/services/elasticloadbalancing/model/Listener.java
Java
apache-2.0
18,621
// Generated by xsd compiler for android/java // DO NOT CHANGE! package ebay.apis.eblbasecomponents; import java.io.Serializable; import com.leansoft.nano.annotation.*; /** * * Contains the ASQ subjects for the user specified in the request. * */ @RootElement(name = "GetMessagePreferencesResponse", namespace = "urn:ebay:apis:eBLBaseComponents") public class GetMessagePreferencesResponseType extends AbstractResponseType implements Serializable { private static final long serialVersionUID = -1L; @Element(name = "ASQPreferences") private ASQPreferencesType asqPreferences; /** * public getter * * * Returns a seller's ASQ subjects, each in its own Subject * node. If the seller has not customized the ASQ subjects * using SetMessagePreferences, the call will return the * current default values. Returned if * IncludeASQPreferences = true was specified in the * request. * * * @returns ebay.apis.eblbasecomponents.ASQPreferencesType */ public ASQPreferencesType getAsqPreferences() { return this.asqPreferences; } /** * public setter * * * Returns a seller's ASQ subjects, each in its own Subject * node. If the seller has not customized the ASQ subjects * using SetMessagePreferences, the call will return the * current default values. Returned if * IncludeASQPreferences = true was specified in the * request. * * * @param ebay.apis.eblbasecomponents.ASQPreferencesType */ public void setAsqPreferences(ASQPreferencesType asqPreferences) { this.asqPreferences = asqPreferences; } }
bulldog2011/nano-rest
sample/HelloEBayTrading/src/ebay/apis/eblbasecomponents/GetMessagePreferencesResponseType.java
Java
apache-2.0
1,650
/* * Copyright (c) 1996, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package java.awt.event; /** * An abstract adapter class for receiving keyboard events. * The methods in this class are empty. This class exists as * convenience for creating listener objects. * <P> * Extend this class to create a {@code KeyEvent} listener * and override the methods for the events of interest. (If you implement the * {@code KeyListener} interface, you have to define all of * the methods in it. This abstract class defines null methods for them * all, so you can only have to define methods for events you care about.) * <P> * Create a listener object using the extended class and then register it with * a component using the component's {@code addKeyListener} * method. 
When a key is pressed, released, or typed, * the relevant method in the listener object is invoked, * and the {@code KeyEvent} is passed to it. * * @author Carl Quinn * * @see KeyEvent * @see KeyListener * @see <a href="https://docs.oracle.com/javase/tutorial/uiswing/events/keylistener.html">Tutorial: Writing a Key Listener</a> * * @since 1.1 */ public abstract class KeyAdapter implements KeyListener { /** * Constructs a {@code KeyAdapter}. */ protected KeyAdapter() {} /** * Invoked when a key has been typed. * This event occurs when a key press is followed by a key release. */ public void keyTyped(KeyEvent e) {} /** * Invoked when a key has been pressed. */ public void keyPressed(KeyEvent e) {} /** * Invoked when a key has been released. */ public void keyReleased(KeyEvent e) {} }
mirkosertic/Bytecoder
classlib/java.desktop/src/main/resources/META-INF/modules/java.desktop/classes/java/awt/event/KeyAdapter.java
Java
apache-2.0
2,803
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.math.expr.vector;

import org.apache.druid.math.expr.ExprType;

/**
 * Specialized {@link UnivariateFunctionVectorProcessor} for vector functions
 * mapping (double[]) -> double[]: the scalar {@link #apply(double)} is applied
 * to each element of the input vector.
 */
public abstract class DoubleOutDoubleInFunctionVectorProcessor
    extends UnivariateFunctionVectorProcessor<double[], double[]>
{
  /**
   * @param processor     producer of the input vector; wrapped so its output
   *                      is cast to {@link ExprType#DOUBLE} before use
   * @param maxVectorSize maximum elements per vector batch; also sizes the
   *                      double[] output buffer handed to the superclass
   *                      (presumably exposed back as {@code outValues} —
   *                      superclass not visible here, confirm)
   */
  public DoubleOutDoubleInFunctionVectorProcessor(ExprVectorProcessor<double[]> processor, int maxVectorSize)
  {
    super(CastToTypeVectorProcessor.cast(processor, ExprType.DOUBLE), maxVectorSize, new double[maxVectorSize]);
  }

  /** Scalar function applied element-wise to the input vector. */
  public abstract double apply(double input);

  @Override
  public ExprType getOutputType()
  {
    // Output is always a double vector regardless of the wrapped processor.
    return ExprType.DOUBLE;
  }

  @Override
  final void processIndex(double[] input, int i)
  {
    // Transform one element into the shared output buffer.
    outValues[i] = apply(input[i]);
  }

  @Override
  final ExprEvalVector<double[]> asEval()
  {
    // Package the accumulated values (and null mask maintained by the
    // superclass) as an evaluatable double vector.
    return new ExprEvalDoubleVector(outValues, outNulls);
  }
}
gianm/druid
core/src/main/java/org/apache/druid/math/expr/vector/DoubleOutDoubleInFunctionVectorProcessor.java
Java
apache-2.0
1,722
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

"""

from ambari_commons import OSCheck
from resource_management.libraries.functions import format
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.version import format_stack_version
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script

# a map of the Ambari role to the component name
# for use with <stack-root>/current/<component>
SERVER_ROLE_DIRECTORY_MAP = {
  'ZOOKEEPER_SERVER' : 'zookeeper-server',
  'ZOOKEEPER_CLIENT' : 'zookeeper-client'
}

# Component directory for this host's role; falls back to the client mapping
# when the role is not in the map above.
component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "ZOOKEEPER_CLIENT")

# Full command configuration pushed by the Ambari server.
config = Script.get_config()

# NOTE(review): the else-branch scope below was reconstructed from collapsed
# source following the upstream Ambari layout — confirm against the original.
if OSCheck.is_windows_family():
  # On Windows, ZooKeeper runs as a Windows service identified by this name.
  zookeeper_win_service_name = "zkServer"
else:
  # On Linux, server status is determined from the PID file.
  zk_pid_dir = config['configurations']['zookeeper-env']['zk_pid_dir']
  zk_pid_file = format("{zk_pid_dir}/zookeeper_server.pid")

  # Security related/required params
  hostname = config['hostname']
  security_enabled = config['configurations']['cluster-env']['security_enabled']
  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))

  tmp_dir = Script.get_tmp_dir()
  zk_user = config['configurations']['zookeeper-env']['zk_user']

  stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
  stack_version_formatted = format_stack_version(stack_version_unformatted)
  stack_root = Script.get_stack_root()

  # Default (non-versioned) config dir; switched to the <stack-root>/current
  # layout when the stack supports rolling upgrade.
  config_dir = "/etc/zookeeper/conf"
  if stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version_formatted):
    config_dir = format("{stack_root}/current/{component_directory}/conf")

# Stack name (e.g. the distribution identifier); None when not provided.
stack_name = default("/hostLevelParams/stack_name", None)
arenadata/ambari
ambari-server/src/main/resources/stacks/ADH/1.0/services/ZOOKEEPER/package/scripts/status_params.py
Python
apache-2.0
2,702
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.runtime.operators.python.table; import org.apache.flink.configuration.Configuration; import org.apache.flink.python.PythonFunctionRunner; import org.apache.flink.table.api.DataTypes; import org.apache.flink.table.data.RowData; import org.apache.flink.table.functions.python.PythonFunctionInfo; import org.apache.flink.table.planner.plan.utils.JoinTypeUtil; import org.apache.flink.table.runtime.util.RowDataHarnessAssertor; import org.apache.flink.table.runtime.utils.PassThroughPythonTableFunctionRunner; import org.apache.flink.table.runtime.utils.PythonTestUtils; import org.apache.flink.table.types.logical.LogicalType; import org.apache.flink.table.types.logical.RowType; import org.apache.flink.types.RowKind; import org.apache.calcite.rel.core.JoinRelType; import java.util.Collection; import java.util.HashMap; import static org.apache.flink.table.runtime.util.StreamRecordUtils.row; /** Tests for {@link RowDataPythonTableFunctionOperator}. 
*/ public class RowDataPythonTableFunctionOperatorTest extends PythonTableFunctionOperatorTestBase<RowData, RowData, RowData> { private final RowDataHarnessAssertor assertor = new RowDataHarnessAssertor( new LogicalType[] { DataTypes.STRING().getLogicalType(), DataTypes.STRING().getLogicalType(), DataTypes.BIGINT().getLogicalType(), DataTypes.BIGINT().getLogicalType() }); @Override public RowData newRow(boolean accumulateMsg, Object... fields) { if (accumulateMsg) { return row(fields); } else { RowData row = row(fields); row.setRowKind(RowKind.DELETE); return row; } } @Override public void assertOutputEquals( String message, Collection<Object> expected, Collection<Object> actual) { assertor.assertOutputEquals(message, expected, actual); } @Override public AbstractPythonTableFunctionOperator<RowData, RowData, RowData> getTestOperator( Configuration config, PythonFunctionInfo tableFunction, RowType inputType, RowType outputType, int[] udfInputOffsets, JoinRelType joinRelType) { return new RowDataPassThroughPythonTableFunctionOperator( config, tableFunction, inputType, outputType, udfInputOffsets, joinRelType); } private static class RowDataPassThroughPythonTableFunctionOperator extends RowDataPythonTableFunctionOperator { RowDataPassThroughPythonTableFunctionOperator( Configuration config, PythonFunctionInfo tableFunction, RowType inputType, RowType outputType, int[] udfInputOffsets, JoinRelType joinRelType) { super( config, tableFunction, inputType, outputType, udfInputOffsets, JoinTypeUtil.getFlinkJoinType(joinRelType)); } @Override public PythonFunctionRunner createPythonFunctionRunner() { return new PassThroughPythonTableFunctionRunner( getRuntimeContext().getTaskName(), PythonTestUtils.createTestEnvironmentManager(), userDefinedFunctionInputType, userDefinedFunctionOutputType, getFunctionUrn(), getUserDefinedFunctionsProto(), getInputOutputCoderUrn(), new HashMap<>(), PythonTestUtils.createMockFlinkMetricContainer()); } } }
rmetzger/flink
flink-python/src/test/java/org/apache/flink/table/runtime/operators/python/table/RowDataPythonTableFunctionOperatorTest.java
Java
apache-2.0
4,621
// Code generated by smithy-go-codegen DO NOT EDIT. package types type EncryptionType string // Enum values for EncryptionType const ( EncryptionTypeAes256 EncryptionType = "AES256" EncryptionTypeKms EncryptionType = "KMS" ) // Values returns all known values for EncryptionType. Note that this can be // expanded in the future, and so it is only as up to date as the client. The // ordering of this slice is not guaranteed to be stable across updates. func (EncryptionType) Values() []EncryptionType { return []EncryptionType{ "AES256", "KMS", } } type FindingSeverity string // Enum values for FindingSeverity const ( FindingSeverityInformational FindingSeverity = "INFORMATIONAL" FindingSeverityLow FindingSeverity = "LOW" FindingSeverityMedium FindingSeverity = "MEDIUM" FindingSeverityHigh FindingSeverity = "HIGH" FindingSeverityCritical FindingSeverity = "CRITICAL" FindingSeverityUndefined FindingSeverity = "UNDEFINED" ) // Values returns all known values for FindingSeverity. Note that this can be // expanded in the future, and so it is only as up to date as the client. The // ordering of this slice is not guaranteed to be stable across updates. func (FindingSeverity) Values() []FindingSeverity { return []FindingSeverity{ "INFORMATIONAL", "LOW", "MEDIUM", "HIGH", "CRITICAL", "UNDEFINED", } } type ImageActionType string // Enum values for ImageActionType const ( ImageActionTypeExpire ImageActionType = "EXPIRE" ) // Values returns all known values for ImageActionType. Note that this can be // expanded in the future, and so it is only as up to date as the client. The // ordering of this slice is not guaranteed to be stable across updates. 
func (ImageActionType) Values() []ImageActionType { return []ImageActionType{ "EXPIRE", } } type ImageFailureCode string // Enum values for ImageFailureCode const ( ImageFailureCodeInvalidImageDigest ImageFailureCode = "InvalidImageDigest" ImageFailureCodeInvalidImageTag ImageFailureCode = "InvalidImageTag" ImageFailureCodeImageTagDoesNotMatchDigest ImageFailureCode = "ImageTagDoesNotMatchDigest" ImageFailureCodeImageNotFound ImageFailureCode = "ImageNotFound" ImageFailureCodeMissingDigestAndTag ImageFailureCode = "MissingDigestAndTag" ImageFailureCodeImageReferencedByManifestList ImageFailureCode = "ImageReferencedByManifestList" ImageFailureCodeKmsError ImageFailureCode = "KmsError" ) // Values returns all known values for ImageFailureCode. Note that this can be // expanded in the future, and so it is only as up to date as the client. The // ordering of this slice is not guaranteed to be stable across updates. func (ImageFailureCode) Values() []ImageFailureCode { return []ImageFailureCode{ "InvalidImageDigest", "InvalidImageTag", "ImageTagDoesNotMatchDigest", "ImageNotFound", "MissingDigestAndTag", "ImageReferencedByManifestList", "KmsError", } } type ImageTagMutability string // Enum values for ImageTagMutability const ( ImageTagMutabilityMutable ImageTagMutability = "MUTABLE" ImageTagMutabilityImmutable ImageTagMutability = "IMMUTABLE" ) // Values returns all known values for ImageTagMutability. Note that this can be // expanded in the future, and so it is only as up to date as the client. The // ordering of this slice is not guaranteed to be stable across updates. func (ImageTagMutability) Values() []ImageTagMutability { return []ImageTagMutability{ "MUTABLE", "IMMUTABLE", } } type LayerAvailability string // Enum values for LayerAvailability const ( LayerAvailabilityAvailable LayerAvailability = "AVAILABLE" LayerAvailabilityUnavailable LayerAvailability = "UNAVAILABLE" ) // Values returns all known values for LayerAvailability. 
Note that this can be // expanded in the future, and so it is only as up to date as the client. The // ordering of this slice is not guaranteed to be stable across updates. func (LayerAvailability) Values() []LayerAvailability { return []LayerAvailability{ "AVAILABLE", "UNAVAILABLE", } } type LayerFailureCode string // Enum values for LayerFailureCode const ( LayerFailureCodeInvalidLayerDigest LayerFailureCode = "InvalidLayerDigest" LayerFailureCodeMissingLayerDigest LayerFailureCode = "MissingLayerDigest" ) // Values returns all known values for LayerFailureCode. Note that this can be // expanded in the future, and so it is only as up to date as the client. The // ordering of this slice is not guaranteed to be stable across updates. func (LayerFailureCode) Values() []LayerFailureCode { return []LayerFailureCode{ "InvalidLayerDigest", "MissingLayerDigest", } } type LifecyclePolicyPreviewStatus string // Enum values for LifecyclePolicyPreviewStatus const ( LifecyclePolicyPreviewStatusInProgress LifecyclePolicyPreviewStatus = "IN_PROGRESS" LifecyclePolicyPreviewStatusComplete LifecyclePolicyPreviewStatus = "COMPLETE" LifecyclePolicyPreviewStatusExpired LifecyclePolicyPreviewStatus = "EXPIRED" LifecyclePolicyPreviewStatusFailed LifecyclePolicyPreviewStatus = "FAILED" ) // Values returns all known values for LifecyclePolicyPreviewStatus. Note that this // can be expanded in the future, and so it is only as up to date as the client. // The ordering of this slice is not guaranteed to be stable across updates. 
func (LifecyclePolicyPreviewStatus) Values() []LifecyclePolicyPreviewStatus { return []LifecyclePolicyPreviewStatus{ "IN_PROGRESS", "COMPLETE", "EXPIRED", "FAILED", } } type ReplicationStatus string // Enum values for ReplicationStatus const ( ReplicationStatusInProgress ReplicationStatus = "IN_PROGRESS" ReplicationStatusComplete ReplicationStatus = "COMPLETE" ReplicationStatusFailed ReplicationStatus = "FAILED" ) // Values returns all known values for ReplicationStatus. Note that this can be // expanded in the future, and so it is only as up to date as the client. The // ordering of this slice is not guaranteed to be stable across updates. func (ReplicationStatus) Values() []ReplicationStatus { return []ReplicationStatus{ "IN_PROGRESS", "COMPLETE", "FAILED", } } type RepositoryFilterType string // Enum values for RepositoryFilterType const ( RepositoryFilterTypePrefixMatch RepositoryFilterType = "PREFIX_MATCH" ) // Values returns all known values for RepositoryFilterType. Note that this can be // expanded in the future, and so it is only as up to date as the client. The // ordering of this slice is not guaranteed to be stable across updates. func (RepositoryFilterType) Values() []RepositoryFilterType { return []RepositoryFilterType{ "PREFIX_MATCH", } } type ScanFrequency string // Enum values for ScanFrequency const ( ScanFrequencyScanOnPush ScanFrequency = "SCAN_ON_PUSH" ScanFrequencyContinuousScan ScanFrequency = "CONTINUOUS_SCAN" ScanFrequencyManual ScanFrequency = "MANUAL" ) // Values returns all known values for ScanFrequency. Note that this can be // expanded in the future, and so it is only as up to date as the client. The // ordering of this slice is not guaranteed to be stable across updates. 
func (ScanFrequency) Values() []ScanFrequency { return []ScanFrequency{ "SCAN_ON_PUSH", "CONTINUOUS_SCAN", "MANUAL", } } type ScanningConfigurationFailureCode string // Enum values for ScanningConfigurationFailureCode const ( ScanningConfigurationFailureCodeRepositoryNotFound ScanningConfigurationFailureCode = "REPOSITORY_NOT_FOUND" ) // Values returns all known values for ScanningConfigurationFailureCode. Note that // this can be expanded in the future, and so it is only as up to date as the // client. The ordering of this slice is not guaranteed to be stable across // updates. func (ScanningConfigurationFailureCode) Values() []ScanningConfigurationFailureCode { return []ScanningConfigurationFailureCode{ "REPOSITORY_NOT_FOUND", } } type ScanningRepositoryFilterType string // Enum values for ScanningRepositoryFilterType const ( ScanningRepositoryFilterTypeWildcard ScanningRepositoryFilterType = "WILDCARD" ) // Values returns all known values for ScanningRepositoryFilterType. Note that this // can be expanded in the future, and so it is only as up to date as the client. // The ordering of this slice is not guaranteed to be stable across updates. func (ScanningRepositoryFilterType) Values() []ScanningRepositoryFilterType { return []ScanningRepositoryFilterType{ "WILDCARD", } } type ScanStatus string // Enum values for ScanStatus const ( ScanStatusInProgress ScanStatus = "IN_PROGRESS" ScanStatusComplete ScanStatus = "COMPLETE" ScanStatusFailed ScanStatus = "FAILED" ScanStatusUnsupportedImage ScanStatus = "UNSUPPORTED_IMAGE" ScanStatusActive ScanStatus = "ACTIVE" ScanStatusPending ScanStatus = "PENDING" ScanStatusScanEligibilityExpired ScanStatus = "SCAN_ELIGIBILITY_EXPIRED" ScanStatusFindingsUnavailable ScanStatus = "FINDINGS_UNAVAILABLE" ) // Values returns all known values for ScanStatus. Note that this can be expanded // in the future, and so it is only as up to date as the client. The ordering of // this slice is not guaranteed to be stable across updates. 
func (ScanStatus) Values() []ScanStatus { return []ScanStatus{ "IN_PROGRESS", "COMPLETE", "FAILED", "UNSUPPORTED_IMAGE", "ACTIVE", "PENDING", "SCAN_ELIGIBILITY_EXPIRED", "FINDINGS_UNAVAILABLE", } } type ScanType string // Enum values for ScanType const ( ScanTypeBasic ScanType = "BASIC" ScanTypeEnhanced ScanType = "ENHANCED" ) // Values returns all known values for ScanType. Note that this can be expanded in // the future, and so it is only as up to date as the client. The ordering of this // slice is not guaranteed to be stable across updates. func (ScanType) Values() []ScanType { return []ScanType{ "BASIC", "ENHANCED", } } type TagStatus string // Enum values for TagStatus const ( TagStatusTagged TagStatus = "TAGGED" TagStatusUntagged TagStatus = "UNTAGGED" TagStatusAny TagStatus = "ANY" ) // Values returns all known values for TagStatus. Note that this can be expanded in // the future, and so it is only as up to date as the client. The ordering of this // slice is not guaranteed to be stable across updates. func (TagStatus) Values() []TagStatus { return []TagStatus{ "TAGGED", "UNTAGGED", "ANY", } }
google/ko
vendor/github.com/aws/aws-sdk-go-v2/service/ecr/types/enums.go
GO
apache-2.0
10,460
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.log4j.varia; import java.io.*; import java.net.Socket; import java.net.ServerSocket; import org.apache.log4j.helpers.LogLog; import org.apache.log4j.RollingFileAppender; import org.apache.log4j.helpers.LogLog; /** This appender listens on a socket on the port specified by the <b>Port</b> property for a "RollOver" message. When such a message is received, the underlying log file is rolled over and an acknowledgment message is sent back to the process initiating the roll over. <p>This method of triggering roll over has the advantage of being operating system independent, fast and reliable. <p>A simple application {@link Roller} is provided to initiate the roll over. <p>Note that the initiator is not authenticated. Anyone can trigger a rollover. In production environments, it is recommended that you add some form of protection to prevent undesired rollovers. @author Ceki G&uuml;lc&uuml; @since version 0.9.0 @deprecated since 1.3, use org.apache.log4j.rolling.RollingFileAppender. */ public class ExternallyRolledFileAppender extends RollingFileAppender { /** The string constant sent to initiate a roll over. Current value of this string constant is <b>RollOver</b>. 
*/ static final public String ROLL_OVER = "RollOver"; /** The string constant sent to acknowledge a roll over. Current value of this string constant is <b>OK</b>. */ static final public String OK = "OK"; int port = 0; HUP hup; /** The default constructor does nothing but calls its super-class constructor. */ public ExternallyRolledFileAppender() { } /** The <b>Port</b> [roperty is used for setting the port for listening to external roll over messages. */ public void setPort(int port) { this.port = port; } /** Returns value of the <b>Port</b> option. */ public int getPort() { return port; } /** Start listening on the port specified by a preceding call to {@link #setPort}. */ public void activateOptions() { super.activateOptions(); if(port != 0) { if(hup != null) { hup.interrupt(); } hup = new HUP(this, port); hup.setDaemon(true); hup.start(); } } } /** * @deprecated since log4j 1.3. */ class HUP extends Thread { int port; ExternallyRolledFileAppender er; HUP(ExternallyRolledFileAppender er, int port) { this.er = er; this.port = port; } public void run() { while(!isInterrupted()) { try { ServerSocket serverSocket = new ServerSocket(port); while(true) { Socket socket = serverSocket.accept(); LogLog.debug("Connected to client at " + socket.getInetAddress()); new Thread(new HUPNode(socket, er)).start(); } } catch(Exception e) { e.printStackTrace(); } } } } /** * @deprecated since log4j 1.3. 
*/ class HUPNode implements Runnable { Socket socket; DataInputStream dis; DataOutputStream dos; ExternallyRolledFileAppender er; public HUPNode(Socket socket, ExternallyRolledFileAppender er) { this.socket = socket; this.er = er; try { dis = new DataInputStream(socket.getInputStream()); dos = new DataOutputStream(socket.getOutputStream()); } catch(Exception e) { e.printStackTrace(); } } public void run() { try { String line = dis.readUTF(); LogLog.debug("Got external roll over signal."); if(ExternallyRolledFileAppender.ROLL_OVER.equals(line)) { synchronized(er) { er.rollOver(); } dos.writeUTF(ExternallyRolledFileAppender.OK); } else { dos.writeUTF("Expecting [RollOver] string."); } dos.close(); } catch(Exception e) { LogLog.error("Unexpected exception. Exiting HUPNode.", e); } } }
prmsheriff/log4j
src/main/java/org/apache/log4j/varia/ExternallyRolledFileAppender.java
Java
apache-2.0
4,642
package mil.nga.giat.geowave.core.store.adapter; import mil.nga.giat.geowave.core.index.ByteArrayId; import mil.nga.giat.geowave.core.store.CloseableIterator; /** * This is responsible for persisting data adapters (either in memory or to disk * depending on the implementation). */ public interface AdapterStore { /** * Add the adapter to the store * * @param adapter * the adapter */ public void addAdapter( DataAdapter<?> adapter ); /** * Get an adapter from the store by its unique ID * * @param adapterId * the unique adapter ID * @return the adapter, null if it doesn't exist */ public DataAdapter<?> getAdapter( ByteArrayId adapterId ); /** * Check for the existence of the adapter with the given unique ID * * @param adapterId * the unique ID to look up * @return a boolean flag indicating whether the adapter exists */ public boolean adapterExists( ByteArrayId adapterId ); /** * Get the full set of adapters within this store * * @return an iterator over all of the adapters in this store */ public CloseableIterator<DataAdapter<?>> getAdapters(); }
ruks/geowave
core/store/src/main/java/mil/nga/giat/geowave/core/store/adapter/AdapterStore.java
Java
apache-2.0
1,163
//----------------------------------------------------------------------- // <copyright file="Shard.cs" company="Akka.NET Project"> // Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com> // Copyright (C) 2013-2015 Akka.NET project <https://github.com/akkadotnet/akka.net> // </copyright> //----------------------------------------------------------------------- using System; using System.Collections.Immutable; using System.Linq; using Akka.Actor; using Akka.Event; using Akka.Persistence; namespace Akka.Cluster.Sharding { using ShardId = String; using EntityId = String; using Msg = Object; //TODO: figure out how not to derive from persistent actor for the sake of alternative ddata based impl public abstract class Shard : PersistentActor { #region messages public interface IShardCommand { } public interface IShardQuery { } /// <summary> /// When a <see cref="StateChange"/> fails to write to the journal, we will retry it after a back off. /// </summary> [Serializable] public class RetryPersistence : IShardCommand { public readonly StateChange Payload; public RetryPersistence(StateChange payload) { Payload = payload; } } /// <summary> /// The Snapshot tick for the shards. /// </summary> [Serializable] public sealed class SnapshotTick : IShardCommand { public static readonly SnapshotTick Instance = new SnapshotTick(); private SnapshotTick() { } } /// <summary> /// When an remembering entries and the entity stops without issuing a <see cref="Passivate"/>, /// we restart it after a back off using this message. 
/// </summary> [Serializable] public sealed class RestartEntity : IShardCommand { public readonly EntityId EntityId; public RestartEntity(string entityId) { EntityId = entityId; } } public abstract class StateChange { public readonly EntityId EntityId; protected StateChange(EntityId entityId) { EntityId = entityId; } } /// <summary> /// <see cref="ShardState"/> change for starting an entity in this `Shard` /// </summary> [Serializable] public sealed class EntityStarted : StateChange { public EntityStarted(string entityId) : base(entityId) { } } /// <summary> /// <see cref="ShardState"/> change for an entity which has terminated. /// </summary> [Serializable] public sealed class EntityStopped : StateChange { public EntityStopped(string entityId) : base(entityId) { } } [Serializable] public sealed class GetCurrentShardState : IShardQuery { public static readonly GetCurrentShardState Instance = new GetCurrentShardState(); private GetCurrentShardState() { } } [Serializable] public sealed class CurrentShardState { public readonly string ShardId; public readonly string[] EntityIds; public CurrentShardState(string shardId, string[] entityIds) { ShardId = shardId; EntityIds = entityIds; } } [Serializable] public sealed class GetShardStats : IShardQuery { public static readonly GetShardStats Instance = new GetShardStats(); private GetShardStats() { } } [Serializable] public sealed class ShardStats { public readonly string ShardId; public readonly int EntityCount; public ShardStats(string shardId, int entityCount) { ShardId = shardId; EntityCount = entityCount; } } #endregion /// <summary> /// Persistent state of the Shard. 
/// </summary> [Serializable] public struct ShardState : IClusterShardingSerializable { public static readonly ShardState Empty = new ShardState(ImmutableHashSet<string>.Empty); public readonly IImmutableSet<EntityId> Entries; public ShardState(IImmutableSet<EntityId> entries) : this() { Entries = entries; } } public readonly string TypeName; public readonly ShardId ShardId; public readonly Actor.Props EntityProps; public readonly ClusterShardingSettings Settings; public readonly IdExtractor ExtractEntityId; public readonly ShardResolver ExtractShardId; public readonly object HandOffStopMessage; protected IImmutableDictionary<IActorRef, EntityId> IdByRef = ImmutableDictionary<IActorRef, EntityId>.Empty; protected IImmutableDictionary<EntityId, IActorRef> RefById = ImmutableDictionary<EntityId, IActorRef>.Empty; protected IImmutableSet<IActorRef> Passivating = ImmutableHashSet<IActorRef>.Empty; protected IImmutableDictionary<EntityId, IImmutableList<Tuple<Msg, IActorRef>>> MessageBuffers = ImmutableDictionary<EntityId, IImmutableList<Tuple<Msg, IActorRef>>>.Empty; protected IActorRef HandOffStopper = null; protected ShardState State = ShardState.Empty; private ILoggingAdapter _log; protected Shard( string typeName, string shardId, Props entityProps, ClusterShardingSettings settings, IdExtractor extractEntityId, ShardResolver extractShardId, object handOffStopMessage) { TypeName = typeName; ShardId = shardId; EntityProps = entityProps; Settings = settings; ExtractEntityId = extractEntityId; ExtractShardId = extractShardId; HandOffStopMessage = handOffStopMessage; } protected ILoggingAdapter Log { get { return _log ?? 
(_log = Context.GetLogger()); } } protected int TotalBufferSize { get { return MessageBuffers.Aggregate(0, (sum, entity) => sum + entity.Value.Count); } } #region common shard methods protected virtual void Initialized() { Context.Parent.Tell(new ShardRegion.ShardInitialized(ShardId)); } protected virtual void ProcessChange<T>(T evt, Action<T> handler) { handler(evt); } protected bool HandleCommand(object message) { if (message is Terminated) HandleTerminated(((Terminated) message).ActorRef); else if (message is PersistentShardCoordinator.ICoordinatorMessage) HandleCoordinatorMessage(message as PersistentShardCoordinator.ICoordinatorMessage); else if (message is IShardCommand) HandleShardCommand(message as IShardCommand); else if (message is ShardRegion.IShardRegionCommand) HandleShardRegionCommand(message as ShardRegion.IShardRegionCommand); else if (message is IShardQuery) HandleShardRegionQuery(message as IShardQuery); else if (ExtractEntityId(message) != null) DeliverMessage(message, Sender); else return false; return true; } private void HandleShardRegionQuery(IShardQuery query) { if(query is GetCurrentShardState) Sender.Tell(new CurrentShardState(ShardId, RefById.Keys.ToArray())); else if (query is GetShardStats) Sender.Tell(new ShardStats(ShardId, State.Entries.Count)); } protected virtual void EntityTerminated(IActorRef tref) { ShardId id; IImmutableList<Tuple<Msg, IActorRef>> buffer; if (IdByRef.TryGetValue(tref, out id) && MessageBuffers.TryGetValue(id, out buffer) && buffer.Count != 0) { Log.Debug("Starting entity [{0}] again, there are buffered messages for it", id); SendMessageBuffer(new EntityStarted(id)); } else { ProcessChange(new EntityStopped(id), PassivateCompleted); } Passivating = Passivating.Remove(tref); } protected void HandleShardCommand(IShardCommand message) { var restart = message as RestartEntity; if (restart != null) GetEntity(restart.EntityId); } protected void HandleShardRegionCommand(ShardRegion.IShardRegionCommand message) { var 
passivate = message as ShardRegion.Passivate; if (passivate != null) Passivate(Sender, passivate.StopMessage); else Unhandled(message); } protected void HandleCoordinatorMessage(PersistentShardCoordinator.ICoordinatorMessage message) { var handOff = message as PersistentShardCoordinator.HandOff; if (handOff != null) { if (handOff.Shard == ShardId) HandOff(Sender); else Log.Warning("Shard [{0}] can not hand off for another Shard [{1}]", ShardId, handOff.Shard); } else Unhandled(message); } protected void HandOff(IActorRef replyTo) { if (HandOffStopper != null) Log.Warning("HandOff shard [{0}] received during existing handOff", ShardId); else { Log.Debug("HandOff shard [{0}]", ShardId); if (State.Entries.Count != 0) { // handOffStopper = Some(context.watch(context.actorOf( // handOffStopperProps(shardId, replyTo, idByRef.keySet, handOffStopMessage)))) HandOffStopper = Context.Watch(Context.ActorOf( ShardRegion.HandOffStopper.Props(ShardId, replyTo, IdByRef.Keys, HandOffStopMessage))); //During hand off we only care about watching for termination of the hand off stopper Context.Become(message => { var terminated = message as Terminated; if (terminated != null) { HandleTerminated(terminated.ActorRef); return true; } return false; }); } else { replyTo.Tell(new PersistentShardCoordinator.ShardStopped(ShardId)); Context.Stop(Self); } } } protected void HandleTerminated(IActorRef terminatedRef) { if (Equals(HandOffStopper, terminatedRef)) Context.Stop(Self); else if (IdByRef.ContainsKey(terminatedRef) && HandOffStopper == null) EntityTerminated(terminatedRef); } protected void Passivate(IActorRef entity, object stopMessage) { ShardId id; if (IdByRef.TryGetValue(entity, out id) && !MessageBuffers.ContainsKey(id)) { Log.Debug("Passivating started on entity {0}", id); Passivating = Passivating.Add(entity); MessageBuffers = MessageBuffers.Add(id, ImmutableList<Tuple<object, IActorRef>>.Empty); entity.Tell(stopMessage); } } protected void PassivateCompleted(EntityStopped evt) { 
var id = evt.EntityId; Log.Debug("Entity stopped [{0}]", id); var entity = RefById[id]; IdByRef = IdByRef.Remove(entity); RefById = RefById.Remove(id); State = new ShardState(State.Entries.Remove(id)); MessageBuffers = MessageBuffers.Remove(id); } protected void SendMessageBuffer(EntityStarted message) { var id = message.EntityId; // Get the buffered messages and remove the buffer IImmutableList<Tuple<Msg, IActorRef>> buffer; if (MessageBuffers.TryGetValue(id, out buffer)) MessageBuffers = MessageBuffers.Remove(id); if (buffer.Count != 0) { Log.Debug("Sending message buffer for entity [{0}] ([{1}] messages)", id, buffer.Count); GetEntity(id); // Now there is no deliveryBuffer we can try to redeliver // and as the child exists, the message will be directly forwarded foreach (var pair in buffer) DeliverMessage(pair.Item1, pair.Item2); } } protected void DeliverMessage(object message, IActorRef sender) { var t = ExtractEntityId(message); var id = t.Item1; var payload = t.Item2; if (string.IsNullOrEmpty(id)) { Log.Warning("Id must not be empty, dropping message [{0}]", message.GetType()); Context.System.DeadLetters.Tell(message); } else { IImmutableList<Tuple<Msg, IActorRef>> buffer; if (MessageBuffers.TryGetValue(id, out buffer)) { if (TotalBufferSize >= Settings.TunningParameters.BufferSize) { Log.Warning("Buffer is full, dropping message for entity [{0}]", message.GetType()); Context.System.DeadLetters.Tell(message); } else { Log.Debug("Message for entity [{0}] buffered", id); MessageBuffers.SetItem(id, buffer.Add(Tuple.Create(message, sender))); } } else DeliverTo(id, message, payload, sender); } } protected virtual void DeliverTo(string id, object message, object payload, IActorRef sender) { var name = Uri.EscapeDataString(id); var child = Context.Child(name); if (Equals(child, ActorRefs.Nobody)) GetEntity(id).Tell(payload, sender); else child.Tell(payload, sender); } protected IActorRef GetEntity(string id) { var name = Uri.EscapeDataString(id); var child = 
Context.Child(name); if (Equals(child, ActorRefs.Nobody)) { Log.Debug("Starting entity [{0}] in shard [{1}]", id, ShardId); child = Context.Watch(Context.ActorOf(EntityProps, name)); IdByRef = IdByRef.SetItem(child, id); RefById = RefById.SetItem(id, child); State = new ShardState(State.Entries.Add(id)); } return child; } #endregion } }
stefansedich/akka.net
src/contrib/cluster/Akka.Cluster.Sharding/Shard.cs
C#
apache-2.0
15,414
/*
 * Copyright 2010-2010 LinkedIn, Inc
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package org.linkedin.util.io.resource;

import org.linkedin.util.io.PathUtils;
import org.linkedin.util.io.resource.internal.AbstractResource;
import org.linkedin.util.io.resource.internal.FileResourceProvider;
import org.linkedin.util.io.resource.internal.InternalResourceProvider;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;

/**
 * A resource backed by a plain file-system {@link File}.
 *
 * @author ypujante@linkedin.com
 */
// TODO MED YP: toURI should preserve trailing slash...
public class FileResource extends AbstractResource
{
  private final File _file;

  /**
   * Resource metadata read straight from the underlying file.
   */
  private class FileInfo implements ResourceInfo
  {
    /**
     * @return the file length in bytes, or 0 when the resource is a directory
     */
    @Override
    public long getContentLength() throws IOException
    {
      if(_file.isDirectory())
        return 0;
      return _file.length();
    }

    /**
     * @return the last-modified timestamp of the underlying file
     */
    @Override
    public long getLastModified() throws IOException
    {
      return _file.lastModified();
    }
  }

  /**
   * Constructor
   *
   * @param resourceProvider the provider that created this resource
   * @param path the logical path of the resource
   * @param file the backing file on the file system
   */
  public FileResource(InternalResourceProvider resourceProvider, String path, File file)
  {
    super(resourceProvider, path);
    _file = file;
  }

  /**
   * @return <code>true</code> if the resource exists.
   */
  @Override
  public boolean exists()
  {
    return _file.exists();
  }

  /**
   * Returns a <code>File</code> handle for this resource.
   *
   * @throws IOException if the resource cannot be resolved as a <code>File</code> handle,
   * i.e. it is not available on the file system (or it cannot be made available).
   */
  @Override
  public File getFile() throws IOException
  {
    return _file;
  }

  /**
   * Important note: the caller of this method is responsible for properly closing the
   * input stream!
   *
   * @return an input stream to the resource.
   * @throws IOException if cannot get an input stream
   */
  @Override
  public InputStream getInputStream() throws IOException
  {
    return new FileInputStream(_file);
  }

  /**
   * Efficiently returns all information about the resource.
   *
   * @return information about this resource.
   * @throws IOException if cannot get information
   */
  @Override
  public ResourceInfo getInfo() throws IOException
  {
    // guard clause: a missing file has no info to report
    if(!_file.exists())
      throw new FileNotFoundException(_file.getPath());
    return new FileInfo();
  }

  /**
   * @return <code>true</code> if this resource represents a directory.
   */
  @Override
  public boolean isDirectory()
  {
    return _file.isDirectory();
  }

  /**
   * @return a uri representation of the resource
   */
  @Override
  public URI toURI()
  {
    return _file.toURI();
  }

  /**
   * Creates a file resource from a file name.
   *
   * @param filename name of the file
   * @return the resource
   */
  public static Resource create(String filename)
  {
    return create(new File(filename));
  }

  /**
   * Creates a file resource from a file. The resource is rooted at the file system root,
   * with the canonical path of the file as the resource path.
   *
   * @param file the file
   * @return the resource (points to this file)
   */
  public static Resource create(File file)
  {
    try
    {
      String resourcePath = file.getCanonicalPath();
      // directories keep a trailing slash in their resource path
      if(file.isDirectory())
        resourcePath = PathUtils.addTrailingSlash(resourcePath);
      return create(new File("/"), resourcePath);
    }
    catch(IOException e)
    {
      throw new IllegalArgumentException("invalid file " + file, e);
    }
  }

  /**
   * Creates a file resource from a file, with the root as this file (if it is a directory
   * otherwise its parent).
   *
   * @param rootFile the root file
   * @return the resource (points to this file)
   */
  public static Resource createFromRoot(File rootFile)
  {
    // a directory is its own root; a plain file is rooted at its parent
    if(rootFile.isDirectory())
      return create(rootFile, "/");
    return create(rootFile.getParentFile(), rootFile.getName());
  }

  /**
   * Creates a file resource with the root provided and the path (relative to the root).
   *
   * @param root the root of the resource
   * @param path the path (relative to root)
   * @return the resource
   */
  public static Resource create(File root, String path)
  {
    FileResourceProvider provider;
    try
    {
      provider = new FileResourceProvider(root);
    }
    catch(IOException e)
    {
      throw new RuntimeException(e);
    }
    return provider.createResource(path);
  }
}
pongasoft/utils-misc
org.linkedin.util-core/src/main/java/org/linkedin/util/io/resource/FileResource.java
Java
apache-2.0
4,944
package org.apereo.cas.support.saml.web.idp.web; import org.apereo.cas.authentication.AuthenticationServiceSelectionPlan; import org.apereo.cas.services.ServicesManager; import org.apereo.cas.ticket.registry.TicketRegistrySupport; import org.apereo.cas.web.flow.BaseSingleSignOnParticipationStrategy; import org.apereo.cas.web.flow.SingleSignOnParticipationRequest; import lombok.val; import org.opensaml.saml.saml2.core.AuthnRequest; import org.opensaml.saml.saml2.core.Issuer; /** * This is {@link SamlIdPSingleSignOnParticipationStrategy}. * * @author Misagh Moayyed * @since 6.4.0 */ public class SamlIdPSingleSignOnParticipationStrategy extends BaseSingleSignOnParticipationStrategy { public SamlIdPSingleSignOnParticipationStrategy(final ServicesManager servicesManager, final TicketRegistrySupport ticketRegistrySupport, final AuthenticationServiceSelectionPlan serviceSelectionStrategy) { super(servicesManager, ticketRegistrySupport, serviceSelectionStrategy); } @Override public boolean isParticipating(final SingleSignOnParticipationRequest ssoRequest) { val authnRequest = ssoRequest.getAttributeValue(AuthnRequest.class.getName(), AuthnRequest.class); return supports(ssoRequest) && !authnRequest.isForceAuthn(); } @Override public boolean supports(final SingleSignOnParticipationRequest ssoRequest) { return ssoRequest.containsAttribute(AuthnRequest.class.getName()) && ssoRequest.containsAttribute(Issuer.class.getName()); } }
apereo/cas
support/cas-server-support-saml-idp-web/src/main/java/org/apereo/cas/support/saml/web/idp/web/SamlIdPSingleSignOnParticipationStrategy.java
Java
apache-2.0
1,632
using Microsoft.SharePoint.Client.Taxonomy; using OfficeDevPnP.Core; using OfficeDevPnP.Core.Entities; using OfficeDevPnP.Core.Utilities; using System; using System.Collections.Generic; using System.ComponentModel; using System.Linq; using System.Text; using System.Threading.Tasks; using System.Xml; using System.Xml.Linq; namespace Microsoft.SharePoint.Client { /// <summary> /// This class provides extension methods that will help you work with fields and content types. /// </summary> public static class FieldAndContentTypeExtensions { #region Site Columns [Obsolete("Use CreateField(Web web, FieldCreationInformation fieldCreationInformation, System.Boolean executeQuery = True)")] [EditorBrowsable(System.ComponentModel.EditorBrowsableState.Never)] public static Field CreateField(this Web web, Guid id, string internalName, FieldType fieldType, string displayName, string group, string additionalXmlAttributes = "", bool executeQuery = true) { var additionalAttributes = ParseAdditionalAttributes(additionalXmlAttributes); FieldCreationInformation fieldCreationInformation = new FieldCreationInformation(fieldType) { Id = id, InternalName = internalName, DisplayName = displayName, Group = group, AdditionalAttributes = additionalAttributes }; return CreateField(web, fieldCreationInformation, executeQuery); } [Obsolete("Use CreateField(Web web, FieldCreationInformation fieldCreationInformation, System.Boolean executeQuery = True)")] [EditorBrowsable(System.ComponentModel.EditorBrowsableState.Never)] public static Field CreateField(this Web web, Guid id, string internalName, string fieldType, string displayName, string group, string additionalXmlAttributes = "", bool executeQuery = true) { var additionalAttributes = ParseAdditionalAttributes(additionalXmlAttributes); FieldCreationInformation fieldCreationInformation = new FieldCreationInformation(fieldType) { Id = id, DisplayName = displayName, Group = group, InternalName = internalName, AddToDefaultView = false, 
AdditionalAttributes = additionalAttributes }; return CreateField(web, fieldCreationInformation, executeQuery); } [Obsolete("Use CreateField(Web web, FieldCreationInformation fieldCreationInformation, System.Boolean executeQuery = True)")] [EditorBrowsable(System.ComponentModel.EditorBrowsableState.Never)] public static Field CreateField(this Web web, Guid id, string internalName, string fieldType, bool addToDefaultView, string displayName, string group, string additionalXmlAttributes = "", bool executeQuery = true) { var additionalAttributes = ParseAdditionalAttributes(additionalXmlAttributes); FieldCreationInformation fieldCreationInformation = new FieldCreationInformation(fieldType) { Id = id, InternalName = internalName, DisplayName = displayName, Group = group, AddToDefaultView = addToDefaultView, AdditionalAttributes = additionalAttributes }; return CreateField(web, fieldCreationInformation, executeQuery); } /// <summary> /// Create field to web remotely /// </summary> /// <param name="web">Site to be processed - can be root web or sub site</param> /// <param name="fieldCreationInformation">Creation Information for the field.</param> /// <param name="executeQuery">Optionally skip the executeQuery action</param> /// <returns>The newly created field or existing field.</returns> public static Field CreateField(this Web web, FieldCreationInformation fieldCreationInformation, bool executeQuery = true) { return CreateField<Field>(web, fieldCreationInformation, executeQuery); } [Obsolete("Use CreateField<TField>(this Web web, FieldCreationInformation fieldCreationInformation, bool executeQuery = true)")] [EditorBrowsable(System.ComponentModel.EditorBrowsableState.Never)] public static TField CreateField<TField>(this Web web, Guid id, string internalName, FieldType fieldType, bool addToDefaultView, string displayName, string group, string additionalXmlAttributes = "", bool executeQuery = true) where TField : Field { var additionalAttributes = 
ParseAdditionalAttributes(additionalXmlAttributes); var fieldCreationInformation = new FieldCreationInformation(fieldType) { Id = id, InternalName = internalName, AddToDefaultView = addToDefaultView, Group = group, DisplayName = displayName, AdditionalAttributes = additionalAttributes }; return CreateField<TField>(web, fieldCreationInformation, executeQuery); } [Obsolete("Use CreateField<TField>(this Web web, FieldCreationInformation fieldCreationInformation, bool executeQuery = true)")] [EditorBrowsable(System.ComponentModel.EditorBrowsableState.Never)] public static TField CreateField<TField>(this Web web, Guid id, string internalName, string fieldType, bool addToDefaultView, string displayName, string group, string additionalXmlAttributes = "", bool executeQuery = true) where TField : Field { var additionalAttributes = ParseAdditionalAttributes(additionalXmlAttributes); var fieldCreationInformation = new FieldCreationInformation(fieldType) { Id = id, InternalName = internalName, AddToDefaultView = addToDefaultView, Group = group, DisplayName = displayName, AdditionalAttributes = additionalAttributes }; return CreateField<TField>(web, fieldCreationInformation, executeQuery); } /// <summary> /// Create field to web remotely /// </summary> /// <param name="web">Site to be processed - can be root web or sub site</param> /// <param name="fieldCreationInformation">Field creation information</param> /// <param name="executeQuery">Optionally skip the executeQuery action</param> /// <returns>The newly created field or existing field.</returns> public static TField CreateField<TField>(this Web web, FieldCreationInformation fieldCreationInformation, bool executeQuery = true) where TField : Field { if (string.IsNullOrEmpty(fieldCreationInformation.InternalName)) throw new ArgumentNullException("InternalName"); if (string.IsNullOrEmpty(fieldCreationInformation.DisplayName)) throw new ArgumentNullException("DisplayName"); FieldCollection fields = web.Fields; 
web.Context.Load(fields, fc => fc.Include(f => f.Id, f => f.InternalName)); web.Context.ExecuteQuery(); var field = CreateFieldBase<TField>(fields, fieldCreationInformation, executeQuery); return field; } /// <summary> /// Create field to web remotely /// </summary> /// <param name="web">Site to be processed - can be root web or sub site</param> /// <param name="fieldAsXml">The XML declaration of SiteColumn definition</param> /// <returns>The newly created field or existing field.</returns> public static Field CreateField(this Web web, string fieldAsXml, bool executeQuery = true) { if (string.IsNullOrEmpty(fieldAsXml)) throw new ArgumentNullException("fieldAsXml"); XmlDocument xd = new XmlDocument(); xd.LoadXml(fieldAsXml); XmlNamespaceManager nsmgr = new XmlNamespaceManager(xd.NameTable); nsmgr.AddNamespace("namespace", "http://schemas.microsoft.com/sharepoint/"); XmlNode fieldNode = xd.SelectSingleNode("//namespace:Field", nsmgr); string id = fieldNode.Attributes["ID"].Value; string name = fieldNode.Attributes["Name"].Value; LoggingUtility.Internal.TraceInformation((int)EventId.CreateField, CoreResources.FieldAndContentTypeExtensions_CreateField01, name, id); FieldCollection fields = web.Fields; web.Context.Load(fields); web.Context.ExecuteQuery(); Field field = fields.AddFieldAsXml(fieldAsXml, false, AddFieldOptions.AddFieldInternalNameHint); web.Update(); if (executeQuery) web.Context.ExecuteQuery(); return field; } public static void RemoveFieldByInternalName(this Web web, string internalName) { var fields = web.Context.LoadQuery(web.Fields.Where(f => f.InternalName == internalName)); web.Context.ExecuteQuery(); if (fields.Count() == 0) { throw new ArgumentException(string.Format("Could not find field with internalName {0}", internalName)); } fields.First().DeleteObject(); } /// <summary> /// Creates fields from feature element xml file schema. XML file can contain one or many field definitions created using classic feature framework structure. 
/// </summary> /// <param name="web">Site to be processed - can be root web or sub site. Site columns should be created to root site.</param> /// <param name="xmlFilePath">Absolute path to the xml location</param> public static void CreateFieldsFromXMLFile(this Web web, string xmlFilePath) { XmlDocument xd = new XmlDocument(); xd.Load(xmlFilePath); // Perform the action field creation CreateFieldsFromXML(web, xd); } /// <summary> /// Creates fields from feature element xml file schema. XML file can contain one or many field definitions created using classic feature framework structure. /// </summary> /// <param name="web">Site to be processed - can be root web or sub site. Site columns should be created to root site.</param> /// <param name="xmlStructure">XML structure in string format</param> public static void CreateFieldsFromXMLString(this Web web, string xmlStructure) { XmlDocument xd = new XmlDocument(); xd.LoadXml(xmlStructure); // Perform the action field creation CreateFieldsFromXML(web, xd); } /// <summary> /// Creates field from xml structure which follows the classic feature framework structure /// </summary> /// <param name="web">Site to be processed - can be root web or sub site. 
Site columns should be created to root site.</param> /// <param name="xmlDoc">Actual XML document</param> public static void CreateFieldsFromXML(this Web web, XmlDocument xmlDoc) { XmlNamespaceManager nsmgr = new XmlNamespaceManager(xmlDoc.NameTable); nsmgr.AddNamespace("namespace", "http://schemas.microsoft.com/sharepoint/"); XmlNodeList fields = xmlDoc.SelectNodes("//namespace:Field", nsmgr); int count = fields.Count; foreach (XmlNode field in fields) { string id = field.Attributes["ID"].Value; string name = field.Attributes["Name"].Value; // IF field already existed, let's move on if (web.FieldExistsByName(name)) { LoggingUtility.Internal.TraceWarning((int)EventId.FieldAlreadyExists, CoreResources.FieldAndContentTypeExtensions_Field01AlreadyExists, name, id); } else { web.CreateField(field.OuterXml); } } } /// <summary> /// Returns if the field is found /// </summary> /// <param name="web">Site to be processed - can be root web or sub site. Site columns should be created to root site.</param> /// <param name="fieldId">Guid for the field ID</param> /// <returns>True or false depending on the field existence</returns> public static bool FieldExistsById(this Web web, Guid fieldId) { var field = web.GetFieldById<Field>(fieldId); if (field != null) return true; return false; } /// <summary> /// Returns the field if it exists. Null if it does not exist. /// </summary> /// <param name="TField">Field type to be returned</param> /// <param name="web">Site to be processed - can be root web or sub site. 
Site columns should be created to root site.</param> /// <param name="fieldId">Guid for the field ID</param> /// <returns>Field of type TField</returns> public static TField GetFieldById<TField>(this Web web, Guid fieldId) where TField : Field { var fields = web.Context.LoadQuery(web.Fields.Where(f => f.Id == fieldId)); web.Context.ExecuteQuery(); var field = fields.FirstOrDefault(); if (field == null) return null; else return web.Context.CastTo<TField>(field); } /// <summary> /// Returns the field if it exists. Null if it does not exist. /// </summary> /// <param name="TField">Field type to be returned</param> /// <param name="web">Site to be processed - can be root web or sub site. Site columns should be created to root site.</param> /// <param name="fieldId">Guid for the field ID</param> /// <returns>Field of type TField</returns> public static TField GetFieldByName<TField>(this FieldCollection fields, string internalName) where TField : Field { if (!fields.ServerObjectIsNull.HasValue || fields.ServerObjectIsNull.Value) { fields.Context.Load(fields); fields.Context.ExecuteQuery(); } var field = fields.FirstOrDefault(f => f.StaticName == internalName); if (field == null) return null; else return fields.Context.CastTo<TField>(field); } /// <summary> /// Returns if the field is found /// </summary> /// <param name="web">Site to be processed - can be root web or sub site. 
Site columns should be created to root site.</param> /// <param name="fieldName">String for the field internal name to be used as query criteria</param> /// <returns>True or false depending on the field existence</returns> public static bool FieldExistsByName(this Web web, string fieldName) { if (string.IsNullOrEmpty(fieldName)) throw new ArgumentNullException("fieldName"); FieldCollection fields = web.Fields; IEnumerable<Field> results = web.Context.LoadQuery<Field>(fields.Where(item => item.InternalName == fieldName)); web.Context.ExecuteQuery(); if (results.FirstOrDefault() != null) { return true; } return false; } /// <summary> /// Does field exist in web /// </summary> /// <param name="web">Site to be processed - can be root web or sub site. Site columns should be created to root site.</param> /// <param name="fieldId">String representation of the field ID (=guid)</param> /// <returns>True if exists, false otherwise</returns> public static bool FieldExistsById(this Web web, string fieldId) { if (string.IsNullOrEmpty(fieldId)) throw new ArgumentNullException("fieldId"); return FieldExistsById(web, new Guid(fieldId)); } /// <summary> /// Field exists in content type /// </summary> /// <param name="web">Site to be processed - can be root web or sub site. 
Site columns should be created to root site.</param> /// <param name="contentTypeName">Name of the content type</param> /// <param name="fieldName">Name of the field</param> /// <returns>True if exists, false otherwise</returns> public static bool FieldExistsByNameInContentType(this Web web, string contentTypeName, string fieldName) { if (string.IsNullOrEmpty(contentTypeName)) throw new ArgumentNullException("contentTypeName"); if (string.IsNullOrEmpty(fieldName)) throw new ArgumentNullException("fieldName"); ContentType ct = GetContentTypeByName(web, contentTypeName); FieldCollection fields = ct.Fields; IEnumerable<Field> results = ct.Context.LoadQuery<Field>(fields.Where(item => item.InternalName == fieldName)); ct.Context.ExecuteQuery(); if (results.FirstOrDefault() != null) { return true; } return false; } /// <summary> /// Binds a field to a termset based on an xml structure which follows the classic feature framework structure /// </summary> /// <param name="web">Site to be processed - can be root web or sub site. Site columns should be created to root site.</param> /// <param name="absolutePathToFile">Absolute path to the xml location</param> public static void BindFieldsToTermSetsFromXMLFile(this Web web, string absolutePathToFile) { XmlDocument xd = new XmlDocument(); xd.Load(absolutePathToFile); BindFieldsToTermSetsFromXML(web, xd); } /// <summary> /// Binds a field to a termset based on an xml structure which follows the classic feature framework structure /// </summary> /// <param name="web">Site to be processed - can be root web or sub site. 
Site columns should be created to root site.</param> /// <param name="xmlStructure">XML structure in string format</param> public static void BindFieldsToTermSetsFromXMLString(this Web web, string xmlStructure) { XmlDocument xd = new XmlDocument(); xd.LoadXml(xmlStructure); BindFieldsToTermSetsFromXML(web, xd); } /// <summary> /// Binds a field to a termset based on an xml structure which follows the classic feature framework structure /// </summary> /// <param name="web">Site to be processed - can be root web or sub site. Site columns should be created to root site.</param> /// <param name="xmlDoc">Actual XML document</param> public static void BindFieldsToTermSetsFromXML(this Web web, XmlDocument xmlDoc) { XmlNodeList fields = xmlDoc.SelectNodes("//MMSField"); foreach (XmlNode mmsfield in fields) { string fieldGuid = mmsfield.Attributes["FieldGuid"].Value; string MMSGroupName = mmsfield.Attributes["MMSGroupName"].Value; string TermSet = mmsfield.Attributes["TermSet"].Value; TaxonomyExtensions.WireUpTaxonomyField(web, new Guid(fieldGuid), MMSGroupName, TermSet); } } #endregion #region List Fields [Obsolete("Use CreateField(List list, FieldCreationInformation fieldCreationInformation, System.Boolean executeQuery = True)")] public static Field CreateField(this List list, Guid id, string internalName, FieldType fieldType, string displayName, string group, string additionalXmlAttributes = "", bool executeQuery = true) { var additionalAttributes = ParseAdditionalAttributes(additionalXmlAttributes); FieldCreationInformation fieldCreationInformation = new FieldCreationInformation(fieldType) { Id = id, AddToDefaultView = false, DisplayName = displayName, Group = group, InternalName = internalName, AdditionalAttributes = additionalAttributes }; return CreateField(list, fieldCreationInformation, executeQuery); } [Obsolete("Use CreateField(List list, FieldCreationInformation fieldCreationInformation, System.Boolean executeQuery = True)")] 
[EditorBrowsable(System.ComponentModel.EditorBrowsableState.Never)] public static Field CreateField(this List list, Guid id, string internalName, string fieldType, string displayName, string group, string additionalXmlAttributes = "", bool executeQuery = true) { var additionalAttributes = ParseAdditionalAttributes(additionalXmlAttributes); FieldCreationInformation fieldCreationInformation = new FieldCreationInformation(fieldType) { Id = id, AddToDefaultView = false, DisplayName = displayName, Group = group, InternalName = internalName, AdditionalAttributes = additionalAttributes }; return CreateField(list, fieldCreationInformation, executeQuery); } [Obsolete("Use CreateField(List list, FieldCreationInformation fieldCreationInformation, System.Boolean executeQuery = True)")] [EditorBrowsable(System.ComponentModel.EditorBrowsableState.Never)] public static Field CreateField(this List list, Guid id, string internalName, string fieldType, bool addToDefaultView, string displayName, string group, string additionalXmlAttributes = "", bool executeQuery = true) { var additionalAttributes = ParseAdditionalAttributes(additionalXmlAttributes); FieldCreationInformation fieldCreationInformation = new FieldCreationInformation(fieldType) { Id = id, AddToDefaultView = false, DisplayName = displayName, Group = group, InternalName = internalName, AdditionalAttributes = additionalAttributes }; return CreateField(list, fieldCreationInformation, executeQuery); } /// <summary> /// Adds field to a list /// </summary> /// <param name="list">List to process</param> /// <param name="fieldCreationInformation">Creation information for the field</param> /// <returns>The newly created field or existing field.</returns> public static Field CreateField(this List list, FieldCreationInformation fieldCreationInformation, bool executeQuery = true) { return CreateField<Field>(list, fieldCreationInformation, executeQuery); } [Obsolete("Use CreateField<TField>(List list, FieldCreationInformation 
fieldCreationInformation, System.Boolean executeQuery = True)")] [EditorBrowsable(System.ComponentModel.EditorBrowsableState.Never)] public static TField CreateField<TField>(this List list, Guid id, string internalName, FieldType fieldType, bool addToDefaultView, string displayName, string group, string additionalXmlAttributes = "", bool executeQuery = true) where TField : Field { var additionalAttributes = ParseAdditionalAttributes(additionalXmlAttributes); FieldCreationInformation fieldCreationInformation = new FieldCreationInformation(fieldType) { Id = id, InternalName = internalName, AddToDefaultView = addToDefaultView, Group = group, AdditionalAttributes = additionalAttributes, DisplayName = displayName }; return CreateField<TField>(list, fieldCreationInformation, executeQuery); } [Obsolete("Use CreateField<TField>(List list, FieldCreationInformation fieldCreationInformation, System.Boolean executeQuery = True)")] [EditorBrowsable(System.ComponentModel.EditorBrowsableState.Never)] public static TField CreateField<TField>(this List list, Guid id, string internalName, string fieldType, bool addToDefaultView, string displayName, string group, string additionalXmlAttributes = "", bool executeQuery = true) where TField : Field { var additionalAttributes = ParseAdditionalAttributes(additionalXmlAttributes); FieldCreationInformation fieldCreationInformation = new FieldCreationInformation(fieldType) { Id = id, InternalName = internalName, AddToDefaultView = addToDefaultView, Group = group, AdditionalAttributes = additionalAttributes, DisplayName = displayName }; return CreateField<TField>(list, fieldCreationInformation, executeQuery); } /// <summary> /// Adds field to a list /// </summary> /// <typeparam name="TField">The selected field type to return.</typeparam> /// <param name="list">List to process</param> /// <param name="fieldCreationInformation">Field creation information</param> /// <param name="executeQuery">Optionally skip the executeQuery action</param> /// 
<returns>The newly created field or existing field.</returns> public static TField CreateField<TField>(this List list, FieldCreationInformation fieldCreationInformation, bool executeQuery = true) where TField : Field { if (string.IsNullOrEmpty(fieldCreationInformation.InternalName)) throw new ArgumentNullException("InternalName"); if (string.IsNullOrEmpty(fieldCreationInformation.DisplayName)) throw new ArgumentNullException("DisplayName"); FieldCollection fields = list.Fields; list.Context.Load(fields, fc => fc.Include(f => f.Id, f => f.InternalName)); list.Context.ExecuteQuery(); var field = CreateFieldBase<TField>(fields, fieldCreationInformation, executeQuery); return field; } [Obsolete("Use CreateFieldBase<TField>(FieldCollection fields, FieldCreationInformation fieldCreationInformation, bool executeQuery = true")] static TField CreateFieldBase<TField>(FieldCollection fields, Guid id, string internalName, string fieldType, bool addToDefaultView, string displayName, string group, string additionalXmlAttributes = "", bool executeQuery = true) where TField : Field { var additionalAttributes = ParseAdditionalAttributes(additionalXmlAttributes); FieldCreationInformation fieldCreationInformation = new FieldCreationInformation(fieldType) { Id = id, InternalName = internalName, AddToDefaultView = addToDefaultView, Group = group, AdditionalAttributes = additionalAttributes, DisplayName = displayName }; return CreateFieldBase<TField>(fields, fieldCreationInformation, executeQuery); } /// <summary> /// Base implementation for creating fields /// </summary> /// <typeparam name="TField">The selected field type to return.</typeparam> /// <param name="fields">Field collection to which the created field will be added</param> /// <param name="id">Guid for the new field.</param> /// <param name="internalName">Internal name of the field</param> /// <param name="fieldType">Field type to be created.</param> /// <param name="addToDefaultView">Bool to add to the default view</param> 
/// <param name="displayName">The display name of the field</param> /// <param name="group">The field group name</param> /// <param name="additionalAttributes">Optionally specify additional XML attributes for the field creation</param> /// <param name="executeQuery">Optionally skip the executeQuery action</param> /// <returns></returns> static TField CreateFieldBase<TField>(FieldCollection fields, FieldCreationInformation fieldCreationInformation, bool executeQuery = true) where TField : Field { Field field = fields.FirstOrDefault(f => f.Id == fieldCreationInformation.Id || f.InternalName == fieldCreationInformation.InternalName) as TField; if (field != null) throw new ArgumentException("id", "Field already exists"); string newFieldCAML = FormatFieldXml(fieldCreationInformation); LoggingUtility.Internal.TraceInformation((int)EventId.CreateField, CoreResources.FieldAndContentTypeExtensions_CreateField01, fieldCreationInformation.InternalName, fieldCreationInformation.Id); field = fields.AddFieldAsXml(newFieldCAML, fieldCreationInformation.AddToDefaultView, AddFieldOptions.AddFieldInternalNameHint); fields.Context.Load(field); fields.Context.ExecuteQuery(); // Seems to be a bug in creating fields where the displayname is not persisted when creating them from xml field.Title = fieldCreationInformation.DisplayName; field.Update(); fields.Context.Load(field); if (executeQuery) fields.Context.ExecuteQuery(); return fields.Context.CastTo<TField>(field); } [Obsolete("Use FormatFieldXml(Guid id, string internalName, string fieldType, string displayName, string group, IEnumerable<KeyValuePair<string,string>> additionalAttributes)")] [EditorBrowsable(System.ComponentModel.EditorBrowsableState.Never)] public static string FormatFieldXml(Guid id, string internalName, string fieldType, string displayName, string group, string additionalXmlAttributes) { string newFieldCAML = string.Format(OfficeDevPnP.Core.Constants.FIELD_XML_FORMAT, fieldType, internalName, displayName, id, 
group, additionalXmlAttributes);
            return newFieldCAML;
        }

        /// <summary>
        /// Formats a field XML (CAML) definition from the supplied creation information.
        /// </summary>
        /// <param name="fieldCreationInformation">Creation information (type, names, id, group, extra attributes) for the field</param>
        /// <returns>The CAML XML string for the field</returns>
        public static string FormatFieldXml(FieldCreationInformation fieldCreationInformation)
        {
            List<string> additionalAttributesList = new List<string>();
            if (fieldCreationInformation.AdditionalAttributes != null)
            {
                foreach (var keyvaluepair in fieldCreationInformation.AdditionalAttributes)
                {
                    additionalAttributesList.Add(string.Format(Constants.FIELD_XML_PARAMETER_FORMAT, keyvaluepair.Key, keyvaluepair.Value));
                }
            }

            string newFieldCAML = string.Format(OfficeDevPnP.Core.Constants.FIELD_XML_FORMAT,
                fieldCreationInformation.FieldType,
                fieldCreationInformation.InternalName,
                fieldCreationInformation.DisplayName,
                fieldCreationInformation.Id,
                fieldCreationInformation.Group,
                additionalAttributesList.Any() ? string.Join(" ", additionalAttributesList) : "");
            return newFieldCAML;
        }

        /// <summary>
        /// Adds a field to a list
        /// </summary>
        /// <param name="list">List to process</param>
        /// <param name="fieldAsXml">The XML declaration of SiteColumn definition</param>
        /// <returns>The newly created field or existing field.</returns>
        public static Field CreateField(this List list, string fieldAsXml)
        {
            FieldCollection fields = list.Fields;
            list.Context.Load(fields);
            list.Context.ExecuteQuery();

            // Parse out ID and Name purely for logging purposes.
            XmlDocument xd = new XmlDocument();
            xd.LoadXml(fieldAsXml);
            XmlNamespaceManager nsmgr = new XmlNamespaceManager(xd.NameTable);
            nsmgr.AddNamespace("namespace", "http://schemas.microsoft.com/sharepoint/");
            XmlNode fieldNode = xd.SelectSingleNode("//namespace:Field", nsmgr);
            string id = fieldNode.Attributes["ID"].Value;
            string name = fieldNode.Attributes["Name"].Value;

            LoggingUtility.Internal.TraceInformation((int)EventId.CreateListField, CoreResources.FieldAndContentTypeExtensions_CreateField01, name, id);
            // AddFieldInternalNameHint keeps the internal name from the XML instead of letting SharePoint derive one.
            Field field = fields.AddFieldAsXml(fieldAsXml, false, AddFieldOptions.AddFieldInternalNameHint);
            list.Update();
            list.Context.ExecuteQuery();

            return field;
        }

        /// <summary>
        /// Returns if the field is found
        /// </summary>
        /// <param name="list">List to process</param>
        /// <param name="fieldId">Guid of the field ID</param>
        /// <returns>True if the fields exists, false otherwise</returns>
        public static bool FieldExistsById(this List list, Guid fieldId)
        {
            FieldCollection fields = list.Fields;
            list.Context.Load(fields);
            list.Context.ExecuteQuery();
            foreach (var item in fields)
            {
                if (item.Id == fieldId)
                {
                    return true;
                }
            }
            return false;
        }

        /// <summary>
        /// Returns if the field is found, query based on the ID
        /// </summary>
        /// <param name="list">List to process</param>
        /// <param name="fieldId">String representation of the field ID (=guid)</param>
        /// <returns>True if the fields exists, false otherwise</returns>
        public static bool FieldExistsById(this List list, string fieldId)
        {
            if (string.IsNullOrEmpty(fieldId))
                throw new ArgumentNullException("fieldId");

            return FieldExistsById(list, new Guid(fieldId));
        }

        /// <summary>
        /// Field exists in list by name
        /// </summary>
        /// <param name="list">List to process</param>
        /// <param name="fieldName">Internal name of the field</param>
        /// <returns>True if the fields exists, false otherwise</returns>
        public static bool FieldExistsByName(this List list, string fieldName)
        {
            if (string.IsNullOrEmpty(fieldName))
                throw new ArgumentNullException("fieldName");

            FieldCollection fields = list.Fields;
            IEnumerable<Field> results = list.Context.LoadQuery<Field>(fields.Where(item => item.InternalName == fieldName));
            list.Context.ExecuteQuery();
            return results.FirstOrDefault() != null;
        }

        /// <summary>
        /// Gets a list of fields from a list by names.
        /// </summary>
        /// <param name="list">The target list containing the fields.</param>
        /// <param name="fieldInternalNames">List of field names to retrieve.</param>
        /// <returns>List of fields requested.</returns>
        public static IEnumerable<Field> GetFields(this List list, params string[] fieldInternalNames)
        {
            var fields = new List<Field>();

            if (fieldInternalNames == null || fieldInternalNames.Length == 0)
                return fields;

            // Queue up all loads and round-trip once.
            foreach (var fieldName in fieldInternalNames)
            {
                var field = list.Fields.GetByInternalNameOrTitle(fieldName);
                list.Context.Load(field);
                fields.Add(field);
            }
            list.Context.ExecuteQuery();

            return fields;
        }

        #endregion

        /// <summary>
        /// Helper method to parse Key="Value" strings into a keyvaluepair
        /// </summary>
        /// <param name="xmlAttributes">Attributes in the Key="Value" AnotherKey="Value" format</param>
        /// <returns>List of attribute name/value pairs, or null when the input is null or empty</returns>
        [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Globalization", "CA1303:Do not pass literals as localized parameters", MessageId = "System.Xml.Linq.XElement.Parse(System.String)")]
        private static List<KeyValuePair<string, string>> ParseAdditionalAttributes(string xmlAttributes)
        {
            List<KeyValuePair<string, string>> attributes = null;

            // The XmlAttributes should be presented in the Key="Value" AnotherKey="Value" format.
            if (!string.IsNullOrEmpty(xmlAttributes))
            {
                attributes = new List<KeyValuePair<string, string>>();
                // Wrap the raw attribute string in a temporary element so XElement can parse it.
                string parameterXml = string.Format(Constants.FIELD_XML_PARAMETER_WRAPPER_FORMAT, xmlAttributes);
                XElement xe = XElement.Parse(parameterXml);

                foreach (var attribute in xe.Attributes())
                {
                    attributes.Add(new KeyValuePair<string, string>(attribute.Name.LocalName, attribute.Value));
                }
            }

            return attributes;
        }

        #region Content Types

        /// <summary>
        /// Adds content type to list
        /// </summary>
        /// <param name="web">Site to be processed - can be root web or sub site</param>
        /// <param name="listTitle">Title of the list</param>
        /// <param name="contentTypeId">Complete ID for the content type</param>
        /// <param name="defaultContent">Optionally make this the default content type</param>
        public static void AddContentTypeToListById(this Web web, string listTitle, string contentTypeId, bool defaultContent = false)
        {
            // Get content type instance
            ContentType contentType = GetContentTypeById(web, contentTypeId);
            // Add content type to list
            AddContentTypeToList(web, listTitle, contentType, defaultContent);
        }

        /// <summary>
        /// Adds content type to list
        /// </summary>
        /// <param name="web">Site to be processed - can be root web or sub site</param>
        /// <param name="listTitle">Title of the list</param>
        /// <param name="contentTypeName">Name of the content type</param>
        /// <param name="defaultContent">Optionally make this the default content type</param>
        public static void AddContentTypeToListByName(this Web web, string listTitle, string contentTypeName, bool defaultContent = false)
        {
            // Get content type instance
            ContentType contentType = GetContentTypeByName(web, contentTypeName);
            // Add content type to list
            AddContentTypeToList(web, listTitle, contentType, defaultContent);
        }

        /// <summary>
        /// Adds content type to list
        /// </summary>
        /// <param name="web">Site to be processed - can be root web or sub site</param>
        /// <param name="listTitle">Title of the list</param>
        /// <param name="contentType">Content type to be added to the list</param>
        /// <param name="defaultContent">If set true, content type is updated to be default content type for the list</param>
        public static void AddContentTypeToList(this Web web, string listTitle, ContentType contentType, bool defaultContent = false)
        {
            // Get list instances
            List list = web.GetListByTitle(listTitle);
            // Add content type to list
            AddContentTypeToList(list, contentType, defaultContent);
        }

        /// <summary>
        /// Add content type to list
        /// </summary>
        /// <param name="list">List to add content type to</param>
        /// <param name="contentTypeID">Complete ID for the content type</param>
        /// <param name="defaultContent">If set true, content type is updated to be default content type for the list</param>
        public static void AddContentTypeToListById(this List list, string contentTypeID, bool defaultContent = false)
        {
            Web web = list.ParentWeb;
            ContentType contentType = GetContentTypeById(web, contentTypeID);
            AddContentTypeToList(list, contentType, defaultContent);
        }

        /// <summary>
        /// Add content type to list
        /// </summary>
        /// <param name="list">List to add content type to</param>
        /// <param name="contentTypeName">Name of the content type</param>
        /// <param name="defaultContent">If set true, content type is updated to be default content type for the list</param>
        public static void AddContentTypeToListByName(this List list, string contentTypeName, bool defaultContent = false)
        {
            Web web = list.ParentWeb;
            ContentType contentType = GetContentTypeByName(web, contentTypeName);
            AddContentTypeToList(list, contentType, defaultContent);
        }

        /// <summary>
        /// Add content type to list
        /// </summary>
        /// <param name="list">List to add content type to</param>
        /// <param name="contentType">Content type to add to the list</param>
        /// <param name="defaultContent">If set true, content type is updated to be default content type for the list</param>
        public static void AddContentTypeToList(this List list, ContentType contentType, bool defaultContent = false)
        {
            if (contentType == null)
                throw new ArgumentNullException("contentType");

            // No-op when the content type (or a more specific child) is already attached.
            if (list.ContentTypeExistsById(contentType.Id.StringValue))
                return;

            list.ContentTypesEnabled = true;
            list.Update();
            list.Context.ExecuteQuery();

            list.ContentTypes.AddExistingContentType(contentType);
            list.Context.ExecuteQuery();

            // Set the default content type
            if (defaultContent)
            {
                SetDefaultContentTypeToList(list, contentType);
            }
        }

        /// <summary>
        /// Associates field to content type
        /// </summary>
        /// <param name="web">Site to be processed - can be root web or sub site</param>
        /// <param name="contentTypeID">Complete ID for the content type</param>
        /// <param name="fieldID">String representation of the field ID (=guid)</param>
        /// <param name="required">Optionally make this a required field</param>
        /// <param name="hidden">Optionally make this a hidden field</param>
        public static void AddFieldToContentTypeById(this Web web, string contentTypeID, string fieldID, bool required = false, bool hidden = false)
        {
            // Get content type
            ContentType ct = web.GetContentTypeById(contentTypeID);
            web.Context.Load(ct);
            web.Context.Load(ct.FieldLinks);
            web.Context.ExecuteQuery();

            // Get field
            Field fld = web.Fields.GetById(new Guid(fieldID));

            // Add field association to content type
            AddFieldToContentType(web, ct, fld, required, hidden);
        }

        /// <summary>
        /// Associates field to content type
        /// </summary>
        /// <param name="web">Site to be processed - can be root web or sub site</param>
        /// <param name="contentTypeName">Name of the content type</param>
        /// <param name="fieldID">Guid representation of the field ID</param>
        /// <param name="required">Optionally make this a required field</param>
        /// <param name="hidden">Optionally make this a hidden field</param>
        public static void AddFieldToContentTypeByName(this Web web, string contentTypeName, Guid fieldID, bool required = false, bool hidden = false)
        {
            // Get content type
            ContentType ct = web.GetContentTypeByName(contentTypeName);
            web.Context.Load(ct);
            web.Context.Load(ct.FieldLinks);
            web.Context.ExecuteQuery();

            // Get field
            Field fld = web.Fields.GetById(fieldID);

            // Add field association to content type
            AddFieldToContentType(web, ct, fld, required, hidden);
        }

        /// <summary>
        /// Associates field to content type
        /// </summary>
        /// <param name="web">Site to be processed - can be root web or sub site</param>
        /// <param name="contentType">Content type to associate field to</param>
        /// <param name="field">Field to associate to the content type</param>
        /// <param name="required">Optionally make this a required field</param>
        /// <param name="hidden">Optionally make this a hidden field</param>
        public static void AddFieldToContentType(this Web web, ContentType contentType, Field field, bool required = false, bool hidden = false)
        {
            // Make sure the Id properties are available before logging/using them.
            if (!contentType.IsPropertyAvailable("Id"))
            {
                web.Context.Load(contentType, ct => ct.Id);
                web.Context.ExecuteQuery();
            }
            if (!field.IsPropertyAvailable("Id"))
            {
                web.Context.Load(field, f => f.Id);
                web.Context.ExecuteQuery();
            }

            LoggingUtility.Internal.TraceInformation((int)EventId.AddFieldToContentType, CoreResources.FieldAndContentTypeExtensions_AddField0ToContentType1, field.Id, contentType.Id);

            FieldLinkCreationInformation fldInfo = new FieldLinkCreationInformation();
            fldInfo.Field = field;
            contentType.FieldLinks.Add(fldInfo);
            // Update(true) pushes the change down to children of the content type.
            contentType.Update(true);
            web.Context.ExecuteQuery();

            web.Context.Load(field);
            web.Context.ExecuteQuery();

            if (required || hidden)
            {
                // Update FieldLink
                FieldLink flink = contentType.FieldLinks.GetById(field.Id);
                flink.Required = required;
                flink.Hidden = hidden;
                contentType.Update(true);
                web.Context.ExecuteQuery();
            }
        }

        /// <summary>
        /// Searches the list content types and returns the content type identifier (ID) that is the
        /// nearest match to the specified content type ID.
        /// </summary>
        /// <param name="list">The list to check for content types</param>
        /// <param name="baseContentTypeId">A string with the base content type ID to match.</param>
        /// <returns>The value of the Id property for the content type with the closest match to the value
        /// of the specified content type ID. </returns>
        /// <remarks>
        /// <para>
        /// If the search finds multiple matches, the shorter ID is returned. For example, if 0x0101 is the
        /// argument, and the collection contains both 0x010109 and 0x01010901, the method returns 0x010109.
        /// </para>
        /// </remarks>
        public static ContentTypeId BestMatchContentTypeId(this List list, string baseContentTypeId)
        {
            // FIX: the paramName reported to callers now matches the actual parameter name.
            if (baseContentTypeId == null)
            {
                throw new ArgumentNullException("baseContentTypeId");
            }
            if (string.IsNullOrWhiteSpace(baseContentTypeId))
            {
                throw new ArgumentException("Content type must be provided and cannot be empty.", "baseContentTypeId");
            }
            return BestMatchContentTypeIdImplementation(list, baseContentTypeId);
        }

        /// <summary>
        /// Implementation of the best-match search: scans the list's content types for IDs that
        /// start with the supplied base ID and keeps the shortest (least specific) match.
        /// </summary>
        /// <param name="list">The list to check for content types</param>
        /// <param name="baseContentTypeId">Base content type ID to match (already validated)</param>
        /// <returns>ID of the closest matching content type, or null when nothing matches</returns>
        private static ContentTypeId BestMatchContentTypeIdImplementation(this List list, string baseContentTypeId)
        {
            var contentTypes = list.ContentTypes;
            list.Context.Load(contentTypes);
            list.Context.ExecuteQuery();
            LoggingUtility.Internal.TraceVerbose("Checking {0} content types in list for best match", contentTypes.Count);

            var shortestMatchLength = int.MaxValue;
            ContentTypeId bestMatchId = null;
            foreach (var contentType in contentTypes)
            {
                if (contentType.StringId.StartsWith(baseContentTypeId, StringComparison.InvariantCultureIgnoreCase))
                {
                    LoggingUtility.Internal.TraceVerbose("Found match {0}", contentType.StringId);
                    if (contentType.StringId.Length < shortestMatchLength)
                    {
                        bestMatchId = contentType.Id;
                        shortestMatchLength = contentType.StringId.Length;
                        LoggingUtility.Internal.TraceVerbose(" - Is best match. Best match length now {0}", shortestMatchLength);
                    }
                }
            }
            return bestMatchId;
        }

        /// <summary>
        /// Does content type exists in the web
        /// </summary>
        /// <param name="web">Web to be processed</param>
        /// <param name="contentTypeId">Complete ID for the content type</param>
        /// <returns>True if the content type exists, false otherwise</returns>
        public static bool ContentTypeExistsById(this Web web, string contentTypeId)
        {
            if (string.IsNullOrEmpty(contentTypeId))
                throw new ArgumentNullException("contentTypeId");

            ContentTypeCollection ctCol = web.ContentTypes;
            web.Context.Load(ctCol);
            web.Context.ExecuteQuery();
            // StartsWith: a child content type ID counts as a match for its parent's ID.
            foreach (var item in ctCol)
            {
                if (item.Id.StringValue.StartsWith(contentTypeId, StringComparison.OrdinalIgnoreCase))
                {
                    return true;
                }
            }
            return false;
        }

        /// <summary>
        /// Does content type exists in the web
        /// </summary>
        /// <param name="web">Web to be processed</param>
        /// <param name="contentTypeName">Name of the content type</param>
        /// <returns>True if the content type exists, false otherwise</returns>
        public static bool ContentTypeExistsByName(this Web web, string contentTypeName)
        {
            if (string.IsNullOrEmpty(contentTypeName))
                throw new ArgumentNullException("contentTypeName");

            ContentTypeCollection ctCol = web.ContentTypes;
            IEnumerable<ContentType> results = web.Context.LoadQuery<ContentType>(ctCol.Where(item => item.Name == contentTypeName));
            web.Context.ExecuteQuery();
            ContentType ct = results.FirstOrDefault();
            return ct != null;
        }

        /// <summary>
        /// Does content type exist in web
        /// </summary>
        /// <param name="web">Web to be processed</param>
        /// <param name="listTitle">Title of the list to be updated</param>
        /// <param name="contentTypeId">Complete ID for the content type</param>
        /// <returns>True if the content type exists, false otherwise</returns>
        public static bool ContentTypeExistsById(this Web web, string listTitle, string contentTypeId)
        {
            if (string.IsNullOrEmpty(listTitle))
                throw new ArgumentNullException("listTitle");
            if (string.IsNullOrEmpty(contentTypeId))
                throw new ArgumentNullException("contentTypeId");

            List list = web.GetListByTitle(listTitle);
            return ContentTypeExistsById(list, contentTypeId);
        }

        /// <summary>
        /// Does content type exist in list
        /// </summary>
        /// <param name="list">List to update</param>
        /// <param name="contentTypeId">Complete ID for the content type</param>
        /// <returns>True if the content type exists, false otherwise</returns>
        public static bool ContentTypeExistsById(this List list, string contentTypeId)
        {
            if (string.IsNullOrEmpty(contentTypeId))
                throw new ArgumentNullException("contentTypeId");

            // A list without content types enabled cannot contain the content type.
            if (!list.ContentTypesEnabled)
            {
                return false;
            }

            ContentTypeCollection ctCol = list.ContentTypes;
            list.Context.Load(ctCol);
            list.Context.ExecuteQuery();
            foreach (var item in ctCol)
            {
                if (item.Id.StringValue.StartsWith(contentTypeId, StringComparison.OrdinalIgnoreCase))
                {
                    return true;
                }
            }
            return false;
        }

        /// <summary>
        /// Does content type exist in web
        /// </summary>
        /// <param name="web">Web to be processed</param>
        /// <param name="listTitle">Title of the list to be updated</param>
        /// <param name="contentTypeName">Name of the content type</param>
        /// <returns>True if the content type exists, false otherwise</returns>
        public static bool ContentTypeExistsByName(this Web web, string listTitle, string contentTypeName)
        {
            if (string.IsNullOrEmpty(listTitle))
                throw new ArgumentNullException("listTitle");
            if (string.IsNullOrEmpty(contentTypeName))
                throw new ArgumentNullException("contentTypeName");

            List list = web.GetListByTitle(listTitle);
            return ContentTypeExistsByName(list, contentTypeName);
        }

        /// <summary>
        /// Does content type exist in list
        /// </summary>
        /// <param name="list">List to update</param>
        /// <param name="contentTypeName">Name of the content type</param>
        /// <returns>True if the content type exists, false otherwise</returns>
        public static bool ContentTypeExistsByName(this List list, string contentTypeName)
        {
            if (string.IsNullOrEmpty(contentTypeName))
                throw new ArgumentNullException("contentTypeName");

            if (!list.ContentTypesEnabled)
            {
                return false;
            }

            ContentTypeCollection ctCol = list.ContentTypes;
            IEnumerable<ContentType> results = list.Context.LoadQuery<ContentType>(ctCol.Where(item => item.Name == contentTypeName));
            list.Context.ExecuteQuery();
            return results.FirstOrDefault() != null;
        }

        /// <summary>
        /// Create a content type based on the classic feature framework structure.
        /// </summary>
        /// <param name="web">Web to operate against</param>
        /// <param name="absolutePathToFile">Absolute path to the xml location</param>
        public static void CreateContentTypeFromXMLFile(this Web web, string absolutePathToFile)
        {
            XmlDocument xd = new XmlDocument();
            xd.Load(absolutePathToFile);
            CreateContentTypeFromXML(web, xd);
        }

        /// <summary>
        /// Create a content type based on the classic feature framework structure.
        /// </summary>
        /// <param name="web">Web to operate against</param>
        /// <param name="xmlStructure">XML structure in string format</param>
        public static void CreateContentTypeFromXMLString(this Web web, string xmlStructure)
        {
            XmlDocument xd = new XmlDocument();
            xd.LoadXml(xmlStructure);
            CreateContentTypeFromXML(web, xd);
        }

        /// <summary>
        /// Create a content type based on the classic feature framework structure.
        /// </summary>
        /// <param name="web">Web to operate against</param>
        /// <param name="xmlDoc">Actual XML document</param>
        public static void CreateContentTypeFromXML(this Web web, XmlDocument xmlDoc)
        {
            XmlNamespaceManager nsmgr = new XmlNamespaceManager(xmlDoc.NameTable);
            nsmgr.AddNamespace("namespace", "http://schemas.microsoft.com/sharepoint/");

            XmlNodeList contentTypes = xmlDoc.SelectNodes("//namespace:ContentType", nsmgr);
            foreach (XmlNode ct in contentTypes)
            {
                string ctid = ct.Attributes["ID"].Value;
                string name = ct.Attributes["Name"].Value;

                if (web.ContentTypeExistsByName(name))
                {
                    LoggingUtility.Internal.TraceWarning((int)EventId.ContentTypeAlreadyExists, CoreResources.FieldAndContentTypeExtensions_ContentType01AlreadyExists, name, ctid);
                    // Skip
                }
                else
                {
                    var description = "";
                    if (((XmlElement)ct).HasAttribute("Description"))
                    {
                        description = ((XmlElement)ct).GetAttribute("Description");
                    }
                    var group = "";
                    if (((XmlElement)ct).HasAttribute("Group"))
                    {
                        group = ((XmlElement)ct).GetAttribute("Group");
                    }

                    // Create CT
                    web.CreateContentType(name, description, ctid, group);

                    // Add fields to content type
                    XmlNodeList fieldRefs = ct.SelectNodes(".//namespace:FieldRef", nsmgr);
                    XmlAttribute attr = null;
                    foreach (XmlNode fr in fieldRefs)
                    {
                        bool required = false;
                        bool hidden = false;
                        string frid = fr.Attributes["ID"].Value;
                        string frName = fr.Attributes["Name"].Value;
                        attr = fr.Attributes["Required"];
                        if (attr != null)
                        {
                            required = attr.Value.ToBoolean();
                        }
                        attr = fr.Attributes["Hidden"];
                        if (attr != null)
                        {
                            hidden = attr.Value.ToBoolean();
                        }
                        web.AddFieldToContentTypeById(ctid, frid, required, hidden);
                    }
                }
            }
        }

        /// <summary>
        /// Create new content type to web
        /// </summary>
        /// <param name="web">Site to be processed - can be root web or sub site</param>
        /// <param name="name">Name of the content type</param>
        /// <param name="id">Complete ID for the content type</param>
        /// <param name="group">Group for the content type</param>
        /// <returns>The created content type</returns>
        public static ContentType CreateContentType(this Web web, string name, string id, string group)
        {
            // Load the current collection of content types
            return CreateContentType(web, name, string.Empty, id, group);
        }

        /// <summary>
        /// Create new content type to web
        /// </summary>
        /// <param name="web">Site to be processed - can be root web or sub site</param>
        /// <param name="name">Name of the content type</param>
        /// <param name="description">Description for the content type</param>
        /// <param name="id">Complete ID for the content type</param>
        /// <param name="group">Group for the content type</param>
        /// <param name="parentContentType">Parent Content Type</param>
        /// <returns>The created content type</returns>
        public static ContentType CreateContentType(this Web web, string name, string description, string id, string group, ContentType parentContentType = null)
        {
            LoggingUtility.Internal.TraceInformation((int)EventId.CreateContentType, CoreResources.FieldAndContentTypeExtensions_CreateContentType01, name, id);

            // Load the current collection of content types
            ContentTypeCollection contentTypes = web.ContentTypes;
            web.Context.Load(contentTypes);
            web.Context.ExecuteQuery();

            ContentTypeCreationInformation newCt = new ContentTypeCreationInformation();
            // Set the properties for the content type
            newCt.Name = name;
            newCt.Id = id;
            newCt.Description = description;
            newCt.Group = group;
            newCt.ParentContentType = parentContentType;
            ContentType myContentType = contentTypes.Add(newCt);
            web.Context.ExecuteQuery();

            // Return the content type object
            return myContentType;
        }

        /// <summary>
        /// Return content type by name
        /// </summary>
        /// <param name="web">Web to be processed</param>
        /// <param name="contentTypeName">Name of the content type</param>
        /// <returns>Content type object or null if was not found</returns>
        public static ContentType GetContentTypeByName(this Web web, string contentTypeName)
        {
            if (string.IsNullOrEmpty(contentTypeName))
                throw new ArgumentNullException("contentTypeName");

            ContentTypeCollection ctCol = web.ContentTypes;
            IEnumerable<ContentType> results = web.Context.LoadQuery<ContentType>(ctCol.Where(item => item.Name == contentTypeName));
            web.Context.ExecuteQuery();
            return results.FirstOrDefault();
        }

        /// <summary>
        /// Return content type by Id
        /// </summary>
        /// <param name="web">Web to be processed</param>
        /// <param name="contentTypeId">Complete ID for the content type</param>
        /// <returns>Content type object or null if was not found</returns>
        public static ContentType GetContentTypeById(this Web web, string contentTypeId)
        {
            if (string.IsNullOrEmpty(contentTypeId))
                throw new ArgumentNullException("contentTypeId");

            ContentTypeCollection ctCol = web.ContentTypes;
            web.Context.Load(ctCol);
            web.Context.ExecuteQuery();
            // Exact (case-insensitive) match — unlike ContentTypeExistsById, which uses StartsWith.
            foreach (var item in ctCol)
            {
                if (item.Id.StringValue.Equals(contentTypeId, StringComparison.OrdinalIgnoreCase))
                {
                    return item;
                }
            }
            return null;
        }

        /// <summary>
        /// Return content type by name
        /// </summary>
        /// <param name="list">List to update</param>
        /// <param name="contentTypeName">Name of the content type</param>
        /// <returns>Content type object or null if was not found</returns>
        public static ContentType GetContentTypeByName(this List list, string contentTypeName)
        {
            if (string.IsNullOrEmpty(contentTypeName))
                throw new ArgumentNullException("contentTypeName");

            ContentTypeCollection ctCol = list.ContentTypes;
            IEnumerable<ContentType> results = list.Context.LoadQuery<ContentType>(ctCol.Where(item => item.Name == contentTypeName));
            list.Context.ExecuteQuery();
            return results.FirstOrDefault();
        }

        /// <summary>
        /// Return content type by Id
        /// </summary>
        /// <param name="list">List to update</param>
        /// <param name="contentTypeId">Complete ID for the content type</param>
        /// <returns>Content type object or null if was not found</returns>
        public static ContentType GetContentTypeById(this List list, string contentTypeId)
        {
            if (string.IsNullOrEmpty(contentTypeId))
                throw new ArgumentNullException("contentTypeId");

            ContentTypeCollection ctCol = list.ContentTypes;
            list.Context.Load(ctCol);
            list.Context.ExecuteQuery();
            foreach (var item in ctCol)
            {
                if (item.Id.StringValue.Equals(contentTypeId, StringComparison.OrdinalIgnoreCase))
                {
                    return item;
                }
            }
            return null;
        }

        /// <summary>
        /// Set default content type to list
        /// </summary>
        /// <param name="web">Site to be processed - can be root web or sub site</param>
        /// <param name="list">List to update</param>
        /// <param name="contentTypeId">Complete ID for the content type</param>
        public static void SetDefaultContentTypeToList(this Web web, List list, string contentTypeId)
        {
            SetDefaultContentTypeToList(list, contentTypeId);
        }

        /// <summary>
        /// Set default content type to list
        /// </summary>
        /// <param name="web">Site to be processed - can be root web or sub site</param>
        /// <param name="list">List to update</param>
        /// <param name="contentType">Content type to make default</param>
        public static void SetDefaultContentTypeToList(this Web web, List list, ContentType contentType)
        {
            SetDefaultContentTypeToList(list, contentType.Id.ToString());
        }

        /// <summary>
        /// Set default content type to list
        /// </summary>
        /// <param name="web">Site to be processed - can be root web or sub site</param>
        /// <param name="listTitle">Title of the list to be updated</param>
        /// <param name="contentTypeId">Complete ID for the content type</param>
        public static void SetDefaultContentTypeToList(this Web web, string listTitle, string contentTypeId)
        {
            // Get list instances
            List list = web.GetListByTitle(listTitle);
            web.Context.Load(list);
            web.Context.ExecuteQuery();
            // Add content type to list
            SetDefaultContentTypeToList(list, contentTypeId);
        }

        /// <summary>
        /// Set's default content type list.
        /// </summary>
        /// <remarks>Notice. Currently removes other content types from the list. Known issue</remarks>
        /// <param name="web">Site to be processed - can be root web or sub site</param>
        /// <param name="listTitle">Title of the list to be updated</param>
        /// <param name="contentType">Content type to make default</param>
        public static void SetDefaultContentTypeToList(this Web web, string listTitle, ContentType contentType)
        {
            SetDefaultContentTypeToList(web, listTitle, contentType.Id.ToString());
        }

        /// <summary>
        /// Set's default content type list.
        /// </summary>
        /// <remarks>Notice. Currently removes other content types from the list. Known issue</remarks>
        /// <param name="list">List to update</param>
        /// <param name="contentTypeId">Complete ID for the content type</param>
        public static void SetDefaultContentTypeToList(this List list, string contentTypeId)
        {
            ContentTypeCollection ctCol = list.ContentTypes;
            list.Context.Load(ctCol);
            list.Context.ExecuteQuery();

            var ctIds = new List<ContentTypeId>();
            foreach (ContentType ct in ctCol)
            {
                ctIds.Add(ct.Id);
            }

            // Build the new visible order: drop folder content types (IDs starting 0x012000)
            // and sort so that the requested content type comes first (false sorts before true).
            var newOrder = ctIds.Except(
                    // remove the folder content type
                    ctIds.Where(id => id.StringValue.StartsWith("0x012000"))
                )
                .OrderBy(x => !x.StringValue.StartsWith(contentTypeId, StringComparison.OrdinalIgnoreCase))
                .ToArray();

            list.RootFolder.UniqueContentTypeOrder = newOrder;
            list.RootFolder.Update();
            list.Update();
            list.Context.ExecuteQuery();
        }

        /// <summary>
        /// Set default content type to list
        /// </summary>
        /// <param name="list">List to update</param>
        /// <param name="contentType">Content type to make default</param>
        public static void SetDefaultContentTypeToList(this List list, ContentType contentType)
        {
            SetDefaultContentTypeToList(list, contentType.Id.ToString());
        }

        /// <summary>
        /// Reorders content types on the list. The first one in the list is the default item.
        /// Any items left out from the list will still be on the content type, but will not be visible on the new button.
        /// </summary>
        /// <param name="list">Target list containing the content types</param>
        /// <param name="contentTypeNamesOrIds">Content type names or ids to sort.</param>
        public static void ReorderContentTypes(this List list, IEnumerable<string> contentTypeNamesOrIds)
        {
            var listContentTypes = list.ContentTypes;
            list.Context.Load(listContentTypes);
            list.Context.ExecuteQuery();

            IList<ContentTypeId> newOrder = new List<ContentTypeId>();
            var ctCol = listContentTypes.Cast<ContentType>().ToList();
            foreach (var ctypeName in contentTypeNamesOrIds)
            {
                // Match either by exact name (case-insensitive) or by ID prefix.
                var ctype = ctCol.Find(ct => ctypeName.Equals(ct.Name, StringComparison.OrdinalIgnoreCase) || ct.StringId.StartsWith(ctypeName));
                if (ctype != null)
                    newOrder.Add(ctype.Id);
            }

            list.RootFolder.UniqueContentTypeOrder = newOrder;
            list.RootFolder.Update();
            list.Update();
            list.Context.ExecuteQuery();
        }

        #endregion

#if !CLIENTSDKV15
        #region Localization

        /// <summary>
        /// Set localized labels for content type
        /// </summary>
        /// <param name="web">Web to operate on</param>
        /// <param name="contentTypeName">Name of the content type</param>
        /// <param name="cultureName">Culture for the localization (en-es, nl-be, fi-fi,...)</param>
        /// <param name="nameResource">Localized value for the Name property</param>
        /// <param name="descriptionResource">Localized value for the Description property</param>
        public static void SetLocalizationForContentType(this Web web, string contentTypeName, string cultureName, string nameResource, string descriptionResource)
        {
            ContentType contentType = web.GetContentTypeByName(contentTypeName);
            contentType.SetLocalizationForContentType(cultureName, nameResource, descriptionResource);
        }

        /// <summary>
        /// Set localized labels for content type
        /// </summary>
        /// <param name="list">List to update</param>
        /// <param name="contentTypeId">Complete ID for the content type</param>
        /// <param name="cultureName">Culture for the localization (en-es, nl-be, fi-fi,...)</param>
        /// <param name="nameResource">Localized value for the Name property</param>
        /// <param name="descriptionResource">Localized value for the Description property</param>
        public static void SetLocalizationForContentType(this List list, string contentTypeId, string cultureName, string nameResource, string descriptionResource)
        {
            ContentTypeCollection contentTypes = list.ContentTypes;
            list.Context.Load(contentTypes);
            list.Context.ExecuteQuery();
            ContentType contentType = contentTypes.GetById(contentTypeId);
            list.Context.ExecuteQuery();
            contentType.SetLocalizationForContentType(cultureName, nameResource, descriptionResource);
        }

        /// <summary>
        /// Set localized labels for content type
        /// </summary>
        /// <param name="contentType">Content type to update</param>
        /// <param name="cultureName">Culture for the localization (en-es, nl-be, fi-fi,...)</param>
        /// <param name="nameResource">Localized value for the Name property</param>
        /// <param name="descriptionResource">Localized value for the Description property</param>
        public static void SetLocalizationForContentType(this ContentType contentType, string cultureName, string nameResource, string descriptionResource)
        {
            // NOTE(review): this reloads only when TitleResource is ALREADY instantiated; the
            // intent may have been the negated check — confirm before changing behavior.
            if (contentType.IsObjectPropertyInstantiated("TitleResource"))
            {
                contentType.Context.Load(contentType);
                contentType.Context.ExecuteQuery();
            }

            // Set translations for the culture
            contentType.NameResource.SetValueForUICulture(cultureName, nameResource);
            contentType.DescriptionResource.SetValueForUICulture(cultureName, descriptionResource);
            contentType.Update(true);
            contentType.Context.ExecuteQuery();
        }

        /// <summary>
        /// Set localized labels for field
        /// </summary>
        /// <param name="web">Web to operate on</param>
        /// <param name="siteColumnId">Guid with the site column ID</param>
        /// <param name="cultureName">Culture for the localization (en-es, nl-be, fi-fi,...)</param>
        /// <param name="titleResource">Localized value for the Title property</param>
        /// <param name="descriptionResource">Localized value for the Description property</param>
        public static void SetLocalizationForField(this Web web, Guid siteColumnId, string cultureName, string titleResource, string descriptionResource)
        {
            FieldCollection fields = web.Fields;
            Field fld = fields.GetById(siteColumnId);
            SetLocalizationForField(fld, cultureName, titleResource, descriptionResource);
        }

        /// <summary>
        /// Set localized labels for field
        /// </summary>
        /// <param name="web">Web to operate on</param>
        /// <param name="siteColumnName">Name of the site column</param>
        /// <param name="cultureName">Culture for the localization (en-es, nl-be, fi-fi,...)</param>
        /// <param name="titleResource">Localized value for the Title property</param>
        /// <param name="descriptionResource">Localized value for the Description property</param>
        public static void SetLocalizationForField(this Web web, string siteColumnName, string cultureName, string titleResource, string descriptionResource)
        {
            FieldCollection fields = web.Fields;
            Field fld = fields.GetByInternalNameOrTitle(siteColumnName);
            SetLocalizationForField(fld, cultureName, titleResource, descriptionResource);
        }

        /// <summary>
        /// Set localized labels for field
        /// </summary>
        /// <param name="web">Web to operate on</param>
        /// <param name="siteColumn">Site column to localize</param>
        /// <param name="cultureName">Culture for the localization (en-es, nl-be, fi-fi,...)</param>
        /// <param name="titleResource">Localized value for the Title property</param>
        /// <param name="descriptionResource">Localized value for the Description property</param>
        public static void SetLocalizationForField(this Web web, Field siteColumn, string cultureName, string titleResource, string descriptionResource)
        {
            SetLocalizationForField(siteColumn, cultureName, titleResource, descriptionResource);
        }

        /// <summary>
        /// Set localized labels for field
        /// </summary>
        /// <param name="list">List to update</param>
        /// <param name="siteColumnId">Guid of the site column ID</param>
        /// <param name="cultureName">Culture for the localization (en-es, nl-be, fi-fi,...)</param>
        /// <param name="titleResource">Localized value for the Title property</param>
        /// <param name="descriptionResource">Localized value for the Description property</param>
        public static void SetLocalizationForField(this List list, Guid siteColumnId, string cultureName, string titleResource, string descriptionResource)
        {
            FieldCollection fields = list.Fields;
            Field fld = fields.GetById(siteColumnId);
            SetLocalizationForField(fld, cultureName, titleResource, descriptionResource);
        }

        /// <summary>
        /// Set localized labels for field
        /// </summary>
        /// <param name="list">List to update</param>
        /// <param name="siteColumnName">Name of the site column</param>
        /// <param name="cultureName">Culture for the localization (en-es, nl-be, fi-fi,...)</param>
        /// <param name="titleResource">Localized value for the Title property</param>
        /// <param name="descriptionResource">Localized value for the Description property</param>
        public static void SetLocalizationForField(this List list, string siteColumnName, string cultureName, string titleResource, string descriptionResource)
        {
            FieldCollection fields = list.Fields;
            Field fld = fields.GetByInternalNameOrTitle(siteColumnName);
            SetLocalizationForField(fld, cultureName, titleResource, descriptionResource);
        }

        /// <summary>
        /// Set localized labels for field
        /// </summary>
        /// <param name="list">List to update</param>
        /// <param name="siteColumn">Site column to update</param>
        /// <param name="cultureName">Culture for the localization (en-es, nl-be, fi-fi,...)</param>
        /// <param name="titleResource">Localized value for the Title property</param>
        /// <param name="descriptionResource">Localized value for the Description property</param>
        public static void SetLocalizationForField(this List list, Field siteColumn, string cultureName, string titleResource, string descriptionResource)
        {
            SetLocalizationForField(siteColumn, cultureName, titleResource, descriptionResource);
        }

        /// <summary>
        /// Set localized labels for field
        /// </summary>
        /// <param name="field">Field to update</param>
        /// <param name="cultureName">Culture for the localization (en-es, nl-be, fi-fi,...)</param>
        /// <param name="titleResource">Localized value for the Title property</param>
        /// <param name="descriptionResource">Localized value for the Description property</param>
        public static void SetLocalizationForField(this Field field, string cultureName, string titleResource, string descriptionResource)
        {
            if (string.IsNullOrEmpty(cultureName))
                throw new ArgumentNullException("cultureName");
            if (string.IsNullOrEmpty(titleResource))
                throw new ArgumentNullException("titleResource");

            // NOTE(review): same possibly-inverted instantiation check as in
            // SetLocalizationForContentType — confirm intent before changing.
            if (field.IsObjectPropertyInstantiated("TitleResource"))
            {
                field.Context.Load(field);
                field.Context.ExecuteQuery();
            }

            // Set translations for the culture
            field.TitleResource.SetValueForUICulture(cultureName, titleResource);
            field.DescriptionResource.SetValueForUICulture(cultureName, descriptionResource);
            // Push changes down to lists/content types that use this field.
            field.UpdateAndPushChanges(true);
            field.Context.ExecuteQuery();
        }

        #endregion
#endif
    }
}
shankargurav/PnP
OfficeDevPnP.Core/OfficeDevPnP.Core/AppModelExtensions/FieldAndContentTypeExtensions.cs
C#
apache-2.0
78,859
/* * Copyright 2000-2008 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jetbrains.yaml; import com.intellij.lexer.Lexer; import com.intellij.openapi.editor.colors.TextAttributesKey; import com.intellij.openapi.fileTypes.SyntaxHighlighterBase; import com.intellij.psi.tree.IElementType; import org.jetbrains.annotations.NotNull; import org.jetbrains.yaml.lexer.YAMLFlexLexer; import java.util.HashMap; import java.util.Map; /** * @author: oleg * @date: Feb 11, 2008 */ public class YAMLSyntaxHighlighter extends SyntaxHighlighterBase implements YAMLTokenTypes { private static final Map<IElementType, TextAttributesKey> ATTRIBUTES = new HashMap<IElementType, TextAttributesKey>(); static { ATTRIBUTES.put(SCALAR_KEY, YAMLHighlighter.SCALAR_KEY); ATTRIBUTES.put(SCALAR_STRING, YAMLHighlighter.SCALAR_STRING); ATTRIBUTES.put(SCALAR_DSTRING, YAMLHighlighter.SCALAR_DSTRING); ATTRIBUTES.put(SCALAR_TEXT, YAMLHighlighter.SCALAR_TEXT); ATTRIBUTES.put(SCALAR_LIST, YAMLHighlighter.SCALAR_LIST); ATTRIBUTES.put(COMMENT, YAMLHighlighter.COMMENT); ATTRIBUTES.put(TEXT, YAMLHighlighter.TEXT); ATTRIBUTES.put(LBRACE, YAMLHighlighter.SIGN); ATTRIBUTES.put(RBRACE, YAMLHighlighter.SIGN); ATTRIBUTES.put(LBRACKET, YAMLHighlighter.SIGN); ATTRIBUTES.put(RBRACKET, YAMLHighlighter.SIGN); ATTRIBUTES.put(COMMA, YAMLHighlighter.SIGN); ATTRIBUTES.put(QUESTION, YAMLHighlighter.SIGN); ATTRIBUTES.put(COLON, YAMLHighlighter.SIGN); ATTRIBUTES.put(DOCUMENT_MARKER, YAMLHighlighter.SIGN); 
ATTRIBUTES.put(SEQUENCE_MARKER, YAMLHighlighter.SIGN); } @NotNull public TextAttributesKey[] getTokenHighlights(IElementType tokenType) { return SyntaxHighlighterBase.pack(ATTRIBUTES.get(tokenType)); } @NotNull public Lexer getHighlightingLexer() { return new YAMLFlexLexer(); } }
Soya93/Extract-Refactoring
plugins/yaml/src/org/jetbrains/yaml/YAMLSyntaxHighlighter.java
Java
apache-2.0
2,386
/*
 * Licensed to GraphHopper GmbH under one or more contributor
 * license agreements. See the NOTICE file distributed with this work for
 * additional information regarding copyright ownership.
 *
 * GraphHopper GmbH licenses this file to you under the Apache License,
 * Version 2.0 (the "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.graphhopper.jsprit.analysis.util;

import com.graphhopper.jsprit.core.util.BenchmarkResult;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Collection;

/**
 * Writes a collection of {@link BenchmarkResult}s as two HTML tables:
 * a per-instance table (times, result/vehicle statistics, best-known values)
 * followed by an averages summary table.
 */
public class HtmlBenchmarkTableWriter implements BenchmarkWriter {

    /** Destination file for the generated HTML fragment. */
    private String filename;

    /**
     * @param filename path of the file the HTML tables are written to
     */
    public HtmlBenchmarkTableWriter(String filename) {
        this.filename = filename;
    }

    @Override
    public void write(Collection<BenchmarkResult> results) {
        // try-with-resources: the original leaked the writer when any write threw.
        try (BufferedWriter writer = new BufferedWriter(new FileWriter(new File(filename)))) {
            writer.write(openTable() + newline());
            //table head
            writer.write(openRow() + newline());
            writer.write(head("inst") + newline());
            writer.write(head("runs") + newline());
            writer.write(head("&Oslash; time [sec]") + newline());
            writer.write(head("results", 4));
            writer.write(head("vehicles", 4));
            writer.write(head("res*") + newline());
            writer.write(head("veh*") + newline());
            writer.write(closeRow() + newline());
            writer.write(openRow() + newline());
            writer.write(head("") + newline());
            writer.write(head("") + newline());
            writer.write(head("") + newline());
            writer.write(head("best") + newline());
            writer.write(head("avg") + newline());
            writer.write(head("worst") + newline());
            writer.write(head("stdev") + newline());
            writer.write(head("best") + newline());
            writer.write(head("avg") + newline());
            writer.write(head("worst") + newline());
            writer.write(head("stdev") + newline());
            writer.write(head("") + newline());
            writer.write(head("") + newline());
            writer.write(closeRow() + newline());
            //data
            double sum_avg_time = 0.0;
            double sum_best_result = 0.0;
            double sum_avg_result = 0.0;
            double sum_worst_result = 0.0;
            double sum_dev_result = 0.0;
            double sum_best_veh = 0.0;
            double sum_avg_veh = 0.0;
            double sum_worst_veh = 0.0;
            double sum_dev_veh = 0.0;
            Integer runs = null;
            // Best-known sums stay null when no instance carries best-known values.
            Double sum_res_star = null;
            Double sum_veh_star = null;
            for (BenchmarkResult result : results) {
                if (runs == null) runs = result.runs;
                writer.write(openRow() + newline());
                writer.write(date(result.instance.name) + newline());
                writer.write(date(Integer.valueOf(result.runs).toString()) + newline());
                Double avg_time = round(result.getTimesStats().getMean(), 2);
                writer.write(date(Double.valueOf(avg_time).toString()) + newline());
                //bestRes
                Double best_result = round(result.getResultStats().getMin(), 2);
                writer.write(date(Double.valueOf(best_result).toString()) + newline());
                //avgRes
                Double avg_result = round(result.getResultStats().getMean(), 2);
                writer.write(date(Double.valueOf(avg_result).toString()) + newline());
                //worstRes
                Double worst_result = round(result.getResultStats().getMax(), 2);
                writer.write(date(Double.valueOf(worst_result).toString()) + newline());
                //stdevRes
                Double std_result = round(result.getResultStats().getStandardDeviation(), 2);
                writer.write(date(Double.valueOf(std_result).toString()) + newline());
                //bestVeh
                Double best_vehicle = round(result.getVehicleStats().getMin(), 2);
                writer.write(date(Double.valueOf(best_vehicle).toString()) + newline());
                //avgVeh
                Double avg_vehicle = round(result.getVehicleStats().getMean(), 2);
                writer.write(date(Double.valueOf(avg_vehicle).toString()) + newline());
                //worstVeh
                Double worst_vehicle = round(result.getVehicleStats().getMax(), 2);
                writer.write(date(Double.valueOf(worst_vehicle).toString()) + newline());
                //stdevVeh
                Double std_vehicle = round(result.getVehicleStats().getStandardDeviation(), 2);
                writer.write(date(Double.valueOf(std_vehicle).toString()) + newline());
                //bestKnownRes
                // FIX: newline() was previously concatenated INSIDE the <td> cell;
                // now appended after the closing tag like every other cell.
                writer.write(date("" + result.instance.bestKnownResult) + newline());
                //bestKnownVeh
                writer.write(date("" + result.instance.bestKnownVehicles) + newline());
                writer.write(closeRow() + newline());

                sum_avg_time += avg_time;
                sum_best_result += best_result;
                sum_avg_result += avg_result;
                sum_worst_result += worst_result;
                sum_dev_result += std_result;
                sum_best_veh += best_vehicle;
                sum_avg_veh += avg_vehicle;
                sum_worst_veh += worst_vehicle;
                sum_dev_veh += std_vehicle;
                if (result.instance.bestKnownResult != null) {
                    if (sum_res_star == null) sum_res_star = result.instance.bestKnownResult;
                    else sum_res_star += result.instance.bestKnownResult;
                }
                if (result.instance.bestKnownVehicles != null) {
                    if (sum_veh_star == null) sum_veh_star = result.instance.bestKnownVehicles;
                    else sum_veh_star += result.instance.bestKnownVehicles;
                }
            }
            // Averages row over all instances.
            writer.write(openRow() + newline());
            writer.write(date("&Oslash;") + newline());
            writer.write(date("" + runs) + newline());
            Double average_time = round(sum_avg_time / (double) results.size(), 2);
            writer.write(date(Double.valueOf(average_time).toString()) + newline());
            //bestRes
            writer.write(date(Double.valueOf(round(sum_best_result / (double) results.size(), 2)).toString()) + newline());
            //avgRes
            Double average_result = round(sum_avg_result / (double) results.size(), 2);
            writer.write(date(Double.valueOf(average_result).toString()) + newline());
            //worstRes
            writer.write(date(Double.valueOf(round(sum_worst_result / (double) results.size(), 2)).toString()) + newline());
            //stdevRes
            writer.write(date(Double.valueOf(round(sum_dev_result / (double) results.size(), 2)).toString()) + newline());
            //bestVeh
            writer.write(date(Double.valueOf(round(sum_best_veh / (double) results.size(), 2)).toString()) + newline());
            //avgVeh
            Double average_vehicles = round(sum_avg_veh / (double) results.size(), 2);
            writer.write(date(Double.valueOf(average_vehicles).toString()) + newline());
            //worstVeh
            writer.write(date(Double.valueOf(round(sum_worst_veh / (double) results.size(), 2)).toString()) + newline());
            //stdevVeh
            writer.write(date(Double.valueOf(round(sum_dev_veh / (double) results.size(), 2)).toString()) + newline());
            //bestKnownRes
            Double delta_res = null;
            if (sum_res_star != null) {
                writer.write(date(Double.valueOf(round(sum_res_star.doubleValue() / (double) results.size(), 2)).toString()) + newline());
                delta_res = (sum_avg_result / sum_res_star - 1) * 100;
            } else writer.write(date("null") + newline());
            //bestKnownVeh
            Double delta_veh = null;
            if (sum_veh_star != null) {
                writer.write(date(Double.valueOf(round(sum_veh_star.doubleValue() / (double) results.size(), 2)).toString()) + newline());
                delta_veh = (sum_avg_veh - sum_veh_star) / (double) results.size();
            } else writer.write(date("null") + newline());
            writer.write(closeRow() + newline());
            writer.write(closeTable() + newline());
            writer.write("avg. percentage deviation to best-known result: " + round(delta_res, 2) + newline() + newline());
            writer.write("avg. absolute deviation to best-known vehicles: " + round(delta_veh, 2) + newline());

            // Compact one-row summary table (right-aligned numeric cells).
            writer.write(openTable() + newline());
            writer.write(openRow() + newline());
            writer.write(date("") + newline());
            writer.write(date("") + newline());
            writer.write(date("") + newline());
            writer.write(date("") + newline());
            writer.write(date(Double.valueOf(average_time).toString(), "align=\"right\"") + newline());
            writer.write(date(Double.valueOf(average_result).toString(), "align=\"right\"") + newline());
            writer.write(date(Double.valueOf(average_vehicles).toString(), "align=\"right\"") + newline());
            if (delta_res != null) {
                writer.write(date(Double.valueOf(round(delta_res, 2)).toString(), "align=\"right\"") + newline());
            } else writer.write(date("n.a.") + newline());
            if (delta_veh != null) {
                writer.write(date(Double.valueOf(round(delta_veh, 2)).toString(), "align=\"right\"") + newline());
            } else writer.write(date("n.a.") + newline());
            writer.write(closeRow() + newline());
            writer.write(closeTable() + newline());
        } catch (IOException e) {
            // Preserved best-effort behavior: report and continue (interface has no throws).
            e.printStackTrace();
        }
    }

    /** Header cell spanning {@code i} columns. */
    private String head(String string, int i) {
        return "<th colspan=\"" + i + "\">" + string + "</th>";
    }

    /** Rounds to {@code i} decimal places; passes null through. */
    private Double round(Double value, int i) {
        if (value == null) return null;
        long roundedVal = Math.round(value * Math.pow(10, i));
        return (double) roundedVal / (double) (Math.pow(10, i));
    }

    /** Simple header cell. */
    private String head(String head) {
        return "<th>" + head + "</th>";
    }

    private String closeTable() {
        return "</table>";
    }

    private String openTable() {
        return "<table>";
    }

    private String closeRow() {
        return "</tr>";
    }

    /** Data cell. */
    private String date(String date) {
        return "<td>" + date + "</td>";
    }

    /** Data cell with extra tag attributes (e.g. alignment). */
    private String date(String date, String metaData) {
        return "<td " + metaData + ">" + date + "</td>";
    }

    private String newline() {
        return "\n";
    }

    private String openRow() {
        return "<tr>";
    }
}
LEOLEOl/jsprit-me
jsprit-analysis/src/main/java/com/graphhopper/jsprit/analysis/util/HtmlBenchmarkTableWriter.java
Java
apache-2.0
11,463
// Copyright 2016 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package command import ( "encoding/binary" "encoding/json" "fmt" "io" "os" "os/exec" "path/filepath" "time" "github.com/coreos/etcd/client" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api" "github.com/coreos/etcd/etcdserver/api/membership" "github.com/coreos/etcd/etcdserver/api/snap" "github.com/coreos/etcd/etcdserver/api/v2error" "github.com/coreos/etcd/etcdserver/api/v2store" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/mvcc" "github.com/coreos/etcd/mvcc/backend" "github.com/coreos/etcd/mvcc/mvccpb" "github.com/coreos/etcd/pkg/pbutil" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/raft/raftpb" "github.com/coreos/etcd/wal" "github.com/coreos/etcd/wal/walpb" "github.com/gogo/protobuf/proto" "github.com/spf13/cobra" "go.uber.org/zap" ) var ( migrateExcludeTTLKey bool migrateDatadir string migrateWALdir string migrateTransformer string ) // NewMigrateCommand returns the cobra command for "migrate". 
// NewMigrateCommand builds the "migrate" cobra command and registers its flags.
func NewMigrateCommand() *cobra.Command {
	mc := &cobra.Command{
		Use:   "migrate",
		Short: "Migrates keys in a v2 store to a mvcc store",
		Run:   migrateCommandFunc,
	}

	mc.Flags().BoolVar(&migrateExcludeTTLKey, "no-ttl", false, "Do not convert TTL keys")
	mc.Flags().StringVar(&migrateDatadir, "data-dir", "", "Path to the data directory")
	mc.Flags().StringVar(&migrateWALdir, "wal-dir", "", "Path to the WAL directory")
	mc.Flags().StringVar(&migrateTransformer, "transformer", "", "Path to the user-provided transformer program")
	return mc
}

// migrateCommandFunc rebuilds the v2 store from WAL/snapshot, pipes its keys
// through a transformer (external program or built-in), and writes the
// resulting mvcc KVs into the backend.
func migrateCommandFunc(cmd *cobra.Command, args []string) {
	var (
		writer io.WriteCloser
		reader io.ReadCloser
		errc   chan error
	)
	if migrateTransformer != "" {
		writer, reader, errc = startTransformer()
	} else {
		fmt.Println("using default transformer")
		writer, reader, errc = defaultTransformer()
	}

	st, index := rebuildStoreV2()
	be := prepareBackend()
	defer be.Close()

	// Producer goroutine: stream v2 store JSON into the transformer's stdin.
	go func() {
		writeStore(writer, st)
		writer.Close()
	}()

	// NOTE(review): readKeys' error return is discarded here; a read/unmarshal
	// failure is only surfaced indirectly via errc — verify this is intended.
	readKeys(reader, be)
	mvcc.UpdateConsistentIndex(be, index)
	err := <-errc
	if err != nil {
		fmt.Println("failed to transform keys")
		ExitWithError(ExitError, err)
	}

	fmt.Println("finished transforming keys")
}

// prepareBackend opens the bolt backend under the data dir and ensures the
// "key" and "meta" buckets exist.
func prepareBackend() backend.Backend {
	var be backend.Backend
	bch := make(chan struct{})
	dbpath := filepath.Join(migrateDatadir, "member", "snap", "db")
	// Opening can block while a running etcd holds the file lock, so do it in
	// a goroutine and warn the user after one second.
	go func() {
		defer close(bch)
		be = backend.NewDefaultBackend(dbpath)
	}()
	select {
	case <-bch:
	case <-time.After(time.Second):
		fmt.Fprintf(os.Stderr, "waiting for etcd to close and release its lock on %q\n", dbpath)
		<-bch
	}
	tx := be.BatchTx()
	tx.Lock()
	tx.UnsafeCreateBucket([]byte("key"))
	tx.UnsafeCreateBucket([]byte("meta"))
	tx.Unlock()
	return be
}

// rebuildStoreV2 reconstructs the v2 store by loading the latest snapshot and
// replaying all WAL entries after it. Returns the store and the highest
// applied raft index.
func rebuildStoreV2() (v2store.Store, uint64) {
	var index uint64
	cl := membership.NewCluster(zap.NewExample(), "")

	waldir := migrateWALdir
	if len(waldir) == 0 {
		waldir = filepath.Join(migrateDatadir, "member", "wal")
	}
	snapdir := filepath.Join(migrateDatadir, "member", "snap")

	ss := snap.New(zap.NewExample(), snapdir)
	snapshot, err := ss.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		ExitWithError(ExitError, err)
	}

	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
		index = snapshot.Metadata.Index
	}

	w, err := wal.OpenForRead(zap.NewExample(), waldir, walsnap)
	if err != nil {
		ExitWithError(ExitError, err)
	}
	defer w.Close()

	_, _, ents, err := w.ReadAll()
	if err != nil {
		ExitWithError(ExitError, err)
	}

	st := v2store.New()
	if snapshot != nil {
		err := st.Recovery(snapshot.Data)
		if err != nil {
			ExitWithError(ExitError, err)
		}
	}

	cl.SetStore(st)
	cl.Recover(api.UpdateCapability)

	applier := etcdserver.NewApplierV2(zap.NewExample(), st, cl)
	for _, ent := range ents {
		if ent.Type == raftpb.EntryConfChange {
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, ent.Data)
			applyConf(cc, cl)
			continue
		}

		var raftReq pb.InternalRaftRequest
		if !pbutil.MaybeUnmarshal(&raftReq, ent.Data) { // backward compatible
			var r pb.Request
			pbutil.MustUnmarshal(&r, ent.Data)
			applyRequest(&r, applier)
		} else {
			if raftReq.V2 != nil {
				req := raftReq.V2
				applyRequest(req, applier)
			}
		}
		if ent.Index > index {
			index = ent.Index
		}
	}

	return st, index
}

// applyConf applies a raft configuration-change entry to the in-memory cluster.
// Invalid changes are silently skipped (already rejected at proposal time).
func applyConf(cc raftpb.ConfChange, cl *membership.RaftCluster) {
	if err := cl.ValidateConfigurationChange(cc); err != nil {
		return
	}
	switch cc.Type {
	case raftpb.ConfChangeAddNode:
		m := new(membership.Member)
		if err := json.Unmarshal(cc.Context, m); err != nil {
			panic(err)
		}
		cl.AddMember(m)
	case raftpb.ConfChangeRemoveNode:
		cl.RemoveMember(types.ID(cc.NodeID))
	case raftpb.ConfChangeUpdateNode:
		m := new(membership.Member)
		if err := json.Unmarshal(cc.Context, m); err != nil {
			panic(err)
		}
		cl.UpdateRaftAttributes(m.ID, m.RaftAttributes)
	}
}

// applyRequest replays a single v2 API request against the store applier.
func applyRequest(req *pb.Request, applyV2 etcdserver.ApplierV2) {
	r := (*etcdserver.RequestV2)(req)
	r.TTLOptions()
	switch r.Method {
	case "POST":
		applyV2.Post(r)
	case "PUT":
		applyV2.Put(r)
	case "DELETE":
		applyV2.Delete(r)
	case "QGET":
		applyV2.QGet(r)
	case "SYNC":
		applyV2.Sync(r)
	default:
		panic("unknown command")
	}
}

// writeStore serializes the whole v2 tree rooted at "/1" to w. Exits the
// process with status 0 when there is nothing to migrate.
func writeStore(w io.Writer, st v2store.Store) uint64 {
	all, err := st.Get("/1", true, true)
	if err != nil {
		if eerr, ok := err.(*v2error.Error); ok && eerr.ErrorCode == v2error.EcodeKeyNotFound {
			fmt.Println("no v2 keys to migrate")
			os.Exit(0)
		}
		ExitWithError(ExitError, err)
	}
	return writeKeys(w, all.Node)
}

// writeKeys recursively writes node n and its children as JSON to w and
// returns the maximum ModifiedIndex seen in the subtree.
func writeKeys(w io.Writer, n *v2store.NodeExtern) uint64 {
	maxIndex := n.ModifiedIndex
	nodes := n.Nodes
	// remove store v2 bucket prefix ("/1" -> key relative to root)
	n.Key = n.Key[2:]
	if n.Key == "" {
		n.Key = "/"
	}
	if n.Dir {
		n.Nodes = nil
	}
	// Skip TTL-carrying keys when --no-ttl was given.
	if !migrateExcludeTTLKey || n.TTL == 0 {
		b, err := json.Marshal(n)
		if err != nil {
			ExitWithError(ExitError, err)
		}
		fmt.Fprint(w, string(b))
	}
	for _, nn := range nodes {
		max := writeKeys(w, nn)
		if max > maxIndex {
			maxIndex = max
		}
	}
	return maxIndex
}

// readKeys reads length-prefixed (little-endian int64) protobuf KeyValue
// records from r until EOF and writes each into the backend.
func readKeys(r io.Reader, be backend.Backend) error {
	for {
		length64, err := readInt64(r)
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		buf := make([]byte, int(length64))
		if _, err = io.ReadFull(r, buf); err != nil {
			return err
		}

		var kv mvccpb.KeyValue
		err = proto.Unmarshal(buf, &kv)
		if err != nil {
			return err
		}
		mvcc.WriteKV(be, kv)
	}
}

// readInt64 reads one little-endian int64 framing header from r.
func readInt64(r io.Reader) (int64, error) {
	var n int64
	err := binary.Read(r, binary.LittleEndian, &n)
	return n, err
}

// startTransformer launches the user-provided transformer process and returns
// its stdin, stdout, and a channel that yields the process exit error.
func startTransformer() (io.WriteCloser, io.ReadCloser, chan error) {
	cmd := exec.Command(migrateTransformer)
	cmd.Stderr = os.Stderr
	writer, err := cmd.StdinPipe()
	if err != nil {
		ExitWithError(ExitError, err)
	}
	reader, rerr := cmd.StdoutPipe()
	if rerr != nil {
		ExitWithError(ExitError, rerr)
	}
	if err := cmd.Start(); err != nil {
		ExitWithError(ExitError, err)
	}
	errc := make(chan error, 1)
	go func() {
		errc <- cmd.Wait()
	}()
	return writer, reader, errc
}

// defaultTransformer is the in-process fallback: it decodes v2 JSON nodes and
// emits length-prefixed mvccpb.KeyValue protobufs through a pair of pipes.
func defaultTransformer() (io.WriteCloser, io.ReadCloser, chan error) {
	// transformer decodes v2 keys from sr
	sr, sw := io.Pipe()
	// transformer encodes v3 keys into dw
	dr, dw := io.Pipe()

	decoder := json.NewDecoder(sr)

	errc := make(chan error, 1)

	go func() {
		defer func() {
			sr.Close()
			dw.Close()
		}()

		for decoder.More() {
			node := &client.Node{}
			if err := decoder.Decode(node); err != nil {
				errc <- err
				return
			}

			kv := transform(node)
			if kv == nil {
				continue
			}

			data, err := proto.Marshal(kv)
			if err != nil {
				errc <- err
				return
			}
			// 8-byte little-endian length prefix, then the marshaled record.
			buf := make([]byte, 8)
			binary.LittleEndian.PutUint64(buf, uint64(len(data)))
			if _, err := dw.Write(buf); err != nil {
				errc <- err
				return
			}
			if _, err := dw.Write(data); err != nil {
				errc <- err
				return
			}
		}

		errc <- nil
	}()

	return sw, dr, errc
}

// transform converts one v2 node into an mvcc KeyValue; directories yield nil.
func transform(n *client.Node) *mvccpb.KeyValue {
	const unKnownVersion = 1
	if n.Dir {
		return nil
	}
	kv := &mvccpb.KeyValue{
		Key:            []byte(n.Key),
		Value:          []byte(n.Value),
		CreateRevision: int64(n.CreatedIndex),
		ModRevision:    int64(n.ModifiedIndex),
		Version:        unKnownVersion,
	}
	return kv
}
sebrandon1/etcd
etcdctl/ctlv3/command/migrate_command.go
GO
apache-2.0
8,980
/* Copyright 1996-2008 Ariba, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. $Id:$ */ package ariba.ideplugin.idea.lang.structure; import ariba.ideplugin.idea.lang.grammer.psi.OSSFile; import com.intellij.ide.structureView.StructureViewModel; import com.intellij.ide.structureView.StructureViewModelBase; import com.intellij.ide.structureView.StructureViewTreeElement; import com.intellij.psi.PsiFile; public class OSSStructureViewModel extends StructureViewModelBase implements StructureViewModel.ElementInfoProvider { public OSSStructureViewModel (PsiFile psiFile) { super(psiFile, new OSSFileStructureViewElement((OSSFile)psiFile)); } @Override public boolean isAlwaysShowsPlus (StructureViewTreeElement element) { return false; } @Override public boolean isAlwaysLeaf (StructureViewTreeElement element) { return element instanceof OSSFile; } }
pascalrobert/aribaweb
src/ideplugin/ariba/ideplugin/idea/lang/structure/OSSStructureViewModel.java
Java
apache-2.0
1,455
/// <reference path="../../src/Dexie.js" />
/// <reference path="../../addons/Dexie.Observable/Dexie.Observable.js" />
/// <reference path="https://code.jquery.com/jquery-2.1.1.js" />

/**
 * Monitors the set of Dexie databases in the browser.
 *
 * Fixes over the previous revision:
 *  - removed a dangling `db.` member access that made the file a syntax error;
 *  - defined the previously-missing `updateSearch` handler (was a ReferenceError);
 *  - removed the duplicate initial `reload()` call;
 *  - opened databases are now tracked so `reload()` can actually close them.
 *
 * @param {jQuery} searchBox text input used to filter database names
 * @param {jQuery} tbody     target table body for rendering
 *                           (NOTE(review): rendering into tbody was never
 *                           implemented in the original — still a TODO)
 */
function DBMonitor(searchBox, tbody) {
    var searchValue = searchBox.val();
    var databases = [];

    searchBox.change(updateSearch);

    // Another tab adding/removing a database updates this localStorage key.
    window.addEventListener('storage', function (event) {
        if (event.key === "Dexie.DatabaseNames") {
            reload();
        }
    });

    reload();

    // Re-read the filter text and refresh the database list.
    function updateSearch() {
        searchValue = searchBox.val();
        reload();
    }

    function reload() {
        // Close handles from the previous pass before re-opening.
        databases.forEach(function (db) {
            db.close();
        });
        databases = [];
        Dexie.getDatabaseNames(function (names) {
            for (var i = 0; i < names.length; ++i) {
                // Apply the search filter, if any.
                if (searchValue && names[i].indexOf(searchValue) === -1) continue;
                var db = new Dexie(names[i]);
                databases.push(db);
                // TODO: render db info into `tbody`.
            }
        });
    }
}
cesarmarinhorj/Dexie.js
samples/db-monitor/db-monitor.js
JavaScript
apache-2.0
811
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.facebook.presto.sql.planner.assertions; import com.facebook.presto.Session; import com.facebook.presto.metadata.Metadata; import com.facebook.presto.sql.planner.Symbol; import com.facebook.presto.sql.planner.plan.GroupIdNode; import com.facebook.presto.sql.planner.plan.PlanNode; import java.util.List; import java.util.Map; import static com.google.common.base.MoreObjects.toStringHelper; public class GroupIdMatcher implements Matcher { private final List<List<Symbol>> groups; private final Map<Symbol, Symbol> identityMappings; public GroupIdMatcher(List<List<Symbol>> groups, Map<Symbol, Symbol> identityMappings) { this.groups = groups; this.identityMappings = identityMappings; } @Override public boolean matches(PlanNode node, Session session, Metadata metadata, ExpressionAliases expressionAliases) { if (!(node instanceof GroupIdNode)) { return false; } GroupIdNode groudIdNode = (GroupIdNode) node; List<List<Symbol>> actualGroups = groudIdNode.getGroupingSets(); Map<Symbol, Symbol> actualArgumentMappings = groudIdNode.getArgumentMappings(); if (actualGroups.size() != groups.size()) { return false; } for (int i = 0; i < actualGroups.size(); i++) { if (!AggregationMatcher.matches(actualGroups.get(i), groups.get(i))) { return false; } } if (!AggregationMatcher.matches(identityMappings.keySet(), actualArgumentMappings.keySet())) { return false; } return true; } @Override public String toString() { return 
toStringHelper(this) .add("groups", groups) .toString(); } }
albertocsm/presto
presto-main/src/test/java/com/facebook/presto/sql/planner/assertions/GroupIdMatcher.java
Java
apache-2.0
2,353
#!/usr/bin/python
#from openflow.optin_manager.sfa.util.sfalogging import logger


class RSpecVersion:
    """Descriptor for one RSpec format/version.

    The class-level attributes are defaults; concrete RSpec version classes
    are expected to shadow them with their own values.  Note that ``extensions``
    and ``elements`` are class attributes shared by every instance that does
    not override them.
    """
    type = None
    content_type = None
    version = None
    schema = None
    namespace = None
    extensions = {}
    # list(...) keeps this working on both Python 2 and 3: on Python 3,
    # dict views cannot be concatenated to a list with '+'.
    namespaces = dict(list(extensions.items()) + [('default', namespace)])
    elements = []
    enabled = False

    def __init__(self, xml=None):
        # Raw XML this version descriptor is associated with, if any.
        self.xml = xml

    def to_dict(self):
        """Return the version descriptor as a plain dict.

        ``extensions`` is materialized as a list so the result is a concrete
        value on Python 3 as well (``dict.values()`` is a view there).
        """
        return {
            'type': self.type,
            'version': self.version,
            'schema': self.schema,
            'namespace': self.namespace,
            'extensions': list(self.extensions.values()),
        }

    def __str__(self):
        return "%s %s" % (self.type, self.version)
dana-i2cat/felix
optin_manager/src/python/openflow/optin_manager/sfa/rspecs/version.py
Python
apache-2.0
712
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.falcon.extensions.mirroring.hdfs;

import org.apache.commons.lang3.StringUtils;
import org.apache.falcon.FalconException;
import org.apache.falcon.entity.ClusterHelper;
import org.apache.falcon.entity.v0.cluster.Cluster;
import org.apache.falcon.extensions.AbstractExtension;

import java.net.URI;
import java.net.URISyntaxException;
import java.util.Properties;

/**
 * Hdfs mirroring extension.
 */
public class HdfsMirroringExtension extends AbstractExtension {

    private static final String COMMA_SEPARATOR = ",";
    private static final String EXTENSION_NAME = "HDFS-MIRRORING";

    /** Extension identifier used to register/look up this extension. */
    @Override
    public String getName() {
        return EXTENSION_NAME;
    }

    /**
     * Fails with a FalconException if any required extension property is absent.
     * Blank (non-null) values pass this check.
     */
    @Override
    public void validate(final Properties extensionProperties) throws FalconException {
        for (HdfsMirroringExtensionProperties option : HdfsMirroringExtensionProperties.values()) {
            if (extensionProperties.getProperty(option.getName()) == null && option.isRequired()) {
                throw new FalconException("Missing extension property: " + option.getName());
            }
        }
    }

    /**
     * Derives the extra properties the mirroring job needs from the user-supplied
     * ones: distcp defaults, fully-qualified source paths, namenode-stripped
     * target path, cluster FS endpoints, and the TDE flag default.
     *
     * @throws FalconException on unknown clusters or unparseable paths
     */
    @Override
    public Properties getAdditionalProperties(final Properties extensionProperties) throws FalconException {
        Properties additionalProperties = new Properties();

        // Add default properties if not passed
        String distcpMaxMaps = extensionProperties.getProperty(
                HdfsMirroringExtensionProperties.DISTCP_MAX_MAPS.getName());
        if (StringUtils.isBlank(distcpMaxMaps)) {
            additionalProperties.put(HdfsMirroringExtensionProperties.DISTCP_MAX_MAPS.getName(), "1");
        }

        String distcpMapBandwidth = extensionProperties.getProperty(
                HdfsMirroringExtensionProperties.DISTCP_MAP_BANDWIDTH_IN_MB.getName());
        if (StringUtils.isBlank(distcpMapBandwidth)) {
            additionalProperties.put(HdfsMirroringExtensionProperties.DISTCP_MAP_BANDWIDTH_IN_MB.getName(), "100");
        }

        // Construct fully qualified hdfs src path
        String srcPaths = extensionProperties.getProperty(HdfsMirroringExtensionProperties
                .SOURCE_DIR.getName());
        StringBuilder absoluteSrcPaths = new StringBuilder();
        String sourceClusterName = extensionProperties.getProperty(
                HdfsMirroringExtensionProperties.SOURCE_CLUSTER.getName());

        // Since source cluster get read interface
        Cluster srcCluster = ClusterHelper.getCluster(sourceClusterName);
        if (srcCluster == null) {
            throw new FalconException("Cluster entity " + sourceClusterName + " not found");
        }
        String srcClusterEndPoint = ClusterHelper.getReadOnlyStorageUrl(srcCluster);

        if (StringUtils.isNotBlank(srcPaths)) {
            String[] paths = srcPaths.split(COMMA_SEPARATOR);
            URI pathUri;
            for (String path : paths) {
                try {
                    pathUri = new URI(path.trim());
                } catch (URISyntaxException e) {
                    throw new FalconException(e);
                }
                // Paths without an authority are relative to the source cluster's
                // read-only endpoint; prefix them so distcp gets absolute URIs.
                String authority = pathUri.getAuthority();
                StringBuilder srcpath = new StringBuilder();
                if (authority == null) {
                    srcpath.append(srcClusterEndPoint);
                }
                srcpath.append(path.trim());
                srcpath.append(COMMA_SEPARATOR);
                absoluteSrcPaths.append(srcpath);
            }
        }
        additionalProperties.put(HdfsMirroringExtensionProperties.SOURCE_DIR.getName(),
                StringUtils.removeEnd(absoluteSrcPaths.toString(), COMMA_SEPARATOR));

        // Target dir shouldn't have the namenode
        String targetDir = extensionProperties.getProperty(HdfsMirroringExtensionProperties
                .TARGET_DIR.getName());
        URI targetPathUri;
        try {
            targetPathUri = new URI(targetDir.trim());
        } catch (URISyntaxException e) {
            throw new FalconException(e);
        }
        if (targetPathUri.getScheme() != null) {
            additionalProperties.put(HdfsMirroringExtensionProperties.TARGET_DIR.getName(),
                    targetPathUri.getPath());
        }

        // add sourceClusterFS and targetClusterFS
        additionalProperties.put(HdfsMirroringExtensionProperties.SOURCE_CLUSTER_FS_WRITE_ENDPOINT.getName(),
                ClusterHelper.getStorageUrl(srcCluster));

        String targetClusterName = extensionProperties.getProperty(
                HdfsMirroringExtensionProperties.TARGET_CLUSTER.getName());
        Cluster targetCluster = ClusterHelper.getCluster(targetClusterName);
        if (targetCluster == null) {
            throw new FalconException("Cluster entity " + targetClusterName + " not found");
        }
        additionalProperties.put(HdfsMirroringExtensionProperties.TARGET_CLUSTER_FS_WRITE_ENDPOINT.getName(),
                ClusterHelper.getStorageUrl(targetCluster));

        // TDE defaults to disabled when the caller did not set it.
        if (StringUtils.isBlank(
                extensionProperties.getProperty(HdfsMirroringExtensionProperties.TDE_ENCRYPTION_ENABLED.getName()))) {
            additionalProperties.put(HdfsMirroringExtensionProperties.TDE_ENCRYPTION_ENABLED.getName(), "false");
        }
        // NOTE(review): addAdditionalDistCPProperties is not defined in this file —
        // presumably inherited from AbstractExtension; confirm in the base class.
        addAdditionalDistCPProperties(extensionProperties, additionalProperties);
        return additionalProperties;
    }
}
vramachan/falcon
extensions/src/main/java/org/apache/falcon/extensions/mirroring/hdfs/HdfsMirroringExtension.java
Java
apache-2.0
6,214
<?php /* * XXX */ ?> <?php $this->load->view('header');?> <div class="container" id="main-content"> <h2 class="alert">The boring details</h2> <h3>Setup and initialisation</h3> <p> First, include javascript for css resources for <strong>jQuery</strong>, <strong>Bootstrap</strong>, and the widget itself: </p> <pre class="prettyprint"> &lt;script src="http://code.jquery.com/jquery-1.9.1.min.js"&gt;&lt;/script&gt; &lt;link href="http://netdna.bootstrapcdn.com/twitter-bootstrap/2.3.1/css/bootstrap-combined.min.css" rel="stylesheet"&gt;&lt;/link&gt; &lt;script src="http://netdna.bootstrapcdn.com/twitter-bootstrap/2.3.1/js/bootstrap.min.js"&gt;&lt;/script&gt; &lt;link href="css/rosearch_widget.css" rel="stylesheet"&gt;&lt;/link&gt; &lt;script src="js/rosearch_widget.js"&gt;&lt;/script&gt; </pre> <p> The input field is a normal text box: </p> <pre class="prettyprint"> &lt;input type="text" id="rosearch"&gt; </pre> <p> And there's a javascript snippet to initialise the widget: </p> <pre class="prettyprint"> $(document).ready(function() { $("#rosearch").ro_search_widget(); }); </pre> <p> The widget takes some initialisation options, passed as a plain javascript object/hash: </p> <pre class="prettyprint"> $(document).ready(function() { $("#rosearch").ro_search_widget(<strong>{[option_name]:[option_value],...}</strong>); }); </pre> <h3>The options</h3> <dl class="dl-horizontal"> <dt>class</dt> <dd>Registry object class to filter by (activity, party, collection, service). Defaults to 'all'.</dd> <dt>datasource</dt> <dd>Data Source ID to filter by. Defaults to 'all'.</dd> <dt>lock_presets</dt> <dd>If <em>class</em> or <em>datasource</em> are set [to something other than 'all'], should the selection be locked? Defaults to boolean <code>false</code>.</dd> <dt>endpoint</dt> <dd>URL for the search service. Defaults to <code>/registry/registry_object_search/</code></dd> <dt>error_msg</dt> <dd>Error message title. 
set to boolean <code>false</code> to suppress error messages (displayed using javascript alert)</dd> <dt>target_field</dt> <dd>Which registry object field to populate the input box with. Defaults to <code>key</code>, but can be anything that's available. (Inspect the attached record from the <code>selected.rosearch.ands</code> event to see what's in a registry object.)</dd> </dl> <h3>Further interaction</h3> <h4>Resetting</h4> <p> An initialised widget can be removed by passing in the <code>reset</code> command: </p> <pre class="prettyprint"> ... $("#rosearch").ro_search_widget('reset'); </pre> <h4>Events</h4> <p> Once a search result has been selected from the widget's modal dialogue, a <code>selected.rosearch.ands</code> event will be triggered from the input box, with a registry object hash passed along. The input box will also have its content updated (according to the widget's <code>target_field</code> setting); the event is there to provide access to the full registry object record for additional use: </p> <pre class="prettyprint"> $("#rosearch").on('selected.rosearch.ands', function(event, registry_object) { ... }); </pre> <h2 class="alert alert-success">The actual demonstrator</h2> <form class="form form-horizontal"> <div class="control-group"> <label class="control-label" for="rosearch">Related object</label> <div class="controls"> <input type="text" id="rosearch"/> <span class="help-block">Note the widget adds the search button; all you need to provide is an input box</span> </div> </div> </form> </div> <?php $this->load->view('footer');?>
au-research/ANDS-Registry-Contrib
registry_object_search/views/demo.php
PHP
apache-2.0
3,655
// NOTE(review): this is Alloy-generated controller output (path contains
// "_generated"; fixture for ALOY-1516). Do not hand-edit the logic — regenerate
// from the Alloy view/controller sources instead.

// Pops `key` out of `obj` (deleting it) and returns its value, or null.
function __processArg(obj, key) {
    var arg = null;
    if (obj) {
        arg = obj[key] || null;
        delete obj[key];
    }
    return arg;
}

// Generated controller for the "index" view: builds the window and two labels,
// wires them onto `$`, and opens the window.
function Controller() {
    require("alloy/controllers/BaseController").apply(this, Array.prototype.slice.call(arguments));
    this.__controllerPath = "index";
    this.args = arguments[0] || {};
    if (arguments[0]) {
        // Strip Alloy-internal arguments before they reach user code.
        {
            __processArg(arguments[0], "__parentSymbol");
        }
        {
            __processArg(arguments[0], "$model");
        }
        {
            __processArg(arguments[0], "__itemTemplate");
        }
    }
    var $ = this;
    var exports = {};
    $.__views.index = Ti.UI.createWindow({
        backgroundColor: "#fff",
        fullscreen: false,
        exitOnClose: true,
        id: "index"
    });
    $.__views.index && $.addTopLevelView($.__views.index);
    $.__views.appVersion = Ti.UI.createLabel({
        id: "appVersion",
        textAlign: "centerAlign",
        text: Ti.App.getVersion() + "test",
        height: Ti.UI.SIZE,
        top: 50
    });
    $.__views.index.add($.__views.appVersion);
    // NOTE(review): second label reuses id "appVersion", so $.__views.appVersion
    // (and $.appVersion after _.extend) ends up pointing at this second label.
    // Presumably intentional for the ALOY-1516 duplicate-id test case — confirm
    // against the fixture's expected behavior before changing.
    $.__views.appVersion = Ti.UI.createLabel({
        id: "appVersion",
        textAlign: "centerAlign",
        text: "test" + Ti.App.getVersion(),
        height: Ti.UI.SIZE,
        top: 100
    });
    $.__views.index.add($.__views.appVersion);
    exports.destroy = function() {};
    // Expose generated views (and exports) as properties of the controller.
    _.extend($, $.__views);
    $.index.open();
    _.extend($, exports);
}

var Alloy = require("alloy"), Backbone = Alloy.Backbone, _ = Alloy._;

module.exports = Controller;
brentonhouse/brentonhouse.alloy
test/apps/testing/ALOY-1516/_generated/windows/alloy/controllers/index.js
JavaScript
apache-2.0
1,575
import httplib
import logging
#import traceback

from xml.etree.ElementTree import _escape_cdata

from pypes.component import Component

log = logging.getLogger(__name__)


class Solr(Component):
    """Publishes pypes documents to a Solr server via its XML update handler.

    Documents received on the ``in`` port are batched into a single ``<add>``
    request and POSTed to ``http://<host>:<port><path>/update``. Document and
    per-field boosts are read from the ``boost`` meta attribute. The commit,
    commitWithin, waitFlush, waitSearcher and overwrite options of the Solr
    update handler are exposed as component parameters.
    """

    __metatype__ = 'PUBLISHER'

    def __init__(self):
        """Declares the component's parameters and removes the output port."""
        # initialize parent class
        Component.__init__(self)

        # remove the output port since this is a publisher
        self.remove_output('out')

        # solr host, port, and path (core)
        self.set_parameter('host', 'localhost')
        self.set_parameter('port', 8983)
        self.set_parameter('path', '/solr')

        # if we should commit after each batch
        # set to OFF if using the auto commit feature
        self.set_parameter('commit', 'True', ['True', 'False'])

        # wait_flush and wait_searcher
        self.set_parameter('wait_flush', 'True', ['True', 'False'])
        self.set_parameter('wait_searcher', 'True', ['True', 'False'])

        # overwrite previously committed docs with same id
        self.set_parameter('overwrite', 'True', ['True', 'False'])

        # commit within time in milliseconds (0 = disabled)
        self.set_parameter('commit_within', '0')

        # log successful initialization message
        log.info('Component Initialized: %s' % self.__class__.__name__)

    def _escape(self, val):
        """Returns ``val`` as CDATA-escaped text.

        Non-string values are converted via ``__str__``; returns None when the
        value cannot be converted (i.e. ``__str__`` raises).
        """
        result = None
        if isinstance(val, (str, unicode)):
            result = _escape_cdata(val)
        else:
            try:
                strval = val.__str__()
            except:
                pass
            else:
                result = _escape_cdata(strval)
        return result

    def run(self):
        # Define our components entry point
        while True:

            # get parameters outside doc loop for better performance
            try:
                host = self.get_parameter('host')
                if host is None:
                    raise ValueError('Host not set')

                port = self.get_parameter('port')
                if port is None:
                    raise ValueError('Port not set')
                else:
                    port = int(port)

                path = self.get_parameter('path')
                if path is None:
                    raise ValueError('Path not set')

                commit = self.get_parameter('commit')
                if commit is None:
                    raise ValueError('Commit not set')

                commit_within = self.get_parameter('commit_within')
                if commit_within is None:
                    raise ValueError('Commit Within not set')

                wait_flush = self.get_parameter('wait_flush')
                if wait_flush is None:
                    raise ValueError('Wait Flush not set')

                wait_searcher = self.get_parameter('wait_searcher')
                if wait_searcher is None:
                    raise ValueError('Wait Searcher not set')

                overwrite = self.get_parameter('overwrite')
                if overwrite is None:
                    raise ValueError('Overwrite not set')

                # convert enumerated 'True'/'False' parameters to booleans
                commit = (commit == 'True')
                wait_flush = (wait_flush == 'True')
                wait_searcher = (wait_searcher == 'True')
                overwrite = (overwrite == 'True')

                # validate commit within value (non-negative integer)
                try:
                    commit_within = int(commit_within)
                    if commit_within < 0:
                        raise ValueError
                except:
                    log.warn('Commit Within invalid, using default')
                    commit_within = 0

                # strip trailing slash from path
                if path.endswith('/'):
                    path = path[:-1]

            except Exception as e:
                log.error('Component Failed: %s' % self.__class__.__name__)
                log.error('Reason: %s' % str(e))
                self.yield_ctrl()
                continue    # so next time we are called we continue at the top

            # for each document waiting on our input port
            cnt = 0
            writebuf = []
            for doc in self.receive_all('in'):
                cnt = cnt + 1
                try:
                    # check for a document boost
                    try:
                        boost = float(doc.get_meta('boost'))
                    except:
                        boost = 1

                    # FIX: the boost attribute string previously ended with its
                    # own '>', yielding malformed XML like '<doc boost="2.0">>'.
                    writebuf.append('<doc%s>' % ( \
                            ' boost="%s"' % boost if boost > 1 else ''))

                    for key, vals in doc:
                        # see if we need to do a field boost
                        try:
                            fboost = float(doc.get_meta('boost', attr=key))
                        except:
                            fboost = 1

                        for val in vals:
                            escaped = self._escape(val)
                            # FIX: also skip values whose escape failed
                            # (previously only `val is None` was checked, so a
                            # failed escape silently wrote the text "None").
                            if val is None or escaped is None:
                                log.warn('Invalid value in field %s' % key)
                                continue

                            writebuf.append('\t<field name="%s"%s>' \
                                '<![CDATA[%s]]></field>' % (key,
                                ' boost="%s"' % fboost if fboost > 1 else '',
                                escaped))

                    writebuf.append('</doc>')

                except Exception as e:
                    log.error('Component Failed: %s' % self.__class__.__name__)
                    log.error('Reason: %s' % str(e))
                    #log.error(traceback.print_exc())
                    # decrement the failed document
                    cnt = cnt - 1

            # check if we have a batch of documents to submit to solr
            if cnt > 0:
                batch = '<add%s%s>\n%s\n</add>\n' % ( \
                    ' overwrite="false"' if not overwrite else '',
                    ' commitWithin="%s"' % commit_within if commit_within > 0 \
                                                                      else '',
                    '\n'.join(writebuf))

                conn = None
                try:
                    headers = {'Content-Type': 'text/xml; charset=utf-8'}
                    updatepth = '%s/update' % path
                    conn = httplib.HTTPConnection(host, port)
                    conn.request('POST', updatepth, batch.encode('utf-8'), headers)
                    res = conn.getresponse()
                    if res.status != 200:
                        raise ValueError(res.reason)

                    commitstr = '<commit%s%s />' % ( \
                        ' waitFlush="false"' if not wait_flush else '',
                        ' waitSearcher="false"' if not wait_searcher else '')

                    if commit:
                        conn.request('POST', updatepth, commitstr, headers)
                        # the following causes a ResponseNotReady exception
                        #res = conn.getresponse()
                        #if res.status != 200:
                        #    raise ValueError(res.reason)

                except Exception as e:
                    log.error('Solr batch submission failed')
                    log.error('Reason: %s' % str(e))
                    #log.error(traceback.print_exc())
                finally:
                    if conn is not None:
                        conn.close()
            else:
                log.info('No documents to submit to Solr')

            # yield the CPU, allowing another component to run
            self.yield_ctrl()
klyap/pypes
ui/pypesvds/plugins/solrpublisher/solrpublisher.py
Python
apache-2.0
8,171
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.

# Example Cocoa (PyObjC) app that fetches a web page with Twisted while the
# Cocoa event loop stays responsive, using the threadedselect reactor.

# import needed classes/functions from Cocoa
from Foundation import *
from AppKit import *

# import Nib loading functionality from AppKit
from PyObjCTools import NibClassBuilder, AppHelper

# The threadedselect reactor must be installed before the public reactor
# module is imported, so it becomes the active reactor.
from twisted.internet import _threadedselect
_threadedselect.install()

from twisted.internet import reactor, protocol
from twisted.web import http
from twisted.python import log
import sys, urlparse

# create ObjC classes as defined in MainMenu.nib
NibClassBuilder.extractClasses("MainMenu")

class TwistzillaClient(http.HTTPClient):
    # Minimal HTTP GET client; hands the raw response body to the delegate.
    # `urls` is a (host, port, path) tuple, not a URL string.
    def __init__(self, delegate, urls):
        self.urls = urls
        self.delegate = delegate

    def connectionMade(self):
        # Issue the GET for the path, with an explicit Host header.
        self.sendCommand('GET', str(self.urls[2]))
        self.sendHeader('Host', '%s:%d' % (self.urls[0], self.urls[1]))
        self.sendHeader('User-Agent', 'CocoaTwistzilla')
        self.endHeaders()

    def handleResponse(self, data):
        # Forward the complete response body to the Cocoa delegate.
        self.delegate.gotResponse_(data)

class MyAppDelegate(NibClassBuilder.AutoBaseClass):
    # App delegate wired up in MainMenu.nib; outlets (resultTextField,
    # progressIndicator, messageTextField) come from the nib.

    def gotResponse_(self, html):
        # Replace the text view's contents with the fetched page and stop the
        # progress spinner.
        s = self.resultTextField.textStorage()
        s.replaceCharactersInRange_withString_((0, s.length()), html)
        self.progressIndicator.stopAnimation_(self)

    def doTwistzillaFetch_(self, sender):
        # Action: parse the URL field into (host, port, path) and kick off a
        # TwistzillaClient fetch; errors are rendered into the text view.
        s = self.resultTextField.textStorage()
        s.deleteCharactersInRange_((0, s.length()))
        self.progressIndicator.startAnimation_(self)
        u = urlparse.urlparse(self.messageTextField.stringValue())
        pos = u[1].find(':')
        if pos == -1:
            # no explicit port in the netloc; default to 80
            host, port = u[1], 80
        else:
            host, port = u[1][:pos], int(u[1][pos+1:])
        if u[2] == '':
            fname = '/'
        else:
            fname = u[2]
        host = host.encode('utf8')
        fname = fname.encode('utf8')
        protocol.ClientCreator(reactor, TwistzillaClient, self, (host, port, fname)).connectTCP(host, port).addErrback(lambda f:self.gotResponse_(f.getBriefTraceback()))

    def applicationDidFinishLaunching_(self, aNotification):
        """
        Invoked by NSApplication once the app is done launching and
        immediately before the first pass through the main event loop.
        """
        self.messageTextField.setStringValue_("http://www.twistedmatrix.com/")
        # Interleave the Twisted reactor with the Cocoa run loop.
        reactor.interleave(AppHelper.callAfter)

    def applicationShouldTerminate_(self, sender):
        # Defer Cocoa shutdown until the reactor has stopped; the shutdown
        # trigger stops the Cocoa event loop once Twisted is done.
        if reactor.running:
            reactor.addSystemEventTrigger(
                'after', 'shutdown', AppHelper.stopEventLoop)
            reactor.stop()
            return False
        return True

if __name__ == '__main__':
    log.startLogging(sys.stdout)
    AppHelper.runEventLoop()
sorenh/cc
vendor/Twisted-10.0.0/doc/core/examples/threadedselect/Cocoa/SimpleWebClient/Twistzilla.py
Python
apache-2.0
2,737
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.flink.table.client.cli;

import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.client.cli.utils.SqlParserHelper;
import org.apache.flink.table.client.gateway.Executor;
import org.apache.flink.table.client.gateway.ProgramTargetDescriptor;
import org.apache.flink.table.client.gateway.ResultDescriptor;
import org.apache.flink.table.client.gateway.SessionContext;
import org.apache.flink.table.client.gateway.SqlExecutionException;
import org.apache.flink.table.client.gateway.TypedResult;
import org.apache.flink.table.delegation.Parser;
import org.apache.flink.types.Row;
import org.apache.flink.util.function.BiFunctionWithException;
import org.apache.flink.util.function.FunctionWithException;
import org.apache.flink.util.function.SupplierWithException;
import org.apache.flink.util.function.TriFunctionWithException;

import java.util.List;
import java.util.Map;

/**
 * A customizable {@link Executor} for testing purposes.
 *
 * <p>Result-producing calls are driven by caller-supplied "scripts" (lists of
 * suppliers); once a script is exhausted its last entry is replayed. Each
 * overridden call also increments a counter that tests can query via the
 * {@code getNum*Calls()} accessors. Unscripted operations throw
 * {@link UnsupportedOperationException}.
 */
class TestingExecutor implements Executor {

    private int numCancelCalls = 0;

    private int numRetrieveResultChancesCalls = 0;
    private final List<SupplierWithException<TypedResult<List<Tuple2<Boolean, Row>>>, SqlExecutionException>> resultChanges;

    private int numSnapshotResultCalls = 0;
    private final List<SupplierWithException<TypedResult<Integer>, SqlExecutionException>> snapshotResults;

    private int numRetrieveResultPageCalls = 0;
    private final List<SupplierWithException<List<Row>, SqlExecutionException>> resultPages;

    private int numExecuteSqlCalls = 0;
    private final BiFunctionWithException<String, String, TableResult, SqlExecutionException> executeSqlConsumer;

    private int numSetSessionPropertyCalls = 0;
    private final TriFunctionWithException<String, String, String, Void, SqlExecutionException> setSessionPropertyFunction;

    private int numResetSessionPropertiesCalls = 0;
    private final FunctionWithException<String, Void, SqlExecutionException> resetSessionPropertiesFunction;

    private final SqlParserHelper helper;

    TestingExecutor(
            List<SupplierWithException<TypedResult<List<Tuple2<Boolean, Row>>>, SqlExecutionException>> resultChanges,
            List<SupplierWithException<TypedResult<Integer>, SqlExecutionException>> snapshotResults,
            List<SupplierWithException<List<Row>, SqlExecutionException>> resultPages,
            BiFunctionWithException<String, String, TableResult, SqlExecutionException> executeSqlConsumer,
            TriFunctionWithException<String, String, String, Void, SqlExecutionException> setSessionPropertyFunction,
            FunctionWithException<String, Void, SqlExecutionException> resetSessionPropertiesFunction) {
        this.resultChanges = resultChanges;
        this.snapshotResults = snapshotResults;
        this.resultPages = resultPages;
        this.executeSqlConsumer = executeSqlConsumer;
        this.setSessionPropertyFunction = setSessionPropertyFunction;
        this.resetSessionPropertiesFunction = resetSessionPropertiesFunction;
        this.helper = new SqlParserHelper();
        this.helper.registerTables();
    }

    /**
     * Evaluates the scripted supplier for the given call index; when the index
     * runs past the end of the script, the last supplier is reused.
     */
    private static <T> T scriptedResult(
            List<SupplierWithException<T, SqlExecutionException>> script,
            int callIndex) throws SqlExecutionException {
        final int clamped = Math.min(callIndex, script.size() - 1);
        return script.get(clamped).get();
    }

    @Override
    public void cancelQuery(String sessionId, String resultId) throws SqlExecutionException {
        numCancelCalls++;
    }

    @Override
    public TypedResult<List<Tuple2<Boolean, Row>>> retrieveResultChanges(String sessionId, String resultId) throws SqlExecutionException {
        return scriptedResult(resultChanges, numRetrieveResultChancesCalls++);
    }

    @Override
    public List<Row> retrieveResultPage(String resultId, int page) throws SqlExecutionException {
        return scriptedResult(resultPages, numRetrieveResultPageCalls++);
    }

    @Override
    public TypedResult<Integer> snapshotResult(String sessionId, String resultId, int pageSize) throws SqlExecutionException {
        return scriptedResult(snapshotResults, numSnapshotResultCalls++);
    }

    @Override
    public void start() throws SqlExecutionException {
        // no-op for tests
    }

    @Override
    public String openSession(SessionContext session) throws SqlExecutionException {
        return session.getSessionId();
    }

    @Override
    public void closeSession(String sessionId) throws SqlExecutionException {
        // no-op for tests
    }

    @Override
    public Map<String, String> getSessionProperties(String sessionId) throws SqlExecutionException {
        throw new UnsupportedOperationException("Not implemented.");
    }

    @Override
    public void resetSessionProperties(String sessionId) throws SqlExecutionException {
        numResetSessionPropertiesCalls++;
        resetSessionPropertiesFunction.apply(sessionId);
    }

    @Override
    public void setSessionProperty(String sessionId, String key, String value) throws SqlExecutionException {
        numSetSessionPropertyCalls++;
        setSessionPropertyFunction.apply(sessionId, key, value);
    }

    @Override
    public TableResult executeSql(String sessionId, String statement) throws SqlExecutionException {
        numExecuteSqlCalls++;
        return executeSqlConsumer.apply(sessionId, statement);
    }

    @Override
    public List<String> listModules(String sessionId) throws SqlExecutionException {
        throw new UnsupportedOperationException("Not implemented.");
    }

    @Override
    public Parser getSqlParser(String sessionId) {
        return helper.getSqlParser();
    }

    @Override
    public List<String> completeStatement(String sessionId, String statement, int position) {
        throw new UnsupportedOperationException("Not implemented.");
    }

    @Override
    public ResultDescriptor executeQuery(String sessionId, String query) throws SqlExecutionException {
        throw new UnsupportedOperationException("Not implemented.");
    }

    @Override
    public ProgramTargetDescriptor executeUpdate(String sessionId, String statement) throws SqlExecutionException {
        throw new UnsupportedOperationException("Not implemented.");
    }

    public int getNumCancelCalls() {
        return numCancelCalls;
    }

    public int getNumRetrieveResultChancesCalls() {
        return numRetrieveResultChancesCalls;
    }

    public int getNumSnapshotResultCalls() {
        return numSnapshotResultCalls;
    }

    public int getNumRetrieveResultPageCalls() {
        return numRetrieveResultPageCalls;
    }

    public int getNumExecuteSqlCalls() {
        return numExecuteSqlCalls;
    }

    public int getNumSetSessionPropertyCalls() {
        return numSetSessionPropertyCalls;
    }

    public int getNumResetSessionPropertiesCalls() {
        return numResetSessionPropertiesCalls;
    }
}
darionyaphet/flink
flink-table/flink-sql-client/src/test/java/org/apache/flink/table/client/cli/TestingExecutor.java
Java
apache-2.0
7,087
"use strict";

// NOTE(review): transpiled output (Babel/TypeScript decorator helpers) found
// under docs/.../dist — regenerate from the ES6/TS source rather than editing.

Object.defineProperty(exports, "__esModule", {
    value: true
});

// Babel helper: enforce `new`-invocation of class constructors.
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }

var _ionicIonic = require('ionic/ionic');

var _angular2Angular2 = require('angular2/angular2');

var _helpers = require('../../helpers');

// TypeScript __decorate helper: applies decorators right-to-left; dispatches
// on argument count (class / property / method+descriptor forms).
var __decorate = undefined && undefined.__decorate || function (decorators, target, key, desc) {
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") return Reflect.decorate(decorators, target, key, desc);
    switch (arguments.length) {
      case 2:
        return decorators.reduceRight(function (o, d) {
            return d && d(o) || o;
        }, target);

      case 3:
        return decorators.reduceRight(function (o, d) {
            return (d && d(target, key), void 0);
        }, void 0);

      case 4:
        return decorators.reduceRight(function (o, d) {
            return d && d(target, key, o) || o;
        }, desc);
    }
};

// TypeScript __metadata helper: records design-time type metadata when a
// Reflect.metadata polyfill is present.
var __metadata = undefined && undefined.__metadata || function (k, v) {
    if (typeof Reflect === "object" && typeof Reflect.metadata === "function") return Reflect.metadata(k, v);
};

// Empty page class; all behavior comes from the @Page decorator below.
var BasicPage = function BasicPage() {
    _classCallCheck(this, BasicPage);
};

exports.BasicPage = BasicPage;

// Apply the Ionic @Page decorator (template + directives) to BasicPage.
exports.BasicPage = BasicPage = __decorate([(0, _ionicIonic.Page)({
    templateUrl: 'slides/basic/template.html',
    directives: [(0, _angular2Angular2.forwardRef)(function () {
        return _helpers.AndroidAttribute;
    })]
}), __metadata('design:paramtypes', [])], BasicPage);
philmerrell/ionic-site
docs/v2/dist/demos/component-docs/slides/basic/pages.js
JavaScript
apache-2.0
1,673
/*
 * #%L
 * BroadleafCommerce Common Libraries
 * %%
 * Copyright (C) 2009 - 2013 Broadleaf Commerce
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */
package org.broadleafcommerce.common.web.resource;

import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang3.reflect.FieldUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.broadleafcommerce.common.classloader.release.ThreadLocalManager;
import org.broadleafcommerce.common.extension.ExtensionResultHolder;
import org.broadleafcommerce.common.resource.GeneratedResource;
import org.broadleafcommerce.common.resource.service.ResourceBundlingService;
import org.broadleafcommerce.common.resource.service.ResourceMinificationService;
import org.broadleafcommerce.common.web.BroadleafRequestContext;
import org.broadleafcommerce.common.web.BroadleafSandBoxResolver;
import org.broadleafcommerce.common.web.BroadleafSiteResolver;
import org.broadleafcommerce.common.web.BroadleafThemeResolver;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.io.Resource;
import org.springframework.security.core.context.SecurityContext;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.security.web.context.HttpSessionSecurityContextRepository;
import org.springframework.util.StreamUtils;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;
import org.springframework.web.context.request.ServletWebRequest;
import org.springframework.web.servlet.HandlerMapping;
import org.springframework.web.servlet.resource.ResourceHttpRequestHandler;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpSession;

/**
 * Broadleaf extension of Spring's {@link ResourceHttpRequestHandler} that can
 * serve registered resource bundles, generated resources (from XML-configured
 * handlers), extension-provided overrides, and optionally minified single
 * files. Also establishes a lightweight Broadleaf request context (site,
 * sandbox, theme) since resource requests bypass the full filter chain.
 */
public class BroadleafResourceHttpRequestHandler extends ResourceHttpRequestHandler {
    private static final Log LOG = LogFactory.getLog(BroadleafResourceHttpRequestHandler.class);

    // XML Configured generated resource handlers
    protected List<AbstractGeneratedResourceHandler> handlers;
    // Lazily-built, order-sorted view of `handlers` (see sortHandlers()).
    protected List<AbstractGeneratedResourceHandler> sortedHandlers;

    @javax.annotation.Resource(name = "blResourceBundlingService")
    protected ResourceBundlingService bundlingService;

    @javax.annotation.Resource(name = "blResourceMinificationService")
    protected ResourceMinificationService minifyService;

    @javax.annotation.Resource(name = "blResourceRequestExtensionManager")
    protected ResourceRequestExtensionManager extensionManager;

    @javax.annotation.Resource(name = "blSiteResolver")
    protected BroadleafSiteResolver siteResolver;

    @javax.annotation.Resource(name = "blSandBoxResolver")
    protected BroadleafSandBoxResolver sbResolver;

    @javax.annotation.Resource(name = "blThemeResolver")
    protected BroadleafThemeResolver themeResolver;

    @Value("${global.admin.prefix}")
    protected String globalAdminPrefix;

    @Value("${global.admin.url}")
    protected String globalAdminUrl;

    /**
     * Checks to see if the requested path corresponds to a registered bundle. If so, returns the generated bundle.
     * Otherwise, checks to see if any of the configured GeneratedResourceHandlers can handle the given request.
     * If neither of those cases match, delegates to the normal ResourceHttpRequestHandler
     */
    @Override
    protected Resource getResource(HttpServletRequest request) {
        establishThinRequestContext();
        return getResourceInternal(request);
    }

    /**
     * Resolution order: registered bundle, generated-resource handler,
     * extension-manager override, then the default Spring resource lookup.
     * When single-file minification is enabled, the resolved resource is
     * minified in-memory before being returned.
     */
    protected Resource getResourceInternal(HttpServletRequest request) {
        String path = (String) request.getAttribute(HandlerMapping.PATH_WITHIN_HANDLER_MAPPING_ATTRIBUTE);

        if (bundlingService.hasBundle(path)) {
            return bundlingService.getBundle(path);
        }

        Resource unminifiedResource = null;

        if (sortedHandlers == null && handlers != null) {
            sortHandlers();
        }

        if (sortedHandlers != null) {
            // First matching handler (by configured order) wins.
            for (AbstractGeneratedResourceHandler handler : sortedHandlers) {
                if (handler.canHandle(path)) {
                    unminifiedResource = handler.getResource(path, getLocations());
                    break;
                }
            }
        }

        if (unminifiedResource == null) {
            // Give extension modules a chance to override the resource.
            ExtensionResultHolder erh = new ExtensionResultHolder();
            extensionManager.getProxy().getOverrideResource(path, erh);
            if (erh.getContextMap().get(ResourceRequestExtensionHandler.RESOURCE_ATTR) != null) {
                unminifiedResource = (Resource) erh.getContextMap().get(ResourceRequestExtensionHandler.RESOURCE_ATTR);
            }
        }

        if (unminifiedResource == null) {
            unminifiedResource = super.getResource(request);
        }

        // The finally block clears Broadleaf thread-locals whether or not we
        // return early here (the common, non-minified path).
        try {
            if (!minifyService.getEnabled() || !minifyService.getAllowSingleMinification()) {
                return unminifiedResource;
            }
        } finally {
            ThreadLocalManager.remove();
        }

        LOG.warn("Minifying individual file - this should only be used in development to trace down particular "
                + "files that are causing an exception in the minification service. The results of the minification "
                + "performed outside of a bundle are not stored to disk.");

        // Read the resource fully into memory so it can be minified.
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        byte[] bytes = null;
        InputStream is = null;
        try {
            is = unminifiedResource.getInputStream();
            StreamUtils.copy(is, baos);
            bytes = baos.toByteArray();
        } catch (IOException e) {
            throw new RuntimeException(e);
        } finally {
            try {
                // NOTE(review): `is` could still be null here if getInputStream()
                // threw; presumably acceptable since the RuntimeException above
                // propagates first — confirm.
                is.close();
                baos.close();
            } catch (IOException e2) {
                throw new RuntimeException("Could not close input stream", e2);
            }
        }

        LOG.debug("Attempting to minifiy " + unminifiedResource.getFilename());
        byte[] minifiedBytes = minifyService.minify(unminifiedResource.getFilename(), bytes);

        return new GeneratedResource(minifiedBytes, unminifiedResource.getFilename());
    }

    /** Returns true when the request path maps to a registered resource bundle. */
    public boolean isBundleRequest(HttpServletRequest request) {
        String path = (String) request.getAttribute(HandlerMapping.PATH_WITHIN_HANDLER_MAPPING_ATTRIBUTE);
        return bundlingService.hasBundle(path);
    }

    /**
     * @return a clone of the locations list that is in {@link ResourceHttpRequestHandler}. Note that we must use
     * reflection to access this field as it is marked private.
     */
    @SuppressWarnings("unchecked")
    public List<Resource> getLocations() {
        try {
            List<Resource> locations = (List<Resource>) FieldUtils.readField(this, "locations", true);
            return new ArrayList<Resource>(locations);
        } catch (IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Builds a minimal BroadleafRequestContext (site, sandbox, theme) for this
     * resource request when one is not already fully established.
     */
    protected void establishThinRequestContext() {
        BroadleafRequestContext oldBrc = BroadleafRequestContext.getBroadleafRequestContext();
        if (oldBrc == null || oldBrc.getSite() == null || oldBrc.getTheme() == null) {
            // Resolving sites and sandboxes is often dependent on having a security context present in the request.
            // For example, resolving a sandbox requires the current user to have the BLC_ADMIN_USER in his Authentication.
            // For performance reasons, we do not go through the entire Spring Security filter chain on requests
            // for resources like JavaScript and CSS files. However, when theming is enabled, we potentially have to
            // resolve a specific version of the theme for a sandbox so that we can replace variables appropriately. This
            // then depends on the sandbox being resolved, which requires the Authentication object to be present.
            // We will grab the Authentication object associated with this user's session and set it on the
            // SecurityContextHolder since Spring Security will be bypassed.
            HttpServletRequest req = ((ServletRequestAttributes) RequestContextHolder.getRequestAttributes()).getRequest();
            HttpSession session = req.getSession(false);
            SecurityContext ctx = readSecurityContextFromSession(session);
            if (ctx != null) {
                SecurityContextHolder.setContext(ctx);
            }
            BroadleafRequestContext newBrc = new BroadleafRequestContext();
            // Global-admin requests are served without site/sandbox/theme resolution.
            if (!isGlobalAdmin(req)) {
                ServletWebRequest swr = new ServletWebRequest(req);
                newBrc.setSite(siteResolver.resolveSite(swr, true));
                newBrc.setSandBox(sbResolver.resolveSandBox(swr, newBrc.getSite()));
                BroadleafRequestContext.setBroadleafRequestContext(newBrc);
                newBrc.setTheme(themeResolver.resolveTheme(swr));
            }
        }
    }

    /**
     * Returns true when the request targets the global admin, matched either by
     * server-name prefix (global.admin.prefix) or URI prefix (global.admin.url).
     */
    protected boolean isGlobalAdmin(HttpServletRequest request) {
        String uri = request.getRequestURI();
        if (!StringUtils.isEmpty(globalAdminPrefix)) {
            if (globalAdminPrefix.equals(getContextName(request))) {
                return true;
            } else {
                if (!StringUtils.isEmpty(globalAdminUrl)) {
                    return uri.startsWith(globalAdminUrl);
                }
            }
        }
        return false;
    }

    /** Returns the first dotted segment of the server name (e.g. "admin" from "admin.site.com"). */
    protected String getContextName(HttpServletRequest request) {
        String contextName = request.getServerName();
        int pos = contextName.indexOf('.');
        if (pos >= 0) {
            contextName = contextName.substring(0, contextName.indexOf('.'));
        }
        return contextName;
    }

    // **NOTE** This method is lifted from HttpSessionSecurityContextRepository
    protected SecurityContext readSecurityContextFromSession(HttpSession httpSession) {
        if (httpSession == null) {
            return null;
        }

        Object ctxFromSession = httpSession.getAttribute(HttpSessionSecurityContextRepository.SPRING_SECURITY_CONTEXT_KEY);
        if (ctxFromSession == null) {
            return null;
        }

        if (!(ctxFromSession instanceof SecurityContext)) {
            return null;
        }

        return (SecurityContext) ctxFromSession;
    }

    /** Sorts the configured handlers by getOrder() into {@link #sortedHandlers}. */
    protected void sortHandlers() {
        sortedHandlers = new ArrayList<AbstractGeneratedResourceHandler>(handlers);
        Collections.sort(sortedHandlers, new Comparator<AbstractGeneratedResourceHandler>() {
            @Override
            public int compare(AbstractGeneratedResourceHandler o1, AbstractGeneratedResourceHandler o2) {
                return new Integer(o1.getOrder()).compareTo(o2.getOrder());
            }
        });
    }

    /* *********** */
    /* BOILERPLATE */
    /* *********** */

    public List<AbstractGeneratedResourceHandler> getHandlers() {
        if (sortedHandlers == null && handlers != null) {
            sortHandlers();
        }
        return sortedHandlers;
    }

    public void setHandlers(List<AbstractGeneratedResourceHandler> handlers) {
        this.handlers = handlers;
    }
}
passion1014/metaworks_framework
common/src/main/java/org/broadleafcommerce/common/web/resource/BroadleafResourceHttpRequestHandler.java
Java
apache-2.0
12,072
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System; using System.Collections.Generic; using System.Windows.Data; using Microsoft.CodeAnalysis; using Microsoft.CodeAnalysis.CodeStyle; using Microsoft.CodeAnalysis.CSharp.CodeStyle; using Microsoft.CodeAnalysis.Options; using Microsoft.VisualStudio.LanguageServices.Implementation.Options; namespace Microsoft.VisualStudio.LanguageServices.CSharp.Options.Formatting { /// <summary> /// This is the view model for CodeStyle options page. /// </summary> /// <remarks> /// The codestyle options page is defined in <see cref="CodeStylePage"/> /// </remarks> internal class StyleViewModel : AbstractOptionPreviewViewModel { #region "Preview Text" private static readonly string s_fieldDeclarationPreviewTrue = @" class C{ int capacity; void Method() { //[ this.capacity = 0; //] } }"; private static readonly string s_fieldDeclarationPreviewFalse = @" class C{ int capacity; void Method() { //[ capacity = 0; //] } }"; private static readonly string s_propertyDeclarationPreviewTrue = @" class C{ public int Id { get; set; } void Method() { //[ this.Id = 0; //] } }"; private static readonly string s_propertyDeclarationPreviewFalse = @" class C{ public int Id { get; set; } void Method() { //[ Id = 0; //] } }"; private static readonly string s_eventDeclarationPreviewTrue = @" using System; class C{ event EventHandler Elapsed; void Handler(object sender, EventArgs args) { //[ this.Elapsed += Handler; //] } }"; private static readonly string s_eventDeclarationPreviewFalse = @" using System; class C{ event EventHandler Elapsed; void Handler(object sender, EventArgs args) { //[ Elapsed += Handler; //] } }"; private static readonly string s_methodDeclarationPreviewTrue = @" using System; class C{ void Display() { //[ this.Display(); //] } }"; private static readonly string s_methodDeclarationPreviewFalse = @" using System; 
class C{ void Display() { //[ Display(); //] } }"; private static readonly string s_intrinsicPreviewDeclarationTrue = @" class Program { //[ private int _member; static void M(int argument) { int local; } //] }"; private static readonly string s_intrinsicPreviewDeclarationFalse = @" using System; class Program { //[ private Int32 _member; static void M(Int32 argument) { Int32 local; } //] }"; private static readonly string s_intrinsicPreviewMemberAccessTrue = @" class Program { //[ static void M() { var local = int.MaxValue; } //] }"; private static readonly string s_intrinsicPreviewMemberAccessFalse = @" using System; class Program { //[ static void M() { var local = Int32.MaxValue; } //] }"; private static readonly string s_varForIntrinsicsPreviewFalse = $@" using System; class C{{ void Method() {{ //[ int x = 5; // {ServicesVSResources.built_in_types} //] }} }}"; private static readonly string s_varForIntrinsicsPreviewTrue = $@" using System; class C{{ void Method() {{ //[ var x = 5; // {ServicesVSResources.built_in_types} //] }} }}"; private static readonly string s_varWhereApparentPreviewFalse = $@" using System; class C{{ void Method() {{ //[ C cobj = new C(); // {ServicesVSResources.type_is_apparent_from_assignment_expression} //] }} }}"; private static readonly string s_varWhereApparentPreviewTrue = $@" using System; class C{{ void Method() {{ //[ var cobj = new C(); // {ServicesVSResources.type_is_apparent_from_assignment_expression} //] }} }}"; private static readonly string s_varWherePossiblePreviewFalse = $@" using System; class C{{ void Init() {{ //[ Action f = this.Init(); // {ServicesVSResources.everywhere_else} //] }} }}"; private static readonly string s_varWherePossiblePreviewTrue = $@" using System; class C{{ void Init() {{ //[ var f = this.Init(); // {ServicesVSResources.everywhere_else} //] }} }}"; private static readonly string s_preferThrowExpression = $@" using System; class C {{ private string s; public C(string s) {{ //[ // 
{ServicesVSResources.Prefer_colon} this.s = s ?? throw new ArgumentNullException(nameof(s)); // {ServicesVSResources.Over_colon} if (s == null) {{ throw new ArgumentNullException(nameof(s)); }} this.s = s; //] }} }} "; private static readonly string s_preferCoalesceExpression = $@" using System; class C {{ private string s; public C(string s) {{ //[ // {ServicesVSResources.Prefer_colon} var v = x ?? y; // {ServicesVSResources.Over_colon} var v = x != null ? x : y; // {ServicesVSResources.or} var v = x == null ? y : x; //] }} }} "; private static readonly string s_preferConditionalDelegateCall = $@" using System; class C {{ private string s; public C(string s) {{ //[ // {ServicesVSResources.Prefer_colon} func?.Invoke(args); // {ServicesVSResources.Over_colon} if (func != null) {{ func(args); }} //] }} }} "; private static readonly string s_preferNullPropagation = $@" using System; class C {{ public C(object o) {{ //[ // {ServicesVSResources.Prefer_colon} var v = o?.ToString(); // {ServicesVSResources.Over_colon} var v = o == null ? null : o.ToString(); // {ServicesVSResources.or} var v = o != null ? 
o.ToString() : null; //] }} }} "; private static readonly string s_preferPatternMatchingOverAsWithNullCheck = $@" class C {{ void M() {{ //[ // {ServicesVSResources.Prefer_colon} if (o is string s) {{ }} // {ServicesVSResources.Over_colon} var s = o as string; if (s != null) {{ }} //] }} }} "; private static readonly string s_preferPatternMatchingOverIsWithCastCheck = $@" class C {{ void M() {{ //[ // {ServicesVSResources.Prefer_colon} if (o is int i) {{ }} // {ServicesVSResources.Over_colon} if (o is int) {{ var i = (int)o; }} //] }} }} "; private static readonly string s_preferObjectInitializer = $@" using System; class Customer {{ private int Age; public Customer() {{ //[ // {ServicesVSResources.Prefer_colon} var c = new Customer() {{ Age = 21 }}; // {ServicesVSResources.Over_colon} var c = new Customer(); c.Age = 21; //] }} }} "; private static readonly string s_preferCollectionInitializer = $@" using System.Collections.Generic; class Customer {{ private int Age; public Customer() {{ //[ // {ServicesVSResources.Prefer_colon} var list = new List<int> {{ 1, 2, 3 }}; // {ServicesVSResources.Over_colon} var list = new List<int>(); list.Add(1); list.Add(2); list.Add(3); //] }} }} "; private static readonly string s_preferExplicitTupleName = $@" class Customer {{ public Customer() {{ //[ // {ServicesVSResources.Prefer_colon} (string name, int age) customer = GetCustomer(); var name = customer.name; var age = customer.age; // {ServicesVSResources.Over_colon} (string name, int age) customer = GetCustomer(); var name = customer.Item1; var age = customer.Item2; //] }} }} "; private static readonly string s_preferSimpleDefaultExpression = $@" using System.Threading; class Customer {{ //[ // {ServicesVSResources.Prefer_colon} void DoWork(CancellationToken cancellationToken = default) {{ }} // {ServicesVSResources.Over_colon} void DoWork(CancellationToken cancellationToken = default(CancellationToken)) {{ }} //] }} "; private static readonly string 
s_preferInlinedVariableDeclaration = $@" using System; class Customer {{ public Customer(string value) {{ //[ // {ServicesVSResources.Prefer_colon} if (int.TryParse(value, out int i)) {{ }} // {ServicesVSResources.Over_colon} int i; if (int.TryParse(value, out i)) {{ }} //] }} }} "; private static readonly string s_preferBraces = $@" using System; class Customer {{ private int Age; public int GetAge() {{ //[ // {ServicesVSResources.Prefer_colon} if (test) {{ this.Display(); }} // {ServicesVSResources.Over_colon} if (test) this.Display(); //] }} }} "; private static readonly string s_preferExpressionBodyForMethods = @" using System; //[ class Customer { private int Age; public int GetAge() => this.Age; } //] "; private static readonly string s_preferBlockBodyForMethods = @" using System; //[ class Customer { private int Age; public int GetAge() { return this.Age; } } //] "; private static readonly string s_preferExpressionBodyForConstructors = @" using System; //[ class Customer { private int Age; public Customer(int age) => Age = age; } //] "; private static readonly string s_preferBlockBodyForConstructors = @" using System; //[ class Customer { private int Age; public Customer(int age) { Age = age; } } //] "; private static readonly string s_preferExpressionBodyForOperators = @" using System; struct ComplexNumber { //[ public static ComplexNumber operator +(ComplexNumber c1, ComplexNumber c2) => new ComplexNumber(c1.Real + c2.Real, c1.Imaginary + c2.Imaginary); //] } "; private static readonly string s_preferBlockBodyForOperators = @" using System; struct ComplexNumber { //[ public static ComplexNumber operator +(ComplexNumber c1, ComplexNumber c2) { return new ComplexNumber(c1.Real + c2.Real, c1.Imaginary + c2.Imaginary); } //] } "; private static readonly string s_preferExpressionBodyForProperties = @" using System; //[ class Customer { private int _age; public int Age => _age; } //] "; private static readonly string s_preferBlockBodyForProperties = @" using 
System; //[ class Customer { private int _age; public int Age { get { return _age; } } } //] "; private static readonly string s_preferExpressionBodyForAccessors = @" using System; //[ class Customer { private int _age; public int Age { get => _age; set => _age = value; } } //] "; private static readonly string s_preferBlockBodyForAccessors = @" using System; //[ class Customer { private int _age; public int Age { get { return _age; } set { _age = value; } } } //] "; private static readonly string s_preferExpressionBodyForIndexers= @" using System; //[ class List<T> { private T[] _values; public T this[int i] => _values[i]; } //] "; private static readonly string s_preferBlockBodyForIndexers = @" using System; //[ class List<T> { private T[] _values; public T this[int i] { get { return _values[i]; } } } //] "; #endregion internal StyleViewModel(OptionSet optionSet, IServiceProvider serviceProvider) : base(optionSet, serviceProvider, LanguageNames.CSharp) { var collectionView = (ListCollectionView)CollectionViewSource.GetDefaultView(CodeStyleItems); collectionView.GroupDescriptions.Add(new PropertyGroupDescription(nameof(AbstractCodeStyleOptionViewModel.GroupName))); var qualifyGroupTitle = CSharpVSResources.this_preferences_colon; var predefinedTypesGroupTitle = CSharpVSResources.predefined_type_preferences_colon; var varGroupTitle = CSharpVSResources.var_preferences_colon; var nullCheckingGroupTitle = CSharpVSResources.null_checking_colon; var codeBlockPreferencesGroupTitle = ServicesVSResources.Code_block_preferences_colon; var expressionPreferencesGroupTitle = ServicesVSResources.Expression_preferences_colon; var variablePreferencesGroupTitle = ServicesVSResources.Variable_preferences_colon; var qualifyMemberAccessPreferences = new List<CodeStylePreference> { new CodeStylePreference(CSharpVSResources.Prefer_this, isChecked: true), new CodeStylePreference(CSharpVSResources.Do_not_prefer_this, isChecked: false), }; var predefinedTypesPreferences = new 
List<CodeStylePreference> { new CodeStylePreference(ServicesVSResources.Prefer_predefined_type, isChecked: true), new CodeStylePreference(ServicesVSResources.Prefer_framework_type, isChecked: false), }; var typeStylePreferences = new List<CodeStylePreference> { new CodeStylePreference(CSharpVSResources.Prefer_var, isChecked: true), new CodeStylePreference(CSharpVSResources.Prefer_explicit_type, isChecked: false), }; CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CodeStyleOptions.QualifyFieldAccess, CSharpVSResources.Qualify_field_access_with_this, s_fieldDeclarationPreviewTrue, s_fieldDeclarationPreviewFalse, this, optionSet, qualifyGroupTitle, qualifyMemberAccessPreferences)); CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CodeStyleOptions.QualifyPropertyAccess, CSharpVSResources.Qualify_property_access_with_this, s_propertyDeclarationPreviewTrue, s_propertyDeclarationPreviewFalse, this, optionSet, qualifyGroupTitle, qualifyMemberAccessPreferences)); CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CodeStyleOptions.QualifyMethodAccess, CSharpVSResources.Qualify_method_access_with_this, s_methodDeclarationPreviewTrue, s_methodDeclarationPreviewFalse, this, optionSet, qualifyGroupTitle, qualifyMemberAccessPreferences)); CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CodeStyleOptions.QualifyEventAccess, CSharpVSResources.Qualify_event_access_with_this, s_eventDeclarationPreviewTrue, s_eventDeclarationPreviewFalse, this, optionSet, qualifyGroupTitle, qualifyMemberAccessPreferences)); CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CodeStyleOptions.PreferIntrinsicPredefinedTypeKeywordInDeclaration, ServicesVSResources.For_locals_parameters_and_members, s_intrinsicPreviewDeclarationTrue, s_intrinsicPreviewDeclarationFalse, this, optionSet, predefinedTypesGroupTitle, predefinedTypesPreferences)); CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CodeStyleOptions.PreferIntrinsicPredefinedTypeKeywordInMemberAccess, 
ServicesVSResources.For_member_access_expressions, s_intrinsicPreviewMemberAccessTrue, s_intrinsicPreviewMemberAccessFalse, this, optionSet, predefinedTypesGroupTitle, predefinedTypesPreferences)); // Use var CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CSharpCodeStyleOptions.UseImplicitTypeForIntrinsicTypes, CSharpVSResources.For_built_in_types, s_varForIntrinsicsPreviewTrue, s_varForIntrinsicsPreviewFalse, this, optionSet, varGroupTitle, typeStylePreferences)); CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CSharpCodeStyleOptions.UseImplicitTypeWhereApparent, CSharpVSResources.When_variable_type_is_apparent, s_varWhereApparentPreviewTrue, s_varWhereApparentPreviewFalse, this, optionSet, varGroupTitle, typeStylePreferences)); CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CSharpCodeStyleOptions.UseImplicitTypeWherePossible, CSharpVSResources.Elsewhere, s_varWherePossiblePreviewTrue, s_varWherePossiblePreviewFalse, this, optionSet, varGroupTitle, typeStylePreferences)); // Code block CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CSharpCodeStyleOptions.PreferBraces, ServicesVSResources.Prefer_braces, s_preferBraces, s_preferBraces, this, optionSet, codeBlockPreferencesGroupTitle)); // Expression preferences CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CodeStyleOptions.PreferObjectInitializer, ServicesVSResources.Prefer_object_initializer, s_preferObjectInitializer, s_preferObjectInitializer, this, optionSet, expressionPreferencesGroupTitle)); CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CodeStyleOptions.PreferCollectionInitializer, ServicesVSResources.Prefer_collection_initializer, s_preferCollectionInitializer, s_preferCollectionInitializer, this, optionSet, expressionPreferencesGroupTitle)); CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CSharpCodeStyleOptions.PreferPatternMatchingOverIsWithCastCheck, CSharpVSResources.Prefer_pattern_matching_over_is_with_cast_check, 
s_preferPatternMatchingOverIsWithCastCheck, s_preferPatternMatchingOverIsWithCastCheck, this, optionSet, expressionPreferencesGroupTitle)); CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CSharpCodeStyleOptions.PreferPatternMatchingOverAsWithNullCheck, CSharpVSResources.Prefer_pattern_matching_over_as_with_null_check, s_preferPatternMatchingOverAsWithNullCheck, s_preferPatternMatchingOverAsWithNullCheck, this, optionSet, expressionPreferencesGroupTitle)); CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CodeStyleOptions.PreferExplicitTupleNames, ServicesVSResources.Prefer_explicit_tuple_name, s_preferExplicitTupleName, s_preferExplicitTupleName, this, optionSet, expressionPreferencesGroupTitle)); CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CSharpCodeStyleOptions.PreferSimpleDefaultExpression, ServicesVSResources.Prefer_simple_default_expression, s_preferSimpleDefaultExpression, s_preferSimpleDefaultExpression, this, optionSet, expressionPreferencesGroupTitle)); AddExpressionBodyOptions(optionSet, expressionPreferencesGroupTitle); // Variable preferences CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CodeStyleOptions.PreferInlinedVariableDeclaration, ServicesVSResources.Prefer_inlined_variable_declaration, s_preferInlinedVariableDeclaration, s_preferInlinedVariableDeclaration, this, optionSet, variablePreferencesGroupTitle)); // Null preferences. 
CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CodeStyleOptions.PreferThrowExpression, CSharpVSResources.Prefer_throw_expression, s_preferThrowExpression, s_preferThrowExpression, this, optionSet, nullCheckingGroupTitle)); CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CSharpCodeStyleOptions.PreferConditionalDelegateCall, CSharpVSResources.Prefer_conditional_delegate_call, s_preferConditionalDelegateCall, s_preferConditionalDelegateCall, this, optionSet, nullCheckingGroupTitle)); CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CodeStyleOptions.PreferCoalesceExpression, ServicesVSResources.Prefer_coalesce_expression, s_preferCoalesceExpression, s_preferCoalesceExpression, this, optionSet, nullCheckingGroupTitle)); CodeStyleItems.Add(new BooleanCodeStyleOptionViewModel(CodeStyleOptions.PreferNullPropagation, ServicesVSResources.Prefer_null_propagation, s_preferNullPropagation, s_preferNullPropagation, this, optionSet, nullCheckingGroupTitle)); } private void AddExpressionBodyOptions(OptionSet optionSet, string expressionPreferencesGroupTitle) { var expressionBodyPreferences = new List<CodeStylePreference> { new CodeStylePreference(CSharpVSResources.Never, isChecked: false), new CodeStylePreference(CSharpVSResources.When_possible, isChecked: false), new CodeStylePreference(CSharpVSResources.When_on_single_line, isChecked: false), }; var enumValues = new[] { ExpressionBodyPreference.Never, ExpressionBodyPreference.WhenPossible, ExpressionBodyPreference.WhenOnSingleLine }; CodeStyleItems.Add(new EnumCodeStyleOptionViewModel<ExpressionBodyPreference>( CSharpCodeStyleOptions.PreferExpressionBodiedMethods, ServicesVSResources.Use_expression_body_for_methods, enumValues, new[] { s_preferBlockBodyForMethods, s_preferExpressionBodyForMethods, s_preferExpressionBodyForMethods }, this, optionSet, expressionPreferencesGroupTitle, expressionBodyPreferences)); CodeStyleItems.Add(new EnumCodeStyleOptionViewModel<ExpressionBodyPreference>( 
CSharpCodeStyleOptions.PreferExpressionBodiedConstructors, ServicesVSResources.Use_expression_body_for_constructors, enumValues, new[] { s_preferBlockBodyForConstructors, s_preferExpressionBodyForConstructors, s_preferExpressionBodyForConstructors }, this, optionSet, expressionPreferencesGroupTitle, expressionBodyPreferences)); CodeStyleItems.Add(new EnumCodeStyleOptionViewModel<ExpressionBodyPreference>( CSharpCodeStyleOptions.PreferExpressionBodiedOperators, ServicesVSResources.Use_expression_body_for_operators, enumValues, new[] { s_preferBlockBodyForOperators, s_preferExpressionBodyForOperators, s_preferExpressionBodyForOperators }, this, optionSet, expressionPreferencesGroupTitle, expressionBodyPreferences)); CodeStyleItems.Add(new EnumCodeStyleOptionViewModel<ExpressionBodyPreference>( CSharpCodeStyleOptions.PreferExpressionBodiedProperties, ServicesVSResources.Use_expression_body_for_properties, enumValues, new[] { s_preferBlockBodyForProperties, s_preferExpressionBodyForProperties, s_preferExpressionBodyForProperties }, this, optionSet, expressionPreferencesGroupTitle, expressionBodyPreferences)); CodeStyleItems.Add(new EnumCodeStyleOptionViewModel<ExpressionBodyPreference>( CSharpCodeStyleOptions.PreferExpressionBodiedIndexers, ServicesVSResources.Use_expression_body_for_indexers, enumValues, new[] { s_preferBlockBodyForIndexers, s_preferExpressionBodyForIndexers, s_preferExpressionBodyForIndexers }, this, optionSet, expressionPreferencesGroupTitle, expressionBodyPreferences)); CodeStyleItems.Add(new EnumCodeStyleOptionViewModel<ExpressionBodyPreference>( CSharpCodeStyleOptions.PreferExpressionBodiedAccessors, ServicesVSResources.Use_expression_body_for_accessors, enumValues, new[] { s_preferBlockBodyForAccessors, s_preferExpressionBodyForAccessors, s_preferExpressionBodyForAccessors }, this, optionSet, expressionPreferencesGroupTitle, expressionBodyPreferences)); } } }
yeaicc/roslyn
src/VisualStudio/CSharp/Impl/Options/Formatting/StyleViewModel.cs
C#
apache-2.0
23,622
/*
 * Copyright (c) 2012-2013 rebaze GmbH
 * All rights reserved.
 *
 * This library and the accompanying materials are made available under the terms of the Apache License Version 2.0,
 * which accompanies this distribution and is available at http://www.apache.org/licenses/LICENSE-2.0.
 *
 */
package org.rebaze.integrity.tree.api;

/**
 * The central element of this library. You will typically work with many
 * instances of {@link Tree}; they are all unmodifiable.
 *
 * A tree is identified by a selector. Note: a selector does not have to be
 * globally unique — only unique among the children of its parent.
 *
 * A tree also carries a fingerprint (a hash) that stands for the data of all
 * of its sub branches. Sub branches are accessible from here too; they may be
 * initialized lazily.
 *
 * @author Toni Menzel <toni.menzel@rebaze.com>
 */
public interface Tree
{
    /**
     * Hash value of this tree together with the algorithm used to create it.
     *
     * @return the hash value of this tree plus the algorithm used.
     */
    TreeValue value();

    /**
     * Hash value over all sub branches. Default implementations may use SHA-1.
     *
     * @deprecated superseded by {@link #value()}, which also carries the
     *             algorithm used to compute the hash.
     * @return the hash value of this tree.
     */
    String fingerprint();

    /**
     * Identification of this tree within its parent. Selectors are not
     * global; they are used to build indexes.
     *
     * @return the selector of this tree.
     */
    Selector selector();

    /**
     * Sub branches of this tree. May be empty or a list of sub trees. All sub
     * branches are supposed to have unique selectors.
     *
     * @return list of sub trees.
     */
    Tree[] branches();

    /**
     * @return tags for this tree.
     */
    Tag tags();
}
rebaze/trees
src/main/java/org/rebaze/integrity/tree/api/Tree.java
Java
apache-2.0
1,797
/* Copyright JS Foundation and other contributors, http://js.foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Tests for Array.prototype.at (ES2022 relative indexing).
// NOTE: assert() is provided by the jerryscript test harness.

var obj = {};
var array = ['Apple', 'Banana', "zero", 0, obj, 'Apple'];

// Basic lookup: at(0) returns the first element.
var index = array.at(0);
assert(index === 'Apple');
// 'Apple' is not a property of the array, so array['Apple'] is undefined.
assert(array[index] === undefined);

// Positive indices at or past length yield undefined; length-1 is the last element.
assert(array.at(array.length) === undefined);
assert(array.at(array.length+1) === undefined);
assert(array.at(array.length-1) === 'Apple');

// The argument is number-coerced, so numeric strings behave like numbers.
assert(array.at("1") === 'Banana');

// Negative indices count back from the end; far out of range yields undefined.
assert(array.at(-1) === 'Apple');
assert(array.at("-1") === 'Apple');
assert(array.at("-20") === undefined);

/* 7 */
// at() is generic: on an array-like object it reads the element through the
// property getter, so an accessor that throws must propagate its exception.
var obj = {}
obj.length = 1;
Object.defineProperty(obj, '0', { 'get' : function () {throw new ReferenceError ("foo"); } });
obj.at = Array.prototype.at;

try {
  obj.at(0);
  assert(false);
} catch(e) {
  assert(e.message === "foo");
  assert(e instanceof ReferenceError);
}

// Calling at() with an undefined receiver must throw a TypeError.
try {
  Array.prototype.at.call(undefined)
  assert (false);
} catch(e) {
  assert(e instanceof TypeError);
}
zherczeg/jerryscript
tests/jerry/es.next/array-prototype-at.js
JavaScript
apache-2.0
1,476
/*
Copyright 2013 Twitter, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.hraven.etl;

import java.util.List;

import org.apache.hadoop.hbase.client.Put;

import com.twitter.hraven.JobDetails;
import com.twitter.hraven.JobKey;
import com.twitter.hraven.datasource.ProcessingException;

/**
 * Interface for job history file parsing. Should be implemented once per
 * history file format, since the format changed with MAPREDUCE-1016 between
 * hadoop 1.0 and hadoop 2.0.
 */
public interface JobHistoryFileParser {

  /**
   * Parses the raw history file contents and assembles the HBase puts
   * for the job identified by the given key.
   *
   * @param historyFile raw bytes of the history file
   * @param jobKey key of the job being parsed
   * @throws ProcessingException if the history file cannot be parsed
   */
  public void parse(byte[] historyFile, JobKey jobKey);

  /**
   * Calculates the megabyte-millis taken up by this job.
   * Should be called after {@link JobHistoryFileParser#parse(byte[], JobKey)}
   * since the values it needs for the calculation are
   * populated in the parser object while parsing.
   *
   * @return megabyte-millis consumed by the job
   */
  public Long getMegaByteMillis();

  /**
   * Return the generated list of job puts assembled when the history file
   * was parsed.
   *
   * @return a list of jobPuts
   */
  public List<Put> getJobPuts();

  /**
   * Return the generated list of task puts assembled when the history file
   * was parsed.
   *
   * @return a list of taskPuts
   */
  public List<Put> getTaskPuts();

  /**
   * Get the {@link JobDetails} object for this history file so that it can
   * be used for storing the aggregation summary.
   *
   * @return JobDetails
   */
  public JobDetails getJobDetails();
}
ogre0403/hraven
hraven-etl/src/main/java/com/twitter/hraven/etl/JobHistoryFileParser.java
Java
apache-2.0
1,976
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.facebook.presto.operator.project;

import com.facebook.presto.spi.ConnectorSession;
import com.facebook.presto.spi.Page;
import com.facebook.presto.spi.block.Block;
import com.facebook.presto.spi.block.BlockBuilder;
import com.facebook.presto.spi.block.BlockBuilderStatus;
import com.facebook.presto.spi.block.DictionaryBlock;
import com.facebook.presto.spi.block.LazyBlock;
import com.facebook.presto.spi.block.LongArrayBlock;
import com.facebook.presto.spi.block.RunLengthEncodedBlock;
import com.facebook.presto.spi.type.Type;
import com.google.common.collect.ImmutableList;
import it.unimi.dsi.fastutil.ints.IntArrayList;
import org.testng.annotations.Test;

import java.util.Arrays;

import static com.facebook.presto.block.BlockAssertions.assertBlockEquals;
import static com.facebook.presto.block.BlockAssertions.createLongSequenceBlock;
import static com.facebook.presto.spi.block.DictionaryId.randomDictionaryId;
import static com.facebook.presto.spi.type.BigintType.BIGINT;
import static io.airlift.testing.Assertions.assertInstanceOf;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertThrows;

/**
 * Tests for {@code DictionaryAwarePageProjection}: exercises projection over
 * plain, RLE, dictionary, and lazy blocks, and asserts (via the type of the
 * returned block) whether the projection processed only the dictionary or
 * fell back to per-position (columnar) processing.
 *
 * The underlying projection is {@link TestPageProjection}, which copies
 * BIGINT values and throws on negative inputs — this is what the *WithFailure
 * tests use to trigger errors inside dictionary processing.
 */
public class TestDictionaryAwarePageProjection
{
    /** Delegating accessors must pass through to the wrapped projection. */
    @Test
    public void testDelegateMethods()
            throws Exception
    {
        DictionaryAwarePageProjection projection = createProjection();
        assertEquals(projection.isDeterministic(), true);
        assertEquals(projection.getInputChannels().getInputChannels(), ImmutableList.of(3));
        assertEquals(projection.getType(), BIGINT);
    }

    /** A plain block projects position by position; result type matches input. */
    @Test
    public void testSimpleBlock()
            throws Exception
    {
        Block block = createLongSequenceBlock(0, 100);
        testProject(block, block.getClass());
    }

    /** An RLE block should stay RLE: only the single value is projected. */
    @Test
    public void testRleBlock()
            throws Exception
    {
        Block value = createLongSequenceBlock(42, 43);
        RunLengthEncodedBlock block = new RunLengthEncodedBlock(value, 100);

        testProject(block, RunLengthEncodedBlock.class);
    }

    /** Negative RLE value makes the projection throw; error must propagate. */
    @Test
    public void testRleBlockWithFailure()
            throws Exception
    {
        Block value = createLongSequenceBlock(-43, -42);
        RunLengthEncodedBlock block = new RunLengthEncodedBlock(value, 100);

        testProjectFails(block, RunLengthEncodedBlock.class);
    }

    /** Small dictionary, large block: dictionary processing is worthwhile. */
    @Test
    public void testDictionaryBlock()
            throws Exception
    {
        DictionaryBlock block = createDictionaryBlock(10, 100);

        testProject(block, DictionaryBlock.class);
    }

    /** Every dictionary entry is used, including failing (negative) ones. */
    @Test
    public void testDictionaryBlockWithFailure()
            throws Exception
    {
        DictionaryBlock block = createDictionaryBlockWithFailure(10, 100);

        testProjectFails(block, DictionaryBlock.class);
    }

    @Test
    public void testDictionaryBlockProcessingWithUnusedFailure()
            throws Exception
    {
        DictionaryBlock block = createDictionaryBlockWithUnusedEntries(10, 100);

        // failures in the dictionary processing will cause a fallback to normal columnar processing
        testProject(block, LongArrayBlock.class);
    }

    /**
     * Verifies the adaptive behavior: whether the next block gets dictionary
     * processing depends on how effective it was for the previous block
     * (observable through the returned block type).
     */
    @Test
    public void testDictionaryProcessingEnableDisable()
            throws Exception
    {
        DictionaryAwarePageProjection projection = createProjection();

        // the function always processes the first dictionary
        DictionaryBlock ineffectiveBlock = createDictionaryBlock(100, 20);
        testProjectRange(ineffectiveBlock, DictionaryBlock.class, projection);
        testProjectList(ineffectiveBlock, DictionaryBlock.class, projection);

        // last dictionary not effective, so dictionary processing is disabled
        DictionaryBlock effectiveBlock = createDictionaryBlock(10, 100);
        testProjectRange(effectiveBlock, LongArrayBlock.class, projection);
        testProjectList(effectiveBlock, LongArrayBlock.class, projection);

        // last dictionary not effective, so dictionary processing is enabled again
        testProjectRange(ineffectiveBlock, DictionaryBlock.class, projection);
        testProjectList(ineffectiveBlock, DictionaryBlock.class, projection);

        // last dictionary not effective, so dictionary processing is disabled again
        testProjectRange(effectiveBlock, LongArrayBlock.class, projection);
        testProjectList(effectiveBlock, LongArrayBlock.class, projection);
    }

    /** Dictionary of [0, dictionarySize); ids cycle through every entry. */
    private static DictionaryBlock createDictionaryBlock(int dictionarySize, int blockSize)
    {
        Block dictionary = createLongSequenceBlock(0, dictionarySize);
        int[] ids = new int[blockSize];
        Arrays.setAll(ids, index -> index % dictionarySize);
        return new DictionaryBlock(dictionary, ids);
    }

    /** Dictionary containing negative values, which make the projection throw. */
    private static DictionaryBlock createDictionaryBlockWithFailure(int dictionarySize, int blockSize)
    {
        Block dictionary = createLongSequenceBlock(-10, dictionarySize - 10);
        int[] ids = new int[blockSize];
        Arrays.setAll(ids, index -> index % dictionarySize);
        return new DictionaryBlock(dictionary, ids);
    }

    /** Negative entries exist in the dictionary but ids skip them (offset by 10). */
    private static DictionaryBlock createDictionaryBlockWithUnusedEntries(int dictionarySize, int blockSize)
    {
        Block dictionary = createLongSequenceBlock(-10, dictionarySize);
        int[] ids = new int[blockSize];
        Arrays.setAll(ids, index -> (index % dictionarySize) + 10);
        return new DictionaryBlock(dictionary, ids);
    }

    /** Runs range and list projections on the block, both direct and lazy-wrapped. */
    private static void testProject(Block block, Class<? extends Block> expectedResultType)
    {
        testProjectRange(block, expectedResultType, createProjection());
        testProjectList(block, expectedResultType, createProjection());
        testProjectRange(lazyWrapper(block), expectedResultType, createProjection());
        testProjectList(lazyWrapper(block), expectedResultType, createProjection());
    }

    /** Same matrix as testProject, but each variant must throw NegativeValueException. */
    private static void testProjectFails(Block block, Class<? extends Block> expectedResultType)
    {
        assertThrows(NegativeValueException.class, () -> testProjectRange(block, expectedResultType, createProjection()));
        assertThrows(NegativeValueException.class, () -> testProjectList(block, expectedResultType, createProjection()));
        assertThrows(NegativeValueException.class, () -> testProjectRange(lazyWrapper(block), expectedResultType, createProjection()));
        assertThrows(NegativeValueException.class, () -> testProjectList(lazyWrapper(block), expectedResultType, createProjection()));
    }

    /** Projects positions [5, 15) and checks values and the result block type. */
    private static void testProjectRange(Block block, Class<? extends Block> expectedResultType, DictionaryAwarePageProjection projection)
    {
        Block result = projection.project(null, new Page(block), SelectedPositions.positionsRange(5, 10));
        assertBlockEquals(
                BIGINT,
                result,
                block.getRegion(5, 10));
        assertInstanceOf(result, expectedResultType);
    }

    /** Projects an explicit position list and checks values and the result block type. */
    private static void testProjectList(Block block, Class<? extends Block> expectedResultType, DictionaryAwarePageProjection projection)
    {
        int[] positions = {0, 2, 4, 6, 8, 10};
        Block result = projection.project(null, new Page(block), SelectedPositions.positionsList(positions, 0, positions.length));
        assertBlockEquals(
                BIGINT,
                result,
                block.copyPositions(new IntArrayList(positions)));
        assertInstanceOf(result, expectedResultType);
    }

    private static DictionaryAwarePageProjection createProjection()
    {
        return new DictionaryAwarePageProjection(
                new TestPageProjection(),
                block -> randomDictionaryId());
    }

    /** Defers materialization so lazy-loading paths are exercised too. */
    private static LazyBlock lazyWrapper(Block block)
    {
        return new LazyBlock(block.getPositionCount(), lazyBlock -> lazyBlock.setBlock(block));
    }

    /**
     * Identity-copy projection over channel 3's BIGINT values that throws
     * {@link NegativeValueException} on any negative input.
     */
    private static class TestPageProjection
            implements PageProjection
    {
        @Override
        public Type getType()
        {
            return BIGINT;
        }

        @Override
        public boolean isDeterministic()
        {
            return true;
        }

        @Override
        public InputChannels getInputChannels()
        {
            return new InputChannels(3);
        }

        @Override
        public Block project(ConnectorSession session, Page page, SelectedPositions selectedPositions)
        {
            Block block = page.getBlock(0);
            BlockBuilder blockBuilder = BIGINT.createBlockBuilder(new BlockBuilderStatus(), selectedPositions.size());
            if (selectedPositions.isList()) {
                int offset = selectedPositions.getOffset();
                int[] positions = selectedPositions.getPositions();
                for (int index = offset; index < offset + selectedPositions.size(); index++) {
                    blockBuilder.writeLong(verifyPositive(block.getLong(positions[index], 0)));
                }
            }
            else {
                int offset = selectedPositions.getOffset();
                for (int position = offset; position < offset + selectedPositions.size(); position++) {
                    blockBuilder.writeLong(verifyPositive(block.getLong(position, 0)));
                }
            }
            return blockBuilder.build();
        }

        private static long verifyPositive(long value)
        {
            if (value < 0) {
                throw new NegativeValueException(value);
            }
            return value;
        }
    }

    /** Thrown by TestPageProjection when it encounters a negative value. */
    private static class NegativeValueException
            extends RuntimeException
    {
        public NegativeValueException(long value)
        {
            super("value is negative: " + value);
        }
    }
}
jiangyifangh/presto
presto-main/src/test/java/com/facebook/presto/operator/project/TestDictionaryAwarePageProjection.java
Java
apache-2.0
10,233
<?php

include_once __DIR__ . '/lib/SubscriptionContainer.lib.php';

/**
 * JSON-over-HTTP handler that deletes a subscription container for a
 * given member/dataset pair.
 *
 * Usage: feed the raw request body to setParam(), then call set() to
 * validate the input, check the shared AUTH_TOKEN, and perform the
 * deletion. Every outcome is returned as a JSON-encoded
 * ['result' => bool, ...] payload.
 */
class DeleteSubscriptionContainer
{
    private $obj;        // SubscriptionContainerLib backend
    private $mid;        // member id from the request
    private $did;        // dataset id from the request
    private $result;     // last response payload (array)
    private $authToken;  // caller-supplied auth token

    public function __construct()
    {
        $this->obj = new SubscriptionContainerLib;
    }

    /**
     * Decodes the raw JSON request body and captures the fields this
     * endpoint needs. Missing keys decode to null and are rejected later
     * by set().
     */
    public function setParam($rawdata)
    {
        $decoded = json_decode($rawdata, true);

        $this->authToken = $decoded['authToken'];
        $this->did       = $decoded['datasetId'];
        $this->mid       = $decoded['memberId'];
    }

    /**
     * Validates the captured parameters and, when everything checks out,
     * delegates the deletion to the backend. Returns a JSON string.
     */
    public function set()
    {
        // isset() is false for null values too, so a null token/id trips
        // this combined check first; the per-field checks below keep the
        // original per-field error messages for callers that rely on them.
        if (!isset($this->mid) || !isset($this->authToken) || !isset($this->did)) {
            return $this->failure('Input keys not fullfill');
        }

        if (is_null($this->authToken)) {
            return $this->failure('No authorization key');
        }

        if (is_null($this->did)) {
            return $this->failure('No dataset id');
        }

        if (is_null($this->mid)) {
            return $this->failure('No member id');
        }

        // Strict comparison against the service-wide shared secret.
        if ($this->authToken !== AUTH_TOKEN) {
            return $this->failure('Authorization fail');
        }

        $this->result = $this->obj->deleteSubscriptionContainer($this->mid, $this->did);

        return json_encode($this->result);
    }

    /**
     * Records a failure payload in $this->result (matching the original
     * behavior) and returns it JSON-encoded.
     */
    private function failure($message)
    {
        $this->result = ['result' => false, 'errorMessage' => $message];

        return json_encode($this->result);
    }
}
taipeicity/TPElineService
v2/php/restfulapi/v1/ws/ws_sc04.php
PHP
apache-2.0
1,615
/*
 * JBoss, Home of Professional Open Source
 * Copyright 2010, Red Hat Middleware LLC, and individual contributors
 * by the @authors tag. See the copyright.txt in the distribution for a
 * full listing of individual contributors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jboss.shrinkwrap.impl.base.classloader;

import java.io.ByteArrayOutputStream;
import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.util.logging.Logger;

import org.jboss.shrinkwrap.api.Archive;
import org.jboss.shrinkwrap.api.GenericArchive;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.asset.Asset;
import org.jboss.shrinkwrap.api.asset.StringAsset;
import org.jboss.shrinkwrap.api.classloader.ShrinkWrapClassLoader;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.jboss.shrinkwrap.impl.base.io.IOUtil;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

/**
 * Ensures the {@link ShrinkWrapClassLoader} is working as contracted:
 * classes and resources packaged inside a ShrinkWrap {@link Archive} can be
 * loaded through the ClassLoader, and URL/stream semantics for directories,
 * missing assets and repeated reads behave like a regular ClassLoader.
 *
 * @author <a href="mailto:aslak@redhat.com">Aslak Knutsen</a>
 * @author <a href="mailto:andrew.rubinger@jboss.org">ALR</a>
 * @version $Revision: $
 */
public class ShrinkWrapClassLoaderTestCase {

    // -------------------------------------------------------------------------------------||
    // Class Members ----------------------------------------------------------------------||
    // -------------------------------------------------------------------------------------||

    /**
     * Logger
     */
    private static final Logger log = Logger.getLogger(ShrinkWrapClassLoaderTestCase.class.getName());

    /**
     * Class to be accessed via a ShrinkWrap ClassLoader
     */
    private static final Class<?> applicationClassLoaderClass = LoadedTestClass.class;

    /**
     * Archive to be read via a {@link ShrinkWrapClassLoaderTestCase#shrinkWrapClassLoader}.
     * Contains only the single test class above.
     */
    private static final JavaArchive archive = ShrinkWrap.create(JavaArchive.class).addClass(
        applicationClassLoaderClass);

    // -------------------------------------------------------------------------------------||
    // Instance Members -------------------------------------------------------------------||
    // -------------------------------------------------------------------------------------||

    /**
     * ClassLoader used to load {@link ShrinkWrapClassLoaderTestCase#applicationClassLoaderClass}
     */
    private ClassLoader shrinkWrapClassLoader;

    // -------------------------------------------------------------------------------------||
    // Lifecycle --------------------------------------------------------------------------||
    // -------------------------------------------------------------------------------------||

    /**
     * Creates the {@link ShrinkWrapClassLoaderTestCase#shrinkWrapClassLoader} used to load classes from an
     * {@link Archive}. The {@link ClassLoader} will be isolated from the application classpath by specifying a null
     * parent explicitly.
     */
    @Before
    public void createClassLoader() {
        // Null parent => the archive is the only source of classes/resources.
        shrinkWrapClassLoader = new ShrinkWrapClassLoader((ClassLoader) null, archive);
    }

    /**
     * Closes resources associated with the {@link ShrinkWrapClassLoaderTestCase#shrinkWrapClassLoader}
     */
    @After
    public void closeClassLoader() {
        if (shrinkWrapClassLoader instanceof Closeable) {
            try {
                ((Closeable) shrinkWrapClassLoader).close();
            } catch (final IOException e) {
                // Best-effort cleanup; a failure to close shouldn't fail the test run.
                log.warning("Could not close the " + shrinkWrapClassLoader + ": " + e);
            }
        }
    }

    // -------------------------------------------------------------------------------------||
    // Tests ------------------------------------------------------------------------------||
    // -------------------------------------------------------------------------------------||

    /**
     * Ensures we can load a Class instance from the {@link ShrinkWrapClassLoader}
     */
    @Test
    public void shouldBeAbleToLoadClassFromArchive() throws ClassNotFoundException {
        // Load the test class from the CL (initialize=false; we only need the Class object)
        final Class<?> loadedTestClass = Class.forName(applicationClassLoaderClass.getName(), false,
            shrinkWrapClassLoader);

        final ClassLoader loadedTestClassClassLoader = loadedTestClass.getClassLoader();
        log.info("Got " + loadedTestClass + " from " + loadedTestClassClassLoader);

        // Assertions: the class must come from the archive CL, and therefore must be a
        // *different* Class instance than the one on the application classpath.
        Assert.assertNotNull("Test class could not be found via the ClassLoader", loadedTestClass);

        Assert.assertSame("Test class should have been loaded via the archive ClassLoader", shrinkWrapClassLoader,
            loadedTestClassClassLoader);

        Assert.assertNotSame("Class Loaded from the CL should not be the same as the one on the appCL",
            loadedTestClass, applicationClassLoaderClass);
    }

    /**
     * Ensures that we can open up directory content as obtained via a {@link URL} from the
     * {@link ShrinkWrapClassLoader} (ie. should return null, not throw an exception)
     *
     * SHRINKWRAP-306
     */
    @Test
    public void shouldBeAbleToOpenStreamOnDirectoryUrl() throws IOException {
        // Make a new Archive with some content in a directory
        final String nestedResourceName = "nested/test";
        final Asset testAsset = new StringAsset("testContent");
        final GenericArchive archive = ShrinkWrap.create(GenericArchive.class).add(testAsset, nestedResourceName);

        // Make a CL to load the content
        final ClassLoader swCl = new ShrinkWrapClassLoader(archive);

        // Get the URL to the parent directory by resolving "../" against the nested resource
        final URL nestedResourceUrl = swCl.getResource(nestedResourceName);
        final URL nestedResourceUpALevelUrl = new URL(nestedResourceUrl, "../");

        // openStream on the URL to the parent directory; should return null, not throw an exception
        final InputStream in = nestedResourceUpALevelUrl.openStream();
        Assert.assertNull("URLs pointing to a directory should openStream as null", in);
    }

    /**
     * Ensures that we can open up an asset that doesn't exist via a {@link URL} from the {@link ShrinkWrapClassLoader}
     * (ie. should throw {@link FileNotFoundException}
     *
     * SHRINKWRAP-308
     */
    @Test(expected = FileNotFoundException.class)
    public void shouldNotBeAbleToOpenStreamOnNonexistantAsset() throws IOException {
        // Make a new Archive with some content in a directory
        final String nestedResourceName = "nested/test";
        final Asset testAsset = new StringAsset("testContent");
        final GenericArchive archive = ShrinkWrap.create(GenericArchive.class).add(testAsset, nestedResourceName);

        // Make a CL to load the content
        final ClassLoader swCl = new ShrinkWrapClassLoader(archive);

        // Get the URL to something that doesn't exist
        final URL nestedResourceUrl = swCl.getResource(nestedResourceName);
        final URL nestedResourceThatDoesntExistUrl = new URL(nestedResourceUrl, "../fake");

        // openStream on the URL that doesn't exist should throw FNFE
        nestedResourceThatDoesntExistUrl.openStream();
    }

    /**
     * Ensures we can load a resource by name from the {@link ShrinkWrapClassLoader}
     */
    @Test
    public void shouldBeAbleToLoadResourceFromArchive() {
        // Load the class as a resource
        final URL resource = shrinkWrapClassLoader.getResource(getResourceNameOfClass(applicationClassLoaderClass));

        // Assertions
        Assert.assertNotNull(resource);
    }

    /**
     * SHRINKWRAP-237: Reading the same resource multiple times cause IOException
     */
    @Test
    public void shouldBeAbleToLoadAResourceFromArchiveMultipleTimes() throws Exception {
        String resourceName = getResourceNameOfClass(applicationClassLoaderClass);

        // Load the class as a resource
        URL resource = shrinkWrapClassLoader.getResource(resourceName);

        // Assertions
        Assert.assertNotNull(resource);

        // Read the stream until EOF
        IOUtil.copyWithClose(resource.openStream(), new ByteArrayOutputStream());

        // Load the class as a resource for the second time
        resource = shrinkWrapClassLoader.getResource(resourceName);

        // Assertions
        Assert.assertNotNull(resource);

        // SHRINKWRAP-237: This throws IOException: Stream closed
        IOUtil.copyWithClose(resource.openStream(), new ByteArrayOutputStream());
    }

    // -------------------------------------------------------------------------------------||
    // Internal Helper Methods ------------------------------------------------------------||
    // -------------------------------------------------------------------------------------||

    /**
     * Obtains the resource name for a given class, e.g.
     * {@code com.foo.Bar -> com/foo/Bar.class}
     */
    private static String getResourceNameOfClass(final Class<?> clazz) {
        assert clazz != null : "clazz must be specified";
        final StringBuilder sb = new StringBuilder();
        final String className = clazz.getName().replace('.', '/');
        sb.append(className);
        sb.append(".class");
        return sb.toString();
    }
}
chirino/shrinkwrap
impl-base/src/test/java/org/jboss/shrinkwrap/impl/base/classloader/ShrinkWrapClassLoaderTestCase.java
Java
apache-2.0
9,879
/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.zookeeper;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.security.Permission;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;

/**
 * Tests for {@code ZooKeeperMainServer}: exercises its command line
 * (a znode delete round-trip against a mini ZK cluster) and its quorum
 * host:port parsing.
 */
@Category(SmallTests.class)
public class TestZooKeeperMainServer {
  // ZKMS calls System.exit. Catch the call and prevent exit using trick described up in
  // http://stackoverflow.com/questions/309396/java-how-to-test-methods-that-call-system-exit

  /**
   * Thrown by our SecurityManager instead of letting the JVM exit; carries the
   * would-be exit status so tests can observe it.
   */
  protected static class ExitException extends SecurityException {
    private static final long serialVersionUID = 1L;
    public final int status;
    public ExitException(int status) {
      super("There is no escape!");
      this.status = status;
    }
  }

  /**
   * SecurityManager that permits everything except JVM exit, which it converts
   * into an {@link ExitException}.
   */
  private static class NoExitSecurityManager extends SecurityManager {
    @Override
    public void checkPermission(Permission perm) {
      // allow anything.
    }

    @Override
    public void checkPermission(Permission perm, Object context) {
      // allow anything.
    }

    @Override
    public void checkExit(int status) {
      // Run the normal checks first, then veto the exit by throwing.
      super.checkExit(status);
      throw new ExitException(status);
    }
  }

  /**
   * We need delete of a znode to work at least.
   * @throws Exception
   */
  @Test
  public void testCommandLineWorks() throws Exception {
    // Install the no-exit manager BEFORE invoking main(), which calls System.exit.
    System.setSecurityManager(new NoExitSecurityManager());
    HBaseTestingUtility htu = new HBaseTestingUtility();
    htu.getConfiguration().setInt(HConstants.ZK_SESSION_TIMEOUT, 1000);
    htu.startMiniZKCluster();
    try {
      ZooKeeperWatcher zkw = htu.getZooKeeperWatcher();
      String znode = "/testCommandLineWorks";
      ZKUtil.createWithParents(zkw, znode, HConstants.EMPTY_BYTE_ARRAY);
      ZKUtil.checkExists(zkw, znode);
      boolean exception = false;
      try {
        ZooKeeperMainServer.main(new String [] {"-server",
          "localhost:" + htu.getZkCluster().getClientPort(), "delete", znode});
      } catch (ExitException ee) {
        // ZKMS calls System.exit which should trigger this exception.
        exception = true;
      }
      assertTrue(exception);
      // checkExists returns -1 when the znode is gone, proving the delete ran.
      assertEquals(-1, ZKUtil.checkExists(zkw, znode));
    } finally {
      htu.shutdownMiniZKCluster();
      System.setSecurityManager(null); // or save and restore original
    }
  }

  @Test
  public void testHostPortParse() {
    ZooKeeperMainServer parser = new ZooKeeperMainServer();
    Configuration c = HBaseConfiguration.create();
    // Default config: single localhost quorum member on the configured client port.
    assertEquals("localhost:" + c.get(HConstants.ZOOKEEPER_CLIENT_PORT), parser.parse(c));
    final String port = "1234";
    c.set(HConstants.ZOOKEEPER_CLIENT_PORT, port);
    c.set("hbase.zookeeper.quorum", "example.com");
    assertEquals("example.com:" + port, parser.parse(c));
    c.set("hbase.zookeeper.quorum", "example1.com,example2.com,example3.com");
    String ensemble = parser.parse(c);
    // Member order is unspecified, so match the three-member ensemble with a regex.
    assertTrue(port,
      ensemble.matches("(example[1-3]\\.com:1234,){2}example[1-3]\\.com:" + port));
  }
}
grokcoder/pbase
hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperMainServer.java
Java
apache-2.0
3,916
/**
 * <copyright>
 * </copyright>
 *
 * $Id$
 */
package org.wso2.developerstudio.eclipse.ds.impl;

import java.math.BigInteger;

import org.eclipse.emf.common.notify.Notification;
import org.eclipse.emf.common.notify.NotificationChain;
import org.eclipse.emf.ecore.EClass;
import org.eclipse.emf.ecore.InternalEObject;
import org.eclipse.emf.ecore.impl.ENotificationImpl;
import org.eclipse.emf.ecore.impl.EObjectImpl;
import org.eclipse.emf.ecore.util.BasicFeatureMap;
import org.eclipse.emf.ecore.util.FeatureMap;
import org.eclipse.emf.ecore.util.InternalEList;
import org.wso2.developerstudio.eclipse.ds.DsPackage;
import org.wso2.developerstudio.eclipse.ds.ExcelQuery;
import org.wso2.developerstudio.eclipse.ds.HasHeader;
import org.wso2.developerstudio.eclipse.ds.MaxRowCount;
import org.wso2.developerstudio.eclipse.ds.StartingRow;
import org.wso2.developerstudio.eclipse.ds.WorkBookName;

/**
 * <!-- begin-user-doc -->
 * An implementation of the model object '<em><b>Excel Query</b></em>'.
 *
 * NOTE(review): this is EMF-generated code (see the {@code @generated} tags);
 * all feature accessors below delegate to a single mixed {@link FeatureMap},
 * which is why getters/setters read and write through {@link #getMixed()}.
 * Do not hand-edit the generated members — regenerate from the model instead.
 * <!-- end-user-doc -->
 * <p>
 * The following features are implemented:
 * <ul>
 * <li>{@link org.wso2.developerstudio.eclipse.ds.impl.ExcelQueryImpl#getMixed <em>Mixed</em>}</li>
 * <li>{@link org.wso2.developerstudio.eclipse.ds.impl.ExcelQueryImpl#getWorkbookname <em>Workbookname</em>}</li>
 * <li>{@link org.wso2.developerstudio.eclipse.ds.impl.ExcelQueryImpl#getHasheader <em>Hasheader</em>}</li>
 * <li>{@link org.wso2.developerstudio.eclipse.ds.impl.ExcelQueryImpl#getStartingrow <em>Startingrow</em>}</li>
 * <li>{@link org.wso2.developerstudio.eclipse.ds.impl.ExcelQueryImpl#getMaxrowcount <em>Maxrowcount</em>}</li>
 * </ul>
 * </p>
 *
 * @generated
 */
public class ExcelQueryImpl extends EObjectImpl implements ExcelQuery {
	/**
	 * The cached value of the '{@link #getMixed() <em>Mixed</em>}' attribute list.
	 * Lazily created in {@link #getMixed()}; backing store for every feature.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @see #getMixed()
	 * @generated
	 * @ordered
	 */
	protected FeatureMap mixed;

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	protected ExcelQueryImpl() {
		super();
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	protected EClass eStaticClass() {
		return DsPackage.Literals.EXCEL_QUERY;
	}

	/**
	 * Returns the mixed feature map, creating it on first use.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public FeatureMap getMixed() {
		if (mixed == null) {
			mixed = new BasicFeatureMap(this, DsPackage.EXCEL_QUERY__MIXED);
		}
		return mixed;
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public WorkBookName getWorkbookname() {
		return (WorkBookName) getMixed().get(DsPackage.Literals.EXCEL_QUERY__WORKBOOKNAME, true);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public NotificationChain basicSetWorkbookname(WorkBookName newWorkbookname, NotificationChain msgs) {
		return ((FeatureMap.Internal) getMixed()).basicAdd(DsPackage.Literals.EXCEL_QUERY__WORKBOOKNAME,
				newWorkbookname, msgs);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public void setWorkbookname(WorkBookName newWorkbookname) {
		((FeatureMap.Internal) getMixed()).set(DsPackage.Literals.EXCEL_QUERY__WORKBOOKNAME, newWorkbookname);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public HasHeader getHasheader() {
		return (HasHeader) getMixed().get(DsPackage.Literals.EXCEL_QUERY__HASHEADER, true);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public NotificationChain basicSetHasheader(HasHeader newHasheader, NotificationChain msgs) {
		return ((FeatureMap.Internal) getMixed()).basicAdd(DsPackage.Literals.EXCEL_QUERY__HASHEADER, newHasheader,
				msgs);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public void setHasheader(HasHeader newHasheader) {
		((FeatureMap.Internal) getMixed()).set(DsPackage.Literals.EXCEL_QUERY__HASHEADER, newHasheader);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public StartingRow getStartingrow() {
		return (StartingRow) getMixed().get(DsPackage.Literals.EXCEL_QUERY__STARTINGROW, true);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public NotificationChain basicSetStartingrow(StartingRow newStartingrow, NotificationChain msgs) {
		return ((FeatureMap.Internal) getMixed()).basicAdd(DsPackage.Literals.EXCEL_QUERY__STARTINGROW, newStartingrow,
				msgs);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public void setStartingrow(StartingRow newStartingrow) {
		((FeatureMap.Internal) getMixed()).set(DsPackage.Literals.EXCEL_QUERY__STARTINGROW, newStartingrow);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public MaxRowCount getMaxrowcount() {
		return (MaxRowCount) getMixed().get(DsPackage.Literals.EXCEL_QUERY__MAXROWCOUNT, true);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public NotificationChain basicSetMaxrowcount(MaxRowCount newMaxrowcount, NotificationChain msgs) {
		return ((FeatureMap.Internal) getMixed()).basicAdd(DsPackage.Literals.EXCEL_QUERY__MAXROWCOUNT, newMaxrowcount,
				msgs);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	public void setMaxrowcount(MaxRowCount newMaxrowcount) {
		((FeatureMap.Internal) getMixed()).set(DsPackage.Literals.EXCEL_QUERY__MAXROWCOUNT, newMaxrowcount);
	}

	/**
	 * Routes inverse-removal of contained features to the matching basicSet.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public NotificationChain eInverseRemove(InternalEObject otherEnd, int featureID, NotificationChain msgs) {
		switch (featureID) {
		case DsPackage.EXCEL_QUERY__MIXED:
			return ((InternalEList<?>) getMixed()).basicRemove(otherEnd, msgs);
		case DsPackage.EXCEL_QUERY__WORKBOOKNAME:
			return basicSetWorkbookname(null, msgs);
		case DsPackage.EXCEL_QUERY__HASHEADER:
			return basicSetHasheader(null, msgs);
		case DsPackage.EXCEL_QUERY__STARTINGROW:
			return basicSetStartingrow(null, msgs);
		case DsPackage.EXCEL_QUERY__MAXROWCOUNT:
			return basicSetMaxrowcount(null, msgs);
		}
		return super.eInverseRemove(otherEnd, featureID, msgs);
	}

	/**
	 * Reflective feature getter.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public Object eGet(int featureID, boolean resolve, boolean coreType) {
		switch (featureID) {
		case DsPackage.EXCEL_QUERY__MIXED:
			if (coreType)
				return getMixed();
			return ((FeatureMap.Internal) getMixed()).getWrapper();
		case DsPackage.EXCEL_QUERY__WORKBOOKNAME:
			return getWorkbookname();
		case DsPackage.EXCEL_QUERY__HASHEADER:
			return getHasheader();
		case DsPackage.EXCEL_QUERY__STARTINGROW:
			return getStartingrow();
		case DsPackage.EXCEL_QUERY__MAXROWCOUNT:
			return getMaxrowcount();
		}
		return super.eGet(featureID, resolve, coreType);
	}

	/**
	 * Reflective feature setter.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public void eSet(int featureID, Object newValue) {
		switch (featureID) {
		case DsPackage.EXCEL_QUERY__MIXED:
			((FeatureMap.Internal) getMixed()).set(newValue);
			return;
		case DsPackage.EXCEL_QUERY__WORKBOOKNAME:
			setWorkbookname((WorkBookName) newValue);
			return;
		case DsPackage.EXCEL_QUERY__HASHEADER:
			setHasheader((HasHeader) newValue);
			return;
		case DsPackage.EXCEL_QUERY__STARTINGROW:
			setStartingrow((StartingRow) newValue);
			return;
		case DsPackage.EXCEL_QUERY__MAXROWCOUNT:
			setMaxrowcount((MaxRowCount) newValue);
			return;
		}
		super.eSet(featureID, newValue);
	}

	/**
	 * Reflective feature unsetter (clears mixed, nulls the others).
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public void eUnset(int featureID) {
		switch (featureID) {
		case DsPackage.EXCEL_QUERY__MIXED:
			getMixed().clear();
			return;
		case DsPackage.EXCEL_QUERY__WORKBOOKNAME:
			setWorkbookname((WorkBookName) null);
			return;
		case DsPackage.EXCEL_QUERY__HASHEADER:
			setHasheader((HasHeader) null);
			return;
		case DsPackage.EXCEL_QUERY__STARTINGROW:
			setStartingrow((StartingRow) null);
			return;
		case DsPackage.EXCEL_QUERY__MAXROWCOUNT:
			setMaxrowcount((MaxRowCount) null);
			return;
		}
		super.eUnset(featureID);
	}

	/**
	 * Reflective "is set" check.
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public boolean eIsSet(int featureID) {
		switch (featureID) {
		case DsPackage.EXCEL_QUERY__MIXED:
			return mixed != null && !mixed.isEmpty();
		case DsPackage.EXCEL_QUERY__WORKBOOKNAME:
			return getWorkbookname() != null;
		case DsPackage.EXCEL_QUERY__HASHEADER:
			return getHasheader() != null;
		case DsPackage.EXCEL_QUERY__STARTINGROW:
			return getStartingrow() != null;
		case DsPackage.EXCEL_QUERY__MAXROWCOUNT:
			return getMaxrowcount() != null;
		}
		return super.eIsSet(featureID);
	}

	/**
	 * <!-- begin-user-doc -->
	 * <!-- end-user-doc -->
	 * @generated
	 */
	@Override
	public String toString() {
		if (eIsProxy())
			return super.toString();

		StringBuffer result = new StringBuffer(super.toString());
		result.append(" (mixed: ");
		result.append(mixed);
		result.append(')');
		return result.toString();
	}

} // ExcelQueryImpl
splinter/developer-studio
data-services/org.wso2.developerstudio.eclipse.ds/src/org/wso2/developerstudio/eclipse/ds/impl/ExcelQueryImpl.java
Java
apache-2.0
9,856
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import test


class KeyPairsV3Test(base.BaseComputeTest):
    """Gate tests for the Nova keypairs API (v3 by default).

    Subclasses at the bottom of the module re-run the same tests against
    the v2 API in JSON and XML form.
    """

    _api_version = 3

    @classmethod
    def setUpClass(cls):
        super(KeyPairsV3Test, cls).setUpClass()
        # Keypairs client is provisioned by the compute base class.
        cls.client = cls.keypairs_client

    def _delete_keypair(self, keypair_name):
        # Cleanup helper; the response body is not interesting here.
        resp, _ = self.client.delete_keypair(keypair_name)

    def _create_keypair(self, keypair_name, pub_key=None):
        """Create a keypair and register its deletion as test cleanup."""
        resp, body = self.client.create_keypair(keypair_name, pub_key)
        self.addCleanup(self._delete_keypair, keypair_name)
        return resp, body

    @test.attr(type='gate')
    def test_keypairs_create_list_delete(self):
        # Keypairs created should be available in the response list
        # Create 3 keypairs
        key_list = list()
        for i in range(3):
            k_name = data_utils.rand_name('keypair-')
            resp, keypair = self._create_keypair(k_name)
            # Need to pop these keys so that our compare doesn't fail later,
            # as the keypair dicts from list API doesn't have them.
            keypair.pop('private_key')
            keypair.pop('user_id')
            key_list.append(keypair)
        # Fetch all keypairs and verify the list
        # has all created keypairs
        resp, fetched_list = self.client.list_keypairs()
        self.assertEqual(200, resp.status)
        # We need to remove the extra 'keypair' element in the
        # returned dict. See comment in keypairs_client.list_keypairs()
        new_list = list()
        for keypair in fetched_list:
            new_list.append(keypair['keypair'])
        fetched_list = new_list
        # Now check if all the created keypairs are in the fetched list
        missing_kps = [kp for kp in key_list if kp not in fetched_list]
        self.assertFalse(missing_kps,
                         "Failed to find keypairs %s in fetched list"
                         % ', '.join(m_key['name'] for m_key in missing_kps))

    @test.attr(type='gate')
    def test_keypair_create_delete(self):
        # Keypair should be created, verified and deleted
        k_name = data_utils.rand_name('keypair-')
        resp, keypair = self._create_keypair(k_name)
        private_key = keypair['private_key']
        key_name = keypair['name']
        self.assertEqual(key_name, k_name,
                         "The created keypair name is not equal "
                         "to the requested name")
        self.assertTrue(private_key is not None,
                        "Field private_key is empty or not found.")

    @test.attr(type='gate')
    def test_get_keypair_detail(self):
        # Keypair should be created, Got details by name and deleted
        k_name = data_utils.rand_name('keypair-')
        resp, keypair = self._create_keypair(k_name)
        resp, keypair_detail = self.client.get_keypair(k_name)
        self.assertEqual(200, resp.status)
        self.assertIn('name', keypair_detail)
        self.assertIn('public_key', keypair_detail)
        self.assertEqual(keypair_detail['name'], k_name,
                         "The created keypair name is not equal "
                         "to requested name")
        public_key = keypair_detail['public_key']
        self.assertTrue(public_key is not None,
                        "Field public_key is empty or not found.")

    @test.attr(type='gate')
    def test_keypair_create_with_pub_key(self):
        # Keypair should be created with a given public key
        k_name = data_utils.rand_name('keypair-')
        pub_key = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCs"
                   "Ne3/1ILNCqFyfYWDeTKLD6jEXC2OQHLmietMWW+/vd"
                   "aZq7KZEwO0jhglaFjU1mpqq4Gz5RX156sCTNM9vRbw"
                   "KAxfsdF9laBYVsex3m3Wmui3uYrKyumsoJn2g9GNnG1P"
                   "I1mrVjZ61i0GY3khna+wzlTpCCmy5HNlrmbj3XLqBUpip"
                   "TOXmsnr4sChzC53KCd8LXuwc1i/CZPvF+3XipvAgFSE53pCt"
                   "LOeB1kYMOBaiUPLQTWXR3JpckqFIQwhIH0zoHlJvZE8hh90"
                   "XcPojYN56tI0OlrGqojbediJYD0rUsJu4weZpbn8vilb3JuDY+jws"
                   "snSA8wzBx3A/8y9Pp1B nova@ubuntu")
        resp, keypair = self._create_keypair(k_name, pub_key)
        # When importing an existing public key, no private key is generated.
        self.assertFalse('private_key' in keypair,
                         "Field private_key is not empty!")
        key_name = keypair['name']
        self.assertEqual(key_name, k_name,
                         "The created keypair name is not equal "
                         "to the requested name!")


class KeyPairsV2TestJSON(KeyPairsV3Test):
    # Same test suite, run against the v2 API.
    _api_version = 2


class KeyPairsV2TestXML(KeyPairsV2TestJSON):
    # Same v2 suite, exercised over the XML interface.
    _interface = 'xml'
Mirantis/tempest
tempest/api/compute/keypairs/test_keypairs.py
Python
apache-2.0
5,296
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.bookkeeper.common.router;

/**
 * A router based on hash values: marker specialization of {@link Router}
 * whose routing key is a {@code Long} hash computed from the input key.
 *
 * @param <K> the type of keys routed by this router
 */
public interface HashRouter<K> extends Router<K, Long> {
}
sijie/bookkeeper
stream/common/src/main/java/org/apache/bookkeeper/common/router/HashRouter.java
Java
apache-2.0
955
package org.apache.archiva.security;

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import org.apache.archiva.redback.authentication.AuthenticationException;
import org.apache.archiva.redback.authentication.AuthenticationResult;
import org.apache.archiva.redback.authorization.AuthorizationException;
import org.apache.archiva.redback.authorization.UnauthorizedException;
import org.apache.archiva.redback.policy.AccountLockedException;
import org.apache.archiva.redback.policy.MustChangePasswordException;
import org.apache.archiva.redback.system.SecuritySession;

import javax.servlet.http.HttpServletRequest;

/**
 * Authentication and authorization checks for servlet-based repository access.
 *
 * @version
 */
public interface ServletAuthenticator
{
    /**
     * Authentication check for users.
     *
     * @param request the incoming servlet request being authenticated
     * @param result the authentication result produced for the request's credentials
     * @return true if the request is authenticated
     * @throws AuthenticationException if the authentication process itself fails
     * @throws AccountLockedException if the account is locked
     * @throws MustChangePasswordException if the user must change their password first
     */
    boolean isAuthenticated( HttpServletRequest request, AuthenticationResult result )
        throws AuthenticationException, AccountLockedException, MustChangePasswordException;

    /**
     * Authorization check for valid users.
     *
     * @param request the incoming servlet request
     * @param securitySession the authenticated security session
     * @param repositoryId the id of the repository being accessed
     * @param permission the permission being requested (e.g. read/upload)
     * @return true if the session is authorized for the permission on the repository
     * @throws AuthorizationException if the authorization process itself fails
     * @throws UnauthorizedException if the user is not authorized
     */
    boolean isAuthorized( HttpServletRequest request, SecuritySession securitySession, String repositoryId,
                          String permission )
        throws AuthorizationException, UnauthorizedException;

    /**
     * Authorization check specific for user guest, which doesn't go through
     * HttpBasicAuthentication#getAuthenticationResult( HttpServletRequest request, HttpServletResponse response )
     * since no credentials are attached to the request.
     *
     * See also MRM-911
     *
     * @param principal the principal to check (typically the guest user)
     * @param repoId the id of the repository being accessed
     * @param permission the permission being requested
     * @return true if the principal is authorized for the permission on the repository
     * @throws UnauthorizedException if the principal is not authorized
     */
    boolean isAuthorized( String principal, String repoId, String permission )
        throws UnauthorizedException;
}
apache/archiva
archiva-modules/archiva-web/archiva-security/src/main/java/org/apache/archiva/security/ServletAuthenticator.java
Java
apache-2.0
2,862
import Resource from '@rancher/ember-api-store/models/resource';
import { get, computed } from '@ember/object';
import { reference } from '@rancher/ember-api-store/utils/denormalize';

// A target resource: resolves its owning project and derives
// display names and identifiers from the composite
// "<clusterId>:<projectShortId>" form of projectId.
export default Resource.extend({
  project: reference('projectId'),

  // Display name of the owning project.
  projectName: computed('project.displayName', function() {
    return get(this, 'project.displayName');
  }),

  // Display name of the cluster that owns the project.
  clusterName: computed('project.cluster.displayName', function() {
    return get(this, 'project.cluster.displayName');
  }),

  // First segment of the composite projectId is the cluster id.
  clusterId: computed('projectId', function() {
    return this.projectId.split(':')[0];
  }),

  // "<projectShortId>:<appId>" link target, or null when no project is set.
  appLink: computed('projectId', 'appId', function() {
    const id = this.projectId;

    if ( id ) {
      return `${ id.split(':')[1] }:${ this.appId }`;
    }

    return null;
  }),
});
westlywright/ui
app/models/target.js
JavaScript
apache-2.0
821
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.druid.query.aggregation.cardinality.types; import org.apache.druid.java.util.common.IAE; import org.apache.druid.query.dimension.ColumnSelectorStrategyFactory; import org.apache.druid.segment.ColumnValueSelector; import org.apache.druid.segment.column.ColumnCapabilities; public class CardinalityAggregatorColumnSelectorStrategyFactory implements ColumnSelectorStrategyFactory<CardinalityAggregatorColumnSelectorStrategy> { @Override public CardinalityAggregatorColumnSelectorStrategy makeColumnSelectorStrategy( ColumnCapabilities capabilities, ColumnValueSelector selector ) { switch (capabilities.getType()) { case STRING: return new StringCardinalityAggregatorColumnSelectorStrategy(); case LONG: return new LongCardinalityAggregatorColumnSelectorStrategy(); case FLOAT: return new FloatCardinalityAggregatorColumnSelectorStrategy(); case DOUBLE: return new DoubleCardinalityAggregatorColumnSelectorStrategy(); default: throw new IAE("Cannot create query type helper from invalid type [%s]", capabilities.asTypeString()); } } }
nishantmonu51/druid
processing/src/main/java/org/apache/druid/query/aggregation/cardinality/types/CardinalityAggregatorColumnSelectorStrategyFactory.java
Java
apache-2.0
1,966
// Copyright 2014 The Souper Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "klee/Expr.h"
#include "klee/util/ExprPPrinter.h"
#include "klee/util/ExprSMTLIBPrinter.h"
#include "souper/Extractor/ExprBuilder.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Support/CommandLine.h"

using namespace klee;
using namespace souper;

namespace {

static llvm::cl::opt<bool> DumpKLEEExprs(
    "dump-klee-exprs",
    llvm::cl::desc("Dump KLEE expressions after SMTLIB queries"),
    llvm::cl::init(false));

// Translates Souper's Inst IR into KLEE expressions and renders them as
// SMT-LIB queries. Caches per-Inst expressions in ExprMap; Arrays/Vars hold
// one symbolic array per Var/Hole Inst, kept in parallel so model variables
// can be reported back to the caller.
class KLEEBuilder : public ExprBuilder {
  UniqueNameSet ArrayNames;
  std::vector<std::unique_ptr<Array>> Arrays;
  std::map<Inst *, ref<Expr>> ExprMap;
  std::vector<Inst *> Vars;

public:
  KLEEBuilder(InstContext &IC) : ExprBuilder(IC) {}

  // Renders the candidate replacement expression (no precondition) in KLEE's
  // pretty-printed form; returns "" when no candidate can be built.
  std::string GetExprStr(const BlockPCs &BPCs,
                         const std::vector<InstMapping> &PCs, InstMapping Mapping,
                         std::vector<Inst *> *ModelVars, bool Negate, bool DropUB) override {
    Inst *Cand = GetCandidateExprForReplacement(BPCs, PCs, Mapping,
                                                /*Precondition=*/0, Negate, DropUB);
    if (!Cand)
      return std::string();
    // Pre-fill the expression cache iteratively to avoid deep recursion in get().
    prepopulateExprMap(Cand);
    ref<Expr> E = get(Cand);
    std::string SStr;
    llvm::raw_string_ostream SS(SStr);
    std::unique_ptr<ExprPPrinter> PP(ExprPPrinter::create(SS));
    PP->setForceNoLineBreaks(true);
    PP->scan(E);
    PP->print(E);
    return SS.str();
  }

  // Builds the full SMT-LIB query text for the candidate replacement.
  // When ModelVars is non-null, the arrays whose model values should be
  // retrieved are registered with the printer and the corresponding Inst*
  // are appended to *ModelVars (same order). Returns "" on failure.
  std::string BuildQuery(const BlockPCs &BPCs,
                         const std::vector<InstMapping> &PCs, InstMapping Mapping,
                         std::vector<Inst *> *ModelVars, Inst *Precondition, bool Negate,
                         bool DropUB) override {
    std::string SMTStr;
    llvm::raw_string_ostream SMTSS(SMTStr);
    ConstraintManager Manager;
    Inst *Cand = GetCandidateExprForReplacement(BPCs, PCs, Mapping,
                                                Precondition, Negate, DropUB);
    if (!Cand)
      return std::string();
    prepopulateExprMap(Cand);
    ref<Expr> E = get(Cand);
    Query KQuery(Manager, E);
    ExprSMTLIBPrinter Printer;
    Printer.setOutput(SMTSS);
    Printer.setQuery(KQuery);
    std::vector<const klee::Array *> Arr;
    if (ModelVars) {
      // Vars/Arrays are parallel vectors; a null Vars slot means that array
      // has no originating Inst and is skipped.
      for (unsigned I = 0; I != Vars.size(); ++I) {
        if (Vars[I]) {
          Arr.push_back(Arrays[I].get());
          ModelVars->push_back(Vars[I]);
        }
      }
      Printer.setArrayValuesToGet(Arr);
    }
    Printer.generateOutput();

    if (DumpKLEEExprs) {
      // Append the KLEE expression as an SMT-LIB comment for debugging.
      SMTSS << "; KLEE expression:\n; ";
      std::unique_ptr<ExprPPrinter> PP(ExprPPrinter::create(SMTSS));
      PP->setForceNoLineBreaks(true);
      PP->scan(E);
      PP->print(E);
      SMTSS << '\n';
    }
    return SMTSS.str();
  }

private:
  // Population count built as a sum of all zero-extended single-bit extracts.
  ref<Expr> countOnes(ref<Expr> L) {
    Expr::Width Width = L->getWidth();
    ref<Expr> Count = klee::ConstantExpr::alloc(llvm::APInt(Width, 0));
    for (unsigned i=0; i<Width; i++) {
      ref<Expr> Bit = ExtractExpr::create(L, i, Expr::Bool);
      ref<Expr> BitExt = ZExtExpr::create(Bit, Width);
      Count = AddExpr::create(Count, BitExt);
    }
    return Count;
  }

  // Left-folds a binary expression constructor F over >= 1 operands.
  ref<Expr> buildAssoc(
      std::function<ref<Expr>(ref<Expr>, ref<Expr>)> F,
      llvm::ArrayRef<Inst *> Ops) {
    ref<Expr> E = get(Ops[0]);
    for (Inst *I : llvm::ArrayRef<Inst *>(Ops.data()+1, Ops.size()-1)) {
      E = F(E, get(I));
    }
    return E;
  }

  // Translates a single Inst into a KLEE expression; operands are fetched
  // via get() (cached). NSW/NUW/NW and *Exact variants map to the plain
  // operation here — their UB constraints are handled elsewhere
  // (see the *UB helpers used by the overflow-flag cases below).
  ref<Expr> build(Inst *I) {
    const std::vector<Inst *> &Ops = I->orderedOps();
    switch (I->K) {
    case Inst::UntypedConst:
      assert(0 && "unexpected kind");
    case Inst::Const:
      return klee::ConstantExpr::alloc(I->Val);
    case Inst::Hole:
    case Inst::Var:
      return makeSizedArrayRead(I->Width, I->Name, I);
    case Inst::Phi: {
      const auto &PredExpr = I->B->PredVars;
      assert((PredExpr.size() || Ops.size() == 1) &&
             "there must be block predicates");
      ref<Expr> E = get(Ops[0]);
      // e.g. P2 ? (P1 ? Op1_Expr : Op2_Expr) : Op3_Expr
      for (unsigned J = 1; J < Ops.size(); ++J) {
        E = SelectExpr::create(get(PredExpr[J-1]), E, get(Ops[J]));
      }
      return E;
    }
    case Inst::Freeze:
      return get(Ops[0]);
    case Inst::Add:
      return buildAssoc(AddExpr::create, Ops);
    case Inst::AddNSW: {
      ref<Expr> Add = AddExpr::create(get(Ops[0]), get(Ops[1]));
      return Add;
    }
    case Inst::AddNUW: {
      ref<Expr> Add = AddExpr::create(get(Ops[0]), get(Ops[1]));
      return Add;
    }
    case Inst::AddNW: {
      ref<Expr> Add = AddExpr::create(get(Ops[0]), get(Ops[1]));
      return Add;
    }
    case Inst::Sub:
      return SubExpr::create(get(Ops[0]), get(Ops[1]));
    case Inst::SubNSW: {
      ref<Expr> Sub = SubExpr::create(get(Ops[0]), get(Ops[1]));
      return Sub;
    }
    case Inst::SubNUW: {
      ref<Expr> Sub = SubExpr::create(get(Ops[0]), get(Ops[1]));
      return Sub;
    }
    case Inst::SubNW: {
      ref<Expr> Sub = SubExpr::create(get(Ops[0]), get(Ops[1]));
      return Sub;
    }
    case Inst::Mul:
      return buildAssoc(MulExpr::create, Ops);
    case Inst::MulNSW: {
      ref<Expr> Mul = MulExpr::create(get(Ops[0]), get(Ops[1]));
      return Mul;
    }
    case Inst::MulNUW: {
      ref<Expr> Mul = MulExpr::create(get(Ops[0]), get(Ops[1]));
      return Mul;
    }
    case Inst::MulNW: {
      ref<Expr> Mul = MulExpr::create(get(Ops[0]), get(Ops[1]));
      return Mul;
    }

    // We introduce these extra checks here because KLEE invokes llvm::APInt's
    // div functions, which crash upon divide-by-zero.
    case Inst::UDiv:
    case Inst::SDiv:
    case Inst::UDivExact:
    case Inst::SDivExact:
    case Inst::URem:
    case Inst::SRem: { // Fall-through
      // If the second oprand is 0, then it definitely causes UB.
      // There are quite a few cases where KLEE folds operations into zero,
      // e.g., "sext i16 0 to i32", "0 + 0", "2 - 2", etc.  In all cases,
      // we skip building the corresponding KLEE expressions and just return
      // a constant zero.
      ref<Expr> R = get(Ops[1]);
      if (R->isZero()) {
        return klee::ConstantExpr::create(0, Ops[1]->Width);
      }

      switch (I->K) {
      default:
        break;
      case Inst::UDiv: {
        ref<Expr> Udiv = UDivExpr::create(get(Ops[0]), R);
        return Udiv;
      }
      case Inst::SDiv: {
        ref<Expr> Sdiv = SDivExpr::create(get(Ops[0]), R);
        return Sdiv;
      }
      case Inst::UDivExact: {
        ref<Expr> Udiv = UDivExpr::create(get(Ops[0]), R);
        return Udiv;
      }
      case Inst::SDivExact: {
        ref<Expr> Sdiv = SDivExpr::create(get(Ops[0]), R);
        return Sdiv;
      }
      case Inst::URem: {
        ref<Expr> Urem = URemExpr::create(get(Ops[0]), R);
        return Urem;
      }
      case Inst::SRem: {
        ref<Expr> Srem = SRemExpr::create(get(Ops[0]), R);
        return Srem;
      }
      // NOTE(review): this llvm_unreachable sits after the return-only cases
      // above, so it is dead code inside this inner switch; control that
      // reaches "default: break" falls through to the outer switch's tail.
      llvm_unreachable("unknown kind");
      }
    }

    case Inst::And:
      return buildAssoc(AndExpr::create, Ops);
    case Inst::Or:
      return buildAssoc(OrExpr::create, Ops);
    case Inst::Xor:
      return buildAssoc(XorExpr::create, Ops);
    case Inst::Shl: {
      ref<Expr> Result = ShlExpr::create(get(Ops[0]), get(Ops[1]));
      return Result;
    }
    case Inst::ShlNSW: {
      ref<Expr> Result = ShlExpr::create(get(Ops[0]), get(Ops[1]));
      return Result;
    }
    case Inst::ShlNUW: {
      ref<Expr> Result = ShlExpr::create(get(Ops[0]), get(Ops[1]));
      return Result;
    }
    case Inst::ShlNW: {
      ref<Expr> Result = ShlExpr::create(get(Ops[0]), get(Ops[1]));
      return Result;
    }
    case Inst::LShr: {
      ref<Expr> Result = LShrExpr::create(get(Ops[0]), get(Ops[1]));
      return Result;
    }
    case Inst::LShrExact: {
      ref<Expr> Result = LShrExpr::create(get(Ops[0]), get(Ops[1]));
      return Result;
    }
    case Inst::AShr: {
      ref<Expr> Result = AShrExpr::create(get(Ops[0]), get(Ops[1]));
      return Result;
    }
    case Inst::AShrExact: {
      ref<Expr> Result = AShrExpr::create(get(Ops[0]), get(Ops[1]));
      return Result;
    }
    case Inst::Select:
      return SelectExpr::create(get(Ops[0]), get(Ops[1]), get(Ops[2]));
    case Inst::ZExt:
      return ZExtExpr::create(get(Ops[0]), I->Width);
    case Inst::SExt:
      return SExtExpr::create(get(Ops[0]), I->Width);
    case Inst::Trunc:
      return ExtractExpr::create(get(Ops[0]), 0, I->Width);
    case Inst::Eq:
      return EqExpr::create(get(Ops[0]), get(Ops[1]));
    case Inst::Ne:
      return NeExpr::create(get(Ops[0]), get(Ops[1]));
    case Inst::Ult:
      return UltExpr::create(get(Ops[0]), get(Ops[1]));
    case Inst::Slt:
      return SltExpr::create(get(Ops[0]), get(Ops[1]));
    case Inst::Ule:
      return UleExpr::create(get(Ops[0]), get(Ops[1]));
    case Inst::Sle:
      return SleExpr::create(get(Ops[0]), get(Ops[1]));
    case Inst::CtPop:
      return countOnes(get(Ops[0]));
    case Inst::BSwap: {
      // Byte swap by concatenating the bytes of L in reverse order.
      ref<Expr> L = get(Ops[0]);
      constexpr unsigned bytelen = 8;
      ref<Expr> res = ExtractExpr::create(L, 0, bytelen);
      for (unsigned i = 1; i < L->getWidth() / bytelen; i++) {
        res = ConcatExpr::create(res, ExtractExpr::create(L, i * bytelen, bytelen));
      }
      return res;
    }
    case Inst::BitReverse: {
      // Bit reverse by concatenating the bits of L in reverse order.
      ref<Expr> L = get(Ops[0]);
      auto res = ExtractExpr::create(L, 0, 1);
      for (unsigned i = 1; i < L->getWidth(); i++) {
        auto tmp = ExtractExpr::create(L, i, 1);
        res = ConcatExpr::create(res, tmp);
      }
      return res;
    }
    case Inst::Cttz: {
      // Count trailing zeros: smear the lowest set bit leftwards (Shl-based
      // doubling), then the trailing-zero count is Width - popcount(smeared).
      ref<Expr> L = get(Ops[0]);
      unsigned Width = L->getWidth();
      ref<Expr> Val = L;
      for (unsigned i=0, j=0; j<Width/2; i++) {
        j = 1<<i;
        Val = OrExpr::create(Val, ShlExpr::create(Val,
                             klee::ConstantExpr::create(j, Width)));
      }
      return SubExpr::create(klee::ConstantExpr::create(Width, Width),
                             countOnes(Val));
    }
    case Inst::Ctlz: {
      // Count leading zeros: mirror of Cttz using logical right shifts.
      ref<Expr> L = get(Ops[0]);
      unsigned Width = L->getWidth();
      ref<Expr> Val = L;
      for (unsigned i=0, j=0; j<Width/2; i++) {
        j = 1<<i;
        Val = OrExpr::create(Val, LShrExpr::create(Val,
                             klee::ConstantExpr::create(j, Width)));
      }
      return SubExpr::create(klee::ConstantExpr::create(Width, Width),
                             countOnes(Val));
    }
    case Inst::FShl:
    case Inst::FShr: {
      // Funnel shift: shift the High:Low concatenation by ShAmt % Width and
      // extract the appropriate half (upper half for fshl, lower for fshr).
      unsigned IWidth = I->Width;
      ref<Expr> High = get(Ops[0]);
      ref<Expr> Low = get(Ops[1]);
      ref<Expr> ShAmt = get(Ops[2]);
      ref<Expr> ShAmtModWidth = URemExpr::create(ShAmt,
                                klee::ConstantExpr::create(IWidth, IWidth));
      ref<Expr> Concatenated = ConcatExpr::create(High, Low);
      unsigned CWidth = Concatenated->getWidth();
      ref<Expr> ShAmtModWidthZExt = ZExtExpr::create(ShAmtModWidth, CWidth);
      ref<Expr> Shifted = I->K == Inst::FShl
          ? ShlExpr::create(Concatenated, ShAmtModWidthZExt)
          : LShrExpr::create(Concatenated, ShAmtModWidthZExt);
      unsigned BitOffset = I->K == Inst::FShr ? 0 : IWidth;
      return ExtractExpr::create(Shifted, BitOffset, IWidth);
    }
    // Overflow flags: the *UB helpers yield "no overflow", so the flag is
    // its boolean negation (xor with 1).
    case Inst::SAddO:
      return XorExpr::create(get(addnswUB(I)),
                             klee::ConstantExpr::create(1, Expr::Bool));
    case Inst::UAddO:
      return XorExpr::create(get(addnuwUB(I)),
                             klee::ConstantExpr::create(1, Expr::Bool));
    case Inst::SSubO:
      return XorExpr::create(get(subnswUB(I)),
                             klee::ConstantExpr::create(1, Expr::Bool));
    case Inst::USubO:
      return XorExpr::create(get(subnuwUB(I)),
                             klee::ConstantExpr::create(1, Expr::Bool));
    case Inst::SMulO:
      return XorExpr::create(get(mulnswUB(I)),
                             klee::ConstantExpr::create(1, Expr::Bool));
    case Inst::UMulO:
      return XorExpr::create(get(mulnuwUB(I)),
                             klee::ConstantExpr::create(1, Expr::Bool));
    case Inst::ExtractValue: {
      // Ops[1] is a constant index selecting an element of the aggregate
      // produced by Ops[0].
      unsigned Index = Ops[1]->Val.getZExtValue();
      return get(Ops[0]->Ops[Index]);
    }
    case Inst::SAddSat: {
      // Signed saturating add: compute in Width+1 bits and clamp to
      // [smin, smax].
      ref<Expr> add = AddExpr::create(get(Ops[0]), get(Ops[1]));
      auto sextL = SExtExpr::create(get(Ops[0]), I->Width + 1);
      auto sextR = SExtExpr::create(get(Ops[1]), I->Width + 1);
      auto addExt = AddExpr::create(sextL, sextR);
      auto smin = klee::ConstantExpr::alloc(llvm::APInt::getSignedMinValue(I->Width));
      auto smax = klee::ConstantExpr::alloc(llvm::APInt::getSignedMaxValue(I->Width));
      auto sminExt = SExtExpr::create(smin, I->Width + 1);
      auto smaxExt = SExtExpr::create(smax, I->Width + 1);
      auto pred = SleExpr::create(addExt, sminExt);
      auto pred2 = SgeExpr::create(addExt, smaxExt);
      auto select2 = SelectExpr::create(pred2, smax, add);
      return SelectExpr::create(pred, smin, select2);
    }
    case Inst::UAddSat: {
      // Unsigned saturating add: max value on overflow.
      ref<Expr> add = AddExpr::create(get(Ops[0]), get(Ops[1]));
      return SelectExpr::create(get(addnuwUB(I)), add,
                                klee::ConstantExpr::alloc(llvm::APInt::getMaxValue(I->Width)));
    }
    case Inst::SSubSat: {
      // Signed saturating subtract: same clamping scheme as SAddSat.
      ref<Expr> sub = SubExpr::create(get(Ops[0]), get(Ops[1]));
      auto sextL = SExtExpr::create(get(Ops[0]), I->Width + 1);
      auto sextR = SExtExpr::create(get(Ops[1]), I->Width + 1);
      auto subExt = SubExpr::create(sextL, sextR);
      auto smin = klee::ConstantExpr::alloc(llvm::APInt::getSignedMinValue(I->Width));
      auto smax = klee::ConstantExpr::alloc(llvm::APInt::getSignedMaxValue(I->Width));
      auto sminExt = SExtExpr::create(smin, I->Width + 1);
      auto smaxExt = SExtExpr::create(smax, I->Width + 1);
      auto pred = SleExpr::create(subExt, sminExt);
      auto pred2 = SgeExpr::create(subExt, smaxExt);
      auto select2 = SelectExpr::create(pred2, smax, sub);
      return SelectExpr::create(pred, smin, select2);
    }
    case Inst::USubSat: {
      // Unsigned saturating subtract: min value (zero) on underflow.
      ref<Expr> sub = SubExpr::create(get(Ops[0]), get(Ops[1]));
      return SelectExpr::create(get(subnuwUB(I)), sub,
                                klee::ConstantExpr::alloc(llvm::APInt::getMinValue(I->Width)));
    }
    // Aggregate-producing overflow intrinsics are not built directly; their
    // components are accessed through ExtractValue above.
    case Inst::SAddWithOverflow:
    case Inst::UAddWithOverflow:
    case Inst::SSubWithOverflow:
    case Inst::USubWithOverflow:
    case Inst::SMulWithOverflow:
    case Inst::UMulWithOverflow:
    default:
      break;
    }
    llvm_unreachable("unknown kind");
  }

  // Cached accessor: builds the expression for I on first use and checks
  // that the built expression's width matches the Inst's declared width.
  ref<Expr> get(Inst *I) {
    ref<Expr> &E = ExprMap[I];
    if (E.isNull()) {
      E = build(I);
      assert(E->getWidth() == I->Width);
    }
    return E;
  }

  // get() is recursive. It already has problems with running out of stack
  // space with very trivial inputs, since there are a lot of IR instructions
  // it knows how to produce, and thus a lot of possible replacements.
  // But it has built-in caching. And recursion only kicks in if the Inst is not
  // found in cache. Thus we simply need to *try* to prepopulate the cache.
  // Note that we can't really split `get()` into `getSimple()` and
  // `getOrBuildRecursive()` because we won't reach every single of these Inst
  // because we only look at Ops...
  void prepopulateExprMap(Inst *Root) {
    // Collect all Inst that are reachable from this root. Go Use->Def.
    // An assumption is being made that there are no circular references.
    // Note that we really do want a simple vector, and do want duplicate
    // elements. In other words, if we have already added that Inst into vector,
    // we "move" it to the back of the vector. But without actually moving.
    llvm::SmallVector<Inst *, 32> AllInst;
    AllInst.emplace_back(Root);
    // Visit every Inst in the vector, but address them by index,
    // since we will be appending new entries at the end.
    for (size_t InstNum = 0; InstNum < AllInst.size(); InstNum++) {
      Inst *CurrInst = AllInst[InstNum];
      const std::vector<Inst *> &Ops = CurrInst->orderedOps();
      AllInst.insert(AllInst.end(), Ops.rbegin(), Ops.rend());
    }
    // And now, 'get()' every Inst, going in Def->Use direction.
    // That is, when we visit Inst N2, that has Ops N0 and N1,
    // the Inst's for N0 and N1 were already generated.
    llvm::for_each(llvm::reverse(AllInst), [this](Inst *CurrInst) {
      switch (CurrInst->K) {
      case Inst::UntypedConst:
      case Inst::SAddWithOverflow:
      case Inst::UAddWithOverflow:
      case Inst::SSubWithOverflow:
      case Inst::USubWithOverflow:
      case Inst::SMulWithOverflow:
      case Inst::UMulWithOverflow:
        return; // Avoid pre-generation for some Inst'ructions.
      default:
        break;
      }
      (void)get(CurrInst);
    });
  }

  // Allocates a fresh KLEE symbolic array of Width bits for a Var/Hole and
  // returns a read of it. Names are uniquified; empty names become "arr" and
  // leading-digit names get an "a" prefix to stay valid SMT-LIB symbols.
  // Records Origin in Vars so BuildQuery can map model values back to Insts.
  ref<Expr> makeSizedArrayRead(unsigned Width, llvm::StringRef Name, Inst *Origin) {
    std::string NameStr;
    if (Name.empty())
      NameStr = "arr";
    else if (Name[0] >= '0' && Name[0] <= '9')
      NameStr = ("a" + Name).str();
    else
      NameStr = Name;
    Arrays.emplace_back(
        new Array(ArrayNames.makeName(NameStr), 1, 0, 0, Expr::Int32, Width));
    Vars.push_back(Origin);

    UpdateList UL(Arrays.back().get(), 0);
    return ReadExpr::create(UL, klee::ConstantExpr::alloc(0, Expr::Int32));
  }
};

}

std::unique_ptr<ExprBuilder> souper::createKLEEBuilder(InstContext &IC) {
  return std::unique_ptr<ExprBuilder>(new KLEEBuilder(IC));
}
google/souper
lib/Extractor/KLEEBuilder.cpp
C++
apache-2.0
17,824
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.storm; import java.lang.reflect.Method; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Random; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.UnaryOperator; import org.apache.storm.blobstore.BlobStore; import org.apache.storm.cluster.ClusterStateContext; import org.apache.storm.cluster.ClusterUtils; import org.apache.storm.cluster.IStateStorage; import org.apache.storm.cluster.IStormClusterState; import org.apache.storm.daemon.Acker; import org.apache.storm.daemon.DaemonCommon; import org.apache.storm.daemon.Shutdownable; import org.apache.storm.daemon.StormCommon; import org.apache.storm.daemon.nimbus.Nimbus; import org.apache.storm.daemon.nimbus.Nimbus.StandaloneINimbus; import org.apache.storm.daemon.supervisor.ReadClusterState; import org.apache.storm.daemon.supervisor.StandaloneSupervisor; import org.apache.storm.daemon.supervisor.Supervisor; import 
org.apache.storm.executor.LocalExecutor; import org.apache.storm.generated.AlreadyAliveException; import org.apache.storm.generated.AuthorizationException; import org.apache.storm.generated.BeginDownloadResult; import org.apache.storm.generated.ClusterSummary; import org.apache.storm.generated.ComponentPageInfo; import org.apache.storm.generated.Credentials; import org.apache.storm.generated.GetInfoOptions; import org.apache.storm.generated.InvalidTopologyException; import org.apache.storm.generated.KeyAlreadyExistsException; import org.apache.storm.generated.KeyNotFoundException; import org.apache.storm.generated.KillOptions; import org.apache.storm.generated.ListBlobsResult; import org.apache.storm.generated.LogConfig; import org.apache.storm.generated.NimbusSummary; import org.apache.storm.generated.NotAliveException; import org.apache.storm.generated.OwnerResourceSummary; import org.apache.storm.generated.ProfileAction; import org.apache.storm.generated.ProfileRequest; import org.apache.storm.generated.ReadableBlobMeta; import org.apache.storm.generated.RebalanceOptions; import org.apache.storm.generated.SettableBlobMeta; import org.apache.storm.generated.StormTopology; import org.apache.storm.generated.SubmitOptions; import org.apache.storm.generated.SupervisorPageInfo; import org.apache.storm.generated.TopologyHistoryInfo; import org.apache.storm.generated.TopologyInfo; import org.apache.storm.generated.TopologyPageInfo; import org.apache.storm.generated.Nimbus.Iface; import org.apache.storm.generated.Nimbus.Processor; import org.apache.storm.messaging.IContext; import org.apache.storm.messaging.local.Context; import org.apache.storm.nimbus.ILeaderElector; import org.apache.storm.scheduler.INimbus; import org.apache.storm.scheduler.ISupervisor; import org.apache.storm.security.auth.IGroupMappingServiceProvider; import org.apache.storm.security.auth.ThriftConnectionType; import org.apache.storm.security.auth.ThriftServer; import org.apache.storm.task.IBolt; 
import org.apache.storm.testing.InProcessZookeeper; import org.apache.storm.testing.NonRichBoltTracker; import org.apache.storm.testing.TmpPath; import org.apache.storm.testing.TrackedTopology; import org.apache.storm.utils.ConfigUtils; import org.apache.storm.utils.DRPCClient; import org.apache.storm.utils.NimbusClient; import org.apache.storm.utils.Utils; import org.apache.storm.utils.ObjectReader; import org.apache.storm.utils.RegisteredGlobalState; import org.apache.storm.utils.StormCommonInstaller; import org.apache.storm.utils.Time; import org.apache.storm.utils.Time.SimulatedTime; import org.apache.thrift.TException; import org.json.simple.JSONValue; import org.json.simple.parser.ParseException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * A stand alone storm cluster that runs inside a single process. * It is intended to be used for testing. Both internal testing for * Apache Storm itself and for people building storm topologies. * * LocalCluster is an AutoCloseable so if you are using it in tests you can use * a try block to be sure it is shut down. * * try (LocalCluster cluster = new LocalCluster()) { * // Do some tests * } * // The cluster has been shut down. */ public class LocalCluster implements ILocalClusterTrackedTopologyAware, Iface { private static final Logger LOG = LoggerFactory.getLogger(LocalCluster.class); private static ThriftServer startNimbusDaemon(Map<String, Object> conf, Nimbus nimbus) { ThriftServer ret = new ThriftServer(conf, new Processor<>(nimbus), ThriftConnectionType.NIMBUS); LOG.info("Starting Nimbus server..."); new Thread(() -> ret.serve()).start(); return ret; } /** * Simple way to configure a LocalCluster to meet your needs. 
*/ public static class Builder { private int supervisors = 2; private int portsPerSupervisor = 3; private Map<String, Object> daemonConf = new HashMap<>(); private INimbus inimbus = null; private IGroupMappingServiceProvider groupMapper = null; private int supervisorSlotPortMin = 1024; private boolean nimbusDaemon = false; private UnaryOperator<Nimbus> nimbusWrapper = null; private BlobStore store = null; private IStormClusterState clusterState = null; private ILeaderElector leaderElector = null; private String trackId = null; private boolean simulateTime = false; /** * Set the number of supervisors the cluster should have. */ public Builder withSupervisors(int supervisors) { if (supervisors < 0) { throw new IllegalArgumentException("supervisors cannot be negative"); } this.supervisors = supervisors; return this; } /** * Set the number of slots/ports each supervisor should have */ public Builder withPortsPerSupervisor(int portsPerSupervisor) { if (portsPerSupervisor < 0) { throw new IllegalArgumentException("supervisor ports cannot be negative"); } this.portsPerSupervisor = portsPerSupervisor; return this; } /** * Set the base config that the daemons should use. */ public Builder withDaemonConf(Map<String, Object> conf) { if (conf != null) { this.daemonConf = new HashMap<>(conf); } return this; } /** * Add an single key/value config to the daemon conf */ public Builder withDaemonConf(String key, Object value) { this.daemonConf.put(key, value); return this; } /** * Override the INimbus instance that nimbus will use. */ public Builder withINimbus(INimbus inimbus) { this.inimbus = inimbus; return this; } /** * Override the code that maps users to groups for authorization. */ public Builder withGroupMapper(IGroupMappingServiceProvider groupMapper) { this.groupMapper = groupMapper; return this; } /** * When assigning ports to worker slots start at minPort. 
*/ public Builder withSupervisorSlotPortMin(Number minPort) { int port = 1024; if (minPort == null) { LOG.warn("Number is null... {}", minPort); } else { port = minPort.intValue(); } if (port <= 0) { throw new IllegalArgumentException("port must be positive"); } this.supervisorSlotPortMin = port; return this; } /** * Have the local nimbus actually launch a thrift server. This is intended to * be used mostly for internal storm testing. */ public Builder withNimbusDaemon() { return withNimbusDaemon(true); } /** * If nimbusDaemon is true the local nimbus will launch a thrift server. This is intended to * be used mostly for internal storm testing. */ public Builder withNimbusDaemon(Boolean nimbusDaemon) { if (nimbusDaemon == null) { nimbusDaemon = false; LOG.warn("nimbusDaemon is null"); } this.nimbusDaemon = nimbusDaemon; return this; } /** * Turn on simulated time in the cluster. This allows someone to simulate long periods of * time for timeouts etc when testing nimbus/supervisors themselves. NOTE: that this only * works for code that uses the {@link org.apache.storm.utils.Time} class for time management * so it will not work in all cases. */ public Builder withSimulatedTime() { return withSimulatedTime(true); } /** * Turn on simulated time in the cluster. This allows someone to simulate long periods of * time for timeouts etc when testing nimbus/supervisors themselves. NOTE: that this only * works for code that uses the {@link org.apache.storm.utils.Time} class for time management * so it will not work in all cases. */ public Builder withSimulatedTime(boolean simulateTime) { this.simulateTime = simulateTime; return this; } /** * Before nimbus is created/used call nimbusWrapper on it first and use the * result instead. This is intended for internal testing only, and it here to * allow a mocking framework to spy on the nimbus class. 
*/ public Builder withNimbusWrapper(UnaryOperator<Nimbus> nimbusWrapper) { this.nimbusWrapper = nimbusWrapper; return this; } /** * Use the following blobstore instead of the one in the config. * This is intended mostly for internal testing with Mocks. */ public Builder withBlobStore(BlobStore store) { this.store = store; return this; } /** * Use the following clusterState instead of the one in the config. * This is intended mostly for internal testing with Mocks. */ public Builder withClusterState(IStormClusterState clusterState) { this.clusterState = clusterState; return this; } /** * Use the following leaderElector instead of the one in the config. * This is intended mostly for internal testing with Mocks. */ public Builder withLeaderElector(ILeaderElector leaderElector) { this.leaderElector = leaderElector; return this; } /** * A tracked cluster can run tracked topologies. * See {@link org.apache.storm.testing.TrackedTopology} for more information * on tracked topologies. * @param trackId an arbitrary unique id that is used to keep track of tracked topologies */ public Builder withTracked(String trackId) { this.trackId = trackId; return this; } /** * A tracked cluster can run tracked topologies. * See {@link org.apache.storm.testing.TrackedTopology} for more information * on tracked topologies. */ public Builder withTracked() { this.trackId = Utils.uuid(); return this; } /** * @return the LocalCluster * @throws Exception on any one of many different errors. * This is intended for testing so yes it is ugly and throws Exception... 
*/ public LocalCluster build() throws Exception { return new LocalCluster(this); } } private static class TrackedStormCommon extends StormCommon { private final String id; public TrackedStormCommon(String id) { this.id = id; } @Override public IBolt makeAckerBoltImpl() { return new NonRichBoltTracker(new Acker(), id); } } private final Nimbus nimbus; //This is very private and does not need to be exposed private final AtomicInteger portCounter; private final Map<String, Object> daemonConf; private final List<Supervisor> supervisors; private final IStateStorage state; private final IStormClusterState clusterState; private final List<TmpPath> tmpDirs; private final InProcessZookeeper zookeeper; private final IContext sharedContext; private final ThriftServer thriftServer; private final String trackId; private final StormCommonInstaller commonInstaller; private final SimulatedTime time; /** * Create a default LocalCluster * @throws Exception on any error */ public LocalCluster() throws Exception { this(new Builder().withDaemonConf(Config.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS, true)); } /** * Create a LocalCluster that connects to an existing Zookeeper instance * @param zkHost the host for ZK * @param zkPort the port for ZK * @throws Exception on any error */ public LocalCluster(String zkHost, Long zkPort) throws Exception { this(new Builder().withDaemonConf(Config.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS, true) .withDaemonConf(Config.STORM_ZOOKEEPER_SERVERS, Arrays.asList(zkHost)) .withDaemonConf(Config.STORM_ZOOKEEPER_PORT, zkPort)); } @SuppressWarnings("deprecation") private LocalCluster(Builder builder) throws Exception { if (builder.simulateTime) { time = new SimulatedTime(); } else { time = null; } boolean success = false; try { this.trackId = builder.trackId; if (trackId != null) { ConcurrentHashMap<String, AtomicInteger> metrics = new ConcurrentHashMap<>(); metrics.put("spout-emitted", new AtomicInteger(0)); metrics.put("transferred", new AtomicInteger(0)); 
metrics.put("processed", new AtomicInteger(0)); this.commonInstaller = new StormCommonInstaller(new TrackedStormCommon(this.trackId)); LOG.warn("Adding tracked metrics for ID {}", this.trackId); RegisteredGlobalState.setState(this.trackId, metrics); LocalExecutor.setTrackId(this.trackId); } else { this.commonInstaller = null; } this.tmpDirs = new ArrayList<>(); this.supervisors = new ArrayList<>(); TmpPath nimbusTmp = new TmpPath(); this.tmpDirs.add(nimbusTmp); Map<String, Object> conf = ConfigUtils.readStormConfig(); conf.put(Config.TOPOLOGY_SKIP_MISSING_KRYO_REGISTRATIONS, true); conf.put(Config.TOPOLOGY_ENABLE_MESSAGE_TIMEOUTS, false); conf.put(Config.TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS, 50); conf.put(Config.STORM_CLUSTER_MODE, "local"); conf.put(Config.BLOBSTORE_SUPERUSER, System.getProperty("user.name")); conf.put(Config.BLOBSTORE_DIR, nimbusTmp.getPath()); InProcessZookeeper zookeeper = null; if (!builder.daemonConf.containsKey(Config.STORM_ZOOKEEPER_SERVERS)) { zookeeper = new InProcessZookeeper(); conf.put(Config.STORM_ZOOKEEPER_PORT, zookeeper.getPort()); conf.put(Config.STORM_ZOOKEEPER_SERVERS, Arrays.asList("localhost")); } this.zookeeper = zookeeper; conf.putAll(builder.daemonConf); this.daemonConf = new HashMap<>(conf); this.portCounter = new AtomicInteger(builder.supervisorSlotPortMin); ClusterStateContext cs = new ClusterStateContext(); this.state = ClusterUtils.mkStateStorage(this.daemonConf, null, null, cs); if (builder.clusterState == null) { clusterState = ClusterUtils.mkStormClusterState(this.daemonConf, null, cs); } else { this.clusterState = builder.clusterState; } //Set it for nimbus only conf.put(Config.STORM_LOCAL_DIR, nimbusTmp.getPath()); Nimbus nimbus = new Nimbus(conf, builder.inimbus == null ? 
new StandaloneINimbus() : builder.inimbus, this.getClusterState(), null, builder.store, builder.leaderElector, builder.groupMapper); if (builder.nimbusWrapper != null) { nimbus = builder.nimbusWrapper.apply(nimbus); } this.nimbus = nimbus; this.nimbus.launchServer(); IContext context = null; if (!ObjectReader.getBoolean(this.daemonConf.get(Config.STORM_LOCAL_MODE_ZMQ), false)) { context = new Context(); context.prepare(this.daemonConf); } this.sharedContext = context; this.thriftServer = builder.nimbusDaemon ? startNimbusDaemon(this.daemonConf, this.nimbus) : null; for (int i = 0; i < builder.supervisors; i++) { addSupervisor(builder.portsPerSupervisor, null, null); } //Wait for a leader to be elected (or topology submission can be rejected) try { long timeoutAfter = System.currentTimeMillis() + 10_000; while (!hasLeader()) { if (timeoutAfter > System.currentTimeMillis()) { throw new IllegalStateException("Timed out waiting for nimbus to become the leader"); } Thread.sleep(1); } } catch (Exception e) { //Ignore any exceptions we might be doing a test for authentication } success = true; } finally { if (!success) { close(); } } } private boolean hasLeader() throws AuthorizationException, TException { ClusterSummary summary = getNimbus().getClusterInfo(); if (summary.is_set_nimbuses()) { for (NimbusSummary sum: summary.get_nimbuses()) { if (sum.is_isLeader()) { return true; } } } return false; } /** * @return Nimbus itself so you can interact with it directly, if needed. */ public Nimbus getNimbus() { return nimbus; } /** * @return the base config for the daemons. */ public Map<String, Object> getDaemonConf() { return new HashMap<>(daemonConf); } public static final KillOptions KILL_NOW = new KillOptions(); static { KILL_NOW.set_wait_secs(0); } /** * When running a topology locally, for tests etc. It is helpful to be sure * that the topology is dead before the test exits. 
This is an AutoCloseable * topology that not only gives you access to the compiled StormTopology * but also will kill the topology when it closes. * * try (LocalTopology testTopo = cluster.submitTopology("testing", ...)) { * // Run Some test * } * // The topology has been killed */ public class LocalTopology extends StormTopology implements ILocalTopology { private static final long serialVersionUID = 6145919776650637748L; private final String topoName; public LocalTopology(String topoName, StormTopology topo) { super(topo); this.topoName = topoName; } @Override public void close() throws TException { killTopologyWithOpts(topoName, KILL_NOW); } } @Override public LocalTopology submitTopology(String topologyName, Map<String, Object> conf, StormTopology topology) throws TException { if (!Utils.isValidConf(conf)) { throw new IllegalArgumentException("Topology conf is not json-serializable"); } getNimbus().submitTopology(topologyName, null, JSONValue.toJSONString(conf), Utils.addVersions(topology)); ISubmitterHook hook = (ISubmitterHook) Utils.getConfiguredClass(conf, Config.STORM_TOPOLOGY_SUBMISSION_NOTIFIER_PLUGIN); if (hook != null) { TopologyInfo topologyInfo = Utils.getTopologyInfo(topologyName, null, conf); try { hook.notify(topologyInfo, conf, topology); } catch (IllegalAccessException e) { throw new RuntimeException(e); } } return new LocalTopology(topologyName, topology); } @Override public LocalTopology submitTopologyWithOpts(String topologyName, Map<String, Object> conf, StormTopology topology, SubmitOptions submitOpts) throws TException { if (!Utils.isValidConf(conf)) { throw new IllegalArgumentException("Topology conf is not json-serializable"); } getNimbus().submitTopologyWithOpts(topologyName, null, JSONValue.toJSONString(conf), Utils.addVersions(topology), submitOpts); return new LocalTopology(topologyName, topology); } @Override public LocalTopology submitTopology(String topologyName, Map<String, Object> conf, TrackedTopology topology) throws 
TException { return submitTopology(topologyName, conf, topology.getTopology()); } @Override public LocalTopology submitTopologyWithOpts(String topologyName, Map<String, Object> conf, TrackedTopology topology, SubmitOptions submitOpts) throws TException { return submitTopologyWithOpts(topologyName, conf, topology.getTopology(), submitOpts); } @Override public void uploadNewCredentials(String topologyName, Credentials creds) throws TException { getNimbus().uploadNewCredentials(topologyName, creds); } @Override public void killTopology(String topologyName) throws TException { getNimbus().killTopology(topologyName); } @Override public void killTopologyWithOpts(String name, KillOptions options) throws TException { getNimbus().killTopologyWithOpts(name, options); } @Override public void activate(String topologyName) throws TException { getNimbus().activate(topologyName); } @Override public void deactivate(String topologyName) throws TException { getNimbus().deactivate(topologyName); } @Override public void rebalance(String name, RebalanceOptions options) throws TException { getNimbus().rebalance(name, options); } @Override public void shutdown() { try { close(); } catch (Exception e) { throw new RuntimeException(e); } } @Override public String getTopologyConf(String id) throws TException { return getNimbus().getTopologyConf(id); } @Override public StormTopology getTopology(String id) throws TException { return getNimbus().getTopology(id); } @Override public ClusterSummary getClusterInfo() throws TException { return getNimbus().getClusterInfo(); } @Override public TopologyInfo getTopologyInfo(String id) throws TException { return getNimbus().getTopologyInfo(id); } public int getThriftServerPort() { return thriftServer.getPort(); } @Override public synchronized void close() throws Exception { if (nimbus != null) { nimbus.shutdown(); } if (thriftServer != null) { LOG.info("shutting down thrift server"); try { thriftServer.stop(); } catch (Exception e) { LOG.info("failed to 
stop thrift", e); } } if (state != null) { state.close(); } if (getClusterState() != null) { getClusterState().disconnect(); } for (Supervisor s: supervisors) { s.shutdownAllWorkers(null, ReadClusterState.THREAD_DUMP_ON_ERROR); s.close(); } ProcessSimulator.killAllProcesses(); if (zookeeper != null) { LOG.info("Shutting down in process zookeeper"); zookeeper.close(); LOG.info("Done shutting down in process zookeeper"); } for (TmpPath p: tmpDirs) { p.close(); } if (this.trackId != null) { LOG.warn("Clearing tracked metrics for ID {}", this.trackId); LocalExecutor.clearTrackId(); RegisteredGlobalState.clearState(this.trackId); } if (this.commonInstaller != null) { this.commonInstaller.close(); } if (time != null) { time.close(); } } /** * Get a specific Supervisor. This is intended mostly for internal testing. * @param id the id of the supervisor */ public synchronized Supervisor getSupervisor(String id) { for (Supervisor s: supervisors) { if (id.equals(s.getId())) { return s; } } return null; } /** * Kill a specific supervisor. This is intended mostly for internal testing. * @param id the id of the supervisor */ public synchronized void killSupervisor(String id) { for (Iterator<Supervisor> it = supervisors.iterator(); it.hasNext();) { Supervisor s = it.next(); if (id.equals(s.getId())) { it.remove(); s.close(); //tmpDir will be handled separately return; } } } /** * Add another supervisor to the topology. This is intended mostly for internal testing. */ public Supervisor addSupervisor() throws Exception { return addSupervisor(null, null, null); } /** * Add another supervisor to the topology. This is intended mostly for internal testing. * @param ports the number of ports/slots the supervisor should have */ public Supervisor addSupervisor(Number ports) throws Exception { return addSupervisor(ports, null, null); } /** * Add another supervisor to the topology. This is intended mostly for internal testing. 
* @param ports the number of ports/slots the supervisor should have * @param id the id of the new supervisor, so you can find it later. */ public Supervisor addSupervisor(Number ports, String id) throws Exception { return addSupervisor(ports, null, id); } /** * Add another supervisor to the topology. This is intended mostly for internal testing. * @param ports the number of ports/slots the supervisor should have * @param conf any config values that should be added/over written in the daemon conf of the cluster. * @param id the id of the new supervisor, so you can find it later. */ public synchronized Supervisor addSupervisor(Number ports, Map<String, Object> conf, String id) throws Exception { if (ports == null) { ports = 2; } TmpPath tmpDir = new TmpPath(); tmpDirs.add(tmpDir); List<Integer> portNumbers = new ArrayList<>(ports.intValue()); for (int i = 0; i < ports.intValue(); i++) { portNumbers.add(portCounter.getAndIncrement()); } Map<String, Object> superConf = new HashMap<>(daemonConf); if (conf != null) { superConf.putAll(conf); } superConf.put(Config.STORM_LOCAL_DIR, tmpDir.getPath()); superConf.put(DaemonConfig.SUPERVISOR_SLOTS_PORTS, portNumbers); final String superId = id == null ? Utils.uuid() : id; ISupervisor isuper = new StandaloneSupervisor() { @Override public String generateSupervisorId() { return superId; } }; if (!ConfigUtils.isLocalMode(superConf)) { throw new IllegalArgumentException("Cannot start server in distrubuted mode!"); } Supervisor s = new Supervisor(superConf, sharedContext, isuper); s.launch(); supervisors.add(s); return s; } private boolean areAllSupervisorsWaiting() { boolean ret = true; for (Supervisor s: supervisors) { ret = ret && s.isWaiting(); } return ret; } private static boolean areAllWorkersWaiting() { boolean ret = true; for (Shutdownable s: ProcessSimulator.getAllProcessHandles()) { if (s instanceof DaemonCommon) { ret = ret && ((DaemonCommon)s).isWaiting(); } } return ret; } /** * Wait for the cluster to be idle. 
This is intended to be used with * Simulated time and is for internal testing. * @throws InterruptedException if interrupted while waiting. * @throws AssertionError if the cluster did not come to an idle point with * a timeout. */ public void waitForIdle() throws InterruptedException { waitForIdle(Testing.TEST_TIMEOUT_MS); } /** * Wait for the cluster to be idle. This is intended to be used with * Simulated time and is for internal testing. * @param timeoutMs the number of ms to wait before throwing an error. * @throws InterruptedException if interrupted while waiting. * @throws AssertionError if the cluster did not come to an idle point with * a timeout. */ public void waitForIdle(long timeoutMs) throws InterruptedException { Random rand = ThreadLocalRandom.current(); //wait until all workers, supervisors, and nimbus is waiting final long endTime = System.currentTimeMillis() + timeoutMs; while (!(nimbus.isWaiting() && areAllSupervisorsWaiting() && areAllWorkersWaiting())) { if (System.currentTimeMillis() >= endTime) { LOG.info("Cluster was not idle in {} ms", timeoutMs); LOG.info(Utils.threadDump()); throw new AssertionError("Test timed out (" + timeoutMs + "ms) cluster not idle"); } Thread.sleep(rand.nextInt(20)); } } @Override public void advanceClusterTime(int secs) throws InterruptedException { advanceClusterTime(secs, 1); } @Override public void advanceClusterTime(int secs, int incSecs) throws InterruptedException { for (int amountLeft = secs; amountLeft > 0; amountLeft -= incSecs) { int diff = Math.min(incSecs, amountLeft); Time.advanceTimeSecs(diff); waitForIdle(); } } @Override public IStormClusterState getClusterState() { return clusterState; } @Override public String getTrackedId() { return trackId; } //Nimbus Compatibility @Override public void submitTopology(String name, String uploadedJarLocation, String jsonConf, StormTopology topology) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException, TException { try { 
@SuppressWarnings("unchecked") Map<String, Object> conf = (Map<String, Object>) JSONValue.parseWithException(jsonConf); submitTopology(name, conf, topology); } catch (ParseException e) { throw new RuntimeException(e); } } @Override public void submitTopologyWithOpts(String name, String uploadedJarLocation, String jsonConf, StormTopology topology, SubmitOptions options) throws AlreadyAliveException, InvalidTopologyException, AuthorizationException, TException { try { @SuppressWarnings("unchecked") Map<String, Object> conf = (Map<String, Object>) JSONValue.parseWithException(jsonConf); submitTopologyWithOpts(name, conf, topology, options); } catch (ParseException e) { throw new RuntimeException(e); } } @Override public void setLogConfig(String name, LogConfig config) throws TException { // TODO Auto-generated method stub throw new RuntimeException("NOT IMPLEMENTED YET"); } @Override public LogConfig getLogConfig(String name) throws TException { // TODO Auto-generated method stub throw new RuntimeException("NOT IMPLEMENTED YET"); } @Override public void debug(String name, String component, boolean enable, double samplingPercentage) throws NotAliveException, AuthorizationException, TException { // TODO Auto-generated method stub throw new RuntimeException("NOT IMPLEMENTED YET"); } @Override public void setWorkerProfiler(String id, ProfileRequest profileRequest) throws TException { // TODO Auto-generated method stub throw new RuntimeException("NOT IMPLEMENTED YET"); } @Override public List<ProfileRequest> getComponentPendingProfileActions(String id, String component_id, ProfileAction action) throws TException { // TODO Auto-generated method stub throw new RuntimeException("NOT IMPLEMENTED YET"); } @Override public String beginCreateBlob(String key, SettableBlobMeta meta) throws AuthorizationException, KeyAlreadyExistsException, TException { throw new RuntimeException("BLOBS NOT SUPPORTED IN LOCAL MODE"); } @Override public String beginUpdateBlob(String key) throws 
AuthorizationException, KeyNotFoundException, TException { throw new KeyNotFoundException("BLOBS NOT SUPPORTED IN LOCAL MODE"); } @Override public void uploadBlobChunk(String session, ByteBuffer chunk) throws AuthorizationException, TException { throw new RuntimeException("BLOBS NOT SUPPORTED IN LOCAL MODE"); } @Override public void finishBlobUpload(String session) throws AuthorizationException, TException { throw new RuntimeException("BLOBS NOT SUPPORTED IN LOCAL MODE"); } @Override public void cancelBlobUpload(String session) throws AuthorizationException, TException { throw new RuntimeException("BLOBS NOT SUPPORTED IN LOCAL MODE"); } @Override public ReadableBlobMeta getBlobMeta(String key) throws AuthorizationException, KeyNotFoundException, TException { throw new KeyNotFoundException("BLOBS NOT SUPPORTED IN LOCAL MODE"); } @Override public void setBlobMeta(String key, SettableBlobMeta meta) throws AuthorizationException, KeyNotFoundException, TException { throw new KeyNotFoundException("BLOBS NOT SUPPORTED IN LOCAL MODE"); } @Override public BeginDownloadResult beginBlobDownload(String key) throws AuthorizationException, KeyNotFoundException, TException { throw new KeyNotFoundException("BLOBS NOT SUPPORTED IN LOCAL MODE"); } @Override public ByteBuffer downloadBlobChunk(String session) throws AuthorizationException, TException { throw new RuntimeException("BLOBS NOT SUPPORTED IN LOCAL MODE"); } @Override public void deleteBlob(String key) throws AuthorizationException, KeyNotFoundException, TException { throw new KeyNotFoundException("BLOBS NOT SUPPORTED IN LOCAL MODE"); } @Override public ListBlobsResult listBlobs(String session) throws TException { //Blobs are not supported in local mode. 
Return nothing ListBlobsResult ret = new ListBlobsResult(); ret.set_keys(new ArrayList<>()); return ret; } @Override public int getBlobReplication(String key) throws AuthorizationException, KeyNotFoundException, TException { throw new KeyNotFoundException("BLOBS NOT SUPPORTED IN LOCAL MODE"); } @Override public int updateBlobReplication(String key, int replication) throws AuthorizationException, KeyNotFoundException, TException { throw new KeyNotFoundException("BLOBS NOT SUPPORTED IN LOCAL MODE"); } @Override public void createStateInZookeeper(String key) throws TException { // TODO Auto-generated method stub throw new RuntimeException("NOT IMPLEMENTED YET"); } @Override public String beginFileUpload() throws AuthorizationException, TException { //Just ignore these for now. We are going to throw it away anyways return Utils.uuid(); } @Override public void uploadChunk(String location, ByteBuffer chunk) throws AuthorizationException, TException { //Just throw it away in local mode } @Override public void finishFileUpload(String location) throws AuthorizationException, TException { //Just throw it away in local mode } @Override public String beginFileDownload(String file) throws AuthorizationException, TException { throw new AuthorizationException("FILE DOWNLOAD NOT SUPPORTED IN LOCAL MODE"); } @Override public ByteBuffer downloadChunk(String id) throws AuthorizationException, TException { throw new AuthorizationException("FILE DOWNLOAD NOT SUPPORTED IN LOCAL MODE"); } @Override public String getNimbusConf() throws AuthorizationException, TException { // TODO Auto-generated method stub throw new RuntimeException("NOT IMPLEMENTED YET"); } @Override public NimbusSummary getLeader() throws AuthorizationException, TException { return nimbus.getLeader(); } @Override public boolean isTopologyNameAllowed(String name) throws AuthorizationException, TException { return nimbus.isTopologyNameAllowed(name); } @Override public TopologyInfo getTopologyInfoWithOpts(String id, 
GetInfoOptions options) throws NotAliveException, AuthorizationException, TException { // TODO Auto-generated method stub throw new RuntimeException("NOT IMPLEMENTED YET"); } @Override public TopologyPageInfo getTopologyPageInfo(String id, String window, boolean is_include_sys) throws NotAliveException, AuthorizationException, TException { // TODO Auto-generated method stub throw new RuntimeException("NOT IMPLEMENTED YET"); } @Override public SupervisorPageInfo getSupervisorPageInfo(String id, String host, boolean is_include_sys) throws NotAliveException, AuthorizationException, TException { // TODO Auto-generated method stub throw new RuntimeException("NOT IMPLEMENTED YET"); } @Override public ComponentPageInfo getComponentPageInfo(String topology_id, String component_id, String window, boolean is_include_sys) throws NotAliveException, AuthorizationException, TException { // TODO Auto-generated method stub throw new RuntimeException("NOT IMPLEMENTED YET"); } @Override public StormTopology getUserTopology(String id) throws NotAliveException, AuthorizationException, TException { // TODO Auto-generated method stub throw new RuntimeException("NOT IMPLEMENTED YET"); } @Override public TopologyHistoryInfo getTopologyHistory(String user) throws AuthorizationException, TException { // TODO Auto-generated method stub throw new RuntimeException("NOT IMPLEMENTED YET"); } /** * Run c with a local mode cluster overriding the NimbusClient and DRPCClient calls. * @param c the callable to run in this mode * @param ttlSec the number of seconds to let the cluster run after c has completed * @return the result of calling C * @throws Exception on any Exception. 
*/ public static <T> T withLocalModeOverride(Callable<T> c, long ttlSec) throws Exception { LOG.info("\n\n\t\tSTARTING LOCAL MODE CLUSTER\n\n"); try (LocalCluster local = new LocalCluster(); NimbusClient.LocalOverride nimbusOverride = new NimbusClient.LocalOverride(local); LocalDRPC drpc = new LocalDRPC(); DRPCClient.LocalOverride drpcOverride = new DRPCClient.LocalOverride(drpc)) { T ret = c.call(); LOG.info("\n\n\t\tRUNNING LOCAL CLUSTER for {} seconds.\n\n", ttlSec); Thread.sleep(ttlSec * 1000); LOG.info("\n\n\t\tSTOPPING LOCAL MODE CLUSTER\n\n"); return ret; } } @Override public List<OwnerResourceSummary> getOwnerResourceSummaries(String owner) throws AuthorizationException, TException { // TODO Auto-generated method stub throw new RuntimeException("NOT IMPLEMENTED YET"); } public static void main(final String [] args) throws Exception { if (args.length < 1) { throw new IllegalArgumentException("No class was specified to run"); } long ttl = 20; String ttlString = System.getProperty("storm.local.sleeptime", "20"); try { ttl = Long.valueOf(ttlString); } catch (NumberFormatException e) { LOG.warn("could not parse the sleep time defaulting to {} seconds", ttl); } withLocalModeOverride(() -> { String klass = args[0]; String [] newArgs = Arrays.copyOfRange(args, 1, args.length); Class<?> c = Class.forName(klass); Method main = c.getDeclaredMethod("main", String[].class); LOG.info("\n\n\t\tRUNNING {} with args {}\n\n", main, Arrays.toString(newArgs)); main.invoke(null, (Object)newArgs); return (Void)null; }, ttl); //Sometimes external things used with testing don't shut down all the way System.exit(0); } }
kamleshbhatt/storm
storm-server/src/main/java/org/apache/storm/LocalCluster.java
Java
apache-2.0
43,394
// Package assertion_test bootstraps the Ginkgo spec runner for the
// assertion package: `go test` invokes TestAssertion, which hands
// control over to Ginkgo.
package assertion_test

import (
	// Dot-imports are the conventional Ginkgo/Gomega style: they place
	// RegisterFailHandler, Fail and RunSpecs directly in scope.
	. "github.com/cloudfoundry/bosh-utils/internal/github.com/onsi/ginkgo"
	. "github.com/cloudfoundry/bosh-utils/internal/github.com/onsi/gomega"

	"testing"
)

// TestAssertion is the single entry point the standard `testing`
// framework sees; the actual specs live in sibling *_test.go files
// and are discovered and run by RunSpecs.
func TestAssertion(t *testing.T) {
	// Route Gomega assertion failures into Ginkgo's Fail handler so a
	// failed expectation aborts the current spec.
	RegisterFailHandler(Fail)
	RunSpecs(t, "Assertion Suite")
}
uzzz/bosh-agent
vendor/github.com/cloudfoundry/bosh-utils/internal/github.com/onsi/gomega/internal/assertion/assertion_suite_test.go
GO
apache-2.0
288
/*
    Copyright 1996-2008 Ariba, Inc.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.

    $Id: //ariba/platform/util/core/ariba/util/io/FilenameExtensionFilter.java#5 $
*/

package ariba.util.io;

import java.util.List;
import ariba.util.core.ListUtil;
import java.io.File;
import java.io.FilenameFilter;

/**
    A FilenameFilter to pick all files that end with a extension (case insensitive match).

    @aribaapi private
*/
public class FilenameExtensionFilter implements FilenameFilter
{
    /** Accepted filename extensions, stored lower-cased; duplicates are not added. */
    List extensions;

    /**
        Creates a filter that accepts a single extension.

        @param extension the filename extension to accept (matched case-insensitively)
    */
    public FilenameExtensionFilter (String extension)
    {
        this.extensions = ListUtil.list(extension.toLowerCase());
    }

    /**
        Creates a filter that accepts any of several extensions.

        @param source - a List of strings, each a filename extension
    */
    public FilenameExtensionFilter (List source)
    {
        int sourceSize = source.size();
        this.extensions = ListUtil.list(sourceSize);
        for (int idx = 0; idx < sourceSize; idx++) {
            // Lower-case on insertion so accept() can compare without
            // re-normalizing the stored extensions.
            ListUtil.addElementIfAbsent(this.extensions,
                                        ((String)source.get(idx)).toLowerCase());
        }
    }

    /**
        Returns true if the given filename ends with any of the registered
        extensions, compared case-insensitively. A null name is rejected.

        @param dir the directory containing the file (unused)
        @param name the file name to test
    */
    public boolean accept (File dir, String name)
    {
        if (name == null) {
            return false;
        }
        // Hoisted out of the loop: lower-case the candidate name once
        // instead of once per registered extension.
        String lowerName = name.toLowerCase();
        int extensionsSize = this.extensions.size();
        for (int idx = 0; idx < extensionsSize; idx++) {
            String cursor = (String)this.extensions.get(idx);
            if (lowerName.endsWith(cursor)) {
                return true;
            }
        }
        return false;
    }
}
google-code/aribaweb
src/util/ariba/util/io/FilenameExtensionFilter.java
Java
apache-2.0
2,012
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.streams.kstream;

import org.apache.kafka.streams.errors.TopologyException;
import org.apache.kafka.streams.kstream.internals.PrintedInternal;
import org.apache.kafka.streams.processor.Processor;
import org.apache.kafka.streams.processor.ProcessorSupplier;
import org.apache.kafka.test.TestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;

import static org.hamcrest.CoreMatchers.equalTo;
import static org.junit.Assert.assertThat;

/**
 * Tests for {@link Printed}: verifies that the print processors it builds
 * write the expected "[label]: key, value" lines to a file or to stdout,
 * and that invalid configuration is rejected eagerly.
 */
public class PrintedTest {

    // System.out is swapped for an in-memory stream around each test so the
    // printed output can be asserted on; the original is restored in after().
    private final PrintStream originalSysOut = System.out;
    private final ByteArrayOutputStream sysOut = new ByteArrayOutputStream();
    private final Printed<String, Integer> sysOutPrinter = Printed.toSysOut();

    @Before
    public void before() {
        System.setOut(new PrintStream(sysOut));
    }

    @After
    public void after() {
        System.setOut(originalSysOut);
    }

    @Test
    public void shouldCreateProcessorThatPrintsToFile() throws IOException {
        final File file = TestUtils.tempFile();
        final ProcessorSupplier<String, Integer> processorSupplier = new PrintedInternal<>(
                Printed.<String, Integer>toFile(file.getPath()))
                .build("processor");
        final Processor<String, Integer> processor = processorSupplier.get();
        processor.process("hi", 1);
        processor.close();
        // Files.readAllBytes is guaranteed to read the whole file. The
        // previous FileInputStream.read(byte[]) call was allowed to return
        // before filling the buffer (its return value was ignored), which
        // could silently truncate the data under comparison.
        final byte[] data = Files.readAllBytes(file.toPath());
        assertThat(new String(data, StandardCharsets.UTF_8), equalTo("[processor]: hi, 1\n"));
    }

    @Test
    public void shouldCreateProcessorThatPrintsToStdOut() throws UnsupportedEncodingException {
        final ProcessorSupplier<String, Integer> supplier = new PrintedInternal<>(sysOutPrinter).build("processor");
        supplier.get().process("good", 2);
        assertThat(sysOut.toString(StandardCharsets.UTF_8.name()), equalTo("[processor]: good, 2\n"));
    }

    @Test
    public void shouldPrintWithLabel() throws UnsupportedEncodingException {
        final Processor<String, Integer> processor = new PrintedInternal<>(sysOutPrinter.withLabel("label"))
                .build("processor")
                .get();
        processor.process("hello", 3);
        assertThat(sysOut.toString(StandardCharsets.UTF_8.name()), equalTo("[label]: hello, 3\n"));
    }

    @Test
    public void shouldPrintWithKeyValueMapper() throws UnsupportedEncodingException {
        final Processor<String, Integer> processor = new PrintedInternal<>(sysOutPrinter.withKeyValueMapper(
                new KeyValueMapper<String, Integer, String>() {
                    @Override
                    public String apply(final String key, final Integer value) {
                        return String.format("%s -> %d", key, value);
                    }
                })).build("processor")
                .get();
        processor.process("hello", 1);
        assertThat(sysOut.toString(StandardCharsets.UTF_8.name()), equalTo("[processor]: hello -> 1\n"));
    }

    @Test(expected = NullPointerException.class)
    public void shouldThrowNullPointerExceptionIfFilePathIsNull() {
        Printed.toFile(null);
    }

    @Test(expected = NullPointerException.class)
    public void shouldThrowNullPointerExceptionIfMapperIsNull() {
        sysOutPrinter.withKeyValueMapper(null);
    }

    @Test(expected = NullPointerException.class)
    public void shouldThrowNullPointerExceptionIfLabelIsNull() {
        sysOutPrinter.withLabel(null);
    }

    @Test(expected = TopologyException.class)
    public void shouldThrowTopologyExceptionIfFilePathIsEmpty() {
        Printed.toFile("");
    }

    @Test(expected = TopologyException.class)
    public void shouldThrowTopologyExceptionIfFilePathDoesntExist() {
        Printed.toFile("/this/should/not/exist");
    }
}
MyPureCloud/kafka
streams/src/test/java/org/apache/kafka/streams/kstream/PrintedTest.java
Java
apache-2.0
4,974
/** * Copyright 2019 The AMP HTML Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS-IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import {CSS} from '../../../build/amp-truncate-text-0.1.css'; import {Services} from '../../../src/services'; import {CSS as ShadowCSS} from '../../../build/amp-truncate-text-shadow-0.1.css'; import { closestAncestorElementBySelector, iterateCursor, } from '../../../src/dom'; import {createShadowRoot} from './shadow-utils'; import {dev, userAssert} from '../../../src/log'; import {htmlFor} from '../../../src/static-template'; import {isExperimentOn} from '../../../src/experiments'; import {toArray} from '../../../src/types'; import {truncateText} from './truncate-text'; /** * TODO(sparhami) List of stuff to do / consider: * - Delay truncateing for things outside of the viewport * - Only truncate a few things in a single pass, and defer others * - If estimation + mutation takes too long, fall back to gradient * or perhaps nothing and position absolute the button on top of * text * * Maybe let the developer specify the gradient * - If we had some rough bucket of performance, maybe just fallback * immediately to gradient / hard cut off. * - Custom fonts can cause truncation to end up being wrong * when they load * * Can we just wait to layout if we know a font is loading? * Since all fonts are statically declared in AMP, this is just a * one time thing */ export class AmpTruncateText extends AMP.BaseElement { /** * Sets up the actions supported by this element. 
* @private */ setupActions_() { this.registerAction('expand', () => this.expand_()); this.registerAction('collapse', () => this.collapse_()); } /** @param {!AmpElement} element */ constructor(element) { super(element); /** @private {?Element} */ this.content_ = null; /** @private {?Element} */ this.collapsedSlot_ = null; /** @private {?Element} */ this.expandedSlot_ = null; /** @private {?Element} */ this.persistentSlot_ = null; /** @private {boolean} */ this.useShadow_ = false; /** @private {!MutationObserver} */ this.mutationObserver_ = new this.win.MutationObserver(() => { this.truncate_(); }); } /** @override */ buildCallback() { userAssert( isExperimentOn(this.win, 'amp-truncate-text'), 'The amp-truncate-text experiment must be enabled to use this ' + 'component.' ); this.useShadow_ = !!this.element.attachShadow && isExperimentOn(this.win, 'amp-truncate-text-shadow'); if (this.useShadow_) { this.buildShadow_(); } else { this.build_(); } this.setupActions_(); this.collapsedSlot_.addEventListener('click', event => { this.maybeExpand_(event); }); this.expandedSlot_.addEventListener('click', event => { this.maybeCollapse_(event); }); } /** * Builds the component when not using Shadow DOM. 
*/ build_() { const html = htmlFor(this.element); this.content_ = html` <div class="i-amphtml-truncate-content"> <span class="i-amphtml-default-slot"></span> <span class="i-amphtml-truncate-collapsed-slot" name="collapsed"></span> <span class="i-amphtml-truncate-expanded-slot" name="expanded"></span> <span class="i-amphtml-truncate-persistent-slot" name="persistent" ></span> </div> `; const defaultSlot = this.content_.querySelector('.i-amphtml-default-slot'); this.collapsedSlot_ = this.content_.querySelector( '.i-amphtml-truncate-collapsed-slot' ); this.expandedSlot_ = this.content_.querySelector( '.i-amphtml-truncate-expanded-slot' ); this.persistentSlot_ = this.content_.querySelector( '.i-amphtml-truncate-persistent-slot' ); iterateCursor(this.element.querySelectorAll('[slot="collapsed"]'), el => { this.collapsedSlot_.appendChild(el); }); iterateCursor(this.element.querySelectorAll('[slot="expanded"]'), el => { this.expandedSlot_.appendChild(el); }); iterateCursor(this.element.querySelectorAll('[slot="persistent"]'), el => { this.persistentSlot_.appendChild(el); }); this.getRealChildNodes().forEach(node => { defaultSlot.appendChild(node); }); this.element.appendChild(this.content_); } /** * Builds the component when using Shadow DOM. 
*/ buildShadow_() { const html = htmlFor(this.element); const sr = createShadowRoot( this.element, ShadowCSS, html` <div class="content"> <slot></slot> <slot class="collapsed-slot" name="collapsed"></slot> <slot class="expanded-slot" name="expanded"></slot> <slot class="persistent-slot" name="persistent"></slot> </div> ` ); this.content_ = null; this.collapsedSlot_ = sr.querySelector('.collapsed-slot'); this.expandedSlot_ = sr.querySelector('.expanded-slot'); this.persistentSlot_ = sr.querySelector('.persistent-slot'); } /** @override */ layoutCallback() { return this.mutateElement(() => { this.truncate_(); }); } /** @override */ firstAttachedCallback() { this.mutationObserver_.observe(this.element, { attributes: true, characterData: true, childList: true, subtree: true, }); } /** @override */ isRelayoutNeeded() { return true; } /** @override */ isLayoutSupported() { return true; } /** * @return {!Array<!Node>} The nodes to show when overflowing. */ getNodesForOverflow_() { if (this.useShadow_) { return toArray( this.element.querySelectorAll('[slot="persistent"], [slot="collapsed"]') ); } return toArray( this.element.querySelectorAll( '.i-amphtml-truncate-persistent-slot, .i-amphtml-truncate-collapsed-slot' ) ); } /** * Truncates the content of the element. * @private */ truncate_() { const container = dev().assertElement( this.useShadow_ ? this.element : this.content_ ); const overflowNodes = this.getNodesForOverflow_(); truncateText({ container, overflowNodes, }); // Take the records to clear them out. This prevents mutations from // the truncation from invoking the observer's callback. this.mutationObserver_.takeRecords(); } /** * Expands the component, unless the event came from an element that is * actionable. * @param {!Event} event */ maybeExpand_(event) { this.maybeToggle_(event, true); } /** * Collapses the component, unless the event came from an element that is * actionable. 
* @param {!Event} event */ maybeCollapse_(event) { this.maybeToggle_(event, false); } /** * Expand/collapses the component unless the element already has an * associated action or will navigate. * @param {!Event} event * @param {boolean} expand Whether to expand or collapse. */ maybeToggle_(event, expand) { const target = dev().assertElement(event.target); const actionService = Services.actionServiceForDoc(this.element); // If we have a tap action on any ancestor, then skip expansion. if (actionService.hasAction(target, 'tap')) { return; } // If we have an ancestor anchor (either for the slotted element, or // wrapping the whole amp-truncate-text). skip expansion. if (closestAncestorElementBySelector(target, 'a[href]')) { return; } if (expand) { this.expand_(); } else { this.collapse_(); } } /** * Expands the component by removing any height restriction via CSS. */ expand_() { this.element.setAttribute('i-amphtml-truncate-expanded', ''); } /** * Collapses the component by undoing the effects of `expand_()`. */ collapse_() { this.element.removeAttribute('i-amphtml-truncate-expanded'); } } AMP.extension('amp-truncate-text', '0.1', AMP => { AMP.registerElement('amp-truncate-text', AmpTruncateText, CSS); });
dotandads/amphtml
extensions/amp-truncate-text/0.1/amp-truncate-text.js
JavaScript
apache-2.0
8,602
package fixtures.primitives;

import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.core.Response;

// Minimal JAX-RS resource exercising the primitive `long` type in both a
// response body (GET) and a request body (POST).
// NOTE(review): this appears to be a fixture consumed by a doc-generation
// tool under test (see @SuppressWarnings("javadoc") below), so only line
// comments are used here — Javadoc text could alter the tool's output.
@Path("/primitives/longs")
@SuppressWarnings("javadoc")
public class LongsResource {

    // GET /primitives/longs -> a bare primitive long (always 0).
    @GET
    public long get() {
        return 0;
    }

    // POST /primitives/longs with a long payload; returns 200 with no body.
    @POST
    public Response create(long value) {
        return Response.ok().build();
    }
}
smecsia/swagger-jaxrs-doclet
swagger-doclet/src/test/resources/fixtures/primitives/LongsResource.java
Java
apache-2.0
350
#include "repo-service-helper.h"

#include "filebrowser/data-mgr.h"
#include "filebrowser/progress-dialog.h"
#include "filebrowser/file-browser-requests.h"
#include "filebrowser/tasks.h"
#include "filebrowser/auto-update-mgr.h"
#include "ui/set-repo-password-dialog.h"
#include <QDir>
#include "utils/utils.h"
#include "utils/file-utils.h"
#include "seafile-applet.h"
#include "repo-service.h"

// Opens a locally cached file with the platform's default application,
// falling back to revealing it in the file manager. Warns the user via a
// dialog if the file is missing or no application can handle it.
// NOTE(review): the second parameter is currently unused in the visible
// body (even in the Q_OS_MAC branch) — presumably a flag for the Mac
// auto-update workaround; confirm against callers before removing.
void FileDownloadHelper::openFile(const QString& path, bool work_around_mac_auto_udpate)
{
    QFileInfo file(path);
    QString file_name = file.fileName();
    if (!file.exists()) {
        QString msg = QObject::tr("File \"%1\" doesn't exist in \"%2\"").arg(file_name).arg(file.path());
        seafApplet->warningBox(msg);
        return;
    }
    // Try the native open handler first, then fall back to showing the
    // file in the system file browser; if both fail, tell the user.
    if (!::openInNativeExtension(path) && !::showInGraphicalShell(path)) {
        QString msg = QObject::tr("%1 couldn't find an application to open file %2").arg(getBrand()).arg(file_name);
        seafApplet->warningBox(msg);
        return;
    }
#ifdef Q_OS_MAC
    // Record the open so the Mac image-files workaround can track it.
    MacImageFilesWorkAround::instance()->fileOpened(path);
#endif
}

FileDownloadHelper::FileDownloadHelper(const Account &account, const ServerRepo &repo, const QString &path, QWidget *parent)
    : account_(account), repo_(repo), path_(path), file_name_(QFileInfo(path).fileName()), parent_(parent), req_(NULL)
{
}

FileDownloadHelper::~FileDownloadHelper()
{
    // Detach from any in-flight request before releasing it; deleteLater()
    // defers destruction to the event loop.
    onCancel();
    if (req_)
        req_->deleteLater();
}

// Kicks off the download flow by listing the parent directory of path_ so
// we can learn the target's dirent (id, file vs. directory). No-op if a
// request is already in flight.
void FileDownloadHelper::start()
{
    if (req_)
        return;
    const QString file_name = QFileInfo(path_).fileName();
    const QString dirent_path = ::getParentPath(path_);
    req_ = new GetDirentsRequest(account_, repo_.id, dirent_path);
    connect(req_, SIGNAL(success(bool, const QList<SeafDirent> &)),
            this, SLOT(onGetDirentsSuccess(bool, const QList<SeafDirent> &)));
    connect(req_, SIGNAL(failed(const ApiError &)),
            this, SLOT(onGetDirentsFailure(const ApiError &)));
    req_->send();
}

// Cancels by disconnecting our slots from the pending request; the request
// object itself is released in the destructor.
void FileDownloadHelper::onCancel()
{
    if (req_)
        disconnect(req_, 0, this, 0);
}

// Directory listing arrived: locate the entry matching file_name_. A
// directory is opened in the folder view instead of downloaded; a missing
// entry produces a user-visible warning.
void FileDownloadHelper::onGetDirentsSuccess(bool current_readonly, const QList<SeafDirent> &dirents)
{
    Q_UNUSED(current_readonly);
    bool found_file = false;
    Q_FOREACH(const SeafDirent &dirent, dirents) {
        if (dirent.name == file_name_) {
            if (dirent.isDir()) {
                RepoService::instance()->openFolder(repo_.id, path_);
                return;
            }
            downloadFile(dirent.id);
            found_file = true;
            break;
        }
    }
    // Critically important: the file may have been removed between the
    // user's action and the listing — report rather than fail silently.
    if (!found_file) {
        QString msg = QObject::tr("File \"%1\" doesn't exist in \"%2\"").arg(file_name_).arg(QFileInfo(path_).path());
        seafApplet->warningBox(msg);
    }
}

// Downloads the file with the given content id, preferring a local cached
// copy. Retries in a loop so an incorrect repository password can be
// re-entered without restarting the whole flow.
void FileDownloadHelper::downloadFile(const QString &id)
{
    DataManager data_mgr(account_);
    QString cached_file = data_mgr.getLocalCachedFile(repo_.id, path_, id);
    if (!cached_file.isEmpty()) {
        openFile(cached_file, false);
        return;
    }
    // Loop exists solely so the SetRepoPasswordDialog branch can retry;
    // every other branch breaks out.
    while(true) {
        FileDownloadTask *task = data_mgr.createDownloadTask(repo_.id, path_);
        FileBrowserProgressDialog dialog(task, parent_);
        if (dialog.exec()) {
            QString full_path = data_mgr.getLocalCachedFile(repo_.id, path_, task->fileId());
            if (!full_path.isEmpty())
                openFile(full_path, true);
            break;
        }
        // if the user canceled the task, don't bother it
        if (task->error() == FileNetworkTask::TaskCanceled)
            break;
        // if the repository is encrypted and the password is incorrect
        // (HTTP 400), prompt for it and retry the download
        if (repo_.encrypted && task->httpErrorCode() == 400) {
            SetRepoPasswordDialog password_dialog(repo_, parent_);
            if (password_dialog.exec())
                continue;
            // the user canceled the dialog? skip
            break;
        }
        QString msg = QObject::tr("Unable to download item \"%1\"").arg(path_);
        seafApplet->warningBox(msg);
        break;
    }
}
daodaoliang/seafile-client
src/repo-service-helper.cpp
C++
apache-2.0
4,126
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.runtime.checkpoint; import org.apache.flink.api.common.JobID; import org.apache.flink.configuration.Configuration; import org.apache.flink.runtime.OperatorIDPair; import org.apache.flink.runtime.checkpoint.metadata.CheckpointMetadata; import org.apache.flink.runtime.checkpoint.metadata.MetadataSerializer; import org.apache.flink.runtime.checkpoint.metadata.MetadataSerializers; import org.apache.flink.runtime.checkpoint.metadata.MetadataV3Serializer; import org.apache.flink.runtime.executiongraph.ExecutionJobVertex; import org.apache.flink.runtime.jobgraph.JobVertexID; import org.apache.flink.runtime.jobgraph.OperatorID; import org.apache.flink.runtime.state.CheckpointStorage; import org.apache.flink.runtime.state.CheckpointStorageLoader; import org.apache.flink.runtime.state.CompletedCheckpointStorageLocation; import org.apache.flink.runtime.state.StateBackend; import org.apache.flink.runtime.state.StateBackendLoader; import org.apache.flink.runtime.state.StreamStateHandle; import org.apache.flink.runtime.state.hashmap.HashMapStateBackend; import org.apache.flink.runtime.state.storage.JobManagerCheckpointStorage; import org.apache.flink.util.ExceptionUtils; import 
org.apache.flink.util.FlinkException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.annotation.Nonnull; import javax.annotation.Nullable; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.util.HashMap; import java.util.Map; import static org.apache.flink.util.Preconditions.checkNotNull; /** * A utility class with the methods to write/load/dispose the checkpoint and savepoint metadata. * * <p>Stored checkpoint metadata files have the following format: * * <pre>[MagicNumber (int) | Format Version (int) | Checkpoint Metadata (variable)]</pre> * * <p>The actual savepoint serialization is version-specific via the {@link MetadataSerializer}. */ public class Checkpoints { private static final Logger LOG = LoggerFactory.getLogger(Checkpoints.class); /** Magic number at the beginning of every checkpoint metadata file, for sanity checks. */ public static final int HEADER_MAGIC_NUMBER = 0x4960672d; // ------------------------------------------------------------------------ // Writing out checkpoint metadata // ------------------------------------------------------------------------ public static void storeCheckpointMetadata( CheckpointMetadata checkpointMetadata, OutputStream out) throws IOException { DataOutputStream dos = new DataOutputStream(out); storeCheckpointMetadata(checkpointMetadata, dos); } public static void storeCheckpointMetadata( CheckpointMetadata checkpointMetadata, DataOutputStream out) throws IOException { // write generic header out.writeInt(HEADER_MAGIC_NUMBER); out.writeInt(MetadataV3Serializer.VERSION); MetadataV3Serializer.serialize(checkpointMetadata, out); } // ------------------------------------------------------------------------ // Reading and validating checkpoint metadata // ------------------------------------------------------------------------ public static CheckpointMetadata loadCheckpointMetadata( 
DataInputStream in, ClassLoader classLoader, String externalPointer) throws IOException { checkNotNull(in, "input stream"); checkNotNull(classLoader, "classLoader"); final int magicNumber = in.readInt(); if (magicNumber == HEADER_MAGIC_NUMBER) { final int version = in.readInt(); final MetadataSerializer serializer = MetadataSerializers.getSerializer(version); return serializer.deserialize(in, classLoader, externalPointer); } else { throw new IOException( "Unexpected magic number. This can have multiple reasons: " + "(1) You are trying to load a Flink 1.0 savepoint, which is not supported by this " + "version of Flink. (2) The file you were pointing to is not a savepoint at all. " + "(3) The savepoint file has been corrupted."); } } public static CompletedCheckpoint loadAndValidateCheckpoint( JobID jobId, Map<JobVertexID, ExecutionJobVertex> tasks, CompletedCheckpointStorageLocation location, ClassLoader classLoader, boolean allowNonRestoredState) throws IOException { checkNotNull(jobId, "jobId"); checkNotNull(tasks, "tasks"); checkNotNull(location, "location"); checkNotNull(classLoader, "classLoader"); final StreamStateHandle metadataHandle = location.getMetadataHandle(); final String checkpointPointer = location.getExternalPointer(); // (1) load the savepoint final CheckpointMetadata checkpointMetadata; try (InputStream in = metadataHandle.openInputStream()) { DataInputStream dis = new DataInputStream(in); checkpointMetadata = loadCheckpointMetadata(dis, classLoader, checkpointPointer); } // generate mapping from operator to task Map<OperatorID, ExecutionJobVertex> operatorToJobVertexMapping = new HashMap<>(); for (ExecutionJobVertex task : tasks.values()) { for (OperatorIDPair operatorIDPair : task.getOperatorIDs()) { operatorToJobVertexMapping.put(operatorIDPair.getGeneratedOperatorID(), task); operatorIDPair .getUserDefinedOperatorID() .ifPresent(id -> operatorToJobVertexMapping.put(id, task)); } } // (2) validate it (parallelism, etc) HashMap<OperatorID, 
OperatorState> operatorStates = new HashMap<>(checkpointMetadata.getOperatorStates().size()); for (OperatorState operatorState : checkpointMetadata.getOperatorStates()) { ExecutionJobVertex executionJobVertex = operatorToJobVertexMapping.get(operatorState.getOperatorID()); if (executionJobVertex != null) { if (executionJobVertex.getMaxParallelism() == operatorState.getMaxParallelism() || executionJobVertex.canRescaleMaxParallelism( operatorState.getMaxParallelism())) { operatorStates.put(operatorState.getOperatorID(), operatorState); } else { String msg = String.format( "Failed to rollback to checkpoint/savepoint %s. " + "Max parallelism mismatch between checkpoint/savepoint state and new program. " + "Cannot map operator %s with max parallelism %d to new program with " + "max parallelism %d. This indicates that the program has been changed " + "in a non-compatible way after the checkpoint/savepoint.", checkpointMetadata, operatorState.getOperatorID(), operatorState.getMaxParallelism(), executionJobVertex.getMaxParallelism()); throw new IllegalStateException(msg); } } else if (allowNonRestoredState) { LOG.info( "Skipping savepoint state for operator {}.", operatorState.getOperatorID()); } else { if (operatorState.getCoordinatorState() != null) { throwNonRestoredStateException( checkpointPointer, operatorState.getOperatorID()); } for (OperatorSubtaskState operatorSubtaskState : operatorState.getStates()) { if (operatorSubtaskState.hasState()) { throwNonRestoredStateException( checkpointPointer, operatorState.getOperatorID()); } } LOG.info( "Skipping empty savepoint state for operator {}.", operatorState.getOperatorID()); } } // (3) convert to checkpoint so the system can fall back to it CheckpointProperties props = CheckpointProperties.forSavepoint(false); return new CompletedCheckpoint( jobId, checkpointMetadata.getCheckpointId(), 0L, 0L, operatorStates, checkpointMetadata.getMasterStates(), props, location); } private static void throwNonRestoredStateException( 
String checkpointPointer, OperatorID operatorId) { String msg = String.format( "Failed to rollback to checkpoint/savepoint %s. " + "Cannot map checkpoint/savepoint state for operator %s to the new program, " + "because the operator is not available in the new program. If " + "you want to allow to skip this, you can set the --allowNonRestoredState " + "option on the CLI.", checkpointPointer, operatorId); throw new IllegalStateException(msg); } // ------------------------------------------------------------------------ // Savepoint Disposal Hooks // ------------------------------------------------------------------------ public static void disposeSavepoint( String pointer, CheckpointStorage checkpointStorage, ClassLoader classLoader) throws IOException, FlinkException { checkNotNull(pointer, "location"); checkNotNull(checkpointStorage, "stateBackend"); checkNotNull(classLoader, "classLoader"); final CompletedCheckpointStorageLocation checkpointLocation = checkpointStorage.resolveCheckpoint(pointer); final StreamStateHandle metadataHandle = checkpointLocation.getMetadataHandle(); // load the savepoint object (the metadata) to have all the state handles that we need // to dispose of all state final CheckpointMetadata metadata; try (InputStream in = metadataHandle.openInputStream(); DataInputStream dis = new DataInputStream(in)) { metadata = loadCheckpointMetadata(dis, classLoader, pointer); } Exception exception = null; // first dispose the savepoint metadata, so that the savepoint is not // addressable any more even if the following disposal fails try { metadataHandle.discardState(); } catch (Exception e) { exception = e; } // now dispose the savepoint data try { metadata.dispose(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception); } // now dispose the location (directory, table, whatever) try { checkpointLocation.disposeStorageLocation(); } catch (Exception e) { exception = ExceptionUtils.firstOrSuppressed(e, exception); } // forward 
exceptions caught in the process if (exception != null) { ExceptionUtils.rethrowIOException(exception); } } public static void disposeSavepoint( String pointer, Configuration configuration, ClassLoader classLoader, @Nullable Logger logger) throws IOException, FlinkException { checkNotNull(pointer, "location"); checkNotNull(configuration, "configuration"); checkNotNull(classLoader, "classLoader"); CheckpointStorage storage = loadCheckpointStorage(configuration, classLoader, logger); disposeSavepoint(pointer, storage, classLoader); } @Nonnull public static StateBackend loadStateBackend( Configuration configuration, ClassLoader classLoader, @Nullable Logger logger) { if (logger != null) { logger.info("Attempting to load configured state backend for savepoint disposal"); } StateBackend backend = null; try { backend = StateBackendLoader.loadStateBackendFromConfig(configuration, classLoader, null); if (backend == null && logger != null) { logger.debug( "No state backend configured, attempting to dispose savepoint " + "with configured checkpoint storage"); } } catch (Throwable t) { // catches exceptions and errors (like linking errors) if (logger != null) { logger.info("Could not load configured state backend."); logger.debug("Detailed exception:", t); } } if (backend == null) { // We use the hashmap state backend by default. This will // force the checkpoint storage loader to load // the configured storage backend. 
backend = new HashMapStateBackend(); } return backend; } @Nonnull public static CheckpointStorage loadCheckpointStorage( Configuration configuration, ClassLoader classLoader, @Nullable Logger logger) { StateBackend backend = loadStateBackend(configuration, classLoader, logger); if (logger != null) { logger.info("Attempting to load configured checkpoint storage for savepoint disposal"); } CheckpointStorage checkpointStorage = null; try { checkpointStorage = CheckpointStorageLoader.load( null, null, backend, configuration, classLoader, null); } catch (Throwable t) { // catches exceptions and errors (like linking errors) if (logger != null) { logger.info("Could not load configured state backend."); logger.debug("Detailed exception:", t); } } if (checkpointStorage == null) { // We use the jobmanager checkpoint storage by default. // The JobManagerCheckpointStorage is actually // FileSystem-based for metadata return new JobManagerCheckpointStorage(); } return checkpointStorage; } // ------------------------------------------------------------------------ /** This class contains only static utility methods and is not meant to be instantiated. */ private Checkpoints() {} }
clarkyzl/flink
flink-runtime/src/main/java/org/apache/flink/runtime/checkpoint/Checkpoints.java
Java
apache-2.0
15,882
package org.batfish.representation.cisco_xr;

import java.util.Objects;
import javax.annotation.Nonnull;
import javax.annotation.ParametersAreNonnullByDefault;

/**
 * A structure representing a space of route-target extended communities given by a 32-bit range
 * expression and 16-bit range expression in the format 'R1:R2' for the 32 bits of the global
 * administrator and the 16 bits of the local administrator respectively.
 */
@ParametersAreNonnullByDefault
public final class ExtcommunitySetRtElemAsColon implements ExtcommunitySetRtElem {

  /**
   * Creates an element from the given global-administrator (32-bit) and local-administrator
   * (16-bit) range expressions.
   *
   * @throws NullPointerException if either argument is {@code null}
   */
  public ExtcommunitySetRtElemAsColon(Uint32RangeExpr gaRangeExpr, Uint16RangeExpr laRangeExpr) {
    // Fail fast: both fields are exposed via @Nonnull getters and dereferenced in
    // equals()/hashCode(), so reject null at construction time rather than later.
    _gaRangeExpr = Objects.requireNonNull(gaRangeExpr, "gaRangeExpr");
    _laRangeExpr = Objects.requireNonNull(laRangeExpr, "laRangeExpr");
  }

  @Override
  public <T, U> T accept(ExtcommunitySetRtElemVisitor<T, U> visitor, U arg) {
    return visitor.visitExtcommunitySetRtElemAsColon(this, arg);
  }

  /** Returns the range expression for the 32-bit global administrator. */
  public @Nonnull Uint32RangeExpr getGaRangeExpr() {
    return _gaRangeExpr;
  }

  /** Returns the range expression for the 16-bit local administrator. */
  public @Nonnull Uint16RangeExpr getLaRangeExpr() {
    return _laRangeExpr;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (!(obj instanceof ExtcommunitySetRtElemAsColon)) {
      return false;
    }
    ExtcommunitySetRtElemAsColon rhs = (ExtcommunitySetRtElemAsColon) obj;
    return _gaRangeExpr.equals(rhs._gaRangeExpr) && _laRangeExpr.equals(rhs._laRangeExpr);
  }

  @Override
  public int hashCode() {
    return Objects.hash(_gaRangeExpr, _laRangeExpr);
  }

  private final @Nonnull Uint32RangeExpr _gaRangeExpr;
  private final @Nonnull Uint16RangeExpr _laRangeExpr;
}
arifogel/batfish
projects/batfish/src/main/java/org/batfish/representation/cisco_xr/ExtcommunitySetRtElemAsColon.java
Java
apache-2.0
1,603
package org.wikipedia.interlanguage;

import android.content.Context;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;

import org.wikipedia.settings.Prefs;
import org.wikipedia.util.StringUtil;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;

import static org.wikipedia.util.StringUtil.defaultIfNull;

/** Language lookup and state management for the application language and most recently used article
 * and application languages. */
public class AppLanguageState {
    // Sentinel meaning "follow the device's system language". Note this is literally
    // null, so null-vs-code comparisons throughout this class are intentional.
    public static final String SYSTEM_LANGUAGE_CODE = null;

    @NonNull
    private final AppLanguageLookUpTable appLanguageLookUpTable;

    // The language code used by the app when the article language is unspecified. It's possible for
    // this code to be unsupported if the languages supported changes. Null is a special value that
    // indicates the system language should be used.
    @Nullable private String appLanguageCode;

    // Language codes that have been explicitly chosen by the user in most recently used order. This
    // list includes both app and article languages.
    @NonNull
    private final List<String> mruLanguageCodes;

    public AppLanguageState(@NonNull Context context) {
        appLanguageLookUpTable = new AppLanguageLookUpTable(context);
        appLanguageCode = Prefs.getAppLanguageCode();
        mruLanguageCodes = unmarshalMruLanguageCodes();
    }

    /** @return The explicitly chosen app language code, or null for "system language". */
    @Nullable
    public String getAppLanguageCode() {
        return appLanguageCode;
    }

    /**
     * @return The effective app language: the system language when the sentinel is set,
     *         otherwise the (non-null) explicitly chosen code.
     */
    @NonNull
    public String getAppOrSystemLanguageCode() {
        return isSystemLanguageEnabled() ? getSystemLanguageCode() : appLanguageCode;
    }

    /** Sets and persists the app language code; null selects the system language. */
    public void setAppLanguageCode(@Nullable String code) {
        appLanguageCode = code;
        Prefs.setAppLanguageCode(code);
    }

    /** @return True when the app is configured to follow the system language. */
    public boolean isSystemLanguageEnabled() {
        return isSystemLanguageCode(getAppLanguageCode());
    }

    /** @return True when the given code is the (null) system-language sentinel. */
    public boolean isSystemLanguageCode(@Nullable String code) {
        return StringUtil.equals(code, SYSTEM_LANGUAGE_CODE);
    }

    /**
     * @return The device locale's language mapped to a wiki language code, or the table's
     *         fallback code when that language is not supported by the app.
     */
    @NonNull
    public String getSystemLanguageCode() {
        String code = LanguageUtil.languageCodeToWikiLanguageCode(Locale.getDefault().getLanguage());
        return appLanguageLookUpTable.isSupportedCode(code)
                ? code
                : AppLanguageLookUpTable.FALLBACK_LANGUAGE_CODE;
    }

    /** Note: returned codes may include languages offered by articles but not the app. */
    // NOTE(review): this exposes the internal mutable list (setMruLanguageCode relies on
    // mutating it) — callers must not modify it independently.
    @NonNull
    public List<String> getMruLanguageCodes() {
        return mruLanguageCodes;
    }

    /** Moves the given code to the front of the MRU list and persists the list. */
    public void setMruLanguageCode(@Nullable String code) {
        List<String> codes = getMruLanguageCodes();
        codes.remove(code);
        codes.add(0, code);
        Prefs.setMruLanguageCodeCsv(StringUtil.listToCsv(codes));
    }

    /** @return All app supported languages in MRU order. */
    public List<String> getAppMruLanguageCodes() {
        List<String> codes = new ArrayList<>(appLanguageLookUpTable.getCodes());
        int insertIndex = 0;
        // Stable partition: MRU codes that the app supports float to the front, keeping
        // their MRU order; remaining supported codes keep their table order.
        for (String code : getMruLanguageCodes()) {
            if (codes.contains(code)) {
                codes.remove(code);
                codes.add(insertIndex, code);
                ++insertIndex;
            }
        }
        return codes;
    }

    /** @return English name if app language is supported. */
    @Nullable
    public String getAppLanguageCanonicalName(@Nullable String code) {
        return appLanguageLookUpTable.getCanonicalName(code);
    }

    /** @return Localized name of the effective (app or system) language, if supported. */
    @Nullable
    public String getAppOrSystemLanguageLocalizedName() {
        return getAppLanguageLocalizedName(getAppOrSystemLanguageCode());
    }

    /** @return Native name if app language is supported. */
    @Nullable
    public String getAppLanguageLocalizedName(@Nullable String code) {
        return appLanguageLookUpTable.getLocalizedName(code);
    }

    /**
     * Restores the MRU list from its persisted CSV form.
     *
     * <p>Because null cannot be stored in CSV, the sentinel is persisted as the literal
     * string {@code "null"} ({@code String.valueOf((Object) null)}) and converted back to
     * the real null sentinel after parsing.
     */
    @NonNull
    private List<String> unmarshalMruLanguageCodes() {
        // Null value is used to indicate that system language should be used.
        String systemLanguageCodeString = String.valueOf(SYSTEM_LANGUAGE_CODE);
        String csv = defaultIfNull(Prefs.getMruLanguageCodeCsv(), systemLanguageCodeString);
        List<String> list = new ArrayList<>(StringUtil.csvToList(csv));
        Collections.replaceAll(list, systemLanguageCodeString, SYSTEM_LANGUAGE_CODE);
        return list;
    }
}
Wikinaut/wikipedia-app
app/src/main/java/org/wikipedia/interlanguage/AppLanguageState.java
Java
apache-2.0
4,425
// Copyright (c) 2014-2020 Dr. Colin Hirsch and Daniel Frey // Please see LICENSE for license or visit https://github.com/taocpp/PEGTL/ #ifndef TAO_JSON_PEGTL_PARSE_ERROR_HPP #define TAO_JSON_PEGTL_PARSE_ERROR_HPP #include <ostream> #include <sstream> #include <stdexcept> #include <string> #include <utility> #include <vector> #include "config.hpp" #include "position.hpp" namespace TAO_JSON_PEGTL_NAMESPACE { struct parse_error : std::runtime_error { template< typename Msg > parse_error( Msg&& msg, std::vector< position > in_positions ) : std::runtime_error( std::forward< Msg >( msg ) ), positions( std::move( in_positions ) ) { } template< typename Msg > parse_error( Msg&& msg, const position& pos ) : std::runtime_error( std::forward< Msg >( msg ) ), positions( 1, pos ) { } template< typename Msg > parse_error( Msg&& msg, position&& pos ) : std::runtime_error( std::forward< Msg >( msg ) ) { positions.emplace_back( std::move( pos ) ); } template< typename Msg, typename Input > parse_error( Msg&& msg, const Input& in ) : parse_error( std::forward< Msg >( msg ), in.position() ) { } std::vector< position > positions; }; inline std::ostream& operator<<( std::ostream& o, const parse_error& e ) { for( auto it = e.positions.rbegin(); it != e.positions.rend(); ++it ) { o << *it << ": "; } return o << e.what(); } [[nodiscard]] inline std::string to_string( const parse_error& e ) { std::ostringstream o; o << e; return o.str(); } } // namespace TAO_JSON_PEGTL_NAMESPACE #endif
arangodb/arangodb
3rdParty/taocpp-json/include/tao/json/external/pegtl/parse_error.hpp
C++
apache-2.0
1,747
/**
 *
 * @authors Your Name (you@example.org)
 * @date    2015-09-17 10:22:59
 * @version $Id$
 */

// Standard Google Analytics bootstrap: installs the `ga` command queue and
// asynchronously injects the analytics script before the first <script> tag.
(function (win, doc, tagName, scriptUrl, globalName) {
    win['GoogleAnalyticsObject'] = globalName;
    // Command queue stub: buffers ga(...) calls until the real library loads.
    win[globalName] = win[globalName] || function () {
        (win[globalName].q = win[globalName].q || []).push(arguments);
    };
    win[globalName].l = 1 * new Date();
    var script = doc.createElement(tagName);
    var firstScript = doc.getElementsByTagName(tagName)[0];
    script.async = 1;
    script.src = scriptUrl;
    firstScript.parentNode.insertBefore(script, firstScript);
})(window, document, 'script', '#path/js/analytics.js', 'ga');

ga('create', 'UA-4625583-2', 'webapplayers.com');
ga('send', 'pageview');
freiby/flex
server/nirvana-plugins/nirvana-inspinia-style/src/main/webapp/js/embeddedjs2.js
JavaScript
apache-2.0
606
package virtual;

/**
 * Empty class. Presumably test-fixture data for class-reloading scenarios
 * (it lives under a testdata source tree) — TODO confirm against the tests
 * that reference it.
 */
public class CalleeTwoTop {
}
spring-projects/spring-loaded
testdata/src/main/java/virtual/CalleeTwoTop.java
Java
apache-2.0
49
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.devtools.profiler.model; import org.openqa.selenium.json.JsonInput; import java.util.Objects; /** * Specifies a number of samples attributed to a certain source position. */ public class PositionTickInfo { /** * Source line number (1-based). */ private final int line; /** * Number of samples attributed to the source line. */ private final int ticks; public PositionTickInfo(int line, int ticks) { this.line = line; this.ticks = ticks; } private static PositionTickInfo fromJson(JsonInput input) { int line = input.read(Integer.class); int ticks = 0; while (input.hasNext()) { switch (input.nextName()) { case "ticks": ticks = input.read(Integer.class); break; default: input.skipValue(); break; } } return new PositionTickInfo(line, ticks); } public int getLine() { return line; } public int getTicks() { return ticks; } @Override public boolean equals(Object obj) { if (null == obj || !(obj instanceof PositionTickInfo)) { return false; } return this.getLine() == ((PositionTickInfo) obj).getLine() && this.getTicks() == ((PositionTickInfo) obj).getTicks(); } @Override public int hashCode() { return Objects.hash(getLine(), getTicks()); } }
chrisblock/selenium
java/client/src/org/openqa/selenium/devtools/profiler/model/PositionTickInfo.java
Java
apache-2.0
2,170
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.internal.client.thin; import java.io.IOException; import java.lang.reflect.Array; import java.util.AbstractMap.SimpleEntry; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.LinkedList; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; import javax.cache.expiry.ExpiryPolicy; import org.apache.ignite.binary.BinaryObject; import org.apache.ignite.binary.BinaryRawWriter; import org.apache.ignite.cache.CacheAtomicityMode; import org.apache.ignite.cache.CacheKeyConfiguration; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.cache.CacheRebalanceMode; import org.apache.ignite.cache.CacheWriteSynchronizationMode; import org.apache.ignite.cache.PartitionLossPolicy; import org.apache.ignite.cache.QueryEntity; import org.apache.ignite.cache.QueryIndex; import org.apache.ignite.cache.QueryIndexType; import 
org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.client.ClientCacheConfiguration;
import org.apache.ignite.internal.binary.BinaryContext;
import org.apache.ignite.internal.binary.BinaryFieldMetadata;
import org.apache.ignite.internal.binary.BinaryMetadata;
import org.apache.ignite.internal.binary.BinaryObjectImpl;
import org.apache.ignite.internal.binary.BinaryRawWriterEx;
import org.apache.ignite.internal.binary.BinaryReaderExImpl;
import org.apache.ignite.internal.binary.BinaryReaderHandles;
import org.apache.ignite.internal.binary.BinarySchema;
import org.apache.ignite.internal.binary.BinaryThreadLocalContext;
import org.apache.ignite.internal.binary.BinaryUtils;
import org.apache.ignite.internal.binary.BinaryWriterExImpl;
import org.apache.ignite.internal.binary.streams.BinaryHeapInputStream;
import org.apache.ignite.internal.binary.streams.BinaryInputStream;
import org.apache.ignite.internal.binary.streams.BinaryOutputStream;
import org.apache.ignite.internal.processors.platform.cache.expiry.PlatformExpiryPolicy;
import org.apache.ignite.internal.util.MutableSingletonList;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.jetbrains.annotations.Nullable;

import static org.apache.ignite.internal.client.thin.ProtocolVersionFeature.EXPIRY_POLICY;
import static org.apache.ignite.internal.client.thin.ProtocolVersionFeature.QUERY_ENTITY_PRECISION_AND_SCALE;
import static org.apache.ignite.internal.processors.platform.cache.expiry.PlatformExpiryPolicy.convertDuration;

/**
 * Shared serialization/deserialization utils.
 *
 * <p>The read/write method pairs in this class define the thin-client wire layout: the
 * exact order of reads and writes IS the protocol, so statements must not be reordered.
 */
public final class ClientUtils {
    /** Marshaller. */
    private final ClientBinaryMarshaller marsh;

    /**
     * Constructor.
     */
    ClientUtils(ClientBinaryMarshaller marsh) {
        this.marsh = marsh;
    }

    /**
     * Get cache ID by cache name.
     *
     * <p>The ID is simply {@code String.hashCode()} of the name, so distinct names can
     * in principle collide.
     */
    static int cacheId(String name) {
        Objects.requireNonNull(name, "name");

        return name.hashCode();
    }

    /**
     * Serializes a collection as a count followed by each element; null or empty is
     * written as count 0.
     *
     * @param col Collection to serialize.
     * @param out Output stream.
     * @param elemWriter Collection element serializer
     */
    public static <E> void collection(
        Collection<E> col,
        BinaryOutputStream out,
        BiConsumer<BinaryOutputStream, E> elemWriter
    ) {
        if (col == null || col.isEmpty())
            out.writeInt(0);
        else {
            out.writeInt(col.size());

            for (E e : col)
                elemWriter.accept(out, e);
        }
    }

    /**
     * Array overload of the method above: count then elements; null or empty writes 0.
     *
     * @param col Collection to serialize.
     * @param out Output stream.
     * @param elemWriter Collection element serializer
     */
    static <E> void collection(E[] col, BinaryOutputStream out, BiConsumer<BinaryOutputStream, E> elemWriter) {
        if (col == null || col.length == 0)
            out.writeInt(0);
        else {
            out.writeInt(col.length);

            for (E e : col)
                elemWriter.accept(out, e);
        }
    }

    /**
     * Reads a count-prefixed collection written by the methods above.
     *
     * @param in Input stream.
     * @param elemReader Collection element deserializer.
     * @return Deserialized collection.
     */
    static <E> Collection<E> collection(BinaryInputStream in, Function<BinaryInputStream, E> elemReader) {
        Collection<E> col = new LinkedList<>(); // needs to be ordered for some use cases

        int cnt = in.readInt();

        for (int i = 0; i < cnt; i++)
            col.add(elemReader.apply(in));

        return col;
    }

    /**
     * Reads a count-prefixed map; for each entry the key is read before the value.
     *
     * @return Deserialized map
     */
    private static <K, V> Map<K, V> map(
        BinaryInputStream in,
        Function<BinaryInputStream, K> keyReader,
        Function<BinaryInputStream, V> valReader
    ) {
        int cnt = in.readInt();

        Map<K, V> map = new HashMap<>(cnt);

        for (int i = 0; i < cnt; i++)
            map.put(keyReader.apply(in), valReader.apply(in));

        return map;
    }

    /** Deserialize binary type metadata from stream. */
    BinaryMetadata binaryMetadata(BinaryInputStream in) throws IOException {
        try (BinaryReaderExImpl reader = createBinaryReader(in)) {
            int typeId = reader.readInt();
            String typeName = reader.readString();
            String affKeyFieldName = reader.readString();
            Map<String, BinaryFieldMetadata> fields = ClientUtils.map(
                in,
                unused -> reader.readString(),
                unused2 -> new BinaryFieldMetadata(reader.readInt(), reader.readInt())
            );
            boolean isEnum = reader.readBoolean();
            // Enum value map is only present on the wire when the type is an enum.
            Map<String, Integer> enumValues = isEnum ?
                ClientUtils.map(in, unsed -> reader.readString(), unsed2 -> reader.readInt()) :
                null;
            Collection<BinarySchema> schemas = ClientUtils.collection(
                in,
                unused -> new BinarySchema(
                    reader.readInt(),
                    new ArrayList<>(ClientUtils.collection(in, unused2 -> reader.readInt()))
                )
            );

            return new BinaryMetadata(
                typeId,
                typeName,
                fields,
                affKeyFieldName,
                schemas,
                isEnum,
                enumValues
            );
        }
    }

    /** Serialize binary type metadata to stream (mirror of the reader above). */
    void binaryMetadata(BinaryMetadata meta, BinaryOutputStream out) {
        try (BinaryRawWriterEx w = new BinaryWriterExImpl(marsh.context(), out, null, null)) {
            w.writeInt(meta.typeId());
            w.writeString(meta.typeName());
            w.writeString(meta.affinityKeyFieldName());
            collection(
                meta.fieldsMap().entrySet(),
                out,
                (unused, e) -> {
                    w.writeString(e.getKey());
                    w.writeInt(e.getValue().typeId());
                    w.writeInt(e.getValue().fieldId());
                }
            );
            w.writeBoolean(meta.isEnum());

            if (meta.isEnum())
                collection(
                    meta.enumMap().entrySet(),
                    out,
                    (unused, e) -> {
                        w.writeString(e.getKey());
                        w.writeInt(e.getValue());
                    }
                );

            collection(
                meta.schemas(),
                out,
                (unused, s) -> {
                    w.writeInt(s.schemaId());
                    collection(
                        Arrays.stream(s.fieldIds()).boxed().collect(Collectors.toList()),
                        out,
                        (unused2, i) -> w.writeInt(i)
                    );
                }
            );
        }
    }

    /**
     * Serialize configuration to stream.
     *
     * <p>Layout: total length (int), property count (short), then a sequence of
     * (code, value) property pairs; length and count are back-patched at the end.
     */
    void cacheConfiguration(ClientCacheConfiguration cfg, BinaryOutputStream out, ProtocolContext protocolCtx) {
        try (BinaryRawWriterEx writer = new BinaryWriterExImpl(marsh.context(), out, null, null)) {
            int origPos = out.position();

            writer.writeInt(0); // configuration length is to be assigned in the end

            writer.writeShort((short)0); // properties count is to be assigned in the end

            AtomicInteger propCnt = new AtomicInteger(0);

            // Writes one property: its code, its value, and bumps the property count.
            BiConsumer<CfgItem, Consumer<BinaryRawWriter>> itemWriter = (cfgItem, cfgWriter) -> {
                writer.writeShort(cfgItem.code());

                cfgWriter.accept(writer);

                propCnt.incrementAndGet();
            };

            itemWriter.accept(CfgItem.NAME, w -> w.writeString(cfg.getName()));
            itemWriter.accept(CfgItem.CACHE_MODE, w -> w.writeInt(cfg.getCacheMode().ordinal()));
            itemWriter.accept(CfgItem.ATOMICITY_MODE, w -> w.writeInt(cfg.getAtomicityMode().ordinal()));
            itemWriter.accept(CfgItem.BACKUPS, w -> w.writeInt(cfg.getBackups()));
            itemWriter.accept(CfgItem.WRITE_SYNC_MODE, w -> w.writeInt(cfg.getWriteSynchronizationMode().ordinal()));
            itemWriter.accept(CfgItem.READ_FROM_BACKUP, w -> w.writeBoolean(cfg.isReadFromBackup()));
            itemWriter.accept(CfgItem.EAGER_TTL, w -> w.writeBoolean(cfg.isEagerTtl()));
            itemWriter.accept(CfgItem.GROUP_NAME, w -> w.writeString(cfg.getGroupName()));
            itemWriter.accept(CfgItem.DEFAULT_LOCK_TIMEOUT, w -> w.writeLong(cfg.getDefaultLockTimeout()));
            itemWriter.accept(CfgItem.PART_LOSS_POLICY, w -> w.writeInt(cfg.getPartitionLossPolicy().ordinal()));
            itemWriter.accept(CfgItem.REBALANCE_BATCH_SIZE, w -> w.writeInt(cfg.getRebalanceBatchSize()));
            itemWriter.accept(CfgItem.REBALANCE_BATCHES_PREFETCH_COUNT, w -> w.writeLong(cfg.getRebalanceBatchesPrefetchCount()));
            itemWriter.accept(CfgItem.REBALANCE_DELAY, w -> w.writeLong(cfg.getRebalanceDelay()));
            itemWriter.accept(CfgItem.REBALANCE_MODE, w -> w.writeInt(cfg.getRebalanceMode().ordinal()));
            itemWriter.accept(CfgItem.REBALANCE_ORDER, w -> w.writeInt(cfg.getRebalanceOrder()));
            itemWriter.accept(CfgItem.REBALANCE_THROTTLE, w -> w.writeLong(cfg.getRebalanceThrottle()));
            itemWriter.accept(CfgItem.REBALANCE_TIMEOUT, w -> w.writeLong(cfg.getRebalanceTimeout()));
            itemWriter.accept(CfgItem.COPY_ON_READ, w -> w.writeBoolean(cfg.isCopyOnRead()));
            itemWriter.accept(CfgItem.DATA_REGION_NAME, w -> w.writeString(cfg.getDataRegionName()));
            itemWriter.accept(CfgItem.STATS_ENABLED, w -> w.writeBoolean(cfg.isStatisticsEnabled()));
            itemWriter.accept(CfgItem.MAX_ASYNC_OPS, w -> w.writeInt(cfg.getMaxConcurrentAsyncOperations()));
            itemWriter.accept(CfgItem.MAX_QUERY_ITERATORS, w -> w.writeInt(cfg.getMaxQueryIteratorsCount()));
            itemWriter.accept(CfgItem.ONHEAP_CACHE_ENABLED, w -> w.writeBoolean(cfg.isOnheapCacheEnabled()));
            itemWriter.accept(CfgItem.QUERY_METRIC_SIZE, w -> w.writeInt(cfg.getQueryDetailMetricsSize()));
            itemWriter.accept(CfgItem.QUERY_PARALLELISM, w -> w.writeInt(cfg.getQueryParallelism()));
            itemWriter.accept(CfgItem.SQL_ESCAPE_ALL, w -> w.writeBoolean(cfg.isSqlEscapeAll()));
            itemWriter.accept(CfgItem.SQL_IDX_MAX_INLINE_SIZE, w -> w.writeInt(cfg.getSqlIndexMaxInlineSize()));
            itemWriter.accept(CfgItem.SQL_SCHEMA, w -> w.writeString(cfg.getSqlSchema()));
            itemWriter.accept(
                CfgItem.KEY_CONFIGS,
                w -> ClientUtils.collection(
                    cfg.getKeyConfiguration(),
                    out,
                    (unused, i) -> {
                        w.writeString(i.getTypeName());
                        w.writeString(i.getAffinityKeyFieldName());
                    }
                )
            );
            itemWriter.accept(
                CfgItem.QUERY_ENTITIES,
                w -> ClientUtils.collection(
                    cfg.getQueryEntities(),
                    out,
                    (unused, e) -> {
                        w.writeString(e.getKeyType());
                        w.writeString(e.getValueType());
                        w.writeString(e.getTableName());
                        w.writeString(e.getKeyFieldName());
                        w.writeString(e.getValueFieldName());
                        ClientUtils.collection(
                            e.getFields().entrySet(),
                            out,
                            (unused2, f) -> {
                                QueryField qf = new QueryField(e, f);

                                w.writeString(qf.getName());
                                w.writeString(qf.getTypeName());
                                w.writeBoolean(qf.isKey());
                                w.writeBoolean(qf.isNotNull());
                                w.writeObject(qf.getDefaultValue());

                                // Precision/scale only exist on the wire for newer protocols.
                                if (protocolCtx.isFeatureSupported(QUERY_ENTITY_PRECISION_AND_SCALE)) {
                                    w.writeInt(qf.getPrecision());
                                    w.writeInt(qf.getScale());
                                }
                            }
                        );
                        ClientUtils.collection(
                            e.getAliases().entrySet(),
                            out,
                            (unused3, a) -> {
                                w.writeString(a.getKey());
                                w.writeString(a.getValue());
                            }
                        );
                        ClientUtils.collection(
                            e.getIndexes(),
                            out,
                            (unused4, i) -> {
                                w.writeString(i.getName());
                                w.writeByte((byte)i.getIndexType().ordinal());
                                w.writeInt(i.getInlineSize());
                                ClientUtils.collection(i.getFields().entrySet(),
                                    out,
                                    (unused5, f) -> {
                                        w.writeString(f.getKey());
                                        w.writeBoolean(f.getValue());
                                    }
                                );
                            });
                    }
                )
            );

            if (protocolCtx.isFeatureSupported(EXPIRY_POLICY)) {
                itemWriter.accept(CfgItem.EXPIRE_POLICY, w -> {
                    ExpiryPolicy expiryPlc = cfg.getExpiryPolicy();

                    if (expiryPlc == null)
                        w.writeBoolean(false);
                    else {
                        w.writeBoolean(true);
                        w.writeLong(convertDuration(expiryPlc.getExpiryForCreation()));
                        w.writeLong(convertDuration(expiryPlc.getExpiryForUpdate()));
                        w.writeLong(convertDuration(expiryPlc.getExpiryForAccess()));
                    }
                });
            }
            else if (cfg.getExpiryPolicy() != null) {
                throw new ClientProtocolError(String.format("Expire policies are not supported by the server " +
                    "version %s, required version %s", protocolCtx.version(), EXPIRY_POLICY.verIntroduced()));
            }

            writer.writeInt(origPos, out.position() - origPos - 4); // configuration length
            writer.writeInt(origPos + 4, propCnt.get()); // properties count
        }
    }

    /**
     * Deserialize configuration from stream.
     *
     * <p>Unlike the writer above, the server response uses a fixed field order rather
     * than (code, value) pairs, so fields are read positionally.
     */
    ClientCacheConfiguration cacheConfiguration(BinaryInputStream in, ProtocolContext protocolCtx)
        throws IOException {
        try (BinaryReaderExImpl reader = createBinaryReader(in)) {
            reader.readInt(); // Do not need length to read data. The protocol defines fixed configuration layout.

            return new ClientCacheConfiguration().setName("TBD") // cache name is to be assigned later
                .setAtomicityMode(CacheAtomicityMode.fromOrdinal(reader.readInt()))
                .setBackups(reader.readInt())
                .setCacheMode(CacheMode.fromOrdinal(reader.readInt()))
                .setCopyOnRead(reader.readBoolean())
                .setDataRegionName(reader.readString())
                .setEagerTtl(reader.readBoolean())
                .setStatisticsEnabled(reader.readBoolean())
                .setGroupName(reader.readString())
                .setDefaultLockTimeout(reader.readLong())
                .setMaxConcurrentAsyncOperations(reader.readInt())
                .setMaxQueryIteratorsCount(reader.readInt())
                .setName(reader.readString())
                .setOnheapCacheEnabled(reader.readBoolean())
                .setPartitionLossPolicy(PartitionLossPolicy.fromOrdinal((byte)reader.readInt()))
                .setQueryDetailMetricsSize(reader.readInt())
                .setQueryParallelism(reader.readInt())
                .setReadFromBackup(reader.readBoolean())
                .setRebalanceBatchSize(reader.readInt())
                .setRebalanceBatchesPrefetchCount(reader.readLong())
                .setRebalanceDelay(reader.readLong())
                .setRebalanceMode(CacheRebalanceMode.fromOrdinal(reader.readInt()))
                .setRebalanceOrder(reader.readInt())
                .setRebalanceThrottle(reader.readLong())
                .setRebalanceTimeout(reader.readLong())
                .setSqlEscapeAll(reader.readBoolean())
                .setSqlIndexMaxInlineSize(reader.readInt())
                .setSqlSchema(reader.readString())
                .setWriteSynchronizationMode(CacheWriteSynchronizationMode.fromOrdinal(reader.readInt()))
                .setKeyConfiguration(
                    ClientUtils.collection(in,
                        unused -> new CacheKeyConfiguration(reader.readString(), reader.readString()))
                        .toArray(new CacheKeyConfiguration[0])
                ).setQueryEntities(ClientUtils.collection(
                    in,
                    unused -> {
                        QueryEntity qryEntity = new QueryEntity(reader.readString(), reader.readString())
                            .setTableName(reader.readString())
                            .setKeyFieldName(reader.readString())
                            .setValueFieldName(reader.readString());
                        boolean isPrecisionAndScaleSupported =
                            protocolCtx.isFeatureSupported(QUERY_ENTITY_PRECISION_AND_SCALE);

                        Collection<QueryField> qryFields = ClientUtils.collection(
                            in,
                            unused2 -> {
                                String name = reader.readString();
                                String typeName = reader.readString();
                                boolean isKey = reader.readBoolean();
                                boolean isNotNull = reader.readBoolean();
                                Object dfltVal = reader.readObject();
                                // -1 is the local "absent" marker for precision/scale.
                                int precision = isPrecisionAndScaleSupported ? reader.readInt() : -1;
                                int scale = isPrecisionAndScaleSupported ? reader.readInt() : -1;

                                return new QueryField(name, typeName, isKey, isNotNull, dfltVal, precision, scale);
                            }
                        );

                        return qryEntity
                            .setFields(qryFields.stream().collect(Collectors.toMap(
                                QueryField::getName,
                                QueryField::getTypeName,
                                (a, b) -> a,
                                LinkedHashMap::new
                            )))
                            .setKeyFields(qryFields.stream()
                                .filter(QueryField::isKey)
                                .map(QueryField::getName)
                                .collect(Collectors.toCollection(LinkedHashSet::new))
                            )
                            .setNotNullFields(qryFields.stream()
                                .filter(QueryField::isNotNull)
                                .map(QueryField::getName)
                                .collect(Collectors.toSet())
                            )
                            .setDefaultFieldValues(qryFields.stream()
                                .filter(f -> f.getDefaultValue() != null)
                                .collect(Collectors.toMap(QueryField::getName, QueryField::getDefaultValue))
                            )
                            .setFieldsPrecision(qryFields.stream()
                                .filter(f -> f.getPrecision() != -1)
                                .collect(Collectors.toMap(QueryField::getName, QueryField::getPrecision))
                            )
                            .setFieldsScale(qryFields.stream()
                                .filter(f -> f.getScale() != -1)
                                .collect(Collectors.toMap(QueryField::getName, QueryField::getScale))
                            )
                            .setAliases(ClientUtils.collection(
                                in,
                                unused3 -> new SimpleEntry<>(reader.readString(), reader.readString())
                            ).stream().collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue)))
                            .setIndexes(ClientUtils.collection(
                                in,
                                unused4 -> {
                                    String name = reader.readString();
                                    QueryIndexType type = QueryIndexType.fromOrdinal(reader.readByte());
                                    int inlineSize = reader.readInt();
                                    LinkedHashMap<String, Boolean> fields = ClientUtils.collection(
                                        in,
                                        unused5 -> new SimpleEntry<>(reader.readString(), reader.readBoolean())
                                    ).stream().collect(Collectors.toMap(
                                        SimpleEntry::getKey,
                                        SimpleEntry::getValue,
                                        (a, b) -> a,
                                        LinkedHashMap::new
                                    ));

                                    return new QueryIndex(fields, type).setName(name).setInlineSize(inlineSize);
                                }
                            ));
                    }
                ).toArray(new QueryEntity[0]))
                .setExpiryPolicy(!protocolCtx.isFeatureSupported(EXPIRY_POLICY) ?
                    null :
                    reader.readBoolean() ?
                        new PlatformExpiryPolicy(reader.readLong(), reader.readLong(), reader.readLong()) :
                        null
                );
        }
    }

    /** Serialize SQL field query to stream. */
    void write(SqlFieldsQuery qry, BinaryOutputStream out) {
        writeObject(out, qry.getSchema());
        out.writeInt(qry.getPageSize());
        out.writeInt(-1); // do not limit
        writeObject(out, qry.getSql());
        ClientUtils.collection(qry.getArgs() == null ? null : Arrays.asList(qry.getArgs()), out, this::writeObject);
        out.writeByte((byte)0); // statement type ANY
        out.writeBoolean(qry.isDistributedJoins());
        out.writeBoolean(qry.isLocal());
        out.writeBoolean(qry.isReplicatedOnly());
        out.writeBoolean(qry.isEnforceJoinOrder());
        out.writeBoolean(qry.isCollocated());
        out.writeBoolean(qry.isLazy());
        out.writeLong(qry.getTimeout());
        out.writeBoolean(true); // include column names

        if (qry.getPartitions() != null) {
            out.writeInt(qry.getPartitions().length);

            for (int part : qry.getPartitions())
                out.writeInt(part);
        }
        else
            out.writeInt(-1); // -1 means "no explicit partitions"

        out.writeInt(qry.getUpdateBatchSize());
    }

    /** Write Ignite binary object to output stream. */
    void writeObject(BinaryOutputStream out, Object obj) {
        out.writeByteArray(marsh.marshal(obj));
    }

    /**
     * Creates a binary writer over the given stream using the thread-local schema holder.
     *
     * @param out Output stream.
     */
    BinaryRawWriterEx createBinaryWriter(BinaryOutputStream out) {
        return new BinaryWriterExImpl(marsh.context(), out, BinaryThreadLocalContext.get().schemaHolder(), null);
    }

    /**
     * Creates a binary reader over the given stream using this marshaller's context.
     *
     * @param in Input stream.
     */
    BinaryReaderExImpl createBinaryReader(BinaryInputStream in) {
        return createBinaryReader(marsh.context(), in);
    }

    /**
     * Creates a binary reader for an explicit (possibly null) binary context.
     *
     * @param binaryCtx Binary context.
     * @param in Input stream.
     */
    static BinaryReaderExImpl createBinaryReader(@Nullable BinaryContext binaryCtx, BinaryInputStream in) {
        return new BinaryReaderExImpl(binaryCtx, in, null, null, true, true);
    }

    /** Read Ignite binary object from input stream (no target class hint). */
    <T> T readObject(BinaryInputStream in, boolean keepBinary) {
        return readObject(in, keepBinary, null);
    }

    /**
     * Read Ignite binary object from input stream.
     *
     * <p>With {@code keepBinary} the raw unmarshalled form is returned; otherwise the
     * object graph is fully deserialized and unwrapped recursively.
     */
    <T> T readObject(BinaryInputStream in, boolean keepBinary, Class<T> clazz) {
        if (keepBinary)
            return (T)marsh.unmarshal(in);
        else {
            BinaryReaderHandles hnds = new BinaryReaderHandles();

            return (T)unwrapBinary(marsh.deserialize(in, hnds), hnds, clazz);
        }
    }

    /**
     * Unwrap binary object.
     *
     * <p>Recursively converts binary objects (and any binary objects nested in known
     * collections, maps and object arrays) into their deserialized form.
     */
    private Object unwrapBinary(Object obj, BinaryReaderHandles hnds, Class<?> clazz) {
        if (obj instanceof BinaryObjectImpl) {
            BinaryObjectImpl obj0 = (BinaryObjectImpl)obj;

            return marsh.deserialize(BinaryHeapInputStream.create(obj0.array(), obj0.start()), hnds);
        }
        else if (obj instanceof BinaryObject)
            return ((BinaryObject)obj).deserialize();
        else if (BinaryUtils.knownCollection(obj))
            return unwrapCollection((Collection<Object>)obj, hnds);
        else if (BinaryUtils.knownMap(obj))
            return unwrapMap((Map<Object, Object>)obj, hnds);
        else if (obj instanceof Object[])
            return unwrapArray((Object[])obj, hnds, clazz);
        else
            return obj;
    }

    /**
     * Unwrap collection with binary objects.
     */
    private Collection<Object> unwrapCollection(Collection<Object> col, BinaryReaderHandles hnds) {
        Collection<Object> col0 = BinaryUtils.newKnownCollection(col);

        for (Object obj0 : col)
            col0.add(unwrapBinary(obj0, hnds, null));

        return (col0 instanceof MutableSingletonList) ? U.convertToSingletonList(col0) : col0;
    }

    /**
     * Unwrap map with binary objects.
     */
    private Map<Object, Object> unwrapMap(Map<Object, Object> map, BinaryReaderHandles hnds) {
        Map<Object, Object> map0 = BinaryUtils.newMap(map);

        for (Map.Entry<Object, Object> e : map.entrySet())
            map0.put(unwrapBinary(e.getKey(), hnds, null), unwrapBinary(e.getValue(), hnds, null));

        return map0;
    }

    /**
     * Unwrap array with binary objects.
     */
    private Object[] unwrapArray(Object[] arr, BinaryReaderHandles hnds, Class<?> arrayClass) {
        if (BinaryUtils.knownArray(arr))
            return arr;

        // Prefer the caller-requested component type when one was supplied.
        Class<?> componentType = arrayClass != null && arrayClass.isArray() ?
            arrayClass.getComponentType() :
            arr.getClass().getComponentType();

        Object[] res = (Object[])Array.newInstance(componentType, arr.length);

        for (int i = 0; i < arr.length; i++)
            res[i] = unwrapBinary(arr[i], hnds, null);

        return res;
    }

    /** A helper class to translate query fields. */
    private static final class QueryField {
        /** Name. */
        private final String name;

        /** Type name. */
        private final String typeName;

        /** Is key. */
        private final boolean isKey;

        /** Is not null. */
        private final boolean isNotNull;

        /** Default value. */
        private final Object dfltVal;

        /** Precision. -1 means "not set". */
        private final int precision;

        /** Scale. -1 means "not set". */
        private final int scale;

        /** Serialization constructor: derives per-field flags from the owning entity. */
        QueryField(QueryEntity e, Map.Entry<String, String> nameAndTypeName) {
            name = nameAndTypeName.getKey();
            typeName = nameAndTypeName.getValue();

            Set<String> keys = e.getKeyFields();
            Set<String> notNulls = e.getNotNullFields();
            Map<String, Object> dflts = e.getDefaultFieldValues();
            Map<String, Integer> fldsPrecision = e.getFieldsPrecision();
            Map<String, Integer> fldsScale = e.getFieldsScale();

            isKey = keys != null && keys.contains(name);
            isNotNull = notNulls != null && notNulls.contains(name);
            dfltVal = dflts == null ? null : dflts.get(name);
            precision = fldsPrecision == null ? -1 : fldsPrecision.getOrDefault(name, -1);
            scale = fldsScale == null ? -1 : fldsScale.getOrDefault(name, -1);
        }

        /** Deserialization constructor. */
        public QueryField(String name, String typeName, boolean isKey, boolean isNotNull, Object dfltVal,
            int precision, int scale) {
            this.name = name;
            this.typeName = typeName;
            this.isKey = isKey;
            this.isNotNull = isNotNull;
            this.dfltVal = dfltVal;
            this.precision = precision;
            this.scale = scale;
        }

        /**
         * @return Name.
         */
        String getName() {
            return name;
        }

        /**
         * @return Type name.
         */
        String getTypeName() {
            return typeName;
        }

        /**
         * @return Is Key.
         */
        boolean isKey() {
            return isKey;
        }

        /**
         * @return Is Not Null.
         */
        boolean isNotNull() {
            return isNotNull;
        }

        /**
         * @return Default value.
         */
        Object getDefaultValue() {
            return dfltVal;
        }

        /**
         * @return Precision.
         */
        public int getPrecision() {
            return precision;
        }

        /**
         * @return Scale.
         */
        public int getScale() {
            return scale;
        }
    }

    /**
     * Thin client protocol cache configuration item codes.
     *
     * <p>Codes are protocol constants shared with the server — do not renumber.
     */
    private enum CfgItem {
        /** Name. */
        NAME(0),

        /** Cache mode. */
        CACHE_MODE(1),

        /** Atomicity mode. */
        ATOMICITY_MODE(2),

        /** Backups. */
        BACKUPS(3),

        /** Write synchronization mode. */
        WRITE_SYNC_MODE(4),

        /** Read from backup. */
        READ_FROM_BACKUP(6),

        /** Eager ttl. */
        EAGER_TTL(405),

        /** Group name. */
        GROUP_NAME(400),

        /** Default lock timeout. */
        DEFAULT_LOCK_TIMEOUT(402),

        /** Partition loss policy. */
        PART_LOSS_POLICY(404),

        /** Rebalance batch size. */
        REBALANCE_BATCH_SIZE(303),

        /** Rebalance batches prefetch count. */
        REBALANCE_BATCHES_PREFETCH_COUNT(304),

        /** Rebalance delay. */
        REBALANCE_DELAY(301),

        /** Rebalance mode. */
        REBALANCE_MODE(300),

        /** Rebalance order. */
        REBALANCE_ORDER(305),

        /** Rebalance throttle. */
        REBALANCE_THROTTLE(306),

        /** Rebalance timeout. */
        REBALANCE_TIMEOUT(302),

        /** Copy on read. */
        COPY_ON_READ(5),

        /** Data region name. */
        DATA_REGION_NAME(100),

        /** Stats enabled. */
        STATS_ENABLED(406),

        /** Max async ops. */
        MAX_ASYNC_OPS(403),

        /** Max query iterators. */
        MAX_QUERY_ITERATORS(206),

        /** Onheap cache enabled. */
        ONHEAP_CACHE_ENABLED(101),

        /** Query metric size. */
        QUERY_METRIC_SIZE(202),

        /** Query parallelism. */
        QUERY_PARALLELISM(201),

        /** Sql escape all. */
        SQL_ESCAPE_ALL(205),

        /** Sql index max inline size. */
        SQL_IDX_MAX_INLINE_SIZE(204),

        /** Sql schema. */
        SQL_SCHEMA(203),

        /** Key configs. */
        KEY_CONFIGS(401),

        /** Key entities. */
        QUERY_ENTITIES(200),

        /** Expire policy. */
        EXPIRE_POLICY(407);

        /** Code.
         */
        private final short code;

        /** */
        CfgItem(int code) {
            this.code = (short)code;
        }

        /** @return Code. */
        short code() {
            return code;
        }
    }
}
NSAmelchev/ignite
modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientUtils.java
Java
apache-2.0
33,859
# == Schema Information # # Table name: weekdays # # id :integer not null, primary key # batch_id :integer # weekday :string(255) # name :string(255) # sort_order :integer # day_of_week :integer # is_deleted :boolean default(FALSE) # #Fedena #Copyright 2011 Foradian Technologies Private Limited # #This product includes software developed at #Project Fedena - http://www.projectfedena.org/ # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. class Weekday < ActiveRecord::Base belongs_to :batch has_many :timetable_entries , dependent: :destroy default_scope { order('weekday asc') } scope :default, -> { where(batch_id: nil, is_deleted: false)} scope :for_batch, -> (b) { where(batch_id: b.to_i, is_deleted: false) } def self.weekday_by_day(batch_id) days={} weekdays = Weekday.where(batch_id: batch_id) if weekdays.empty? weekdays = Weekday.default end days = weekdays.group_by(&:day_of_week) end def deactivate self.update_attribute(:is_deleted,true) end def self.add_day(batch_id,day) unless batch_id == 0 unless Weekday.where(batch_id: batch_id, day_of_week: day).blank? Weekday.where(batch_id: batch_id, day_of_week: day).first.update_attributes(is_deleted: false, day_of_week: day) else w = Weekday.new(day_of_week: day, weekday: day, batch_id: batch_id, is_deleted: false) w.save end else unless Weekday.where(batch_id: nil, day_of_week: day).blank? 
Weekday.where(batch_id: nil, day_of_week: day).first.update_attributes( is_deleted: false, day_of_week: day) else w = Weekday.new(day_of_week: day, weekday: day, is_deleted: false) w.save end end end end
tachyons/somya
app/models/weekday.rb
Ruby
apache-2.0
2,251
/* Copyright 2016 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package persistentvolume import ( "fmt" "time" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/client/cache" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/runtime" vol "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/watch" "github.com/golang/glog" ) // This file contains the controller base functionality, i.e. framework to // process PV/PVC added/updated/deleted events. 
The real binding, provisioning, // recycling and deleting is done in controller.go // NewPersistentVolumeController creates a new PersistentVolumeController func NewPersistentVolumeController( kubeClient clientset.Interface, syncPeriod time.Duration, provisioner vol.ProvisionableVolumePlugin, recyclers []vol.VolumePlugin, cloud cloudprovider.Interface, clusterName string, volumeSource, claimSource cache.ListerWatcher, eventRecorder record.EventRecorder, ) *PersistentVolumeController { if eventRecorder == nil { broadcaster := record.NewBroadcaster() broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")}) eventRecorder = broadcaster.NewRecorder(api.EventSource{Component: "persistentvolume-controller"}) } controller := &PersistentVolumeController{ kubeClient: kubeClient, eventRecorder: eventRecorder, runningOperations: make(map[string]bool), cloud: cloud, provisioner: provisioner, clusterName: clusterName, createProvisionedPVRetryCount: createProvisionedPVRetryCount, createProvisionedPVInterval: createProvisionedPVInterval, } controller.recyclePluginMgr.InitPlugins(recyclers, controller) if controller.provisioner != nil { if err := controller.provisioner.Init(controller); err != nil { glog.Errorf("PersistentVolumeController: error initializing provisioner plugin: %v", err) } } if volumeSource == nil { volumeSource = &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { return kubeClient.Core().PersistentVolumes().List(options) }, WatchFunc: func(options api.ListOptions) (watch.Interface, error) { return kubeClient.Core().PersistentVolumes().Watch(options) }, } } if claimSource == nil { claimSource = &cache.ListWatch{ ListFunc: func(options api.ListOptions) (runtime.Object, error) { return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options) }, WatchFunc: func(options api.ListOptions) (watch.Interface, error) { return 
kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options) }, } } controller.volumes.store, controller.volumeController = framework.NewIndexerInformer( volumeSource, &api.PersistentVolume{}, syncPeriod, framework.ResourceEventHandlerFuncs{ AddFunc: controller.addVolume, UpdateFunc: controller.updateVolume, DeleteFunc: controller.deleteVolume, }, cache.Indexers{"accessmodes": accessModesIndexFunc}, ) controller.claims, controller.claimController = framework.NewInformer( claimSource, &api.PersistentVolumeClaim{}, syncPeriod, framework.ResourceEventHandlerFuncs{ AddFunc: controller.addClaim, UpdateFunc: controller.updateClaim, DeleteFunc: controller.deleteClaim, }, ) return controller } // addVolume is callback from framework.Controller watching PersistentVolume // events. func (ctrl *PersistentVolumeController) addVolume(obj interface{}) { if !ctrl.isFullySynced() { return } pv, ok := obj.(*api.PersistentVolume) if !ok { glog.Errorf("expected PersistentVolume but handler received %+v", obj) return } if err := ctrl.syncVolume(pv); err != nil { if errors.IsConflict(err) { // Version conflict error happens quite often and the controller // recovers from it easily. glog.V(3).Infof("PersistentVolumeController could not add volume %q: %+v", pv.Name, err) } else { glog.Errorf("PersistentVolumeController could not add volume %q: %+v", pv.Name, err) } } } // updateVolume is callback from framework.Controller watching PersistentVolume // events. func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) { if !ctrl.isFullySynced() { return } newVolume, ok := newObj.(*api.PersistentVolume) if !ok { glog.Errorf("Expected PersistentVolume but handler received %+v", newObj) return } if err := ctrl.syncVolume(newVolume); err != nil { if errors.IsConflict(err) { // Version conflict error happens quite often and the controller // recovers from it easily. 
glog.V(3).Infof("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err) } else { glog.Errorf("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err) } } } // deleteVolume is callback from framework.Controller watching PersistentVolume // events. func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) { if !ctrl.isFullySynced() { return } var volume *api.PersistentVolume var ok bool volume, ok = obj.(*api.PersistentVolume) if !ok { if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { volume, ok = unknown.Obj.(*api.PersistentVolume) if !ok { glog.Errorf("Expected PersistentVolume but deleteVolume received %+v", unknown.Obj) return } } else { glog.Errorf("Expected PersistentVolume but deleteVolume received %+v", obj) return } } if !ok || volume == nil || volume.Spec.ClaimRef == nil { return } if claimObj, exists, _ := ctrl.claims.GetByKey(claimrefToClaimKey(volume.Spec.ClaimRef)); exists { if claim, ok := claimObj.(*api.PersistentVolumeClaim); ok && claim != nil { // sync the claim when its volume is deleted. Explicitly syncing the // claim here in response to volume deletion prevents the claim from // waiting until the next sync period for its Lost status. err := ctrl.syncClaim(claim) if err != nil { if errors.IsConflict(err) { // Version conflict error happens quite often and the // controller recovers from it easily. glog.V(3).Infof("PersistentVolumeController could not update volume %q from deleteVolume handler: %+v", claimToClaimKey(claim), err) } else { glog.Errorf("PersistentVolumeController could not update volume %q from deleteVolume handler: %+v", claimToClaimKey(claim), err) } } } else { glog.Errorf("Cannot convert object from claim cache to claim %q!?: %+v", claimrefToClaimKey(volume.Spec.ClaimRef), claimObj) } } } // addClaim is callback from framework.Controller watching PersistentVolumeClaim // events. 
func (ctrl *PersistentVolumeController) addClaim(obj interface{}) { if !ctrl.isFullySynced() { return } claim, ok := obj.(*api.PersistentVolumeClaim) if !ok { glog.Errorf("Expected PersistentVolumeClaim but addClaim received %+v", obj) return } if err := ctrl.syncClaim(claim); err != nil { if errors.IsConflict(err) { // Version conflict error happens quite often and the controller // recovers from it easily. glog.V(3).Infof("PersistentVolumeController could not add claim %q: %+v", claimToClaimKey(claim), err) } else { glog.Errorf("PersistentVolumeController could not add claim %q: %+v", claimToClaimKey(claim), err) } } } // updateClaim is callback from framework.Controller watching PersistentVolumeClaim // events. func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) { if !ctrl.isFullySynced() { return } newClaim, ok := newObj.(*api.PersistentVolumeClaim) if !ok { glog.Errorf("Expected PersistentVolumeClaim but updateClaim received %+v", newObj) return } if err := ctrl.syncClaim(newClaim); err != nil { if errors.IsConflict(err) { // Version conflict error happens quite often and the controller // recovers from it easily. glog.V(3).Infof("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err) } else { glog.Errorf("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err) } } } // deleteClaim is callback from framework.Controller watching PersistentVolumeClaim // events. 
func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) { if !ctrl.isFullySynced() { return } var volume *api.PersistentVolume var claim *api.PersistentVolumeClaim var ok bool claim, ok = obj.(*api.PersistentVolumeClaim) if !ok { if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { claim, ok = unknown.Obj.(*api.PersistentVolumeClaim) if !ok { glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", unknown.Obj) return } } else { glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", obj) return } } if !ok || claim == nil { return } if pvObj, exists, _ := ctrl.volumes.store.GetByKey(claim.Spec.VolumeName); exists { if volume, ok = pvObj.(*api.PersistentVolume); ok { // sync the volume when its claim is deleted. Explicitly sync'ing the // volume here in response to claim deletion prevents the volume from // waiting until the next sync period for its Release. if volume != nil { err := ctrl.syncVolume(volume) if err != nil { if errors.IsConflict(err) { // Version conflict error happens quite often and the // controller recovers from it easily. 
glog.V(3).Infof("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", volume.Name, err) } else { glog.Errorf("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", volume.Name, err) } } } } else { glog.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, pvObj) } } } // Run starts all of this controller's control loops func (ctrl *PersistentVolumeController) Run() { glog.V(4).Infof("starting PersistentVolumeController") if ctrl.volumeControllerStopCh == nil { ctrl.volumeControllerStopCh = make(chan struct{}) go ctrl.volumeController.Run(ctrl.volumeControllerStopCh) } if ctrl.claimControllerStopCh == nil { ctrl.claimControllerStopCh = make(chan struct{}) go ctrl.claimController.Run(ctrl.claimControllerStopCh) } } // Stop gracefully shuts down this controller func (ctrl *PersistentVolumeController) Stop() { glog.V(4).Infof("stopping PersistentVolumeController") close(ctrl.volumeControllerStopCh) close(ctrl.claimControllerStopCh) } // isFullySynced returns true, if both volume and claim caches are fully loaded // after startup. // We do not want to process events with not fully loaded caches - e.g. we might // recycle/delete PVs that don't have corresponding claim in the cache yet. 
func (ctrl *PersistentVolumeController) isFullySynced() bool { return ctrl.volumeController.HasSynced() && ctrl.claimController.HasSynced() } // Stateless functions func hasAnnotation(obj api.ObjectMeta, ann string) bool { _, found := obj.Annotations[ann] return found } func setAnnotation(obj *api.ObjectMeta, ann string, value string) { if obj.Annotations == nil { obj.Annotations = make(map[string]string) } obj.Annotations[ann] = value } func getClaimStatusForLogging(claim *api.PersistentVolumeClaim) string { bound := hasAnnotation(claim.ObjectMeta, annBindCompleted) boundByController := hasAnnotation(claim.ObjectMeta, annBoundByController) return fmt.Sprintf("phase: %s, bound to: %q, bindCompleted: %v, boundByController: %v", claim.Status.Phase, claim.Spec.VolumeName, bound, boundByController) } func getVolumeStatusForLogging(volume *api.PersistentVolume) string { boundByController := hasAnnotation(volume.ObjectMeta, annBoundByController) claimName := "" if volume.Spec.ClaimRef != nil { claimName = fmt.Sprintf("%s/%s (uid: %s)", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, volume.Spec.ClaimRef.UID) } return fmt.Sprintf("phase: %s, bound to: %q, boundByController: %v", volume.Status.Phase, claimName, boundByController) } // isVolumeBoundToClaim returns true, if given volume is pre-bound or bound // to specific claim. Both claim.Name and claim.Namespace must be equal. // If claim.UID is present in volume.Spec.ClaimRef, it must be equal too. func isVolumeBoundToClaim(volume *api.PersistentVolume, claim *api.PersistentVolumeClaim) bool { if volume.Spec.ClaimRef == nil { return false } if claim.Name != volume.Spec.ClaimRef.Name || claim.Namespace != volume.Spec.ClaimRef.Namespace { return false } if volume.Spec.ClaimRef.UID != "" && claim.UID != volume.Spec.ClaimRef.UID { return false } return true }
tnachen/kubernetes
pkg/controller/persistentvolume/controller_base.go
GO
apache-2.0
13,565
# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from six.moves import queue as Queue import time from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from ovs.db import idl from neutron.agent.ovsdb import api from neutron.agent.ovsdb.native import commands as cmd from neutron.agent.ovsdb.native import connection from neutron.agent.ovsdb.native import idlutils from neutron.i18n import _LE OPTS = [ cfg.StrOpt('ovsdb_connection', default='tcp:127.0.0.1:6640', help=_('The connection string for the native OVSDB backend')), ] cfg.CONF.register_opts(OPTS, 'OVS') # TODO(twilson) DEFAULT.ovs_vsctl_timeout should be OVS.vsctl_timeout cfg.CONF.import_opt('ovs_vsctl_timeout', 'neutron.agent.common.ovs_lib') LOG = logging.getLogger(__name__) class Transaction(api.Transaction): def __init__(self, api, ovsdb_connection, timeout, check_error=False, log_errors=False): self.api = api self.check_error = check_error self.log_errors = log_errors self.commands = [] self.results = Queue.Queue(1) self.ovsdb_connection = ovsdb_connection self.timeout = timeout def add(self, command): """Add a command to the transaction returns The command passed as a convenience """ self.commands.append(command) return command def commit(self): self.ovsdb_connection.queue_txn(self) result = self.results.get() if self.check_error: if isinstance(result, idlutils.ExceptionResult): if self.log_errors: LOG.error(result.tb) raise result.ex return 
result def do_commit(self): start_time = time.time() attempts = 0 while True: elapsed_time = time.time() - start_time if attempts > 0 and elapsed_time > self.timeout: raise RuntimeError("OVS transaction timed out") attempts += 1 # TODO(twilson) Make sure we don't loop longer than vsctl_timeout txn = idl.Transaction(self.api.idl) for i, command in enumerate(self.commands): LOG.debug("Running txn command(idx=%(idx)s): %(cmd)s", {'idx': i, 'cmd': command}) try: command.run_idl(txn) except Exception: with excutils.save_and_reraise_exception() as ctx: txn.abort() if not self.check_error: ctx.reraise = False seqno = self.api.idl.change_seqno status = txn.commit_block() if status == txn.TRY_AGAIN: LOG.debug("OVSDB transaction returned TRY_AGAIN, retrying") if self.api.idl._session.rpc.status != 0: LOG.debug("Lost connection to OVSDB, reconnecting!") self.api.idl.force_reconnect() idlutils.wait_for_change( self.api.idl, self.timeout - elapsed_time, seqno) continue elif status == txn.ERROR: msg = _LE("OVSDB Error: %s") % txn.get_error() if self.log_errors: LOG.error(msg) if self.check_error: # For now, raise similar error to vsctl/utils.execute() raise RuntimeError(msg) return elif status == txn.ABORTED: LOG.debug("Transaction aborted") return elif status == txn.UNCHANGED: LOG.debug("Transaction caused no change") return [cmd.result for cmd in self.commands] class OvsdbIdl(api.API): ovsdb_connection = connection.Connection(cfg.CONF.OVS.ovsdb_connection, cfg.CONF.ovs_vsctl_timeout, 'Open_vSwitch') def __init__(self, context): super(OvsdbIdl, self).__init__(context) OvsdbIdl.ovsdb_connection.start() self.idl = OvsdbIdl.ovsdb_connection.idl @property def _tables(self): return self.idl.tables @property def _ovs(self): return self._tables['Open_vSwitch'].rows.values()[0] def transaction(self, check_error=False, log_errors=True, **kwargs): return Transaction(self, OvsdbIdl.ovsdb_connection, self.context.vsctl_timeout, check_error, log_errors) def add_br(self, name, 
may_exist=True): return cmd.AddBridgeCommand(self, name, may_exist) def del_br(self, name, if_exists=True): return cmd.DelBridgeCommand(self, name, if_exists) def br_exists(self, name): return cmd.BridgeExistsCommand(self, name) def port_to_br(self, name): return cmd.PortToBridgeCommand(self, name) def iface_to_br(self, name): # For our purposes, ports and interfaces always have the same name return cmd.PortToBridgeCommand(self, name) def list_br(self): return cmd.ListBridgesCommand(self) def br_get_external_id(self, name, field): return cmd.BrGetExternalIdCommand(self, name, field) def br_set_external_id(self, name, field, value): return cmd.BrSetExternalIdCommand(self, name, field, value) def db_set(self, table, record, *col_values): return cmd.DbSetCommand(self, table, record, *col_values) def db_clear(self, table, record, column): return cmd.DbClearCommand(self, table, record, column) def db_get(self, table, record, column): return cmd.DbGetCommand(self, table, record, column) def db_list(self, table, records=None, columns=None, if_exists=False): return cmd.DbListCommand(self, table, records, columns, if_exists) def db_find(self, table, *conditions, **kwargs): return cmd.DbFindCommand(self, table, *conditions, **kwargs) def set_controller(self, bridge, controllers): return cmd.SetControllerCommand(self, bridge, controllers) def del_controller(self, bridge): return cmd.DelControllerCommand(self, bridge) def get_controller(self, bridge): return cmd.GetControllerCommand(self, bridge) def set_fail_mode(self, bridge, mode): return cmd.SetFailModeCommand(self, bridge, mode) def add_port(self, bridge, port, may_exist=True): return cmd.AddPortCommand(self, bridge, port, may_exist) def del_port(self, port, bridge=None, if_exists=True): return cmd.DelPortCommand(self, port, bridge, if_exists) def list_ports(self, bridge): return cmd.ListPortsCommand(self, bridge)
infobloxopen/neutron
neutron/agent/ovsdb/impl_idl.py
Python
apache-2.0
7,372
/* * Copyright © 2014 Cask Data, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package co.cask.cdap.security.zookeeper; import co.cask.cdap.api.common.Bytes; import co.cask.cdap.common.conf.CConfiguration; import co.cask.cdap.common.conf.Constants; import co.cask.cdap.common.guice.ConfigModule; import co.cask.cdap.common.guice.ZKClientModule; import co.cask.cdap.common.io.Codec; import com.google.common.base.Stopwatch; import com.google.common.collect.Lists; import com.google.common.util.concurrent.SettableFuture; import com.google.inject.Guice; import com.google.inject.Injector; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster; import org.apache.twill.zookeeper.ZKClientService; import org.apache.zookeeper.ZooDefs; import org.apache.zookeeper.data.ACL; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; /** * Tests covering the {@link SharedResourceCache} implementation. 
*/ public class SharedResourceCacheTest { private static final String ZK_NAMESPACE = "/SharedResourceCacheTest"; private static final Logger LOG = LoggerFactory.getLogger(SharedResourceCacheTest.class); private static MiniZooKeeperCluster zkCluster; private static String zkConnectString; private static Injector injector1; private static Injector injector2; @BeforeClass public static void startUp() throws Exception { HBaseTestingUtility testUtil = new HBaseTestingUtility(); zkCluster = testUtil.startMiniZKCluster(); zkConnectString = testUtil.getConfiguration().get(HConstants.ZOOKEEPER_QUORUM) + ":" + zkCluster.getClientPort(); LOG.info("Running ZK cluster at " + zkConnectString); CConfiguration cConf = CConfiguration.create(); cConf.set(Constants.Zookeeper.QUORUM, zkConnectString); injector1 = Guice.createInjector(new ConfigModule(cConf, testUtil.getConfiguration()), new ZKClientModule()); injector2 = Guice.createInjector(new ConfigModule(cConf, testUtil.getConfiguration()), new ZKClientModule()); } @AfterClass public static void tearDown() throws Exception { zkCluster.shutdown(); } @Test public void testCache() throws Exception { String parentZNode = ZK_NAMESPACE + "/testCache"; List<ACL> acls = Lists.newArrayList(ZooDefs.Ids.OPEN_ACL_UNSAFE); // create 2 cache instances ZKClientService zkClient1 = injector1.getInstance(ZKClientService.class); zkClient1.startAndWait(); SharedResourceCache<String> cache1 = new SharedResourceCache<>(zkClient1, new StringCodec(), parentZNode, acls); cache1.init(); // add items to one and wait for them to show up in the second String key1 = "key1"; String value1 = "value1"; cache1.put(key1, value1); ZKClientService zkClient2 = injector2.getInstance(ZKClientService.class); zkClient2.startAndWait(); SharedResourceCache<String> cache2 = new SharedResourceCache<>(zkClient2, new StringCodec(), parentZNode, acls); cache2.init(); waitForEntry(cache2, key1, value1, 10000); assertEquals(cache1.get(key1), cache2.get(key1)); final String key2 = 
"key2"; String value2 = "value2"; cache1.put(key2, value2); waitForEntry(cache2, key2, value2, 10000); assertEquals(cache1.get(key2), cache2.get(key2)); final String key3 = "key3"; String value3 = "value3"; cache2.put(key3, value3); waitForEntry(cache1, key3, value3, 10000); assertEquals(cache2.get(key3), cache1.get(key3)); // replace an existing key String value2new = "value2.2"; final SettableFuture<String> value2future = SettableFuture.create(); ResourceListener<String> value2listener = new BaseResourceListener<String>() { @Override public void onResourceUpdate(String name, String instance) { LOG.info("Resource updated: {}={}", name, instance); if (name.equals(key2)) { value2future.set(instance); } } }; cache2.addListener(value2listener); cache1.put(key2, value2new); //String newValue = value2future.get(10000, TimeUnit.MILLISECONDS); String newValue = value2future.get(); assertEquals(value2new, newValue); assertEquals(value2new, cache2.get(key2)); cache2.removeListener(value2listener); // remove items from the second and wait for them to disappear from the first // Use a latch to make sure both cache see the changes final CountDownLatch key3RemoveLatch = new CountDownLatch(2); cache1.addListener(new BaseResourceListener<String>() { @Override public void onResourceDelete(String name) { LOG.info("Resource deleted on cache 1 {}", name); if (name.equals(key3)) { key3RemoveLatch.countDown(); } } }); final SettableFuture<String> key3RemoveFuture = SettableFuture.create(); ResourceListener<String> key3Listener = new BaseResourceListener<String>() { @Override public void onResourceDelete(String name) { LOG.info("Resource deleted on cache 2 {}", name); if (name.equals(key3)) { key3RemoveFuture.set(name); key3RemoveLatch.countDown(); } } }; cache2.addListener(key3Listener); cache1.remove(key3); String removedKey = key3RemoveFuture.get(); assertEquals(key3, removedKey); assertNull(cache2.get(key3)); key3RemoveLatch.await(5, TimeUnit.SECONDS); // verify that cache contents 
are equal assertEquals(cache1, cache2); } private static final class StringCodec implements Codec<String> { @Override public byte[] encode(String object) throws IOException { return Bytes.toBytes(object); } @Override public String decode(byte[] data) throws IOException { return Bytes.toString(data); } } private void waitForEntry(SharedResourceCache<String> cache, String key, String expectedValue, long timeToWaitMillis) throws InterruptedException { String value = cache.get(key); boolean isPresent = expectedValue.equals(value); Stopwatch watch = new Stopwatch().start(); while (!isPresent && watch.elapsedTime(TimeUnit.MILLISECONDS) < timeToWaitMillis) { TimeUnit.MILLISECONDS.sleep(200); value = cache.get(key); isPresent = expectedValue.equals(value); } if (!isPresent) { throw new RuntimeException("Timed out waiting for expected value '" + expectedValue + "' in cache"); } } }
chtyim/cdap
cdap-security/src/test/java/co/cask/cdap/security/zookeeper/SharedResourceCacheTest.java
Java
apache-2.0
7,404
/*
 * Copyright 2017 Samsung Electronics All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
var EventEmitter = require('events').EventEmitter;
var WS = require('websocket').w3cwebsocket;
var convertBlob = require('blob-to-buffer');
var HashMap = require('hashmap');

// Maps an outstanding CoAP token (stringified) to the response callback that
// must be invoked when the matching response packet arrives.
var coapTokenMap = new HashMap();

// FIX: the original read `window.localStorage` unconditionally, which throws a
// ReferenceError under Node.js (no `window` global) before the
// node-localstorage fallback below could ever run. Guard the access instead.
var localStorage = (typeof window !== 'undefined') ? window.localStorage : null;
if (typeof localStorage === 'undefined' || localStorage === null) {
    var LocalStorage = require('node-localstorage').LocalStorage;
    localStorage = new LocalStorage('./client');
}

const coap = require('./components/CoapWebsocketCodec').coap;
const parse = require('./components/CoapWebsocketCodec').parse;
const path = require('./components/CoapWebsocketCodec').path;

// Event names emitted on Client.event.
const CONNECTED = 'connected';
const DISCONNECTED = 'disconnected';
const SIGNUP = 'signup';
const SIGNIN = 'signin';
const SIGNOUT = 'signout';
const ERROR = 'error';

// CoAP response code 2.04 (Changed) as encoded by the codec: (2 << 5) | 4 = 68.
const COAP_CHANGED = 68;

/**
 * Singleton CoAP-over-websocket client for the IoTivity cloud interface.
 * Connection state and account events are published through `Client.event`.
 */
var Client = new function() {
    var ws;
    this.event = new EventEmitter();

    // Open the CoAP websocket connection. TODO: secured websocket (wss).
    this.init = function(address) {
        console.log('client init: ' + address);
        var serverURL = "ws://" + address + "/.well-known/coap";
        this.ws = new WS(serverURL, 'coap');

        this.ws.onopen = function() {
            console.debug('Connected to server ' + address);
            Client.event.emit(CONNECTED);
        };

        this.ws.onclose = function() {
            console.debug('Disconnected from server ' + address);
            Client.event.emit(DISCONNECTED);
        };

        this.ws.onerror = function() {
            console.error('Error occurs');
            Client.event.emit(ERROR, 'Error occurs while websocket connection');
            Client.event.emit(DISCONNECTED);
        };

        // Response dispatch: decode the blob, then route the packet to the
        // callback registered for its token in doRequest().
        this.ws.onmessage = function(event) {
            console.debug('Message received -');
            convertBlob(event.data, function(err, buffer) {
                if (err) throw err;
                var packet = parse(buffer);
                console.debug(packet);
                console.debug(packet.getPayloadObject);
                var handler = coapTokenMap.get(packet.getToken.toString());
                // FIX: guard against an unknown/late token instead of calling
                // `undefined` and crashing inside the socket callback.
                if (typeof handler === 'function') {
                    handler(packet);
                }
                // Sequence number -1 marks a final (non-observe) response, so
                // the token can be retired.
                if (packet.getSequenceNumber === -1) {
                    coapTokenMap.remove(packet.getToken.toString());
                }
            });
        };
    }

    // Close the CoAP websocket connection and notify listeners.
    this.close = function() {
        console.log('client close');
        this.ws.close();
        Client.event.emit(DISCONNECTED);
    }

    // Sign-up response handler: emits SIGNUP with uid/accesstoken on 2.04.
    this.onSignUp = function(packet) {
        if (packet.getCode === COAP_CHANGED) {
            Client.event.emit(SIGNUP,
                              packet.getPayloadObject.uid,
                              packet.getPayloadObject.accesstoken);
        } else {
            Client.event.emit(ERROR, "SignUp Failed" + packet.getCode);
        }
    }

    // Send a sign-up request with the given device id / auth provider / code.
    this.signUp = function(di, provider, authcode) {
        console.log('client signUp');
        var payload = {
            di: di,
            authprovider: provider,
            authcode: authcode,
            devicetype: "device"
        };
        this.ws.send(this.doRequest("POST", path.ACCOUNT_FULL_URI, null,
                                    payload, this.onSignUp));
    }

    // Sign-in response handler.
    this.onSignIn = function(packet) {
        if (packet.getCode === COAP_CHANGED) {
            Client.event.emit(SIGNIN);
        } else {
            Client.event.emit(ERROR, "SignIn Failed" + packet.getCode);
        }
    }

    // Send a sign-in (login: true) request.
    this.signIn = function(di, uid, accesstoken) {
        console.log('client signIn');
        var payload = {
            di: di,
            uid: uid,
            accesstoken: accesstoken,
            login: true
        };
        this.ws.send(this.doRequest("POST", path.ACCOUNT_SESSION_FULL_URI, null,
                                    payload, this.onSignIn));
    }

    // Sign-out response handler.
    this.onSignOut = function(packet) {
        if (packet.getCode === COAP_CHANGED) {
            Client.event.emit(SIGNOUT);
        } else {
            Client.event.emit(ERROR, "SignOut Failed" + packet.getCode);
        }
    }

    // Send a sign-out (login: false) request.
    this.signOut = function(di, accesstoken) {
        console.log('client signOut');
        var payload = {
            di: di,
            accesstoken: accesstoken,
            login: false
        };
        this.ws.send(this.doRequest("POST", path.ACCOUNT_SESSION_FULL_URI, null,
                                    payload, this.onSignOut));
    }

    // Send a resource-discovery request against /oic/res.
    this.discoverResource = function(queries, response) {
        console.log('client discoverResource ' + queries);
        this.ws.send(this.doRequest("GET", path.WELL_KNOWN_FULL_URI, queries,
                                    null, response));
    }

    // Send an arbitrary control message to a resource URI.
    this.sendMessage = function(uri, method, payload, queries, response) {
        console.log('client sendMessage');
        this.ws.send(this.doRequest(method, uri, queries, payload, response));
    }

    // Build a token-tagged CoAP request and register the response callback
    // under the fresh 8-byte random token.
    this.doRequest = function(method, uri, query, payload, response) {
        var newCoaptoken = require('crypto').randomBytes(8);
        coapTokenMap.set(newCoaptoken.toString(), response);
        return coap.createTokenRequest(newCoaptoken, method, uri, query, payload);
    }

    // Erase the given keys from local storage.
    this.removeClientData = function(keyArray) {
        for (var i = 0; i < keyArray.length; i++) {
            localStorage.removeItem(keyArray[i]);
        }
    };

    // Persist [key, value] pairs into local storage.
    this.writeClientData = function(keyValArray) {
        for (var i = 0; i < keyValArray.length; i++) {
            localStorage.setItem(keyValArray[i][0], keyValArray[i][1]);
        }
    };

    // Read a single value from local storage (null when absent).
    this.readClientData = function(key) {
        return localStorage.getItem(key);
    };

    // Flatten a discovery payload (list of devices with `links`) into a list
    // of { n, di, uri, rts, ifs } resource descriptors.
    this.getResourceList = function(discoveryPayload) {
        if (discoveryPayload === null) {
            return [];
        }
        var resourceList = [];
        console.debug("Discovered devices: " + discoveryPayload.length);
        for (var i = 0; i < discoveryPayload.length; i++) {
            for (var j = 0; j < discoveryPayload[i].links.length; j++) {
                console.debug("[" + i + "] " + discoveryPayload[i].links[j].href);
                resourceList.push({
                    n: discoveryPayload[i].n,
                    di: discoveryPayload[i].di,
                    uri: discoveryPayload[i].links[j].href,
                    rts: this.buildArrayString(discoveryPayload[i].links[j].rt),
                    ifs: this.buildArrayString(discoveryPayload[i].links[j].if)
                });
            }
        }
        return resourceList;
    }

    // Join array elements with ", " (equivalent to the original manual loop,
    // whose separator variable was misspelled "seperates").
    this.buildArrayString = function(array) {
        return array.join(', ');
    }
}();

module.exports = Client;
JunhwanPark/TizenRT
external/iotivity/iotivity_1.3-rel/cloud/dashboard/src/Client.js
JavaScript
apache-2.0
7,702
/* * Copyright 2016 Red Hat, Inc. and/or its affiliates * and other contributors as indicated by the @author tags. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.keycloak.saml.processing.core.util; import org.keycloak.dom.xmlsec.w3.xmldsig.DSAKeyValueType; import org.keycloak.dom.xmlsec.w3.xmldsig.KeyValueType; import org.keycloak.dom.xmlsec.w3.xmldsig.RSAKeyValueType; import org.keycloak.dom.xmlsec.w3.xmldsig.SignatureType; import org.keycloak.saml.common.PicketLinkLogger; import org.keycloak.saml.common.PicketLinkLoggerFactory; import org.keycloak.saml.common.constants.GeneralConstants; import org.keycloak.saml.common.constants.JBossSAMLConstants; import org.keycloak.saml.common.constants.WSTrustConstants; import org.keycloak.saml.common.exceptions.ParsingException; import org.keycloak.saml.common.exceptions.ProcessingException; import org.keycloak.saml.common.util.Base64; import org.keycloak.saml.common.util.DocumentUtil; import org.keycloak.saml.common.util.StringUtil; import org.keycloak.saml.common.util.SystemPropertiesUtil; import org.keycloak.saml.common.util.TransformerUtil; import org.w3c.dom.Attr; import org.w3c.dom.Document; import org.w3c.dom.Element; import org.w3c.dom.NamedNodeMap; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import org.xml.sax.SAXException; import javax.xml.bind.JAXBException; import javax.xml.crypto.MarshalException; import javax.xml.crypto.dsig.CanonicalizationMethod; import javax.xml.crypto.dsig.DigestMethod; import 
javax.xml.crypto.dsig.Reference; import javax.xml.crypto.dsig.SignatureMethod; import javax.xml.crypto.dsig.SignedInfo; import javax.xml.crypto.dsig.Transform; import javax.xml.crypto.dsig.XMLSignature; import javax.xml.crypto.dsig.XMLSignatureException; import javax.xml.crypto.dsig.XMLSignatureFactory; import javax.xml.crypto.dsig.dom.DOMSignContext; import javax.xml.crypto.dsig.dom.DOMValidateContext; import javax.xml.crypto.dsig.keyinfo.KeyInfo; import javax.xml.crypto.dsig.keyinfo.KeyInfoFactory; import javax.xml.crypto.dsig.spec.C14NMethodParameterSpec; import javax.xml.crypto.dsig.spec.TransformParameterSpec; import javax.xml.namespace.QName; import javax.xml.parsers.ParserConfigurationException; import javax.xml.transform.Transformer; import javax.xml.transform.TransformerException; import javax.xml.transform.TransformerFactory; import javax.xml.transform.stream.StreamResult; import java.io.ByteArrayInputStream; import java.io.OutputStream; import java.security.GeneralSecurityException; import java.security.Key; import java.security.KeyException; import java.security.KeyManagementException; import java.security.KeyPair; import java.security.NoSuchProviderException; import java.security.PrivateKey; import java.security.PublicKey; import java.security.cert.CertificateFactory; import java.security.cert.X509Certificate; import java.security.interfaces.DSAPublicKey; import java.security.interfaces.RSAPublicKey; import java.util.ArrayList; import java.util.Collections; import java.util.LinkedList; import java.util.List; import javax.xml.crypto.AlgorithmMethod; import javax.xml.crypto.KeySelector; import javax.xml.crypto.KeySelectorException; import javax.xml.crypto.KeySelectorResult; import javax.xml.crypto.XMLCryptoContext; import javax.xml.crypto.dsig.keyinfo.KeyName; import org.keycloak.rotation.KeyLocator; import org.keycloak.saml.processing.api.util.KeyInfoTools; /** * Utility for XML Signature <b>Note:</b> You can change the canonicalization method type by 
using the system property
 * "picketlink.xmlsig.canonicalization"
 *
 * @author Anil.Saldhana@redhat.com
 * @author alessio.soldano@jboss.com
 * @since Dec 15, 2008
 */
public class XMLSignatureUtil {

    private static final PicketLinkLogger logger = PicketLinkLoggerFactory.getLogger();

    /**
     * By default we include the KeyInfo in the signature.
     *
     * FIX: this declaration must appear textually *before* the static block below.
     * In the original ordering the declaration (with its {@code = true} initializer)
     * came after the static block; since static initializers and class-variable
     * initializers execute in textual order (JLS 12.4.2), the value parsed from the
     * "picketlink.xmlsig.includeKeyInfo" system property was silently overwritten
     * back to {@code true}, making the property a no-op.
     */
    private static boolean includeKeyInfoInSignature = true;

    // Set some system properties and Santuario providers. Run this block before any other class initialization.
    static {
        ProvidersUtil.ensure();
        SystemPropertiesUtil.ensure();
        String keyInfoProp = SecurityActions.getSystemProperty("picketlink.xmlsig.includeKeyInfo", null);
        if (StringUtil.isNotNull(keyInfoProp)) {
            includeKeyInfoInSignature = Boolean.parseBoolean(keyInfoProp);
        }
    }

    // Intentionally declared after the static block: provider registration
    // (ProvidersUtil.ensure) must run before the factory is looked up.
    private static final XMLSignatureFactory fac = getXMLSignatureFactory();

    /**
     * Key selector that resolves the validation key via the ds:KeyInfo/ds:KeyName
     * hint of the signature, using the supplied {@link KeyLocator}.
     */
    private static class KeySelectorUtilizingKeyNameHint extends KeySelector {

        private final KeyLocator locator;

        // true once a key was actually found for the KeyName hint
        private boolean keyLocated = false;

        // the KeyName read from the signature (null when absent)
        private String keyName = null;

        public KeySelectorUtilizingKeyNameHint(KeyLocator locator) {
            this.locator = locator;
        }

        @Override
        public KeySelectorResult select(KeyInfo keyInfo, KeySelector.Purpose purpose, AlgorithmMethod method,
                                        XMLCryptoContext context) throws KeySelectorException {
            try {
                KeyName keyNameEl = KeyInfoTools.getKeyName(keyInfo);
                this.keyName = keyNameEl == null ? null : keyNameEl.getName();
                final Key key = locator.getKey(keyName);
                this.keyLocated = key != null;
                return new KeySelectorResult() {
                    @Override
                    public Key getKey() {
                        return key;
                    }
                };
            } catch (KeyManagementException ex) {
                throw new KeySelectorException(ex);
            }
        }

        private boolean wasKeyLocated() {
            return this.keyLocated;
        }
    }

    /** Key selector that always returns one preset key. */
    private static class KeySelectorPresetKey extends KeySelector {

        private final Key key;

        public KeySelectorPresetKey(Key key) {
            this.key = key;
        }

        @Override
        public KeySelectorResult select(KeyInfo keyInfo, KeySelector.Purpose purpose, AlgorithmMethod method,
                                        XMLCryptoContext context) {
            return new KeySelectorResult() {
                @Override
                public Key getKey() {
                    return key;
                }
            };
        }
    }

    /**
     * Obtains the DOM {@link XMLSignatureFactory}, preferring the Santuario
     * ("ApacheXMLDSig") provider and falling back to the JDK default.
     */
    private static XMLSignatureFactory getXMLSignatureFactory() {
        XMLSignatureFactory xsf = null;
        try {
            xsf = XMLSignatureFactory.getInstance("DOM", "ApacheXMLDSig");
        } catch (NoSuchProviderException ex) {
            try {
                xsf = XMLSignatureFactory.getInstance("DOM");
            } catch (Exception err) {
                throw new RuntimeException(logger.couldNotCreateInstance("DOM", err));
            }
        }
        return xsf;
    }

    /**
     * Use this method to not include the KeyInfo in the signature
     *
     * @param includeKeyInfoInSignature
     *
     * @since v2.0.1
     */
    public static void setIncludeKeyInfoInSignature(boolean includeKeyInfoInSignature) {
        XMLSignatureUtil.includeKeyInfoInSignature = includeKeyInfoInSignature;
    }

    /**
     * Sign a node in a document. The node is detached into a fresh document,
     * signed there, and the signed result replaces the original node. For SAML
     * 2.0 assertions the ds:Signature element is moved before the Subject
     * element as the schema requires.
     *
     * @param doc document owning the node
     * @param nodeToBeSigned node to sign; must not be null
     * @param keyName optional ds:KeyName hint
     * @param keyPair signing key pair
     * @param digestMethod digest algorithm URI
     * @param signatureMethod signature algorithm URI
     * @param referenceURI reference URI ("" for whole document)
     * @param x509Certificate optional certificate placed into KeyInfo
     * @param canonicalizationMethodType canonicalization algorithm URI
     *
     * @return the original document with the signed node spliced back in
     *
     * @throws ParserConfigurationException
     * @throws XMLSignatureException
     * @throws MarshalException
     * @throws GeneralSecurityException
     */
    public static Document sign(Document doc, Node nodeToBeSigned, String keyName, KeyPair keyPair, String digestMethod,
                                String signatureMethod, String referenceURI, X509Certificate x509Certificate,
                                String canonicalizationMethodType)
            throws ParserConfigurationException, GeneralSecurityException, MarshalException, XMLSignatureException {
        if (nodeToBeSigned == null)
            throw logger.nullArgumentError("Node to be signed");

        if (logger.isTraceEnabled()) {
            logger.trace("Document to be signed=" + DocumentUtil.asString(doc));
        }

        Node parentNode = nodeToBeSigned.getParentNode();

        // Let us create a new Document
        Document newDoc = DocumentUtil.createDocument();
        // Import the node
        Node signingNode = newDoc.importNode(nodeToBeSigned, true);
        newDoc.appendChild(signingNode);

        if (!referenceURI.isEmpty()) {
            propagateIDAttributeSetup(nodeToBeSigned, newDoc.getDocumentElement());
        }
        newDoc = sign(newDoc, keyName, keyPair, digestMethod, signatureMethod, referenceURI, x509Certificate,
                canonicalizationMethodType);

        // if the signed element is a SAMLv2.0 assertion we need to move the signature element to the position
        // specified in the schema (before the assertion subject element).
        if (nodeToBeSigned.getLocalName().equals("Assertion")
                && WSTrustConstants.SAML2_ASSERTION_NS.equals(nodeToBeSigned.getNamespaceURI())) {
            Node signatureNode = DocumentUtil.getElement(newDoc, new QName(WSTrustConstants.DSIG_NS, "Signature"));
            Node subjectNode = DocumentUtil.getElement(newDoc, new QName(WSTrustConstants.SAML2_ASSERTION_NS, "Subject"));
            if (signatureNode != null && subjectNode != null) {
                newDoc.getDocumentElement().removeChild(signatureNode);
                newDoc.getDocumentElement().insertBefore(signatureNode, subjectNode);
            }
        }

        // Now let us import this signed doc into the original document we got in the method call
        Node signedNode = doc.importNode(newDoc.getFirstChild(), true);

        if (!referenceURI.isEmpty()) {
            propagateIDAttributeSetup(newDoc.getDocumentElement(), (Element) signedNode);
        }

        parentNode.replaceChild(signedNode, nodeToBeSigned);
        return doc;
    }

    /**
     * Sign only specified element (assumption is that it already has ID attribute set)
     *
     * @param elementToSign element to sign with set ID
     * @param nextSibling child of elementToSign, which will be used as next sibling of created signature
     * @param keyPair
     * @param digestMethod
     * @param signatureMethod
     * @param referenceURI
     *
     * @throws GeneralSecurityException
     * @throws MarshalException
     * @throws XMLSignatureException
     */
    public static void sign(Element elementToSign, Node nextSibling, String keyName, KeyPair keyPair, String digestMethod,
                            String signatureMethod, String referenceURI, String canonicalizationMethodType)
            throws GeneralSecurityException, MarshalException, XMLSignatureException {
        sign(elementToSign, nextSibling, keyName, keyPair, digestMethod, signatureMethod, referenceURI, null,
                canonicalizationMethodType);
    }

    /**
     * Sign only specified element (assumption is that it already has ID attribute set)
     *
     * @param elementToSign element to sign with set ID
     * @param nextSibling child of elementToSign, which will be used as next sibling of created signature
     * @param keyPair
     * @param digestMethod
     * @param signatureMethod
     * @param referenceURI
     * @param x509Certificate {@link X509Certificate} to be placed in SignedInfo
     *
     * @throws GeneralSecurityException
     * @throws MarshalException
     * @throws XMLSignatureException
     * @since 2.5.0
     */
    public static void sign(Element elementToSign, Node nextSibling, String keyName, KeyPair keyPair, String digestMethod,
                            String signatureMethod, String referenceURI, X509Certificate x509Certificate,
                            String canonicalizationMethodType)
            throws GeneralSecurityException, MarshalException, XMLSignatureException {
        PrivateKey signingKey = keyPair.getPrivate();
        PublicKey publicKey = keyPair.getPublic();

        DOMSignContext dsc = new DOMSignContext(signingKey, elementToSign, nextSibling);

        signImpl(dsc, digestMethod, signatureMethod, referenceURI, keyName, publicKey, x509Certificate,
                canonicalizationMethodType);
    }

    /**
     * Setup the ID attribute into <code>destElement</code> depending on the <code>isId</code> flag of an attribute of
     * <code>sourceNode</code>.
     *
     * @param sourceNode
     */
    public static void propagateIDAttributeSetup(Node sourceNode, Element destElement) {
        NamedNodeMap nnm = sourceNode.getAttributes();
        for (int i = 0; i < nnm.getLength(); i++) {
            Attr attr = (Attr) nnm.item(i);
            if (attr.isId()) {
                destElement.setIdAttribute(attr.getName(), true);
                break;
            }
        }
    }

    /**
     * Sign the root element
     *
     * @param doc
     * @param digestMethod
     * @param signatureMethod
     * @param referenceURI
     *
     * @return
     *
     * @throws GeneralSecurityException
     * @throws XMLSignatureException
     * @throws MarshalException
     */
    public static Document sign(Document doc, String keyName, KeyPair keyPair, String digestMethod,
                                String signatureMethod, String referenceURI, String canonicalizationMethodType)
            throws GeneralSecurityException, MarshalException, XMLSignatureException {
        return sign(doc, keyName, keyPair, digestMethod, signatureMethod, referenceURI, null, canonicalizationMethodType);
    }

    /**
     * Sign the root element
     *
     * @param doc
     * @param digestMethod
     * @param signatureMethod
     * @param referenceURI
     *
     * @return
     *
     * @throws GeneralSecurityException
     * @throws XMLSignatureException
     * @throws MarshalException
     * @since 2.5.0
     */
    public static Document sign(Document doc, String keyName, KeyPair keyPair, String digestMethod,
                                String signatureMethod, String referenceURI, X509Certificate x509Certificate,
                                String canonicalizationMethodType)
            throws GeneralSecurityException, MarshalException, XMLSignatureException {
        logger.trace("Document to be signed=" + DocumentUtil.asString(doc));
        PrivateKey signingKey = keyPair.getPrivate();
        PublicKey publicKey = keyPair.getPublic();

        DOMSignContext dsc = new DOMSignContext(signingKey, doc.getDocumentElement());

        signImpl(dsc, digestMethod, signatureMethod, referenceURI, keyName, publicKey, x509Certificate,
                canonicalizationMethodType);
        return doc;
    }

    /**
     * Sign the root element
     *
     *
     * @return
     *
     * @throws GeneralSecurityException
     * @throws XMLSignatureException
     * @throws MarshalException
     */
    public static Document sign(SignatureUtilTransferObject dto, String canonicalizationMethodType)
            throws GeneralSecurityException, MarshalException, XMLSignatureException {
        Document doc = dto.getDocumentToBeSigned();
        String keyName = dto.getKeyName();
        KeyPair keyPair = dto.getKeyPair();
        Node nextSibling = dto.getNextSibling();
        String digestMethod = dto.getDigestMethod();
        String referenceURI = dto.getReferenceURI();
        String signatureMethod = dto.getSignatureMethod();

        logger.trace("Document to be signed=" + DocumentUtil.asString(doc));

        PrivateKey signingKey = keyPair.getPrivate();
        PublicKey publicKey = keyPair.getPublic();

        DOMSignContext dsc = new DOMSignContext(signingKey, doc.getDocumentElement(), nextSibling);

        signImpl(dsc, digestMethod, signatureMethod, referenceURI, keyName, publicKey, dto.getX509Certificate(),
                canonicalizationMethodType);
        return doc;
    }

    /**
     * Validate a signed document with the given public key. All elements that contain a Signature are checked,
     * this way both assertions and the containing document are verified when signed.
     *
     * @param signedDoc
     * @param publicKey
     *
     * @return
     *
     * @throws MarshalException
     * @throws XMLSignatureException
     */
    @SuppressWarnings("unchecked")
    public static boolean validate(Document signedDoc, final KeyLocator locator) throws MarshalException, XMLSignatureException {
        if (signedDoc == null)
            throw logger.nullArgumentError("Signed Document");

        propagateIDAttributeSetup(signedDoc.getDocumentElement(), signedDoc.getDocumentElement());

        NodeList nl = signedDoc.getElementsByTagNameNS(XMLSignature.XMLNS, "Signature");

        if (nl == null || nl.getLength() == 0) {
            logger.debug("Cannot find Signature element");
            return false;
        }

        if (locator == null)
            throw logger.nullValueError("Public Key");

        int signedAssertions = 0;
        String assertionNameSpaceUri = null;

        for (int i = 0; i < nl.getLength(); i++) {
            Node signatureNode = nl.item(i);
            Node parent = signatureNode.getParentNode();
            if (parent != null && JBossSAMLConstants.ASSERTION.get().equals(parent.getLocalName())) {
                ++signedAssertions;
                if (assertionNameSpaceUri == null) {
                    assertionNameSpaceUri = parent.getNamespaceURI();
                }
            }
            if (! validateSingleNode(signatureNode, locator)) return false;
        }

        // Reject responses that mix signed and unsigned assertions: attackers can
        // smuggle extra unsigned assertions next to a properly signed one.
        NodeList assertions = signedDoc.getElementsByTagNameNS(assertionNameSpaceUri, JBossSAMLConstants.ASSERTION.get());
        if (signedAssertions > 0 && assertions != null && assertions.getLength() != signedAssertions) {
            if (logger.isDebugEnabled()) {
                logger.debug("SAML Response document may contain malicious assertions. Signature validation will fail.");
            }
            // there are unsigned assertions mixed with signed ones
            return false;
        }

        return true;
    }

    /**
     * Validates one ds:Signature node: first via the ds:KeyName hint, then —
     * if that did not even locate a key — by brute force over all keys the
     * locator can enumerate.
     */
    private static boolean validateSingleNode(Node signatureNode, final KeyLocator locator)
            throws MarshalException, XMLSignatureException {
        KeySelectorUtilizingKeyNameHint sel = new KeySelectorUtilizingKeyNameHint(locator);
        try {
            if (validateUsingKeySelector(signatureNode, sel)) {
                return true;
            }
            if (sel.wasKeyLocated()) {
                // A key matched the hint but verification failed — do not retry others.
                return false;
            }
        } catch (XMLSignatureException ex) { // pass through MarshalException
            logger.debug("Verification failed for key " + sel.keyName + ": " + ex);
            logger.trace(ex);
        }

        logger.trace("Could not validate signature using ds:KeyInfo/ds:KeyName hint.");

        if (locator instanceof Iterable) {
            Iterable<Key> availableKeys = (Iterable<Key>) locator;
            logger.trace("Trying hard to validate XML signature using all available keys.");
            for (Key key : availableKeys) {
                try {
                    if (validateUsingKeySelector(signatureNode, new KeySelectorPresetKey(key))) {
                        return true;
                    }
                } catch (XMLSignatureException ex) { // pass through MarshalException
                    logger.debug("Verification failed: " + ex);
                    logger.trace(ex);
                }
            }
        }

        return false;
    }

    /** Unmarshals the signature at the node and runs core validation with the given selector. */
    private static boolean validateUsingKeySelector(Node signatureNode, KeySelector validationKeySelector)
            throws XMLSignatureException, MarshalException {
        DOMValidateContext valContext = new DOMValidateContext(validationKeySelector, signatureNode);
        XMLSignature signature = fac.unmarshalXMLSignature(valContext);
        boolean coreValidity = signature.validate(valContext);

        if (! coreValidity) {
            if (logger.isTraceEnabled()) {
                boolean sv = signature.getSignatureValue().validate(valContext);
                logger.trace("Signature validation status: " + sv);

                List<Reference> references = signature.getSignedInfo().getReferences();
                for (Reference ref : references) {
                    logger.trace("[Ref id=" + ref.getId() + ":uri=" + ref.getURI() + "]validity status:" + ref.validate(valContext));
                }
            }
        }

        return coreValidity;
    }

    /**
     * Marshall a SignatureType to output stream
     *
     * @param signature
     * @param os
     *
     * @throws SAXException
     * @throws JAXBException
     */
    public static void marshall(SignatureType signature, OutputStream os) throws JAXBException, SAXException {
        // Not implemented; kept only for interface compatibility.
        throw logger.notImplementedYet("NYI");
    }

    /**
     * Marshall the signed document to an output stream
     *
     * @param signedDocument
     * @param os
     *
     * @throws TransformerException
     */
    public static void marshall(Document signedDocument, OutputStream os) throws TransformerException {
        TransformerFactory tf = TransformerUtil.getTransformerFactory();
        Transformer trans = tf.newTransformer();
        trans.transform(DocumentUtil.getXMLSource(signedDocument), new StreamResult(os));
    }

    /**
     * Given the X509Certificate in the keyinfo element, get a {@link X509Certificate}
     *
     * @param certificateString base64 certificate body without PEM markers
     *
     * @return the parsed certificate
     *
     * @throws org.keycloak.saml.common.exceptions.ProcessingException
     */
    public static X509Certificate getX509CertificateFromKeyInfoString(String certificateString) throws ProcessingException {
        X509Certificate cert = null;
        // Wrap the raw base64 body in PEM markers so CertificateFactory accepts it.
        StringBuilder builder = new StringBuilder();
        builder.append("-----BEGIN CERTIFICATE-----\n").append(certificateString).append("\n-----END CERTIFICATE-----");

        String derFormattedString = builder.toString();

        try {
            CertificateFactory cf = CertificateFactory.getInstance("X.509");
            ByteArrayInputStream bais = new ByteArrayInputStream(derFormattedString.getBytes(GeneralConstants.SAML_CHARSET));

            while (bais.available() > 0) {
                cert = (X509Certificate) cf.generateCertificate(bais);
            }
        } catch (java.security.cert.CertificateException e) {
            throw logger.processingError(e);
        }
        return cert;
    }

    /**
     * Given a dsig:DSAKeyValue element, return {@link DSAKeyValueType}
     *
     * @param element
     *
     * @return
     *
     * @throws ProcessingException
     */
    public static DSAKeyValueType getDSAKeyValue(Element element) throws ParsingException {
        DSAKeyValueType dsa = new DSAKeyValueType();
        NodeList nl = element.getChildNodes();
        int length = nl.getLength();

        for (int i = 0; i < length; i++) {
            Node node = nl.item(i);
            if (node instanceof Element) {
                Element childElement = (Element) node;
                String tag = childElement.getLocalName();

                byte[] text = childElement.getTextContent().getBytes(GeneralConstants.SAML_CHARSET);

                if (WSTrustConstants.XMLDSig.P.equals(tag)) {
                    dsa.setP(text);
                } else if (WSTrustConstants.XMLDSig.Q.equals(tag)) {
                    dsa.setQ(text);
                } else if (WSTrustConstants.XMLDSig.G.equals(tag)) {
                    dsa.setG(text);
                } else if (WSTrustConstants.XMLDSig.Y.equals(tag)) {
                    dsa.setY(text);
                } else if (WSTrustConstants.XMLDSig.SEED.equals(tag)) {
                    dsa.setSeed(text);
                } else if (WSTrustConstants.XMLDSig.PGEN_COUNTER.equals(tag)) {
                    dsa.setPgenCounter(text);
                }
            }
        }

        return dsa;
    }

    /**
     * Given a dsig:RSAKeyValue element, return {@link RSAKeyValueType}
     *
     * @param element
     *
     * @return
     *
     * @throws ProcessingException
     */
    public static RSAKeyValueType getRSAKeyValue(Element element) throws ParsingException {
        RSAKeyValueType rsa = new RSAKeyValueType();
        NodeList nl = element.getChildNodes();
        int length = nl.getLength();

        for (int i = 0; i < length; i++) {
            Node node = nl.item(i);
            if (node instanceof Element) {
                Element childElement = (Element) node;
                String tag = childElement.getLocalName();

                byte[] text = childElement.getTextContent().getBytes(GeneralConstants.SAML_CHARSET);

                if (WSTrustConstants.XMLDSig.MODULUS.equals(tag)) {
                    rsa.setModulus(text);
                } else if (WSTrustConstants.XMLDSig.EXPONENT.equals(tag)) {
                    rsa.setExponent(text);
                }
            }
        }

        return rsa;
    }

    /**
     * <p>
     * Creates a {@code KeyValueType} that wraps the specified public key. This method supports DSA and RSA keys.
     * </p>
     *
     * @param key the {@code PublicKey} that will be represented as a {@code KeyValueType}.
     *
     * @return the constructed {@code KeyValueType}
     *
     * @throws RuntimeException if the key is neither a DSA nor an RSA key
     */
    public static KeyValueType createKeyValue(PublicKey key) {
        if (key instanceof RSAPublicKey) {
            RSAPublicKey pubKey = (RSAPublicKey) key;
            byte[] modulus = pubKey.getModulus().toByteArray();
            byte[] exponent = pubKey.getPublicExponent().toByteArray();

            RSAKeyValueType rsaKeyValue = new RSAKeyValueType();
            rsaKeyValue.setModulus(Base64.encodeBytes(modulus).getBytes(GeneralConstants.SAML_CHARSET));
            rsaKeyValue.setExponent(Base64.encodeBytes(exponent).getBytes(GeneralConstants.SAML_CHARSET));
            return rsaKeyValue;
        } else if (key instanceof DSAPublicKey) {
            DSAPublicKey pubKey = (DSAPublicKey) key;
            byte[] P = pubKey.getParams().getP().toByteArray();
            byte[] Q = pubKey.getParams().getQ().toByteArray();
            byte[] G = pubKey.getParams().getG().toByteArray();
            byte[] Y = pubKey.getY().toByteArray();

            DSAKeyValueType dsaKeyValue = new DSAKeyValueType();
            dsaKeyValue.setP(Base64.encodeBytes(P).getBytes(GeneralConstants.SAML_CHARSET));
            dsaKeyValue.setQ(Base64.encodeBytes(Q).getBytes(GeneralConstants.SAML_CHARSET));
            dsaKeyValue.setG(Base64.encodeBytes(G).getBytes(GeneralConstants.SAML_CHARSET));
            dsaKeyValue.setY(Base64.encodeBytes(Y).getBytes(GeneralConstants.SAML_CHARSET));
            return dsaKeyValue;
        }
        throw logger.unsupportedType(key.toString());
    }

    /**
     * Builds the SignedInfo (enveloped + exclusive-c14n transforms), the KeyInfo,
     * and performs the actual signing into the given context.
     */
    private static void signImpl(DOMSignContext dsc, String digestMethod, String signatureMethod, String referenceURI,
                                 String keyName, PublicKey publicKey, X509Certificate x509Certificate,
                                 String canonicalizationMethodType)
            throws GeneralSecurityException, MarshalException, XMLSignatureException {
        dsc.setDefaultNamespacePrefix("dsig");

        DigestMethod digestMethodObj = fac.newDigestMethod(digestMethod, null);
        Transform transform1 = fac.newTransform(Transform.ENVELOPED, (TransformParameterSpec) null);
        Transform transform2 = fac.newTransform("http://www.w3.org/2001/10/xml-exc-c14n#", (TransformParameterSpec) null);

        List<Transform> transformList = new ArrayList<>();
        transformList.add(transform1);
        transformList.add(transform2);

        Reference ref = fac.newReference(referenceURI, digestMethodObj, transformList, null, null);

        CanonicalizationMethod canonicalizationMethod
                = fac.newCanonicalizationMethod(canonicalizationMethodType, (C14NMethodParameterSpec) null);

        List<Reference> referenceList = Collections.singletonList(ref);
        SignatureMethod signatureMethodObj = fac.newSignatureMethod(signatureMethod, null);
        SignedInfo si = fac.newSignedInfo(canonicalizationMethod, signatureMethodObj, referenceList);

        KeyInfo ki;
        if (includeKeyInfoInSignature) {
            ki = createKeyInfo(keyName, publicKey, x509Certificate);
        } else {
            // Even without the public key/certificate, a KeyName hint (if any) is emitted.
            ki = createKeyInfo(keyName, null, null);
        }
        XMLSignature signature = fac.newXMLSignature(si, ki);

        signature.sign(dsc);
    }

    /**
     * Assembles a KeyInfo from the optional KeyName, X509Data and KeyValue parts
     * (any of which may be null).
     */
    private static KeyInfo createKeyInfo(String keyName, PublicKey publicKey, X509Certificate x509Certificate) throws KeyException {
        KeyInfoFactory keyInfoFactory = fac.getKeyInfoFactory();

        List<Object> items = new LinkedList<>();

        if (keyName != null) {
            items.add(keyInfoFactory.newKeyName(keyName));
        }

        if (x509Certificate != null) {
            items.add(keyInfoFactory.newX509Data(Collections.singletonList(x509Certificate)));
        }

        if (publicKey != null) {
            items.add(keyInfoFactory.newKeyValue(publicKey));
        }

        return keyInfoFactory.newKeyInfo(items);
    }
}
didiez/keycloak
saml-core/src/main/java/org/keycloak/saml/processing/core/util/XMLSignatureUtil.java
Java
apache-2.0
29,728
package com.navercorp.pinpoint.web.filter;

import com.navercorp.pinpoint.common.bo.SpanBo;
import com.navercorp.pinpoint.common.service.DefaultServiceTypeRegistryService;
import com.navercorp.pinpoint.common.service.ServiceTypeRegistryService;
import com.navercorp.pinpoint.common.trace.ServiceType;

import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Arrays;
import java.util.Collections;

/**
 * Tests for {@link LinkFilter}: a filter configured with a from/to application
 * (and optionally from/to agent ids) must accept a span pair that forms the
 * configured link and reject a pair whose callee is a different application.
 *
 * @author emeroad
 */
public class LinkFilterTest {
    private static final String FROM_APP = "APP_A";
    private static final String TO_APP = "APP_B";
    private static final String OTHER_APP = "APP_C";
    private static final int FROM_SPAN_ID = 100;

    private final Logger logger = LoggerFactory.getLogger(this.getClass());
    private final ServiceTypeRegistryService serviceTypeRegistryService = new DefaultServiceTypeRegistryService();

    @Test
    public void fromToFilterTest() {
        final ServiceType tomcat = serviceTypeRegistryService.findServiceTypeByName("TOMCAT");
        final short tomcatServiceType = tomcat.getCode();

        // Filter on application name + service type only (no agent-id constraint).
        FilterDescriptor descriptor = createDescriptor(tomcat, null, null);
        FilterHint hint = new FilterHint(Collections.<RpcHint>emptyList());
        LinkFilter linkFilter = new LinkFilter(descriptor, hint, serviceTypeRegistryService);
        logger.debug(linkFilter.toString());

        SpanBo fromSpanBo = createSpan(FROM_APP, tomcatServiceType, "AGENT_A");
        fromSpanBo.setSpanID(FROM_SPAN_ID);
        SpanBo toSpanBO = createSpan(TO_APP, tomcatServiceType, "AGENT_B");
        toSpanBO.setParentSpanId(FROM_SPAN_ID);
        SpanBo spanBoC = createSpan(OTHER_APP, tomcatServiceType, "AGENT_C");

        Assert.assertTrue(linkFilter.include(Arrays.asList(fromSpanBo, toSpanBO)));
        Assert.assertFalse(linkFilter.include(Arrays.asList(fromSpanBo, spanBoC)));
    }

    @Test
    public void fromToFilterAgentTest() {
        final ServiceType tomcat = serviceTypeRegistryService.findServiceTypeByName("TOMCAT");
        final short tomcatServiceType = tomcat.getCode();

        // Same link, but additionally pinned to specific from/to agent ids.
        FilterDescriptor descriptor = createDescriptor(tomcat, "AGENT_A", "AGENT_B");
        FilterHint hint = new FilterHint(Collections.<RpcHint>emptyList());
        LinkFilter linkFilter = new LinkFilter(descriptor, hint, serviceTypeRegistryService);
        logger.debug(linkFilter.toString());

        SpanBo fromSpanBo = createSpan(FROM_APP, tomcatServiceType, "AGENT_A");
        fromSpanBo.setSpanID(FROM_SPAN_ID);
        SpanBo toSpanBO = createSpan(TO_APP, tomcatServiceType, "AGENT_B");
        toSpanBO.setParentSpanId(FROM_SPAN_ID);
        SpanBo spanBoC = createSpan(OTHER_APP, tomcatServiceType, "AGENT_C");

        Assert.assertTrue(linkFilter.include(Arrays.asList(fromSpanBo, toSpanBO)));
        Assert.assertFalse(linkFilter.include(Arrays.asList(fromSpanBo, spanBoC)));
    }

    /**
     * Builds a from/to filter descriptor for the given service type.
     * Agent ids may be {@code null} to leave the descriptor unconstrained by agent,
     * matching the original tests where those setters were simply not called.
     */
    private static FilterDescriptor createDescriptor(ServiceType serviceType, String fromAgentId, String toAgentId) {
        FilterDescriptor descriptor = new FilterDescriptor();
        descriptor.setFromApplicationName(FROM_APP);
        descriptor.setFromServiceType(serviceType.getName());
        if (fromAgentId != null) {
            descriptor.setFromAgentId(fromAgentId);
        }
        descriptor.setToApplicationName(TO_APP);
        descriptor.setToServiceType(serviceType.getName());
        if (toAgentId != null) {
            descriptor.setToAgentId(toAgentId);
        }
        return descriptor;
    }

    /** Creates a span fixture for the given application / service type / agent. */
    private static SpanBo createSpan(String applicationId, short serviceType, String agentId) {
        SpanBo span = new SpanBo();
        span.setApplicationId(applicationId);
        span.setServiceType(serviceType);
        span.setAgentId(agentId);
        return span;
    }
}
gspandy/pinpoint
web/src/test/java/com/navercorp/pinpoint/web/filter/LinkFilterTest.java
Java
apache-2.0
3,898
/* Copyright 2012 Software Freedom Conservancy Copyright 2007-2012 Selenium committers Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package org.openqa.selenium; import org.junit.Test; import org.openqa.selenium.testing.Ignore; import org.openqa.selenium.testing.JUnit4TestBase; import org.openqa.selenium.testing.JavascriptEnabled; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.openqa.selenium.testing.Ignore.Driver.ANDROID; import static org.openqa.selenium.testing.Ignore.Driver.CHROME; import static org.openqa.selenium.testing.Ignore.Driver.IPHONE; import static org.openqa.selenium.testing.Ignore.Driver.OPERA; import static org.openqa.selenium.testing.Ignore.Driver.OPERA_MOBILE; import static org.openqa.selenium.testing.Ignore.Driver.SELENESE; import static org.openqa.selenium.testing.TestUtilities.isOldIe; import static org.openqa.selenium.TestWaiter.waitFor; import static org.openqa.selenium.WaitingConditions.pageTitleToBe; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import java.util.List; import java.util.concurrent.Callable; public class ElementFindingTest extends JUnit4TestBase { @Test public void testShouldReturnTitleOfPageIfSet() { driver.get(pages.xhtmlTestPage); assertThat(driver.getTitle(), equalTo(("XHTML Test Page"))); driver.get(pages.simpleTestPage); 
assertThat(driver.getTitle(), equalTo("Hello WebDriver")); } @Test public void testShouldNotBeAbleToLocateASingleElementThatDoesNotExist() { driver.get(pages.formPage); try { driver.findElement(By.id("nonExistantButton")); fail("Should not have succeeded"); } catch (NoSuchElementException e) { // this is expected } } @Test public void testShouldBeAbleToClickOnLinkIdentifiedByText() { driver.get(pages.xhtmlTestPage); driver.findElement(By.linkText("click me")).click(); waitFor(pageTitleToBe(driver, "We Arrive Here")); assertThat(driver.getTitle(), equalTo("We Arrive Here")); } @Test public void testDriverShouldBeAbleToFindElementsAfterLoadingMoreThanOnePageAtATime() { driver.get(pages.formPage); driver.get(pages.xhtmlTestPage); driver.findElement(By.linkText("click me")).click(); waitFor(pageTitleToBe(driver, "We Arrive Here")); assertThat(driver.getTitle(), equalTo("We Arrive Here")); } @Test public void testshouldBeAbleToClickOnLinkIdentifiedById() { driver.get(pages.xhtmlTestPage); driver.findElement(By.id("linkId")).click(); waitFor(pageTitleToBe(driver, "We Arrive Here")); assertThat(driver.getTitle(), equalTo("We Arrive Here")); } @Test public void testShouldThrowAnExceptionWhenThereIsNoLinkToClickAndItIsFoundWithLinkText() { driver.get(pages.xhtmlTestPage); try { driver.findElement(By.linkText("Not here either")); fail("Should not have succeeded"); } catch (NoSuchElementException e) { // this is expected } } @Test public void testShouldfindAnElementBasedOnId() { driver.get(pages.formPage); WebElement element = driver.findElement(By.id("checky")); assertThat(element.isSelected(), is(false)); } @Test public void testShouldNotBeAbleTofindElementsBasedOnIdIfTheElementIsNotThere() { driver.get(pages.formPage); try { driver.findElement(By.id("notThere")); fail("Should not have succeeded"); } catch (NoSuchElementException e) { // this is expected } } @Test public void testShouldBeAbleToFindChildrenOfANode() { driver.get(pages.selectableItemsPage); List<WebElement> 
elements = driver.findElements(By.xpath("/html/head")); WebElement head = elements.get(0); List<WebElement> importedScripts = head.findElements(By.tagName("script")); assertThat(importedScripts.size(), equalTo(3)); } @Test public void testReturnAnEmptyListWhenThereAreNoChildrenOfANode() { driver.get(pages.xhtmlTestPage); WebElement table = driver.findElement(By.id("table")); List<WebElement> rows = table.findElements(By.tagName("tr")); assertThat(rows.size(), equalTo(0)); } @Ignore(value = SELENESE, reason = "Value returned as 'off'") @Test public void testShouldFindElementsByName() { driver.get(pages.formPage); WebElement element = driver.findElement(By.name("checky")); assertThat(element.getAttribute("value"), is("furrfu")); } @Test public void testShouldFindElementsByClass() { driver.get(pages.xhtmlTestPage); WebElement element = driver.findElement(By.className("extraDiv")); assertTrue(element.getText().startsWith("Another div starts here.")); } @Test public void testShouldFindElementsByClassWhenItIsTheFirstNameAmongMany() { driver.get(pages.xhtmlTestPage); WebElement element = driver.findElement(By.className("nameA")); assertThat(element.getText(), equalTo("An H2 title")); } @Test public void testShouldFindElementsByClassWhenItIsTheLastNameAmongMany() { driver.get(pages.xhtmlTestPage); WebElement element = driver.findElement(By.className("nameC")); assertThat(element.getText(), equalTo("An H2 title")); } @Test public void testShouldFindElementsByClassWhenItIsInTheMiddleAmongMany() { driver.get(pages.xhtmlTestPage); WebElement element = driver.findElement(By.className("nameBnoise")); assertThat(element.getText(), equalTo("An H2 title")); } @Test public void testShouldFindElementByClassWhenItsNameIsSurroundedByWhitespace() { driver.get(pages.xhtmlTestPage); WebElement element = driver.findElement(By.className("spaceAround")); assertThat(element.getText(), equalTo("Spaced out")); } @Test public void 
testShouldFindElementsByClassWhenItsNameIsSurroundedByWhitespace() { driver.get(pages.xhtmlTestPage); List<WebElement> elements = driver.findElements(By.className("spaceAround")); assertThat(elements.size(), equalTo(1)); assertThat(elements.get(0).getText(), equalTo("Spaced out")); } @Test public void testShouldNotFindElementsByClassWhenTheNameQueriedIsShorterThanCandidateName() { driver.get(pages.xhtmlTestPage); try { driver.findElement(By.className("nameB")); fail("Should not have succeeded"); } catch (NoSuchElementException e) { // this is expected } } @Test public void testShouldBeAbleToFindMultipleElementsByXPath() { driver.get(pages.xhtmlTestPage); List<WebElement> elements = driver.findElements(By.xpath("//div")); assertTrue(elements.size() > 1); } @Test public void testShouldBeAbleToFindMultipleElementsByLinkText() { driver.get(pages.xhtmlTestPage); List<WebElement> elements = driver.findElements(By.linkText("click me")); assertTrue("Expected 2 links, got " + elements.size(), elements.size() == 2); } @Test public void testShouldBeAbleToFindMultipleElementsByPartialLinkText() { driver.get(pages.xhtmlTestPage); List<WebElement> elements = driver.findElements(By.partialLinkText("ick me")); assertTrue(elements.size() == 2); } @Test public void testShouldBeAbleToFindElementByPartialLinkText() { driver.get(pages.xhtmlTestPage); try { driver.findElement(By.partialLinkText("anon")); } catch (NoSuchElementException e) { fail("Expected element to be found"); } } @Test public void testShouldFindElementByLinkTextContainingEqualsSign() { driver.get(pages.xhtmlTestPage); try { WebElement element = driver.findElement(By.linkText("Link=equalssign")); assertEquals("linkWithEqualsSign", element.getAttribute("id")); } catch (NoSuchElementException e) { fail("Expected element to be found"); } } @Test public void testShouldFindElementByPartialLinkTextContainingEqualsSign() { driver.get(pages.xhtmlTestPage); try { WebElement element = 
driver.findElement(By.partialLinkText("Link=")); assertEquals("linkWithEqualsSign", element.getAttribute("id")); } catch (NoSuchElementException e) { fail("Expected element to be found"); } } @Test public void testShouldFindElementsByLinkTextContainingEqualsSign() { driver.get(pages.xhtmlTestPage); List<WebElement> elements = driver.findElements(By.linkText("Link=equalssign")); assertEquals(1, elements.size()); assertEquals("linkWithEqualsSign", elements.get(0).getAttribute("id")); } @Test public void testShouldFindElementsByPartialLinkTextContainingEqualsSign() { driver.get(pages.xhtmlTestPage); List<WebElement> elements = driver.findElements(By.partialLinkText("Link=")); assertEquals(1, elements.size()); assertEquals("linkWithEqualsSign", elements.get(0).getAttribute("id")); } @Test public void testShouldBeAbleToFindMultipleElementsByName() { driver.get(pages.nestedPage); List<WebElement> elements = driver.findElements(By.name("checky")); assertTrue(elements.size() > 1); } @Ignore(value = ANDROID, reason = "Bug in Android's XPath library.") @Test public void testShouldBeAbleToFindMultipleElementsById() { driver.get(pages.nestedPage); List<WebElement> elements = driver.findElements(By.id("2")); assertEquals(8, elements.size()); } @Test public void testShouldBeAbleToFindMultipleElementsByClassName() { driver.get(pages.xhtmlTestPage); List<WebElement> elements = driver.findElements(By.className("nameC")); assertTrue(elements.size() > 1); } // You don't want to ask why this is here @Test public void testWhenFindingByNameShouldNotReturnById() { driver.get(pages.formPage); WebElement element = driver.findElement(By.name("id-name1")); assertThat(element.getAttribute("value"), is("name")); element = driver.findElement(By.id("id-name1")); assertThat(element.getAttribute("value"), is("id")); element = driver.findElement(By.name("id-name2")); assertThat(element.getAttribute("value"), is("name")); element = driver.findElement(By.id("id-name2")); 
assertThat(element.getAttribute("value"), is("id")); } @Test public void testShouldFindGrandChildren() { driver.get(pages.formPage); WebElement form = driver.findElement(By.id("nested_form")); form.findElement(By.name("x")); } @Test public void testShouldNotFindElementOutSideTree() { driver.get(pages.formPage); WebElement element = driver.findElement(By.name("login")); try { element.findElement(By.name("x")); } catch (NoSuchElementException e) { // this is expected } } @Test public void testShouldReturnElementsThatDoNotSupportTheNameProperty() { driver.get(pages.nestedPage); driver.findElement(By.name("div1")); // If this works, we're all good } @Test public void testShouldFindHiddenElementsByName() { driver.get(pages.formPage); try { driver.findElement(By.name("hidden")); } catch (NoSuchElementException e) { fail("Expected to be able to find hidden element"); } } @Test public void testShouldfindAnElementBasedOnTagName() { driver.get(pages.formPage); WebElement element = driver.findElement(By.tagName("input")); assertNotNull(element); } @Test public void testShouldfindElementsBasedOnTagName() { driver.get(pages.formPage); List<WebElement> elements = driver.findElements(By.tagName("input")); assertNotNull(elements); } @Test public void testFindingByCompoundClassNameIsAnError() { driver.get(pages.xhtmlTestPage); try { driver.findElement(By.className("a b")); fail("Compound class names aren't allowed"); } catch (InvalidSelectorException e) { // This is expected } try { driver.findElements(By.className("a b")); fail("Compound class names aren't allowed"); } catch (InvalidSelectorException e) { // This is expected } } @JavascriptEnabled @Test public void testShouldBeAbleToClickOnLinksWithNoHrefAttribute() { driver.get(pages.javascriptPage); WebElement element = driver.findElement(By.linkText("No href")); element.click(); // if any exception is thrown, we won't get this far. 
Sanity check waitFor(pageTitleToBe(driver, "Changed")); assertEquals("Changed", driver.getTitle()); } @Ignore({SELENESE}) @Test public void testShouldNotBeAbleToFindAnElementOnABlankPage() { driver.get("about:blank"); try { // Search for anything. This used to cause an IllegalStateException in IE. driver.findElement(By.tagName("a")); fail("Should not have been able to find a link"); } catch (NoSuchElementException e) { // this is expected } } @Ignore({IPHONE}) @NeedsFreshDriver @Test public void testShouldNotBeAbleToLocateASingleElementOnABlankPage() { // Note we're on the default start page for the browser at this point. try { driver.findElement(By.id("nonExistantButton")); fail("Should not have succeeded"); } catch (NoSuchElementException e) { // this is expected } } @JavascriptEnabled @Test public void testRemovingAnElementDynamicallyFromTheDomShouldCauseAStaleRefException() { driver.get(pages.javascriptPage); WebElement toBeDeleted = driver.findElement(By.id("deleted")); assertTrue(toBeDeleted.isDisplayed()); driver.findElement(By.id("delete")).click(); boolean wasStale = waitFor(elementToBeStale(toBeDeleted)); assertTrue("Element should be stale at this point", wasStale); } private Callable<Boolean> elementToBeStale(final WebElement element) { return new Callable<Boolean>() { public Boolean call() throws Exception { try { element.isDisplayed(); return false; } catch (StaleElementReferenceException e) { return true; } } }; } @Test public void testFindingALinkByXpathUsingContainsKeywordShouldWork() { driver.get(pages.nestedPage); try { driver.findElement(By.xpath("//a[contains(.,'hello world')]")); } catch (Exception e) { fail("Should not have thrown an exception"); } } @JavascriptEnabled @Test public void testShouldBeAbleToFindAnElementByCssSelector() { driver.get(pages.xhtmlTestPage); driver.findElement(By.cssSelector("div.content")); } @JavascriptEnabled @Test public void testShouldBeAbleToFindElementsByCssSelector() { driver.get(pages.xhtmlTestPage); 
driver.findElements(By.cssSelector("p")); } @JavascriptEnabled @Ignore(CHROME) @Test public void testShouldBeAbleToFindAnElementByCompoundCssSelector() { driver.get(pages.xhtmlTestPage); WebElement element = driver.findElement(By.cssSelector("div.extraDiv, div.content")); assertEquals("content", element.getAttribute("class")); } @JavascriptEnabled @Ignore(CHROME) @Test public void testShouldBeAbleToFindElementsByCompoundCssSelector() { driver.get(pages.xhtmlTestPage); List<WebElement> elements = driver.findElements(By.cssSelector("div.extraDiv, div.content")); assertEquals("content", elements.get(0).getAttribute("class")); assertEquals("extraDiv", elements.get(1).getAttribute("class")); } @Test public void testFindingByTagNameShouldNotIncludeParentElementIfSameTagType() { driver.get(pages.xhtmlTestPage); WebElement parent = driver.findElement(By.id("my_span")); assertEquals(2, parent.findElements(By.tagName("div")).size()); assertEquals(2, parent.findElements(By.tagName("span")).size()); } @Test public void testFindingByCssShouldNotIncludeParentElementIfSameTagType() { driver.get(pages.xhtmlTestPage); WebElement parent = driver.findElement(By.cssSelector("div#parent")); WebElement child = parent.findElement(By.cssSelector("div")); assertEquals("child", child.getAttribute("id")); } // TODO(danielwh): Add extensive CSS selector tests @Ignore(value = {ANDROID, OPERA, SELENESE, OPERA_MOBILE}, reason = "Just not working") @Test public void testAnElementFoundInADifferentFrameIsStale() { driver.get(pages.missedJsReferencePage); driver.switchTo().frame("inner"); WebElement element = driver.findElement(By.id("oneline")); driver.switchTo().defaultContent(); try { element.getText(); fail("Expected exception"); } catch (StaleElementReferenceException expected) { // Expected } } @JavascriptEnabled @Ignore({ANDROID, IPHONE, OPERA, SELENESE, OPERA_MOBILE}) @Test public void testAnElementFoundInADifferentFrameViaJsCanBeUsed() { driver.get(pages.missedJsReferencePage); try { 
driver.switchTo().frame("inner"); WebElement first = driver.findElement(By.id("oneline")); driver.switchTo().defaultContent(); WebElement element = (WebElement) ((JavascriptExecutor) driver).executeScript( "return frames[0].document.getElementById('oneline');"); driver.switchTo().frame("inner"); WebElement second = driver.findElement(By.id("oneline")); assertEquals(first, element); assertEquals(second, element); } finally { driver.switchTo().defaultContent(); } } @Test @Ignore({CHROME, OPERA}) public void findsByLinkTextOnXhtmlPage() { if (isOldIe(driver)) { // Old IE doesn't render XHTML pages, don't try loading XHTML pages in it return; } driver.get(appServer.whereIs("actualXhtmlPage.xhtml")); String linkText = "Foo"; WebElement element = driver.findElement(By.linkText(linkText)); assertEquals(linkText, element.getText()); } }
krosenvold/selenium-git-release-candidate
java/client/test/org/openqa/selenium/ElementFindingTest.java
Java
apache-2.0
18,265
#include "grr/client/minicomm/client_actions/delete_grr_temp_files.h" #include <unistd.h> #include <fstream> #include <vector> #include "gtest/gtest.h" #include "grr/client/minicomm/client_test_base.h" #include "grr/client/minicomm/file_operations.h" #include "grr/client/minicomm/tempfiles.h" #include "grr/client/minicomm/test_util.h" #include "grr/client/minicomm/util.h" namespace grr { namespace {} class DeleteGRRTempFiles : public grr::ClientTestBase {}; TEST_F(DeleteGRRTempFiles, ActionTest) { std::vector<std::string> files; WriteValidConfigFile(false, true); ASSERT_TRUE(config_.ReadConfig()); TemporaryFiles temp_files(config_); for (int i = 0; i < 50; ++i) { std::string path; std::string error; path = temp_files.CreateGRRTempFile("Testing", &error); ASSERT_GT(path.size(), 0); ASSERT_EQ(access(path.c_str(), R_OK), 0); files.emplace_back(path); } PathSpec spec; spec.set_path(config_.TemporaryDirectory()); spec.set_pathtype(PathSpec::OS); GrrMessage message; message.set_args(spec.SerializeAsString()); message.set_args_rdf_name("PathSpec"); MessageQueue queue(5, 20000); ActionContext context(message, &queue, &config_); actions::DeleteGRRTempFiles action; action.ProcessRequest(&context); const auto r = queue.GetMessages(10, 20000, true); for (auto& file : files) { ASSERT_EQ(access(file.c_str(), R_OK), -1); } } } // namespace grr
pidydx/grr
grr/client/minicomm/client_actions/delete_grr_temp_files_test.cc
C++
apache-2.0
1,431
'use strict'; var _ansiStyles = _interopRequireDefault(require('ansi-styles')); var _collections = require('./collections'); var _AsymmetricMatcher = _interopRequireDefault( require('./plugins/AsymmetricMatcher') ); var _ConvertAnsi = _interopRequireDefault(require('./plugins/ConvertAnsi')); var _DOMCollection = _interopRequireDefault(require('./plugins/DOMCollection')); var _DOMElement = _interopRequireDefault(require('./plugins/DOMElement')); var _Immutable = _interopRequireDefault(require('./plugins/Immutable')); var _ReactElement = _interopRequireDefault(require('./plugins/ReactElement')); var _ReactTestComponent = _interopRequireDefault( require('./plugins/ReactTestComponent') ); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : {default: obj}; } var Symbol = global['jest-symbol-do-not-touch'] || global.Symbol; const toString = Object.prototype.toString; const toISOString = Date.prototype.toISOString; const errorToString = Error.prototype.toString; const regExpToString = RegExp.prototype.toString; const symbolToString = Symbol.prototype.toString; /** * Explicitly comparing typeof constructor to function avoids undefined as name * when mock identity-obj-proxy returns the key as the value for any key. */ const getConstructorName = val => (typeof val.constructor === 'function' && val.constructor.name) || 'Object'; /* global window */ /** Is val is equal to global window object? 
Works even if it does not exist :) */ const isWindow = val => typeof window !== 'undefined' && val === window; const SYMBOL_REGEXP = /^Symbol\((.*)\)(.*)$/; const NEWLINE_REGEXP = /\n/gi; class PrettyFormatPluginError extends Error { constructor(message, stack) { super(message); this.stack = stack; this.name = this.constructor.name; } } function isToStringedArrayType(toStringed) { return ( toStringed === '[object Array]' || toStringed === '[object ArrayBuffer]' || toStringed === '[object DataView]' || toStringed === '[object Float32Array]' || toStringed === '[object Float64Array]' || toStringed === '[object Int8Array]' || toStringed === '[object Int16Array]' || toStringed === '[object Int32Array]' || toStringed === '[object Uint8Array]' || toStringed === '[object Uint8ClampedArray]' || toStringed === '[object Uint16Array]' || toStringed === '[object Uint32Array]' ); } function printNumber(val) { return Object.is(val, -0) ? '-0' : String(val); } function printBigInt(val) { return String(`${val}n`); } function printFunction(val, printFunctionName) { if (!printFunctionName) { return '[Function]'; } return '[Function ' + (val.name || 'anonymous') + ']'; } function printSymbol(val) { return symbolToString.call(val).replace(SYMBOL_REGEXP, 'Symbol($1)'); } function printError(val) { return '[' + errorToString.call(val) + ']'; } /** * The first port of call for printing an object, handles most of the * data-types in JS. 
*/ function printBasicValue(val, printFunctionName, escapeRegex, escapeString) { if (val === true || val === false) { return '' + val; } if (val === undefined) { return 'undefined'; } if (val === null) { return 'null'; } const typeOf = typeof val; if (typeOf === 'number') { return printNumber(val); } if (typeOf === 'bigint') { return printBigInt(val); } if (typeOf === 'string') { if (escapeString) { return '"' + val.replace(/"|\\/g, '\\$&') + '"'; } return '"' + val + '"'; } if (typeOf === 'function') { return printFunction(val, printFunctionName); } if (typeOf === 'symbol') { return printSymbol(val); } const toStringed = toString.call(val); if (toStringed === '[object WeakMap]') { return 'WeakMap {}'; } if (toStringed === '[object WeakSet]') { return 'WeakSet {}'; } if ( toStringed === '[object Function]' || toStringed === '[object GeneratorFunction]' ) { return printFunction(val, printFunctionName); } if (toStringed === '[object Symbol]') { return printSymbol(val); } if (toStringed === '[object Date]') { return isNaN(+val) ? 'Date { NaN }' : toISOString.call(val); } if (toStringed === '[object Error]') { return printError(val); } if (toStringed === '[object RegExp]') { if (escapeRegex) { // https://github.com/benjamingr/RegExp.escape/blob/master/polyfill.js return regExpToString.call(val).replace(/[\\^$*+?.()|[\]{}]/g, '\\$&'); } return regExpToString.call(val); } if (val instanceof Error) { return printError(val); } return null; } /** * Handles more complex objects ( such as objects with circular references. 
* maps and sets etc ) */ function printComplexValue( val, config, indentation, depth, refs, hasCalledToJSON ) { if (refs.indexOf(val) !== -1) { return '[Circular]'; } refs = refs.slice(); refs.push(val); const hitMaxDepth = ++depth > config.maxDepth; const min = config.min; if ( config.callToJSON && !hitMaxDepth && val.toJSON && typeof val.toJSON === 'function' && !hasCalledToJSON ) { return printer(val.toJSON(), config, indentation, depth, refs, true); } const toStringed = toString.call(val); if (toStringed === '[object Arguments]') { return hitMaxDepth ? '[Arguments]' : (min ? '' : 'Arguments ') + '[' + (0, _collections.printListItems)( val, config, indentation, depth, refs, printer ) + ']'; } if (isToStringedArrayType(toStringed)) { return hitMaxDepth ? '[' + val.constructor.name + ']' : (min ? '' : val.constructor.name + ' ') + '[' + (0, _collections.printListItems)( val, config, indentation, depth, refs, printer ) + ']'; } if (toStringed === '[object Map]') { return hitMaxDepth ? '[Map]' : 'Map {' + (0, _collections.printIteratorEntries)( val.entries(), config, indentation, depth, refs, printer, ' => ' ) + '}'; } if (toStringed === '[object Set]') { return hitMaxDepth ? '[Set]' : 'Set {' + (0, _collections.printIteratorValues)( val.values(), config, indentation, depth, refs, printer ) + '}'; } // Avoid failure to serialize global window object in jsdom test environment. // For example, not even relevant if window is prop of React element. return hitMaxDepth || isWindow(val) ? '[' + getConstructorName(val) + ']' : (min ? '' : getConstructorName(val) + ' ') + '{' + (0, _collections.printObjectProperties)( val, config, indentation, depth, refs, printer ) + '}'; } function isNewPlugin(plugin) { return plugin.serialize != null; } function printPlugin(plugin, val, config, indentation, depth, refs) { let printed; try { printed = isNewPlugin(plugin) ? 
plugin.serialize(val, config, indentation, depth, refs, printer) : plugin.print( val, valChild => printer(valChild, config, indentation, depth, refs), str => { const indentationNext = indentation + config.indent; return ( indentationNext + str.replace(NEWLINE_REGEXP, '\n' + indentationNext) ); }, { edgeSpacing: config.spacingOuter, min: config.min, spacing: config.spacingInner }, config.colors ); } catch (error) { throw new PrettyFormatPluginError(error.message, error.stack); } if (typeof printed !== 'string') { throw new Error( `pretty-format: Plugin must return type "string" but instead returned "${typeof printed}".` ); } return printed; } function findPlugin(plugins, val) { for (let p = 0; p < plugins.length; p++) { try { if (plugins[p].test(val)) { return plugins[p]; } } catch (error) { throw new PrettyFormatPluginError(error.message, error.stack); } } return null; } function printer(val, config, indentation, depth, refs, hasCalledToJSON) { const plugin = findPlugin(config.plugins, val); if (plugin !== null) { return printPlugin(plugin, val, config, indentation, depth, refs); } const basicResult = printBasicValue( val, config.printFunctionName, config.escapeRegex, config.escapeString ); if (basicResult !== null) { return basicResult; } return printComplexValue( val, config, indentation, depth, refs, hasCalledToJSON ); } const DEFAULT_THEME = { comment: 'gray', content: 'reset', prop: 'yellow', tag: 'cyan', value: 'green' }; const DEFAULT_THEME_KEYS = Object.keys(DEFAULT_THEME); const DEFAULT_OPTIONS = { callToJSON: true, escapeRegex: false, escapeString: true, highlight: false, indent: 2, maxDepth: Infinity, min: false, plugins: [], printFunctionName: true, theme: DEFAULT_THEME }; function validateOptions(options) { Object.keys(options).forEach(key => { if (!DEFAULT_OPTIONS.hasOwnProperty(key)) { throw new Error(`pretty-format: Unknown option "${key}".`); } }); if (options.min && options.indent !== undefined && options.indent !== 0) { throw new Error( 
'pretty-format: Options "min" and "indent" cannot be used together.' ); } if (options.theme !== undefined) { if (options.theme === null) { throw new Error(`pretty-format: Option "theme" must not be null.`); } if (typeof options.theme !== 'object') { throw new Error( `pretty-format: Option "theme" must be of type "object" but instead received "${typeof options.theme}".` ); } } } const getColorsHighlight = options => DEFAULT_THEME_KEYS.reduce((colors, key) => { const value = options.theme && options.theme[key] !== undefined ? options.theme[key] : DEFAULT_THEME[key]; const color = value && _ansiStyles.default[value]; if ( color && typeof color.close === 'string' && typeof color.open === 'string' ) { colors[key] = color; } else { throw new Error( `pretty-format: Option "theme" has a key "${key}" whose value "${value}" is undefined in ansi-styles.` ); } return colors; }, Object.create(null)); const getColorsEmpty = () => DEFAULT_THEME_KEYS.reduce((colors, key) => { colors[key] = { close: '', open: '' }; return colors; }, Object.create(null)); const getPrintFunctionName = options => options && options.printFunctionName !== undefined ? options.printFunctionName : DEFAULT_OPTIONS.printFunctionName; const getEscapeRegex = options => options && options.escapeRegex !== undefined ? options.escapeRegex : DEFAULT_OPTIONS.escapeRegex; const getEscapeString = options => options && options.escapeString !== undefined ? options.escapeString : DEFAULT_OPTIONS.escapeString; const getConfig = options => ({ callToJSON: options && options.callToJSON !== undefined ? options.callToJSON : DEFAULT_OPTIONS.callToJSON, colors: options && options.highlight ? getColorsHighlight(options) : getColorsEmpty(), escapeRegex: getEscapeRegex(options), escapeString: getEscapeString(options), indent: options && options.min ? '' : createIndent( options && options.indent !== undefined ? options.indent : DEFAULT_OPTIONS.indent ), maxDepth: options && options.maxDepth !== undefined ? 
options.maxDepth : DEFAULT_OPTIONS.maxDepth, min: options && options.min !== undefined ? options.min : DEFAULT_OPTIONS.min, plugins: options && options.plugins !== undefined ? options.plugins : DEFAULT_OPTIONS.plugins, printFunctionName: getPrintFunctionName(options), spacingInner: options && options.min ? ' ' : '\n', spacingOuter: options && options.min ? '' : '\n' }); function createIndent(indent) { return new Array(indent + 1).join(' '); } /** * Returns a presentation string of your `val` object * @param val any potential JavaScript object * @param options Custom settings */ function prettyFormat(val, options) { if (options) { validateOptions(options); if (options.plugins) { const plugin = findPlugin(options.plugins, val); if (plugin !== null) { return printPlugin(plugin, val, getConfig(options), '', 0, []); } } } const basicResult = printBasicValue( val, getPrintFunctionName(options), getEscapeRegex(options), getEscapeString(options) ); if (basicResult !== null) { return basicResult; } return printComplexValue(val, getConfig(options), '', 0, []); } prettyFormat.plugins = { AsymmetricMatcher: _AsymmetricMatcher.default, ConvertAnsi: _ConvertAnsi.default, DOMCollection: _DOMCollection.default, DOMElement: _DOMElement.default, Immutable: _Immutable.default, ReactElement: _ReactElement.default, ReactTestComponent: _ReactTestComponent.default }; /* eslint-disable-next-line no-redeclare */ module.exports = prettyFormat;
GoogleCloudPlatform/prometheus-engine
third_party/prometheus_ui/base/web/ui/react-app/node_modules/pretty-format/build/index.js
JavaScript
apache-2.0
13,582
from queue import LifoQueue, Queue
import signal
from threading import current_thread, Lock, main_thread

from app.util import app_info, log, process_utils
from app.util.singleton import Singleton


class UnhandledExceptionHandler(Singleton):
    """
    This class implements functionality to catch and log exceptions in a block of code, and also execute a set of
    teardown handlers intended to shut down the application gracefully and do any desired cleanup. It is implemented
    as a singleton because the teardown handlers can have global effects (e.g., stopping the event loop).

    This class is intended to be used as a context manager:
    >>> unhandled_exception_handler = UnhandledExceptionHandler.singleton()
    >>> with unhandled_exception_handler:
    >>>     # code which may throw an exception goes here!
    """
    # Process exit code used when any Exception was handled during teardown.
    HANDLED_EXCEPTION_EXIT_CODE = 1
    # Process exit code used when a teardown callback itself raised an exception.
    EXCEPTION_DURING_TEARDOWN_EXIT_CODE = 2
    # File that the SIGINFO handler appends application debug info to.
    _SIGINFO_DEBUG_LOG = '/tmp/clusterrunner.debug.log'

    _signal_names = {
        process_utils.SIGINFO: 'SIGINFO',
        signal.SIGINT: 'SIGINT',
        signal.SIGTERM: 'SIGTERM',
    }

    def __init__(self):
        super().__init__()
        self._handling_lock = Lock()
        self._teardown_callback_stack = LifoQueue()  # we execute callbacks in the reverse order that they were added
        self._logger = log.get_logger(__name__)
        self._handled_exceptions = Queue()
        self._teardown_callback_raised_exception = False

        # Set up handlers to be called when the application process receives certain signals.
        # Note: this will raise if called on a non-main thread, but we should NOT work around that here. (That could
        # prevent the teardown handler from ever being registered!) Calling code should be organized so that this
        # singleton is only ever initialized on the main thread.
        signal.signal(signal.SIGTERM, self._application_teardown_signal_handler)
        signal.signal(signal.SIGINT, self._application_teardown_signal_handler)
        try:
            signal.signal(process_utils.SIGINFO, self._application_info_dump_signal_handler)
        except ValueError:
            # SIGINFO is not available on all platforms; registration is best-effort.
            self._logger.warning('Failed to register signal handler for SIGINFO. This is expected if ClusterRunner '
                                 'is running on Windows.')

    @classmethod
    def reset_signal_handlers(cls):
        """
        Reset all signal handlers to their default values. This is useful in forked subprocesses since we often do not
        want to inherit all the signal handlers.
        """
        signals_to_reset = dict(cls._signal_names)
        signals_to_reset.pop(process_utils.SIGINFO, None)  # Leave the SIGINFO handler for forked subprocesses
        for signal_num in signals_to_reset:
            signal.signal(signal_num, signal.SIG_DFL)  # SIG_DFL restores the default behavior for each signal

    def add_teardown_callback(self, callback, *callback_args, **callback_kwargs):
        """
        Add a callback to be executed in the event of application teardown.

        :param callback: The method callback to execute
        :type callback: callable
        :param callback_args: args to be passed to the callback function
        :type callback_args: list
        :param callback_kwargs: kwargs to be passed to the callback function
        :type callback_kwargs: dict
        """
        self._teardown_callback_stack.put((callback, callback_args, callback_kwargs))

    def _application_teardown_signal_handler(self, sig, frame):
        """
        A signal handler that will trigger application teardown.

        :param sig: Signal number of the received signal
        :type sig: int
        :param frame: The interrupted stack frame
        :type frame: frame
        """
        self._logger.info('{} signal received. Triggering teardown.', self._signal_names[sig])
        raise AppTeardown

    def _application_info_dump_signal_handler(self, sig, frame):
        """
        A signal handler that will dump application info to the logs.

        :param sig: Signal number of the received signal
        :type sig: int
        :param frame: The interrupted stack frame
        :type frame: frame
        """
        self._logger.info('{} signal received. Dumping application info.', self._signal_names[sig])
        app_info_string = app_info.get_app_info_string()
        self._logger.notice(app_info_string)
        with open(self._SIGINFO_DEBUG_LOG, 'a') as f:
            f.write("{}\n".format(app_info_string))

    def __enter__(self):
        """
        Enables this to be used as a context manager. No special handling is needed on enter.
        """
        # Return self (rather than implicitly returning None) so `with handler as h:` binds the handler.
        # This is backward-compatible: plain `with handler:` usage is unaffected.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """
        Enables this to be used as a context manager. If an exception was raised during the execution block (inside
        the "with" statement) then exc_value will be set to the exception object.

        There are four situations in which we can go through this method:
        1. Exception, on main thread
            - The exception is logged and in some cases (e.g., SystemExit) may be immediately reraised.
            - Teardown callbacks are executed.
            - Example: A KeyboardInterrupt exception raised because user presses ctrl-c / sends SIGINT signal

        2. Exception, not on main thread
            - The exception is logged and in some cases may be passed to the main thread to be reraised.
            - Teardown callbacks are executed.
            - Example: Any unhandled exception that is raised on a SafeThread

        3. Normal exit, on main thread
            - We check to see if there was an exception that we need to reraise on the main thread. In almost all
              cases we will *not* reraise an exception on the main thread since it has already been logged and
              teardown callbacks have already been executed on the thread that raised the exception.
            - Teardown callbacks are *not* executed.
            - Example: A SystemExit exception raised by sys.exit() is passed from a SafeThread to the main thread to
              make Python set the exit code.

        4. Normal exit, not on main thread
            - Do nothing! All is well.
        """
        if exc_value:
            # An exception occurred during execution, so run the teardown callbacks. We use a lock here since multiple
            # threads could raise exceptions at the same time and we only want to execute these once.
            with self._handling_lock:
                if not isinstance(exc_value, (SystemExit, AppTeardown, KeyboardInterrupt)):
                    # It is not very useful to log the SystemExit exception since it is raised by sys.exit(), and thus
                    # application exit is completely expected.
                    self._logger.exception('Unhandled exception handler caught exception.')

                while not self._teardown_callback_stack.empty():
                    callback, args, kwargs = self._teardown_callback_stack.get()
                    self._logger.debug('Executing teardown callback: {}', callback)
                    try:
                        callback(*args, **kwargs)
                    except:  # pylint: disable=bare-except
                        # Also catch any exception that occurs during a teardown callback and log it.
                        self._teardown_callback_raised_exception = True
                        self._logger.exception('Exception raised by teardown callback {}', callback)

                self._handled_exceptions.put(exc_value)

        if current_thread() is main_thread():
            # The usage of this class on the main thread is a special case since only exceptions raised on the main
            # thread may affect the exit code of the overall application. Any unhandled exceptions raised on child
            # threads will only interrupt execution on that particular thread.
            #
            # This main-thread-only code path serves to ensure that exceptions raised on child threads during a `with
            # unhandled_exception_handler` block will also raise an exception on the main thread upon exit of the main
            # thread's `with unhandled_exception_handler` block. This ensures we will set a failing exit code even if
            # an exception is raised on a child thread.
            #
            # Note: this only works for child threads protected by the UnhandledExceptionHandler (e.g., an instance of
            # a SafeThread).
            #
            # We check the self._handled_exceptions queue to see if there was an exception that we want to reraise. We
            # only care about the first exception on the queue -- it was the first caught exception so it "wins".
            if not self._handled_exceptions.empty():
                handled_exception = self._handled_exceptions.get()

                # We reraise SystemExit on the main thread -- this specific exception is how Python controls setting
                # the process exit code, and that only works if raised on the main thread.
                if isinstance(handled_exception, SystemExit):
                    raise handled_exception

                # We also want to make sure the process exit code is set non-zero if the UnhandledExceptionHandler
                # handled any Exception at all. (Note: this does not include AppTeardown or KeyboardInterrupt, which
                # both inherit from BaseException.)
                if isinstance(handled_exception, Exception):
                    raise SystemExit(self.HANDLED_EXCEPTION_EXIT_CODE)

            # If an exception was raised while executing one of the teardown callbacks, also make sure to exit with a
            # non-zero exit code.
            if self._teardown_callback_raised_exception:
                raise SystemExit(self.EXCEPTION_DURING_TEARDOWN_EXIT_CODE)

        # Returning True from this method tells Python not to re-raise the exc_value exception on the current thread.
        return True


class AppTeardown(BaseException):
    """
    Trigger application teardown. This works similarly to raising SystemExit, but unlike SystemExit this will not be
    reraised on the main thread. Essentially, this would allow execution of main() in main.py to continue past the
    `with unhandled_exception_handler` block.
    """
nickzuber/ClusterRunner
app/util/unhandled_exception_handler.py
Python
apache-2.0
10,423
<?php

use Illuminate\Database\Migrations\Migration;
use Illuminate\Database\Schema\Blueprint;

class AddForeignKeysToVrMenuTranslationsTable extends Migration
{
    /**
     * Run the migrations.
     *
     * Adds the two foreign keys that tie menu translations to their language
     * code and parent menu row. Both constraints use NO ACTION on update and
     * delete.
     *
     * @return void
     */
    public function up()
    {
        Schema::table('vr_menu_translations', function (Blueprint $table) {
            // constraint name => [local column, referenced column, referenced table]
            $foreignKeys = array(
                'fk_vr_menu_translations_vr_language_codes1' => array('language_code', 'language_code', 'vr_language_codes'),
                'fk_vr_menu_translations_vr_menu1' => array('menu_id', 'id', 'vr_menu'),
            );

            foreach ($foreignKeys as $name => $spec) {
                $table->foreign($spec[0], $name)
                    ->references($spec[1])
                    ->on($spec[2])
                    ->onUpdate('NO ACTION')
                    ->onDelete('NO ACTION');
            }
        });
    }

    /**
     * Reverse the migrations.
     *
     * Drops both foreign key constraints added by up().
     *
     * @return void
     */
    public function down()
    {
        Schema::table('vr_menu_translations', function (Blueprint $table) {
            $constraintNames = array(
                'fk_vr_menu_translations_vr_language_codes1',
                'fk_vr_menu_translations_vr_menu1',
            );

            foreach ($constraintNames as $name) {
                $table->dropForeign($name);
            }
        });
    }
}
RamintaRam/atrask_vr
database/migrations/2017_05_23_080810_add_foreign_keys_to_vr_menu_translations_table.php
PHP
apache-2.0
953