repo_name
stringlengths
4
116
path
stringlengths
4
379
size
stringlengths
1
7
content
stringlengths
3
1.05M
license
stringclasses
15 values
mwilliamson-firefly/aws-sdk-net
sdk/src/Services/RDS/Generated/Model/ModifyDBClusterParameterGroupRequest.cs
4221
/* * Copyright 2010-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ /* * Do not modify this file. This file is generated from the rds-2014-10-31.normal.json service model. */ using System; using System.Collections.Generic; using System.Xml.Serialization; using System.Text; using System.IO; using Amazon.Runtime; using Amazon.Runtime.Internal; namespace Amazon.RDS.Model { /// <summary> /// Container for the parameters to the ModifyDBClusterParameterGroup operation. /// Modifies the parameters of a DB cluster parameter group. To modify more than one /// parameter, submit a list of the following: <code>ParameterName</code>, <code>ParameterValue</code>, /// and <code>ApplyMethod</code>. A maximum of 20 parameters can be modified in a single /// request. /// /// /// <para> /// For more information on Amazon Aurora, see <a href="http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html">Aurora /// on Amazon RDS</a> in the <i>Amazon RDS User Guide.</i> /// </para> /// <note> /// <para> /// Changes to dynamic parameters are applied immediately. Changes to static parameters /// require a reboot without failover to the DB cluster associated with the parameter /// group before the change can take effect. /// </para> /// </note> <important> /// <para> /// After you create a DB cluster parameter group, you should wait at least 5 minutes /// before creating your first DB cluster that uses that DB cluster parameter group as /// the default parameter group. 
This allows Amazon RDS to fully complete the create action /// before the parameter group is used as the default for a new DB cluster. This is especially /// important for parameters that are critical when creating the default database for /// a DB cluster, such as the character set for the default database defined by the <code>character_set_database</code> /// parameter. You can use the <i>Parameter Groups</i> option of the <a href="https://console.aws.amazon.com/rds/">Amazon /// RDS console</a> or the <a>DescribeDBClusterParameters</a> command to verify that your /// DB cluster parameter group has been created or modified. /// </para> /// </important> /// </summary> public partial class ModifyDBClusterParameterGroupRequest : AmazonRDSRequest { private string _dbClusterParameterGroupName; private List<Parameter> _parameters = new List<Parameter>(); /// <summary> /// Gets and sets the property DBClusterParameterGroupName. /// <para> /// The name of the DB cluster parameter group to modify. /// </para> /// </summary> public string DBClusterParameterGroupName { get { return this._dbClusterParameterGroupName; } set { this._dbClusterParameterGroupName = value; } } // Check to see if DBClusterParameterGroupName property is set internal bool IsSetDBClusterParameterGroupName() { return this._dbClusterParameterGroupName != null; } /// <summary> /// Gets and sets the property Parameters. /// <para> /// A list of parameters in the DB cluster parameter group to modify. /// </para> /// </summary> public List<Parameter> Parameters { get { return this._parameters; } set { this._parameters = value; } } // Check to see if Parameters property is set internal bool IsSetParameters() { return this._parameters != null && this._parameters.Count > 0; } } }
apache-2.0
mirego/j2objc
tree_shaker/src/main/java/com/google/devtools/treeshaker/TypeGraphBuilder.java
3997
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.devtools.treeshaker; import java.util.Collection; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.Map; import java.util.Set; /** Give information about inheritance relationships between types. */ class TypeGraphBuilder { private final Collection<Type> types; private final Set<String> externalTypeReferences; private final Set<String> unknownMethodReferences; TypeGraphBuilder(LibraryInfo libraryInfo) { Map<String, Type> typesByName = new LinkedHashMap<>(); externalTypeReferences = new HashSet<>(); unknownMethodReferences = new HashSet<>(); for (TypeInfo typeInfo : libraryInfo.getTypeList()) { Type type = Type.buildFrom(typeInfo, libraryInfo.getTypeMap(typeInfo.getTypeId())); typesByName.put(type.getName(), type); } // Build cross-references between types and members buildCrossReferences(libraryInfo, typesByName); types = typesByName.values(); } Collection<Type> getTypes() { return types; } Collection<String> getExternalTypeReferences() { return externalTypeReferences; } Collection<String> getUnknownMethodReferences() { return unknownMethodReferences; } private void buildCrossReferences(LibraryInfo libraryInfo, Map<String, Type> typesByName) { for (TypeInfo typeInfo : libraryInfo.getTypeList()) { Type type = typesByName.get(libraryInfo.getTypeMap(typeInfo.getTypeId())); String superClassName = libraryInfo.getTypeMap(typeInfo.getExtendsType()); Type superClass = 
typesByName.get(superClassName); if (superClass == null) { externalTypeReferences.add(superClassName); } else { superClass.addImmediateSubtype(type); type.setSuperClass(superClass); } for (int implementsId : typeInfo.getImplementsTypeList()) { Type superInterface = typesByName.get(libraryInfo.getTypeMap(implementsId)); if (superInterface == null) { externalTypeReferences.add(libraryInfo.getTypeMap(implementsId)); continue; } superInterface.addImmediateSubtype(type); type.addSuperInterface(superInterface); } for (MemberInfo memberInfo : typeInfo.getMemberList()) { Member member = type.getMemberByName(memberInfo.getName()); for (int referencedId : memberInfo.getReferencedTypesList()) { Type referencedType = typesByName.get(libraryInfo.getTypeMap(referencedId)); if (referencedType == null) { externalTypeReferences.add(libraryInfo.getTypeMap(referencedId)); continue; } member.addReferencedType(referencedType); } for (MethodInvocation methodInvocation : memberInfo.getInvokedMethodsList()) { Type enclosingType = typesByName.get(libraryInfo.getTypeMap(methodInvocation.getEnclosingType())); if (enclosingType == null) { externalTypeReferences.add(libraryInfo.getTypeMap(methodInvocation.getEnclosingType())); continue; } Member referencedMember = enclosingType.getMemberByName(methodInvocation.getMethod()); if (referencedMember == null) { unknownMethodReferences.add( enclosingType.getName() + "." + methodInvocation.getMethod()); continue; } member.addReferencedMember(referencedMember); } } } } }
apache-2.0
wenpos/elasticsearch
core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
38113
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.index.shard; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.InternalClusterInfoService; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import 
org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.CircuitBreakerStats; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; import java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.util.ArrayList; import java.util.Arrays; 
import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Predicate; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.containsString; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; public class IndexShardIT extends ESSingleNodeTestCase { @Override protected Collection<Class<? 
extends Plugin>> getPlugins() { return pluginList(InternalSettingsPlugin.class); } public void testLockTryingToDelete() throws Exception { createIndex("test"); ensureGreen(); NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class); ClusterService cs = getInstanceFromNode(ClusterService.class); final Index index = cs.state().metaData().index("test").getIndex(); Path[] shardPaths = env.availableShardPaths(new ShardId(index, 0)); logger.info("--> paths: [{}]", (Object)shardPaths); // Should not be able to acquire the lock because it's already open try { NodeEnvironment.acquireFSLockForPaths(IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), shardPaths); fail("should not have been able to acquire the lock"); } catch (LockObtainFailedException e) { assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock")); } // Test without the regular shard lock to assume we can acquire it // (worst case, meaning that the shard lock could be acquired and // we're green to delete the shard's directory) ShardLock sLock = new DummyShardLock(new ShardId(index, 0)); try { env.deleteShardDirectoryUnderLock(sLock, IndexSettingsModule.newIndexSettings("test", Settings.EMPTY)); fail("should not have been able to delete the directory"); } catch (LockObtainFailedException e) { assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock")); } } public void testMarkAsInactiveTriggersSyncedFlush() throws Exception { assertAcked(client().admin().indices().prepareCreate("test") .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0))); client().prepareIndex("test", "test").setSource("{}", XContentType.JSON).get(); ensureGreen("test"); IndicesService indicesService = getInstanceFromNode(IndicesService.class); indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0); assertBusy(() -> { IndexStats indexStats = 
client().admin().indices().prepareStats("test").clear().get().getIndex("test"); assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0); } ); IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); } public void testDurableFlagHasEffect() throws Exception { createIndex("test"); ensureGreen(); client().prepareIndex("test", "bar", "1").setSource("{}", XContentType.JSON).get(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); Translog translog = ShardUtilsTests.getShardEngine(shard).getTranslog(); Predicate<Translog> needsSync = (tlog) -> { // we can't use tlog.needsSync() here since it also takes the global checkpoint into account // we explicitly want to check here if our durability checks are taken into account so we only // check if we are synced upto the current write location Translog.Location lastWriteLocation = tlog.getLastWriteLocation(); try { // the lastWriteLocaltion has a Integer.MAX_VALUE size so we have to create a new one return tlog.ensureSynced(new Translog.Location(lastWriteLocation.generation, lastWriteLocation.translogLocation, 0)); } catch (IOException e) { throw new UncheckedIOException(e); } }; setDurability(shard, Translog.Durability.REQUEST); assertFalse(needsSync.test(translog)); setDurability(shard, Translog.Durability.ASYNC); client().prepareIndex("test", "bar", "2").setSource("{}", XContentType.JSON).get(); assertTrue(needsSync.test(translog)); setDurability(shard, Translog.Durability.REQUEST); client().prepareDelete("test", "bar", "1").get(); assertFalse(needsSync.test(translog)); setDurability(shard, Translog.Durability.ASYNC); 
client().prepareDelete("test", "bar", "2").get(); assertTrue(translog.syncNeeded()); setDurability(shard, Translog.Durability.REQUEST); assertNoFailures(client().prepareBulk() .add(client().prepareIndex("test", "bar", "3").setSource("{}", XContentType.JSON)) .add(client().prepareDelete("test", "bar", "1")).get()); assertFalse(needsSync.test(translog)); setDurability(shard, Translog.Durability.ASYNC); assertNoFailures(client().prepareBulk() .add(client().prepareIndex("test", "bar", "4").setSource("{}", XContentType.JSON)) .add(client().prepareDelete("test", "bar", "3")).get()); setDurability(shard, Translog.Durability.REQUEST); assertTrue(needsSync.test(translog)); } private void setDurability(IndexShard shard, Translog.Durability durability) { client().admin().indices().prepareUpdateSettings(shard.shardId().getIndexName()).setSettings( Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability.name()).build()).get(); assertEquals(durability, shard.getTranslogDurability()); } public void testUpdatePriority() { assertAcked(client().admin().indices().prepareCreate("test") .setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 200))); IndexService indexService = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); assertEquals(200, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400) .build()).get(); assertEquals(400, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); } public void testIndexDirIsDeletedWhenShardRemoved() throws Exception { Environment env = getInstanceFromNode(Environment.class); Path idxPath = env.sharedDataFile().resolve(randomAlphaOfLength(10)); logger.info("--> idxPath: [{}]", idxPath); Settings idxSettings = Settings.builder() 
.put(IndexMetaData.SETTING_DATA_PATH, idxPath) .build(); createIndex("test", idxSettings); ensureGreen("test"); client().prepareIndex("test", "bar", "1").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); SearchResponse response = client().prepareSearch("test").get(); assertHitCount(response, 1L); client().admin().indices().prepareDelete("test").get(); assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class))); assertPathHasBeenCleared(idxPath); } public void testExpectedShardSizeIsPresent() throws InterruptedException { assertAcked(client().admin().indices().prepareCreate("test") .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0))); for (int i = 0; i < 50; i++) { client().prepareIndex("test", "test").setSource("{}", XContentType.JSON).get(); } ensureGreen("test"); InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) getInstanceFromNode(ClusterInfoService.class); clusterInfoService.refresh(); ClusterState state = getInstanceFromNode(ClusterService.class).state(); Long test = clusterInfoService.getClusterInfo().getShardSize(state.getRoutingTable().index("test") .getShards().get(0).primaryShard()); assertNotNull(test); assertTrue(test > 0); } public void testIndexCanChangeCustomDataPath() throws Exception { Environment env = getInstanceFromNode(Environment.class); Path idxPath = env.sharedDataFile().resolve(randomAlphaOfLength(10)); final String INDEX = "idx"; Path startDir = idxPath.resolve("start-" + randomAlphaOfLength(10)); Path endDir = idxPath.resolve("end-" + randomAlphaOfLength(10)); logger.info("--> start dir: [{}]", startDir.toAbsolutePath().toString()); logger.info("--> end dir: [{}]", endDir.toAbsolutePath().toString()); // temp dirs are automatically created, but the end dir is what // startDir is going to be renamed as, so it needs to be deleted // otherwise we get all sorts of errors about the directory // 
already existing IOUtils.rm(endDir); Settings sb = Settings.builder() .put(IndexMetaData.SETTING_DATA_PATH, startDir.toAbsolutePath().toString()) .build(); Settings sb2 = Settings.builder() .put(IndexMetaData.SETTING_DATA_PATH, endDir.toAbsolutePath().toString()) .build(); logger.info("--> creating an index with data_path [{}]", startDir.toAbsolutePath().toString()); createIndex(INDEX, sb); ensureGreen(INDEX); client().prepareIndex(INDEX, "bar", "1").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); SearchResponse resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get(); assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L)); logger.info("--> closing the index [{}]", INDEX); client().admin().indices().prepareClose(INDEX).get(); logger.info("--> index closed, re-opening..."); client().admin().indices().prepareOpen(INDEX).get(); logger.info("--> index re-opened"); ensureGreen(INDEX); resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get(); assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L)); // Now, try closing and changing the settings logger.info("--> closing the index [{}]", INDEX); client().admin().indices().prepareClose(INDEX).get(); logger.info("--> moving data on disk [{}] to [{}]", startDir.getFileName(), endDir.getFileName()); assert Files.exists(endDir) == false : "end directory should not exist!"; Files.move(startDir, endDir, StandardCopyOption.REPLACE_EXISTING); logger.info("--> updating settings..."); client().admin().indices().prepareUpdateSettings(INDEX) .setSettings(sb2) .setIndicesOptions(IndicesOptions.fromOptions(true, false, true, true)) .get(); assert Files.exists(startDir) == false : "start dir shouldn't exist"; logger.info("--> settings updated and files moved, re-opening index"); client().admin().indices().prepareOpen(INDEX).get(); logger.info("--> index re-opened"); ensureGreen(INDEX); resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get(); 
assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L)); assertAcked(client().admin().indices().prepareDelete(INDEX)); assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class))); assertPathHasBeenCleared(startDir.toAbsolutePath()); assertPathHasBeenCleared(endDir.toAbsolutePath()); } public void testMaybeFlush() throws Exception { createIndex("test", Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST) .build()); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService test = indicesService.indexService(resolveIndex("test")); IndexShard shard = test.getShardOrNull(0); assertFalse(shard.shouldFlush()); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(117 /* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get(); client().prepareIndex("test", "test", "0") .setSource("{}", XContentType.JSON).setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); assertFalse(shard.shouldFlush()); shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, SourceToParse.source("test", "test", "1", new BytesArray("{}"), XContentType.JSON), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {}); assertTrue(shard.shouldFlush()); final Translog translog = shard.getEngine().getTranslog(); assertEquals(2, translog.uncommittedOperations()); client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON) .setRefreshPolicy(randomBoolean() ? 
IMMEDIATE : NONE).get(); assertBusy(() -> { // this is async assertFalse(shard.shouldFlush()); }); assertEquals(0, translog.uncommittedOperations()); translog.sync(); long size = translog.uncommittedSizeInBytes(); logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", translog.uncommittedSizeInBytes(), translog.uncommittedOperations(), translog.getGeneration()); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put( IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(size, ByteSizeUnit.BYTES)) .build()).get(); client().prepareDelete("test", "test", "2").get(); logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", translog.uncommittedSizeInBytes(), translog.uncommittedOperations(), translog.getGeneration()); assertBusy(() -> { // this is async logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", translog.uncommittedSizeInBytes(), translog.uncommittedOperations(), translog.getGeneration()); assertFalse(shard.shouldFlush()); }); assertEquals(0, translog.uncommittedOperations()); } public void testMaybeRollTranslogGeneration() throws Exception { final int generationThreshold = randomIntBetween(64, 512); final Settings settings = Settings .builder() .put("index.number_of_shards", 1) .put("index.translog.generation_threshold_size", generationThreshold + "b") .build(); createIndex("test", settings); ensureGreen("test"); final IndicesService indicesService = getInstanceFromNode(IndicesService.class); final IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); int rolls = 0; final Translog translog = shard.getEngine().getTranslog(); final long generation = translog.currentFileGeneration(); final int numberOfDocuments = randomIntBetween(32, 128); for (int i = 0; i < numberOfDocuments; i++) { assertThat(translog.currentFileGeneration(), equalTo(generation + rolls)); 
final Engine.IndexResult result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, SourceToParse.source("test", "test", "1", new BytesArray("{}"), XContentType.JSON), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {}); final Translog.Location location = result.getTranslogLocation(); shard.afterWriteOperation(); if (location.translogLocation + location.size > generationThreshold) { // wait until the roll completes assertBusy(() -> assertFalse(shard.shouldRollTranslogGeneration())); rolls++; assertThat(translog.currentFileGeneration(), equalTo(generation + rolls)); } } } public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); assertFalse(shard.shouldFlush()); final String key; final boolean flush = randomBoolean(); if (flush) { key = "index.translog.flush_threshold_size"; } else { key = "index.translog.generation_threshold_size"; } // size of the operation plus header and footer final Settings settings = Settings.builder().put(key, "117b").build(); client().admin().indices().prepareUpdateSettings("test").setSettings(settings).get(); client().prepareIndex("test", "test", "0") .setSource("{}", XContentType.JSON) .setRefreshPolicy(randomBoolean() ? 
IMMEDIATE : NONE) .get(); assertFalse(shard.shouldFlush()); final AtomicBoolean running = new AtomicBoolean(true); final int numThreads = randomIntBetween(2, 4); final Thread[] threads = new Thread[numThreads]; final CyclicBarrier barrier = new CyclicBarrier(numThreads + 1); for (int i = 0; i < threads.length; i++) { threads[i] = new Thread(() -> { try { barrier.await(); } catch (final InterruptedException | BrokenBarrierException e) { throw new RuntimeException(e); } while (running.get()) { shard.afterWriteOperation(); } }); threads[i].start(); } barrier.await(); final CheckedRunnable<Exception> check; if (flush) { final FlushStats flushStats = shard.flushStats(); final long total = flushStats.getTotal(); client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); check = () -> assertEquals(total + 1, shard.flushStats().getTotal()); } else { final long generation = shard.getEngine().getTranslog().currentFileGeneration(); client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); check = () -> assertEquals( generation + 1, shard.getEngine().getTranslog().currentFileGeneration()); } assertBusy(check); running.set(false); for (int i = 0; i < threads.length; i++) { threads[i].join(); } check.run(); } public void testShardHasMemoryBufferOnTranslogRecover() throws Throwable { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService indexService = indicesService.indexService(resolveIndex("test")); IndexShard shard = indexService.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); client().prepareDelete("test", "test", "0").get(); client().prepareIndex("test", "test", "1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {}; shard.close("simon says", false); AtomicReference<IndexShard> shardRef 
= new AtomicReference<>(); List<Exception> failures = new ArrayList<>(); IndexingOperationListener listener = new IndexingOperationListener() { @Override public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult result) { try { assertNotNull(shardRef.get()); // this is all IMC needs to do - check current memory and refresh assertTrue(shardRef.get().getIndexBufferRAMBytesUsed() > 0); shardRef.get().refresh("test"); } catch (Exception e) { failures.add(e); throw e; } } @Override public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResult result) { try { assertNotNull(shardRef.get()); // this is all IMC needs to do - check current memory and refresh assertTrue(shardRef.get().getIndexBufferRAMBytesUsed() > 0); shardRef.get().refresh("test"); } catch (Exception e) { failures.add(e); throw e; } } }; final IndexShard newShard = newIndexShard(indexService, shard, wrapper, getInstanceFromNode(CircuitBreakerService.class), listener); shardRef.set(newShard); recoverShard(newShard); try { ExceptionsHelper.rethrowAndSuppress(failures); } finally { newShard.close("just do it", randomBoolean()); } } /** Check that the accounting breaker correctly matches the segments API for memory usage */ private void checkAccountingBreaker() { CircuitBreakerService breakerService = getInstanceFromNode(CircuitBreakerService.class); CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING); long usedMem = acctBreaker.getUsed(); assertThat(usedMem, greaterThan(0L)); NodesStatsResponse response = client().admin().cluster().prepareNodesStats().setIndices(true).setBreaker(true).get(); NodeStats stats = response.getNodes().get(0); assertNotNull(stats); SegmentsStats segmentsStats = stats.getIndices().getSegments(); CircuitBreakerStats breakerStats = stats.getBreaker().getStats(CircuitBreaker.ACCOUNTING); assertEquals(usedMem, segmentsStats.getMemoryInBytes()); assertEquals(usedMem, breakerStats.getEstimated()); } public void 
testCircuitBreakerIncrementedByIndexShard() throws Exception { client().admin().cluster().prepareUpdateSettings() .setTransientSettings(Settings.builder().put("network.breaker.inflight_requests.overhead", 0.0)).get(); // Generate a couple of segments client().prepareIndex("test", "doc", "1").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON) .setRefreshPolicy(IMMEDIATE).get(); // Use routing so 2 documents are guarenteed to be on the same shard String routing = randomAlphaOfLength(5); client().prepareIndex("test", "doc", "2").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON) .setRefreshPolicy(IMMEDIATE).setRouting(routing).get(); client().prepareIndex("test", "doc", "3").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON) .setRefreshPolicy(IMMEDIATE).setRouting(routing).get(); checkAccountingBreaker(); // Test that force merging causes the breaker to be correctly adjusted logger.info("--> force merging to a single segment"); client().admin().indices().prepareForceMerge("test").setMaxNumSegments(1).setFlush(randomBoolean()).get(); client().admin().indices().prepareRefresh().get(); checkAccountingBreaker(); client().admin().cluster().prepareUpdateSettings() .setTransientSettings(Settings.builder().put("indices.breaker.total.limit", "1kb")).get(); // Test that we're now above the parent limit due to the segments Exception e = expectThrows(Exception.class, () -> client().prepareSearch("test").addAggregation(AggregationBuilders.terms("foo_terms").field("foo.keyword")).get()); logger.info("--> got: {}", ExceptionsHelper.detailedMessage(e)); assertThat(ExceptionsHelper.detailedMessage(e), containsString("[parent] Data too large, data for [<agg [foo_terms]>]")); client().admin().cluster().prepareUpdateSettings() .setTransientSettings(Settings.builder() .putNull("indices.breaker.total.limit") .putNull("network.breaker.inflight_requests.overhead")).get(); // Test that deleting the index 
causes the breaker to correctly be decremented logger.info("--> deleting index"); client().admin().indices().prepareDelete("test").get(); // Accounting breaker should now be 0 CircuitBreakerService breakerService = getInstanceFromNode(CircuitBreakerService.class); CircuitBreaker acctBreaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING); assertThat(acctBreaker.getUsed(), equalTo(0L)); } public static final IndexShard recoverShard(IndexShard newShard) throws IOException { DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); assertTrue(newShard.recoverFromStore()); IndexShardTestCase.updateRoutingEntry(newShard, newShard.routingEntry().moveToStarted()); return newShard; } public static final IndexShard newIndexShard(IndexService indexService, IndexShard shard, IndexSearcherWrapper wrapper, CircuitBreakerService cbs, IndexingOperationListener... 
listeners) throws IOException { ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry()); IndexShard newShard = new IndexShard(initializingShardRouting, indexService.getIndexSettings(), shard.shardPath(), shard.store(), indexService.getIndexSortSupplier(), indexService.cache(), indexService.mapperService(), indexService.similarityService(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper, indexService.getThreadPool(), indexService.getBigArrays(), null, Collections.emptyList(), Arrays.asList(listeners), () -> {}, cbs); return newShard; } private static ShardRouting getInitializingShardRouting(ShardRouting existingShardRouting) { ShardRouting shardRouting = TestShardRouting.newShardRouting(existingShardRouting.shardId(), existingShardRouting.currentNodeId(), null, existingShardRouting.primary(), ShardRoutingState.INITIALIZING, existingShardRouting.allocationId()); shardRouting = shardRouting.updateUnassigned(new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "fake recovery"), RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE); return shardRouting; } public void testAutomaticRefresh() throws InterruptedException { TimeValue randomTimeValue = randomFrom(random(), null, TimeValue.ZERO, TimeValue.timeValueMillis(randomIntBetween(0, 1000))); Settings.Builder builder = Settings.builder(); if (randomTimeValue != null) { builder.put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), randomTimeValue); } IndexService indexService = createIndex("test", builder.build()); assertFalse(indexService.getIndexSettings().isExplicitRefresh()); ensureGreen(); AtomicInteger totalNumDocs = new AtomicInteger(Integer.MAX_VALUE); CountDownLatch started = new CountDownLatch(1); Thread t = new Thread(() -> { SearchResponse searchResponse; started.countDown(); do { searchResponse = client().prepareSearch().get(); } while (searchResponse.getHits().totalHits != totalNumDocs.get()); }); t.start(); started.await(); 
assertNoSearchHits(client().prepareSearch().get()); int numDocs = scaledRandomIntBetween(25, 100); totalNumDocs.set(numDocs); CountDownLatch indexingDone = new CountDownLatch(numDocs); client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); indexingDone.countDown(); // one doc is indexed above blocking IndexShard shard = indexService.getShard(0); boolean hasRefreshed = shard.scheduledRefresh(); if (randomTimeValue == TimeValue.ZERO) { // with ZERO we are guaranteed to see the doc since we will wait for a refresh in the background assertFalse(hasRefreshed); assertTrue(shard.isSearchIdle()); } else if (randomTimeValue == null){ // with null we are guaranteed to see the doc since do execute the refresh. // we can't assert on hasRefreshed since it might have been refreshed in the background on the shard concurrently assertFalse(shard.isSearchIdle()); } assertHitCount(client().prepareSearch().get(), 1); for (int i = 1; i < numDocs; i++) { client().prepareIndex("test", "test", "" + i).setSource("{\"foo\" : \"bar\"}", XContentType.JSON) .execute(new ActionListener<IndexResponse>() { @Override public void onResponse(IndexResponse indexResponse) { indexingDone.countDown(); } @Override public void onFailure(Exception e) { indexingDone.countDown(); throw new AssertionError(e); } }); } indexingDone.await(); t.join(); } public void testPendingRefreshWithIntervalChange() throws InterruptedException { Settings.Builder builder = Settings.builder(); builder.put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.ZERO); IndexService indexService = createIndex("test", builder.build()); assertFalse(indexService.getIndexSettings().isExplicitRefresh()); ensureGreen(); assertNoSearchHits(client().prepareSearch().get()); client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); IndexShard shard = indexService.getShard(0); assertFalse(shard.scheduledRefresh()); assertTrue(shard.isSearchIdle()); 
CountDownLatch refreshLatch = new CountDownLatch(1); client().admin().indices().prepareRefresh() .execute(ActionListener.wrap(refreshLatch::countDown));// async on purpose to make sure it happens concurrently assertHitCount(client().prepareSearch().get(), 1); client().prepareIndex("test", "test", "1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); assertFalse(shard.scheduledRefresh()); // now disable background refresh and make sure the refresh happens CountDownLatch updateSettingsLatch = new CountDownLatch(1); client().admin().indices() .prepareUpdateSettings("test") .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1).build()) .execute(ActionListener.wrap(updateSettingsLatch::countDown)); assertHitCount(client().prepareSearch().get(), 2); // wait for both to ensure we don't have in-flight operations updateSettingsLatch.await(); refreshLatch.await(); client().prepareIndex("test", "test", "2").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); assertTrue(shard.scheduledRefresh()); assertTrue(shard.isSearchIdle()); assertHitCount(client().prepareSearch().get(), 3); } }
apache-2.0
dayutianfei/impala-Q
tests/metadata/test_ddl.py
14163
#!/usr/bin/env python # Copyright (c) 2012 Cloudera, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Impala tests for DDL statements import logging import pytest import shlex import time from tests.common.test_result_verifier import * from subprocess import call from tests.common.test_vector import * from tests.common.test_dimensions import ALL_NODES_ONLY from tests.common.impala_test_suite import * # Validates DDL statements (create, drop) class TestDdlStatements(ImpalaTestSuite): TEST_DBS = ['ddl_test_db', 'alter_table_test_db', 'alter_table_test_db2', 'function_ddl_test', 'udf_test', 'data_src_test'] @classmethod def get_workload(self): return 'functional-query' @classmethod def add_test_dimensions(cls): super(TestDdlStatements, cls).add_test_dimensions() sync_ddl_opts = [0, 1] if cls.exploration_strategy() != 'exhaustive': # Only run with sync_ddl on exhaustive since it increases test runtime. sync_ddl_opts = [0] cls.TestMatrix.add_dimension(create_exec_option_dimension( cluster_sizes=ALL_NODES_ONLY, disable_codegen_options=[False], batch_sizes=[0], sync_ddl=sync_ddl_opts)) # There is no reason to run these tests using all dimensions. cls.TestMatrix.add_dimension(create_uncompressed_text_dimension(cls.get_workload())) def setup_method(self, method): self.cleanup() # Get the current number of queries that are in the 'EXCEPTION' state. Used for # verification after running each test case. 
self.start_exception_count = self.query_exception_count() self.cleanup_hdfs_dirs() def teardown_method(self, method): end_exception_count = self.query_exception_count() # The number of exceptions may be < than what was in setup if the queries in the # EXCEPTION state were bumped out of the FINISHED list. We should never see an # increase in the number of queries in the exception state. assert end_exception_count <= self.start_exception_count def query_exception_count(self): """Returns the number of occurrences of 'EXCEPTION' on the debug /queries page""" return len(re.findall('EXCEPTION', self.impalad_test_service.read_debug_webpage('queries'))) def cleanup(self): map(self.cleanup_db, self.TEST_DBS) self.cleanup_hdfs_dirs() def cleanup_hdfs_dirs(self): # Cleanup the test table HDFS dirs between test runs so there are no errors the next # time a table is created with the same location. This also helps remove any stale # data from the last test run. self.hdfs_client.delete_file_dir("test-warehouse/part_data/", recursive=True) self.hdfs_client.delete_file_dir("test-warehouse/t1_tmp1/", recursive=True) self.hdfs_client.delete_file_dir("test-warehouse/t_part_tmp/", recursive=True) @pytest.mark.execute_serially def test_drop_cleans_hdfs_dirs(self): self.hdfs_client.delete_file_dir("test-warehouse/ddl_test_db.db/", recursive=True) assert not self.hdfs_client.exists("test-warehouse/ddl_test_db.db/") self.client.execute('use default') self.client.execute('create database ddl_test_db') # Verify the db directory exists assert self.hdfs_client.exists("test-warehouse/ddl_test_db.db/") self.client.execute("create table ddl_test_db.t1(i int)") # Verify the table directory exists assert self.hdfs_client.exists("test-warehouse/ddl_test_db.db/t1/") # Dropping the table removes the table's directory and preserves the db's directory self.client.execute("drop table ddl_test_db.t1") assert not self.hdfs_client.exists("test-warehouse/ddl_test_db.db/t1/") assert 
self.hdfs_client.exists("test-warehouse/ddl_test_db.db/") # Dropping the db removes the db's directory self.client.execute("drop database ddl_test_db") assert not self.hdfs_client.exists("test-warehouse/ddl_test_db.db/") @pytest.mark.execute_serially def test_create(self, vector): vector.get_value('exec_option')['abort_on_error'] = False self.__create_db_synced('ddl_test_db', vector) self.run_test_case('QueryTest/create', vector, use_db='ddl_test_db', multiple_impalad=self.__use_multiple_impalad(vector)) @pytest.mark.execute_serially def test_sync_ddl_drop(self, vector): """Verifies the catalog gets updated properly when dropping objects with sync_ddl enabled""" self.client.set_configuration({'sync_ddl': 0}) self.client.execute('create database ddl_test_db') self.client.set_configuration({'sync_ddl': 1}) # Drop the database immediately after creation (within a statestore heartbeat) and # verify the catalog gets updated properly. self.client.execute('drop database ddl_test_db') assert 'ddl_test_db' not in self.client.execute("show databases").data @pytest.mark.execute_serially def test_alter_table(self, vector): vector.get_value('exec_option')['abort_on_error'] = False # Create directory for partition data that does not use the (key=value) # format. 
self.hdfs_client.make_dir("test-warehouse/part_data/", permission=777) self.hdfs_client.create_file("test-warehouse/part_data/data.txt", file_data='1984') # Create test databases self.__create_db_synced('alter_table_test_db', vector) self.__create_db_synced('alter_table_test_db2', vector) self.run_test_case('QueryTest/alter-table', vector, use_db='alter_table_test_db', multiple_impalad=self.__use_multiple_impalad(vector)) @pytest.mark.execute_serially def test_views_ddl(self, vector): vector.get_value('exec_option')['abort_on_error'] = False self.__create_db_synced('ddl_test_db', vector) self.run_test_case('QueryTest/views-ddl', vector, use_db='ddl_test_db', multiple_impalad=self.__use_multiple_impalad(vector)) @pytest.mark.execute_serially def test_functions_ddl(self, vector): self.__create_db_synced('function_ddl_test', vector) self.run_test_case('QueryTest/functions-ddl', vector, use_db='function_ddl_test', multiple_impalad=self.__use_multiple_impalad(vector)) @pytest.mark.execute_serially def test_create_drop_function(self, vector): # This will create, run, and drop the same function repeatedly, exercising the # lib cache mechanism. create_fn_stmt = """create function f() returns int location '/test-warehouse/libTestUdfs.so' symbol='NoArgs'""" select_stmt = """select f() from functional.alltypes limit 10""" drop_fn_stmt = "drop function %s f()" self.create_drop_ddl(vector, "udf_test", [create_fn_stmt], [drop_fn_stmt], select_stmt) @pytest.mark.execute_serially def test_create_drop_data_src(self, vector): # This will create, run, and drop the same data source repeatedly, exercising # the lib cache mechanism. 
create_ds_stmt = """CREATE DATA SOURCE test_data_src LOCATION '/test-warehouse/data-sources/test-data-source.jar' CLASS 'com.cloudera.impala.extdatasource.AllTypesDataSource' API_VERSION 'V1'""" create_tbl_stmt = """CREATE TABLE data_src_tbl (x int) PRODUCED BY DATA SOURCE test_data_src""" drop_ds_stmt = "drop data source %s test_data_src" drop_tbl_stmt = "drop table %s data_src_tbl" select_stmt = "select * from data_src_tbl limit 1" create_stmts = [create_ds_stmt, create_tbl_stmt] drop_stmts = [drop_tbl_stmt, drop_ds_stmt] self.create_drop_ddl(vector, "data_src_test", create_stmts, drop_stmts, select_stmt) def create_drop_ddl(self, vector, db_name, create_stmts, drop_stmts, select_stmt): # Helper method to run CREATE/DROP DDL commands repeatedly and exercise the lib cache # create_stmts is the list of CREATE statements to be executed in order drop_stmts is # the list of DROP statements to be executed in order. Each statement should have a # '%s' placeholder to insert "IF EXISTS" or "". The select_stmt is just a single # statement to test after executing the CREATE statements. # TODO: it's hard to tell that the cache is working (i.e. if it did nothing to drop # the cache, these tests would still pass). Testing that is a bit harder and requires # us to update the udf binary in the middle. self.__create_db_synced(db_name, vector) self.client.set_configuration(vector.get_value('exec_option')) self.client.execute("use %s" % (db_name,)) for drop_stmt in drop_stmts: self.client.execute(drop_stmt % ("if exists")) for i in xrange(1, 10): for create_stmt in create_stmts: self.client.execute(create_stmt) self.client.execute(select_stmt) for drop_stmt in drop_stmts: self.client.execute(drop_stmt % ("")) @pytest.mark.execute_serially def test_create_alter_bulk_partition(self, vector): # Change the scale depending on the exploration strategy, with 50 partitions this # takes a few minutes to run, with 10 partitions it takes ~50s for two configurations. 
num_parts = 50 if self.exploration_strategy() != 'exhaustive': num_parts = 10 self.client.execute("use default") self.client.execute("drop table if exists foo_part") self.client.execute("create table foo_part(i int) partitioned by(j int, s string)") # Add some partitions (first batch of two) for i in xrange(num_parts / 5): start = time.time() self.client.execute("alter table foo_part add partition(j=%d, s='%s')" % (i, i)) print 'ADD PARTITION #%d exec time: %s' % (i, time.time() - start) # Modify one of the partitions self.client.execute("""alter table foo_part partition(j=1, s='1') set fileformat parquetfile""") # Alter one partition to a non-existent location twice (IMPALA-741) self.hdfs_client.delete_file_dir("tmp/dont_exist1/", recursive=True) self.hdfs_client.delete_file_dir("tmp/dont_exist2/", recursive=True) self.execute_query_expect_success(self.client, "alter table foo_part partition(j=1,s='1') set location '/tmp/dont_exist1'") self.execute_query_expect_success(self.client, "alter table foo_part partition(j=1,s='1') set location '/tmp/dont_exist2'") # Add some more partitions for i in xrange(num_parts / 5, num_parts): start = time.time() self.client.execute("alter table foo_part add partition(j=%d,s='%s')" % (i,i)) print 'ADD PARTITION #%d exec time: %s' % (i, time.time() - start) # Insert data and verify it shows up. 
self.client.execute("insert into table foo_part partition(j=1, s='1') select 1") assert '1' == self.execute_scalar("select count(*) from foo_part") self.client.execute("drop table foo_part") @pytest.mark.execute_serially def test_create_alter_tbl_properties(self, vector): self.__create_db_synced('alter_table_test_db', vector) self.client.execute("use alter_table_test_db") # Specify TBLPROPERTIES and SERDEPROPERTIES at CREATE time self.client.execute("""create table test_alter_tbl (i int) with serdeproperties ('s1'='s2', 's3'='s4') tblproperties ('p1'='v0', 'p1'='v1')""") properties = self.__get_tbl_properties('test_alter_tbl') assert len(properties) == 2 # The transient_lastDdlTime is variable, so don't verify the value. assert 'transient_lastDdlTime' in properties del properties['transient_lastDdlTime'] assert {'p1': 'v1'} == properties properties = self.__get_serde_properties('test_alter_tbl') assert {'s1': 's2', 's3': 's4'} == properties # Modify the SERDEPROPERTIES using ALTER TABLE SET. self.client.execute("alter table test_alter_tbl set serdeproperties "\ "('s1'='new', 's5'='s6')") properties = self.__get_serde_properties('test_alter_tbl') assert {'s1': 'new', 's3': 's4', 's5': 's6'} == properties # Modify the TBLPROPERTIES using ALTER TABLE SET. self.client.execute("alter table test_alter_tbl set tblproperties "\ "('prop1'='val1', 'p2'='val2', 'p2'='val3', ''='')") properties = self.__get_tbl_properties('test_alter_tbl') assert 'transient_lastDdlTime' in properties assert properties['p1'] == 'v1' assert properties['prop1'] == 'val1' assert properties['p2'] == 'val3' assert properties[''] == '' @classmethod def __use_multiple_impalad(cls, vector): return vector.get_value('exec_option')['sync_ddl'] == 1 @classmethod def __create_db_synced(cls, db_name, vector): """Creates a database using synchronized DDL to ensure all nodes have the test database available for use before executing the .test file(s). 
""" cls.client.execute('use default') cls.client.set_configuration({'sync_ddl': 1}) cls.client.execute('create database %s' % db_name) cls.client.set_configuration(vector.get_value('exec_option')) def __get_tbl_properties(self, table_name): """Extracts the table properties mapping from the output of DESCRIBE FORMATTED""" return self.__get_properties('Table Parameters:', table_name) def __get_serde_properties(self, table_name): """Extracts the serde properties mapping from the output of DESCRIBE FORMATTED""" return self.__get_properties('Storage Desc Params:', table_name) def __get_properties(self, section_name, table_name): """Extracts the table properties mapping from the output of DESCRIBE FORMATTED""" result = self.client.execute("describe formatted " + table_name) match = False properties = dict(); for row in result.data: if section_name in row: match = True elif match: row = row.split('\t') if (row[1] == 'NULL'): break properties[row[1].rstrip()] = row[2].rstrip() return properties
apache-2.0
MSG134/IVCT_Framework
IEEE1516e/src/main/java/hla/rti1516e/exceptions/TimeConstrainedIsNotEnabled.java
807
/* * The IEEE hereby grants a general, royalty-free license to copy, distribute, * display and make derivative works from this material, for all purposes, * provided that any use of the material contains the following * attribution: "Reprinted with permission from IEEE 1516.1(TM)-2010". * Should you require additional information, contact the Manager, Standards * Intellectual Property, IEEE Standards Association (stds-ipr@ieee.org). */ package hla.rti1516e.exceptions; /** * Public exception class TimeConstrainedIsNotEnabled */ public final class TimeConstrainedIsNotEnabled extends RTIexception { public TimeConstrainedIsNotEnabled(String msg) { super(msg); } public TimeConstrainedIsNotEnabled(String message, Throwable cause) { super(message, cause); } }
apache-2.0
fayland/binary-static
src/javascript/binary/websocket_pages/trade/purchase.js
11927
const Contract = require('./contract').Contract; const Symbols = require('./symbols').Symbols; const Tick = require('./tick').Tick; const WSTickDisplay = require('./tick_trade').WSTickDisplay; const Content = require('../../common_functions/content').Content; const isVisible = require('../../common_functions/common_functions').isVisible; const updatePurchaseStatus = require('./common').updatePurchaseStatus; const updateContractBalance = require('./common').updateContractBalance; const elementTextContent = require('../../common_functions/common_functions').elementTextContent; const elementInnerHtml = require('../../common_functions/common_functions').elementInnerHtml; /* * Purchase object that handles all the functions related to * contract purchase response */ const Purchase = (function () { 'use strict'; let purchase_data = {}; const display = function (details) { purchase_data = details; const receipt = details.buy, passthrough = details.echo_req.passthrough, container = document.getElementById('contract_confirmation_container'), message_container = document.getElementById('confirmation_message'), heading = document.getElementById('contract_purchase_heading'), descr = document.getElementById('contract_purchase_descr'), barrier_element = document.getElementById('contract_purchase_barrier'), reference = document.getElementById('contract_purchase_reference'), chart = document.getElementById('tick_chart'), payout = document.getElementById('contract_purchase_payout'), cost = document.getElementById('contract_purchase_cost'), profit = document.getElementById('contract_purchase_profit'), spots = document.getElementById('contract_purchase_spots'), confirmation_error = document.getElementById('confirmation_error'), contracts_list = document.getElementById('contracts_list'), button = document.getElementById('contract_purchase_button'); const error = details.error; const show_chart = !error && passthrough.duration <= 10 && passthrough.duration_unit === 't' && 
(sessionStorage.formname === 'risefall' || sessionStorage.formname === 'higherlower' || sessionStorage.formname === 'asian'); contracts_list.style.display = 'none'; if (error) { container.style.display = 'block'; message_container.hide(); confirmation_error.show(); elementInnerHtml(confirmation_error, error.message); } else { const guideBtn = document.getElementById('guideBtn'); if (guideBtn) { guideBtn.style.display = 'none'; } container.style.display = 'table-row'; message_container.show(); confirmation_error.hide(); elementTextContent(heading, Content.localize().textContractConfirmationHeading); elementTextContent(descr, receipt.longcode); if (barrier_element) barrier_element.textContent = ''; elementTextContent(reference, Content.localize().textContractConfirmationReference + ' ' + receipt.transaction_id); let payout_value, cost_value; if (passthrough.basis === 'payout') { payout_value = passthrough.amount; cost_value = passthrough['ask-price']; } else { cost_value = passthrough.amount; payout_value = receipt.payout; } const profit_value = Math.round((payout_value - cost_value) * 100) / 100; if (sessionStorage.getItem('formname') === 'spreads') { elementInnerHtml(payout, Content.localize().textStopLoss + ' <p>' + receipt.stop_loss_level + '</p>'); elementInnerHtml(cost, Content.localize().textAmountPerPoint + ' <p>' + receipt.amount_per_point + '</p>'); elementInnerHtml(profit, Content.localize().textStopProfit + ' <p>' + receipt.stop_profit_level + '</p>'); } else { elementInnerHtml(payout, Content.localize().textContractConfirmationPayout + ' <p>' + payout_value + '</p>'); elementInnerHtml(cost, Content.localize().textContractConfirmationCost + ' <p>' + cost_value + '</p>'); elementInnerHtml(profit, Content.localize().textContractConfirmationProfit + ' <p>' + profit_value + '</p>'); } updateContractBalance(receipt.balance_after); if (show_chart) { chart.show(); } else { chart.hide(); } if (Contract.form() === 'digits') { elementTextContent(spots, ''); 
spots.className = ''; spots.show(); } else { spots.hide(); } if (Contract.form() !== 'digits' && !show_chart) { elementTextContent(button, Content.localize().textContractConfirmationButton); button.setAttribute('contract_id', receipt.contract_id); button.show(); $('.open_contract_detailsws').attr('contract_id', receipt.contract_id).removeClass('invisible'); } else { button.hide(); $('.open_contract_detailsws').addClass('invisible'); } } if (show_chart) { let contract_sentiment; if (passthrough.contract_type === 'CALL' || passthrough.contract_type === 'ASIANU') { contract_sentiment = 'up'; } else { contract_sentiment = 'down'; } // calculate number of decimals needed to display tick-chart according to the spot // value of the underlying let decimal_points = 2; const tick_spots = Tick.spots(); const tick_spot_epochs = Object.keys(tick_spots); if (tick_spot_epochs.length > 0) { const last_quote = tick_spots[tick_spot_epochs[0]].toString(); if (last_quote.indexOf('.') !== -1) { decimal_points = last_quote.split('.')[1].length; } } let barrier; if (sessionStorage.getItem('formname') === 'higherlower') { barrier = passthrough.barrier; } WSTickDisplay.initialize({ symbol : passthrough.symbol, barrier : barrier, number_of_ticks : passthrough.duration, previous_tick_epoch : receipt.start_time, contract_category : sessionStorage.getItem('formname') === 'asian' ? 'asian' : 'callput', display_symbol : Symbols.getName(passthrough.symbol), contract_start : receipt.start_time, display_decimals : decimal_points, contract_sentiment : contract_sentiment, price : passthrough['ask-price'], payout : receipt.payout, show_contract_result: 1, width : $('#confirmation_message').width(), }); WSTickDisplay.spots_list = {}; } }; const update_spot_list = function() { if ($('#contract_purchase_spots:hidden').length) { return; } let duration = purchase_data.echo_req && purchase_data.echo_req.passthrough ? 
purchase_data.echo_req.passthrough.duration : null; if (!duration) { return; } const spots = document.getElementById('contract_purchase_spots'); const spots2 = Tick.spots(); const epoches = Object.keys(spots2).sort(function(a, b) { return a - b; }); if (spots) spots.textContent = ''; let last_digit; const replace = function(d) { last_digit = d; return '<b>' + d + '</b>'; }; for (let s = 0; s < epoches.length; s++) { const tick_d = { epoch: epoches[s], quote: spots2[epoches[s]], }; if (isVisible(spots) && tick_d.epoch && tick_d.epoch > purchase_data.buy.start_time) { const fragment = document.createElement('div'); fragment.classList.add('row'); const el1 = document.createElement('div'); el1.classList.add('col'); elementTextContent(el1, Content.localize().textTickResultLabel + ' ' + (spots.getElementsByClassName('row').length + 1)); fragment.appendChild(el1); const el2 = document.createElement('div'); el2.classList.add('col'); const date = new Date(tick_d.epoch * 1000); const hours = date.getUTCHours() < 10 ? '0' + date.getUTCHours() : date.getUTCHours(); const minutes = date.getUTCMinutes() < 10 ? '0' + date.getUTCMinutes() : date.getUTCMinutes(); const seconds = date.getUTCSeconds() < 10 ? 
'0' + date.getUTCSeconds() : date.getUTCSeconds(); elementTextContent(el2, hours + ':' + minutes + ':' + seconds); fragment.appendChild(el2); const tick = tick_d.quote.replace(/\d$/, replace); const el3 = document.createElement('div'); el3.classList.add('col'); elementInnerHtml(el3, tick); fragment.appendChild(el3); spots.appendChild(fragment); spots.scrollTop = spots.scrollHeight; if (last_digit && duration === 1) { let contract_status, final_price, pnl; const pass_contract_type = purchase_data.echo_req.passthrough.contract_type, pass_barrier = purchase_data.echo_req.passthrough.barrier; if ( (pass_contract_type === 'DIGITMATCH' && +last_digit === +pass_barrier) || (pass_contract_type === 'DIGITDIFF' && +last_digit !== +pass_barrier) || (pass_contract_type === 'DIGITEVEN' && +last_digit % 2 === 0) || (pass_contract_type === 'DIGITODD' && +last_digit % 2) || (pass_contract_type === 'DIGITOVER' && +last_digit > pass_barrier) || (pass_contract_type === 'DIGITUNDER' && +last_digit < pass_barrier) ) { spots.className = 'won'; final_price = $('#contract_purchase_payout').find('p').text(); pnl = $('#contract_purchase_cost').find('p').text(); contract_status = Content.localize().textContractStatusWon; } else { spots.className = 'lost'; final_price = 0; pnl = -$('#contract_purchase_cost').find('p').text(); contract_status = Content.localize().textContractStatusLost; } updatePurchaseStatus(final_price, pnl, contract_status); } duration--; if (!duration) { purchase_data.echo_req.passthrough.duration = 0; } } } }; return { display : display, update_spot_list: update_spot_list, }; })(); module.exports = { Purchase: Purchase, };
apache-2.0
chkir/backups
backups-service/src/main/java/com/yammer/backups/policy/LastDurationRetentionPolicy.java
1979
package com.yammer.backups.policy; /* * #%L * Backups * %% * Copyright (C) 2013 - 2014 Microsoft Corporation * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ import com.google.common.base.Predicate; import com.google.common.collect.Sets; import com.yammer.backups.api.metadata.AbstractMetadata; import io.dropwizard.util.Duration; import org.joda.time.DateTime; import org.joda.time.Seconds; import java.util.Set; public class LastDurationRetentionPolicy<T extends AbstractMetadata<?>> implements RetentionPolicy<T> { private final Duration duration; public LastDurationRetentionPolicy(Duration duration) { this.duration = duration; } protected long getAgeInSeconds(T item, DateTime now) { final DateTime date = item.getStartedDate(); return Seconds.secondsBetween(date, now).getSeconds(); } @Override public Set<T> retain(Set<T> items) { final DateTime now = DateTime.now(); final long durationInSeconds = duration.toSeconds(); return Sets.filter(items, new Predicate<T>() { @Override public boolean apply(T item) { final long ageInSeconds = getAgeInSeconds(item, now); return ageInSeconds < durationInSeconds; } }); } @Override public String toString() { return "LastDurationRetentionPolicy{" + "duration=" + duration + '}'; } }
apache-2.0
vivekjuneja/AppStash
vamp/vagrant/modules/consul/spec/defines/consul_watch_spec.rb
7177
require 'spec_helper'

# rspec-puppet unit tests for the consul::watch define, which renders one
# /etc/consul/watch_<title>.json file per declared watch.
describe 'consul::watch' do
  # Defaults shared by all examples unless a context overrides them.
  let(:facts) {{
    :architecture => 'x86_64',
    :version => '0.4.0'
  }}
  let(:title) { "my_watch" }

  # Watches were introduced in Consul 0.4.0; the define must fail fast on
  # older versions and accept 0.4.x and later.
  describe 'version checks' do
    context 'with version < 0.4.0' do
      let (:facts) {{ :architecture => 'x86_64' }}
      let(:hiera_data) {{ 'consul::version' => '0.3.0' }}
      let (:params) {{
        'type' => 'nodes',
        'handler' => 'handler_path',
      }}
      it {
        expect {
          should contain_file('/etc/consul/watch_my_watch.json')
        }.to raise_error(Puppet::Error, /Watches are only supported in Consul 0.4.0 and above/)
      }
    end

    context 'with version 0.4.1' do
      let (:facts) {{ :architecture => 'x86_64' }}
      let(:hiera_data) {{ 'consul::version' => '0.4.1' }}
      let (:params) {{
        'type' => 'nodes',
        'handler' => 'handler_path',
      }}
      it { should contain_file('/etc/consul/watch_my_watch.json') }
    end

    context 'with version 1.3.0' do
      let (:facts) {{ :architecture => 'x86_64' }}
      let(:hiera_data) {{ 'consul::version' => '1.3.0' }}
      let (:params) {{
        'type' => 'nodes',
        'handler' => 'handler_path',
      }}
      it { should contain_file('/etc/consul/watch_my_watch.json') }
    end
  end

  # Parameter validation: both 'type' and 'handler' are mandatory.
  describe 'with no args' do
    let(:params) {{}}

    it {
      expect { should raise_error(Puppet::Error)}
    }
  end

  describe 'with handler no type' do
    let(:params) {{
      'handler' => 'handler_path',
    }}

    it {
      expect { should raise_error(Puppet::Error)}
    }
  end

  describe 'with valid type no handler' do
    let(:params) {{
      'type' => 'nodes',
    }}

    it {
      expect { should raise_error(Puppet::Error)}
    }
  end

  # Happy path: both mandatory params serialized into the JSON file.
  describe 'with valid type and handler' do
    let(:params) {{
      'type' => 'nodes',
      'handler' => 'handler_path',
    }}

    it { should contain_file('/etc/consul/watch_my_watch.json') \
      .with_content(/"handler" *: *"handler_path"/)
      .with_content(/"type" *: *"nodes"/)
    }
  end

  # Optional attributes valid for every watch type.
  describe 'global attributes' do
    let (:params) {{
      'type' => 'nodes',
      'handler' => 'handler_path',

      'datacenter' => 'dcName',
      'token' => 'tokenValue',
    }}

    it { should contain_file('/etc/consul/watch_my_watch.json') \
      .with_content(/"datacenter" *: *"dcName"/)
      .with_content(/"token" *: *"tokenValue"/)
    }
  end

  # Per-type validation: each watch type has its own required/optional keys.
  describe 'type validation' do
    context '"key" type' do
      # 'key' is mandatory for key watches.
      context 'without key' do
        let (:params) {{
          'type' => 'key',
          'handler' => 'handler_path'
        }}

        it {
          expect { should raise_error(Puppet::Error)}
        }
      end

      context 'with key' do
        let (:params) {{
          'type' => 'key',
          'handler' => 'handler_path',

          'key' => 'KeyName',
        }}

        it { should contain_file('/etc/consul/watch_my_watch.json') \
          .with_content(/"type" *: *"key"/)
          .with_content(/"key" *: *"KeyName"/)
        }
      end
    end

    context '"keyprefix" type' do
      # 'keyprefix' is mandatory for keyprefix watches.
      context 'without keyprefix' do
        let (:params) {{
          'type' => 'keyprefix',
          'handler' => 'handler_path'
        }}

        it {
          expect { should raise_error(Puppet::Error)}
        }
      end

      context 'with keyprefix' do
        let (:params) {{
          'type' => 'keyprefix',
          'handler' => 'handler_path',

          'keyprefix' => 'keyPref',
        }}

        it { should contain_file('/etc/consul/watch_my_watch.json') \
          .with_content(/"type" *: *"keyprefix"/)
          .with_content(/"keyprefix" *: *"keyPref"/)
        }
      end
    end

    context '"service" type' do
      # 'service' is mandatory for service watches.
      context 'without service' do
        let (:params) {{
          'type' => 'service',
          'handler' => 'handler_path',
        }}

        it {
          expect { should raise_error(Puppet::Error) }
        }
      end

      context 'with service' do
        let (:params) {{
          'type' => 'service',
          'handler' => 'handler_path',

          'service' => 'serviceName',
        }}

        it { should contain_file('/etc/consul/watch_my_watch.json') \
          .with_content(/"type" *: *"service"/)
          .with_content(/"service" *: *"serviceName"/)
        }
      end

      # Note: 'service_tag' is emitted under the JSON key "tag".
      context 'with all optionals' do
        let (:params) {{
          'type' => 'service',
          'handler' => 'handler_path',
          'service' => 'serviceName',

          'service_tag' => 'serviceTagName',
          'passingonly' => 'true'
        }}

        it { should contain_file('/etc/consul/watch_my_watch.json') \
          .with_content(/"tag" *: *"serviceTagName"/)
          .with_content(/"passingonly" *: *"true"/)
        }
      end
    end

    context '"checks" type' do
      # checks watches have no mandatory extras.
      context 'without optionals' do
        let (:params) {{
          'type' => 'checks',
          'handler' => 'handler_path',
        }}

        it { should contain_file('/etc/consul/watch_my_watch.json') \
          .with_content(/"type" *: *"checks"/)
        }
      end

      context 'with all optionals' do
        let (:params) {{
          'type' => 'checks',
          'handler' => 'handler_path',

          'service' => 'serviceName',
          'state' => 'serviceState',
        }}

        it { should contain_file('/etc/consul/watch_my_watch.json') \
          .with_content(/"service" *: *"serviceName"/)
          .with_content(/"state" *: *"serviceState"/)
        }
      end
    end

    context '"event" type' do
      context 'without optionals' do
        let (:params) {{
          'type' => 'event',
          'handler' => 'handler_path',
        }}

        it { should contain_file('/etc/consul/watch_my_watch.json') \
          .with_content(/"type" *: *"event"/)
        }
      end

      # Note: 'event_name' is emitted under the JSON key "name".
      context 'with optionals' do
        let (:params) {{
          'type' => 'event',
          'handler' => 'handler_path',

          'event_name'=> 'eventName',
        }}

        it { should contain_file('/etc/consul/watch_my_watch.json') \
          .with_content(/"name" *: *"eventName"/)
        }
      end
    end

    context '"nodes" type' do
      let (:params) {{
        'type' => 'nodes',
        'handler' => 'handler_path'
      }}

      it { should contain_file('/etc/consul/watch_my_watch.json') \
        .with_content(/"type" *: *"nodes"/)
      }
    end

    context '"services" type' do
      let (:params) {{
        'type' => 'services',
        'handler' => 'handler_path'
      }}

      it { should contain_file('/etc/consul/watch_my_watch.json') \
        .with_content(/"type" *: *"services"/)
      }
    end

    # Unrecognized watch types must be rejected.
    context '"unknown_type" type' do
      let(:params) {{
        'type' => 'unknown_type',
        'handler' => 'handler_path',
      }}

      it {
        expect { should raise_error(Puppet::Error)}
      }
    end
  end
end
apache-2.0
icloudkit/net.cloudkit
net.cloudkit.enterprises/src/test/java/net/cloudkit/enterprises/xml/message/XMLReader.java
11002
/*
 * Copyright (C) 2016. The CloudKit Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.cloudkit.enterprises.xml.message;

import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;

/**
 * Reads the message configuration file ({@code message_config.xml}) from the
 * classpath using StAX and groups the {@code type} attribute of each element
 * found inside a {@code <messages name="...">} element by that group name.
 */
public class XMLReader {

    /** Classpath resource holding the message configuration. */
    private static final String MESSAGE_CONFIG = "message_config.xml";

    /**
     * Intentionally a no-op. The previous body was a large scratchpad of
     * commented-out StAX API examples (cursor API, event API, filters, DTD
     * handling) and has been removed as dead code.
     */
    public void test() {
    }

    /**
     * Parses {@code message_config.xml} and prints, for each {@code <messages>}
     * group, the {@code type} attribute of every element inside it.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        // Parsed result: messages-group name -> list of "type" attribute values.
        Map<String, List<String>> dataMap = new HashMap<String, List<String>>();
        List<String> messageList = null;
        String key = null;

        InputStream in = null;
        XMLStreamReader reader = null;
        try {
            // Load the configuration from the classpath and create a StAX cursor reader.
            in = XMLReader.class.getClassLoader().getResourceAsStream(MESSAGE_CONFIG);
            XMLInputFactory xif = XMLInputFactory.newInstance();
            reader = xif.createXMLStreamReader(in);

            // Becomes true once the first <messages> element has been seen.
            boolean flag = false;
            while (reader.hasNext()) {
                int event = reader.next();

                if (event == XMLStreamReader.START_ELEMENT) {
                    if ("messages".equals(reader.getLocalName())) {
                        // Start of a group: remember its name and open a fresh list.
                        key = reader.getAttributeValue(null, "name");
                        messageList = new ArrayList<String>();
                        flag = true;
                        continue;
                    }
                    if (flag) {
                        // Any element inside a group contributes its "type" attribute
                        // (null if the attribute is absent).
                        messageList.add(reader.getAttributeValue(null, "type"));
                    }
                }

                if (event == XMLStreamReader.END_ELEMENT) {
                    if ("messages".equals(reader.getLocalName())) {
                        dataMap.put(key, messageList);
                        messageList = null;
                        // NOTE(review): 'flag' is deliberately left true here (matching
                        // the original behavior, where resetting it was commented out).
                        // A non-<messages> element appearing between groups would NPE
                        // on the null messageList — confirm the config never has one.
                        continue;
                    }
                }
            }
        } catch (XMLStreamException e) {
            e.printStackTrace();
        } finally {
            // FIX: the XMLStreamReader was previously never closed; close it
            // before the underlying stream.
            if (reader != null) {
                try {
                    reader.close();
                } catch (XMLStreamException e) {
                    e.printStackTrace();
                }
            }
            if (in != null) {
                try {
                    in.close();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }

        // Dump the parsed groups to stdout.
        for (Map.Entry<String, List<String>> entry : dataMap.entrySet()) {
            System.out.println(entry.getKey() + "--------------------------");
            for (String string : entry.getValue()) {
                System.out.println(string);
            }
        }
    }
}
apache-2.0
xavivars/abot
Abot/Core/CsQueryHyperLinkParser.cs
3143
using Abot.Poco;
using CsQuery;
using System;
using System.Collections.Generic;
using System.Linq;

namespace Abot.Core
{
    /// <summary>
    /// Parser that uses CsQuery https://github.com/jamietre/CsQuery to parse page links
    /// </summary>
    [Serializable]
    public class CSQueryHyperlinkParser : HyperLinkParser
    {
        // Fields are assigned only in the parameterized constructor; marked
        // readonly to make that explicit. Kept (not removed) to preserve the
        // [Serializable] layout.
        readonly Func<string, string> _cleanURLFunc;
        readonly bool _isRespectMetaRobotsNoFollowEnabled;
        readonly bool _isRespectAnchorRelNoFollowEnabled;

        public CSQueryHyperlinkParser()
        {
        }

        /// <param name="isRespectMetaRobotsNoFollowEnabled">Whether a page-level meta robots nofollow suppresses link extraction.</param>
        /// <param name="isRespectAnchorRelNoFollowEnabled">Whether anchors with rel="nofollow" are skipped.</param>
        /// <param name="cleanURLFunc">Optional transform applied to each extracted href value.</param>
        public CSQueryHyperlinkParser(bool isRespectMetaRobotsNoFollowEnabled, bool isRespectAnchorRelNoFollowEnabled, Func<string, string> cleanURLFunc = null)
            : base(isRespectMetaRobotsNoFollowEnabled)
        {
            _isRespectMetaRobotsNoFollowEnabled = isRespectMetaRobotsNoFollowEnabled;
            _isRespectAnchorRelNoFollowEnabled = isRespectAnchorRelNoFollowEnabled;
            _cleanURLFunc = cleanURLFunc;
        }

        protected override string ParserType
        {
            get { return "CsQuery"; }
        }

        /// <summary>
        /// Extracts href values from a/area elements (honoring rel="nofollow" when
        /// enabled) plus any rel="canonical" link that points to a different URL.
        /// Returns null when the page's meta robots directive forbids following
        /// (presumably handled by the base class — confirm against HyperLinkParser).
        /// </summary>
        protected override IEnumerable<string> GetHrefValues(CrawledPage crawledPage)
        {
            if (HasRobotsNoFollow(crawledPage))
                return null;

            IEnumerable<string> hrefValues = crawledPage.CsQueryDocument.Select("a, area")
                .Elements
                .Where(e => !HasRelNoFollow(e))
                .Select(y => _cleanURLFunc != null ? _cleanURLFunc(y.GetAttribute("href")) : y.GetAttribute("href"))
                .Where(a => !string.IsNullOrWhiteSpace(a));

            IEnumerable<string> canonicalHref = crawledPage.CsQueryDocument.
                Select("link").Elements.
                Where(e => HasRelCanonicalPointingToDifferentUrl(e, crawledPage.Uri.ToString())).
                Select(e => e.Attributes["href"]);

            return hrefValues.Concat(canonicalHref);
        }

        /// <summary>
        /// True when the element is a rel="canonical" link whose href differs
        /// from the page's own URL (case-insensitive comparison).
        /// </summary>
        protected bool HasRelCanonicalPointingToDifferentUrl(IDomElement e, string orginalUrl)
        {
            return e.HasAttribute("rel") && !string.IsNullOrWhiteSpace(e.Attributes["rel"]) &&
                    string.Equals(e.Attributes["rel"], "canonical", StringComparison.OrdinalIgnoreCase) &&
                    e.HasAttribute("href") && !string.IsNullOrWhiteSpace(e.Attributes["href"]) &&
                    !string.Equals(e.Attributes["href"], orginalUrl, StringComparison.OrdinalIgnoreCase);
        }

        /// <summary>Returns the trimmed href of the page's base tag, or "" when absent.</summary>
        protected override string GetBaseHrefValue(CrawledPage crawledPage)
        {
            string baseTagValue = crawledPage.CsQueryDocument.Select("base").Attr("href") ?? "";
            return baseTagValue.Trim();
        }

        /// <summary>Returns the content of the meta robots tag, if any.</summary>
        protected override string GetMetaRobotsValue(CrawledPage crawledPage)
        {
            // FIX: was d.Name.ToLowerInvariant() == "robots"; use an explicit
            // ordinal case-insensitive comparison instead (CA1307).
            return crawledPage.CsQueryDocument["meta[name]"]
                .Filter(d => string.Equals(d.Name, "robots", StringComparison.OrdinalIgnoreCase))
                .Attr("content");
        }

        private bool HasRelNoFollow(IDomElement e)
        {
            // FIX: the original used ToLower(), a culture-sensitive conversion
            // (CA1304; e.g. the Turkish 'I' problem). An ordinal case-insensitive
            // comparison is both correct and allocation-free.
            return _isRespectAnchorRelNoFollowEnabled &&
                   e.HasAttribute("rel") &&
                   string.Equals(e.GetAttribute("rel").Trim(), "nofollow", StringComparison.OrdinalIgnoreCase);
        }
    }
}
apache-2.0
Shan1024/carbon-uuf
samples/components/org.wso2.carbon.uuf.sample.simple-auth/src/main/pages/login.js
1746
/*
 * Copyright (c) 2016, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * UUF page controller for the sample login page. On GET it redirects
 * already-authenticated users; on POST it authenticates the submitted
 * credentials and either redirects on success or returns an error message
 * for the template to render.
 */
function onRequest(env) {
    var session = getSession();
    if (session) {
        // Already authenticated: redirect and stop.
        // FIX: previously execution fell through to the form handling below,
        // so a logged-in POST would re-run authentication and could trigger a
        // second redirect after the first one was already issued.
        sendRedirect(env.contextPath + env.config['loginRedirectUri']);
        return;
    }

    if (env.request.method === "POST") {
        var username = env.request.formParams['username'];
        var password = env.request.formParams['password'];

        // calling dummy authentication service
        var result = authenticate(username, password);
        if (result.success) {
            //configure login redirect uri
            sendRedirect(env.contextPath + env.config['loginRedirectUri']);
        } else {
            return {errorMessage: result.message};
        }
    }
}

/**
 * Authenticates against the sample SimpleAuthHandler OSGi service and, on
 * success, creates a session for the returned user.
 *
 * @returns {{success: boolean, message: string}} "success" on success,
 *          otherwise the failure reason from the thrown exception.
 */
function authenticate(username, password) {
    try {
        // Calling dummy osgi authentication service
        var SimpleAuthHandler = Java.type("org.wso2.carbon.uuf.sample.simpleauth.bundle.SimpleAuthHandler");
        var user = SimpleAuthHandler.authenticate(username, password);
        createSession(user);
        return {success: true, message: "success"}
    } catch (e) {
        return {success: false, message: e.message};
    }
}
apache-2.0
cloverink/ebisu
wp-content/plugins/formidable/classes/models/FrmEntryMeta.php
13482
<?php
if ( ! defined('ABSPATH') ) {
	die( 'You are not allowed to call this page directly.' );
}

/**
 * Data-access layer for Formidable entry metadata, stored in the
 * {prefix}frm_item_metas table (one row per entry/field pair).
 */
class FrmEntryMeta {

	/**
	 * Insert one meta row for an entry/field pair.
	 *
	 * NOTE(review): the required $meta_value follows the optional $meta_key,
	 * which PHP 8 deprecates; $meta_key is unused here and kept for
	 * backward-compatible call sites.
	 *
	 * @param int|string $entry_id
	 * @param int|string $field_id
	 * @param string $meta_key Unused.
	 * @param mixed $meta_value Arrays are filtered of empty values and serialized.
	 * @return int Inserted row id, or 0 when the value is blank or the insert failed.
	 */
	public static function add_entry_meta( $entry_id, $field_id, $meta_key = null, $meta_value ) {
		global $wpdb;

		if ( FrmAppHelper::is_empty_value( $meta_value ) ) {
			// don't save blank fields
			return 0;
		}

		$new_values = array(
			'meta_value' => is_array( $meta_value ) ? serialize( array_filter( $meta_value, 'FrmAppHelper::is_not_empty_value' ) ) : trim( $meta_value ),
			'item_id'    => $entry_id,
			'field_id'   => $field_id,
			'created_at' => current_time('mysql', 1),
		);

		$new_values = apply_filters('frm_add_entry_meta', $new_values);

		$query_results = $wpdb->insert( $wpdb->prefix . 'frm_item_metas', $new_values );

		if ( $query_results ) {
			self::clear_cache();
			$id = $wpdb->insert_id;
		} else {
			$id = 0;
		}

		return $id;
	}

	/**
	 * Update the meta value for an existing entry/field pair.
	 *
	 * @param int|string $entry_id
	 * @param int|string $field_id
	 * @param string $meta_key Unused.
	 * @param mixed $meta_value
	 * @return int|false Number of rows updated, or false when $field_id is falsy.
	 */
	public static function update_entry_meta( $entry_id, $field_id, $meta_key = null, $meta_value ) {
		if ( ! $field_id ) {
			return false;
		}

		global $wpdb;

		$values = $where_values = array( 'item_id' => $entry_id, 'field_id' => $field_id );
		$values['meta_value'] = $meta_value;
		$values = apply_filters('frm_update_entry_meta', $values);

		if ( is_array($values['meta_value']) ) {
			$values['meta_value'] = array_filter( $values['meta_value'], 'FrmAppHelper::is_not_empty_value' );
		}
		$meta_value = maybe_serialize($values['meta_value']);

		wp_cache_delete( $entry_id, 'frm_entry');
		self::clear_cache();

		return $wpdb->update( $wpdb->prefix . 'frm_item_metas', array( 'meta_value' => $meta_value ), $where_values );
	}

	/**
	 * Sync all metas for an entry with the submitted $values: update existing
	 * rows, insert new ones, and delete rows for fields no longer present.
	 *
	 * @param int|string $entry_id
	 * @param array $values field_id => meta_value map.
	 */
	public static function update_entry_metas( $entry_id, $values ) {
		global $wpdb;

		// Field ids that already have a meta row for this entry.
		$prev_values = FrmDb::get_col( $wpdb->prefix . 'frm_item_metas', array( 'item_id' => $entry_id, 'field_id !' => 0 ), 'field_id' );

		foreach ( $values as $field_id => $meta_value ) {
			$field = false;
			if ( ! empty( $field_id ) ) {
				$field = FrmField::getOne( $field_id );
			}

			// set the value for the file upload field and add new tags (in Pro version)
			$meta_value = apply_filters( 'frm_prepare_data_before_db', $meta_value, $field_id, $entry_id, compact( 'field' ) );

			if ( $prev_values && in_array($field_id, $prev_values) ) {

				if ( ( is_array( $meta_value ) && empty( $meta_value ) ) || ( ! is_array( $meta_value ) && trim( $meta_value ) == '' ) ) {
					// remove blank fields
					unset( $values[ $field_id ] );
				} else {
					// if value exists, then update it
					self::update_entry_meta( $entry_id, $field_id, '', $meta_value );
				}
			} else {
				// if value does not exist, then create it
				self::add_entry_meta( $entry_id, $field_id, '', $meta_value );
			}
		}

		if ( empty($prev_values) ) {
			return;
		}

		// Any previously stored field not present in $values is stale.
		$prev_values = array_diff($prev_values, array_keys($values));

		if ( empty($prev_values) ) {
			return;
		}

		// prepare the query
		$where = array( 'item_id' => $entry_id, 'field_id' => $prev_values );
		FrmDb::get_where_clause_and_values( $where );

		// Delete any leftovers
		$wpdb->query( $wpdb->prepare( 'DELETE FROM ' . $wpdb->prefix . 'frm_item_metas ' . $where['where'], $where['values'] ) );
		self::clear_cache();
	}

	/**
	 * Copy every meta row from one entry to another (used when duplicating entries).
	 *
	 * @param int|string $old_id Source entry id.
	 * @param int|string $new_id Destination entry id.
	 */
	public static function duplicate_entry_metas( $old_id, $new_id ) {
		$metas = self::get_entry_meta_info($old_id);
		foreach ( $metas as $meta ) {
			self::add_entry_meta($new_id, $meta->field_id, null, $meta->meta_value);
			unset($meta);
		}
		self::clear_cache();
	}

	/**
	 * Delete the meta row for a single entry/field pair.
	 *
	 * @param int|string $entry_id
	 * @param int|string $field_id
	 * @return int|false Number of rows deleted.
	 */
	public static function delete_entry_meta( $entry_id, $field_id ) {
		global $wpdb;
		self::clear_cache();
		return $wpdb->query($wpdb->prepare("DELETE FROM {$wpdb->prefix}frm_item_metas WHERE field_id=%d AND item_id=%d", $field_id, $entry_id));
	}

	/**
	 * Clear entry meta caching
	 * Called when a meta is added or changed
	 *
	 * @since 2.0.5
	 */
	public static function clear_cache() {
		FrmAppHelper::cache_delete_group( 'frm_entry_meta' );
		FrmAppHelper::cache_delete_group( 'frm_item_meta' );
	}

	/**
	 * Get a single meta value from an entry object, preferring its preloaded
	 * ->metas array and falling back to a database lookup.
	 *
	 * @since 2.0.9
	 *
	 * @param object $entry Entry object; may or may not carry a ->metas array.
	 * @param int|string $field_id
	 * @return mixed|false False when the preloaded metas lack the field.
	 */
	public static function get_meta_value( $entry, $field_id ) {
		if ( isset( $entry->metas ) ) {
			return isset( $entry->metas[ $field_id ] ) ? $entry->metas[ $field_id ] : false;
		} else {
			return self::get_entry_meta_by_field( $entry->id, $field_id );
		}
	}

	/**
	 * Fetch one meta value for an entry, by field id or field key.
	 *
	 * @param int|object $entry_id Entry id, or a cached entry object.
	 * @param int|string $field_id Numeric field id, or a field key string.
	 * @return mixed Unserialized, slash-stripped value (null when absent).
	 */
	public static function get_entry_meta_by_field( $entry_id, $field_id ) {
		global $wpdb;

		if ( is_object( $entry_id ) ) {
			// An entry object was passed in; use it as its own cache.
			$entry = $entry_id;
			$entry_id = $entry->id;
			$cached = $entry;
		} else {
			$entry_id = (int) $entry_id;
			$cached = FrmAppHelper::check_cache( $entry_id, 'frm_entry' );
		}

		if ( $cached && isset( $cached->metas ) && isset( $cached->metas[ $field_id ] ) ) {
			$result = $cached->metas[ $field_id ];
			return stripslashes_deep($result);
		}

		$get_table = $wpdb->prefix . 'frm_item_metas';
		$query = array( 'item_id' => $entry_id );
		if ( is_numeric($field_id) ) {
			$query['field_id'] = $field_id;
		} else {
			// Non-numeric: treat $field_id as a field key and join the fields table.
			$get_table .= ' it LEFT OUTER JOIN ' . $wpdb->prefix . 'frm_fields fi ON it.field_id=fi.id';
			$query['fi.field_key'] = $field_id;
		}

		$result = FrmDb::get_var( $get_table, $query, 'meta_value' );
		$result = maybe_unserialize($result);
		$result = stripslashes_deep($result);

		return $result;
	}

	/**
	 * @deprecated 1.07.10 Use get_entry_meta_info() or FrmDb directly.
	 *
	 * @param int|string $entry_id
	 * @return array Raw meta_value column for the entry.
	 */
	public static function get_entry_metas( $entry_id ) {
		_deprecated_function( __FUNCTION__, '1.07.10');

		global $wpdb;
		return FrmDb::get_col( $wpdb->prefix . 'frm_item_metas', array( 'item_id' => $entry_id ), 'meta_value' );
	}

	/**
	 * Fetch all meta values stored for one field across entries.
	 *
	 * @param int|string $field_id Numeric field id or field key.
	 * @param string $order SQL ORDER BY fragment.
	 * @param string $limit SQL LIMIT fragment.
	 * @param array $args Supports 'value' (exact match), 'unique', 'stripslashes', 'is_draft'.
	 * @return array
	 */
	public static function get_entry_metas_for_field( $field_id, $order = '', $limit = '', $args = array() ) {
		$defaults = array( 'value' => false, 'unique' => false, 'stripslashes' => true, 'is_draft' => false );
		$args = wp_parse_args( $args, $defaults );

		$query = array();
		self::meta_field_query($field_id, $order, $limit, $args, $query);
		$query = implode(' ', $query);

		$cache_key = 'entry_metas_for_field_' . $field_id . $order . $limit . maybe_serialize( $args );
		$values = FrmAppHelper::check_cache($cache_key, 'frm_entry', $query, 'get_col');

		if ( ! $args['stripslashes'] ) {
			return $values;
		}

		foreach ( $values as $k => $v ) {
			$values[ $k ] = maybe_unserialize( $v );
			unset($k, $v);
		}

		return stripslashes_deep($values);
	}

	/**
	 * Build (into $query) the SQL pieces used by get_entry_metas_for_field().
	 *
	 * @param int|string $field_id Numeric field id or field key.
	 * @param string $order
	 * @param string $limit
	 * @param array $args
	 * @param array $query Accumulator of SQL fragments, joined with spaces by the caller.
	 */
	private static function meta_field_query( $field_id, $order, $limit, $args, array &$query ) {
		global $wpdb;
		$query[] = 'SELECT';
		$query[] = $args['unique'] ? 'DISTINCT(em.meta_value)' : 'em.meta_value';
		$query[] = 'FROM ' . $wpdb->prefix . 'frm_item_metas em ';

		if ( ! $args['is_draft'] ) {
			// Join the entries table so drafts can be excluded below.
			$query[] = 'INNER JOIN ' . $wpdb->prefix . 'frm_items e ON (e.id=em.item_id)';
		}

		if ( is_numeric($field_id) ) {
			$query[] = $wpdb->prepare('WHERE em.field_id=%d', $field_id);
		} else {
			$query[] = $wpdb->prepare( 'LEFT JOIN ' . $wpdb->prefix . 'frm_fields fi ON (em.field_id = fi.id) WHERE fi.field_key=%s', $field_id );
		}

		if ( ! $args['is_draft'] ) {
			$query[] = 'AND e.is_draft=0';
		}

		if ( $args['value'] ) {
			$query[] = $wpdb->prepare(' AND meta_value=%s', $args['value']);
		}
		$query[] = $order . $limit;
	}

	/**
	 * Fetch the raw meta rows for an entry.
	 *
	 * @param int|string $entry_id
	 * @return array Row objects from frm_item_metas.
	 */
	public static function get_entry_meta_info( $entry_id ) {
		return FrmDb::get_results( 'frm_item_metas', array( 'item_id' => $entry_id ) );
	}

	/**
	 * Fetch meta rows joined with their field definitions.
	 *
	 * @param array|string $where Array where clause, or prepared SQL fragment.
	 * @param string $order_by SQL ORDER BY fragment.
	 * @param string $limit SQL LIMIT fragment (' LIMIT 1' returns a single row object).
	 * @param bool $stripslashes Unserialize and strip slashes from meta_value when true.
	 * @return array|object|null
	 */
	public static function getAll( $where = array(), $order_by = '', $limit = '', $stripslashes = false ) {
		global $wpdb;
		$query = 'SELECT it.*, fi.type as field_type, fi.field_key as field_key, fi.required as required, fi.form_id as field_form_id, fi.name as field_name, fi.options as fi_options FROM ' . $wpdb->prefix . 'frm_item_metas it LEFT OUTER JOIN ' . $wpdb->prefix . 'frm_fields fi ON it.field_id=fi.id' . FrmAppHelper::prepend_and_or_where(' WHERE ', $where) . $order_by . $limit;

		$cache_key = 'all_' . maybe_serialize( $where ) . $order_by . $limit;
		$results = FrmAppHelper::check_cache($cache_key, 'frm_entry', $query, ($limit == ' LIMIT 1' ? 'get_row' : 'get_results'));

		if ( ! $results || ! $stripslashes ) {
			return $results;
		}

		foreach ( $results as $k => $result ) {
			$results[ $k ]->meta_value = stripslashes_deep( maybe_unserialize( $result->meta_value ) );
			unset($k, $result);
		}

		return $results;
	}

	/**
	 * Fetch entry ids that have matching metas.
	 *
	 * @param array|string $where Array where clause, or prepared SQL fragment.
	 * @param string $order_by
	 * @param string $limit (' LIMIT 1' returns a single id).
	 * @param bool $unique Apply DISTINCT to the selected id column.
	 * @param array $args Supports 'is_draft', 'user_id', 'group_by', 'return_parent_id'.
	 * @return array|string|null
	 */
	public static function getEntryIds( $where = array(), $order_by = '', $limit = '', $unique = true, $args = array() ) {
		$defaults = array(
			'is_draft' => false,
			'user_id'  => '',
			'group_by' => '',
		);
		$args = wp_parse_args($args, $defaults);

		$query = array();
		self::get_ids_query($where, $order_by, $limit, $unique, $args, $query );
		$query = implode(' ', $query);

		$cache_key = 'ids_' . maybe_serialize( $where ) . $order_by . 'l' . $limit . 'u' . $unique . maybe_serialize( $args );
		$results = FrmAppHelper::check_cache($cache_key, 'frm_entry', $query, ($limit == ' LIMIT 1' ? 'get_var' : 'get_col'));
		return $results;
	}

	/**
	 * Build (into $query) the SQL pieces used by getEntryIds().
	 *
	 * @param string|array $where
	 * @param string $order_by
	 * @param string $limit
	 * @param bool $unique
	 * @param array $args
	 * @param array $query Accumulator of SQL fragments.
	 */
	private static function get_ids_query( $where, $order_by, $limit, $unique, $args, array &$query ) {
		global $wpdb;
		$query[] = 'SELECT';

		$defaults = array( 'return_parent_id' => false );
		$args = array_merge( $defaults, $args );

		if ( $args['return_parent_id'] ) {
			$query[] = $unique ? 'DISTINCT(e.parent_item_id)' : 'e.parent_item_id';
		} else {
			$query[] = $unique ? 'DISTINCT(it.item_id)' : 'it.item_id';
		}

		$query[] = 'FROM ' . $wpdb->prefix . 'frm_item_metas it LEFT OUTER JOIN ' . $wpdb->prefix . 'frm_fields fi ON it.field_id=fi.id';
		$query[] = 'INNER JOIN ' . $wpdb->prefix . 'frm_items e ON (e.id=it.item_id)';

		// Array form: merge draft/user filters into the where array and let
		// prepend_and_or_where() build the clause.
		if ( is_array($where) ) {
			if ( ! $args['is_draft'] ) {
				$where['e.is_draft'] = 0;
			} else if ( $args['is_draft'] == 1 ) {
				$where['e.is_draft'] = 1;
			}

			if ( ! empty($args['user_id']) ) {
				$where['e.user_id'] = $args['user_id'];
			}
			$query[] = FrmAppHelper::prepend_and_or_where(' WHERE ', $where) . $order_by . $limit;

			if ( $args['group_by'] ) {
				$query[] = ' GROUP BY ' . sanitize_text_field( $args['group_by'] );
			}
			return;
		}

		// String form: the caller supplies a prepared fragment; splice the
		// draft/user filters in before any GROUP BY.
		$draft_where = $user_where = '';
		if ( ! $args['is_draft'] ) {
			$draft_where = $wpdb->prepare( ' AND e.is_draft=%d', 0 );
		} else if ( $args['is_draft'] == 1 ) {
			$draft_where = $wpdb->prepare( ' AND e.is_draft=%d', 1 );
		}

		if ( ! empty($args['user_id']) ) {
			$user_where = $wpdb->prepare(' AND e.user_id=%d', $args['user_id']);
		}

		if ( strpos($where, ' GROUP BY ') ) {
			// don't inject WHERE filtering after GROUP BY
			$parts = explode(' GROUP BY ', $where);
			$where = $parts[0];
			$where .= $draft_where . $user_where;
			$where .= ' GROUP BY ' . $parts[1];
		} else {
			$where .= $draft_where . $user_where;
		}

		// The query has already been prepared
		$query[] = FrmAppHelper::prepend_and_or_where(' WHERE ', $where) . $order_by . $limit;
	}

	/**
	 * Search entry ids by meta value, either with a year/month/day array
	 * (LIKE-style date matching) or a scalar search term.
	 *
	 * NOTE(review): required $operator follows optional $field_id (PHP 8
	 * deprecation); $operator is interpolated into SQL — callers are expected
	 * to pass a fixed comparison operator, never user input.
	 *
	 * @param array|string $search
	 * @param int|string $field_id
	 * @param string $operator e.g. 'LIKE', '=', '<'.
	 * @return array Distinct matching item ids.
	 */
	public static function search_entry_metas( $search, $field_id = '', $operator ) {
		$cache_key = 'search_' . maybe_serialize( $search ) . $field_id . $operator;
		$results = wp_cache_get($cache_key, 'frm_entry');
		if ( false !== $results ) {
			return $results;
		}

		global $wpdb;
		if ( is_array( $search ) ) {
			$where = '';
			foreach ( $search as $field => $value ) {
				if ( $value <= 0 || ! in_array( $field, array( 'year', 'month', 'day' ) ) ) {
					continue;
				}

				// Build a wildcard pattern matching the position of each date
				// component within the stored value.
				switch ( $field ) {
					case 'year':
						$value = '%' . $value;
						break;
					case 'month':
						$value .= '%';
						break;
					case 'day':
						$value = '%' . $value . '%';
				}
				$where .= $wpdb->prepare(' meta_value ' . $operator . ' %s and', $value );
			}
			$where .= $wpdb->prepare(' field_id=%d', $field_id);
			$query = 'SELECT DISTINCT item_id FROM ' . $wpdb->prefix . 'frm_item_metas' . FrmAppHelper::prepend_and_or_where( ' WHERE ', $where );
		} else {
			if ( $operator == 'LIKE' ) {
				$search = '%' . $search . '%';
			}
			$query = $wpdb->prepare("SELECT DISTINCT item_id FROM {$wpdb->prefix}frm_item_metas WHERE meta_value {$operator} %s and field_id = %d", $search, $field_id);
		}

		$results = $wpdb->get_col($query, 0);
		FrmAppHelper::set_cache( $cache_key, $results, 'frm_entry' );

		return $results;
	}
}
apache-2.0
frapid/frapid
src/Libraries/Frapid.WebApi/DataAccess/FormRepository.cs
48987
using System; using System.Collections.Generic; using System.Data.Common; using System.Linq; using System.Text; using System.Threading.Tasks; using System.Web.Security; using Frapid.ApplicationState.Cache; using Frapid.Configuration; using Frapid.Configuration.Db; using Frapid.DataAccess; using Frapid.DataAccess.Models; using Frapid.DbPolicy; using Frapid.Framework.Extensions; using Frapid.i18n; using Frapid.Mapper; using Frapid.Mapper.Database; using Frapid.Mapper.Extensions; using Frapid.Mapper.Query.NonQuery; using Frapid.Mapper.Query.Select; using Serilog; namespace Frapid.WebApi.DataAccess { public class FormRepository : DbAccess, IFormRepository { public FormRepository(string schemaName, string tableName, string database, long loginId, int userId) { var me = AppUsers.GetCurrentAsync().GetAwaiter().GetResult(); this._ObjectNamespace = Sanitizer.SanitizeIdentifierName(schemaName); this._ObjectName = Sanitizer.SanitizeIdentifierName(tableName.Replace("-", "_")); this.LoginId = me.LoginId; this.OfficeId = me.OfficeId; this.UserId = me.UserId; this.Database = database; this.LoginId = loginId; this.UserId = userId; if (!string.IsNullOrWhiteSpace(this._ObjectNamespace) && !string.IsNullOrWhiteSpace(this._ObjectName)) { this.FullyQualifiedObjectName = this._ObjectNamespace + "." 
+ this._ObjectName; this.PrimaryKey = this.GetCandidateKey(); this.LookupField = this.GetLookupField(); this.NameColumn = this.GetNameColumn(); this.IsValid = true; } } public sealed override string _ObjectNamespace { get; } public sealed override string _ObjectName { get; } public string FullyQualifiedObjectName { get; set; } public string PrimaryKey { get; set; } public string IdentityColumn { get; set; } public string LookupField { get; set; } public string NameColumn { get; set; } public string Database { get; set; } public int UserId { get; set; } public bool IsValid { get; set; } public long LoginId { get; set; } public int OfficeId { get; set; } public async Task<long> CountAsync() { if (string.IsNullOrWhiteSpace(this.Database)) { return 0; } if (!this.SkipValidation) { if (!this.Validated) { await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false); } if (!this.HasAccess) { Log.Information($"Access to count entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}"); throw new UnauthorizedException(Resources.AccessIsDenied); } } string sql = $"SELECT COUNT(*) FROM {this.FullyQualifiedObjectName} WHERE DELETED = @0;"; try { return await Factory.ScalarAsync<long>(this.Database, sql, false).ConfigureAwait(false); } catch (DbException ex) { Log.Error(ex.Message); throw new DataAccessException(this.Database, ex.Message, ex); } } public async Task<IEnumerable<dynamic>> GetAllAsync() { if (string.IsNullOrWhiteSpace(this.Database)) { return null; } if (!this.SkipValidation) { if (!this.Validated) { await this.ValidateAsync(AccessTypeEnum.ExportData, this.LoginId, this.Database, false).ConfigureAwait(false); } if (!this.HasAccess) { Log.Information($"Access to the export entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}"); throw new UnauthorizedException(Resources.AccessIsDenied); } } string sql = $"SELECT * FROM 
{this.FullyQualifiedObjectName} WHERE deleted=@0"; if (!string.IsNullOrWhiteSpace(this.PrimaryKey)) { sql += $" ORDER BY {this.PrimaryKey};"; } try { return await Factory.GetAsync<dynamic>(this.Database, sql, false).ConfigureAwait(false); } catch (DbException ex) { Log.Error(ex.Message); throw new DataAccessException(this.Database, ex.Message, ex); } } public async Task<dynamic> GetAsync(object primaryKey) { if (string.IsNullOrWhiteSpace(this.Database)) { return null; } if (!this.SkipValidation) { if (!this.Validated) { await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false); } if (!this.HasAccess) { Log.Information($"Access to the get entity \"{this.FullyQualifiedObjectName}\" filtered by \"{this.PrimaryKey}\" with value {primaryKey} was denied to the user with Login ID {this.LoginId}"); throw new UnauthorizedException(Resources.AccessIsDenied); } } if (string.IsNullOrWhiteSpace(this.PrimaryKey)) { return null; } string sql = $"SELECT * FROM {this.FullyQualifiedObjectName} WHERE deleted=@0 AND {this.PrimaryKey}=@1;"; try { return (await Factory.GetAsync<dynamic>(this.Database, sql, false, primaryKey).ConfigureAwait(false)).FirstOrDefault(); } catch (DbException ex) { Log.Error(ex.Message); throw new DataAccessException(this.Database, ex.Message, ex); } } public async Task<dynamic> GetFirstAsync() { if (string.IsNullOrWhiteSpace(this.Database)) { return null; } if (!this.SkipValidation) { if (!this.Validated) { await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false); } if (!this.HasAccess) { Log.Information($"Access to the get the first record of entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}"); throw new UnauthorizedException(Resources.AccessIsDenied); } } var sql = new Sql($"SELECT * FROM {this.FullyQualifiedObjectName} WHERE deleted=@0", false); sql.OrderBy(this.PrimaryKey); 
sql.Append(FrapidDbServer.AddOffset(this.Database, "@0"), 0); sql.Append(FrapidDbServer.AddLimit(this.Database, "@0"), 1); try { return (await Factory.GetAsync<dynamic>(this.Database, sql).ConfigureAwait(false)).FirstOrDefault(); } catch (DbException ex) { Log.Error(ex.Message); throw new DataAccessException(this.Database, ex.Message, ex); } } public async Task<dynamic> GetPreviousAsync(object primaryKey) { if (string.IsNullOrWhiteSpace(this.Database)) { return null; } if (!this.SkipValidation) { if (!this.Validated) { await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false); } if (!this.HasAccess) { Log.Information($"Access to the get the previous entity of \"{this.FullyQualifiedObjectName}\" by \"{this.PrimaryKey}\" with value {primaryKey} was denied to the user with Login ID {this.LoginId}"); throw new UnauthorizedException(Resources.AccessIsDenied); } } var sql = new Sql($"SELECT * FROM {this.FullyQualifiedObjectName} WHERE deleted=@0", false); sql.And($"{this.PrimaryKey} < @0", primaryKey); sql.Append($"ORDER BY {this.PrimaryKey} DESC"); sql.Append(FrapidDbServer.AddOffset(this.Database, "@0"), 0); sql.Append(FrapidDbServer.AddLimit(this.Database, "@0"), 1); try { return (await Factory.GetAsync<dynamic>(this.Database, sql).ConfigureAwait(false)).FirstOrDefault(); } catch (DbException ex) { Log.Error(ex.Message); throw new DataAccessException(this.Database, ex.Message, ex); } } public async Task<dynamic> GetNextAsync(object primaryKey) { if (string.IsNullOrWhiteSpace(this.Database)) { return null; } if (!this.SkipValidation) { if (!this.Validated) { await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false); } if (!this.HasAccess) { Log.Information($"Access to the get the next entity of \"{this.FullyQualifiedObjectName}\" by \"{this.PrimaryKey}\" with value {primaryKey} was denied to the user with Login ID {this.LoginId}"); throw new 
UnauthorizedException(Resources.AccessIsDenied);
                }
            }

            var sql = new Sql($"SELECT * FROM {this.FullyQualifiedObjectName} WHERE deleted=@0", false);
            sql.And($"{this.PrimaryKey} > @0", primaryKey);
            sql.OrderBy(this.PrimaryKey);
            // Offset/limit syntax differs per backing database server.
            sql.Append(FrapidDbServer.AddOffset(this.Database, "@0"), 0);
            sql.Append(FrapidDbServer.AddLimit(this.Database, "@0"), 1);

            try
            {
                return (await Factory.GetAsync<dynamic>(this.Database, sql).ConfigureAwait(false)).FirstOrDefault();
            }
            catch (DbException ex)
            {
                Log.Error(ex.Message);
                throw new DataAccessException(this.Database, ex.Message, ex);
            }
        }

        /// <summary>
        /// Returns the last non-deleted row of the table, ordered by the primary key descending.
        /// Returns null when no database is configured.
        /// </summary>
        /// <exception cref="UnauthorizedException">Thrown when the login lacks read access.</exception>
        /// <exception cref="DataAccessException">Thrown when the query fails.</exception>
        public async Task<dynamic> GetLastAsync()
        {
            if (string.IsNullOrWhiteSpace(this.Database))
            {
                return null;
            }

            if (!this.SkipValidation)
            {
                if (!this.Validated)
                {
                    await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false);
                }

                if (!this.HasAccess)
                {
                    Log.Information($"Access to the get the last record of entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}");
                    throw new UnauthorizedException(Resources.AccessIsDenied);
                }
            }

            var sql = new Sql($"SELECT * FROM {this.FullyQualifiedObjectName} WHERE deleted=@0", false);
            sql.Append($"ORDER BY {this.PrimaryKey} DESC");
            sql.Append(FrapidDbServer.AddOffset(this.Database, "@0"), 0);
            sql.Append(FrapidDbServer.AddLimit(this.Database, "@0"), 1);

            try
            {
                return (await Factory.GetAsync<dynamic>(this.Database, sql).ConfigureAwait(false)).FirstOrDefault();
            }
            catch (DbException ex)
            {
                Log.Error(ex.Message);
                throw new DataAccessException(this.Database, ex.Message, ex);
            }
        }

        /// <summary>
        /// Returns every non-deleted row whose primary key is contained in <paramref name="primaryKeys"/>.
        /// Returns null when no database is configured.
        /// </summary>
        /// <param name="primaryKeys">Primary key values to look up.</param>
        /// <exception cref="UnauthorizedException">Thrown when the login lacks read access.</exception>
        /// <exception cref="DataAccessException">Thrown when the query fails.</exception>
        public async Task<IEnumerable<dynamic>> GetAsync(object[] primaryKeys)
        {
            if (string.IsNullOrWhiteSpace(this.Database))
            {
                return null;
            }

            if (!this.SkipValidation)
            {
                if (!this.Validated)
                {
                    await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false);
                }

                if (!this.HasAccess)
                {
                    Log.Information($"Access to entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}. Keys: {primaryKeys}.");
                    throw new UnauthorizedException(Resources.AccessIsDenied);
                }
            }

            // BUGFIX: these two strings previously lacked the '$' interpolation prefix,
            // so the literal text "{this.FullyQualifiedObjectName}" / "{this.PrimaryKey}"
            // was sent to the database instead of the actual table/key names.
            var sql = new Sql($"SELECT * FROM {this.FullyQualifiedObjectName}");
            sql.Where("deleted=@0", false);
            sql.Append("AND");
            sql.In($"\"{this.PrimaryKey}\" IN (@0)", primaryKeys);

            try
            {
                return await Factory.GetAsync<dynamic>(this.Database, sql).ConfigureAwait(false);
            }
            catch (DbException ex)
            {
                Log.Error(ex.Message);
                throw new DataAccessException(this.Database, ex.Message, ex);
            }
        }

        /// <summary>
        /// Returns the custom field definitions for this entity. When <paramref name="resourceId"/>
        /// is blank, the full definition view is returned; otherwise the per-resource
        /// stored procedure is invoked.
        /// </summary>
        /// <param name="resourceId">Resource id to fetch values for, or blank for definitions only.</param>
        /// <exception cref="UnauthorizedException">Thrown when the login lacks read access.</exception>
        /// <exception cref="DataAccessException">Thrown when the procedure call fails.</exception>
        public async Task<IEnumerable<CustomField>> GetCustomFieldsAsync(string resourceId)
        {
            if (string.IsNullOrWhiteSpace(this.Database))
            {
                return null;
            }

            if (!this.SkipValidation)
            {
                if (!this.Validated)
                {
                    await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false);
                }

                if (!this.HasAccess)
                {
                    Log.Information($"Access to get custom fields for entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}");
                    throw new UnauthorizedException(Resources.AccessIsDenied);
                }
            }

            string sql;

            if (string.IsNullOrWhiteSpace(resourceId))
            {
                // FullyQualifiedObjectName is built from sanitized identifiers in the constructor.
                sql = $"SELECT * FROM config.custom_field_definition_view WHERE table_name='{this.FullyQualifiedObjectName}' ORDER BY field_order;";
                return await Factory.GetAsync<CustomField>(this.Database, sql).ConfigureAwait(false);
            }

            sql = FrapidDbServer.GetProcedureCommand
                (
                    this.Database,
                    "config.get_custom_field_definition",
                    new[] { "@0", "@1" });

            try
            {
                return await Factory.GetAsync<CustomField>(this.Database, sql, this.FullyQualifiedObjectName, resourceId).ConfigureAwait(false);
            }
            catch (DbException ex)
            {
                Log.Error(ex.Message);
                throw new DataAccessException(this.Database, ex.Message, ex);
            }
        }

        public async Task<IEnumerable<DisplayField>> GetDisplayFieldsAsync()
        {
            if
(string.IsNullOrWhiteSpace(this.Database)) { return new List<DisplayField>(); } if (!this.SkipValidation) { if (!this.Validated) { await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false); } if (!this.HasAccess) { Log.Information($"Access to get display field for entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}", this.LoginId); throw new UnauthorizedException(Resources.AccessIsDenied); } } string sql = $"SELECT {this.PrimaryKey} AS \"key\", {this.NameColumn} as \"value\" FROM {this.FullyQualifiedObjectName} WHERE deleted=@0 ORDER BY 1;"; try { return await Factory.GetAsync<DisplayField>(this.Database, sql, false).ConfigureAwait(false); } catch (DbException ex) { Log.Error(ex.Message); throw new DataAccessException(this.Database, ex.Message, ex); } } public async Task<IEnumerable<DisplayField>> GetLookupFieldsAsync() { if (string.IsNullOrWhiteSpace(this.Database)) { return new List<DisplayField>(); } if (!this.SkipValidation) { if (!this.Validated) { await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false); } if (!this.HasAccess) { Log.Information($"Access to get display field for entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}", this.LoginId); throw new UnauthorizedException(Resources.AccessIsDenied); } } string sql = $"SELECT {this.LookupField} AS \"key\", {this.NameColumn} as \"value\" FROM {this.FullyQualifiedObjectName} WHERE deleted=@0 ORDER BY 1;"; try { return await Factory.GetAsync<DisplayField>(this.Database, sql, false).ConfigureAwait(false); } catch (DbException ex) { Log.Error(ex.Message); throw new DataAccessException(this.Database, ex.Message, ex); } } public async Task<object> AddOrEditAsync(Dictionary<string, object> item, List<CustomField> customFields) { if (string.IsNullOrWhiteSpace(this.Database)) { return null; } var primaryKeyValue = item.FirstOrDefault(x => 
x.Key.ToPascalCase().Equals(this.PrimaryKey.ToPascalCase())).Value; if (primaryKeyValue != null) { await this.UpdateAsync(item, primaryKeyValue, customFields).ConfigureAwait(false); } else { primaryKeyValue = await this.AddAsync(item, customFields, true).ConfigureAwait(false); } return primaryKeyValue; } public async Task<List<object>> BulkImportAsync(List<Dictionary<string, object>> items) { if (!this.SkipValidation) { if (!this.Validated) { await this.ValidateAsync(AccessTypeEnum.ImportData, this.LoginId, this.Database, false).ConfigureAwait(false); } if (!this.HasAccess) { Log.Information($"Access to import entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}."); throw new UnauthorizedException(Resources.AccessIsDenied); } } var result = new List<object>(); int line = 0; using (var db = DbProvider.GetDatabase(this.Database)) { try { await db.BeginTransactionAsync().ConfigureAwait(false); foreach (var item in items) { line++; item["AuditUserId"] = this.UserId; item["AuditTs"] = DateTimeOffset.UtcNow; item["Deleted"] = false; var primaryKeyValue = item[this.PrimaryKey.ToPascalCase()]; if (primaryKeyValue != null) { result.Add(primaryKeyValue); var sql = new Sql("UPDATE " + this.FullyQualifiedObjectName + " SET"); int index = 0; foreach (var prop in item.Where(x => !x.Key.Equals(this.PrimaryKey.ToPascalCase()))) { if (index > 0) { sql.Append(","); } sql.Append(Sanitizer.SanitizeIdentifierName(prop.Key.ToUnderscoreLowerCase()) + "=@0", prop.Value); index++; } sql.Where(this.PrimaryKey + "=@0", primaryKeyValue); await db.NonQueryAsync(sql).ConfigureAwait(false); } else { string columns = string.Join(",", item.Where(x => !x.Key.Equals(this.PrimaryKey.ToPascalCase())) .Select(x => Sanitizer.SanitizeIdentifierName(x.Key.ToUnderscoreLowerCase()))); string parameters = string.Join(",", Enumerable.Range(0, item.Count - 1).Select(x => "@" + x)); var arguments = item.Where(x => !x.Key.Equals(this.PrimaryKey.ToPascalCase())) .Select(x 
=> x.Value).ToArray(); var sql = new Sql("INSERT INTO " + this.FullyQualifiedObjectName + "(" + columns + ")"); sql.Append("SELECT " + parameters, arguments); sql.Append(FrapidDbServer.AddReturnInsertedKey(this.Database, this.PrimaryKey)); result.Add(await db.ScalarAsync<object>(sql).ConfigureAwait(false)); } } db.CommitTransaction(); return result; } catch (Exception ex) { db.RollbackTransaction(); string errorMessage = $"Error on line {line}. {ex.Message} "; throw new DataAccessException(errorMessage, ex); } } } public async Task UpdateAsync(Dictionary<string, object> item, object primaryKeyValue, List<CustomField> customFields) { if (string.IsNullOrWhiteSpace(this.Database)) { return; } if (!this.SkipValidation) { if (!this.Validated) { await this.ValidateAsync(AccessTypeEnum.Edit, this.LoginId, this.Database, false).ConfigureAwait(false); } if (!this.HasAccess) { Log.Information($"Access to edit entity \"{this.FullyQualifiedObjectName}\" with Primary Key {this.PrimaryKey} was denied to the user with Login ID {this.LoginId}."); throw new UnauthorizedException(Resources.AccessIsDenied); } } item["AuditUserId"] = this.UserId; item["AuditTs"] = DateTimeOffset.UtcNow; item["Deleted"] = false; using (var db = DbProvider.GetDatabase(this.Database)) { var sql = new Sql("UPDATE " + this.FullyQualifiedObjectName + " SET"); int index = 0; foreach (var prop in item.Where(x => !x.Key.Equals(this.IdentityColumn.Or(string.Empty).ToPascalCase()))) { if (index > 0) { sql.Append(","); } sql.Append(Sanitizer.SanitizeIdentifierName(prop.Key.ToUnderscoreLowerCase()) + "=@0", prop.Value); index++; } sql.Where(this.PrimaryKey + "=@0", primaryKeyValue); try { await db.NonQueryAsync(sql).ConfigureAwait(false); await this.AddCustomFieldsAsync(primaryKeyValue, customFields).ConfigureAwait(false); } catch (DbException ex) { Log.Error(ex.Message); throw new DataAccessException(this.Database, ex.Message, ex); } } } public async Task DeleteAsync(object primaryKey) { if 
(string.IsNullOrWhiteSpace(this.Database)) { return; } if (!this.SkipValidation) { if (!this.Validated) { await this.ValidateAsync(AccessTypeEnum.Delete, this.LoginId, this.Database, false).ConfigureAwait(false); } if (!this.HasAccess) { Log.Information($"Access to delete entity \"{this.FullyQualifiedObjectName}\" with Primary Key {this.PrimaryKey} was denied to the user with Login ID {this.LoginId}."); throw new UnauthorizedException(Resources.AccessIsDenied); } } string sql = $"UPDATE {this.FullyQualifiedObjectName} SET deleted = @0, audit_user_id=@1, audit_ts=@2 WHERE {this.PrimaryKey}=@3;"; try { await Factory.NonQueryAsync(this.Database, sql, true, this.UserId, DateTimeOffset.UtcNow, primaryKey).ConfigureAwait(false); } catch (DbException ex) { Log.Error(ex.Message); throw new DataAccessException(this.Database, ex.Message, ex); } } public async Task<IEnumerable<dynamic>> GetPaginatedResultAsync() { if (string.IsNullOrWhiteSpace(this.Database)) { return null; } if (!this.SkipValidation) { if (!this.Validated) { await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false); } if (!this.HasAccess) { Log.Information($"Access to the first page of the entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}."); throw new UnauthorizedException(Resources.AccessIsDenied); } } var sql = new Sql($"SELECT * FROM {this.FullyQualifiedObjectName} WHERE deleted=@0", false); sql.OrderBy(this.PrimaryKey); sql.Append(FrapidDbServer.AddOffset(this.Database, "@0"), 0); sql.Append(FrapidDbServer.AddLimit(this.Database, "@0"), Config.GetPageSize(this.Database)); try { return await Factory.GetAsync<dynamic>(this.Database, sql).ConfigureAwait(false); } catch (DbException ex) { Log.Error(ex.Message); throw new DataAccessException(this.Database, ex.Message, ex); } } public async Task<IEnumerable<dynamic>> GetPaginatedResultAsync(long pageNumber) { if (string.IsNullOrWhiteSpace(this.Database)) { return null; 
} if (!this.SkipValidation) { if (!this.Validated) { await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false); } if (!this.HasAccess) { Log.Information($"Access to Page #{pageNumber} of the entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}."); throw new UnauthorizedException(Resources.AccessIsDenied); } } long offset = (pageNumber - 1)* Config.GetPageSize(this.Database); string sql = $"SELECT * FROM {this.FullyQualifiedObjectName} WHERE deleted=@0 ORDER BY {this.PrimaryKey}"; sql += FrapidDbServer.AddOffset(this.Database, "@1"); sql += FrapidDbServer.AddLimit(this.Database, Config.GetPageSize(this.Database).ToString()); try { return await Factory.GetAsync<dynamic>(this.Database, sql, false, offset).ConfigureAwait(false); } catch (DbException ex) { Log.Error(ex.Message); throw new DataAccessException(this.Database, ex.Message, ex); } } public async Task<IEnumerable<Filter>> GetFiltersAsync(string tenant, string filterName) { using (var db = DbProvider.GetDatabase(this.Database)) { var sql = new Sql("SELECT * FROM config.filters"); sql.Where("object_name = @0", this.FullyQualifiedObjectName); sql.And("LOWER(filter_name)=@0", filterName.ToLower()); try { return await db.SelectAsync<Filter>(sql).ConfigureAwait(false); } catch (DbException ex) { Log.Error(ex.Message); throw new DataAccessException(this.Database, ex.Message, ex); } } } public async Task<long> CountWhereAsync(List<Filter> filters) { if (string.IsNullOrWhiteSpace(this.Database)) { return 0; } if (!this.SkipValidation) { if (!this.Validated) { await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false); } if (!this.HasAccess) { Log.Information($"Access to count entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}. 
Filters: {filters}."); throw new UnauthorizedException(Resources.AccessIsDenied); } } var sql = new Sql($"SELECT COUNT(*) FROM {this.FullyQualifiedObjectName} WHERE deleted = @0", false); FilterManager.AddFilters(ref sql, filters); try { return await Factory.ScalarAsync<long>(this.Database, sql).ConfigureAwait(false); } catch (DbException ex) { Log.Error(ex.Message); throw new DataAccessException(this.Database, ex.Message, ex); } } public async Task<IEnumerable<dynamic>> GetWhereAsync(long pageNumber, List<Filter> filters) { if (string.IsNullOrWhiteSpace(this.Database)) { return null; } if (!this.SkipValidation) { if (!this.Validated) { await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false); } if (!this.HasAccess) { Log.Information($"Access to Page #{pageNumber} of the filtered entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}. Filters: {filters}."); throw new UnauthorizedException(Resources.AccessIsDenied); } } long offset = (pageNumber - 1)* Config.GetPageSize(this.Database); var sql = new Sql($"SELECT * FROM {this.FullyQualifiedObjectName} WHERE deleted = @0", false); FilterManager.AddFilters(ref sql, filters); if (!string.IsNullOrWhiteSpace(this.PrimaryKey)) { sql.OrderBy(this.PrimaryKey); } if (pageNumber > 0) { sql.Append(FrapidDbServer.AddOffset(this.Database, "@0"), offset); sql.Append(FrapidDbServer.AddLimit(this.Database, "@0"), Config.GetPageSize(this.Database)); } try { var result = await Factory.GetAsync<dynamic>(this.Database, sql).ConfigureAwait(false); return result; } catch (DbException ex) { Log.Error(ex.Message); throw new DataAccessException(this.Database, ex.Message, ex); } } public async Task<long> CountFilteredAsync(string filterName) { if (string.IsNullOrWhiteSpace(this.Database)) { return 0; } if (!this.SkipValidation) { if (!this.Validated) { await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false); } 
if (!this.HasAccess) { Log.Information($"Access to count entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}. Filter: {filterName}."); throw new UnauthorizedException(Resources.AccessIsDenied); } } var filters = await this.GetFiltersAsync(this.Database, filterName).ConfigureAwait(false); var sql = new Sql($"SELECT COUNT(*) FROM {this.FullyQualifiedObjectName} WHERE deleted = @0", false); FilterManager.AddFilters(ref sql, filters.ToList()); try { return await Factory.ScalarAsync<long>(this.Database, sql).ConfigureAwait(false); } catch (DbException ex) { Log.Error(ex.Message); throw new DataAccessException(this.Database, ex.Message, ex); } } public async Task<IEnumerable<dynamic>> GetFilteredAsync(long pageNumber, string filterName) { if (string.IsNullOrWhiteSpace(this.Database)) { return null; } if (!this.SkipValidation) { if (!this.Validated) { await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false); } if (!this.HasAccess) { Log.Information( $"Access to Page #{pageNumber} of the filtered entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}. 
Filter: {filterName}.");
                    throw new UnauthorizedException(Resources.AccessIsDenied);
                }
            }

            var filters = await this.GetFiltersAsync(this.Database, filterName).ConfigureAwait(false);

            long offset = (pageNumber - 1)* Config.GetPageSize(this.Database);
            var sql = new Sql($"SELECT * FROM {this.FullyQualifiedObjectName} WHERE deleted = @0", false);
            FilterManager.AddFilters(ref sql, filters.ToList());

            if (!string.IsNullOrWhiteSpace(this.PrimaryKey))
            {
                sql.OrderBy(this.PrimaryKey);
            }

            if (pageNumber > 0)
            {
                sql.Append(FrapidDbServer.AddOffset(this.Database, "@0"), offset);
                sql.Append(FrapidDbServer.AddLimit(this.Database, "@0"), Config.GetPageSize(this.Database));
            }

            try
            {
                return await Factory.GetAsync<dynamic>(this.Database, sql).ConfigureAwait(false);
            }
            catch (DbException ex)
            {
                Log.Error(ex.Message);
                throw new DataAccessException(this.Database, ex.Message, ex);
            }
        }

        /// <summary>
        /// Inserts a new row built from <paramref name="item"/>, stamps audit columns,
        /// persists the supplied custom fields, and returns the new primary key value.
        /// Returns null when no database is configured.
        /// </summary>
        /// <param name="item">Column values keyed by PascalCase property names.</param>
        /// <param name="customFields">Custom field values to save alongside the row; may be null.</param>
        /// <param name="skipPrimaryKey">When true, the primary key column is excluded from the INSERT.</param>
        /// <exception cref="UnauthorizedException">Thrown when the login lacks create access.</exception>
        /// <exception cref="DataAccessException">Thrown when the insert fails.</exception>
        public async Task<object> AddAsync(Dictionary<string, object> item, List<CustomField> customFields, bool skipPrimaryKey)
        {
            if (string.IsNullOrWhiteSpace(this.Database))
            {
                return null;
            }

            if (!this.SkipValidation)
            {
                if (!this.Validated)
                {
                    await this.ValidateAsync(AccessTypeEnum.Create, this.LoginId, this.Database, false).ConfigureAwait(false);
                }

                if (!this.HasAccess)
                {
                    Log.Information($"Access to add entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}. {item}");
                    throw new UnauthorizedException(Resources.AccessIsDenied);
                }
            }

            item["AuditUserId"] = this.UserId;
            item["AuditTs"] = DateTimeOffset.UtcNow;
            item["Deleted"] = false;

            using (var db = DbProvider.GetDatabase(this.Database))
            {
                // BUGFIX: columns, placeholders, and arguments were each derived
                // independently, and the placeholder count assumed "item.Count - 1"
                // whenever skipPrimaryKey was true. If the dictionary did not actually
                // contain the primary key, the column and placeholder counts diverged
                // and the INSERT failed. Derive all three from one filtered list.
                var insertable = skipPrimaryKey
                    ? item.Where(x => !x.Key.ToUnderscoreLowerCase().Equals(this.PrimaryKey)).ToList()
                    : item.ToList();

                string columns = string.Join(",", insertable.Select(x => Sanitizer.SanitizeIdentifierName(x.Key).ToUnderscoreLowerCase()));
                string parameters = string.Join(",", Enumerable.Range(0, insertable.Count).Select(x => "@" + x));
                var arguments = insertable.Select(x => x.Value).ToArray();

                var sql = new Sql("INSERT INTO " + this.FullyQualifiedObjectName + "(" + columns + ")");
                sql.Append("SELECT " + parameters, arguments);
                sql.Append(FrapidDbServer.AddReturnInsertedKey(this.Database, this.PrimaryKey));

                try
                {
                    var primaryKeyValue = await db.ScalarAsync<object>(sql).ConfigureAwait(false);
                    await this.AddCustomFieldsAsync(primaryKeyValue, customFields).ConfigureAwait(false);
                    return primaryKeyValue;
                }
                catch (DbException ex)
                {
                    Log.Error(ex.Message);
                    throw new DataAccessException(this.Database, ex.Message, ex);
                }
            }
        }

        /// <summary>
        /// Updates a row using metadata-supplied primary key and identity column
        /// information, then delegates to the dictionary-based overload.
        /// </summary>
        public async Task UpdateAsync(Dictionary<string, object> item, object primaryKeyValue, List<CustomField> customFields, EntityView meta)
        {
            if (!string.IsNullOrWhiteSpace(meta.PrimaryKey))
            {
                this.PrimaryKey = meta.PrimaryKey;
            }

            this.IdentityColumn = meta.Columns.FirstOrDefault(x => x.IsSerial)?.ColumnName;

            try
            {
                await this.UpdateAsync(item, primaryKeyValue, customFields).ConfigureAwait(false);
            }
            catch (DbException ex)
            {
                Log.Error(ex.Message);
                throw new DataAccessException(this.Database, ex.Message, ex);
            }
        }

        /// <summary>
        /// Inserts a row using metadata-supplied primary key and identity column
        /// information, then delegates to the dictionary-based overload.
        /// </summary>
        public async Task<object> AddAsync(Dictionary<string, object> item, List<CustomField> customFields, bool skipPrimaryKey, EntityView meta)
        {
            if (!string.IsNullOrWhiteSpace(meta.PrimaryKey))
            {
                this.PrimaryKey = meta.PrimaryKey;
            }

            this.IdentityColumn = meta.Columns.FirstOrDefault(x => x.IsSerial)?.ColumnName;

            try
            {
                return await this.AddAsync(item, customFields, skipPrimaryKey).ConfigureAwait(false);
            }
            catch (DbException ex)
            {
                Log.Error(ex.Message);
                throw new DataAccessException(this.Database, ex.Message, ex);
            }
        }

        public async Task<IEnumerable<DisplayField>> GetDisplayFieldsAsync(List<Filter> filters)
        {
            if (string.IsNullOrWhiteSpace(this.Database))
            {
                return new List<DisplayField>();
            }

            if (!this.SkipValidation)
            {
                if
(!this.Validated)
                {
                    await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false);
                }

                if (!this.HasAccess)
                {
                    // The trailing LoginId argument was unused (the message is already
                    // interpolated, so Serilog had no placeholder for it); dropped.
                    Log.Information($"Access to get display field for entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}");
                    throw new UnauthorizedException(Resources.AccessIsDenied);
                }
            }

            var sql = new Sql($"SELECT {this.PrimaryKey} AS \"key\", {this.NameColumn} as \"value\" FROM {this.FullyQualifiedObjectName} WHERE deleted=@0 ", false);
            FilterManager.AddFilters(ref sql, filters);
            sql.OrderBy("1");

            try
            {
                return await Factory.GetAsync<DisplayField>(this.Database, sql).ConfigureAwait(false);
            }
            catch (DbException ex)
            {
                Log.Error(ex.Message);
                throw new DataAccessException(this.Database, ex.Message, ex);
            }
        }

        /// <summary>
        /// Returns filtered key/value pairs for lookup controls, using the lookup
        /// (code) column as the key and the name column as the value.
        /// </summary>
        /// <param name="filters">Additional filters applied to the query.</param>
        /// <exception cref="UnauthorizedException">Thrown when the login lacks read access.</exception>
        /// <exception cref="DataAccessException">Thrown when the query fails.</exception>
        public async Task<IEnumerable<DisplayField>> GetLookupFieldsAsync(List<Filter> filters)
        {
            if (string.IsNullOrWhiteSpace(this.Database))
            {
                return new List<DisplayField>();
            }

            if (!this.SkipValidation)
            {
                if (!this.Validated)
                {
                    await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false);
                }

                if (!this.HasAccess)
                {
                    Log.Information($"Access to get display field for entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}");
                    throw new UnauthorizedException(Resources.AccessIsDenied);
                }
            }

            var sql = new Sql($"SELECT {this.LookupField} AS \"key\", {this.NameColumn} as \"value\" FROM {this.FullyQualifiedObjectName} WHERE deleted=@0 ", false);
            FilterManager.AddFilters(ref sql, filters);
            sql.OrderBy("1");

            try
            {
                return await Factory.GetAsync<DisplayField>(this.Database, sql).ConfigureAwait(false);
            }
            catch (DbException ex)
            {
                Log.Error(ex.Message);
                throw new DataAccessException(this.Database, ex.Message, ex);
            }
        }

        /// <summary>
        /// Replaces the custom field values stored for this entity: first deletes all
        /// existing values for the entity's form, then inserts one row per supplied field.
        /// </summary>
        /// <param name="primaryKeyValue">Resource id the custom field values belong to.</param>
        /// <param name="customFields">Values to insert; null means "delete only".</param>
        /// <exception cref="DataAccessException">Thrown when a statement fails.</exception>
        public async Task AddCustomFieldsAsync(object primaryKeyValue, List<CustomField> customFields)
        {
            try
            {
                // BUGFIX: the '$' interpolation prefix only covered the first concatenated
                // segment, so the literal text "{this.FullyQualifiedObjectName}" was sent
                // to the database instead of the actual object name. The prefix now sits
                // on the segments that contain interpolation holes.
                string sql = "DELETE FROM config.custom_fields WHERE custom_field_setup_id IN(" +
                             "SELECT custom_field_setup_id " +
                             "FROM config.custom_field_setup " +
                             $"WHERE form_name=config.get_custom_field_form_name('{this.FullyQualifiedObjectName}')" +
                             ");";

                await Factory.NonQueryAsync(this.Database, sql).ConfigureAwait(false);

                if (customFields == null)
                {
                    return;
                }

                foreach (var field in customFields)
                {
                    sql = "INSERT INTO config.custom_fields(custom_field_setup_id, resource_id, value) " +
                          $"SELECT config.get_custom_field_setup_id_by_table_name('{this.FullyQualifiedObjectName}', @0), " +
                          "@1, @2;";

                    await Factory.NonQueryAsync(this.Database, sql, field.FieldName, primaryKeyValue, field.Value)
                        .ConfigureAwait(false);
                }
            }
            catch (DbException ex)
            {
                Log.Error(ex.Message);
                throw new DataAccessException(this.Database, ex.Message, ex);
            }
        }

        /// <summary>Converts the route-style object name (dashes) to a table name (underscores).</summary>
        private string GetTableName()
        {
            string tableName = this._ObjectName.Replace("-", "_");
            return tableName;
        }

        /// <summary>Guesses the primary key as the singular object name plus "_id", sanitized.</summary>
        private string GetCandidateKey()
        {
            string candidateKey = Inflector.MakeSingular(this._ObjectName);

            if (!string.IsNullOrWhiteSpace(candidateKey))
            {
                candidateKey += "_id";
            }

            candidateKey = candidateKey ?? "";
            return Sanitizer.SanitizeIdentifierName(candidateKey);
        }

        /// <summary>Guesses the lookup column as the singular object name plus "_code", sanitized.</summary>
        private string GetLookupField()
        {
            string candidateKey = Inflector.MakeSingular(this._ObjectName);

            if (!string.IsNullOrWhiteSpace(candidateKey))
            {
                candidateKey += "_code";
            }

            // Collapse the duplicated suffix when the object name already ends in "_code".
            candidateKey = candidateKey?.Replace("_code_code", "_code") ?? "";
            return Sanitizer.SanitizeIdentifierName(candidateKey);
        }

        /// <summary>Guesses the display-name column as the singular object name plus "_name".</summary>
        private string GetNameColumn()
        {
            string nameKey = Inflector.MakeSingular(this._ObjectName);

            if (!string.IsNullOrWhiteSpace(nameKey))
            {
                nameKey += "_name";
            }

            // Collapse the duplicated suffix when the object name already ends in "_name".
            return nameKey?.Replace("_name_name", "_name") ?? "";
        }

        /// <summary>
        /// Returns schema metadata (columns, keys) for this entity.
        /// Returns null when no database is configured.
        /// </summary>
        /// <exception cref="UnauthorizedException">Thrown when the login lacks read access.</exception>
        /// <exception cref="DataAccessException">Thrown when the metadata query fails.</exception>
        public async Task<EntityView> GetMetaAsync()
        {
            if (string.IsNullOrWhiteSpace(this.Database))
            {
                return null;
            }

            if (!this.SkipValidation)
            {
                if (!this.Validated)
                {
                    await this.ValidateAsync(AccessTypeEnum.Read, this.LoginId, this.Database, false).ConfigureAwait(false);
                }

                if (!this.HasAccess)
                {
                    Log.Information($"Access to view meta information on entity \"{this.FullyQualifiedObjectName}\" was denied to the user with Login ID {this.LoginId}");
                    throw new UnauthorizedException(Resources.AccessIsDenied);
                }
            }

            try
            {
                return await EntityView.GetAsync(this.Database, this.PrimaryKey, this._ObjectNamespace, this.GetTableName()).ConfigureAwait(false);
            }
            catch (DbException ex)
            {
                Log.Error(ex.Message);
                throw new DataAccessException(this.Database, ex.Message, ex);
            }
        }

        /// <summary>
        /// Records a verification decision (status, user, timestamp, reason) on the row
        /// identified by <paramref name="model"/>'s primary key value. No-op when the
        /// primary key value is null.
        /// </summary>
        /// <exception cref="UnauthorizedException">Thrown when the login lacks verify access.</exception>
        /// <exception cref="DataAccessException">Thrown when the update fails.</exception>
        public async Task VerifyAsync(Verification model)
        {
            if (!this.SkipValidation)
            {
                if (!this.Validated)
                {
                    await this.ValidateAsync(AccessTypeEnum.Verify, this.LoginId, this.Database, false).ConfigureAwait(false);
                }

                if (!this.HasAccess)
                {
                    // BUGFIX: the message previously interpolated the key's column name
                    // instead of its value, and passed two extra arguments that Serilog
                    // had no placeholders for.
                    Log.Information($"Access to verify entity \"{this.FullyQualifiedObjectName}\" with Primary Key {model.PrimaryKeyValue} was denied to the user with Login ID {this.LoginId}.");
                    throw new UnauthorizedException(Resources.AccessIsDenied);
                }
            }

            if (model.PrimaryKeyValue != null)
            {
                var sql = new Sql($"UPDATE {this.FullyQualifiedObjectName}");
                sql.Append("SET");
                sql.Append("verification_status_id=@0, ", model.VerificationStatusId);
                sql.Append("verified_by_user_id=@0, ", this.UserId);
                sql.Append("verified_on=@0, ", DateTimeOffset.UtcNow);
                sql.Append("verification_reason=@0 ", model.Reason);
                sql.Where($"{this.PrimaryKey}=@0", model.PrimaryKeyValue);

                try
                {
                    await Factory.NonQueryAsync(this.Database, sql).ConfigureAwait(false);
                }
                catch (DbException ex)
                {
                    Log.Error(ex.Message);
                    throw new DataAccessException(this.Database, ex.Message, ex);
                }
            }
        }
    }
}
apache-2.0
mhajas/keycloak
testsuite/integration-arquillian/tests/base/src/main/java/org/keycloak/testsuite/adapter/page/PhotozClientAuthzTestApp.java
11062
/* * Copyright 2016 Red Hat, Inc. and/or its affiliates * and other contributors as indicated by the @author tags. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.keycloak.testsuite.adapter.page; import org.jboss.arquillian.container.test.api.OperateOnDeployment; import org.jboss.arquillian.drone.api.annotation.Drone; import org.jboss.arquillian.graphene.page.Page; import org.jboss.arquillian.test.api.ArquillianResource; import org.keycloak.testsuite.auth.page.login.OIDCLogin; import org.keycloak.testsuite.page.AbstractPageWithInjectedUrl; import org.keycloak.testsuite.util.JavascriptBrowser; import org.keycloak.testsuite.util.UIUtils; import org.keycloak.testsuite.util.URLUtils; import org.keycloak.testsuite.util.javascript.JavascriptStateValidator; import org.keycloak.testsuite.util.javascript.JavascriptTestExecutorWithAuthorization; import org.keycloak.testsuite.util.javascript.ResponseValidator; import org.keycloak.testsuite.util.javascript.XMLHttpRequest; import org.openqa.selenium.By; import org.openqa.selenium.WebDriver; import org.openqa.selenium.WebElement; import org.openqa.selenium.support.FindBy; import java.net.URL; import static org.junit.Assert.assertEquals; import static org.keycloak.testsuite.util.UIUtils.clickLink; import static org.keycloak.testsuite.util.WaitUtils.pause; import static org.keycloak.testsuite.util.WaitUtils.waitForPageToLoad; import static org.keycloak.testsuite.util.WaitUtils.waitUntilElement; /** * @author <a 
href="mailto:psilva@redhat.com">Pedro Igor</a> * @author Vaclav Muzikar <vmuzikar@redhat.com> */ public class PhotozClientAuthzTestApp extends AbstractPageWithInjectedUrl { public static final String DEPLOYMENT_NAME = "photoz-html5-client"; public static final int WAIT_AFTER_OPERATION = 1000; @ArquillianResource @OperateOnDeployment(DEPLOYMENT_NAME) private URL url; @Drone @JavascriptBrowser protected WebDriver driver; @Page @JavascriptBrowser protected OIDCLogin loginPage; @FindBy(xpath = "//a[@ng-click = 'Identity.logout()']") @JavascriptBrowser private WebElement signOutButton; @FindBy(id = "entitlement") @JavascriptBrowser private WebElement entitlement; @FindBy(id = "entitlements") @JavascriptBrowser private WebElement entitlements; @FindBy(id = "get-all-resources") @JavascriptBrowser private WebElement viewAllAlbums; @FindBy(id = "output") @JavascriptBrowser private WebElement output; private JavascriptTestExecutorWithAuthorization testExecutor; private String apiUrl; public void setTestExecutorPlayground(JavascriptTestExecutorWithAuthorization executor, String apiUrl) { testExecutor = executor; this.apiUrl = apiUrl; } public void createAlbum(String name) { createAlbum(name, false); } public void createAlbum(String name, boolean managed) { createAlbum(name, managed, false, null); } public void createAlbum(String name, boolean managed, boolean invalidUser, ResponseValidator validator) { testExecutor.sendXMLHttpRequest( XMLHttpRequest.create() .method("POST") .url(apiUrl + "/album" + (invalidUser ? 
"?user=invalidUser" : "")) .content("JSON.stringify(JSON.parse('{\"name\" : \"" + name + "\", \"userManaged\": " + Boolean.toString(managed) + " }'))") .addHeader("Content-Type", "application/json; charset=UTF-8") , validator); } public void createAlbumWithInvalidUser(String name, ResponseValidator validator) { createAlbum(name, false, true, validator); } @Override public URL getInjectedUrl() { return this.url; } public void deleteAlbum(String name, ResponseValidator validator) { testExecutor.sendXMLHttpRequest( XMLHttpRequest.create() .method("DELETE") .url(apiUrl + "/album/" + name + "/") // it doesn't work without ending "/" , validator); } public void navigateToAdminAlbum(ResponseValidator validator) { testExecutor.sendXMLHttpRequest( XMLHttpRequest.create() .method("GET") .addHeader("Accept", "application/json") .url(apiUrl + "/admin/album") , validator); } public void logOut() { navigateTo(); waitUntilElement(signOutButton).is().clickable(); // Sometimes doesn't work in PhantomJS! 
clickLink(signOutButton); } public void requestEntitlement(JavascriptStateValidator validator) { testExecutor.executeAsyncScript("var callback = arguments[arguments.length - 1];" + "window.authorization.entitlement('photoz-restful-api', {" + " \"permissions\": [" + " {" + " \"id\" : \"Album Resource\"" + " }" + " ]" + "}).then(function (rpt) {" + " callback(JSON.stringify(jwt_decode(rpt), null, ' '));" + "});", validator); } public void requestEntitlements(JavascriptStateValidator validator) { testExecutor.executeAsyncScript("var callback = arguments[arguments.length - 1];" + "window.authorization.entitlement('photoz-restful-api', {}).then(function (rpt) {" + " callback(JSON.stringify(jwt_decode(rpt), null, ' '));" + "});", validator); } private void waitForDenial() { waitUntilElement(output).text().contains("You can not access"); } private void waitForNotDenial() { waitUntilElement(output).text().not().contains("You can not access"); } public void viewAllAlbums() { viewAllAlbums.click(); pause(WAIT_AFTER_OPERATION); } public void viewAlbum(String name, ResponseValidator validator) { testExecutor.sendXMLHttpRequest( XMLHttpRequest.create() .method("GET") .addHeader("Accept", "application/json") .url(apiUrl + "/album/" + name + "/") , validator); } public void accountPage() { testExecutor.openAccountPage(null); } public void accountMyResources() { accountPage(); WebElement myResources = driver.findElement(By.xpath("//a[text() = 'My Resources']")); waitUntilElement(myResources).is().clickable(); myResources.click(); } public void accountMyResource(String name) { accountMyResources(); WebElement myResource = driver.findElement(By.id("detail-" + name)); waitUntilElement(myResource).is().clickable(); myResource.click(); } public void accountGrantResource(String name, String requester) { accountMyResources(); grantResource(name, requester); } public void grantResource(String name, String requester) { WebElement grantResource = driver.findElement(By.id("grant-" + name + 
"-" + requester)); waitUntilElement(grantResource).is().clickable(); grantResource.click(); } public void accountGrantRemoveScope(String name, String requester, String scope) { accountMyResources(); WebElement grantRemoveScope = driver.findElement(By.id("grant-remove-scope-" + name + "-" + requester + "-" + scope)); waitUntilElement(grantRemoveScope).is().clickable(); grantRemoveScope.click(); } public void accountRevokeResource(String name, String requester) { accountMyResource(name); revokeResource(name, requester); } public void revokeResource(String name, String requester) { WebElement revokeResource = driver.findElement(By.id("revoke-" + name + "-" + requester)); waitUntilElement(revokeResource).is().clickable(); revokeResource.click(); } public void accountShareResource(String name, String user) { accountMyResource(name); shareResource(user); } public void accountShareRemoveScope(String name, String user, String scope) { accountMyResource(name); WebElement shareRemoveScope = driver.findElement(By.id("share-remove-scope-" + name + "-" + scope)); waitUntilElement(shareRemoveScope).is().clickable(); shareRemoveScope.click(); shareResource(user); } public void shareResource(String user) { WebElement userIdInput = driver.findElement(By.id("user_id")); UIUtils.setTextInputValue(userIdInput, user); pause(200); // We need to wait a bit for the form to "accept" the input (otherwise it registers the input as empty) waitUntilElement(userIdInput).attribute(UIUtils.VALUE_ATTR_NAME).contains(user); WebElement shareButton = driver.findElement(By.id("share-button")); waitUntilElement(shareButton).is().clickable(); shareButton.click(); } public void assertError() { assertEquals("We are sorry...", driver.findElement(By.id("kc-page-title")).getText()); } public void accountDenyResource(String name) { accountMyResource(name); WebElement denyLink = driver.findElement(By.linkText("Deny")); waitUntilElement(denyLink).is().clickable(); denyLink.click(); waitForPageToLoad(); } public 
void requestResourceProtectedAnyScope(ResponseValidator validator) { testExecutor.sendXMLHttpRequest( XMLHttpRequest.create() .method("GET") .url(apiUrl + "/scope-any") , validator); } public void requestResourceProtectedAllScope(ResponseValidator validator) { testExecutor.sendXMLHttpRequest( XMLHttpRequest.create() .method("GET") .url(apiUrl + "/scope-all") , validator); } public WebElement getOutput() { return output; } @Override public void navigateTo() { driver.navigate().to(toString()); waitForPageToLoad(); } @Override public boolean isCurrent() { return URLUtils.currentUrlStartsWith(toString()); } public void executeScript(String script) { testExecutor.executeScript(script); } }
apache-2.0
mogoweb/webkit_for_android5.1
v8/src/lithium.cc
7667
// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "v8.h" #include "lithium.h" namespace v8 { namespace internal { void LOperand::PrintTo(StringStream* stream) { LUnallocated* unalloc = NULL; switch (kind()) { case INVALID: stream->Add("(0)"); break; case UNALLOCATED: unalloc = LUnallocated::cast(this); stream->Add("v%d", unalloc->virtual_register()); switch (unalloc->policy()) { case LUnallocated::NONE: break; case LUnallocated::FIXED_REGISTER: { const char* register_name = Register::AllocationIndexToString(unalloc->fixed_index()); stream->Add("(=%s)", register_name); break; } case LUnallocated::FIXED_DOUBLE_REGISTER: { const char* double_register_name = DoubleRegister::AllocationIndexToString(unalloc->fixed_index()); stream->Add("(=%s)", double_register_name); break; } case LUnallocated::FIXED_SLOT: stream->Add("(=%dS)", unalloc->fixed_index()); break; case LUnallocated::MUST_HAVE_REGISTER: stream->Add("(R)"); break; case LUnallocated::WRITABLE_REGISTER: stream->Add("(WR)"); break; case LUnallocated::SAME_AS_FIRST_INPUT: stream->Add("(1)"); break; case LUnallocated::ANY: stream->Add("(-)"); break; } break; case CONSTANT_OPERAND: stream->Add("[constant:%d]", index()); break; case STACK_SLOT: stream->Add("[stack:%d]", index()); break; case DOUBLE_STACK_SLOT: stream->Add("[double_stack:%d]", index()); break; case REGISTER: stream->Add("[%s|R]", Register::AllocationIndexToString(index())); break; case DOUBLE_REGISTER: stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index())); break; case ARGUMENT: stream->Add("[arg:%d]", index()); break; } } #define DEFINE_OPERAND_CACHE(name, type) \ name* name::cache = NULL; \ void name::SetUpCache() { \ if (cache) return; \ cache = new name[kNumCachedOperands]; \ for (int i = 0; i < kNumCachedOperands; i++) { \ cache[i].ConvertTo(type, i); \ } \ } \ DEFINE_OPERAND_CACHE(LConstantOperand, CONSTANT_OPERAND) DEFINE_OPERAND_CACHE(LStackSlot, STACK_SLOT) DEFINE_OPERAND_CACHE(LDoubleStackSlot, DOUBLE_STACK_SLOT) DEFINE_OPERAND_CACHE(LRegister, REGISTER) 
DEFINE_OPERAND_CACHE(LDoubleRegister, DOUBLE_REGISTER) #undef DEFINE_OPERAND_CACHE void LOperand::SetUpCaches() { LConstantOperand::SetUpCache(); LStackSlot::SetUpCache(); LDoubleStackSlot::SetUpCache(); LRegister::SetUpCache(); LDoubleRegister::SetUpCache(); } bool LParallelMove::IsRedundant() const { for (int i = 0; i < move_operands_.length(); ++i) { if (!move_operands_[i].IsRedundant()) return false; } return true; } void LParallelMove::PrintDataTo(StringStream* stream) const { bool first = true; for (int i = 0; i < move_operands_.length(); ++i) { if (!move_operands_[i].IsEliminated()) { LOperand* source = move_operands_[i].source(); LOperand* destination = move_operands_[i].destination(); if (!first) stream->Add(" "); first = false; if (source->Equals(destination)) { destination->PrintTo(stream); } else { destination->PrintTo(stream); stream->Add(" = "); source->PrintTo(stream); } stream->Add(";"); } } } void LEnvironment::PrintTo(StringStream* stream) { stream->Add("[id=%d|", ast_id()); stream->Add("[parameters=%d|", parameter_count()); stream->Add("[arguments_stack_height=%d|", arguments_stack_height()); for (int i = 0; i < values_.length(); ++i) { if (i != 0) stream->Add(";"); if (values_[i] == NULL) { stream->Add("[hole]"); } else { values_[i]->PrintTo(stream); } } stream->Add("]"); } void LPointerMap::RecordPointer(LOperand* op) { // Do not record arguments as pointers. if (op->IsStackSlot() && op->index() < 0) return; ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot()); pointer_operands_.Add(op); } void LPointerMap::RemovePointer(LOperand* op) { // Do not record arguments as pointers. if (op->IsStackSlot() && op->index() < 0) return; ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot()); for (int i = 0; i < pointer_operands_.length(); ++i) { if (pointer_operands_[i]->Equals(op)) { pointer_operands_.Remove(i); --i; } } } void LPointerMap::RecordUntagged(LOperand* op) { // Do not record arguments as pointers. 
if (op->IsStackSlot() && op->index() < 0) return; ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot()); untagged_operands_.Add(op); } void LPointerMap::PrintTo(StringStream* stream) { stream->Add("{"); for (int i = 0; i < pointer_operands_.length(); ++i) { if (i != 0) stream->Add(";"); pointer_operands_[i]->PrintTo(stream); } stream->Add("} @%d", position()); } int ElementsKindToShiftSize(ElementsKind elements_kind) { switch (elements_kind) { case EXTERNAL_BYTE_ELEMENTS: case EXTERNAL_PIXEL_ELEMENTS: case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: return 0; case EXTERNAL_SHORT_ELEMENTS: case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: return 1; case EXTERNAL_INT_ELEMENTS: case EXTERNAL_UNSIGNED_INT_ELEMENTS: case EXTERNAL_FLOAT_ELEMENTS: return 2; case EXTERNAL_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: return 3; case FAST_SMI_ONLY_ELEMENTS: case FAST_ELEMENTS: case DICTIONARY_ELEMENTS: case NON_STRICT_ARGUMENTS_ELEMENTS: return kPointerSizeLog2; } UNREACHABLE(); return 0; } } } // namespace v8::internal
apache-2.0
chrisvfabio/hospital-java
src/com/chrisvfabio/app/Staff.java
1009
/* * Copyright (c) 2014 Chris Fabio * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.chrisvfabio.app; public class Staff extends Person { private static long sequenceId = 1; private long staffId; public Staff(String firstName, String lastName) { super(firstName, lastName); this.staffId = sequenceId++; } public long getStaffId() { return staffId; } public void setStaffId(long staffId) { this.staffId = staffId; } }
apache-2.0
Shopify/collins
test/ApplicationSpecification.scala
839
package test import org.specs2._ import specification._ import play.api.Play import play.api.test.FakeApplication import play.api.test.Helpers._ trait ApplicationSpecification extends mutable.Specification with ResourceFinder { private def collinsDatabase = Map[String,String]( "db.collins.driver" -> "org.h2.Driver", "db.collins.url" -> "jdbc:h2:mem:play-test-%d;IGNORECASE=TRUE".format(scala.util.Random.nextInt) ) def applicationSetup = { val app = FakeApplication( additionalConfiguration = collinsDatabase, additionalPlugins = Seq("play.api.db.evolutions.EvolutionsPlugin") ) Play.start(app) evolutionFor("collins") } def applicationTeardown = { Play.stop() } override def map(fs: => Fragments) = Step(applicationSetup) ^ super.map(fs) ^ Step(applicationTeardown) }
apache-2.0
anastasia-tarasova/indy-sdk
libindy/src/services/blob_storage/default_reader.rs
2117
extern crate digest; extern crate sha2; extern crate rust_base58; use self::digest::{FixedOutput, Input}; use self::sha2::Sha256; use self::rust_base58::ToBase58; use super::{ReadableBlob, Reader, ReaderType}; use errors::prelude::*; use serde_json; use std::fs::File; use std::io::{Read, Seek, SeekFrom}; use std::path::PathBuf; pub struct DefaultReader { file: File, hash: Vec<u8>, } #[derive(Serialize, Deserialize)] struct DefaultReaderConfig { base_dir: String, } impl ReaderType for DefaultReaderType { fn open(&self, config: &str) -> IndyResult<Box<Reader>> { let config: DefaultReaderConfig = serde_json::from_str(config) .to_indy(IndyErrorKind::InvalidStructure, "Can't deserialize DefaultReaderConfig")?; Ok(Box::new(config)) } } impl Reader for DefaultReaderConfig { fn open(&self, hash: &[u8], _location: &str) -> IndyResult<Box<ReadableBlob>> { let mut path = PathBuf::from(&self.base_dir); path.push(hash.to_base58()); let file = File::open(path)?; Ok(Box::new(DefaultReader { file, hash: hash.to_owned() })) } } impl ReadableBlob for DefaultReader { fn verify(&mut self) -> IndyResult<bool> { self.file.seek(SeekFrom::Start(0))?; let mut hasher = Sha256::default(); let mut buf = [0u8; 1024]; loop { let sz = self.file.read(&mut buf)?; if sz == 0 { return Ok(hasher.fixed_result().as_slice().eq(self.hash.as_slice())); } hasher.process(&buf[0..sz]) } } fn close(&self) -> IndyResult<()> { /* nothing to do */ Ok(()) } fn read(&mut self, size: usize, offset: usize) -> IndyResult<Vec<u8>> { let mut buf = vec![0u8; size]; self.file.seek(SeekFrom::Start(offset as u64))?; let act_size = self.file.read(buf.as_mut_slice())?; buf.truncate(act_size); Ok(buf) } } pub struct DefaultReaderType {} impl DefaultReaderType { pub fn new() -> Self { DefaultReaderType {} } }
apache-2.0
dropbox/DropboxBusinessAdminTool
Source/DfBAdminToolkit/Presenter/IDataMigrationPresenter.cs
148
namespace DfBAdminToolkit.Presenter { public interface IDataMigrationPresenter : IPresenter { void UpdateSettings(); } }
apache-2.0
levi-h/aluminumproject
tests/src/test/java/com/googlecode/aluminumproject/context/g11n/EnvironmentBasedDateFormatProviderTest.java
4324
/* * Copyright 2010-2012 Aluminum project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.googlecode.aluminumproject.context.g11n; import com.googlecode.aluminumproject.context.Context; import com.googlecode.aluminumproject.context.DefaultContext; import java.text.DateFormat; import java.util.Date; import java.util.Locale; import java.util.TimeZone; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; @SuppressWarnings("javadoc") @Test(groups = {"libraries", "libraries-g11n", "fast"}) public class EnvironmentBasedDateFormatProviderTest { private DateFormatProvider dateFormatProvider; private Context context; private Date date; @BeforeMethod public void createContextAndDate() { LocaleProvider localeProvider = new ConstantLocaleProvider(new Locale("es")); TimeZoneProvider timeZoneProvider = new ConstantTimeZoneProvider(TimeZone.getTimeZone("GMT")); dateFormatProvider = new EnvironmentBasedDateFormatProvider("yyyyMMdd HHmm"); GlobalisationContext globalisationContext = new GlobalisationContext(localeProvider, timeZoneProvider, null, dateFormatProvider, null); context = new DefaultContext(); context.addImplicitObject(GlobalisationContext.GLOBALISATION_CONTEXT, globalisationContext); date = new Date(2713500000L); } public void shortDateTypeShouldResultInShortDateProvider() { DateFormat dateFormat = dateFormatProvider.provide(DateFormatType.SHORT_DATE, context); assert dateFormat != null; assert dateFormat.format(date).equals("1/02/70"); } 
public void shortTimeTypeShouldResultInShortTimeProvider() { DateFormat dateFormat = dateFormatProvider.provide(DateFormatType.SHORT_TIME, context); assert dateFormat != null; assert dateFormat.format(date).equals("9:45"); } public void shortDateAndTimeTypeShouldResultInShortDateAndTimeProvider() { DateFormat dateFormat = dateFormatProvider.provide(DateFormatType.SHORT_DATE_AND_TIME, context); assert dateFormat != null; assert dateFormat.format(date).equals("1/02/70 9:45"); } public void mediumDateTypeShouldResultInMediumDateProvider() { DateFormat dateFormat = dateFormatProvider.provide(DateFormatType.MEDIUM_DATE, context); assert dateFormat != null; assert dateFormat.format(date).equals("01-feb-1970"); } public void mediumTimeTypeShouldResultInMediumTimeProvider() { DateFormat dateFormat = dateFormatProvider.provide(DateFormatType.MEDIUM_TIME, context); assert dateFormat != null; assert dateFormat.format(date).equals("9:45:00"); } public void mediumDateAndTimeTypeShouldResultInMediumDateAndTimeProvider() { DateFormat dateFormat = dateFormatProvider.provide(DateFormatType.MEDIUM_DATE_AND_TIME, context); assert dateFormat != null; assert dateFormat.format(date).equals("01-feb-1970 9:45:00"); } public void longDateTypeShouldResultInLongDateProvider() { DateFormat dateFormat = dateFormatProvider.provide(DateFormatType.LONG_DATE, context); assert dateFormat != null; assert dateFormat.format(date).equals("1 de febrero de 1970"); } public void longTimeTypeShouldResultInLongTimeProvider() { DateFormat dateFormat = dateFormatProvider.provide(DateFormatType.LONG_TIME, context); assert dateFormat != null; assert dateFormat.format(date).equals("9:45:00 GMT"); } public void longDateAndTimeTypeShouldResultInLongDateAndTimeProvider() { DateFormat dateFormat = dateFormatProvider.provide(DateFormatType.LONG_DATE_AND_TIME, context); assert dateFormat != null; assert dateFormat.format(date).equals("1 de febrero de 1970 9:45:00 GMT"); } public void 
customTypeShouldResultInDateProviderWithCustomPattern() { DateFormat dateFormat = dateFormatProvider.provide(DateFormatType.CUSTOM, context); assert dateFormat != null; assert dateFormat.format(date).equals("19700201 0945"); } }
apache-2.0
anthonydahanne/terracotta-platform
management/monitoring-service/src/main/java/org/terracotta/management/service/monitoring/DefaultPassiveEntityMonitoringService.java
3734
/* * Copyright Terracotta, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.terracotta.management.service.monitoring; import org.terracotta.entity.ClientDescriptor; import org.terracotta.entity.PlatformConfiguration; import org.terracotta.management.model.call.ContextualReturn; import org.terracotta.management.model.capabilities.Capability; import org.terracotta.management.model.cluster.ClientIdentifier; import org.terracotta.management.model.cluster.ManagementRegistry; import org.terracotta.management.model.context.ContextContainer; import org.terracotta.management.model.notification.ContextualNotification; import org.terracotta.management.model.stats.ContextualStatistics; import org.terracotta.monitoring.IMonitoringProducer; import java.util.concurrent.CompletableFuture; import static org.terracotta.management.service.monitoring.DefaultDataListener.TOPIC_SERVER_ENTITY_NOTIFICATION; import static org.terracotta.management.service.monitoring.DefaultDataListener.TOPIC_SERVER_ENTITY_STATISTICS; /** * @author Mathieu Carbou */ class DefaultPassiveEntityMonitoringService extends AbstractEntityMonitoringService { private final IMonitoringProducer monitoringProducer; DefaultPassiveEntityMonitoringService(long consumerId, IMonitoringProducer monitoringProducer, PlatformConfiguration platformConfiguration) { super(consumerId, platformConfiguration); this.monitoringProducer = monitoringProducer; monitoringProducer.addNode(new String[0], "management-answer", null); } 
@Override public void exposeManagementRegistry(ContextContainer contextContainer, Capability... capabilities) { logger.trace("[{}] exposeManagementRegistry({})", getConsumerId(), contextContainer); ManagementRegistry registry = ManagementRegistry.create(contextContainer); registry.addCapabilities(capabilities); monitoringProducer.addNode(new String[0], "registry", registry); } @Override public void pushNotification(ContextualNotification notification) { logger.trace("[{}] pushNotification({})", getConsumerId(), notification); monitoringProducer.pushBestEffortsData(TOPIC_SERVER_ENTITY_NOTIFICATION, notification); } @Override public void pushStatistics(ContextualStatistics... statistics) { if (statistics.length > 0) { logger.trace("[{}] pushStatistics({})", getConsumerId(), statistics.length); monitoringProducer.pushBestEffortsData(TOPIC_SERVER_ENTITY_STATISTICS, statistics); } } @Override public void answerManagementCall(String managementCallIdentifier, ContextualReturn<?> contextualReturn) { logger.trace("[{}] answerManagementCall({}, {})", getConsumerId(), managementCallIdentifier, contextualReturn); monitoringProducer.addNode(new String[]{"management-answer"}, managementCallIdentifier, contextualReturn); } @Override public CompletableFuture<ClientIdentifier> getClientIdentifier(ClientDescriptor clientDescriptor) { CompletableFuture<ClientIdentifier> future = new CompletableFuture<>(); future.completeExceptionally(new UnsupportedOperationException("getClientIdentifier() cannot be called from a passive entity (consumerId=" + getConsumerId() + ")")); return future; } }
apache-2.0
blueboxgroup/ursula-monitoring
sensu/plugins/check-dir-new-files.rb
2355
#! /usr/bin/env ruby # # check-dir-new-files # # DESCRIPTION: # Checks the number of specific files in a directory # # OUTPUT: # plain text # # PLATFORMS: # Linux, BSD # # DEPENDENCIES: # gem: sensu-plugin # # USAGE: # #YELLOW # # NOTES: # # LICENSE: # Copyright 2014 Sonian, Inc. and contributors. <support@sensuapp.org> # Released under the same terms as Sensu (the MIT license); see LICENSE # for details. # require 'sensu-plugin/check/cli' require 'fileutils' require 'time' class DirCount < Sensu::Plugin::Check::CLI BASE_DIR = '/var/cache/sensu/check-dir-new-files' option :directory, description: 'Directory to count files in', short: '-d DIR', long: '--dir DIR', default: '/var/crash' option :filename_pattern, description: 'filename patten to match', short: '-p PATTERN', long: '--pattern PATTERN', default: '*.crash' option :criticality, description: "Set sensu alert level, default is critical", short: '-z CRITICALITY', long: '--criticality CRITICALITY', default: 'critical' def getLastCheckTime() begin @last_check_time = 0 @state_file = File.join(BASE_DIR,config[:directory].gsub('/','_'),config[:filename_pattern]) File.open(@state_file, "r") do |file| file.flock(File::LOCK_SH) @last_check_time = file.readline.to_i end rescue return end end def setLastCheckTime() begin FileUtils.mkdir_p(File.dirname(@state_file)) File.open(@state_file, File::RDWR|File::CREAT, 0644) do |file| file.flock(File::LOCK_EX) file.truncate(0) file.write(Time.now.to_i) end rescue return end end def run getLastCheckTime() file_count = 0 begin Dir.chdir(config[:directory]) Dir.glob(config[:filename_pattern]).each {|file| file_count += 1 if File.mtime(file) >= Time.at(@last_check_time)} rescue Exception => e puts e unknown "Error listing files in #{config[:directory]}" end setLastCheckTime() msg = "#{file_count} new file(s) like #{config[:filename_pattern]} created at #{config[:directory]}." ok msg if file_count == 0 warning msg if config[:criticality] == "warning" critical msg end end
apache-2.0
mongodb/mongo-php-driver
tests/bson-corpus/decimal128-3-valid-135.phpt
889
--TEST-- Decimal128: [basx038] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- <?php require_once __DIR__ . '/../utils/basic.inc'; $canonicalBson = hex2bin('1800000013640079DF0D8648700000000000000000223000'); $canonicalExtJson = '{"d" : {"$numberDecimal" : "0.123456789012345"}}'; // Canonical BSON -> Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== <?php exit(0); ?> --EXPECT-- 1800000013640079df0d8648700000000000000000223000 {"d":{"$numberDecimal":"0.123456789012345"}} 1800000013640079df0d8648700000000000000000223000 ===DONE===
apache-2.0
msebire/intellij-community
java/java-impl/src/com/intellij/codeInsight/daemon/impl/quickfix/CreateFieldFromUsageFix.java
6508
/* * Copyright 2000-2017 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.codeInsight.daemon.impl.quickfix; import com.intellij.codeInsight.ExpectedTypeInfo; import com.intellij.codeInsight.daemon.QuickFixBundle; import com.intellij.codeInsight.template.Template; import com.intellij.codeInsight.template.TemplateEditingAdapter; import com.intellij.openapi.application.ApplicationManager; import com.intellij.openapi.editor.Editor; import com.intellij.openapi.project.Project; import com.intellij.psi.*; import com.intellij.psi.codeStyle.CodeStyleManager; import com.intellij.psi.util.PsiTreeUtil; import com.intellij.psi.util.PsiUtil; import org.jetbrains.annotations.NotNull; import java.util.ArrayList; import java.util.List; /** * @author Mike */ public class CreateFieldFromUsageFix extends CreateVarFromUsageFix { public CreateFieldFromUsageFix(@NotNull PsiReferenceExpression referenceElement) { super(referenceElement); } @Override protected String getText(String varName) { return QuickFixBundle.message("create.field.from.usage.text", varName); } protected boolean createConstantField() { return false; } @NotNull @Override protected List<PsiClass> getTargetClasses(PsiElement element) { final List<PsiClass> targetClasses = new ArrayList<>(); for (PsiClass psiClass : super.getTargetClasses(element)) { if (canModify(psiClass) && (!psiClass.isInterface() && !psiClass.isAnnotationType() || shouldCreateStaticMember(myReferenceExpression, psiClass))) { 
targetClasses.add(psiClass); } } return targetClasses; } @Override protected boolean canBeTargetClass(PsiClass psiClass) { return canModify(psiClass) && !psiClass.isInterface() && !psiClass.isAnnotationType(); } @Override protected void invokeImpl(final PsiClass targetClass) { final Project project = myReferenceExpression.getProject(); JVMElementFactory factory = JVMElementFactories.getFactory(targetClass.getLanguage(), project); if (factory == null) factory = JavaPsiFacade.getElementFactory(project); PsiMember enclosingContext = null; PsiClass parentClass; do { enclosingContext = PsiTreeUtil.getParentOfType(enclosingContext == null ? myReferenceExpression : enclosingContext, PsiMethod.class, PsiField.class, PsiClassInitializer.class); parentClass = enclosingContext == null ? null : enclosingContext.getContainingClass(); } while (parentClass instanceof PsiAnonymousClass); ExpectedTypeInfo[] expectedTypes = CreateFromUsageUtils.guessExpectedTypes(myReferenceExpression, false); String fieldName = myReferenceExpression.getReferenceName(); assert fieldName != null; PsiField field = factory.createField(fieldName, PsiType.INT); if (createConstantField()) { PsiUtil.setModifierProperty(field, PsiModifier.FINAL, true); } if (createConstantField()) { PsiUtil.setModifierProperty(field, PsiModifier.STATIC, true); PsiUtil.setModifierProperty(field, PsiModifier.FINAL, true); } else { if (!targetClass.isInterface() && shouldCreateStaticMember(myReferenceExpression, targetClass)) { PsiUtil.setModifierProperty(field, PsiModifier.STATIC, true); } if (shouldCreateFinalMember(myReferenceExpression, targetClass)) { PsiUtil.setModifierProperty(field, PsiModifier.FINAL, true); } } field = CreateFieldFromUsageHelper.insertField(targetClass, field, myReferenceExpression); setupVisibility(parentClass, targetClass, field.getModifierList()); createFieldFromUsageTemplate(targetClass, project, expectedTypes, field, createConstantField(), myReferenceExpression); } public static void 
createFieldFromUsageTemplate(final PsiClass targetClass, final Project project, final Object expectedTypes, final PsiField field, final boolean createConstantField, final PsiElement context) { final PsiFile targetFile = targetClass.getContainingFile(); final Editor newEditor = positionCursor(project, targetFile, field); if (newEditor == null) return; Template template = CreateFieldFromUsageHelper.setupTemplate(field, expectedTypes, targetClass, newEditor, context, createConstantField); startTemplate(newEditor, template, project, new TemplateEditingAdapter() { @Override public void templateFinished(@NotNull Template template, boolean brokenOff) { PsiDocumentManager.getInstance(project).commitDocument(newEditor.getDocument()); final int offset = newEditor.getCaretModel().getOffset(); final PsiField psiField = PsiTreeUtil.findElementOfClassAtOffset(targetFile, offset, PsiField.class, false); if (psiField != null) { ApplicationManager.getApplication().runWriteAction(() -> { CodeStyleManager.getInstance(project).reformat(psiField); }); newEditor.getCaretModel().moveToOffset(psiField.getTextRange().getEndOffset() - 1); } } }); } public static boolean shouldCreateFinalMember(@NotNull PsiReferenceExpression ref, @NotNull PsiClass targetClass) { if (!PsiTreeUtil.isAncestor(targetClass, ref, true)) { return false; } final PsiElement element = PsiTreeUtil.getParentOfType(ref, PsiClassInitializer.class, PsiMethod.class, PsiField.class); if (element instanceof PsiClassInitializer) { return true; } if (element instanceof PsiMethod && ((PsiMethod)element).isConstructor()) { return true; } return false; } @Override @NotNull public String getFamilyName() { return QuickFixBundle.message("create.field.from.usage.family"); } }
apache-2.0
Durgesh1988/core
server/app/model/v2.0/providers/base-provider.js
1425
/* Copyright [2016] [Relevance Lab] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ var mongoose = require('mongoose'); var util = require('util'); var Schema = mongoose.Schema; var BaseProviderSchema = function BaseProviderSchema() { Schema.apply(this, arguments); this.add({ name: { type: String, required: true, trim: true }, type: { type: String, required: true, trim: true }, organizationId: { type: String, required: true, trim: false }, isDeleted: { type: Boolean, required: true, default: false }, invalidCredentials: { type: Boolean, required: true, default: false } }); }; util.inherits(BaseProviderSchema, Schema); module.exports = BaseProviderSchema;
apache-2.0
robertoschwald/cas
support/cas-server-support-saml-idp-web/src/main/java/org/apereo/cas/support/saml/web/idp/profile/IdPInitiatedProfileHandlerController.java
8567
package org.apereo.cas.support.saml.web.idp.profile; import org.apereo.cas.authentication.AuthenticationSystemSupport; import org.apereo.cas.authentication.principal.Service; import org.apereo.cas.authentication.principal.ServiceFactory; import org.apereo.cas.authentication.principal.WebApplicationService; import org.apereo.cas.configuration.CasConfigurationProperties; import org.apereo.cas.services.ServicesManager; import org.apereo.cas.services.UnauthorizedServiceException; import org.apereo.cas.support.saml.OpenSamlConfigBean; import org.apereo.cas.support.saml.SamlIdPConstants; import org.apereo.cas.support.saml.SamlProtocolConstants; import org.apereo.cas.support.saml.services.idp.metadata.cache.SamlRegisteredServiceCachingMetadataResolver; import org.apereo.cas.support.saml.web.idp.profile.builders.SamlProfileObjectBuilder; import org.apereo.cas.support.saml.web.idp.profile.builders.enc.SamlIdPObjectSigner; import org.apereo.cas.support.saml.web.idp.profile.builders.enc.SamlObjectSignatureValidator; import lombok.extern.slf4j.Slf4j; import lombok.val; import net.shibboleth.utilities.java.support.xml.ParserPool; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.math.NumberUtils; import org.apache.commons.lang3.tuple.Pair; import org.jasig.cas.client.util.CommonUtils; import org.joda.time.DateTime; import org.joda.time.chrono.ISOChronology; import org.opensaml.messaging.context.MessageContext; import org.opensaml.messaging.decoder.MessageDecodingException; import org.opensaml.saml.common.SAMLObjectBuilder; import org.opensaml.saml.common.SignableSAMLObject; import org.opensaml.saml.common.messaging.context.SAMLBindingContext; import org.opensaml.saml.common.xml.SAMLConstants; import org.opensaml.saml.saml2.core.AuthnRequest; import org.opensaml.saml.saml2.core.Issuer; import org.opensaml.saml.saml2.core.NameIDPolicy; import org.opensaml.saml.saml2.core.Response; import org.springframework.web.bind.annotation.GetMapping; import 
javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import java.util.concurrent.TimeUnit; /** * This is {@link IdPInitiatedProfileHandlerController}. * * @author Misagh Moayyed * @since 5.0.0 */ @Slf4j public class IdPInitiatedProfileHandlerController extends AbstractSamlProfileHandlerController { public IdPInitiatedProfileHandlerController(final SamlIdPObjectSigner samlObjectSigner, final ParserPool parserPool, final AuthenticationSystemSupport authenticationSystemSupport, final ServicesManager servicesManager, final ServiceFactory<WebApplicationService> webApplicationServiceFactory, final SamlRegisteredServiceCachingMetadataResolver samlRegisteredServiceCachingMetadataResolver, final OpenSamlConfigBean configBean, final SamlProfileObjectBuilder<Response> responseBuilder, final CasConfigurationProperties casProperties, final SamlObjectSignatureValidator samlObjectSignatureValidator, final Service callbackService) { super(samlObjectSigner, parserPool, authenticationSystemSupport, servicesManager, webApplicationServiceFactory, samlRegisteredServiceCachingMetadataResolver, configBean, responseBuilder, casProperties, samlObjectSignatureValidator, callbackService); } /** * Handle idp initiated sso requests. * * @param response the response * @param request the request * @throws Exception the exception */ @GetMapping(path = SamlIdPConstants.ENDPOINT_SAML2_IDP_INIT_PROFILE_SSO) protected void handleIdPInitiatedSsoRequest(final HttpServletResponse response, final HttpServletRequest request) throws Exception { // The name (i.e., the entity ID) of the service provider. 
val providerId = CommonUtils.safeGetParameter(request, SamlIdPConstants.PROVIDER_ID); if (StringUtils.isBlank(providerId)) { LOGGER.warn("No providerId parameter given in unsolicited SSO authentication request."); throw new MessageDecodingException("No providerId parameter given in unsolicited SSO authentication request."); } val registeredService = verifySamlRegisteredService(providerId); val adaptor = getSamlMetadataFacadeFor(registeredService, providerId); if (!adaptor.isPresent()) { throw new UnauthorizedServiceException(UnauthorizedServiceException.CODE_UNAUTHZ_SERVICE, "Cannot find metadata linked to " + providerId); } // The URL of the response location at the SP (called the "Assertion Consumer Service") // but can be omitted in favor of the IdP picking the default endpoint location from metadata. var shire = CommonUtils.safeGetParameter(request, SamlIdPConstants.SHIRE); val facade = adaptor.get(); if (StringUtils.isBlank(shire)) { LOGGER.warn("Resolving service provider assertion consumer service URL for [{}] and binding [{}]", providerId, SAMLConstants.SAML2_POST_BINDING_URI); val acs = facade.getAssertionConsumerService(SAMLConstants.SAML2_POST_BINDING_URI); if (acs == null || StringUtils.isBlank(acs.getLocation())) { throw new MessageDecodingException("Unable to resolve SP ACS URL location for binding " + SAMLConstants.SAML2_POST_BINDING_URI); } shire = acs.getLocation(); } if (StringUtils.isBlank(shire)) { LOGGER.warn("Unable to resolve service provider assertion consumer service URL for AuthnRequest construction for entityID: [{}]", providerId); throw new MessageDecodingException("Unable to resolve SP ACS URL for AuthnRequest construction"); } // The target resource at the SP, or a state token generated by an SP to represent the resource. val target = CommonUtils.safeGetParameter(request, SamlIdPConstants.TARGET); // A timestamp to help with stale request detection. 
val time = CommonUtils.safeGetParameter(request, SamlIdPConstants.TIME); val builder = (SAMLObjectBuilder) configBean.getBuilderFactory().getBuilder(AuthnRequest.DEFAULT_ELEMENT_NAME); val authnRequest = (AuthnRequest) builder.buildObject(); authnRequest.setAssertionConsumerServiceURL(shire); val isBuilder = (SAMLObjectBuilder) configBean.getBuilderFactory().getBuilder(Issuer.DEFAULT_ELEMENT_NAME); val issuer = (Issuer) isBuilder.buildObject(); issuer.setValue(providerId); authnRequest.setIssuer(issuer); authnRequest.setProtocolBinding(SAMLConstants.SAML2_POST_BINDING_URI); val pBuilder = (SAMLObjectBuilder) configBean.getBuilderFactory().getBuilder(NameIDPolicy.DEFAULT_ELEMENT_NAME); val nameIDPolicy = (NameIDPolicy) pBuilder.buildObject(); nameIDPolicy.setAllowCreate(Boolean.TRUE); authnRequest.setNameIDPolicy(nameIDPolicy); if (NumberUtils.isCreatable(time)) { authnRequest.setIssueInstant(new DateTime(TimeUnit.SECONDS.convert(Long.parseLong(time), TimeUnit.MILLISECONDS), ISOChronology.getInstanceUTC())); } else { authnRequest.setIssueInstant(new DateTime(DateTime.now(), ISOChronology.getInstanceUTC())); } authnRequest.setForceAuthn(Boolean.FALSE); if (StringUtils.isNotBlank(target)) { request.setAttribute(SamlProtocolConstants.PARAMETER_SAML_RELAY_STATE, target); } val ctx = new MessageContext(); ctx.setAutoCreateSubcontexts(true); if (facade.isAuthnRequestsSigned()) { samlObjectSigner.encode(authnRequest, registeredService, facade, response, request, SAMLConstants.SAML2_POST_BINDING_URI, authnRequest); } ctx.setMessage(authnRequest); ctx.getSubcontext(SAMLBindingContext.class, true).setHasBindingSignature(false); val pair = Pair.<SignableSAMLObject, MessageContext>of(authnRequest, ctx); initiateAuthenticationRequest(pair, response, request); } }
apache-2.0
wiltonlazary/arangodb
3rdParty/iresearch/core/analysis/analyzers.cpp
8392
//////////////////////////////////////////////////////////////////////////////// /// DISCLAIMER /// /// Copyright 2016 by EMC Corporation, All Rights Reserved /// /// Licensed under the Apache License, Version 2.0 (the "License"); /// you may not use this file except in compliance with the License. /// You may obtain a copy of the License at /// /// http://www.apache.org/licenses/LICENSE-2.0 /// /// Unless required by applicable law or agreed to in writing, software /// distributed under the License is distributed on an "AS IS" BASIS, /// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. /// See the License for the specific language governing permissions and /// limitations under the License. /// /// Copyright holder is EMC Corporation /// /// @author Andrey Abramov /// @author Vasiliy Nabatchikov //////////////////////////////////////////////////////////////////////////////// #include "utils/register.hpp" // list of statically loaded scorers via init() #ifndef IRESEARCH_DLL #include "analysis/delimited_token_stream.hpp" #include "analysis/collation_token_stream.hpp" #include "analysis/classification_stream.hpp" #include "analysis/nearest_neighbors_stream.hpp" #include "analysis/ngram_token_stream.hpp" #include "analysis/text_token_normalizing_stream.hpp" #include "analysis/text_token_stemming_stream.hpp" #include "analysis/text_token_stream.hpp" #include "analysis/token_stopwords_stream.hpp" #include "analysis/pipeline_token_stream.hpp" #include "analysis/segmentation_token_stream.hpp" #endif #include "analysis/analyzers.hpp" #include "utils/hash_utils.hpp" namespace { using namespace irs; struct key { key(const string_ref& type, const irs::type_info& args_format) : type(type), args_format(args_format) { } bool operator==(const key& other) const noexcept { return args_format == other.args_format && type == other.type; } bool operator!=(const key& other) const noexcept { return !(*this == other); } string_ref type; irs::type_info args_format; 
}; struct value{ explicit value( analysis::factory_f factory = nullptr, analysis::normalizer_f normalizer = nullptr) : factory(factory), normalizer(normalizer) { } bool empty() const noexcept { return nullptr == factory; } bool operator==(const value& other) const noexcept { return factory == other.factory && normalizer == other.normalizer; } bool operator!=(const value& other) const noexcept { return !(*this == other); } const analysis::factory_f factory; const analysis::normalizer_f normalizer; }; } namespace std { template<> struct hash<::key> { size_t operator()(const ::key& value) const noexcept { return irs::hash_combine( std::hash<irs::type_info::type_id>()(value.args_format.id()), value.type); } }; // hash } // std namespace { const std::string FILENAME_PREFIX("libanalyzer-"); class analyzer_register : public irs::tagged_generic_register<::key, ::value, irs::string_ref, analyzer_register> { protected: virtual std::string key_to_filename(const key_type& key) const override { const auto& name = key.type; std::string filename(FILENAME_PREFIX.size() + name.size(), 0); std::memcpy( &filename[0], FILENAME_PREFIX.c_str(), FILENAME_PREFIX.size()); irs::string_ref::traits_type::copy( &filename[0] + FILENAME_PREFIX.size(), name.c_str(), name.size()); return filename; } }; } namespace iresearch { namespace analysis { /*static*/ bool analyzers::exists( const string_ref& name, const type_info& args_format, bool load_library /*= true*/) { return !analyzer_register::instance().get(::key(name, args_format), load_library).empty(); } /*static*/ bool analyzers::normalize( std::string& out, const string_ref& name, const type_info& args_format, const string_ref& args, bool load_library /*= true*/) noexcept { try { auto* normalizer = analyzer_register::instance().get( ::key(name, args_format), load_library).normalizer; return normalizer ? normalizer(args, out) : false; } catch (...) 
{ IR_FRMT_ERROR("Caught exception while normalizing analyzer '%s' arguments", static_cast<std::string>(name).c_str()); } return false; } /*static*/ result analyzers::get( analyzer::ptr& analyzer, const string_ref& name, const type_info& args_format, const string_ref& args, bool load_library /*= true*/) noexcept { try { auto* factory = analyzer_register::instance().get( ::key(name, args_format), load_library).factory; if (!factory) { return result::make<result::NOT_FOUND>(); } analyzer = factory(args); } catch (const std::exception& e) { return result::make<result::INVALID_ARGUMENT>( "Caught exception while getting an analyzer instance", e.what()); } catch (...) { return result::make<result::INVALID_ARGUMENT>( "Caught exception while getting an analyzer instance"); } return {}; } /*static*/ analyzer::ptr analyzers::get( const string_ref& name, const type_info& args_format, const string_ref& args, bool load_library /*= true*/) noexcept { try { auto* factory = analyzer_register::instance().get( ::key(name, args_format), load_library ).factory; return factory ? factory(args) : nullptr; } catch (...) 
{ IR_FRMT_ERROR("Caught exception while getting an analyzer instance"); } return nullptr; } /*static*/ void analyzers::init() { #ifndef IRESEARCH_DLL irs::analysis::delimited_token_stream::init(); irs::analysis::collation_token_stream::init(); irs::analysis::classification_stream::init(); irs::analysis::ngram_token_stream_base::init(); irs::analysis::normalizing_token_stream::init(); irs::analysis::nearest_neighbors_stream::init(); irs::analysis::stemming_token_stream::init(); irs::analysis::text_token_stream::init(); irs::analysis::token_stopwords_stream::init(); irs::analysis::pipeline_token_stream::init(); irs::analysis::segmentation_token_stream::init(); #endif } /*static*/ void analyzers::load_all(const std::string& path) { load_libraries(path, FILENAME_PREFIX, ""); } /*static*/ bool analyzers::visit( const std::function<bool(const string_ref&, const type_info&)>& visitor) { analyzer_register::visitor_t wrapper = [&visitor](const ::key& key)->bool { return visitor(key.type, key.args_format); }; return analyzer_register::instance().visit(wrapper); } // ----------------------------------------------------------------------------- // --SECTION-- analyzer registration // ----------------------------------------------------------------------------- analyzer_registrar::analyzer_registrar( const type_info& type, const type_info& args_format, analyzer::ptr(*factory)(const string_ref& args), bool(*normalizer)(const string_ref& args, std::string& config), const char* source /*= nullptr*/) { const string_ref source_ref(source); const auto new_entry = ::value(factory, normalizer); auto entry = analyzer_register::instance().set( ::key(type.name(), args_format), new_entry, source_ref.null() ? 
nullptr : &source_ref ); registered_ = entry.second; if (!registered_ && new_entry != entry.first) { auto* registered_source = analyzer_register::instance().tag(::key(type.name(), args_format)); if (source && registered_source) { IR_FRMT_WARN( "type name collision detected while registering analyzer, ignoring: type '%s' from %s, previously from %s", type.name().c_str(), source, registered_source->c_str() ); } else if (source) { IR_FRMT_WARN( "type name collision detected while registering analyzer, ignoring: type '%s' from %s", type.name().c_str(), source ); } else if (registered_source) { IR_FRMT_WARN( "type name collision detected while registering analyzer, ignoring: type '%s', previously from %s", type.name().c_str(), registered_source->c_str() ); } else { IR_FRMT_WARN( "type name collision detected while registering analyzer, ignoring: type '%s'", type.name().c_str() ); } } } } // analysis }
apache-2.0
modulexcite/msgpack-cli
test/MsgPack.UnitTest/gen/map/MsgPack_Serialization_PolymorphicMemberTypeKnownType_PolymorphicPrivateSetterPropertyAndConstructorAsObjectSerializer.cs
9047
//------------------------------------------------------------------------------ // <auto-generated> // このコードはツールによって生成されました。 // ランタイム バージョン:4.0.30319.34209 // // このファイルへの変更は、以下の状況下で不正な動作の原因になったり、 // コードが再生成されるときに損失したりします。 // </auto-generated> //------------------------------------------------------------------------------ namespace MsgPack.Serialization.GeneratedSerializers.MapBased { [System.CodeDom.Compiler.GeneratedCodeAttribute("MsgPack.Serialization.CodeDomSerializers.CodeDomSerializerBuilder", "0.6.0.0")] [System.Diagnostics.DebuggerNonUserCodeAttribute()] public class MsgPack_Serialization_PolymorphicMemberTypeKnownType_PolymorphicPrivateSetterPropertyAndConstructorAsObjectSerializer : MsgPack.Serialization.MessagePackSerializer<MsgPack.Serialization.PolymorphicMemberTypeKnownType_PolymorphicPrivateSetterPropertyAndConstructorAsObject> { private MsgPack.Serialization.MessagePackSerializer<string> _serializer0; private MsgPack.Serialization.MessagePackSerializer<object> _serializer1; private System.Reflection.MethodBase _methodBasePolymorphicMemberTypeKnownType_PolymorphicPrivateSetterPropertyAndConstructorAsObject_set_Polymorphic0; public MsgPack_Serialization_PolymorphicMemberTypeKnownType_PolymorphicPrivateSetterPropertyAndConstructorAsObjectSerializer(MsgPack.Serialization.SerializationContext context) : base(context) { MsgPack.Serialization.PolymorphismSchema schema0 = default(MsgPack.Serialization.PolymorphismSchema); System.Collections.Generic.Dictionary<string, System.Type> typeMap0 = default(System.Collections.Generic.Dictionary<string, System.Type>); typeMap0 = new System.Collections.Generic.Dictionary<string, System.Type>(2); typeMap0.Add("1", typeof(MsgPack.Serialization.DirectoryEntry)); typeMap0.Add("0", typeof(MsgPack.Serialization.FileEntry)); schema0 = MsgPack.Serialization.PolymorphismSchema.ForPolymorphicObject(typeof(object), typeMap0); this._serializer0 = context.GetSerializer<string>(schema0); MsgPack.Serialization.PolymorphismSchema 
schema1 = default(MsgPack.Serialization.PolymorphismSchema); System.Collections.Generic.Dictionary<string, System.Type> typeMap1 = default(System.Collections.Generic.Dictionary<string, System.Type>); typeMap1 = new System.Collections.Generic.Dictionary<string, System.Type>(2); typeMap1.Add("1", typeof(MsgPack.Serialization.DirectoryEntry)); typeMap1.Add("0", typeof(MsgPack.Serialization.FileEntry)); schema1 = MsgPack.Serialization.PolymorphismSchema.ForPolymorphicObject(typeof(object), typeMap1); this._serializer1 = context.GetSerializer<object>(schema1); this._methodBasePolymorphicMemberTypeKnownType_PolymorphicPrivateSetterPropertyAndConstructorAsObject_set_Polymorphic0 = typeof(MsgPack.Serialization.PolymorphicMemberTypeKnownType_PolymorphicPrivateSetterPropertyAndConstructorAsObject).GetMethod("set_Polymorphic", (System.Reflection.BindingFlags.Instance | (System.Reflection.BindingFlags.Public | System.Reflection.BindingFlags.NonPublic)), null, new System.Type[] { typeof(object)}, null); } protected internal override void PackToCore(MsgPack.Packer packer, MsgPack.Serialization.PolymorphicMemberTypeKnownType_PolymorphicPrivateSetterPropertyAndConstructorAsObject objectTree) { packer.PackMapHeader(1); this._serializer0.PackTo(packer, "Polymorphic"); this._serializer1.PackTo(packer, objectTree.Polymorphic); } protected internal override MsgPack.Serialization.PolymorphicMemberTypeKnownType_PolymorphicPrivateSetterPropertyAndConstructorAsObject UnpackFromCore(MsgPack.Unpacker unpacker) { MsgPack.Serialization.PolymorphicMemberTypeKnownType_PolymorphicPrivateSetterPropertyAndConstructorAsObject result = default(MsgPack.Serialization.PolymorphicMemberTypeKnownType_PolymorphicPrivateSetterPropertyAndConstructorAsObject); result = new MsgPack.Serialization.PolymorphicMemberTypeKnownType_PolymorphicPrivateSetterPropertyAndConstructorAsObject(); if (unpacker.IsArrayHeader) { int unpacked = default(int); int itemsCount = default(int); itemsCount = 
MsgPack.Serialization.UnpackHelpers.GetItemsCount(unpacker); object nullable = default(object); if ((unpacked < itemsCount)) { if ((unpacker.Read() == false)) { throw MsgPack.Serialization.SerializationExceptions.NewMissingItem(0); } if (((unpacker.IsArrayHeader == false) && (unpacker.IsMapHeader == false))) { nullable = this._serializer1.UnpackFrom(unpacker); } else { MsgPack.Unpacker disposable = default(MsgPack.Unpacker); disposable = unpacker.ReadSubtree(); try { nullable = this._serializer1.UnpackFrom(disposable); } finally { if (((disposable == null) == false)) { disposable.Dispose(); } } } } if (((nullable == null) == false)) { this._methodBasePolymorphicMemberTypeKnownType_PolymorphicPrivateSetterPropertyAndConstructorAsObject_set_Polymorphic0.Invoke(result, new object[] { nullable}); } unpacked = (unpacked + 1); } else { int itemsCount0 = default(int); itemsCount0 = MsgPack.Serialization.UnpackHelpers.GetItemsCount(unpacker); for (int i = 0; (i < itemsCount0); i = (i + 1)) { string key = default(string); string nullable0 = default(string); nullable0 = MsgPack.Serialization.UnpackHelpers.UnpackStringValue(unpacker, typeof(MsgPack.Serialization.PolymorphicMemberTypeKnownType_PolymorphicPrivateSetterPropertyAndConstructorAsObject), "MemberName"); if (((nullable0 == null) == false)) { key = nullable0; } else { throw MsgPack.Serialization.SerializationExceptions.NewNullIsProhibited("MemberName"); } if ((key == "Polymorphic")) { object nullable1 = default(object); if ((unpacker.Read() == false)) { throw MsgPack.Serialization.SerializationExceptions.NewMissingItem(i); } if (((unpacker.IsArrayHeader == false) && (unpacker.IsMapHeader == false))) { nullable1 = this._serializer1.UnpackFrom(unpacker); } else { MsgPack.Unpacker disposable0 = default(MsgPack.Unpacker); disposable0 = unpacker.ReadSubtree(); try { nullable1 = this._serializer1.UnpackFrom(disposable0); } finally { if (((disposable0 == null) == false)) { disposable0.Dispose(); } } } if (((nullable1 == 
null) == false)) { this._methodBasePolymorphicMemberTypeKnownType_PolymorphicPrivateSetterPropertyAndConstructorAsObject_set_Polymorphic0.Invoke(result, new object[] { nullable1}); } } else { unpacker.Skip(); } } } return result; } private static T @__Conditional<T>(bool condition, T whenTrue, T whenFalse) { if (condition) { return whenTrue; } else { return whenFalse; } } } }
apache-2.0
paulcastro/openwhisk
tests/src/test/scala/common/WskTestHelpers.scala
11549
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package common import java.time.Instant import org.scalatest.Matchers import scala.collection.mutable.ListBuffer import scala.util.Failure import scala.util.Try import scala.concurrent.duration.Duration import scala.concurrent.duration.DurationInt import spray.json._ import TestUtils.RunResult import TestUtils.CONFLICT import akka.http.scaladsl.model.StatusCodes /** * An arbitrary response of a whisk action. Includes the result as a JsObject as the * structure of "result" is not defined. * * @param result a JSON object used to save the result of the execution of the action * @param status a string used to indicate the status of the action * @param success a boolean value used to indicate whether the action is executed successfully or not */ case class ActivationResponse(result: Option[JsObject], status: String, success: Boolean) object ActivationResponse extends DefaultJsonProtocol { implicit val serdes = jsonFormat3(ActivationResponse.apply) } /** * Activation record as it is returned from the OpenWhisk service. 
* * @param activationId a String to save the ID of the activation * @param logs a list of String to save the logs of the activation * @param response an Object of ActivationResponse to save the response of the activation * @param start an Instant to save the start time of activation * @param end an Instant to save the end time of activation * @param duration a Long to save the duration of the activation * @param cases String to save the cause of failure if the activation fails * @param annotations a list of JSON objects to save the annotations of the activation */ case class ActivationResult(activationId: String, logs: Option[List[String]], response: ActivationResponse, start: Instant, end: Instant, duration: Long, cause: Option[String], annotations: Option[List[JsObject]]) { def getAnnotationValue(key: String): Option[JsValue] = { Try { val annotation = annotations.get.filter(x => x.getFields("key")(0) == JsString(key)) assert(annotation.size == 1) // only one annotation with this value val value = annotation(0).getFields("value") assert(value.size == 1) value(0) }.toOption } } object ActivationResult extends DefaultJsonProtocol { private implicit val instantSerdes = new RootJsonFormat[Instant] { def write(t: Instant) = t.toEpochMilli.toJson def read(value: JsValue) = Try { value match { case JsNumber(i) => Instant.ofEpochMilli(i.bigDecimal.longValue) case _ => deserializationError("timetsamp malformed") } } getOrElse deserializationError("timetsamp malformed 2") } implicit val serdes = new RootJsonFormat[ActivationResult] { private val format = jsonFormat8(ActivationResult.apply) def write(result: ActivationResult) = format.write(result) def read(value: JsValue) = { val obj = value.asJsObject obj.getFields("activationId", "response", "start") match { case Seq(JsString(activationId), response, start) => Try { val logs = obj.fields.get("logs").map(_.convertTo[List[String]]) val end = obj.fields.get("end").map(_.convertTo[Instant]).getOrElse(Instant.EPOCH) val 
duration = obj.fields.get("duration").map(_.convertTo[Long]).getOrElse(0L) val cause = obj.fields.get("cause").map(_.convertTo[String]) val annotations = obj.fields.get("annotations").map(_.convertTo[List[JsObject]]) new ActivationResult( activationId, logs, response.convertTo[ActivationResponse], start.convertTo[Instant], end, duration, cause, annotations) } getOrElse deserializationError("Failed to deserialize the activation result.") case _ => deserializationError("Failed to deserialize the activation ID, response or start.") } } } } /** * Test fixture to ease cleaning of whisk entities created during testing. * * The fixture records the entities created during a test and when the test * completed, will delete them all. */ trait WskTestHelpers extends Matchers { type Assets = ListBuffer[(BaseDeleteFromCollection, String, Boolean)] /** * Helper to register an entity to delete once a test completes. * The helper sanitizes (deletes) a previous instance of the entity if it exists * in given collection. * */ class AssetCleaner(assetsToDeleteAfterTest: Assets, wskprops: WskProps) { def withCleaner[T <: BaseDeleteFromCollection](cli: T, name: String, confirmDelete: Boolean = true)( cmd: (T, String) => RunResult): RunResult = { // sanitize (delete) if asset exists cli.sanitize(name)(wskprops) assetsToDeleteAfterTest += ((cli, name, confirmDelete)) cmd(cli, name) } } /** * Creates a test closure which records all entities created inside the test into a * list that is iterated at the end of the test so that these entities are deleted * (from most recently created to oldest). 
*/ def withAssetCleaner(wskprops: WskProps)(test: (WskProps, AssetCleaner) => Any) = { // create new asset list to track what must be deleted after test completes val assetsToDeleteAfterTest = new Assets() try { test(wskprops, new AssetCleaner(assetsToDeleteAfterTest, wskprops)) } catch { case t: Throwable => // log the exception that occurred in the test and rethrow it println(s"Exception occurred during test execution: $t") throw t } finally { // delete assets in reverse order so that was created last is deleted first val deletedAll = assetsToDeleteAfterTest.reverse map { case ((cli, n, delete)) => n -> Try { cli match { case _: BasePackage if delete => val rr = cli.delete(n)(wskprops) rr.exitCode match { case CONFLICT | StatusCodes.Conflict.intValue => whisk.utils.retry(cli.delete(n)(wskprops), 5, Some(1.second)) case _ => rr } case _ => if (delete) cli.delete(n)(wskprops) else cli.sanitize(n)(wskprops) } } } forall { case (n, Failure(t)) => println(s"ERROR: deleting asset failed for $n: $t") false case _ => true } assert(deletedAll, "some assets were not deleted") } } /** * Extracts an activation id from a wsk command producing a RunResult with such an id. * If id is found, polls activations until one matching id is found. If found, pass * the activation to the post processor which then check for expected values. */ def withActivation( wsk: BaseActivation, run: RunResult, initialWait: Duration = 1.second, pollPeriod: Duration = 1.second, totalWait: Duration = 60.seconds)(check: ActivationResult => Unit)(implicit wskprops: WskProps): Unit = { val activationId = wsk.extractActivationId(run) withClue(s"did not find an activation id in '$run'") { activationId shouldBe a[Some[_]] } withActivation(wsk, activationId.get, initialWait, pollPeriod, totalWait)(check) } /** * Polls activations until one matching id is found. If found, pass * the activation to the post processor which then check for expected values. 
*/ def withActivation(wsk: BaseActivation, activationId: String, initialWait: Duration, pollPeriod: Duration, totalWait: Duration)(check: ActivationResult => Unit)(implicit wskprops: WskProps): Unit = { val id = activationId val activation = wsk.waitForActivation(id, initialWait, pollPeriod, totalWait) if (activation.isLeft) { assert(false, s"error waiting for activation $id: ${activation.left.get}") } else try { check(activation.right.get.convertTo[ActivationResult]) } catch { case error: Throwable => println(s"check failed for activation $id: ${activation.right.get}") throw error } } /** * Polls until it finds {@code N} activationIds from an entity. Asserts the count * of the activationIds actually equal {@code N}. Takes a {@code since} parameter * defining the oldest activationId to consider valid. */ def withActivationsFromEntity( wsk: BaseActivation, entity: String, N: Int = 1, since: Option[Instant] = None, pollPeriod: Duration = 1.second, totalWait: Duration = 60.seconds)(check: Seq[ActivationResult] => Unit)(implicit wskprops: WskProps): Unit = { val activationIds = wsk.pollFor(N, Some(entity), since = since, retries = (totalWait / pollPeriod).toInt, pollPeriod = pollPeriod) withClue( s"expecting $N activations matching '$entity' name since $since but found ${activationIds.mkString(",")} instead") { activationIds.length shouldBe N } val parsed = activationIds.map { id => wsk.parseJsonString(wsk.get(Some(id)).stdout).convertTo[ActivationResult] } try { check(parsed) } catch { case error: Throwable => println(s"check failed for activations $activationIds: ${parsed}") throw error } } /** * In the case that test throws an exception, print stderr and stdout * from the provided RunResult. 
*/ def withPrintOnFailure(runResult: RunResult)(test: () => Unit) { try { test() } catch { case error: Throwable => println(s"[stderr] ${runResult.stderr}") println(s"[stdout] ${runResult.stdout}") throw error } } def removeCLIHeader(response: String): String = { if (response.contains("\n")) response.substring(response.indexOf("\n")) else response } def getJSONFromResponse(response: String, isCli: Boolean = false): JsObject = { if (isCli) removeCLIHeader(response).parseJson.asJsObject else response.parseJson.asJsObject } def getAdditionalTestSubject(newUser: String): WskProps = { val wskadmin = new RunWskAdminCmd {} WskProps(namespace = newUser, authKey = wskadmin.cli(Seq("user", "create", newUser)).stdout.trim) } def disposeAdditionalTestSubject(subject: String): Unit = { val wskadmin = new RunWskAdminCmd {} withClue(s"failed to delete temporary subject $subject") { wskadmin.cli(Seq("user", "delete", subject)).stdout should include("Subject deleted") } } }
apache-2.0
domeo/DomeoClient
src/org/mindinformatics/gwt/domeo/plugins/resource/pubmed/model/JsoPubMedEntry.java
1117
package org.mindinformatics.gwt.domeo.plugins.resource.pubmed.model;

import com.google.gwt.core.client.JavaScriptObject;

/**
 * GWT JavaScript overlay type exposing the bibliographic fields of a PubMed
 * entry JSON object through the {@code IDocumentBibliographicData} contract.
 *
 * Each accessor is a JSNI method that reads the same-named property of the
 * underlying JavaScript object; no copying or parsing happens on this side.
 * NOTE(review): field names (url, title, pmcid, pmid, doi, ...) presumably
 * match the payload returned by the PubMed lookup service — confirm against
 * the service response schema.
 */
public class JsoPubMedEntry extends JavaScriptObject implements IDocumentBibliographicData {

	// GWT overlay types must declare a protected, zero-argument constructor
	// and are never instantiated with 'new' from Java code.
	protected JsoPubMedEntry() {}

	/** URL of the article record. */
	public final native String getUrl() /*-{ return this.url; }-*/;

	/** Article title. */
	public final native String getTitle() /*-{ return this.title; }-*/;

	/** Authors, as a single preformatted string. */
	public final native String getPublicationAuthors() /*-{ return this.publicationAuthors; }-*/;

	/** Publication info line (journal/volume/pages as provided). */
	public final native String getPublicationInfo() /*-{ return this.publicationInfo; }-*/;

	/** Publication date, as provided by the source (format not normalized here). */
	public final native String getPublicationDate() /*-{ return this.publicationDate; }-*/;

	/** PubMed Central identifier (JSON property 'pmcid'). */
	public final native String getPmcId() /*-{ return this.pmcid; }-*/;

	/** PubMed identifier (JSON property 'pmid'). */
	public final native String getPmId() /*-{ return this.pmid; }-*/;

	/** Digital Object Identifier. */
	public final native String getDoi() /*-{ return this.doi; }-*/;

	/** Journal name. */
	public final native String getJournalName() /*-{ return this.journalName; }-*/;

	/** Journal ISSN. */
	public final native String getJournalIssn() /*-{ return this.journalIssn; }-*/;

	// Structured author list is not currently exposed; kept for reference.
	//public final native JsArray<JsoPerson> getAuthorNames() /*-{ return this.authorNames; }-*/;
}
apache-2.0
frenemations/WPFoverlay
WpfOverlay/bin/Debug/ICSharpCode.AvalonEdit/Editing/SelectionMouseHandler.cs
22363
// Copyright (c) AlphaSierraPapa for the SharpDevelop Team (for details please see \doc\copyright.txt) // This code is distributed under the GNU LGPL (for details please see \doc\license.txt) using System; using System.ComponentModel; using System.Diagnostics; using System.Linq; using System.Runtime.InteropServices; using System.Windows; using System.Windows.Documents; using System.Windows.Input; using System.Windows.Media.TextFormatting; using System.Windows.Threading; using ICSharpCode.AvalonEdit.Document; using ICSharpCode.AvalonEdit.Rendering; using ICSharpCode.AvalonEdit.Utils; using ICSharpCode.NRefactory.Editor; namespace ICSharpCode.AvalonEdit.Editing { /// <summary> /// Handles selection of text using the mouse. /// </summary> sealed class SelectionMouseHandler : ITextAreaInputHandler { #region enum SelectionMode enum SelectionMode { /// <summary> /// no selection (no mouse button down) /// </summary> None, /// <summary> /// left mouse button down on selection, might be normal click /// or might be drag'n'drop /// </summary> PossibleDragStart, /// <summary> /// dragging text /// </summary> Drag, /// <summary> /// normal selection (click+drag) /// </summary> Normal, /// <summary> /// whole-word selection (double click+drag or ctrl+click+drag) /// </summary> WholeWord, /// <summary> /// whole-line selection (triple click+drag) /// </summary> WholeLine, /// <summary> /// rectangular selection (alt+click+drag) /// </summary> Rectangular } #endregion readonly TextArea textArea; SelectionMode mode; AnchorSegment startWord; Point possibleDragStartMousePos; #region Constructor + Attach + Detach public SelectionMouseHandler(TextArea textArea) { if (textArea == null) throw new ArgumentNullException("textArea"); this.textArea = textArea; } public TextArea TextArea { get { return textArea; } } public void Attach() { textArea.MouseLeftButtonDown += textArea_MouseLeftButtonDown; textArea.MouseMove += textArea_MouseMove; textArea.MouseLeftButtonUp += 
textArea_MouseLeftButtonUp; textArea.QueryCursor += textArea_QueryCursor; textArea.OptionChanged += textArea_OptionChanged; enableTextDragDrop = textArea.Options.EnableTextDragDrop; if (enableTextDragDrop) { AttachDragDrop(); } } public void Detach() { mode = SelectionMode.None; textArea.MouseLeftButtonDown -= textArea_MouseLeftButtonDown; textArea.MouseMove -= textArea_MouseMove; textArea.MouseLeftButtonUp -= textArea_MouseLeftButtonUp; textArea.QueryCursor -= textArea_QueryCursor; textArea.OptionChanged -= textArea_OptionChanged; if (enableTextDragDrop) { DetachDragDrop(); } } void AttachDragDrop() { textArea.AllowDrop = true; textArea.GiveFeedback += textArea_GiveFeedback; textArea.QueryContinueDrag += textArea_QueryContinueDrag; textArea.DragEnter += textArea_DragEnter; textArea.DragOver += textArea_DragOver; textArea.DragLeave += textArea_DragLeave; textArea.Drop += textArea_Drop; } void DetachDragDrop() { textArea.AllowDrop = false; textArea.GiveFeedback -= textArea_GiveFeedback; textArea.QueryContinueDrag -= textArea_QueryContinueDrag; textArea.DragEnter -= textArea_DragEnter; textArea.DragOver -= textArea_DragOver; textArea.DragLeave -= textArea_DragLeave; textArea.Drop -= textArea_Drop; } bool enableTextDragDrop; void textArea_OptionChanged(object sender, PropertyChangedEventArgs e) { bool newEnableTextDragDrop = textArea.Options.EnableTextDragDrop; if (newEnableTextDragDrop != enableTextDragDrop) { enableTextDragDrop = newEnableTextDragDrop; if (newEnableTextDragDrop) AttachDragDrop(); else DetachDragDrop(); } } #endregion #region Dropping text [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1031:DoNotCatchGeneralExceptionTypes")] void textArea_DragEnter(object sender, DragEventArgs e) { try { e.Effects = GetEffect(e); textArea.Caret.Show(); } catch (Exception ex) { OnDragException(ex); } } [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1031:DoNotCatchGeneralExceptionTypes")] void textArea_DragOver(object 
sender, DragEventArgs e) { try { e.Effects = GetEffect(e); } catch (Exception ex) { OnDragException(ex); } } DragDropEffects GetEffect(DragEventArgs e) { if (e.Data.GetDataPresent(DataFormats.UnicodeText, true)) { e.Handled = true; int visualColumn; int offset = GetOffsetFromMousePosition(e.GetPosition(textArea.TextView), out visualColumn); if (offset >= 0) { textArea.Caret.Position = new TextViewPosition(textArea.Document.GetLocation(offset), visualColumn); textArea.Caret.DesiredXPos = double.NaN; if (textArea.ReadOnlySectionProvider.CanInsert(offset)) { if ((e.AllowedEffects & DragDropEffects.Move) == DragDropEffects.Move && (e.KeyStates & DragDropKeyStates.ControlKey) != DragDropKeyStates.ControlKey) { return DragDropEffects.Move; } else { return e.AllowedEffects & DragDropEffects.Copy; } } } } return DragDropEffects.None; } [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1031:DoNotCatchGeneralExceptionTypes")] void textArea_DragLeave(object sender, DragEventArgs e) { try { e.Handled = true; if (!textArea.IsKeyboardFocusWithin) textArea.Caret.Hide(); } catch (Exception ex) { OnDragException(ex); } } [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1031:DoNotCatchGeneralExceptionTypes")] void textArea_Drop(object sender, DragEventArgs e) { try { DragDropEffects effect = GetEffect(e); e.Effects = effect; if (effect != DragDropEffects.None) { string text = e.Data.GetData(DataFormats.UnicodeText, true) as string; if (text != null) { int start = textArea.Caret.Offset; if (mode == SelectionMode.Drag && textArea.Selection.Contains(start)) { Debug.WriteLine("Drop: did not drop: drop target is inside selection"); e.Effects = DragDropEffects.None; } else { Debug.WriteLine("Drop: insert at " + start); bool rectangular = e.Data.GetDataPresent(RectangleSelection.RectangularSelectionDataType); string newLine = TextUtilities.GetNewLineFromDocument(textArea.Document, textArea.Caret.Line); text = 
TextUtilities.NormalizeNewLines(text, newLine); // Mark the undo group with the currentDragDescriptor, if the drag // is originating from the same control. This allows combining // the undo groups when text is moved. textArea.Document.UndoStack.StartUndoGroup(this.currentDragDescriptor); try { if (rectangular && RectangleSelection.PerformRectangularPaste(textArea, textArea.Caret.Position, text, true)) { } else { textArea.Document.Insert(start, text); textArea.Selection = Selection.Create(textArea, start, start + text.Length); } } finally { textArea.Document.UndoStack.EndUndoGroup(); } } e.Handled = true; } } } catch (Exception ex) { OnDragException(ex); } } void OnDragException(Exception ex) { // WPF swallows exceptions during drag'n'drop or reports them incorrectly, so // we re-throw them later to allow the application's unhandled exception handler // to catch them textArea.Dispatcher.BeginInvoke( DispatcherPriority.Normal, new Action(delegate { throw new DragDropException("Exception during drag'n'drop", ex); })); } [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1031:DoNotCatchGeneralExceptionTypes")] void textArea_GiveFeedback(object sender, GiveFeedbackEventArgs e) { try { e.UseDefaultCursors = true; e.Handled = true; } catch (Exception ex) { OnDragException(ex); } } [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1031:DoNotCatchGeneralExceptionTypes")] void textArea_QueryContinueDrag(object sender, QueryContinueDragEventArgs e) { try { if (e.EscapePressed) { e.Action = DragAction.Cancel; } else if ((e.KeyStates & DragDropKeyStates.LeftMouseButton) != DragDropKeyStates.LeftMouseButton) { e.Action = DragAction.Drop; } else { e.Action = DragAction.Continue; } e.Handled = true; } catch (Exception ex) { OnDragException(ex); } } #endregion #region Start Drag object currentDragDescriptor; void StartDrag() { // prevent nested StartDrag calls mode = SelectionMode.Drag; // mouse capture and Drag'n'Drop doesn't mix 
textArea.ReleaseMouseCapture(); DataObject dataObject = textArea.Selection.CreateDataObject(textArea); DragDropEffects allowedEffects = DragDropEffects.All; var deleteOnMove = textArea.Selection.Segments.Select(s => new AnchorSegment(textArea.Document, s)).ToList(); foreach (ISegment s in deleteOnMove) { ISegment[] result = textArea.GetDeletableSegments(s); if (result.Length != 1 || result[0].Offset != s.Offset || result[0].EndOffset != s.EndOffset) { allowedEffects &= ~DragDropEffects.Move; } } object dragDescriptor = new object(); this.currentDragDescriptor = dragDescriptor; DragDropEffects resultEffect; using (textArea.AllowCaretOutsideSelection()) { var oldCaretPosition = textArea.Caret.Position; try { Debug.WriteLine("DoDragDrop with allowedEffects=" + allowedEffects); resultEffect = DragDrop.DoDragDrop(textArea, dataObject, allowedEffects); Debug.WriteLine("DoDragDrop done, resultEffect=" + resultEffect); } catch (COMException ex) { // ignore COM errors - don't crash on badly implemented drop targets Debug.WriteLine("DoDragDrop failed: " + ex.ToString()); return; } if (resultEffect == DragDropEffects.None) { // reset caret if drag was aborted textArea.Caret.Position = oldCaretPosition; } } this.currentDragDescriptor = null; if (deleteOnMove != null && resultEffect == DragDropEffects.Move && (allowedEffects & DragDropEffects.Move) == DragDropEffects.Move) { bool draggedInsideSingleDocument = (dragDescriptor == textArea.Document.UndoStack.LastGroupDescriptor); if (draggedInsideSingleDocument) textArea.Document.UndoStack.StartContinuedUndoGroup(null); textArea.Document.BeginUpdate(); try { foreach (ISegment s in deleteOnMove) { textArea.Document.Remove(s.Offset, s.Length); } } finally { textArea.Document.EndUpdate(); if (draggedInsideSingleDocument) textArea.Document.UndoStack.EndUndoGroup(); } } } #endregion #region QueryCursor // provide the IBeam Cursor for the text area void textArea_QueryCursor(object sender, QueryCursorEventArgs e) { if (!e.Handled) { if 
(mode != SelectionMode.None || !enableTextDragDrop) { e.Cursor = Cursors.IBeam; e.Handled = true; } else if (textArea.TextView.VisualLinesValid) { // Only query the cursor if the visual lines are valid. // If they are invalid, the cursor will get re-queried when the visual lines // get refreshed. Point p = e.GetPosition(textArea.TextView); if (p.X >= 0 && p.Y >= 0 && p.X <= textArea.TextView.ActualWidth && p.Y <= textArea.TextView.ActualHeight) { int visualColumn; int offset = GetOffsetFromMousePosition(e, out visualColumn); if (textArea.Selection.Contains(offset)) e.Cursor = Cursors.Arrow; else e.Cursor = Cursors.IBeam; e.Handled = true; } } } } #endregion #region LeftButtonDown void textArea_MouseLeftButtonDown(object sender, MouseButtonEventArgs e) { mode = SelectionMode.None; if (!e.Handled && e.ChangedButton == MouseButton.Left) { ModifierKeys modifiers = Keyboard.Modifiers; bool shift = (modifiers & ModifierKeys.Shift) == ModifierKeys.Shift; if (enableTextDragDrop && e.ClickCount == 1 && !shift) { int visualColumn; int offset = GetOffsetFromMousePosition(e, out visualColumn); if (textArea.Selection.Contains(offset)) { if (textArea.CaptureMouse()) { mode = SelectionMode.PossibleDragStart; possibleDragStartMousePos = e.GetPosition(textArea); } e.Handled = true; return; } } var oldPosition = textArea.Caret.Position; SetCaretOffsetToMousePosition(e); if (!shift) { textArea.ClearSelection(); } if (textArea.CaptureMouse()) { if ((modifiers & ModifierKeys.Alt) == ModifierKeys.Alt && textArea.Options.EnableRectangularSelection) { mode = SelectionMode.Rectangular; if (shift && textArea.Selection is RectangleSelection) { textArea.Selection = textArea.Selection.StartSelectionOrSetEndpoint(oldPosition, textArea.Caret.Position); } } else if (e.ClickCount == 1 && ((modifiers & ModifierKeys.Control) == 0)) { mode = SelectionMode.Normal; if (shift && !(textArea.Selection is RectangleSelection)) { textArea.Selection = 
textArea.Selection.StartSelectionOrSetEndpoint(oldPosition, textArea.Caret.Position); } } else { SimpleSegment startWord; if (e.ClickCount == 3) { mode = SelectionMode.WholeLine; startWord = GetLineAtMousePosition(e); } else { mode = SelectionMode.WholeWord; startWord = GetWordAtMousePosition(e); } if (startWord == SimpleSegment.Invalid) { mode = SelectionMode.None; textArea.ReleaseMouseCapture(); return; } if (shift && !textArea.Selection.IsEmpty) { if (startWord.Offset < textArea.Selection.SurroundingSegment.Offset) { textArea.Selection = textArea.Selection.SetEndpoint(new TextViewPosition(textArea.Document.GetLocation(startWord.Offset))); } else if (startWord.EndOffset > textArea.Selection.SurroundingSegment.EndOffset) { textArea.Selection = textArea.Selection.SetEndpoint(new TextViewPosition(textArea.Document.GetLocation(startWord.EndOffset))); } this.startWord = new AnchorSegment(textArea.Document, textArea.Selection.SurroundingSegment); } else { textArea.Selection = Selection.Create(textArea, startWord.Offset, startWord.EndOffset); this.startWord = new AnchorSegment(textArea.Document, startWord.Offset, startWord.Length); } } } } e.Handled = true; } #endregion #region Mouse Position <-> Text coordinates SimpleSegment GetWordAtMousePosition(MouseEventArgs e) { TextView textView = textArea.TextView; if (textView == null) return SimpleSegment.Invalid; Point pos = e.GetPosition(textView); if (pos.Y < 0) pos.Y = 0; if (pos.Y > textView.ActualHeight) pos.Y = textView.ActualHeight; pos += textView.ScrollOffset; VisualLine line = textView.GetVisualLineFromVisualTop(pos.Y); if (line != null) { int visualColumn = line.GetVisualColumn(pos, textArea.Selection.EnableVirtualSpace); int wordStartVC = line.GetNextCaretPosition(visualColumn + 1, LogicalDirection.Backward, CaretPositioningMode.WordStartOrSymbol, textArea.Selection.EnableVirtualSpace); if (wordStartVC == -1) wordStartVC = 0; int wordEndVC = line.GetNextCaretPosition(wordStartVC, LogicalDirection.Forward, 
CaretPositioningMode.WordBorderOrSymbol, textArea.Selection.EnableVirtualSpace); if (wordEndVC == -1) wordEndVC = line.VisualLength; int relOffset = line.FirstDocumentLine.Offset; int wordStartOffset = line.GetRelativeOffset(wordStartVC) + relOffset; int wordEndOffset = line.GetRelativeOffset(wordEndVC) + relOffset; return new SimpleSegment(wordStartOffset, wordEndOffset - wordStartOffset); } else { return SimpleSegment.Invalid; } } SimpleSegment GetLineAtMousePosition(MouseEventArgs e) { TextView textView = textArea.TextView; if (textView == null) return SimpleSegment.Invalid; Point pos = e.GetPosition(textView); if (pos.Y < 0) pos.Y = 0; if (pos.Y > textView.ActualHeight) pos.Y = textView.ActualHeight; pos += textView.ScrollOffset; VisualLine line = textView.GetVisualLineFromVisualTop(pos.Y); if (line != null) { return new SimpleSegment(line.StartOffset, line.LastDocumentLine.EndOffset - line.StartOffset); } else { return SimpleSegment.Invalid; } } int GetOffsetFromMousePosition(MouseEventArgs e, out int visualColumn) { return GetOffsetFromMousePosition(e.GetPosition(textArea.TextView), out visualColumn); } int GetOffsetFromMousePosition(Point positionRelativeToTextView, out int visualColumn) { visualColumn = 0; TextView textView = textArea.TextView; Point pos = positionRelativeToTextView; if (pos.Y < 0) pos.Y = 0; if (pos.Y > textView.ActualHeight) pos.Y = textView.ActualHeight; pos += textView.ScrollOffset; if (pos.Y > textView.DocumentHeight) pos.Y = textView.DocumentHeight - ExtensionMethods.Epsilon; VisualLine line = textView.GetVisualLineFromVisualTop(pos.Y); if (line != null) { visualColumn = line.GetVisualColumn(pos, textArea.Selection.EnableVirtualSpace); return line.GetRelativeOffset(visualColumn) + line.FirstDocumentLine.Offset; } return -1; } int GetOffsetFromMousePositionFirstTextLineOnly(Point positionRelativeToTextView, out int visualColumn) { visualColumn = 0; TextView textView = textArea.TextView; Point pos = positionRelativeToTextView; if (pos.Y 
< 0) pos.Y = 0; if (pos.Y > textView.ActualHeight) pos.Y = textView.ActualHeight; pos += textView.ScrollOffset; if (pos.Y > textView.DocumentHeight) pos.Y = textView.DocumentHeight - ExtensionMethods.Epsilon; VisualLine line = textView.GetVisualLineFromVisualTop(pos.Y); if (line != null) { visualColumn = line.GetVisualColumn(line.TextLines.First(), pos.X, textArea.Selection.EnableVirtualSpace); return line.GetRelativeOffset(visualColumn) + line.FirstDocumentLine.Offset; } return -1; } #endregion #region MouseMove void textArea_MouseMove(object sender, MouseEventArgs e) { if (e.Handled) return; if (mode == SelectionMode.Normal || mode == SelectionMode.WholeWord || mode == SelectionMode.WholeLine || mode == SelectionMode.Rectangular) { e.Handled = true; if (textArea.TextView.VisualLinesValid) { // If the visual lines are not valid, don't extend the selection. // Extending the selection forces a VisualLine refresh, and it is sufficient // to do that on MouseUp, we don't have to do it every MouseMove. 
ExtendSelectionToMouse(e); } } else if (mode == SelectionMode.PossibleDragStart) { e.Handled = true; Vector mouseMovement = e.GetPosition(textArea) - possibleDragStartMousePos; if (Math.Abs(mouseMovement.X) > SystemParameters.MinimumHorizontalDragDistance || Math.Abs(mouseMovement.Y) > SystemParameters.MinimumVerticalDragDistance) { StartDrag(); } } } #endregion #region ExtendSelection void SetCaretOffsetToMousePosition(MouseEventArgs e) { SetCaretOffsetToMousePosition(e, null); } void SetCaretOffsetToMousePosition(MouseEventArgs e, ISegment allowedSegment) { int visualColumn; int offset; if (mode == SelectionMode.Rectangular) offset = GetOffsetFromMousePositionFirstTextLineOnly(e.GetPosition(textArea.TextView), out visualColumn); else offset = GetOffsetFromMousePosition(e, out visualColumn); if (allowedSegment != null) { offset = offset.CoerceValue(allowedSegment.Offset, allowedSegment.EndOffset); } if (offset >= 0) { textArea.Caret.Position = new TextViewPosition(textArea.Document.GetLocation(offset), visualColumn); textArea.Caret.DesiredXPos = double.NaN; } } void ExtendSelectionToMouse(MouseEventArgs e) { TextViewPosition oldPosition = textArea.Caret.Position; if (mode == SelectionMode.Normal || mode == SelectionMode.Rectangular) { SetCaretOffsetToMousePosition(e); if (mode == SelectionMode.Normal && textArea.Selection is RectangleSelection) textArea.Selection = new SimpleSelection(textArea, oldPosition, textArea.Caret.Position); else if (mode == SelectionMode.Rectangular && !(textArea.Selection is RectangleSelection)) textArea.Selection = new RectangleSelection(textArea, oldPosition, textArea.Caret.Position); else textArea.Selection = textArea.Selection.StartSelectionOrSetEndpoint(oldPosition, textArea.Caret.Position); } else if (mode == SelectionMode.WholeWord || mode == SelectionMode.WholeLine) { var newWord = (mode == SelectionMode.WholeLine) ? 
GetLineAtMousePosition(e) : GetWordAtMousePosition(e); if (newWord != SimpleSegment.Invalid) { textArea.Selection = Selection.Create(textArea, Math.Min(newWord.Offset, startWord.Offset), Math.Max(newWord.EndOffset, startWord.EndOffset)); // Set caret offset, but limit the caret to stay inside the selection. // in whole-word selection, it's otherwise possible that we get the caret outside the // selection - but the TextArea doesn't like that and will reset the selection, causing // flickering. SetCaretOffsetToMousePosition(e, textArea.Selection.SurroundingSegment); } } textArea.Caret.BringCaretToView(5.0); } #endregion #region MouseLeftButtonUp void textArea_MouseLeftButtonUp(object sender, MouseButtonEventArgs e) { if (mode == SelectionMode.None || e.Handled) return; e.Handled = true; if (mode == SelectionMode.PossibleDragStart) { // -> this was not a drag start (mouse didn't move after mousedown) SetCaretOffsetToMousePosition(e); textArea.ClearSelection(); } else if (mode == SelectionMode.Normal || mode == SelectionMode.WholeWord || mode == SelectionMode.WholeLine || mode == SelectionMode.Rectangular) { ExtendSelectionToMouse(e); } mode = SelectionMode.None; textArea.ReleaseMouseCapture(); } #endregion } }
apache-2.0
haoyanjun21/jstorm
jstorm-core/src/main/java/storm/trident/windowing/WindowsStateUpdater.java
3387
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * <p/> * http://www.apache.org/licenses/LICENSE-2.0 * <p/> * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package storm.trident.windowing; import java.util.List; import java.util.Map; import org.apache.commons.lang.IllegalClassException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.collect.Lists; import backtype.storm.topology.FailedException; import storm.trident.operation.TridentCollector; import storm.trident.operation.TridentOperationContext; import storm.trident.state.StateUpdater; import storm.trident.tuple.TridentTuple; /** * {@code StateUpdater<WindowState>} instance which removes successfully emitted triggers from store */ public class WindowsStateUpdater implements StateUpdater<WindowsState> { private static final Logger LOG = LoggerFactory.getLogger(WindowsStateUpdater.class); private final WindowsStoreFactory windowStoreFactory; private WindowsStore windowsStore; public WindowsStateUpdater(WindowsStoreFactory windowStoreFactory) { this.windowStoreFactory = windowStoreFactory; } @Override public void updateState(WindowsState state, List<TridentTuple> tuples, TridentCollector collector) { Long currentTxId = state.getCurrentTxId(); LOG.debug("Removing triggers using WindowStateUpdater, txnId: [{}] ", currentTxId); for (TridentTuple tuple : 
tuples) { try { Object fieldValue = tuple.getValueByField(WindowTridentProcessor.TRIGGER_FIELD_NAME); if(! (fieldValue instanceof WindowTridentProcessor.TriggerInfo)) { throw new IllegalClassException(WindowTridentProcessor.TriggerInfo.class, fieldValue.getClass()); } WindowTridentProcessor.TriggerInfo triggerInfo = (WindowTridentProcessor.TriggerInfo) fieldValue; String triggerCompletedKey = WindowTridentProcessor.getWindowTriggerInprocessIdPrefix(triggerInfo.windowTaskId)+currentTxId; LOG.debug("Removing trigger key [{}] and trigger completed key [{}] from store: [{}]", triggerInfo, triggerCompletedKey, windowsStore); windowsStore.removeAll(Lists.newArrayList(triggerInfo.generateTriggerKey(), triggerCompletedKey)); } catch (Exception ex) { LOG.warn(ex.getMessage()); collector.reportError(ex); throw new FailedException(ex); } } } @Override public void prepare(Map conf, TridentOperationContext context) { windowsStore = windowStoreFactory.create(); } @Override public void cleanup() { windowsStore.shutdown(); } }
apache-2.0
xingwu1/azure-sdk-for-node
test/services/table/tableservice-batch-tests.js
12037
// // Copyright (c) Microsoft and contributors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // limitations under the License. // var assert = require('assert'); // Test includes var testutil = require('../../util/util'); var tabletestutil = require('../../framework/table-test-utils'); // Lib includes var common = require('azure-common'); var storage = testutil.libRequire('services/legacyStorage'); var azureutil = common.util; var ServiceClient = common.ServiceClient; var TableQuery = storage.TableQuery; var Constants = common.Constants; var HttpConstants = Constants.HttpConstants; var tableNames = []; var tablePrefix = 'tablebatch'; var testPrefix = 'tableservice-batch-tests'; var tableService; var testUtil; suite('tableservice-batch-tests', function () { suiteSetup(function (done) { tableService = storage.createTableService() .withFilter(new common.ExponentialRetryPolicyFilter()); testUtil = tabletestutil.createTableTestUtils(tableService, testPrefix); testUtil.setupSuite(done); }); suiteTeardown(function (done) { testUtil.teardownSuite(done); }); setup(function (done) { testUtil.setupTest(done); }); teardown(function (done) { testUtil.teardownTest(done); }); test('QueryEntities_All', function (done) { var tableName = testutil.generateId(tablePrefix, tableNames, testUtil.isMocked); tableService.createTable(tableName, function (createError, table, createResponse) { assert.equal(createError, null); assert.notEqual(table, null); assert.ok(createResponse.isSuccessful); var 
entities = generateEntities(20); tableService.beginBatch(); entities.forEach(function (entity) { tableService.insertEntity(tableName, entity); }); tableService.commitBatch(function (batchError, performBatchOperationResponses, batchResponse) { assert.equal(batchError, null); assert.ok(batchResponse.isSuccessful); var tableQuery = TableQuery.select() .from(tableName); tableService.queryEntities(tableQuery, function (queryError, entries, entriesContinuation, queryResponse) { assert.equal(queryError, null); assert.ok(queryResponse.isSuccessful); if (entries) { assert.equal(entries.length, 20); } done(); }); }); }); }); test('QueryEntities_Single1', function (done) { var tableName = testutil.generateId(tablePrefix, tableNames, testUtil.isMocked); tableService.createTable(tableName, function (createError, table, createResponse) { assert.equal(createError, null); assert.notEqual(table, null); assert.ok(createResponse.isSuccessful); var entities = generateEntities(20); tableService.beginBatch(); entities.forEach(function (entity) { tableService.insertEntity(tableName, entity); }); tableService.commitBatch(function (batchError, performBatchOperationResponses, batchResponse) { assert.equal(batchError, null); assert.ok(batchResponse.isSuccessful); var tableQuery = TableQuery.select() .from(tableName) .whereKeys(entities[0].PartitionKey, entities[0].RowKey.toString()); tableService.queryEntities(tableQuery, function (queryError, entries, entriesContinuation, queryResponse) { assert.equal(queryError, null); assert.ok(queryResponse.isSuccessful); if (entries) { assert.equal(entries.length, 1); } done(); }); }); }); }); test('QueryEntities_Single2', function (done) { var tableName = testutil.generateId(tablePrefix, tableNames, testUtil.isMocked); tableService.createTable(tableName, function (createError, table, createResponse) { assert.equal(createError, null); assert.notEqual(table, null); assert.ok(createResponse.isSuccessful); var entities = generateEntities(20); 
tableService.beginBatch(); entities.forEach(function (entity) { tableService.insertEntity(tableName, entity); }); tableService.commitBatch(function (batchError, performBatchOperationResponses, batchResponse) { assert.equal(batchError, null); assert.ok(batchResponse.isSuccessful); tableService.queryEntity(tableName, entities[0].PartitionKey, entities[0].RowKey.toString(), function (queryError, entry, queryResponse) { assert.equal(queryError, null); assert.ok(queryResponse.isSuccessful); assert.notEqual(entry, null); assert.equal(entry.PartitionKey, entities[0].PartitionKey); assert.equal(entry.RowKey, entities[0].RowKey); done(); }); }); }); }); test('RetrieveEntities_TableQuery1', function (done) { var tableName = testutil.generateId(tablePrefix, tableNames, testUtil.isMocked); tableService.createTable(tableName, function (createError, table, createResponse) { assert.equal(createError, null); assert.notEqual(table, null); assert.ok(createResponse.isSuccessful); var entities = generateEntities(20); tableService.beginBatch(); entities.forEach(function (entity) { tableService.insertEntity(tableName, entity); }); tableService.commitBatch(function (batchError, performBatchOperationResponses, batchResponse) { assert.equal(batchError, null); assert.ok(batchResponse.isSuccessful); var tableQuery = TableQuery.select() .from(tableName) .where('address eq ?', entities[0].address) .and('RowKey eq ?', entities[0].RowKey.toString()); tableService.queryEntities(tableQuery, function (queryError, entries, entriesContinuation, queryResponse) { assert.equal(queryError, null); assert.notEqual(entries, null); assert.ok(queryResponse.isSuccessful); if (entries) { assert.equal(entries.length, 1); if (entries[0]) { assert.equal(entries[0].address, entities[0].address); assert.equal(entries[0].RowKey, entities[0].RowKey); assert.equal(entries[0].PartitionKey, entities[0].PartitionKey); } } done(); }); }); }); }); test('RetrieveEntities_TableQuery2', function (done) { var tableName = 
testutil.generateId(tablePrefix, tableNames, testUtil.isMocked); tableService.createTable(tableName, function (createError, table, createResponse) { assert.equal(createError, null); assert.notEqual(table, null); assert.ok(createResponse.isSuccessful); var entities = generateEntities(20); // Make sure the address for the first entity is different than the remaining entities entities.forEach(function (entity) { entity.address = 'other'; }); entities[0].address = 'unique'; tableService.beginBatch(); entities.forEach(function (entity) { tableService.insertEntity(tableName, entity); }); tableService.commitBatch(function (batchError, performBatchOperationResponses, batchResponse) { assert.equal(batchError, null); assert.notEqual(batchResponse, null); if (batchResponse) { assert.ok(batchResponse.isSuccessful); } var tableQuery = TableQuery.select() .from(tableName) .where('address eq ?', entities[0].address) .and('PartitionKey eq ?', entities[0].PartitionKey); tableService.queryEntities(tableQuery, function (queryError, entries, entriesContinuation, queryResponse) { assert.equal(queryError, null); assert.notEqual(entries, null); if (entries) { assert.equal(entries.length, 1); if (entries[0]) { assert.equal(entries[0].address, entities[0].address); assert.equal(entries[0].RowKey, entities[0].RowKey); assert.equal(entries[0].PartitionKey, entities[0].PartitionKey); } } assert.notEqual(queryResponse, null); if (queryResponse) { assert.ok(queryResponse.isSuccessful); } done(); }); }); }); }); test('RetrieveEntities_Top', function (done) { var tableName = testutil.generateId(tablePrefix, tableNames, testUtil.isMocked); tableService.createTable(tableName, function (createError, table, createResponse) { assert.equal(createError, null); assert.notEqual(table, null); assert.notEqual(createResponse, null); if (createResponse) { assert.ok(createResponse.isSuccessful); } var entities = generateEntities(20); tableService.beginBatch(); entities.forEach(function (entity) { 
tableService.insertEntity(tableName, entity); }); tableService.commitBatch(function (batchError, performBatchOperationResponses, batchResponse) { assert.equal(batchError, null); assert.notEqual(batchResponse, null); if (batchResponse) { assert.ok(batchResponse.isSuccessful); } var tableQuery = TableQuery.select() .from(tableName) .top(4); tableService.queryEntities(tableQuery, function (queryError, entries, entriesContinuation, queryResponse) { assert.equal(queryError, null); assert.notEqual(entries, null); if (entries) { assert.equal(entries.length, 4); } assert.notEqual(queryResponse, null); if (queryResponse) { assert.ok(queryResponse.isSuccessful); } done(); }); }); }); }); // TODO: fix /* test('FailBatch', function (done) { var tableName = testutil.generateId(tablePrefix, tableNames, testUtil.isMocked); tableService.createTable(tableName, function (createError, table, createResponse) { assert.equal(createError, null); assert.notEqual(table, null); assert.notEqual(createResponse, null); if (createResponse) { assert.ok(createResponse.isSuccessful); } var simpleEntity = { PartitionKey: 'part', RowKey: 1, MyField: 'value' }; tableService.beginBatch(); tableService.insertEntity(tableName, simpleEntity); // Doing an update on the same entity within the same batch should make the batch fail simpleEntity.MyField = 'othervalue'; tableService.updateEntity(tableName, simpleEntity); tableService.commitBatch(function (batchError, performBatchOperationResponses, batchResponse) { assert.equal(batchError, null); assert.notEqual(performBatchOperationResponses, null); assert.equal(performBatchOperationResponses.length, 1); assert.notEqual(performBatchOperationResponses[0].error, null); assert.equal(performBatchOperationResponses[0].error.code, Constants.StorageErrorCodeStrings.RESOURCE_NOT_FOUND); assert.notEqual(batchResponse, null); if (batchResponse) { assert.ok(batchResponse.isSuccessful); } done(); }); }); });*/ }); function generateEntities(count) { var entities = []; 
for(var i = 0 ; i < count ; i++) { var entity = { PartitionKey: 'partition1', RowKey: (i + 1).toString(), address: 'street' + (i + 1) }; entities.push(entity); } return entities; };
apache-2.0
harjup/WizardBroadcast
Assets/Vendor/JsonDotNet/Source/WinRT/RT_NullValueHandling.cs
2066
#if UNITY_WINRT && !UNITY_EDITOR && !UNITY_WP8
#region License
// Copyright (c) 2007 James Newton-King
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
#endregion

// WinRT-only copy of the NullValueHandling enum (compiled only for Unity
// Windows Store builds, see the #if guard above).
namespace Newtonsoft.Json
{
  /// <summary>
  /// Specifies null value handling options for the <see cref="JsonSerializer"/>.
  /// </summary>
  /// <example>
  /// <code lang="cs" source="..\Src\Newtonsoft.Json.Tests\Documentation\SerializationTests.cs" region="ReducingSerializedJsonSizeNullValueHandlingObject" title="NullValueHandling Class" />
  /// <code lang="cs" source="..\Src\Newtonsoft.Json.Tests\Documentation\SerializationTests.cs" region="ReducingSerializedJsonSizeNullValueHandlingExample" title="NullValueHandling Ignore Example" />
  /// </example>
  public enum NullValueHandling
  {
    /// <summary>
    /// Include null values when serializing and deserializing objects.
    /// </summary>
    Include = 0,

    /// <summary>
    /// Ignore null values when serializing and deserializing objects.
    /// </summary>
    Ignore = 1
  }
}
#endif
apache-2.0
gabelula/b-counted
.google_appengine/google/appengine/api/images/__init__.py
15334
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Image manipulation API. Classes defined in this module: Image: class used to encapsulate image information and transformations for that image. The current manipulations that are available are resize, rotate, horizontal_flip, vertical_flip, crop and im_feeling_lucky. It should be noted that each transform can only be called once per image per execute_transforms() call. """ from google.appengine.api import apiproxy_stub_map from google.appengine.api.images import images_service_pb from google.appengine.runtime import apiproxy_errors JPEG = images_service_pb.OutputSettings.JPEG PNG = images_service_pb.OutputSettings.PNG OUTPUT_ENCODING_TYPES = frozenset([JPEG, PNG]) class Error(Exception): """Base error class for this module.""" class TransformationError(Error): """Error while attempting to transform the image.""" class BadRequestError(Error): """The parameters given had something wrong with them.""" class NotImageError(Error): """The image data given is not recognizable as an image.""" class BadImageError(Error): """The image data given is corrupt.""" class LargeImageError(Error): """The image data given is too large to process.""" class Image(object): """Image object to manipulate.""" def __init__(self, image_data): """Constructor. Args: image_data: str, image data in string form. Raises: NotImageError if the given data is empty. 
""" if not image_data: raise NotImageError("Empty image data.") self._image_data = image_data self._transforms = [] self._transform_map = {} def _check_transform_limits(self, transform): """Ensure some simple limits on the number of transforms allowed. Args: transform: images_service_pb.ImagesServiceTransform, enum of the trasnform called. Raises: BadRequestError if the transform has already been requested for the image. """ if not images_service_pb.ImagesServiceTransform.Type_Name(transform): raise BadRequestError("'%s' is not a valid transform." % transform) if transform in self._transform_map: transform_name = images_service_pb.ImagesServiceTransform.Type_Name( transform) raise BadRequestError("A '%s' transform has already been " "requested on this image." % transform_name) self._transform_map[transform] = True def resize(self, width=0, height=0): """Resize the image maintaining the aspect ratio. If both width and height are specified, the more restricting of the two values will be used when resizing the photo. The maximum dimension allowed for both width and height is 4000 pixels. Args: width: int, width (in pixels) to change the image width to. height: int, height (in pixels) to change the image height to. Raises: TypeError when width or height is not either 'int' or 'long' types. BadRequestError when there is something wrong with the given height or width or if a Resize has already been requested on this image. 
""" if (not isinstance(width, (int, long)) or not isinstance(height, (int, long))): raise TypeError("Width and height must be integers.") if width < 0 or height < 0: raise BadRequestError("Width and height must be >= 0.") if not width and not height: raise BadRequestError("At least one of width or height must be > 0.") if width > 4000 or height > 4000: raise BadRequestError("Both width and height must be < 4000.") self._check_transform_limits( images_service_pb.ImagesServiceTransform.RESIZE) transform = images_service_pb.Transform() transform.set_width(width) transform.set_height(height) self._transforms.append(transform) def rotate(self, degrees): """Rotate an image a given number of degrees clockwise. Args: degrees: int, must be a multiple of 90. Raises: TypeError when degrees is not either 'int' or 'long' types. BadRequestError when there is something wrong with the given degrees or if a Rotate trasnform has already been requested. """ if not isinstance(degrees, (int, long)): raise TypeError("Degrees must be integers.") if degrees % 90 != 0: raise BadRequestError("degrees argument must be multiple of 90.") degrees = degrees % 360 self._check_transform_limits( images_service_pb.ImagesServiceTransform.ROTATE) transform = images_service_pb.Transform() transform.set_rotate(degrees) self._transforms.append(transform) def horizontal_flip(self): """Flip the image horizontally. Raises: BadRequestError if a HorizontalFlip has already been requested on the image. """ self._check_transform_limits( images_service_pb.ImagesServiceTransform.HORIZONTAL_FLIP) transform = images_service_pb.Transform() transform.set_horizontal_flip(True) self._transforms.append(transform) def vertical_flip(self): """Flip the image vertically. Raises: BadRequestError if a HorizontalFlip has already been requested on the image. 
""" self._check_transform_limits( images_service_pb.ImagesServiceTransform.VERTICAL_FLIP) transform = images_service_pb.Transform() transform.set_vertical_flip(True) self._transforms.append(transform) def _validate_crop_arg(self, val, val_name): """Validate the given value of a Crop() method argument. Args: val: float, value of the argument. val_name: str, name of the argument. Raises: TypeError if the args are not of type 'float'. BadRequestError when there is something wrong with the given bounding box. """ if type(val) != float: raise TypeError("arg '%s' must be of type 'float'." % val_name) if not (0 <= val <= 1.0): raise BadRequestError("arg '%s' must be between 0.0 and 1.0 " "(inclusive)" % val_name) def crop(self, left_x, top_y, right_x, bottom_y): """Crop the image. The four arguments are the scaling numbers to describe the bounding box which will crop the image. The upper left point of the bounding box will be at (left_x*image_width, top_y*image_height) the lower right point will be at (right_x*image_width, bottom_y*image_height). Args: left_x: float value between 0.0 and 1.0 (inclusive). top_y: float value between 0.0 and 1.0 (inclusive). right_x: float value between 0.0 and 1.0 (inclusive). bottom_y: float value between 0.0 and 1.0 (inclusive). Raises: TypeError if the args are not of type 'float'. BadRequestError when there is something wrong with the given bounding box or if there has already been a crop transform requested for this image. 
""" self._validate_crop_arg(left_x, "left_x") self._validate_crop_arg(top_y, "top_y") self._validate_crop_arg(right_x, "right_x") self._validate_crop_arg(bottom_y, "bottom_y") if left_x >= right_x: raise BadRequestError("left_x must be less than right_x") if top_y >= bottom_y: raise BadRequestError("top_y must be less than bottom_y") self._check_transform_limits(images_service_pb.ImagesServiceTransform.CROP) transform = images_service_pb.Transform() transform.set_crop_left_x(left_x) transform.set_crop_top_y(top_y) transform.set_crop_right_x(right_x) transform.set_crop_bottom_y(bottom_y) self._transforms.append(transform) def im_feeling_lucky(self): """Automatically adjust image contrast and color levels. This is similar to the "I'm Feeling Lucky" button in Picasa. Raises: BadRequestError if this transform has already been requested for this image. """ self._check_transform_limits( images_service_pb.ImagesServiceTransform.IM_FEELING_LUCKY) transform = images_service_pb.Transform() transform.set_autolevels(True) self._transforms.append(transform) def execute_transforms(self, output_encoding=PNG): """Perform transformations on given image. Args: output_encoding: A value from OUTPUT_ENCODING_TYPES. Returns: str, image data after the transformations have been performed on it. Raises: BadRequestError when there is something wrong with the request specifications. NotImageError when the image data given is not an image. BadImageError when the image data given is corrupt. LargeImageError when the image data given is too large to process. TransformtionError when something errors during image manipulation. Error when something unknown, but bad, happens. 
""" if output_encoding not in OUTPUT_ENCODING_TYPES: raise BadRequestError("Output encoding type not in recognized set " "%s" % OUTPUT_ENCODING_TYPES) if not self._transforms: raise BadRequestError("Must specify at least one transformation.") request = images_service_pb.ImagesTransformRequest() response = images_service_pb.ImagesTransformResponse() request.mutable_image().set_content(self._image_data) for transform in self._transforms: request.add_transform().CopyFrom(transform) request.mutable_output().set_mime_type(output_encoding) try: apiproxy_stub_map.MakeSyncCall("images", "Transform", request, response) except apiproxy_errors.ApplicationError, e: if (e.application_error == images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA): raise BadRequestError() elif (e.application_error == images_service_pb.ImagesServiceError.NOT_IMAGE): raise NotImageError() elif (e.application_error == images_service_pb.ImagesServiceError.BAD_IMAGE_DATA): raise BadImageError() elif (e.application_error == images_service_pb.ImagesServiceError.IMAGE_TOO_LARGE): raise LargeImageError() elif (e.application_error == images_service_pb.ImagesServiceError.UNSPECIFIED_ERROR): raise TransformationError() else: raise Error() self._image_data = response.image().content() self._transforms = [] self._transform_map.clear() return self._image_data def resize(image_data, width=0, height=0, output_encoding=PNG): """Resize a given image file maintaining the aspect ratio. If both width and height are specified, the more restricting of the two values will be used when resizing the photo. The maximum dimension allowed for both width and height is 4000 pixels. Args: image_data: str, source image data. width: int, width (in pixels) to change the image width to. height: int, height (in pixels) to change the image height to. output_encoding: a value from OUTPUT_ENCODING_TYPES. Raises: TypeError when width or height not either 'int' or 'long' types. 
BadRequestError when there is something wrong with the given height or width or if a Resize has already been requested on this image. Error when something went wrong with the call. See Image.ExecuteTransforms for more details. """ image = Image(image_data) image.resize(width, height) return image.execute_transforms(output_encoding=output_encoding) def rotate(image_data, degrees, output_encoding=PNG): """Rotate a given image a given number of degrees clockwise. Args: image_data: str, source image data. degrees: value from ROTATE_DEGREE_VALUES. output_encoding: a value from OUTPUT_ENCODING_TYPES. Raises: TypeError when degrees is not either 'int' or 'long' types. BadRequestError when there is something wrong with the given degrees or if a Rotate trasnform has already been requested. Error when something went wrong with the call. See Image.ExecuteTransforms for more details. """ image = Image(image_data) image.rotate(degrees) return image.execute_transforms(output_encoding=output_encoding) def horizontal_flip(image_data, output_encoding=PNG): """Flip the image horizontally. Args: image_data: str, source image data. output_encoding: a value from OUTPUT_ENCODING_TYPES. Raises: Error when something went wrong with the call. See Image.ExecuteTransforms for more details. """ image = Image(image_data) image.horizontal_flip() return image.execute_transforms(output_encoding=output_encoding) def vertical_flip(image_data, output_encoding=PNG): """Flip the image vertically. Args: image_data: str, source image data. output_encoding: a value from OUTPUT_ENCODING_TYPES. Raises: Error when something went wrong with the call. See Image.ExecuteTransforms for more details. """ image = Image(image_data) image.vertical_flip() return image.execute_transforms(output_encoding=output_encoding) def crop(image_data, left_x, top_y, right_x, bottom_y, output_encoding=PNG): """Crop the given image. The four arguments are the scaling numbers to describe the bounding box which will crop the image. 
The upper left point of the bounding box will be at (left_x*image_width, top_y*image_height) the lower right point will be at (right_x*image_width, bottom_y*image_height). Args: image_data: str, source image data. left_x: float value between 0.0 and 1.0 (inclusive). top_y: float value between 0.0 and 1.0 (inclusive). right_x: float value between 0.0 and 1.0 (inclusive). bottom_y: float value between 0.0 and 1.0 (inclusive). output_encoding: a value from OUTPUT_ENCODING_TYPES. Raises: TypeError if the args are not of type 'float'. BadRequestError when there is something wrong with the given bounding box or if there has already been a crop transform requested for this image. Error when something went wrong with the call. See Image.ExecuteTransforms for more details. """ image = Image(image_data) image.crop(left_x, top_y, right_x, bottom_y) return image.execute_transforms(output_encoding=output_encoding) def im_feeling_lucky(image_data, output_encoding=PNG): """Automatically adjust image levels. This is similar to the "I'm Feeling Lucky" button in Picasa. Args: image_data: str, source image data. output_encoding: a value from OUTPUT_ENCODING_TYPES. Raises: Error when something went wrong with the call. See Image.ExecuteTransforms for more details. """ image = Image(image_data) image.im_feeling_lucky() return image.execute_transforms(output_encoding=output_encoding)
apache-2.0
enzovici/PoC-TruthChain
client/lib/js/plugins/ledger_utils.js
629
import BigchainDBLedgerPlugin from 'ilp-plugin-bigchaindb'; const connectToBigchainDBLedger = (account) => { const ledgerPlugin = new BigchainDBLedgerPlugin({ auth: { account: { id: account.vk, key: account.sk, uri: { api: `http://${account.ledger.api}`, ws: `ws://${account.ledger.ws}/users/${account.vk}` } } }, ledgerId: account.ledger.id }); ledgerPlugin.connect().catch(console.error); return ledgerPlugin; }; export default connectToBigchainDBLedger;
apache-2.0
andreeadanielachivu/PracticalTest02
proiect_colocviu/PracticalTest022/app/src/androidTest/java/ro/pub/cs/systems/pdsd/practicaltest02/ApplicationTest.java
381
package ro.pub.cs.systems.pdsd.practicaltest02; import android.app.Application; import android.test.ApplicationTestCase; /** * <a href="http://d.android.com/tools/testing/testing_android.html">Testing Fundamentals</a> */ public class ApplicationTest extends ApplicationTestCase<Application> { public ApplicationTest() { super(Application.class); } }
apache-2.0
neilconway/mesos
src/docker/executor.hpp
3476
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef __DOCKER_EXECUTOR_HPP__
#define __DOCKER_EXECUTOR_HPP__

#include <stdio.h>

#include <map>
#include <string>

#include <process/process.hpp>

#include <stout/option.hpp>

#include "logging/flags.hpp"

namespace mesos {
namespace internal {
namespace docker {

// Command-line flags accepted by the docker executor. Each flag is
// registered in the constructor; all of them are optional (Option<T>)
// except the boolean 'cgroups_enable_cfs', which defaults to false.
struct Flags : public virtual mesos::internal::logging::Flags
{
  Flags()
  {
    add(&Flags::container,
        "container",
        "The name of the docker container to run.");

    add(&Flags::docker,
        "docker",
        "The path to the docker executable.");

    add(&Flags::docker_socket,
        "docker_socket",
        "Resource used by the agent and the executor to provide CLI access\n"
        "to the Docker daemon. On Unix, this is typically a path to a\n"
        "socket, such as '/var/run/docker.sock'. On Windows this must be a\n"
        "named pipe, such as '//./pipe/docker_engine'.");

    add(&Flags::sandbox_directory,
        "sandbox_directory",
        "The path to the container sandbox holding stdout and stderr files\n"
        "into which docker container logs will be redirected.");

    add(&Flags::mapped_directory,
        "mapped_directory",
        "The sandbox directory path that is mapped in the docker container.");

    // TODO(alexr): Remove this after the deprecation cycle (started in 1.0).
    add(&Flags::stop_timeout,
        "stop_timeout",
        "The duration for docker to wait after stopping a running container\n"
        "before it kills that container. This flag is deprecated; use task's\n"
        "kill policy instead.");

    add(&Flags::launcher_dir,
        "launcher_dir",
        "Directory path of Mesos binaries. Mesos would find fetcher,\n"
        "containerizer and executor binary files under this directory.");

    add(&Flags::task_environment,
        "task_environment",
        "A JSON map of environment variables and values that should\n"
        "be passed into the task launched by this executor.");

    add(&Flags::cgroups_enable_cfs,
        "cgroups_enable_cfs",
        "Cgroups feature flag to enable hard limits on CPU resources\n"
        "via the CFS bandwidth limiting subfeature.\n",
        false);
  }

  Option<std::string> container;
  Option<std::string> docker;
  Option<std::string> docker_socket;
  Option<std::string> sandbox_directory;
  Option<std::string> mapped_directory;
  Option<std::string> launcher_dir;
  Option<std::string> task_environment;
  bool cgroups_enable_cfs;

  // TODO(alexr): Remove this after the deprecation cycle (started in 1.0).
  Option<Duration> stop_timeout;
};

} // namespace docker {
} // namespace internal {
} // namespace mesos {

#endif // __DOCKER_EXECUTOR_HPP__
apache-2.0
gzsombor/ranger
kms/src/main/java/org/apache/hadoop/crypto/key/VerifyIsDBMasterkeyCorrect.java
3064
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.crypto.key; import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.ranger.kms.dao.DaoManager; public class VerifyIsDBMasterkeyCorrect { RangerMasterKey rangerMasterKey; RangerKeyStore dbStore; DaoManager daoManager; public VerifyIsDBMasterkeyCorrect() throws Throwable { Configuration conf = RangerKeyStoreProvider.getDBKSConf(); RangerKMSDB rangerKMSDB = new RangerKMSDB(conf); daoManager = rangerKMSDB.getDaoManager(); dbStore = new RangerKeyStore(daoManager); } public static void main(String[] args) throws Throwable { if (args.length == 0) { System.err.println("Invalid number of parameters found."); System.exit(1); } try { String password = args[0]; if (password == null || password.trim().isEmpty()) { System.err.println("KMS Masterkey Password not provided."); System.exit(1); } new VerifyIsDBMasterkeyCorrect().verifyMasterkey(password); } catch (Exception e) { e.printStackTrace(); } } public void verifyMasterkey(String pass) { try { // Get Master Key from DB rangerMasterKey = new RangerMasterKey(daoManager); String masterKey = rangerMasterKey.getMasterKey(pass); if(masterKey == null){ // Master Key does not exists throw new 
IOException("Ranger MasterKey does not exists"); } dbStore.engineLoad(null, masterKey.toCharArray()); System.out.println("KMS keystore engine loaded successfully."); } catch (Throwable e) { throw new RuntimeException("Unable to load keystore engine with given password or Masterkey was tampered.", e); } } }
apache-2.0
KevinLoiseau/manageiq
app/helpers/toolbar_helper.rb
7349
# Rails view helper that renders toolbar definitions (arrays of button-prop
# hashes, see ApplicationHelper::Toolbar) into Bootstrap/PatternFly markup.
module ToolbarHelper
  # Public interface

  # Render a list of buttons (toolbar) to html
  #
  # Called directly when updating toolbars in an existing page.
  #
  # buttons_in: flat array of button prop hashes (or Toolbar::Custom objects);
  # it is split into groups at separators before rendering.
  def buttons_to_html(buttons_in)
    groups = split_to_groups(Array(buttons_in))
    groups.collect do |buttons|
      buttons = Array(buttons)
      # exceptional behavior for view toolbar view mode buttons
      view_buttons = buttons.first.present? && buttons.first[:name] =~ /^view_/
      if buttons.first.kind_of?(ApplicationHelper::Toolbar::Custom)
        # Custom toolbars carry their own pre-rendered content.
        buttons.first.content
      else
        cls = view_buttons ? 'toolbar-pf-view-selector ' : ''
        # Hide the whole group when every button in it is hidden.
        cls += 'hidden ' unless buttons.find { |button| !button[:hidden] }
        content_tag(:div, :class => "#{cls} form-group") do # form-group around each toolbar section
          if view_buttons
            view_mode_buttons(buttons)
          else
            normal_toolbar_buttons(buttons)
          end
        end
      end
    end.join('').html_safe
  end

  # Render a set of whole toolbars
  #
  # toolbars: hash-like of {div_id => toolbar_name}; a nil toolbar_name
  # produces an empty div placeholder.
  def render_toolbars(toolbars)
    toolbars.collect do |div_id, toolbar_name|
      content_tag(:div, :id => div_id) do # div for each toolbar
        buttons = toolbar_name ? build_toolbar(toolbar_name) : nil
        buttons_to_html(buttons)
      end
    end.join('').html_safe
  end

  # Internal stuff to generate html markup

  # Render a group of view toolbar buttons
  #
  def view_mode_buttons(buttons)
    content_tag(:ul, :class => 'list-inline') do
      buttons.collect do |button|
        toolbar_button_view(button)
      end.join('').html_safe
    end
  end

  # Render a group of normal toolbar buttons
  #
  def normal_toolbar_buttons(buttons)
    buttons.collect do |button|
      toolbar_top_button(button)
    end.join('').html_safe
  end

  # Split buttons to groups at separators
  #
  # Each separator (and each Custom toolbar / download drop-down) starts a
  # new group; the separator props themselves stay at the head of the group.
  def split_to_groups(buttons)
    buttons.slice_before do |props|
      props.kind_of?(ApplicationHelper::Toolbar::Custom) ||
        props[:type] == :separator ||
        props[:name] == 'download_choice' # exceptional behavior for view toolbar download drop down
    end.to_a
  end

  # Render toolbar top button.
  #
  # NOTE: the :separator branch intentionally renders nothing (returns nil);
  # separators only mark group boundaries in split_to_groups.
  def toolbar_top_button(props)
    case props[:type]
    when :buttonSelect   then toolbar_top_button_select(props)
    when :button         then toolbar_top_button_normal(props)
    when :buttonTwoState then toolbar_top_button_normal(props)
    when :separator
    else
      raise 'Invalid top button type.'
    end
  end

  # Render image/icon to go on a toolbar button
  #
  # Prefers a font icon (props[:icon]); falls back to enabled/disabled image
  # pair referenced via data attributes so JS can swap them.
  def toolbar_image(props)
    if props[:icon].present?
      content_tag(:i, '', :class => props[:icon], :style => props[:text].present? ? 'margin-right: 5px;' : '')
    else
      img = ActionController::Base.helpers.image_path("toolbars/#{props[:img]}")
      imgdis = ActionController::Base.helpers.image_path("toolbars/#{props[:imgdis]}")
      # t captures the enabled image path so :src and 'data-enabled' agree.
      tag(:img, :src => t = "#{img}", 'data-enabled' => t, 'data-disabled' => "#{imgdis}")
    end
  end

  # Render drop-down top button
  #
  # 'hidden' applies to the wrapping div; 'disabled' only to the toggle
  # button itself (cls is extended after the wrapper class is emitted).
  def toolbar_top_button_select(props)
    cls = props[:hidden] ? 'hidden ' : ''
    content_tag(:div, :class => "#{cls}btn-group dropdown") do
      cls += 'disabled ' unless props[:enabled]
      out = []
      out << content_tag(:button, prepare_tag_keys(props).update(
        :type          => "button",
        :class         => "#{cls}btn btn-default dropdown-toggle",
        'data-toggle'  => "dropdown",
      )) do
        (toolbar_image(props) +
          props.localized(:text) + "&nbsp;".html_safe +
          content_tag(:span, '', :class => "caret")).html_safe
      end
      out << content_tag(:ul, :class => 'dropdown-menu') do
        Array(props[:items]).collect do |button|
          toolbar_button(button)
        end.join('').html_safe
      end
      out.join('').html_safe
    end
  end

  # Render normal push top button
  #
  def toolbar_top_button_normal(props)
    cls = props[:hidden] ? 'hidden ' : ''
    cls += 'active ' if props[:selected] # for buttonTwoState only
    cls += 'disabled ' unless props[:enabled]
    content_tag(:button, prepare_tag_keys(props).update(
      :type  => "button",
      :class => "#{cls}btn btn-default")) do
      (toolbar_image(props) +
        props.localized(:text) + "&nbsp;".html_safe).html_safe
    end
  end

  # Render child button (in the drop-down)
  #
  def toolbar_button(props)
    case props[:type]
    when :button    then toolbar_button_normal(props)
    when :separator then toolbar_button_separator(props)
    else
      raise 'Invalid button type.'
    end
  end

  # Render separator in the drop down
  #
  def toolbar_button_separator(props)
    cls = props[:hidden] ? ' hidden' : ''
    content_tag(:div, '', :class => "divider #{cls}", :role => "presentation")
  end

  # Render normal push child button
  #
  def toolbar_button_normal(props)
    hidden = props[:hidden]
    cls = props[:enabled] ? '' : 'disabled '
    content_tag(:li, :class => cls + (hidden ? 'hidden' : '')) do
      content_tag(:a, prepare_data_keys(props)
                        .update(:href => '#')
                        .update(prepare_tag_keys(props))) do
        (toolbar_image(props) +
          props.localized(:text).html_safe)
      end
    end
  end

  # Render normal/twostate view button
  #
  # buttonTwoState maps :selected to the Bootstrap 'active' class; other
  # types only toggle 'disabled'.
  def toolbar_button_view(props)
    hidden = props[:hidden]
    cls = if props[:type] == :buttonTwoState
            props[:selected] ? 'active' : ''
          else
            props[:enabled] ? '' : 'disabled '
          end
    content_tag(:li, :class => cls + (hidden ? 'hidden' : '')) do
      content_tag(:a, prepare_data_keys(props)
                        .update(:href => '#')
                        .update(prepare_tag_keys(props))) do
        (toolbar_image(props) +
          props.localized(:text).html_safe)
      end
    end
  end

  # Get keys and values from tb button definition that map 1:1 to data-*
  # attributes in html
  #
  def data_hash_keys(props)
    %i(pressed popup window_url prompt explorer onwhen url_parms url).each_with_object({}) do |key, h|
      h["data-#{key}"] = props[key] if props[key].present?
    end
  end

  # Calculate common html tag keys and values from toolbar button definition
  #
  def prepare_tag_keys(props)
    h = data_hash_keys(props)
    h.update('title'      => props.localized(:title),
             'id'         => props[:id],
             'data-click' => props[:id])
    h['name'] = props[:name] if props.key?(:name)
    h['data-confirm-tb'] = props.localized(:confirm) if props.key?(:confirm)
    h
  end

  # Calculate 'data-*' tags for <a> tag from custom attributes in button
  # definition.
  #
  # These are added first so that they cannot overwrite any data-*
  # tags needed for generic toolbar functionality.
  #
  def prepare_data_keys(props)
    Hash(props[:data]).each_with_object({}) { |(k, v), h| h["data-#{k}"] = v }
  end

  # Method for generating toolbar hash.
  # This method calls calculate_toolbars and build_toolbar(toolbar_name)
  # each button is pushed to array.
  # NOTE(review): nil entries are kept for toolbars without a name — confirm
  # callers expect that.
  def toolbar_from_hash
    calculate_toolbars.collect do |_div_id, toolbar_name|
      toolbar_name ? build_toolbar(toolbar_name) : nil
    end
  end
end
apache-2.0
shuwnyuantee/binary-static
src/javascript/binary/websocket_pages/user/new_account/virtual_acc_opening.js
3928
// Page handler for the virtual (demo) account opening form.
// Registers an onLoad hook that wires up validation, the password strength
// meter, and the websocket round-trip that actually creates the account.
pjax_config_page("new_account/virtualws", function(){
    return {
        onLoad: function() {
            // Already logged in: go straight to the account page.
            if (getCookieItem('login')) {
                window.location.href = page.url.url_for('user/my_accountws');
                return;
            }
            Content.populate();
            var virtualForm = $('#virtual-form');
            handle_residence_state_ws();
            // Ask the server for the residence list used to fill the dropdown.
            BinarySocket.send({residence_list:1});

            var form = document.getElementById('virtual-form');
            // Per-field error display targets.
            var errorPassword = document.getElementById('error-password'),
                errorRPassword = document.getElementById('error-r-password'),
                errorResidence = document.getElementById('error-residence'),
                errorAccount = document.getElementById('error-account-opening'),
                errorVerificationCode = document.getElementById('error-verification-code');

            // IE lacks the <meter>/'input' support the strength meter needs.
            if (isIE() === false) {
                $('#password').on('input', function() {
                    $('#password-meter').attr('value', testPassword($('#password').val())[0]);
                });
            } else {
                $('#password-meter').remove();
            }

            if (form) {
                virtualForm.submit( function(evt) {
                    evt.preventDefault();
                    var verificationCode = document.getElementById('verification-code').value,
                        residence = document.getElementById('residence').value,
                        password = document.getElementById('password').value,
                        rPassword = document.getElementById('r-password').value;

                    // Show field errors (if any), clear the previous
                    // account-level error, then only proceed when all checks pass.
                    Validate.errorMessageResidence(residence, errorResidence);
                    Validate.errorMessageToken(verificationCode, errorVerificationCode);
                    Validate.hideErrorMessage(errorAccount);
                    if (Validate.errorMessagePassword(password, rPassword, errorPassword, errorRPassword) &&
                        !Validate.errorMessageResidence(residence, errorResidence) &&
                        !Validate.errorMessageToken(verificationCode, errorVerificationCode)){
                        BinarySocket.init({
                            onmessage: function(msg){
                                var response = JSON.parse(msg.data);
                                if (response) {
                                    var type = response.msg_type;
                                    var error = response.error;
                                    if (type === 'new_account_virtual' && !error) {
                                        // Success: persist residence and log the new client in.
                                        page.client.set_cookie('residence', response.echo_req.residence);
                                        page.client.process_new_account(
                                            response.new_account_virtual.email,
                                            response.new_account_virtual.client_id,
                                            response.new_account_virtual.oauth_token,
                                            true);
                                    } else if (type === 'error' || error) {
                                        // NOTE(review): if type === 'error' but response.error is
                                        // absent, error.code below would throw — confirm the API
                                        // always sends an error object with msg_type 'error'.
                                        if (error.code === 'InvalidToken' || error.code === 'duplicate email') {
                                            // Fatal for this form: replace it with a notice message.
                                            virtualForm.empty();
                                            $('.notice-message').remove();
                                            var noticeText;
                                            if (error.code === 'InvalidToken') {
                                                noticeText = '<p>' + Content.localize().textClickHereToRestart.replace('[_1]', page.url.url_for('')) + '</p>';
                                            } else if (error.code === 'duplicate email') {
                                                noticeText = '<p>' + Content.localize().textDuplicatedEmail.replace('[_1]', page.url.url_for('user/lost_passwordws')) + '</p>';
                                            }
                                            virtualForm.html(noticeText);
                                            return;
                                        } else if (error.code === 'PasswordError') {
                                            errorAccount.textContent = text.localize('Password is not strong enough.');
                                        } else if (error.message) {
                                            errorAccount.textContent = error.message;
                                        }
                                        Validate.displayErrorMessage(errorAccount);
                                    }
                                }
                            }
                        });
                        // Fire the actual new_account_virtual request.
                        VirtualAccOpeningData.getDetails(password, residence, verificationCode);
                    }
                });
            }
        }
    };
});
apache-2.0
kaazing/snmp4j
src/org/snmp4j/util/VariableTextFormat.java
3555
/*_############################################################################
  _##
  _##  SNMP4J - VariableTextFormat.java
  _##
  _##  Copyright (C) 2003-2009  Frank Fock and Jochen Katz (SNMP4J.org)
  _##
  _##  Licensed under the Apache License, Version 2.0 (the "License");
  _##  you may not use this file except in compliance with the License.
  _##  You may obtain a copy of the License at
  _##
  _##      http://www.apache.org/licenses/LICENSE-2.0
  _##
  _##  Unless required by applicable law or agreed to in writing, software
  _##  distributed under the License is distributed on an "AS IS" BASIS,
  _##  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  _##  See the License for the specific language governing permissions and
  _##  limitations under the License.
  _##
  _##########################################################################*/
package org.snmp4j.util;

import java.text.ParseException;
import org.snmp4j.smi.OID;
import org.snmp4j.smi.Variable;
import org.snmp4j.smi.VariableBinding;

/**
 * The <code>VariableTextFormat</code> provides a textual representation
 * of SNMP {@link Variable}s, in dependence of their associated (instance) OID.
 * Implementations may use MIB information (looked up via the OID) to render
 * and parse values; parsing is the inverse of formatting.
 *
 * @author Frank Fock
 * @version 1.10
 * @since 1.10
 */
public interface VariableTextFormat {

  /**
   * Returns a textual representation of the supplied variable against the
   * optionally supplied instance OID.
   *
   * @param instanceOID
   *    the instance OID <code>variable</code> is associated with.
   *    If <code>null</code> the formatting cannot take any MIB specification
   *    of the variable into account and has to format it based on its type
   *    only.
   * @param variable
   *    the variable to format.
   * @param withOID
   *    if <code>true</code> the <code>instanceOID</code> should be included
   *    in the textual representation to form a {@link VariableBinding}
   *    representation.
   * @return
   *    the textual representation.
   */
  String format(OID instanceOID, Variable variable, boolean withOID);

  /**
   * Parses a textual representation of a variable binding.
   *
   * @param text
   *    a textual representation of the variable binding.
   * @return
   *    the new <code>VariableBinding</code> instance.
   * @throws ParseException
   *    if the variable binding cannot be parsed successfully.
   */
  VariableBinding parseVariableBinding(String text) throws ParseException;

  /**
   * Parses a textual representation of a variable against its associated
   * OBJECT-TYPE OID.
   *
   * @param classOrInstanceOID
   *    the instance OID <code>variable</code> is associated with. Must not
   *    be <code>null</code>.
   * @param text
   *    a textual representation of the variable.
   * @return
   *    the new <code>Variable</code> instance.
   * @throws ParseException
   *    if the variable cannot be parsed successfully.
   */
  Variable parse(OID classOrInstanceOID, String text) throws ParseException;

  /**
   * Parses a textual representation of a variable against a SMI type.
   *
   * @param smiSyntax
   *    the SMI syntax identifier identifying the target <code>Variable</code>.
   * @param text
   *    a textual representation of the variable.
   * @return
   *    the new <code>Variable</code> instance.
   * @throws ParseException
   *    if the variable cannot be parsed successfully.
   */
  Variable parse(int smiSyntax, String text) throws ParseException;
}
apache-2.0
hcvazquez/UFFOptimizer
src/transform/fileRestore.js
1158
"use strict"; var through = require('through2') var path = require("../model/utilpath.js"); module.exports = function (file) { if (file.endsWith('.js') && path.isInstrumentable(file)) { var fs = require('fs'); var fileOriginal = path.getBackupName(file); fs.unlink(file, function (err) { if (err) { console.log("ERROR removing file " + file + err); } fs.readFile(fileOriginal, 'utf8', function (err, data) { if (err) { return console.log("ERROR reading " + file); } fs.writeFile(file, data, function (err) { if (err) { return console.log("ERROR restoring " + file); } console.log("file restored: " + file); fs.unlink(fileOriginal, function (err) { if (err) { console.log("ERROR removing file " + fileOriginal + err); } }); }); }); }); } return through(); }
apache-2.0
cilium/cilium
api/v1/server/restapi/prefilter/delete_prefilter.go
1796
// Code generated by go-swagger; DO NOT EDIT. // Copyright 2017-2022 Authors of Cilium // SPDX-License-Identifier: Apache-2.0 package prefilter // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the generate command import ( "net/http" "github.com/go-openapi/runtime/middleware" ) // DeletePrefilterHandlerFunc turns a function with the right signature into a delete prefilter handler type DeletePrefilterHandlerFunc func(DeletePrefilterParams) middleware.Responder // Handle executing the request and returning a response func (fn DeletePrefilterHandlerFunc) Handle(params DeletePrefilterParams) middleware.Responder { return fn(params) } // DeletePrefilterHandler interface for that can handle valid delete prefilter params type DeletePrefilterHandler interface { Handle(DeletePrefilterParams) middleware.Responder } // NewDeletePrefilter creates a new http.Handler for the delete prefilter operation func NewDeletePrefilter(ctx *middleware.Context, handler DeletePrefilterHandler) *DeletePrefilter { return &DeletePrefilter{Context: ctx, Handler: handler} } /*DeletePrefilter swagger:route DELETE /prefilter prefilter deletePrefilter Delete list of CIDRs */ type DeletePrefilter struct { Context *middleware.Context Handler DeletePrefilterHandler } func (o *DeletePrefilter) ServeHTTP(rw http.ResponseWriter, r *http.Request) { route, rCtx, _ := o.Context.RouteInfo(r) if rCtx != nil { r = rCtx } var Params = NewDeletePrefilterParams() if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params o.Context.Respond(rw, r, route.Produces, route, err) return } res := o.Handler.Handle(Params) // actually handle the request o.Context.Respond(rw, r, route.Produces, route, res) }
apache-2.0
debovis/webanno
webanno-brat/src/main/java/de/tudarmstadt/ukp/clarin/webanno/brat/curation/AnnotationOption.java
1732
/******************************************************************************* * Copyright 2012 * Ubiquitous Knowledge Processing (UKP) Lab and FG Language Technology * Technische Universität Darmstadt * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package de.tudarmstadt.ukp.clarin.webanno.brat.curation; import java.io.Serializable; import java.util.LinkedList; import java.util.List; /** * This class comprises instances of {@link AnnotationSelection}, each representing * a possible choice of disagreeing annotation sets. * * @author Andreas Straninger */ public class AnnotationOption implements Serializable { /** * */ private static final long serialVersionUID = -688656645133996937L; private List<AnnotationSelection> annotationSelections = new LinkedList<AnnotationSelection>(); public List<AnnotationSelection> getAnnotationSelections() { return annotationSelections; } public void setAnnotationSelections( List<AnnotationSelection> annotationSelections) { this.annotationSelections = annotationSelections; } @Override public String toString() { return annotationSelections.toString(); } }
apache-2.0
linux-on-ibm-z/kubernetes
test/e2e/storage/testsuites/snapshottable.go
17358
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testsuites

import (
	"context"
	"fmt"
	"time"

	"github.com/onsi/ginkgo"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

// snapshot CRD api group
const snapshotGroup = "snapshot.storage.k8s.io"

// snapshot CRD api version
const snapshotAPIVersion = "snapshot.storage.k8s.io/v1beta1"

var (
	// SnapshotGVR is GroupVersionResource for volumesnapshots
	SnapshotGVR = schema.GroupVersionResource{Group: snapshotGroup, Version: "v1beta1", Resource: "volumesnapshots"}
	// SnapshotClassGVR is GroupVersionResource for volumesnapshotclasses
	SnapshotClassGVR = schema.GroupVersionResource{Group: snapshotGroup, Version: "v1beta1", Resource: "volumesnapshotclasses"}
	// SnapshotContentGVR is GroupVersionResource for volumesnapshotcontents
	SnapshotContentGVR = schema.GroupVersionResource{Group: snapshotGroup, Version: "v1beta1", Resource: "volumesnapshotcontents"}
)

// snapshottableTestSuite implements TestSuite for dynamic volume snapshots.
type snapshottableTestSuite struct {
	tsInfo TestSuiteInfo
}

var _ TestSuite = &snapshottableTestSuite{}

// Suite-level driver views, set in the BeforeEach of DefineTests.
// NOTE(review): these are package-level and shared across suite instances.
var (
	sDriver SnapshottableTestDriver
	dDriver DynamicPVTestDriver
)

// InitSnapshottableTestSuite returns snapshottableTestSuite that implements TestSuite interface
func InitSnapshottableTestSuite() TestSuite {
	return &snapshottableTestSuite{
		tsInfo: TestSuiteInfo{
			Name: "snapshottable",
			TestPatterns: []testpatterns.TestPattern{
				testpatterns.DynamicSnapshot,
			},
			SupportedSizeRange: e2evolume.SizeRange{
				Min: "1Mi",
			},
		},
	}
}

// GetTestSuiteInfo returns the static metadata of this suite.
func (s *snapshottableTestSuite) GetTestSuiteInfo() TestSuiteInfo {
	return s.tsInfo
}

// SkipRedundantSuite is a no-op: no pattern is redundant for this suite.
func (s *snapshottableTestSuite) SkipRedundantSuite(driver TestDriver, pattern testpatterns.TestPattern) {
}

// DefineTests registers the snapshot tests for the given driver/pattern:
// a precondition check, per-test setup (storage class, claim, writer pod)
// with stacked cleanup steps, and two Its exercising the Delete and Retain
// deletion policies.
func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
	ginkgo.BeforeEach(func() {
		// Check preconditions.
		framework.ExpectEqual(pattern.SnapshotType, testpatterns.DynamicCreatedSnapshot)
		dInfo := driver.GetDriverInfo()
		ok := false
		sDriver, ok = driver.(SnapshottableTestDriver)
		if !dInfo.Capabilities[CapSnapshotDataSource] || !ok {
			e2eskipper.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name)
		}
		dDriver, ok = driver.(DynamicPVTestDriver)
		if !ok {
			e2eskipper.Skipf("Driver %q does not support dynamic provisioning - skipping", driver.GetDriverInfo().Name)
		}
	})

	// This intentionally comes after checking the preconditions because it
	// registers its own BeforeEach which creates the namespace. Beware that it
	// also registers an AfterEach which renders f unusable. Any code using
	// f must run inside an It or Context callback.
	f := framework.NewDefaultFramework("snapshotting")

	// init provisions the per-test resources and records the matching
	// teardown in l.cleanupSteps (executed in reverse order by cleanup).
	init := func(l *snapshottableLocal) {
		l.cs = f.ClientSet
		l.dc = f.DynamicClient

		// Now do the more expensive test initialization.
		config, driverCleanup := driver.PrepareTest(f)
		l.config = config
		l.driverCleanup = driverCleanup

		l.sc = dDriver.GetDynamicProvisionStorageClass(config, "")
		if l.sc == nil {
			framework.Failf("This driver should support dynamic provisioning")
		}

		testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange
		driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
		claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
		framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
		l.pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
			ClaimSize:        claimSize,
			StorageClassName: &(l.sc.Name),
		}, config.Framework.Namespace.Name)

		framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", l.sc, l.pvc)

		ginkgo.By("creating a StorageClass " + l.sc.Name)
		l.sc, err = l.cs.StorageV1().StorageClasses().Create(context.TODO(), l.sc, metav1.CreateOptions{})
		framework.ExpectNoError(err)
		l.cleanupSteps = append(l.cleanupSteps, func() {
			framework.Logf("deleting storage class %s", l.sc.Name)
			framework.ExpectNoError(l.cs.StorageV1().StorageClasses().Delete(context.TODO(), l.sc.Name, metav1.DeleteOptions{}))
		})

		ginkgo.By("creating a claim")
		l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.pvc.Namespace).Create(context.TODO(), l.pvc, metav1.CreateOptions{})
		framework.ExpectNoError(err)
		l.cleanupSteps = append(l.cleanupSteps, func() {
			framework.Logf("deleting claim %q/%q", l.pvc.Namespace, l.pvc.Name)
			// typically this claim has already been deleted
			err = l.cs.CoreV1().PersistentVolumeClaims(l.pvc.Namespace).Delete(context.TODO(), l.pvc.Name, metav1.DeleteOptions{})
			if err != nil && !apierrors.IsNotFound(err) {
				framework.Failf("Error deleting claim %q. Error: %v", l.pvc.Name, err)
			}
		})

		ginkgo.By("starting a pod to use the claim")
		// Write known data so the snapshot has content to capture.
		command := "echo 'hello world' > /mnt/test/data"
		l.pod = StartInPodWithVolume(l.cs, l.pvc.Namespace, l.pvc.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)
		l.cleanupSteps = append(l.cleanupSteps, func() {
			StopPod(l.cs, l.pod)
		})

		err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
		framework.ExpectNoError(err)

		ginkgo.By("checking the claim")
		// Get new copy of the claim
		l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.pvc.Namespace).Get(context.TODO(), l.pvc.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)

		// Get the bound PV
		_, err = l.cs.CoreV1().PersistentVolumes().Get(context.TODO(), l.pvc.Spec.VolumeName, metav1.GetOptions{})
		framework.ExpectNoError(err)
	}

	cleanup := func(l *snapshottableLocal) {
		// Depending on how far the test executed, cleanup accordingly
		// Execute in reverse order, similar to defer stack
		for i := len(l.cleanupSteps) - 1; i >= 0; i-- {
			err := tryFunc(l.cleanupSteps[i])
			framework.ExpectNoError(err, "while running cleanup steps")
		}

		// All tests will require these driver cleanup tests
		err := tryFunc(l.driverCleanup)
		l.driverCleanup = nil
		framework.ExpectNoError(err, "while cleaning up driver")
	}

	ginkgo.It("should create snapshot with delete policy [Feature:VolumeSnapshotDataSource]", func() {
		l := &snapshottableLocal{}
		init(l)
		defer cleanup(l)

		TestSnapshottable(l, SnapshotClassTest{
			DeletionPolicy: "Delete",
		})
		TestSnapshotDeleted(l, SnapshotClassTest{
			DeletionPolicy: "Delete",
		})
	})
	ginkgo.It("should not delete snapshot with retain policy [Feature:VolumeSnapshotDataSource]", func() {
		l := &snapshottableLocal{}
		init(l)
		defer cleanup(l)

		TestSnapshottable(l, SnapshotClassTest{
			DeletionPolicy: "Retain",
		})
		TestSnapshotDeleted(l, SnapshotClassTest{
			DeletionPolicy: "Retain",
		})
	})
}

// snapshottableLocal is used to keep the current state of a snapshottable
// test, associated objects, and cleanup steps.
type snapshottableLocal struct {
	config        *PerTestConfig
	driverCleanup func()
	cleanupSteps  []func()

	cs  clientset.Interface
	dc  dynamic.Interface
	pvc *v1.PersistentVolumeClaim
	sc  *storagev1.StorageClass
	pod *v1.Pod

	// Snapshot CRD objects are handled as unstructured via the dynamic client.
	vsc       *unstructured.Unstructured
	vs        *unstructured.Unstructured
	vscontent *unstructured.Unstructured
}

// SnapshotClassTest represents parameters to be used by snapshot tests.
// Not all parameters are used by all tests.
type SnapshotClassTest struct {
	DeletionPolicy string
}

// TestSnapshottable tests volume snapshots based on a given SnapshotClassTest
func TestSnapshottable(l *snapshottableLocal, sct SnapshotClassTest) {
	var err error

	ginkgo.By("creating a SnapshotClass")
	l.vsc = sDriver.GetSnapshotClass(l.config)
	if l.vsc == nil {
		framework.Failf("Failed to get snapshot class based on test config")
	}
	l.vsc.Object["deletionPolicy"] = sct.DeletionPolicy
	l.vsc, err = l.dc.Resource(SnapshotClassGVR).Create(context.TODO(), l.vsc, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	defer func() {
		framework.Logf("deleting SnapshotClass %s", l.vsc.GetName())
		// NOTE(review): the Delete error is intentionally ignored here.
		l.dc.Resource(SnapshotClassGVR).Delete(context.TODO(), l.vsc.GetName(), metav1.DeleteOptions{})
	}()
	l.vsc, err = l.dc.Resource(SnapshotClassGVR).Namespace(l.vsc.GetNamespace()).Get(context.TODO(), l.vsc.GetName(), metav1.GetOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("creating a snapshot")
	l.vs = getSnapshot(l.pvc.Name, l.pvc.Namespace, l.vsc.GetName())

	l.vs, err = l.dc.Resource(SnapshotGVR).Namespace(l.vs.GetNamespace()).Create(context.TODO(), l.vs, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	defer func() {
		framework.Logf("deleting snapshot %q/%q", l.vs.GetNamespace(), l.vs.GetName())
		// typically this snapshot has already been deleted
		err = l.dc.Resource(SnapshotGVR).Namespace(l.vs.GetNamespace()).Delete(context.TODO(), l.vs.GetName(), metav1.DeleteOptions{})
		if err != nil && !apierrors.IsNotFound(err) {
			framework.Failf("Error deleting snapshot %q. Error: %v", l.pvc.Name, err)
		}
	}()
	err = WaitForSnapshotReady(l.dc, l.vs.GetNamespace(), l.vs.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
	framework.ExpectNoError(err)

	ginkgo.By("checking the snapshot")
	// Get new copy of the snapshot
	l.vs, err = l.dc.Resource(SnapshotGVR).Namespace(l.vs.GetNamespace()).Get(context.TODO(), l.vs.GetName(), metav1.GetOptions{})
	framework.ExpectNoError(err)

	// Get the bound snapshotContent
	snapshotStatus := l.vs.Object["status"].(map[string]interface{})
	snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
	l.vscontent, err = l.dc.Resource(SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
	framework.ExpectNoError(err)

	snapshotContentSpec := l.vscontent.Object["spec"].(map[string]interface{})
	volumeSnapshotRef := snapshotContentSpec["volumeSnapshotRef"].(map[string]interface{})

	// Check SnapshotContent properties
	ginkgo.By("checking the SnapshotContent")
	framework.ExpectEqual(snapshotContentSpec["volumeSnapshotClassName"], l.vsc.GetName())
	framework.ExpectEqual(volumeSnapshotRef["name"], l.vs.GetName())
	framework.ExpectEqual(volumeSnapshotRef["namespace"], l.vs.GetNamespace())
}

// TestSnapshotDeleted tests the results of deleting a VolumeSnapshot
// depending on the deletion policy currently set.
func TestSnapshotDeleted(l *snapshottableLocal, sct SnapshotClassTest) {
	var err error

	ginkgo.By("creating a SnapshotClass")
	l.vsc = sDriver.GetSnapshotClass(l.config)
	if l.vsc == nil {
		framework.Failf("Failed to get snapshot class based on test config")
	}
	l.vsc.Object["deletionPolicy"] = sct.DeletionPolicy
	l.vsc, err = l.dc.Resource(SnapshotClassGVR).Create(context.TODO(), l.vsc, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	defer func() {
		framework.Logf("deleting SnapshotClass %s", l.vsc.GetName())
		// NOTE(review): the Delete error is intentionally ignored here.
		l.dc.Resource(SnapshotClassGVR).Delete(context.TODO(), l.vsc.GetName(), metav1.DeleteOptions{})
	}()
	l.vsc, err = l.dc.Resource(SnapshotClassGVR).Namespace(l.vsc.GetNamespace()).Get(context.TODO(), l.vsc.GetName(), metav1.GetOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("creating a snapshot to delete")
	l.vs = getSnapshot(l.pvc.Name, l.pvc.Namespace, l.vsc.GetName())
	l.vs, err = l.dc.Resource(SnapshotGVR).Namespace(l.vs.GetNamespace()).Create(context.TODO(), l.vs, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	defer func() {
		framework.Logf("deleting snapshot %q/%q", l.vs.GetNamespace(), l.vs.GetName())

		// typically this snapshot has already been deleted
		err = l.dc.Resource(SnapshotGVR).Namespace(l.vs.GetNamespace()).Delete(context.TODO(), l.vs.GetName(), metav1.DeleteOptions{})
		if err != nil && !apierrors.IsNotFound(err) {
			framework.Failf("Error deleting snapshot %q. Error: %v", l.pvc.Name, err)
		}
	}()
	err = WaitForSnapshotReady(l.dc, l.vs.GetNamespace(), l.vs.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
	framework.ExpectNoError(err)

	ginkgo.By("get the snapshot to delete")
	l.vs, err = l.dc.Resource(SnapshotGVR).Namespace(l.vs.GetNamespace()).Get(context.TODO(), l.vs.GetName(), metav1.GetOptions{})
	framework.ExpectNoError(err)
	snapshotStatus := l.vs.Object["status"].(map[string]interface{})
	snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
	framework.Logf("received snapshotStatus %v", snapshotStatus)
	framework.Logf("snapshotContentName %s", snapshotContentName)
	l.vscontent, err = l.dc.Resource(SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("deleting the snapshot")
	err = l.dc.Resource(SnapshotGVR).Namespace(l.vs.GetNamespace()).Delete(context.TODO(), l.vs.GetName(), metav1.DeleteOptions{})
	if err != nil && !apierrors.IsNotFound(err) {
		framework.Failf("Error deleting snapshot %s in namespace %s. Error: %v", l.vs.GetName(), l.vs.GetNamespace(), err)
	}

	ginkgo.By("checking the Snapshot has been deleted")
	// NOTE(review): this passes l.sc.Name (the StorageClass name) as the
	// resource name to wait on, while SnapshotGVR suggests the snapshot's
	// own name was intended — confirm against utils.WaitForGVRDeletion.
	err = utils.WaitForGVRDeletion(l.dc, SnapshotGVR, l.sc.Name, framework.Poll, framework.SnapshotDeleteTimeout)
	framework.ExpectNoError(err)

	if sct.DeletionPolicy == "Delete" {
		ginkgo.By("checking the SnapshotContent has been deleted")
		err = utils.WaitForGVRDeletion(l.dc, SnapshotContentGVR, snapshotContentName, framework.Poll, framework.SnapshotDeleteTimeout)
		framework.ExpectNoError(err)
	} else if sct.DeletionPolicy == "Retain" {
		ginkgo.By("checking the SnapshotContent has not been deleted")
		err = utils.WaitForGVRDeletion(l.dc, SnapshotContentGVR, snapshotContentName, 1*time.Second /* poll */, 30*time.Second /* timeout */)
		framework.ExpectError(err) // should fail deletion check

		// The purpose of this block is to prevent physical snapshotContent leaks.
		// We must update the SnapshotContent to have Delete Deletion policy,
		// or else the physical snapshot content will be leaked.
		ginkgo.By("get the latest copy of volume snapshot content")
		snapshotContent, err := l.dc.Resource(SnapshotContentGVR).Get(context.TODO(), l.vscontent.GetName(), metav1.GetOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("updating the the SnapshotContent to have Delete Deletion policy")
		snapshotContent.Object["spec"].(map[string]interface{})["deletionPolicy"] = "Delete"
		l.vscontent, err = l.dc.Resource(SnapshotContentGVR).Update(context.TODO(), snapshotContent, metav1.UpdateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("manually deleting the SnapshotContent")
		err = l.dc.Resource(SnapshotContentGVR).Delete(context.TODO(), snapshotContent.GetName(), metav1.DeleteOptions{})
		if err != nil && !apierrors.IsNotFound(err) {
			framework.Failf("Error deleting snapshot content %q. Error: %v", snapshotContent.GetName(), err)
		}

		ginkgo.By("checking the SnapshotContent has been deleted")
		err = utils.WaitForGVRDeletion(l.dc, SnapshotContentGVR, snapshotContentName, framework.Poll, framework.SnapshotDeleteTimeout)
		framework.ExpectNoError(err)
	} else {
		framework.Failf("Invalid test config. DeletionPolicy should be either Delete or Retain. DeletionPolicy: %v", sct.DeletionPolicy)
	}
}

// WaitForSnapshotReady waits for a VolumeSnapshot to be ready to use or until timeout occurs, whichever comes first.
func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
	framework.Logf("Waiting up to %v for VolumeSnapshot %s to become ready", timeout, snapshotName)
	if successful := utils.WaitUntil(poll, timeout, func() bool {
		snapshot, err := c.Resource(SnapshotGVR).Namespace(ns).Get(context.TODO(), snapshotName, metav1.GetOptions{})
		if err != nil {
			framework.Logf("Failed to get snapshot %q, retrying in %v. Error: %v", snapshotName, poll, err)
			return false
		}

		status := snapshot.Object["status"]
		if status == nil {
			framework.Logf("VolumeSnapshot %s found but is not ready.", snapshotName)
			return false
		}
		value := status.(map[string]interface{})
		if value["readyToUse"] == true {
			framework.Logf("VolumeSnapshot %s found and is ready", snapshotName)
			return true
		}

		framework.Logf("VolumeSnapshot %s found but is not ready.", snapshotName)
		return false
	}); successful {
		return nil
	}

	return fmt.Errorf("VolumeSnapshot %s is not ready within %v", snapshotName, timeout)
}
apache-2.0
zaqwes8811/matlab_ext
img_video/opencv_tut.py
1275
#!/usr/bin/env python # coding: utf-8 import time import numpy as np import cv2 cap = cv2.VideoCapture('../Documents/sample.avi') rois = [] frame_counter = 0 while cap.isOpened(): ret, frame = cap.read() #if not ret: # break frame_counter += 1 #If the last frame is reached, reset the capture and the frame_counter if frame_counter == cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT): frame_counter = 0 #Or whatever as long as it is the same as next line cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, 0) gray = cv2.cvtColor( frame, cv2.COLOR_BGR2GRAY ) # add rects #print img.shape # fixme: присваивание очень медленное! numpy img = gray / 10 # fixme: как бы обернуть то #print 0:20 # numpy.take(...) img[ 0:20, 1:10 ] = 220 # break # draw cv2.circle( img, (100,150), 10, 255 ) font = cv2.FONT_HERSHEY_SIMPLEX text = 'marker:' + str( frame_counter ) cv2.putText( img, text, (5,200), font, 0.4, (225),1 ) # processing blur = img#cv2.GaussianBlur( img,(5,5), 0 ) cv2.imshow( 'frame', blur ) # Нужно жать на окне if cv2.waitKey(1) & 0xFF == ord('q'): break cap.release() cv2.destroyAllWindows()
apache-2.0
microsoft/TypeScript
tests/baselines/reference/gettersAndSetters.js
2141
//// [gettersAndSetters.ts] // classes class C { public fooBack = ""; static barBack:string = ""; public bazBack = ""; public get Foo() { return this.fooBack;} // ok public set Foo(foo:string) {this.fooBack = foo;} // ok static get Bar() {return C.barBack;} // ok static set Bar(bar:string) {C.barBack = bar;} // ok public get = function() {} // ok public set = function() {} // ok } var c = new C(); var foo = c.Foo; c.Foo = "foov"; var bar = C.Bar; C.Bar = "barv"; var baz = c.Baz; c.Baz = "bazv"; // The Foo accessors' return and param types should be contextually typed to the Foo field var o : {Foo:number;} = {get Foo() {return 0;}, set Foo(val:number){val}}; // o var ofg = o.Foo; o.Foo = 0; interface I1 { (n:number):number; } var i:I1 = function (n) {return n;} // Repro from #45006 const x: string | number = Math.random() < 0.5 ? "str" : 123; if (typeof x === "string") { let obj = { set prop(_: any) { x.toUpperCase(); }, get prop() { return x.toUpperCase() }, method() { return x.toUpperCase() } } } //// [gettersAndSetters.js] // classes class C { constructor() { this.fooBack = ""; this.bazBack = ""; this.get = function () { }; // ok this.set = function () { }; // ok } get Foo() { return this.fooBack; } // ok set Foo(foo) { this.fooBack = foo; } // ok static get Bar() { return C.barBack; } // ok static set Bar(bar) { C.barBack = bar; } // ok } C.barBack = ""; var c = new C(); var foo = c.Foo; c.Foo = "foov"; var bar = C.Bar; C.Bar = "barv"; var baz = c.Baz; c.Baz = "bazv"; // The Foo accessors' return and param types should be contextually typed to the Foo field var o = { get Foo() { return 0; }, set Foo(val) { val; } }; // o var ofg = o.Foo; o.Foo = 0; var i = function (n) { return n; }; // Repro from #45006 const x = Math.random() < 0.5 ? "str" : 123; if (typeof x === "string") { let obj = { set prop(_) { x.toUpperCase(); }, get prop() { return x.toUpperCase(); }, method() { return x.toUpperCase(); } }; }
apache-2.0
begoldsm/azure-sdk-for-node
lib/services/serviceFabric/lib/models/deployedApplicationsHealthEvaluation.js
2672
/* * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See License.txt in the project root for * license information. * * Code generated by Microsoft (R) AutoRest Code Generator. * Changes may cause incorrect behavior and will be lost if the code is * regenerated. */ 'use strict'; const models = require('./index'); /** * @class * Initializes a new instance of the DeployedApplicationsHealthEvaluation class. * @constructor * The evaluation of the deployed applications health * * @member {array} [unhealthyEvaluations] * * @member {number} [totalCount] * * @member {number} [maxPercentUnhealthyDeployedApplications] * */ class DeployedApplicationsHealthEvaluation extends models['HealthEvaluation'] { constructor() { super(); } /** * Defines the metadata of DeployedApplicationsHealthEvaluation * * @returns {object} metadata of DeployedApplicationsHealthEvaluation * */ mapper() { return { required: false, serializedName: 'DeployedApplications', type: { name: 'Composite', className: 'DeployedApplicationsHealthEvaluation', modelProperties: { description: { required: false, serializedName: 'Description', type: { name: 'String' } }, aggregatedHealthState: { required: false, serializedName: 'AggregatedHealthState', type: { name: 'String' } }, kind: { required: true, serializedName: 'Kind', type: { name: 'String' } }, unhealthyEvaluations: { required: false, serializedName: 'UnhealthyEvaluations', type: { name: 'Sequence', element: { required: false, serializedName: 'UnhealthyEvaluationElementType', type: { name: 'Composite', className: 'UnhealthyEvaluation' } } } }, totalCount: { required: false, serializedName: 'TotalCount', type: { name: 'Number' } }, maxPercentUnhealthyDeployedApplications: { required: false, serializedName: 'MaxPercentUnhealthyDeployedApplications', type: { name: 'Number' } } } } }; } } module.exports = DeployedApplicationsHealthEvaluation;
apache-2.0
catalintrif/infotranspub-backend
modules/onebusaway-gtfs/src/main/java/org/onebusaway/gtfs/model/AgencyAndIdInstance.java
2362
/** * Copyright (C) 2011 Brian Ferris <bdferris@onebusaway.org> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.onebusaway.gtfs.model; import java.io.Serializable; import org.onebusaway.gtfs.model.calendar.ServiceDate; /** * An identifier class that combines a {@link AgencyAndId} id with a service * date. See {@link ServiceDate} for more details of the service date idea. * * @author bdferris * @see AgencyAndId * @see ServiceDate */ public class AgencyAndIdInstance implements Serializable, Comparable<AgencyAndIdInstance> { private static final long serialVersionUID = 1L; private final AgencyAndId id; private final long serviceDate; public AgencyAndIdInstance(AgencyAndId id, long serviceDate) { if (id == null) throw new IllegalArgumentException("id cannot be null"); this.id = id; this.serviceDate = serviceDate; } public AgencyAndId getId() { return id; } public long getServiceDate() { return serviceDate; } @Override public int compareTo(AgencyAndIdInstance o) { int c = this.id.compareTo(o.id); if (c == 0) c = this.serviceDate == o.serviceDate ? 0 : (this.serviceDate < o.serviceDate ? 
-1 : 1); return c; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + id.hashCode(); result = prime * result + (int) (serviceDate ^ (serviceDate >>> 32)); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; AgencyAndIdInstance other = (AgencyAndIdInstance) obj; if (!id.equals(other.id)) return false; if (serviceDate != other.serviceDate) return false; return true; } }
apache-2.0
arangodb/arangodb
tests/Cache/TransactionManager.cpp
3222
//////////////////////////////////////////////////////////////////////////////// /// DISCLAIMER /// /// Copyright 2014-2020 ArangoDB GmbH, Cologne, Germany /// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany /// /// Licensed under the Apache License, Version 2.0 (the "License"); /// you may not use this file except in compliance with the License. /// You may obtain a copy of the License at /// /// http://www.apache.org/licenses/LICENSE-2.0 /// /// Unless required by applicable law or agreed to in writing, software /// distributed under the License is distributed on an "AS IS" BASIS, /// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. /// See the License for the specific language governing permissions and /// limitations under the License. /// /// Copyright holder is ArangoDB GmbH, Cologne, Germany /// /// @author Dan Larkin-York /// @author Copyright 2017, ArangoDB GmbH, Cologne, Germany //////////////////////////////////////////////////////////////////////////////// #include "gtest/gtest.h" #include <cstdint> #include "Cache/Transaction.h" #include "Cache/TransactionManager.h" using namespace arangodb::cache; TEST(CacheTransactionalManagerTest, verify_that_transaction_term_is_maintained_correctly) { TransactionManager transactions; Transaction* tx1; Transaction* tx2; Transaction* tx3; ASSERT_EQ(0ULL, transactions.term()); tx1 = transactions.begin(false); ASSERT_EQ(1ULL, transactions.term()); transactions.end(tx1); ASSERT_EQ(2ULL, transactions.term()); tx1 = transactions.begin(false); ASSERT_EQ(3ULL, transactions.term()); tx2 = transactions.begin(false); ASSERT_EQ(3ULL, transactions.term()); transactions.end(tx1); ASSERT_EQ(3ULL, transactions.term()); transactions.end(tx2); ASSERT_EQ(4ULL, transactions.term()); tx1 = transactions.begin(true); ASSERT_EQ(4ULL, transactions.term()); tx2 = transactions.begin(false); ASSERT_EQ(5ULL, transactions.term()); transactions.end(tx2); ASSERT_EQ(5ULL, transactions.term()); transactions.end(tx1); 
ASSERT_EQ(6ULL, transactions.term()); tx1 = transactions.begin(true); ASSERT_EQ(6ULL, transactions.term()); tx2 = transactions.begin(false); ASSERT_EQ(7ULL, transactions.term()); transactions.end(tx2); ASSERT_EQ(7ULL, transactions.term()); tx3 = transactions.begin(true); ASSERT_EQ(7ULL, transactions.term()); transactions.end(tx1); ASSERT_EQ(8ULL, transactions.term()); transactions.end(tx3); ASSERT_EQ(8ULL, transactions.term()); tx1 = transactions.begin(true); ASSERT_EQ(8ULL, transactions.term()); tx2 = transactions.begin(false); ASSERT_EQ(9ULL, transactions.term()); transactions.end(tx2); ASSERT_EQ(9ULL, transactions.term()); tx3 = transactions.begin(true); ASSERT_EQ(9ULL, transactions.term()); transactions.end(tx3); ASSERT_EQ(9ULL, transactions.term()); tx2 = transactions.begin(false); ASSERT_EQ(9ULL, transactions.term()); tx3 = transactions.begin(false); ASSERT_EQ(9ULL, transactions.term()); transactions.end(tx3); ASSERT_EQ(9ULL, transactions.term()); transactions.end(tx2); ASSERT_EQ(9ULL, transactions.term()); transactions.end(tx1); ASSERT_EQ(10ULL, transactions.term()); }
apache-2.0
petedavis/MassTransit
src/MassTransit.Reactive/ServiceBusExtensions.cs
3084
// Copyright 2007-2008 The Apache Software Foundation. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use // this file except in compliance with the License. You may obtain a copy of the // License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. namespace MassTransit.Reactive { using System; using System.Reactive; using System.Reactive.Linq; public static class ServiceBusExtensions { /// <summary> /// <para>Gets an observer that publishes messages to all subscribed consumers for /// the message type as specified by the generic parameter.</para> /// /// <para> /// Read up on publishing: http://readthedocs.org/docs/masstransit/en/latest/overview/publishing.html /// </para> /// </summary> /// <typeparam name = "T">The type of the message</typeparam> /// <param name = "bus">The message bus</param> public static IObserver<T> AsObserver<T> (this IServiceBus bus) where T : class { return bus.AsObserver<T> ( contextCallback => { }); } /// <summary> /// <para>Gets an observer that publishes messages to all subscribed consumers for /// the message type as specified by the generic parameter. 
The second parameter /// allows the caller to customize the outgoing publish context and set things /// like headers on the message.</para> /// /// <para> /// Read up on publishing: http://readthedocs.org/docs/masstransit/en/latest/overview/publishing.html /// </para> /// </summary> /// <typeparam name = "T">The type of the message</typeparam> /// <param name = "bus">The message bus</param> /// <param name = "contextCallback">A callback that gives the caller /// access to the publish context.</param> public static IObserver<T> AsObserver<T> (this IServiceBus bus, Action<IPublishContext<T>> contextCallback) where T : class { return Observer.Create<T> ( value => bus.Publish<T> (value, contextCallback)); } public static IObservable<T> AsObservable<T>(this IServiceBus bus) where T : class { return Observable.Create<T>( observer => new ServiceBusSubscription<T>(bus, observer, null)); } public static IObservable<T> AsObservable<T>(this IServiceBus bus, Predicate<T> condition) where T : class { return Observable.Create<T>( observer => new ServiceBusSubscription<T>(bus, observer, condition)); } } }
apache-2.0
huhuang03/ThAnLib
views/PullToRefresh/app/src/main/java/base/tonghu/com/pulltorefresh/MainActivity.java
342
package base.tonghu.com.pulltorefresh; import android.support.v7.app.AppCompatActivity; import android.os.Bundle; public class MainActivity extends AppCompatActivity { @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_main); } }
apache-2.0
mehdi149/OF_COMPILER
gen-src/main/java/org/projectfloodlight/openflow/protocol/ver15/OFAsyncConfigPropFlowStatsMasterVer15.java
8121
// Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University // Copyright (c) 2011, 2012 Open Networking Foundation // Copyright (c) 2012, 2013 Big Switch Networks, Inc. // This library was generated by the LoxiGen Compiler. // See the file LICENSE.txt which should have been included in the source distribution // Automatically generated by LOXI from template of_class.java // Do not modify package org.projectfloodlight.openflow.protocol.ver15; import org.projectfloodlight.openflow.protocol.*; import org.projectfloodlight.openflow.protocol.action.*; import org.projectfloodlight.openflow.protocol.actionid.*; import org.projectfloodlight.openflow.protocol.bsntlv.*; import org.projectfloodlight.openflow.protocol.errormsg.*; import org.projectfloodlight.openflow.protocol.meterband.*; import org.projectfloodlight.openflow.protocol.instruction.*; import org.projectfloodlight.openflow.protocol.instructionid.*; import org.projectfloodlight.openflow.protocol.match.*; import org.projectfloodlight.openflow.protocol.stat.*; import org.projectfloodlight.openflow.protocol.oxm.*; import org.projectfloodlight.openflow.protocol.oxs.*; import org.projectfloodlight.openflow.protocol.queueprop.*; import org.projectfloodlight.openflow.types.*; import org.projectfloodlight.openflow.util.*; import org.projectfloodlight.openflow.exceptions.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Set; import io.netty.buffer.ByteBuf; import com.google.common.hash.PrimitiveSink; import com.google.common.hash.Funnel; class OFAsyncConfigPropFlowStatsMasterVer15 implements OFAsyncConfigPropFlowStatsMaster { private static final Logger logger = LoggerFactory.getLogger(OFAsyncConfigPropFlowStatsMasterVer15.class); // version: 1.5 final static byte WIRE_VERSION = 6; final static int LENGTH = 8; private final static long DEFAULT_MASK = 0x0L; // OF message fields private final long mask; // // Immutable default instance final static 
OFAsyncConfigPropFlowStatsMasterVer15 DEFAULT = new OFAsyncConfigPropFlowStatsMasterVer15( DEFAULT_MASK ); // package private constructor - used by readers, builders, and factory OFAsyncConfigPropFlowStatsMasterVer15(long mask) { this.mask = mask; } // Accessors for OF message fields @Override public int getType() { return 0xd; } @Override public long getMask() { return mask; } @Override public OFVersion getVersion() { return OFVersion.OF_15; } public OFAsyncConfigPropFlowStatsMaster.Builder createBuilder() { return new BuilderWithParent(this); } static class BuilderWithParent implements OFAsyncConfigPropFlowStatsMaster.Builder { final OFAsyncConfigPropFlowStatsMasterVer15 parentMessage; // OF message fields private boolean maskSet; private long mask; BuilderWithParent(OFAsyncConfigPropFlowStatsMasterVer15 parentMessage) { this.parentMessage = parentMessage; } @Override public int getType() { return 0xd; } @Override public long getMask() { return mask; } @Override public OFAsyncConfigPropFlowStatsMaster.Builder setMask(long mask) { this.mask = mask; this.maskSet = true; return this; } @Override public OFVersion getVersion() { return OFVersion.OF_15; } @Override public OFAsyncConfigPropFlowStatsMaster build() { long mask = this.maskSet ? this.mask : parentMessage.mask; // return new OFAsyncConfigPropFlowStatsMasterVer15( mask ); } } static class Builder implements OFAsyncConfigPropFlowStatsMaster.Builder { // OF message fields private boolean maskSet; private long mask; @Override public int getType() { return 0xd; } @Override public long getMask() { return mask; } @Override public OFAsyncConfigPropFlowStatsMaster.Builder setMask(long mask) { this.mask = mask; this.maskSet = true; return this; } @Override public OFVersion getVersion() { return OFVersion.OF_15; } // @Override public OFAsyncConfigPropFlowStatsMaster build() { long mask = this.maskSet ? 
this.mask : DEFAULT_MASK; return new OFAsyncConfigPropFlowStatsMasterVer15( mask ); } } final static Reader READER = new Reader(); static class Reader implements OFMessageReader<OFAsyncConfigPropFlowStatsMaster> { @Override public OFAsyncConfigPropFlowStatsMaster readFrom(ByteBuf bb) throws OFParseError { int start = bb.readerIndex(); // fixed value property type == 0xd short type = bb.readShort(); if(type != (short) 0xd) throw new OFParseError("Wrong type: Expected=0xd(0xd), got="+type); int length = U16.f(bb.readShort()); if(length != 8) throw new OFParseError("Wrong length: Expected=8(8), got="+length); if(bb.readableBytes() + (bb.readerIndex() - start) < length) { // Buffer does not have all data yet bb.readerIndex(start); return null; } if(logger.isTraceEnabled()) logger.trace("readFrom - length={}", length); long mask = U32.f(bb.readInt()); OFAsyncConfigPropFlowStatsMasterVer15 asyncConfigPropFlowStatsMasterVer15 = new OFAsyncConfigPropFlowStatsMasterVer15( mask ); if(logger.isTraceEnabled()) logger.trace("readFrom - read={}", asyncConfigPropFlowStatsMasterVer15); return asyncConfigPropFlowStatsMasterVer15; } } public void putTo(PrimitiveSink sink) { FUNNEL.funnel(this, sink); } final static OFAsyncConfigPropFlowStatsMasterVer15Funnel FUNNEL = new OFAsyncConfigPropFlowStatsMasterVer15Funnel(); static class OFAsyncConfigPropFlowStatsMasterVer15Funnel implements Funnel<OFAsyncConfigPropFlowStatsMasterVer15> { private static final long serialVersionUID = 1L; @Override public void funnel(OFAsyncConfigPropFlowStatsMasterVer15 message, PrimitiveSink sink) { // fixed value property type = 0xd sink.putShort((short) 0xd); // fixed value property length = 8 sink.putShort((short) 0x8); sink.putLong(message.mask); } } public void writeTo(ByteBuf bb) { WRITER.write(bb, this); } final static Writer WRITER = new Writer(); static class Writer implements OFMessageWriter<OFAsyncConfigPropFlowStatsMasterVer15> { @Override public void write(ByteBuf bb, 
OFAsyncConfigPropFlowStatsMasterVer15 message) { // fixed value property type = 0xd bb.writeShort((short) 0xd); // fixed value property length = 8 bb.writeShort((short) 0x8); bb.writeInt(U32.t(message.mask)); } } @Override public String toString() { StringBuilder b = new StringBuilder("OFAsyncConfigPropFlowStatsMasterVer15("); b.append("mask=").append(mask); b.append(")"); return b.toString(); } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; OFAsyncConfigPropFlowStatsMasterVer15 other = (OFAsyncConfigPropFlowStatsMasterVer15) obj; if( mask != other.mask) return false; return true; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * (int) (mask ^ (mask >>> 32)); return result; } }
apache-2.0
GabrielSantosDoNascimentoSilva/ListaTelefonica
ListaTelefonica/js/app.js
49
angular.module("listaTelefonica",["ngMessages"]);
apache-2.0
emiapwil/alto-server
plugins/autoird/autoird.py
4994
#!/usr/bin/env python3 import bottle import logging import palto.utils import palto.paltomanager import palto.palto_config from .basicird import SimpleIRD def can_autogenerate(backend): try: config = backend.config if not config.has_section('ird'): return False return True except Exception as e: logging.warning('Error reading config from backend %s', backend) return False def get_mountpoints(backend): mountpoints = backend.config.get('ird', 'mountpoints', fallback='') mountpoints = { x.strip() for x in mountpoints.split(',') } return mountpoints def generate_ird_resource(backend): if not hasattr(backend, 'get_meta'): logging.error('AutoIRDPlugin requires get_meta to generate ird') return None args = {} for func in ['get_meta', 'get_capabilities', 'get_uses']: if hasattr(backend, func): args[func] = getattr(backend, func) return SimpleIRD(**args) def get_ird_provider(config): return config.get('autoird', 'provider', fallback='palto.simpleird') class AutoIRDPlugin(): """ """ name = 'auto_ird' api = 2 def __init__(self): self.backends = {} def matches(self, context): if not hasattr(context, 'name'): return False return context.name in ['add-backend','remove-backend'] def get_ird_backend(self, mountpoint, **kargs): if mountpoint in self.backends: return self.backends[mountpoint] try: if self.server.get_backend(mountpoint) is not None: logging.warning('Mountpoint %s already registered', mountpoint) return None provider = kargs.pop('provider', get_ird_provider(self.manager.config)) config = palto.palto_config.genereate_ird_config(mountpoint, provider) self.manager.add_backend_route(mountpoint, config) instance = self.server.get_backend(mountpoint) self.backends[mountpoint] = instance self.generate_ird(mountpoint, instance) return instance except Exception as e: logging.warning('Failed to create IRD backend %s: %s', mountpoint, e) return None def generate_ird(self, name, backend): if not can_autogenerate(backend): return try: mountpoints = get_mountpoints(backend) 
resource = generate_ird_resource(backend) except Exception as e: logging.warning('Failed to generate IRD resource for %s: %s', name, e) return for mp in mountpoints: try: if mp == name: continue instance = self.get_ird_backend(mp) if instance is None: logging.warning('Failed to get IRD backend %s, skipping', mp) if instance.register(name, resource): logging.info('Register %s to mountpoint %s', name, mp) except Exception as e: logging.warning('Failed to register %s to mountpoint %s: %s', name, mp, e) def remove_ird(self, name, backend): if not can_autogenerate(backend): return mountpoints = get_mountpoints(backend) for mp in mountpoints: try: instance = self.get_ird_backend(mp) if instance is None: continue instance.unregister(name) except Exception as e: logging.warning('Error while unregistering %s from %s', name, mp) def setup(self, app): if not palto.utils.no_plugin_instance(app.plugins, AutoIRDPlugin): raise bottle.PluginError('AutoIRDPlugin already installed') if app.server is None: raise bottle.PluginError('PaltoServer must not be None') self.manager, self.server = app, app.server backends = dict.copy(app.server.get_backends()) for name, backend in backends.items(): self.generate_ird(name, backend) def apply(self, callback, context): if not self.matches(context): return callback def wrapper(*args, **kwargs): out = callback(*args, **kwargs) try: name = kwargs.get('name') backend = self.server.get_backend(name) if context.name == 'add-backend': self.generate_ird(name, backend) elif context.name == 'remove-backend': self.remove_ird(name, backend) except Exception as e: logging.error('Error while handling %s route: %s', context.name, e) return out return wrapper def close(self): for name in self.backends: self.manager.remove_backend_route(name) self.backends = {} def create_instance(name, config, environ): return AutoIRDPlugin()
apache-2.0
youtube/doorman
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go
1531
// Copyright 2016 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package v3rpc import ( "github.com/coreos/etcd/Godeps/_workspace/src/google.golang.org/grpc" "github.com/coreos/etcd/Godeps/_workspace/src/google.golang.org/grpc/credentials" "github.com/coreos/etcd/etcdserver" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/pkg/transport" ) func Server(s *etcdserver.EtcdServer, tls *transport.TLSInfo) (*grpc.Server, error) { var opts []grpc.ServerOption if tls != nil { creds, err := credentials.NewServerTLSFromFile(tls.CertFile, tls.KeyFile) if err != nil { return nil, err } opts = append(opts, grpc.Creds(creds)) } grpcServer := grpc.NewServer(opts...) pb.RegisterKVServer(grpcServer, NewKVServer(s)) pb.RegisterWatchServer(grpcServer, NewWatchServer(s)) pb.RegisterLeaseServer(grpcServer, NewLeaseServer(s)) pb.RegisterClusterServer(grpcServer, NewClusterServer(s)) pb.RegisterAuthServer(grpcServer, NewAuthServer(s)) return grpcServer, nil }
apache-2.0
zapbot/zap-extensions
addOns/ascanrulesBeta/src/main/java/org/zaproxy/zap/extension/ascanrulesBeta/MessageCache.java
4340
/* * Zed Attack Proxy (ZAP) and its related class files. * * ZAP is an HTTP/HTTPS proxy for assessing web application security. * * Copyright 2014 The ZAP Development Team * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.zaproxy.zap.extension.ascanrulesBeta; import java.util.Collections; import java.util.Map; import org.apache.commons.collections.map.LRUMap; import org.apache.commons.httpclient.URI; import org.apache.log4j.Logger; import org.parosproxy.paros.core.scanner.HostProcess; import org.parosproxy.paros.network.HttpHeader; import org.parosproxy.paros.network.HttpMessage; /** * MessageCache caches HTTP messages. * * @author 70pointer@gmail.com */ public class MessageCache { private static MessageCache instance; private HostProcess parent = null; @SuppressWarnings("unchecked") private Map<URI, HttpMessage> messagecache = Collections.synchronizedMap(new LRUMap(100)); // a map of 100 objects, synchronized private static Logger log = Logger.getLogger(MessageCache.class); private MessageCache(HostProcess hostprocess) { if (log.isDebugEnabled()) log.debug("Initialising"); parent = hostprocess; } public static synchronized MessageCache getSingleton(HostProcess hostprocess) { if (instance == null) createSingleton(hostprocess); return instance; } private static synchronized void createSingleton(HostProcess hostprocess) { if (instance == null) { instance = new MessageCache(hostprocess); } } /** * is a message cached for the given URI? 
* * @param uri * @return */ public synchronized boolean isMessageCached(URI uri) { return messagecache.containsKey(uri); } /** * gets a HttpMessage for the requested URI, using basemsg as the base message. If the message * is available in the cache, return it. If not, retrieve it. * * @param uri the URI for which a httpMessage is being requested * @param basemsg the base message which will be used to construct new messages * @return a HttpMessage for the requested URI, using basemsg as the base message * @throws Exception */ public synchronized HttpMessage getMessage( URI uri, HttpMessage basemsg, boolean followRedirects) throws Exception { if (!isMessageCached(uri)) { if (log.isDebugEnabled()) log.debug("URI '" + uri + "' is not in the message cache. Retrieving it."); // request the file, then add the file to the cache // use the cookies from an original request, in case authorisation is required HttpMessage requestmsg = new HttpMessage(uri); try { requestmsg.setCookieParams(basemsg.getCookieParams()); } catch (Exception e) { if (log.isDebugEnabled()) log.debug("Could not set the cookies from the base request:" + e); } requestmsg.getRequestHeader().setHeader(HttpHeader.IF_MODIFIED_SINCE, null); requestmsg.getRequestHeader().setHeader(HttpHeader.IF_NONE_MATCH, null); requestmsg.getRequestHeader().setContentLength(requestmsg.getRequestBody().length()); parent.getHttpSender().sendAndReceive(requestmsg, followRedirects); parent.notifyNewMessage(requestmsg); // put the message in the cache messagecache.put(uri, requestmsg); if (log.isDebugEnabled()) log.debug("Put URI '" + uri + "' in the message cache."); } else { if (log.isDebugEnabled()) log.debug("URI '" + uri + "' is cached in the message cache."); } // and return the cached message. return messagecache.get(uri); } }
apache-2.0
wsaccaco/lift
examples/skittr/src/main/scala/bootstrap/liftweb/Boot.scala
2794
package bootstrap.liftweb /* * Copyright 2007-2010 WorldWide Conferencing, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions * and limitations under the License. */ import _root_.net.liftweb.util.{Helpers, Log, NamedPF} import _root_.net.liftweb.common.{Box, Empty, Full, Failure} import _root_.net.liftweb.http._ import _root_.net.liftweb.mapper._ import Helpers._ import _root_.net.liftweb.mapper.{DB, ConnectionManager, Schemifier, DefaultConnectionIdentifier, ConnectionIdentifier} import _root_.java.sql.{Connection, DriverManager} import _root_.com.skittr.model._ import _root_.com.skittr.actor._ import provider._ /** * A class that's instantiated early and run. It allows the application * to modify lift's environment */ class Boot { def modelList = List[BaseMetaMapper](User, Friend, MsgStore) def boot { if (!DB.jndiJdbcConnAvailable_?) 
DB.defineConnectionManager(DefaultConnectionIdentifier, DBVendor) LiftRules.addToPackages("com.skittr") // make sure the database is up to date Schemifier.schemify(true, Schemifier.infoF _, modelList :_*) if ((System.getProperty("create_users") != null) && User.count < User.createdCount) User.createTestUsers // map certain urls to the right place val rewriter: LiftRules.RewritePF = NamedPF("User and Friend mapping") { case RewriteRequest(ParsePath("user" :: user :: _, _, _,_), _, _) => RewriteResponse("user" :: Nil, Map("user" -> user)) case RewriteRequest(ParsePath("friend" :: user :: _, _, _,_), _, _) => RewriteResponse("friend" :: Nil, Map("user" -> user)) case RewriteRequest(ParsePath("unfriend" :: user :: _, _, _, _), _, _) => RewriteResponse("unfriend" :: Nil, Map("user" -> user)) } LiftRules.rewrite.prepend(rewriter) // load up the list of user actors UserList.create } } /** * A singleton that vends a database connection to a Derby database */ object DBVendor extends ConnectionManager { def newConnection(name: ConnectionIdentifier): Box[Connection] = { try { Class.forName("org.apache.derby.jdbc.EmbeddedDriver") val dm = DriverManager.getConnection("jdbc:derby:skittr;create=true") Full(dm) } catch { case e : Exception => e.printStackTrace; Empty } } def releaseConnection(conn: Connection) {conn.close} }
apache-2.0
jmrozanec/cron-utils
src/test/java/com/cronutils/Issue338Test.java
719
package com.cronutils; import com.cronutils.descriptor.CronDescriptor; import com.cronutils.model.Cron; import com.cronutils.model.CronType; import com.cronutils.model.definition.CronDefinitionBuilder; import com.cronutils.parser.CronParser; import org.junit.Assert; import org.junit.Test; import java.util.Locale; public class Issue338Test { @Test public void testEverySecondInFrench() { CronParser cronParser = new CronParser(CronDefinitionBuilder.instanceDefinitionFor(CronType.QUARTZ)); String cronString = "* * * * * ? *"; Cron cron = cronParser.parse(cronString); String description = CronDescriptor.instance(Locale.FRANCE).describe(cron); Assert.assertEquals("chaque seconde", description); } }
apache-2.0
JingchengDu/hbase
hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
18596
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hbase.backup.impl; import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.BackupInfo; import org.apache.hadoop.hbase.backup.BackupInfo.BackupState; import org.apache.hadoop.hbase.backup.BackupRestoreConstants; import org.apache.hadoop.hbase.backup.BackupType; import org.apache.hadoop.hbase.backup.HBackupFileSystem; import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage; import org.apache.hadoop.hbase.backup.master.BackupLogCleaner; import org.apache.hadoop.hbase.backup.master.LogRollMasterProcedureManager; import org.apache.hadoop.hbase.backup.regionserver.LogRollRegionServerProcedureManager; import org.apache.yetus.audience.InterfaceAudience; import 
org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.procedure.ProcedureManagerHost; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting; /** * Handles backup requests, creates backup info records in backup system table to * keep track of backup sessions, dispatches backup request. */ @InterfaceAudience.Private public class BackupManager implements Closeable { private static final Log LOG = LogFactory.getLog(BackupManager.class); protected Configuration conf = null; protected BackupInfo backupInfo = null; protected BackupSystemTable systemTable; protected final Connection conn; /** * Backup manager constructor. * @param conn connection * @param conf configuration * @throws IOException exception */ public BackupManager(Connection conn, Configuration conf) throws IOException { if (!conf.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, BackupRestoreConstants.BACKUP_ENABLE_DEFAULT)) { throw new BackupException("HBase backup is not enabled. 
Check your " + BackupRestoreConstants.BACKUP_ENABLE_KEY + " setting."); } this.conf = conf; this.conn = conn; this.systemTable = new BackupSystemTable(conn); } /** * Returns backup info */ protected BackupInfo getBackupInfo() { return backupInfo; } /** * This method modifies the master's configuration in order to inject backup-related features * (TESTs only) * @param conf configuration */ @VisibleForTesting public static void decorateMasterConfiguration(Configuration conf) { if (!isBackupEnabled(conf)) { return; } // Add WAL archive cleaner plug-in String plugins = conf.get(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS); String cleanerClass = BackupLogCleaner.class.getCanonicalName(); if (!plugins.contains(cleanerClass)) { conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, plugins + "," + cleanerClass); } String classes = conf.get(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY); String masterProcedureClass = LogRollMasterProcedureManager.class.getName(); if (classes == null) { conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY, masterProcedureClass); } else if (!classes.contains(masterProcedureClass)) { conf.set(ProcedureManagerHost.MASTER_PROCEDURE_CONF_KEY, classes + "," + masterProcedureClass); } if (LOG.isDebugEnabled()) { LOG.debug("Added log cleaner: " + cleanerClass + "\n" + "Added master procedure manager: " + masterProcedureClass); } } /** * This method modifies the Region Server configuration in order to inject backup-related features * TESTs only. 
* @param conf configuration */ @VisibleForTesting public static void decorateRegionServerConfiguration(Configuration conf) { if (!isBackupEnabled(conf)) { return; } String classes = conf.get(ProcedureManagerHost.REGIONSERVER_PROCEDURE_CONF_KEY); String regionProcedureClass = LogRollRegionServerProcedureManager.class.getName(); if (classes == null) { conf.set(ProcedureManagerHost.REGIONSERVER_PROCEDURE_CONF_KEY, regionProcedureClass); } else if (!classes.contains(regionProcedureClass)) { conf.set(ProcedureManagerHost.REGIONSERVER_PROCEDURE_CONF_KEY, classes + "," + regionProcedureClass); } if (LOG.isDebugEnabled()) { LOG.debug("Added region procedure manager: " + regionProcedureClass); } } public static boolean isBackupEnabled(Configuration conf) { return conf.getBoolean(BackupRestoreConstants.BACKUP_ENABLE_KEY, BackupRestoreConstants.BACKUP_ENABLE_DEFAULT); } /** * Get configuration * @return configuration */ Configuration getConf() { return conf; } /** * Stop all the work of backup. */ @Override public void close() { if (systemTable != null) { try { systemTable.close(); } catch (Exception e) { LOG.error(e); } } } /** * Creates a backup info based on input backup request. * @param backupId backup id * @param type type * @param tableList table list * @param targetRootDir root dir * @param workers number of parallel workers * @param bandwidth bandwidth per worker in MB per sec * @return BackupInfo * @throws BackupException exception */ public BackupInfo createBackupInfo(String backupId, BackupType type, List<TableName> tableList, String targetRootDir, int workers, long bandwidth) throws BackupException { if (targetRootDir == null) { throw new BackupException("Wrong backup request parameter: target backup root directory"); } if (type == BackupType.FULL && (tableList == null || tableList.isEmpty())) { // If table list is null for full backup, which means backup all tables. Then fill the table // list with all user tables from meta. 
It no table available, throw the request exception. HTableDescriptor[] htds = null; try (Admin admin = conn.getAdmin()) { htds = admin.listTables(); } catch (Exception e) { throw new BackupException(e); } if (htds == null) { throw new BackupException("No table exists for full backup of all tables."); } else { tableList = new ArrayList<>(); for (HTableDescriptor hTableDescriptor : htds) { TableName tn = hTableDescriptor.getTableName(); if (tn.equals(BackupSystemTable.getTableName(conf))) { // skip backup system table continue; } tableList.add(hTableDescriptor.getTableName()); } LOG.info("Full backup all the tables available in the cluster: " + tableList); } } // there are one or more tables in the table list backupInfo = new BackupInfo(backupId, type, tableList.toArray(new TableName[tableList.size()]), targetRootDir); backupInfo.setBandwidth(bandwidth); backupInfo.setWorkers(workers); return backupInfo; } /** * Check if any ongoing backup. Currently, we only reply on checking status in backup system * table. We need to consider to handle the case of orphan records in the future. Otherwise, all * the coming request will fail. * @return the ongoing backup id if on going backup exists, otherwise null * @throws IOException exception */ private String getOngoingBackupId() throws IOException { ArrayList<BackupInfo> sessions = systemTable.getBackupInfos(BackupState.RUNNING); if (sessions.size() == 0) { return null; } return sessions.get(0).getBackupId(); } /** * Start the backup manager service. * @throws IOException exception */ public void initialize() throws IOException { String ongoingBackupId = this.getOngoingBackupId(); if (ongoingBackupId != null) { LOG.info("There is a ongoing backup " + ongoingBackupId + ". Can not launch new backup until no ongoing backup remains."); throw new BackupException("There is ongoing backup."); } } public void setBackupInfo(BackupInfo backupInfo) { this.backupInfo = backupInfo; } /** * Get direct ancestors of the current backup. 
* @param backupInfo The backup info for the current backup * @return The ancestors for the current backup * @throws IOException exception * @throws BackupException exception */ public ArrayList<BackupImage> getAncestors(BackupInfo backupInfo) throws IOException, BackupException { LOG.debug("Getting the direct ancestors of the current backup " + backupInfo.getBackupId()); ArrayList<BackupImage> ancestors = new ArrayList<BackupImage>(); // full backup does not have ancestor if (backupInfo.getType() == BackupType.FULL) { LOG.debug("Current backup is a full backup, no direct ancestor for it."); return ancestors; } // get all backup history list in descending order ArrayList<BackupInfo> allHistoryList = getBackupHistory(true); for (BackupInfo backup : allHistoryList) { BackupImage.Builder builder = BackupImage.newBuilder(); BackupImage image = builder.withBackupId(backup.getBackupId()).withType(backup.getType()) .withRootDir(backup.getBackupRootDir()).withTableList(backup.getTableNames()) .withStartTime(backup.getStartTs()).withCompleteTime(backup.getCompleteTs()).build(); // add the full backup image as an ancestor until the last incremental backup if (backup.getType().equals(BackupType.FULL)) { // check the backup image coverage, if previous image could be covered by the newer ones, // then no need to add if (!BackupManifest.canCoverImage(ancestors, image)) { ancestors.add(image); } } else { // found last incremental backup, if previously added full backup ancestor images can cover // it, then this incremental ancestor is not the dependent of the current incremental // backup, that is to say, this is the backup scope boundary of current table set. 
// Otherwise, this incremental backup ancestor is the dependent ancestor of the ongoing // incremental backup if (BackupManifest.canCoverImage(ancestors, image)) { LOG.debug("Met the backup boundary of the current table set:"); for (BackupImage image1 : ancestors) { LOG.debug(" BackupID=" + image1.getBackupId() + ", BackupDir=" + image1.getRootDir()); } } else { Path logBackupPath = HBackupFileSystem.getBackupPath(backup.getBackupRootDir(), backup.getBackupId()); LOG.debug("Current backup has an incremental backup ancestor, " + "touching its image manifest in " + logBackupPath.toString() + " to construct the dependency."); BackupManifest lastIncrImgManifest = new BackupManifest(conf, logBackupPath); BackupImage lastIncrImage = lastIncrImgManifest.getBackupImage(); ancestors.add(lastIncrImage); LOG.debug("Last dependent incremental backup image: " + "{BackupID=" + lastIncrImage.getBackupId() + "," + "BackupDir=" + lastIncrImage.getRootDir() + "}"); } } } LOG.debug("Got " + ancestors.size() + " ancestors for the current backup."); return ancestors; } /** * Get the direct ancestors of this backup for one table involved. 
* @param backupInfo backup info * @param table table * @return backupImages on the dependency list * @throws BackupException exception * @throws IOException exception */ public ArrayList<BackupImage> getAncestors(BackupInfo backupInfo, TableName table) throws BackupException, IOException { ArrayList<BackupImage> ancestors = getAncestors(backupInfo); ArrayList<BackupImage> tableAncestors = new ArrayList<BackupImage>(); for (BackupImage image : ancestors) { if (image.hasTable(table)) { tableAncestors.add(image); if (image.getType() == BackupType.FULL) { break; } } } return tableAncestors; } /* * backup system table operations */ /** * Updates status (state) of a backup session in a persistent store * @param context context * @throws IOException exception */ public void updateBackupInfo(BackupInfo context) throws IOException { systemTable.updateBackupInfo(context); } /** * Starts new backup session * @throws IOException if active session already exists */ public void startBackupSession() throws IOException { systemTable.startBackupExclusiveOperation(); } /** * Finishes active backup session * @throws IOException if no active session */ public void finishBackupSession() throws IOException { systemTable.finishBackupExclusiveOperation(); } /** * Read the last backup start code (timestamp) of last successful backup. Will return null if * there is no startcode stored in backup system table or the value is of length 0. These two * cases indicate there is no successful backup completed so far. * @return the timestamp of a last successful backup * @throws IOException exception */ public String readBackupStartCode() throws IOException { return systemTable.readBackupStartCode(backupInfo.getBackupRootDir()); } /** * Write the start code (timestamp) to backup system table. If passed in null, then write 0 byte. 
* @param startCode start code * @throws IOException exception */ public void writeBackupStartCode(Long startCode) throws IOException { systemTable.writeBackupStartCode(startCode, backupInfo.getBackupRootDir()); } /** * Get the RS log information after the last log roll from backup system table. * @return RS log info * @throws IOException exception */ public HashMap<String, Long> readRegionServerLastLogRollResult() throws IOException { return systemTable.readRegionServerLastLogRollResult(backupInfo.getBackupRootDir()); } public Pair<Map<TableName, Map<String, Map<String, List<Pair<String, Boolean>>>>>, List<byte[]>> readBulkloadRows(List<TableName> tableList) throws IOException { return systemTable.readBulkloadRows(tableList); } public void removeBulkLoadedRows(List<TableName> lst, List<byte[]> rows) throws IOException { systemTable.removeBulkLoadedRows(lst, rows); } public void writeBulkLoadedFiles(List<TableName> sTableList, Map<byte[], List<Path>>[] maps) throws IOException { systemTable.writeBulkLoadedFiles(sTableList, maps, backupInfo.getBackupId()); } /** * Get all completed backup information (in desc order by time) * @return history info of BackupCompleteData * @throws IOException exception */ public List<BackupInfo> getBackupHistory() throws IOException { return systemTable.getBackupHistory(); } public ArrayList<BackupInfo> getBackupHistory(boolean completed) throws IOException { return systemTable.getBackupHistory(completed); } /** * Write the current timestamps for each regionserver to backup system table after a successful * full or incremental backup. Each table may have a different set of log timestamps. The saved * timestamp is of the last log file that was backed up already. 
* @param tables tables * @throws IOException exception */ public void writeRegionServerLogTimestamp(Set<TableName> tables, HashMap<String, Long> newTimestamps) throws IOException { systemTable.writeRegionServerLogTimestamp(tables, newTimestamps, backupInfo.getBackupRootDir()); } /** * Read the timestamp for each region server log after the last successful backup. Each table has * its own set of the timestamps. * @return the timestamp for each region server. key: tableName value: * RegionServer,PreviousTimeStamp * @throws IOException exception */ public HashMap<TableName, HashMap<String, Long>> readLogTimestampMap() throws IOException { return systemTable.readLogTimestampMap(backupInfo.getBackupRootDir()); } /** * Return the current tables covered by incremental backup. * @return set of tableNames * @throws IOException exception */ public Set<TableName> getIncrementalBackupTableSet() throws IOException { return systemTable.getIncrementalBackupTableSet(backupInfo.getBackupRootDir()); } /** * Adds set of tables to overall incremental backup table set * @param tables tables * @throws IOException exception */ public void addIncrementalBackupTableSet(Set<TableName> tables) throws IOException { systemTable.addIncrementalBackupTableSet(tables, backupInfo.getBackupRootDir()); } /** * Saves list of WAL files after incremental backup operation. These files will be stored until * TTL expiration and are used by Backup Log Cleaner plug-in to determine which WAL files can be * safely purged. */ public void recordWALFiles(List<String> files) throws IOException { systemTable.addWALFiles(files, backupInfo.getBackupId(), backupInfo.getBackupRootDir()); } /** * Get WAL files iterator * @return WAL files iterator from backup system table * @throws IOException */ public Iterator<BackupSystemTable.WALItem> getWALFilesFromBackupSystem() throws IOException { return systemTable.getWALFilesIterator(backupInfo.getBackupRootDir()); } public Connection getConnection() { return conn; } }
apache-2.0
VladiMihaylenko/omim
map/transit/transit_display.hpp
3411
#pragma once #include "map/transit/transit_reader.hpp" #include "map/bookmark_manager.hpp" #include "map/routing_mark.hpp" #include "drape_frontend/color_constants.hpp" #include "drape_frontend/route_shape.hpp" #include "routing/route.hpp" #include <functional> #include <map> #include <string> #include <vector> enum class TransitType: uint32_t { // Do not change the order! IntermediatePoint, Pedestrian, Subway, Train, LightRail, Monorail }; extern std::map<TransitType, std::string> const kTransitSymbols; struct TransitStepInfo { TransitStepInfo() = default; TransitStepInfo(TransitType type, double distance, int time, std::string const & number = "", uint32_t color = 0, int intermediateIndex = 0); bool IsEqualType(TransitStepInfo const & ts) const; TransitType m_type = TransitType::Pedestrian; double m_distanceInMeters = 0.0; int m_timeInSec = 0; std::string m_distanceStr; std::string m_distanceUnitsSuffix; // Is valid for all types except TransitType::IntermediatePoint and TransitType::Pedestrian std::string m_number; uint32_t m_colorARGB = 0; // Is valid for TransitType::IntermediatePoint int m_intermediateIndex = 0; }; struct TransitRouteInfo { void AddStep(TransitStepInfo const & step); void UpdateDistanceStrings(); double m_totalDistInMeters = 0.0; double m_totalPedestrianDistInMeters = 0.0; int m_totalTimeInSec = 0; int m_totalPedestrianTimeInSec = 0; std::string m_totalDistanceStr; std::string m_totalDistanceUnitsSuffix; std::string m_totalPedestrianDistanceStr; std::string m_totalPedestrianUnitsSuffix; std::vector<TransitStepInfo> m_steps; }; struct TransitTitle { TransitTitle() = default; TransitTitle(string const & text, df::ColorConstant const & color) : m_text(text), m_color(color) {} string m_text; df::ColorConstant m_color; }; struct TransitMarkInfo { enum class Type { Stop, KeyStop, Transfer, Gate }; Type m_type = Type::Stop; m2::PointD m_point; std::vector<TransitTitle> m_titles; std::string m_symbolName; df::ColorConstant m_color; FeatureID 
m_featureId; }; class TransitRouteDisplay { public: using GetMwmIdFn = std::function<MwmSet::MwmId (routing::NumMwmId numMwmId)>; using GetStringsBundleFn = std::function<StringsBundle const & ()>; TransitRouteDisplay(TransitReadManager & transitReadManager, GetMwmIdFn const & getMwmIdFn, GetStringsBundleFn const & getStringsBundleFn, BookmarkManager * bmManager, std::map<std::string, m2::PointF> const & transitSymbolSizes); bool ProcessSubroute(std::vector<routing::RouteSegment> const & segments, df::Subroute & subroute); void CreateTransitMarks(); TransitRouteInfo const & GetRouteInfo(); private: void CollectTransitDisplayInfo(std::vector<routing::RouteSegment> const & segments, TransitDisplayInfos & transitDisplayInfos); TransitMark * CreateMark(m2::PointD const & pt, FeatureID const & fid); TransitReadManager & m_transitReadManager; GetMwmIdFn m_getMwmIdFn; GetStringsBundleFn m_getStringsBundleFn; BookmarkManager * m_bmManager; std::map<std::string, m2::PointF> const & m_symbolSizes; TransitRouteInfo m_routeInfo; std::vector<TransitMarkInfo> m_transitMarks; int m_subrouteIndex = 0; float m_maxSubrouteWidth = -1.0f; };
apache-2.0
seata/seata
saga/seata-saga-processctrl/src/main/java/io/seata/saga/proctrl/HierarchicalProcessContext.java
1847
/* * Copyright 1999-2019 Seata.io Group. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.seata.saga.proctrl; import java.util.Map; /** * Hierarchical process context * * @author lorne.cl */ public interface HierarchicalProcessContext extends ProcessContext { /** * Gets get variable locally. * * @param name the name * @return the get variable locally */ Object getVariableLocally(String name); /** * Sets set variable locally. * * @param name the name * @param value the value */ void setVariableLocally(String name, Object value); /** * Gets get variables locally. * * @return the get variables locally */ Map<String, Object> getVariablesLocally(); /** * Sets set variables locally. * * @param variables the variables */ void setVariablesLocally(Map<String, Object> variables); /** * Has variable local boolean. * * @param name the name * @return the boolean */ boolean hasVariableLocal(String name); /** * Remove variable locally. * * @param name the name * @return the removed variable or null */ Object removeVariableLocally(String name); /** * Clear locally. */ void clearLocally(); }
apache-2.0
tarikgwa/test
html/var/generation/Magento/Catalog/Model/ProductTypes/Config/Reader/Proxy.php
2271
<?php namespace Magento\Catalog\Model\ProductTypes\Config\Reader; /** * Proxy class for @see \Magento\Catalog\Model\ProductTypes\Config\Reader */ class Proxy extends \Magento\Catalog\Model\ProductTypes\Config\Reader { /** * Object Manager instance * * @var \Magento\Framework\ObjectManagerInterface */ protected $_objectManager = null; /** * Proxied instance name * * @var string */ protected $_instanceName = null; /** * Proxied instance * * @var \Magento\Catalog\Model\ProductTypes\Config\Reader */ protected $_subject = null; /** * Instance shareability flag * * @var bool */ protected $_isShared = null; /** * Proxy constructor * * @param \Magento\Framework\ObjectManagerInterface $objectManager * @param string $instanceName * @param bool $shared */ public function __construct(\Magento\Framework\ObjectManagerInterface $objectManager, $instanceName = '\\Magento\\Catalog\\Model\\ProductTypes\\Config\\Reader', $shared = true) { $this->_objectManager = $objectManager; $this->_instanceName = $instanceName; $this->_isShared = $shared; } /** * @return array */ public function __sleep() { return array('_subject', '_isShared'); } /** * Retrieve ObjectManager from global scope */ public function __wakeup() { $this->_objectManager = \Magento\Framework\App\ObjectManager::getInstance(); } /** * Clone proxied instance */ public function __clone() { $this->_subject = clone $this->_getSubject(); } /** * Get proxied instance * * @return \Magento\Catalog\Model\ProductTypes\Config\Reader */ protected function _getSubject() { if (!$this->_subject) { $this->_subject = true === $this->_isShared ? $this->_objectManager->get($this->_instanceName) : $this->_objectManager->create($this->_instanceName); } return $this->_subject; } /** * {@inheritdoc} */ public function read($scope = null) { return $this->_getSubject()->read($scope); } }
apache-2.0
ryanms15/test-node-client
node-spectate-test/watch.js
2588
var Observer = require('./lib/observer.js') , KeyframeParser = require('./lib/parser/keyframe.js') , co = require('co') var euw = new Observer(process.argv[2]) euw.getGame(process.argv[4], process.argv[5] || 'EUW1', function(err, game) { console.log(game); game.setObserverKey(process.argv[3]) spectate(game) }) function spectate(game) { var first = true game .on('keyframe.available', function(data) { console.log('new keyframe: ', data.id) var buffers = [] var stream = data.download() stream.on('data', function(bfr) { buffers.push(bfr) }) stream.on('end', function() { var full = Buffer.concat(buffers) console.log('loaded keyframe ' + data.id + '#'+ game.id +' Bytes: ' + full.length) try { KeyframeParser().parse(full, dump); } catch(e) { console.log(e) } function dump(data) { console.log('time: ', data.time) console.log('%s players:', data.players.length) for(var pid in data.players) { console.log("player data: %s - %s", data.players[pid].start, data.players[pid].end) console.log("player[%s]: %s", data.players[pid].entity[0], data.players[pid].name) console.log('champion: %s', data.players[pid].champname) console.log('masteries: (%s)', data.players[pid].masteryPointsTotal, data.players[pid].masteries) console.log('items:', data.players[pid].items) } /*console.log('%s towers:', data.towers.length) for(var tid in data.towers) { if(data.towers[tid].itemHeader[1]) { console.log(data.towers[tid].entity[0], data.towers[tid].name) console.log(data.towers[tid].unknown) console.log(data.towers[tid].itemHeader) console.log(data.towers[tid].items) } }*/ } }) }) .on('chunk.available', function(data) { console.log('new chunk: ', data.id) }) .on('end', function(data) { console.log('END') }) .startSpectate() console.log('spectating game: ' + game.id + ' ' + game.region) }
apache-2.0
unrealinux/FinancialDataCrawlingPlatform
src/github.com/derekparker/delve/_fixtures/fputest/fputest.go
408
package main import "runtime" func fputestsetup(f64a, f64b, f64c, f64d float64, f32a, f32b, f32c, f32d float32) func main() { var f64a float64 = 1.1 var f64b float64 = 1.2 var f64c float64 = 1.3 var f64d float64 = 1.4 var f32a float32 = 1.5 var f32b float32 = 1.6 var f32c float32 = 1.7 var f32d float32 = 1.8 fputestsetup(f64a, f64b, f64c, f64d, f32a, f32b, f32c, f32d) runtime.Breakpoint() }
apache-2.0
jabesq/home-assistant
tests/components/mqtt_json/test_device_tracker.py
6084
"""The tests for the JSON MQTT device tracker platform.""" import json import logging import os from asynctest import patch import pytest from homeassistant.setup import async_setup_component from homeassistant.components.device_tracker.legacy import ( YAML_DEVICES, ENTITY_ID_FORMAT, DOMAIN as DT_DOMAIN) from homeassistant.const import CONF_PLATFORM from tests.common import async_mock_mqtt_component, async_fire_mqtt_message _LOGGER = logging.getLogger(__name__) LOCATION_MESSAGE = { 'longitude': 1.0, 'gps_accuracy': 60, 'latitude': 2.0, 'battery_level': 99.9} LOCATION_MESSAGE_INCOMPLETE = { 'longitude': 2.0} @pytest.fixture(autouse=True) def setup_comp(hass): """Initialize components.""" hass.loop.run_until_complete(async_mock_mqtt_component(hass)) yaml_devices = hass.config.path(YAML_DEVICES) yield if os.path.isfile(yaml_devices): os.remove(yaml_devices) async def test_ensure_device_tracker_platform_validation(hass): """Test if platform validation was done.""" async def mock_setup_scanner(hass, config, see, discovery_info=None): """Check that Qos was added by validation.""" assert 'qos' in config with patch('homeassistant.components.mqtt_json.device_tracker.' 
'async_setup_scanner', autospec=True, side_effect=mock_setup_scanner) as mock_sp: dev_id = 'paulus' topic = 'location/paulus' assert await async_setup_component(hass, DT_DOMAIN, { DT_DOMAIN: { CONF_PLATFORM: 'mqtt_json', 'devices': {dev_id: topic} } }) assert mock_sp.call_count == 1 async def test_json_message(hass): """Test json location message.""" dev_id = 'zanzito' topic = 'location/zanzito' location = json.dumps(LOCATION_MESSAGE) assert await async_setup_component(hass, DT_DOMAIN, { DT_DOMAIN: { CONF_PLATFORM: 'mqtt_json', 'devices': {dev_id: topic} } }) async_fire_mqtt_message(hass, topic, location) await hass.async_block_till_done() state = hass.states.get('device_tracker.zanzito') assert state.attributes.get('latitude') == 2.0 assert state.attributes.get('longitude') == 1.0 async def test_non_json_message(hass, caplog): """Test receiving a non JSON message.""" dev_id = 'zanzito' topic = 'location/zanzito' location = 'home' assert await async_setup_component(hass, DT_DOMAIN, { DT_DOMAIN: { CONF_PLATFORM: 'mqtt_json', 'devices': {dev_id: topic} } }) caplog.set_level(logging.ERROR) caplog.clear() async_fire_mqtt_message(hass, topic, location) await hass.async_block_till_done() assert "Error parsing JSON payload: home" in \ caplog.text async def test_incomplete_message(hass, caplog): """Test receiving an incomplete message.""" dev_id = 'zanzito' topic = 'location/zanzito' location = json.dumps(LOCATION_MESSAGE_INCOMPLETE) assert await async_setup_component(hass, DT_DOMAIN, { DT_DOMAIN: { CONF_PLATFORM: 'mqtt_json', 'devices': {dev_id: topic} } }) caplog.set_level(logging.ERROR) caplog.clear() async_fire_mqtt_message(hass, topic, location) await hass.async_block_till_done() assert "Skipping update for following data because of missing " \ "or malformatted data: {\"longitude\": 2.0}" in \ caplog.text async def test_single_level_wildcard_topic(hass): """Test single level wildcard topic.""" dev_id = 'zanzito' subscription = 'location/+/zanzito' topic = 
'location/room/zanzito' location = json.dumps(LOCATION_MESSAGE) assert await async_setup_component(hass, DT_DOMAIN, { DT_DOMAIN: { CONF_PLATFORM: 'mqtt_json', 'devices': {dev_id: subscription} } }) async_fire_mqtt_message(hass, topic, location) await hass.async_block_till_done() state = hass.states.get('device_tracker.zanzito') assert state.attributes.get('latitude') == 2.0 assert state.attributes.get('longitude') == 1.0 async def test_multi_level_wildcard_topic(hass): """Test multi level wildcard topic.""" dev_id = 'zanzito' subscription = 'location/#' topic = 'location/zanzito' location = json.dumps(LOCATION_MESSAGE) assert await async_setup_component(hass, DT_DOMAIN, { DT_DOMAIN: { CONF_PLATFORM: 'mqtt_json', 'devices': {dev_id: subscription} } }) async_fire_mqtt_message(hass, topic, location) await hass.async_block_till_done() state = hass.states.get('device_tracker.zanzito') assert state.attributes.get('latitude') == 2.0 assert state.attributes.get('longitude') == 1.0 async def test_single_level_wildcard_topic_not_matching(hass): """Test not matching single level wildcard topic.""" dev_id = 'zanzito' entity_id = ENTITY_ID_FORMAT.format(dev_id) subscription = 'location/+/zanzito' topic = 'location/zanzito' location = json.dumps(LOCATION_MESSAGE) assert await async_setup_component(hass, DT_DOMAIN, { DT_DOMAIN: { CONF_PLATFORM: 'mqtt_json', 'devices': {dev_id: subscription} } }) async_fire_mqtt_message(hass, topic, location) await hass.async_block_till_done() assert hass.states.get(entity_id) is None async def test_multi_level_wildcard_topic_not_matching(hass): """Test not matching multi level wildcard topic.""" dev_id = 'zanzito' entity_id = ENTITY_ID_FORMAT.format(dev_id) subscription = 'location/#' topic = 'somewhere/zanzito' location = json.dumps(LOCATION_MESSAGE) assert await async_setup_component(hass, DT_DOMAIN, { DT_DOMAIN: { CONF_PLATFORM: 'mqtt_json', 'devices': {dev_id: subscription} } }) async_fire_mqtt_message(hass, topic, location) await 
hass.async_block_till_done() assert hass.states.get(entity_id) is None
apache-2.0
blogify/blogify
src/Middleware/DenyIfBeingEdited.php
1569
<?php namespace jorenvanhocht\Blogify\Middleware; use App\User; use Closure; use Illuminate\Contracts\Auth\Guard; use jorenvanhocht\Blogify\Models\Post; class DenyIfBeingEdited { /** * Holds the Guard Contract * * @var \Illuminate\Contracts\Auth\Guard */ protected $auth; /** * @var \jorenvanhocht\Blogify\Models\Post */ protected $post; /** * @var \App\User */ protected $user; /** * Create a new filter instance. * * @param \Illuminate\Contracts\Auth\Guard $auth * @param \jorenvanhocht\Blogify\Models\Post $post * @param \App\User $user */ public function __construct(Guard $auth, Post $post, User $user) { $this->auth = $auth; $this->post = $post; $this->user = $user; } /** * Handle an incoming request. * * @param \Illuminate\Http\Request $request * @param \Closure $next * @return mixed */ public function handle($request, Closure $next) { $hash = $request->segment(3); $post = $this->post->byHash($hash); if ( $post->being_edited_by != null && $post->being_edited_by != $this->auth->user()->getAuthIdentifier() ) { $user = $this->user->find($post->being_edited_by)->fullName; session()->flash('notify', ['danger', trans('blogify::posts.notify.being_edited', ['name' => $user])]); return redirect()->route('admin.posts.index'); } return $next($request); } }
apache-2.0
yoinx/kernel_adiutor
app/src/main/java/com/grarak/kerneladiutor/fragments/BaseFragment.java
2105
/* * Copyright (C) 2015 Willi Ye * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.grarak.kerneladiutor.fragments; import android.os.Build; import android.os.Bundle; import android.support.v4.app.Fragment; import android.support.v7.app.ActionBar; import android.support.v7.app.AppCompatActivity; import android.view.View; import android.view.ViewTreeObserver; import com.grarak.kerneladiutor.MainActivity; /** * Created by willi on 14.04.15. */ public abstract class BaseFragment extends Fragment implements MainActivity.OnBackButtonListener { @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setHasOptionsMenu(true); } public ActionBar getActionBar() { return ((AppCompatActivity) getActivity()).getSupportActionBar(); } public void onViewCreated(View view, Bundle saved) { super.onViewCreated(view, saved); final ViewTreeObserver observer = view.getViewTreeObserver(); observer.addOnGlobalLayoutListener(new ViewTreeObserver.OnGlobalLayoutListener() { public void onGlobalLayout() { try { if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN){ observer.removeOnGlobalLayoutListener(this); } else { observer.removeGlobalOnLayoutListener(this); } onViewCreated(); } catch (Exception ignored) {} } }); } public void onViewCreated() { } }
apache-2.0
GunoH/intellij-community
platform/platform-impl/src/com/intellij/openapi/fileEditor/TextEditorWithPreview.java
21334
// Copyright 2000-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file. package com.intellij.openapi.fileEditor; import com.intellij.codeHighlighting.BackgroundEditorHighlighter; import com.intellij.icons.AllIcons; import com.intellij.ide.IdeBundle; import com.intellij.ide.structureView.StructureViewBuilder; import com.intellij.ide.util.PropertiesComponent; import com.intellij.openapi.actionSystem.*; import com.intellij.openapi.editor.Editor; import com.intellij.openapi.editor.impl.EditorComponentImpl; import com.intellij.openapi.project.DumbAware; import com.intellij.openapi.project.Project; import com.intellij.openapi.util.Disposer; import com.intellij.openapi.util.Key; import com.intellij.openapi.util.Pair; import com.intellij.openapi.util.UserDataHolderBase; import com.intellij.openapi.util.registry.Registry; import com.intellij.openapi.vfs.VirtualFile; import com.intellij.openapi.wm.IdeFocusManager; import com.intellij.pom.Navigatable; import com.intellij.ui.JBSplitter; import com.intellij.ui.components.JBLayeredPane; import com.intellij.util.Alarm; import com.intellij.util.ObjectUtils; import com.intellij.util.ui.JBUI; import com.intellij.util.ui.UIUtil; import com.intellij.util.ui.components.BorderLayoutPanel; import org.jetbrains.annotations.Nls; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import javax.swing.*; import java.awt.*; import java.awt.event.AWTEventListener; import java.awt.event.KeyAdapter; import java.awt.event.KeyEvent; import java.beans.PropertyChangeEvent; import java.beans.PropertyChangeListener; import java.util.HashMap; import java.util.Map; import java.util.function.Supplier; import static com.intellij.openapi.actionSystem.ActionPlaces.TEXT_EDITOR_WITH_PREVIEW; /** * Two panel editor with three states: Editor, Preview and Editor with Preview. 
* Based on SplitFileEditor by Valentin Fondaratov * * @author Konstantin Bulenkov */ public class TextEditorWithPreview extends UserDataHolderBase implements TextEditor { protected final TextEditor myEditor; protected final FileEditor myPreview; @NotNull private final MyListenersMultimap myListenersGenerator = new MyListenersMultimap(); private final Layout myDefaultLayout; private Layout myLayout; private boolean myIsVerticalSplit; private JComponent myComponent; private JBSplitter mySplitter; private SplitEditorToolbar myToolbarWrapper; private final @Nls String myName; public static final Key<Layout> DEFAULT_LAYOUT_FOR_FILE = Key.create("TextEditorWithPreview.DefaultLayout"); public TextEditorWithPreview(@NotNull TextEditor editor, @NotNull FileEditor preview, @NotNull @Nls String editorName, @NotNull Layout defaultLayout, boolean isVerticalSplit) { myEditor = editor; myPreview = preview; myName = editorName; myDefaultLayout = ObjectUtils.notNull(getLayoutForFile(myEditor.getFile()), defaultLayout); myIsVerticalSplit = isVerticalSplit; } public TextEditorWithPreview(@NotNull TextEditor editor, @NotNull FileEditor preview, @NotNull @Nls String editorName, @NotNull Layout defaultLayout) { this(editor, preview, editorName, defaultLayout, false); } public TextEditorWithPreview(@NotNull TextEditor editor, @NotNull FileEditor preview, @NotNull @Nls String editorName) { this(editor, preview, editorName, Layout.SHOW_EDITOR_AND_PREVIEW); } public TextEditorWithPreview(@NotNull TextEditor editor, @NotNull FileEditor preview) { this(editor, preview, "TextEditorWithPreview"); } @Nullable @Override public BackgroundEditorHighlighter getBackgroundHighlighter() { return myEditor.getBackgroundHighlighter(); } @Nullable @Override public FileEditorLocation getCurrentLocation() { return myEditor.getCurrentLocation(); } @Nullable @Override public StructureViewBuilder getStructureViewBuilder() { return myEditor.getStructureViewBuilder(); } @Override public void dispose() { 
Disposer.dispose(myEditor); Disposer.dispose(myPreview); } @Override public void selectNotify() { myEditor.selectNotify(); myPreview.selectNotify(); } @Override public void deselectNotify() { myEditor.deselectNotify(); myPreview.deselectNotify(); } @NotNull @Override public JComponent getComponent() { if (myComponent != null) { return myComponent; } mySplitter = new JBSplitter(myIsVerticalSplit, 0.5f, 0.15f, 0.85f); mySplitter.setSplitterProportionKey(getSplitterProportionKey()); mySplitter.setFirstComponent(myEditor.getComponent()); mySplitter.setSecondComponent(myPreview.getComponent()); mySplitter.setDividerWidth(2); myToolbarWrapper = createMarkdownToolbarWrapper(mySplitter); if (myLayout == null) { String lastUsed = PropertiesComponent.getInstance().getValue(getLayoutPropertyName()); myLayout = Layout.fromId(lastUsed, myDefaultLayout); } adjustEditorsVisibility(); BorderLayoutPanel panel = JBUI.Panels.simplePanel(mySplitter).addToTop(myToolbarWrapper); if (!Registry.is("ide.text.editor.with.preview.show.floating.toolbar") || !myToolbarWrapper.isLeftToolbarEmpty()) { myComponent = panel; return myComponent; } myToolbarWrapper.setVisible(false); MyEditorLayeredComponentWrapper layeredPane = new MyEditorLayeredComponentWrapper(panel); myComponent = layeredPane; LayoutActionsFloatingToolbar toolbar = new LayoutActionsFloatingToolbar(myComponent, new DefaultActionGroup(myToolbarWrapper.getRightToolbar().getActions())); Disposer.register(this, toolbar); layeredPane.add(panel, JLayeredPane.DEFAULT_LAYER); myComponent.add(toolbar, JLayeredPane.POPUP_LAYER); registerToolbarListeners(panel, toolbar); return myComponent; } private void registerToolbarListeners(JComponent actualComponent, LayoutActionsFloatingToolbar toolbar) { UIUtil.addAwtListener(new MyMouseListener(toolbar), AWTEvent.MOUSE_MOTION_EVENT_MASK, toolbar); final var actualEditor = UIUtil.findComponentOfType(actualComponent, EditorComponentImpl.class); if (actualEditor != null) { final var editorKeyListener 
= new KeyAdapter() { @Override public void keyPressed(KeyEvent event) { toolbar.getVisibilityController().scheduleHide(); } }; actualEditor.getEditor().getContentComponent().addKeyListener(editorKeyListener); Disposer.register(toolbar, () -> { actualEditor.getEditor().getContentComponent().removeKeyListener(editorKeyListener); }); } } public boolean isVerticalSplit() { return myIsVerticalSplit; } public void setVerticalSplit(boolean verticalSplit) { myIsVerticalSplit = verticalSplit; mySplitter.setOrientation(verticalSplit); } @NotNull private SplitEditorToolbar createMarkdownToolbarWrapper(@NotNull JComponent targetComponentForActions) { final ActionToolbar leftToolbar = createToolbar(); if (leftToolbar != null) { leftToolbar.setTargetComponent(targetComponentForActions); leftToolbar.setReservePlaceAutoPopupIcon(false); } final ActionToolbar rightToolbar = createRightToolbar(); rightToolbar.setTargetComponent(targetComponentForActions); rightToolbar.setReservePlaceAutoPopupIcon(false); return new SplitEditorToolbar(leftToolbar, rightToolbar); } @Override public void setState(@NotNull FileEditorState state) { if (state instanceof MyFileEditorState) { final MyFileEditorState compositeState = (MyFileEditorState)state; if (compositeState.getFirstState() != null) { myEditor.setState(compositeState.getFirstState()); } if (compositeState.getSecondState() != null) { myPreview.setState(compositeState.getSecondState()); } if (compositeState.getSplitLayout() != null) { myLayout = compositeState.getSplitLayout(); invalidateLayout(); } } } @SuppressWarnings("unused") protected void onLayoutChange(Layout oldValue, Layout newValue) { } private void adjustEditorsVisibility() { myEditor.getComponent().setVisible(myLayout == Layout.SHOW_EDITOR || myLayout == Layout.SHOW_EDITOR_AND_PREVIEW); myPreview.getComponent().setVisible(myLayout == Layout.SHOW_PREVIEW || myLayout == Layout.SHOW_EDITOR_AND_PREVIEW); } protected void setLayout(@NotNull Layout layout) { Layout oldLayout = 
myLayout; myLayout = layout; PropertiesComponent.getInstance().setValue(getLayoutPropertyName(), myLayout.myId, myDefaultLayout.myId); adjustEditorsVisibility(); onLayoutChange(oldLayout, myLayout); } private void invalidateLayout() { adjustEditorsVisibility(); myToolbarWrapper.refresh(); myComponent.repaint(); final JComponent focusComponent = getPreferredFocusedComponent(); Component focusOwner = IdeFocusManager.findInstance().getFocusOwner(); if (focusComponent != null && focusOwner != null && SwingUtilities.isDescendingFrom(focusOwner, getComponent())) { IdeFocusManager.findInstanceByComponent(focusComponent).requestFocus(focusComponent, true); } } @NotNull protected String getSplitterProportionKey() { return "TextEditorWithPreview.SplitterProportionKey"; } @Nullable @Override public JComponent getPreferredFocusedComponent() { switch (myLayout) { case SHOW_EDITOR_AND_PREVIEW: case SHOW_EDITOR: return myEditor.getPreferredFocusedComponent(); case SHOW_PREVIEW: return myPreview.getPreferredFocusedComponent(); default: throw new IllegalStateException(myLayout.myId); } } @NotNull @Override public String getName() { return myName; } @NotNull @Override public FileEditorState getState(@NotNull FileEditorStateLevel level) { return new MyFileEditorState(myLayout, myEditor.getState(level), myPreview.getState(level)); } @Override public void addPropertyChangeListener(@NotNull PropertyChangeListener listener) { myEditor.addPropertyChangeListener(listener); myPreview.addPropertyChangeListener(listener); final DoublingEventListenerDelegate delegate = myListenersGenerator.addListenerAndGetDelegate(listener); myEditor.addPropertyChangeListener(delegate); myPreview.addPropertyChangeListener(delegate); } @Override public void removePropertyChangeListener(@NotNull PropertyChangeListener listener) { myEditor.removePropertyChangeListener(listener); myPreview.removePropertyChangeListener(listener); final DoublingEventListenerDelegate delegate = 
myListenersGenerator.removeListenerAndGetDelegate(listener); if (delegate != null) { myEditor.removePropertyChangeListener(delegate); myPreview.removePropertyChangeListener(delegate); } } @NotNull public TextEditor getTextEditor() { return myEditor; } @NotNull public FileEditor getPreviewEditor() { return myPreview; } public Layout getLayout() { return myLayout; } public static class MyFileEditorState implements FileEditorState { private final Layout mySplitLayout; private final FileEditorState myFirstState; private final FileEditorState mySecondState; public MyFileEditorState(Layout layout, FileEditorState firstState, FileEditorState secondState) { mySplitLayout = layout; myFirstState = firstState; mySecondState = secondState; } @Nullable public Layout getSplitLayout() { return mySplitLayout; } @Nullable public FileEditorState getFirstState() { return myFirstState; } @Nullable public FileEditorState getSecondState() { return mySecondState; } @Override public boolean canBeMergedWith(@NotNull FileEditorState otherState, @NotNull FileEditorStateLevel level) { return otherState instanceof MyFileEditorState && (myFirstState == null || myFirstState.canBeMergedWith(((MyFileEditorState)otherState).myFirstState, level)) && (mySecondState == null || mySecondState.canBeMergedWith(((MyFileEditorState)otherState).mySecondState, level)); } } @Override public boolean isModified() { return myEditor.isModified() || myPreview.isModified(); } @Override public boolean isValid() { return myEditor.isValid() && myPreview.isValid(); } private final class DoublingEventListenerDelegate implements PropertyChangeListener { @NotNull private final PropertyChangeListener myDelegate; private DoublingEventListenerDelegate(@NotNull PropertyChangeListener delegate) { myDelegate = delegate; } @Override public void propertyChange(PropertyChangeEvent evt) { myDelegate.propertyChange( new PropertyChangeEvent(TextEditorWithPreview.this, evt.getPropertyName(), evt.getOldValue(), evt.getNewValue())); } } 
private class MyListenersMultimap { private final Map<PropertyChangeListener, Pair<Integer, DoublingEventListenerDelegate>> myMap = new HashMap<>(); @NotNull public DoublingEventListenerDelegate addListenerAndGetDelegate(@NotNull PropertyChangeListener listener) { if (!myMap.containsKey(listener)) { myMap.put(listener, Pair.create(1, new DoublingEventListenerDelegate(listener))); } else { final Pair<Integer, DoublingEventListenerDelegate> oldPair = myMap.get(listener); myMap.put(listener, Pair.create(oldPair.getFirst() + 1, oldPair.getSecond())); } return myMap.get(listener).getSecond(); } @Nullable public DoublingEventListenerDelegate removeListenerAndGetDelegate(@NotNull PropertyChangeListener listener) { final Pair<Integer, DoublingEventListenerDelegate> oldPair = myMap.get(listener); if (oldPair == null) { return null; } if (oldPair.getFirst() == 1) { myMap.remove(listener); } else { myMap.put(listener, Pair.create(oldPair.getFirst() - 1, oldPair.getSecond())); } return oldPair.getSecond(); } } @Nullable protected ActionToolbar createToolbar() { ActionGroup actionGroup = createLeftToolbarActionGroup(); if (actionGroup != null) { return ActionManager.getInstance().createActionToolbar(TEXT_EDITOR_WITH_PREVIEW, actionGroup, true); } else { return null; } } @Nullable protected ActionGroup createLeftToolbarActionGroup() { return null; } @NotNull private ActionToolbar createRightToolbar() { final ActionGroup viewActions = createViewActionGroup(); final ActionGroup group = createRightToolbarActionGroup(); final ActionGroup rightToolbarActions = group == null ? 
viewActions : new DefaultActionGroup(group, Separator.create(), viewActions); return ActionManager.getInstance().createActionToolbar(TEXT_EDITOR_WITH_PREVIEW, rightToolbarActions, true); } @NotNull protected ActionGroup createViewActionGroup() { return new DefaultActionGroup( getShowEditorAction(), getShowEditorAndPreviewAction(), getShowPreviewAction() ); } @Nullable protected ActionGroup createRightToolbarActionGroup() { return null; } @NotNull protected ToggleAction getShowEditorAction() { return new ChangeViewModeAction(Layout.SHOW_EDITOR); } @NotNull protected ToggleAction getShowPreviewAction() { return new ChangeViewModeAction(Layout.SHOW_PREVIEW); } @NotNull protected ToggleAction getShowEditorAndPreviewAction() { return new ChangeViewModeAction(Layout.SHOW_EDITOR_AND_PREVIEW); } public enum Layout { SHOW_EDITOR("Editor only", IdeBundle.messagePointer("tab.title.editor.only")), SHOW_PREVIEW("Preview only", IdeBundle.messagePointer("tab.title.preview.only")), SHOW_EDITOR_AND_PREVIEW("Editor and Preview", IdeBundle.messagePointer("tab.title.editor.and.preview")); private final @NotNull Supplier<@Nls String> myName; private final String myId; Layout(String id, @NotNull Supplier<String> name) { myId = id; myName = name; } public static Layout fromId(String id, Layout defaultValue) { for (Layout layout : values()) { if (layout.myId.equals(id)) { return layout; } } return defaultValue; } public @Nls String getName() { return myName.get(); } public Icon getIcon(@Nullable TextEditorWithPreview editor) { if (this == SHOW_EDITOR) return AllIcons.General.LayoutEditorOnly; if (this == SHOW_PREVIEW) return AllIcons.General.LayoutPreviewOnly; return editor != null && editor.myIsVerticalSplit ? 
AllIcons.Actions.PreviewDetailsVertically : AllIcons.Actions.PreviewDetails; } } private class ChangeViewModeAction extends ToggleAction implements DumbAware { private final Layout myActionLayout; ChangeViewModeAction(Layout layout) { super(layout.getName(), layout.getName(), layout.getIcon(TextEditorWithPreview.this)); myActionLayout = layout; } @Override public boolean isSelected(@NotNull AnActionEvent e) { return myLayout == myActionLayout; } @Override public void setSelected(@NotNull AnActionEvent e, boolean state) { if (state) { setLayout(myActionLayout); } else { if (myActionLayout == Layout.SHOW_EDITOR_AND_PREVIEW) { mySplitter.setOrientation(!myIsVerticalSplit); myIsVerticalSplit = !myIsVerticalSplit; } } } @Override public void update(@NotNull AnActionEvent e) { super.update(e); e.getPresentation().setIcon(myActionLayout.getIcon(TextEditorWithPreview.this)); } } @NotNull private String getLayoutPropertyName() { return myName + "Layout"; } @Override public @Nullable VirtualFile getFile() { return getTextEditor().getFile(); } @Override public @NotNull Editor getEditor() { return getTextEditor().getEditor(); } @Override public boolean canNavigateTo(@NotNull Navigatable navigatable) { return getTextEditor().canNavigateTo(navigatable); } @Override public void navigateTo(@NotNull Navigatable navigatable) { getTextEditor().navigateTo(navigatable); } protected void handleLayoutChange(boolean isVerticalSplit) { if (myIsVerticalSplit == isVerticalSplit) return; myIsVerticalSplit = isVerticalSplit; myToolbarWrapper.refresh(); mySplitter.setOrientation(myIsVerticalSplit); myComponent.repaint(); } @Nullable private static Layout getLayoutForFile(@Nullable VirtualFile file) { if (file != null) { return file.getUserData(DEFAULT_LAYOUT_FOR_FILE); } return null; } public static void openPreviewForFile(@NotNull Project project, @NotNull VirtualFile file) { file.putUserData(DEFAULT_LAYOUT_FOR_FILE, Layout.SHOW_PREVIEW); FileEditorManager.getInstance(project).openFile(file, 
true); } private static class MyEditorLayeredComponentWrapper extends JBLayeredPane { private final JComponent editorComponent; static final int toolbarTopPadding = 25; static final int toolbarRightPadding = 20; private MyEditorLayeredComponentWrapper(JComponent component) { editorComponent = component; } @Override public void doLayout() { final var components = getComponents(); final var bounds = getBounds(); for (Component component : components) { if (component == editorComponent) { component.setBounds(0, 0, bounds.width, bounds.height); } else { final var preferredComponentSize = component.getPreferredSize(); var x = 0; var y = 0; if (component instanceof LayoutActionsFloatingToolbar) { x = bounds.width - preferredComponentSize.width - toolbarRightPadding; y = toolbarTopPadding; } component.setBounds(x, y, preferredComponentSize.width, preferredComponentSize.height); } } } @Override public Dimension getPreferredSize() { return editorComponent.getPreferredSize(); } } private class MyMouseListener implements AWTEventListener { private final LayoutActionsFloatingToolbar toolbar; private final Alarm alarm; MyMouseListener(LayoutActionsFloatingToolbar toolbar) { this.toolbar = toolbar; alarm = new Alarm(Alarm.ThreadToUse.POOLED_THREAD, toolbar); } @Override public void eventDispatched(AWTEvent event) { var isMouseOutsideToolbar = toolbar.getMousePosition() == null; if (myComponent.getMousePosition() != null) { alarm.cancelAllRequests(); toolbar.getVisibilityController().scheduleShow(); if (isMouseOutsideToolbar) { alarm.addRequest(() -> { toolbar.getVisibilityController().scheduleHide(); }, 1400); } } else if (isMouseOutsideToolbar) { toolbar.getVisibilityController().scheduleHide(); } } } }
apache-2.0
wegtam/playframework
dev-mode/routes-compiler/src/main/scala/play/routes/compiler/templates/package.scala
17996
/* * Copyright (C) Lightbend Inc. <https://www.lightbend.com> */ package play.routes.compiler import scala.collection.immutable.ListMap import scala.util.matching.Regex /** * Helper methods used in the templates */ package object templates { /** * Mark lines with source map information. */ def markLines(routes: Rule*): String = { // since a compilation error is really not possible in a comment, there is no point in putting one line per // route, only the first one will ever be taken routes.headOption.fold("")("// @LINE:" + _.pos.line) } /** * Generate a base identifier for the given route */ def baseIdentifier(route: Route, index: Int): String = route.call.packageName.map(_.replace(".", "_") + "_").getOrElse("") + route.call.controller .replace(".", "_") + "_" + route.call.method + index /** * Generate a route object identifier for the given route */ def routeIdentifier(route: Route, index: Int): String = baseIdentifier(route, index) + "_route" /** * Generate a invoker object identifier for the given route */ def invokerIdentifier(route: Route, index: Int): String = baseIdentifier(route, index) + "_invoker" /** * Generate a router object identifier */ def routerIdentifier(include: Include, index: Int): String = include.router.replace(".", "_") + index def concatSep[T](seq: Seq[T], sep: String)(f: T => ScalaContent): Any = { if (seq.isEmpty) { Nil } else { Seq(f(seq.head), seq.tail.map { t => Seq(sep, f(t)) }) } } /** * Generate a controller method call for the given route */ def controllerMethodCall(r: Route, paramFormat: Parameter => String): String = { val methodPart = if (r.call.instantiate) { s"$Injector.instanceOf(classOf[${r.call.packageName.map(_ + ".").getOrElse("")}${r.call.controller}]).${r.call.method}" } else { s"${r.call.packageName.map(_ + ".").getOrElse("")}${r.call.controller}.${r.call.method}" } val paramPart = r.call.parameters .map { params => params.map(paramFormat).mkString(", ") } .map("(" + _ + ")") .getOrElse("") methodPart + paramPart } /** 
* Generate a controller method call for the given injected route */ def injectedControllerMethodCall(r: Route, ident: String, paramFormat: Parameter => String): String = { val methodPart = if (r.call.instantiate) { s"$ident.get.${r.call.method}" } else { s"$ident.${r.call.method}" } val paramPart = r.call.parameters .map { params => params.map(paramFormat).mkString(", ") } .map("(" + _ + ")") .getOrElse("") methodPart + paramPart } def paramNameOnQueryString(paramName: String): String = { if (paramName.matches("^`[^`]+`$")) paramName.substring(1, paramName.length - 1) else paramName } /** * A route binding */ def routeBinding(route: Route): String = { route.call.parameters .filterNot(_.isEmpty) .map { params => val ps = params.filterNot(_.isJavaRequest).map { p => val paramName: String = paramNameOnQueryString(p.name) p.fixed .map { v => """Param[""" + p.typeName + """]("""" + paramName + """", Right(""" + v + """))""" } .getOrElse { """params.""" + (if (route.path.has(paramName)) "fromPath" else "fromQuery") + """[""" + p.typeName + """]("""" + paramName + """", """ + p.default .map("Some(" + _ + ")") .getOrElse("None") + """)""" } } if (ps.size < 22) ps.mkString(", ") else ps } .map("(" + _ + ")") .filterNot(_ == "()") .getOrElse("") } /** * Extract the local names out from the route, as tuple. See PR#4244 */ def tupleNames(route: Route): String = route.call.parameters .filterNot(_.isEmpty) .map { params => params.filterNot(_.isJavaRequest).map(x => safeKeyword(x.name)).mkString(", ") } .filterNot(_.isEmpty) .map("(" + _ + ") =>") .getOrElse("") /** * Extract the local names out from the route, as List. 
See PR#4244 */ def listNames(route: Route): String = route.call.parameters .filterNot(_.isEmpty) .map { params => params.filterNot(_.isJavaRequest).map(x => "(" + safeKeyword(x.name) + ": " + x.typeName + ")").mkString(":: ") } .filterNot(_.isEmpty) .map("case " + _ + " :: Nil =>") .getOrElse("") /** * Extract the local names out from the route */ def localNames(route: Route): String = if (route.call.parameters.map(_.filterNot(_.isJavaRequest).size).getOrElse(0) < 22) tupleNames(route) else listNames(route) /** * The code to statically get the Play injector */ val Injector = "play.api.Play.routesCompilerMaybeApplication.map(_.injector).getOrElse(play.api.inject.NewInstanceInjector)" val scalaReservedWords = List( "abstract", "case", "catch", "class", "def", "do", "else", "extends", "false", "final", "finally", "for", "forSome", "if", "implicit", "import", "lazy", "macro", "match", "new", "null", "object", "override", "package", "private", "protected", "return", "sealed", "super", "then", "this", "throw", "trait", "try", "true", "type", "val", "var", "while", "with", "yield", // Not scala keywords, but are used in the router "queryString" ) /** * Ensure that the given keyword doesn't clash with any of the keywords that Play is using, including Scala keywords. */ def safeKeyword(keyword: String): String = scalaReservedWords .collectFirst { case reserved if reserved == keyword => s"_pf_escape_$reserved" } .getOrElse(keyword) /** * Calculate the parameters for the reverse route call for the given routes. */ def reverseParameters(routes: Seq[Route]): Seq[(Parameter, Int)] = routes.head.call.routeParams.zipWithIndex.filterNot { case (p, i) => val fixeds = routes.map(_.call.routeParams(i).fixed).distinct fixeds.size == 1 && fixeds.head.isDefined } /** * Calculate the parameters for the javascript reverse route call for the given routes. 
*/ def reverseParametersJavascript(routes: Seq[Route]): Seq[(Parameter, Int)] = routes.head.call.routeParams.zipWithIndex .map { case (p, i) => val re: Regex = """[^\p{javaJavaIdentifierPart}]""".r val paramEscapedName: String = re.replaceAllIn(p.name, "_") (p.copy(name = paramEscapedName + i), i) } .filterNot { case (p, i) => val fixeds = routes.map(_.call.routeParams(i).fixed).distinct fixeds.size == 1 && fixeds.head.isDefined } /** * Reverse parameters for matching */ def reverseMatchParameters(params: Seq[(Parameter, Int)], annotateUnchecked: Boolean): String = { val annotation = if (annotateUnchecked) ": @unchecked" else "" params.map(x => safeKeyword(x._1.name) + annotation).mkString(", ") } /** * Generate the reverse parameter constraints * * In routes like /dummy controllers.Application.dummy(foo = "bar") * foo = "bar" is a constraint */ def reverseParameterConstraints(route: Route, localNames: Map[String, String]): String = { route.call.parameters .getOrElse(Nil) .filter { p => localNames.contains(p.name) && p.fixed.isDefined } .map { p => p.name + " == " + p.fixed.get } match { case Nil => "" case nonEmpty => "if " + nonEmpty.mkString(" && ") } } /** * Calculate the local names that need to be matched */ def reverseLocalNames(route: Route, params: Seq[(Parameter, Int)]): Map[String, String] = params.map { case (lp, i) => route.call.routeParams(i).name -> lp.name }.toMap /** * Calculate the unique reverse constraints, and generate them using the given block */ def reverseUniqueConstraints(routes: Seq[Route], params: Seq[(Parameter, Int)])( block: (Route, String, String, Map[String, String]) => ScalaContent ): Seq[ScalaContent] = { ListMap(routes.reverse.map { route => val localNames = reverseLocalNames(route, params) val parameters = reverseMatchParameters(params, annotateUnchecked = false) val parameterConstraints = reverseParameterConstraints(route, localNames) (parameters -> parameterConstraints) -> block(route, parameters, parameterConstraints, 
localNames) }: _*).values.toSeq.reverse } /** * Generate the reverse route context */ def reverseRouteContext(route: Route): String = { val fixedParams = route.call.parameters.getOrElse(Nil).collect { case Parameter(name, _, Some(fixed), _) => "(\"%s\", %s)".format(name, fixed) } if (fixedParams.isEmpty) { "" } else { "implicit lazy val _rrc = new play.core.routing.ReverseRouteContext(Map(%s)); _rrc".format( fixedParams.mkString(", ") ) } } /** * Generate the parameter signature for the reverse route call for the given routes. */ def reverseSignature(routes: Seq[Route]): String = reverseParameters(routes) .map(p => safeKeyword(p._1.name) + ":" + p._1.typeName + { Option(routes.map(_.call.routeParams(p._2).default).distinct) .filter(_.size == 1) .flatMap(_.headOption) .map { case None => "" case Some(default) => " = " + default } .getOrElse("") } ) .mkString(", ") /** * Generate the reverse call */ def reverseCall(route: Route, localNames: Map[String, String] = Map()): String = { val df = if (route.path.parts.isEmpty) "" else " + { _defaultPrefix } + " val callPath = "_prefix" + df + route.path.parts .map { case StaticPart(part) => "\"" + part + "\"" case DynamicPart(name, _, encode) => route.call.routeParams .find(_.name == name) .map { param => val paramName: String = paramNameOnQueryString(param.name) val unbound = s"""implicitly[play.api.mvc.PathBindable[${param.typeName}]]""" + s""".unbind("$paramName", ${safeKeyword(localNames.getOrElse(param.name, param.name))})""" if (encode) s"play.core.routing.dynamicString($unbound)" else unbound } .getOrElse { throw new Error("missing key " + name) } } .mkString(" + ") val queryParams = route.call.routeParams.filterNot { p => p.fixed.isDefined || route.path.parts .collect { case DynamicPart(name, _, _) => name } .contains(p.name) } val callQueryString = if (queryParams.isEmpty) { "" } else { """ + play.core.routing.queryString(List(%s))""".format( queryParams .map { p => 
("""implicitly[play.api.mvc.QueryStringBindable[""" + p.typeName + """]].unbind("""" + paramNameOnQueryString( p.name ) + """", """ + safeKeyword(localNames.getOrElse(p.name, p.name)) + """)""") -> p } .map { case (u, Parameter(name, typeName, None, Some(default))) => """if(""" + safeKeyword(localNames.getOrElse(name, name)) + """ == """ + default + """) None else Some(""" + u + """)""" case (u, Parameter(name, typeName, None, None)) => "Some(" + u + ")" } .mkString(", ") ) } """Call("%s", %s%s)""".format(route.verb.value, callPath, callQueryString) } /** * Generate the Javascript code for the parameter constraints. * * This generates the contents of an if statement in JavaScript, and is used for when multiple routes route to the * same action but with different parameters. If there are no constraints, None will be returned. */ def javascriptParameterConstraints(route: Route, localNames: Map[String, String]): Option[String] = { Option( route.call.routeParams .filter { p => localNames.contains(p.name) && p.fixed.isDefined } .map { p => localNames(p.name) + " == \"\"\" + implicitly[play.api.mvc.JavascriptLiteral[" + p.typeName + "]].to(" + p.fixed.get + ") + \"\"\"" } ).filterNot(_.isEmpty).map(_.mkString(" && ")) } /** * Collect all the routes that apply to a single action that are not dead. * * Dead routes occur when two routes route to the same action with the same parameters. When reverse routing, this * means the one reverse router, depending on the parameters, will return different URLs. But if they have the same * parameters, or no parameters, then after the first one, the subsequent ones will be dead code, never matching. * * This optimization not only saves on code generated, but since the body of the JavaScript router is a series of * very long String concatenation, this is hard work on the typer, which can easily stack overflow. 
*/ def javascriptCollectNonDeadRoutes(routes: Seq[Route]): Seq[(Route, Map[String, String], String)] = { routes .map { route => val localNames = reverseLocalNames(route, reverseParametersJavascript(routes)) val constraints = javascriptParameterConstraints(route, localNames) (route, localNames, constraints) } .foldLeft((Seq.empty[(Route, Map[String, String], String)], false)) { case ((_routes, true), dead) => (_routes, true) case ((_routes, false), (route, localNames, None)) => (_routes :+ ((route, localNames, "true")), true) case ((_routes, false), (route, localNames, Some(constraints))) => (_routes :+ ((route, localNames, constraints)), false) } ._1 } /** * Generate the Javascript call */ def javascriptCall(route: Route, localNames: Map[String, String] = Map()): String = { val path = "\"\"\"\" + _prefix + " + { if (route.path.parts.isEmpty) "" else "{ _defaultPrefix } + " } + "\"\"\"\"" + route.path.parts.map { case StaticPart(part) => " + \"" + part + "\"" case DynamicPart(name, _, encode) => route.call.parameters .getOrElse(Nil) .find(_.name == name) .filterNot(_.isJavaRequest) .map { param => val paramName: String = paramNameOnQueryString(param.name) val jsUnbound = "(\"\"\" + implicitly[play.api.mvc.PathBindable[" + param.typeName + "]].javascriptUnbind + \"\"\")" + s"""("$paramName", ${localNames.getOrElse(param.name, param.name)})""" if (encode) s" + encodeURIComponent($jsUnbound)" else s" + $jsUnbound" } .getOrElse { throw new Error("missing key " + name) } }.mkString val queryParams = route.call.routeParams.filterNot { p => p.fixed.isDefined || route.path.parts .collect { case DynamicPart(name, _, _) => name } .contains(p.name) } val queryString = if (queryParams.isEmpty) { "" } else { """ + _qS([%s])""".format( queryParams .map { p => val paramName: String = paramNameOnQueryString(p.name) ("(\"\"\" + implicitly[play.api.mvc.QueryStringBindable[" + p.typeName + "]].javascriptUnbind + \"\"\")" + """("""" + paramName + """", """ + localNames 
.getOrElse(p.name, p.name) + """)""") -> p } .map { case (u, Parameter(name, typeName, None, Some(default))) => """(""" + localNames.getOrElse(name, name) + " == null ? null : " + u + ")" case (u, Parameter(name, typeName, None, None)) => u } .mkString(", ") ) } "return _wA({method:\"%s\", url:%s%s})".format(route.verb.value, path, queryString) } /** * Generate the signature of a method on the ref router */ def refReverseSignature(routes: Seq[Route]): String = routes.head.call.routeParams.map(p => safeKeyword(p.name) + ": " + p.typeName).mkString(", ") /** * Generate the ref router call */ def refCall(route: Route, useInjector: Route => Boolean): String = { val controllerRef = s"${route.call.packageName.map(_ + ".").getOrElse("")}${route.call.controller}" val methodCall = s"${route.call.method}(${route.call.parameters.getOrElse(Nil).map(x => safeKeyword(x.nameClean)).mkString(", ")})" if (useInjector(route)) { s"$Injector.instanceOf(classOf[$controllerRef]).$methodCall" } else { s"$controllerRef.$methodCall" } } /** * Encode the given String constant as a triple quoted String. * * This will split the String at any $ characters, and use concatenation to concatenate a single $ String followed * be the remainder, this is to avoid "possible missing interpolator" false positive warnings. * * That is to say: * * {{{ * /foo/$id<[^/]+> * }}} * * Will be encoded as: * * {{{ * """/foo/""" + "$" + """id<[^/]+>""" * }}} */ def encodeStringConstant(constant: String): String = { constant.split('$').mkString(tq, s"""$tq + "$$" + $tq""", tq) } def groupRoutesByPackage(routes: Seq[Route]): Map[Option[String], Seq[Route]] = routes.groupBy(_.call.packageName) def groupRoutesByController(routes: Seq[Route]): Map[String, Seq[Route]] = routes.groupBy(_.call.controller) def groupRoutesByMethod(routes: Seq[Route]): Map[(String, Seq[String]), Seq[Route]] = routes.groupBy(r => (r.call.method, r.call.parameters.getOrElse(Nil).map(_.typeNameReal))) val ob = "{" val cb = "}" val tq = "\"\"\"" }
apache-2.0
apache/sqoop
src/test/org/apache/sqoop/manager/TestMainframeManager.java
6376
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.sqoop.manager; import java.io.IOException; import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Types; import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.StringUtils; import org.apache.sqoop.accumulo.AccumuloUtil; import org.apache.sqoop.hbase.HBaseUtil; import org.apache.sqoop.tool.MainframeImportTool; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.apache.sqoop.ConnFactory; import org.apache.sqoop.SqoopOptions; import org.apache.sqoop.metastore.JobData; import org.apache.sqoop.testutil.BaseSqoopTestCase; import org.apache.sqoop.util.ImportException; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.fail; /** * Test methods of the generic SqlManager implementation. 
*/ public class TestMainframeManager extends BaseSqoopTestCase { private static final Log LOG = LogFactory.getLog(TestMainframeManager.class .getName()); private ConnManager manager; private SqoopOptions opts; private ImportJobContext context; @Before public void setUp() { Configuration conf = getConf(); opts = getSqoopOptions(conf); opts.setConnectString("dummy.server"); opts.setTableName("dummy.pds"); opts.setConnManagerClassName(MainframeManager.class.getName()); context = new ImportJobContext(getTableName(), null, opts, null); ConnFactory f = new ConnFactory(conf); try { this.manager = f.getManager(new JobData(opts, new MainframeImportTool())); } catch (IOException ioe) { fail("IOException instantiating manager: " + StringUtils.stringifyException(ioe)); } } @After public void tearDown() { try { manager.close(); } catch (SQLException sqlE) { LOG.error("Got SQLException: " + sqlE.toString()); fail("Got SQLException: " + sqlE.toString()); } } @Test public void testListColNames() { String[] colNames = manager.getColumnNames(getTableName()); assertNotNull("manager should return a column list", colNames); assertEquals("Column list should be length 1", 1, colNames.length); assertEquals(MainframeManager.DEFAULT_DATASET_COLUMN_NAME, colNames[0]); } @Test public void testListColTypes() { Map<String, Integer> types = manager.getColumnTypes(getTableName()); assertNotNull("manager should return a column types map", types); assertEquals("Column types map should be size 1", 1, types.size()); assertEquals(types.get(MainframeManager.DEFAULT_DATASET_COLUMN_NAME) .intValue(), Types.VARCHAR); } @Test public void testImportTableNoHBaseJarPresent() { HBaseUtil.setAlwaysNoHBaseJarMode(true); opts.setHBaseTable("dummy_table"); try { manager.importTable(context); fail("An ImportException should be thrown: " + "HBase jars are not present in classpath, cannot import to HBase!"); } catch (ImportException e) { assertEquals(e.toString(), "HBase jars are not present in classpath, cannot 
import to HBase!"); } catch (IOException e) { fail("No IOException should be thrown!"); } finally { HBaseUtil.setAlwaysNoHBaseJarMode(false); opts.setHBaseTable(null); } } @Test public void testImportTableNoAccumuloJarPresent() { AccumuloUtil.setAlwaysNoAccumuloJarMode(true); opts.setAccumuloTable("dummy_table"); try { manager.importTable(context); fail("An ImportException should be thrown: " + "Accumulo jars are not present in classpath, cannot import to " + "Accumulo!"); } catch (ImportException e) { assertEquals(e.toString(), "Accumulo jars are not present in classpath, cannot import to " + "Accumulo!"); } catch (IOException e) { fail("No IOException should be thrown!"); } finally { AccumuloUtil.setAlwaysNoAccumuloJarMode(false); opts.setAccumuloTable(null); } } @Test public void testListTables() { String[] tables = manager.listTables(); assertNull("manager should not return a list of tables", tables); } @Test public void testListDatabases() { String[] databases = manager.listDatabases(); assertNull("manager should not return a list of databases", databases); } @Test public void testGetPrimaryKey() { String primaryKey = manager.getPrimaryKey(getTableName()); assertNull("manager should not return a primary key", primaryKey); } @Test public void testReadTable() { String[] colNames = manager.getColumnNames(getTableName()); try { ResultSet table = manager.readTable(getTableName(), colNames); assertNull("manager should not read a table", table); } catch (SQLException sqlE) { fail("Got SQLException: " + sqlE.toString()); } } @Test public void testGetConnection() { try { Connection con = manager.getConnection(); assertNull("manager should not return a connection", con); } catch (SQLException sqlE) { fail("Got SQLException: " + sqlE.toString()); } } @Test public void testGetDriverClass() { String driverClass = manager.getDriverClass(); assertNotNull("manager should return a driver class", driverClass); assertEquals("manager should return an empty driver class", "", 
driverClass); } }
apache-2.0
yoanngern/iahm_2016
wp-content/plugins/event-espresso-core-reg/payment_methods/Bank/help_tabs/payment_methods_overview_bank_draft.help_tab.php
1356
<p><strong><?php _e('Bank Draft', 'event_espresso'); ?></strong></p> <p> <?php _e('Adjust the settings for the bank draft payment gateway.', 'event_espresso'); ?> </p> <p><strong><?php _e('Bank Draft Settings', 'event_espresso'); ?></strong></p> <ul> <li> <strong><?php _e('Page Title', 'event_espresso'); ?></strong><br /> <?php _e('Enter a title for this page.', 'event_espresso'); ?> </li> <li> <strong><?php _e('Payment Instructions', 'event_espresso'); ?></strong><br /> <?php _e('Provide clear instructions for how a payment should be made.', 'event_espresso'); ?> </li> <li> <strong><?php _e('Name on Bank Account', 'event_espresso'); ?></strong><br /> <?php _e('Enter the full name on the bank account.', 'event_espresso'); ?> </li> <li> <strong><?php _e('Bank Account #', 'event_espresso'); ?></strong><br /> <?php _e('Enter the bank account number.', 'event_espresso'); ?> </li> <li> <strong><?php _e('Bank Name', 'event_espresso'); ?></strong><br /> <?php _e('Enter the name of the bank.', 'event_espresso'); ?> </li> <li> <strong><?php _e('Bank Address', 'event_espresso'); ?></strong><br /> <?php _e('Enter the address for the bank.', 'event_espresso'); ?> </li> <li> <strong><?php _e('Button Image URL', 'event_espresso'); ?></strong><br /> <?php _e('Change the image that is used for this payment gateway.', 'event_espresso'); ?> </li> </ul>
apache-2.0
amihalik/incubator-rya
extras/periodic.notification/tests/src/test/java/org/apache/rya/periodic/notification/application/PeriodicNotificationApplicationIT.java
25839
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.rya.periodic.notification.application; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Properties; import java.util.Set; import java.util.UUID; import javax.xml.datatype.DatatypeConfigurationException; import javax.xml.datatype.DatatypeFactory; import org.apache.accumulo.core.client.Connector; import org.apache.fluo.api.client.FluoClient; import org.apache.fluo.api.config.FluoConfiguration; import org.apache.fluo.core.client.FluoClientImpl; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.common.serialization.StringDeserializer; import 
org.apache.kafka.common.serialization.StringSerializer; import org.apache.rya.api.resolver.RdfToRyaConversions; import org.apache.rya.indexing.accumulo.ConfigUtils; import org.apache.rya.indexing.pcj.fluo.api.CreatePeriodicQuery; import org.apache.rya.indexing.pcj.fluo.api.InsertTriples; import org.apache.rya.indexing.pcj.fluo.app.IncrementalUpdateConstants; import org.apache.rya.indexing.pcj.fluo.app.util.FluoClientFactory; import org.apache.rya.indexing.pcj.fluo.app.util.FluoQueryUtils; import org.apache.rya.indexing.pcj.storage.PeriodicQueryResultStorage; import org.apache.rya.indexing.pcj.storage.PrecomputedJoinStorage.CloseableIterator; import org.apache.rya.indexing.pcj.storage.accumulo.AccumuloPeriodicQueryResultStorage; import org.apache.rya.kafka.base.EmbeddedKafkaInstance; import org.apache.rya.kafka.base.EmbeddedKafkaSingleton; import org.apache.rya.kafka.base.KafkaTestInstanceRule; import org.apache.rya.pcj.fluo.test.base.RyaExportITBase; import org.apache.rya.periodic.notification.notification.CommandNotification; import org.apache.rya.periodic.notification.registration.KafkaNotificationRegistrationClient; import org.apache.rya.periodic.notification.serialization.BindingSetSerDe; import org.apache.rya.periodic.notification.serialization.CommandNotificationSerializer; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; import org.openrdf.model.Statement; import org.openrdf.model.Value; import org.openrdf.model.ValueFactory; import org.openrdf.model.impl.LiteralImpl; import org.openrdf.model.impl.ValueFactoryImpl; import org.openrdf.model.vocabulary.XMLSchema; import org.openrdf.query.BindingSet; import org.openrdf.query.algebra.evaluation.QueryBindingSet; import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; import com.google.common.collect.Sets; import static 
org.apache.rya.periodic.notification.application.PeriodicNotificationApplicationConfiguration.NOTIFICATION_TOPIC; import static org.apache.rya.periodic.notification.application.PeriodicNotificationApplicationConfiguration.KAFKA_BOOTSTRAP_SERVERS;; public class PeriodicNotificationApplicationIT extends RyaExportITBase { private PeriodicNotificationApplication app; private KafkaNotificationRegistrationClient registrar; private KafkaProducer<String, CommandNotification> producer; private Properties props; private Properties kafkaProps; private PeriodicNotificationApplicationConfiguration conf; private static EmbeddedKafkaInstance embeddedKafka = EmbeddedKafkaSingleton.getInstance(); private static String bootstrapServers; @Rule public KafkaTestInstanceRule rule = new KafkaTestInstanceRule(false); @BeforeClass public static void initClass() { bootstrapServers = embeddedKafka.createBootstrapServerConfig().getProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG); } @Before public void init() throws Exception { String topic = rule.getKafkaTopicName(); rule.createTopic(topic); //get user specified props and update with the embedded kafka bootstrap servers and rule generated topic props = getProps(); props.setProperty(NOTIFICATION_TOPIC, topic); props.setProperty(KAFKA_BOOTSTRAP_SERVERS, bootstrapServers); conf = new PeriodicNotificationApplicationConfiguration(props); //create Kafka Producer kafkaProps = getKafkaProperties(conf); producer = new KafkaProducer<>(kafkaProps, new StringSerializer(), new CommandNotificationSerializer()); //extract kafka specific properties from application config app = PeriodicNotificationApplicationFactory.getPeriodicApplication(props); registrar = new KafkaNotificationRegistrationClient(conf.getNotificationTopic(), producer); } @Test public void periodicApplicationWithAggAndGroupByTest() throws Exception { String sparql = "prefix function: <http://org.apache.rya/function#> " // n + "prefix time: <http://www.w3.org/2006/time#> " // n + 
"select ?type (count(?obs) as ?total) where {" // n + "Filter(function:periodic(?time, 1, .25, time:minutes)) " // n + "?obs <uri:hasTime> ?time. " // n + "?obs <uri:hasObsType> ?type } group by ?type"; // n //make data int periodMult = 15; final ValueFactory vf = new ValueFactoryImpl(); final DatatypeFactory dtf = DatatypeFactory.newInstance(); //Sleep until current time aligns nicely with period to makell //results more predictable while(System.currentTimeMillis() % (periodMult*1000) > 500); ZonedDateTime time = ZonedDateTime.now(); ZonedDateTime zTime1 = time.minusSeconds(2*periodMult); String time1 = zTime1.format(DateTimeFormatter.ISO_INSTANT); ZonedDateTime zTime2 = zTime1.minusSeconds(periodMult); String time2 = zTime2.format(DateTimeFormatter.ISO_INSTANT); ZonedDateTime zTime3 = zTime2.minusSeconds(periodMult); String time3 = zTime3.format(DateTimeFormatter.ISO_INSTANT); final Collection<Statement> statements = Sets.newHashSet( vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time1))), vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasObsType"), vf.createLiteral("ship")), vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time1))), vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasObsType"), vf.createLiteral("airplane")), vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time2))), vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasObsType"), vf.createLiteral("ship")), vf.createStatement(vf.createURI("urn:obs_4"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time2))), vf.createStatement(vf.createURI("urn:obs_4"), vf.createURI("uri:hasObsType"), vf.createLiteral("airplane")), vf.createStatement(vf.createURI("urn:obs_5"), vf.createURI("uri:hasTime"), 
vf.createLiteral(dtf.newXMLGregorianCalendar(time3))), vf.createStatement(vf.createURI("urn:obs_5"), vf.createURI("uri:hasObsType"), vf.createLiteral("automobile"))); try (FluoClient fluo = FluoClientFactory.getFluoClient(conf.getFluoAppName(), Optional.of(conf.getFluoTableName()), conf)) { Connector connector = ConfigUtils.getConnector(conf); PeriodicQueryResultStorage storage = new AccumuloPeriodicQueryResultStorage(connector, conf.getTablePrefix()); CreatePeriodicQuery periodicQuery = new CreatePeriodicQuery(fluo, storage); String id = FluoQueryUtils.convertFluoQueryIdToPcjId(periodicQuery.createPeriodicQuery(sparql, registrar).getQueryId()); addData(statements); app.start(); Multimap<Long, BindingSet> actual = HashMultimap.create(); try (KafkaConsumer<String, BindingSet> consumer = new KafkaConsumer<>(kafkaProps, new StringDeserializer(), new BindingSetSerDe())) { consumer.subscribe(Arrays.asList(id)); long end = System.currentTimeMillis() + 4*periodMult*1000; long lastBinId = 0L; long binId = 0L; List<Long> ids = new ArrayList<>(); while (System.currentTimeMillis() < end) { ConsumerRecords<String, BindingSet> records = consumer.poll(periodMult*1000); for(ConsumerRecord<String, BindingSet> record: records){ BindingSet result = record.value(); binId = Long.parseLong(result.getBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID).getValue().stringValue()); if(lastBinId != binId) { lastBinId = binId; ids.add(binId); } actual.put(binId, result); } } Map<Long, Set<BindingSet>> expected = new HashMap<>(); Set<BindingSet> expected1 = new HashSet<>(); QueryBindingSet bs1 = new QueryBindingSet(); bs1.addBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID, vf.createLiteral(ids.get(0))); bs1.addBinding("total", new LiteralImpl("2", XMLSchema.INTEGER)); bs1.addBinding("type", vf.createLiteral("airplane")); QueryBindingSet bs2 = new QueryBindingSet(); bs2.addBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID, vf.createLiteral(ids.get(0))); bs2.addBinding("total", new 
LiteralImpl("2", XMLSchema.INTEGER)); bs2.addBinding("type", vf.createLiteral("ship")); QueryBindingSet bs3 = new QueryBindingSet(); bs3.addBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID, vf.createLiteral(ids.get(0))); bs3.addBinding("total", new LiteralImpl("1", XMLSchema.INTEGER)); bs3.addBinding("type", vf.createLiteral("automobile")); expected1.add(bs1); expected1.add(bs2); expected1.add(bs3); Set<BindingSet> expected2 = new HashSet<>(); QueryBindingSet bs4 = new QueryBindingSet(); bs4.addBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID, vf.createLiteral(ids.get(1))); bs4.addBinding("total", new LiteralImpl("2", XMLSchema.INTEGER)); bs4.addBinding("type", vf.createLiteral("airplane")); QueryBindingSet bs5 = new QueryBindingSet(); bs5.addBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID, vf.createLiteral(ids.get(1))); bs5.addBinding("total", new LiteralImpl("2", XMLSchema.INTEGER)); bs5.addBinding("type", vf.createLiteral("ship")); expected2.add(bs4); expected2.add(bs5); Set<BindingSet> expected3 = new HashSet<>(); QueryBindingSet bs6 = new QueryBindingSet(); bs6.addBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID, vf.createLiteral(ids.get(2))); bs6.addBinding("total", new LiteralImpl("1", XMLSchema.INTEGER)); bs6.addBinding("type", vf.createLiteral("ship")); QueryBindingSet bs7 = new QueryBindingSet(); bs7.addBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID, vf.createLiteral(ids.get(2))); bs7.addBinding("total", new LiteralImpl("1", XMLSchema.INTEGER)); bs7.addBinding("type", vf.createLiteral("airplane")); expected3.add(bs6); expected3.add(bs7); expected.put(ids.get(0), expected1); expected.put(ids.get(1), expected2); expected.put(ids.get(2), expected3); Assert.assertEquals(3, actual.asMap().size()); for(Long ident: ids) { Assert.assertEquals(expected.get(ident), actual.get(ident)); } } Set<BindingSet> expectedResults = new HashSet<>(); try (CloseableIterator<BindingSet> results = storage.listResults(id, Optional.empty())) { 
results.forEachRemaining(x -> expectedResults.add(x)); Assert.assertEquals(0, expectedResults.size()); } } } @Test public void periodicApplicationWithAggTest() throws Exception { String sparql = "prefix function: <http://org.apache.rya/function#> " // n + "prefix time: <http://www.w3.org/2006/time#> " // n + "select (count(?obs) as ?total) where {" // n + "Filter(function:periodic(?time, 1, .25, time:minutes)) " // n + "?obs <uri:hasTime> ?time. " // n + "?obs <uri:hasId> ?id } "; // n //make data int periodMult = 15; final ValueFactory vf = new ValueFactoryImpl(); final DatatypeFactory dtf = DatatypeFactory.newInstance(); //Sleep until current time aligns nicely with period to make //results more predictable while(System.currentTimeMillis() % (periodMult*1000) > 500); ZonedDateTime time = ZonedDateTime.now(); ZonedDateTime zTime1 = time.minusSeconds(2*periodMult); String time1 = zTime1.format(DateTimeFormatter.ISO_INSTANT); ZonedDateTime zTime2 = zTime1.minusSeconds(periodMult); String time2 = zTime2.format(DateTimeFormatter.ISO_INSTANT); ZonedDateTime zTime3 = zTime2.minusSeconds(periodMult); String time3 = zTime3.format(DateTimeFormatter.ISO_INSTANT); final Collection<Statement> statements = Sets.newHashSet( vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time1))), vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasId"), vf.createLiteral("id_1")), vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time2))), vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasId"), vf.createLiteral("id_2")), vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time3))), vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasId"), vf.createLiteral("id_3"))); try (FluoClient fluo = FluoClientFactory.getFluoClient(conf.getFluoAppName(), 
Optional.of(conf.getFluoTableName()), conf)) { Connector connector = ConfigUtils.getConnector(conf); PeriodicQueryResultStorage storage = new AccumuloPeriodicQueryResultStorage(connector, conf.getTablePrefix()); CreatePeriodicQuery periodicQuery = new CreatePeriodicQuery(fluo, storage); String id = FluoQueryUtils.convertFluoQueryIdToPcjId(periodicQuery.createPeriodicQuery(sparql, registrar).getQueryId()); addData(statements); app.start(); Multimap<Long, BindingSet> expected = HashMultimap.create(); try (KafkaConsumer<String, BindingSet> consumer = new KafkaConsumer<>(kafkaProps, new StringDeserializer(), new BindingSetSerDe())) { consumer.subscribe(Arrays.asList(id)); long end = System.currentTimeMillis() + 4*periodMult*1000; long lastBinId = 0L; long binId = 0L; List<Long> ids = new ArrayList<>(); while (System.currentTimeMillis() < end) { ConsumerRecords<String, BindingSet> records = consumer.poll(periodMult*1000); for(ConsumerRecord<String, BindingSet> record: records){ BindingSet result = record.value(); binId = Long.parseLong(result.getBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID).getValue().stringValue()); if(lastBinId != binId) { lastBinId = binId; ids.add(binId); } expected.put(binId, result); } } Assert.assertEquals(3, expected.asMap().size()); int i = 0; for(Long ident: ids) { Assert.assertEquals(1, expected.get(ident).size()); BindingSet bs = expected.get(ident).iterator().next(); Value val = bs.getValue("total"); int total = Integer.parseInt(val.stringValue()); Assert.assertEquals(3-i, total); i++; } } Set<BindingSet> expectedResults = new HashSet<>(); try (CloseableIterator<BindingSet> results = storage.listResults(id, Optional.empty())) { results.forEachRemaining(x -> expectedResults.add(x)); Assert.assertEquals(0, expectedResults.size()); } } } @Test public void periodicApplicationTest() throws Exception { String sparql = "prefix function: <http://org.apache.rya/function#> " // n + "prefix time: <http://www.w3.org/2006/time#> " // n + "select 
?obs ?id where {" // n + "Filter(function:periodic(?time, 1, .25, time:minutes)) " // n + "?obs <uri:hasTime> ?time. " // n + "?obs <uri:hasId> ?id } "; // n //make data int periodMult = 15; final ValueFactory vf = new ValueFactoryImpl(); final DatatypeFactory dtf = DatatypeFactory.newInstance(); //Sleep until current time aligns nicely with period to make //results more predictable while(System.currentTimeMillis() % (periodMult*1000) > 500); ZonedDateTime time = ZonedDateTime.now(); ZonedDateTime zTime1 = time.minusSeconds(2*periodMult); String time1 = zTime1.format(DateTimeFormatter.ISO_INSTANT); ZonedDateTime zTime2 = zTime1.minusSeconds(periodMult); String time2 = zTime2.format(DateTimeFormatter.ISO_INSTANT); ZonedDateTime zTime3 = zTime2.minusSeconds(periodMult); String time3 = zTime3.format(DateTimeFormatter.ISO_INSTANT); final Collection<Statement> statements = Sets.newHashSet( vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time1))), vf.createStatement(vf.createURI("urn:obs_1"), vf.createURI("uri:hasId"), vf.createLiteral("id_1")), vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time2))), vf.createStatement(vf.createURI("urn:obs_2"), vf.createURI("uri:hasId"), vf.createLiteral("id_2")), vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasTime"), vf.createLiteral(dtf.newXMLGregorianCalendar(time3))), vf.createStatement(vf.createURI("urn:obs_3"), vf.createURI("uri:hasId"), vf.createLiteral("id_3"))); try (FluoClient fluo = FluoClientFactory.getFluoClient(conf.getFluoAppName(), Optional.of(conf.getFluoTableName()), conf)) { Connector connector = ConfigUtils.getConnector(conf); PeriodicQueryResultStorage storage = new AccumuloPeriodicQueryResultStorage(connector, conf.getTablePrefix()); CreatePeriodicQuery periodicQuery = new CreatePeriodicQuery(fluo, storage); String id = 
FluoQueryUtils.convertFluoQueryIdToPcjId(periodicQuery.createPeriodicQuery(sparql, registrar).getQueryId()); addData(statements); app.start(); Multimap<Long, BindingSet> expected = HashMultimap.create(); try (KafkaConsumer<String, BindingSet> consumer = new KafkaConsumer<>(kafkaProps, new StringDeserializer(), new BindingSetSerDe())) { consumer.subscribe(Arrays.asList(id)); long end = System.currentTimeMillis() + 4*periodMult*1000; long lastBinId = 0L; long binId = 0L; List<Long> ids = new ArrayList<>(); while (System.currentTimeMillis() < end) { ConsumerRecords<String, BindingSet> records = consumer.poll(periodMult*1000); for(ConsumerRecord<String, BindingSet> record: records){ BindingSet result = record.value(); binId = Long.parseLong(result.getBinding(IncrementalUpdateConstants.PERIODIC_BIN_ID).getValue().stringValue()); if(lastBinId != binId) { lastBinId = binId; ids.add(binId); } expected.put(binId, result); } } Assert.assertEquals(3, expected.asMap().size()); int i = 0; for(Long ident: ids) { Assert.assertEquals(3-i, expected.get(ident).size()); i++; } } Set<BindingSet> expectedResults = new HashSet<>(); try (CloseableIterator<BindingSet> results = storage.listResults(id, Optional.empty())) { results.forEachRemaining(x -> expectedResults.add(x)); Assert.assertEquals(0, expectedResults.size()); } } } @After public void shutdown() { registrar.close(); app.stop(); } private void addData(Collection<Statement> statements) throws DatatypeConfigurationException { // add statements to Fluo try (FluoClient fluo = new FluoClientImpl(getFluoConfiguration())) { InsertTriples inserter = new InsertTriples(); statements.forEach(x -> inserter.insert(fluo, RdfToRyaConversions.convertStatement(x))); getMiniFluo().waitForObservers(); } } private static Properties getKafkaProperties(PeriodicNotificationApplicationConfiguration conf) { Properties kafkaProps = new Properties(); kafkaProps.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); 
kafkaProps.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, UUID.randomUUID().toString()); kafkaProps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, conf.getNotificationGroupId()); kafkaProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); return kafkaProps; } private Properties getProps() throws IOException { Properties props = new Properties(); try(InputStream in = new FileInputStream("src/test/resources/notification.properties")) { props.load(in); } FluoConfiguration fluoConf = getFluoConfiguration(); props.setProperty("accumulo.user", getUsername()); props.setProperty("accumulo.password", getPassword()); props.setProperty("accumulo.instance", getMiniAccumuloCluster().getInstanceName()); props.setProperty("accumulo.zookeepers", getMiniAccumuloCluster().getZooKeepers()); props.setProperty("accumulo.rya.prefix", getRyaInstanceName()); props.setProperty(PeriodicNotificationApplicationConfiguration.FLUO_APP_NAME, fluoConf.getApplicationName()); props.setProperty(PeriodicNotificationApplicationConfiguration.FLUO_TABLE_NAME, fluoConf.getAccumuloTable()); return props; } }
apache-2.0
atom/crashpad
handler/mac/crash_report_exception_handler.cc
10251
// Copyright 2015 The Crashpad Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "handler/mac/crash_report_exception_handler.h" #include <vector> #include "base/logging.h" #include "base/mac/mach_logging.h" #include "base/mac/scoped_mach_port.h" #include "base/strings/stringprintf.h" #include "client/settings.h" #include "handler/mac/file_limit_annotation.h" #include "minidump/minidump_file_writer.h" #include "minidump/minidump_user_extension_stream_data_source.h" #include "snapshot/crashpad_info_client_options.h" #include "snapshot/mac/process_snapshot_mac.h" #include "util/file/file_writer.h" #include "util/mach/exc_client_variants.h" #include "util/mach/exception_behaviors.h" #include "util/mach/exception_types.h" #include "util/mach/mach_extensions.h" #include "util/mach/mach_message.h" #include "util/mach/scoped_task_suspend.h" #include "util/mach/symbolic_constants_mach.h" #include "util/misc/metrics.h" #include "util/misc/tri_state.h" #include "util/misc/uuid.h" namespace crashpad { CrashReportExceptionHandler::CrashReportExceptionHandler( CrashReportDatabase* database, CrashReportUploadThread* upload_thread, const std::map<std::string, std::string>* process_annotations, const UserStreamDataSources* user_stream_data_sources) : database_(database), upload_thread_(upload_thread), process_annotations_(process_annotations), user_stream_data_sources_(user_stream_data_sources) {} 
CrashReportExceptionHandler::~CrashReportExceptionHandler() { } kern_return_t CrashReportExceptionHandler::CatchMachException( exception_behavior_t behavior, exception_handler_t exception_port, thread_t thread, task_t task, exception_type_t exception, const mach_exception_data_type_t* code, mach_msg_type_number_t code_count, thread_state_flavor_t* flavor, ConstThreadState old_state, mach_msg_type_number_t old_state_count, thread_state_t new_state, mach_msg_type_number_t* new_state_count, const mach_msg_trailer_t* trailer, bool* destroy_complex_request) { RecordFileLimitAnnotation(); Metrics::ExceptionEncountered(); Metrics::ExceptionCode(ExceptionCodeForMetrics(exception, code[0])); *destroy_complex_request = true; // The expected behavior is EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, // but it’s possible to deal with any exception behavior as long as it // carries identity information (valid thread and task ports). if (!ExceptionBehaviorHasIdentity(behavior)) { LOG(ERROR) << base::StringPrintf( "unexpected exception behavior %s, rejecting", ExceptionBehaviorToString( behavior, kUseFullName | kUnknownIsNumeric | kUseOr).c_str()); Metrics::ExceptionCaptureResult( Metrics::CaptureResult::kUnexpectedExceptionBehavior); return KERN_FAILURE; } else if (behavior != (EXCEPTION_STATE_IDENTITY | kMachExceptionCodes)) { LOG(WARNING) << base::StringPrintf( "unexpected exception behavior %s, proceeding", ExceptionBehaviorToString( behavior, kUseFullName | kUnknownIsNumeric | kUseOr).c_str()); } if (task == mach_task_self()) { LOG(ERROR) << "cannot suspend myself"; Metrics::ExceptionCaptureResult( Metrics::CaptureResult::kFailedDueToSuspendSelf); return KERN_FAILURE; } ScopedTaskSuspend suspend(task); ProcessSnapshotMac process_snapshot; if (!process_snapshot.Initialize(task)) { Metrics::ExceptionCaptureResult(Metrics::CaptureResult::kSnapshotFailed); return KERN_FAILURE; } // Check for suspicious message sources. 
A suspicious exception message comes // from a source other than the kernel or the process that the exception // purportedly occurred in. // // TODO(mark): Consider exceptions outside of the range (0, 32) from the // kernel to be suspicious, and exceptions other than kMachExceptionSimulated // from the process itself to be suspicious. const pid_t pid = process_snapshot.ProcessID(); pid_t audit_pid = AuditPIDFromMachMessageTrailer(trailer); if (audit_pid != -1 && audit_pid != 0) { if (audit_pid != pid) { LOG(WARNING) << "exception for pid " << pid << " sent by pid " << audit_pid; } } CrashpadInfoClientOptions client_options; process_snapshot.GetCrashpadOptions(&client_options); if (client_options.crashpad_handler_behavior != TriState::kDisabled && !IsExceptionNonfatalResource(exception, code[0], pid)) { // Non-fatal resource exceptions are never user-visible and are not // currently of interest to Crashpad. if (!process_snapshot.InitializeException(behavior, thread, exception, code, code_count, *flavor, old_state, old_state_count)) { Metrics::ExceptionCaptureResult( Metrics::CaptureResult::kExceptionInitializationFailed); return KERN_FAILURE; } UUID client_id; Settings* const settings = database_->GetSettings(); if (settings) { // If GetSettings() or GetClientID() fails, something else will log a // message and client_id will be left at its default value, all zeroes, // which is appropriate. 
settings->GetClientID(&client_id); } process_snapshot.SetClientID(client_id); process_snapshot.SetAnnotationsSimpleMap(*process_annotations_); CrashReportDatabase::NewReport* new_report; CrashReportDatabase::OperationStatus database_status = database_->PrepareNewCrashReport(&new_report); if (database_status != CrashReportDatabase::kNoError) { Metrics::ExceptionCaptureResult( Metrics::CaptureResult::kPrepareNewCrashReportFailed); return KERN_FAILURE; } process_snapshot.SetReportID(new_report->uuid); CrashReportDatabase::CallErrorWritingCrashReport call_error_writing_crash_report(database_, new_report); WeakFileHandleFileWriter file_writer(new_report->handle); MinidumpFileWriter minidump; minidump.InitializeFromSnapshot(&process_snapshot); AddUserExtensionStreams( user_stream_data_sources_, &process_snapshot, &minidump); if (!minidump.WriteEverything(&file_writer)) { Metrics::ExceptionCaptureResult( Metrics::CaptureResult::kMinidumpWriteFailed); return KERN_FAILURE; } call_error_writing_crash_report.Disarm(); UUID uuid; database_status = database_->FinishedWritingCrashReport(new_report, &uuid); if (database_status != CrashReportDatabase::kNoError) { Metrics::ExceptionCaptureResult( Metrics::CaptureResult::kFinishedWritingCrashReportFailed); return KERN_FAILURE; } upload_thread_->ReportPending(uuid); } if (client_options.system_crash_reporter_forwarding != TriState::kDisabled && (exception == EXC_CRASH || exception == EXC_RESOURCE || exception == EXC_GUARD)) { // Don’t forward simulated exceptions such as kMachExceptionSimulated to the // system crash reporter. Only forward the types of exceptions that it would // receive under normal conditions. 
Although the system crash reporter is // able to deal with other exceptions including simulated ones, forwarding // them to the system crash reporter could present the system’s crash UI for // processes that haven’t actually crashed, and could result in reports not // actually associated with crashes being sent to the operating system // vendor. base::mac::ScopedMachSendRight system_crash_reporter_handler(SystemCrashReporterHandler()); if (system_crash_reporter_handler.get()) { // Make copies of mutable out parameters so that the system crash reporter // can’t influence the state returned by this method. thread_state_flavor_t flavor_forward = *flavor; mach_msg_type_number_t new_state_forward_count = *new_state_count; std::vector<natural_t> new_state_forward( new_state, new_state + new_state_forward_count); // The system crash reporter requires the behavior to be // EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES. It uses the identity // parameters but doesn’t appear to use the state parameters, including // |flavor|, and doesn’t care if they are 0 or invalid. As long as an // identity is available (checked above), any other exception behavior is // converted to what the system crash reporter wants, with the caveat that // problems may arise if the state wasn’t available and the system crash // reporter changes in the future to use it. However, normally, the state // will be available. kern_return_t kr = UniversalExceptionRaise( EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, system_crash_reporter_handler.get(), thread, task, exception, code, code_count, &flavor_forward, old_state, old_state_count, new_state_forward_count ? 
&new_state_forward[0] : nullptr, &new_state_forward_count); MACH_LOG_IF(WARNING, kr != KERN_SUCCESS, kr) << "UniversalExceptionRaise"; } } ExcServerCopyState( behavior, old_state, old_state_count, new_state, new_state_count); Metrics::ExceptionCaptureResult(Metrics::CaptureResult::kSuccess); return ExcServerSuccessfulReturnValue(exception, behavior, false); } } // namespace crashpad
apache-2.0
robertnishihara/ray
rllib/offline/input_reader.py
4266
import logging import numpy as np import threading from ray.rllib.policy.sample_batch import MultiAgentBatch from ray.rllib.utils.annotations import PublicAPI from ray.rllib.utils.framework import try_import_tf from typing import Dict, List from ray.rllib.utils.typing import TensorType, SampleBatchType tf1, tf, tfv = try_import_tf() logger = logging.getLogger(__name__) @PublicAPI class InputReader: """Input object for loading experiences in policy evaluation.""" @PublicAPI def next(self): """Return the next batch of experiences read. Returns: SampleBatch or MultiAgentBatch read. """ raise NotImplementedError @PublicAPI def tf_input_ops(self, queue_size: int = 1) -> Dict[str, TensorType]: """Returns TensorFlow queue ops for reading inputs from this reader. The main use of these ops is for integration into custom model losses. For example, you can use tf_input_ops() to read from files of external experiences to add an imitation learning loss to your model. This method creates a queue runner thread that will call next() on this reader repeatedly to feed the TensorFlow queue. Args: queue_size (int): Max elements to allow in the TF queue. Example: >>> class MyModel(rllib.model.Model): ... def custom_loss(self, policy_loss, loss_inputs): ... reader = JsonReader(...) ... input_ops = reader.tf_input_ops() ... logits, _ = self._build_layers_v2( ... {"obs": input_ops["obs"]}, ... self.num_outputs, self.options) ... il_loss = imitation_loss(logits, input_ops["action"]) ... return policy_loss + il_loss You can find a runnable version of this in examples/custom_loss.py. Returns: dict of Tensors, one for each column of the read SampleBatch. """ if hasattr(self, "_queue_runner"): raise ValueError( "A queue runner already exists for this input reader. 
" "You can only call tf_input_ops() once per reader.") logger.info("Reading initial batch of data from input reader.") batch = self.next() if isinstance(batch, MultiAgentBatch): raise NotImplementedError( "tf_input_ops() is not implemented for multi agent batches") keys = [ k for k in sorted(batch.keys()) if np.issubdtype(batch[k].dtype, np.number) ] dtypes = [batch[k].dtype for k in keys] shapes = { k: (-1, ) + s[1:] for (k, s) in [(k, batch[k].shape) for k in keys] } queue = tf1.FIFOQueue(capacity=queue_size, dtypes=dtypes, names=keys) tensors = queue.dequeue() logger.info("Creating TF queue runner for {}".format(self)) self._queue_runner = _QueueRunner(self, queue, keys, dtypes) self._queue_runner.enqueue(batch) self._queue_runner.start() out = {k: tf.reshape(t, shapes[k]) for k, t in tensors.items()} return out class _QueueRunner(threading.Thread): """Thread that feeds a TF queue from a InputReader.""" def __init__(self, input_reader: InputReader, queue: "tf1.FIFOQueue", keys: List[str], dtypes: "tf.dtypes.DType"): threading.Thread.__init__(self) self.sess = tf1.get_default_session() self.daemon = True self.input_reader = input_reader self.keys = keys self.queue = queue self.placeholders = [tf1.placeholder(dtype) for dtype in dtypes] self.enqueue_op = queue.enqueue(dict(zip(keys, self.placeholders))) def enqueue(self, batch: SampleBatchType): data = { self.placeholders[i]: batch[key] for i, key in enumerate(self.keys) } self.sess.run(self.enqueue_op, feed_dict=data) def run(self): while True: try: batch = self.input_reader.next() self.enqueue(batch) except Exception: logger.exception("Error reading from input")
apache-2.0
adelton/origin
test/integration/gc_default_test.go
4169
package integration import ( "testing" "time" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" kapi "k8s.io/kubernetes/pkg/api" buildapi "github.com/openshift/origin/pkg/build/api" buildclient "github.com/openshift/origin/pkg/build/generated/internalclientset" testutil "github.com/openshift/origin/test/util" testserver "github.com/openshift/origin/test/util/server" ) func TestGCDefaults(t *testing.T) { testutil.RequireEtcd(t) defer testutil.DumpEtcdOnFailure(t) _, clusterAdminKubeConfig, err := testserver.StartTestMaster() if err != nil { t.Fatal(err) } clusterAdminConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig) if err != nil { t.Fatal(err) } originClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig) if err != nil { t.Fatal(err) } kubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig) if err != nil { t.Fatal(err) } newBuildClient, err := buildclient.NewForConfig(clusterAdminConfig) if err != nil { t.Fatal(err) } ns := "some-ns-old" if _, err := testserver.CreateNewProject(originClient, *clusterAdminConfig, ns, "adminUser"); err != nil { t.Fatal(err) } buildConfig := &buildapi.BuildConfig{} buildConfig.Name = "bc" buildConfig.Spec.RunPolicy = buildapi.BuildRunPolicyParallel buildConfig.GenerateName = "buildconfig-" buildConfig.Spec.Strategy = strategyForType(t, "source") buildConfig.Spec.Source.Git = &buildapi.GitBuildSource{URI: "example.org"} firstBuildConfig, err := newBuildClient.Build().BuildConfigs(ns).Create(buildConfig) if err != nil { t.Fatal(err) } childConfigMap := &kapi.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: "child"}, } childConfigMap.OwnerReferences = append(childConfigMap.OwnerReferences, metav1.OwnerReference{ APIVersion: "build.openshift.io/v1", Kind: "BuildConfig", Name: firstBuildConfig.Name, UID: firstBuildConfig.UID, }) if _, err := 
kubeClient.Core().ConfigMaps(ns).Create(childConfigMap); err != nil { t.Fatal(err) } // we need to make sure that the GC graph has observed the creation of the configmap *before* it observes the delete of // the buildconfig or the orphaning step won't find anything to orphan, then the delete will complete, the configmap // creation will be observed, there will be no parent, and the configmap will be deleted. // There is no API to determine if the configmap was observed. time.Sleep(1 * time.Second) // this looks weird, but we want no new dependencies on the old client if err := newBuildClient.Build().RESTClient().Delete().AbsPath("/oapi/v1/namespaces/" + ns + "/buildconfigs/" + buildConfig.Name).Do().Error(); err != nil { t.Fatal(err) } // the /oapi endpoints should orphan by default // wait for a bit and make sure that the build is still there time.Sleep(2 * time.Second) childConfigMap, err = kubeClient.Core().ConfigMaps(ns).Get(childConfigMap.Name, metav1.GetOptions{}) if err != nil { t.Error(err) } if bc, err := newBuildClient.BuildConfigs(ns).Get(buildConfig.Name, metav1.GetOptions{}); !apierrors.IsNotFound(err) { t.Fatalf("%v and %#v", err, bc) } secondBuildConfig, err := newBuildClient.BuildConfigs(ns).Create(buildConfig) if err != nil { t.Fatal(err) } childConfigMap.OwnerReferences = append(childConfigMap.OwnerReferences, metav1.OwnerReference{ APIVersion: "build.openshift.io/v1", Kind: "BuildConfig", Name: secondBuildConfig.Name, UID: secondBuildConfig.UID, }) if _, err := kubeClient.Core().ConfigMaps(ns).Update(childConfigMap); err != nil { t.Fatal(err) } if err := newBuildClient.Build().BuildConfigs(ns).Delete(secondBuildConfig.Name, nil); err != nil { t.Fatal(err) } err = wait.PollImmediate(30*time.Millisecond, 10*time.Second, func() (bool, error) { _, err := kubeClient.Core().ConfigMaps(ns).Get(childConfigMap.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { return true, nil } if err != nil { return false, err } return false, nil }) if err != nil 
{ t.Fatal(err) } }
apache-2.0
racker/omnibus
source/db-5.0.26.NC/java/src/com/sleepycat/util/ExceptionWrapper.java
1044
/*- * See the file LICENSE for redistribution information. * * Copyright (c) 2000, 2010 Oracle and/or its affiliates. All rights reserved. * * $Id$ */ package com.sleepycat.util; /** * Interface implemented by exceptions that can contain nested exceptions. * * @author Mark Hayes */ public interface ExceptionWrapper { /** * Returns the nested exception or null if none is present. * * @return the nested exception or null if none is present. * * @deprecated replaced by {@link #getCause}. */ Throwable getDetail(); /** * Returns the nested exception or null if none is present. * * <p>This method is intentionally defined to be the same signature as the * <code>java.lang.Throwable.getCause</code> method in Java 1.4 and * greater. By defining this method to return a nested exception, the Java * 1.4 runtime will print the nested stack trace.</p> * * @return the nested exception or null if none is present. */ Throwable getCause(); }
apache-2.0
rtkasodariya/interview
python/string/rabinkarp.py
1598
#Rabin Karp algorithm # Java code https://github.com/mission-peace/interview/blob/master/src/com/interview/string/RabinKarpSearch.java prime = 101 def pattern_matching(text, pattern): m = len(pattern) n = len(text) pattern_hash = create_hash(pattern, m - 1) text_hash = create_hash(text, m - 1) for i in range(1, n - m + 2): if pattern_hash == text_hash: if check_equal(text[i-1:i+m-1], pattern[0:]) is True: return i - 1; if i < n - m + 1: text_hash = recalculate_hash(text, i-1, i+m-1, text_hash, m) return -1; def check_equal(str1, str2): if len(str1) != len(str2): return False; i = 0 j = 0 for i, j in zip(str1, str2): if i != j: return False; return True def create_hash(input, end): hash = 0 for i in range(end + 1): hash = hash + ord(input[i])*pow(prime, i) return hash def recalculate_hash(input, old_index, new_index, old_hash, pattern_len): new_hash = old_hash - ord(input[old_index]) new_hash = new_hash/prime new_hash += ord(input[new_index])*pow(prime, pattern_len - 1) return new_hash; index = pattern_matching("TusharRoy", "sharRoy") print("Index ", index) index = pattern_matching("TusharRoy", "Roy") print("Index ", index) index = pattern_matching("TusharRoy", "shar") print("Index ", index) index = pattern_matching("TusharRoy", "usha") print("Index ", index) index = pattern_matching("TusharRoy", "Tus") print("Index ", index) index = pattern_matching("TusharRoy", "Roa") print("Index ", index)
apache-2.0
Manzalab/kalulu
app/minigames/caterpillar/assets/data/params.js
8118
define ([], function () { /** * This configuration object is used to parameter the minigames using the Detection Signal Theory. * It contains both global and local remediation settings. **/ return { baseConfig : { // the "baseConfig" settings are a base to all difficulty levels, overriden by the level specific values. generalParameters : { // those will never change after the initial tuning secondsOfPauseAfterCorrectResponse : 1, secondsOfPauseAfterIncorrectResponse : 1.5, missedCorrectStimuliCountTriggeringPermanentHelp : 3, incorrectResponseCountTriggeringFirstRemediation : 1, incorrectResponseCountTriggeringSecondRemediation : 2, lives : 3, // at the time when lives reach 0 the game ends, i.e. at the third incorrect response for 3 lives capitalLettersShare : 0.4, caterpillarSpeed : 30, popupTimeOnScreen : 3 }, globalRemediation : { // the global remediation settings are invariable inside a game but can evolve depending on globalLevel from one game to the next gameType : "composition", // "identification", "composition", "pairing", or "other" gameTimer : Infinity, // should the game end after a certain amount of seconds ? gameTimerVisible : false, // is this Timer visible ? roundTimer : Infinity, // should each round end after a certain amount of seconds ? roundTimerVisible : false, // is this Timer visible ? roundsCount : 5, // the amount of rounds, (Rafiki will provide one target per round) currentLessonShareInTargets : 0.8, // the amount of targets taken in the current lesson (vs. revision targets from previous lessons) roundTargetClass : "Word", // the class of a round Target stepTargetClass : "GP", // the class of a step Target (if same than round --> identification, if component -> composition) stepsToPlay : "All", // should all the steps be played or are some given ? e.g. [1, 0, 0, 1] stepDistracterCount : 0, // the amount of distracter stimuli to be provided by Rafiki for each round. 
lineCount : 0, // the number of columns on which jellyfishes will spawn berriesOnScreen : 0, totalTriesCount : 0 }, localRemediation : { // the local remediation settings are used to adapt the difficulty inside a game. The game divide the min-max range in 5 stages and starts at the middle one. minimumCorrectStimuliOnScreen : { min: 0, max: 0 }, // if this is not reached, the next Jellyfish spawned will have a correct stimuli correctResponsePercentage : { min: 0, max: 0 }, // if this is not reached, the next Jellyfish spawned will have a correct stimuli berryPerLine : { min: 0, max: 0 }, // if this is reached, the next Jellyfish spawned will have an incorrect stimuli respawnTime : {min : 0, max : 0}, // nb of seconds between jellyfishes spawns speed : { min: 0, max: 0 }, // speed of the jellyfishes. } }, levels: [ // the settings for difficulty levels 1 to 5 { // LEVEL 1 globalRemediation : { stepDistracterCount : 3, // roundsCount : 3, // lineCount : 3, berriesOnScreen : 2, totalTriesCount : 3 }, localRemediation : { minimumCorrectStimuliOnScreen : {min : 2, max : 1}, // correctResponsePercentage : { min: 0.90, max: 0.75 }, // berryPerLine : { min: 0, max: 1 }, respawnTime : {min : 3, max : 2.5}, // speed : {min : 5, max : 5.5}, // } }, { // LEVEL 2 globalRemediation: { stepDistracterCount : 3, // roundsCount : 3, // lineCount : 3, berriesOnScreen : 2, totalTriesCount : 3 }, localRemediation: { minimumCorrectStimuliOnScreen : {min : 2, max : 1}, // correctResponsePercentage : { min: 0.75, max: 0.60 }, // berryPerLine : { min: 0, max: 1 }, respawnTime : {min : 3, max : 2.5}, // speed : {min : 5.5, max : 6}, // } }, { // LEVEL 3 globalRemediation: { stepDistracterCount : 3, // roundsCount : 4, // lineCount : 4, berriesOnScreen : 3, totalTriesCount : 3 }, localRemediation: { minimumCorrectStimuliOnScreen : {min : 2, max : 1}, // correctResponsePercentage : { min: 0.60, max: 0.45 }, // berryPerLine : { min: 0, max: 2 }, respawnTime : {min : 2.5, max : 2}, // speed : 
{min : 6, max : 7}, // } }, { // LEVEL 4 globalRemediation: { stepDistracterCount : 3, // roundsCount : 5, // lineCount : 4, berriesOnScreen : 4, totalTriesCount : 3 }, localRemediation: { minimumCorrectStimuliOnScreen : {min : 1, max : 1}, // correctResponsePercentage : { min: 0.45, max: 0.30 }, // berryPerLine : { min: 0, max: 2 }, respawnTime : {min : 2, max : 1.5}, // speed : {min : 7.5, max : 8}, // } }, { // LEVEL 5 globalRemediation: { stepDistracterCount : 3, // roundsCount : 5, // lineCount : 5, berriesOnScreen : 5, totalTriesCount : 3 }, localRemediation: { minimumCorrectStimuliOnScreen : {min : 1, max : 1}, // correctResponsePercentage : { min: 0.30, max: 0.10 }, // berryPerLine : { min: 0, max: 3 }, respawnTime : {min : 1.5, max : 1}, // speed : {min : 8, max : 8.5}, // } } ] }; });
apache-2.0
shanti/olio
webapp/rails/trunk/vendor/plugins/rspec-rails/spec/rails_suite.rb
942
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # dir = File.dirname(__FILE__) Dir["#{dir}/**/*_example.rb"].each do |file| require file end Dir["#{dir}/**/*_spec.rb"].each do |file| require file end
apache-2.0
tim777z/ambrose
common/src/test/java/com/twitter/ambrose/service/impl/InMemoryStatsServiceTest.java
3708
/* Copyright 2012 Twitter, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.twitter.ambrose.service.impl; import com.twitter.ambrose.model.DAGNode; import com.twitter.ambrose.model.Event; import com.twitter.ambrose.model.Job; import org.junit.Before; import org.junit.Test; import java.io.IOException; import java.util.Collection; import java.util.Iterator; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; /** * @author billg */ public class InMemoryStatsServiceTest { private InMemoryStatsService service; private final String workflowId = "id1"; private final Event[] testEvents = new Event[] { new Event.JobStartedEvent(new DAGNode<Job>("some name", null)), new Event.JobProgressEvent(new DAGNode<Job>("50", null)), new Event.JobFinishedEvent(new DAGNode<Job>("done", null)), new Event.JobProgressEvent(new DAGNode<Job>("75", null)), new Event.JobProgressEvent(new DAGNode<Job>("100", null)), }; @Before public void setup() { service = new InMemoryStatsService(); } @Test public void testGetAllEvents() throws IOException { for(Event event : testEvents) { service.pushEvent(workflowId, event); } Collection<Event> events = service.getEventsSinceId(workflowId, -1); Iterator<Event> foundEvents = events.iterator(); assertTrue("No events returned", foundEvents.hasNext()); for(Event sentEvent : testEvents) { assertEqualWorkflows(sentEvent, foundEvents.next()); } assertFalse("Wrong number of events returned", 
foundEvents.hasNext()); } @Test public void testGetEventsSince() throws IOException { for(Event event : testEvents) { service.pushEvent(workflowId, event); } // first, peek at the first eventId Collection<Event> allEvents = service.getEventsSinceId(workflowId, -1); int sinceId = allEvents.iterator().next().getId(); // get all events since the first Collection<Event> events = service.getEventsSinceId(workflowId, sinceId); Iterator<Event> foundEvents = events.iterator(); assertEquals("Wrong number of events returned", testEvents.length - 1, events.size()); for(Event sentEvent : testEvents) { if (sentEvent.getId() <= sinceId) { continue; } assertEqualWorkflows(sentEvent, foundEvents.next()); } assertFalse("Wrong number of events returned", foundEvents.hasNext()); } @Test public void testGetEventsMax() throws IOException { for(Event event : testEvents) { service.pushEvent(workflowId, event); } int sinceId = -1; Event foundEvent; for(Event event : testEvents) { Iterator<Event> foundEvents = service.getEventsSinceId(workflowId, sinceId, 1).iterator(); foundEvent = foundEvents.next(); assertEqualWorkflows(event, foundEvent); assertFalse("Wrong number of events returned", foundEvents.hasNext()); sinceId = foundEvent.getId(); } } private void assertEqualWorkflows(Event expected, Event found) { assertEquals("Wrong eventId found", expected.getId(), found.getId()); assertEquals("Wrong eventData found", expected.getPayload(), found.getPayload()); } }
apache-2.0
prabushi/devstudio-tooling-esb
plugins/org.wso2.developerstudio.eclipse.gmf.esb.edit/src/org/wso2/developerstudio/eclipse/gmf/esb/provider/APIHandlerPropertyItemProvider.java
6302
/**
 * Copyright 2009-2012 WSO2, Inc. (http://wso2.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.wso2.developerstudio.eclipse.gmf.esb.provider;

import java.util.Collection;
import java.util.List;

import org.eclipse.emf.common.notify.AdapterFactory;
import org.eclipse.emf.common.notify.Notification;

import org.eclipse.emf.edit.provider.ComposeableAdapterFactory;
import org.eclipse.emf.edit.provider.IEditingDomainItemProvider;
import org.eclipse.emf.edit.provider.IItemLabelProvider;
import org.eclipse.emf.edit.provider.IItemPropertyDescriptor;
import org.eclipse.emf.edit.provider.IItemPropertySource;
import org.eclipse.emf.edit.provider.IStructuredItemContentProvider;
import org.eclipse.emf.edit.provider.ITreeItemContentProvider;
import org.eclipse.emf.edit.provider.ItemPropertyDescriptor;
import org.eclipse.emf.edit.provider.ViewerNotification;

import org.wso2.developerstudio.eclipse.gmf.esb.APIHandlerProperty;
import org.wso2.developerstudio.eclipse.gmf.esb.EsbPackage;

/**
 * This is the item provider adapter for a {@link org.wso2.developerstudio.eclipse.gmf.esb.APIHandlerProperty} object.
 * <!-- begin-user-doc -->
 * NOTE: EMF-generated class. Hand edits to {@code @generated} members will be
 * lost on regeneration unless the tag is changed to {@code @generated NOT}.
 * <!-- end-user-doc -->
 * @generated
 */
public class APIHandlerPropertyItemProvider
    extends EsbNodeItemProvider {
    /**
     * This constructs an instance from a factory and a notifier.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    public APIHandlerPropertyItemProvider(AdapterFactory adapterFactory) {
        super(adapterFactory);
    }

    /**
     * This returns the property descriptors for the adapted class.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public List<IItemPropertyDescriptor> getPropertyDescriptors(Object object) {
        // Lazily built once: the superclass fills in inherited descriptors,
        // then the Name and Value descriptors are appended.
        if (itemPropertyDescriptors == null) {
            super.getPropertyDescriptors(object);

            addNamePropertyDescriptor(object);
            addValuePropertyDescriptor(object);
        }
        return itemPropertyDescriptors;
    }

    /**
     * This adds a property descriptor for the Name feature.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    protected void addNamePropertyDescriptor(Object object) {
        // Flags after the feature literal: settable=true, multiLine=false,
        // sortChoices=false (standard ItemPropertyDescriptor ordering).
        itemPropertyDescriptors.add
            (createItemPropertyDescriptor
                (((ComposeableAdapterFactory)adapterFactory).getRootAdapterFactory(),
                 getResourceLocator(),
                 getString("_UI_APIHandlerProperty_name_feature"),
                 getString("_UI_PropertyDescriptor_description", "_UI_APIHandlerProperty_name_feature", "_UI_APIHandlerProperty_type"),
                 EsbPackage.Literals.API_HANDLER_PROPERTY__NAME,
                 true,
                 false,
                 false,
                 ItemPropertyDescriptor.GENERIC_VALUE_IMAGE,
                 null,
                 null));
    }

    /**
     * This adds a property descriptor for the Value feature.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    protected void addValuePropertyDescriptor(Object object) {
        itemPropertyDescriptors.add
            (createItemPropertyDescriptor
                (((ComposeableAdapterFactory)adapterFactory).getRootAdapterFactory(),
                 getResourceLocator(),
                 getString("_UI_APIHandlerProperty_value_feature"),
                 getString("_UI_PropertyDescriptor_description", "_UI_APIHandlerProperty_value_feature", "_UI_APIHandlerProperty_type"),
                 EsbPackage.Literals.API_HANDLER_PROPERTY__VALUE,
                 true,
                 false,
                 false,
                 ItemPropertyDescriptor.GENERIC_VALUE_IMAGE,
                 null,
                 null));
    }

    /**
     * This returns APIHandlerProperty.gif.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public Object getImage(Object object) {
        return overlayImage(object, getResourceLocator().getImage("full/obj16/APIHandlerProperty"));
    }

    /**
     * This returns the label text for the adapted class.
     * <!-- begin-user-doc -->
     * Shows the type name alone when the property has no name yet.
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public String getText(Object object) {
        String label = ((APIHandlerProperty)object).getName();
        return label == null || label.length() == 0 ?
            getString("_UI_APIHandlerProperty_type") :
            getString("_UI_APIHandlerProperty_type") + " " + label;
    }

    /**
     * This handles model notifications by calling {@link #updateChildren} to update any cached
     * children and by creating a viewer notification, which it passes to {@link #fireNotifyChanged}.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    public void notifyChanged(Notification notification) {
        updateChildren(notification);

        switch (notification.getFeatureID(APIHandlerProperty.class)) {
            case EsbPackage.API_HANDLER_PROPERTY__NAME:
            case EsbPackage.API_HANDLER_PROPERTY__VALUE:
                // Name/Value edits only require a label refresh of this item,
                // not a structural (content) refresh.
                fireNotifyChanged(new ViewerNotification(notification, notification.getNotifier(), false, true));
                return;
        }
        super.notifyChanged(notification);
    }

    /**
     * This adds {@link org.eclipse.emf.edit.command.CommandParameter}s describing the children
     * that can be created under this object.
     * <!-- begin-user-doc -->
     * <!-- end-user-doc -->
     * @generated
     */
    @Override
    protected void collectNewChildDescriptors(Collection<Object> newChildDescriptors, Object object) {
        super.collectNewChildDescriptors(newChildDescriptors, object);
    }

}
apache-2.0
QBNemo/spring-mvc-showcase
src/main/java/org/springframework/web/servlet/view/tiles3/SimpleSpringPreparerFactory.java
2654
/*
 * Copyright 2002-2012 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.web.servlet.view.tiles3;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.tiles.TilesException;
import org.apache.tiles.preparer.PreparerException;
import org.apache.tiles.preparer.ViewPreparer;
import org.apache.tiles.preparer.factory.NoSuchPreparerException;

import org.springframework.web.context.WebApplicationContext;

/**
 * Tiles3 {@link org.apache.tiles.preparer.PreparerFactory} implementation
 * that expects preparer class names and builds preparer instances for those,
 * creating them through the Spring ApplicationContext in order to apply
 * Spring container callbacks and configured Spring BeanPostProcessors.
 *
 * @author Juergen Hoeller
 * @since 3.2
 * @see SpringBeanPreparerFactory
 */
public class SimpleSpringPreparerFactory extends AbstractSpringPreparerFactory {

	/** Cache of shared ViewPreparer instances: bean name -> bean instance */
	private final Map<String, ViewPreparer> sharedPreparers = new ConcurrentHashMap<String, ViewPreparer>(16);

	/**
	 * Returns the shared {@link ViewPreparer} for the given class name, creating
	 * and caching it on first access. The {@code name} is treated as a
	 * fully-qualified class name and loaded via the context's ClassLoader.
	 * @throws PreparerException if the class does not implement ViewPreparer
	 * @throws NoSuchPreparerException if the class cannot be found
	 */
	@Override
	protected ViewPreparer getPreparer(String name, WebApplicationContext context) throws TilesException {
		// Quick check on the concurrent map first, with minimal locking.
		ViewPreparer preparer = this.sharedPreparers.get(name);
		if (preparer == null) {
			// Double-checked pattern: synchronize and re-read so that each
			// preparer class is instantiated at most once under concurrent
			// first-time access.
			synchronized (this.sharedPreparers) {
				preparer = this.sharedPreparers.get(name);
				if (preparer == null) {
					try {
						Class<?> beanClass = context.getClassLoader().loadClass(name);
						if (!ViewPreparer.class.isAssignableFrom(beanClass)) {
							throw new PreparerException(
									"Invalid preparer class [" + name + "]: does not implement ViewPreparer interface");
						}
						// Create through the Spring context so container callbacks
						// and BeanPostProcessors are applied to the new preparer.
						preparer = (ViewPreparer) context.getAutowireCapableBeanFactory().createBean(beanClass);
						this.sharedPreparers.put(name, preparer);
					}
					catch (ClassNotFoundException ex) {
						throw new NoSuchPreparerException("Preparer class [" + name + "] not found", ex);
					}
				}
			}
		}
		return preparer;
	}

}
apache-2.0
pkilambi/ceilometer
tools/make_test_data.py
6961
#!/usr/bin/env python
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command line tool for creating test data for Ceilometer.

Usage:

Generate testing data for e.g. for default time span

source .tox/py27/bin/activate
./tools/make_test_data.py --user 1 --project 1 --resource 1 --counter cpu_util
--volume 20
"""
import argparse
import datetime
import logging
import random
import sys
import uuid

from oslo_config import cfg
from oslo_utils import timeutils

from ceilometer.publisher import utils
from ceilometer import sample
from ceilometer import storage


def make_test_data(name, meter_type, unit, volume, random_min,
                   random_max, user_id, project_id, resource_id, start,
                   end, interval, resource_metadata=None, source='artificial'):
    """Yield encoded meter messages covering the [start, end] time span.

    One sample is generated every `interval` minutes. When random_min and
    random_max are both >= 0 a random amount is added to the running volume;
    for 'gauge'/'delta' meters the volume is reset after every sample so the
    random element does not accumulate over time.

    :param start: datetime or parseable time string for the first sample
    :param end: datetime or parseable time string bounding the last sample
    """
    resource_metadata = resource_metadata or {}
    # Compute start and end timestamps for the new data.
    if isinstance(start, datetime.datetime):
        timestamp = start
    else:
        timestamp = timeutils.parse_strtime(start)

    if not isinstance(end, datetime.datetime):
        end = timeutils.parse_strtime(end)

    increment = datetime.timedelta(minutes=interval)

    print('Adding new events for meter %s.' % (name))
    # Generate events
    n = 0
    total_volume = volume
    while timestamp <= end:
        if (random_min >= 0 and random_max >= 0):
            # If there is a random element defined, we will add it to
            # user given volume.
            if isinstance(random_min, int) and isinstance(random_max, int):
                total_volume += random.randint(random_min, random_max)
            else:
                total_volume += random.uniform(random_min, random_max)

        c = sample.Sample(name=name,
                          type=meter_type,
                          unit=unit,
                          volume=total_volume,
                          user_id=user_id,
                          project_id=project_id,
                          resource_id=resource_id,
                          timestamp=timestamp,
                          resource_metadata=resource_metadata,
                          source=source,
                          )
        data = utils.meter_message_from_counter(
            c, cfg.CONF.publisher.telemetry_secret)

        yield data
        n += 1
        timestamp = timestamp + increment

        if (meter_type == 'gauge' or meter_type == 'delta'):
            # For delta and gauge, we don't want to increase the value
            # in time by random element. So we always set it back to
            # volume.
            total_volume = volume

    print('Added %d new events for meter %s.' % (n, name))


def record_test_data(conn, *args, **kwargs):
    """Write every generated sample into the metering store `conn`."""
    for data in make_test_data(*args, **kwargs):
        conn.record_metering_data(data)


def get_parser():
    """Build the command-line parser for this tool."""
    parser = argparse.ArgumentParser(
        description='generate metering data',
    )
    parser.add_argument(
        '--interval',
        default=10,
        type=int,
        help='The period between events, in minutes.',
    )
    parser.add_argument(
        '--start',
        default=31,
        help='Number of days to be stepped back from now or date in the past ('
             '"YYYY-MM-DDTHH:MM:SS" format) to define timestamps start range.',
    )
    # BUG FIX: --end previously declared type=int, which made argparse reject
    # the documented "YYYY-MM-DDTHH:MM:SS" form before main() could try to
    # parse it as a date. Left untyped (like --start) so both forms work.
    parser.add_argument(
        '--end',
        default=2,
        help='Number of days to be stepped forward from now or date in the '
             'future ("YYYY-MM-DDTHH:MM:SS" format) to define timestamps end '
             'range.',
    )
    parser.add_argument(
        '--type',
        choices=('gauge', 'cumulative'),
        default='gauge',
        dest='meter_type',
        help='Counter type.',
    )
    parser.add_argument(
        '--unit',
        default=None,
        help='Counter unit.',
    )
    parser.add_argument(
        '--project',
        dest='project_id',
        help='Project id of owner.',
    )
    parser.add_argument(
        '--user',
        dest='user_id',
        help='User id of owner.',
    )
    parser.add_argument(
        '--random_min',
        help='The random min border of amount for added to given volume.',
        type=int,
        default=0,
    )
    parser.add_argument(
        '--random_max',
        help='The random max border of amount for added to given volume.',
        type=int,
        default=0,
    )
    parser.add_argument(
        '--resource',
        dest='resource_id',
        default=str(uuid.uuid4()),
        help='The resource id for the meter data.',
    )
    parser.add_argument(
        '--counter',
        default='instance',
        dest='name',
        help='The counter name for the meter data.',
    )
    parser.add_argument(
        '--volume',
        help='The amount to attach to the meter.',
        type=int,
        default=1,
    )
    return parser


def main():
    """Parse arguments, resolve the time span, and record the test data."""
    cfg.CONF([], project='ceilometer')

    args = get_parser().parse_args()

    # Set up logging to use the console
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    root_logger = logging.getLogger('')
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)

    # Connect to the metering database
    conn = storage.get_connection_from_config(cfg.CONF)

    # Find the user and/or project for a real resource
    if not (args.user_id or args.project_id):
        for r in conn.get_resources():
            if r.resource_id == args.resource_id:
                args.user_id = r.user_id
                args.project_id = r.project_id
                break

    # Compute the correct time span: each bound is either a day count
    # relative to now or an absolute "YYYY-MM-DDTHH:MM:SS" timestamp.
    time_format = '%Y-%m-%dT%H:%M:%S'
    try:
        start = datetime.datetime.utcnow() - datetime.timedelta(
            days=int(args.start))
    except ValueError:
        start = datetime.datetime.strptime(args.start, time_format)
    try:
        end = datetime.datetime.utcnow() + datetime.timedelta(
            days=int(args.end))
    except ValueError:
        end = datetime.datetime.strptime(args.end, time_format)
    args.start = start
    args.end = end
    record_test_data(conn=conn, **args.__dict__)

    return 0


if __name__ == '__main__':
    main()
apache-2.0
yflou520/appinventor-sources
appinventor/blocklyeditor/src/backpack.js
16293
/**
 * Visual Blocks Editor
 *
 * Copyright 2011 Google Inc.
 * http://blockly.googlecode.com/
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @fileoverview A backpack object that can carry one or more blocks
 *  among workspaces. Blocks can be added to the backpack through the
 *  block's Context menu and retrieved through the workspace's context
 *  menu.
 *
 * This is called from BlocklyPanel.
 *
 * Backpack contents are stored in Blockly.backpack_
 *
 * @author fraser@google.com (Neil Fraser)
 * @author ram8647@gmail.com (Ralph Morelli)
 * @autor vbrown@wellesley.edu (Tori Brown)
 */
'use strict';

goog.provide('Blockly.Backpack');

goog.require('Blockly.Util');

/**
 * Class for a backpack.
 * @param {!Blockly.Workspace} workspace The Workspace to sit it.
 * @constructor
 */
Blockly.Backpack = function(workspace) {
  this.workspace_ = workspace;
};

/**
 * URL of the small backpack image.
 * @type {string}
 * @private
 */
Blockly.Backpack.prototype.BPACK_SMALL_ = 'media/backpack-smaller.png';

/**
 * URL of the backpack image shown while the mouse hovers over it.
 * @type {string}
 * @private
 */
//Blockly.Backpack.prototype.BPACK_OVER_ = 'media/backpack-small-highlighted.png';
Blockly.Backpack.prototype.BPACK_OVER_ = 'media/backpack-small.png';

/**
 * Width of the image.
 * @type {number}
 * @private
 */
Blockly.Backpack.prototype.WIDTH_ = 80;

/**
 * Height of the image.
 * @type {number}
 * @private
 */
Blockly.Backpack.prototype.BODY_HEIGHT_ = 75;

/**
 * Distance between backpack and top edge of workspace.
 * @type {number}
 * @private
 */
Blockly.Backpack.prototype.MARGIN_TOP_ = 10;

/**
 * Distance between backpack and right edge of workspace.
 * @type {number}
 * @private
 */
Blockly.Backpack.prototype.MARGIN_SIDE_ = 20;

/**
 * Current small/large state of the backpack.
 * @type {boolean}
 */
Blockly.Backpack.prototype.isLarge = false;

/**
 * Current over state of the mouse over backpack.
 * @type {boolean}
 */
Blockly.Backpack.prototype.isOver = false;

/**
 * Current state whether a block is added to backpack.
 * @type {boolean}
 */
Blockly.Backpack.prototype.isAdded = false;

/**
 * The SVG group containing the backpack.
 * @type {Element}
 * @private
 */
Blockly.Backpack.prototype.svgGroup_ = null;

/**
 * The SVG image element of the backpack body.
 * @type {Element}
 * @private
 */
Blockly.Backpack.prototype.svgBody_ = null;

/**
 * Task ID of small/big animation.
 * @type {number}
 * @private
 */
Blockly.Backpack.prototype.resizeTask_ = 0;

/**
 * Task ID of opening/closing animation.
 * BUG FIX: this was defined on Blockly.Trashcan.prototype (a copy/paste
 * slip), so dispose() and setOpen_() read an undefined this.openTask_.
 * @type {number}
 * @private
 */
Blockly.Backpack.prototype.openTask_ = 0;

/**
 * Left coordinate of the backpack.
 * @type {number}
 * @private
 */
Blockly.Backpack.prototype.left_ = 0;

/**
 * Top coordinate of the backpack.
 * @type {number}
 * @private
 */
Blockly.Backpack.prototype.top_ = 0;

// Note: startX/startY "snap back" prototype properties were commented out in
// the original as unused; they remain instance-only (see onMouseDown).

/**
 * Create the backpack SVG elements.
 * @return {!Element} The backpack's SVG group.
 */
Blockly.Backpack.prototype.createDom = function() {
  Blockly.Backpack.flyout_ = new Blockly.backpackFlyout();
  // insert the flyout after the main workspace (except, there's no
  // svg.insertAfter method, so we need to insert before the thing following
  // the main workspace. Neil Fraser says: this is "less hacky than it looks".
  var flyoutGroup = Blockly.Backpack.flyout_.createDom();
  Blockly.svg.insertBefore(flyoutGroup, Blockly.mainWorkspace.svgGroup_.nextSibling);

  this.svgGroup_ = Blockly.createSvgElement('g', null, null);
  this.svgBody_ = Blockly.createSvgElement('image',
      {'width': this.WIDTH_, 'height': this.BODY_HEIGHT_, 'id': 'backpackIcon'},
      this.svgGroup_);
  this.svgBody_.setAttributeNS('http://www.w3.org/1999/xlink', 'xlink:href',
      Blockly.pathToBlockly + this.BPACK_SMALL_);
  return this.svgGroup_;
};

/**
 * Initialize the backpack: position it, hook resize, init the flyout,
 * preload the sound, and size the icon to reflect current contents.
 */
Blockly.Backpack.prototype.init = function() {
  this.position_();
  // If the document resizes, reposition the backpack.
  Blockly.bindEvent_(window, 'resize', this, this.position_);
  Blockly.Backpack.flyout_.init(Blockly.mainWorkspace,
      Blockly.getMainWorkspaceMetrics_, true /*withScrollbar*/);
  // load files for sound effect
  Blockly.loadAudio_(['media/backpack.mp3', 'media/backpack.ogg', 'media/backpack.wav'], 'backpack');
  if (this.getBackpack() == undefined)
    return;
  var bp_contents = JSON.parse(this.getBackpack());
  var len = bp_contents.length;
  if (len == 0)
    this.shrink();
  else
    this.grow();
};

/**
 * Dispose of this backpack.
 * Unlink from all DOM elements to prevent memory leaks.
 */
Blockly.Backpack.prototype.dispose = function() {
  if (this.svgGroup_) {
    goog.dom.removeNode(this.svgGroup_);
    this.svgGroup_ = null;
  }
  this.svgBody_ = null;
  this.getMetrics_ = null;
  goog.Timer.clear(this.openTask_);
};

/**
 * Pastes the backpack contents to the current workspace.
 * Blocks whose types do not exist in this workspace are skipped with an alert.
 */
Blockly.Backpack.prototype.pasteBackpack = function() {
  if (this.getBackpack() == undefined)
    return;
  // BUG FIX: bp_contents was assigned without var, leaking a global.
  var bp_contents = JSON.parse(this.getBackpack());
  var len = bp_contents.length;
  for (var i = 0; i < len; i++) {
    var xml = Blockly.Xml.textToDom(bp_contents[i]);
    var blk = xml.childNodes[0];
    var arr = [];
    this.checkValidBlockTypes(blk, arr);
    var ok = true;
    var childType;
    for (var j = 0; j < arr.length; j++) {
      childType = arr[j];
      if (!Blockly.Blocks[childType]) {
        ok = false;
        break;
      }
    }
    if (ok)
      Blockly.mainWorkspace.paste(blk);
    else
      // childType is the first type that failed the lookup above.
      window.alert('Sorry. You cannot paste a block of type "' + childType +
          '" because it doesn\'t exist in this workspace.');
  }
};

/**
 * Recursively traverses the tree starting from block returning
 * an array of child block types.
 *
 * Pre-condition block has nodeName 'block' and some type.
 */
Blockly.Backpack.prototype.checkValidBlockTypes = function(block, arr) {
  if (block.nodeName == 'block') {
    arr.push(block.getAttribute('type'));
  }
  var children = block.childNodes;
  for (var i = 0; i < children.length; i++) {
    var child = children[i];
    this.checkValidBlockTypes(child, arr);
  }
};

/**
 * Copy all blocks in the workspace to backpack.
 * Copying each top-level block (with its descendants, via addToBackpack)
 * covers every block in the workspace.
 */
Blockly.Backpack.prototype.addAllToBackpack = function() {
  // BUG FIX: the old code indexed getAllBlocks() with the top-block count
  // (and assigned `block` without var), copying an arbitrary prefix of the
  // flattened block list instead of each top-level block.
  var topBlocks = Blockly.mainWorkspace.getTopBlocks(false);
  for (var x = 0; x < topBlocks.length; x++) {
    this.addToBackpack(topBlocks[x]);
  }
};

/**
 * Serializes the given block (expanded, with descendants) and appends it to
 * the stored backpack contents, then grows the icon and plays the sound.
 */
Blockly.Backpack.prototype.addToBackpack = function(block) {
  if (this.getBackpack() == undefined) {
    this.setBackpack(JSON.stringify([]));
  }

  // Copy is made of the expanded block.
  var isCollapsed = block.collapsed_;
  block.setCollapsed(false);
  var xmlBlock = Blockly.Xml.blockToDom_(block);
  Blockly.Xml.deleteNext(xmlBlock);
  // Encode start position in XML.
  var xy = block.getRelativeToSurfaceXY();
  xmlBlock.setAttribute('x', Blockly.RTL ? -xy.x : xy.x);
  xmlBlock.setAttribute('y', xy.y);
  block.setCollapsed(isCollapsed);

  // Add the block to the backpack
  var backpack = this.getBackpack();
  var bp_contents = JSON.parse(backpack);
  var len = bp_contents.length;
  var newBlock = "<xml>" + Blockly.Xml.domToText(xmlBlock) + "</xml>";
  bp_contents[len] = newBlock;
  this.setBackpack(JSON.stringify(bp_contents));
  this.grow();

  Blockly.playAudio('backpack');

  // update the flyout when it's visible
  if (Blockly.Backpack.flyout_.isVisible()) {
    this.isAdded = true;
    this.openBackpack();
    this.isAdded = false;
  }
};

/**
 * Move the backpack to the top right corner.
 * @private
 */
Blockly.Backpack.prototype.position_ = function() {
  var metrics = this.workspace_.getMetrics();
  if (!metrics) {
    // There are no metrics available (workspace is probably not visible).
    return;
  }
  if (Blockly.RTL) {
    this.left_ = this.MARGIN_SIDE_;
  } else {
    this.left_ = metrics.viewWidth + metrics.absoluteLeft -
        this.WIDTH_ - this.MARGIN_SIDE_;
  }
  this.top_ = metrics.viewHeight + metrics.absoluteTop -
      (metrics.viewHeight - this.MARGIN_TOP_);
  this.svgGroup_.setAttribute('transform',
      'translate(' + this.left_ + ',' + this.top_ + ')');
};

/**
 * On right click, open alert and show documentation
 */
Blockly.Backpack.prototype.openBackpackDoc = function(e) {
  var options = [];
  var backpackDoc = {enabled: true};
  backpackDoc.text = "Backpack documentation";
  backpackDoc.callback = function() {
    var dialog = new Blockly.Util.Dialog(Blockly.Msg.BACKPACK_DOC_TITLE,
                                         Blockly.Msg.BACKPACK_DOCUMENTATION,
                                         Blockly.Msg.REPL_OK,
                                         null,
                                         0,
                                         function() {
                                           dialog.hide();
                                         });
  };
  options.push(backpackDoc);
  Blockly.ContextMenu.show(e, options);
};

/**
 * On left click, open backpack and view flyout (or hide it if it is
 * already visible and we are not mid-add).
 */
Blockly.Backpack.prototype.openBackpack = function() {
  if (!this.isAdded && Blockly.Backpack.flyout_.isVisible()) {
    Blockly.Backpack.flyout_.hide();
  } else {
    var backpack = JSON.parse(this.getBackpack());  // get backpack contents from java
    var len = backpack.length;
    var newBackpack = [];
    for (var i = 0; i < len; i++) {
      var dom = Blockly.Xml.textToDom(backpack[i]).firstChild;
      newBackpack[i] = dom;
    }
    Blockly.Backpack.flyout_.show(newBackpack);
  }
};

/**
 * Obtains starting coordinates so the block can return to spot after copy
 * NOTE: This function does not appear to be invoked when you click on a
 * block and drag it to the Backpack. So these values of startX and startY
 * are not set.
 * @param {!Event} e Mouse down event.
 */
Blockly.Backpack.prototype.onMouseDown = function(e) {
  var xy = Blockly.getAbsoluteXY_(this.svgGroup_);
  this.startX = xy.x;
  this.startY = xy.y;
};

/**
 * When block is let go over the backpack, copy it and return to original position
 * @param {!Event} e Mouse up event
 * @param startX x coordinate of the mouseDown event
 * @param startY y coordinate of the mouseDown event
 */
Blockly.Backpack.prototype.onMouseUp = function(e, startX, startY) {
  // Note: startX and startY do not give the starting location of the block itself.
  // They give the location of the mouse click which can be anywhere on the block.
  // So this code will not return the block to its original position.
  // (Unused local coordinate computations from the original were removed.)
  Blockly.selected.moveBy((startX - e.clientX), (startY - e.clientY));
  Blockly.mainWorkspace.render();
};

/**
 * Determines if the mouse (with a block) is currently over the backpack.
 * Opens/closes the lid and sets the isLarge flag.
 * @param {!Event} e Mouse move event.
 */
Blockly.Backpack.prototype.onMouseMove = function(e, startX, startY) {
  /*
  An alternative approach would be to use onMouseOver and onMouseOut events.
  However the selected block will be between the mouse and the backpack,
  thus these events won't fire.
  Another approach is to use HTML5's drag & drop API, but it's widely hated.
  Instead, we'll just have the block's drag_ function call us.
  */
  if (!this.svgGroup_) {
    return;
  }
  var xy = Blockly.getAbsoluteXY_(this.svgGroup_);
  var left = xy.x;
  var top = xy.y;

  // Convert the mouse coordinates into SVG coordinates.
  xy = Blockly.convertCoordinates(e.clientX, e.clientY, true);
  var mouseX = xy.x;
  var mouseY = xy.y;

  var over = (mouseX > left) &&
             (mouseX < left + this.WIDTH_) &&
             (mouseY > top) &&
             (mouseY < top + this.BODY_HEIGHT_);
  if (this.isOpen != over) {
    this.setOpen_(over);
  }
};

/**
 * Returns whether the given mouse event is over the backpack's current
 * bounds, and records the result in this.isOver.
 */
Blockly.Backpack.prototype.mouseIsOver = function(e) {
  var xy = Blockly.convertCoordinates(e.clientX, e.clientY, true);
  var mouseX = xy.x;
  var mouseY = xy.y;
  var over = (mouseX > this.left_) &&
             (mouseX < this.left_ + this.WIDTH_) &&
             (mouseY > this.top_) &&
             (mouseY < this.top_ + this.BODY_HEIGHT_);
  this.isOver = over;
  return over;
};

/**
 * Hide the Backpack flyout
 */
Blockly.Backpack.hide = function() {
  Blockly.Backpack.flyout_.hide();
};

/**
 * Flip the lid open or shut.
 * @param {boolean} state True if open.
 * @private
 */
Blockly.Backpack.prototype.setOpen_ = function(state) {
  if (this.isOpen == state) {
    return;
  }
  goog.Timer.clear(this.openTask_);
  this.isOpen = state;
  this.animateBackpack_();
};

/**
 * Change the image of backpack to one with red outline
 */
Blockly.Backpack.prototype.animateBackpack_ = function() {
  var icon = document.getElementById('backpackIcon');
  if (this.isOpen) {
    icon.setAttributeNS('http://www.w3.org/1999/xlink', 'xlink:href',
        Blockly.pathToBlockly + this.BPACK_OVER_);
  } else {
    icon.setAttributeNS('http://www.w3.org/1999/xlink', 'xlink:href',
        Blockly.pathToBlockly + this.BPACK_SMALL_);
  }
};

/**
 * Flip the lid shut.
 * Called externally after a drag.
 */
Blockly.Backpack.prototype.close = function() {
  this.setOpen_(false);
};

/**
 * Scales the backpack to a large size to indicate it contains blocks.
 */
Blockly.Backpack.prototype.grow = function() {
  if (this.isLarge)
    return;
  this.svgBody_.setAttribute('transform', 'scale(1.2)');
  this.MARGIN_SIDE_ = this.MARGIN_SIDE_ / 1.2;
  this.BODY_HEIGHT_ = this.BODY_HEIGHT_ * 1.2;
  this.WIDTH_ = this.WIDTH_ * 1.2;
  this.position_();
  this.isLarge = true;
};

/**
 * Scales the backpack to a small size to indicate it is empty.
 */
Blockly.Backpack.prototype.shrink = function() {
  if (!this.isLarge)
    return;
  this.svgBody_.setAttribute('transform', 'scale(1)');
  this.BODY_HEIGHT_ = this.BODY_HEIGHT_ / 1.2;
  this.WIDTH_ = this.WIDTH_ / 1.2;
  this.MARGIN_SIDE_ = this.MARGIN_SIDE_ * 1.2;
  this.position_();
  this.isLarge = false;
};

/**
 * Empties the backpack and shrinks its image, after user confirmation.
 */
Blockly.Backpack.prototype.clear = function() {
  if (Blockly.mainWorkspace.backpack.confirmClear()) {
    this.setBackpack(JSON.stringify([]));
    this.shrink();
  }
};

/**
 * Asks the user to confirm emptying the backpack.
 */
Blockly.Backpack.prototype.confirmClear = function() {
  return confirm(Blockly.Msg.BACKPACK_CONFIRM_EMPTY);
};

/**
 * Returns count of the number of entries in the backpack.
 */
Blockly.Backpack.prototype.count = function() {
  if (this.getBackpack() == null)
    return 0;
  var bp_contents = JSON.parse(this.getBackpack());
  return bp_contents.length;
};

/**
 * Reads the serialized backpack contents from the hosting BlocklyPanel.
 */
Blockly.Backpack.prototype.getBackpack = function() {
  return window.parent.BlocklyPanel_getBackpack();
};

/**
 * Writes the serialized backpack contents back to the hosting BlocklyPanel.
 */
Blockly.Backpack.prototype.setBackpack = function(backpack) {
  window.parent.BlocklyPanel_setBackpack(backpack);
};
apache-2.0
kjirou/reversi-tactics
src/mixins/BattlerMixin.js
907
import assert from 'assert';

import { ARMY_COLORS } from '../consts';


/*
 * Adds the battler role to a UnitModel.
 *
 * The mixin tracks the army the unit fights for and the unit's position
 * on the board (null while the unit is off the board).
 */
const BattlerMixin = {
  /*
   * The army that it belongs
   * {ArmyModel|null}
   */
  _belongingArmy: null,

  _position: null,

  battlerMixinConstructor(options) {
    const { belongingArmy } = options;
    // Guard: the mixin target must look like a UnitModel (has an _hp slot).
    assert(this.hasOwnProperty('_hp'), 'This object is not a UnitModel, probably');
    // Guard: a battler always fights for some army.
    assert(Boolean(belongingArmy), 'belongingArmy is not defined');
    this._belongingArmy = belongingArmy;
  },

  getBelongingArmy() {
    return this._belongingArmy;
  },

  setPosition(nextPosition) {
    this._position = nextPosition;
  },

  isPlaced() {
    return this._position !== null;
  },

  /*
   * Registers this battler on the given board at the given position.
   * @param {BoardModel} board
   */
  bePlacedToBoard(board, position) {
    board.placeBattler(position, this);
  },
};

export default BattlerMixin;

// A duck-typed check: anything carrying bePlacedToBoard is treated as a battler.
export const isMixedBattler = (candidate) => 'bePlacedToBoard' in candidate;
apache-2.0
unstab1e/sitecoreyaf8
yafsrc/YAF.Core/Extensions/MembershipUserExtensions.cs
1805
/* Yet Another Forum.NET
 * Copyright (C) 2003-2005 Bjørnar Henden
 * Copyright (C) 2006-2013 Jaben Cargman
 * Copyright (C) 2014-2015 Ingo Herbote
 * http://www.yetanotherforum.net/
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at

 * http://www.apache.org/licenses/LICENSE-2.0

 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

using YAF.Types.SitecoreHelpers;

namespace YAF.Core
{
  #region Using

    using System.Web.Security;

    using YAF.Types;
    using YAF.Types.Interfaces;

    #endregion

    /// <summary>
    /// Extension methods for <c>SitecoreMembershipUser</c>, bridging the
    /// Sitecore membership user into YAF's <see cref="IUserData"/> abstraction.
    /// </summary>
    public static class SitecoreMembershipUserExtensions
    {
        #region Public Methods

        /// <summary>
        /// Wraps the given membership user in a <c>CombinedUserDataHelper</c>
        /// so its data can be consumed through <see cref="IUserData"/>.
        /// </summary>
        /// <param name="SitecoreMembershipUser">
        /// The membership user to wrap. Must not be <c>null</c> (verified via
        /// <c>CodeContracts.VerifyNotNull</c>).
        /// </param>
        /// <returns>
        /// A new <c>CombinedUserDataHelper</c> exposing the user's combined data
        /// as <see cref="IUserData"/>; never <c>null</c>.
        /// </returns>
        [NotNull]
        public static IUserData GetUserData([NotNull] this SitecoreMembershipUser SitecoreMembershipUser)
        {
            CodeContracts.VerifyNotNull(SitecoreMembershipUser, "SitecoreMembershipUser");

            return new CombinedUserDataHelper(SitecoreMembershipUser);
        }

        #endregion
    }
}
apache-2.0
cert-se/megatron-java
src-test/se/sitic/megatron/rss/RssManagerTest.java
13503
package se.sitic.megatron.rss; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import java.io.File; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import org.apache.log4j.PropertyConfigurator; import org.junit.Before; import org.junit.Test; import se.sitic.megatron.core.TypedProperties; import se.sitic.megatron.util.FileUtil; /** * JUnit 4 test-case.<p> * * Will create a couple of RSS files in different formats. */ public class RssManagerTest { private static final String OUTPUT_DIR = "tmp/rss"; private static final String RSS_FILE_EXT= ".xml"; private static final String LOG4J_FILENAME = "conf/dev/log4j.properties"; private TypedProperties props; /** * Constructor. */ public RssManagerTest() { // empty } @Before public void init() throws Exception { PropertyConfigurator.configure(LOG4J_FILENAME); Map<String, String> propMap = new HashMap<String, String>(); props = new TypedProperties(propMap, null); // create output dir FileUtil.ensureDir(OUTPUT_DIR); // delete old files // File outputDir = new File(OUTPUT_DIR); // File[] filesToDelete = outputDir.listFiles(new FilenameFilter() { // public boolean accept(File dir, String name) { // return name.endsWith(RSS_FILE_EXT); // } // }); // for (int i = 0; i < filesToDelete.length; i++) { // File file = filesToDelete[i]; // if (!file.delete()) { // throw new IOException("Cannot delete file: " + file.getAbsolutePath()); // } // } } @Test public void createAndParseMinimalRss() throws Exception { RssManager rssManager = new RssManager(props); IRssFactory rssFactory = rssManager.createRssFactory(); IRssChannel rssChannel = rssFactory.createRssChannel(); for (Iterator<String> iterator = rssChannel.getSupportedRssFormats().iterator(); iterator.hasNext(); ) { createAndParseMinimalRss(iterator.next()); } } @Test public void 
createAndParseIntermediateRss() throws Exception { RssManager rssManager = new RssManager(props); IRssFactory rssFactory = rssManager.createRssFactory(); IRssChannel rssChannel = rssFactory.createRssChannel(); for (Iterator<String> iterator = rssChannel.getSupportedRssFormats().iterator(); iterator.hasNext(); ) { createAndParseIntermediateRss(iterator.next()); } } private void createAndParseMinimalRss(String rssFormat) throws Exception { if (rssFormat.equals("rss_0.9") || rssFormat.equals("rss_0.91N") || rssFormat.equals("rss_0.91U")) { // skip; require items return; } final String filename = OUTPUT_DIR + "/minimal-" + rssFormat + RSS_FILE_EXT; final String title = "Test Title"; final String description = "Test Description"; final String channelLink = "http://www.example.com/rss-foo.xml"; RssManager rssManager = new RssManager(props); IRssFactory rssFactory = rssManager.createRssFactory(); IRssChannel rssChannel = rssFactory.createRssChannel(); // assign rssChannel.setRssFormat(rssFormat); rssChannel.setTitle(title); rssChannel.setDescription(description); rssChannel.setLinks(Collections.singletonList(channelLink)); // write // IRssWriter rssWriter = rssFactory.createRssWriter(); // Writer out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(filename), Constants.UTF8)); // rssWriter.writeRss(out, rssChannel); // out.close(); rssManager.writeRss(new File(filename), rssChannel); // read // IRssParser rssParser = rssFactory.createRssParser(); // InputStream in = new BufferedInputStream(new FileInputStream(filename)); // rssChannel = rssParser.parseRss(in, null); rssManager.readRss(new File(filename), false); // check empty channel assertNotNull(rssChannel); assertEquals(title, rssChannel.getTitle()); assertEquals(description, rssChannel.getDescription()); assertEquals(Collections.singletonList(channelLink), rssChannel.getLinks()); assertEquals(0, rssChannel.getItems().size()); } private void createAndParseIntermediateRss(String rssFormat) throws 
Exception { if (rssFormat.equals("rss_0.9") || rssFormat.equals("rss_0.91N") || rssFormat.equals("rss_0.91U")) { // skip; require items return; } // DEBUG: System.out.println("rssFormat=" + rssFormat); // Current time without ms part final long now = 1000L * (System.currentTimeMillis() / 1000L); final String filename = OUTPUT_DIR + "/intermediate-" + rssFormat + RSS_FILE_EXT; final String title = "Test Title. Swedish characters: åäöÅÄÖ"; final String description = "Test Description. Swedish characters: åäöÅÄÖ"; final String[] channelLinks = { "http://www.example.com/rss-foo.xml" }; final String[] categories = { "Category 1", "Category 2" }; final String channelCopyright = "Channel Copyright"; final Date publicationDate = new Date(now - 10*24*60*60*1000L); final String[] singleChannelAuthor = { "Channel Author" }; final String[] multipleChannelAuthors = { "Channel Author 1", "Channel Author 2" }; final String[] channelAuthors = rssFormat.equals("atom_1.0") ? multipleChannelAuthors : singleChannelAuthor; final String item1Title = "Item 1 Title"; final String item1Description = "Description for item 1<br>New line."; final Date item1PublicationDate = new Date(now - 2*60*60*1000L); final Date item1UpdatedDate = new Date(now - 1*60*60*1000L); final String[] item1Categories = { "Item 1 Category 1", "Item 1 Category 2" }; final String[] item1SingleAuthor = { "Item 1 Author" }; final String[] item1MultipleAuthors = { "Item 1 Author 1", "Item 1 Author 2" }; final String[] item1Authors = rssFormat.equals("atom_1.0") ? 
item1MultipleAuthors : item1SingleAuthor; final String[] item1Links = { "http://www.example.com/rss-foo-item1.xml" }; final String item2Title = "Item 2 Title"; final String item2Description = "Description for item 2."; final Date item2PublicationDate = new Date(now - 4*60*60*1000L); final Date item2UpdatedDate = new Date(now - 2*60*60*1000L); final String[] item2Authors = { "Item 2 Author" }; final String[] item2Links = { "http://www.example.com/rss-foo-item2.xml" }; final String item3Title = "Item 3 Title"; final String item3Description = "Description for item 3."; final Date item3PublicationDate = new Date(now - 5*60*60*1000L); final String[] item3Links = { "http://www.example.com/rss-foo-item3.xml" }; final String item4Title = "Item 4 Title"; final String item4Description = "Description for item 4."; final Date item4PublicationDate = new Date(now - 9*60*60*1000L); final String[] item4Links = { "http://www.example.com/rss-foo-item4.xml" }; RssManager rssManager = new RssManager(props); IRssFactory rssFactory = rssManager.createRssFactory(); IRssChannel rssChannel = rssFactory.createRssChannel(); IRssItem item1 = rssFactory.createRssItem(rssChannel); IRssItem item2 = rssFactory.createRssItem(rssChannel); IRssItem item3 = rssFactory.createRssItem(rssChannel); IRssItem item4 = rssFactory.createRssItem(rssChannel); // assign rssChannel.setRssFormat(rssFormat); rssChannel.setTitle(title); rssChannel.setDescription(description); rssChannel.setLinks(Arrays.asList(channelLinks)); rssChannel.setCategories(Arrays.asList(categories)); rssChannel.setCopyright(channelCopyright); rssChannel.setPublicationDate(publicationDate); rssChannel.setAuthors(Arrays.asList(channelAuthors)); item1.setTitle(item1Title); item1.setDescription(item1Description); item1.setPublicationDate(item1PublicationDate); item1.setUpdatedDate(item1UpdatedDate); item1.setCategories(Arrays.asList(item1Categories)); item1.setAuthors(Arrays.asList(item1Authors)); item1.setLinks(Arrays.asList(item1Links)); 
item2.setTitle(item2Title); item2.setDescription(item2Description); item2.setPublicationDate(item2PublicationDate); item2.setUpdatedDate(item2UpdatedDate); item2.setAuthors(Arrays.asList(item2Authors)); item2.setLinks(Arrays.asList(item2Links)); item3.setTitle(item3Title); item3.setDescription(item3Description); item3.setPublicationDate(item3PublicationDate); item3.setLinks(Arrays.asList(item3Links)); item4.setTitle(item4Title); item4.setDescription(item4Description); item4.setPublicationDate(item4PublicationDate); item4.setLinks(Arrays.asList(item4Links)); List<IRssItem> items = new ArrayList<IRssItem>(); items.add(item1); items.add(item2); items.add(item3); items.add(item4); rssChannel.setItems(items); // write // IRssWriter rssWriter = rssFactory.createRssWriter(); // Writer out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(filename), Constants.UTF8)); // rssWriter.writeRss(out, rssChannel); // out.close(); rssManager.writeRss(new File(filename), rssChannel); // read // IRssParser rssParser = rssFactory.createRssParser(); // InputStream in = new BufferedInputStream(new FileInputStream(filename)); // rssChannel = rssParser.parseRss(in, null); rssManager.readRss(new File(filename), false); // check channel assertNotNull(rssChannel); assertEquals(title, rssChannel.getTitle()); assertEquals(description, rssChannel.getDescription()); assertEquals(Arrays.asList(channelLinks), rssChannel.getLinks()); if (!rssFormat.equals("rss_0.92") && !rssFormat.equals("rss_0.93") && !rssFormat.equals("rss_0.94")) { assertEquals(Arrays.asList(categories), rssChannel.getCategories()); } assertEquals(channelCopyright, rssChannel.getCopyright()); assertEquals(publicationDate, rssChannel.getPublicationDate()); if (!rssFormat.equals("rss_0.92") && !rssFormat.equals("rss_0.93") && !rssFormat.equals("rss_0.94")) { assertEquals(Arrays.asList(channelAuthors), rssChannel.getAuthors()); } items = rssChannel.getItems(); item1 = items.get(0); item2 = items.get(1); item3 = 
items.get(2); item4 = items.get(3); // check item1 assertEquals(item1Title, item1.getTitle()); assertEquals(item1Description, item1.getDescription()); if (!rssFormat.equals("rss_0.92")) { assertEquals(item1PublicationDate, item1.getPublicationDate()); } if (!rssFormat.equals("rss_0.92") && !rssFormat.equals("rss_0.93") && !rssFormat.equals("rss_0.94") && !rssFormat.equals("rss_1.0") && !rssFormat.equals("rss_2.0") && !rssFormat.equals("atom_0.3")) { assertEquals(item1UpdatedDate, item1.getUpdatedDate()); } if (!rssFormat.equals("atom_0.3") && !rssFormat.equals("rss_1.0")) { assertEquals(Arrays.asList(item1Categories), item1.getCategories()); } if (!rssFormat.equals("rss_0.92") && !rssFormat.equals("rss_0.93") && !rssFormat.equals("rss_0.94")) { assertEquals(Arrays.asList(item1Authors), item1.getAuthors()); } assertEquals(Arrays.asList(item1Links), item1.getLinks()); // check item2 assertEquals(item2Title, item2.getTitle()); assertEquals(item2Description, item2.getDescription()); if (!rssFormat.equals("rss_0.92")) { assertEquals(item2PublicationDate, item2.getPublicationDate()); } // Tested above: assertEquals(item2UpdatedDate, item2.getUpdatedDate()); assertEquals(0, item2.getCategories().size()); if (!rssFormat.equals("rss_0.92") && !rssFormat.equals("rss_0.93") && !rssFormat.equals("rss_0.94")) { assertEquals(Arrays.asList(item2Authors), item2.getAuthors()); } assertEquals(Arrays.asList(item2Links), item2.getLinks()); // check item3 assertEquals(item3Title, item3.getTitle()); assertEquals(item3Description, item3.getDescription()); if (!rssFormat.equals("rss_0.92")) { assertEquals(item3PublicationDate, item3.getPublicationDate()); } assertEquals(Arrays.asList(item3Links), item3.getLinks()); // check item4 assertEquals(item4Title, item4.getTitle()); assertEquals(item4Description, item4.getDescription()); if (!rssFormat.equals("rss_0.92")) { assertEquals(item4PublicationDate, item4.getPublicationDate()); } assertEquals(Arrays.asList(item4Links), item4.getLinks()); } 
}
apache-2.0
seanzwx/tmp
seatalk/platform/framework/framework-service/src/main/java/com/sean/service/constant/_P.java
910
package com.sean.service.constant; import com.sean.service.annotation.ParameterConfig; import com.sean.service.annotation.ParameterProviderConfig; import com.sean.service.enums.DataType; /** * 框架内置参数,外部不得使用 * @author sean */ @ParameterProviderConfig(description = "框架内置接口参数") public class _P { @ParameterConfig(dataType = DataType.String, description = "需要加载的css文件路径,多个文件用逗号隔开,路径需要从项目根路径开始") public static final String css = "css"; @ParameterConfig(dataType = DataType.String, description = "需要加载的javascript文件路径,多个文件用逗号隔开,路径需要从项目根路径开始") public static final String js = "js"; @ParameterConfig(dataType = DataType.Int, description = "客户端类型,1-sock,2-websock") public static final String clientType = "clientType"; }
apache-2.0
gisfromscratch/webapp-builder-demos
samples/Sample Widgets/WebAppWidgets/widgets/Splash/setting/nls/nl/strings.js
459
define( ({ instruction: "Bepaal de content die wordt weergegeven op het splash-scherm van uw applicatie. Het splash-scherm verschijnt vóór de applicatie laadt. ", defaultContent: "Voeg hier tekst, koppelingen en kleine afbeeldingen toe.", requireConfirm: "Bevestiging vereisen om door te gaan", confirmLabel: "Bevestigingsteksten: ", defaultConfirmText: "Ik ga akkoord met de bovenstaande gebruiksvoorwaarden en bepalingen" }) );
apache-2.0
maciej-zygmunt/unitime
JavaSource/org/unitime/timetable/api/connectors/BuildingsConntector.java
8838
/* * Licensed to The Apereo Foundation under one or more contributor license * agreements. See the NOTICE file distributed with this work for * additional information regarding copyright ownership. * * The Apereo Foundation licenses this file to you under the Apache License, * Version 2.0 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. * */ package org.unitime.timetable.api.connectors; import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.hibernate.Transaction; import org.springframework.stereotype.Service; import org.unitime.timetable.api.ApiConnector; import org.unitime.timetable.api.ApiHelper; import org.unitime.timetable.gwt.shared.RoomInterface.BuildingInterface; import org.unitime.timetable.model.Assignment; import org.unitime.timetable.model.Building; import org.unitime.timetable.model.ChangeLog; import org.unitime.timetable.model.Room; import org.unitime.timetable.model.TimetableManager; import org.unitime.timetable.model.dao.BuildingDAO; import org.unitime.timetable.model.dao.SessionDAO; import org.unitime.timetable.security.rights.Right; /** * @author Tomas Muller */ @Service("/api/buildings") public class BuildingsConntector extends ApiConnector { @Override public void doGet(ApiHelper helper) throws IOException { Long sessionId = helper.getAcademicSessionId(); if (sessionId == null) throw new IllegalArgumentException("Academic session not provided, please set the term parameter."); helper.getSessionContext().checkPermissionAnyAuthority(sessionId, "Session", 
Right.ApiRetrieveRooms); List<BuildingInterface> buildings = new ArrayList<BuildingInterface>(); for (Building b: Building.findAll(sessionId)) { BuildingInterface building = new BuildingInterface(); building.setId(b.getUniqueId()); building.setName(b.getName()); building.setAbbreviation(b.getAbbreviation()); building.setX(b.getCoordinateX()); building.setY(b.getCoordinateY()); building.setExternalId(b.getExternalUniqueId()); buildings.add(building); } helper.setResponse(buildings); } @Override public void doDelete(ApiHelper helper) throws IOException { Transaction tx = helper.getHibSession().beginTransaction(); try { Building building = null; Long buildingId = helper.getOptinalParameterLong("id", null); if (buildingId != null) { building = BuildingDAO.getInstance().get(buildingId, helper.getHibSession()); if (building == null) throw new IllegalArgumentException("Building " + buildingId + " does not exist."); } else { Long sessionId = helper.getAcademicSessionId(); if (sessionId == null) throw new IllegalArgumentException("Academic session not provided, please set the term parameter."); String externalId = helper.getOptinalParameter("externalId", null); if (externalId != null) { building = (Building)helper.getHibSession().createQuery("from Building where externalUniqueId = :externalId and session.uniqueId = :sessionId") .setLong("sessionId", sessionId).setString("externalId", externalId).setMaxResults(1).uniqueResult(); if (building == null) throw new IllegalArgumentException("Building " + externalId + " does not exist."); } if (building == null) { String abbv = helper.getRequiredParameter("building"); building = (Building)helper.getHibSession().createQuery("from Building where (abbreviation = :abbv or name = :abbv) and session.uniqueId = :sessionId") .setLong("sessionId", sessionId).setString("abbv", abbv).setMaxResults(1).uniqueResult(); if (building == null) throw new IllegalArgumentException("Building " + abbv + " does not exist."); } } 
helper.getSessionContext().checkPermissionAnyAuthority(building.getSession(), Right.ApiRoomEdit); helper.getSessionContext().checkPermissionAnyAuthority(building, Right.BuildingDelete); for (Room r: (List<Room>)BuildingDAO.getInstance().getSession().createQuery("from Room r where r.building.uniqueId = :buildingId").setLong("buildingId", building.getUniqueId()).list()) { helper.getHibSession().createQuery("delete RoomPref p where p.room.uniqueId = :roomId").setLong("roomId", r.getUniqueId()).executeUpdate(); for (Iterator<Assignment> i = r.getAssignments().iterator(); i.hasNext(); ) { Assignment a = i.next(); a.getRooms().remove(r); helper.getHibSession().saveOrUpdate(a); i.remove(); } helper.getHibSession().delete(r); } ChangeLog.addChange( helper.getHibSession(), TimetableManager.findByExternalId(sessionContext.getUser().getExternalUserId()), building.getSession(), building, ChangeLog.Source.BUILDING_EDIT, ChangeLog.Operation.DELETE, null, null); helper.getHibSession().delete(building); tx.commit(); } catch (Exception e) { if (tx != null) { tx.rollback(); } if (e instanceof RuntimeException) throw (RuntimeException)e; if (e instanceof IOException) throw (IOException)e; throw new IOException(e.getMessage(), e); } } @Override public void doPost(ApiHelper helper) throws IOException { BuildingInterface b = helper.getRequest(BuildingInterface.class); Transaction tx = helper.getHibSession().beginTransaction(); try { Building building = null; if (b.getId() != null) { building = BuildingDAO.getInstance().get(b.getId(), helper.getHibSession()); if (building == null) throw new IllegalArgumentException("Building " + b.getId() + " does not exist."); } else { Long sessionId = helper.getAcademicSessionId(); if (sessionId == null) throw new IllegalArgumentException("Academic session not provided, please set the term parameter."); if (b.getExternalId() != null) { building = (Building)helper.getHibSession().createQuery("from Building where externalUniqueId = :externalId and 
session.uniqueId = :sessionId") .setLong("sessionId", sessionId).setString("externalId", b.getExternalId()).setMaxResults(1).uniqueResult(); } else if (b.getAbbreviation() != null) { building = (Building)helper.getHibSession().createQuery("from Building where abbreviation = :abbv and session.uniqueId = :sessionId") .setLong("sessionId", sessionId).setString("abbv", b.getAbbreviation()).setMaxResults(1).uniqueResult(); } } if (building != null) { helper.getSessionContext().checkPermissionAnyAuthority(building.getSession(), Right.ApiRoomEdit); helper.getSessionContext().checkPermissionAnyAuthority(building, Right.BuildingEdit); } else { helper.getSessionContext().checkPermissionAnyAuthority(helper.getAcademicSessionId(), "Session", Right.ApiRoomEdit); helper.getSessionContext().checkPermissionAnyAuthority(helper.getAcademicSessionId(), "Session", Right.BuildingAdd); } ChangeLog.Operation op = null; if (building == null) { building = new Building(); building.setSession(SessionDAO.getInstance().get(helper.getAcademicSessionId(), helper.getHibSession())); op = ChangeLog.Operation.CREATE; } else { op = ChangeLog.Operation.UPDATE; } building.setName(b.getName()); building.setAbbreviation(b.getAbbreviation()); building.setExternalUniqueId(b.getExternalId()); building.setCoordinateX(b.getX()); building.setCoordinateY(b.getY()); helper.getHibSession().saveOrUpdate(building); b.setId(building.getUniqueId()); ChangeLog.addChange( helper.getHibSession(), TimetableManager.findByExternalId(sessionContext.getUser().getExternalUserId()), building.getSession(), building, ChangeLog.Source.BUILDING_EDIT, op, null, null); tx.commit(); } catch (Exception e) { if (tx != null) { tx.rollback(); } if (e instanceof RuntimeException) throw (RuntimeException)e; if (e instanceof IOException) throw (IOException)e; throw new IOException(e.getMessage(), e); } helper.setResponse(b); } @Override protected String getName() { return "buildings"; } }
apache-2.0
gdefias/JavaDemo
DemoJava/guice-demo/src/main/java/com/github/greengerong/price/PriceService.java
412
package com.github.greengerong.price; /** * *************************************** * * * Auth: green gerong * * Date: 2014 * * blog: http://greengerong.github.io/ * * github: https://github.com/greengerong * * * * **************************************** */ public class PriceService { public String getPrice() { return "price"; } }
apache-2.0
modulexcite/NCache
Integration/MemCached/Clients/.NET memcached Client Library/Src/NativeHandler.cs
13894
// Copyright (c) 2015 Alachisoft // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** /// Memcached C# client /// Copyright (c) 2005 /// /// Based on code written originally by Greg Whalin /// http://www.whalin.com/memcached/ /// /// This library is free software; you can redistribute it and/or /// modify it under the terms of the GNU Lesser General Public /// License as published by the Free Software Foundation; either /// version 2.1 of the License, or (at your option) any later /// version. /// /// This library is distributed in the hope that it will be /// useful, but WITHOUT ANY WARRANTY; without even the implied /// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR /// PURPOSE. See the GNU Lesser General Public License for more /// details. /// /// You should have received a copy of the GNU Lesser General Public /// License along with this library; if not, write to the Free Software /// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA /// /// @author Tim Gebhardt <tim@gebhardtcomputing.com> /// @version 1.0 **/ namespace Memcached.ClientLibrary { using System; using System.Resources; using System.Text; /** /// COMMENT FROM ORIGINAL JAVA CLIENT LIBRARY. NOT SURE HOW MUCH IT /// APPLIES TO THIS LIBRARY. /// /// /// Handle encoding standard Java types directly which can result in significant /// memory savings: /// /// Currently the Memcached driver for Java supports the setSerialize() option. 
/// This can increase performance in some situations but has a few issues: /// /// Code that performs class casting will throw ClassCastExceptions when /// setSerialize is enabled. For example: /// /// mc.set("foo", new Integer(1)); Integer output = (Integer)mc.get("foo"); /// /// Will work just file when setSerialize is true but when its false will just throw /// a ClassCastException. /// /// Also internally it doesn't support bool and since toString is called wastes a /// lot of memory and causes additional performance issue. For example an Integer /// can take anywhere from 1 byte to 10 bytes. /// /// Due to the way the memcached slabytes allocator works it seems like a LOT of wasted /// memory to store primitive types as serialized objects (from a performance and /// memory perspective). In our applications we have millions of small objects and /// wasted memory would become a big problem. /// /// For example a Serialized bool takes 47 bytes which means it will fit into the /// 64byte LRU. Using 1 byte means it will fit into the 8 byte LRU thus saving 8x /// the memory. This also saves the CPU performance since we don't have to /// serialize bytes back and forth and we can compute the byte[] value directly. /// /// One problem would be when the user calls get() because doing so would require /// the app to know the type of the object stored as a bytearray inside memcached /// (since the user will probably cast). /// /// If we assume the basic types are interned we could use the first byte as the /// type with the remaining bytes as the value. Then on get() we could read the /// first byte to determine the type and then construct the correct object for it. /// This would prevent the ClassCastException I talked about above. /// /// We could remove the setSerialize() option and just assume that standard VM types /// are always internd in this manner. 
/// /// mc.set("foo", new bool.TRUE); bool bytes = (bool)mc.get("foo"); /// /// And the type casts would work because internally we would create a new bool /// to return back to the client. /// /// This would reduce memory footprint and allow for a virtual implementation of the /// Externalizable interface which is much faster than Serialzation. /// /// Currently the memory improvements would be: /// /// java.lang.bool - 8x performance improvement (now just two bytes) /// java.lang.Integer - 16x performance improvement (now just 5 bytes) /// /// Most of the other primitive types would benefit from this optimization. /// java.lang.Character being another obvious example. /// /// I know it seems like I'm being really picky here but for our application I'd /// save 1G of memory right off the bat. We'd go down from 1.152G of memory used /// down to 144M of memory used which is much better IMO. **/ public sealed class NativeHandler { //FIXME: what about other common types? Also what about //Collections of native types? I could reconstruct these on the remote end //if necessary. Though I'm not sure of the performance advantage here. 
public const byte ByteMarker = 1; public const byte BoolMarker = 2; public const byte Int32Marker = 3; public const byte Int64Marker = 4; public const byte CharMarker = 5; public const byte StringMarker = 6; public const byte StringBuilderMarker = 7; public const byte SingleMarker = 8; public const byte Int16Marker = 9; public const byte DoubleMarker = 10; public const byte DateTimeMarker = 11; private NativeHandler() {} public static bool IsHandled(object value) { if(value is bool || value is byte || value is string || value is char || value is StringBuilder || value is short || value is long || value is double || value is float || value is DateTime || value is Int32) { return true; } return false; } // **** Encode methods ****************************************************** public static byte[] Encode(object value) { if(value == null) return new byte[0]; if(value is bool) return Encode((bool)value); if(value is Int32) return Encode((Int32)value); if(value is char) return Encode((char)value); if(value is byte) return Encode((byte)value); if(value is short) return Encode((short)value); if(value is long) return Encode((long)value); if(value is double) return Encode((double)value); if(value is float) return Encode((float)value); string tempstr = value as string; if(tempstr != null) return Encode(tempstr); StringBuilder tempsb = value as StringBuilder; if(tempsb != null) return Encode(tempsb); if(value is DateTime) return Encode((DateTime) value); return null; } public static byte[] Encode(DateTime value) { byte[] bytes = GetBytes(value.Ticks); bytes[0] = DateTimeMarker; return bytes; } public static byte[] Encode(bool value) { byte[] bytes = new byte[2]; bytes[0] = BoolMarker; if(value) { bytes[1] = 1; } else { bytes[1] = 0; } return bytes; } public static byte[] Encode(int value) { byte[] bytes = GetBytes(value); bytes[0] = Int32Marker; return bytes; } public static byte[] Encode(char value) { byte[] result = Encode((short) value); result[0] = CharMarker; return 
result; } public static byte[] Encode(string value) { if(value == null) return new byte[1]{ StringMarker }; byte[] asBytes = UTF8Encoding.UTF8.GetBytes(value); byte[] result = new byte[asBytes.Length + 1]; result[0] = StringMarker; Array.Copy(asBytes, 0, result, 1, asBytes.Length); return result; } public static byte[] Encode(byte value) { byte[] bytes = new byte[2]; bytes[0] = ByteMarker; bytes[1] = value; return bytes; } public static byte[] Encode(StringBuilder value) { if(value == null) return new byte[1]{ StringBuilderMarker }; byte[] bytes = Encode(value.ToString()); bytes[0] = StringBuilderMarker; return bytes; } public static byte[] Encode(short value) { byte[] bytes = Encode((int)value); bytes[0] = Int16Marker; return bytes; } public static byte[] Encode(long value) { byte[] bytes = GetBytes(value); bytes[0] = Int64Marker; return bytes; } public static byte[] Encode(double value) { byte[] temp = BitConverter.GetBytes(value); byte[] bytes = new byte[temp.Length + 1]; bytes[0] = DoubleMarker; Array.Copy(temp, 0, bytes, 1, temp.Length); return bytes; } public static byte[] Encode(float value) { byte[] temp = BitConverter.GetBytes(value); byte[] bytes = new byte[temp.Length + 1]; bytes[0] = SingleMarker; Array.Copy(temp, 0, bytes, 1, temp.Length); return bytes; } public static byte[] GetBytes(long value) { byte b0 = (byte)((value >> 56) & 0xFF); byte b1 = (byte)((value >> 48) & 0xFF); byte b2 = (byte)((value >> 40) & 0xFF); byte b3 = (byte)((value >> 32) & 0xFF); byte b4 = (byte)((value >> 24) & 0xFF); byte b5 = (byte)((value >> 16) & 0xFF); byte b6 = (byte)((value >> 8) & 0xFF); byte b7 = (byte)((value >> 0) & 0xFF); byte[] bytes = new byte[9]; bytes[1] = b0; bytes[2] = b1; bytes[3] = b2; bytes[4] = b3; bytes[5] = b4; bytes[6] = b5; bytes[7] = b6; bytes[8] = b7; return bytes; } public static byte[] GetBytes(int value) { byte b0 = (byte)((value >> 24) & 0xFF); byte b1 = (byte)((value >> 16) & 0xFF); byte b2 = (byte)((value >> 8) & 0xFF); byte b3 = 
(byte)((value >> 0) & 0xFF); byte[] bytes = new byte[5]; bytes[1] = b0; bytes[2] = b1; bytes[3] = b2; bytes[4] = b3; return bytes; } // **** Decode methods ****************************************************** public static Object Decode(byte[] bytes) { //something strange is going on. if(bytes == null || bytes.Length == 0) return null; //determine what type this is: if(bytes[0] == BoolMarker) return DecodeBool(bytes); if(bytes[0] == Int32Marker) return DecodeInteger(bytes); if(bytes[0] == StringMarker) return DecodeString(bytes); if(bytes[0] == CharMarker) return DecodeCharacter(bytes); if(bytes[0] == ByteMarker) return DecodeByte(bytes); if(bytes[0] == StringBuilderMarker) return DecodeStringBuilder(bytes); if(bytes[0] == Int16Marker) return DecodeShort(bytes); if(bytes[0] == Int64Marker) return DecodeLong(bytes); if(bytes[0] == DoubleMarker) return DecodeDouble(bytes); if(bytes[0] == SingleMarker) return DecodeFloat(bytes); if(bytes[0] == DateTimeMarker) return DecodeDate(bytes); return null; } public static DateTime DecodeDate(byte[] bytes) { return new DateTime(ToLong(bytes)); } public static bool DecodeBool(byte[] bytes) { if(bytes == null) throw new ArgumentNullException("bytes", GetLocalizedString("parameter cannot be null")); bool value = bytes[1] == 1; return value; } public static Int32 DecodeInteger(byte[] bytes) { return ToInt(bytes) ; } public static string DecodeString(byte[] bytes) { if(bytes == null) return null; return UTF8Encoding.UTF8.GetString(bytes, 1, bytes.Length -1); } public static char DecodeCharacter(byte[] bytes) { return (char)DecodeInteger(bytes); } public static byte DecodeByte(byte[] bytes) { if(bytes == null) throw new ArgumentNullException("bytes", GetLocalizedString("parameter cannot be null")); byte value = bytes[1]; return value; } public static StringBuilder DecodeStringBuilder(byte[] bytes) { return new StringBuilder(DecodeString(bytes)); } public static short DecodeShort(byte[] bytes) { return (short)DecodeInteger(bytes); } 
public static long DecodeLong(byte[] bytes) { return ToLong(bytes); } public static double DecodeDouble(byte[] bytes) { return BitConverter.ToDouble(bytes, 1); } public static float DecodeFloat(byte[] bytes) { return BitConverter.ToSingle(bytes, 1); } public static int ToInt(byte[] bytes) { if(bytes == null) throw new ArgumentNullException("bytes", GetLocalizedString("parameter cannot be null")); //This works by taking each of the bit patterns and converting them to //ints taking into account 2s complement and then adding them.. return ((((int) bytes[4]) & 0xFF) << 32) + ((((int) bytes[3]) & 0xFF) << 40) + ((((int) bytes[2]) & 0xFF) << 48) + ((((int) bytes[1]) & 0xFF) << 56) ; } public static long ToLong(byte[] bytes) { if(bytes == null) throw new ArgumentNullException("bytes", GetLocalizedString("parameter cannot be null")); //FIXME: this is sad in that it takes up 16 bytes instead of JUST 8 //bytes and wastes memory. We could use a memcached flag to enable //special treatment for 64bit types //This works by taking each of the bit patterns and converting them to //ints taking into account 2s complement and then adding them.. return (((long) bytes[8]) & 0xFF) + ((((long) bytes[7]) & 0xFF) << 8) + ((((long) bytes[6]) & 0xFF) << 16) + ((((long) bytes[5]) & 0xFF) << 24) + ((((long) bytes[4]) & 0xFF) << 32) + ((((long) bytes[3]) & 0xFF) << 40) + ((((long) bytes[2]) & 0xFF) << 48) + ((((long) bytes[1]) & 0xFF) << 56) ; } private static ResourceManager _resourceManager = new ResourceManager("Memcached.ClientLibrary.StringMessages", typeof(SockIOPool).Assembly); private static string GetLocalizedString(string key) { return _resourceManager.GetString(key); } } }
apache-2.0
touwolf/bridje-framework
bridje-sql/src/main/java/org/bridje/sql/BuildFkReferencesStep.java
1270
/* * Copyright 2017 Bridje Framework. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.bridje.sql; /** * Build foreign key references step. */ public interface BuildFkReferencesStep { /** * Adds the ON DELTE, ON UPDATE strategy for the foreign key. * * @param onUpdate The ON DELETE strategy. * @param onDelete The ON UPDATE strategy. * * @return The next step. */ BuildFkFinalStep strategy(ForeignKeyStrategy onUpdate, ForeignKeyStrategy onDelete); /** * Adds the ON DELTE, ON UPDATE strategy for the foreign key. * * @param stategy The ON DELETE, ON UPDATE strategy. * * @return The next step. */ BuildFkFinalStep strategy(ForeignKeyStrategy stategy); }
apache-2.0
ninqing/tddl
tddl-parser/src/main/java/com/alibaba/cobar/parser/ast/stmt/dal/ShowBroadcasts.java
981
/* * Copyright 1999-2012 Alibaba Group. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * (created at 2011-5-21) */ package com.alibaba.cobar.parser.ast.stmt.dal; import com.alibaba.cobar.parser.visitor.SQLASTVisitor; /** * @author mengshi.sunmengshi 2014年5月16日 上午11:26:24 * @since 5.1.0 */ public class ShowBroadcasts extends TddlShow { @Override public void accept(SQLASTVisitor visitor) { visitor.visit(this); } }
apache-2.0
cbeams-archive/spring-framework-2.5.x
test/org/springframework/web/servlet/view/velocity/TestVelocityEngine.java
1483
/* * Copyright 2002-2006 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.web.servlet.view.velocity; import java.util.HashMap; import java.util.Map; import org.apache.velocity.Template; import org.apache.velocity.app.VelocityEngine; /** * @author Juergen Hoeller * @since 09.10.2004 */ public class TestVelocityEngine extends VelocityEngine { private final Map templates = new HashMap(); public TestVelocityEngine() { } public TestVelocityEngine(String expectedName, Template template) { addTemplate(expectedName, template); } public void addTemplate(String expectedName, Template template) { this.templates.put(expectedName, template); } public Template getTemplate(String name) { Template template = (Template) this.templates.get(name); if (template == null) { throw new IllegalStateException("No template registered for name [" + name + "]"); } return template; } }
apache-2.0
h2oai/h2o-3
h2o-extensions/xgboost/src/main/java/hex/tree/xgboost/MemoryCheck.java
1509
package hex.tree.xgboost; import oshi.SystemInfo; import oshi.hardware.GlobalMemory; import oshi.hardware.HardwareAbstractionLayer; import water.util.PrettyPrint; public class MemoryCheck { public static Report runCheck(double offHeapRatio) { SystemInfo systemInfo = new SystemInfo(); HardwareAbstractionLayer hardware = systemInfo.getHardware(); GlobalMemory globalMemory = hardware.getMemory(); Runtime runtime = Runtime.getRuntime(); long available = globalMemory.getAvailable(); long availableOffHeap = Math.max(available - (runtime.maxMemory() - runtime.totalMemory()), 0); long desiredOffHeap = (long) (runtime.maxMemory() * offHeapRatio); return new Report(availableOffHeap, desiredOffHeap); } public static class Report { public final long _available_off_heap; public final long _desired_off_heap; public Report(long available_off_heap, long desired_off_heap) { _available_off_heap = available_off_heap; _desired_off_heap = desired_off_heap; } public boolean isOffHeapRequirementMet() { return _available_off_heap >= _desired_off_heap; } @Override public String toString() { return "Estimated Available Off-Heap (assuming JVM heap reaches maximum size): " + PrettyPrint.bytes(_available_off_heap) + ", Desired Off-Heap: " + PrettyPrint.bytes(_desired_off_heap); } } }
apache-2.0
zzcclp/carbondata
core/src/main/java/org/apache/carbondata/core/datastore/blocklet/EncodedBlocklet.java
6853
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.carbondata.core.datastore.blocklet; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutorService; import org.apache.carbondata.core.datastore.page.EncodedTablePage; import org.apache.carbondata.core.localdictionary.generator.LocalDictionaryGenerator; /** * Holds the blocklet level data and metadata to be written in carbondata file * For dimension pages it will check if all the pages are not encoded with dictionary * then it will encode those pages for that column again */ public class EncodedBlocklet { /** * number of rows in a blocklet */ private int blockletSize; /** * maintains encoded dimension data for each column */ private List<BlockletEncodedColumnPage> encodedDimensionColumnPages; /** * maintains encoded measure data for each column */ private List<BlockletEncodedColumnPage> encodedMeasureColumnPages; /** * fallback executor service, will used to re-encode column pages */ private ExecutorService executorService; /** * number of pages in a blocklet */ private int numberOfPages; /** * row count in each page */ private List<Short> rowCountInPage; /** * is decoder based fallback is enabled or not */ private boolean 
isDecoderBasedFallBackEnabled; /** * local dictionary generator map of all local dictionary columns */ private Map<String, LocalDictionaryGenerator> localDictionaryGeneratorMap; public EncodedBlocklet(ExecutorService executorService, boolean isDecoderBasedFallBackEnabled, Map<String, LocalDictionaryGenerator> localDictionaryGeneratorMap) { this.executorService = executorService; this.isDecoderBasedFallBackEnabled = isDecoderBasedFallBackEnabled; this.localDictionaryGeneratorMap = localDictionaryGeneratorMap; this.rowCountInPage = new ArrayList<>(); } /** * Below method will be used to add page metadata details * * @param encodedTablePage * encoded table page */ private void addPageMetadata(EncodedTablePage encodedTablePage) { if (null == rowCountInPage) { rowCountInPage = new ArrayList<>(); } // update details blockletSize += encodedTablePage.getPageSize(); this.numberOfPages++; rowCountInPage.add((short)encodedTablePage.getPageSize()); } /** * Below method will be used to add measure column pages * * @param encodedTablePage * encoded table page */ private void addEncodedMeasurePage(EncodedTablePage encodedTablePage) { // for first page create new list if (null == encodedMeasureColumnPages) { encodedMeasureColumnPages = new ArrayList<>(); // adding measure pages for (int i = 0; i < encodedTablePage.getNumMeasures(); i++) { BlockletEncodedColumnPage blockletEncodedColumnPage = new BlockletEncodedColumnPage(null, false, null); blockletEncodedColumnPage.addEncodedColumnPage(encodedTablePage.getMeasure(i)); encodedMeasureColumnPages.add(blockletEncodedColumnPage); } } else { for (int i = 0; i < encodedTablePage.getNumMeasures(); i++) { encodedMeasureColumnPages.get(i).addEncodedColumnPage(encodedTablePage.getMeasure(i)); } } } /** * Below method will be used to add dimension column pages * * @param encodedTablePage * encoded table page */ private void addEncodedDimensionPage(EncodedTablePage encodedTablePage) { // for first page create new list if (null == 
encodedDimensionColumnPages) { encodedDimensionColumnPages = new ArrayList<>(); // adding measure pages for (int i = 0; i < encodedTablePage.getNumDimensions(); i++) { BlockletEncodedColumnPage blockletEncodedColumnPage = new BlockletEncodedColumnPage(executorService, isDecoderBasedFallBackEnabled, localDictionaryGeneratorMap.get( encodedTablePage.getDimension(i).getActualPage().getColumnSpec() .getFieldName())); blockletEncodedColumnPage.addEncodedColumnPage(encodedTablePage.getDimension(i)); encodedDimensionColumnPages.add(blockletEncodedColumnPage); } } else { for (int i = 0; i < encodedTablePage.getNumDimensions(); i++) { encodedDimensionColumnPages.get(i).addEncodedColumnPage(encodedTablePage.getDimension(i)); } } } /** * Use to add table pages * * @param encodedTablePage * encoded table page */ public void addEncodedTablePage(EncodedTablePage encodedTablePage) { addPageMetadata(encodedTablePage); addEncodedDimensionPage(encodedTablePage); addEncodedMeasurePage(encodedTablePage); } public int getBlockletSize() { return blockletSize; } public List<BlockletEncodedColumnPage> getEncodedDimensionColumnPages() { return encodedDimensionColumnPages; } public List<BlockletEncodedColumnPage> getEncodedMeasureColumnPages() { return encodedMeasureColumnPages; } public int getNumberOfDimension() { return encodedDimensionColumnPages.size(); } public int getNumberOfMeasure() { return encodedMeasureColumnPages.size(); } public int getNumberOfPages() { return this.numberOfPages; } public List<Short> getRowCountInPage() { return rowCountInPage; } public void setRowCountInPage(List<Short> rowCountInPage) { this.rowCountInPage = rowCountInPage; } public void clear() { if (null != encodedDimensionColumnPages) { for (BlockletEncodedColumnPage blockletEncodedColumnPage : encodedDimensionColumnPages) { blockletEncodedColumnPage.cleanBuffer(); } } if (null != encodedMeasureColumnPages) { for (BlockletEncodedColumnPage blockletEncodedColumnPage : encodedMeasureColumnPages) { 
blockletEncodedColumnPage.cleanBuffer(); } } this.numberOfPages = 0; this.encodedDimensionColumnPages = null; this.blockletSize = 0; this.encodedMeasureColumnPages = null; this.rowCountInPage = null; } }
apache-2.0
tdyas/pants
src/python/pants/backend/jvm/tasks/jvmdoc_gen.py
10104
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). import contextlib import multiprocessing import os import re import subprocess from collections import namedtuple from multiprocessing.pool import ThreadPool from pants.backend.jvm.tasks.jvm_task import JvmTask from pants.base.exceptions import TaskError from pants.build_graph.target_scopes import Scopes from pants.task.target_restriction_mixins import ( HasSkipAndTransitiveOptionsMixin, SkipAndTransitiveOptionsRegistrar, ) from pants.util import desktop from pants.util.dirutil import safe_mkdir, safe_walk from pants.util.memo import memoized_property Jvmdoc = namedtuple("Jvmdoc", ["tool_name", "product_type"]) # TODO: Shouldn't this be a NailgunTask? # TODO(John Sirois): The --skip flag supports the JarPublish task and is an abstraction leak. # It allows folks doing a local-publish to skip an expensive and un-needed step. # Remove this flag and instead support conditional requirements being registered against # the round manager. This may require incremental or windowed flag parsing that happens bit by # bit as tasks are recursively prepared vs. the current all-at once style. 
class JvmdocGen(SkipAndTransitiveOptionsRegistrar, HasSkipAndTransitiveOptionsMixin, JvmTask): @classmethod def jvmdoc(cls): """Subclasses should return their Jvmdoc configuration.""" raise NotImplementedError() @classmethod def register_options(cls, register): super().register_options(register) tool_name = cls.jvmdoc().tool_name register( "--include-codegen", type=bool, fingerprint=True, help=f"Create {tool_name} for generated code.", ) register( "--combined", type=bool, fingerprint=True, help="Generate {0} for all targets combined, instead of each target " "individually.".format(tool_name), ) register( "--open", type=bool, help=f"Open the generated {tool_name} in a browser (implies --combined).", ) register( "--ignore-failure", type=bool, fingerprint=True, help=f"Do not consider {tool_name} errors to be build errors.", ) register( "--exclude-patterns", type=list, default=[], fingerprint=True, help="Patterns for targets to be excluded from doc generation.", ) @classmethod def product_types(cls): return [cls.jvmdoc().product_type] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) options = self.get_options() self._include_codegen = options.include_codegen self.open = options.open self.combined = self.open or options.combined self.ignore_failure = options.ignore_failure @memoized_property def _exclude_patterns(self): return [re.compile(x) for x in set(self.get_options().exclude_patterns or [])] def generate_doc(self, language_predicate, create_jvmdoc_command): """Generate an execute method given a language predicate and command to create documentation. 
language_predicate: a function that accepts a target and returns True if the target is of that language create_jvmdoc_command: (classpath, directory, *targets) -> command (string) that will generate documentation documentation for targets """ catalog = self.context.products.isrequired(self.jvmdoc().product_type) if catalog and self.combined: raise TaskError( f"Cannot provide {self.jvmdoc().product_type} target mappings for combined output" ) def docable(target): if not language_predicate(target): self.context.log.debug( f"Skipping [{target.address.spec}] because it is does not pass the language predicate" ) return False if not self._include_codegen and target.is_synthetic: self.context.log.debug( f"Skipping [{target.address.spec}] because it is a synthetic target" ) return False for pattern in self._exclude_patterns: if pattern.search(target.address.spec): self.context.log.debug( f"Skipping [{target.address.spec}] because it matches exclude pattern '{pattern.pattern}'" ) return False return True targets = self.get_targets(predicate=docable) if not targets: return with self.invalidated(targets, invalidate_dependents=self.combined) as invalidation_check: def find_invalid_targets(): invalid_targets = set() for vt in invalidation_check.invalid_vts: invalid_targets.update(vt.targets) return invalid_targets invalid_targets = list(find_invalid_targets()) if invalid_targets: if self.combined: self._generate_combined(targets, create_jvmdoc_command) else: self._generate_individual(invalid_targets, create_jvmdoc_command) if self.open and self.combined: try: desktop.ui_open(os.path.join(self.workdir, "combined", "index.html")) except desktop.OpenError as e: raise TaskError(e) if catalog: for target in targets: gendir = self._gendir(target) jvmdocs = [] for root, dirs, files in safe_walk(gendir): jvmdocs.extend(os.path.relpath(os.path.join(root, f), gendir) for f in files) self.context.products.get(self.jvmdoc().product_type).add(target, gendir, jvmdocs) def 
_generate_combined(self, targets, create_jvmdoc_command): gendir = os.path.join(self.workdir, "combined") if targets: classpath = self.classpath(targets, include_scopes=Scopes.JVM_COMPILE_SCOPES) safe_mkdir(gendir, clean=True) command = create_jvmdoc_command(classpath, gendir, *targets) if command: self.context.log.debug( f"Running create_jvmdoc in {gendir} with {' '.join(command)}" ) result, gendir = create_jvmdoc(command, gendir) self._handle_create_jvmdoc_result(targets, result, command) def _generate_individual(self, targets, create_jvmdoc_command): jobs = {} for target in targets: gendir = self._gendir(target) classpath = self.classpath([target], include_scopes=Scopes.JVM_COMPILE_SCOPES) command = create_jvmdoc_command(classpath, gendir, target) if command: jobs[gendir] = (target, command) if jobs: # Use ThreadPool as there may be dangling processes that cause identical run id and # then buildstats error downstream. https://github.com/pantsbuild/pants/issues/6785 with contextlib.closing( ThreadPool(processes=min(len(jobs), multiprocessing.cpu_count())) ) as pool: # map would be a preferable api here but fails after the 1st batch with an internal: # ... 
# File "...src/python/pants/backend/jvm/tasks/jar_create.py", line 170, in javadocjar # pool.map(createjar, jobs) # File "...lib/python2.6/multiprocessing/pool.py", line 148, in map # return self.map_async(func, iterable, chunksize).get() # File "...lib/python2.6/multiprocessing/pool.py", line 422, in get # raise self._value # NameError: global name 'self' is not defined futures = [] self.context.log.debug( "Begin multiprocessing section; output may be misordered or garbled" ) try: for gendir, (target, command) in jobs.items(): self.context.log.debug( "Running create_jvmdoc in {} with {}".format(gendir, " ".join(command)) ) futures.append(pool.apply_async(create_jvmdoc, args=(command, gendir))) for future in futures: result, gendir = future.get() target, command = jobs[gendir] self._handle_create_jvmdoc_result([target], result, command) finally: # In the event of an exception, we want to call terminate() because otherwise # we get errors on exit when multiprocessing tries to do it, because what # is dead may never die. pool.terminate() self.context.log.debug("End multiprocessing section") def _handle_create_jvmdoc_result(self, targets, result, command): if result != 0: targetlist = ", ".join(target.address.spec for target in targets) message = "Failed to process {} for {} [{}]: {}".format( self.jvmdoc().tool_name, targetlist, result, " ".join(command) ) if self.ignore_failure: self.context.log.warn(message) else: raise TaskError(message) def _gendir(self, target): return os.path.join(self.workdir, target.id) def create_jvmdoc(command, gendir): try: safe_mkdir(gendir, clean=True) process = subprocess.Popen(command) result = process.wait() return result, gendir except OSError: return 1, gendir
apache-2.0