code
stringlengths
3
1.05M
repo_name
stringlengths
4
116
path
stringlengths
4
991
language
stringclasses
9 values
license
stringclasses
15 values
size
int32
3
1.05M
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test base for tf.keras Models in multi-worker mode."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import threading

# pylint: disable=g-direct-tensorflow-import
from tensorflow.contrib.distribute.python import collective_all_reduce_strategy as collective_strategy
from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.eager import context
from tensorflow.python.platform import test

# Keep a reference to the real std-server launcher so the mock installed by
# _make_mock_run_std_server can delegate to it.
_original_run_std_server = dc._run_std_server  # pylint: disable=protected-access

# Used as a decorator on test methods.
run_sync_strategies = combinations.generate(
    combinations.combine(
        mode=['graph'],
        strategy_cls=[
            collective_strategy.CollectiveAllReduceStrategy,
        ],
        required_gpus=[0, 1]))

# Used as a decorator on test methods.
run_async_strategies = combinations.generate(
    combinations.combine(
        mode=['graph'],
        strategy_cls=[parameter_server_strategy.ParameterServerStrategy],
        required_gpus=[0, 1]))


def get_strategy_object(strategy_cls):
  """Instantiates `strategy_cls` using every GPU visible to this process."""
  return strategy_cls(num_gpus_per_worker=context.num_gpus())


# TODO(omalleyt): Merge with keras_multiworker_callback_test
class KerasIndependentWorkerTestBase(
    multi_worker_test_base.IndependentWorkerTestBase):
  """Test base for simulating Keras Multi-Worker in threads."""

  def _make_mock_run_std_server(self):
    """Returns a replacement for `dc._run_std_server` that syncs on startup.

    The returned function starts the real std server, then blocks on
    `self._barrier` the first time it runs in each thread so that every
    task's server is up before any session work begins.
    """
    thread_local = threading.local()

    def _mock_run_std_server(*args, **kwargs):
      ret = _original_run_std_server(*args, **kwargs)
      # Wait for all std servers to be brought up in order to reduce the chance
      # of remote sessions taking local ports that have been assigned to std
      # servers. Only call this barrier the first time this function is run for
      # each thread.
      if not getattr(thread_local, 'server_started', False):
        self._barrier.wait()
      thread_local.server_started = True
      return ret

    return _mock_run_std_server

  def run_independent_workers(self,
                              worker_fn,
                              strategy_cls,
                              num_workers,
                              num_ps=None,
                              **kwargs):
    """Runs `worker_fn` under `strategy_cls` in one thread per cluster task.

    Args:
      worker_fn: callable executed inside each worker thread, within the
        strategy's scope.
      strategy_cls: the `tf.distribute` strategy class to instantiate.
      num_workers: number of worker tasks in the simulated cluster.
      num_ps: optional number of parameter-server tasks.
      **kwargs: forwarded to both `run_multiple_tasks_in_threads` and
        `worker_fn`.
    """
    cluster_spec = multi_worker_test_base.create_cluster_spec(
        num_workers=num_workers, num_ps=num_ps)
    # One barrier slot per task so every std server is up before any session
    # connects (see _make_mock_run_std_server).
    self._barrier = dc._Barrier(num_workers + (num_ps or 0))  # pylint: disable=protected-access

    def _worker_fn(**kwargs):
      """Runs the worker function in a thread."""
      with test.mock.patch.object(dc, '_run_std_server',
                                  self._make_mock_run_std_server()):
        strategy = get_strategy_object(strategy_cls)
        with strategy.scope():
          return worker_fn(**kwargs)

    threads = self.run_multiple_tasks_in_threads(_worker_fn, cluster_spec,
                                                 **kwargs)
    # A separate strategy instance is created here only to inspect whether the
    # strategy runs between-graph; the per-thread instances do the real work.
    strategy = get_strategy_object(strategy_cls)
    if strategy.extended.experimental_between_graph:
      threads_to_join = threads.get('chief', []) + threads.get('worker', [])
    else:
      # In-graph replication: only the task that builds the graph needs
      # joining (chief if present, otherwise the first worker).
      threads_to_join = [
          threads['chief'][0] if 'chief' in threads else threads['worker'][0]
      ]
    self.join_independent_workers(threads_to_join)
ghchinoy/tensorflow
tensorflow/contrib/distribute/python/keras_multi_worker_test_base.py
Python
apache-2.0
4,173
(function () {
    "use strict";

    var fs = require("fs"),
        fse = require("fs-extra"),
        path = require("path"),
        readline = require("readline");

    var _domainManager;

    /**
     * Registers the "importNode" Brackets node domain and its commands for
     * listing, resolving and copying shared WASM project files.
     * @param {DomainManager} domainManager - Brackets domain manager.
     */
    function init(domainManager) {
        _domainManager = domainManager;
        if (!_domainManager.hasDomain("importNode")) {
            _domainManager.registerDomain("importNode", {major: 0, minor: 1});
        }

        // Get shared project list from the "share" directory.
        function getSharedProject(callback) {
            var sharedPath = path.join(process.cwd(), "share");
            fs.readdir(sharedPath, function(error, files) {
                // NOTE(review): a readdir error is dropped here and "files"
                // is passed through as undefined -- confirm whether callers
                // rely on that before propagating the error.
                callback(null, files);
            });
        }

        // Resolves the built .wasm/.js pair for a shared project by reading
        // the TARGET name out of its makefile.
        function getSharedFile(projectName, callback) {
            var fileName;
            var sharedPath = path.join(process.cwd(), "share", projectName);

            // Get target name from makefile
            var makePath = path.join(sharedPath, "makefile");
            if (fs.existsSync(makePath)) {
                var lineReader = readline.createInterface({
                    input: fs.createReadStream(makePath)
                });

                lineReader.on("line", function(line) {
                    if (line.startsWith("TARGET")) {
                        var file = line.split("=")[1].trim();
                        fileName = file.split(".")[0];
                    }
                });

                lineReader.on("close", function() {
                    // FIXME: We only check whether the .wasm and .js outputs
                    // exist. We need a way to find the correct result file.
                    var wasmPath = path.join(sharedPath, fileName + ".wasm");
                    var loaderPath = path.join(sharedPath, fileName + ".js");
                    var fileList = [];
                    if (fs.existsSync(wasmPath) && fs.existsSync(loaderPath)) {
                        fileList.push(fileName + ".wasm");
                        fileList.push(fileName + ".js");
                        callback(null, fileList);
                    } else {
                        callback("Not found wasm");
                    }
                });
            } else {
                callback("Not found makefile");
            }
        }

        // Copies the given shared files into the target project.
        function copySharedFile(projectName, fileList, targetId, callback) {
            var sharedPath = path.join(process.cwd(), "share", projectName);
            var destPath = path.join(process.cwd(), "projects", targetId);
            // A plain for-loop is used (instead of forEach) so that a copy
            // failure aborts the whole operation: returning from a forEach
            // callback does not stop iteration, and the previous version
            // could therefore invoke "callback" a second time with success
            // after already reporting the error.
            for (var i = 0; i < fileList.length; i++) {
                var file = fileList[i];
                var sourcePath = path.join(sharedPath, file);
                if (fs.existsSync(sourcePath)) {
                    var destFilePath = path.join(destPath, file);
                    try {
                        fse.copySync(sourcePath, destFilePath);
                    } catch (error) {
                        return callback("Fail to copy files");
                    }
                }
            }
            callback(null);
        }

        // Copies a single file within a project (COPY command).
        function copyFile(projectId, src, name, dest, callback) {
            const sourcePath = path.join(process.cwd(), 'projects', projectId, src, name);
            const destPath = path.join(process.cwd(), 'projects', projectId, dest, name);

            fse.copy(sourcePath, destPath, (err) => {
                if (err) {
                    return callback(err);
                }
                callback();
            });
        }

        // Moves a single file within a project (CUT command).
        function moveFile(projectId, src, name, dest, callback) {
            const sourcePath = path.join(process.cwd(), 'projects', projectId, src, name);
            const destPath = path.join(process.cwd(), 'projects', projectId, dest, name);

            fse.move(sourcePath, destPath, (err) => {
                if (err) {
                    return callback(err);
                }
                callback();
            });
        }

        _domainManager.registerCommand(
            "importNode",
            "getSharedProject",
            getSharedProject,
            true,
            "Get Shared Project",
            null,
            [
                {name: "data", type: "array"}
            ]
        );

        _domainManager.registerCommand(
            "importNode",
            "getSharedFile",
            getSharedFile,
            true,
            "Get Shared File",
            [
                {name: "projectName", type: "string"}
            ],
            [
                {name: "result", type: "array"}
            ]
        );

        _domainManager.registerCommand(
            "importNode",
            "copySharedFile",
            copySharedFile,
            true,
            "Copy Shared File",
            [
                {name: "projectName", type: "string"},
                {name: "fileList", type: "array"},
                {name: "targetId", type: "string"}
            ],
            []
        );

        _domainManager.registerCommand(
            "importNode",
            "COPY",
            copyFile,
            true,
            "Copy File",
            [
                {name: "projectId", type: "string"},
                {name: "src", type: "string"},
                {name: "name", type: "string"},
                {name: "dest", type: "string"}
            ],
            []
        );

        _domainManager.registerCommand(
            "importNode",
            "CUT",
            moveFile,
            true,
            "Move File",
            [
                {name: "projectId", type: "string"},
                {name: "src", type: "string"},
                {name: "name", type: "string"},
                {name: "dest", type: "string"}
            ],
            []
        );
    }

    exports.init = init;
}());
hyundukkim/WATT
libs/brackets-server/embedded-ext/importfile/node/ImportDomain.js
JavaScript
apache-2.0
5,812
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.beam.sdk.util;

import static com.google.common.base.Preconditions.checkState;

import com.google.auth.Credentials;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.protobuf.ByteString;
import com.google.protobuf.Timestamp;
import com.google.pubsub.v1.AcknowledgeRequest;
import com.google.pubsub.v1.DeleteSubscriptionRequest;
import com.google.pubsub.v1.DeleteTopicRequest;
import com.google.pubsub.v1.GetSubscriptionRequest;
import com.google.pubsub.v1.ListSubscriptionsRequest;
import com.google.pubsub.v1.ListSubscriptionsResponse;
import com.google.pubsub.v1.ListTopicsRequest;
import com.google.pubsub.v1.ListTopicsResponse;
import com.google.pubsub.v1.ModifyAckDeadlineRequest;
import com.google.pubsub.v1.PublishRequest;
import com.google.pubsub.v1.PublishResponse;
import com.google.pubsub.v1.PublisherGrpc;
import com.google.pubsub.v1.PublisherGrpc.PublisherBlockingStub;
import com.google.pubsub.v1.PubsubMessage;
import com.google.pubsub.v1.PullRequest;
import com.google.pubsub.v1.PullResponse;
import com.google.pubsub.v1.ReceivedMessage;
import com.google.pubsub.v1.SubscriberGrpc;
import com.google.pubsub.v1.SubscriberGrpc.SubscriberBlockingStub;
import com.google.pubsub.v1.Subscription;
import com.google.pubsub.v1.Topic;
import io.grpc.Channel;
import io.grpc.ClientInterceptors;
import io.grpc.ManagedChannel;
import io.grpc.auth.ClientAuthInterceptor;
import io.grpc.netty.GrpcSslContexts;
import io.grpc.netty.NegotiationType;
import io.grpc.netty.NettyChannelBuilder;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
import org.apache.beam.sdk.options.GcpOptions;
import org.apache.beam.sdk.options.PubsubOptions;

/**
 * A helper class for talking to Pubsub via grpc.
 *
 * <p>CAUTION: Currently uses the application default credentials and does not respect any
 * credentials-related arguments in {@link GcpOptions}.
 */
public class PubsubGrpcClient extends PubsubClient {
  private static final String PUBSUB_ADDRESS = "pubsub.googleapis.com";
  private static final int PUBSUB_PORT = 443;
  private static final int LIST_BATCH_SIZE = 1000;
  private static final int DEFAULT_TIMEOUT_S = 15;

  /** Factory that builds a TLS netty channel and wraps it in a {@link PubsubGrpcClient}. */
  private static class PubsubGrpcClientFactory implements PubsubClientFactory {
    @Override
    public PubsubClient newClient(
        @Nullable String timestampLabel, @Nullable String idLabel, PubsubOptions options)
        throws IOException {
      ManagedChannel channel = NettyChannelBuilder
          .forAddress(PUBSUB_ADDRESS, PUBSUB_PORT)
          .negotiationType(NegotiationType.TLS)
          .sslContext(GrpcSslContexts.forClient().ciphers(null).build())
          .build();
      return new PubsubGrpcClient(timestampLabel, idLabel, DEFAULT_TIMEOUT_S,
                                  channel, options.getGcpCredential());
    }

    @Override
    public String getKind() {
      return "Grpc";
    }
  }

  /**
   * Factory for creating Pubsub clients using gRCP transport.
   */
  public static final PubsubClientFactory FACTORY = new PubsubGrpcClientFactory();

  /**
   * Timeout for grpc calls (in s).
   */
  private final int timeoutSec;

  /**
   * Underlying netty channel, or {@literal null} if closed.
   */
  @Nullable
  private ManagedChannel publisherChannel;

  /**
   * Credentials determined from options and environment.
   */
  private final Credentials credentials;

  /**
   * Label to use for custom timestamps, or {@literal null} if should use Pubsub publish time
   * instead.
   */
  @Nullable
  private final String timestampLabel;

  /**
   * Label to use for custom ids, or {@literal null} if should use Pubsub provided ids.
   */
  @Nullable
  private final String idLabel;

  /**
   * Cached stubs, or null if not cached.
   */
  @Nullable
  private PublisherGrpc.PublisherBlockingStub cachedPublisherStub;
  // @Nullable added for consistency with cachedPublisherStub above; both are
  // cleared on close().
  @Nullable
  private SubscriberGrpc.SubscriberBlockingStub cachedSubscriberStub;

  @VisibleForTesting
  PubsubGrpcClient(
      @Nullable String timestampLabel,
      @Nullable String idLabel,
      int timeoutSec,
      ManagedChannel publisherChannel,
      Credentials credentials) {
    this.timestampLabel = timestampLabel;
    this.idLabel = idLabel;
    this.timeoutSec = timeoutSec;
    this.publisherChannel = publisherChannel;
    this.credentials = credentials;
  }

  /**
   * Gracefully close the underlying netty channel.
   */
  @Override
  public void close() {
    if (publisherChannel == null) {
      // Already closed.
      return;
    }
    // Can gc the underlying stubs.
    cachedPublisherStub = null;
    cachedSubscriberStub = null;
    // Mark the client as having been closed before going further
    // in case we have an exception from the channel.
    ManagedChannel publisherChannel = this.publisherChannel;
    this.publisherChannel = null;
    // Gracefully shutdown the channel.
    publisherChannel.shutdown();
    try {
      publisherChannel.awaitTermination(timeoutSec, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
      // Ignore.
      Thread.currentThread().interrupt();
    }
  }

  /**
   * Return channel with interceptor for returning credentials.
   */
  private Channel newChannel() throws IOException {
    checkState(publisherChannel != null, "PubsubGrpcClient has been closed");
    ClientAuthInterceptor interceptor =
        new ClientAuthInterceptor(credentials, Executors.newSingleThreadExecutor());
    return ClientInterceptors.intercept(publisherChannel, interceptor);
  }

  /**
   * Return a stub for making a publish request with a timeout.
   */
  private PublisherBlockingStub publisherStub() throws IOException {
    if (cachedPublisherStub == null) {
      cachedPublisherStub = PublisherGrpc.newBlockingStub(newChannel());
    }
    return cachedPublisherStub.withDeadlineAfter(timeoutSec, TimeUnit.SECONDS);
  }

  /**
   * Return a stub for making a subscribe request with a timeout.
   */
  private SubscriberBlockingStub subscriberStub() throws IOException {
    if (cachedSubscriberStub == null) {
      cachedSubscriberStub = SubscriberGrpc.newBlockingStub(newChannel());
    }
    return cachedSubscriberStub.withDeadlineAfter(timeoutSec, TimeUnit.SECONDS);
  }

  @Override
  public int publish(TopicPath topic, List<OutgoingMessage> outgoingMessages)
      throws IOException {
    PublishRequest.Builder request = PublishRequest.newBuilder()
                                                   .setTopic(topic.getPath());
    for (OutgoingMessage outgoingMessage : outgoingMessages) {
      PubsubMessage.Builder message =
          PubsubMessage.newBuilder()
                       .setData(ByteString.copyFrom(outgoingMessage.elementBytes));

      if (timestampLabel != null) {
        message.getMutableAttributes()
               .put(timestampLabel, String.valueOf(outgoingMessage.timestampMsSinceEpoch));
      }

      if (idLabel != null && !Strings.isNullOrEmpty(outgoingMessage.recordId)) {
        message.getMutableAttributes().put(idLabel, outgoingMessage.recordId);
      }

      request.addMessages(message);
    }

    PublishResponse response = publisherStub().publish(request.build());
    return response.getMessageIdsCount();
  }

  @Override
  public List<IncomingMessage> pull(
      long requestTimeMsSinceEpoch,
      SubscriptionPath subscription,
      int batchSize,
      boolean returnImmediately) throws IOException {
    PullRequest request = PullRequest.newBuilder()
                                     .setSubscription(subscription.getPath())
                                     .setReturnImmediately(returnImmediately)
                                     .setMaxMessages(batchSize)
                                     .build();
    PullResponse response = subscriberStub().pull(request);
    if (response.getReceivedMessagesCount() == 0) {
      return ImmutableList.of();
    }
    List<IncomingMessage> incomingMessages = new ArrayList<>(response.getReceivedMessagesCount());
    for (ReceivedMessage message : response.getReceivedMessagesList()) {
      PubsubMessage pubsubMessage = message.getMessage();
      @Nullable Map<String, String> attributes = pubsubMessage.getAttributes();

      // Payload.
      byte[] elementBytes = pubsubMessage.getData().toByteArray();

      // Timestamp.
      String pubsubTimestampString = null;
      Timestamp timestampProto = pubsubMessage.getPublishTime();
      if (timestampProto != null) {
        // Convert the proto Timestamp (seconds since epoch + nanos-of-second)
        // to milliseconds since epoch. The previous expression
        // (getSeconds() + getNanos() / 1000L) added seconds to microseconds
        // and thus produced a value in no consistent unit.
        pubsubTimestampString = String.valueOf(
            timestampProto.getSeconds() * 1000L + timestampProto.getNanos() / 1000000L);
      }
      long timestampMsSinceEpoch =
          extractTimestamp(timestampLabel, pubsubTimestampString, attributes);

      // Ack id.
      String ackId = message.getAckId();
      checkState(!Strings.isNullOrEmpty(ackId));

      // Record id, if any.
      @Nullable String recordId = null;
      if (idLabel != null && attributes != null) {
        recordId = attributes.get(idLabel);
      }
      if (Strings.isNullOrEmpty(recordId)) {
        // Fall back to the Pubsub provided message id.
        recordId = pubsubMessage.getMessageId();
      }

      incomingMessages.add(new IncomingMessage(elementBytes, timestampMsSinceEpoch,
                                               requestTimeMsSinceEpoch, ackId, recordId));
    }
    return incomingMessages;
  }

  @Override
  public void acknowledge(SubscriptionPath subscription, List<String> ackIds)
      throws IOException {
    AcknowledgeRequest request = AcknowledgeRequest.newBuilder()
                                                   .setSubscription(subscription.getPath())
                                                   .addAllAckIds(ackIds)
                                                   .build();
    subscriberStub().acknowledge(request); // ignore Empty result.
  }

  @Override
  public void modifyAckDeadline(
      SubscriptionPath subscription, List<String> ackIds, int deadlineSeconds)
      throws IOException {
    ModifyAckDeadlineRequest request =
        ModifyAckDeadlineRequest.newBuilder()
                                .setSubscription(subscription.getPath())
                                .addAllAckIds(ackIds)
                                .setAckDeadlineSeconds(deadlineSeconds)
                                .build();
    subscriberStub().modifyAckDeadline(request); // ignore Empty result.
  }

  @Override
  public void createTopic(TopicPath topic) throws IOException {
    Topic request = Topic.newBuilder()
                         .setName(topic.getPath())
                         .build();
    publisherStub().createTopic(request); // ignore Topic result.
  }

  @Override
  public void deleteTopic(TopicPath topic) throws IOException {
    DeleteTopicRequest request = DeleteTopicRequest.newBuilder()
                                                   .setTopic(topic.getPath())
                                                   .build();
    publisherStub().deleteTopic(request); // ignore Empty result.
  }

  @Override
  public List<TopicPath> listTopics(ProjectPath project) throws IOException {
    ListTopicsRequest.Builder request =
        ListTopicsRequest.newBuilder()
                         .setProject(project.getPath())
                         .setPageSize(LIST_BATCH_SIZE);
    ListTopicsResponse response = publisherStub().listTopics(request.build());
    if (response.getTopicsCount() == 0) {
      return ImmutableList.of();
    }
    List<TopicPath> topics = new ArrayList<>(response.getTopicsCount());
    // Page through results until the server reports no further page token.
    while (true) {
      for (Topic topic : response.getTopicsList()) {
        topics.add(topicPathFromPath(topic.getName()));
      }
      if (response.getNextPageToken().isEmpty()) {
        break;
      }
      request.setPageToken(response.getNextPageToken());
      response = publisherStub().listTopics(request.build());
    }
    return topics;
  }

  @Override
  public void createSubscription(
      TopicPath topic, SubscriptionPath subscription,
      int ackDeadlineSeconds) throws IOException {
    Subscription request = Subscription.newBuilder()
                                       .setTopic(topic.getPath())
                                       .setName(subscription.getPath())
                                       .setAckDeadlineSeconds(ackDeadlineSeconds)
                                       .build();
    subscriberStub().createSubscription(request); // ignore Subscription result.
  }

  @Override
  public void deleteSubscription(SubscriptionPath subscription) throws IOException {
    DeleteSubscriptionRequest request =
        DeleteSubscriptionRequest.newBuilder()
                                 .setSubscription(subscription.getPath())
                                 .build();
    subscriberStub().deleteSubscription(request); // ignore Empty result.
  }

  @Override
  public List<SubscriptionPath> listSubscriptions(ProjectPath project, TopicPath topic)
      throws IOException {
    ListSubscriptionsRequest.Builder request =
        ListSubscriptionsRequest.newBuilder()
                                .setProject(project.getPath())
                                .setPageSize(LIST_BATCH_SIZE);
    ListSubscriptionsResponse response = subscriberStub().listSubscriptions(request.build());
    if (response.getSubscriptionsCount() == 0) {
      return ImmutableList.of();
    }
    List<SubscriptionPath> subscriptions = new ArrayList<>(response.getSubscriptionsCount());
    // Page through results, keeping only subscriptions attached to `topic`.
    while (true) {
      for (Subscription subscription : response.getSubscriptionsList()) {
        if (subscription.getTopic().equals(topic.getPath())) {
          subscriptions.add(subscriptionPathFromPath(subscription.getName()));
        }
      }
      if (response.getNextPageToken().isEmpty()) {
        break;
      }
      request.setPageToken(response.getNextPageToken());
      response = subscriberStub().listSubscriptions(request.build());
    }
    return subscriptions;
  }

  @Override
  public int ackDeadlineSeconds(SubscriptionPath subscription) throws IOException {
    GetSubscriptionRequest request =
        GetSubscriptionRequest.newBuilder()
                              .setSubscription(subscription.getPath())
                              .build();
    Subscription response = subscriberStub().getSubscription(request);
    return response.getAckDeadlineSeconds();
  }

  @Override
  public boolean isEOF() {
    return false;
  }
}
yafengguo/Apache-beam
sdks/java/core/src/main/java/org/apache/beam/sdk/util/PubsubGrpcClient.java
Java
apache-2.0
15,532
package com.planet_ink.coffee_mud.MOBS;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.core.collections.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;

import java.util.*;

/*
   Copyright 2013-2015 Bo Zimmerman

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/
/**
 * A stock squirrel mob: a small level-1 neutral animal with the Squirrel race.
 */
public class Squirrel extends StdMOB
{
	@Override
	public String ID()
	{
		return "Squirrel";
	}

	public Squirrel()
	{
		super();
		final Random rand = new Random(System.currentTimeMillis());

		// Identity and appearance.
		username="a squirrel";
		setDescription("It\\`s small, cute, and quick with a big expressive tail.");
		setDisplayText("A squirrel darts around.");
		CMLib.factions().setAlignment(this,Faction.Align.NEUTRAL);
		setMoney(0);

		// Physical stats; weight is lightly randomized per instance.
		basePhyStats.setWeight(4450 + Math.abs(rand.nextInt() % 5));
		setWimpHitPoint(2);
		basePhyStats().setDamage(2);

		// Character stats and race must be set before racing starts.
		baseCharStats().setStat(CharStats.STAT_INTELLIGENCE,1);
		baseCharStats().setMyRace(CMClass.getRace("Squirrel"));
		baseCharStats().getMyRace().startRacing(this,false);

		basePhyStats().setAbility(0);
		basePhyStats().setLevel(1);
		basePhyStats().setArmor(90);

		// Hit points are rolled from the level set above.
		baseState.setHitPoints(CMLib.dice().roll(basePhyStats().level(),11,basePhyStats().level()));

		recoverMaxState();
		resetToMaxState();
		recoverPhyStats();
		recoverCharStats();
	}
}
Tycheo/coffeemud
com/planet_ink/coffee_mud/MOBS/Squirrel.java
Java
apache-2.0
2,521
/*******************************************************************************
 * Copyright 2013
 * Ubiquitous Knowledge Processing (UKP) Lab and FG Language Technology
 * Technische Universität Darmstadt
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ******************************************************************************/
package de.tudarmstadt.ukp.clarin.webanno.brat.controller;

/**
 * Signals that an arc annotation crosses a sentence boundary, i.e. its
 * endpoints do not lie within the same sentence.
 *
 * @author Seid Muhie Yimam
 */
public class ArcCrossedMultipleSentenceException
    extends BratAnnotationException
{
    private static final long serialVersionUID = 1280015349963924638L;

    public ArcCrossedMultipleSentenceException(String message)
    {
        super(message);
    }
}
debovis/webanno
webanno-brat/src/main/java/de/tudarmstadt/ukp/clarin/webanno/brat/controller/ArcCrossedMultipleSentenceException.java
Java
apache-2.0
1,274
/*
 * Copyright 2000-2016 JetBrains s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intellij.openapi.roots.ui.configuration.libraryEditor;

import com.intellij.notification.Notification;
import com.intellij.notification.NotificationType;
import com.intellij.openapi.application.Application;
import com.intellij.openapi.application.ApplicationBundle;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.application.ModalityState;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.progress.ProgressIndicator;
import com.intellij.openapi.progress.ProgressManager;
import com.intellij.openapi.progress.Task;
import com.intellij.openapi.progress.util.ProgressIndicatorBase;
import com.intellij.openapi.ui.Messages;
import com.intellij.openapi.util.SystemInfo;
import com.intellij.openapi.vfs.VfsUtilCore;
import com.intellij.openapi.vfs.VirtualFile;
import com.sun.jna.platform.mac.XAttrUtil;
import org.jetbrains.annotations.NotNull;

import java.io.IOException;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.stream.Stream;

/**
 * Files downloaded from Internet are marked as 'quarantined' by OS X.
 * For such files opening urls of type file://path#fragment via
 * <a href="https://developer.apple.com/library/mac/documentation/Carbon/Conceptual/LaunchServicesConcepts/LSCIntro/LSCIntro.html">
 * Launch Services API
 * </a>
 * (used internally by {@link java.awt.Desktop#browse(URI)}) won't work as expected (fragment will be ignored on file opening).
 * This class allows to clear quarantine status from folder containing Javadoc, if confirmed by user.
 */
public class JavadocQuarantineStatusCleaner {
  private static final Logger LOG = Logger.getInstance(JavadocQuarantineStatusCleaner.class);

  // Extended attribute OS X sets on downloaded files.
  private static final String QUARANTINE_ATTRIBUTE = "com.apple.quarantine";

  /**
   * Checks whether the given local Javadoc folder carries the OS X quarantine
   * attribute and, if so, asks the user (on the EDT) whether to clear it.
   * Must be called off the dispatch thread; no-op on non-Mac systems and for
   * non-local or non-directory files.
   */
  public static void cleanIfNeeded(@NotNull VirtualFile javadocFolder) {
    Application application = ApplicationManager.getApplication();
    // The xattr probe below does synchronous file I/O, so this must run in a
    // background thread.
    assert !application.isDispatchThread();
    if (!SystemInfo.isMac || !javadocFolder.isInLocalFileSystem() || !javadocFolder.isDirectory()) return;
    String folderPath = VfsUtilCore.virtualToIoFile(javadocFolder).getAbsolutePath();
    // UserDefinedFileAttributeView isn't supported by JDK for HFS+ extended attributes on OS X, so we resort to JNA
    if (XAttrUtil.getXAttr(folderPath, QUARANTINE_ATTRIBUTE) == null) return;
    // Ask for confirmation on the EDT; ModalityState.any() so the dialog shows
    // even while another modal dialog is up.
    application.invokeLater(() -> {
      int result = Messages.showYesNoDialog(ApplicationBundle.message("quarantine.dialog.message"),
                                            ApplicationBundle.message("quarantine.dialog.title"), null);
      if (result == Messages.YES) {
        cleanQuarantineStatusInBackground(folderPath);
      }
    }, ModalityState.any());
  }

  /**
   * Recursively removes the quarantine attribute from every file under
   * {@code folderPath} as a cancellable background task, notifying the user
   * on failure.
   */
  private static void cleanQuarantineStatusInBackground(@NotNull String folderPath) {
    ProgressIndicatorBase progressIndicator = new ProgressIndicatorBase();
    String message = ApplicationBundle.message("quarantine.clean.progress", folderPath);
    ProgressManager.getInstance().runProcessWithProgressAsynchronously(new Task.Backgroundable(null, message) {
      @Override
      public void run(@NotNull ProgressIndicator indicator) {
        // Walk the whole tree; the stream is closed by try-with-resources.
        try(Stream<Path> s = Files.walk(Paths.get(folderPath))) {
          s.forEach(p -> {
            ProgressManager.checkCanceled();
            XAttrUtil.removeXAttr(p.toFile().getAbsolutePath(), QUARANTINE_ATTRIBUTE);
          });
        }
        catch (IOException e) {
          // Re-thrown so the platform routes it to onError below.
          throw new RuntimeException(e);
        }
      }

      @Override
      public void onError(@NotNull Exception error) {
        LOG.warn(error);
        new Notification(ApplicationBundle.message("quarantine.error.group"),
                         ApplicationBundle.message("quarantine.error.title"),
                         ApplicationBundle.message("quarantine.error.message"),
                         NotificationType.WARNING).notify(null);
      }
    }, progressIndicator);
  }
}
michaelgallacher/intellij-community
java/idea-ui/src/com/intellij/openapi/roots/ui/configuration/libraryEditor/JavadocQuarantineStatusCleaner.java
Java
apache-2.0
4,651
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * Autogenerated by Thrift Compiler (0.9.2)
 *
 * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
 *  @generated
 */
#include "workspace_model_constants.h"

namespace apache { namespace airavata { namespace model { namespace workspace {

// Global instance of the Thrift-generated constants holder for this model.
const workspace_modelConstants g_workspace_model_constants;

// The workspace model declares no constants, so there is nothing to
// initialize here.
workspace_modelConstants::workspace_modelConstants() {
}

}}}} // namespace
hasinitg/airavata
airavata-api/airavata-client-sdks/airavata-cpp-sdk/src/main/resources/lib/airavata/workspace_model_constants.cpp
C++
apache-2.0
1,201
using System;

using Magnum.StateMachine;

using MassTransit.Saga;

using Serilog;

namespace MassTransit.Persistence.MongoDb.Tests.Sagas
{
    /// <summary>
    /// Test saga modeling a simple auction: created via <see cref="Create"/>, then accepts
    /// <see cref="PlaceBid"/> messages while in the <see cref="Open"/> state. State and event
    /// members are static by Magnum.StateMachine convention and are wired up in the static
    /// constructor's Define block.
    /// </summary>
    public class AuctionSaga : SagaStateMachine<AuctionSaga>, ISaga
    {
        public static readonly ILogger Logger = Log.Logger.ForContext<AuctionSaga>();

        static AuctionSaga()
        {
            Define(
                () =>
                {
                    // Route bid messages to the saga instance whose id matches the auction id.
                    Correlate(Bid).By((saga, message) => saga.CorrelationId == message.AuctionId);
                    Initially(
                        When(Create).Then(
                            (saga, message) =>
                            {
                                // Seed the saga from the creation message, then open the auction.
                                saga.OpeningBid = message.OpeningBid;
                                saga.OwnerEmail = message.OwnerEmail;
                                saga.Title = message.Title;
                            }).TransitionTo(Open));
                    During(Open, When(Bid).Call((saga, message) => saga.Handle(message)));
                });
        }

        public AuctionSaga(Guid correlationId)
        {
            this.CorrelationId = correlationId;
        }

        // Highest bid seen so far; null until the first bid is handled.
        public decimal? CurrentBid { get; set; }

        public string HighBidder { get; set; }

        public Guid HighBidId { get; set; }

        public decimal OpeningBid { get; set; }

        public string OwnerEmail { get; set; }

        public string Title { get; set; }

        // Machine states (static per Magnum convention).
        public static State Initial { get; set; }

        public static State Completed { get; set; }

        public static State Open { get; set; }

        public static State Closed { get; set; }

        // Machine events (static per Magnum convention).
        public static Event<CreateAuction> Create { get; set; }

        public static Event<PlaceBid> Bid { get; set; }

        public Guid CorrelationId { get; set; }

        public IServiceBus Bus { get; set; }

        /// <summary>
        /// Applies a bid: a higher (or first) bid becomes the new high bid and the previous
        /// high bidder is notified via <c>Outbid</c>; otherwise the incoming bidder is outbid.
        /// </summary>
        private void Handle(PlaceBid bid)
        {
            if (!this.CurrentBid.HasValue || bid.MaximumBid > this.CurrentBid)
            {
                if (this.HighBidder != null)
                {
                    // Tell the previous leader they have been outbid.
                    this.Bus.Publish(new Outbid(this.HighBidId));
                }

                this.CurrentBid = bid.MaximumBid;
                this.HighBidder = bid.BidderEmail;
                this.HighBidId = bid.BidId;
            }
            else
            {
                // already outbid
                this.Bus.Publish(new Outbid(bid.BidId));
            }
        }
    }
}
cwooldridge/MassTransit.Persistence.MongoDb
MassTransit.Persistence.MongoDb.Tests/Sagas/AuctionSaga.cs
C#
apache-2.0
2,517
// Copyright 2020 Red Hat, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package types import ( "github.com/coreos/go-semver/semver" "github.com/coreos/ignition/v2/config/shared/errors" "github.com/coreos/vcontext/path" "github.com/coreos/vcontext/report" ) func (v Ignition) Semver() (*semver.Version, error) { return semver.NewVersion(v.Version) } func (ic IgnitionConfig) Validate(c path.ContextPath) (r report.Report) { for i, res := range ic.Merge { r.AddOnError(c.Append("merge", i), res.validateRequiredSource()) } return } func (v Ignition) Validate(c path.ContextPath) (r report.Report) { c = c.Append("version") tv, err := v.Semver() if err != nil { r.AddOnError(c, errors.ErrInvalidVersion) return } if MaxVersion != *tv { r.AddOnError(c, errors.ErrUnknownVersion) } return }
coreos/ignition
config/v3_4_experimental/types/ignition.go
GO
apache-2.0
1,335
"""Helpers that help with state related things.""" import asyncio from collections import defaultdict import datetime as dt import logging from types import ModuleType, TracebackType from typing import Dict, Iterable, List, Optional, Type, Union from homeassistant.components.sun import STATE_ABOVE_HORIZON, STATE_BELOW_HORIZON from homeassistant.const import ( STATE_CLOSED, STATE_HOME, STATE_LOCKED, STATE_NOT_HOME, STATE_OFF, STATE_ON, STATE_OPEN, STATE_UNKNOWN, STATE_UNLOCKED, ) from homeassistant.core import Context, State from homeassistant.loader import IntegrationNotFound, async_get_integration, bind_hass import homeassistant.util.dt as dt_util from .typing import HomeAssistantType _LOGGER = logging.getLogger(__name__) class AsyncTrackStates: """ Record the time when the with-block is entered. Add all states that have changed since the start time to the return list when with-block is exited. Must be run within the event loop. """ def __init__(self, hass: HomeAssistantType) -> None: """Initialize a TrackStates block.""" self.hass = hass self.states: List[State] = [] # pylint: disable=attribute-defined-outside-init def __enter__(self) -> List[State]: """Record time from which to track changes.""" self.now = dt_util.utcnow() return self.states def __exit__( self, exc_type: Optional[Type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType], ) -> None: """Add changes states to changes list.""" self.states.extend(get_changed_since(self.hass.states.async_all(), self.now)) def get_changed_since( states: Iterable[State], utc_point_in_time: dt.datetime ) -> List[State]: """Return list of states that have been changed since utc_point_in_time.""" return [state for state in states if state.last_updated >= utc_point_in_time] @bind_hass async def async_reproduce_state( hass: HomeAssistantType, states: Union[State, Iterable[State]], blocking: bool = False, context: Optional[Context] = None, ) -> None: """Reproduce a list of states on multiple 
domains.""" if isinstance(states, State): states = [states] to_call: Dict[str, List[State]] = defaultdict(list) for state in states: to_call[state.domain].append(state) async def worker(domain: str, states_by_domain: List[State]) -> None: try: integration = await async_get_integration(hass, domain) except IntegrationNotFound: _LOGGER.warning( "Trying to reproduce state for unknown integration: %s", domain ) return try: platform: Optional[ModuleType] = integration.get_platform("reproduce_state") except ImportError: _LOGGER.warning("Integration %s does not support reproduce state", domain) return await platform.async_reproduce_states( # type: ignore hass, states_by_domain, context=context ) if to_call: # run all domains in parallel await asyncio.gather( *(worker(domain, data) for domain, data in to_call.items()) ) def state_as_number(state: State) -> float: """ Try to coerce our state to a number. Raises ValueError if this is not possible. """ if state.state in ( STATE_ON, STATE_LOCKED, STATE_ABOVE_HORIZON, STATE_OPEN, STATE_HOME, ): return 1 if state.state in ( STATE_OFF, STATE_UNLOCKED, STATE_UNKNOWN, STATE_BELOW_HORIZON, STATE_CLOSED, STATE_NOT_HOME, ): return 0 return float(state.state)
leppa/home-assistant
homeassistant/helpers/state.py
Python
apache-2.0
3,813
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package predicates

import (
	"fmt"
	"math/rand"
	"strconv"
	"sync"
	"time"

	"github.com/golang/glog"

	"k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/client-go/util/workqueue"
	v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
	v1qos "k8s.io/kubernetes/pkg/api/v1/helper/qos"
	corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
	"k8s.io/kubernetes/pkg/features"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
	priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
	schedutil "k8s.io/kubernetes/plugin/pkg/scheduler/util"
	"k8s.io/metrics/pkg/client/clientset_generated/clientset"
)

// predicatePrecomputations: Helper types/variables...

// PredicateMetadataModifier mutates a predicateMetadata in place; registered
// modifiers run once per scheduling cycle to precompute data for a predicate.
type PredicateMetadataModifier func(pm *predicateMetadata)

// Registry of precomputation hooks, keyed by predicate name, guarded by the mutex below.
var predicatePrecomputeRegisterLock sync.Mutex
var predicatePrecomputations map[string]PredicateMetadataModifier = make(map[string]PredicateMetadataModifier)

// RegisterPredicatePrecomputation registers (or replaces) the precomputation
// hook for the named predicate. Safe for concurrent use.
func RegisterPredicatePrecomputation(predicateName string, precomp PredicateMetadataModifier) {
	predicatePrecomputeRegisterLock.Lock()
	defer predicatePrecomputeRegisterLock.Unlock()
	predicatePrecomputations[predicateName] = precomp
}

// Other types for predicate functions...
type NodeInfo interface {
	GetNodeInfo(nodeID string) (*v1.Node, error)
}

type PersistentVolumeInfo interface {
	GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error)
}

// CachedPersistentVolumeInfo implements PersistentVolumeInfo
type CachedPersistentVolumeInfo struct {
	corelisters.PersistentVolumeLister
}

func (c *CachedPersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) {
	return c.Get(pvID)
}

type PersistentVolumeClaimInfo interface {
	GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error)
}

// CachedPersistentVolumeClaimInfo implements PersistentVolumeClaimInfo
type CachedPersistentVolumeClaimInfo struct {
	corelisters.PersistentVolumeClaimLister
}

// GetPersistentVolumeClaimInfo fetches the claim in specified namespace with specified name
func (c *CachedPersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error) {
	return c.PersistentVolumeClaims(namespace).Get(name)
}

// CachedNodeInfo adapts a NodeLister to the NodeInfo interface.
type CachedNodeInfo struct {
	corelisters.NodeLister
}

// GetNodeInfo returns cached data for the node 'id'.
func (c *CachedNodeInfo) GetNodeInfo(id string) (*v1.Node, error) {
	node, err := c.Get(id)

	if apierrors.IsNotFound(err) {
		return nil, fmt.Errorf("node '%v' not found", id)
	}

	if err != nil {
		return nil, fmt.Errorf("error retrieving node '%v' from cache: %v", id, err)
	}

	return node, nil
}

// Note that predicateMetadata and matchingPodAntiAffinityTerm need to be declared in the same file
// due to the way declarations are processed in predicate declaration unit tests.
type matchingPodAntiAffinityTerm struct {
	term *v1.PodAffinityTerm
	node *v1.Node
}

// predicateMetadata carries per-pod data precomputed once per scheduling cycle
// and shared across predicate invocations.
type predicateMetadata struct {
	pod                                *v1.Pod
	podBestEffort                      bool
	podRequest                         *schedulercache.Resource
	podPorts                           map[int]bool
	matchingAntiAffinityTerms          []matchingPodAntiAffinityTerm
	serviceAffinityMatchingPodList     []*v1.Pod
	serviceAffinityMatchingPodServices []*v1.Service
}

// isVolumeConflict reports whether `volume` conflicts with any volume already
// mounted by `pod` (GCE PD, AWS EBS, Ceph RBD and iSCSI only).
func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool {
	// fast path if there is no conflict checking targets.
	if volume.GCEPersistentDisk == nil && volume.AWSElasticBlockStore == nil && volume.RBD == nil && volume.ISCSI == nil {
		return false
	}

	for _, existingVolume := range pod.Spec.Volumes {
		// Same GCE disk mounted by multiple pods conflicts unless all pods mount it read-only.
		if volume.GCEPersistentDisk != nil && existingVolume.GCEPersistentDisk != nil {
			disk, existingDisk := volume.GCEPersistentDisk, existingVolume.GCEPersistentDisk
			if disk.PDName == existingDisk.PDName && !(disk.ReadOnly && existingDisk.ReadOnly) {
				return true
			}
		}

		if volume.AWSElasticBlockStore != nil && existingVolume.AWSElasticBlockStore != nil {
			if volume.AWSElasticBlockStore.VolumeID == existingVolume.AWSElasticBlockStore.VolumeID {
				return true
			}
		}

		if volume.ISCSI != nil && existingVolume.ISCSI != nil {
			iqn := volume.ISCSI.IQN
			eiqn := existingVolume.ISCSI.IQN
			// two ISCSI volumes are same, if they share the same iqn. As iscsi volumes are of type
			// RWO or ROX, we could permit only one RW mount. Same iscsi volume mounted by multiple Pods
			// conflict unless all other pods mount as read only.
			if iqn == eiqn && !(volume.ISCSI.ReadOnly && existingVolume.ISCSI.ReadOnly) {
				return true
			}
		}

		if volume.RBD != nil && existingVolume.RBD != nil {
			mon, pool, image := volume.RBD.CephMonitors, volume.RBD.RBDPool, volume.RBD.RBDImage
			emon, epool, eimage := existingVolume.RBD.CephMonitors, existingVolume.RBD.RBDPool, existingVolume.RBD.RBDImage
			// two RBDs images are the same if they share the same Ceph monitor, are in the same RADOS Pool, and have the same image name
			// only one read-write mount is permitted for the same RBD image.
			// same RBD image mounted by multiple Pods conflicts unless all Pods mount the image read-only
			if haveSame(mon, emon) && pool == epool && image == eimage && !(volume.RBD.ReadOnly && existingVolume.RBD.ReadOnly) {
				return true
			}
		}
	}

	return false
}

// NoDiskConflict evaluates if a pod can fit due to the volumes it requests, and those that
// are already mounted. If there is already a volume mounted on that node, another pod that uses the same volume
// can't be scheduled there.
// This is GCE, Amazon EBS, and Ceph RBD specific for now:
// - GCE PD allows multiple mounts as long as they're all read-only
// - AWS EBS forbids any two pods mounting the same volume ID
// - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image.
// - ISCSI forbids if any two pods share at least same IQN, LUN and Target
// TODO: migrate this into some per-volume specific code?
func NoDiskConflict(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	// Compare every requested volume against every volume of every pod already on the node.
	for _, v := range pod.Spec.Volumes {
		for _, ev := range nodeInfo.Pods() {
			if isVolumeConflict(v, ev) {
				return false, []algorithm.PredicateFailureReason{ErrDiskConflict}, nil
			}
		}
	}
	return true, nil, nil
}

// MaxPDVolumeCountChecker rejects pods that would push the count of filtered
// volumes on a node past maxVolumes.
type MaxPDVolumeCountChecker struct {
	filter     VolumeFilter
	maxVolumes int
	pvInfo     PersistentVolumeInfo
	pvcInfo    PersistentVolumeClaimInfo
}

// VolumeFilter contains information on how to filter PD Volumes when checking PD Volume caps
type VolumeFilter struct {
	// Filter normal volumes
	FilterVolume           func(vol *v1.Volume) (id string, relevant bool)
	FilterPersistentVolume func(pv *v1.PersistentVolume) (id string, relevant bool)
}

// NewMaxPDVolumeCountPredicate creates a predicate which evaluates whether a pod can fit based on the
// number of volumes which match a filter that it requests, and those that are already present.  The
// maximum number is configurable to accommodate different systems.
//
// The predicate looks for both volumes used directly, as well as PVC volumes that are backed by relevant volume
// types, counts the number of unique volumes, and rejects the new pod if it would place the total count over
// the maximum.
func NewMaxPDVolumeCountPredicate(filter VolumeFilter, maxVolumes int, pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate {
	c := &MaxPDVolumeCountChecker{
		filter:     filter,
		maxVolumes: maxVolumes,
		pvInfo:     pvInfo,
		pvcInfo:    pvcInfo,
	}

	return c.predicate
}

// filterVolumes records into filteredVolumes the unique id of every volume in
// `volumes` that passes the checker's filter, resolving PVC references to their
// bound PVs. Unresolvable PVC/PV lookups are counted with a random id so they
// still contribute to the limit.
func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace string, filteredVolumes map[string]bool) error {
	for i := range volumes {
		vol := &volumes[i]
		if id, ok := c.filter.FilterVolume(vol); ok {
			filteredVolumes[id] = true
		} else if vol.PersistentVolumeClaim != nil {
			pvcName := vol.PersistentVolumeClaim.ClaimName
			if pvcName == "" {
				return fmt.Errorf("PersistentVolumeClaim had no name")
			}
			pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName)
			if err != nil {
				// if the PVC is not found, log the error and count the PV towards the PV limit
				// generate a random volume ID since its required for de-dup
				utilruntime.HandleError(fmt.Errorf("Unable to look up PVC info for %s/%s, assuming PVC matches predicate when counting limits: %v", namespace, pvcName, err))
				source := rand.NewSource(time.Now().UnixNano())
				generatedID := "missingPVC" + strconv.Itoa(rand.New(source).Intn(1000000))
				filteredVolumes[generatedID] = true
				return nil
			}

			if pvc == nil {
				return fmt.Errorf("PersistentVolumeClaim not found: %q", pvcName)
			}

			pvName := pvc.Spec.VolumeName
			if pvName == "" {
				return fmt.Errorf("PersistentVolumeClaim is not bound: %q", pvcName)
			}

			pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName)
			if err != nil {
				// if the PV is not found, log the error
				// and count the PV towards the PV limit
				// generate a random volume ID since it is required for de-dup
				utilruntime.HandleError(fmt.Errorf("Unable to look up PV info for %s/%s/%s, assuming PV matches predicate when counting limits: %v", namespace, pvcName, pvName, err))
				source := rand.NewSource(time.Now().UnixNano())
				generatedID := "missingPV" + strconv.Itoa(rand.New(source).Intn(1000000))
				filteredVolumes[generatedID] = true
				return nil
			}

			if pv == nil {
				return fmt.Errorf("PersistentVolume not found: %q", pvName)
			}

			if id, ok := c.filter.FilterPersistentVolume(pv); ok {
				filteredVolumes[id] = true
			}
		}
	}

	return nil
}

// predicate checks whether scheduling the pod would exceed the per-node cap on
// filtered (e.g. EBS/GCE PD) volumes.
func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	// If a pod doesn't have any volume attached to it, the predicate will always be true.
	// Thus we make a fast path for it, to avoid unnecessary computations in this case.
	if len(pod.Spec.Volumes) == 0 {
		return true, nil, nil
	}

	newVolumes := make(map[string]bool)
	if err := c.filterVolumes(pod.Spec.Volumes, pod.Namespace, newVolumes); err != nil {
		return false, nil, err
	}

	// quick return
	if len(newVolumes) == 0 {
		return true, nil, nil
	}

	// count unique volumes
	existingVolumes := make(map[string]bool)
	for _, existingPod := range nodeInfo.Pods() {
		if err := c.filterVolumes(existingPod.Spec.Volumes, existingPod.Namespace, existingVolumes); err != nil {
			return false, nil, err
		}
	}
	numExistingVolumes := len(existingVolumes)

	// filter out already-mounted volumes
	for k := range existingVolumes {
		if _, ok := newVolumes[k]; ok {
			delete(newVolumes, k)
		}
	}

	numNewVolumes := len(newVolumes)

	if numExistingVolumes+numNewVolumes > c.maxVolumes {
		// violates MaxEBSVolumeCount or MaxGCEPDVolumeCount
		return false, []algorithm.PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
	}

	return true, nil, nil
}

// EBSVolumeFilter is a VolumeFilter for filtering AWS ElasticBlockStore Volumes
var EBSVolumeFilter VolumeFilter = VolumeFilter{
	FilterVolume: func(vol *v1.Volume) (string, bool) {
		if vol.AWSElasticBlockStore != nil {
			return vol.AWSElasticBlockStore.VolumeID, true
		}
		return "", false
	},

	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
		if pv.Spec.AWSElasticBlockStore != nil {
			return pv.Spec.AWSElasticBlockStore.VolumeID, true
		}
		return "", false
	},
}

// GCEPDVolumeFilter is a VolumeFilter for filtering GCE
PersistentDisk Volumes var GCEPDVolumeFilter VolumeFilter = VolumeFilter{ FilterVolume: func(vol *v1.Volume) (string, bool) { if vol.GCEPersistentDisk != nil { return vol.GCEPersistentDisk.PDName, true } return "", false }, FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { if pv.Spec.GCEPersistentDisk != nil { return pv.Spec.GCEPersistentDisk.PDName, true } return "", false }, } // AzureDiskVolumeFilter is a VolumeFilter for filtering Azure Disk Volumes var AzureDiskVolumeFilter VolumeFilter = VolumeFilter{ FilterVolume: func(vol *v1.Volume) (string, bool) { if vol.AzureDisk != nil { return vol.AzureDisk.DiskName, true } return "", false }, FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { if pv.Spec.AzureDisk != nil { return pv.Spec.AzureDisk.DiskName, true } return "", false }, } type VolumeZoneChecker struct { pvInfo PersistentVolumeInfo pvcInfo PersistentVolumeClaimInfo } // VolumeZonePredicate evaluates if a pod can fit due to the volumes it requests, given // that some volumes may have zone scheduling constraints. The requirement is that any // volume zone-labels must match the equivalent zone-labels on the node. It is OK for // the node to have more zone-label constraints (for example, a hypothetical replicated // volume might allow region-wide access) // // Currently this is only supported with PersistentVolumeClaims, and looks to the labels // only on the bound PersistentVolume. // // Working with volumes declared inline in the pod specification (i.e. not // using a PersistentVolume) is likely to be harder, as it would require // determining the zone of a volume during scheduling, and that is likely to // require calling out to the cloud provider. It seems that we are moving away // from inline volume declarations anyway. 
func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate { c := &VolumeZoneChecker{ pvInfo: pvInfo, pvcInfo: pvcInfo, } return c.predicate } func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { // If a pod doesn't have any volume attached to it, the predicate will always be true. // Thus we make a fast path for it, to avoid unnecessary computations in this case. if len(pod.Spec.Volumes) == 0 { return true, nil, nil } node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") } nodeConstraints := make(map[string]string) for k, v := range node.ObjectMeta.Labels { if k != kubeletapis.LabelZoneFailureDomain && k != kubeletapis.LabelZoneRegion { continue } nodeConstraints[k] = v } if len(nodeConstraints) == 0 { // The node has no zone constraints, so we're OK to schedule. // In practice, when using zones, all nodes must be labeled with zone labels. // We want to fast-path this case though. 
return true, nil, nil } namespace := pod.Namespace manifest := &(pod.Spec) for i := range manifest.Volumes { volume := &manifest.Volumes[i] if volume.PersistentVolumeClaim != nil { pvcName := volume.PersistentVolumeClaim.ClaimName if pvcName == "" { return false, nil, fmt.Errorf("PersistentVolumeClaim had no name") } pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName) if err != nil { return false, nil, err } if pvc == nil { return false, nil, fmt.Errorf("PersistentVolumeClaim was not found: %q", pvcName) } pvName := pvc.Spec.VolumeName if pvName == "" { return false, nil, fmt.Errorf("PersistentVolumeClaim is not bound: %q", pvcName) } pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName) if err != nil { return false, nil, err } if pv == nil { return false, nil, fmt.Errorf("PersistentVolume not found: %q", pvName) } for k, v := range pv.ObjectMeta.Labels { if k != kubeletapis.LabelZoneFailureDomain && k != kubeletapis.LabelZoneRegion { continue } nodeV, _ := nodeConstraints[k] if v != nodeV { glog.V(10).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, node.Name, pvName, k) return false, []algorithm.PredicateFailureReason{ErrVolumeZoneConflict}, nil } } } } return true, nil, nil } // Returns a *schedulercache.Resource that covers the largest width in each // resource dimension. Because init-containers run sequentially, we collect the // max in each dimension iteratively. In contrast, we sum the resource vectors // for regular containers since they run simultaneously. 
//
// Example:
//
// Pod:
//   InitContainers
//     IC1:
//       CPU: 2
//       Memory: 1G
//     IC2:
//       CPU: 2
//       Memory: 3G
//   Containers
//     C1:
//       CPU: 2
//       Memory: 1G
//     C2:
//       CPU: 1
//       Memory: 1G
//
// Result: CPU: 3, Memory: 3G
func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
	result := schedulercache.Resource{}
	// Regular containers run concurrently: sum their requests per dimension.
	for _, container := range pod.Spec.Containers {
		for rName, rQuantity := range container.Resources.Requests {
			switch rName {
			case v1.ResourceMemory:
				result.Memory += rQuantity.Value()
			case v1.ResourceCPU:
				result.MilliCPU += rQuantity.MilliValue()
			case v1.ResourceNvidiaGPU:
				result.NvidiaGPU += rQuantity.Value()
			case v1.ResourceStorageOverlay:
				result.StorageOverlay += rQuantity.Value()
			default:
				if v1helper.IsOpaqueIntResourceName(rName) {
					result.AddOpaque(rName, rQuantity.Value())
				}
			}
		}
	}

	// Account for storage requested by emptydir volumes
	// If the storage medium is memory, should exclude the size
	for _, vol := range pod.Spec.Volumes {
		if vol.EmptyDir != nil && vol.EmptyDir.Medium != v1.StorageMediumMemory {
			result.StorageScratch += vol.EmptyDir.SizeLimit.Value()
		}
	}

	// take max_resource(sum_pod, any_init_container)
	// Init containers run one at a time, so only the widest one matters per dimension.
	for _, container := range pod.Spec.InitContainers {
		for rName, rQuantity := range container.Resources.Requests {
			switch rName {
			case v1.ResourceMemory:
				if mem := rQuantity.Value(); mem > result.Memory {
					result.Memory = mem
				}
			case v1.ResourceCPU:
				if cpu := rQuantity.MilliValue(); cpu > result.MilliCPU {
					result.MilliCPU = cpu
				}
			case v1.ResourceNvidiaGPU:
				if gpu := rQuantity.Value(); gpu > result.NvidiaGPU {
					result.NvidiaGPU = gpu
				}
			case v1.ResourceStorageOverlay:
				if overlay := rQuantity.Value(); overlay > result.StorageOverlay {
					result.StorageOverlay = overlay
				}
			default:
				if v1helper.IsOpaqueIntResourceName(rName) {
					value := rQuantity.Value()
					if value > result.OpaqueIntResources[rName] {
						result.SetOpaque(rName, value)
					}
				}
			}
		}
	}

	return &result
}

// podName formats a pod as "namespace/name" for log messages.
func podName(pod *v1.Pod) string {
	return pod.Namespace + "/" + pod.Name
}

// PodFitsResources checks if a node has sufficient resources, such as cpu, memory, gpu, opaque int resources etc to run a pod.
// First return value indicates whether a node has sufficient resources to run a pod while the second return value indicates the
// predicate failure reasons if the node has insufficient resources to run the pod.
func PodFitsResources(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}

	// All insufficiencies are accumulated (not short-circuited) so the caller
	// can report every violated dimension at once.
	var predicateFails []algorithm.PredicateFailureReason
	allowedPodNumber := nodeInfo.AllowedPodNumber()
	if len(nodeInfo.Pods())+1 > allowedPodNumber {
		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourcePods, 1, int64(len(nodeInfo.Pods())), int64(allowedPodNumber)))
	}

	var podRequest *schedulercache.Resource
	if predicateMeta, ok := meta.(*predicateMetadata); ok {
		podRequest = predicateMeta.podRequest
	} else {
		// We couldn't parse metadata - fallback to computing it.
		podRequest = GetResourceRequest(pod)
	}
	if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 && podRequest.StorageOverlay == 0 && podRequest.StorageScratch == 0 && len(podRequest.OpaqueIntResources) == 0 {
		return len(predicateFails) == 0, predicateFails, nil
	}

	allocatable := nodeInfo.AllocatableResource()
	if allocatable.MilliCPU < podRequest.MilliCPU+nodeInfo.RequestedResource().MilliCPU {
		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceCPU, podRequest.MilliCPU, nodeInfo.RequestedResource().MilliCPU, allocatable.MilliCPU))
	}
	if allocatable.Memory < podRequest.Memory+nodeInfo.RequestedResource().Memory {
		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceMemory, podRequest.Memory, nodeInfo.RequestedResource().Memory, allocatable.Memory))
	}
	if allocatable.NvidiaGPU < podRequest.NvidiaGPU+nodeInfo.RequestedResource().NvidiaGPU {
		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceNvidiaGPU, podRequest.NvidiaGPU, nodeInfo.RequestedResource().NvidiaGPU, allocatable.NvidiaGPU))
	}

	// When the node reports no separate overlay capacity, overlay requests are
	// charged against scratch space instead.
	scratchSpaceRequest := podRequest.StorageScratch
	if allocatable.StorageOverlay == 0 {
		scratchSpaceRequest += podRequest.StorageOverlay
		//scratchSpaceRequest += nodeInfo.RequestedResource().StorageOverlay
		nodeScratchRequest := nodeInfo.RequestedResource().StorageOverlay + nodeInfo.RequestedResource().StorageScratch
		if allocatable.StorageScratch < scratchSpaceRequest+nodeScratchRequest {
			predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceStorageScratch, scratchSpaceRequest, nodeScratchRequest, allocatable.StorageScratch))
		}

	} else if allocatable.StorageScratch < scratchSpaceRequest+nodeInfo.RequestedResource().StorageScratch {
		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceStorageScratch, scratchSpaceRequest, nodeInfo.RequestedResource().StorageScratch, allocatable.StorageScratch))
	}
	if allocatable.StorageOverlay > 0 &&
		allocatable.StorageOverlay < podRequest.StorageOverlay+nodeInfo.RequestedResource().StorageOverlay {
		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceStorageOverlay, podRequest.StorageOverlay, nodeInfo.RequestedResource().StorageOverlay, allocatable.StorageOverlay))
	}

	for rName, rQuant := range podRequest.OpaqueIntResources {
		if allocatable.OpaqueIntResources[rName] < rQuant+nodeInfo.RequestedResource().OpaqueIntResources[rName] {
			predicateFails = append(predicateFails, NewInsufficientResourceError(rName, podRequest.OpaqueIntResources[rName], nodeInfo.RequestedResource().OpaqueIntResources[rName], allocatable.OpaqueIntResources[rName]))
		}
	}

	if glog.V(10) {
		if len(predicateFails) == 0 {
			// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
			// not logged. There is visible performance gain from it.
			glog.Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.",
				podName(pod), node.Name, len(nodeInfo.Pods()), allowedPodNumber)
		}
	}
	return len(predicateFails) == 0, predicateFails, nil
}

// nodeMatchesNodeSelectorTerms checks if a node's labels satisfy a list of node selector terms,
// terms are ORed, and an empty list of terms will match nothing.
func nodeMatchesNodeSelectorTerms(node *v1.Node, nodeSelectorTerms []v1.NodeSelectorTerm) bool {
	for _, req := range nodeSelectorTerms {
		nodeSelector, err := v1helper.NodeSelectorRequirementsAsSelector(req.MatchExpressions)
		if err != nil {
			glog.V(10).Infof("Failed to parse MatchExpressions: %+v, regarding as not match.", req.MatchExpressions)
			return false
		}
		if nodeSelector.Matches(labels.Set(node.Labels)) {
			return true
		}
	}
	return false
}

// The pod can only schedule onto nodes that satisfy requirements in both NodeAffinity and nodeSelector.
func podMatchesNodeLabels(pod *v1.Pod, node *v1.Node) bool {
	// Check if node.Labels match pod.Spec.NodeSelector.
	if len(pod.Spec.NodeSelector) > 0 {
		selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
		if !selector.Matches(labels.Set(node.Labels)) {
			return false
		}
	}

	// 1. nil NodeSelector matches all nodes (i.e. does not filter out any nodes)
	// 2. nil []NodeSelectorTerm (equivalent to non-nil empty NodeSelector) matches no nodes
	// 3. zero-length non-nil []NodeSelectorTerm matches no nodes also, just for simplicity
	// 4. nil []NodeSelectorRequirement (equivalent to non-nil empty NodeSelectorTerm) matches no nodes
	// 5. zero-length non-nil []NodeSelectorRequirement matches no nodes also, just for simplicity
	// 6. non-nil empty NodeSelectorRequirement is not allowed
	nodeAffinityMatches := true
	affinity := pod.Spec.Affinity
	if affinity != nil && affinity.NodeAffinity != nil {
		nodeAffinity := affinity.NodeAffinity
		// if no required NodeAffinity requirements, will do no-op, means select all nodes.
		// TODO: Replace next line with subsequent commented-out line when implement RequiredDuringSchedulingRequiredDuringExecution.
		if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
			// if nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution == nil && nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
			return true
		}

		// Match node selector for requiredDuringSchedulingRequiredDuringExecution.
		// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
		// if nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil {
		//	nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingRequiredDuringExecution.NodeSelectorTerms
		//	glog.V(10).Infof("Match for RequiredDuringSchedulingRequiredDuringExecution node selector terms %+v", nodeSelectorTerms)
		//	nodeAffinityMatches = nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
		// }

		// Match node selector for requiredDuringSchedulingIgnoredDuringExecution.
		if nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
			nodeSelectorTerms := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
			glog.V(10).Infof("Match for RequiredDuringSchedulingIgnoredDuringExecution node selector terms %+v", nodeSelectorTerms)
			nodeAffinityMatches = nodeAffinityMatches && nodeMatchesNodeSelectorTerms(node, nodeSelectorTerms)
		}

	}
	return nodeAffinityMatches
}

// PodMatchNodeSelector checks if a pod node selector matches the node label.
func PodMatchNodeSelector(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}
	if podMatchesNodeLabels(pod, node) {
		return true, nil, nil
	}
	return false, []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}, nil
}

// PodFitsHost checks if a pod spec node name matches the current node.
func PodFitsHost(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	if len(pod.Spec.NodeName) == 0 {
		return true, nil, nil
	}
	node := nodeInfo.Node()
	if node == nil {
		return false, nil, fmt.Errorf("node not found")
	}
	if pod.Spec.NodeName == node.Name {
		return true, nil, nil
	}
	return false, []algorithm.PredicateFailureReason{ErrPodNotMatchHostName}, nil
}

// NodeLabelChecker tests a node for the presence (or absence) of a fixed label set.
type NodeLabelChecker struct {
	labels   []string
	presence bool
}

// NewNodeLabelPredicate returns a FitPredicate built from CheckNodeLabelPresence below.
func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicate {
	labelChecker := &NodeLabelChecker{
		labels:   labels,
		presence: presence,
	}
	return labelChecker.CheckNodeLabelPresence
}

// CheckNodeLabelPresence checks whether all of the specified labels exists on a node or not, regardless of their value
// If "presence" is false, then returns false if any of the requested labels matches any of the node's labels,
// otherwise returns true.
// If "presence" is true, then returns false if any of the requested labels does not match any of the node's labels, // otherwise returns true. // // Consider the cases where the nodes are placed in regions/zones/racks and these are identified by labels // In some cases, it is required that only nodes that are part of ANY of the defined regions/zones/racks be selected // // Alternately, eliminating nodes that have a certain label, regardless of value, is also useful // A node may have a label with "retiring" as key and the date as the value // and it may be desirable to avoid scheduling new pods on this node func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") } var exists bool nodeLabels := labels.Set(node.Labels) for _, label := range n.labels { exists = nodeLabels.Has(label) if (exists && !n.presence) || (!exists && n.presence) { return false, []algorithm.PredicateFailureReason{ErrNodeLabelPresenceViolated}, nil } } return true, nil, nil } type ServiceAffinity struct { podLister algorithm.PodLister serviceLister algorithm.ServiceLister nodeInfo NodeInfo labels []string } // serviceAffinityPrecomputation should be run once by the scheduler before looping through the Predicate. It is a helper function that // only should be referenced by NewServiceAffinityPredicate. func (s *ServiceAffinity) serviceAffinityPrecomputation(pm *predicateMetadata) { if pm.pod == nil { glog.Errorf("Cannot precompute service affinity, a pod is required to calculate service affinity.") return } var errSvc, errList error // Store services which match the pod. 
pm.serviceAffinityMatchingPodServices, errSvc = s.serviceLister.GetPodServices(pm.pod) selector := CreateSelectorFromLabels(pm.pod.Labels) // consider only the pods that belong to the same namespace allMatches, errList := s.podLister.List(selector) // In the future maybe we will return them as part of the function. if errSvc != nil || errList != nil { glog.Errorf("Some Error were found while precomputing svc affinity: \nservices:%v , \npods:%v", errSvc, errList) } pm.serviceAffinityMatchingPodList = FilterPodsByNamespace(allMatches, pm.pod.Namespace) } func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, nodeInfo NodeInfo, labels []string) (algorithm.FitPredicate, PredicateMetadataModifier) { affinity := &ServiceAffinity{ podLister: podLister, serviceLister: serviceLister, nodeInfo: nodeInfo, labels: labels, } return affinity.checkServiceAffinity, affinity.serviceAffinityPrecomputation } // checkServiceAffinity is a predicate which matches nodes in such a way to force that // ServiceAffinity.labels are homogenous for pods that are scheduled to a node. // (i.e. it returns true IFF this pod can be added to this node such that all other pods in // the same service are running on nodes with // the exact same ServiceAffinity.label values). // // For example: // If the first pod of a service was scheduled to a node with label "region=foo", // all the other subsequent pods belong to the same service will be schedule on // nodes with the same "region=foo" label. // // Details: // // If (the svc affinity labels are not a subset of pod's label selectors ) // The pod has all information necessary to check affinity, the pod's label selector is sufficient to calculate // the match. // Otherwise: // Create an "implicit selector" which guarantees pods will land on nodes with similar values // for the affinity labels. 
// // To do this, we "reverse engineer" a selector by introspecting existing pods running under the same service+namespace. // These backfilled labels in the selector "L" are defined like so: // - L is a label that the ServiceAffinity object needs as a matching constraints. // - L is not defined in the pod itself already. // - and SOME pod, from a service, in the same namespace, ALREADY scheduled onto a node, has a matching value. // // WARNING: This Predicate is NOT guaranteed to work if some of the predicateMetadata data isn't precomputed... // For that reason it is not exported, i.e. it is highly coupled to the implementation of the FitPredicate construction. func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var services []*v1.Service var pods []*v1.Pod if pm, ok := meta.(*predicateMetadata); ok && (pm.serviceAffinityMatchingPodList != nil || pm.serviceAffinityMatchingPodServices != nil) { services = pm.serviceAffinityMatchingPodServices pods = pm.serviceAffinityMatchingPodList } else { // Make the predicate resilient in case metadata is missing. pm = &predicateMetadata{pod: pod} s.serviceAffinityPrecomputation(pm) pods, services = pm.serviceAffinityMatchingPodList, pm.serviceAffinityMatchingPodServices } node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") } // check if the pod being scheduled has the affinity labels specified in its NodeSelector affinityLabels := FindLabelsInSet(s.labels, labels.Set(pod.Spec.NodeSelector)) // Step 1: If we don't have all constraints, introspect nodes to find the missing constraints. 
if len(s.labels) > len(affinityLabels) { if len(services) > 0 { if len(pods) > 0 { nodeWithAffinityLabels, err := s.nodeInfo.GetNodeInfo(pods[0].Spec.NodeName) if err != nil { return false, nil, err } AddUnsetLabelsToMap(affinityLabels, s.labels, labels.Set(nodeWithAffinityLabels.Labels)) } } } // Step 2: Finally complete the affinity predicate based on whatever set of predicates we were able to find. if CreateSelectorFromLabels(affinityLabels).Matches(labels.Set(node.Labels)) { return true, nil, nil } return false, []algorithm.PredicateFailureReason{ErrServiceAffinityViolated}, nil } // PodFitsHostPorts checks if a node has free ports for the requested pod ports. func PodFitsHostPorts(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var wantPorts map[int]bool if predicateMeta, ok := meta.(*predicateMetadata); ok { wantPorts = predicateMeta.podPorts } else { // We couldn't parse metadata - fallback to computing it. wantPorts = schedutil.GetUsedPorts(pod) } if len(wantPorts) == 0 { return true, nil, nil } existingPorts := nodeInfo.UsedPorts() for wport := range wantPorts { if wport != 0 && existingPorts[wport] { return false, []algorithm.PredicateFailureReason{ErrPodNotFitsHostPorts}, nil } } return true, nil, nil } // search two arrays and return true if they have at least one common element; return false otherwise func haveSame(a1, a2 []string) bool { for _, val1 := range a1 { for _, val2 := range a2 { if val1 == val2 { return true } } } return false } // GeneralPredicates checks whether noncriticalPredicates and EssentialPredicates pass. 
noncriticalPredicates are the predicates // that only non-critical pods need and EssentialPredicates are the predicates that all pods, including critical pods, need func GeneralPredicates(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var predicateFails []algorithm.PredicateFailureReason fit, reasons, err := noncriticalPredicates(pod, meta, nodeInfo) if err != nil { return false, predicateFails, err } if !fit { predicateFails = append(predicateFails, reasons...) } fit, reasons, err = EssentialPredicates(pod, meta, nodeInfo) if err != nil { return false, predicateFails, err } if !fit { predicateFails = append(predicateFails, reasons...) } return len(predicateFails) == 0, predicateFails, nil } // noncriticalPredicates are the predicates that only non-critical pods need func noncriticalPredicates(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var predicateFails []algorithm.PredicateFailureReason fit, reasons, err := PodFitsResources(pod, meta, nodeInfo) if err != nil { return false, predicateFails, err } if !fit { predicateFails = append(predicateFails, reasons...) } return len(predicateFails) == 0, predicateFails, nil } // EssentialPredicates are the predicates that all pods, including critical pods, need func EssentialPredicates(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var predicateFails []algorithm.PredicateFailureReason fit, reasons, err := PodFitsHost(pod, meta, nodeInfo) if err != nil { return false, predicateFails, err } if !fit { predicateFails = append(predicateFails, reasons...) 
} // TODO: PodFitsHostPorts is essential for now, but kubelet should ideally // preempt pods to free up host ports too fit, reasons, err = PodFitsHostPorts(pod, meta, nodeInfo) if err != nil { return false, predicateFails, err } if !fit { predicateFails = append(predicateFails, reasons...) } fit, reasons, err = PodMatchNodeSelector(pod, meta, nodeInfo) if err != nil { return false, predicateFails, err } if !fit { predicateFails = append(predicateFails, reasons...) } return len(predicateFails) == 0, predicateFails, nil } type PodAffinityChecker struct { info NodeInfo podLister algorithm.PodLister } func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) algorithm.FitPredicate { checker := &PodAffinityChecker{ info: info, podLister: podLister, } return checker.InterPodAffinityMatches } // InterPodAffinityMatches checks if a pod can be scheduled on the specified node with pod affinity/anti-affinity configuration. // First return value indicates whether a pod can be scheduled on the specified node while the second return value indicates the // predicate failure reasons if the pod cannot be scheduled on the specified node. func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") } if !c.satisfiesExistingPodsAntiAffinity(pod, meta, node) { return false, []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}, nil } // Now check if <pod> requirements will be satisfied on this node. 
affinity := pod.Spec.Affinity if affinity == nil || (affinity.PodAffinity == nil && affinity.PodAntiAffinity == nil) { return true, nil, nil } if !c.satisfiesPodsAffinityAntiAffinity(pod, node, affinity) { return false, []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch}, nil } if glog.V(10) { // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is // not logged. There is visible performance gain from it. glog.Infof("Schedule Pod %+v on Node %+v is allowed, pod (anti)affinity constraints satisfied", podName(pod), node.Name) } return true, nil, nil } // anyPodMatchesPodAffinityTerm checks if any of given pods can match the specific podAffinityTerm. // First return value indicates whether a matching pod exists on a node that matches the topology key, // while the second return value indicates whether a matching pod exists anywhere. // TODO: Do we really need any pod matching, or all pods matching? I think the latter. func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *v1.Pod, allPods []*v1.Pod, node *v1.Node, term *v1.PodAffinityTerm) (bool, bool, error) { if len(term.TopologyKey) == 0 { return false, false, fmt.Errorf("empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity") } matchingPodExists := false namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(pod, term) selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) if err != nil { return false, false, err } for _, existingPod := range allPods { match := priorityutil.PodMatchesTermsNamespaceAndSelector(existingPod, namespaces, selector) if match { matchingPodExists = true existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName) if err != nil { return false, matchingPodExists, err } if priorityutil.NodesHaveSameTopologyKey(node, existingPodNode, term.TopologyKey) { return true, matchingPodExists, nil } } } return false, matchingPodExists, nil } func getPodAffinityTerms(podAffinity 
*v1.PodAffinity) (terms []v1.PodAffinityTerm) { if podAffinity != nil { if len(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 { terms = podAffinity.RequiredDuringSchedulingIgnoredDuringExecution } // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution. //if len(podAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 { // terms = append(terms, podAffinity.RequiredDuringSchedulingRequiredDuringExecution...) //} } return terms } func getPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.PodAffinityTerm) { if podAntiAffinity != nil { if len(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 { terms = podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution } // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution. //if len(podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 { // terms = append(terms, podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution...) //} } return terms } func getMatchingAntiAffinityTerms(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) ([]matchingPodAntiAffinityTerm, error) { allNodeNames := make([]string, 0, len(nodeInfoMap)) for name := range nodeInfoMap { allNodeNames = append(allNodeNames, name) } var lock sync.Mutex var result []matchingPodAntiAffinityTerm var firstError error appendResult := func(toAppend []matchingPodAntiAffinityTerm) { lock.Lock() defer lock.Unlock() result = append(result, toAppend...) 
} catchError := func(err error) { lock.Lock() defer lock.Unlock() if firstError == nil { firstError = err } } processNode := func(i int) { nodeInfo := nodeInfoMap[allNodeNames[i]] node := nodeInfo.Node() if node == nil { catchError(fmt.Errorf("node not found")) return } var nodeResult []matchingPodAntiAffinityTerm for _, existingPod := range nodeInfo.PodsWithAffinity() { affinity := existingPod.Spec.Affinity if affinity == nil { continue } for _, term := range getPodAntiAffinityTerms(affinity.PodAntiAffinity) { namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term) selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) if err != nil { catchError(err) return } if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) { nodeResult = append(nodeResult, matchingPodAntiAffinityTerm{term: &term, node: node}) } } } if len(nodeResult) > 0 { appendResult(nodeResult) } } workqueue.Parallelize(16, len(allNodeNames), processNode) return result, firstError } func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *v1.Pod, allPods []*v1.Pod) ([]matchingPodAntiAffinityTerm, error) { var result []matchingPodAntiAffinityTerm for _, existingPod := range allPods { affinity := existingPod.Spec.Affinity if affinity != nil && affinity.PodAntiAffinity != nil { existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName) if err != nil { return nil, err } for _, term := range getPodAntiAffinityTerms(affinity.PodAntiAffinity) { namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term) selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) if err != nil { return nil, err } if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) { result = append(result, matchingPodAntiAffinityTerm{term: &term, node: existingPodNode}) } } } } return result, nil } // Checks if scheduling the pod onto this node would break any anti-affinity // rules indicated by the existing pods. 
func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta interface{}, node *v1.Node) bool { var matchingTerms []matchingPodAntiAffinityTerm if predicateMeta, ok := meta.(*predicateMetadata); ok { matchingTerms = predicateMeta.matchingAntiAffinityTerms } else { allPods, err := c.podLister.List(labels.Everything()) if err != nil { glog.Errorf("Failed to get all pods, %+v", err) return false } if matchingTerms, err = c.getMatchingAntiAffinityTerms(pod, allPods); err != nil { glog.Errorf("Failed to get all terms that pod %+v matches, err: %+v", podName(pod), err) return false } } for _, term := range matchingTerms { if len(term.term.TopologyKey) == 0 { glog.Error("Empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity") return false } if priorityutil.NodesHaveSameTopologyKey(node, term.node, term.term.TopologyKey) { glog.V(10).Infof("Cannot schedule pod %+v onto node %v,because of PodAntiAffinityTerm %v", podName(pod), node.Name, term.term) return false } } if glog.V(10) { // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is // not logged. There is visible performance gain from it. glog.Infof("Schedule Pod %+v on Node %+v is allowed, existing pods anti-affinity rules satisfied.", podName(pod), node.Name) } return true } // Checks if scheduling the pod onto this node would break any rules of this pod. func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, node *v1.Node, affinity *v1.Affinity) bool { allPods, err := c.podLister.List(labels.Everything()) if err != nil { return false } // Check all affinity terms. 
for _, term := range getPodAffinityTerms(affinity.PodAffinity) { termMatches, matchingPodExists, err := c.anyPodMatchesPodAffinityTerm(pod, allPods, node, &term) if err != nil { glog.Errorf("Cannot schedule pod %+v onto node %v,because of PodAffinityTerm %v, err: %v", podName(pod), node.Name, term, err) return false } if !termMatches { // If the requirement matches a pod's own labels are namespace, and there are // no other such pods, then disregard the requirement. This is necessary to // not block forever because the first pod of the collection can't be scheduled. if matchingPodExists { glog.V(10).Infof("Cannot schedule pod %+v onto node %v,because of PodAffinityTerm %v, err: %v", podName(pod), node.Name, term, err) return false } namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(pod, &term) selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector) if err != nil { glog.Errorf("Cannot parse selector on term %v for pod %v. Details %v", term, podName(pod), err) return false } match := priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) if !match { glog.V(10).Infof("Cannot schedule pod %+v onto node %v,because of PodAffinityTerm %v, err: %v", podName(pod), node.Name, term, err) return false } } } // Check all anti-affinity terms. for _, term := range getPodAntiAffinityTerms(affinity.PodAntiAffinity) { termMatches, _, err := c.anyPodMatchesPodAffinityTerm(pod, allPods, node, &term) if err != nil || termMatches { glog.V(10).Infof("Cannot schedule pod %+v onto node %v,because of PodAntiAffinityTerm %v, err: %v", podName(pod), node.Name, term, err) return false } } if glog.V(10) { // We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is // not logged. There is visible performance gain from it. 
glog.Infof("Schedule Pod %+v on Node %+v is allowed, pod afinnity/anti-affinity constraints satisfied.", podName(pod), node.Name) } return true } // PodToleratesNodeTaints checks if a pod tolertaions can tolerate the node taints func PodToleratesNodeTaints(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { taints, err := nodeInfo.Taints() if err != nil { return false, nil, err } if v1helper.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taints, func(t *v1.Taint) bool { // PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints. return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute }) { return true, nil, nil } return false, []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil } // isPodBestEffort checks if pod is scheduled with best-effort QoS func isPodBestEffort(pod *v1.Pod) bool { return v1qos.GetPodQOS(pod) == v1.PodQOSBestEffort } // CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node // reporting memory pressure condition. func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var podBestEffort bool if predicateMeta, ok := meta.(*predicateMetadata); ok { podBestEffort = predicateMeta.podBestEffort } else { // We couldn't parse metadata - fallback to computing it. podBestEffort = isPodBestEffort(pod) } // pod is not BestEffort pod if !podBestEffort { return true, nil, nil } // check if node is under memory preasure if nodeInfo.MemoryPressureCondition() == v1.ConditionTrue { return false, []algorithm.PredicateFailureReason{ErrNodeUnderMemoryPressure}, nil } return true, nil, nil } // CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node // reporting disk pressure condition. 
func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { // check if node is under disk preasure if nodeInfo.DiskPressureCondition() == v1.ConditionTrue { return false, []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure}, nil } return true, nil, nil } type VolumeNodeChecker struct { pvInfo PersistentVolumeInfo pvcInfo PersistentVolumeClaimInfo client clientset.Interface } // VolumeNodeChecker evaluates if a pod can fit due to the volumes it requests, given // that some volumes have node topology constraints, particularly when using Local PVs. // The requirement is that any pod that uses a PVC that is bound to a PV with topology constraints // must be scheduled to a node that satisfies the PV's topology labels. func NewVolumeNodePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo, client clientset.Interface) algorithm.FitPredicate { c := &VolumeNodeChecker{ pvInfo: pvInfo, pvcInfo: pvcInfo, client: client, } return c.predicate } func (c *VolumeNodeChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { if !utilfeature.DefaultFeatureGate.Enabled(features.PersistentLocalVolumes) { return true, nil, nil } // If a pod doesn't have any volume attached to it, the predicate will always be true. // Thus we make a fast path for it, to avoid unnecessary computations in this case. 
if len(pod.Spec.Volumes) == 0 { return true, nil, nil } node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") } glog.V(2).Infof("Checking for prebound volumes with node affinity") namespace := pod.Namespace manifest := &(pod.Spec) for i := range manifest.Volumes { volume := &manifest.Volumes[i] if volume.PersistentVolumeClaim == nil { continue } pvcName := volume.PersistentVolumeClaim.ClaimName if pvcName == "" { return false, nil, fmt.Errorf("PersistentVolumeClaim had no name") } pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName) if err != nil { return false, nil, err } if pvc == nil { return false, nil, fmt.Errorf("PersistentVolumeClaim was not found: %q", pvcName) } pvName := pvc.Spec.VolumeName if pvName == "" { return false, nil, fmt.Errorf("PersistentVolumeClaim is not bound: %q", pvcName) } pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName) if err != nil { return false, nil, err } if pv == nil { return false, nil, fmt.Errorf("PersistentVolume not found: %q", pvName) } err = volumeutil.CheckNodeAffinity(pv, node.Labels) if err != nil { glog.V(2).Infof("Won't schedule pod %q onto node %q due to volume %q node mismatch: %v", pod.Name, node.Name, pvName, err.Error()) return false, []algorithm.PredicateFailureReason{ErrVolumeNodeConflict}, nil } glog.V(4).Infof("VolumeNode predicate allows node %q for pod %q due to volume %q", node.Name, pod.Name, pvName) } return true, nil, nil }
abdasgupta/kubernetes
plugin/pkg/scheduler/algorithm/predicates/predicates.go
GO
apache-2.0
53,588
<?php /** * LICENSE: Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * PHP version 5 * * @category Microsoft * * @author Azure PHP SDK <azurephpsdk@microsoft.com> * @copyright 2012 Microsoft Corporation * @license http://www.apache.org/licenses/LICENSE-2.0 Apache License 2.0 * * @link https://github.com/WindowsAzure/azure-sdk-for-php */ namespace Tests\unit\WindowsAzure\ServiceBus\models; use WindowsAzure\ServiceBus\Models\SqlRuleAction; /** * Unit tests for class WrapAccessTokenResult. * * @category Microsoft * * @author Azure PHP SDK <azurephpsdk@microsoft.com> * @copyright 2012 Microsoft Corporation * @license http://www.apache.org/licenses/LICENSE-2.0 Apache License 2.0 * * @version Release: 0.5.0_2016-11 * * @link https://github.com/WindowsAzure/azure-sdk-for-php */ class SqlRuleActionTest extends \PHPUnit_Framework_TestCase { /** * @covers \WindowsAzure\ServiceBus\Models\SqlRuleAction::__construct */ public function testSqlRuleActionConstructor() { // Setup // Test $sqlRuleAction = new SqlRuleAction(); // Assert $this->assertNotNull($sqlRuleAction); } /** * @covers \WindowsAzure\ServiceBus\Models\SqlRuleAction::getSqlExpression * @covers \WindowsAzure\ServiceBus\Models\SqlRuleAction::setSqlExpression */ public function testGetSetSqlExpression() { // Setup $expected = 'testSqlExpression'; $sqlRuleAction = new SqlRuleAction(); // Test $sqlRuleAction->setSqlExpression($expected); $actual = $sqlRuleAction->getSqlExpression(); // Assert $this->assertEquals( $expected, $actual ); } }
chewbaccateam/hackfest
vendor/microsoft/windowsazure/tests/unit/WindowsAzure/ServiceBus/models/SqlRuleActionTest.php
PHP
apache-2.0
2,265
package mil.nga.giat.geowave.cli.geoserver;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.Status;

import mil.nga.giat.geowave.core.cli.annotations.GeowaveOperation;
import mil.nga.giat.geowave.core.cli.api.Command;
import mil.nga.giat.geowave.core.cli.api.OperationParams;
import mil.nga.giat.geowave.core.cli.operations.config.options.ConfigOptions;

import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParameterException;
import com.beust.jcommander.Parameters;

/**
 * CLI command that deletes a named style from a GeoServer instance via its
 * REST API. Registered under the "rmstyle" sub-command of the geoserver
 * section.
 */
@GeowaveOperation(name = "rmstyle", parentOperation = GeoServerSection.class)
@Parameters(commandDescription = "Remove GeoServer Style")
public class GeoServerRemoveStyleCommand implements
		Command
{
	// Lazily created in prepare(); null until then.
	private GeoServerRestClient geoserverClient = null;

	// Positional arguments bound by JCommander; exactly one is expected.
	@Parameter(description = "<style name>")
	private List<String> parameters = new ArrayList<String>();

	private String styleName = null;

	/**
	 * Builds the GeoServer REST client from the locally configured
	 * properties file, unless a client already exists.
	 *
	 * @param params CLI operation context
	 * @return always true (preparation cannot fail here)
	 */
	@Override
	public boolean prepare(
			OperationParams params ) {
		if (geoserverClient != null) {
			// Already initialized — nothing to do.
			return true;
		}

		// Resolve the local GeoServer configuration from the CLI context.
		final File configFile = (File) params.getContext().get(
				ConfigOptions.PROPERTIES_FILE_CONTEXT);
		final GeoServerConfig gsConfig = new GeoServerConfig(
				configFile);

		// Create the REST client used by execute().
		geoserverClient = new GeoServerRestClient(
				gsConfig);

		return true;
	}

	/**
	 * Deletes the style named by the single positional argument and reports
	 * the outcome on stdout (success) or stderr (failure).
	 *
	 * @param params CLI operation context (unused here)
	 * @throws Exception ParameterException when the argument count is wrong
	 */
	@Override
	public void execute(
			OperationParams params )
			throws Exception {
		if (parameters.size() != 1) {
			throw new ParameterException(
					"Requires argument: <style name>");
		}

		styleName = parameters.get(0);

		final Response response = geoserverClient.deleteStyle(styleName);
		final int status = response.getStatus();

		if (status == Status.OK.getStatusCode()) {
			System.out.println("Delete style '" + styleName + "' on GeoServer: OK");
		}
		else {
			System.err.println("Error deleting style '" + styleName + "' on GeoServer; code = " + status);
		}
	}
}
Becca42/geowave
extensions/cli/geoserver/src/main/java/mil/nga/giat/geowave/cli/geoserver/GeoServerRemoveStyleCommand.java
Java
apache-2.0
2,009
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package androidx.appcompat.widget;

import static android.view.View.MeasureSpec.AT_MOST;
import static android.view.View.MeasureSpec.EXACTLY;
import static android.view.View.MeasureSpec.getMode;

import static androidx.annotation.RestrictTo.Scope.LIBRARY;

import android.content.Context;
import android.graphics.Rect;
import android.util.AttributeSet;
import android.util.DisplayMetrics;
import android.util.TypedValue;
import android.widget.FrameLayout;

import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.annotation.RestrictTo;
import androidx.core.view.ViewCompat;

/**
 * A FrameLayout used as the window content root by AppCompat. It applies the
 * window's min/fixed width/height attributes (major/minor axis variants)
 * when measuring, taking the decor view's padding into account.
 *
 * @hide
 */
@RestrictTo(LIBRARY)
public class ContentFrameLayout extends FrameLayout {

    // Callback interface letting the owner observe window attach/detach.
    public interface OnAttachListener {
        void onDetachedFromWindow();
        void onAttachedFromWindow();
    }

    // Window size attributes; lazily created by the getters below and
    // populated by the caller (hence no internal initialization here).
    private TypedValue mMinWidthMajor;
    private TypedValue mMinWidthMinor;
    private TypedValue mFixedWidthMajor;
    private TypedValue mFixedWidthMinor;
    private TypedValue mFixedHeightMajor;
    private TypedValue mFixedHeightMinor;

    // Padding of the window decor view; subtracted from fixed/min sizes.
    private final Rect mDecorPadding;

    private OnAttachListener mAttachListener;

    public ContentFrameLayout(@NonNull Context context) {
        this(context, null);
    }

    public ContentFrameLayout(@NonNull Context context, @Nullable AttributeSet attrs) {
        this(context, attrs, 0);
    }

    public ContentFrameLayout(
            @NonNull Context context, @Nullable AttributeSet attrs, int defStyleAttr) {
        super(context, attrs, defStyleAttr);
        mDecorPadding = new Rect();
    }

    /**
     * Forwards system window insets into the protected fitSystemWindows().
     *
     * @hide
     */
    @RestrictTo(LIBRARY)
    public void dispatchFitSystemWindows(Rect insets) {
        fitSystemWindows(insets);
    }

    public void setAttachListener(OnAttachListener attachListener) {
        mAttachListener = attachListener;
    }

    /**
     * Notify this view of the window decor view's padding. We use these values when working out
     * our size for the window size attributes.
     *
     * @hide
     */
    @RestrictTo(LIBRARY)
    public void setDecorPadding(int left, int top, int right, int bottom) {
        mDecorPadding.set(left, top, right, bottom);
        // Only relayout if we've already been laid out once.
        if (ViewCompat.isLaidOut(this)) {
            requestLayout();
        }
    }

    @Override
    protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
        final DisplayMetrics metrics = getContext().getResources().getDisplayMetrics();
        // "Minor" axis is the screen's shorter dimension, "major" the longer:
        // in portrait, width is minor and height is major.
        final boolean isPortrait = metrics.widthPixels < metrics.heightPixels;

        final int widthMode = getMode(widthMeasureSpec);
        final int heightMode = getMode(heightMeasureSpec);

        boolean fixedWidth = false;
        if (widthMode == AT_MOST) {
            // Apply a fixed width (dimension or fraction of screen width),
            // minus the horizontal decor padding, capped at the given size.
            final TypedValue tvw = isPortrait ? mFixedWidthMinor : mFixedWidthMajor;
            if (tvw != null && tvw.type != TypedValue.TYPE_NULL) {
                int w = 0;
                if (tvw.type == TypedValue.TYPE_DIMENSION) {
                    w = (int) tvw.getDimension(metrics);
                } else if (tvw.type == TypedValue.TYPE_FRACTION) {
                    w = (int) tvw.getFraction(metrics.widthPixels, metrics.widthPixels);
                }
                if (w > 0) {
                    w -= (mDecorPadding.left + mDecorPadding.right);
                    final int widthSize = MeasureSpec.getSize(widthMeasureSpec);
                    widthMeasureSpec = MeasureSpec.makeMeasureSpec(
                            Math.min(w, widthSize), EXACTLY);
                    // Remember so the min-width pass below is skipped.
                    fixedWidth = true;
                }
            }
        }

        if (heightMode == AT_MOST) {
            // Same as above for a fixed height; height is the major axis in
            // portrait, minor in landscape.
            final TypedValue tvh = isPortrait ? mFixedHeightMajor : mFixedHeightMinor;
            if (tvh != null && tvh.type != TypedValue.TYPE_NULL) {
                int h = 0;
                if (tvh.type == TypedValue.TYPE_DIMENSION) {
                    h = (int) tvh.getDimension(metrics);
                } else if (tvh.type == TypedValue.TYPE_FRACTION) {
                    h = (int) tvh.getFraction(metrics.heightPixels, metrics.heightPixels);
                }
                if (h > 0) {
                    h -= (mDecorPadding.top + mDecorPadding.bottom);
                    final int heightSize = MeasureSpec.getSize(heightMeasureSpec);
                    heightMeasureSpec = MeasureSpec.makeMeasureSpec(
                            Math.min(h, heightSize), EXACTLY);
                }
            }
        }

        // First measurement pass with the (possibly fixed) specs.
        super.onMeasure(widthMeasureSpec, heightMeasureSpec);

        int width = getMeasuredWidth();
        boolean measure = false;

        widthMeasureSpec = MeasureSpec.makeMeasureSpec(width, EXACTLY);

        if (!fixedWidth && widthMode == AT_MOST) {
            // Enforce a minimum width when no fixed width applied; triggers
            // a second measurement pass only if we're below the minimum.
            final TypedValue tv = isPortrait ? mMinWidthMinor : mMinWidthMajor;
            if (tv != null && tv.type != TypedValue.TYPE_NULL) {
                int min = 0;
                if (tv.type == TypedValue.TYPE_DIMENSION) {
                    min = (int) tv.getDimension(metrics);
                } else if (tv.type == TypedValue.TYPE_FRACTION) {
                    min = (int) tv.getFraction(metrics.widthPixels, metrics.widthPixels);
                }
                if (min > 0) {
                    min -= (mDecorPadding.left + mDecorPadding.right);
                }
                if (width < min) {
                    widthMeasureSpec = MeasureSpec.makeMeasureSpec(min, EXACTLY);
                    measure = true;
                }
            }
        }

        if (measure) {
            super.onMeasure(widthMeasureSpec, heightMeasureSpec);
        }
    }

    // Lazy accessors: callers fill the returned TypedValue in place.
    public TypedValue getMinWidthMajor() {
        if (mMinWidthMajor == null) mMinWidthMajor = new TypedValue();
        return mMinWidthMajor;
    }

    public TypedValue getMinWidthMinor() {
        if (mMinWidthMinor == null) mMinWidthMinor = new TypedValue();
        return mMinWidthMinor;
    }

    public TypedValue getFixedWidthMajor() {
        if (mFixedWidthMajor == null) mFixedWidthMajor = new TypedValue();
        return mFixedWidthMajor;
    }

    public TypedValue getFixedWidthMinor() {
        if (mFixedWidthMinor == null) mFixedWidthMinor = new TypedValue();
        return mFixedWidthMinor;
    }

    public TypedValue getFixedHeightMajor() {
        if (mFixedHeightMajor == null) mFixedHeightMajor = new TypedValue();
        return mFixedHeightMajor;
    }

    public TypedValue getFixedHeightMinor() {
        if (mFixedHeightMinor == null) mFixedHeightMinor = new TypedValue();
        return mFixedHeightMinor;
    }

    @Override
    protected void onAttachedToWindow() {
        super.onAttachedToWindow();
        if (mAttachListener != null) {
            mAttachListener.onAttachedFromWindow();
        }
    }

    @Override
    protected void onDetachedFromWindow() {
        super.onDetachedFromWindow();
        if (mAttachListener != null) {
            mAttachListener.onDetachedFromWindow();
        }
    }
}
AndroidX/androidx
appcompat/appcompat/src/main/java/androidx/appcompat/widget/ContentFrameLayout.java
Java
apache-2.0
7,578
/* Copyright (c) 2007 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/

using System;
using System.Text;
using System.Xml;
using Google.GData.Client;
using Google.GData.Extensions;
using Google.GData.Extensions.Apps;

namespace Google.GData.Apps
{
    /// <summary>
    /// Feed API customization class for defining user account feed.
    /// Registers the Google Apps provisioning extensions at construction
    /// time so entries in this feed can carry provisioning elements.
    /// </summary>
    public class UserFeed : AbstractFeed
    {
        /// <summary>
        /// Constructor. Also installs the provisioning extension factories
        /// on this feed via GAppsExtensions.
        /// </summary>
        /// <param name="uriBase">The uri for the user account feed.</param>
        /// <param name="iService">The user account service.</param>
        public UserFeed(Uri uriBase, IService iService)
            : base(uriBase, iService)
        {
            GAppsExtensions.AddProvisioningExtensions(this);
        }

        /// <summary>
        /// Overridden. Returns a new <code>UserEntry</code> so the base
        /// feed parser materializes entries with the correct type.
        /// </summary>
        /// <returns>the new <code>UserEntry</code></returns>
        public override AtomEntry CreateFeedEntry()
        {
            return new UserEntry();
        }
    }
}
michael-jia-sage/libgoogle
src/gapps/userfeed.cs
C#
apache-2.0
1,602
/**
 Licensed to the Apache Software Foundation (ASF) under one or more
 contributor license agreements.  See the NOTICE file distributed with
 this work for additional information regarding copyright ownership.
 The ASF licenses this file to You under the Apache License, Version 2.0
 (the "License"); you may not use this file except in compliance with
 the License.  You may obtain a copy of the License at

 http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 */

package com.thilinamb.flume.sink.util;

import org.apache.zookeeper.server.ServerConfig;
import org.apache.zookeeper.server.ZooKeeperServerMain;
import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.Properties;

/**
 * A local Zookeeper server for running unit tests.
 * Reference: https://gist.github.com/fjavieralba/7930018/
 */
public class ZooKeeperLocal {

    private static final Logger logger = LoggerFactory.getLogger(ZooKeeperLocal.class);

    // The embedded server instance; started on a background thread below.
    private ZooKeeperServerMain zooKeeperServer;

    /**
     * Parses the given ZooKeeper properties and starts a standalone server
     * on a new (non-daemon) background thread. The constructor returns
     * immediately; it does not wait for the server to finish binding.
     *
     * @param zkProperties standard ZooKeeper configuration properties
     * @throws IOException declared for config-read failures
     *         (property parse errors are wrapped in RuntimeException)
     */
    public ZooKeeperLocal(Properties zkProperties) throws IOException{
        QuorumPeerConfig quorumConfiguration = new QuorumPeerConfig();
        try {
            quorumConfiguration.parseProperties(zkProperties);
        } catch(Exception e) {
            // Invalid test configuration is a programming error: fail fast.
            throw new RuntimeException(e);
        }

        zooKeeperServer = new ZooKeeperServerMain();
        final ServerConfig configuration = new ServerConfig();
        configuration.readFrom(quorumConfiguration);

        new Thread() {
            public void run() {
                try {
                    // Blocks for the lifetime of the server, hence the thread.
                    zooKeeperServer.runFromConfig(configuration);
                } catch (IOException e) {
                    logger.error("Zookeeper startup failed.", e);
                }
            }
        }.start();
    }
}
tempbottle/flume-ng-kafka-sink
impl/src/test/java/com.thilinamb.flume.sink/util/ZooKeeperLocal.java
Java
apache-2.0
2,197
<?php
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: google/cloud/billing/budgets/v1/budget_model.proto

namespace Google\Cloud\Billing\Budgets\V1;

use Google\Protobuf\Internal\GPBType;
use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;

/**
 * Describes a budget amount targeted to the last
 * [Filter.calendar_period][google.cloud.billing.budgets.v1.Filter.calendar_period]
 * spend. At this time, the amount is automatically 100% of the last calendar
 * period's spend; that is, there are no other options yet.
 * Future configuration options will be described here (for example, configuring
 * a percentage of last period's spend).
 * LastPeriodAmount cannot be set for a budget configured with
 * a
 * [Filter.custom_period][google.cloud.billing.budgets.v1.Filter.custom_period].
 *
 * Generated from protobuf message <code>google.cloud.billing.budgets.v1.LastPeriodAmount</code>
 */
class LastPeriodAmount extends \Google\Protobuf\Internal\Message
{
    // No fields: this message is currently an empty marker type
    // (its presence alone selects last-period budgeting).

    /**
     * Constructor.
     *
     * @param array $data {
     *     Optional. Data for populating the Message object.
     *
     * }
     */
    public function __construct($data = NULL) {
        \GPBMetadata\Google\Cloud\Billing\Budgets\V1\BudgetModel::initOnce();
        parent::__construct($data);
    }

}
googleapis/google-cloud-php-billing-budgets
src/V1/LastPeriodAmount.php
PHP
apache-2.0
1,331
/*
 * Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you
 * may not use this file except in compliance with the License. You
 * may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License. See accompanying
 * LICENSE file.
 */
package com.gemstone.gemfire.admin.internal;

import com.gemstone.gemfire.admin.*;
import com.gemstone.gemfire.distributed.internal.DM;
import com.gemstone.gemfire.distributed.internal.DistributionManager;
import com.gemstone.gemfire.internal.admin.GemFireVM;
import com.gemstone.gemfire.internal.admin.remote.RemoteApplicationVM;
import com.gemstone.gemfire.internal.i18n.LocalizedStrings;

/**
 * Implements the administrative interface to a cache server.
 *
 * @author David Whitlock
 * @since 3.5
 */
public class CacheServerImpl extends ManagedSystemMemberImpl
  implements CacheVm, CacheServer {

  // Launcher executable names; which one is used depends on the
  // "isSqlFire" system property (gfxd for SQLFire, cacheserver for GemFire).
  private static final String GFXD_LAUNCHER_NAME = "gfxd";
  private static final String GFE_LAUNCHER_NAME = "cacheserver";
  private static final boolean isSqlFire = Boolean.getBoolean("isSqlFire");

  /** How many new <code>CacheServer</code>s have been created? */
  private static int newCacheServers = 0;

  ///////////////////////  Instance Fields  ///////////////////////

  /** The configuration object for this cache server */
  private final CacheServerConfigImpl config;

  /////////////////////////  Constructors  ////////////////////////

  /**
   * Creates a new <code>CacheServerImpl</code> that represents a
   * non-existsing (unstarted) cache server in a given distributed
   * system.
   */
  public CacheServerImpl(AdminDistributedSystemImpl system,
                         CacheVmConfig config)
    throws AdminException {

    super(system, config);
    this.config = (CacheServerConfigImpl) config;
    // Back-reference so the config can track this managed entity.
    this.config.setManagedEntity(this);
  }

  /**
   * Creates a new <code>CacheServerImpl</code> that represents an
   * existing dedicated cache server in a given distributed system.
   */
  public CacheServerImpl(AdminDistributedSystemImpl system,
                         GemFireVM vm)
    throws AdminException {

    super(system, vm);
    this.config = new CacheServerConfigImpl(vm);
  }

  //////////////////////  Instance Methods  //////////////////////

  @Override
  public SystemMemberType getType() {
    return SystemMemberType.CACHE_VM;
  }

  /**
   * Returns a new unique id of the form "CacheVmN". Synchronized on the
   * class because the counter is static and shared across instances.
   */
  public String getNewId() {
    synchronized (CacheServerImpl.class) {
      return "CacheVm" + (++newCacheServers);
    }
  }

  /**
   * Starts this cache server via the managed-entity controller.
   * No-op if the entity does not need starting.
   */
  public void start() throws AdminException {
    if (!needToStart()) {
      return;
    }

    // Validate before launching so misconfiguration fails early.
    this.config.validate();
    this.controller.start(this);
    this.config.setManagedEntity(this);
  }

  /**
   * Stops this cache server via the managed-entity controller.
   * No-op if the entity does not need stopping.
   */
  public void stop() {
    if (!needToStop()) {
      return;
    }

    this.controller.stop(this);
    // NOTE: DistributedSystem nodeLeft will then set this.manager to null
    this.config.setManagedEntity(null);
  }

  /**
   * Returns whether this server is running. If a distribution manager is
   * available, membership is checked directly; otherwise falls back to
   * asking the controller (which may throw IllegalStateException,
   * treated here as "not running").
   */
  public boolean isRunning() {
    DM dm = ((AdminDistributedSystemImpl)getDistributedSystem()).getDistributionManager();
    if(dm == null) {
      try {
        return this.controller.isRunning(this);
      }
      catch (IllegalStateException e) {
        return false;
      }
    }
    return ((DistributionManager)dm).getDistributionManagerIdsIncludingAdmin().contains(getDistributedMember());
  }

  public CacheServerConfig getConfig() {
    return this.config;
  }

  public CacheVmConfig getVmConfig() {
    return this.config;
  }

  ////////////////////////  Command execution  ////////////////////////

  public ManagedEntityConfig getEntityConfig() {
    return this.getConfig();
  }

  public String getEntityType() {
    // Fix bug 32564
    return "Cache Vm";
  }

  /**
   * Builds the shell command used to start this server: launcher
   * executable, working directory, optional cache XML file, optional
   * classpath, plus configuration appended by appendConfiguration().
   */
  public String getStartCommand() {
    StringBuffer sb = new StringBuffer();

    if (!isSqlFire) {
      sb.append(this.controller.getProductExecutable(this, GFE_LAUNCHER_NAME));
      sb.append(" start -dir=");
    }
    else {
      sb.append(this.controller.getProductExecutable(this, GFXD_LAUNCHER_NAME));
      sb.append(" server start -dir=");
    }
    sb.append(this.getConfig().getWorkingDirectory());

    String file = this.getConfig().getCacheXMLFile();
    if (file != null && file.length() > 0) {
      sb.append(" ");
      sb.append(com.gemstone.gemfire.distributed.internal.DistributionConfig.CACHE_XML_FILE_NAME);
      sb.append("=");
      sb.append(file);
    }

    String classpath = this.getConfig().getClassPath();
    if (classpath != null && classpath.length() > 0) {
      sb.append(" -classpath=");
      sb.append(classpath);
    }

    appendConfiguration(sb);

    return sb.toString().trim();
  }

  /** Builds the shell command used to stop this server. */
  public String getStopCommand() {
    StringBuffer sb = new StringBuffer();
    if (!isSqlFire) {
      sb.append(this.controller.getProductExecutable(this, GFE_LAUNCHER_NAME));
      sb.append(" stop -dir=");
    }
    else {
      sb.append(this.controller.getProductExecutable(this, GFXD_LAUNCHER_NAME));
      sb.append(" server stop -dir=");
    }
    sb.append(this.getConfig().getWorkingDirectory());

    return sb.toString().trim();
  }

  /** Builds the shell command used to query this server's status. */
  public String getIsRunningCommand() {
    StringBuffer sb = new StringBuffer();
    if (!isSqlFire) {
      sb.append(this.controller.getProductExecutable(this, GFE_LAUNCHER_NAME));
      sb.append(" status -dir=");
    }
    else {
      sb.append(this.controller.getProductExecutable(this, GFXD_LAUNCHER_NAME));
      sb.append(" server status -dir=");
    }
    sb.append(this.getConfig().getWorkingDirectory());

    return sb.toString().trim();
  }

  /**
   * Find whether this server is primary for given client (durableClientId)
   *
   * @param durableClientId -
   *                durable-id of the client
   * @return true if the server is primary for given client
   *
   * @since 5.6
   */
  public boolean isPrimaryForDurableClient(String durableClientId)
  {
    RemoteApplicationVM vm = (RemoteApplicationVM)this.getGemFireVM();
    boolean isPrimary = false;
    if (vm != null) {
      // Delegates to the remote VM; false when the VM is unreachable/null.
      isPrimary = vm.isPrimaryForDurableClient(durableClientId);
    }
    return isPrimary;
  }
}
papicella/snappy-store
gemfire-core/src/main/java/com/gemstone/gemfire/admin/internal/CacheServerImpl.java
Java
apache-2.0
6,469
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.isis.viewer.restfulobjects.tck.domainobject.oid.collection; import org.jboss.resteasy.client.core.executors.URLConnectionClientExecutor; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.apache.isis.viewer.restfulobjects.applib.RestfulHttpMethod; import org.apache.isis.viewer.restfulobjects.applib.client.RestfulClient; import org.apache.isis.viewer.restfulobjects.applib.client.RestfulRequest; import org.apache.isis.viewer.restfulobjects.applib.client.RestfulResponse; import org.apache.isis.viewer.restfulobjects.applib.version.VersionRepresentation; import org.apache.isis.viewer.restfulobjects.tck.IsisWebServerRule; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertThat; public class Get_whenDoesntExistOid_then_404 { @Rule public IsisWebServerRule webServerRule = new IsisWebServerRule(); private RestfulClient client; @Before public void setUp() throws Exception { client = webServerRule.getClient(new URLConnectionClientExecutor()); } @Test public void returns404() throws Exception { givenWhenThen("73", RestfulResponse.HttpStatusCode.OK); givenWhenThen("nonExistentOid", RestfulResponse.HttpStatusCode.NOT_FOUND); } 
private void givenWhenThen(String oid, RestfulResponse.HttpStatusCode statusCode1) { // given RestfulRequest request = client.createRequest(RestfulHttpMethod.GET, "objects/BSRL/" + oid + "/collections/visibleAndEditableCollection"); // when RestfulResponse<VersionRepresentation> restfulResponse = request.executeT(); assertThat(restfulResponse.getStatus(), is(statusCode1)); } }
peridotperiod/isis
tck/tck-viewer-restfulobjects/src/test/java/org/apache/isis/viewer/restfulobjects/tck/domainobject/oid/collection/Get_whenDoesntExistOid_then_404.java
Java
apache-2.0
2,570
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.trino.tests.product.cassandra; import com.datastax.driver.core.utils.Bytes; import io.airlift.units.Duration; import io.trino.jdbc.Row; import io.trino.tempto.ProductTest; import io.trino.tempto.Requirement; import io.trino.tempto.RequirementsProvider; import io.trino.tempto.configuration.Configuration; import io.trino.tempto.internal.query.CassandraQueryExecutor; import io.trino.tempto.query.QueryResult; import org.assertj.core.api.Assertions; import org.testng.annotations.Test; import java.sql.Date; import java.sql.Timestamp; import java.time.OffsetDateTime; import java.time.ZoneOffset; import java.util.function.Consumer; import static io.trino.tempto.Requirements.compose; import static io.trino.tempto.assertions.QueryAssert.Row.row; import static io.trino.tempto.assertions.QueryAssert.assertThat; import static io.trino.tempto.fulfillment.table.TableRequirements.immutableTable; import static io.trino.tempto.query.QueryExecutor.query; import static io.trino.tests.product.TestGroups.CASSANDRA; import static io.trino.tests.product.TestGroups.PROFILE_SPECIFIC_TESTS; import static io.trino.tests.product.TpchTableResults.PRESTO_NATION_RESULT; import static io.trino.tests.product.cassandra.CassandraTpchTableDefinitions.CASSANDRA_NATION; import static io.trino.tests.product.cassandra.CassandraTpchTableDefinitions.CASSANDRA_SUPPLIER; import static io.trino.tests.product.cassandra.DataTypesTableDefinition.CASSANDRA_ALL_TYPES; 
import static io.trino.tests.product.cassandra.TestConstants.CONNECTOR_NAME; import static io.trino.tests.product.cassandra.TestConstants.KEY_SPACE; import static io.trino.tests.product.utils.QueryAssertions.assertContainsEventually; import static io.trino.tests.product.utils.QueryExecutors.onTrino; import static java.lang.String.format; import static java.nio.charset.StandardCharsets.UTF_8; import static java.sql.JDBCType.BIGINT; import static java.sql.JDBCType.BOOLEAN; import static java.sql.JDBCType.DATE; import static java.sql.JDBCType.DOUBLE; import static java.sql.JDBCType.INTEGER; import static java.sql.JDBCType.REAL; import static java.sql.JDBCType.SMALLINT; import static java.sql.JDBCType.TIMESTAMP_WITH_TIMEZONE; import static java.sql.JDBCType.TINYINT; import static java.sql.JDBCType.VARBINARY; import static java.sql.JDBCType.VARCHAR; import static java.util.concurrent.TimeUnit.MINUTES; public class TestSelect extends ProductTest implements RequirementsProvider { private Configuration configuration; @Override public Requirement getRequirements(Configuration configuration) { this.configuration = configuration; return compose( immutableTable(CASSANDRA_NATION), immutableTable(CASSANDRA_SUPPLIER), immutableTable(CASSANDRA_ALL_TYPES)); } @Test(groups = {CASSANDRA, PROFILE_SPECIFIC_TESTS}) public void testSelectNation() { String sql = format( "SELECT n_nationkey, n_name, n_regionkey, n_comment FROM %s.%s.%s", CONNECTOR_NAME, KEY_SPACE, CASSANDRA_NATION.getName()); QueryResult queryResult = onTrino() .executeQuery(sql); assertThat(queryResult).matches(PRESTO_NATION_RESULT); } @Test(groups = {CASSANDRA, PROFILE_SPECIFIC_TESTS}) public void testSelectWithEqualityFilterOnPartitioningKey() { String sql = format( "SELECT n_nationkey FROM %s.%s.%s WHERE n_nationkey = 0", CONNECTOR_NAME, KEY_SPACE, CASSANDRA_NATION.getName()); QueryResult queryResult = onTrino() .executeQuery(sql); assertThat(queryResult).containsOnly(row(0)); } @Test(groups = {CASSANDRA, 
PROFILE_SPECIFIC_TESTS}) public void testSelectWithFilterOnPartitioningKey() { String sql = format( "SELECT n_nationkey FROM %s.%s.%s WHERE n_nationkey > 23", CONNECTOR_NAME, KEY_SPACE, CASSANDRA_NATION.getName()); QueryResult queryResult = onTrino() .executeQuery(sql); assertThat(queryResult).containsOnly(row(24)); } @Test(groups = {CASSANDRA, PROFILE_SPECIFIC_TESTS}) public void testSelectWithEqualityFilterOnNonPartitioningKey() { String sql = format( "SELECT n_name FROM %s.%s.%s WHERE n_name = 'UNITED STATES'", CONNECTOR_NAME, KEY_SPACE, CASSANDRA_NATION.getName()); QueryResult queryResult = onTrino() .executeQuery(sql); assertThat(queryResult).containsOnly(row("UNITED STATES")); } @Test(groups = {CASSANDRA, PROFILE_SPECIFIC_TESTS}) public void testSelectWithNonEqualityFilterOnNonPartitioningKey() { String sql = format( "SELECT n_name FROM %s.%s.%s WHERE n_name < 'B'", CONNECTOR_NAME, KEY_SPACE, CASSANDRA_NATION.getName()); QueryResult queryResult = onTrino() .executeQuery(sql); assertThat(queryResult).containsOnly(row("ALGERIA"), row("ARGENTINA")); } @Test(groups = {CASSANDRA, PROFILE_SPECIFIC_TESTS}) public void testSelectWithMorePartitioningKeysThanLimit() { String sql = format( "SELECT s_suppkey FROM %s.%s.%s WHERE s_suppkey = 10", CONNECTOR_NAME, KEY_SPACE, CASSANDRA_SUPPLIER.getName()); QueryResult queryResult = onTrino() .executeQuery(sql); assertThat(queryResult).containsOnly(row(10)); } @Test(groups = {CASSANDRA, PROFILE_SPECIFIC_TESTS}) public void testSelectWithMorePartitioningKeysThanLimitNonPK() { String sql = format( "SELECT s_suppkey FROM %s.%s.%s WHERE s_name = 'Supplier#000000010'", CONNECTOR_NAME, KEY_SPACE, CASSANDRA_SUPPLIER.getName()); QueryResult queryResult = onTrino() .executeQuery(sql); assertThat(queryResult).containsOnly(row(10)); } @Test(groups = {CASSANDRA, PROFILE_SPECIFIC_TESTS}) public void testAllDataTypes() { // NOTE: DECIMAL is treated like DOUBLE QueryResult query = query(format( "SELECT a, b, bl, bo, d, do, dt, f, fr, i, 
integer, l, m, s, si, t, ti, ts, tu, u, v, vari FROM %s.%s.%s", CONNECTOR_NAME, KEY_SPACE, CASSANDRA_ALL_TYPES.getName())); assertThat(query) .hasColumns(VARCHAR, BIGINT, VARBINARY, BOOLEAN, DOUBLE, DOUBLE, DATE, REAL, VARCHAR, VARCHAR, INTEGER, VARCHAR, VARCHAR, VARCHAR, SMALLINT, VARCHAR, TINYINT, TIMESTAMP_WITH_TIMEZONE, VARCHAR, VARCHAR, VARCHAR, VARCHAR) .containsOnly( row("\0", Long.MIN_VALUE, Bytes.fromHexString("0x00").array(), false, 0f, Double.MIN_VALUE, Date.valueOf("1970-01-02"), Float.MIN_VALUE, "[0]", "0.0.0.0", Integer.MIN_VALUE, "[0]", "{\"\\u0000\":-2147483648,\"a\":0}", "[0]", Short.MIN_VALUE, "\0", Byte.MIN_VALUE, Timestamp.from(OffsetDateTime.of(1970, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC).toInstant()), "d2177dd0-eaa2-11de-a572-001b779c76e3", "01234567-0123-0123-0123-0123456789ab", "\0", String.valueOf(Long.MIN_VALUE)), row("the quick brown fox jumped over the lazy dog", 9223372036854775807L, "01234".getBytes(UTF_8), true, Double.valueOf("99999999999999999999999999999999999999"), Double.MAX_VALUE, Date.valueOf("9999-12-31"), Float.MAX_VALUE, "[4,5,6,7]", "255.255.255.255", Integer.MAX_VALUE, "[4,5,6]", "{\"a\":1,\"b\":2}", "[4,5,6]", Short.MAX_VALUE, "this is a text value", Byte.MAX_VALUE, Timestamp.from(OffsetDateTime.of(9999, 12, 31, 23, 59, 59, 0, ZoneOffset.UTC).toInstant()), "d2177dd0-eaa2-11de-a572-001b779c76e3", "01234567-0123-0123-0123-0123456789ab", "abc", String.valueOf(Long.MAX_VALUE)), row("def", null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null)); } @Test(groups = {CASSANDRA, PROFILE_SPECIFIC_TESTS}) public void testNationJoinNation() { String tableName = format("%s.%s.%s", CONNECTOR_NAME, KEY_SPACE, CASSANDRA_NATION.getName()); String sql = format( "SELECT n1.n_name, n2.n_regionkey FROM %s n1 JOIN " + "%s n2 ON n1.n_nationkey = n2.n_regionkey " + "WHERE n1.n_nationkey=3", tableName, tableName); QueryResult queryResult = onTrino() .executeQuery(sql); 
assertThat(queryResult).containsOnly( row("CANADA", 3), row("CANADA", 3), row("CANADA", 3), row("CANADA", 3), row("CANADA", 3)); } @Test(groups = {CASSANDRA, PROFILE_SPECIFIC_TESTS}) public void testNationJoinRegion() { String sql = format( "SELECT c.n_name, t.name FROM %s.%s.%s c JOIN " + "tpch.tiny.region t ON c.n_regionkey = t.regionkey " + "WHERE c.n_nationkey=3", CONNECTOR_NAME, KEY_SPACE, CASSANDRA_NATION.getName()); QueryResult queryResult = onTrino() .executeQuery(sql); assertThat(queryResult).containsOnly(row("CANADA", "AMERICA")); } @Test(groups = {CASSANDRA, PROFILE_SPECIFIC_TESTS}) public void testSelectAllTypePartitioningMaterializedView() { String materializedViewName = format("%s_partitioned_mv", CASSANDRA_ALL_TYPES.getName()); onCassandra(format("DROP MATERIALIZED VIEW IF EXISTS %s.%s", KEY_SPACE, materializedViewName)); onCassandra(format("CREATE MATERIALIZED VIEW %s.%s AS SELECT * FROM %s.%s WHERE b IS NOT NULL PRIMARY KEY (a, b)", KEY_SPACE, materializedViewName, KEY_SPACE, CASSANDRA_ALL_TYPES.getName())); assertContainsEventually(() -> query(format("SHOW TABLES FROM %s.%s", CONNECTOR_NAME, KEY_SPACE)), query(format("SELECT '%s'", materializedViewName)), new Duration(1, MINUTES)); // Materialized view may not return all results during the creation assertContainsEventually(() -> query(format("SELECT status_replicated FROM %s.system.built_views WHERE view_name = '%s'", CONNECTOR_NAME, materializedViewName)), query("SELECT true"), new Duration(1, MINUTES)); QueryResult query = query(format( "SELECT a, b, bl, bo, d, do, dt, f, fr, i, integer, l, m, s, si, t, ti, ts, tu, u, v, vari FROM %s.%s.%s WHERE a = '\0'", CONNECTOR_NAME, KEY_SPACE, materializedViewName)); assertThat(query) .hasColumns(VARCHAR, BIGINT, VARBINARY, BOOLEAN, DOUBLE, DOUBLE, DATE, REAL, VARCHAR, VARCHAR, INTEGER, VARCHAR, VARCHAR, VARCHAR, SMALLINT, VARCHAR, TINYINT, TIMESTAMP_WITH_TIMEZONE, VARCHAR, VARCHAR, VARCHAR, VARCHAR) .containsOnly( row("\0", Long.MIN_VALUE, 
Bytes.fromHexString("0x00").array(), false, 0f, Double.MIN_VALUE, Date.valueOf("1970-01-02"), Float.MIN_VALUE, "[0]", "0.0.0.0", Integer.MIN_VALUE, "[0]", "{\"\\u0000\":-2147483648,\"a\":0}", "[0]", Short.MIN_VALUE, "\0", Byte.MIN_VALUE, Timestamp.from(OffsetDateTime.of(1970, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC).toInstant()), "d2177dd0-eaa2-11de-a572-001b779c76e3", "01234567-0123-0123-0123-0123456789ab", "\0", String.valueOf(Long.MIN_VALUE))); onCassandra(format("DROP MATERIALIZED VIEW IF EXISTS %s.%s", KEY_SPACE, materializedViewName)); } @Test(groups = {CASSANDRA, PROFILE_SPECIFIC_TESTS}) public void testSelectClusteringMaterializedView() { String mvName = "clustering_mv"; onCassandra(format("DROP MATERIALIZED VIEW IF EXISTS %s.%s", KEY_SPACE, mvName)); onCassandra(format("CREATE MATERIALIZED VIEW %s.%s AS " + "SELECT * FROM %s.%s " + "WHERE s_nationkey IS NOT NULL " + "PRIMARY KEY (s_nationkey, s_suppkey) " + "WITH CLUSTERING ORDER BY (s_nationkey DESC)", KEY_SPACE, mvName, KEY_SPACE, CASSANDRA_SUPPLIER.getName())); assertContainsEventually(() -> query(format("SHOW TABLES FROM %s.%s", CONNECTOR_NAME, KEY_SPACE)), query(format("SELECT '%s'", mvName)), new Duration(1, MINUTES)); // Materialized view may not return all results during the creation assertContainsEventually(() -> query(format("SELECT status_replicated FROM %s.system.built_views WHERE view_name = '%s'", CONNECTOR_NAME, mvName)), query("SELECT true"), new Duration(1, MINUTES)); QueryResult aggregateQueryResult = onTrino() .executeQuery(format( "SELECT MAX(s_nationkey), SUM(s_suppkey), AVG(s_acctbal) " + "FROM %s.%s.%s WHERE s_suppkey BETWEEN 1 AND 10 ", CONNECTOR_NAME, KEY_SPACE, mvName)); assertThat(aggregateQueryResult).containsOnly( row(24, 55, 4334.653)); QueryResult orderedResult = onTrino() .executeQuery(format( "SELECT s_nationkey, s_suppkey, s_acctbal " + "FROM %s.%s.%s WHERE s_nationkey = 1 LIMIT 1", CONNECTOR_NAME, KEY_SPACE, mvName)); assertThat(orderedResult).containsOnly( row(1, 3, 4192.4)); 
onCassandra(format("DROP MATERIALIZED VIEW IF EXISTS %s.%s", KEY_SPACE, mvName)); } @Test(groups = {CASSANDRA, PROFILE_SPECIFIC_TESTS}) public void testProtocolVersion() { QueryResult queryResult = onTrino() .executeQuery(format("SELECT native_protocol_version FROM %s.system.local", CONNECTOR_NAME)); assertThat(queryResult).containsOnly(row("4")); } @Test(groups = {CASSANDRA, PROFILE_SPECIFIC_TESTS}) public void testSelectTupleType() { String tableName = "select_tuple_table"; onCassandra(format("DROP TABLE IF EXISTS %s.%s", KEY_SPACE, tableName)); onCassandra(format("CREATE TABLE %s.%s (key int, value frozen<tuple<int, text, float>>, PRIMARY KEY (key))", KEY_SPACE, tableName)); onCassandra(format("INSERT INTO %s.%s (key, value) VALUES(1, (1, 'text-1', 1.11))", KEY_SPACE, tableName)); QueryResult queryResult = onTrino().executeQuery( format("SELECT * FROM %s.%s.%s", CONNECTOR_NAME, KEY_SPACE, tableName)); assertThat(queryResult).hasRowsCount(1); Assertions.assertThat(queryResult.row(0).get(0)).isEqualTo(1); Assertions.assertThat(queryResult.row(0).get(1)).isEqualTo(Row.builder() .addUnnamedField(1) .addUnnamedField("text-1") .addUnnamedField(1.11f) .build()); onCassandra(format("DROP TABLE IF EXISTS %s.%s", KEY_SPACE, tableName)); } @Test(groups = {CASSANDRA, PROFILE_SPECIFIC_TESTS}) public void testSelectTupleTypeInPrimaryKey() { String tableName = "select_tuple_in_primary_key_table"; onCassandra(format("DROP TABLE IF EXISTS %s.%s", KEY_SPACE, tableName)); onCassandra(format("CREATE TABLE %s.%s (intkey int, tuplekey frozen<tuple<int, text, float>>, PRIMARY KEY (intkey, tuplekey))", KEY_SPACE, tableName)); onCassandra(format("INSERT INTO %s.%s (intkey, tuplekey) VALUES(1, (1, 'text-1', 1.11))", KEY_SPACE, tableName)); Consumer<QueryResult> assertion = queryResult -> { assertThat(queryResult).hasRowsCount(1); Assertions.assertThat(queryResult.row(0).get(0)).isEqualTo(1); Assertions.assertThat(queryResult.row(0).get(1)).isEqualTo(Row.builder() .addUnnamedField(1) 
.addUnnamedField("text-1") .addUnnamedField(1.11f) .build()); }; assertion.accept(onTrino().executeQuery(format("SELECT * FROM %s.%s.%s", CONNECTOR_NAME, KEY_SPACE, tableName))); assertion.accept(onTrino().executeQuery(format("SELECT * FROM %s.%s.%s WHERE intkey = 1 and tuplekey = row(1, 'text-1', 1.11)", CONNECTOR_NAME, KEY_SPACE, tableName))); onCassandra(format("DROP TABLE IF EXISTS %s.%s", KEY_SPACE, tableName)); } private void onCassandra(String query) { try (CassandraQueryExecutor queryExecutor = new CassandraQueryExecutor(configuration)) { queryExecutor.executeQuery(query); } } }
dain/presto
testing/trino-product-tests/src/main/java/io/trino/tests/product/cassandra/TestSelect.java
Java
apache-2.0
19,572
// Copyright John Maddock 2006. // Copyright Paul A. Bristow 2007, 2009 // Use, modification and distribution are subject to the // Boost Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include <boost/math/concepts/real_concept.hpp> #define BOOST_TEST_MAIN #include <boost/test/unit_test.hpp> #include <boost/test/floating_point_comparison.hpp> #include <boost/math/special_functions/math_fwd.hpp> #include <boost/math/tools/stats.hpp> #include <boost/math/tools/test.hpp> #include <boost/math/constants/constants.hpp> #include <boost/type_traits/is_floating_point.hpp> #include <boost/array.hpp> #include "functor.hpp" #include "test_beta_hooks.hpp" #include "handle_test_result.hpp" #include "table_type.hpp" #ifndef SC_ #define SC_(x) static_cast<typename table_type<T>::type>(BOOST_JOIN(x, L)) #endif template <class Real, class T> void test_inverses(const T& data) { using namespace std; typedef typename T::value_type row_type; typedef Real value_type; value_type precision = static_cast<value_type>(ldexp(1.0, 1-boost::math::policies::digits<value_type, boost::math::policies::policy<> >()/2)) * 100; if(boost::math::policies::digits<value_type, boost::math::policies::policy<> >() < 50) precision = 1; // 1% or two decimal digits, all we can hope for when the input is truncated for(unsigned i = 0; i < data.size(); ++i) { // // These inverse tests are thrown off if the output of the // incomplete beta is too close to 1: basically there is insuffient // information left in the value we're using as input to the inverse // to be able to get back to the original value. 
// if(Real(data[i][5]) == 0) BOOST_CHECK_EQUAL(boost::math::ibeta_inv(Real(data[i][0]), Real(data[i][1]), Real(data[i][5])), value_type(0)); else if((1 - Real(data[i][5]) > 0.001) && (fabs(Real(data[i][5])) > 2 * boost::math::tools::min_value<value_type>()) && (fabs(Real(data[i][5])) > 2 * boost::math::tools::min_value<double>())) { value_type inv = boost::math::ibeta_inv(Real(data[i][0]), Real(data[i][1]), Real(data[i][5])); BOOST_CHECK_CLOSE(Real(data[i][2]), inv, precision); } else if(1 == Real(data[i][5])) BOOST_CHECK_EQUAL(boost::math::ibeta_inv(Real(data[i][0]), Real(data[i][1]), Real(data[i][5])), value_type(1)); if(Real(data[i][6]) == 0) BOOST_CHECK_EQUAL(boost::math::ibetac_inv(Real(data[i][0]), Real(data[i][1]), Real(data[i][6])), value_type(1)); else if((1 - Real(data[i][6]) > 0.001) && (fabs(Real(data[i][6])) > 2 * boost::math::tools::min_value<value_type>()) && (fabs(Real(data[i][6])) > 2 * boost::math::tools::min_value<double>())) { value_type inv = boost::math::ibetac_inv(Real(data[i][0]), Real(data[i][1]), Real(data[i][6])); BOOST_CHECK_CLOSE(Real(data[i][2]), inv, precision); } else if(Real(data[i][6]) == 1) BOOST_CHECK_EQUAL(boost::math::ibetac_inv(Real(data[i][0]), Real(data[i][1]), Real(data[i][6])), value_type(0)); } } template <class Real, class T> void test_inverses2(const T& data, const char* type_name, const char* test_name) { typedef typename T::value_type row_type; typedef Real value_type; typedef value_type (*pg)(value_type, value_type, value_type); #if defined(BOOST_MATH_NO_DEDUCED_FUNCTION_POINTERS) pg funcp = boost::math::ibeta_inv<value_type, value_type, value_type>; #else pg funcp = boost::math::ibeta_inv; #endif boost::math::tools::test_result<value_type> result; std::cout << "Testing " << test_name << " with type " << type_name << "\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"; // // test ibeta_inv(T, T, T) against data: // result = boost::math::tools::test_hetero<Real>( data, bind_func<Real>(funcp, 0, 1, 
2), extract_result<Real>(3)); handle_test_result(result, data[result.worst()], result.worst(), type_name, "boost::math::ibeta_inv", test_name); // // test ibetac_inv(T, T, T) against data: // #if defined(BOOST_MATH_NO_DEDUCED_FUNCTION_POINTERS) funcp = boost::math::ibetac_inv<value_type, value_type, value_type>; #else funcp = boost::math::ibetac_inv; #endif result = boost::math::tools::test_hetero<Real>( data, bind_func<Real>(funcp, 0, 1, 2), extract_result<Real>(4)); handle_test_result(result, data[result.worst()], result.worst(), type_name, "boost::math::ibetac_inv", test_name); } template <class T> void test_beta(T, const char* name) { (void)name; // // The actual test data is rather verbose, so it's in a separate file // // The contents are as follows, each row of data contains // five items, input value a, input value b, integration limits x, beta(a, b, x) and ibeta(a, b, x): // #if !defined(TEST_DATA) || (TEST_DATA == 1) # include "ibeta_small_data.ipp" test_inverses<T>(ibeta_small_data); #endif #if !defined(TEST_DATA) || (TEST_DATA == 2) # include "ibeta_data.ipp" test_inverses<T>(ibeta_data); #endif #if !defined(TEST_DATA) || (TEST_DATA == 3) # include "ibeta_large_data.ipp" test_inverses<T>(ibeta_large_data); #endif #if !defined(TEST_DATA) || (TEST_DATA == 4) # include "ibeta_inv_data.ipp" test_inverses2<T>(ibeta_inv_data, name, "Inverse incomplete beta"); #endif } template <class T> void test_spots(T) { // // basic sanity checks, tolerance is 100 epsilon expressed as a percentage: // T tolerance = boost::math::tools::epsilon<T>() * 10000; BOOST_CHECK_CLOSE( ::boost::math::ibeta_inv( static_cast<T>(1), static_cast<T>(2), static_cast<T>(0.5)), static_cast<T>(0.29289321881345247559915563789515096071516406231153L), tolerance); BOOST_CHECK_CLOSE( ::boost::math::ibeta_inv( static_cast<T>(3), static_cast<T>(0.5), static_cast<T>(0.5)), static_cast<T>(0.92096723292382700385142816696980724853063433975470L), tolerance); BOOST_CHECK_CLOSE( ::boost::math::ibeta_inv( 
static_cast<T>(20.125), static_cast<T>(0.5), static_cast<T>(0.5)), static_cast<T>(0.98862133312917003480022776106012775747685870929920L), tolerance); BOOST_CHECK_CLOSE( ::boost::math::ibeta_inv( static_cast<T>(40), static_cast<T>(80), static_cast<T>(0.5)), static_cast<T>(0.33240456430025026300937492802591128972548660643778L), tolerance); }
ryancoleman/autodock-vina
boost_1_54_0/libs/math/test/test_ibeta_inv.hpp
C++
apache-2.0
6,612
# Copyright (C) Mesosphere, Inc. See LICENSE file for details. import copy import logging import os import time import pytest import requests from generic_test_code.common import ( generic_correct_upstream_dest_test, generic_correct_upstream_request_test, generic_upstream_headers_verify_test, generic_verify_response_test, overridden_file_content, verify_header, ) from util import GuardedSubprocess, LineBufferFilter, SearchCriteria log = logging.getLogger(__name__) class TestServiceEndpoint: # Majority of /service endpoint tests are done with generic tests framework def test_if_accept_encoding_header_is_removed_from_upstream_request( self, master_ar_process_perclass, mocker, valid_user_header): headers = copy.deepcopy(valid_user_header) headers['Accept-Encoding'] = 'gzip' generic_upstream_headers_verify_test(master_ar_process_perclass, headers, '/service/scheduler-alwaysthere/foo/bar/', assert_headers_absent=["Accept-Encoding"], ) class TestAgentEndpoint: # Tests for /agent endpoint routing are done in test_cache.py def test_if_accept_encoding_header_is_removed_from_upstream_request( self, master_ar_process_perclass, mocker, valid_user_header): headers = copy.deepcopy(valid_user_header) headers['Accept-Encoding'] = 'gzip' generic_upstream_headers_verify_test(master_ar_process_perclass, headers, '/agent/de1baf83-c36c-4d23-9cb0-f89f596cd6ab-S1/', assert_headers_absent=["Accept-Encoding"], ) class TestSystemAgentEndpoint: # Tests for /agent endpoint routing are done in test_cache.py def test_if_accept_encoding_header_is_removed_from_upstream_request( self, master_ar_process_perclass, mocker, valid_user_header): headers = copy.deepcopy(valid_user_header) headers['Accept-Encoding'] = 'gzip' generic_upstream_headers_verify_test( master_ar_process_perclass, headers, '/system/v1/agent/de1baf83-c36c-4d23-9cb0-f89f596cd6ab-S0/logs', assert_headers_absent=["Accept-Encoding"], ) class TestHistoryServiceRouting: def test_if_invalid_cache_case_is_handled( self, nginx_class, 
valid_user_header, dns_server_mock): ar = nginx_class() url = ar.make_url_from_path('/dcos-history-service/foo/bar') with GuardedSubprocess(ar): # Unfortunatelly there are upstreams that use `leader.mesos` and # removing this entry too early will result in Nginx failing to start. # So we need to do it right after nginx starts, but before first # cache update. time.sleep(1) dns_server_mock.remove_dns_entry('leader.mesos.') resp = requests.get(url, allow_redirects=False, headers=valid_user_header) assert resp.status_code == 503 assert 'cache is invalid' in resp.text def test_if_leader_is_unknown_state_is_handled( self, nginx_class, valid_user_header): ar = nginx_class(host_ip=None) url = ar.make_url_from_path('/dcos-history-service/foo/bar') with GuardedSubprocess(ar): resp = requests.get(url, allow_redirects=False, headers=valid_user_header) assert resp.status_code == 503 assert 'mesos leader is unknown' in resp.text def test_if_leader_is_local_state_is_handled( self, nginx_class, valid_user_header): ar = nginx_class() path_sent = '/dcos-history-service/foo/bar?a1=GET+param&a2=foobarism' path_expected = '/foo/bar?a1=GET+param&a2=foobarism' with GuardedSubprocess(ar): generic_correct_upstream_dest_test( ar, valid_user_header, path_sent, "http://127.0.0.1:15055") generic_correct_upstream_request_test( ar, valid_user_header, path_sent, path_expected) generic_upstream_headers_verify_test( ar, valid_user_header, path_sent) def test_if_leader_is_nonlocal_state_is_handled( self, nginx_class, valid_user_header, dns_server_mock): ar = nginx_class() path_sent = '/dcos-history-service/foo/bar?a1=GET+param&a2=foobarism' path_expected = '/dcos-history-service/foo/bar?a1=GET+param&a2=foobarism' dns_server_mock.set_dns_entry('leader.mesos.', ip='127.0.0.3') with GuardedSubprocess(ar): generic_correct_upstream_dest_test( ar, valid_user_header, path_sent, "http://127.0.0.3:80") generic_correct_upstream_request_test( ar, valid_user_header, path_sent, path_expected) 
generic_upstream_headers_verify_test( ar, valid_user_header, path_sent, assert_headers={"DCOS-Forwarded": "true"}) def test_if_proxy_loop_is_handled( self, nginx_class, valid_user_header, dns_server_mock): ar = nginx_class() url = ar.make_url_from_path('/dcos-history-service/foo/bar') dns_server_mock.set_dns_entry('leader.mesos.', ip='127.0.0.3') h = valid_user_header h.update({"DCOS-Forwarded": "true"}) with GuardedSubprocess(ar): resp = requests.get(url, allow_redirects=False, headers=h) assert resp.status_code == 503 assert 'mesos leader is unknown' in resp.text class TestMetadata: @pytest.mark.parametrize("public_ip", ['1.2.3.4', "10.20.20.30"]) def test_if_public_ip_detection_works( self, master_ar_process_perclass, valid_user_header, public_ip): url = master_ar_process_perclass.make_url_from_path('/metadata') with overridden_file_content( '/usr/local/detect_ip_public_data.txt', "return ip {}".format(public_ip)): resp = requests.get( url, allow_redirects=False, headers=valid_user_header) assert resp.status_code == 200 resp_data = resp.json() assert resp_data['PUBLIC_IPV4'] == public_ip def test_if_clusterid_is_returned( self, master_ar_process_perclass, valid_user_header): url = master_ar_process_perclass.make_url_from_path('/metadata') resp = requests.get( url, allow_redirects=False, headers=valid_user_header) assert resp.status_code == 200 resp_data = resp.json() assert resp_data['CLUSTER_ID'] == 'fdb1d7c0-06cf-4d65-bb9b-a8920bb854ef' with overridden_file_content( '/var/lib/dcos/cluster-id', "fd21689b-4fe2-4779-8c30-9125149eef11"): resp = requests.get( url, allow_redirects=False, headers=valid_user_header) assert resp.status_code == 200 resp_data = resp.json() assert resp_data['CLUSTER_ID'] == "fd21689b-4fe2-4779-8c30-9125149eef11" def test_if_missing_clusterid_file_is_handled( self, master_ar_process_perclass, valid_user_header): url = master_ar_process_perclass.make_url_from_path('/metadata') with overridden_file_content('/var/lib/dcos/cluster-id'): 
os.unlink('/var/lib/dcos/cluster-id') resp = requests.get( url, allow_redirects=False, headers=valid_user_header) assert resp.status_code == 200 resp_data = resp.json() assert 'CLUSTER_ID' not in resp_data def test_if_public_ip_detect_script_failue_is_handled( self, master_ar_process_perclass, valid_user_header): url = master_ar_process_perclass.make_url_from_path('/metadata') filter_regexp = { 'Traceback \(most recent call last\):': SearchCriteria(1, True), ("FileNotFoundError: \[Errno 2\] No such file or directory:" " '/usr/local/detect_ip_public_data.txt'"): SearchCriteria(1, True), } lbf = LineBufferFilter(filter_regexp, line_buffer=master_ar_process_perclass.stderr_line_buffer) with lbf, overridden_file_content('/usr/local/detect_ip_public_data.txt'): os.unlink('/usr/local/detect_ip_public_data.txt') resp = requests.get( url, allow_redirects=False, headers=valid_user_header) assert resp.status_code == 200 assert lbf.extra_matches == {} resp_data = resp.json() assert resp_data['PUBLIC_IPV4'] == "127.0.0.1" @pytest.mark.xfail(reason="Needs some refactoring, tracked in DCOS_OSS-1007") def test_if_public_ip_detect_script_execution_is_timed_out( self, master_ar_process_perclass, valid_user_header): url = master_ar_process_perclass.make_url_from_path('/metadata') ts_start = time.time() with overridden_file_content('/usr/local/detect_ip_public_data.txt', "timeout 10"): requests.get( url, allow_redirects=False, headers=valid_user_header) ts_total = time.time() - ts_start assert ts_total < 10 # TODO (prozlach): tune it a bit # assert resp.status_code == 200 # resp_data = resp.json() # assert resp_data['PUBLIC_IPV4'] == "127.0.0.1" @pytest.mark.xfail(reason="Needs some refactoring, tracked in DCOS_OSS-1007") def test_if_public_ip_detect_script_nonzero_exit_status_is_handled( self, master_ar_process_perclass, valid_user_header): url = master_ar_process_perclass.make_url_from_path('/metadata') with overridden_file_content( '/usr/local/detect_ip_public_data.txt', "break 
with 1"): resp = requests.get( url, allow_redirects=False, headers=valid_user_header) assert resp.status_code == 200 resp_data = resp.json() assert resp_data['PUBLIC_IPV4'] == "127.0.0.1" class TestUiRoot: @pytest.mark.parametrize("uniq_content", ["(。◕‿‿◕。)", "plain text 1234"]) @pytest.mark.parametrize("path", ["plain-ui-testfile.html", "nest1/nested-ui-testfile.html"]) def test_if_ui_files_are_handled( self, master_ar_process_perclass, valid_user_header, uniq_content, path): url = master_ar_process_perclass.make_url_from_path('/{}'.format(path)) with overridden_file_content( '/opt/mesosphere/active/dcos-ui/usr/{}'.format(path), uniq_content): resp = requests.get( url, allow_redirects=False, headers=valid_user_header) assert resp.status_code == 200 resp.encoding = 'utf-8' assert resp.text == uniq_content verify_header(resp.headers.items(), 'X-Frame-Options', 'DENY') class TestMisc: @pytest.mark.parametrize("content", ["{'data': '1234'}", "{'data': 'abcd'}"]) def test_if_buildinfo_is_served( self, master_ar_process_perclass, valid_user_header, content): url = master_ar_process_perclass.make_url_from_path( '/pkgpanda/active.buildinfo.full.json') with overridden_file_content( '/opt/mesosphere/active.buildinfo.full.json', content): resp = requests.get( url, allow_redirects=False, headers=valid_user_header ) assert resp.status_code == 200 assert resp.text == content @pytest.mark.parametrize("content", ["{'data': '1234'}", "{'data': 'abcd'}"]) def test_if_dcos_metadata_is_served( self, master_ar_process_perclass, valid_user_header, content): url = master_ar_process_perclass.make_url_from_path( '/dcos-metadata/dcos-version.json') with overridden_file_content( '/opt/mesosphere/active/dcos-metadata/etc/dcos-version.json', content): resp = requests.get( url, allow_redirects=False, headers=valid_user_header ) assert resp.status_code == 200 assert resp.text == content def test_if_xaccel_header_is_passed_to_client_by_ar( self, master_ar_process_perclass, valid_user_header, 
mocker): accel_buff_header = {"X-Accel-Buffering": "TEST"} mocker.send_command( endpoint_id='http:///run/dcos/dcos-log.sock', func_name='set_response_headers', aux_data=accel_buff_header, ) generic_verify_response_test( master_ar_process_perclass, valid_user_header, '/system/v1/logs/foo/bar', assert_headers=accel_buff_header)
surdy/dcos
packages/adminrouter/extra/src/test-harness/tests/test_master.py
Python
apache-2.0
13,807
# encoding: UTF-8 require 'simplecov' require 'coveralls' SimpleCov.formatter = if ENV['CI'] Coveralls::SimpleCov::Formatter else SimpleCov::Formatter::HTMLFormatter end SimpleCov.start
sbagdadi-gpsw/custom-prometheus-client
spec/spec_helper.rb
Ruby
apache-2.0
203
<?php require_once __DIR__ . "/http_server.php"; /* class swoole_http_server extends swoole_server { public function on($name, $cb) {} // 与 tcp server 的on接受的eventname 不同 } class swoole_http_response { public function cookie() {} public function rawcookie() {} public function status() {} public function gzip() {} public function header() {} public function write() {} public function end() {} public function sendfile() {} } class swoole_http_request { public function rawcontent() {} } */ $host = isset($argv[1]) ? $argv[1] : HTTP_SERVER_HOST; $port = isset($argv[2]) ? $argv[2] : HTTP_SERVER_PORT; (new HttpServer($host, $port, true))->start();
youzan/zan
php-test/apitest/swoole_http_server/simple_https_server.php
PHP
apache-2.0
704
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include <aws/rds/model/CreateDBParameterGroupRequest.h> #include <aws/core/utils/StringUtils.h> #include <aws/core/utils/memory/stl/AWSStringStream.h> using namespace Aws::RDS::Model; using namespace Aws::Utils; CreateDBParameterGroupRequest::CreateDBParameterGroupRequest() : m_dBParameterGroupNameHasBeenSet(false), m_dBParameterGroupFamilyHasBeenSet(false), m_descriptionHasBeenSet(false), m_tagsHasBeenSet(false) { } Aws::String CreateDBParameterGroupRequest::SerializePayload() const { Aws::StringStream ss; ss << "Action=CreateDBParameterGroup&"; if(m_dBParameterGroupNameHasBeenSet) { ss << "DBParameterGroupName=" << StringUtils::URLEncode(m_dBParameterGroupName.c_str()) << "&"; } if(m_dBParameterGroupFamilyHasBeenSet) { ss << "DBParameterGroupFamily=" << StringUtils::URLEncode(m_dBParameterGroupFamily.c_str()) << "&"; } if(m_descriptionHasBeenSet) { ss << "Description=" << StringUtils::URLEncode(m_description.c_str()) << "&"; } if(m_tagsHasBeenSet) { unsigned tagsCount = 1; for(auto& item : m_tags) { item.OutputToStream(ss, "Tags.member.", tagsCount, ""); tagsCount++; } } ss << "Version=2014-10-31"; return ss.str(); } void CreateDBParameterGroupRequest::DumpBodyToUrl(Aws::Http::URI& uri ) const { uri.SetQueryString(SerializePayload()); }
jt70471/aws-sdk-cpp
aws-cpp-sdk-rds/source/model/CreateDBParameterGroupRequest.cpp
C++
apache-2.0
1,483
package node import ( "bytes" "io/ioutil" "testing" "github.com/docker/cli/cli/internal/test" "github.com/docker/docker/api/types/swarm" "github.com/pkg/errors" // Import builders to get the builder function as package function . "github.com/docker/cli/cli/internal/test/builders" "github.com/docker/docker/pkg/testutil" "github.com/stretchr/testify/assert" ) func TestNodePromoteErrors(t *testing.T) { testCases := []struct { args []string nodeInspectFunc func() (swarm.Node, []byte, error) nodeUpdateFunc func(nodeID string, version swarm.Version, node swarm.NodeSpec) error expectedError string }{ { expectedError: "requires at least 1 argument", }, { args: []string{"nodeID"}, nodeInspectFunc: func() (swarm.Node, []byte, error) { return swarm.Node{}, []byte{}, errors.Errorf("error inspecting the node") }, expectedError: "error inspecting the node", }, { args: []string{"nodeID"}, nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { return errors.Errorf("error updating the node") }, expectedError: "error updating the node", }, } for _, tc := range testCases { buf := new(bytes.Buffer) cmd := newPromoteCommand( test.NewFakeCli(&fakeClient{ nodeInspectFunc: tc.nodeInspectFunc, nodeUpdateFunc: tc.nodeUpdateFunc, }, buf)) cmd.SetArgs(tc.args) cmd.SetOutput(ioutil.Discard) testutil.ErrorContains(t, cmd.Execute(), tc.expectedError) } } func TestNodePromoteNoChange(t *testing.T) { buf := new(bytes.Buffer) cmd := newPromoteCommand( test.NewFakeCli(&fakeClient{ nodeInspectFunc: func() (swarm.Node, []byte, error) { return *Node(Manager()), []byte{}, nil }, nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { if node.Role != swarm.NodeRoleManager { return errors.Errorf("expected role manager, got %s", node.Role) } return nil }, }, buf)) cmd.SetArgs([]string{"nodeID"}) assert.NoError(t, cmd.Execute()) } func TestNodePromoteMultipleNode(t *testing.T) { buf := new(bytes.Buffer) cmd := newPromoteCommand( 
test.NewFakeCli(&fakeClient{ nodeInspectFunc: func() (swarm.Node, []byte, error) { return *Node(), []byte{}, nil }, nodeUpdateFunc: func(nodeID string, version swarm.Version, node swarm.NodeSpec) error { if node.Role != swarm.NodeRoleManager { return errors.Errorf("expected role manager, got %s", node.Role) } return nil }, }, buf)) cmd.SetArgs([]string{"nodeID1", "nodeID2"}) assert.NoError(t, cmd.Execute()) }
kris-nova/klone
vendor/github.com/docker/cli/cli/command/node/promote_test.go
GO
apache-2.0
2,594
var __extends = this.__extends || function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; function __() { this.constructor = d; } __.prototype = b.prototype; d.prototype = new __(); }; var S18 = (function (_super) { __extends(S18, _super); function S18() { _super.apply(this, arguments); } return S18; })(S18); (new S18()).blah;
hippich/typescript
tests/baselines/reference/recursiveBaseCheck6.js
JavaScript
apache-2.0
400
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.commons.collections4.iterators; import java.util.Iterator; import java.util.NoSuchElementException; import org.apache.commons.collections4.Predicate; /** * Decorates another {@link Iterator} using a predicate to filter elements. * <p> * This iterator decorates the underlying iterator, only allowing through * those elements that match the specified {@link Predicate Predicate}. * * @since 1.0 * @version $Id: FilterIterator.java 1477802 2013-04-30 20:01:28Z tn $ */ public class FilterIterator<E> implements Iterator<E> { /** The iterator being used */ private Iterator<? extends E> iterator; /** The predicate being used */ private Predicate<? super E> predicate; /** The next object in the iteration */ private E nextObject; /** Whether the next object has been calculated yet */ private boolean nextObjectSet = false; //----------------------------------------------------------------------- /** * Constructs a new <code>FilterIterator</code> that will not function * until {@link #setIterator(Iterator) setIterator} is invoked. 
*/ public FilterIterator() { super(); } /** * Constructs a new <code>FilterIterator</code> that will not function * until {@link #setPredicate(Predicate) setPredicate} is invoked. * * @param iterator the iterator to use */ public FilterIterator(final Iterator<? extends E> iterator) { super(); this.iterator = iterator; } /** * Constructs a new <code>FilterIterator</code> that will use the * given iterator and predicate. * * @param iterator the iterator to use * @param predicate the predicate to use */ public FilterIterator(final Iterator<? extends E> iterator, final Predicate<? super E> predicate) { super(); this.iterator = iterator; this.predicate = predicate; } //----------------------------------------------------------------------- /** * Returns true if the underlying iterator contains an object that * matches the predicate. * * @return true if there is another object that matches the predicate * @throws NullPointerException if either the iterator or predicate are null */ public boolean hasNext() { return nextObjectSet || setNextObject(); } /** * Returns the next object that matches the predicate. * * @return the next object which matches the given predicate * @throws NullPointerException if either the iterator or predicate are null * @throws NoSuchElementException if there are no more elements that * match the predicate */ public E next() { if (!nextObjectSet) { if (!setNextObject()) { throw new NoSuchElementException(); } } nextObjectSet = false; return nextObject; } /** * Removes from the underlying collection of the base iterator the last * element returned by this iterator. * This method can only be called * if <code>next()</code> was called, but not after * <code>hasNext()</code>, because the <code>hasNext()</code> call * changes the base iterator. * * @throws IllegalStateException if <code>hasNext()</code> has already * been called. 
*/ public void remove() { if (nextObjectSet) { throw new IllegalStateException("remove() cannot be called"); } iterator.remove(); } //----------------------------------------------------------------------- /** * Gets the iterator this iterator is using. * * @return the iterator */ public Iterator<? extends E> getIterator() { return iterator; } /** * Sets the iterator for this iterator to use. * If iteration has started, this effectively resets the iterator. * * @param iterator the iterator to use */ public void setIterator(final Iterator<? extends E> iterator) { this.iterator = iterator; nextObject = null; nextObjectSet = false; } //----------------------------------------------------------------------- /** * Gets the predicate this iterator is using. * * @return the predicate */ public Predicate<? super E> getPredicate() { return predicate; } /** * Sets the predicate this the iterator to use. * * @param predicate the predicate to use */ public void setPredicate(final Predicate<? super E> predicate) { this.predicate = predicate; nextObject = null; nextObjectSet = false; } //----------------------------------------------------------------------- /** * Set nextObject to the next object. If there are no more * objects then return false. Otherwise, return true. */ private boolean setNextObject() { while (iterator.hasNext()) { final E object = iterator.next(); if (predicate.evaluate(object)) { nextObject = object; nextObjectSet = true; return true; } } return false; } }
krivachy/compgs03_mutation_testing
src/main/java/org/apache/commons/collections4/iterators/FilterIterator.java
Java
apache-2.0
6,056
/* * Copyright 2000-2013 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.openapi.externalSystem.service.settings; import com.intellij.openapi.externalSystem.ExternalSystemManager; import com.intellij.openapi.externalSystem.model.ProjectSystemId; import com.intellij.openapi.externalSystem.settings.AbstractExternalSystemSettings; import com.intellij.openapi.externalSystem.settings.ExternalProjectSettings; import com.intellij.openapi.externalSystem.settings.ExternalSystemSettingsListener; import com.intellij.openapi.externalSystem.util.*; import com.intellij.openapi.fileChooser.FileChooserDescriptor; import com.intellij.openapi.options.ConfigurationException; import com.intellij.openapi.project.Project; import com.intellij.openapi.ui.TextComponentAccessor; import com.intellij.openapi.ui.TextFieldWithBrowseButton; import com.intellij.openapi.util.text.StringUtil; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import javax.swing.*; import javax.swing.event.DocumentEvent; import javax.swing.event.DocumentListener; import java.awt.*; /** * A control which knows how to manage settings of external project being imported. 
* * @author Denis Zhdanov * @since 4/30/13 2:33 PM */ public abstract class AbstractImportFromExternalSystemControl< ProjectSettings extends ExternalProjectSettings, L extends ExternalSystemSettingsListener<ProjectSettings>, SystemSettings extends AbstractExternalSystemSettings<SystemSettings, ProjectSettings, L>> { @NotNull private final SystemSettings mySystemSettings; @NotNull private final ProjectSettings myProjectSettings; @NotNull private final PaintAwarePanel myComponent = new PaintAwarePanel(new GridBagLayout()); @NotNull private final TextFieldWithBrowseButton myLinkedProjectPathField = new TextFieldWithBrowseButton(); @NotNull private final ExternalSystemSettingsControl<ProjectSettings> myProjectSettingsControl; @NotNull private final ProjectSystemId myExternalSystemId; @Nullable private final ExternalSystemSettingsControl<SystemSettings> mySystemSettingsControl; @Nullable Project myCurrentProject; @SuppressWarnings("AbstractMethodCallInConstructor") protected AbstractImportFromExternalSystemControl(@NotNull ProjectSystemId externalSystemId, @NotNull SystemSettings systemSettings, @NotNull ProjectSettings projectSettings) { myExternalSystemId = externalSystemId; mySystemSettings = systemSettings; myProjectSettings = projectSettings; myProjectSettingsControl = createProjectSettingsControl(projectSettings); mySystemSettingsControl = createSystemSettingsControl(systemSettings); JLabel linkedProjectPathLabel = new JLabel(ExternalSystemBundle.message("settings.label.select.project", externalSystemId.getReadableName())); ExternalSystemManager<?, ?, ?, ?, ?> manager = ExternalSystemApiUtil.getManager(externalSystemId); assert manager != null; FileChooserDescriptor fileChooserDescriptor = manager.getExternalProjectDescriptor(); myLinkedProjectPathField.addBrowseFolderListener("", ExternalSystemBundle .message("settings.label.select.project", externalSystemId.getReadableName()), null, fileChooserDescriptor, TextComponentAccessor.TEXT_FIELD_WHOLE_TEXT, false); 
myLinkedProjectPathField.getTextField().getDocument().addDocumentListener(new DocumentListener() { @Override public void insertUpdate(DocumentEvent e) { onLinkedProjectPathChange(myLinkedProjectPathField.getText()); } @Override public void removeUpdate(DocumentEvent e) { onLinkedProjectPathChange(myLinkedProjectPathField.getText()); } @Override public void changedUpdate(DocumentEvent e) { onLinkedProjectPathChange(myLinkedProjectPathField.getText()); } }); myComponent.add(linkedProjectPathLabel, ExternalSystemUiUtil.getLabelConstraints(0)); myComponent.add(myLinkedProjectPathField, ExternalSystemUiUtil.getFillLineConstraints(0)); myProjectSettingsControl.fillUi(myComponent, 0); if (mySystemSettingsControl != null) { mySystemSettingsControl.fillUi(myComponent, 0); } ExternalSystemUiUtil.fillBottom(myComponent); } /** * This control is assumed to be used at least at two circumstances: * <pre> * <ul> * <li>new ide project is being created on the external project basis;</li> * <li>new ide module(s) is being added to the existing ide project on the external project basis;</li> * </ul> * </pre> * We need to differentiate these situations, for example, we don't want to allow linking an external project to existing ide * project if it's already linked. * <p/> * This property helps us to achieve that - when an ide project is defined, that means that new modules are being imported * to that ide project from external project; when this property is <code>null</code> that means that new ide project is being * created on the target external project basis. * * @param currentProject current ide project (if any) */ public void setCurrentProject(@Nullable Project currentProject) { myCurrentProject = currentProject; } protected abstract void onLinkedProjectPathChange(@NotNull String path); /** * Creates a control for managing given project settings. 
* * @param settings target external project settings * @return control for managing given project settings */ @NotNull protected abstract ExternalSystemSettingsControl<ProjectSettings> createProjectSettingsControl(@NotNull ProjectSettings settings); /** * Creates a control for managing given system-level settings (if any). * * @param settings target system settings * @return a control for managing given system-level settings; * <code>null</code> if current external system doesn't have system-level settings (only project-level settings) */ @Nullable protected abstract ExternalSystemSettingsControl<SystemSettings> createSystemSettingsControl(@NotNull SystemSettings settings); @NotNull public JComponent getComponent() { return myComponent; } @NotNull public ExternalSystemSettingsControl<ProjectSettings> getProjectSettingsControl() { return myProjectSettingsControl; } public void setLinkedProjectPath(@NotNull String path) { myProjectSettings.setExternalProjectPath(path); myLinkedProjectPathField.setText(path); } @NotNull public SystemSettings getSystemSettings() { return mySystemSettings; } @NotNull public ProjectSettings getProjectSettings() { return myProjectSettings; } public void reset() { myLinkedProjectPathField.setText(""); myProjectSettingsControl.reset(); if (mySystemSettingsControl != null) { mySystemSettingsControl.reset(); } } public void apply() throws ConfigurationException { String linkedProjectPath = myLinkedProjectPathField.getText(); if (StringUtil.isEmpty(linkedProjectPath)) { throw new ConfigurationException(ExternalSystemBundle.message("error.project.undefined")); } else if (myCurrentProject != null) { ExternalSystemManager<?, ?, ?, ?, ?> manager = ExternalSystemApiUtil.getManager(myExternalSystemId); assert manager != null; AbstractExternalSystemSettings<?, ?,?> settings = manager.getSettingsProvider().fun(myCurrentProject); if (settings.getLinkedProjectSettings(linkedProjectPath) != null) { throw new 
ConfigurationException(ExternalSystemBundle.message("error.project.already.registered")); } } //noinspection ConstantConditions myProjectSettings.setExternalProjectPath(ExternalSystemApiUtil.normalizePath(linkedProjectPath)); myProjectSettingsControl.validate(myProjectSettings); myProjectSettingsControl.apply(myProjectSettings); if (mySystemSettingsControl != null) { mySystemSettingsControl.validate(mySystemSettings); mySystemSettingsControl.apply(mySystemSettings); } } }
romankagan/DDBWorkbench
platform/external-system-impl/src/com/intellij/openapi/externalSystem/service/settings/AbstractImportFromExternalSystemControl.java
Java
apache-2.0
9,041
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2005-2019, NumPy Developers. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * * Neither the name of the NumPy Developers nor the names of any * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*! * \file np_einsum_op.cc * \brief CPU Implementation of numpy-compatible einsum */ #include "./np_einsum_op-inl.h" #include <cstdlib> #include <cstring> namespace mxnet { namespace op { inline std::vector<std::string> _parse_einsum_input(std::string subscripts, const mxnet::ShapeVector& shapes) { const std::string einsum_symbols = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; std::bitset<MAXAXIS> einsum_symbols_set; for (const char& c : einsum_symbols) { einsum_symbols_set.set(c); } CHECK_NE(shapes.size(), 0U) << "No input operands"; auto end_pos = std::remove(subscripts.begin(), subscripts.end(), ' '); subscripts.erase(end_pos, subscripts.end()); // Ensure all characters are valid for (const char& c : subscripts) { if (c == '.' || c == ',' || c == '-' || c == '>') { continue; } CHECK(einsum_symbols_set.test(c)) << "Character " << c << " is not a valid symbol."; } // Check for proper "->" if (subscripts.find('-') != std::string::npos || subscripts.find('>') != std::string::npos) { bool invalid = (std::count(subscripts.begin(), subscripts.end(), '-') > 1 || std::count(subscripts.begin(), subscripts.end(), '>') > 1); CHECK(!invalid && _count_substring(subscripts, "->") == 1) << "Subscripts can only contain one '->'."; } // Parse ellipses if (subscripts.find('.') != std::string::npos) { std::string used = subscripts; used.erase(std::remove_if(used.begin(), used.end(), [](const char& c){return c == '.' 
|| c == ',' || c == '-' || c == '>';}), used.end()); std::bitset<MAXAXIS> used_set = str2set(used); std::string ellipse_inds = ""; for (const char& c : einsum_symbols) { if (!used_set.test(static_cast<int>(c))) { ellipse_inds.append(1, c); } } int longest = 0; std::string input_tmp, output_sub; std::vector<std::string> split_subscripts; bool out_sub; if (subscripts.find("->") != std::string::npos) { std::vector<std::string> tmp = split(subscripts, "->"); input_tmp = tmp[0]; output_sub = tmp[1]; split_subscripts = split(input_tmp, ","); out_sub = true; } else { split_subscripts = split(subscripts, ","); out_sub = false; } size_t size_split_subscripts = split_subscripts.size(); subscripts = ""; for (size_t i = 0; i < size_split_subscripts; ++i) { const std::string& sub = split_subscripts[i]; if (sub.find('.') != std::string::npos) { CHECK_EQ(std::count(sub.begin(), sub.end(), '.'), 3) << "Invalid Ellipses"; CHECK_EQ(_count_substring(sub, "..."), 1) << "Invalid Ellipses"; // Take into account numerical values int ellipse_count = 0; if (shapes[i].ndim() == 0) { ellipse_count = 0; } else { ellipse_count = std::max(shapes[i].ndim(), 1); ellipse_count -= sub.length() - 3; } if (ellipse_count > longest) { longest = ellipse_count; } CHECK_GE(ellipse_count, 0) << "Ellipses lengths do not match."; if (ellipse_count == 0) { split_subscripts[i].erase(sub.find("..."), 3); } else { std::string rep_inds = ellipse_inds.substr(ellipse_inds.length() - ellipse_count); split_subscripts[i].replace(sub.find("..."), 3, rep_inds); } } subscripts += split_subscripts[i]; if (i + 1 < size_split_subscripts) { subscripts += ","; } } std::string out_ellipse; if (longest == 0) { out_ellipse = ""; } else { out_ellipse = ellipse_inds.substr(ellipse_inds.length() - longest); } if (out_sub) { output_sub.replace(output_sub.find("..."), 3, out_ellipse); subscripts += "->" + output_sub; } else { // Special care for outputless ellipses std::bitset<MAXAXIS> out_ellipse_set = str2set(out_ellipse); 
std::string tmp_subscripts = subscripts, output_subscript = ""; size_t len_tmp_subscripts = tmp_subscripts.length(); std::sort(tmp_subscripts.begin(), tmp_subscripts.end()); for (size_t i = 0; i < len_tmp_subscripts; ++i) { const char& c = tmp_subscripts[i]; if (c == ',') { continue; } CHECK(einsum_symbols_set.test(c)) << "Character " << c << " is not a valid symbol."; if ((i == 0 || tmp_subscripts[i - 1] != c) && (i == len_tmp_subscripts - 1 || tmp_subscripts[i + 1] != c) && !out_ellipse_set.test(c)) { output_subscript.append(1, c); } } subscripts += "->" + out_ellipse + output_subscript; } } // Build output string if does not exist std::vector<std::string> ret(2); if (subscripts.find("->") != std::string::npos) { ret = split(subscripts, "->"); } else { ret[0] = subscripts; ret[1] = ""; // Build output subscripts std::string tmp_subscripts = subscripts; size_t len_tmp_subscripts = tmp_subscripts.length(); std::sort(tmp_subscripts.begin(), tmp_subscripts.end()); for (size_t i = 0; i < len_tmp_subscripts; ++i) { const char& c = tmp_subscripts[i]; if (c == ',') { continue; } CHECK(einsum_symbols_set.test(c)) << "Character " << c << " is not a valid symbol."; if ((i == 0 || tmp_subscripts[i - 1] != c) && (i == len_tmp_subscripts - 1 || tmp_subscripts[i + 1] != c)) { ret[1].append(1, c); } } } // Make sure output subscripts are in the input std::bitset<MAXAXIS> input_subscripts_set = str2set(ret[0]); for (const char& c : ret[1]) { CHECK(input_subscripts_set.test(c)) << "Output character " << c << " did not appear in the input"; } // Make sure number operands is equivalent to the number of terms CHECK_EQ(std::count(ret[0].begin(), ret[0].end(), ',') + 1, shapes.size()) << "Number of einsum subscripts must be equal to the " << "number of operands."; return ret; } bool NumpyEinsumShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const NumpyEinsumParam &param = nnvm::get<NumpyEinsumParam>(attrs.parsed); const std::string& 
subscripts = param.subscripts; int num_args = param.num_args; CHECK_EQ(in_attrs->size(), num_args); CHECK_EQ(out_attrs->size(), 1U); for (int i = 0; i < num_args; i++) { if (!shape_is_known(in_attrs->at(i))) { return false; } } // Parsing std::vector<std::string> parsed_subscripts = _parse_einsum_input(subscripts, *in_attrs); // Build a few useful list and sets std::vector<std::string> input_list = split(parsed_subscripts[0], ","); size_t isize = input_list.size(); // Get length of each unique dimension and ensure all dimensions are correct dim_t dimension_dict[MAXAXIS]; memset(dimension_dict, -1, sizeof(dimension_dict)); for (size_t i = 0; i < isize; ++i) { const std::string& term = input_list[i]; const TShape& sh = in_attrs->at(i); CHECK_EQ(sh.ndim(), term.length()) << "Einstein sum subscript " << input_list[i] << " does not contain the " << "correct number of indices for operand " << i << "."; size_t len_term = term.length(); for (size_t j = 0; j < len_term; ++j) { dim_t dim = sh[j]; const char& c = term[j]; if (dimension_dict[static_cast<int>(c)] != -1) { // For broadcasting cases we always want the largest dim size if (dimension_dict[static_cast<int>(c)] == 1) { dimension_dict[static_cast<int>(c)] = dim; } CHECK(dim == 1 || dim == dimension_dict[static_cast<int>(c)]) << "Size of label '" << c << "' for operand " << i << " (" << dimension_dict[static_cast<int>(c)] << ") does not match previous terms (" << dim << ")."; } else { dimension_dict[static_cast<int>(c)] = dim; } } } // Get oshape const std::string& output_str = parsed_subscripts[1]; size_t odim = output_str.size(); TShape oshape(odim, -1); for (size_t i = 0; i < odim; ++i) { oshape[i] = dimension_dict[static_cast<int>(output_str[i])]; } SHAPE_ASSIGN_CHECK(*out_attrs, 0, oshape); size_t lim = static_cast<size_t>(std::numeric_limits<index_t>::max()); for (int i = 0; i < num_args; ++i) { CHECK_LE(in_attrs->at(i).Size(), lim) << "Size of operand " << i << " exceeds the maximum index." 
<< " Try setting `USE_INT64_TENSOR_SIZE`."; } CHECK_LE(oshape.Size(), lim) << "Size of output" << " exceeds the maximum index." << " Try setting `USE_INT64_TENSOR_SIZE`."; return shape_is_known(oshape); } OpStatePtr CreateEinsumState(const NodeAttrs& attrs, Context ctx, const mxnet::ShapeVector& in_shapes, const std::vector<int>& in_types) { const NumpyEinsumParam& param = dmlc::get<NumpyEinsumParam>(attrs.parsed); return OpStatePtr::Create<EinsumOp>(param.num_args, param.optimize, param.subscripts); } DMLC_REGISTER_PARAMETER(NumpyEinsumParam); NNVM_REGISTER_OP(_npi_einsum) .describe(R"doc()doc" ADD_FILELINE) .set_attr_parser(ParamParser<NumpyEinsumParam>) .set_num_inputs([](const nnvm::NodeAttrs& attrs) { const NumpyEinsumParam& param = dmlc::get<NumpyEinsumParam>(attrs.parsed); return static_cast<uint32_t>(param.num_args); }) .set_num_outputs(1) .set_attr<std::string>("key_var_num_args", "num_args") .set_attr<nnvm::FListInputNames>("FListInputNames", [](const nnvm::NodeAttrs& attrs) { int num_args = dmlc::get<NumpyEinsumParam>(attrs.parsed).num_args; std::vector<std::string> ret; ret.reserve(num_args); for (int i = 0; i < num_args; i++) { ret.push_back(std::string("arg") + std::to_string(i)); } return ret; }) .set_attr<mxnet::FInferShape>("FInferShape", NumpyEinsumShape) .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<-1, 1>) .set_attr<FCreateOpState>("FCreateOpState", CreateEinsumState) .set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& attrs) { return std::vector<ResourceRequest>(1, ResourceRequest::kTempSpace); }) .set_attr<FStatefulCompute>("FStatefulCompute<cpu>", NumpyEinsumForward<cpu>) .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseIn{"_backward_npi_einsum"}) .add_argument("data", "NDArray-or-Symbol[]", "List of eimsum operands") .add_arguments(NumpyEinsumParam::__FIELDS__()); NNVM_REGISTER_OP(_backward_npi_einsum) .set_attr_parser(ParamParser<NumpyEinsumParam>) .set_num_inputs([](const nnvm::NodeAttrs& attrs) { const 
NumpyEinsumParam& param = dmlc::get<NumpyEinsumParam>(attrs.parsed); return static_cast<uint32_t>(param.num_args + 1); }) .set_num_outputs([](const nnvm::NodeAttrs& attrs) { const NumpyEinsumParam& param = dmlc::get<NumpyEinsumParam>(attrs.parsed); return static_cast<uint32_t>(param.num_args); }) .set_attr<bool>("TIsLayerOpBackward", true) .set_attr<nnvm::TIsBackward>("TIsBackward", true) .set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& attrs) { return std::vector<ResourceRequest>(1, ResourceRequest::kTempSpace); }) .set_attr<FStatefulCompute>("FStatefulCompute<cpu>", NumpyEinsumBackward<cpu>); } // namespace op } // namespace mxnet
yajiedesign/mxnet
src/operator/numpy/np_einsum_op.cc
C++
apache-2.0
14,145
/* * Copyright 2014 The Closure Compiler Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.javascript.jscomp; import static com.google.common.base.MoreObjects.firstNonNull; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.javascript.jscomp.parsing.TypeTransformationParser; import com.google.javascript.jscomp.parsing.TypeTransformationParser.Keywords; import com.google.javascript.rhino.JSDocInfo; import com.google.javascript.rhino.JSTypeExpression; import com.google.javascript.rhino.Node; import com.google.javascript.rhino.jstype.JSType; import com.google.javascript.rhino.jstype.JSTypeNative; import com.google.javascript.rhino.jstype.JSTypeRegistry; import com.google.javascript.rhino.jstype.ObjectType; import com.google.javascript.rhino.jstype.StaticTypedScope; import com.google.javascript.rhino.jstype.StaticTypedSlot; import java.util.Arrays; import java.util.Collection; import java.util.LinkedHashMap; import java.util.Map; /** * A class for processing type transformation expressions * * @author lpino@google.com (Luis Fernando Pino Duque) */ class TypeTransformation { private static final String VIRTUAL_FILE = "<TypeTransformation.java>"; static final DiagnosticType UNKNOWN_TYPEVAR = DiagnosticType.warning("TYPEVAR_UNDEFINED", "Reference to an unknown type variable {0}"); static final 
DiagnosticType UNKNOWN_STRVAR = DiagnosticType.warning("UNKNOWN_STRVAR", "Reference to an unknown string variable {0}"); static final DiagnosticType UNKNOWN_TYPENAME = DiagnosticType.warning("TYPENAME_UNDEFINED", "Reference to an unknown type name {0}"); static final DiagnosticType BASETYPE_INVALID = DiagnosticType.warning("BASETYPE_INVALID", "The type {0} cannot be templatized"); static final DiagnosticType TEMPTYPE_INVALID = DiagnosticType.warning("TEMPTYPE_INVALID", "Expected templatized type in {0} found {1}"); static final DiagnosticType INDEX_OUTOFBOUNDS = DiagnosticType.warning("INDEX_OUTOFBOUNDS", "Index out of bounds in templateTypeOf: expected a number less than {0}, found {1}"); static final DiagnosticType DUPLICATE_VARIABLE = DiagnosticType.warning("DUPLICATE_VARIABLE", "The variable {0} is already defined"); // This warning is never exercised. static final DiagnosticType UNKNOWN_NAMEVAR = DiagnosticType.warning("UNKNOWN_NAMEVAR", "Reference to an unknown name variable {0}"); static final DiagnosticType RECTYPE_INVALID = DiagnosticType.warning("RECTYPE_INVALID", "The first parameter of a maprecord must be a record type, " + "found {0}"); static final DiagnosticType MAPRECORD_BODY_INVALID = DiagnosticType.warning("MAPRECORD_BODY_INVALID", "The body of a maprecord function must evaluate to a record type or " + "a no type, found {0}"); static final DiagnosticType VAR_UNDEFINED = DiagnosticType.warning("VAR_UNDEFINED", "Variable {0} is undefined in the scope"); static final DiagnosticType INVALID_CTOR = DiagnosticType.warning("INVALID_CTOR", "Expected a constructor type, found {0}"); static final DiagnosticType RECPARAM_INVALID = DiagnosticType.warning("RECPARAM_INVALID", "Expected a record type, found {0}"); static final DiagnosticType PROPTYPE_INVALID = DiagnosticType.warning("PROPTYPE_INVALID", "Expected object type, found {0}"); private final AbstractCompiler compiler; private final JSTypeRegistry registry; private final StaticTypedScope typeEnv; /** * 
A helper class for holding the information about the type variables * and the name variables in maprecord expressions */ private static class NameResolver { ImmutableMap<String, JSType> typeVars; ImmutableMap<String, String> nameVars; NameResolver(ImmutableMap<String, JSType> typeVars, ImmutableMap<String, String> nameVars) { this.typeVars = typeVars; this.nameVars = nameVars; } } @SuppressWarnings("unchecked") TypeTransformation(AbstractCompiler compiler, StaticTypedScope typeEnv) { this.compiler = compiler; this.registry = compiler.getTypeRegistry(); this.typeEnv = typeEnv; } private boolean isTypeVar(Node n) { return n.isName(); } private boolean isTypeName(Node n) { return n.isString(); } private boolean isBooleanOperation(Node n) { return n.isAnd() || n.isOr() || n.isNot(); } private Keywords nameToKeyword(String s) { return TypeTransformationParser.Keywords.valueOf(s.toUpperCase()); } private JSType getType(String typeName) { JSType type = registry.getType(typeEnv, typeName); if (type != null) { return type; } StaticTypedSlot slot = typeEnv.getSlot(typeName); type = slot != null ? slot.getType() : null; if (type != null) { if (type.isConstructor() || type.isInterface()) { return type.toMaybeFunctionType().getInstanceType().getRawType(); } if (type.isEnumElementType()) { return type.getEnumeratedTypeOfEnumElement(); } return type; } JSDocInfo jsdoc = slot == null ? 
null : slot.getJSDocInfo(); if (jsdoc != null && jsdoc.hasTypedefType()) { return this.registry.evaluateTypeExpression(jsdoc.getTypedefType(), typeEnv); } return null; } private JSType getUnknownType() { return registry.getNativeObjectType(JSTypeNative.UNKNOWN_TYPE); } private JSType getNoType() { return registry.getNativeObjectType(JSTypeNative.NO_TYPE); } private JSType getAllType() { return registry.getNativeType(JSTypeNative.ALL_TYPE); } private JSType getObjectType() { return registry.getNativeType(JSTypeNative.OBJECT_TYPE); } private JSType createUnionType(JSType[] variants) { return registry.createUnionType(Arrays.asList(variants)); } private JSType createTemplatizedType(ObjectType baseType, JSType[] params) { return registry.instantiateGenericType(baseType, ImmutableList.copyOf(params)); } private JSType createRecordType(ImmutableMap<String, JSType> props) { return this.registry.createRecordType(props); } private void reportWarning(Node n, DiagnosticType msg, String... param) { compiler.report(JSError.make(n, msg, param)); } private <T> ImmutableMap<String, T> addNewEntry( ImmutableMap<String, T> map, String name, T type) { return new ImmutableMap.Builder<String, T>() .putAll(map) .put(name, type) .build(); } private String getFunctionParameter(Node n, int i) { Preconditions.checkArgument(n.isFunction(), "Expected a function node, found %s", n); return n.getSecondChild().getChildAtIndex(i).getString(); } private String getCallName(Node n) { Preconditions.checkArgument(n.isCall(), "Expected a call node, found %s", n); return n.getFirstChild().getString(); } private Node getCallArgument(Node n, int i) { Preconditions.checkArgument(n.isCall(), "Expected a call node, found %s", n); return n.getChildAtIndex(i + 1); } private int getCallParamCount(Node n) { Preconditions.checkArgument(n.isCall(), "Expected a call node, found %s", n); return n.getChildCount() - 1; } // TODO(dimvar): rewrite the uses of this method to use siblings() and delete it. 
// Copying is unnecessarily inefficient. private ImmutableList<Node> getCallParams(Node n) { Preconditions.checkArgument(n.isCall(), "Expected a call node, found %s", n); ImmutableList.Builder<Node> builder = new ImmutableList.Builder<>(); for (int i = 0; i < getCallParamCount(n); i++) { builder.add(getCallArgument(n, i)); } return builder.build(); } private Node getComputedPropValue(Node n) { Preconditions.checkArgument( n.isComputedProp(), "Expected a computed property node, found %s", n); return n.getSecondChild(); } private String getComputedPropName(Node n) { Preconditions.checkArgument( n.isComputedProp(), "Expected a computed property node, found %s", n); return n.getFirstChild().getString(); } /** Evaluates the type transformation expression and returns the resulting type. * * @param ttlAst The node representing the type transformation expression * @param typeVars The environment containing the information about the type variables * @return JSType The resulting type after the transformation */ JSType eval(Node ttlAst, ImmutableMap<String, JSType> typeVars) { return eval(ttlAst, typeVars, ImmutableMap.of()); } /** Evaluates the type transformation expression and returns the resulting type. * * @param ttlAst The node representing the type transformation expression * @param typeVars The environment containing the information about the type variables * @param nameVars The environment containing the information about the name variables * @return JSType The resulting type after the transformation */ @SuppressWarnings("unchecked") @VisibleForTesting JSType eval(Node ttlAst, ImmutableMap<String, JSType> typeVars, ImmutableMap<String, String> nameVars) { JSType result = evalInternal(ttlAst, new NameResolver(typeVars, nameVars)); return result.isEmptyType() ? 
getUnknownType() : result; } private JSType evalInternal(Node ttlAst, NameResolver nameResolver) { if (isTypeName(ttlAst)) { return evalTypeName(ttlAst); } if (isTypeVar(ttlAst)) { return evalTypeVar(ttlAst, nameResolver); } String name = getCallName(ttlAst); Keywords keyword = nameToKeyword(name); switch (keyword.kind) { case TYPE_CONSTRUCTOR: return evalTypeExpression(ttlAst, nameResolver); case OPERATION: return evalOperationExpression(ttlAst, nameResolver); default: throw new IllegalStateException( "Could not evaluate the type transformation expression"); } } private JSType evalOperationExpression(Node ttlAst, NameResolver nameResolver) { String name = getCallName(ttlAst); Keywords keyword = nameToKeyword(name); switch (keyword) { case COND: return evalConditional(ttlAst, nameResolver); case MAPUNION: return evalMapunion(ttlAst, nameResolver); case MAPRECORD: return evalMaprecord(ttlAst, nameResolver); case TYPEOFVAR: return evalTypeOfVar(ttlAst); case INSTANCEOF: return evalInstanceOf(ttlAst, nameResolver); case PRINTTYPE: return evalPrintType(ttlAst, nameResolver); case PROPTYPE: return evalPropType(ttlAst, nameResolver); default: throw new IllegalStateException("Invalid type transformation operation"); } } private JSType evalTypeExpression(Node ttlAst, NameResolver nameResolver) { String name = getCallName(ttlAst); Keywords keyword = nameToKeyword(name); switch (keyword) { case TYPE: return evalTemplatizedType(ttlAst, nameResolver); case UNION: return evalUnionType(ttlAst, nameResolver); case NONE: return getNoType(); case ALL: return getAllType(); case UNKNOWN: return getUnknownType(); case RAWTYPEOF: return evalRawTypeOf(ttlAst, nameResolver); case TEMPLATETYPEOF: return evalTemplateTypeOf(ttlAst, nameResolver); case RECORD: return evalRecordType(ttlAst, nameResolver); case TYPEEXPR: return evalNativeTypeExpr(ttlAst); default: throw new IllegalStateException("Invalid type expression"); } } private JSType evalTypeName(Node ttlAst) { String typeName = 
ttlAst.getString(); JSType resultingType = getType(typeName); // If the type name is not defined then return UNKNOWN and report a warning if (resultingType == null) { reportWarning(ttlAst, UNKNOWN_TYPENAME, typeName); return getUnknownType(); } return resultingType; } private JSType evalTemplatizedType(Node ttlAst, NameResolver nameResolver) { ImmutableList<Node> params = getCallParams(ttlAst); JSType firstParam = evalInternal(params.get(0), nameResolver); if (firstParam.isFullyInstantiated()) { reportWarning(ttlAst, BASETYPE_INVALID, firstParam.toString()); return getUnknownType(); } // TODO(lpino): Check that the number of parameters correspond with the // number of template types that the base type can take when creating // a templatized type. For instance, if the base type is Array then there // must be just one parameter. JSType[] templatizedTypes = new JSType[params.size() - 1]; for (int i = 0; i < templatizedTypes.length; i++) { templatizedTypes[i] = evalInternal(params.get(i + 1), nameResolver); } ObjectType baseType = firstParam.toMaybeObjectType(); return createTemplatizedType(baseType, templatizedTypes); } private JSType evalTypeVar(Node ttlAst, NameResolver nameResolver) { String typeVar = ttlAst.getString(); JSType resultingType = nameResolver.typeVars.get(typeVar); // If the type variable is not defined then return UNKNOWN and report a warning if (resultingType == null) { reportWarning(ttlAst, UNKNOWN_TYPEVAR, typeVar); return getUnknownType(); } return resultingType; } private JSType evalUnionType(Node ttlAst, NameResolver nameResolver) { // Get the parameters of the union ImmutableList<Node> params = getCallParams(ttlAst); int paramCount = params.size(); // Create an array of types after evaluating each parameter JSType[] basicTypes = new JSType[paramCount]; for (int i = 0; i < paramCount; i++) { basicTypes[i] = evalInternal(params.get(i), nameResolver); } return createUnionType(basicTypes); } private JSType[] evalTypeParams(Node ttlAst, 
NameResolver nameResolver) { ImmutableList<Node> params = getCallParams(ttlAst); int paramCount = params.size(); JSType[] result = new JSType[paramCount]; for (int i = 0; i < paramCount; i++) { result[i] = evalInternal(params.get(i), nameResolver); } return result; } private String evalString(Node ttlAst, NameResolver nameResolver) { if (ttlAst.isName()) { // Return the empty string if the name variable cannot be resolved if (!nameResolver.nameVars.containsKey(ttlAst.getString())) { reportWarning(ttlAst, UNKNOWN_STRVAR, ttlAst.getString()); return ""; } return nameResolver.nameVars.get(ttlAst.getString()); } return ttlAst.getString(); } private String[] evalStringParams(Node ttlAst, NameResolver nameResolver) { ImmutableList<Node> params = getCallParams(ttlAst); int paramCount = params.size(); String[] result = new String[paramCount]; for (int i = 0; i < paramCount; i++) { result[i] = evalString(params.get(i), nameResolver); } return result; } private boolean evalTypePredicate(Node ttlAst, NameResolver nameResolver) { JSType[] params = evalTypeParams(ttlAst, nameResolver); String name = getCallName(ttlAst); Keywords keyword = nameToKeyword(name); JSType type = params[0]; switch (keyword) { case EQ: return type.isEquivalentTo(params[1]); case SUB: return type.isSubtypeOf(params[1]); case ISCTOR: return type.isConstructor(); case ISTEMPLATIZED: return type.isObjectType() && type.toMaybeObjectType().isGenericObjectType() && type.isPartiallyInstantiated(); case ISRECORD: return type.isRecordType(); case ISUNKNOWN: return type.isSomeUnknownType(); default: throw new IllegalStateException( "Invalid type predicate in the type transformation"); } } private boolean evalStringPredicate(Node ttlAst, NameResolver nameResolver) { String[] params = evalStringParams(ttlAst, nameResolver); // If any of the parameters evaluates to the empty string then they were // not resolved by the name resolver. In this case we always return false. 
for (int i = 0; i < params.length; i++) { if (params[i].isEmpty()) { return false; } } String name = getCallName(ttlAst); Keywords keyword = nameToKeyword(name); switch (keyword) { case STREQ: return params[0].equals(params[1]); default: throw new IllegalStateException( "Invalid string predicate in the type transformation"); } } private boolean evalTypevarPredicate(Node ttlAst, NameResolver nameResolver) { String name = getCallName(ttlAst); Keywords keyword = nameToKeyword(name); switch (keyword) { case ISDEFINED: return nameResolver.typeVars.containsKey(getCallArgument(ttlAst, 0).getString()); default: throw new IllegalStateException( "Invalid typevar predicate in the type transformation"); } } private boolean evalBooleanOperation(Node ttlAst, NameResolver nameResolver) { boolean param0 = evalBoolean(ttlAst.getFirstChild(), nameResolver); if (ttlAst.isNot()) { return !param0; } if (ttlAst.isAnd()) { return param0 && evalBoolean(ttlAst.getLastChild(), nameResolver); } if (ttlAst.isOr()) { return param0 || evalBoolean(ttlAst.getLastChild(), nameResolver); } throw new IllegalStateException( "Invalid boolean predicate in the type transformation"); } private boolean evalBoolean(Node ttlAst, NameResolver nameResolver) { if (isBooleanOperation(ttlAst)) { return evalBooleanOperation(ttlAst, nameResolver); } String name = getCallName(ttlAst); Keywords keyword = nameToKeyword(name); switch (keyword.kind) { case STRING_PREDICATE: return evalStringPredicate(ttlAst, nameResolver); case TYPE_PREDICATE: return evalTypePredicate(ttlAst, nameResolver); case TYPEVAR_PREDICATE: return evalTypevarPredicate(ttlAst, nameResolver); default: throw new IllegalStateException( "Invalid boolean predicate in the type transformation"); } } private JSType evalConditional(Node ttlAst, NameResolver nameResolver) { ImmutableList<Node> params = getCallParams(ttlAst); if (evalBoolean(params.get(0), nameResolver)) { return evalInternal(params.get(1), nameResolver); } else { return 
evalInternal(params.get(2), nameResolver); } } private JSType evalMapunion(Node ttlAst, NameResolver nameResolver) { ImmutableList<Node> params = getCallParams(ttlAst); Node unionParam = params.get(0); Node mapFunction = params.get(1); String paramName = getFunctionParameter(mapFunction, 0); // The mapunion variable must not be defined in the environment if (nameResolver.typeVars.containsKey(paramName)) { reportWarning(ttlAst, DUPLICATE_VARIABLE, paramName); return getUnknownType(); } Node mapFunctionBody = NodeUtil.getFunctionBody(mapFunction); JSType unionType = evalInternal(unionParam, nameResolver); // If the first parameter does not correspond to a union type then // consider it as a union with a single type and evaluate if (!unionType.isUnionType()) { NameResolver newNameResolver = new NameResolver( addNewEntry(nameResolver.typeVars, paramName, unionType), nameResolver.nameVars); return evalInternal(mapFunctionBody, newNameResolver); } // Otherwise obtain the elements in the union type. 
Note that the block // above guarantees the casting to be safe Collection<JSType> unionElms = ImmutableList.copyOf(unionType.getUnionMembers()); // Evaluate the map function body using each element in the union type int unionSize = unionElms.size(); JSType[] newUnionElms = new JSType[unionSize]; int i = 0; for (JSType elm : unionElms) { NameResolver newNameResolver = new NameResolver( addNewEntry(nameResolver.typeVars, paramName, elm), nameResolver.nameVars); newUnionElms[i] = evalInternal(mapFunctionBody, newNameResolver); i++; } return createUnionType(newUnionElms); } private JSType evalRawTypeOf(Node ttlAst, NameResolver nameResolver) { ImmutableList<Node> params = getCallParams(ttlAst); JSType type = evalInternal(params.get(0), nameResolver); if (!type.isGenericObjectType()) { reportWarning(ttlAst, TEMPTYPE_INVALID, "rawTypeOf", type.toString()); return getUnknownType(); } return type.toMaybeObjectType().getRawType(); } private JSType evalTemplateTypeOf(Node ttlAst, NameResolver nameResolver) { ImmutableList<Node> params = getCallParams(ttlAst); JSType type = evalInternal(params.get(0), nameResolver); if (!type.isGenericObjectType()) { reportWarning(ttlAst, TEMPTYPE_INVALID, "templateTypeOf", type.toString()); return getUnknownType(); } int index = (int) params.get(1).getDouble(); ImmutableList<? 
extends JSType> templateTypes = type.toMaybeObjectType().getTemplateTypes(); if (index >= templateTypes.size()) { reportWarning(ttlAst, INDEX_OUTOFBOUNDS, Integer.toString(templateTypes.size()), Integer.toString(index)); return getUnknownType(); } return templateTypes.get(index); } private JSType evalRecord(Node record, NameResolver nameResolver) { Map<String, JSType> props = new LinkedHashMap<>(); for (Node propNode : record.children()) { // If it is a computed property then find the property name using the resolver if (propNode.isComputedProp()) { String compPropName = getComputedPropName(propNode); // If the name does not exist then report a warning if (!nameResolver.nameVars.containsKey(compPropName)) { reportWarning(record, UNKNOWN_NAMEVAR, compPropName); return getUnknownType(); } // Otherwise add the property Node propValue = getComputedPropValue(propNode); String resolvedName = nameResolver.nameVars.get(compPropName); JSType resultingType = evalInternal(propValue, nameResolver); props.put(resolvedName, resultingType); } else { String propName = propNode.getString(); JSType resultingType = evalInternal(propNode.getFirstChild(), nameResolver); props.put(propName, resultingType); } } return this.registry.createRecordType(props); } private JSType evalRecordParam(Node ttlAst, NameResolver nameResolver) { if (ttlAst.isObjectLit()) { return evalRecord(ttlAst, nameResolver); } // The parameter of record can be a type transformation expression return evalInternal(ttlAst, nameResolver); } private JSType evalRecordType(Node ttlAst, NameResolver nameResolver) { int paramCount = getCallParamCount(ttlAst); ImmutableList.Builder<ObjectType> recTypesBuilder = new ImmutableList.Builder<>(); for (int i = 0; i < paramCount; i++) { JSType type = evalRecordParam(getCallArgument(ttlAst, i), nameResolver); // Check that each parameter evaluates to an object ObjectType objType = type.toMaybeObjectType(); if (objType == null || objType.isUnknownType()) { reportWarning(ttlAst, 
RECPARAM_INVALID, type.toString()); return getUnknownType(); } JSType recType = this.registry.buildRecordTypeFromObject(objType); if (!recType.isEquivalentTo(getObjectType())) { recTypesBuilder.add(recType.toMaybeObjectType()); } } return joinRecordTypes(recTypesBuilder.build()); } private void putNewPropInPropertyMap(Map<String, JSType> props, String newPropName, JSType newPropValue) { // TODO(lpino): Decide if the best strategy is to collapse the properties // to a union type or not. So far, new values replace the old ones except // if they are two record types in which case the properties are joined // together // Three cases: // (i) If the key does not exist then add it to the map with the new value // (ii) If the key to be added already exists in the map and the new value // is not a record type then the current value is replaced with the new one // (iii) If the new value is a record type and the current is not then // the current value is replaced with the new one if (!props.containsKey(newPropName) || !newPropValue.isRecordType() || !props.get(newPropName).isRecordType()) { props.put(newPropName, newPropValue); return; } // Otherwise join the current value with the new one since both are records props.put(newPropName, joinRecordTypes(ImmutableList.of( (ObjectType) props.get(newPropName), (ObjectType) newPropValue))); } /** * Merges a list of record types. 
* Example * {r:{s:string, n:number}} and {a:boolean} * is transformed into {r:{s:string, n:number}, a:boolean} */ private JSType joinRecordTypes(ImmutableList<ObjectType> recTypes) { Map<String, JSType> props = new LinkedHashMap<>(); for (ObjectType recType : recTypes) { for (String newPropName : recType.getOwnPropertyNames()) { JSType newPropValue = recType.getPropertyType(newPropName); // Put the new property depending if it already exists in the map putNewPropInPropertyMap(props, newPropName, newPropValue); } } return createRecordType(ImmutableMap.copyOf(props)); } private JSType evalMaprecord(Node ttlAst, NameResolver nameResolver) { Node recordNode = ttlAst.getSecondChild(); Node mapFunction = ttlAst.getChildAtIndex(2); JSType type = evalInternal(recordNode, nameResolver); // If it is an empty record type (Object) then return if (type.isEquivalentTo(getObjectType())) { return getObjectType(); } // The parameter must be a valid record type if (!type.isRecordType()) { // TODO(lpino): Decide how to handle non-record types reportWarning(recordNode, RECTYPE_INVALID, type.toString()); return getUnknownType(); } ObjectType objtype = type.toMaybeObjectType(); // Fetch the information of the map function String paramKey = getFunctionParameter(mapFunction, 0); String paramValue = getFunctionParameter(mapFunction, 1); // The maprecord variables must not be defined in the environment if (nameResolver.nameVars.containsKey(paramKey)) { reportWarning(ttlAst, DUPLICATE_VARIABLE, paramKey); return getUnknownType(); } if (nameResolver.typeVars.containsKey(paramValue)) { reportWarning(ttlAst, DUPLICATE_VARIABLE, paramValue); return getUnknownType(); } // Compute the new properties using the map function Node mapFnBody = NodeUtil.getFunctionBody(mapFunction); Map<String, JSType> newProps = new LinkedHashMap<>(); for (String propName : objtype.getOwnPropertyNames()) { // The value of the current property JSType propValue = objtype.getPropertyType(propName); // Evaluate the map 
function body with paramValue and paramKey replaced // by the values of the current property NameResolver newNameResolver = new NameResolver( addNewEntry(nameResolver.typeVars, paramValue, propValue), addNewEntry(nameResolver.nameVars, paramKey, propName)); JSType body = evalInternal(mapFnBody, newNameResolver); // If the body returns unknown then the whole expression returns unknown if (body.isUnknownType()) { return getUnknownType(); } // Skip the property when the body evaluates to NO_TYPE // or the empty record (Object) if (body.isEmptyType() || body.isEquivalentTo(getObjectType())) { continue; } // Otherwise the body must evaluate to a record type if (!body.isRecordType()) { reportWarning(ttlAst, MAPRECORD_BODY_INVALID, body.toString()); return getUnknownType(); } // Add the properties of the resulting record type to the original one ObjectType bodyAsObj = body.toMaybeObjectType(); for (String newPropName : bodyAsObj.getOwnPropertyNames()) { JSType newPropValue = bodyAsObj.getPropertyType(newPropName); // If the key already exists then we have to mix it with the current property value putNewPropInPropertyMap(newProps, newPropName, newPropValue); } } return createRecordType(ImmutableMap.copyOf(newProps)); } private JSType evalTypeOfVar(Node ttlAst) { String name = getCallArgument(ttlAst, 0).getString(); StaticTypedSlot slot = typeEnv.getSlot(name); JSType type = slot != null ? 
slot.getType() : null; if (type == null) { reportWarning(ttlAst, VAR_UNDEFINED, name); return getUnknownType(); } return type; } private JSType evalInstanceOf(Node ttlAst, NameResolver nameResolver) { JSType type = evalInternal(getCallArgument(ttlAst, 0), nameResolver); if (type.isUnknownType() || !type.isConstructor()) { reportWarning(ttlAst, INVALID_CTOR, type.getDisplayName()); return getUnknownType(); } return type.toMaybeFunctionType().getInstanceType(); } private JSType evalNativeTypeExpr(Node ttlAst) { JSTypeExpression expr = new JSTypeExpression(getCallArgument(ttlAst, 0), VIRTUAL_FILE); return this.registry.evaluateTypeExpression(expr, this.typeEnv); } private JSType evalPrintType(Node ttlAst, NameResolver nameResolver) { JSType type = evalInternal(getCallArgument(ttlAst, 1), nameResolver); String msg = getCallArgument(ttlAst, 0).getString() + type; System.out.println(msg); return type; } private JSType evalPropType(Node ttlAst, NameResolver nameResolver) { JSType type = evalInternal(getCallArgument(ttlAst, 1), nameResolver); ObjectType objType = type.toMaybeObjectType(); if (objType == null) { reportWarning(ttlAst, PROPTYPE_INVALID, type.toString()); return getUnknownType(); } JSType propType = objType.getPropertyType(getCallArgument(ttlAst, 0).getString()); return firstNonNull(propType, getUnknownType()); } }
mprobst/closure-compiler
src/com/google/javascript/jscomp/TypeTransformation.java
Java
apache-2.0
30,898
from django.utils.translation import ugettext_lazy as _ import horizon from {{ dash_path }} import dashboard class {{ panel_name|title }}(horizon.Panel): name = _("{{ panel_name|title }}") slug = "{{ panel_name|slugify }}" dashboard.register({{ panel_name|title }})
xhorn/xchorizon
horizon/conf/panel_template/panel.py
Python
apache-2.0
280
'use strict'; var TodoServiceFactory = function(database){ return { // Return all todos in the database getTodos: function(){ return database('Todo').select().orderBy('createdAt', 'desc'); }, // Return a single todo by Id getTodo: function(id){ var p = Promise.defer(); database('Todo').where('id', id).select() .then(function(rows){ if(rows.length === 0){ //not found p.reject('TodoService: not found'); } else { p.resolve(rows[0]); } }); return p.promise; }, //Update a todo in the database updateTodo: function(todo){ var p = Promise.defer(); //TODO: real-world validation database('Todo').update({ text: todo.text, completed: todo.completed }) .where('id', todo.id) .then(function(affectedRows){ if(affectedRows === 1){ p.resolve(todo); } else { p.reject('Not found'); } }); return p.promise; }, //Create a new todo in the database createTodo: function(todo){ var p = Promise.defer(); //TODO: real-world validation database('Todo').insert(todo) .then(function(idArray){ //return the newly created todo todo.id = idArray[0]; p.resolve(todo); }) .catch(function(err){ p.reject('TodoService: create failed. Error:' + err.toString()); }); return p.promise; }, //Delete a todo specified by Id deleteTodo: function(todoId){ var p = Promise.defer(); database('Todo').where('id', todoId).del() .then(function(affectedRows){ if(affectedRows === 1){ return p.resolve(true); } else { return p.reject('TodoService: not found'); } }) .catch(function(err){ p.reject('TodoService: delete failed. Error' + err.toString()); }); return p.promise; } } } module.exports = TodoServiceFactory;
raffaeu/Syncho
src/node_modules/kontainer-di/examples/express/services/todo_service.js
JavaScript
apache-2.0
2,050
/* * Copyright @ 2015 Atlassian Pty Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package net.java.sip.communicator.impl.protocol.jabber.extensions.jingle; /** * A representation of the <tt>remote-candidate</tt> ICE transport element. * * @author Emil Ivov */ public class RemoteCandidatePacketExtension extends CandidatePacketExtension { /** * The name of the "candidate" element. */ public static final String ELEMENT_NAME = "remote-candidate"; /** * Creates a new {@link CandidatePacketExtension} */ public RemoteCandidatePacketExtension() { super(ELEMENT_NAME); } }
gardenia/jitsi-hammer
src/net/java/sip/communicator/impl/protocol/jabber/extensions/jingle/RemoteCandidatePacketExtension.java
Java
apache-2.0
1,149
/* * Copyright 2015-present Boundless Spatial Inc., http://boundlessgeo.com * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and limitations * under the License. */ import React from 'react'; import {connect} from 'react-redux'; import PropTypes from 'prop-types'; import * as mapActions from '../../actions/map'; /** @module components/map/zoom-control * @example * import SdkZoomControl from '@boundlessgeo/sdk/components/map/zoom-control'; * import { Provider } from 'react-redux'; * import SdkMap from '@boundlessgeo/sdk/components/map'; * import ReactDOM from 'react-dom'; * * ReactDOM.render(<Provider store={store}> * <SdkMap> * <SdkZoomControl /> * </SdkMap> * </Provider>, document.getElementById('map')); * * @desc Provides 2 buttons to zoom the map (zoom in and out). */ class ZoomControl extends React.Component { render() { let className = 'sdk-zoom-control'; if (this.props.className) { className += ' ' + this.props.className; } return ( <div className={className} style={this.props.style}> <button className='sdk-zoom-in' onClick={this.props.zoomIn} title={this.props.zoomInTitle}>+</button> <button className='sdk-zoom-out' onClick={this.props.zoomOut} title={this.props.zoomOutTitle}>{'\u2212'}</button> </div> ); } } ZoomControl.propTypes = { /** * Css className for the root div. */ className: PropTypes.string, /** * Style config object for root div. */ style: PropTypes.object, /** * Title for the zoom in button. */ zoomInTitle: PropTypes.string, /** * Title for the zoom out button. 
*/ zoomOutTitle: PropTypes.string, }; ZoomControl.defaultProps = { zoomInTitle: 'Zoom in', zoomOutTitle: 'Zoom out', }; function mapDispatchToProps(dispatch) { return { zoomIn: () => { dispatch(mapActions.zoomIn()); }, zoomOut: () => { dispatch(mapActions.zoomOut()); }, }; } export default connect(null, mapDispatchToProps)(ZoomControl);
boundlessgeo/sdk
src/components/map/zoom-control.js
JavaScript
apache-2.0
2,469
/* Copyright 2015 Coursera Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package org.coursera.android.shift; import android.os.Bundle; import android.support.v7.widget.LinearLayoutManager; import android.support.v7.widget.RecyclerView; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; public class ShiftActionsFragment extends ViewPagerFragment { private static final String TAB_TITLE_ACTIONS = "Actions"; public ShiftActionsFragment() { super(TAB_TITLE_ACTIONS); } public static ShiftActionsFragment getNewInstance() { return new ShiftActionsFragment(); } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { View view = inflater.inflate(R.layout.actions_fragment, container, false); RecyclerView recyclerView = (RecyclerView) view.findViewById(R.id.recycler_view); recyclerView.setHasFixedSize(true); recyclerView.setLayoutManager(new LinearLayoutManager(getActivity())); int bottomMargin = (int) getResources().getDimension(R.dimen.card_margin); recyclerView.addItemDecoration(new VerticalMarginItemDecoration(bottomMargin)); ShiftActionRecyclerViewAdapter adapter = new ShiftActionRecyclerViewAdapter(getActivity(), ShiftManager.getInstance().getActionManager().getActionList()); recyclerView.setAdapter(adapter); return view; } }
coursera/shift
shift/src/main/java/org/coursera/android/shift/ShiftActionsFragment.java
Java
apache-2.0
2,029
using MongoDB.Bson; namespace MongoDB.Protocol { /// <summary> /// Deprecated. OP_MSG sends a diagnostic message to the database. /// The database sends back a fixed resonse. /// </summary> /// <remarks> /// struct { /// MsgHeader header; // standard message header /// cstring message; // message for the database /// } /// </remarks> internal class MsgMessage : RequestMessageBase { /// <summary> /// Initializes a new instance of the <see cref="MsgMessage"/> class. /// </summary> public MsgMessage() : base(new BsonWriterSettings()){ Header = new MessageHeader(OpCode.Msg); } /// <summary> /// Gets or sets the message. /// </summary> /// <value>The message.</value> public string Message { get; set; } /// <summary> /// Writes the body. /// </summary> /// <param name="writer">The writer.</param> protected override void WriteBody(BsonWriter writer){ writer.Write(Message, false); } /// <summary> /// Calculates the size of the body. /// </summary> /// <param name="writer">The writer.</param> /// <returns></returns> protected override int CalculateBodySize(BsonWriter writer){ return writer.CalculateSize(Message, false); } } }
mongodb-csharp/mongodb-csharp
source/MongoDB/Protocol/MsgMessage.cs
C#
apache-2.0
1,486
package com.inomma.kandu.server; public enum RequestMethod { POST,GET,PUT; }
kandu-community/android
kandu-android/trunk/src/com/inomma/kandu/server/RequestMethod.java
Java
apache-2.0
80
// Copyright 2014 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.devtools.build.lib.rules.apple; import static com.google.devtools.build.lib.packages.Attribute.attr; import static com.google.devtools.build.lib.packages.BuildType.LABEL; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableMap; import com.google.devtools.build.lib.analysis.RuleDefinition; import com.google.devtools.build.lib.analysis.RuleDefinitionEnvironment; import com.google.devtools.build.lib.cmdline.Label; import com.google.devtools.build.lib.concurrent.ThreadSafety.Immutable; import com.google.devtools.build.lib.packages.Attribute; import com.google.devtools.build.lib.packages.Attribute.LabelLateBoundDefault; import com.google.devtools.build.lib.packages.RuleClass; import com.google.devtools.build.lib.packages.RuleClass.Builder.RuleClassType; import com.google.devtools.build.lib.skylarkbuildapi.apple.AppleToolchainApi; import java.io.Serializable; /** * Utility class for resolving items for the Apple toolchain (such as common tool flags, and paths). */ @Immutable public class AppleToolchain implements AppleToolchainApi<AppleConfiguration> { // These next two strings are shared secrets with the xcrunwrapper.sh to allow // expansion of DeveloperDir and SDKRoot and runtime, since they aren't known // until compile time on any given build machine. 
private static final String DEVELOPER_DIR = "__BAZEL_XCODE_DEVELOPER_DIR__"; private static final String SDKROOT_DIR = "__BAZEL_XCODE_SDKROOT__"; // These two paths are framework paths relative to SDKROOT. @VisibleForTesting public static final String DEVELOPER_FRAMEWORK_PATH = "/Developer/Library/Frameworks"; @VisibleForTesting public static final String SYSTEM_FRAMEWORK_PATH = "/System/Library/Frameworks"; // There is a handy reference to many clang warning flags at // http://nshipster.com/clang-diagnostics/ // There is also a useful narrative for many Xcode settings at // http://www.xs-labs.com/en/blog/2011/02/04/xcode-build-settings/ public static final ImmutableMap<String, String> DEFAULT_WARNINGS = new ImmutableMap.Builder<String, String>() .put("GCC_WARN_64_TO_32_BIT_CONVERSION", "-Wshorten-64-to-32") .put("CLANG_WARN_BOOL_CONVERSION", "-Wbool-conversion") .put("CLANG_WARN_CONSTANT_CONVERSION", "-Wconstant-conversion") // Double-underscores are intentional - thanks Xcode. .put("CLANG_WARN__DUPLICATE_METHOD_MATCH", "-Wduplicate-method-match") .put("CLANG_WARN_EMPTY_BODY", "-Wempty-body") .put("CLANG_WARN_ENUM_CONVERSION", "-Wenum-conversion") .put("CLANG_WARN_INT_CONVERSION", "-Wint-conversion") .put("CLANG_WARN_UNREACHABLE_CODE", "-Wunreachable-code") .put("GCC_WARN_ABOUT_RETURN_TYPE", "-Wmismatched-return-types") .put("GCC_WARN_UNDECLARED_SELECTOR", "-Wundeclared-selector") .put("GCC_WARN_UNINITIALIZED_AUTOS", "-Wuninitialized") .put("GCC_WARN_UNUSED_FUNCTION", "-Wunused-function") .put("GCC_WARN_UNUSED_VARIABLE", "-Wunused-variable") .build(); /** Returns the platform directory inside of Xcode for a platform name. */ public static String platformDir(String platformName) { return developerDir() + "/Platforms/" + platformName + ".platform"; } /** * Returns the platform directory inside of Xcode for a given configuration. */ public static String sdkDir() { return SDKROOT_DIR; } /** * Returns the Developer directory inside of Xcode for a given configuration. 
*/ public static String developerDir() { return DEVELOPER_DIR; } /** * Returns the platform frameworks directory inside of Xcode for a given {@link ApplePlatform}. */ public static String platformDeveloperFrameworkDir(ApplePlatform platform) { String platformDir = platformDir(platform.getNameInPlist()); return platformDir + "/Developer/Library/Frameworks"; } /** Returns the SDK frameworks directory inside of Xcode for a given configuration. */ public static String sdkFrameworkDir(ApplePlatform targetPlatform, XcodeConfigInfo xcodeConfig) { String relativePath; switch (targetPlatform) { case IOS_DEVICE: case IOS_SIMULATOR: if (xcodeConfig .getSdkVersionForPlatform(targetPlatform) .compareTo(DottedVersion.fromStringUnchecked("9.0")) >= 0) { relativePath = SYSTEM_FRAMEWORK_PATH; } else { relativePath = DEVELOPER_FRAMEWORK_PATH; } break; case MACOS: case WATCHOS_DEVICE: case WATCHOS_SIMULATOR: case TVOS_DEVICE: case TVOS_SIMULATOR: relativePath = SYSTEM_FRAMEWORK_PATH; break; default: throw new IllegalArgumentException("Unhandled platform " + targetPlatform); } return sdkDir() + relativePath; } /** The default label of the build-wide {@code xcode_config} configuration rule. */ public static LabelLateBoundDefault<AppleConfiguration> getXcodeConfigLabel( String toolsRepository) { return LabelLateBoundDefault.fromTargetConfiguration( AppleConfiguration.class, Label.parseAbsoluteUnchecked( toolsRepository + AppleCommandLineOptions.DEFAULT_XCODE_VERSION_CONFIG_LABEL), (Attribute.LateBoundDefault.Resolver<AppleConfiguration, Label> & Serializable) (rule, attributes, appleConfig) -> appleConfig.getXcodeConfigLabel()); } /** * Returns the platform directory inside of Xcode for a given configuration. */ @Override public String sdkDirConstant() { return sdkDir(); } /** * Returns the Developer directory inside of Xcode for a given configuration. 
*/ @Override public String developerDirConstant() { return developerDir(); } /** * Returns the platform frameworks directory inside of Xcode for a given configuration. */ @Override public String platformFrameworkDirFromConfig(AppleConfiguration configuration) { return platformDeveloperFrameworkDir(configuration.getSingleArchPlatform()); } /** * Base rule definition to be ancestor for rules which may require an xcode toolchain. */ public static class RequiresXcodeConfigRule implements RuleDefinition { private final String toolsRepository; public RequiresXcodeConfigRule(String toolsRepository) { this.toolsRepository = toolsRepository; } @Override public RuleClass build(RuleClass.Builder builder, RuleDefinitionEnvironment env) { return builder .add( attr(XcodeConfigRule.XCODE_CONFIG_ATTR_NAME, LABEL) .allowedRuleClasses("xcode_config") .checkConstraints() .direct_compile_time_input() .value(getXcodeConfigLabel(toolsRepository))) .build(); } @Override public Metadata getMetadata() { return RuleDefinition.Metadata.builder() .name("$requires_xcode_config") .type(RuleClassType.ABSTRACT) .build(); } } }
dslomov/bazel
src/main/java/com/google/devtools/build/lib/rules/apple/AppleToolchain.java
Java
apache-2.0
7,703
package cgeo.geocaching.files; import cgeo.geocaching.R; import cgeo.geocaching.ui.recyclerview.AbstractRecyclerViewAdapter; import cgeo.geocaching.ui.recyclerview.AbstractRecyclerViewHolder; import android.graphics.Typeface; import android.support.annotation.NonNull; import android.view.LayoutInflater; import android.view.View; import android.view.View.OnClickListener; import android.view.ViewGroup; import android.widget.TextView; import java.io.File; import java.util.List; import butterknife.BindView; public class FileSelectionListAdapter extends AbstractRecyclerViewAdapter<FileSelectionListAdapter.ViewHolder> { private final IFileSelectionView parentView; @NonNull private final List<File> files; public FileSelectionListAdapter(@NonNull final IFileSelectionView parentIn, @NonNull final List<File> listIn) { files = listIn; parentView = parentIn; } @Override public int getItemCount() { return files.size(); } @Override public ViewHolder onCreateViewHolder(final ViewGroup parent, final int position) { final View view = LayoutInflater.from(parent.getContext()).inflate(R.layout.mapfile_item, parent, false); final ViewHolder viewHolder = new ViewHolder(view); viewHolder.itemView.setOnClickListener(new OnClickListener() { @Override public void onClick(final View view) { final File file = files.get(viewHolder.getItemPosition()); parentView.setCurrentFile(file.toString()); parentView.close(); } }); return viewHolder; } @Override public void onBindViewHolder(final ViewHolder holder, final int position) { super.onBindViewHolder(holder, position); final File file = files.get(position); final String currentFile = parentView.getCurrentFile(); if (currentFile != null && file.equals(new File(currentFile))) { holder.filename.setTypeface(holder.filename.getTypeface(), Typeface.BOLD); } else { holder.filename.setTypeface(holder.filename.getTypeface(), Typeface.NORMAL); } holder.filepath.setText(file.getParent()); holder.filename.setText(file.getName()); } protected static final 
class ViewHolder extends AbstractRecyclerViewHolder { @BindView(R.id.mapfilepath) TextView filepath; @BindView(R.id.mapfilename) TextView filename; ViewHolder(final View view) { super(view); } } }
kumy/cgeo
main/src/cgeo/geocaching/files/FileSelectionListAdapter.java
Java
apache-2.0
2,548
/* * Copyright 2005 JBoss Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.drools.eclipse.flow.ruleflow.view.property.constraint; import java.util.HashMap; import java.util.List; import java.util.Map; import org.drools.eclipse.flow.common.view.property.EditBeanDialog; import org.eclipse.swt.SWT; import org.eclipse.swt.events.SelectionAdapter; import org.eclipse.swt.events.SelectionEvent; import org.eclipse.swt.layout.GridData; import org.eclipse.swt.layout.GridLayout; import org.eclipse.swt.widgets.Button; import org.eclipse.swt.widgets.Composite; import org.eclipse.swt.widgets.Control; import org.eclipse.swt.widgets.Display; import org.eclipse.swt.widgets.Label; import org.eclipse.swt.widgets.Shell; import org.jbpm.workflow.core.Constraint; import org.jbpm.workflow.core.WorkflowProcess; import org.jbpm.workflow.core.impl.ConnectionRef; import org.jbpm.workflow.core.impl.NodeImpl; import org.jbpm.workflow.core.node.StateNode; import org.kie.api.definition.process.Connection; /** * Dialog for editing constraints. 
*/ public class StateConstraintListDialog extends EditBeanDialog<Map<ConnectionRef, Constraint>> { private WorkflowProcess process; private StateNode stateNode; private Map<ConnectionRef, Constraint> newMap; private Map<Connection, Label> labels = new HashMap<Connection, Label>(); protected StateConstraintListDialog(Shell parentShell, WorkflowProcess process, StateNode stateNode) { super(parentShell, "Edit Constraints"); this.process = process; this.stateNode = stateNode; } protected Control createDialogArea(Composite parent) { Composite composite = (Composite) super.createDialogArea(parent); GridLayout gridLayout = new GridLayout(); gridLayout.numColumns = 3; composite.setLayout(gridLayout); List<Connection> outgoingConnections = stateNode.getOutgoingConnections(NodeImpl.CONNECTION_DEFAULT_TYPE); labels.clear(); for (Connection outgoingConnection: outgoingConnections) { Label label1 = new Label(composite, SWT.NONE); label1.setText("To node " + outgoingConnection.getTo().getName() + ": "); Label label2 = new Label(composite, SWT.NONE); labels.put(outgoingConnection, label2); GridData gridData = new GridData(); gridData.grabExcessHorizontalSpace = true; gridData.horizontalAlignment = GridData.FILL; label2.setLayoutData(gridData); Constraint constraint = newMap.get( new ConnectionRef(outgoingConnection.getTo().getId(), outgoingConnection.getToType())); if (constraint != null) { label2.setText(constraint.getName()); } Button editButton = new Button(composite, SWT.NONE); editButton.setText("Edit"); editButton.addSelectionListener(new EditButtonListener( outgoingConnection)); } return composite; } public void setValue(Map<ConnectionRef, Constraint> value) { super.setValue(value); this.newMap = new HashMap<ConnectionRef, Constraint>((Map<ConnectionRef, Constraint>) value); } protected Map<ConnectionRef, Constraint> updateValue(Map<ConnectionRef, Constraint> value) { return newMap; } private void editItem(final Connection connection) { final Runnable r = new Runnable() { 
public void run() { RuleFlowConstraintDialog dialog = new RuleFlowConstraintDialog( getShell(), process); dialog.create(); ConnectionRef connectionRef = new ConnectionRef(connection.getTo().getId(), connection.getToType()); Constraint constraint = newMap.get(connectionRef); dialog.setConstraint(constraint); dialog.fixType(0); dialog.fixDialect(0); int code = dialog.open(); if (code != CANCEL) { constraint = dialog.getConstraint(); newMap.put( connectionRef, constraint); setConnectionText( (Label) labels.get(connection), constraint.getName()); } } }; r.run(); } private void setConnectionText(final Label connection, final String name) { Display.getDefault().asyncExec(new Runnable() { public void run() { connection.setText(name); } }); } private class EditButtonListener extends SelectionAdapter { private Connection connection; public EditButtonListener(Connection connection) { this.connection = connection; } public void widgetSelected(SelectionEvent e) { editItem(connection); } } }
pkman/droolsjbpm-tools
drools-eclipse/org.drools.eclipse/src/main/java/org/drools/eclipse/flow/ruleflow/view/property/constraint/StateConstraintListDialog.java
Java
apache-2.0
5,496
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.beam.runners.dataflow; import static org.apache.beam.sdk.metrics.MetricMatchers.attemptedMetricsResult; import static org.apache.beam.sdk.metrics.MetricMatchers.committedMetricsResult; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import com.google.api.services.dataflow.Dataflow; import com.google.api.services.dataflow.model.Job; import com.google.api.services.dataflow.model.JobMetrics; import com.google.api.services.dataflow.model.MetricStructuredName; import com.google.api.services.dataflow.model.MetricUpdate; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import java.io.IOException; import java.math.BigDecimal; import org.apache.beam.runners.dataflow.testing.TestDataflowPipelineOptions; import org.apache.beam.sdk.PipelineResult.State; import org.apache.beam.sdk.metrics.MetricQueryResults; import 
org.apache.beam.sdk.options.PipelineOptionsFactory; import org.apache.beam.sdk.util.NoopPathValidator; import org.apache.beam.sdk.util.TestCredential; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; import org.mockito.Mock; import org.mockito.MockitoAnnotations; /** * Tests for {@link DataflowMetrics}. */ @RunWith(JUnit4.class) public class DataflowMetricsTest { private static final String PROJECT_ID = "some-project"; private static final String JOB_ID = "1234"; private static final String REGION_ID = "some-region"; private static final String REPLACEMENT_JOB_ID = "4321"; @Mock private Dataflow mockWorkflowClient; @Mock private Dataflow.Projects mockProjects; @Mock private Dataflow.Projects.Locations mockLocations; @Mock private Dataflow.Projects.Locations.Jobs mockJobs; private TestDataflowPipelineOptions options; @Before public void setup() { MockitoAnnotations.initMocks(this); when(mockWorkflowClient.projects()).thenReturn(mockProjects); when(mockProjects.locations()).thenReturn(mockLocations); when(mockLocations.jobs()).thenReturn(mockJobs); options = PipelineOptionsFactory.as(TestDataflowPipelineOptions.class); options.setDataflowClient(mockWorkflowClient); options.setProject(PROJECT_ID); options.setRunner(DataflowRunner.class); options.setTempLocation("gs://fakebucket/temp"); options.setPathValidatorClass(NoopPathValidator.class); options.setGcpCredential(new TestCredential()); } @Test public void testEmptyMetricUpdates() throws IOException { Job modelJob = new Job(); modelJob.setCurrentState(State.RUNNING.toString()); DataflowPipelineJob job = mock(DataflowPipelineJob.class); when(job.getState()).thenReturn(State.RUNNING); job.jobId = JOB_ID; JobMetrics jobMetrics = new JobMetrics(); jobMetrics.setMetrics(ImmutableList.<MetricUpdate>of()); DataflowClient dataflowClient = mock(DataflowClient.class); when(dataflowClient.getJobMetrics(JOB_ID)).thenReturn(jobMetrics); DataflowMetrics 
dataflowMetrics = new DataflowMetrics(job, dataflowClient); MetricQueryResults result = dataflowMetrics.queryMetrics(); assertThat(ImmutableList.copyOf(result.counters()), is(empty())); assertThat(ImmutableList.copyOf(result.distributions()), is(empty())); } @Test public void testCachingMetricUpdates() throws IOException { Job modelJob = new Job(); modelJob.setCurrentState(State.RUNNING.toString()); DataflowPipelineJob job = mock(DataflowPipelineJob.class); when(job.getState()).thenReturn(State.DONE); job.jobId = JOB_ID; JobMetrics jobMetrics = new JobMetrics(); jobMetrics.setMetrics(ImmutableList.<MetricUpdate>of()); DataflowClient dataflowClient = mock(DataflowClient.class); when(dataflowClient.getJobMetrics(JOB_ID)).thenReturn(jobMetrics); DataflowMetrics dataflowMetrics = new DataflowMetrics(job, dataflowClient); verify(dataflowClient, times(0)).getJobMetrics(JOB_ID); dataflowMetrics.queryMetrics(null); verify(dataflowClient, times(1)).getJobMetrics(JOB_ID); dataflowMetrics.queryMetrics(null); verify(dataflowClient, times(1)).getJobMetrics(JOB_ID); } private MetricUpdate makeCounterMetricUpdate(String name, String namespace, String step, long scalar, boolean tentative) { MetricUpdate update = new MetricUpdate(); update.setScalar(new BigDecimal(scalar)); MetricStructuredName structuredName = new MetricStructuredName(); structuredName.setName(name); structuredName.setOrigin("user"); ImmutableMap.Builder contextBuilder = new ImmutableMap.Builder<String, String>(); contextBuilder.put("step", step) .put("namespace", namespace); if (tentative) { contextBuilder.put("tentative", "true"); } structuredName.setContext(contextBuilder.build()); update.setName(structuredName); return update; } @Test public void testSingleCounterUpdates() throws IOException { JobMetrics jobMetrics = new JobMetrics(); DataflowPipelineJob job = mock(DataflowPipelineJob.class); when(job.getState()).thenReturn(State.RUNNING); job.jobId = JOB_ID; MetricUpdate update = new MetricUpdate(); long 
stepValue = 1234L; update.setScalar(new BigDecimal(stepValue)); // The parser relies on the fact that one tentative and one committed metric update exist in // the job metrics results. MetricUpdate mu1 = makeCounterMetricUpdate("counterName", "counterNamespace", "s2", 1234L, false); MetricUpdate mu1Tentative = makeCounterMetricUpdate("counterName", "counterNamespace", "s2", 1233L, true); jobMetrics.setMetrics(ImmutableList.of(mu1, mu1Tentative)); DataflowClient dataflowClient = mock(DataflowClient.class); when(dataflowClient.getJobMetrics(JOB_ID)).thenReturn(jobMetrics); DataflowMetrics dataflowMetrics = new DataflowMetrics(job, dataflowClient); MetricQueryResults result = dataflowMetrics.queryMetrics(null); assertThat(result.counters(), containsInAnyOrder( attemptedMetricsResult("counterNamespace", "counterName", "s2", 1233L))); assertThat(result.counters(), containsInAnyOrder( committedMetricsResult("counterNamespace", "counterName", "s2", 1234L))); } @Test public void testIgnoreDistributionButGetCounterUpdates() throws IOException { JobMetrics jobMetrics = new JobMetrics(); DataflowClient dataflowClient = mock(DataflowClient.class); when(dataflowClient.getJobMetrics(JOB_ID)).thenReturn(jobMetrics); DataflowPipelineJob job = mock(DataflowPipelineJob.class); when(job.getState()).thenReturn(State.RUNNING); job.jobId = JOB_ID; // The parser relies on the fact that one tentative and one committed metric update exist in // the job metrics results. 
jobMetrics.setMetrics(ImmutableList.of( makeCounterMetricUpdate("counterName", "counterNamespace", "s2", 1233L, false), makeCounterMetricUpdate("counterName", "counterNamespace", "s2", 1234L, true), makeCounterMetricUpdate("otherCounter[MIN]", "otherNamespace", "s3", 0L, false), makeCounterMetricUpdate("otherCounter[MIN]", "otherNamespace", "s3", 0L, true))); DataflowMetrics dataflowMetrics = new DataflowMetrics(job, dataflowClient); MetricQueryResults result = dataflowMetrics.queryMetrics(null); assertThat(result.counters(), containsInAnyOrder( attemptedMetricsResult("counterNamespace", "counterName", "s2", 1234L))); assertThat(result.counters(), containsInAnyOrder( committedMetricsResult("counterNamespace", "counterName", "s2", 1233L))); } @Test public void testMultipleCounterUpdates() throws IOException { JobMetrics jobMetrics = new JobMetrics(); DataflowClient dataflowClient = mock(DataflowClient.class); when(dataflowClient.getJobMetrics(JOB_ID)).thenReturn(jobMetrics); DataflowPipelineJob job = mock(DataflowPipelineJob.class); when(job.getState()).thenReturn(State.RUNNING); job.jobId = JOB_ID; // The parser relies on the fact that one tentative and one committed metric update exist in // the job metrics results. 
jobMetrics.setMetrics(ImmutableList.of( makeCounterMetricUpdate("counterName", "counterNamespace", "s2", 1233L, false), makeCounterMetricUpdate("counterName", "counterNamespace", "s2", 1234L, true), makeCounterMetricUpdate("otherCounter", "otherNamespace", "s3", 12L, false), makeCounterMetricUpdate("otherCounter", "otherNamespace", "s3", 12L, true), makeCounterMetricUpdate("counterName", "otherNamespace", "s4", 1200L, false), makeCounterMetricUpdate("counterName", "otherNamespace", "s4", 1233L, true))); DataflowMetrics dataflowMetrics = new DataflowMetrics(job, dataflowClient); MetricQueryResults result = dataflowMetrics.queryMetrics(null); assertThat(result.counters(), containsInAnyOrder( attemptedMetricsResult("counterNamespace", "counterName", "s2", 1234L), attemptedMetricsResult("otherNamespace", "otherCounter", "s3", 12L), attemptedMetricsResult("otherNamespace", "counterName", "s4", 1233L))); assertThat(result.counters(), containsInAnyOrder( committedMetricsResult("counterNamespace", "counterName", "s2", 1233L), committedMetricsResult("otherNamespace", "otherCounter", "s3", 12L), committedMetricsResult("otherNamespace", "counterName", "s4", 1200L))); } }
xsm110/Apache-Beam
runners/google-cloud-dataflow-java/src/test/java/org/apache/beam/runners/dataflow/DataflowMetricsTest.java
Java
apache-2.0
10,530
/// Copyright (c) 2009 Microsoft Corporation /// /// Redistribution and use in source and binary forms, with or without modification, are permitted provided /// that the following conditions are met: /// * Redistributions of source code must retain the above copyright notice, this list of conditions and /// the following disclaimer. /// * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and /// the following disclaimer in the documentation and/or other materials provided with the distribution. /// * Neither the name of Microsoft nor the names of its contributors may be used to /// endorse or promote products derived from this software without specific prior written permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR /// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE /// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT /// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS /// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, /// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF /// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
ES5Harness.registerTest({ id: "15.4.4.14-3-21", path: "TestCases/chapter15/15.4/15.4.4/15.4.4.14/15.4.4.14-3-21.js", description: "Array.prototype.indexOf - 'length' is an object that has an own valueOf method that returns an object and toString method that returns a string", test: function testcase() { var toStringAccessed = false; var valueOfAccessed = false; var obj = { 1: true, length: { toString: function () { toStringAccessed = true; return '2'; }, valueOf: function () { valueOfAccessed = true; return {}; } } }; return Array.prototype.indexOf.call(obj, true) === 1 && toStringAccessed && valueOfAccessed; }, precondition: function prereq() { return fnExists(Array.prototype.indexOf); } });
hnafar/IronJS
Src/Tests/ietestcenter/chapter15/15.4/15.4.4/15.4.4.14/15.4.4.14-3-21.js
JavaScript
apache-2.0
2,528
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.builder.component.dsl; import javax.annotation.Generated; import org.apache.camel.Component; import org.apache.camel.builder.component.AbstractComponentBuilder; import org.apache.camel.builder.component.ComponentBuilder; import org.apache.camel.component.atomix.client.set.AtomixSetComponent; /** * Access Atomix's distributed set. * * Generated by camel-package-maven-plugin - do not edit this file! */ @Generated("org.apache.camel.maven.packaging.ComponentDslMojo") public interface AtomixSetComponentBuilderFactory { /** * Atomix Set (camel-atomix) * Access Atomix's distributed set. * * Category: clustering * Since: 2.20 * Maven coordinates: org.apache.camel:camel-atomix * * @return the dsl builder */ @Deprecated static AtomixSetComponentBuilder atomixSet() { return new AtomixSetComponentBuilderImpl(); } /** * Builder for the Atomix Set component. */ interface AtomixSetComponentBuilder extends ComponentBuilder<AtomixSetComponent> { /** * The Atomix instance to use. * * The option is a: &lt;code&gt;io.atomix.Atomix&lt;/code&gt; type. 
* * Group: common * * @param atomix the value to set * @return the dsl builder */ default AtomixSetComponentBuilder atomix(io.atomix.Atomix atomix) { doSetProperty("atomix", atomix); return this; } /** * The shared component configuration. * * The option is a: * &lt;code&gt;org.apache.camel.component.atomix.client.set.AtomixSetConfiguration&lt;/code&gt; type. * * Group: common * * @param configuration the value to set * @return the dsl builder */ default AtomixSetComponentBuilder configuration( org.apache.camel.component.atomix.client.set.AtomixSetConfiguration configuration) { doSetProperty("configuration", configuration); return this; } /** * The path to the AtomixClient configuration. * * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type. * * Group: common * * @param configurationUri the value to set * @return the dsl builder */ default AtomixSetComponentBuilder configurationUri( java.lang.String configurationUri) { doSetProperty("configurationUri", configurationUri); return this; } /** * The default action. * * The option is a: * &lt;code&gt;org.apache.camel.component.atomix.client.set.AtomixSet.Action&lt;/code&gt; type. * * Default: ADD * Group: common * * @param defaultAction the value to set * @return the dsl builder */ default AtomixSetComponentBuilder defaultAction( org.apache.camel.component.atomix.client.set.AtomixSet.Action defaultAction) { doSetProperty("defaultAction", defaultAction); return this; } /** * The nodes the AtomixClient should connect to. * * The option is a: * &lt;code&gt;java.util.List&amp;lt;io.atomix.catalyst.transport.Address&amp;gt;&lt;/code&gt; type. * * Group: common * * @param nodes the value to set * @return the dsl builder */ default AtomixSetComponentBuilder nodes( java.util.List<io.atomix.catalyst.transport.Address> nodes) { doSetProperty("nodes", nodes); return this; } /** * The header that wil carry the result. * * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type. 
* * Group: common * * @param resultHeader the value to set * @return the dsl builder */ default AtomixSetComponentBuilder resultHeader( java.lang.String resultHeader) { doSetProperty("resultHeader", resultHeader); return this; } /** * The class name (fqn) of the Atomix transport. * * The option is a: &lt;code&gt;java.lang.String&lt;/code&gt; type. * * Default: io.atomix.catalyst.transport.netty.NettyTransport * Group: common * * @param transportClassName the value to set * @return the dsl builder */ default AtomixSetComponentBuilder transportClassName( java.lang.String transportClassName) { doSetProperty("transportClassName", transportClassName); return this; } /** * The resource ttl. * * The option is a: &lt;code&gt;long&lt;/code&gt; type. * * Group: common * * @param ttl the value to set * @return the dsl builder */ default AtomixSetComponentBuilder ttl(long ttl) { doSetProperty("ttl", ttl); return this; } /** * Allows for bridging the consumer to the Camel routing Error Handler, * which mean any exceptions occurred while the consumer is trying to * pickup incoming messages, or the likes, will now be processed as a * message and handled by the routing Error Handler. By default the * consumer will use the org.apache.camel.spi.ExceptionHandler to deal * with exceptions, that will be logged at WARN or ERROR level and * ignored. * * The option is a: &lt;code&gt;boolean&lt;/code&gt; type. * * Default: false * Group: consumer * * @param bridgeErrorHandler the value to set * @return the dsl builder */ default AtomixSetComponentBuilder bridgeErrorHandler( boolean bridgeErrorHandler) { doSetProperty("bridgeErrorHandler", bridgeErrorHandler); return this; } /** * Whether the producer should be started lazy (on the first message). * By starting lazy you can use this to allow CamelContext and routes to * startup in situations where a producer may otherwise fail during * starting and cause the route to fail being started. 
By deferring this * startup to be lazy then the startup failure can be handled during * routing messages via Camel's routing error handlers. Beware that when * the first message is processed then creating and starting the * producer may take a little time and prolong the total processing time * of the processing. * * The option is a: &lt;code&gt;boolean&lt;/code&gt; type. * * Default: false * Group: producer * * @param lazyStartProducer the value to set * @return the dsl builder */ default AtomixSetComponentBuilder lazyStartProducer( boolean lazyStartProducer) { doSetProperty("lazyStartProducer", lazyStartProducer); return this; } /** * Whether autowiring is enabled. This is used for automatic autowiring * options (the option must be marked as autowired) by looking up in the * registry to find if there is a single instance of matching type, * which then gets configured on the component. This can be used for * automatic configuring JDBC data sources, JMS connection factories, * AWS Clients, etc. * * The option is a: &lt;code&gt;boolean&lt;/code&gt; type. * * Default: true * Group: advanced * * @param autowiredEnabled the value to set * @return the dsl builder */ default AtomixSetComponentBuilder autowiredEnabled( boolean autowiredEnabled) { doSetProperty("autowiredEnabled", autowiredEnabled); return this; } /** * The cluster wide default resource configuration. * * The option is a: &lt;code&gt;java.util.Properties&lt;/code&gt; type. * * Group: advanced * * @param defaultResourceConfig the value to set * @return the dsl builder */ default AtomixSetComponentBuilder defaultResourceConfig( java.util.Properties defaultResourceConfig) { doSetProperty("defaultResourceConfig", defaultResourceConfig); return this; } /** * The local default resource options. * * The option is a: &lt;code&gt;java.util.Properties&lt;/code&gt; type. 
* * Group: advanced * * @param defaultResourceOptions the value to set * @return the dsl builder */ default AtomixSetComponentBuilder defaultResourceOptions( java.util.Properties defaultResourceOptions) { doSetProperty("defaultResourceOptions", defaultResourceOptions); return this; } /** * Sets if the local member should join groups as PersistentMember or * not. If set to ephemeral the local member will receive an auto * generated ID thus the local one is ignored. * * The option is a: &lt;code&gt;boolean&lt;/code&gt; type. * * Default: false * Group: advanced * * @param ephemeral the value to set * @return the dsl builder */ default AtomixSetComponentBuilder ephemeral(boolean ephemeral) { doSetProperty("ephemeral", ephemeral); return this; } /** * The read consistency level. * * The option is a: * &lt;code&gt;io.atomix.resource.ReadConsistency&lt;/code&gt; type. * * Group: advanced * * @param readConsistency the value to set * @return the dsl builder */ default AtomixSetComponentBuilder readConsistency( io.atomix.resource.ReadConsistency readConsistency) { doSetProperty("readConsistency", readConsistency); return this; } /** * Cluster wide resources configuration. * * The option is a: &lt;code&gt;java.util.Map&amp;lt;java.lang.String, * java.util.Properties&amp;gt;&lt;/code&gt; type. * * Group: advanced * * @param resourceConfigs the value to set * @return the dsl builder */ default AtomixSetComponentBuilder resourceConfigs( java.util.Map<java.lang.String, java.util.Properties> resourceConfigs) { doSetProperty("resourceConfigs", resourceConfigs); return this; } /** * Local resources configurations. * * The option is a: &lt;code&gt;java.util.Map&amp;lt;java.lang.String, * java.util.Properties&amp;gt;&lt;/code&gt; type. 
* * Group: advanced * * @param resourceOptions the value to set * @return the dsl builder */ default AtomixSetComponentBuilder resourceOptions( java.util.Map<java.lang.String, java.util.Properties> resourceOptions) { doSetProperty("resourceOptions", resourceOptions); return this; } } class AtomixSetComponentBuilderImpl extends AbstractComponentBuilder<AtomixSetComponent> implements AtomixSetComponentBuilder { @Override protected AtomixSetComponent buildConcreteComponent() { return new AtomixSetComponent(); } private org.apache.camel.component.atomix.client.set.AtomixSetConfiguration getOrCreateConfiguration( org.apache.camel.component.atomix.client.set.AtomixSetComponent component) { if (component.getConfiguration() == null) { component.setConfiguration(new org.apache.camel.component.atomix.client.set.AtomixSetConfiguration()); } return component.getConfiguration(); } @Override protected boolean setPropertyOnComponent( Component component, String name, Object value) { switch (name) { case "atomix": getOrCreateConfiguration((AtomixSetComponent) component).setAtomix((io.atomix.Atomix) value); return true; case "configuration": ((AtomixSetComponent) component).setConfiguration((org.apache.camel.component.atomix.client.set.AtomixSetConfiguration) value); return true; case "configurationUri": ((AtomixSetComponent) component).setConfigurationUri((java.lang.String) value); return true; case "defaultAction": getOrCreateConfiguration((AtomixSetComponent) component).setDefaultAction((org.apache.camel.component.atomix.client.set.AtomixSet.Action) value); return true; case "nodes": ((AtomixSetComponent) component).setNodes((java.util.List) value); return true; case "resultHeader": getOrCreateConfiguration((AtomixSetComponent) component).setResultHeader((java.lang.String) value); return true; case "transportClassName": getOrCreateConfiguration((AtomixSetComponent) component).setTransportClassName((java.lang.String) value); return true; case "ttl": 
getOrCreateConfiguration((AtomixSetComponent) component).setTtl((long) value); return true; case "bridgeErrorHandler": ((AtomixSetComponent) component).setBridgeErrorHandler((boolean) value); return true; case "lazyStartProducer": ((AtomixSetComponent) component).setLazyStartProducer((boolean) value); return true; case "autowiredEnabled": ((AtomixSetComponent) component).setAutowiredEnabled((boolean) value); return true; case "defaultResourceConfig": getOrCreateConfiguration((AtomixSetComponent) component).setDefaultResourceConfig((java.util.Properties) value); return true; case "defaultResourceOptions": getOrCreateConfiguration((AtomixSetComponent) component).setDefaultResourceOptions((java.util.Properties) value); return true; case "ephemeral": getOrCreateConfiguration((AtomixSetComponent) component).setEphemeral((boolean) value); return true; case "readConsistency": getOrCreateConfiguration((AtomixSetComponent) component).setReadConsistency((io.atomix.resource.ReadConsistency) value); return true; case "resourceConfigs": getOrCreateConfiguration((AtomixSetComponent) component).setResourceConfigs((java.util.Map) value); return true; case "resourceOptions": getOrCreateConfiguration((AtomixSetComponent) component).setResourceOptions((java.util.Map) value); return true; default: return false; } } } }
christophd/camel
dsl/camel-componentdsl/src/generated/java/org/apache/camel/builder/component/dsl/AtomixSetComponentBuilderFactory.java
Java
apache-2.0
16,066
define(function(require, exports, module) { var Morris=require("morris"); require("jquery.bootstrap-datetimepicker"); var Validator = require('bootstrap.validator'); var autoSubmitCondition=require("./autoSubmitCondition.js"); require('common/validator-rules').inject(Validator); var now = new Date(); exports.run = function() { if($('#data').length > 0){ var data = eval ("(" + $('#data').attr("value") + ")"); Morris.Line({ element: 'line-data', data: data, xkey: 'date', ykeys: ['count'], labels: [Translator.trans('班级营收额')], xLabels:"day" }); } $("[name=endTime]").datetimepicker({ autoclose: true, format: 'yyyy-mm-dd', minView: 'month' }); $('[name=endTime]').datetimepicker('setEndDate', now); $('[name=endTime]').datetimepicker('setStartDate', $('#classroomIncomeStartDate').attr("value")); $("[name=startTime]").datetimepicker({ autoclose: true, format: 'yyyy-mm-dd', minView: 'month' }); $('[name=startTime]').datetimepicker('setEndDate', now); $('[name=startTime]').datetimepicker('setStartDate', $('#classroomIncomeStartDate').attr("value")); var validator = new Validator({ element: '#operation-form'}); validator.addItem({ element: '[name=startTime]', required: true, rule:'date_check' }); validator.addItem({ element: '[name=endTime]', required: true, rule:'date_check' }); validator.addItem({ element: '[name=analysisDateType]', required: true }); autoSubmitCondition.autoSubmitCondition(); }; });
18826252059/im
web/bundles/topxiaadmin/js/controller/analysis/classroom-income.js
JavaScript
apache-2.0
1,998
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #include <aws/license-manager/model/Tag.h> #include <aws/core/utils/json/JsonSerializer.h> #include <utility> using namespace Aws::Utils::Json; using namespace Aws::Utils; namespace Aws { namespace LicenseManager { namespace Model { Tag::Tag() : m_keyHasBeenSet(false), m_valueHasBeenSet(false) { } Tag::Tag(JsonView jsonValue) : m_keyHasBeenSet(false), m_valueHasBeenSet(false) { *this = jsonValue; } Tag& Tag::operator =(JsonView jsonValue) { if(jsonValue.ValueExists("Key")) { m_key = jsonValue.GetString("Key"); m_keyHasBeenSet = true; } if(jsonValue.ValueExists("Value")) { m_value = jsonValue.GetString("Value"); m_valueHasBeenSet = true; } return *this; } JsonValue Tag::Jsonize() const { JsonValue payload; if(m_keyHasBeenSet) { payload.WithString("Key", m_key); } if(m_valueHasBeenSet) { payload.WithString("Value", m_value); } return payload; } } // namespace Model } // namespace LicenseManager } // namespace Aws
aws/aws-sdk-cpp
aws-cpp-sdk-license-manager/source/model/Tag.cpp
C++
apache-2.0
1,136
# # Cookbook Name:: apache2 # Recipe:: mod_auth_cas # # Copyright 2013, Opscode, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # include_recipe 'apache2::default' if node['apache']['mod_auth_cas']['from_source'] package 'httpd-devel' do package_name value_for_platform_family( %w[rhel fedora suse] => 'httpd-devel', 'debian' => 'apache2-dev' ) end git '/tmp/mod_auth_cas' do repository 'git://github.com/Jasig/mod_auth_cas.git' revision node['apache']['mod_auth_cas']['source_revision'] notifies :run, 'execute[compile mod_auth_cas]', :immediately end execute 'compile mod_auth_cas' do command './configure && make && make install' cwd '/tmp/mod_auth_cas' not_if "test -f #{node['apache']['libexecdir']}/mod_auth_cas.so" end template "#{node['apache']['dir']}/mods-available/auth_cas.load" do source 'mods/auth_cas.load.erb' owner 'root' group node['apache']['root_group'] mode '0644' end else case node['platform_family'] when 'debian' package 'libapache2-mod-auth-cas' when 'rhel', 'fedora' yum_package 'mod_auth_cas' do notifies :run, 'execute[generate-module-list]', :immediately end file "#{node['apache']['dir']}/conf-available/auth_cas.conf" do action :delete backup false end end end apache_module 'auth_cas' do conf true end directory "#{node['apache']['cache_dir']}/mod_auth_cas" do owner node['apache']['user'] group node['apache']['group'] mode '0700' end
ceros/apache2-onehealth
recipes/mod_auth_cas.rb
Ruby
apache-2.0
2,026
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @Version("3.0.0") package org.apache.jackrabbit.oak.plugins.tree; import org.osgi.annotation.versioning.Version;
mduerig/jackrabbit-oak
oak-security-spi/src/main/java/org/apache/jackrabbit/oak/plugins/tree/package-info.java
Java
apache-2.0
916
<?php # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/osconfig/v1/vulnerability.proto namespace Google\Cloud\OsConfig\V1\VulnerabilityReport\Vulnerability; use Google\Protobuf\Internal\GPBType; use Google\Protobuf\Internal\RepeatedField; use Google\Protobuf\Internal\GPBUtil; /** * OS inventory item that is affected by a vulnerability or fixed as a * result of a vulnerability. * * Generated from protobuf message <code>google.cloud.osconfig.v1.VulnerabilityReport.Vulnerability.Item</code> */ class Item extends \Google\Protobuf\Internal\Message { /** * Corresponds to the `INSTALLED_PACKAGE` inventory item on the VM. * This field displays the inventory items affected by this vulnerability. * If the vulnerability report was not updated after the VM inventory * update, these values might not display in VM inventory. For some * operating systems, this field might be empty. * * Generated from protobuf field <code>string installed_inventory_item_id = 1;</code> */ private $installed_inventory_item_id = ''; /** * Corresponds to the `AVAILABLE_PACKAGE` inventory item on the VM. * If the vulnerability report was not updated after the VM inventory * update, these values might not display in VM inventory. If there is no * available fix, the field is empty. The `inventory_item` value specifies * the latest `SoftwarePackage` available to the VM that fixes the * vulnerability. * * Generated from protobuf field <code>string available_inventory_item_id = 2;</code> */ private $available_inventory_item_id = ''; /** * The recommended [CPE URI](https://cpe.mitre.org/specification/) update * that contains a fix for this vulnerability. * * Generated from protobuf field <code>string fixed_cpe_uri = 3;</code> */ private $fixed_cpe_uri = ''; /** * The upstream OS patch, packages or KB that fixes the vulnerability. * * Generated from protobuf field <code>string upstream_fix = 4;</code> */ private $upstream_fix = ''; /** * Constructor. 
* * @param array $data { * Optional. Data for populating the Message object. * * @type string $installed_inventory_item_id * Corresponds to the `INSTALLED_PACKAGE` inventory item on the VM. * This field displays the inventory items affected by this vulnerability. * If the vulnerability report was not updated after the VM inventory * update, these values might not display in VM inventory. For some * operating systems, this field might be empty. * @type string $available_inventory_item_id * Corresponds to the `AVAILABLE_PACKAGE` inventory item on the VM. * If the vulnerability report was not updated after the VM inventory * update, these values might not display in VM inventory. If there is no * available fix, the field is empty. The `inventory_item` value specifies * the latest `SoftwarePackage` available to the VM that fixes the * vulnerability. * @type string $fixed_cpe_uri * The recommended [CPE URI](https://cpe.mitre.org/specification/) update * that contains a fix for this vulnerability. * @type string $upstream_fix * The upstream OS patch, packages or KB that fixes the vulnerability. * } */ public function __construct($data = NULL) { \GPBMetadata\Google\Cloud\Osconfig\V1\Vulnerability::initOnce(); parent::__construct($data); } /** * Corresponds to the `INSTALLED_PACKAGE` inventory item on the VM. * This field displays the inventory items affected by this vulnerability. * If the vulnerability report was not updated after the VM inventory * update, these values might not display in VM inventory. For some * operating systems, this field might be empty. * * Generated from protobuf field <code>string installed_inventory_item_id = 1;</code> * @return string */ public function getInstalledInventoryItemId() { return $this->installed_inventory_item_id; } /** * Corresponds to the `INSTALLED_PACKAGE` inventory item on the VM. * This field displays the inventory items affected by this vulnerability. 
* If the vulnerability report was not updated after the VM inventory * update, these values might not display in VM inventory. For some * operating systems, this field might be empty. * * Generated from protobuf field <code>string installed_inventory_item_id = 1;</code> * @param string $var * @return $this */ public function setInstalledInventoryItemId($var) { GPBUtil::checkString($var, True); $this->installed_inventory_item_id = $var; return $this; } /** * Corresponds to the `AVAILABLE_PACKAGE` inventory item on the VM. * If the vulnerability report was not updated after the VM inventory * update, these values might not display in VM inventory. If there is no * available fix, the field is empty. The `inventory_item` value specifies * the latest `SoftwarePackage` available to the VM that fixes the * vulnerability. * * Generated from protobuf field <code>string available_inventory_item_id = 2;</code> * @return string */ public function getAvailableInventoryItemId() { return $this->available_inventory_item_id; } /** * Corresponds to the `AVAILABLE_PACKAGE` inventory item on the VM. * If the vulnerability report was not updated after the VM inventory * update, these values might not display in VM inventory. If there is no * available fix, the field is empty. The `inventory_item` value specifies * the latest `SoftwarePackage` available to the VM that fixes the * vulnerability. * * Generated from protobuf field <code>string available_inventory_item_id = 2;</code> * @param string $var * @return $this */ public function setAvailableInventoryItemId($var) { GPBUtil::checkString($var, True); $this->available_inventory_item_id = $var; return $this; } /** * The recommended [CPE URI](https://cpe.mitre.org/specification/) update * that contains a fix for this vulnerability. 
* * Generated from protobuf field <code>string fixed_cpe_uri = 3;</code> * @return string */ public function getFixedCpeUri() { return $this->fixed_cpe_uri; } /** * The recommended [CPE URI](https://cpe.mitre.org/specification/) update * that contains a fix for this vulnerability. * * Generated from protobuf field <code>string fixed_cpe_uri = 3;</code> * @param string $var * @return $this */ public function setFixedCpeUri($var) { GPBUtil::checkString($var, True); $this->fixed_cpe_uri = $var; return $this; } /** * The upstream OS patch, packages or KB that fixes the vulnerability. * * Generated from protobuf field <code>string upstream_fix = 4;</code> * @return string */ public function getUpstreamFix() { return $this->upstream_fix; } /** * The upstream OS patch, packages or KB that fixes the vulnerability. * * Generated from protobuf field <code>string upstream_fix = 4;</code> * @param string $var * @return $this */ public function setUpstreamFix($var) { GPBUtil::checkString($var, True); $this->upstream_fix = $var; return $this; } }
googleapis/google-cloud-php-osconfig
src/V1/VulnerabilityReport/Vulnerability/Item.php
PHP
apache-2.0
7,814
// Copyright 2017 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "fusion/config/gefConfigUtil.h" #include <cstdlib> #include <algorithm> #include "fusion/autoingest/.idl/Systemrc.h" #include "fusion/autoingest/.idl/VolumeStorage.h" #include "fusion/autoingest/geAssetRoot.h" #include "common/khSpawn.h" #include "common/khException.h" #include "common/geUsers.h" #include "common/khFileUtils.h" #include "common/config/geConfigUtil.h" #include "common/config/geCapabilities.h" namespace { const uint32 kMaxNumJobsDefault = 8; uint32 GetDefaultMaxNumJobs() { char* variable; uint32 max_num_jobs = kMaxNumJobsDefault; if ((variable = getenv("KH_GOOGLE_MAX_NUM_JOBS")) != NULL) { char *endptr = NULL; const uint32 value = static_cast<uint32>( std::strtoul(variable, &endptr, 0)); if (endptr != variable) { max_num_jobs = value; } } return max_num_jobs; } } // namespace // **************************************************************************** // *** ValidateHostReadyForConfig // **************************************************************************** bool IsFusionRunning(void) { return (geCheckPidFile("gesystemmanager") || geCheckPidFile("geresourceprovider")); } namespace { void AssertFusionNotRunning(void) { if (IsFusionRunning()) { throw khException(kh::tr("Please stop fusion before proceeding.\n" "(e.g. 
/etc/init.d/gefusion stop)")); } } } std::string ValidateHostReadyForConfig(void) { AssertRunningAsRoot(); AssertFusionNotRunning(); return GetAndValidateHostname(); } void LoadSystemrc(Systemrc &systemrc) { static Systemrc cached_systemrc; static bool use_cached = false; if (use_cached) { systemrc = cached_systemrc; return; } if (khExists(Systemrc::Filename())) { use_cached = true; // load into a tmp to avoid a partial load on an earlier file // affecting the defaults for this load Systemrc tmp; if (tmp.Load()) { uint32 max_num_jobs = GetMaxNumJobs(); // If uninitialized or greater than maximum allowable number of // concurrent jobs, the maxjobs defaults to the min of the values: // maximum allowable number of concurrent jobs or limit on max number // of jobs. if (tmp.maxjobs == 0 || tmp.maxjobs > max_num_jobs) { tmp.maxjobs = std::min(max_num_jobs, kMaxNumJobsLimit); } systemrc = cached_systemrc = tmp; } } else { throw khException(kh::tr("'%1' is missing").arg(Systemrc::Filename())); } } std::string CommandlineAssetRootDefault(void) { Systemrc systemrc; LoadSystemrc(systemrc); return systemrc.assetroot; } uint32 CommandlineNumCPUsDefault(void) { Systemrc systemrc; LoadSystemrc(systemrc); return systemrc.maxjobs; } // **************************************************************************** // *** Volume routines // **************************************************************************** void LoadVolumesOrThrow(const std::string &assetroot, VolumeDefList &volumes) { std::string volumefname = geAssetRoot::Filename(assetroot, geAssetRoot::VolumeFile); if (!khExists(volumefname) || !volumes.Load(volumefname)) { throw khException(kh::tr("Unable to load volumes for %1") .arg(assetroot)); } } void SaveVolumesOrThrow(const std::string &assetroot, const VolumeDefList &volumes) { std::string volumefname = geAssetRoot::Filename(assetroot, geAssetRoot::VolumeFile); if (!volumes.Save(volumefname)) { throw khException(kh::tr("Unable to save volumes for %1") 
.arg(assetroot)); } (void)khChmod(volumefname, geAssetRoot::FilePerms(geAssetRoot::VolumeFile)); } void SwitchToUser(const std::string username, const std::string group_name) { geUserId ge_user(username, group_name); ge_user.SwitchEffectiveToThis(); } uint32 GetMaxNumJobs() { uint32 max_num_jobs = 0; // Get maximum allowable number of concurrent jobs. // Note: KH_MAX_NUM_JOBS_COEFF can be used to build GEE Fusion licensing // KH_MAX_NUM_JOBS_COEFF*kMaxNumJobsDefault (8/16/24..) concurrent jobs. #ifdef KH_MAX_NUM_JOBS_COEFF max_num_jobs = kMaxNumJobsDefault * static_cast<uint32>(KH_MAX_NUM_JOBS_COEFF); #endif // Note: Apply an internal multiplier in case of GEE Fusion is built // with maximum number of concurrent jobs equals 0 (internal usage). if (max_num_jobs == 0) { max_num_jobs = GetDefaultMaxNumJobs(); } // Set the max_num_jobs to the min of the values: number of CPUs or max // allowable number of jobs. max_num_jobs = std::min(max_num_jobs, GetNumCPUs()); return max_num_jobs; }
tst-ahernandez/earthenterprise
earth_enterprise/src/fusion/config/gefConfigUtil.cpp
C++
apache-2.0
5,241
// Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file. package com.intellij.psi.search; import com.intellij.openapi.fileTypes.PlainTextLanguage; import com.intellij.openapi.roots.ModuleRootModificationUtil; import com.intellij.openapi.vfs.VirtualFile; import com.intellij.psi.PsiFile; import com.intellij.psi.PsiFileFactory; import com.intellij.testFramework.PlatformTestCase; import com.intellij.testFramework.PsiTestUtil; import org.jetbrains.annotations.NotNull; import org.junit.Assert; import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; public class GlobalSearchScopeTest extends PlatformTestCase { public void testUniteDirectorySearchScopeDoesNotSOE() throws Exception { VirtualFile genRoot = getVirtualFile(createTempDir("genSrcRoot")); VirtualFile srcRoot = getVirtualFile(createTempDir("srcRoot")); VirtualFile child = createChildDirectory(srcRoot, "child"); GlobalSearchScope childScope = GlobalSearchScopesCore.directoryScope(getProject(), child, true); GlobalSearchScope directoryScope = GlobalSearchScopesCore.directoryScope(getProject(), srcRoot, true); GlobalSearchScope scope = GlobalSearchScope.EMPTY_SCOPE.uniteWith(directoryScope); assertSame(scope, directoryScope); scope = scope.uniteWith(directoryScope); assertSame(scope, directoryScope); scope = scope.uniteWith(childScope); assertSame(scope, directoryScope); GlobalSearchScope s = childScope; int N = 1000; VirtualFile[] d = new VirtualFile[N]; for (int i=0; i< N;i++) { d[i] = createChildDirectory(srcRoot, "d"+i); GlobalSearchScope united = s.uniteWith(GlobalSearchScopesCore.directoryScope(getProject(), d[i], true)); assertNotSame(s, united); s = united; assertTrue(s instanceof GlobalSearchScopesCore.DirectoriesScope); } for (VirtualFile file : d) { VirtualFile f = createChildData(file, "f"); assertTrue(s.contains(f)); } assertFalse(s.contains(genRoot)); assertSame(s.uniteWith(childScope), s); 
assertSame(s.uniteWith(s), s); } public void testNotScope() { VirtualFile moduleRoot = getTempDir().createTempVDir(); ModuleRootModificationUtil.addContentRoot(getModule(), moduleRoot.getPath()); GlobalSearchScope projectScope = GlobalSearchScope.projectScope(getProject()); assertFalse(projectScope.isSearchInLibraries()); assertTrue(projectScope.isSearchInModuleContent(getModule())); assertTrue(projectScope.contains(moduleRoot)); GlobalSearchScope notProjectScope = GlobalSearchScope.notScope(projectScope); assertTrue(notProjectScope.isSearchInLibraries()); assertFalse(notProjectScope.contains(moduleRoot)); GlobalSearchScope allScope = GlobalSearchScope.allScope(getProject()); assertTrue(allScope.isSearchInLibraries()); assertTrue(allScope.contains(moduleRoot)); GlobalSearchScope notAllScope = GlobalSearchScope.notScope(allScope); assertFalse(notAllScope.contains(moduleRoot)); } public void testIntersectionPreservesOrderInCaseClientsWantToPutCheaperChecksFirst() throws IOException { AtomicInteger targetCalled = new AtomicInteger(); GlobalSearchScope alwaysTrue = new DelegatingGlobalSearchScope(new EverythingGlobalScope()) { @Override public boolean contains(@NotNull VirtualFile file) { return true; } }; GlobalSearchScope target = new DelegatingGlobalSearchScope(new EverythingGlobalScope()) { @Override public boolean contains(@NotNull VirtualFile file) { targetCalled.incrementAndGet(); return true; } }; GlobalSearchScope trueIntersection = target.intersectWith(alwaysTrue); VirtualFile file1 = getVirtualFile(createTempFile("file1", "")); VirtualFile file2 = getVirtualFile(createTempFile("file2", "")); assertTrue(trueIntersection.contains(file2)); assertEquals(1, targetCalled.get()); assertFalse(GlobalSearchScope.fileScope(myProject, file1).intersectWith(trueIntersection).contains(file2)); assertEquals(1, targetCalled.get()); } public void testDirScopeSearchInLibraries() throws IOException { VirtualFile libRoot = getVirtualFile(createTempDir("libRoot")); VirtualFile 
contentRoot = getVirtualFile(createTempDir("contentRoot")); PsiTestUtil.removeAllRoots(getModule(), null); PsiTestUtil.addContentRoot(getModule(), contentRoot); PsiTestUtil.addLibrary(getModule(), libRoot.getPath()); assertTrue(GlobalSearchScopesCore.directoryScope(myProject, libRoot, true).isSearchInLibraries()); assertTrue(GlobalSearchScopesCore.directoriesScope(myProject, true, libRoot, contentRoot).isSearchInLibraries()); } public void testUnionWithEmptyScopeMustNotAffectCompare() { VirtualFile moduleRoot = getTempDir().createTempVDir(); assertNotNull(moduleRoot); PsiTestUtil.addSourceRoot(getModule(), moduleRoot); VirtualFile moduleRoot2 = getTempDir().createTempVDir(); assertNotNull(moduleRoot2); PsiTestUtil.addSourceRoot(getModule(), moduleRoot2); GlobalSearchScope modScope = getModule().getModuleContentScope(); int compare = modScope.compare(moduleRoot, moduleRoot2); assertTrue(compare != 0); GlobalSearchScope union = modScope.uniteWith(GlobalSearchScope.EMPTY_SCOPE); int compare2 = union.compare(moduleRoot, moduleRoot2); assertEquals(compare, compare2); assertEquals(modScope.compare(moduleRoot2, moduleRoot), union.compare(moduleRoot2, moduleRoot)); } public void testIsInScopeDoesNotAcceptRandomNonPhysicalFilesByDefault() { PsiFile file = PsiFileFactory.getInstance(myProject).createFileFromText(PlainTextLanguage.INSTANCE, ""); VirtualFile vFile = file.getViewProvider().getVirtualFile(); assertFalse(GlobalSearchScope.allScope(myProject).contains(vFile)); assertFalse(PsiSearchScopeUtil.isInScope(GlobalSearchScope.allScope(myProject), file)); assertTrue(file.getResolveScope().contains(vFile)); assertTrue(PsiSearchScopeUtil.isInScope(file.getResolveScope(), file)); } public void testUnionWithEmptyAndUnion() { GlobalSearchScope scope = GlobalSearchScope.EMPTY_SCOPE.uniteWith(GlobalSearchScope.EMPTY_SCOPE); assertEquals(GlobalSearchScope.EMPTY_SCOPE, scope); GlobalSearchScope scope2 = GlobalSearchScope.union(new GlobalSearchScope[]{GlobalSearchScope.EMPTY_SCOPE, 
GlobalSearchScope.EMPTY_SCOPE}); assertEquals(GlobalSearchScope.EMPTY_SCOPE, scope2); GlobalSearchScope p = GlobalSearchScope.projectScope(getProject()); GlobalSearchScope scope3 = GlobalSearchScope.union(new GlobalSearchScope[]{GlobalSearchScope.EMPTY_SCOPE, p, GlobalSearchScope.EMPTY_SCOPE}); assertEquals(p, scope3); GlobalSearchScope m = GlobalSearchScope.moduleScope(getModule()); GlobalSearchScope pm = m.uniteWith(p); Assert.assertNotEquals(m, pm); GlobalSearchScope scope4 = GlobalSearchScope.union(new GlobalSearchScope[]{GlobalSearchScope.EMPTY_SCOPE, p, GlobalSearchScope.EMPTY_SCOPE, pm, m}); assertEquals(pm, scope4); } }
mdanielwork/intellij-community
platform/platform-tests/testSrc/com/intellij/psi/search/GlobalSearchScopeTest.java
Java
apache-2.0
7,120
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ package org.elasticsearch.xpack.core.ilm; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.ilm.IndexLifecycleFeatureSetUsage.PolicyStats; import java.io.IOException; import java.util.ArrayList; import java.util.List; public class IndexLifecycleFeatureSetUsageTests extends AbstractWireSerializingTestCase<IndexLifecycleFeatureSetUsage> { @Override protected IndexLifecycleFeatureSetUsage createTestInstance() { boolean enabled = randomBoolean(); boolean available = randomBoolean(); List<PolicyStats> policyStats = null; if (enabled) { int size = randomIntBetween(0, 10); policyStats = new ArrayList<>(size); for (int i = 0; i < size; i++) { policyStats.add(PolicyStatsTests.createRandomInstance()); } } return new IndexLifecycleFeatureSetUsage(available, policyStats); } @Override protected IndexLifecycleFeatureSetUsage mutateInstance(IndexLifecycleFeatureSetUsage instance) throws IOException { boolean available = instance.available(); List<PolicyStats> policyStats = instance.getPolicyStats(); switch (between(0, 1)) { case 0: available = available == false; break; case 1: if (policyStats == null) { policyStats = new ArrayList<>(); policyStats.add(PolicyStatsTests.createRandomInstance()); } else if (randomBoolean()) { policyStats = null; } else { policyStats = new ArrayList<>(policyStats); policyStats.add(PolicyStatsTests.createRandomInstance()); } break; default: throw new AssertionError("Illegal randomisation branch"); } return new IndexLifecycleFeatureSetUsage(available, policyStats); } @Override protected Reader<IndexLifecycleFeatureSetUsage> instanceReader() { return IndexLifecycleFeatureSetUsage::new; } }
gingerwizard/elasticsearch
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsageTests.java
Java
apache-2.0
2,384
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.internal.processors.cache.persistence.pagemem; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.LockSupport; import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.processors.cache.persistence.CheckpointLockStateChecker; import org.apache.ignite.internal.processors.cache.persistence.CheckpointWriteProgressSupplier; import org.apache.ignite.internal.util.GridConcurrentHashSet; import org.apache.ignite.internal.util.typedef.internal.U; /** * Throttles threads that generate dirty pages during ongoing checkpoint. * Designed to avoid zero dropdowns that can happen if checkpoint buffer is overflowed. * Uses average checkpoint write speed and moment speed of marking pages as dirty. */ public class PagesWriteSpeedBasedThrottle implements PagesWriteThrottlePolicy { /** Maximum dirty pages in region. */ private static final double MAX_DIRTY_PAGES = 0.75; /** Page memory. */ private final PageMemoryImpl pageMemory; /** Database manager. */ private final CheckpointWriteProgressSupplier cpProgress; /** Starting throttle time. 
Limits write speed to 1000 MB/s. */ private static final long STARTING_THROTTLE_NANOS = 4000; /** Backoff ratio. Each next park will be this times longer. */ private static final double BACKOFF_RATIO = 1.05; /** Percent of dirty pages which will not cause throttling. */ private static final double MIN_RATIO_NO_THROTTLE = 0.03; /** Exponential backoff counter. */ private final AtomicInteger exponentialBackoffCntr = new AtomicInteger(0); /** Counter of written pages from checkpoint. Value is saved here for detecting checkpoint start. */ private final AtomicInteger lastObservedWritten = new AtomicInteger(0); /** * Dirty pages ratio was observed at checkpoint start (here start is moment when first page was actually saved to * store). This ratio is excluded from throttling. */ private volatile double initDirtyRatioAtCpBegin = MIN_RATIO_NO_THROTTLE; /** * Target (maximum) dirty pages ratio, after which throttling will start using * {@link #getParkTime(double, long, int, int, long, long)}. */ private volatile double targetDirtyRatio; /** * Current dirty pages ratio (percent of dirty pages in most used segment), negative value means no cp is running. */ private volatile double currDirtyRatio; /** Speed average checkpoint write speed. Current and 3 past checkpoints used. Pages/second. */ private final IntervalBasedMeasurement speedCpWrite = new IntervalBasedMeasurement(); /** Last estimated speed for marking all clear pages as dirty till the end of checkpoint. */ private volatile long speedForMarkAll; /** Threads set. Contains identifiers of all threads which were marking pages for current checkpoint. */ private final GridConcurrentHashSet<Long> threadIds = new GridConcurrentHashSet<>(); /** * Used for calculating speed of marking pages dirty. * Value from past 750-1000 millis only. * {@link IntervalBasedMeasurement#getSpeedOpsPerSec(long)} returns pages marked/second. * {@link IntervalBasedMeasurement#getAverage()} returns average throttle time. 
* */ private final IntervalBasedMeasurement speedMarkAndAvgParkTime = new IntervalBasedMeasurement(250, 3); /** Total pages which is possible to store in page memory. */ private long totalPages; /** Checkpoint lock state provider. */ private CheckpointLockStateChecker cpLockStateChecker; /** Logger. */ private IgniteLogger log; /** Previous warning time, nanos. */ private AtomicLong prevWarnTime = new AtomicLong(); /** Warning min delay nanoseconds. */ private static final long WARN_MIN_DELAY_NS = TimeUnit.SECONDS.toNanos(10); /** Warning threshold: minimal level of pressure that causes warning messages to log. */ static final double WARN_THRESHOLD = 0.2; /** * @param pageMemory Page memory. * @param cpProgress Database manager. * @param stateChecker Checkpoint lock state provider. * @param log Logger. */ public PagesWriteSpeedBasedThrottle( PageMemoryImpl pageMemory, CheckpointWriteProgressSupplier cpProgress, CheckpointLockStateChecker stateChecker, IgniteLogger log ) { this.pageMemory = pageMemory; this.cpProgress = cpProgress; totalPages = pageMemory.totalPages(); this.cpLockStateChecker = stateChecker; this.log = log; } /** {@inheritDoc} */ @Override public void onMarkDirty(boolean isPageInCheckpoint) { assert cpLockStateChecker.checkpointLockIsHeldByThread(); AtomicInteger writtenPagesCntr = cpProgress.writtenPagesCounter(); if (writtenPagesCntr == null) { speedForMarkAll = 0; targetDirtyRatio = -1; currDirtyRatio = -1; return; // Don't throttle if checkpoint is not running. 
} int cpWrittenPages = writtenPagesCntr.get(); long fullyCompletedPages = (cpWrittenPages + cpSyncedPages()) / 2; // written & sync'ed long curNanoTime = System.nanoTime(); speedCpWrite.setCounter(fullyCompletedPages, curNanoTime); long markDirtySpeed = speedMarkAndAvgParkTime.getSpeedOpsPerSec(curNanoTime); long curCpWriteSpeed = speedCpWrite.getSpeedOpsPerSec(curNanoTime); threadIds.add(Thread.currentThread().getId()); ThrottleMode level = ThrottleMode.NO; //should apply delay (throttling) for current page modification if (isPageInCheckpoint) { int checkpointBufLimit = pageMemory.checkpointBufferPagesSize() * 2 / 3; if (pageMemory.checkpointBufferPagesCount() > checkpointBufLimit) level = ThrottleMode.EXPONENTIAL; } long throttleParkTimeNs = 0; if (level == ThrottleMode.NO) { int nThreads = threadIds.size(); int cpTotalPages = cpTotalPages(); if (cpTotalPages == 0) { boolean throttleByCpSpeed = curCpWriteSpeed > 0 && markDirtySpeed > curCpWriteSpeed; if (throttleByCpSpeed) { throttleParkTimeNs = calcDelayTime(curCpWriteSpeed, nThreads, 1); level = ThrottleMode.LIMITED; } } else { double dirtyPagesRatio = pageMemory.getDirtyPagesRatio(); currDirtyRatio = dirtyPagesRatio; detectCpPagesWriteStart(cpWrittenPages, dirtyPagesRatio); if (dirtyPagesRatio >= MAX_DIRTY_PAGES) level = ThrottleMode.NO; // too late to throttle, will wait on safe to update instead. else { int notEvictedPagesTotal = cpTotalPages - cpEvictedPages(); throttleParkTimeNs = getParkTime(dirtyPagesRatio, fullyCompletedPages, notEvictedPagesTotal < 0 ? 0 : notEvictedPagesTotal, nThreads, markDirtySpeed, curCpWriteSpeed); level = throttleParkTimeNs == 0 ? 
ThrottleMode.NO : ThrottleMode.LIMITED; } } } if (level == ThrottleMode.EXPONENTIAL) { int exponent = exponentialBackoffCntr.getAndIncrement(); throttleParkTimeNs = (long)(STARTING_THROTTLE_NANOS * Math.pow(BACKOFF_RATIO, exponent)); } else { if (isPageInCheckpoint) exponentialBackoffCntr.set(0); if (level == ThrottleMode.NO) throttleParkTimeNs = 0; } if (throttleParkTimeNs > 0) { recurrentLogIfNeed(); doPark(throttleParkTimeNs); } speedMarkAndAvgParkTime.addMeasurementForAverageCalculation(throttleParkTimeNs); } /** * Disables the current thread for thread scheduling purposes. May be overriden by subclasses for tests * * @param throttleParkTimeNs the maximum number of nanoseconds to wait */ protected void doPark(long throttleParkTimeNs) { if (throttleParkTimeNs > LOGGING_THRESHOLD) { U.warn(log, "Parking thread=" + Thread.currentThread().getName() + " for timeout(ms)=" + (throttleParkTimeNs / 1_000_000)); } LockSupport.parkNanos(throttleParkTimeNs); } /** * @return number of written pages. */ private int cpWrittenPages() { AtomicInteger writtenPagesCntr = cpProgress.writtenPagesCounter(); return writtenPagesCntr == null ? 0 : writtenPagesCntr.get(); } /** * @return Number of pages in current checkpoint. */ private int cpTotalPages() { return cpProgress.currentCheckpointPagesCount(); } /** * @return Counter for fsynced checkpoint pages. */ private int cpSyncedPages() { AtomicInteger syncedPagesCntr = cpProgress.syncedPagesCounter(); return syncedPagesCntr == null ? 0 : syncedPagesCntr.get(); } /** * @return number of evicted pages. */ private int cpEvictedPages() { AtomicInteger evictedPagesCntr = cpProgress.evictedPagesCntr(); return evictedPagesCntr == null ? 0 : evictedPagesCntr.get(); } /** * Prints warning to log if throttling is occurred and requires markable amount of time. 
*/ private void recurrentLogIfNeed() { long prevWarningNs = prevWarnTime.get(); long curNs = System.nanoTime(); if (prevWarningNs != 0 && (curNs - prevWarningNs) <= WARN_MIN_DELAY_NS) return; double weight = throttleWeight(); if (weight <= WARN_THRESHOLD) return; if (prevWarnTime.compareAndSet(prevWarningNs, curNs)) { String msg = String.format("Throttling is applied to page modifications " + "[percentOfPartTime=%.2f, markDirty=%d pages/sec, checkpointWrite=%d pages/sec, " + "estIdealMarkDirty=%d pages/sec, curDirty=%.2f, maxDirty=%.2f, avgParkTime=%d ns, " + "pages: (total=%d, evicted=%d, written=%d, synced=%d, cpBufUsed=%d, cpBufTotal=%d)]", weight, getMarkDirtySpeed(), getCpWriteSpeed(), getLastEstimatedSpeedForMarkAll(), getCurrDirtyRatio(), getTargetDirtyRatio(), throttleParkTime(), cpTotalPages(), cpEvictedPages(), cpWrittenPages(), cpSyncedPages(), pageMemory.checkpointBufferPagesCount(), pageMemory.checkpointBufferPagesSize()); log.info(msg); } } /** * @param dirtyPagesRatio actual percent of dirty pages. * @param fullyCompletedPages written & fsynced pages count. * @param cpTotalPages total checkpoint scope. * @param nThreads number of threads providing data during current checkpoint. * @param markDirtySpeed registered mark dirty speed, pages/sec. * @param curCpWriteSpeed average checkpoint write speed, pages/sec. * @return time in nanoseconds to part or 0 if throttling is not required. 
*/ long getParkTime( double dirtyPagesRatio, long fullyCompletedPages, int cpTotalPages, int nThreads, long markDirtySpeed, long curCpWriteSpeed) { long speedForMarkAll = calcSpeedToMarkAllSpaceTillEndOfCp(dirtyPagesRatio, fullyCompletedPages, curCpWriteSpeed, cpTotalPages); double targetDirtyRatio = calcTargetDirtyRatio(fullyCompletedPages, cpTotalPages); this.speedForMarkAll = speedForMarkAll; //publish for metrics this.targetDirtyRatio = targetDirtyRatio; //publish for metrics boolean lowSpaceLeft = dirtyPagesRatio > targetDirtyRatio && (dirtyPagesRatio + 0.05 > MAX_DIRTY_PAGES); int slowdown = lowSpaceLeft ? 3 : 1; double multiplierForSpeedForMarkAll = lowSpaceLeft ? 0.8 : 1.0; boolean markingTooFast = speedForMarkAll > 0 && markDirtySpeed > multiplierForSpeedForMarkAll * speedForMarkAll; boolean throttleBySizeAndMarkSpeed = dirtyPagesRatio > targetDirtyRatio && markingTooFast; //for case of speedForMarkAll >> markDirtySpeed, allow write little bit faster than CP average double allowWriteFasterThanCp = (speedForMarkAll > 0 && markDirtySpeed > 0 && speedForMarkAll > markDirtySpeed) ? (0.1 * speedForMarkAll / markDirtySpeed) : (dirtyPagesRatio > targetDirtyRatio ? 0.0 : 0.1); double fasterThanCpWriteSpeed = lowSpaceLeft ? 1.0 : 1.0 + allowWriteFasterThanCp; boolean throttleByCpSpeed = curCpWriteSpeed > 0 && markDirtySpeed > (fasterThanCpWriteSpeed * curCpWriteSpeed); long delayByCpWrite = throttleByCpSpeed ? calcDelayTime(curCpWriteSpeed, nThreads, slowdown) : 0; long delayByMarkAllWrite = throttleBySizeAndMarkSpeed ? calcDelayTime(speedForMarkAll, nThreads, slowdown) : 0; return Math.max(delayByCpWrite, delayByMarkAllWrite); } /** * @param dirtyPagesRatio current percent of dirty pages. * @param fullyCompletedPages count of written and sync'ed pages * @param curCpWriteSpeed pages/second checkpoint write speed. 0 speed means 'no data'. * @param cpTotalPages total pages in checkpoint. 
* @return pages/second to mark to mark all clean pages as dirty till the end of checkpoint. 0 speed means 'no * data'. */ private long calcSpeedToMarkAllSpaceTillEndOfCp(double dirtyPagesRatio, long fullyCompletedPages, long curCpWriteSpeed, int cpTotalPages) { if (curCpWriteSpeed == 0) return 0; if (cpTotalPages <= 0) return 0; if (dirtyPagesRatio >= MAX_DIRTY_PAGES) return 0; double remainedClear = (MAX_DIRTY_PAGES - dirtyPagesRatio) * totalPages; double timeRemainedSeconds = 1.0 * (cpTotalPages - fullyCompletedPages) / curCpWriteSpeed; return (long)(remainedClear / timeRemainedSeconds); } /** * @param fullyCompletedPages number of completed. * @param cpTotalPages Total amount of pages under checkpoint. * @return size-based calculation of target ratio. */ private double calcTargetDirtyRatio(long fullyCompletedPages, int cpTotalPages) { double cpProgress = ((double)fullyCompletedPages) / cpTotalPages; // Starting with initialDirtyRatioAtCpBegin to avoid throttle right after checkpoint start double constStart = initDirtyRatioAtCpBegin; double throttleTotalWeight = 1.0 - constStart; // .75 is maximum ratio of dirty pages return (cpProgress * throttleTotalWeight + constStart) * MAX_DIRTY_PAGES; } /** * @param baseSpeed speed to slow down. * @param nThreads operating threads. * @param coefficient how much it is needed to slowdown base speed. 1.0 means delay to get exact base speed. * @return sleep time in nanoseconds. */ private long calcDelayTime(long baseSpeed, int nThreads, double coefficient) { if (coefficient <= 0.0) return 0; if (baseSpeed <= 0) return 0; long updTimeNsForOnePage = TimeUnit.SECONDS.toNanos(1) * nThreads / (baseSpeed); return (long)(coefficient * updTimeNsForOnePage); } /** * @param cpWrittenPages current counter of written pages. * @param dirtyPagesRatio current percent of dirty pages. 
*/ private void detectCpPagesWriteStart(int cpWrittenPages, double dirtyPagesRatio) { if (cpWrittenPages > 0 && lastObservedWritten.compareAndSet(0, cpWrittenPages)) { double newMinRatio = dirtyPagesRatio; if (newMinRatio < MIN_RATIO_NO_THROTTLE) newMinRatio = MIN_RATIO_NO_THROTTLE; if (newMinRatio > 1) newMinRatio = 1; //for slow cp is completed now, drop previous dirty page percent initDirtyRatioAtCpBegin = newMinRatio; } } /** {@inheritDoc} */ @Override public void onBeginCheckpoint() { speedCpWrite.setCounter(0L, System.nanoTime()); initDirtyRatioAtCpBegin = MIN_RATIO_NO_THROTTLE; lastObservedWritten.set(0); } /** {@inheritDoc} */ @Override public void onFinishCheckpoint() { exponentialBackoffCntr.set(0); speedCpWrite.finishInterval(); speedMarkAndAvgParkTime.finishInterval(); threadIds.clear(); } /** * @return Exponential backoff counter. */ public long throttleParkTime() { return speedMarkAndAvgParkTime.getAverage(); } /** * @return Target (maximum) dirty pages ratio, after which throttling will start. */ public double getTargetDirtyRatio() { return targetDirtyRatio; } /** * @return Current dirty pages ratio. */ public double getCurrDirtyRatio() { double ratio = currDirtyRatio; if (ratio >= 0) return ratio; return pageMemory.getDirtyPagesRatio(); } /** * @return Speed of marking pages dirty. Value from past 750-1000 millis only. Pages/second. */ public long getMarkDirtySpeed() { return speedMarkAndAvgParkTime.getSpeedOpsPerSec(System.nanoTime()); } /** * @return Speed average checkpoint write speed. Current and 3 past checkpoints used. Pages/second. */ public long getCpWriteSpeed() { return speedCpWrite.getSpeedOpsPerSecReadOnly(); } /** * @return Returns {@link #speedForMarkAll}. */ public long getLastEstimatedSpeedForMarkAll() { return speedForMarkAll; } /** * Measurement shows how much throttling time is involved into average marking time. * @return metric started from 0.0 and showing how much throttling is involved into current marking process. 
*/ public double throttleWeight() { long speed = speedMarkAndAvgParkTime.getSpeedOpsPerSec(System.nanoTime()); if (speed <= 0) return 0; long timeForOnePage = calcDelayTime(speed, threadIds.size(), 1); if (timeForOnePage == 0) return 0; return 1.0 * throttleParkTime() / timeForOnePage; } /** * Throttling mode for page. */ private enum ThrottleMode { /** No delay is applied. */ NO, /** Limited, time is based on target speed. */ LIMITED, /** Exponential. */ EXPONENTIAL } }
alexzaitzev/ignite
modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteSpeedBasedThrottle.java
Java
apache-2.0
19,621
package v1 import ( kapi "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" buildapi "github.com/openshift/origin/pkg/build/api/v1" ) // BuildDefaultsConfig controls the default information for Builds type BuildDefaultsConfig struct { unversioned.TypeMeta `json:",inline"` // gitHTTPProxy is the location of the HTTPProxy for Git source GitHTTPProxy string `json:"gitHTTPProxy,omitempty"` // gitHTTPSProxy is the location of the HTTPSProxy for Git source GitHTTPSProxy string `json:"gitHTTPSProxy,omitempty"` // gitNoProxy is the list of domains for which the proxy should not be used GitNoProxy string `json:"gitNoProxy,omitempty"` // env is a set of default environment variables that will be applied to the // build if the specified variables do not exist on the build Env []kapi.EnvVar `json:"env,omitempty"` // sourceStrategyDefaults are default values that apply to builds using the // source strategy. SourceStrategyDefaults *SourceStrategyDefaultsConfig `json:"sourceStrategyDefaults,omitempty"` // imageLabels is a list of docker labels that are applied to the resulting image. // User can override a default label by providing a label with the same name in their // Build/BuildConfig. ImageLabels []buildapi.ImageLabel `json:"imageLabels,omitempty"` // nodeSelector is a selector which must be true for the build pod to fit on a node NodeSelector map[string]string `json:"nodeSelector,omitempty"` // annotations are annotations that will be added to the build pod Annotations map[string]string `json:"annotations,omitempty"` } // SourceStrategyDefaultsConfig contains values that apply to builds using the // source strategy. type SourceStrategyDefaultsConfig struct { // incremental indicates if s2i build strategies should perform an incremental // build or not Incremental *bool `json:"incremental,omitempty"` }
jeffvance/origin
pkg/build/admission/defaults/api/v1/types.go
GO
apache-2.0
1,882
/* * Copyright 2018 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package androidx.room.integration.testapp.test; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.MatcherAssert.assertThat; import static java.util.Collections.emptyList; import androidx.arch.core.executor.testing.CountingTaskExecutorRule; import androidx.lifecycle.LiveData; import androidx.room.integration.testapp.dao.RawDao; import androidx.room.integration.testapp.vo.NameAndLastName; import androidx.room.integration.testapp.vo.Pet; import androidx.room.integration.testapp.vo.User; import androidx.room.integration.testapp.vo.UserAndAllPets; import androidx.room.integration.testapp.vo.UserAndPet; import androidx.sqlite.db.SimpleSQLiteQuery; import androidx.sqlite.db.SupportSQLiteQuery; import androidx.test.ext.junit.runners.AndroidJUnit4; import androidx.test.filters.SmallTest; import androidx.test.platform.app.InstrumentationRegistry; import org.junit.Rule; import org.junit.Test; import org.junit.runner.RunWith; import java.util.Arrays; import java.util.List; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @SmallTest @RunWith(AndroidJUnit4.class) public class RawQueryTest extends TestDatabaseTest { @Rule public CountingTaskExecutorRule mExecutorRule = new CountingTaskExecutorRule(); @Test public void entity_null() { User user = mRawDao.getUser(new 
SimpleSQLiteQuery("SELECT * FROM User WHERE mId = 0")); assertThat(user, is(nullValue())); } @Test public void entity_one() { User expected = TestUtil.createUser(3); mUserDao.insert(expected); User received = mRawDao.getUser(new SimpleSQLiteQuery("SELECT * FROM User WHERE mId = ?", new Object[]{3})); assertThat(received, is(expected)); } @Test public void entity_list() { List<User> expected = TestUtil.createUsersList(1, 2, 3, 4); mUserDao.insertAll(expected.toArray(new User[4])); List<User> received = mRawDao.getUserList( new SimpleSQLiteQuery("SELECT * FROM User ORDER BY mId ASC")); assertThat(received, is(expected)); } @Test public void entity_liveData_string() throws TimeoutException, InterruptedException { SupportSQLiteQuery query = new SimpleSQLiteQuery( "SELECT * FROM User WHERE mId = ?", new Object[]{3} ); liveDataTest(mRawDao.getUserLiveData(query)); } @Test public void entity_liveData_supportQuery() throws TimeoutException, InterruptedException { liveDataTest(mRawDao.getUserLiveData( new SimpleSQLiteQuery("SELECT * FROM User WHERE mId = ?", new Object[]{3}))); } private void liveDataTest( final LiveData<User> liveData) throws TimeoutException, InterruptedException { InstrumentationRegistry.getInstrumentation().runOnMainSync( () -> liveData.observeForever(user -> { })); drain(); assertThat(liveData.getValue(), is(nullValue())); User user = TestUtil.createUser(3); mUserDao.insert(user); drain(); assertThat(liveData.getValue(), is(user)); user.setLastName("cxZ"); mUserDao.insertOrReplace(user); drain(); assertThat(liveData.getValue(), is(user)); } @Test public void entity_supportSql() { User user = TestUtil.createUser(3); mUserDao.insert(user); SimpleSQLiteQuery query = new SimpleSQLiteQuery("SELECT * FROM User WHERE mId = ?", new Object[]{3}); User received = mRawDao.getUser(query); assertThat(received, is(user)); } @Test public void embedded() { User user = TestUtil.createUser(3); Pet[] pets = TestUtil.createPetsForUser(3, 1, 1); mUserDao.insert(user); 
mPetDao.insertAll(pets); UserAndPet received = mRawDao.getUserAndPet(new SimpleSQLiteQuery( "SELECT * FROM User, Pet WHERE User.mId = Pet.mUserId LIMIT 1")); assertThat(received.getUser(), is(user)); assertThat(received.getPet(), is(pets[0])); } @Test public void relation() { User user = TestUtil.createUser(3); mUserDao.insert(user); Pet[] pets = TestUtil.createPetsForUser(3, 1, 10); mPetDao.insertAll(pets); UserAndAllPets result = mRawDao .getUserAndAllPets(new SimpleSQLiteQuery("SELECT * FROM User WHERE mId = ?", new Object[]{3})); assertThat(result.user, is(user)); assertThat(result.pets, is(Arrays.asList(pets))); } @Test public void pojo() { User user = TestUtil.createUser(3); mUserDao.insert(user); NameAndLastName result = mRawDao.getUserNameAndLastName(new SimpleSQLiteQuery("SELECT * FROM User")); assertThat(result, is(new NameAndLastName(user.getName(), user.getLastName()))); } @Test public void pojo_supportSql() { User user = TestUtil.createUser(3); mUserDao.insert(user); NameAndLastName result = mRawDao.getUserNameAndLastNameWithObserved(new SimpleSQLiteQuery( "SELECT * FROM User WHERE mId = ?", new Object[]{3} )); assertThat(result, is(new NameAndLastName(user.getName(), user.getLastName()))); } @Test public void pojo_typeConverter() { User user = TestUtil.createUser(3); mUserDao.insert(user); RawDao.UserNameAndBirthday result = mRawDao.getUserAndBirthday( new SimpleSQLiteQuery("SELECT mName, mBirthday FROM user LIMIT 1")); assertThat(result.name, is(user.getName())); assertThat(result.birthday, is(user.getBirthday())); } @Test public void embedded_nullField() { User user = TestUtil.createUser(3); Pet[] pets = TestUtil.createPetsForUser(3, 1, 1); mUserDao.insert(user); mPetDao.insertAll(pets); UserAndPet received = mRawDao.getUserAndPet( new SimpleSQLiteQuery("SELECT * FROM User LIMIT 1")); assertThat(received.getUser(), is(user)); assertThat(received.getPet(), is(nullValue())); } @Test public void embedded_list() { User[] users = 
TestUtil.createUsersArray(3, 5); Pet[] pets = TestUtil.createPetsForUser(3, 1, 2); mUserDao.insertAll(users); mPetDao.insertAll(pets); List<UserAndPet> received = mRawDao.getUserAndPetList( new SimpleSQLiteQuery( "SELECT * FROM User LEFT JOIN Pet ON (User.mId = Pet.mUserId)" + " ORDER BY mId ASC, mPetId ASC")); assertThat(received.size(), is(3)); // row 0 assertThat(received.get(0).getUser(), is(users[0])); assertThat(received.get(0).getPet(), is(pets[0])); // row 1 assertThat(received.get(1).getUser(), is(users[0])); assertThat(received.get(1).getPet(), is(pets[1])); // row 2 assertThat(received.get(2).getUser(), is(users[1])); assertThat(received.get(2).getPet(), is(nullValue())); } @Test public void count() { mUserDao.insertAll(TestUtil.createUsersArray(3, 5, 7, 10)); int count = mRawDao.count(new SimpleSQLiteQuery("SELECT COUNT(*) FROM User")); assertThat(count, is(4)); } @Test public void embedded_liveData() throws TimeoutException, InterruptedException { LiveData<List<UserAndPet>> liveData = mRawDao.getUserAndPetListObservable( new SimpleSQLiteQuery("SELECT * FROM User LEFT JOIN Pet ON (User.mId = Pet.mUserId)" + " ORDER BY mId ASC, mPetId ASC")); InstrumentationRegistry.getInstrumentation().runOnMainSync( () -> liveData.observeForever(user -> { }) ); drain(); assertThat(liveData.getValue(), is(emptyList())); User[] users = TestUtil.createUsersArray(3, 5); Pet[] pets = TestUtil.createPetsForUser(3, 1, 2); mUserDao.insertAll(users); drain(); List<UserAndPet> justUsers = liveData.getValue(); //noinspection ConstantConditions assertThat(justUsers.size(), is(2)); assertThat(justUsers.get(0).getUser(), is(users[0])); assertThat(justUsers.get(1).getUser(), is(users[1])); assertThat(justUsers.get(0).getPet(), is(nullValue())); assertThat(justUsers.get(1).getPet(), is(nullValue())); mPetDao.insertAll(pets); drain(); List<UserAndPet> allItems = liveData.getValue(); //noinspection ConstantConditions assertThat(allItems.size(), is(3)); // row 0 
assertThat(allItems.get(0).getUser(), is(users[0])); assertThat(allItems.get(0).getPet(), is(pets[0])); // row 1 assertThat(allItems.get(1).getUser(), is(users[0])); assertThat(allItems.get(1).getPet(), is(pets[1])); // row 2 assertThat(allItems.get(2).getUser(), is(users[1])); assertThat(allItems.get(2).getPet(), is(nullValue())); mDatabase.clearAllTables(); drain(); assertThat(liveData.getValue(), is(emptyList())); } @SuppressWarnings("ConstantConditions") @Test public void relation_liveData() throws TimeoutException, InterruptedException { LiveData<UserAndAllPets> liveData = mRawDao .getUserAndAllPetsObservable( new SimpleSQLiteQuery("SELECT * FROM User WHERE mId = ?", new Object[]{3})); InstrumentationRegistry.getInstrumentation().runOnMainSync( () -> liveData.observeForever(user -> { }) ); drain(); User user = TestUtil.createUser(3); mUserDao.insert(user); drain(); assertThat(liveData.getValue().user, is(user)); assertThat(liveData.getValue().pets, is(emptyList())); Pet[] pets = TestUtil.createPetsForUser(3, 1, 5); mPetDao.insertAll(pets); drain(); assertThat(liveData.getValue().user, is(user)); assertThat(liveData.getValue().pets, is(Arrays.asList(pets))); } private void drain() throws TimeoutException, InterruptedException { mExecutorRule.drainTasks(1, TimeUnit.MINUTES); } }
AndroidX/androidx
room/integration-tests/testapp/src/androidTest/java/androidx/room/integration/testapp/test/RawQueryTest.java
Java
apache-2.0
11,077
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.oodt.cas.workflow.engine; //OODT imports import org.apache.oodt.cas.metadata.Metadata; import org.apache.oodt.cas.resource.system.XmlRpcResourceManagerClient; import org.apache.oodt.cas.workflow.structs.Workflow; import org.apache.oodt.cas.workflow.structs.WorkflowInstance; import org.apache.oodt.cas.workflow.structs.WorkflowStatus; import org.apache.oodt.cas.workflow.structs.WorkflowTask; import org.apache.oodt.cas.workflow.structs.exceptions.EngineException; import org.apache.oodt.cas.workflow.structs.exceptions.InstanceRepositoryException; import org.apache.oodt.cas.workflow.engine.IterativeWorkflowProcessorThread; import org.apache.oodt.cas.workflow.instrepo.WorkflowInstanceRepository; import org.apache.oodt.commons.util.DateConvert; //JDK imports import java.net.URL; import java.text.ParseException; import java.util.Date; import java.util.HashMap; import java.util.logging.Level; import java.util.logging.Logger; //java.util.concurrent imports import EDU.oswego.cs.dl.util.concurrent.BoundedBuffer; import EDU.oswego.cs.dl.util.concurrent.Channel; import EDU.oswego.cs.dl.util.concurrent.LinkedQueue; import EDU.oswego.cs.dl.util.concurrent.PooledExecutor; /** * * The 
ThreadPooling portion of the WorkflowEngine. This class is meant to be an
 * extension point for WorkflowEngines that want to implement ThreadPooling.
 * This WorkflowEngine provides everything needed to manage a ThreadPool using
 * Doug Lea's wonderful java.util.concurrent package that made it into JDK5.
 *
 * @author mattmann
 * @version $Revision$
 *
 */
public class ThreadPoolWorkflowEngine implements WorkflowEngine, WorkflowStatus {

  /* our thread pool */
  private PooledExecutor pool = null;

  /* our worker thread hash mapping worker threads to workflow instance ids */
  private HashMap workerMap = null;

  /* our log stream */
  private static final Logger LOG = Logger
      .getLogger(ThreadPoolWorkflowEngine.class.getName());

  /* our instance repository */
  private WorkflowInstanceRepository instRep = null;

  /* our resource manager client: null means local execution */
  private XmlRpcResourceManagerClient rClient = null;

  /* the URL pointer to the parent Workflow Manager */
  private URL wmgrUrl = null;

  /**
   * Default Constructor.
   *
   * @param instRep
   *          The WorkflowInstanceRepository to be used by this engine.
   * @param queueSize
   *          The size of the queue that the workflow engine should use
   *          (irrelevant if unlimitedQueue is set to true)
   * @param maxPoolSize
   *          The maximum thread pool size.
   * @param minPoolSize
   *          The minimum thread pool size.
   * @param threadKeepAliveTime
   *          The amount of minutes that each thread in the pool should be kept
   *          alive.
   * @param unlimitedQueue
   *          Whether or not to use a queue whose bounds are dictated by the
   *          physical memory of the underlying hardware.
   * @param resUrl
   *          A URL pointer to a resource manager. If this is set Tasks will be
   *          wrapped as Resource Manager {@link Job}s and sent through the
   *          Resource Manager. If this parameter is not set, local execution
   *          (the default) will be used
   */
  public ThreadPoolWorkflowEngine(WorkflowInstanceRepository instRep,
      int queueSize, int maxPoolSize, int minPoolSize,
      long threadKeepAliveTime, boolean unlimitedQueue, URL resUrl) {
    this.instRep = instRep;

    Channel c = null;
    if (unlimitedQueue) {
      c = new LinkedQueue();
    } else {
      c = new BoundedBuffer(queueSize);
    }

    pool = new PooledExecutor(c, maxPoolSize);
    pool.setMinimumPoolSize(minPoolSize);
    // keep-alive is given in minutes; PooledExecutor expects milliseconds
    pool.setKeepAliveTime(1000 * 60 * threadKeepAliveTime);

    workerMap = new HashMap();

    if (resUrl != null) {
      rClient = new XmlRpcResourceManagerClient(resUrl);
    }
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.oodt.cas.workflow.engine.WorkflowEngine#pauseWorkflowInstance
   * (java.lang.String)
   */
  public synchronized void pauseWorkflowInstance(String workflowInstId) {
    // okay, try and look up that worker thread in our hash map
    IterativeWorkflowProcessorThread worker = (IterativeWorkflowProcessorThread) workerMap
        .get(workflowInstId);
    if (worker == null) {
      LOG.log(Level.WARNING,
          "WorkflowEngine: Attempt to pause workflow instance id: "
              + workflowInstId
              + ", however, this engine is not tracking its execution");
      return;
    }

    // otherwise, all good
    worker.pause();
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.oodt.cas.workflow.engine.WorkflowEngine#resumeWorkflowInstance
   * (java.lang.String)
   */
  public synchronized void resumeWorkflowInstance(String workflowInstId) {
    // okay, try and look up that worker thread in our hash map
    IterativeWorkflowProcessorThread worker = (IterativeWorkflowProcessorThread) workerMap
        .get(workflowInstId);
    if (worker == null) {
      LOG.log(Level.WARNING,
          "WorkflowEngine: Attempt to resume workflow instance id: "
              + workflowInstId + ", however, this engine is "
              + "not tracking its execution");
      return;
    }

    // also check to make sure that the worker is currently paused
    // only can resume WorkflowInstances that are paused, right?
    if (!worker.isPaused()) {
      LOG.log(Level.WARNING,
          "WorkflowEngine: Attempt to resume a workflow that "
              + "isn't paused currently: instance id: " + workflowInstId);
      return;
    }

    // okay, all good
    worker.resume();
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.oodt.cas.workflow.engine.WorkflowEngine#startWorkflow(org.apache
   * .oodt.cas.workflow.structs.Workflow, org.apache.oodt.cas.metadata.Metadata)
   */
  public synchronized WorkflowInstance startWorkflow(Workflow workflow,
      Metadata metadata) throws EngineException {
    // to start the workflow, we create a default workflow instance
    // populate it
    // persist it
    // add it to the worker map
    // start it

    WorkflowInstance wInst = new WorkflowInstance();
    wInst.setWorkflow(workflow);
    wInst.setCurrentTaskId(((WorkflowTask) workflow.getTasks().get(0))
        .getTaskId());
    wInst.setSharedContext(metadata);
    wInst.setStatus(CREATED);
    persistWorkflowInstance(wInst);

    IterativeWorkflowProcessorThread worker = new IterativeWorkflowProcessorThread(
        wInst, instRep, this.wmgrUrl);
    worker.setRClient(rClient);
    workerMap.put(wInst.getId(), worker);

    wInst.setStatus(QUEUED);
    persistWorkflowInstance(wInst);

    try {
      pool.execute(worker);
    } catch (InterruptedException e) {
      throw new EngineException(e);
    }

    return wInst;
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.oodt.cas.workflow.engine.WorkflowEngine#getInstanceRepository()
   */
  public WorkflowInstanceRepository getInstanceRepository() {
    return this.instRep;
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.oodt.cas.workflow.engine.WorkflowEngine#updateMetadata(java.
   * lang.String, org.apache.oodt.cas.metadata.Metadata)
   */
  public synchronized boolean updateMetadata(String workflowInstId, Metadata met) {
    // okay, try and look up that worker thread in our hash map
    IterativeWorkflowProcessorThread worker = (IterativeWorkflowProcessorThread) workerMap
        .get(workflowInstId);
    if (worker == null) {
      LOG.log(Level.WARNING,
          "WorkflowEngine: Attempt to update metadata context "
              + "for workflow instance id: " + workflowInstId
              + ", however, this engine is " + "not tracking its execution");
      return false;
    }

    worker.getWorkflowInstance().setSharedContext(met);
    try {
      persistWorkflowInstance(worker.getWorkflowInstance());
    } catch (Exception e) {
      LOG.log(
          Level.WARNING,
          "Exception persisting workflow instance: ["
              + worker.getWorkflowInstance().getId() + "]: Message: "
              + e.getMessage());
      return false;
    }

    return true;
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.oodt.cas.workflow.engine.WorkflowEngine#setWorkflowManagerUrl
   * (java.net.URL)
   */
  public void setWorkflowManagerUrl(URL url) {
    this.wmgrUrl = url;
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.oodt.cas.workflow.engine.WorkflowEngine#stopWorkflow(java.lang
   * .String)
   */
  public synchronized void stopWorkflow(String workflowInstId) {
    // okay, try and look up that worker thread in our hash map
    IterativeWorkflowProcessorThread worker = (IterativeWorkflowProcessorThread) workerMap
        .get(workflowInstId);
    if (worker == null) {
      LOG.log(Level.WARNING,
          "WorkflowEngine: Attempt to stop workflow instance id: "
              + workflowInstId + ", however, this engine is "
              + "not tracking its execution");
      return;
    }

    worker.stop();
  }

  /*
   * (non-Javadoc)
   *
   * @see org.apache.oodt.cas.workflow.engine.WorkflowEngine#
   * getCurrentTaskWallClockMinutes(java.lang.String)
   */
  public double getCurrentTaskWallClockMinutes(String workflowInstId) {
    // get the workflow instance that we're talking about
    WorkflowInstance inst = safeGetWorkflowInstanceById(workflowInstId);
    return getCurrentTaskWallClockMinutes(inst);
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.oodt.cas.workflow.engine.WorkflowEngine#getWorkflowInstanceMetadata
   * (java.lang.String)
   */
  public Metadata getWorkflowInstanceMetadata(String workflowInstId) {
    // okay, try and look up that worker thread in our hash map
    IterativeWorkflowProcessorThread worker = (IterativeWorkflowProcessorThread) workerMap
        .get(workflowInstId);
    if (worker == null) {
      // try and get the metadata
      // from the workflow instance repository (as it was persisted)
      try {
        WorkflowInstance inst = instRep.getWorkflowInstanceById(workflowInstId);
        return inst.getSharedContext();
      } catch (InstanceRepositoryException e) {
        LOG.log(Level.FINEST, "WorkflowEngine: Attempt to get metadata "
            + "for workflow instance id: " + workflowInstId
            + ", however, this engine is "
            + "not tracking its execution and the id: [" + workflowInstId
            + "] " + "was never persisted to " + "the instance repository: "
            + "Message: " + e.getMessage());
        // return an empty met context rather than null so callers don't NPE
        return new Metadata();
      }
    }

    return worker.getWorkflowInstance().getSharedContext();
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * org.apache.oodt.cas.workflow.engine.WorkflowEngine#getWallClockMinutes(
   * java.lang.String)
   */
  public double getWallClockMinutes(String workflowInstId) {
    // get the workflow instance that we're talking about
    WorkflowInstance inst = safeGetWorkflowInstanceById(workflowInstId);
    return getWallClockMinutes(inst);
  }

  /**
   * Computes the total wall clock minutes that the given workflow instance has
   * been (or was) running: from its recorded start date/time until either its
   * recorded end date/time or, if it has not ended, the current time.
   *
   * @param inst
   *          The WorkflowInstance to examine: may be null.
   * @return The wall clock minutes, or 0.0 if the instance is null or its
   *         start date/time is absent or unparseable.
   */
  protected static double getWallClockMinutes(WorkflowInstance inst) {
    if (inst == null) {
      return 0.0;
    }

    Date currentDateOrStopTime = (inst.getEndDateTimeIsoStr() != null
        && !inst.getEndDateTimeIsoStr().equals("") && !inst
        .getEndDateTimeIsoStr().equals("null")) ? safeDateConvert(inst
        .getEndDateTimeIsoStr()) : new Date();
    // safeDateConvert returns null on a malformed end date: fall back to
    // "now" rather than throwing an NPE on .getTime() below
    if (currentDateOrStopTime == null) {
      currentDateOrStopTime = new Date();
    }
    Date workflowStartDateTime = null;

    if (inst.getStartDateTimeIsoStr() == null
        || (inst.getStartDateTimeIsoStr() != null && (inst
            .getStartDateTimeIsoStr().equals("") || inst
            .getStartDateTimeIsoStr().equals("null")))) {
      return 0.0;
    }

    try {
      workflowStartDateTime = DateConvert.isoParse(inst
          .getStartDateTimeIsoStr());
    } catch (ParseException e) {
      return 0.0;
    }

    long diffMs = currentDateOrStopTime.getTime()
        - workflowStartDateTime.getTime();
    double diffSecs = (diffMs * 1.0 / 1000.0);
    double diffMins = diffSecs / 60.0;
    return diffMins;
  }

  /**
   * Computes the wall clock minutes spent in the given workflow instance's
   * current task: from the task's recorded start date/time until either the
   * task's recorded end date/time or, if it has not ended, the current time.
   *
   * @param inst
   *          The WorkflowInstance to examine: may be null.
   * @return The wall clock minutes for the current task, or 0.0 if the
   *         instance is null, the task start date/time is absent or
   *         unparseable, or the recorded start is after the recorded end.
   */
  protected static double getCurrentTaskWallClockMinutes(WorkflowInstance inst) {
    if (inst == null) {
      return 0.0;
    }

    Date currentDateOrStopTime = (inst.getCurrentTaskEndDateTimeIsoStr() != null
        && !inst.getCurrentTaskEndDateTimeIsoStr().equals("") && !inst
        .getCurrentTaskEndDateTimeIsoStr().equals("null")) ? safeDateConvert(inst
        .getCurrentTaskEndDateTimeIsoStr()) : new Date();
    // safeDateConvert returns null on a malformed end date: fall back to
    // "now" rather than throwing an NPE on .getTime()/.after() below
    if (currentDateOrStopTime == null) {
      currentDateOrStopTime = new Date();
    }
    Date workflowTaskStartDateTime = null;

    if (inst.getCurrentTaskStartDateTimeIsoStr() == null
        || (inst.getCurrentTaskStartDateTimeIsoStr() != null && (inst
            .getCurrentTaskStartDateTimeIsoStr().equals("") || inst
            .getCurrentTaskStartDateTimeIsoStr().equals("null")))) {
      return 0.0;
    }

    try {
      workflowTaskStartDateTime = DateConvert.isoParse(inst
          .getCurrentTaskStartDateTimeIsoStr());
    } catch (ParseException e) {
      return 0.0;
    }

    // should never be in this state, so return 0
    if (workflowTaskStartDateTime.after(currentDateOrStopTime)) {
      LOG.log(
          Level.WARNING,
          "Start date time: ["
              + DateConvert.isoFormat(workflowTaskStartDateTime)
              + " of workflow inst [" + inst.getId() + "] is AFTER "
              + "End date time: ["
              + DateConvert.isoFormat(currentDateOrStopTime)
              + "] of workflow inst.");
      return 0.0;
    }

    long diffMs = currentDateOrStopTime.getTime()
        - workflowTaskStartDateTime.getTime();
    double diffSecs = (diffMs * 1.0 / 1000.0);
    double diffMins = diffSecs / 60.0;
    return diffMins;
  }

  /**
   * Persists the given WorkflowInstance to the instance repository: instances
   * without an id are added (the repository assigns the id), otherwise the
   * existing record is updated.
   *
   * @param wInst
   *          The WorkflowInstance to persist.
   * @throws EngineException
   *           If the underlying repository operation fails.
   */
  private synchronized void persistWorkflowInstance(WorkflowInstance wInst)
      throws EngineException {
    try {
      if (wInst.getId() == null
          || (wInst.getId() != null && wInst.getId().equals(""))) {
        // we have to persist it by adding it
        // rather than updating it
        instRep.addWorkflowInstance(wInst);
      } else {
        // persist by update
        instRep.updateWorkflowInstance(wInst);
      }
    } catch (InstanceRepositoryException e) {
      // log through our Logger rather than printStackTrace for consistency
      LOG.log(Level.WARNING, "Exception persisting workflow instance: ["
          + wInst.getId() + "]: Message: " + e.getMessage());
      throw new EngineException(e.getMessage());
    }
  }

  /**
   * Looks up a WorkflowInstance by id, returning null (instead of propagating
   * an exception) if the lookup fails for any reason.
   */
  private WorkflowInstance safeGetWorkflowInstanceById(String workflowInstId) {
    try {
      return instRep.getWorkflowInstanceById(workflowInstId);
    } catch (Exception e) {
      return null;
    }
  }

  /**
   * Parses an ISO date/time string, returning null (instead of propagating a
   * parse exception) if the string is malformed.
   */
  private static Date safeDateConvert(String isoTimeStr) {
    try {
      return DateConvert.isoParse(isoTimeStr);
    } catch (Exception ignore) {
      // log through our Logger rather than printStackTrace for consistency
      LOG.log(Level.WARNING, "Exception parsing ISO date/time string: ["
          + isoTimeStr + "]: Message: " + ignore.getMessage());
      return null;
    }
  }

}
OSBI/oodt
workflow/src/main/java/org/apache/oodt/cas/workflow/engine/ThreadPoolWorkflowEngine.java
Java
apache-2.0
15,482
// Copyright 2015 The Gemmlowp Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "test.h" #include <array> #include <cstdint> #include <cstdlib> #include <ctime> #include <iostream> #include <memory> #include <string> #include <vector> #ifdef __APPLE__ #include <TargetConditionals.h> #endif #include "../eight_bit_int_gemm/eight_bit_int_gemm.h" #include "../internal/kernel_reference.h" #include "test_data.h" namespace gemmlowp { void ReferenceEightBitIntGemm(bool transpose_a, bool transpose_b, bool transpose_c, int m, int n, int k, const std::uint8_t* a, std::int32_t a_offset, int lda, const std::uint8_t* b, std::int32_t b_offset, int ldb, std::uint8_t* c, std::int32_t c_offset, std::int32_t c_mult_int, std::int32_t c_shift, int ldc) { ScopedProfilingLabel("ReferenceEightBitIntGemm"); assert((c_shift >= 0) && (c_shift <= 32)); assert(a != nullptr); assert(b != nullptr); assert(c != nullptr); int a_i_stride; int a_l_stride; if (transpose_a) { a_i_stride = lda; a_l_stride = 1; } else { a_i_stride = 1; a_l_stride = lda; } int b_j_stride; int b_l_stride; if (transpose_b) { b_j_stride = 1; b_l_stride = ldb; } else { b_j_stride = ldb; b_l_stride = 1; } int c_i_stride; int c_j_stride; if (transpose_c) { c_i_stride = ldc; c_j_stride = 1; } else { c_i_stride = 1; c_j_stride = ldc; } int i, j, l; const std::int32_t kRoundingTerm = (c_shift < 1) ? 
0 : (1 << (c_shift - 1)); for (j = 0; j < n; j++) { for (i = 0; i < m; i++) { std::int32_t total = 0; for (l = 0; l < k; l++) { const int a_index = i * a_i_stride + l * a_l_stride; const std::uint8_t a_as_byte = a[a_index]; const std::int32_t a_as_int = static_cast<std::int32_t>(a_as_byte) + a_offset; const int b_index = j * b_j_stride + l * b_l_stride; const std::uint8_t b_as_byte = b[b_index]; const std::int32_t b_as_int = static_cast<std::int32_t>(b_as_byte) + b_offset; const std::int32_t mult_as_int = a_as_int * b_as_int; total += mult_as_int; } std::int32_t output = (((total + c_offset) * c_mult_int) + kRoundingTerm) >> c_shift; if (output > 255) { output = 255; } if (output < 0) { output = 0; } const int c_index = i * c_i_stride + j * c_j_stride; c[c_index] = static_cast<std::uint8_t>(output); } } } typedef VectorMap<const std::int32_t, VectorShape::Col> OffsetColMap; typedef VectorMap<const std::int32_t, VectorShape::Row> OffsetRowMap; typedef VectorDup<const std::int32_t, VectorShape::Col> OffsetColDup; typedef VectorDup<const std::int32_t, VectorShape::Row> OffsetRowDup; // *GemmWrapper's allow to wrap various Gemm functions in a uniform // interface, so we can use the same testing code to test all of them template <typename Kernel, typename Scalar, typename tBitDepthParams> struct SingleThreadGemmWrapper { typedef tBitDepthParams BitDepthParams; static const char* Name() { static char buf[256]; snprintf(buf, sizeof(buf), "SingleThreadGemm, Kernel: %s", Kernel().Name()); return buf; } typedef SingleThreadGemmContext Context; template <MapOrder LhsOrder, MapOrder RhsOrder, MapOrder ResultOrder> static bool Gemm(Context* context, const MatrixMap<const Scalar, LhsOrder>& lhs, const MatrixMap<const Scalar, RhsOrder>& rhs, MatrixMap<Scalar, ResultOrder>* result, int lhs_offset, int rhs_offset, int result_offset, int result_mult_int, int result_shift) { ScopedProfilingLabel("SingleThreadGemmWrapper::Gemm"); const int rows = lhs.rows(); const int cols = 
rhs.cols(); if (rows < cols) { // SingleThreadGemm is never called with rows < cols. // That case is handled earlier. return false; } const OffsetColDup lhs_offset_vector(lhs_offset, rows); const OffsetRowDup rhs_offset_vector(rhs_offset, cols); SingleThreadGemm<typename Kernel::Format, Scalar, Scalar, BitDepthParams, LhsOrder, RhsOrder, ResultOrder, OffsetColDup, OffsetRowDup>( context, Kernel(), lhs, rhs, result, lhs_offset_vector, rhs_offset_vector, MakeStandardOutputPipeline(result_offset, result_mult_int, result_shift)); return true; } }; template <typename Kernel, typename Scalar, typename tBitDepthParams> struct MultiThreadGemmWrapper { typedef tBitDepthParams BitDepthParams; static const char* Name() { static char buf[256]; snprintf(buf, sizeof(buf), "MultiThreadGemm, Kernel: %s", Kernel().Name()); return buf; } typedef MultiThreadGemmContext Context; template <MapOrder LhsOrder, MapOrder RhsOrder, MapOrder ResultOrder> static bool Gemm(Context* context, const MatrixMap<const Scalar, LhsOrder>& lhs, const MatrixMap<const Scalar, RhsOrder>& rhs, MatrixMap<Scalar, ResultOrder>* result, int lhs_offset, int rhs_offset, int result_offset, int result_mult_int, int result_shift) { ScopedProfilingLabel("MultiThreadGemmWrapper::Gemm"); context->set_max_num_threads(0); const int rows = lhs.rows(); const int cols = rhs.cols(); if (rows < cols) { // SingleThreadGemm is never called with rows < cols. // That case is handled earlier. 
return false; } const OffsetColDup lhs_offset_vector(lhs_offset, rows); const OffsetRowDup rhs_offset_vector(rhs_offset, cols); MultiThreadGemm<typename Kernel::Format, Scalar, Scalar, BitDepthParams, LhsOrder, RhsOrder, ResultOrder, OffsetColDup, OffsetRowDup>( context, Kernel(), lhs, rhs, result, lhs_offset_vector, rhs_offset_vector, MakeStandardOutputPipeline(result_offset, result_mult_int, result_shift)); return true; } }; template <typename Scalar, typename tBitDepthParams> struct PublicGemmWrapper { typedef tBitDepthParams BitDepthParams; static const char* Name() { return "public Gemm"; } typedef GemmContext Context; template <MapOrder LhsOrder, MapOrder RhsOrder, MapOrder ResultOrder> static bool Gemm(Context* context, const MatrixMap<const Scalar, LhsOrder>& lhs, const MatrixMap<const Scalar, RhsOrder>& rhs, MatrixMap<Scalar, ResultOrder>* result, int lhs_offset, int rhs_offset, int result_offset, int result_mult_int, int result_shift) { ScopedProfilingLabel("PublicGemmWrapper::Gemm"); gemmlowp::Gemm<std::uint8_t, BitDepthParams, LhsOrder, RhsOrder, ResultOrder>(context, lhs, rhs, result, lhs_offset, rhs_offset, result_offset, result_mult_int, result_shift); return true; } }; template <eight_bit_int_gemm::BitDepthSetting BitDepth> struct BitDepthParamsForSettings {}; template <> struct BitDepthParamsForSettings<eight_bit_int_gemm::BitDepthSetting::A8B8> : DefaultL8R8BitDepthParams {}; template <> struct BitDepthParamsForSettings<eight_bit_int_gemm::BitDepthSetting::A5B7> : DefaultL7R5BitDepthParams {}; template <typename Scalar, eight_bit_int_gemm::BitDepthSetting BitDepth> struct EightBitIntGemmWrapper { typedef BitDepthParamsForSettings<BitDepth> BitDepthParams; static const char* Name() { return "EightBitIntGemm"; } typedef void Context; template <MapOrder LhsOrder, MapOrder RhsOrder, MapOrder ResultOrder> static bool Gemm(Context*, const MatrixMap<const Scalar, LhsOrder>& lhs, const MatrixMap<const Scalar, RhsOrder>& rhs, MatrixMap<Scalar, 
ResultOrder>* result, int lhs_offset, int rhs_offset, int result_offset, int result_mult_int, int result_shift) { ScopedProfilingLabel("EightBitIntGemmWrapper::Gemm"); const bool transpose_c = ResultOrder == MapOrder::RowMajor; const bool transpose_a = LhsOrder == MapOrder::RowMajor; const bool transpose_b = RhsOrder == MapOrder::RowMajor; eight_bit_int_gemm::EightBitIntGemm( transpose_a, transpose_b, transpose_c, lhs.rows(), rhs.cols(), lhs.cols(), lhs.data(), lhs_offset, lhs.stride(), rhs.data(), rhs_offset, rhs.stride(), result->data(), result_offset, result_mult_int, result_shift, result->stride(), BitDepth); return true; } }; template <typename Scalar> struct ReferenceEightBitIntGemmWrapper { typedef DefaultL8R8BitDepthParams BitDepthParams; static const char* Name() { return "ReferenceEightBitIntGemm"; } template <MapOrder LhsOrder, MapOrder RhsOrder, MapOrder ResultOrder> static bool Gemm(bool transpose_a, bool transpose_b, bool transpose_c, const MatrixMap<const Scalar, LhsOrder>& lhs, const MatrixMap<const Scalar, RhsOrder>& rhs, MatrixMap<Scalar, ResultOrder>* result, int lhs_offset, int rhs_offset, int result_offset, int result_mult_int, int result_shift) { ScopedProfilingLabel("ReferenceEightBitIntGemmWrapper::Gemm"); ReferenceEightBitIntGemm(transpose_a, transpose_b, transpose_c, lhs.rows(), rhs.cols(), lhs.cols(), lhs.data(), lhs_offset, lhs.stride(), rhs.data(), rhs_offset, rhs.stride(), result->data(), result_offset, result_mult_int, result_shift, result->stride()); return true; } }; const char* OrderName(MapOrder order) { return order == MapOrder::ColMajor ? 
"ColMajor" : "RowMajor"; } struct ResultStats { ResultStats() : count(0), med_val(0), mean_signed_diff(0), med_signed_diff(0), med_unsigned_diff(0), max_unsigned_diff(0) {} int count; int med_val; float mean_signed_diff; int med_signed_diff; int med_unsigned_diff; int max_unsigned_diff; std::vector<int> count_diff_by_pot_slice; }; void GetResultStats(const std::uint8_t* actual, const std::uint8_t* expected, size_t count, ResultStats* stats) { ScopedProfilingLabel("GetResultStats"); std::vector<std::uint8_t> results; std::vector<std::int16_t> signed_diffs; std::vector<std::uint8_t> unsigned_diffs; std::int64_t signed_diffs_sum = 0; for (size_t i = 0; i < count; i++) { results.push_back(actual[i]); std::int16_t signed_diff = actual[i] - expected[i]; signed_diffs.push_back(signed_diff); unsigned_diffs.push_back(std::abs(signed_diff)); signed_diffs_sum += signed_diff; } std::sort(results.begin(), results.end()); std::sort(signed_diffs.begin(), signed_diffs.end()); std::sort(unsigned_diffs.begin(), unsigned_diffs.end()); const size_t middle = count / 2; stats->count = count; stats->med_val = results[middle]; stats->mean_signed_diff = float(signed_diffs_sum) / count; stats->med_signed_diff = signed_diffs[middle]; stats->med_unsigned_diff = unsigned_diffs[middle]; stats->max_unsigned_diff = unsigned_diffs.back(); // Size 9 for 9 different POT values: 2^0, ..., 2^8 stats->count_diff_by_pot_slice.resize(9); auto cur = unsigned_diffs.begin(); size_t checksum = 0; for (int exponent = 0; exponent < 9; exponent++) { int pot = 1 << exponent; auto next = std::lower_bound(cur, unsigned_diffs.end(), pot); checksum += stats->count_diff_by_pot_slice[exponent] = next - cur; cur = next; } assert(checksum == count); } struct ResultStatsBounds { ResultStatsBounds() : mean_signed_diff(0), med_signed_diff(0), med_unsigned_diff(0), max_unsigned_diff(0) {} float mean_signed_diff; int med_signed_diff; int med_unsigned_diff; int max_unsigned_diff; }; bool CheckResultStatsBounds(const 
ResultStats& stats, const ResultStatsBounds& bounds) { return stats.max_unsigned_diff <= bounds.max_unsigned_diff && stats.med_unsigned_diff <= bounds.med_unsigned_diff && std::abs(stats.med_signed_diff) <= bounds.med_signed_diff && std::abs(stats.mean_signed_diff) <= bounds.mean_signed_diff; } void ReportResultStats(const ResultStats& stats, const ResultStatsBounds& bounds) { printf(" number of matrix entries: %d\n", stats.count); printf(" median value: %d\n", stats.med_val); printf(" median unsigned diff: %d (tolerating %d)\n", stats.med_unsigned_diff, bounds.med_unsigned_diff); printf(" max unsigned diff: %d (tolerating %d)\n", stats.max_unsigned_diff, bounds.max_unsigned_diff); printf(" median signed diff: %d (tolerating %d)\n", stats.med_signed_diff, bounds.med_signed_diff); printf(" mean signed diff: %.3g (tolerating %.3g)\n", stats.mean_signed_diff, bounds.mean_signed_diff); printf("No error: %.2f %% of entries\n", 100.f * stats.count_diff_by_pot_slice[0] / stats.count); for (int exponent = 1; exponent < 9; exponent++) { printf("Error in %d..%d range: %.2f %% of entries\n", 1 << (exponent - 1), (1 << exponent) - 1, 100.f * stats.count_diff_by_pot_slice[exponent] / stats.count); } } // Our approach to choosing result_shift values for testing, is bisection. // This function takes an interval, [result_shift_min .. result_shift_max]. // If too much saturation occurred in either direction, it bisects accordingly, // recursing until the interval contains only one value. // The primary reason why we prefer this over computing optimal shift values, // is that we actually want to exercise some saturation, as there is nontrivial // code handling that in gemmlowp. // Secondarily, this is faster than computing optimal shifts, since in 90% of // cases the first-tried shift value 16 turns out to be good enough. 
template <typename GemmWrapper, typename LhsType, typename RhsType, typename ResultType> void test_gemm_impl(typename GemmWrapper::Context* context, const LhsType& lhs, const RhsType& rhs, ResultType* result, int lhs_offset, int rhs_offset, int result_offset, int result_mult_int, int result_shift_min, int result_shift_max) { const int rows = lhs.rows(); const int cols = rhs.cols(); Check(lhs.cols() == rhs.rows()); const int depth = lhs.cols(); const int result_shift = (result_shift_min + result_shift_max) / 2; if (!GemmWrapper::Gemm(context, lhs.const_map(), rhs.const_map(), &result->map(), lhs_offset, rhs_offset, result_offset, result_mult_int, result_shift)) { // Internal GEMM functions are not required to handle all cases // (e.g. rows < cols) as these are supposed to have been handled // ahead of them. Their test wrappers return false in that case. return; } typedef typename ResultType::Scalar Scalar; static const MapOrder kLhsOrder = LhsType::kOrder; static const MapOrder kRhsOrder = RhsType::kOrder; static const MapOrder kResultOrder = ResultType::kOrder; ResultType ref_result(rows, cols); const bool transpose_c = kResultOrder == MapOrder::RowMajor; const bool transpose_a = kLhsOrder == MapOrder::RowMajor; const bool transpose_b = kRhsOrder == MapOrder::RowMajor; ReferenceEightBitIntGemmWrapper<Scalar>::Gemm( transpose_a, transpose_b, transpose_c, lhs.const_map(), rhs.const_map(), &ref_result.map(), lhs_offset, rhs_offset, result_offset, result_mult_int, result_shift); typedef typename GemmWrapper::BitDepthParams BitDepthParams; ResultStats stats; GetResultStats(result->data(), ref_result.data(), rows * cols, &stats); // Adjust shifts until we get meaningful results int new_result_shift_min = result_shift_min; int new_result_shift_max = result_shift_max; bool retry = false; if (stats.med_val < 32) { new_result_shift_max = (result_shift_min + result_shift_max) / 2; retry = true; } if (stats.med_val > 224) { new_result_shift_min = (result_shift_min + 
result_shift_max) / 2; retry = true; } if (retry) { if (result_shift_min != result_shift_max) { test_gemm_impl<GemmWrapper>(context, lhs, rhs, result, lhs_offset, rhs_offset, result_offset, result_mult_int, new_result_shift_min, new_result_shift_max); } return; } ResultStatsBounds bounds; // Check results const bool good = CheckResultStatsBounds(stats, bounds); printf( "%s: %dx%dx%d %s x %s -> %s, %s, offsets %d/%d/%d, mult %d, shift %d\n", good ? "PASS" : "FAIL", rows, depth, cols, OrderName(kLhsOrder), OrderName(kRhsOrder), OrderName(kResultOrder), GemmWrapper::Name(), lhs_offset, rhs_offset, result_offset, result_mult_int, result_shift); if (!good) { ReportResultStats(stats, bounds); int bad_coeffs_printed = 0; for (int c = 0; c < result->cols() && bad_coeffs_printed < 200; c++) { for (int r = 0; r < result->rows() && bad_coeffs_printed < 200; r++) { if (ref_result(r, c) != (*result)(r, c)) { printf("bad coeff: at (%d, %d), expected %d, got %d\n", r, c, ref_result(r, c), (*result)(r, c)); bad_coeffs_printed++; } } } } Check(good); } template <typename GemmWrapper, typename LhsType, typename RhsType, typename ResultType> void test_gemm(typename GemmWrapper::Context* context, const LhsType& lhs, const RhsType& rhs, ResultType* result, int lhs_offset, int rhs_offset, int result_offset, int result_mult_int) { test_gemm_impl<GemmWrapper>(context, lhs, rhs, result, lhs_offset, rhs_offset, result_offset, result_mult_int, 0, 32); } enum class WhatParamsToTest { All, OnlyGenericCase, }; template <typename GemmWrapper, MapOrder LhsOrder, MapOrder RhsOrder, MapOrder ResultOrder> void test_gemm(typename GemmWrapper::Context* context, int rows, int depth, int cols, WhatParamsToTest params_to_test) { typedef std::uint8_t Scalar; typedef Matrix<Scalar, LhsOrder> LhsType; using BitDepthParams = typename GemmWrapper::BitDepthParams; LhsType lhs(rows, depth); MakeRandom<typename BitDepthParams::LhsRange>(&lhs); typedef Matrix<Scalar, RhsOrder> RhsType; RhsType rhs(depth, cols); 
MakeRandom<typename BitDepthParams::RhsRange>(&rhs); typedef Matrix<Scalar, ResultOrder> ResultType; ResultType result(rows, cols); MakeZero(&result); if (params_to_test == WhatParamsToTest::All) { test_gemm<GemmWrapper>(context, lhs, rhs, &result, 0, 0, 0, 1); test_gemm<GemmWrapper>(context, lhs, rhs, &result, 10, 0, 0, 1); test_gemm<GemmWrapper>(context, lhs, rhs, &result, 0, 10, 0, 1); test_gemm<GemmWrapper>(context, lhs, rhs, &result, 0, 0, 10, 1); test_gemm<GemmWrapper>(context, lhs, rhs, &result, 0, 0, 0, 10); test_gemm<GemmWrapper>(context, lhs, rhs, &result, 10, 10, 10, 10); test_gemm<GemmWrapper>(context, lhs, rhs, &result, 256, 1, 17, 4); } test_gemm<GemmWrapper>(context, lhs, rhs, &result, -75, -91, 74980, 123); } enum class WhatOrdersToTest { All, OnlyRCC }; template <typename GemmWrapper> void test_gemm(typename GemmWrapper::Context* context, int rows, int depth, int cols, WhatParamsToTest params_to_test, WhatOrdersToTest orders_to_test) { #define GEMMLOWP_ONE_TEST(LhsOrder, RhsOrder, ResultOrder) \ do { \ test_gemm<GemmWrapper, MapOrder::LhsOrder, MapOrder::RhsOrder, \ MapOrder::ResultOrder>(context, rows, depth, cols, \ params_to_test); \ } while (false) if (orders_to_test == WhatOrdersToTest::All) { GEMMLOWP_ONE_TEST(ColMajor, ColMajor, ColMajor); GEMMLOWP_ONE_TEST(RowMajor, ColMajor, ColMajor); GEMMLOWP_ONE_TEST(ColMajor, RowMajor, ColMajor); GEMMLOWP_ONE_TEST(RowMajor, RowMajor, ColMajor); GEMMLOWP_ONE_TEST(ColMajor, ColMajor, RowMajor); GEMMLOWP_ONE_TEST(RowMajor, ColMajor, RowMajor); GEMMLOWP_ONE_TEST(ColMajor, RowMajor, RowMajor); GEMMLOWP_ONE_TEST(RowMajor, RowMajor, RowMajor); } else { GEMMLOWP_ONE_TEST(RowMajor, ColMajor, ColMajor); } #undef GEMMLOWP_ONE_TEST } template <typename Kernel> void test_gemm_kernel(MultiThreadGemmContext* context) { typedef MultiThreadGemmWrapper<Kernel, std::uint8_t, DefaultL8R8BitDepthParams> GemmWrapper; test_gemm<GemmWrapper>(context, 1, 1, 1, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); 
test_gemm<GemmWrapper>(context, 2, 2, 2, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 3, 3, 3, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 4, 4, 4, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 5, 5, 5, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 9, 11, 13, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 50, 50, 50, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 200, 200, 200, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::All); test_gemm<GemmWrapper>(context, 50, 5000, 50, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); } template <typename GemmWrapper> void test_gemm(typename GemmWrapper::Context* context) { test_gemm<GemmWrapper>(context, 1, 1, 1, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 2, 1, 1, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 1, 2, 1, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 1, 1, 2, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 2, 2, 2, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 3, 3, 3, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 4, 4, 4, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 5, 5, 5, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 6, 6, 6, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 3, 5, 7, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 7, 3, 5, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 5, 7, 3, WhatParamsToTest::All, 
WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 8, 8, 8, WhatParamsToTest::All, WhatOrdersToTest::All); test_gemm<GemmWrapper>(context, 16, 16, 16, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 32, 32, 32, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 64, 64, 64, WhatParamsToTest::All, WhatOrdersToTest::All); test_gemm<GemmWrapper>(context, 128, 128, 128, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 16, 17, 16, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 37, 55, 73, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 57, 87, 117, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 93, 83, 73, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 109, 89, 99, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 78, 101, 82, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 512, 512, 512, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 1024, 1024, 1024, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 567, 2345, 123, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 100, 5000, 100, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 1, 1, 1000, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 1000, 1, 1, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 1, 1000, 1, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 1, 1000, 1000, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 1000, 1, 1000, 
WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 1000, 1000, 1, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 777, 3456, 1, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 4567, 555, 1, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::OnlyRCC); // Test all storage orders test_gemm<GemmWrapper>(context, 70, 90, 110, WhatParamsToTest::All, WhatOrdersToTest::All); test_gemm<GemmWrapper>(context, 300, 400, 500, WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::All); } template <typename GemmWrapper> void test_gemv(typename GemmWrapper::Context* context) { test_gemm<GemmWrapper>(context, 2, 2, 1, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 3, 3, 1, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 4, 4, 1, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 5, 5, 1, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 6, 6, 1, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 3, 5, 1, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 7, 3, 1, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 5, 7, 1, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 8, 8, 1, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 32, 32, 1, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 128, 128, 1, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); test_gemm<GemmWrapper>(context, 321, 123, 1, WhatParamsToTest::All, WhatOrdersToTest::OnlyRCC); // Test all storage orders test_gemm<GemmWrapper>(context, 70, 90, 1, WhatParamsToTest::All, WhatOrdersToTest::All); test_gemm<GemmWrapper>(context, 300, 400, 1, 
WhatParamsToTest::OnlyGenericCase, WhatOrdersToTest::All); } const char* GetBitDepthName(eight_bit_int_gemm::BitDepthSetting b) { switch (b) { case eight_bit_int_gemm::BitDepthSetting::A8B8: return "Lhs: 8 bit, Rhs: 8 bit"; case eight_bit_int_gemm::BitDepthSetting::A5B7: return "(legacy, no longer requantizing) Lhs: 7 bit, Rhs: 5 bit"; default: abort(); return nullptr; } } // Runs a small set of hand-picked data for per-channel quantized data. // This test case comes from a set of 2 2x2 convolution filters run over a 3x3 // image. void TestWithSmallDataPerChannelQuantization() { const int m = 2; const int n = 9; const int k = 12; // 12 x 2, columnwise. const std::uint8_t a_data[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 64, 64, 64, 64, 64, 64, 0, 0, 0, 255, 255, 255}; const int lda = k; int a_offset[] = {0, -64}; MatrixMap<const std::uint8_t, MapOrder::RowMajor> lhs(a_data, m, k, lda); const OffsetColMap lhs_offset(a_offset, m); // 12 x 9, columnwise. const std::uint8_t b_data[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 127, 127, 127, 0, 0, 0, 127, 127, 127, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 127, 127, 0, 0, 0, 127, 127, 127, 0, 0, 0, 0, 0, 0, 127, 127, 127, 127, 127, 127, 0, 0, 0, 0, 0, 0, 127, 127, 127, 127, 127, 127, 0, 0, 0, 127, 127, 127, 127, 127, 127, 127, 127, 127}; const int ldb = k; int b_offset = -127; MatrixMap<const std::uint8_t, MapOrder::ColMajor> rhs(b_data, k, n, ldb); const OffsetRowDup rhs_offset(b_offset, rhs.cols()); // 2 x 9, columnwise. 
const std::uint8_t expected_c_data[] = {255, 255, 0,   0,   127, 159,
                                          0,   64,  0,   64,  127, 159,
                                          127, 127, 127, 127, 127, 127};
  const int ldc = m;
  // Per-channel requantization parameters: one offset/multiplier per result
  // row, shared shift. Presumably out = ((acc + offset) * mult) >> shift via
  // MakeStandardOutputPipeline below — TODO confirm against output_stages.h.
  int c_offset[] = {97155, 97346};
  int c_mult_int[] = {2741, 2741};
  const int c_shift = 21;

  const int c_count = m * n;
  std::unique_ptr<std::uint8_t[]> output_data(new std::uint8_t[c_count]);
  MatrixMap<std::uint8_t, MapOrder::ColMajor> result(output_data.get(), m, n,
                                                     ldc);
  const OffsetColMap result_offset(c_offset, m);
  const OffsetColMap result_mult_int(c_mult_int, m);
  const int result_shift = c_shift;
  GemmContext gemm_context;
  auto output_pipeline = MakeStandardOutputPipeline<VectorShape::Col>(
      result_offset, result_mult_int, result_shift);
  // PC = per-channel variant: lhs_offset / result offsets are vectors.
  GemmWithOutputPipelinePC<std::uint8_t, std::uint8_t,
                           DefaultL8R8BitDepthParams>(
      &gemm_context, lhs, rhs, &result, lhs_offset, rhs_offset,
      output_pipeline);

  ResultStats stats;
  GetResultStats(output_data.get(), expected_c_data, c_count, &stats);

  ResultStatsBounds bounds;
  const bool good = CheckResultStatsBounds(stats, bounds);
  printf("TestWithSmallDataPerChannelQuantization: %s\n",
         good ? "PASS" : "FAIL");
  ReportResultStats(stats, bounds);
  Check(good);
}

// Runs a larger set of hand-picked data for per-channel quantized data.
// This test case comes from a set of 22 3x3 convolution filters run over a 5x5
// image. Right now, I have 7 different filters and 15 copies of the first
// filter to make sure NEON code path that processes 16 rows at a time is
// covered.
void TestWithLargeDataPerChannelQuantization() {
  const int m = 22;
  const int n = 25;
  const int k = 27;

  // 27 x 22, column-wise. One line below per 27-value filter; the first
  // filter is repeated 15 times at the end (rows 8..22).
  const std::uint8_t a_data[] = {
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 127, 127, 255, 255, 255, 127, 127, 127, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 127, 127, 127, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 127, 127, 127, 0, 0, 0,
      51, 51, 51, 51, 51, 51, 51, 51, 51, 0, 0, 0, 255, 255, 255, 0, 0, 0, 51, 51, 51, 51, 51, 51, 51, 51, 51,
      51, 51, 51, 0, 0, 0, 51, 51, 51, 51, 51, 51, 255, 255, 255, 51, 51, 51, 51, 51, 51, 0, 0, 0, 51, 51, 51,
      0, 0, 0, 64, 64, 64, 0, 0, 0, 64, 64, 64, 255, 255, 255, 64, 64, 64, 0, 0, 0, 64, 64, 64, 0, 0, 0,
      36, 36, 36, 0, 0, 0, 36, 36, 36, 0, 0, 0, 255, 255, 255, 0, 0, 0, 36, 36, 36, 0, 0, 0, 36, 36, 36,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  };
  const int lda = k;
  // Per-row LHS offsets: non-zero only for the 51/36-valued filters above.
  int a_offset[] = {0, 0, 0, -51, -51, 0, -36, 0, 0, 0, 0,
                    0, 0, 0, 0,   0,   0, 0,   0, 0, 0, 0};
  MatrixMap<const std::uint8_t, MapOrder::RowMajor> lhs(a_data, m, k, lda);
  const OffsetColMap lhs_offset(a_offset, m);

  // 27 x 25, column-wise.
  const std::uint8_t b_data[] = {
      127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 119, 119, 119, 119, 119, 119, 127, 127, 127, 119, 119, 119, 119, 119, 119,
      127, 127, 127, 127, 127, 127, 127, 127, 127, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
      127, 127, 127, 127, 127, 127, 127, 127, 127, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
      127, 127, 127, 127, 127, 127, 127, 127, 127, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
      127, 127, 127, 127, 127, 127, 127, 127, 127, 119, 119, 119, 119, 119, 119, 127, 127, 127, 119, 119, 119, 119, 119, 119, 127, 127, 127,
      127, 127, 127, 119, 119, 119, 119, 119, 119, 127, 127, 127, 119, 119, 119, 119, 119, 119, 127, 127, 127, 119, 119, 119, 119, 119, 119,
      119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
      119, 119, 119, 136, 136, 136, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
      119, 119, 119, 119, 119, 119, 136, 136, 136, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
      119, 119, 119, 119, 119, 119, 119, 119, 119, 136, 136, 136, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
      119, 119, 119, 127, 127, 127, 119, 119, 119, 119, 119, 119, 127, 127, 127, 119, 119, 119, 119, 119, 119, 127, 127, 127, 127, 127, 127,
      119, 119, 119, 119, 119, 119, 127, 127, 127, 119, 119, 119, 119, 119, 119, 127, 127, 127, 119, 119, 119, 119, 119, 119, 119, 119, 119,
      119, 119, 119, 119, 119, 119, 119, 119, 119, 136, 136, 136, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
      119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 136, 136, 136, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
      119, 119, 119, 119, 119, 119, 119, 119, 119, 136, 136, 136, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
      119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 127, 127, 127, 119, 119, 119, 119, 119, 119, 127, 127, 127, 119, 119, 119,
      119, 119, 119, 127, 127, 127, 127, 127, 127, 119, 119, 119, 119, 119, 119, 127, 127, 127, 119, 119, 119, 119, 119, 119, 127, 127, 127,
      119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 136, 136, 136, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
      119, 119, 119, 119, 119, 119, 119, 119, 119, 136, 136, 136, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
      119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 136, 136, 136, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
      119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 127, 127, 127, 119, 119, 119, 119, 119, 119,
      127, 127, 127, 119, 119, 119, 119, 119, 119, 127, 127, 127, 127, 127, 127, 119, 119, 119, 119, 119, 119, 127, 127, 127, 119, 119, 119,
      119, 119, 119, 127, 127, 127, 127, 127, 127, 127, 127, 127, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
      119, 119, 119, 127, 127, 127, 127, 127, 127, 127, 127, 127, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
      119, 119, 119, 127, 127, 127, 127, 127, 127, 127, 127, 127, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
      119, 119, 119, 127, 127, 127, 127, 127, 127, 127, 127, 127, 119, 119, 119, 119, 119, 119, 127, 127, 127, 119, 119, 119, 119, 119, 119,
      127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127};
  const int ldb = k;
  int b_offset = -127;
  MatrixMap<const std::uint8_t, MapOrder::ColMajor> rhs(b_data, k, n, ldb);
  const OffsetRowDup rhs_offset(b_offset, rhs.cols());

  // 22 x 25, column-wise.
  const std::uint8_t expected_c_data[] = {
      7, 37, 37, 67, 67, 39, 79, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 37, 87, 67, 23, 91, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 37, 87, 67, 23, 91, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 37, 87, 67, 23, 91, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 37, 37, 67, 67, 39, 79, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 37, 7, 67, 87, 23, 91, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 87, 87, 7, 103, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 71, 87, 45, 41, 77, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 87, 87, 7, 103, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 37, 7, 67, 87, 23, 91, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 37, 7, 67, 87, 23, 91, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 71, 7, 45, 87, 41, 77, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      255, 135, 135, 255, 255, 143, 255, 255, 255, 255, 255, 255, 255, 255,
      255, 255, 255, 255, 255, 255, 255, 255,
      7, 71, 7, 45, 87, 41, 77, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 37, 7, 67, 87, 23, 91, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 37, 7, 67, 87, 23, 91, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 87, 87, 7, 103, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 71, 87, 45, 41, 77, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 87, 87, 7, 103, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 37, 7, 67, 87, 23, 91, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 37, 37, 67, 67, 39, 79, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 37, 87, 67, 23, 91, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 37, 87, 67, 23, 91, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 37, 87, 67, 23, 91, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 37, 37, 67, 67, 39, 79, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
      99, 99, 99, 99, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111,
      111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111,
  };
  const int ldc = m;
  // Per-channel requantization parameters, one per result row.
  int c_offset[] = {
      6477, 12954, 12954, 7793, 7793, 12954, 9282, 6477, 6477, 6477, 6477,
      6477, 6477,  6477,  6477, 6477, 6477,  6477, 6477, 6477, 6477, 6477,
  };
  int c_mult_int[] = {
      41121, 20560, 20560, 34267, 34267, 21937, 28784, 41121, 41121, 41121,
      41121, 41121, 41121, 41121, 41121, 41121, 41121, 41121, 41121, 41121,
      41121, 41121,
  };
  const int c_shift = 21;

  const int c_count = m * n;
  std::unique_ptr<std::uint8_t[]> output_data(new std::uint8_t[c_count]);
  MatrixMap<std::uint8_t, MapOrder::ColMajor> result(output_data.get(), m, n,
                                                     ldc);
  const OffsetColMap result_offset(c_offset, m);
  const OffsetColMap result_mult_int(c_mult_int, m);
  const int result_shift = c_shift;
  GemmContext gemm_context;
  auto output_pipeline = MakeStandardOutputPipeline<VectorShape::Col>(
      result_offset, result_mult_int, result_shift);
  GemmWithOutputPipelinePC<std::uint8_t, std::uint8_t,
                           DefaultL8R8BitDepthParams>(
      &gemm_context, lhs, rhs, &result, lhs_offset, rhs_offset,
      output_pipeline);

  ResultStats stats;
  GetResultStats(output_data.get(), expected_c_data, c_count, &stats);

  ResultStatsBounds bounds;
  const bool good = CheckResultStatsBounds(stats, bounds);
  printf("TestWithLargeDataPerChannelQuantization: %s\n",
         good ? "PASS" : "FAIL");
  ReportResultStats(stats, bounds);
  Check(good);
}

// Multithreading only activates when the result has more than 16 rows, and also
// (result rows) * (result cols) * depth >= 2 x 65 x 1024. Size was selected
// to run in 3 threads.
//
// Based on the following floating point data:
//   LHS: all zeros except 10.0, 20.0 at the beginning of first 16 rows;
//        1.0, 2.0 at the beginning of next 16 rows; 0.1, 0.2 in next 16 rows;
//        0.01, 0.02 in last 16 rows.
//   RHS: all zeros except 1.0 in (0, 0) and 2.0 in (1, 0).
// Varying boundaries were used for each 16 rows block of LHS, to test for
// correct indexing into offsets.
// Expected result: all zeros, except 50.0 at the beginning of first 16 rows;
// 5.0 at the beginning of next 16 rows; 0.5 in next 16 rows; 0.05 in last
// 16 rows.
void TestMultithreadedPerChannelQuantization() {
  const int m = 64;
  const int n = 20;
  const int k = 160;

  // LHS, m x k.
  // One entry per 16-row block; expanded below via [i / 16] indexing.
  const std::array<std::int32_t, 4> lhs_offsets_terse{{
      0, -51, -85, -109,
  }};
  assert(lhs_offsets_terse.size() * 16 == m);
  const std::array<std::uint8_t, 4> lhs_first_el{{
      128, 153, 170, 182,
  }};
  assert(lhs_first_el.size() * 16 == m);

  // lhs_first_el at (i, 0) and 255 at (i, 1), other values are all -offset.
  std::vector<std::uint8_t> a_data(m * k, 0);
  for (int i = 0; i < m; ++i) {
    a_data[i * k] = lhs_first_el[i / 16];
    a_data[i * k + 1] = 255;
    for (int j = 2; j < k; ++j) {
      // -offset so that (value + offset) == 0 for the "zero" entries.
      a_data[i * k + j] = std::uint8_t(-lhs_offsets_terse[i / 16]);
    }
  }
  const int lda = k;
  // Given values at [i / 16].
  std::vector<std::int32_t> a_offset(m, 0);
  for (int i = 0; i < m; ++i) {
    a_offset[i] = lhs_offsets_terse[i / 16];
  }
  MatrixMap<const std::uint8_t, MapOrder::RowMajor> lhs(&a_data[0], m, k, lda);
  const OffsetColMap lhs_offset(&a_offset[0], m);

  // RHS, k x n.
  // All zeros, except 128 at (0, 0) and 255 at (1, 0).
  std::vector<std::uint8_t> b_data(k * n, 0);
  b_data[0] = 128;
  b_data[1] = 255;
  const int ldb = k;
  std::int32_t b_offset = 0;
  MatrixMap<const std::uint8_t, MapOrder::ColMajor> rhs(&b_data[0], k, n, ldb);
  const OffsetRowDup rhs_offset(b_offset, rhs.cols());

  // Result, m x n.
  // All zeros, except given values at (i / 16, 0).
  const std::array<std::uint8_t, 4> expected_c_terse{{
      142, 159, 182, 213,
  }};
  assert(expected_c_terse.size() * 16 == m);
  std::vector<std::uint8_t> expected_c_data(m * n, 0);
  for (int i = 0; i < m; ++i) {
    expected_c_data[i] = expected_c_terse[i / 16];
  }
  const int ldc = m;
  // All zeros.
  std::vector<std::int32_t> c_offset(m, 0);
  // Given values at [i / 16].
  const std::array<std::int32_t, 4> c_mult_int_terse{{
      3655, 5140, 7049, 9595,
  }};
  assert(c_mult_int_terse.size() * 16 == m);
  std::vector<std::int32_t> c_mult_int(m);
  for (int i = 0; i < m; ++i) {
    c_mult_int[i] = c_mult_int_terse[i / 16];
  }
  const int c_shift = 21;

  const int c_count = m * n;
  std::unique_ptr<std::uint8_t[]> output_data(new std::uint8_t[c_count]);
  MatrixMap<std::uint8_t, MapOrder::ColMajor> result(output_data.get(), m, n,
                                                     ldc);
  const OffsetColMap result_offset(&c_offset[0], m);
  const OffsetColMap result_mult_int(&c_mult_int[0], m);
  const int result_shift = c_shift;
  GemmContext gemm_context;
  auto output_pipeline = MakeStandardOutputPipeline<VectorShape::Col>(
      result_offset, result_mult_int, result_shift);
  GemmWithOutputPipelinePC<std::uint8_t, std::uint8_t,
                           DefaultL8R8BitDepthParams>(
      &gemm_context, lhs, rhs, &result, lhs_offset, rhs_offset,
      output_pipeline);

  ResultStats stats;
  GetResultStats(output_data.get(), &expected_c_data[0], c_count, &stats);

  ResultStatsBounds bounds;
  const bool good = CheckResultStatsBounds(stats, bounds);
  printf("TestMultithreadedPerChannelQuantization: %s\n",
         good ? "PASS" : "FAIL");
  ReportResultStats(stats, bounds);
  Check(good);
}

// Runs a small set of hand-calculated data through the implementation.
void TestWithSmallData() {
  const int m = 4;
  const int n = 2;
  const int k = 3;
  // Matrix A (LHS) is:
  // |  7 | 10 | 13 | 16 |
  // |  8 | 11 | 14 | 17 |
  // |  9 | 12 | 15 | 18 |
  const std::uint8_t a_data[] = {7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18};
  // Matrix B (RHS) is:
  // |  1 |  3 |  5 |
  // |  2 |  4 |  6 |
  const std::uint8_t b_data[] = {1, 2, 3, 4, 5, 6};
  // Here are the results we expect, from hand calculations:
  // (1 * 7) + (3 * 8) + (5 * 9) = 76
  // (2 * 7) + (4 * 8) + (6 * 9) = 100
  // (1 * 10) + (3 * 11) + (5 * 12) = 103
  // (2 * 10) + (4 * 11) + (6 * 12) = 136
  // (1 * 13) + (3 * 14) + (5 * 15) = 130
  // (2 * 13) + (4 * 14) + (6 * 15) = 172
  // (1 * 16) + (3 * 17) + (5 * 18) = 157
  // (2 * 16) + (4 * 17) + (6 * 18) = 208
  // That means matrix C should be:
  // |  76 | 103 | 130 | 157 |
  // | 100 | 136 | 172 | 208 |
  const std::uint8_t expected_data[] = {76, 100, 103, 136, 130, 172, 157, 208};

  const int c_count = m * n;
  std::unique_ptr<std::uint8_t[]> output_data(new std::uint8_t[c_count]);

  const bool is_a_transposed = true;
  const bool is_b_transposed = true;
  const bool is_c_transposed = true;
  const int lda = k;
  const int ldb = n;
  const int ldc = n;

  // Identity-like quantization parameters: no offsets, multiplier 1,
  // shift 0, so the raw accumulators come out unchanged.
  const int a_offset = 0;
  const int b_offset = 0;
  const int c_offset = 0;
  const int c_mult = 1;
  const int c_shift = 0;

  gemmlowp::eight_bit_int_gemm::EightBitIntGemm(
      is_a_transposed, is_b_transposed, is_c_transposed, m, n, k, a_data,
      a_offset, lda, b_data, b_offset, ldb, output_data.get(), c_offset,
      c_mult, c_shift, ldc, eight_bit_int_gemm::BitDepthSetting::A8B8);

  ResultStats stats;
  GetResultStats(output_data.get(), expected_data, c_count, &stats);

  ResultStatsBounds bounds;
  const bool good = CheckResultStatsBounds(stats, bounds);
  printf("TestWithSmallData: %s\n", good ? "PASS" : "FAIL");
  ReportResultStats(stats, bounds);
  Check(good);
}

// This is the most realistic test of how we'll be using the low-precision GEMM
// function in applications. It takes in large input matrices that have been
// captured from an actual neural network run.
void TestWithRealData(eight_bit_int_gemm::BitDepthSetting BitDepth,
                      int tolerance_median, int tolerance_max) {
  std::unique_ptr<std::uint8_t[]> output_data(
      new std::uint8_t[test_data::c_count]);
  gemmlowp::eight_bit_int_gemm::EightBitIntGemm(
      test_data::is_a_transposed, test_data::is_b_transposed,
      test_data::is_c_transposed, test_data::m, test_data::n, test_data::k,
      test_data::a_data, test_data::a_offset, test_data::k, test_data::b_data,
      test_data::b_offset, test_data::k, output_data.get(),
      test_data::c_offset, test_data::c_mult_int, test_data::c_shift,
      test_data::m, BitDepth);

  ResultStats stats;
  GetResultStats(output_data.get(), test_data::expected_c_data,
                 test_data::c_count, &stats);

  ResultStatsBounds bounds;
  // The legacy A5B7 setting is allowed some error relative to the captured
  // reference output; A8B8 must match with the default (tight) bounds.
  if (BitDepth == eight_bit_int_gemm::BitDepthSetting::A5B7) {
    bounds.med_unsigned_diff = tolerance_median;
    bounds.max_unsigned_diff = tolerance_max;
    bounds.med_signed_diff = 0;
    bounds.mean_signed_diff = 0.2f;
  }

  const bool good = CheckResultStatsBounds(stats, bounds);
  printf("TestWithRealData: %s with %s\n", good ? "PASS" : "FAIL",
         GetBitDepthName(BitDepth));
  ReportResultStats(stats, bounds);
  Check(good);
}

// Exercises the user-visible output-pipeline stages one by one on a random
// GEMM of the given shape, checking each against a scalar reference
// computation. result_offset / result_mult_int / result_shift parameterize
// the quantize-down stages tested below.
template <typename BitDepthParams, MapOrder ResultOrder>
void TestOutputStages(int rows, int depth, int cols, int result_offset,
                      int result_mult_int, int result_shift) {
  Matrix<std::uint8_t, MapOrder::RowMajor> lhs(rows, depth);
  Matrix<std::uint8_t, MapOrder::ColMajor> rhs(depth, cols);
  Matrix<std::int32_t, ResultOrder> result_raw_int32(rows, cols);
  MakeRandom<typename BitDepthParams::LhsRange>(&lhs);
  MakeRandom<typename BitDepthParams::RhsRange>(&rhs);
  const int lhs_offset = 12;
  const int rhs_offset = -34;

  // Test an empty pipeline, i.e. returning raw int32 accumulators.
auto empty_pipeline = std::make_tuple();
  GemmContext context;
  GemmWithOutputPipeline<std::uint8_t, std::int32_t, DefaultL8R8BitDepthParams>(
      &context, lhs.const_map(), rhs.const_map(), &result_raw_int32, lhs_offset,
      rhs_offset, empty_pipeline);

  // Reference check: accumulate (lhs + lhs_offset) * (rhs + rhs_offset).
  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
      std::int32_t expected = 0;
      for (int d = 0; d < depth; d++) {
        std::int32_t lhs_val = static_cast<std::int32_t>(lhs(r, d)) + lhs_offset;
        std::int32_t rhs_val = static_cast<std::int32_t>(rhs(d, c)) + rhs_offset;
        expected += lhs_val * rhs_val;
      }
      Check(expected == result_raw_int32(r, c));
    }
  }

  // Test a pipeline with only the quantize-down stage, still returning
  // unclamped (but scaled) int32's
  OutputStageQuantizeDownInt32ToUint8Scale quantize_down_stage;
  quantize_down_stage.result_offset = result_offset;
  quantize_down_stage.result_mult_int = result_mult_int;
  quantize_down_stage.result_shift = result_shift;
  auto quantize_down_pipeline = std::make_tuple(quantize_down_stage);
  Matrix<std::int32_t, ResultOrder> result_quantized_down_int32(rows, cols);
  GemmWithOutputPipeline<std::uint8_t, std::int32_t, DefaultL8R8BitDepthParams>(
      &context, lhs.const_map(), rhs.const_map(), &result_quantized_down_int32,
      lhs_offset, rhs_offset, quantize_down_pipeline);

  std::int64_t sum = 0;
  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
      std::int32_t raw = result_raw_int32(r, c);
      std::int32_t expected = RoundingDivideByPOT(
          (raw + result_offset) * result_mult_int, result_shift);
      Check(expected == result_quantized_down_int32(r, c));
      sum += expected;
    }
  }
  std::int64_t avg = sum / (rows * cols);
  // Test that the average quantized-down value falls reasonably in the
  // middle of the [0..255] range. Otherwise, the multiplier / shift need to be
  // adjusted.
  Check(avg >= 64 && avg <= 192);

  // Test the familiar default pipeline consisting of quantize-down and
  // clamp-and-cast-to-uint8.
  OutputStageSaturatingCastToUint8 saturating_cast_stage;
  auto quantize_down_and_saturating_cast_pipeline =
      std::make_tuple(quantize_down_stage, saturating_cast_stage);
  Matrix<std::uint8_t, ResultOrder> result_quantized_down_saturated_uint8(
      rows, cols);
  GemmWithOutputPipeline<std::uint8_t, std::uint8_t, DefaultL8R8BitDepthParams>(
      &context, lhs.const_map(), rhs.const_map(),
      &result_quantized_down_saturated_uint8, lhs_offset, rhs_offset,
      quantize_down_and_saturating_cast_pipeline);

  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
      std::int32_t quantized = result_quantized_down_int32(r, c);
      std::uint8_t expected = std::min(std::max(quantized, 0), 255);
      Check(expected == result_quantized_down_saturated_uint8(r, c));
    }
  }

  // Test a variant of the familiar default pipeline consisting of quantize-down
  // and clamp-and-cast-to-int16.
  OutputStageSaturatingCastToInt16 saturating_cast_int16_stage;
  auto quantize_down_and_saturating_cast_int16_pipeline =
      std::make_tuple(quantize_down_stage, saturating_cast_int16_stage);
  Matrix<std::int16_t, ResultOrder> result_quantized_down_saturated_int16(
      rows, cols);
  GemmWithOutputPipeline<std::uint8_t, std::int16_t, DefaultL8R8BitDepthParams>(
      &context, lhs.const_map(), rhs.const_map(),
      &result_quantized_down_saturated_int16, lhs_offset, rhs_offset,
      quantize_down_and_saturating_cast_int16_pipeline);

  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
      std::int32_t quantized = result_quantized_down_int32(r, c);
      std::int16_t expected = std::min(std::max(quantized, -32768), 32767);
      Check(expected == result_quantized_down_saturated_int16(r, c));
    }
  }

#ifdef GEMMLOWP_MSA
  // Test a pipeline consisting of quantize-down and truncating-cast-to-uint8.
  OutputStageTruncatingCastToUint8 truncating_cast_stage;
  auto quantize_down_and_truncating_cast_pipeline =
      std::make_tuple(quantize_down_stage, truncating_cast_stage);
  Matrix<std::uint8_t, ResultOrder> result_quantized_down_truncated_uint8(
      rows, cols);
  GemmWithOutputPipeline<std::uint8_t, std::uint8_t, DefaultL8R8BitDepthParams>(
      &context, lhs.const_map(), rhs.const_map(),
      &result_quantized_down_truncated_uint8, lhs_offset, rhs_offset,
      quantize_down_and_truncating_cast_pipeline);

  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
      std::int32_t quantized = result_quantized_down_int32(r, c);
      // Truncating cast keeps only the low 8 bits, no clamping.
      std::uint8_t expected = quantized & 255;
      Check(expected == result_quantized_down_truncated_uint8(r, c));
    }
  }
#endif

  // Test a bias-addition with row-vector
  std::vector<std::int32_t> row_vector_data(cols);
  std::uniform_int_distribution<std::int32_t> uniform_minus_500_plus_500(-500,
                                                                         500);
  for (int i = 0; i < cols; i++) {
    row_vector_data[i] = uniform_minus_500_plus_500(RandomEngine());
  }
  typedef VectorMap<std::int32_t, VectorShape::Row> RowVectorMap;
  RowVectorMap row_vector_map(row_vector_data.data(), cols);
  OutputStageBiasAddition<RowVectorMap> row_bias_addition_stage;
  row_bias_addition_stage.bias_vector = row_vector_map;
  auto row_bias_addition_pipeline = std::make_tuple(row_bias_addition_stage);
  Matrix<std::int32_t, ResultOrder> result_of_row_bias_addition(rows, cols);
  GemmWithOutputPipeline<std::uint8_t, std::int32_t, DefaultL8R8BitDepthParams>(
      &context, lhs.const_map(), rhs.const_map(), &result_of_row_bias_addition,
      lhs_offset, rhs_offset, row_bias_addition_pipeline);
  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
      std::int32_t expected = result_raw_int32(r, c) + row_vector_data[c];
      Check(expected == result_of_row_bias_addition(r, c));
    }
  }

  // Test a bias-addition with column-vector
  std::vector<std::int32_t> col_vector_data(rows);
  for (int i = 0; i < rows; i++) {
    col_vector_data[i] = uniform_minus_500_plus_500(RandomEngine());
  }
  typedef VectorMap<std::int32_t, VectorShape::Col> ColVectorMap;
  ColVectorMap col_vector_map(col_vector_data.data(), rows);
  OutputStageBiasAddition<ColVectorMap> col_bias_addition_stage;
  col_bias_addition_stage.bias_vector = col_vector_map;
  auto col_bias_addition_pipeline = std::make_tuple(col_bias_addition_stage);
  Matrix<std::int32_t, ResultOrder> result_of_col_bias_addition(rows, cols);
  GemmWithOutputPipeline<std::uint8_t, std::int32_t, DefaultL8R8BitDepthParams>(
      &context, lhs.const_map(), rhs.const_map(), &result_of_col_bias_addition,
      lhs_offset, rhs_offset, col_bias_addition_pipeline);
  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
      std::int32_t expected = result_raw_int32(r, c) + col_vector_data[r];
      Check(expected == result_of_col_bias_addition(r, c));
    }
  }

  // Test a clamp
  OutputStageClamp clamp_stage;
  // Determine min and max of raw int32 accumulators
  std::int32_t raw_min = std::numeric_limits<std::int32_t>::max();
  std::int32_t raw_max = std::numeric_limits<std::int32_t>::min();
  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
      raw_min = std::min(raw_min, result_raw_int32(r, c));
      raw_max = std::max(raw_max, result_raw_int32(r, c));
    }
  }
  // Pick some interesting clamp min/max bounds
  clamp_stage.min = static_cast<std::int32_t>(raw_min * 0.7 + raw_max * 0.3);
  clamp_stage.max = static_cast<std::int32_t>(raw_min * 0.3 + raw_max * 0.7);
  assert(raw_min <= clamp_stage.min && clamp_stage.min <= clamp_stage.max &&
         clamp_stage.max <= raw_max);
  auto clamp_pipeline = std::make_tuple(clamp_stage);
  Matrix<std::int32_t, ResultOrder> result_clamped(rows, cols);
  GemmWithOutputPipeline<std::uint8_t, std::int32_t, DefaultL8R8BitDepthParams>(
      &context, lhs.const_map(), rhs.const_map(), &result_clamped, lhs_offset,
      rhs_offset, clamp_pipeline);
  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
      std::int32_t raw = result_raw_int32(r, c);
      std::int32_t expected =
          std::min(std::max(raw, clamp_stage.min), clamp_stage.max);
      Check(expected == result_clamped(r, c));
    }
  }

  // Test tanh
  OutputStageTanh tanh_stage;
  const std::int32_t real_zero_as_int32 = (raw_max + raw_min) / 2;
  const std::int32_t real_amplitude_as_int32 = (raw_max - raw_min) / 16;
  tanh_stage.real_zero_as_int32 = real_zero_as_int32;
  tanh_stage.real_amplitude_as_int32 = real_amplitude_as_int32;
  auto tanh_pipeline = std::make_tuple(tanh_stage);
  Matrix<std::int32_t, ResultOrder> result_tanh(rows, cols);
  GemmWithOutputPipeline<std::uint8_t, std::int32_t, DefaultL8R8BitDepthParams>(
      &context, lhs.const_map(), rhs.const_map(), &result_tanh, lhs_offset,
      rhs_offset, tanh_pipeline);
  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
      std::int32_t raw = result_raw_int32(r, c);
      // Compare against double-precision tanh in "real" units, with a
      // small tolerance for the fixed-point approximation.
      double real_input =
          double(raw - real_zero_as_int32) / real_amplitude_as_int32;
      double expected = std::tanh(real_input);
      std::int32_t actual_int32 = result_tanh(r, c);
      double actual =
          double(actual_int32 - real_zero_as_int32) / real_amplitude_as_int32;
      Check(std::abs(expected - actual) < 2e-4);
    }
  }

  // Test a pipeline with bias and clamp
  auto bias_clamp_pipeline =
      std::make_tuple(col_bias_addition_stage, clamp_stage);
  Matrix<std::int32_t, ResultOrder> result_biased_clamped(rows, cols);
  GemmWithOutputPipeline<std::uint8_t, std::int32_t, DefaultL8R8BitDepthParams>(
      &context, lhs.const_map(), rhs.const_map(), &result_biased_clamped,
      lhs_offset, rhs_offset, bias_clamp_pipeline);
  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
      std::int32_t raw = result_raw_int32(r, c);
      std::int32_t biased = raw + col_vector_data[r];
      std::int32_t expected =
          std::min(std::max(biased, clamp_stage.min), clamp_stage.max);
      Check(expected == result_biased_clamped(r, c));
    }
  }

  // Test a full pipeline with bias and clamp and quantization down to 8bit
  // result
  auto bias_clamp_quantize_cast_pipeline =
      std::make_tuple(col_bias_addition_stage, clamp_stage, quantize_down_stage,
                      saturating_cast_stage);
  Matrix<std::uint8_t, ResultOrder> result_biased_clamped_quantized_casted(
      rows, cols);
  GemmWithOutputPipeline<std::uint8_t, std::uint8_t, DefaultL8R8BitDepthParams>(
      &context, lhs.const_map(), rhs.const_map(),
      &result_biased_clamped_quantized_casted, lhs_offset, rhs_offset,
      bias_clamp_quantize_cast_pipeline);
  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
      std::int32_t quantized = RoundingDivideByPOT(
          (result_biased_clamped(r, c) + result_offset) * result_mult_int,
          result_shift);
      std::uint8_t expected = std::min(std::max(quantized, 0), 255);
      Check(expected == result_biased_clamped_quantized_casted(r, c));
    }
  }

  // Test a pipeline with the fixed-point-multiplier variant stage for the
  // quantizing down of 32bit accumulators.
  //
  // First, figure appropriate fixedpoint multiplier and shift values.
  std::int32_t result_fixedpoint_multiplier = result_mult_int;
  std::int32_t result_fixedpoint_shift = result_shift;
  Check(result_mult_int > 0);
  Check(result_shift > 0);
  // Normalize the multiplier into [2^30, 2^31) and compensate in the shift.
  result_fixedpoint_multiplier = result_mult_int;
  result_fixedpoint_shift = result_shift - 31;
  while (result_fixedpoint_multiplier < (1 << 30)) {
    result_fixedpoint_multiplier <<= 1;
    result_fixedpoint_shift++;
  }
  Check(result_fixedpoint_shift >= 0);
  // Now test OutputStageQuantizeDownInt32ByFixedPoint
  OutputStageQuantizeDownInt32ByFixedPoint quantize_down_by_fixedpoint_stage;
  quantize_down_by_fixedpoint_stage.result_offset_after_shift =
      static_cast<std::int32_t>(
          round(static_cast<double>(result_offset * result_mult_int) /
                (1 << result_shift)));
  quantize_down_by_fixedpoint_stage.result_fixedpoint_multiplier =
      result_fixedpoint_multiplier;
  quantize_down_by_fixedpoint_stage.result_shift = result_fixedpoint_shift;
  auto quantize_down_by_fixedpoint_pipeline =
      std::make_tuple(quantize_down_by_fixedpoint_stage);
  Matrix<std::int32_t, ResultOrder> result_quantized_down_by_fixedpoint_int32(
      rows, cols);
  GemmWithOutputPipeline<std::uint8_t, std::int32_t, DefaultL8R8BitDepthParams>(
      &context, lhs.const_map(), rhs.const_map(),
      &result_quantized_down_by_fixedpoint_int32, lhs_offset, rhs_offset,
      quantize_down_by_fixedpoint_pipeline);

  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
      const std::int32_t actual =
          result_quantized_down_by_fixedpoint_int32(r, c);
      const std::int32_t raw = result_raw_int32(r, c);
      const std::int32_t expected =
          quantize_down_by_fixedpoint_stage.result_offset_after_shift +
          RoundingDivideByPOT(SaturatingRoundingDoublingHighMul(
                                  raw, result_fixedpoint_multiplier),
                              result_fixedpoint_shift);
      Check(actual == expected);
    }
  }

  // Test OutputStageScaleInt32ByFixedPointAndExponent
  for (int exponent = -2; exponent <= 2; exponent++) {
    OutputStageScaleInt32ByFixedPointAndExponent
        scale_by_fixedpoint_and_exponent_stage;
    scale_by_fixedpoint_and_exponent_stage.result_offset_after_shift =
        static_cast<std::int32_t>(round(static_cast<double>(
            result_offset * result_mult_int * std::pow(2.0, exponent))));
    scale_by_fixedpoint_and_exponent_stage.result_fixedpoint_multiplier =
        result_fixedpoint_multiplier;
    scale_by_fixedpoint_and_exponent_stage.result_exponent = exponent;
    auto scale_by_fixedpoint_and_exponent_pipeline =
        std::make_tuple(scale_by_fixedpoint_and_exponent_stage);
    Matrix<std::int32_t, ResultOrder>
        result_scaled_by_fixedpoint_and_exponent_int32(rows, cols);
    GemmWithOutputPipeline<std::uint8_t, std::int32_t,
                           DefaultL8R8BitDepthParams>(
        &context, lhs.const_map(), rhs.const_map(),
        &result_scaled_by_fixedpoint_and_exponent_int32, lhs_offset,
        rhs_offset, scale_by_fixedpoint_and_exponent_pipeline);
    for (int r = 0; r < rows; r++) {
      for (int c = 0; c < cols; c++) {
        const std::int32_t actual =
            result_scaled_by_fixedpoint_and_exponent_int32(r, c);
        const std::int32_t raw = result_raw_int32(r, c);
        // Positive exponent = left shift before the fixedpoint multiply;
        // negative exponent = rounding right shift after it.
        int left_shift = std::max(0, exponent);
        int right_shift = std::max(0, -exponent);
        const std::int32_t expected =
            scale_by_fixedpoint_and_exponent_stage.result_offset_after_shift +
            RoundingDivideByPOT(
                SaturatingRoundingDoublingHighMul((1 << left_shift) * raw,
                                                  result_fixedpoint_multiplier),
                right_shift);
        Check(actual == expected);
      }
    }
  }

  // Test the variant of the familiar default pipeline consisting of
  // quantize-down and
  // clamp-and-cast-to-uint8, where we used fixedpoint multipliers for the
  // downscaling.
  auto quantize_down_by_fixedpoint_and_saturating_cast_pipeline =
      std::make_tuple(quantize_down_by_fixedpoint_stage, saturating_cast_stage);
  Matrix<std::uint8_t, ResultOrder>
      result_quantized_down_by_fixedpoint_saturated_uint8(rows, cols);
  GemmWithOutputPipeline<std::uint8_t, std::uint8_t, DefaultL8R8BitDepthParams>(
      &context, lhs.const_map(), rhs.const_map(),
      &result_quantized_down_by_fixedpoint_saturated_uint8, lhs_offset,
      rhs_offset, quantize_down_by_fixedpoint_and_saturating_cast_pipeline);
  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
      std::int32_t quantized = result_quantized_down_by_fixedpoint_int32(r, c);
      std::uint8_t expected = std::min(std::max(quantized, 0), 255);
      Check(expected ==
            result_quantized_down_by_fixedpoint_saturated_uint8(r, c));
    }
  }

  printf("TestOutputStages: PASS with ResultOrder=%s\n",
         OrderName(ResultOrder));
}

#ifndef GEMMLOWP_SKIP_EXHAUSTIVE_TESTS

// Runs the full GEMM and GEMV batteries against both the internal
// (single/multi-threaded) and public interfaces for one bit-depth setting.
template <typename BitDepthParams>
void TestExhaustively() {
  GemmContext context;

  // Test the internal GEMM interfaces
  test_gemm<SingleThreadGemmWrapper<DefaultKernel<BitDepthParams>,
                                    std::uint8_t, BitDepthParams>>(&context);
  test_gemm<MultiThreadGemmWrapper<DefaultKernel<BitDepthParams>, std::uint8_t,
                                   BitDepthParams>>(&context);

  // Test the public GEMM interfaces
  test_gemm<PublicGemmWrapper<std::uint8_t, BitDepthParams>>(&context);

  // Test GEMV cases (internal interfaces)
  test_gemv<SingleThreadGemmWrapper<DefaultKernel<BitDepthParams>,
                                    std::uint8_t, BitDepthParams>>(&context);
  test_gemv<MultiThreadGemmWrapper<DefaultKernel<BitDepthParams>, std::uint8_t,
                                   BitDepthParams>>(&context);

  // Test GEMV cases (public interfaces)
  test_gemv<PublicGemmWrapper<std::uint8_t, BitDepthParams>>(&context);
}

// Runs the GEMM/GEMV batteries through the legacy EightBitIntGemm interface
// for one BitDepthSetting.
template <eight_bit_int_gemm::BitDepthSetting BitDepthSetting>
void TestExhaustivelyEightBitIntGemm() {
  GemmContext
context;
  // NOTE(review): test_gemv is invoked twice below with identical template
  // arguments — this looks like a copy-paste duplicate that only doubles
  // runtime; confirm intent before removing the second call.
  test_gemv<EightBitIntGemmWrapper<std::uint8_t, BitDepthSetting>>(&context);
  test_gemv<EightBitIntGemmWrapper<std::uint8_t, BitDepthSetting>>(&context);
  test_gemm<EightBitIntGemmWrapper<std::uint8_t, BitDepthSetting>>(&context);
}

// Runs the GEMM battery against the reference kernel instantiated with a
// variety of cell formats/orders, to stress the packing code paths.
void TestKernels() {
  GemmContext context;

  // Test specific kernels with various different formats,
  // to exercises corner cases especially in the packing code.
  test_gemm_kernel<
      ReferenceKernel<KernelFormat<KernelSideFormat<CellFormat<1, 1>, 1>,
                                   KernelSideFormat<CellFormat<1, 1>, 1>>>>(
      &context);
  test_gemm_kernel<
      ReferenceKernel<KernelFormat<KernelSideFormat<CellFormat<4, 2>, 1>,
                                   KernelSideFormat<CellFormat<4, 2>, 2>>>>(
      &context);
  test_gemm_kernel<
      ReferenceKernel<KernelFormat<KernelSideFormat<CellFormat<4, 2>, 4>,
                                   KernelSideFormat<CellFormat<4, 2>, 5>>>>(
      &context);
  test_gemm_kernel<ReferenceKernel<KernelFormat<
      KernelSideFormat<CellFormat<3, 4, CellOrder::DepthMajor>, 2>,
      KernelSideFormat<CellFormat<5, 4, CellOrder::DepthMajor>, 3>>>>(&context);
  test_gemm_kernel<ReferenceKernel<KernelFormat<
      KernelSideFormat<CellFormat<3, 4, CellOrder::WidthMajor>, 2>,
      KernelSideFormat<CellFormat<5, 4, CellOrder::WidthMajor>, 3>>>>(&context);
  test_gemm_kernel<ReferenceKernel<KernelFormat<
      KernelSideFormat<CellFormat<5, 2, CellOrder::WidthMajor>, 3>,
      KernelSideFormat<CellFormat<4, 2, CellOrder::DepthMajor>, 2>>>>(&context);
  test_gemm_kernel<ReferenceKernel<KernelFormat<
      KernelSideFormat<CellFormat<5, 2, CellOrder::DepthMajor>, 3>,
      KernelSideFormat<CellFormat<4, 2, CellOrder::WidthMajor>, 2>>>>(&context);
  test_gemm_kernel<ReferenceKernel<KernelFormat<
      KernelSideFormat<CellFormat<8, 8, CellOrder::Diagonal>, 2>,
      KernelSideFormat<CellFormat<3, 8, CellOrder::WidthMajor>, 1>>>>(&context);
  test_gemm_kernel<ReferenceKernel<KernelFormat<
      KernelSideFormat<CellFormat<1, 4, CellOrder::DepthMajor>, 1>,
      KernelSideFormat<CellFormat<4, 4, CellOrder::Diagonal>, 1>>>>(&context);
}

#endif  // not GEMMLOWP_SKIP_EXHAUSTIVE_TESTS

// Convenience overload: runs the parameterized TestOutputStages above over
// both result storage orders and a small and a large shape.
template <typename BitDepthParams>
void TestOutputStages() {
  // Test non-default output pipelines with various combinations of
  // output stages.
  TestOutputStages<BitDepthParams, MapOrder::RowMajor>(63, 10, 127, 5, 17, 14);
  TestOutputStages<BitDepthParams, MapOrder::ColMajor>(63, 10, 127, 5, 17, 14);
  TestOutputStages<BitDepthParams, MapOrder::RowMajor>(630, 10, 1270, 5, 17,
                                                       14);
  TestOutputStages<BitDepthParams, MapOrder::ColMajor>(630, 10, 1270, 5, 17,
                                                       14);
}

// Top-level driver: runs every test in this file. Failures abort via Check.
void test() {
#ifdef GEMMLOWP_TEST_PROFILE
  RegisterCurrentThreadForProfiling();
  StartProfiling();
#endif

  // Run a first quick test against hand-calculated data.
  TestWithSmallData();

#ifndef GEMMLOWP_SKIP_EXHAUSTIVE_TESTS
  TestExhaustively<DefaultL8R8BitDepthParams>();
  TestExhaustively<L8R8WithLhsNonzeroBitDepthParams>();
  TestExhaustively<DefaultL7R5BitDepthParams>();  // legacy, same as L8R8
  TestExhaustivelyEightBitIntGemm<eight_bit_int_gemm::BitDepthSetting::A8B8>();
  TestExhaustivelyEightBitIntGemm<eight_bit_int_gemm::BitDepthSetting::A5B7>();
  TestKernels();
#endif

  // Run against actual data from a network evaluation.
  TestWithRealData(eight_bit_int_gemm::BitDepthSetting::A8B8, 0, 0);
  TestWithRealData(eight_bit_int_gemm::BitDepthSetting::A5B7, 2, 10);

  // Test non-default output pipelines with various combinations of
  // output stages.
  TestOutputStages<DefaultL8R8BitDepthParams>();
  TestOutputStages<L8R8WithLhsNonzeroBitDepthParams>();

  // Test per channel quantization.
  TestWithSmallDataPerChannelQuantization();
  TestWithLargeDataPerChannelQuantization();
  TestMultithreadedPerChannelQuantization();
#ifdef GEMMLOWP_TEST_PROFILE
  FinishProfiling();
#endif

  std::cerr << "All tests passed." << std::endl;

  // We have been testing the eight_bit_int_gemm, so we should free its
  // persistent
  // resources now to avoid having leak-checking tools report leaks.
  eight_bit_int_gemm::FreePersistentResources();
}

}  // end namespace gemmlowp

// For iOS, we need to define our own main(), so skip it here.
#if !(defined(__APPLE__) && (TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR)) int main() { gemmlowp::test(); } #endif
google/gemmlowp
test/test.cc
C++
apache-2.0
75,539
<?php /* * InlineStyle MIT License * * Copyright (c) 2010 Christiaan Baartse * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ /** * Parses a html file and applies all embedded and external stylesheets inline * * @author Christiaan Baartse <christiaan@baartse.nl> * @copyright 2010 Christiaan Baartse */ class InlineStyle { /** * @var DOMDocument the HTML as DOMDocument */ protected $_dom; /** * @var CSSQuery instance to use css based selectors on our DOMDocument */ protected $_cssquery; /** * Prepare all the necessary objects * * @param string $html */ public function __construct($html, $encoding = 'UTF-8') { if(!class_exists("CSSQuery")) { throw new Exception( "InlineStyle needs the CSSQuery class"); } $html = htmlspecialchars_decode(htmlentities((string) $html, ENT_NOQUOTES, $encoding), ENT_NOQUOTES); $this->_dom = new DOMDocument(); $this->_dom->formatOutput = true; $this->_dom->preserveWhitespace = false; if(file_exists($html)) { $this->_dom->loadHTMLFile($html); } else { $this->_dom->loadHTML($html); } $this->_cssquery = new CSSQuery($this->_dom); } /** * Applies one or more stylesheets to the current document * * @param string $stylesheet * @return InlineStyle self */ public function applyStylesheet($stylesheet) { $stylesheet = (array) $stylesheet; foreach($stylesheet as $ss) { foreach($this->parseStylesheet($ss) as $arr) { list($selector, $style) = $arr; $this->applyRule($selector, $style); } } return $this; } /** * Applies a style rule on the document * @param string $selector * @param string $style * @return InlineStyle self */ public function applyRule($selector, $style) { $selector = trim(trim($selector), ","); if($selector) { $nodes = array(); foreach(explode(",", $selector) as $sel) { if(false === stripos($sel, ":hover") && false === stripos($sel, ":active") && false === stripos($sel, ":link") && false === stripos($sel, ":visited")) { $nodes = array_merge($nodes, $this->_cssquery->query($sel)); } } $style = $this->_styleToArray($style); foreach($nodes as $node) { $current = $node->hasAttribute("style") ? 
$this->_styleToArray($node->getAttribute("style")) : array(); $current = $this->_mergeStyles($current, $style); $st = array(); foreach($current as $prop => $val) { $st[] = "{$prop}:{$val}"; } $node->setAttribute("style", implode(";", $st)); } } return $this; } /** * Returns the DOMDocument as html * * @return string the HTML */ public function getHTML() { return $this->_dom->saveHTML(); } /** * Recursively extracts the stylesheet nodes from the DOMNode * @param DOMNode $node leave empty to extract from the whole document * @return array the extracted stylesheets */ public function extractStylesheets(DOMNode $node = null, $base = "") { if(null === $node) { $node = $this->_dom; } $stylesheets = array(); if(strtolower($node->nodeName) === "style") { $stylesheets[] = $node->nodeValue; $node->parentNode->removeChild($node); } else if(strtolower($node->nodeName) === "link") { if($node->hasAttribute("href")) { $href = $node->getAttribute("href"); if($base && false === strpos($href, "://")) { $href = "{$base}/{$href}"; } $ext = @file_get_contents($href); if($ext) { $stylesheets[] = $ext; $node->parentNode->removeChild($node); } } } if($node->hasChildNodes()) { foreach($node->childNodes as $child) { $stylesheets = array_merge($stylesheets, $this->extractStylesheets($child, $base)); } } return $stylesheets; } /** * Parses a stylesheet to selectors and properties * @param string $stylesheet * @return array */ public function parseStylesheet($stylesheet) { $parsed = array(); $stylesheet = $this->_stripStylesheet($stylesheet); $stylesheet = trim(trim($stylesheet), "}"); foreach(explode("}", $stylesheet) as $rule) { list($selector, $style) = explode("{", $rule, 2); $parsed[] = array(trim($selector), trim(trim($style), ";")); } return $parsed; } /** * Parses style properties to a array which can be merged by mergeStyles() * @param string $style * @return array */ protected function _styleToArray($style) { $styles = array(); $style = trim(trim($style), ";"); if($style) { 
foreach(explode(";",$style) as $props) { $props = trim(trim($props), ";"); preg_match('#^([-a-z0-9]+):(.*)$#i', $props, $matches); list($match, $prop, $val) = $matches; $styles[$prop] = $val; } } return $styles; } /** * Merges two sets of style properties taking !important into account * @param array $styleA * @param array $styleB * @return array */ protected function _mergeStyles(array $styleA, array $styleB) { foreach($styleB as $prop => $val) { if(!isset($styleA[$prop]) || substr(str_replace(" ", "", strtolower($styleA[$prop])), -10) !== "!important") { $styleA[$prop] = $val; } } return $styleA; } protected function _stripStylesheet($s) { $s = preg_replace('!/\*[^*]*\*+([^/][^*]*\*+)*/!','', $s); $s = str_replace(array("\r\n","\r","\n","\t",' ',' ',' '),'',$s); $s = str_replace('{ ', '{', $s); $s = str_replace(' }', '}', $s); $s = str_replace('; ', ';', $s); return $s; } } /** * This file has had some love from Christiaan Baartse <christiaan@baartse.nl> * * This package contains one class for using Cascading Style Sheet * selectors to retrieve elements from a DOMDocument object similarly * to DOMXPath does with XPath selectors * * PHP version 5 * * @category HTML * @package CSSQuery * @author Sam Shull <sam.shull@jhspecialty.com> * @copyright Copyright (c) 2009 Sam Shull <sam.shull@jhspeicalty.com> * @license <http://www.opensource.org/licenses/mit-license.html> * @version 1.4 * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. * * CHANGES: * 06-08-2009 - added normalize-space function to CSSQuery::className * and removed unecessary sprintf(s) in favor of " strings * and fixed runtime pass-by-reference errors * 07-14-2009 - added references and type hinting to many of the functions * in order to improve performance a little * 07-25-2009 - added support for class (.) and id (#) as filters (div#id.class) * 08-05-2009 - corrected my horrible typing errors * changed the attribute filter handling to match the entire operator */ /** * Perform a CSS query on a DOMDocument using DOMXPath * * <code> * $doc = new DOMDocument(); * $doc->loadHTML('<html><body><p>hello world</p></body></html>'); * $css = new CSSQuery($doc); * print count( $css->query("p:contains('hello world')") ); * </code> * * * @category HTML * @package CSSQuery * @author Sam Shull <sam.shull@jhspecialty.com> * @copyright Copyright (c) 2009 Sam Shull <sam.shull@jhspeicalty.com> * @license <http://www.opensource.org/licenses/mit-license.html> * @version Release: @package_version@ * @link * @since Class available since Release 1.0 */ class CSSQuery { /** * This PCRE string matches one valid css selector at a time * * @const string */ const CHUNKER = '/^\s*([#\.>~\+:\[,]?)\s*(\*|[^\*#\.>~\+:\[\]\)\(\s,]*)/'; /** * This PCRE string matches one psuedo css selector at a time * * @const string */ const PSUEDO = '/^\s*:([\w\-]+)\s*(\(\s*([^\(\)]*(\([^\(\)]*\))?)?\s*\))?\s*/'; /** * This PCRE string matches one css attribute selector at a time * * @const string */ const 
ATTRIBUTES = '/\[@?([\w\-]+(\|[\w\-]+)?)\s*((\S*=)\s*([\'"]?)(?(5)([^\\5]*)\\5|([^\]]+)))?\s*\]/i'; /** * An array of functions representing psuedo selectors * * @access public * * @staticvar array */ public static $filters; /** * An array of functions representing attribute selectors * * @access public * * @staticvar array */ public static $attributeFilters; /** * An instance of DOMXPath for finding the information on the document * * @access public * * @var DOMXPath */ public $xpath; /** * The document that the queries will originate from * * @access public * * @var DOMDocument */ public $document; /** * Initialize the object - opens a new DOMXPath * * @access public * * @param DOMDocument $document */ public function __construct (DOMDocument &$document) { $this->xpath = new DOMXPath($document); $this->document =& $document; } /** * register a namespace * * @access public * * @param string $prefix * @param string $URI * * @returns boolean */ public function registerNamespace ($prefix, $URI) { return $this->xpath->registerNamespace($prefix, $URI); } /** * Get an array of DOMNodes that match a CSS query expression * * @access public * * @param string $expression * @param mixed $context - a DOMNode or an array of DOMNodes * * @returns array */ public function query ($expression, $context=null) { $original_context = func_num_args() < 3 ? $context : func_get_arg(2); $current = $context instanceof DOMNode ? array($context) : self::makeArray($context); $new = array(); $m = array(''); if ($expression && preg_match(self::CHUNKER, $expression, $m)) { //replace a pipe with a semi-colon in a selector //for namespace uses $m[2] = $m[2] ? 
str_replace('|', ':', $m[2]) : '*'; switch ($m[1]) { case ',': { $new = $this->query(ltrim(substr($expression, strpos($expression, $m[1]) + 1)), array(), $original_context); $new = array_merge($current, $new); return self::unique($new); } //#id case '#': { $new = $this->id($m[2], $current); break; } //.class case '.': { $new = $this->className($m[2], $current); break; } // > child case '>': { $new = $this->children($m[2], $current); break; } // + adjacent sibling case '+': { $new = $this->adjacentSibling($m[2],$current); break; } // ~ general sibling case '~': { $new = $this->generalSibling($m[2], $current); break; } //:psuedo-filter case ':': { if ($m[2] == 'root') { $new = array($this->document->documentElement); } //a psuedo selector is a filter elseif (preg_match(self::PSUEDO, $expression, $n)) { if ($n[1] && isset(self::$filters[$n[1]]) && is_callable(self::$filters[$n[1]])) { if (!$current) { $current = $this->xpath->query('//*'); $current = self::makeArray($current); } $i = 0; foreach ($current as $elem) { if ($item = call_user_func(self::$filters[$n[1]], $elem, $i++, $n, $current, $this)) { if ($item instanceof DOMNode) { if (self::inArray($item, $new) < 0) { $new[] = $item; } } //usually boolean elseif (is_scalar($item)) { if ($item) { $new[] = $elem; } } else { $new = array_merge($new, self::makeArray($item)); $new = self::unique($new); } } } } else { throw new Exception("Unknown psuedo-filter: {$m[2]}, in {$expression}"); } //set this for the substr $m[0] = $n[0]; } else { throw new Exception("Unknown use of semi-colon: {$m[2]}, in {$expression}"); } break; } //[attribute="value"] filter case '[': { if (preg_match(self::ATTRIBUTES, $expression, $n)) { //change a pipe to a semi-colon for namespace purposes $n[1] = str_replace('|', ':', $n[1]); if (!isset($n[4]) || !$n[4]) { $n[4] = ''; $n[6] = null; } if (!isset(self::$attributeFilters[$n[4]]) || !is_callable(self::$attributeFilters[$n[4]])) { //print_r($n); //thrown if there is no viable attributeFilter 
function for the given operator throw new Exception("Unknown attribute filter: {$n[4]}"); } if (!$current) { $current = $this->xpath->query('//*'); $current = self::makeArray($current); } foreach ($current as $elem) { if (true === call_user_func(self::$attributeFilters[$n[4]], $elem, $n[1], $n[6], $n, $current)) { $new[] = $elem; } } //set this for the substr $m[0] = $n[0]; } else { //only thrown if query is malformed throw new Exception("Unidentified use of '[' in {$m[0]}"); } break; } //just a tag - i.e. any descendant of the current context default: { $new = $this->tag($m[2], $current); break; } } //check for # or . as filter $exp = substr($expression, strlen($m[0])); while ($exp && ($exp[0] == "." || $exp[0] == "#")) { if (preg_match(self::CHUNKER, $exp, $m)) { $expression = $exp; $new = $m[1] == "." ? $this->className($m[2], $new, true) : $this->id($m[2], $new, true); $exp = substr($expression, strlen($m[0])); } } } return (strlen($m[0]) < strlen($expression) && !empty($new)) //return strlen($m[0]) < strlen($expression) ? 
$this->query(substr($expression, strlen($m[0])), $new, $original_context) : self::unique($new); } /** * get an element by its id attribute * * @access public * * @param string $id * @param array $context * * @returns array */ public function id (&$id, array &$context=array(), $filter=false) { $new = array(); //if a context is present - div#id should act like a filter if ($filter || $context) { foreach ($context as $elem) { if ($elem instanceof DOMElement && $elem->hasAttribute('id') && $elem->getAttribute('id') == $id) { $new[] = $elem; } } } elseif (($items = $this->xpath->query("//*[@id='{$id}']")) && $items->length > 0) { foreach ($items as $item) { $new[] = $item; } } return $new; } /** * get an element by its class attribute * * @access public * * @param string $id * @param array $context * * @returns array */ public function className (&$className, array &$context=array(), $filter=false) { $new = array(); if ($filter && $context) { $regex = '/\s+' . preg_quote($className, '/') . '\s+/'; foreach ($context as $elem) { if ($elem->hasAttribute('class') && preg_match($regex, " {$elem->getAttribute('class')} ")) { $new[] = $elem; } } } //if there is a context for the query elseif ($context) { //06-08-2009 - added normalize-space function, http://westhoffswelt.de/blog/0036_xpath_to_select_html_by_class.html $query = "./descendant::*[ @class and contains( concat(' ', normalize-space(@class), ' '), ' {$className} ') ]"; foreach ($context as $elem) { if ( ($items = $this->xpath->query($query, $elem)) && $items->length > 0 ) { foreach ($items as $item) { $new[] = $item; } } } } //otherwise select any element in the document that matches the selector elseif (($items = $this->xpath->query("//*[ @class and contains( concat(' ', normalize-space(@class), ' '), ' {$className} ') ]")) && $items->length > 0) { foreach ($items as $item) { $new[] = $item; } } return $new; } /** * get the children elements * * @access public * * @param string $tag * @param array $context * * 
@returns array */ public function children (&$tag='*', array &$context=array()) { $new = array(); $query = "./{$tag}"; //if there is a context for the query if ($context) { foreach ($context as $elem) { if (($items = $this->xpath->query($query, $elem)) && $items->length > 0) { foreach ($items as $item) { $new[] = $item; } } } } //otherwise select any element in the document that matches the selector elseif (($items = $this->xpath->query($query, $this->document->documentElement)) && $items->length > 0) { foreach ($items as $item) { $new[] = $item; } } return $new; } /** * get the adjacent sibling elements * * @access public * * @param string $tag * @param array $context * * @returns array */ public function adjacentSibling (&$tag='*', array &$context=array()) { $new = array(); $tag = strtolower($tag); //if there is a context for the query if ($context) { foreach ($context as $elem) { if ($tag == '*' || strtolower($elem->nextSibling->nodeName) == $tag) { $new[] = $elem->nextSibling; } } } return $new; } /** * get the all sibling elements * * @access public * * @param string $tag * @param array $context * * @returns array */ public function generalSibling (&$tag='*', array &$context=array()) { $new = array(); //if there is a context for the query if ($context) { $query = "./following-sibling::{$tag} | ./preceding-sibling::{$tag}"; foreach ($context as $elem) { if (($items = $this->xpath->query($query, $elem)) && $items->length > 0) { foreach ($items as $item) { $new[] = $item; } } } } return $new; } /** * get the all descendant elements * * @access public * * @param string $tag * @param array $context * * @returns array */ public function tag (&$tag='*', array &$context=array()) { $new = array(); //get all the descendants with the given tagName if ($context) { $query = "./descendant::{$tag}"; foreach ($context as $elem) { if ($items = $this->xpath->query($query, $elem)) { foreach ($items as $item) { $new[] = $item; } } } } //get all elements with the given tagName 
else { if ($items = $this->xpath->query("//{$tag}")) { foreach ($items as $item) { $new[] = $item; } } } return $new; } /** * A utility function for calculating nth-* style psuedo selectors * * @static * @access public * * @param DOMNode $context - the element whose position is being calculated * @param string $func - the name of the psuedo function that is being calculated for * @param string $expr - the string argument for the selector * @param DOMXPath $xpath - an existing xpath instance for the document that the context belong to * * @returns boolean */ public static function nthChild (DOMNode &$context, $func, $expr, DOMXPath &$xpath) { //remove all the whitespace $expr = preg_replace('/\s+/', '', trim(strtolower($expr))); //all if ($expr == 'n' || $expr == 'n+0' || $expr == '1n+0' || $expr == '1n') { return true; } //the direction we will look for siblings $DIR = (stristr($func, 'last') ? 'following' : 'preceding'); //do a tagName check? $type = stristr($func, 'type') ? '[local-name()=name(.)]' : ''; //the position of this node $count = $xpath->evaluate("count( {$DIR}-sibling::*{$type} ) + 1", $context); //odd if($expr == 'odd' || $expr == '2n+1') { return $count % 2 != 0; } //even elseif($expr == 'even' || $expr == '2n' || $expr == '2n+0') { return $count > 0 && $count % 2 == 0; } //a particular position elseif(preg_match('/^([\+\-]?\d+)$/i', $expr, $mat)) { $d = (stristr($func, 'last') ? -1 : 1) * intval($mat[1]); $r = $xpath->query(sprintf('../%s', $type ? $context->tagName : '*'), $context); return $r && $r->length >= abs($d) && ($d > 0 ? $r->item($d - 1)->isSameNode($context) : $r->item($r->length + $d)->isSameNode($context)); } //grouped after a particular position elseif(preg_match('/^([\+\-]?\d*)?n([\+\-]\d+)?/i', $expr, $mat)) { $a = (isset($mat[2]) && $mat[2] ? intval($mat[2]) : 0); $b = (isset($mat[2]) && $mat[2] ? 
intval($mat[2]) : 1); return ($a == 0 && $count == $b) || ($a > 0 && $count >= $b && ($count - $b) % $a == 0) || ($a < 0 && $count <= $b && (($b - $count) % ($a * -1)) == 0); } return false; } /** * A utility function for filtering inputs of a specific type * * @static * @access public * * @param mixed $elem * @param string $type * * @returns boolean */ public static function inputFilter (&$elem, $type) { $t = trim(strtolower($type)); //gotta be a -DOMNode- DOMElement return $elem instanceof DOMElement && //with the tagName input strtolower($elem->tagName) == 'input' && ( ($t == 'text' && !$elem->hasAttribute('type')) || ($t == 'button' && strtolower($e->tagName) == "button") || ( //and the attribute type $elem->hasAttribute('type') && //the attribute type should match the given variable type case insensitive trim(strtolower($elem->getAttribute('type'))) == $t ) ); } /** * A utility function for making an iterable object into an array * * @static * @access public * * @param array|Traversable $arr * * @return array */ public static function makeArray (&$arr) { if (is_array($arr)) { return array_values($arr); } $ret = array(); if ($arr) { foreach ($arr as $elem) { $ret[count($ret)] = $elem; } } return $ret; } /** * A utility function for stripping duplicate elements from an array * works on DOMNodes * * @static * @access public * * @param array|Traversable $arr * * @returns array */ public static function unique (&$arr) { //first step make sure all the elements are unique $new = array(); foreach ($arr as $current) { if ( //if the new array is empty //just put the element in the array empty($new) || ( //if it is not an instance of a DOMNode //no need to check for isSameNode !($current instanceof DOMNode) && !in_array($current, $new) ) || //do DOMNode test on array self::inArray($current, $new) < 0 ) { $new[] = $current; } } return $new; } /** * A utility function for determining the position of an element in an array * works on DOMNodes, returns -1 on failure * * 
@static * @access public * * @param mixed $elem * @param array|Traversable $arr * * @returns integer */ public static function inArray (DOMNode $elem, $arr) { $i = 0; foreach ($arr as $current) { //if it is an identical object or a DOMElement that represents the same node if ($current === $elem || ($current instanceof DOMNode && $current->isSameNode($elem))) { return $i; } $i += 1; } return -1; } /** * A utility function for filtering elements from an array or array-like object * * @static * @access public * * @param mixed $elem * @param array|Traversable $arr * * @returns array */ public static function filter ($array, $func) { $ret = array(); if (!is_callable($func)) { return $array; } foreach ($array as $n => $v) { if (false !== call_user_func($func, $v, $n, $array, $this)) { $ret[] = $v; } } return $ret; } /** * A static function designed to make it easier to get the info * * @static * @access public * * @param string $query * @param mixed $context * @param array|Traversable $ret - passed by reference * * @return array */ public static function find ($query, $context, $ret=null) { $new = array(); //query using DOMDocument if ($context instanceof DOMDocument) { $css = new self($context); $new = $css->query($query); } elseif ($context instanceof DOMNodeList) { if ($context->length) { $css = new self($context->item(0)->ownerDocument); $new = $css->query($query, $context); } } //should be an array if it isn't a DOMNode //in which case the first element should be a DOMNode //representing the desired context elseif (!($context instanceof DOMNode) && count($context)) { $css = new self($context[0]->ownerDocument); $new = $css->query($query, $context); } //otherwise use the ownerDocument and the context as the context of the query else { $css = new self($context->ownerDocument); $new = $css->query($query, $context); } //if there is a place to store the newly selected elements if ($ret) { //append the newly selected elements to the given array|object //or if it is an 
instance of ArrayAccess just push it on to the object if (is_array($ret)) { $new = array_merge($ret, $new); $new = self::unique($new); $ret = $new; } elseif (is_object($ret)) { if ($ret instanceof ArrayAccess) { foreach ($new as $elem) { $ret[count($ret)] = $elem; } } //appending elements to a DOMDocumentFragment is a fast way to move them around elseif ($ret instanceof DOMDocumentFragment) { foreach ($new as $elem) { //appendChild, but don't forget to verify same document $ret->appendChild( !$ret->ownerDocument->isSameNode($elem->ownerDocument) ? $ret->ownerDocument->importNode($elem, true) : $elem); } } //otherwise we need to find a method to use to attach the elements elseif (($m = method_exists($ret, 'push')) || method_exists($ret, 'add')) { $method = $m ? 'push' : 'add'; foreach ($new as $elem) { $ret->$method($elem); } } elseif (($m = method_exists($ret, 'concat')) || method_exists($ret, 'concatenate')) { $method = $m ? 'concat' : 'concatenate'; $ret->$method($new); } } //this will save the selected elements into a string elseif (is_string($ret)) { foreach ($new as $elem) { $ret .= $elem->ownerDocument->saveXML($elem); } } } return $new; } } /** * this creates the default filters array on the CSSQuery object * * <code> * //prototype function (DOMNode $element, integer $i, array $matches, array $context, CSSQuery $cssQuery); * CSSQuery::$filters['myfilter'] = create_function('', ''); * * </code> */ CSSQuery::$filters = new RecursiveArrayIterator(array( //CSS3 selectors 'first-child' => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return !$e->isSameNode($e->ownerDocument->documentElement) && $c->xpath->query("../*[position()=1]", $e)->item(0)->isSameNode($e);'), 'last-child' => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return !$e->isSameNode($e->ownerDocument->documentElement) && $c->xpath->query("../*[last()]", $e)->item(0)->isSameNode($e);'), 'only-child' => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return 
!$e->isSameNode($e->ownerDocument->documentElement) && $e->parentNode->getElementsByTagName("*")->length == 1;'), 'checked' => create_function('DOMNode $e', 'return strtolower($e->tagName) == "input" && $e->hasAttribute("checked");'), 'disabled' => create_function('DOMNode $e', 'return $e->hasAttribute("disabled") && stristr("|input|textarea|select|button|", "|".$e->tagName."|") !== false;'), 'enabled' => create_function('DOMNode $e', 'return !$e->hasAttribute("disabled") && stristr("|input|textarea|select|button|", "|".$e->tagName . "|") !== false && (!$e->hasAttribute("type") || strtolower($e->getAttribute("type")) != "hidden");'), //nth child selectors "nth-child" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return CSSQuery::nthChild($e, "nth-child", $m[3], $c->xpath);'), "nth-last-child" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return CSSQuery::nthChild($e, "nth-last-child", $m[3], $c->xpath);'), "nth-of-type" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return CSSQuery::nthChild($e, "nth-of-type", $m[3], $c->xpath);'), "nth-last-of-type" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return CSSQuery::nthChild($e, "nth-last-of-type", $m[3], $c->xpath);'), "first-of-type" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return call_user_func(CSSQuery::$filters["nth-of-type"], $e, $i, array(0,1,1,1), $a, $c);'), "last-of-type" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return call_user_func(CSSQuery::$filters["nth-last-of-type"],$e, $i, array(0,1,1,1), $a, $c);'), "only-of-type" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return call_user_func(CSSQuery::$filters["first-of-type"], $e, $i, $m, $a, $c) && call_user_func(CSSQuery::$filters["last-of-type"], $e, $i, $m, $a, $c);'), //closest thing to the lang filter "lang" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return $c->xpath->evaluate( sprintf( "count(./ancestor-or-self::*[@lang and (@lang =". 
" \"%s\" or substring(@lang, 1, %u)=\"%s-\")])", $m[3], strlen($m[3]) + 1, $m[3] ), $e ) > 0;'), //negation filter "not" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return CSSQuery::inArray($e, $c->query(trim($m[3]))) == -1;'), //element has no child nodes "empty" => create_function('DOMNode $e', 'return !$e->hasChildNodes();'), //element has child nodes that are elements "parent" => create_function('DOMNode $e', 'return ($n = $e->getElementsByTagName("*")) && $n->length > 0;'), //get the parent node of the current element "parent-node" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', '//if there is no filter just return the first parentNode if (!$m || !isset($m[3]) || !trim($m[3])) return $e->parentNode; //otherwise if the filter is more than a tagName return preg_match("/^(\*|\w+)([^\w]+.+)/", trim($m[3]), $n) ? CSSQuery::find(trim($n[2]), $c->xpath->query("./ancestor::{$n[1]}", $e)) //if the filter is only a tagName save the trouble : $c->xpath->query(sprintf("./ancestor::%s", trim($m[3])), $e);'), //get the ancestors of the current element "parents" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', '$r = $c->xpath->query("./ancestor::*", $e); return $m && isset($m[3]) && trim($m[3]) ? CSSQuery::find(trim($m[3]), $r) : $r;'), //the element has nextSiblings "next-sibling" => create_function('DOMNode $e', 'return ($n = $e->parentNode->getElementsByTagName("*")) && !$n->item($n->length-1)->isSameNode($e);'), //the element has previousSiblings "previous-sibling" => create_function('DOMNode $e', 'return !$e->parentNode->getElementsByTagName("*")->item(0)->isSameNode($e);'), //get the previousSiblings of the current element "previous-siblings" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', '$r = $c->xpath->query("./preceding-sibling::*", $e); return $m && isset($m[3]) && trim($m[3]) ? 
CSSQuery::find(trim($m[3]), $r) : $r;'), //get the nextSiblings of the current element "next-siblings" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', '$r = $c->xpath->query("./following-sibling::*", $e); return $m && isset($m[3]) && trim($m[3]) ? CSSQuery::find(trim($m[3]), $r) : $r;'), //get all the siblings of the current element "siblings" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', '$r = $c->xpath->query("./preceding-sibling::* | ./following-sibling::*", $e); return $m && isset($m[3]) && trim($m[3]) ? CSSQuery::find(trim($m[3]), $r) : $r;'), //select the header elements "header" => create_function('DOMNode $e', 'return (bool)preg_match("/^h[1-6]$/i", $e->tagName);'), //form element selectors "selected" => create_function('DOMNode $e', 'return $e->hasAttribute("selected");'), //any element that would be considered input based on tagName "input" => create_function('DOMNode $e', 'return stristr("|input|textarea|select|button|", "|" . $e->tagName . "|") !== false;'), //any input element and type "radio" => create_function('DOMNode $e', 'return CSSQuery::inputFilter($e, "radio");'), "checkbox" => create_function('DOMNode $e', 'return CSSQuery::inputFilter($e, "checkbox");'), "file" => create_function('DOMNode $e', 'return CSSQuery::inputFilter($e, "file");'), "password" => create_function('DOMNode $e', 'return CSSQuery::inputFilter($e, "password");'), "submit" => create_function('DOMNode $e', 'return CSSQuery::inputFilter($e, "submit");'), "image" => create_function('DOMNode $e', 'return CSSQuery::inputFilter($e, "image");'), "reset" => create_function('DOMNode $e', 'return CSSQuery::inputFilter($e, "reset");'), "button" => create_function('DOMNode $e', 'return CSSQuery::inputFilter($e, "button");'), "text" => create_function('DOMNode $e', 'return CSSQuery::inputFilter($e, "text");'), //limiting filter "has" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return count($c->query($m[3], $e)) > 0;'), //text limiting filter "contains" => 
create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return strstr($e->textContent, preg_replace("/^\s*([\'\"])(.*)\\\\1\s*$/", "\\\\2", $m[3]));'), "Contains" => create_function('DOMNode $e,$i,$m,$a,CSSQuery $c', 'return stristr($e->textContent, preg_replace("/^\s*([\'\"])(.*)\\\\1\s*$/", "\\\\2", $m[3]));'), //positional selectors for the current node-set "first" => create_function('DOMNode $e,$i', 'return $i === 0;'), "last" => create_function('DOMNode $e,$i,$m,$a', 'return $i === (count($a) - 1);'), "lt" => create_function('DOMNode $e,$i,$m', 'return $i < $m[3];'), "gt" => create_function('DOMNode $e,$i,$m', 'return $i > $m[3];'), "eq" => create_function('DOMNode $e,$i,$m', 'return $i === intval($m[3]);'), //works like nth-child on the currently selected node-set "nth" => create_function('DOMNode $e,$i,$m', '$expr = preg_replace("/\s+/", "", strtolower(trim($m[3]))); //these selectors select all so dont waste time figuring them out if ($expr == "n" || $expr == "n+0" || $expr == "1n+0" || $expr == "1n") { return true; } //even numbered elements elseif ($expr == "even" || $expr == "2n" || $expr == "2n+0") { return $i % 2 == 0; } //odd numbered elements elseif ($expr == "odd" || $expr == "2n+1") { return $i % 2 != 0; } //positional - a negative position is not supported elseif (preg_match("/^([\+\-]?\d+)$/i", $expr, $mat)) { return $i == intval($mat[1]); } //grouped according to a position elseif (preg_match("/^([\+\-]?\d*)?n([\+\-]\d+)?/i", $expr, $mat)) { $a = (isset($mat[2]) && $mat[2] ? intval($mat[2]) : 0); $b = (isset($mat[2]) && $mat[2] ? 
intval($mat[2]) : 1); return ($a == 0 && $i == $b) || ($a > 0 && $i >= $b && ($i - $b) % $a == 0) || ($a < 0 && $i <= $b && (($b - $i) % ($a * -1)) == 0); } return false; '), ), 2); /** * create a default array of attribute filters * * <code> * //prototype function (DOMNode $element, string $attributeName, string $value = '', array $matches, array $context=array()); * CSSQuery::$attributeFilters['>'] = create_function('', ''); * * </code> */ CSSQuery::$attributeFilters = new RecursiveArrayIterator(array( //hasAttribute and/or attribute == value "" => create_function('$e,$a,$v=null', 'return $e->hasAttribute($a);'), //hasAttribute and/or attribute == value "=" => create_function('$e,$a,$v=null', 'return $e->hasAttribute($a) && $e->getAttribute($a) == $v;'), //!hasAttribute or attribute != value "!=" => create_function('$e,$a,$v', 'return !$e->hasAttribute($a) || $e->getAttribute($a) != $v;'), //hasAttribute and the attribute begins with value "^=" => create_function('$e,$a,$v', 'return $e->hasAttribute($a) && substr($e->getAttribute($a), 0, strlen($v)) == $v;'), //hasAttribute and the attribute ends with value '$=' => create_function('$e,$a,$v', 'return $e->hasAttribute($a) && substr($e->getAttribute($a), -strlen($v)) == $v;'), //hasAttribute and the attribute begins with value . - "|=" => create_function('$e,$a,$v', 'return $e->hasAttribute($a) && substr($e->getAttribute($a), 0, strlen($v) + 1) == $v."-";'), //hasAttribute and attribute contains value "*=" => create_function('$e,$a,$v', 'return $e->hasAttribute($a) && strstr($e->getAttribute($a), $v);'), //special //hasAttribute and attribute contains value - case insensitive "%=" => create_function('$e,$a,$v', 'return $e->hasAttribute($a) && stristr($e->getAttribute($a), $v);'), //hasAttribute and the attrributes value matches the given PCRE pattern "@=" => create_function('$e,$a,$v', 'return $e->hasAttribute($a) && preg_match($v, $e->getAttribute($a));'), ), 2); ?>
juniorug/vidacon
js/myMail/classes/libs/InlineStyle.php
PHP
apache-2.0
53,238
/// Copyright (c) 2009 Microsoft Corporation /// /// Redistribution and use in source and binary forms, with or without modification, are permitted provided /// that the following conditions are met: /// * Redistributions of source code must retain the above copyright notice, this list of conditions and /// the following disclaimer. /// * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and /// the following disclaimer in the documentation and/or other materials provided with the distribution. /// * Neither the name of Microsoft nor the names of its contributors may be used to /// endorse or promote products derived from this software without specific prior written permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR /// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS /// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE /// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT /// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS /// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, /// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF /// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
ES5Harness.registerTest({
    id: "15.2.3.6-4-45",

    path: "TestCases/chapter15/15.2/15.2.3/15.2.3.6/15.2.3.6-4-45.js",

    description: "Object.defineProperty - 'O' is the global object that uses Object's [[GetOwnProperty]] method to access the 'name' property (8.12.9 step 1)",

    // Defines a data property "foo" on the global object and verifies the
    // resulting attributes; the property is removed again no matter what
    // happens so later tests see an unmodified global.
    test: function testcase() {
        var descriptor = {
            value: 12,
            configurable: true
        };
        try {
            Object.defineProperty(fnGlobalObject(), "foo", descriptor);
            // Expect: value 12, writable false, enumerable false, configurable true.
            return dataPropertyAttributesAreCorrect(fnGlobalObject(), "foo", 12, false, false, true);
        } finally {
            delete fnGlobalObject().foo;
        }
    },

    // Only meaningful on implementations that provide Object.defineProperty.
    precondition: function prereq() {
        return fnExists(Object.defineProperty);
    }
});
hnafar/IronJS
Src/Tests/ietestcenter/chapter15/15.2/15.2.3/15.2.3.6/15.2.3.6-4-45.js
JavaScript
apache-2.0
2,309
'use strict';

/**
 * @license
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

goog.provide('tachyfont.WorkQueue');

goog.require('goog.Promise');
goog.require('tachyfont.Reporter');



/**
 * This class manages work queues.
 * @param {string} name An identifier useful for error reports.
 * @constructor @struct @final
 */
tachyfont.WorkQueue = function(name) {
  /** @private @const {string} */
  this.name_ = name;

  /** @private {!Array<!tachyfont.WorkQueue.Task>} */
  this.queue_ = [];
};


/**
 * Adds a task.
 * @param {function(?)} taskFunction The function to call.
 * @param {*} data The data to pass to the function.
 * @param {string} fontId An identifier useful for error messages.
 * @param {number=} opt_watchDogTime Option watch dog time.
 * @return {!tachyfont.WorkQueue.Task} A task object.
 */
tachyfont.WorkQueue.prototype.addTask = function(
    taskFunction, data, fontId, opt_watchDogTime) {
  var task = new tachyfont.WorkQueue.Task(
      taskFunction, data, fontId, opt_watchDogTime);
  this.queue_.push(task);
  // If this was the only queued task no runner is active yet; start one.
  // Otherwise the currently running task will pick up this one when it
  // finishes (see runNextTask).
  if (this.queue_.length == 1) {
    this.runNextTask();
  }
  return task;
};


/**
 * Runs the task at the head of the queue (if any), then recursively
 * schedules the next one when it settles.
 */
tachyfont.WorkQueue.prototype.runNextTask = function() {
  if (this.queue_.length < 1) {
    return;
  }
  var task = this.queue_[0];
  task.run().thenAlways(
      /** @this {tachyfont.WorkQueue} */
      function() {
        // Only dequeue after the task settles so addTask sees a non-empty
        // queue while work is in flight.
        this.queue_.shift();
        this.runNextTask();
      },
      this);
};


/**
 * Gets the queue length.
 * @return {number}
 */
tachyfont.WorkQueue.prototype.getLength = function() {
  return this.queue_.length;
};


/**
 * Enum for error values.
 * @enum {string}
 */
// LINT.IfChange
tachyfont.WorkQueue.Error = {
  FILE_ID: 'EWQ',
  WATCH_DOG_TIMER: '01',
  END: '00'
};
// LINT.ThenChange(//depot/google3/\
//     java/com/google/i18n/tachyfont/boq/gen204/error-reports.properties)


/**
 * The error reporter for this file.
 * @param {string} errNum The error number;
 * @param {string} errId Identifies the error.
 * @param {*} errInfo The error object;
 */
tachyfont.WorkQueue.reportError = function(errNum, errId, errInfo) {
  tachyfont.Reporter.reportError(
      tachyfont.WorkQueue.Error.FILE_ID + errNum, errId, errInfo);
};


/**
 * Gets the work queue name
 * @return {string}
 */
tachyfont.WorkQueue.prototype.getName = function() {
  return this.name_;
};



/**
 * A class that holds a task.
 * @param {function(?)} taskFunction The function to call.
 * @param {*} data The data to pass to the function.
 * @param {string} fontId An indentifer for error reporting.
 * @param {number=} opt_watchDogTime Option watch dog time.
 * @constructor @struct @final
 */
tachyfont.WorkQueue.Task = function(
    taskFunction, data, fontId, opt_watchDogTime) {
  var resolver;

  /** @private {function(?)} */
  this.function_ = taskFunction;

  /** @private {*} */
  this.data_ = data;

  /** @private {string} */
  this.fontId_ = fontId;

  /** @private {!goog.Promise<?,?>} */
  this.result_ = new goog.Promise(function(resolve, reject) {
    // Capture the resolve function so the task can be settled later from
    // resolve_(); without this assignment resolver_ would be undefined and
    // resolve_() would throw. (The assignment had been accidentally
    // commented out.)
    resolver = resolve;
  });

  /** @private {function(*=)} */
  this.resolver_ = resolver;

  /** @private {?number} */
  this.timeoutId_ = null;

  /** @private {number} */
  this.watchDogTime_ =
      opt_watchDogTime || tachyfont.WorkQueue.Task.WATCH_DOG_TIME;
};


/**
 * The time in milliseconds to wait before reporting that a running task did not
 * complete.
 * @type {number}
 */
tachyfont.WorkQueue.Task.WATCH_DOG_TIME = 10 * 60 * 1000;


/**
 * Gets the task result promise (may be unresolved).
 * @return {!goog.Promise<?,?>}
 */
tachyfont.WorkQueue.Task.prototype.getResult = function() {
  return this.result_;
};


/**
 * Resolves the task result promise.
 * @param {*} result The result of the function. May be any value including a
 *     resolved/rejected promise.
 * @return {!goog.Promise<?,?>}
 * @private
 */
tachyfont.WorkQueue.Task.prototype.resolve_ = function(result) {
  this.resolver_(result);
  return this.result_;
};


/**
 * Runs the task.
 * @return {*}
 */
tachyfont.WorkQueue.Task.prototype.run = function() {
  this.startWatchDogTimer_();
  var result;
  try {
    result = this.function_(this.data_);
  } catch (e) {
    // Surface synchronous failures through the same promise channel as
    // asynchronous ones.
    result = goog.Promise.reject(e);
  }
  this.result_.thenAlways(function() {
    // Cancel the watchdog as soon as the task settles, success or failure;
    // otherwise every completed task would still fire a spurious
    // WATCH_DOG_TIMER error report. (This call had been accidentally
    // commented out.)
    this.stopWatchDogTimer_();
  }.bind(this));
  return this.resolve_(result);
};


/**
 * Starts the watch dog timer. If the task does not settle within
 * watchDogTime_ milliseconds an error is reported.
 * @private
 */
tachyfont.WorkQueue.Task.prototype.startWatchDogTimer_ = function() {
  this.timeoutId_ = setTimeout(function() {
    this.timeoutId_ = null;
    tachyfont.WorkQueue.reportError(
        tachyfont.WorkQueue.Error.WATCH_DOG_TIMER, this.fontId_, '');
  }.bind(this), this.watchDogTime_);
};


/**
 * Stops the watch dog timer.
 * @private
 */
tachyfont.WorkQueue.Task.prototype.stopWatchDogTimer_ = function() {
  if (this.timeoutId_) {
    clearTimeout(this.timeoutId_);
  }
  this.timeoutId_ = null;
};
googlefonts/TachyFont
run_time/src/gae_server/www/js/tachyfont/work_queue.js
JavaScript
apache-2.0
5,429
package com.thinkbiganalytics.metadata.jpa.app; /*- * #%L * thinkbig-operational-metadata-jpa * %% * Copyright (C) 2017 ThinkBig Analytics * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ import com.thinkbiganalytics.KyloVersionUtil; import com.thinkbiganalytics.KyloVersion; import com.thinkbiganalytics.metadata.api.app.KyloVersionProvider; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.data.domain.Sort; import org.springframework.data.domain.Sort.Direction; import java.io.IOException; import java.io.InputStream; import java.net.URL; import java.util.List; import java.util.Properties; import javax.annotation.PostConstruct; /** * Provider for accessing and updating the Kylo version */ public class JpaKyloVersionProvider implements KyloVersionProvider { private static final Logger log = LoggerFactory.getLogger(JpaKyloVersionProvider.class); private static final Sort SORT_ORDER = new Sort(new Sort.Order(Direction.DESC, "majorVersion"), new Sort.Order(Direction.DESC, "minorVersion"), new Sort.Order(Direction.DESC, "pointVersion"), new Sort.Order(Direction.DESC, "tag") ); private KyloVersionRepository kyloVersionRepository; private String currentVersion; private String buildTimestamp; @Autowired public JpaKyloVersionProvider(KyloVersionRepository kyloVersionRepository) { this.kyloVersionRepository = kyloVersionRepository; } @Override public boolean isUpToDate() { KyloVersion 
buildVer = KyloVersionUtil.getBuildVersion(); KyloVersion currentVer = getCurrentVersion(); return currentVer != null && buildVer.matches(currentVer.getMajorVersion(), currentVer.getMinorVersion(), currentVer.getPointVersion()); } @Override public KyloVersion getCurrentVersion() { List<JpaKyloVersion> versions = kyloVersionRepository.findAll(SORT_ORDER); if (versions != null && !versions.isEmpty()) { return versions.get(0); } return null; } /* (non-Javadoc) * @see com.thinkbiganalytics.metadata.api.app.KyloVersionProvider#setCurrentVersion(com.thinkbiganalytics.KyloVersion) */ @Override public void setCurrentVersion(KyloVersion version) { JpaKyloVersion update = new JpaKyloVersion(version.getMajorVersion(), version.getMinorVersion(), version.getPointVersion(), version.getTag()); kyloVersionRepository.save(update); } @Override public KyloVersion getBuildVersion() { return KyloVersionUtil.getBuildVersion(); } @PostConstruct private void init() { getBuildVersion(); } }
peter-gergely-horvath/kylo
core/operational-metadata/operational-metadata-jpa/src/main/java/com/thinkbiganalytics/metadata/jpa/app/JpaKyloVersionProvider.java
Java
apache-2.0
3,679
package com.planet_ink.coffee_mud.Abilities.Spells; import com.planet_ink.coffee_mud.core.interfaces.*; import com.planet_ink.coffee_mud.core.*; import com.planet_ink.coffee_mud.core.collections.*; import com.planet_ink.coffee_mud.Abilities.interfaces.*; import com.planet_ink.coffee_mud.Areas.interfaces.*; import com.planet_ink.coffee_mud.Behaviors.interfaces.*; import com.planet_ink.coffee_mud.CharClasses.interfaces.*; import com.planet_ink.coffee_mud.Commands.interfaces.*; import com.planet_ink.coffee_mud.Common.interfaces.*; import com.planet_ink.coffee_mud.Exits.interfaces.*; import com.planet_ink.coffee_mud.Items.interfaces.*; import com.planet_ink.coffee_mud.Libraries.interfaces.*; import com.planet_ink.coffee_mud.Locales.interfaces.*; import com.planet_ink.coffee_mud.MOBS.interfaces.*; import com.planet_ink.coffee_mud.Races.interfaces.*; import java.util.*; /* Copyright 2003-2015 Bo Zimmerman Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ @SuppressWarnings("rawtypes") public class Spell_ArcaneMark extends Spell { @Override public String ID() { return "Spell_ArcaneMark"; } private final static String localizedName = CMLib.lang().L("Arcane Mark"); @Override public String name() { return localizedName; } @Override public int abstractQuality(){ return Ability.QUALITY_INDIFFERENT;} @Override protected int canAffectCode(){return 0;} @Override protected int canTargetCode(){return CAN_ITEMS;} @Override public int classificationCode(){ return Ability.ACODE_SPELL|Ability.DOMAIN_ALTERATION;} @Override public boolean invoke(MOB mob, Vector commands, Physical givenTarget, boolean auto, int asLevel) { if(commands.size()<2) { mob.tell(L("You must specify what object you want the spell cast on, and the message you wish the object have marked upon it. ")); return false; } final Physical target=mob.location().fetchFromMOBRoomFavorsItems(mob,null,((String)commands.elementAt(0)),Wearable.FILTER_UNWORNONLY); if((target==null)||(!CMLib.flags().canBeSeenBy(target,mob))) { mob.tell(L("You don't see '@x1' here.",((String)commands.elementAt(0)))); return false; } if((!(target instanceof Item))||(!target.isGeneric())) { mob.tell(L("You can't can't cast this on @x1.",target.name(mob))); return false; } final String message=CMParms.combine(commands,1); if(!super.invoke(mob,commands,givenTarget,auto,asLevel)) return false; final boolean success=proficiencyCheck(mob,0,auto); if(success) { final CMMsg msg=CMClass.getMsg(mob,target,this,verbalCastCode(mob,target,auto),L("^S<S-NAME> invoke(s) a spell upon <T-NAMESELF>.^?")); if(mob.location().okMessage(mob,msg)) { mob.location().send(mob,msg); if(target.description().indexOf("Some markings on it say")>=0) target.setDescription(L("@x1 Some other markings say `@x2`.",target.description(),message)); else target.setDescription(L("@x1 Some markings on it say `@x2`.",target.description(),message)); } } else beneficialWordsFizzle(mob,target,L("<S-NAME> attempt(s) to invoke a spell upon 
<T-NAMESELF>, but the spell fizzles.")); // return whether it worked return success; } }
Tycheo/coffeemud
com/planet_ink/coffee_mud/Abilities/Spells/Spell_ArcaneMark.java
Java
apache-2.0
3,763
package oidc import ( "crypto/rand" "encoding/base64" "errors" "fmt" "net" "net/http" "net/url" "strings" "time" "github.com/coreos/go-oidc/jose" ) func ParseTokenFromRequest(r *http.Request) (token jose.JWT, err error) { ah := r.Header.Get("Authorization") if ah == "" { err = errors.New("missing Authorization header") return } if len(ah) <= 6 || strings.ToUpper(ah[0:6]) != "BEARER" { err = errors.New("should be a bearer token") return } return jose.ParseJWT(ah[7:]) } func NewClaims(iss, sub, aud string, iat, exp time.Time) jose.Claims { return jose.Claims{ // required "iss": iss, "sub": sub, "aud": aud, "iat": float64(iat.Unix()), "exp": float64(exp.Unix()), } } func GenClientID(hostport string) (string, error) { b, err := randBytes(32) if err != nil { return "", err } var host string if strings.Contains(hostport, ":") { host, _, err = net.SplitHostPort(hostport) if err != nil { return "", err } } else { host = hostport } return fmt.Sprintf("%s@%s", base64.URLEncoding.EncodeToString(b), host), nil } func randBytes(n int) ([]byte, error) { b := make([]byte, n) got, err := rand.Read(b) if err != nil { return nil, err } else if n != got { return nil, errors.New("unable to generate enough random data") } return b, nil } // urlEqual checks two urls for equality using only the host and path portions. func urlEqual(url1, url2 string) bool { u1, err := url.Parse(url1) if err != nil { return false } u2, err := url.Parse(url2) if err != nil { return false } return strings.ToLower(u1.Host+u1.Path) == strings.ToLower(u2.Host+u2.Path) }
yifan-gu/go-oidc
oidc/util.go
GO
apache-2.0
1,643
'use strict';

var debug = require('debug')('ali-oss:sts');
var crypto = require('crypto');
var querystring = require('querystring');
var copy = require('copy-to');
var AgentKeepalive = require('agentkeepalive');
var is = require('is-type-of');
var ms = require('humanize-ms');
var urllib = require('urllib');

var globalHttpAgent = new AgentKeepalive();

module.exports = STS;

/**
 * Aliyun STS (Security Token Service) client.
 *
 * @param {Object} options
 *   - {String} accessKeyId (required)
 *   - {String} accessKeySecret (required)
 *   - {String} [endpoint='https://sts.aliyuncs.com']
 *   - {Object} [urllib] custom urllib-compatible client
 *   - {Object} [agent] custom http agent (ignored when `urllib` is given)
 */
function STS(options) {
  if (!(this instanceof STS)) {
    return new STS(options);
  }

  if (!options || !options.accessKeyId || !options.accessKeySecret) {
    throw new Error('require accessKeyId, accessKeySecret');
  }

  this.options = {
    endpoint: options.endpoint || 'https://sts.aliyuncs.com',
    format: 'JSON',
    apiVersion: '2015-04-01',
    sigMethod: 'HMAC-SHA1',
    sigVersion: '1.0',
    timeout: '60s'
  };
  copy(options).to(this.options);

  // support custom agent and urllib client
  if (this.options.urllib) {
    this.urllib = this.options.urllib;
  } else {
    this.urllib = urllib;
    this.agent = this.options.agent || globalHttpAgent;
  }
}

var proto = STS.prototype;

/**
 * STS opertaions
 */

/**
 * Assume a RAM role and obtain temporary credentials.
 *
 * @param {String} role role ARN
 * @param {String|Object} [policy] extra policy restricting the credentials
 * @param {Number} [expiration=3600] credential lifetime in seconds
 * @param {String} [session='app'] role session name
 * @param {Object} [options] { timeout, ctx }
 * @return {Object} { res, credentials }
 */
proto.assumeRole = function* assumeRole(role, policy, expiration, session, options) {
  var opts = this.options;
  var params = {
    'Action': 'AssumeRole',
    'RoleArn': role,
    'RoleSessionName': session || 'app',
    'DurationSeconds': expiration || 3600,

    'Format': opts.format,
    'Version': opts.apiVersion,
    'AccessKeyId': opts.accessKeyId,
    'SignatureMethod': opts.sigMethod,
    'SignatureVersion': opts.sigVersion,
    // NOTE(review): Math.random() is not a cryptographically strong nonce;
    // consider crypto.randomBytes if replay resistance matters here.
    'SignatureNonce': Math.random(),
    'Timestamp': new Date().toISOString()
  };

  if (policy) {
    var policyStr;
    if (is.string(policy)) {
      // Normalize the policy string by a parse/stringify round-trip so
      // invalid JSON fails fast with a clear error.
      try {
        policyStr = JSON.stringify(JSON.parse(policy));
      } catch (err) {
        throw new Error('Policy string is not a valid JSON: ' + err.message);
      }
    } else {
      policyStr = JSON.stringify(policy);
    }
    params.Policy = policyStr;
  }

  var signature = this._getSignature('POST', params, opts.accessKeySecret);
  params.Signature = signature;

  var reqUrl = opts.endpoint;
  var reqParams = {
    agent: this.agent,
    timeout: ms(options && options.timeout || opts.timeout),
    method: 'POST',
    content: querystring.stringify(params),
    headers: {
      'Content-Type': 'application/x-www-form-urlencoded'
    },
    ctx: options && options.ctx,
  };

  var result = yield this.urllib.request(reqUrl, reqParams);
  debug('response %s %s, got %s, headers: %j', reqParams.method, reqUrl, result.status, result.headers);

  if (Math.floor(result.status / 100) !== 2) {
    var err = yield this._requestError(result);
    err.params = reqParams;
    throw err;
  }
  result.data = JSON.parse(result.data);

  return {
    res: result.res,
    credentials: result.data.Credentials
  };
};

/**
 * Build an Error from a non-2xx STS response body.
 */
proto._requestError = function* _requestError(result) {
  var err = new Error();
  err.status = result.status;

  try {
    // FIX: JSON.parse is synchronous; the original spuriously yielded its
    // return value, which only worked by accident under a co-style runner.
    var resp = JSON.parse(result.data) || {};
    err.code = resp.Code;
    err.message = resp.Code + ': ' + resp.Message;
    err.requestId = resp.RequestId;
  } catch (e) {
    err.message = 'UnknownError: ' + String(result.data);
  }

  return err;
};

/**
 * Compute the HMAC-SHA1 request signature over the canonicalized
 * (sorted, percent-encoded) query string, per the Aliyun RPC signing scheme.
 */
proto._getSignature = function _getSignature(method, params, key) {
  var that = this;
  // Note: the signing key below is the account secret; the callback
  // parameter is renamed to avoid shadowing it.
  var canoQuery = Object.keys(params).sort().map(function (k) {
    return that._escape(k) + '=' + that._escape(params[k])
  }).join('&');

  var stringToSign =
    method.toUpperCase() +
    '&' + this._escape('/') +
    '&' + this._escape(canoQuery);

  debug('string to sign: %s', stringToSign);

  var signature = crypto.createHmac('sha1', key + '&');
  signature = signature.update(stringToSign).digest('base64');

  debug('signature: %s', signature);

  return signature;
};

/**
 * Since `encodeURIComponent` doesn't encode '*', which causes
 * 'SignatureDoesNotMatch'. We need do it ourselves.
 */
proto._escape = function _escape(str) {
  return encodeURIComponent(str).replace(/\*/g, '%2A');
};
WebDeltaSync/WebR2sync_plus
node_modules/ali-oss/lib/sts.js
JavaScript
apache-2.0
4,037
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.wicketTutorial; import org.apache.wicket.request.mapper.parameter.PageParameters; import org.apache.wicket.markup.html.basic.Label; import org.apache.wicket.markup.html.WebPage; public class HomePage extends WebPage { private static final long serialVersionUID = 1L; public HomePage(final PageParameters parameters) { super(parameters); add(new Label("version", getApplication().getFrameworkSettings().getVersion())); // TODO Add your page's components here } }
mafulafunk/wicket
wicket-user-guide/examples/MarkupInheritanceExample/src/main/java/org/wicketTutorial/HomePage.java
Java
apache-2.0
1,308
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.arrow.vector.complex.impl; import org.apache.arrow.util.Preconditions; import org.apache.arrow.vector.complex.ListVector; import org.apache.arrow.vector.complex.NonNullableStructVector; import org.apache.arrow.vector.complex.StateTool; import org.apache.arrow.vector.complex.StructVector; import org.apache.arrow.vector.complex.writer.BaseWriter.ComplexWriter; import org.apache.arrow.vector.types.pojo.Field; public class ComplexWriterImpl extends AbstractFieldWriter implements ComplexWriter { private NullableStructWriter structRoot; private UnionListWriter listRoot; private final NonNullableStructVector container; Mode mode = Mode.INIT; private final String name; private final boolean unionEnabled; private final NullableStructWriterFactory nullableStructWriterFactory; private enum Mode { INIT, STRUCT, LIST } public ComplexWriterImpl( String name, NonNullableStructVector container, boolean unionEnabled, boolean caseSensitive) { this.name = name; this.container = container; this.unionEnabled = unionEnabled; nullableStructWriterFactory = caseSensitive ? 
NullableStructWriterFactory.getNullableCaseSensitiveStructWriterFactoryInstance() : NullableStructWriterFactory.getNullableStructWriterFactoryInstance(); } public ComplexWriterImpl(String name, NonNullableStructVector container, boolean unionEnabled) { this(name, container, unionEnabled, false); } public ComplexWriterImpl(String name, NonNullableStructVector container) { this(name, container, false); } @Override public Field getField() { return container.getField(); } @Override public int getValueCapacity() { return container.getValueCapacity(); } private void check(Mode... modes) { StateTool.check(mode, modes); } @Override public void reset() { setPosition(0); } @Override public void close() throws Exception { clear(); structRoot.close(); if (listRoot != null) { listRoot.close(); } } @Override public void clear() { switch (mode) { case STRUCT: structRoot.clear(); break; case LIST: listRoot.clear(); break; default: break; } } @Override public void setValueCount(int count) { switch (mode) { case STRUCT: structRoot.setValueCount(count); break; case LIST: listRoot.setValueCount(count); break; default: break; } } @Override public void setPosition(int index) { super.setPosition(index); switch (mode) { case STRUCT: structRoot.setPosition(index); break; case LIST: listRoot.setPosition(index); break; default: break; } } public StructWriter directStruct() { Preconditions.checkArgument(name == null); switch (mode) { case INIT: structRoot = nullableStructWriterFactory.build((StructVector) container); structRoot.setPosition(idx()); mode = Mode.STRUCT; break; case STRUCT: break; default: check(Mode.INIT, Mode.STRUCT); } return structRoot; } @Override public StructWriter rootAsStruct() { switch (mode) { case INIT: // TODO allow dictionaries in complex types StructVector struct = container.addOrGetStruct(name); structRoot = nullableStructWriterFactory.build(struct); structRoot.setPosition(idx()); mode = Mode.STRUCT; break; case STRUCT: break; default: check(Mode.INIT, 
Mode.STRUCT); } return structRoot; } @Override public void allocate() { if (structRoot != null) { structRoot.allocate(); } else if (listRoot != null) { listRoot.allocate(); } } @Override public ListWriter rootAsList() { switch (mode) { case INIT: int vectorCount = container.size(); // TODO allow dictionaries in complex types ListVector listVector = container.addOrGetList(name); if (container.size() > vectorCount) { listVector.allocateNew(); } listRoot = new UnionListWriter(listVector, nullableStructWriterFactory); listRoot.setPosition(idx()); mode = Mode.LIST; break; case LIST: break; default: check(Mode.INIT, Mode.STRUCT); } return listRoot; } }
pcmoritz/arrow
java/vector/src/main/java/org/apache/arrow/vector/complex/impl/ComplexWriterImpl.java
Java
apache-2.0
5,289
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package e2e import ( "fmt" "io/ioutil" "math/rand" "net" "net/http" "sort" "strconv" "strings" "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/intstr" utilnet "k8s.io/kubernetes/pkg/util/net" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" ) // Maximum time a kube-proxy daemon on a node is allowed to not // notice a Service update, such as type=NodePort. // TODO: This timeout should be O(10s), observed values are O(1m), 5m is very // liberal. Fix tracked in #20567. const kubeProxyLagTimeout = 5 * time.Minute // Maximum time a load balancer is allowed to not respond after creation. const loadBalancerLagTimeout = 2 * time.Minute // How long to wait for a load balancer to be created/modified. 
//TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable const loadBalancerCreateTimeout = 20 * time.Minute // This should match whatever the default/configured range is var ServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768} var _ = Describe("Services", func() { f := NewFramework("services") var c *client.Client var extraNamespaces []string BeforeEach(func() { var err error c, err = loadClient() Expect(err).NotTo(HaveOccurred()) }) AfterEach(func() { if testContext.DeleteNamespace { for _, ns := range extraNamespaces { By(fmt.Sprintf("Destroying namespace %v", ns)) if err := deleteNS(c, ns, 5*time.Minute /* namespace deletion timeout */); err != nil { Failf("Couldn't delete namespace %s: %s", ns, err) } } extraNamespaces = nil } else { Logf("Found DeleteNamespace=false, skipping namespace deletion!") } }) // TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here. 
It("should provide secure master service [Conformance]", func() { _, err := c.Services(api.NamespaceDefault).Get("kubernetes") Expect(err).NotTo(HaveOccurred()) }) It("should serve a basic endpoint from pods [Conformance]", func() { // TODO: use the ServiceTestJig here serviceName := "endpoint-test2" ns := f.Namespace.Name labels := map[string]string{ "foo": "bar", "baz": "blah", } By("creating service " + serviceName + " in namespace " + ns) defer func() { err := c.Services(ns).Delete(serviceName) Expect(err).NotTo(HaveOccurred()) }() service := &api.Service{ ObjectMeta: api.ObjectMeta{ Name: serviceName, }, Spec: api.ServiceSpec{ Selector: labels, Ports: []api.ServicePort{{ Port: 80, TargetPort: intstr.FromInt(80), }}, }, } _, err := c.Services(ns).Create(service) Expect(err).NotTo(HaveOccurred()) validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{}) names := map[string]bool{} defer func() { for name := range names { err := c.Pods(ns).Delete(name, nil) Expect(err).NotTo(HaveOccurred()) } }() name1 := "pod1" name2 := "pod2" createPodOrFail(c, ns, name1, labels, []api.ContainerPort{{ContainerPort: 80}}) names[name1] = true validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name1: {80}}) createPodOrFail(c, ns, name2, labels, []api.ContainerPort{{ContainerPort: 80}}) names[name2] = true validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name1: {80}, name2: {80}}) deletePodOrFail(c, ns, name1) delete(names, name1) validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{name2: {80}}) deletePodOrFail(c, ns, name2) delete(names, name2) validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{}) }) It("should serve multiport endpoints from pods [Conformance]", func() { // TODO: use the ServiceTestJig here // repacking functionality is intentionally not tested here - it's better to test it in an integration test. 
serviceName := "multi-endpoint-test" ns := f.Namespace.Name defer func() { err := c.Services(ns).Delete(serviceName) Expect(err).NotTo(HaveOccurred()) }() labels := map[string]string{"foo": "bar"} svc1port := "svc1" svc2port := "svc2" By("creating service " + serviceName + " in namespace " + ns) service := &api.Service{ ObjectMeta: api.ObjectMeta{ Name: serviceName, }, Spec: api.ServiceSpec{ Selector: labels, Ports: []api.ServicePort{ { Name: "portname1", Port: 80, TargetPort: intstr.FromString(svc1port), }, { Name: "portname2", Port: 81, TargetPort: intstr.FromString(svc2port), }, }, }, } _, err := c.Services(ns).Create(service) Expect(err).NotTo(HaveOccurred()) port1 := 100 port2 := 101 validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{}) names := map[string]bool{} defer func() { for name := range names { err := c.Pods(ns).Delete(name, nil) Expect(err).NotTo(HaveOccurred()) } }() containerPorts1 := []api.ContainerPort{ { Name: svc1port, ContainerPort: port1, }, } containerPorts2 := []api.ContainerPort{ { Name: svc2port, ContainerPort: port2, }, } podname1 := "pod1" podname2 := "pod2" createPodOrFail(c, ns, podname1, labels, containerPorts1) names[podname1] = true validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}}) createPodOrFail(c, ns, podname2, labels, containerPorts2) names[podname2] = true validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname1: {port1}, podname2: {port2}}) deletePodOrFail(c, ns, podname1) delete(names, podname1) validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{podname2: {port2}}) deletePodOrFail(c, ns, podname2) delete(names, podname2) validateEndpointsOrFail(c, ns, serviceName, PortsByPodName{}) }) It("should be able to up and down services", func() { // TODO: use the ServiceTestJig here // this test uses NodeSSHHosts that does not work if a Node only reports LegacyHostIP SkipUnlessProviderIs(providersWithSSH...) 
ns := f.Namespace.Name numPods, servicePort := 3, 80 By("creating service1 in namespace " + ns) podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) By("creating service2 in namespace " + ns) podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) hosts, err := NodeSSHHosts(c) Expect(err).NotTo(HaveOccurred()) if len(hosts) == 0 { Failf("No ssh-able nodes") } host := hosts[0] By("verifying service1 is up") expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) By("verifying service2 is up") expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) // Stop service 1 and make sure it is gone. By("stopping service1") expectNoError(stopServeHostnameService(c, ns, "service1")) By("verifying service1 is not up") expectNoError(verifyServeHostnameServiceDown(c, host, svc1IP, servicePort)) By("verifying service2 is still up") expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) // Start another service and verify both are up. 
By("creating service3 in namespace " + ns) podNames3, svc3IP, err := startServeHostnameService(c, ns, "service3", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) if svc2IP == svc3IP { Failf("service IPs conflict: %v", svc2IP) } By("verifying service2 is still up") expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) By("verifying service3 is up") expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames3, svc3IP, servicePort)) }) It("should work after restarting kube-proxy [Disruptive]", func() { // TODO: use the ServiceTestJig here SkipUnlessProviderIs("gce", "gke") ns := f.Namespace.Name numPods, servicePort := 3, 80 svc1 := "service1" svc2 := "service2" defer func() { expectNoError(stopServeHostnameService(c, ns, svc1)) }() podNames1, svc1IP, err := startServeHostnameService(c, ns, svc1, servicePort, numPods) Expect(err).NotTo(HaveOccurred()) defer func() { expectNoError(stopServeHostnameService(c, ns, svc2)) }() podNames2, svc2IP, err := startServeHostnameService(c, ns, svc2, servicePort, numPods) Expect(err).NotTo(HaveOccurred()) if svc1IP == svc2IP { Failf("VIPs conflict: %v", svc1IP) } hosts, err := NodeSSHHosts(c) Expect(err).NotTo(HaveOccurred()) if len(hosts) == 0 { Failf("No ssh-able nodes") } host := hosts[0] expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) By("Restarting kube-proxy") if err := restartKubeProxy(host); err != nil { Failf("error restarting kube-proxy: %v", err) } expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) By("Removing iptable rules") result, err := SSH(` sudo iptables -t nat -F KUBE-SERVICES || true; sudo iptables -t nat -F KUBE-PORTALS-HOST || true; sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true`, host, 
testContext.Provider) if err != nil || result.Code != 0 { LogSSHResult(result) Failf("couldn't remove iptable rules: %v", err) } expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) }) It("should work after restarting apiserver [Disruptive]", func() { // TODO: use the ServiceTestJig here // TODO: restartApiserver doesn't work in GKE - fix it and reenable this test. SkipUnlessProviderIs("gce") ns := f.Namespace.Name numPods, servicePort := 3, 80 defer func() { expectNoError(stopServeHostnameService(c, ns, "service1")) }() podNames1, svc1IP, err := startServeHostnameService(c, ns, "service1", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) hosts, err := NodeSSHHosts(c) Expect(err).NotTo(HaveOccurred()) if len(hosts) == 0 { Failf("No ssh-able nodes") } host := hosts[0] expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) // Restart apiserver if err := restartApiserver(); err != nil { Failf("error restarting apiserver: %v", err) } if err := waitForApiserverUp(c); err != nil { Failf("error while waiting for apiserver up: %v", err) } expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) // Create a new service and check if it's not reusing IP. defer func() { expectNoError(stopServeHostnameService(c, ns, "service2")) }() podNames2, svc2IP, err := startServeHostnameService(c, ns, "service2", servicePort, numPods) Expect(err).NotTo(HaveOccurred()) if svc1IP == svc2IP { Failf("VIPs conflict: %v", svc1IP) } expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames1, svc1IP, servicePort)) expectNoError(verifyServeHostnameServiceUp(c, ns, host, podNames2, svc2IP, servicePort)) }) // TODO: Run this test against the userspace proxy and nodes // configured with a default deny firewall to validate that the // proxy whitelists NodePort traffic. 
It("should be able to create a functioning NodePort service", func() { serviceName := "nodeport-test" ns := f.Namespace.Name jig := NewServiceTestJig(c, serviceName) nodeIP := pickNodeIP(jig.Client) // for later By("creating service " + serviceName + " with type=NodePort in namespace " + ns) service := jig.CreateTCPServiceOrFail(ns, func(svc *api.Service) { svc.Spec.Type = api.ServiceTypeNodePort }) jig.SanityCheckService(service, api.ServiceTypeNodePort) nodePort := service.Spec.Ports[0].NodePort By("creating pod to be part of service " + serviceName) jig.RunOrFail(ns, nil) By("hitting the pod through the service's NodePort") jig.TestReachableHTTP(nodeIP, nodePort, kubeProxyLagTimeout) By("verifying the node port is locked") hostExec := LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec") // Even if the node-ip:node-port check above passed, this hostexec pod // might fall on a node with a laggy kube-proxy. cmd := fmt.Sprintf(`for i in $(seq 1 300); do if ss -ant46 'sport = :%d' | grep ^LISTEN; then exit 0; fi; sleep 1; done; exit 1`, nodePort) stdout, err := RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) if err != nil { Failf("expected node port %d to be in use, stdout: %v", nodePort, stdout) } }) It("should be able to change the type and ports of a service", func() { // requires cloud load-balancer support SkipUnlessProviderIs("gce", "gke", "aws") // This test is more monolithic than we'd like because LB turnup can be // very slow, so we lumped all the tests into one LB lifecycle. 
serviceName := "mutability-test" ns1 := f.Namespace.Name // LB1 in ns1 on TCP Logf("namespace for TCP test: %s", ns1) By("creating a second namespace") namespacePtr, err := createTestingNS("services", c, nil) Expect(err).NotTo(HaveOccurred()) ns2 := namespacePtr.Name // LB2 in ns2 on UDP Logf("namespace for UDP test: %s", ns2) extraNamespaces = append(extraNamespaces, ns2) jig := NewServiceTestJig(c, serviceName) nodeIP := pickNodeIP(jig.Client) // for later // Test TCP and UDP Services. Services with the same name in different // namespaces should get different node ports and load balancers. By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1) tcpService := jig.CreateTCPServiceOrFail(ns1, nil) jig.SanityCheckService(tcpService, api.ServiceTypeClusterIP) By("creating a UDP service " + serviceName + " with type=ClusterIP in namespace " + ns2) udpService := jig.CreateUDPServiceOrFail(ns2, nil) jig.SanityCheckService(udpService, api.ServiceTypeClusterIP) By("verifying that TCP and UDP use the same port") if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port { Failf("expected to use the same port for TCP and UDP") } svcPort := tcpService.Spec.Ports[0].Port Logf("service port (TCP and UDP): %d", svcPort) By("creating a pod to be part of the TCP service " + serviceName) jig.RunOrFail(ns1, nil) By("creating a pod to be part of the UDP service " + serviceName) jig.RunOrFail(ns2, nil) // Change the services to NodePort. 
By("changing the TCP service " + serviceName + " to type=NodePort") tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *api.Service) { s.Spec.Type = api.ServiceTypeNodePort }) jig.SanityCheckService(tcpService, api.ServiceTypeNodePort) tcpNodePort := tcpService.Spec.Ports[0].NodePort Logf("TCP node port: %d", tcpNodePort) By("changing the UDP service " + serviceName + " to type=NodePort") udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) { s.Spec.Type = api.ServiceTypeNodePort }) jig.SanityCheckService(udpService, api.ServiceTypeNodePort) udpNodePort := udpService.Spec.Ports[0].NodePort Logf("UDP node port: %d", udpNodePort) By("hitting the TCP service's NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout) By("hitting the UDP service's NodePort") jig.TestReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout) // Change the services to LoadBalancer. requestedIP := "" if providerIs("gce", "gke") { By("creating a static load balancer IP") rand.Seed(time.Now().UTC().UnixNano()) staticIPName := fmt.Sprintf("e2e-external-lb-test-%d", rand.Intn(65535)) requestedIP, err = createGCEStaticIP(staticIPName) Expect(err).NotTo(HaveOccurred()) defer func() { // Release GCE static IP - this is not kube-managed and will not be automatically released. 
deleteGCEStaticIP(staticIPName) }() Logf("Allocated static load balancer IP: %s", requestedIP) } By("changing the TCP service " + serviceName + " to type=LoadBalancer") tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *api.Service) { s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable s.Spec.Type = api.ServiceTypeLoadBalancer }) By("changing the UDP service " + serviceName + " to type=LoadBalancer") udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) { s.Spec.Type = api.ServiceTypeLoadBalancer }) By("waiting for the TCP service " + serviceName + " to have a load balancer") // Wait for the load balancer to be created asynchronously tcpService = jig.WaitForLoadBalancerOrFail(ns1, tcpService.Name) jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer) if tcpService.Spec.Ports[0].NodePort != tcpNodePort { Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort) } if requestedIP != "" && getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP { Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } tcpIngressIP := getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) Logf("TCP load balancer: %s", tcpIngressIP) By("waiting for the UDP service " + serviceName + " to have a load balancer") // 2nd one should be faster since they ran in parallel. 
udpService = jig.WaitForLoadBalancerOrFail(ns2, udpService.Name) jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer) if udpService.Spec.Ports[0].NodePort != udpNodePort { Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort) } udpIngressIP := getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) Logf("UDP load balancer: %s", tcpIngressIP) By("verifying that TCP and UDP use different load balancers") if tcpIngressIP == udpIngressIP { Failf("Load balancers are not different: %s", getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } By("hitting the TCP service's NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout) By("hitting the UDP service's NodePort") jig.TestReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout) By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) By("hitting the UDP service's LoadBalancer") jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout) // Change the services' node ports. 
By("changing the TCP service's " + serviceName + " NodePort") tcpService = jig.ChangeServiceNodePortOrFail(ns1, tcpService.Name, tcpNodePort) jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer) tcpNodePortOld := tcpNodePort tcpNodePort = tcpService.Spec.Ports[0].NodePort if tcpNodePort == tcpNodePortOld { Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort) } if getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } Logf("TCP node port: %d", tcpNodePort) By("changing the UDP service's " + serviceName + " NodePort") udpService = jig.ChangeServiceNodePortOrFail(ns2, udpService.Name, udpNodePort) jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer) udpNodePortOld := udpNodePort udpNodePort = udpService.Spec.Ports[0].NodePort if udpNodePort == udpNodePortOld { Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort) } if getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) } Logf("UDP node port: %d", udpNodePort) By("hitting the TCP service's new NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout) By("hitting the UDP service's new NodePort") jig.TestReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout) By("checking the old TCP NodePort is closed") jig.TestNotReachableHTTP(nodeIP, tcpNodePortOld, kubeProxyLagTimeout) By("checking the old UDP NodePort is closed") jig.TestNotReachableUDP(nodeIP, udpNodePortOld, kubeProxyLagTimeout) By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) By("hitting the UDP service's LoadBalancer") jig.TestReachableUDP(udpIngressIP, svcPort, 
loadBalancerLagTimeout) // Change the services' main ports. By("changing the TCP service's port") tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *api.Service) { s.Spec.Ports[0].Port++ }) jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer) svcPortOld := svcPort svcPort = tcpService.Spec.Ports[0].Port if svcPort == svcPortOld { Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort) } if tcpService.Spec.Ports[0].NodePort != tcpNodePort { Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort) } if getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } Logf("service port (TCP and UDP): %d", svcPort) By("changing the UDP service's port") udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) { s.Spec.Ports[0].Port++ }) jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer) if udpService.Spec.Ports[0].Port != svcPort { Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port) } if udpService.Spec.Ports[0].NodePort != udpNodePort { Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort) } if getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) } By("hitting the TCP service's NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout) By("hitting the UDP service's NodePort") jig.TestReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout) By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB By("hitting the UDP service's LoadBalancer") 
jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) // this may actually recreate the LB) // Change the services back to ClusterIP. By("changing TCP service " + serviceName + " back to type=ClusterIP") tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *api.Service) { s.Spec.Type = api.ServiceTypeClusterIP s.Spec.Ports[0].NodePort = 0 }) // Wait for the load balancer to be destroyed asynchronously tcpService = jig.WaitForLoadBalancerDestroyOrFail(ns1, tcpService.Name, tcpIngressIP, svcPort) jig.SanityCheckService(tcpService, api.ServiceTypeClusterIP) By("changing UDP service " + serviceName + " back to type=ClusterIP") udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) { s.Spec.Type = api.ServiceTypeClusterIP s.Spec.Ports[0].NodePort = 0 }) // Wait for the load balancer to be destroyed asynchronously udpService = jig.WaitForLoadBalancerDestroyOrFail(ns2, udpService.Name, udpIngressIP, svcPort) jig.SanityCheckService(udpService, api.ServiceTypeClusterIP) By("checking the TCP NodePort is closed") jig.TestNotReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout) By("checking the UDP NodePort is closed") jig.TestNotReachableUDP(nodeIP, udpNodePort, kubeProxyLagTimeout) By("checking the TCP LoadBalancer is closed") jig.TestNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) By("checking the UDP LoadBalancer is closed") jig.TestNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout) }) It("should prevent NodePort collisions", func() { // TODO: use the ServiceTestJig here baseName := "nodeport-collision-" serviceName1 := baseName + "1" serviceName2 := baseName + "2" ns := f.Namespace.Name t := NewServerTest(c, ns, serviceName1) defer func() { defer GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { Failf("errors in cleanup: %v", errs) } }() By("creating service " + serviceName1 + " with type NodePort in namespace " + ns) service := t.BuildServiceSpec() service.Spec.Type = 
api.ServiceTypeNodePort result, err := t.CreateService(service) Expect(err).NotTo(HaveOccurred()) if result.Spec.Type != api.ServiceTypeNodePort { Failf("got unexpected Spec.Type for new service: %v", result) } if len(result.Spec.Ports) != 1 { Failf("got unexpected len(Spec.Ports) for new service: %v", result) } port := result.Spec.Ports[0] if port.NodePort == 0 { Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", result) } By("creating service " + serviceName2 + " with conflicting NodePort") service2 := t.BuildServiceSpec() service2.Name = serviceName2 service2.Spec.Type = api.ServiceTypeNodePort service2.Spec.Ports[0].NodePort = port.NodePort result2, err := t.CreateService(service2) if err == nil { Failf("Created service with conflicting NodePort: %v", result2) } expectedErr := fmt.Sprintf("%d.*port is already allocated", port.NodePort) Expect(fmt.Sprintf("%v", err)).To(MatchRegexp(expectedErr)) By("deleting service " + serviceName1 + " to release NodePort") err = t.DeleteService(serviceName1) Expect(err).NotTo(HaveOccurred()) By("creating service " + serviceName2 + " with no-longer-conflicting NodePort") _, err = t.CreateService(service2) Expect(err).NotTo(HaveOccurred()) }) It("should check NodePort out-of-range", func() { // TODO: use the ServiceTestJig here serviceName := "nodeport-range-test" ns := f.Namespace.Name t := NewServerTest(c, ns, serviceName) defer func() { defer GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { Failf("errors in cleanup: %v", errs) } }() service := t.BuildServiceSpec() service.Spec.Type = api.ServiceTypeNodePort By("creating service " + serviceName + " with type NodePort in namespace " + ns) service, err := t.CreateService(service) Expect(err).NotTo(HaveOccurred()) if service.Spec.Type != api.ServiceTypeNodePort { Failf("got unexpected Spec.Type for new service: %v", service) } if len(service.Spec.Ports) != 1 { Failf("got unexpected len(Spec.Ports) for new service: %v", service) } port := 
service.Spec.Ports[0] if port.NodePort == 0 { Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service) } if !ServiceNodePortRange.Contains(port.NodePort) { Failf("got unexpected (out-of-range) port for new service: %v", service) } outOfRangeNodePort := 0 for { outOfRangeNodePort = 1 + rand.Intn(65535) if !ServiceNodePortRange.Contains(outOfRangeNodePort) { break } } By(fmt.Sprintf("changing service "+serviceName+" to out-of-range NodePort %d", outOfRangeNodePort)) result, err := updateService(c, ns, serviceName, func(s *api.Service) { s.Spec.Ports[0].NodePort = outOfRangeNodePort }) if err == nil { Failf("failed to prevent update of service with out-of-range NodePort: %v", result) } expectedErr := fmt.Sprintf("%d.*port is not in the valid range", outOfRangeNodePort) Expect(fmt.Sprintf("%v", err)).To(MatchRegexp(expectedErr)) By("deleting original service " + serviceName) err = t.DeleteService(serviceName) Expect(err).NotTo(HaveOccurred()) By(fmt.Sprintf("creating service "+serviceName+" with out-of-range NodePort %d", outOfRangeNodePort)) service = t.BuildServiceSpec() service.Spec.Type = api.ServiceTypeNodePort service.Spec.Ports[0].NodePort = outOfRangeNodePort service, err = t.CreateService(service) if err == nil { Failf("failed to prevent create of service with out-of-range NodePort (%d): %v", outOfRangeNodePort, service) } Expect(fmt.Sprintf("%v", err)).To(MatchRegexp(expectedErr)) }) It("should release NodePorts on delete", func() { // TODO: use the ServiceTestJig here serviceName := "nodeport-reuse" ns := f.Namespace.Name t := NewServerTest(c, ns, serviceName) defer func() { defer GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { Failf("errors in cleanup: %v", errs) } }() service := t.BuildServiceSpec() service.Spec.Type = api.ServiceTypeNodePort By("creating service " + serviceName + " with type NodePort in namespace " + ns) service, err := t.CreateService(service) Expect(err).NotTo(HaveOccurred()) if service.Spec.Type != 
api.ServiceTypeNodePort { Failf("got unexpected Spec.Type for new service: %v", service) } if len(service.Spec.Ports) != 1 { Failf("got unexpected len(Spec.Ports) for new service: %v", service) } port := service.Spec.Ports[0] if port.NodePort == 0 { Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service) } if !ServiceNodePortRange.Contains(port.NodePort) { Failf("got unexpected (out-of-range) port for new service: %v", service) } nodePort := port.NodePort By("deleting original service " + serviceName) err = t.DeleteService(serviceName) Expect(err).NotTo(HaveOccurred()) hostExec := LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec") cmd := fmt.Sprintf(`! ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort) var stdout string if pollErr := wait.PollImmediate(poll, kubeProxyLagTimeout, func() (bool, error) { var err error stdout, err = RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) if err != nil { Logf("expected node port (%d) to not be in use, stdout: %v", nodePort, stdout) return false, nil } return true, nil }); pollErr != nil { Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, kubeProxyLagTimeout, stdout) } By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort)) service = t.BuildServiceSpec() service.Spec.Type = api.ServiceTypeNodePort service.Spec.Ports[0].NodePort = nodePort service, err = t.CreateService(service) Expect(err).NotTo(HaveOccurred()) }) }) // updateService fetches a service, calls the update function on it, // and then attempts to send the updated service. It retries up to 2 // times in the face of timeouts and conflicts. 
func updateService(c *client.Client, namespace, serviceName string, update func(*api.Service)) (*api.Service, error) {
	var service *api.Service
	var err error
	// Up to 3 attempts; only retry on conflict/server-timeout, since those
	// are the transient failures a fresh Get+Update can resolve.
	for i := 0; i < 3; i++ {
		service, err = c.Services(namespace).Get(serviceName)
		if err != nil {
			return service, err
		}

		update(service)

		service, err = c.Services(namespace).Update(service)

		if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
			return service, err
		}
	}
	// All retries exhausted; surface the last conflict/timeout error.
	return service, err
}

// getContainerPortsByPodUID maps each pod UID referenced by the endpoints
// object to the list of container ports exposed for it.
func getContainerPortsByPodUID(endpoints *api.Endpoints) PortsByPodUID {
	m := PortsByPodUID{}
	for _, ss := range endpoints.Subsets {
		for _, port := range ss.Ports {
			for _, addr := range ss.Addresses {
				containerPort := port.Port
				hostPort := port.Port

				// use endpoint annotations to recover the container port in a Mesos setup
				// compare contrib/mesos/pkg/service/endpoints_controller.syncService
				key := fmt.Sprintf("k8s.mesosphere.io/containerPort_%s_%s_%d", port.Protocol, addr.IP, hostPort)
				mesosContainerPortString := endpoints.Annotations[key]
				if mesosContainerPortString != "" {
					var err error
					containerPort, err = strconv.Atoi(mesosContainerPortString)
					if err != nil {
						// Malformed annotation value: skip this address entirely.
						continue
					}
					Logf("Mapped mesos host port %d to container port %d via annotation %s=%s", hostPort, containerPort, key, mesosContainerPortString)
				}

				// Logf("Found pod %v, host port %d and container port %d", addr.TargetRef.UID, hostPort, containerPort)
				if _, ok := m[addr.TargetRef.UID]; !ok {
					m[addr.TargetRef.UID] = make([]int, 0)
				}
				m[addr.TargetRef.UID] = append(m[addr.TargetRef.UID], containerPort)
			}
		}
	}
	return m
}

// PortsByPodName maps a pod name to the ports it is expected to expose.
type PortsByPodName map[string][]int

// PortsByPodUID maps a pod UID to the ports it is expected to expose.
type PortsByPodUID map[types.UID][]int

// translatePodNameToUIDOrFail converts a name-keyed port map into a UID-keyed
// one by looking each pod up in the given namespace; fails the test if a pod
// cannot be fetched.
func translatePodNameToUIDOrFail(c *client.Client, ns string, expectedEndpoints PortsByPodName) PortsByPodUID {
	portsByUID := make(PortsByPodUID)

	for name, portList := range expectedEndpoints {
		pod, err := c.Pods(ns).Get(name)
		if err != nil {
			Failf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err)
		}
		portsByUID[pod.ObjectMeta.UID] = portList
	}
	// Logf("successfully translated pod names to UIDs: %v -> %v on namespace %s", expectedEndpoints, portsByUID, ns)
	return portsByUID
}

// validatePortsOrFail fails the test unless both maps contain exactly the
// same pod UIDs with exactly the same port lists (order-insensitive: both
// lists are sorted in place before comparison).
func validatePortsOrFail(endpoints PortsByPodUID, expectedEndpoints PortsByPodUID) {
	if len(endpoints) != len(expectedEndpoints) {
		// should not happen because we check this condition before
		Failf("invalid number of endpoints got %v, expected %v", endpoints, expectedEndpoints)
	}
	for podUID := range expectedEndpoints {
		if _, ok := endpoints[podUID]; !ok {
			Failf("endpoint %v not found", podUID)
		}
		if len(endpoints[podUID]) != len(expectedEndpoints[podUID]) {
			Failf("invalid list of ports for uid %v. Got %v, expected %v", podUID, endpoints[podUID], expectedEndpoints[podUID])
		}
		// Sorting mutates both maps' slices; acceptable here since they are
		// only used for this comparison.
		sort.Ints(endpoints[podUID])
		sort.Ints(expectedEndpoints[podUID])
		for index := range endpoints[podUID] {
			if endpoints[podUID][index] != expectedEndpoints[podUID][index] {
				Failf("invalid list of ports for uid %v. Got %v, expected %v", podUID, endpoints[podUID], expectedEndpoints[podUID])
			}
		}
	}
}

// validateEndpointsOrFail polls (once per second, up to serviceStartTimeout)
// until the service's Endpoints object exposes exactly the expected
// pod-name -> ports mapping; on timeout it dumps all pods for debugging and
// fails the test.
func validateEndpointsOrFail(c *client.Client, namespace, serviceName string, expectedEndpoints PortsByPodName) {
	By(fmt.Sprintf("waiting up to %v for service %s in namespace %s to expose endpoints %v", serviceStartTimeout, serviceName, namespace, expectedEndpoints))
	i := 1
	for start := time.Now(); time.Since(start) < serviceStartTimeout; time.Sleep(1 * time.Second) {
		endpoints, err := c.Endpoints(namespace).Get(serviceName)
		if err != nil {
			// Transient Get failures are expected while the service settles.
			Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err)
			continue
		}
		// Logf("Found endpoints %v", endpoints)

		portsByPodUID := getContainerPortsByPodUID(endpoints)
		// Logf("Found port by pod UID %v", portsByPodUID)

		expectedPortsByPodUID := translatePodNameToUIDOrFail(c, namespace, expectedEndpoints)
		if len(portsByPodUID) == len(expectedEndpoints) {
			validatePortsOrFail(portsByPodUID, expectedPortsByPodUID)
			Logf("successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)",
				serviceName, namespace, expectedEndpoints, time.Since(start))
			return
		}

		// Only log a progress message every 5th iteration to limit noise.
		if i%5 == 0 {
			Logf("Unexpected endpoints: found %v, expected %v (%v elapsed, will retry)", portsByPodUID, expectedEndpoints, time.Since(start))
		}
		i++
	}

	// Timed out: dump every pod in the cluster to aid debugging.
	if pods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{}); err == nil {
		for _, pod := range pods.Items {
			Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
		}
	} else {
		Logf("Can't list pod debug info: %v", err)
	}
	Failf("Timed out waiting for service %s in namespace %s to expose endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, serviceStartTimeout)
}

// createExecPodOrFail creates a simple busybox pod in a sleep loop used as a
// vessel for kubectl exec commands.
func createExecPodOrFail(c *client.Client, ns, name string) { Logf("Creating new exec pod") immediate := int64(0) pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: name, Namespace: ns, }, Spec: api.PodSpec{ TerminationGracePeriodSeconds: &immediate, Containers: []api.Container{ { Name: "exec", Image: "gcr.io/google_containers/busybox", Command: []string{"sh", "-c", "while true; do sleep 5; done"}, }, }, }, } _, err := c.Pods(ns).Create(pod) Expect(err).NotTo(HaveOccurred()) err = wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) { retrievedPod, err := c.Pods(pod.Namespace).Get(pod.Name) if err != nil { return false, nil } return retrievedPod.Status.Phase == api.PodRunning, nil }) Expect(err).NotTo(HaveOccurred()) } func createPodOrFail(c *client.Client, ns, name string, labels map[string]string, containerPorts []api.ContainerPort) { By(fmt.Sprintf("creating pod %s in namespace %s", name, ns)) pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: name, Labels: labels, }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: "test", Image: "gcr.io/google_containers/pause:2.0", Ports: containerPorts, }, }, }, } _, err := c.Pods(ns).Create(pod) Expect(err).NotTo(HaveOccurred()) } func deletePodOrFail(c *client.Client, ns, name string) { By(fmt.Sprintf("deleting pod %s in namespace %s", name, ns)) err := c.Pods(ns).Delete(name, nil) Expect(err).NotTo(HaveOccurred()) } func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []string { ips := []string{} for i := range nodes.Items { item := &nodes.Items[i] for j := range item.Status.Addresses { nodeAddress := &item.Status.Addresses[j] if nodeAddress.Type == addressType { ips = append(ips, nodeAddress.Address) } } } return ips } func getNodePublicIps(c *client.Client) ([]string, error) { nodes := ListSchedulableNodesOrDie(c) ips := collectAddresses(nodes, api.NodeExternalIP) if len(ips) == 0 { ips = collectAddresses(nodes, api.NodeLegacyHostIP) } return ips, nil } func pickNodeIP(c 
*client.Client) string { publicIps, err := getNodePublicIps(c) Expect(err).NotTo(HaveOccurred()) if len(publicIps) == 0 { Failf("got unexpected number (%d) of public IPs", len(publicIps)) } ip := publicIps[0] return ip } func testReachableHTTP(ip string, port int, request string, expect string) (bool, error) { url := fmt.Sprintf("http://%s:%d%s", ip, port, request) if ip == "" { Failf("Got empty IP for reachability check (%s)", url) return false, nil } if port == 0 { Failf("Got port==0 for reachability check (%s)", url) return false, nil } Logf("Testing HTTP reachability of %v", url) resp, err := httpGetNoConnectionPool(url) if err != nil { Logf("Got error testing for reachability of %s: %v", url, err) return false, nil } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { Logf("Got error reading response from %s: %v", url, err) return false, nil } if resp.StatusCode != 200 { return false, fmt.Errorf("received non-success return status %q trying to access %s; got body: %s", resp.Status, url, string(body)) } if !strings.Contains(string(body), expect) { return false, fmt.Errorf("received response body without expected substring %q: %s", expect, string(body)) } Logf("Successfully reached %v", url) return true, nil } func testNotReachableHTTP(ip string, port int) (bool, error) { url := fmt.Sprintf("http://%s:%d", ip, port) if ip == "" { Failf("Got empty IP for non-reachability check (%s)", url) return false, nil } if port == 0 { Failf("Got port==0 for non-reachability check (%s)", url) return false, nil } Logf("Testing HTTP non-reachability of %v", url) resp, err := httpGetNoConnectionPool(url) if err != nil { Logf("Confirmed that %s is not reachable", url) return true, nil } resp.Body.Close() return false, nil } func testReachableUDP(ip string, port int, request string, expect string) (bool, error) { uri := fmt.Sprintf("udp://%s:%d", ip, port) if ip == "" { Failf("Got empty IP for reachability check (%s)", uri) return false, nil } if port == 
0 { Failf("Got port==0 for reachability check (%s)", uri) return false, nil } Logf("Testing UDP reachability of %v", uri) con, err := net.Dial("udp", ip+":"+strconv.Itoa(port)) if err != nil { return false, fmt.Errorf("Failed to dial %s:%d: %v", ip, port, err) } _, err = con.Write([]byte(fmt.Sprintf("%s\n", request))) if err != nil { return false, fmt.Errorf("Failed to send request: %v", err) } var buf []byte = make([]byte, len(expect)+1) err = con.SetDeadline(time.Now().Add(3 * time.Second)) if err != nil { return false, fmt.Errorf("Failed to set deadline: %v", err) } _, err = con.Read(buf) if err != nil { return false, nil } if !strings.Contains(string(buf), expect) { return false, fmt.Errorf("Failed to retrieve %q, got %q", expect, string(buf)) } Logf("Successfully reached %v", uri) return true, nil } func testNotReachableUDP(ip string, port int, request string) (bool, error) { uri := fmt.Sprintf("udp://%s:%d", ip, port) if ip == "" { Failf("Got empty IP for reachability check (%s)", uri) return false, nil } if port == 0 { Failf("Got port==0 for reachability check (%s)", uri) return false, nil } Logf("Testing UDP non-reachability of %v", uri) con, err := net.Dial("udp", ip+":"+strconv.Itoa(port)) if err != nil { Logf("Confirmed that %s is not reachable", uri) return true, nil } _, err = con.Write([]byte(fmt.Sprintf("%s\n", request))) if err != nil { Logf("Confirmed that %s is not reachable", uri) return true, nil } var buf []byte = make([]byte, 1) err = con.SetDeadline(time.Now().Add(3 * time.Second)) if err != nil { return false, fmt.Errorf("Failed to set deadline: %v", err) } _, err = con.Read(buf) if err != nil { Logf("Confirmed that %s is not reachable", uri) return true, nil } return false, nil } // Creates a replication controller that serves its hostname and a service on top of it. 
func startServeHostnameService(c *client.Client, ns, name string, port, replicas int) ([]string, string, error) { podNames := make([]string, replicas) By("creating service " + name + " in namespace " + ns) _, err := c.Services(ns).Create(&api.Service{ ObjectMeta: api.ObjectMeta{ Name: name, }, Spec: api.ServiceSpec{ Ports: []api.ServicePort{{ Port: port, TargetPort: intstr.FromInt(9376), Protocol: "TCP", }}, Selector: map[string]string{ "name": name, }, }, }) if err != nil { return podNames, "", err } var createdPods []*api.Pod maxContainerFailures := 0 config := RCConfig{ Client: c, Image: "gcr.io/google_containers/serve_hostname:1.1", Name: name, Namespace: ns, PollInterval: 3 * time.Second, Timeout: podReadyBeforeTimeout, Replicas: replicas, CreatedPods: &createdPods, MaxContainerFailures: &maxContainerFailures, } err = RunRC(config) if err != nil { return podNames, "", err } if len(createdPods) != replicas { return podNames, "", fmt.Errorf("Incorrect number of running pods: %v", len(createdPods)) } for i := range createdPods { podNames[i] = createdPods[i].ObjectMeta.Name } sort.StringSlice(podNames).Sort() service, err := c.Services(ns).Get(name) if err != nil { return podNames, "", err } if service.Spec.ClusterIP == "" { return podNames, "", fmt.Errorf("Service IP is blank for %v", name) } serviceIP := service.Spec.ClusterIP return podNames, serviceIP, nil } func stopServeHostnameService(c *client.Client, ns, name string) error { if err := DeleteRC(c, ns, name); err != nil { return err } if err := c.Services(ns).Delete(name); err != nil { return err } return nil } // verifyServeHostnameServiceUp wgets the given serviceIP:servicePort from the // given host and from within a pod. The host is expected to be an SSH-able node // in the cluster. Each pod in the service is expected to echo its name. These // names are compared with the given expectedPods list after a sort | uniq. 
func verifyServeHostnameServiceUp(c *client.Client, ns, host string, expectedPods []string, serviceIP string, servicePort int) error {
	// A throwaway exec pod gives us an in-cluster vantage point.
	execPodName := "execpod"
	createExecPodOrFail(c, ns, execPodName)
	defer func() {
		deletePodOrFail(c, ns, execPodName)
	}()

	// Loop a bunch of times - the proxy is randomized, so we want a good
	// chance of hitting each backend at least once.
	buildCommand := func(wget string) string {
		return fmt.Sprintf("for i in $(seq 1 %d); do %s http://%s:%d 2>&1 || true; echo; done",
			50*len(expectedPods), wget, serviceIP, servicePort)
	}
	commands := []func() string{
		// verify service from node
		func() string {
			cmd := "set -e; " + buildCommand("wget -q --timeout=0.2 --tries=1 -O -")
			Logf("Executing cmd %q on host %v", cmd, host)
			result, err := SSH(cmd, host, testContext.Provider)
			if err != nil || result.Code != 0 {
				LogSSHResult(result)
				Logf("error while SSH-ing to node: %v", err)
			}
			return result.Stdout
		},
		// verify service from pod
		func() string {
			cmd := buildCommand("wget -q -T 1 -O -")
			Logf("Executing cmd %q in pod %v/%v", cmd, ns, execPodName)
			// TODO: Use exec-over-http via the netexec pod instead of kubectl exec.
			output, err := RunHostCmd(ns, execPodName, cmd)
			if err != nil {
				Logf("error while kubectl execing %q in pod %v/%v: %v\nOutput: %v", cmd, ns, execPodName, err, output)
			}
			return output
		},
	}
	sort.StringSlice(expectedPods).Sort()
	By(fmt.Sprintf("verifying service has %d reachable backends", len(expectedPods)))
	for _, cmdFunc := range commands {
		passed := false
		gotPods := []string{}
		// Retry cmdFunc for a while
		for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
			pods := strings.Split(strings.TrimSpace(cmdFunc()), "\n")
			// Uniq pods before the sort because inserting them into a set
			// (which is implemented using dicts) can re-order them.
			gotPods = sets.NewString(pods...).List()
			if api.Semantic.DeepEqual(gotPods, expectedPods) {
				passed = true
				break
			}
			Logf("Waiting for expected pods for %s: %v, got: %v", serviceIP, expectedPods, gotPods)
		}
		if !passed {
			return fmt.Errorf("service verification failed for: %s, expected %v, got %v", serviceIP, expectedPods, gotPods)
		}
	}
	return nil
}

// verifyServeHostnameServiceDown curls serviceIP:servicePort from the given
// host, retrying for up to a minute, and returns nil once the endpoint stops
// answering (the curl && exit 99 pattern makes exit code 99 mean "reachable").
func verifyServeHostnameServiceDown(c *client.Client, host string, serviceIP string, servicePort int) error {
	command := fmt.Sprintf(
		"curl -s --connect-timeout 2 http://%s:%d && exit 99", serviceIP, servicePort)

	for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
		result, err := SSH(command, host, testContext.Provider)
		if err != nil {
			LogSSHResult(result)
			Logf("error while SSH-ing to node: %v", err)
		}
		if result.Code != 99 {
			return nil
		}
		Logf("service still alive - still waiting")
	}
	return fmt.Errorf("waiting for service to be down timed out")
}

// Does an HTTP GET, but does not reuse TCP connections
// This masks problems where the iptables rule has changed, but we don't see it
// This is intended for relatively quick requests (status checks), so we set a short (5 seconds) timeout
func httpGetNoConnectionPool(url string) (*http.Response, error) {
	tr := &http.Transport{
		DisableKeepAlives: true,
	}
	client := &http.Client{
		Transport: tr,
		Timeout:   5 * time.Second,
	}

	return client.Get(url)
}

// A test jig to help testing.
type ServiceTestJig struct {
	// ID uniquely identifies this jig instance (Name + a fresh UUID).
	ID string
	// Name is used for the Service/RC objects the jig creates.
	Name string
	// Client talks to the cluster under test.
	Client *client.Client
	// Labels select the pods backing the jig's services.
	Labels map[string]string
}

// NewServiceTestJig allocates and inits a new ServiceTestJig.
func NewServiceTestJig(client *client.Client, name string) *ServiceTestJig {
	j := &ServiceTestJig{}
	j.Client = client
	j.Name = name
	j.ID = j.Name + "-" + string(util.NewUUID())
	j.Labels = map[string]string{"testid": j.ID}

	return j
}

// newServiceTemplate returns the default api.Service template for this jig, but
// does not actually create the Service. The default Service has the same name
// as the jig and exposes port 80.
func (j *ServiceTestJig) newServiceTemplate(namespace string, proto api.Protocol) *api.Service {
	service := &api.Service{
		ObjectMeta: api.ObjectMeta{
			Namespace: namespace,
			Name:      j.Name,
			Labels:    j.Labels,
		},
		Spec: api.ServiceSpec{
			Selector: j.Labels,
			Ports: []api.ServicePort{
				{
					Protocol: proto,
					Port:     80,
				},
			},
		},
	}
	return service
}

// CreateTCPServiceOrFail creates a new TCP Service based on the jig's
// defaults. Callers can provide a function to tweak the Service object before
// it is created.
func (j *ServiceTestJig) CreateTCPServiceOrFail(namespace string, tweak func(svc *api.Service)) *api.Service {
	svc := j.newServiceTemplate(namespace, api.ProtocolTCP)
	if tweak != nil {
		tweak(svc)
	}
	result, err := j.Client.Services(namespace).Create(svc)
	if err != nil {
		Failf("Failed to create TCP Service %q: %v", svc.Name, err)
	}
	return result
}

// CreateUDPServiceOrFail creates a new UDP Service based on the jig's
// defaults. Callers can provide a function to tweak the Service object before
// it is created.
func (j *ServiceTestJig) CreateUDPServiceOrFail(namespace string, tweak func(svc *api.Service)) *api.Service { svc := j.newServiceTemplate(namespace, api.ProtocolUDP) if tweak != nil { tweak(svc) } result, err := j.Client.Services(namespace).Create(svc) if err != nil { Failf("Failed to create UDP Service %q: %v", svc.Name, err) } return result } func (j *ServiceTestJig) SanityCheckService(svc *api.Service, svcType api.ServiceType) { if svc.Spec.Type != svcType { Failf("unexpected Spec.Type (%s) for service, expected %s", svc.Spec.Type, svcType) } expectNodePorts := false if svcType != api.ServiceTypeClusterIP { expectNodePorts = true } for i, port := range svc.Spec.Ports { hasNodePort := (port.NodePort != 0) if hasNodePort != expectNodePorts { Failf("unexpected Spec.Ports[%d].NodePort (%d) for service", i, port.NodePort) } if hasNodePort { if !ServiceNodePortRange.Contains(port.NodePort) { Failf("out-of-range nodePort (%d) for service", port.NodePort) } } } expectIngress := false if svcType == api.ServiceTypeLoadBalancer { expectIngress = true } hasIngress := len(svc.Status.LoadBalancer.Ingress) != 0 if hasIngress != expectIngress { Failf("unexpected number of Status.LoadBalancer.Ingress (%d) for service", len(svc.Status.LoadBalancer.Ingress)) } if hasIngress { for i, ing := range svc.Status.LoadBalancer.Ingress { if ing.IP == "" && ing.Hostname == "" { Failf("unexpected Status.LoadBalancer.Ingress[%d] for service: %#v", i, ing) } } } } // UpdateService fetches a service, calls the update function on it, and // then attempts to send the updated service. It tries up to 3 times in the // face of timeouts and conflicts. 
func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*api.Service)) (*api.Service, error) { for i := 0; i < 3; i++ { service, err := j.Client.Services(namespace).Get(name) if err != nil { return nil, fmt.Errorf("Failed to get Service %q: %v", name, err) } update(service) service, err = j.Client.Services(namespace).Update(service) if err == nil { return service, nil } if !errors.IsConflict(err) && !errors.IsServerTimeout(err) { return nil, fmt.Errorf("Failed to update Service %q: %v", name, err) } } return nil, fmt.Errorf("Too many retries updating Service %q", name) } // UpdateServiceOrFail fetches a service, calls the update function on it, and // then attempts to send the updated service. It tries up to 3 times in the // face of timeouts and conflicts. func (j *ServiceTestJig) UpdateServiceOrFail(namespace, name string, update func(*api.Service)) *api.Service { svc, err := j.UpdateService(namespace, name, update) if err != nil { Failf(err.Error()) } return svc } func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, initial int) *api.Service { var err error var service *api.Service for i := 1; i < ServiceNodePortRange.Size; i++ { offs1 := initial - ServiceNodePortRange.Base offs2 := (offs1 + i) % ServiceNodePortRange.Size newPort := ServiceNodePortRange.Base + offs2 service, err = j.UpdateService(namespace, name, func(s *api.Service) { s.Spec.Ports[0].NodePort = newPort }) if err != nil && strings.Contains(err.Error(), "provided port is already allocated") { Logf("tried nodePort %d, but it is in use, will try another", newPort) continue } // Otherwise err was nil or err was a real error break } if err != nil { Failf("Could not change the nodePort: %v", err) } return service } func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string) *api.Service { var service *api.Service Logf("Waiting up to %v for service %q to have a LoadBalancer", loadBalancerCreateTimeout, name) pollFunc := func() (bool, error) { 
svc, err := j.Client.Services(namespace).Get(name) if err != nil { return false, err } if len(svc.Status.LoadBalancer.Ingress) > 0 { service = svc return true, nil } return false, nil } if err := wait.PollImmediate(poll, loadBalancerCreateTimeout, pollFunc); err != nil { Failf("Timeout waiting for service %q to have a load balancer", name) } return service } func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string, ip string, port int) *api.Service { // TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable defer func() { if err := EnsureLoadBalancerResourcesDeleted(ip, strconv.Itoa(port)); err != nil { Logf("Failed to delete cloud resources for service: %s %d (%v)", ip, port, err) } }() var service *api.Service Logf("Waiting up to %v for service %q to have no LoadBalancer", loadBalancerCreateTimeout, name) pollFunc := func() (bool, error) { svc, err := j.Client.Services(namespace).Get(name) if err != nil { return false, err } if len(svc.Status.LoadBalancer.Ingress) == 0 { service = svc return true, nil } return false, nil } if err := wait.PollImmediate(poll, loadBalancerCreateTimeout, pollFunc); err != nil { Failf("Timeout waiting for service %q to have no load balancer", name) } return service } func (j *ServiceTestJig) TestReachableHTTP(host string, port int, timeout time.Duration) { if err := wait.PollImmediate(poll, timeout, func() (bool, error) { return testReachableHTTP(host, port, "/echo?msg=hello", "hello") }); err != nil { Failf("Could not reach HTTP service through %v:%v after %v: %v", host, port, timeout, err) } } func (j *ServiceTestJig) TestNotReachableHTTP(host string, port int, timeout time.Duration) { if err := wait.PollImmediate(poll, timeout, func() (bool, error) { return testNotReachableHTTP(host, port) }); err != nil { Failf("Could still reach HTTP service through %v:%v after %v: %v", host, port, timeout, err) } } func (j *ServiceTestJig) TestReachableUDP(host string, port int, 
timeout time.Duration) { if err := wait.PollImmediate(poll, timeout, func() (bool, error) { return testReachableUDP(host, port, "echo hello", "hello") }); err != nil { Failf("Could not reach UDP service through %v:%v after %v: %v", host, port, timeout, err) } } func (j *ServiceTestJig) TestNotReachableUDP(host string, port int, timeout time.Duration) { if err := wait.PollImmediate(poll, timeout, func() (bool, error) { return testNotReachableUDP(host, port, "echo hello") }); err != nil { Failf("Could still reach UDP service through %v:%v after %v: %v", host, port, timeout, err) } } func getIngressPoint(ing *api.LoadBalancerIngress) string { host := ing.IP if host == "" { host = ing.Hostname } return host } // newRCTemplate returns the default api.ReplicationController object for // this jig, but does not actually create the RC. The default RC has the same // name as the jig and runs the "netexec" container. func (j *ServiceTestJig) newRCTemplate(namespace string) *api.ReplicationController { rc := &api.ReplicationController{ ObjectMeta: api.ObjectMeta{ Namespace: namespace, Name: j.Name, Labels: j.Labels, }, Spec: api.ReplicationControllerSpec{ Replicas: 1, Selector: j.Labels, Template: &api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ Labels: j.Labels, }, Spec: api.PodSpec{ Containers: []api.Container{ { Name: "netexec", Image: "gcr.io/google_containers/netexec:1.4", Args: []string{"--http-port=80", "--udp-port=80"}, ReadinessProbe: &api.Probe{ PeriodSeconds: 3, Handler: api.Handler{ HTTPGet: &api.HTTPGetAction{ Port: intstr.FromInt(80), Path: "/hostName", }, }, }, }, }, TerminationGracePeriodSeconds: new(int64), }, }, }, } return rc } // RunOrFail creates a ReplicationController and Pod(s) and waits for the // Pod(s) to be running. Callers can provide a function to tweak the RC object // before it is created. 
func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *api.ReplicationController)) *api.ReplicationController { rc := j.newRCTemplate(namespace) if tweak != nil { tweak(rc) } result, err := j.Client.ReplicationControllers(namespace).Create(rc) if err != nil { Failf("Failed to created RC %q: %v", rc.Name, err) } pods, err := j.waitForPodsCreated(namespace, rc.Spec.Replicas) if err != nil { Failf("Failed to create pods: %v", err) } if err := j.waitForPodsReady(namespace, pods); err != nil { Failf("Failed waiting for pods to be running: %v", err) } return result } func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]string, error) { timeout := 2 * time.Minute // List the pods, making sure we observe all the replicas. label := labels.SelectorFromSet(labels.Set(j.Labels)) Logf("Waiting up to %v for %d pods to be created", timeout, replicas) for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) { options := api.ListOptions{LabelSelector: label} pods, err := j.Client.Pods(namespace).List(options) if err != nil { return nil, err } found := []string{} for _, pod := range pods.Items { if pod.DeletionTimestamp != nil { continue } found = append(found, pod.Name) } if len(found) == replicas { Logf("Found all %d pods", replicas) return found, nil } Logf("Found %d/%d pods - will retry", len(found), replicas) } return nil, fmt.Errorf("Timeout waiting for %d pods to be created", replicas) } func (j *ServiceTestJig) waitForPodsReady(namespace string, pods []string) error { timeout := 2 * time.Minute if !checkPodsRunningReady(j.Client, namespace, pods, timeout) { return fmt.Errorf("Timeout waiting for %d pods to be ready") } return nil } // Simple helper class to avoid too much boilerplate in tests type ServiceTestFixture struct { ServiceName string Namespace string Client *client.Client TestId string Labels map[string]string rcs map[string]bool services map[string]bool name string image string } func 
NewServerTest(client *client.Client, namespace string, serviceName string) *ServiceTestFixture { t := &ServiceTestFixture{} t.Client = client t.Namespace = namespace t.ServiceName = serviceName t.TestId = t.ServiceName + "-" + string(util.NewUUID()) t.Labels = map[string]string{ "testid": t.TestId, } t.rcs = make(map[string]bool) t.services = make(map[string]bool) t.name = "webserver" t.image = "gcr.io/google_containers/test-webserver" return t } // Build default config for a service (which can then be changed) func (t *ServiceTestFixture) BuildServiceSpec() *api.Service { service := &api.Service{ ObjectMeta: api.ObjectMeta{ Name: t.ServiceName, Namespace: t.Namespace, }, Spec: api.ServiceSpec{ Selector: t.Labels, Ports: []api.ServicePort{{ Port: 80, TargetPort: intstr.FromInt(80), }}, }, } return service } // CreateWebserverRC creates rc-backed pods with the well-known webserver // configuration and records it for cleanup. func (t *ServiceTestFixture) CreateWebserverRC(replicas int) *api.ReplicationController { rcSpec := rcByNamePort(t.name, replicas, t.image, 80, api.ProtocolTCP, t.Labels) rcAct, err := t.createRC(rcSpec) if err != nil { Failf("Failed to create rc %s: %v", rcSpec.Name, err) } if err := verifyPods(t.Client, t.Namespace, t.name, false, replicas); err != nil { Failf("Failed to create %d pods with name %s: %v", replicas, t.name, err) } return rcAct } // createRC creates a replication controller and records it for cleanup. 
func (t *ServiceTestFixture) createRC(rc *api.ReplicationController) (*api.ReplicationController, error) { rc, err := t.Client.ReplicationControllers(t.Namespace).Create(rc) if err == nil { t.rcs[rc.Name] = true } return rc, err } // Create a service, and record it for cleanup func (t *ServiceTestFixture) CreateService(service *api.Service) (*api.Service, error) { result, err := t.Client.Services(t.Namespace).Create(service) if err == nil { t.services[service.Name] = true } return result, err } // Delete a service, and remove it from the cleanup list func (t *ServiceTestFixture) DeleteService(serviceName string) error { err := t.Client.Services(t.Namespace).Delete(serviceName) if err == nil { delete(t.services, serviceName) } return err } func (t *ServiceTestFixture) Cleanup() []error { var errs []error for rcName := range t.rcs { By("stopping RC " + rcName + " in namespace " + t.Namespace) // First, resize the RC to 0. old, err := t.Client.ReplicationControllers(t.Namespace).Get(rcName) if err != nil { errs = append(errs, err) } old.Spec.Replicas = 0 if _, err := t.Client.ReplicationControllers(t.Namespace).Update(old); err != nil { errs = append(errs, err) } // TODO(mikedanese): Wait. // Then, delete the RC altogether. if err := t.Client.ReplicationControllers(t.Namespace).Delete(rcName); err != nil { errs = append(errs, err) } } for serviceName := range t.services { By("deleting service " + serviceName + " in namespace " + t.Namespace) err := t.Client.Services(t.Namespace).Delete(serviceName) if err != nil { errs = append(errs, err) } } return errs }
dcbw/kubernetes
test/e2e/service.go
GO
apache-2.0
63,318
/* Copyright (c) 2016 Microsoft Corporation. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Author: Leonardo de Moura */ #include "library/vm/vm.h" namespace lean { static void del_instr_at(unsigned pc, buffer<vm_instr> & code) { code.erase(pc); // we must adjust pc of other instructions for (unsigned i = 0; i < code.size(); i++) { vm_instr & c = code[i]; for (unsigned j = 0; j < c.get_num_pcs(); j++) { if (c.get_pc(j) > pc) c.set_pc(j, c.get_pc(j) - 1); } } } typedef rb_tree<unsigned, unsigned_cmp> addr_set; /* Collect addresses in addr_set that are goto/branching targets */ static void collect_targets(buffer<vm_instr> & code, addr_set & r) { for (auto c : code) { for (unsigned j = 0; j < c.get_num_pcs(); j++) r.insert(c.get_pc(j)); } } /** \brief Applies the following transformation ... pc: drop n pc+1: drop m ... ===> ... pc: drop n+m ... */ static void compress_drop_drop(buffer<vm_instr> & code) { if (code.empty()) return; addr_set targets; collect_targets(code, targets); unsigned i = code.size() - 1; while (i > 0) { --i; if (code[i].op() == opcode::Drop && code[i+1].op() == opcode::Drop && /* If i+1 is a goto/branch target, then we should not merge the two Drops */ !targets.contains(i+1)) { code[i] = mk_drop_instr(code[i].get_num() + code[i+1].get_num()); del_instr_at(i+1, code); } } } /** \brief Applies the following transformation pc_1 : goto pc_2 ... pc_2 : ret ===> pc_1 : ret ... pc_2 : ret */ static void compress_goto_ret(buffer<vm_instr> & code) { unsigned i = code.size(); while (i > 0) { --i; if (code[i].op() == opcode::Goto) { unsigned pc = code[i].get_goto_pc(); if (code[pc].op() == opcode::Ret) { code[i] = mk_ret_instr(); } } } } void optimize(environment const &, buffer<vm_instr> & code) { compress_goto_ret(code); compress_drop_drop(code); } }
fgdorais/lean
src/library/vm/optimize.cpp
C++
apache-2.0
2,195
// Copyright 2017 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.devtools.build.android.desugar; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; import static com.google.common.base.Preconditions.checkState; import com.google.common.collect.ImmutableList; import java.util.Comparator; import java.util.HashSet; import java.util.Iterator; import java.util.Set; import java.util.TreeSet; import org.objectweb.asm.ClassReader; import org.objectweb.asm.ClassVisitor; import org.objectweb.asm.MethodVisitor; import org.objectweb.asm.Opcodes; import org.objectweb.asm.Type; import org.objectweb.asm.tree.AbstractInsnNode; import org.objectweb.asm.tree.InsnList; import org.objectweb.asm.tree.InsnNode; import org.objectweb.asm.tree.MethodInsnNode; import org.objectweb.asm.tree.MethodNode; /** * Fixer of classes that extend interfaces with default methods to declare any missing methods * explicitly and call the corresponding companion method generated by {@link InterfaceDesugaring}. 
*/ public class DefaultMethodClassFixer extends ClassVisitor { private final ClassReaderFactory classpath; private final ClassReaderFactory bootclasspath; private final ClassLoader targetLoader; private final DependencyCollector depsCollector; private final HashSet<String> instanceMethods = new HashSet<>(); private boolean isInterface; private String internalName; private ImmutableList<String> directInterfaces; private String superName; /** This method node caches <clinit>, and flushes out in {@code visitEnd()}; */ private MethodNode clInitMethodNode; public DefaultMethodClassFixer( ClassVisitor dest, ClassReaderFactory classpath, DependencyCollector depsCollector, ClassReaderFactory bootclasspath, ClassLoader targetLoader) { super(Opcodes.ASM5, dest); this.classpath = classpath; this.bootclasspath = bootclasspath; this.targetLoader = targetLoader; this.depsCollector = depsCollector; } @Override public void visit( int version, int access, String name, String signature, String superName, String[] interfaces) { checkState(this.directInterfaces == null); isInterface = BitFlags.isSet(access, Opcodes.ACC_INTERFACE); internalName = name; checkArgument( superName != null || "java/lang/Object".equals(name), // ASM promises this "Type without superclass: %s", name); this.directInterfaces = ImmutableList.copyOf(interfaces); this.superName = superName; super.visit(version, access, name, signature, superName, interfaces); } @Override public void visitEnd() { if (!isInterface && defaultMethodsDefined(directInterfaces)) { // Inherited methods take precedence over default methods, so visit all superclasses and // figure out what methods they declare before stubbing in any missing default methods. recordInheritedMethods(); stubMissingDefaultAndBridgeMethods(); // Check whether there are interfaces with default methods and <clinit>. 
If yes, the following // method call will return a list of interface fields to access in the <clinit> to trigger // the initialization of these interfaces. ImmutableList<String> companionsToTriggerInterfaceClinit = collectOrderedCompanionsToTriggerInterfaceClinit(directInterfaces); if (!companionsToTriggerInterfaceClinit.isEmpty()) { if (clInitMethodNode == null) { clInitMethodNode = new MethodNode(Opcodes.ACC_STATIC, "<clinit>", "()V", null, null); } desugarClinitToTriggerInterfaceInitializers(companionsToTriggerInterfaceClinit); } } if (clInitMethodNode != null && super.cv != null) { // Write <clinit> to the chained visitor. clInitMethodNode.accept(super.cv); } super.visitEnd(); } private boolean isClinitAlreadyDesugared( ImmutableList<String> companionsToAccessToTriggerInterfaceClinit) { InsnList instructions = clInitMethodNode.instructions; if (instructions.size() <= companionsToAccessToTriggerInterfaceClinit.size()) { // The <clinit> must end with RETURN, so if the instruction count is less than or equal to // the companion class count, this <clinit> has not been desugared. 
return false; } Iterator<AbstractInsnNode> iterator = instructions.iterator(); for (String companion : companionsToAccessToTriggerInterfaceClinit) { if (!iterator.hasNext()) { return false; } AbstractInsnNode first = iterator.next(); if (!(first instanceof MethodInsnNode)) { return false; } MethodInsnNode methodInsnNode = (MethodInsnNode) first; if (methodInsnNode.getOpcode() != Opcodes.INVOKESTATIC || !methodInsnNode.owner.equals(companion) || !methodInsnNode.name.equals( InterfaceDesugaring.COMPANION_METHOD_TO_TRIGGER_INTERFACE_CLINIT_NAME)) { return false; } checkState( methodInsnNode.desc.equals( InterfaceDesugaring.COMPANION_METHOD_TO_TRIGGER_INTERFACE_CLINIT_DESC), "Inconsistent method desc: %s vs %s", methodInsnNode.desc, InterfaceDesugaring.COMPANION_METHOD_TO_TRIGGER_INTERFACE_CLINIT_DESC); if (!iterator.hasNext()) { return false; } AbstractInsnNode second = iterator.next(); if (second.getOpcode() != Opcodes.POP) { return false; } } return true; } private void desugarClinitToTriggerInterfaceInitializers( ImmutableList<String> companionsToTriggerInterfaceClinit) { if (isClinitAlreadyDesugared(companionsToTriggerInterfaceClinit)) { return; } InsnList desugarInsts = new InsnList(); for (String companionClass : companionsToTriggerInterfaceClinit) { desugarInsts.add( new MethodInsnNode( Opcodes.INVOKESTATIC, companionClass, InterfaceDesugaring.COMPANION_METHOD_TO_TRIGGER_INTERFACE_CLINIT_NAME, InterfaceDesugaring.COMPANION_METHOD_TO_TRIGGER_INTERFACE_CLINIT_DESC, false)); } if (clInitMethodNode.instructions.size() == 0) { clInitMethodNode.instructions.insert(new InsnNode(Opcodes.RETURN)); } clInitMethodNode.instructions.insertBefore( clInitMethodNode.instructions.getFirst(), desugarInsts); } @Override public MethodVisitor visitMethod( int access, String name, String desc, String signature, String[] exceptions) { // Keep track of instance methods implemented in this class for later. 
if (!isInterface) { recordIfInstanceMethod(access, name, desc); } if ("<clinit>".equals(name)) { checkState(clInitMethodNode == null, "This class fixer has been used. "); clInitMethodNode = new MethodNode(access, name, desc, signature, exceptions); return clInitMethodNode; } return super.visitMethod(access, name, desc, signature, exceptions); } private void stubMissingDefaultAndBridgeMethods() { TreeSet<Class<?>> allInterfaces = new TreeSet<>(InterfaceComparator.INSTANCE); for (String direct : directInterfaces) { // Loading ensures all transitively implemented interfaces can be loaded, which is necessary // to produce correct default method stubs in all cases. We could do without classloading but // it's convenient to rely on Class.isAssignableFrom to compute subtype relationships, and // we'd still have to insist that all transitively implemented interfaces can be loaded. // We don't load the visited class, however, in case it's a generated lambda class. Class<?> itf = loadFromInternal(direct); collectInterfaces(itf, allInterfaces); } Class<?> superclass = loadFromInternal(superName); for (Class<?> interfaceToVisit : allInterfaces) { // if J extends I, J is allowed to redefine I's default methods. The comparator we used // above makes sure we visit J before I in that case so we can use J's definition. if (superclass != null && interfaceToVisit.isAssignableFrom(superclass)) { // superclass already implements this interface, so we must skip it. The superclass will // be similarly rewritten or comes from the bootclasspath; either way we don't need to and // shouldn't stub default methods for this interface. 
continue; } stubMissingDefaultAndBridgeMethods(interfaceToVisit.getName().replace('.', '/')); } } private Class<?> loadFromInternal(String internalName) { try { return targetLoader.loadClass(internalName.replace('/', '.')); } catch (ClassNotFoundException e) { throw new IllegalStateException( "Couldn't load " + internalName + ", is the classpath complete?", e); } } private void collectInterfaces(Class<?> itf, Set<Class<?>> dest) { checkArgument(itf.isInterface()); if (!dest.add(itf)) { return; } for (Class<?> implemented : itf.getInterfaces()) { collectInterfaces(implemented, dest); } } private void recordInheritedMethods() { InstanceMethodRecorder recorder = new InstanceMethodRecorder(); String internalName = superName; while (internalName != null) { ClassReader bytecode = bootclasspath.readIfKnown(internalName); if (bytecode == null) { bytecode = checkNotNull( classpath.readIfKnown(internalName), "Superclass not found: %s", internalName); } bytecode.accept(recorder, ClassReader.SKIP_CODE | ClassReader.SKIP_DEBUG); internalName = bytecode.getSuperName(); } } private void recordIfInstanceMethod(int access, String name, String desc) { if (BitFlags.noneSet(access, Opcodes.ACC_STATIC)) { // Record all declared instance methods, including abstract, bridge, and native methods, as // they all take precedence over default methods. instanceMethods.add(name + ":" + desc); } } /** * Starting from the given interfaces, this method scans the interface hierarchy, finds the * interfaces that have default methods and <clinit>, and returns the companion class names of * these interfaces. * * <p>Note that the returned companion classes are ordered in the order of the interface * initialization, which is consistent with the JVM behavior. For example, "class A implements I1, * I2", the returned list would be [I1$$CC, I2$$CC], not [I2$$CC, I1$$CC]. 
*/ private ImmutableList<String> collectOrderedCompanionsToTriggerInterfaceClinit( ImmutableList<String> interfaces) { ImmutableList.Builder<String> companionCollector = ImmutableList.builder(); HashSet<String> visitedInterfaces = new HashSet<>(); for (String anInterface : interfaces) { collectOrderedCompanionsToTriggerInterfaceClinit( anInterface, visitedInterfaces, companionCollector); } return companionCollector.build(); } private void collectOrderedCompanionsToTriggerInterfaceClinit( String anInterface, HashSet<String> visitedInterfaces, ImmutableList.Builder<String> companionCollector) { if (!visitedInterfaces.add(anInterface)) { return; } ClassReader bytecode = classpath.readIfKnown(anInterface); if (bytecode == null || bootclasspath.isKnown(anInterface)) { return; } String[] parentInterfaces = bytecode.getInterfaces(); if (parentInterfaces != null && parentInterfaces.length > 0) { for (String parentInterface : parentInterfaces) { collectOrderedCompanionsToTriggerInterfaceClinit( parentInterface, visitedInterfaces, companionCollector); } } InterfaceInitializationNecessityDetector necessityDetector = new InterfaceInitializationNecessityDetector(bytecode.getClassName()); bytecode.accept(necessityDetector, ClassReader.SKIP_DEBUG); if (necessityDetector.needsToInitialize()) { // If we need to initialize this interface, we initialize its companion class, and its // companion class will initialize the interface then. This desigin decision is made to avoid // access issue, e.g., package-private interfaces. companionCollector.add(InterfaceDesugaring.getCompanionClassName(anInterface)); } } /** * Recursively searches the given interfaces for default methods not implemented by this class * directly. If this method returns true we need to think about stubbing missing default methods. 
*/ private boolean defaultMethodsDefined(ImmutableList<String> interfaces) { for (String implemented : interfaces) { if (bootclasspath.isKnown(implemented)) { continue; } ClassReader bytecode = classpath.readIfKnown(implemented); if (bytecode == null) { // Interface isn't on the classpath, which indicates incomplete classpaths. Record missing // dependency so we can check it later. If we don't check then we may get runtime failures // or wrong behavior from default methods that should've been stubbed in. // TODO(kmb): Print a warning so people can start fixing their deps? depsCollector.missingImplementedInterface(internalName, implemented); } else { // Class in classpath and bootclasspath is a bad idea but in any event, assume the // bootclasspath will take precedence like in a classloader. // We can skip code attributes as we just need to find default methods to stub. DefaultMethodFinder finder = new DefaultMethodFinder(); bytecode.accept(finder, ClassReader.SKIP_CODE | ClassReader.SKIP_DEBUG); if (finder.foundDefaultMethods()) { return true; } } } return false; } /** Returns {@code true} for non-bridge default methods not in {@link #instanceMethods}. */ private boolean shouldStubAsDefaultMethod(int access, String name, String desc) { // Ignore private methods, which technically aren't default methods and can only be called from // other methods defined in the interface. This also ignores lambda body methods, which is fine // as we don't want or need to stub those. Also ignore bridge methods as javac adds them to // concrete classes as needed anyway and we handle them separately for generated lambda classes. // Note that an exception is that, if a bridge method is for a default interface method, javac // will NOT generate the bridge method in the implementing class. So we need extra logic to // handle these bridge methods. 
return isNonBridgeDefaultMethod(access) && !instanceMethods.contains(name + ":" + desc); } private static boolean isNonBridgeDefaultMethod(int access) { return BitFlags.noneSet( access, Opcodes.ACC_ABSTRACT | Opcodes.ACC_STATIC | Opcodes.ACC_BRIDGE | Opcodes.ACC_PRIVATE); } /** * Check whether an interface method is a bridge method for a default interface method. This type * of bridge methods is special, as they are not put in the implementing classes by javac. */ private boolean shouldStubAsBridgeDefaultMethod(int access, String name, String desc) { return BitFlags.isSet(access, Opcodes.ACC_BRIDGE | Opcodes.ACC_PUBLIC) && BitFlags.noneSet(access, Opcodes.ACC_ABSTRACT | Opcodes.ACC_STATIC) && !instanceMethods.contains(name + ":" + desc); } private void stubMissingDefaultAndBridgeMethods(String implemented) { if (bootclasspath.isKnown(implemented)) { // Default methods on the bootclasspath will be available at runtime, so just ignore them. return; } ClassReader bytecode = checkNotNull( classpath.readIfKnown(implemented), "Couldn't find interface %s implemented by %s", implemented, internalName); bytecode.accept(new DefaultMethodStubber(), ClassReader.SKIP_DEBUG); } /** * Visitor for interfaces that produces delegates in the class visited by the outer {@link * DefaultMethodClassFixer} for every default method encountered. 
*/ private class DefaultMethodStubber extends ClassVisitor { private String stubbedInterfaceName; public DefaultMethodStubber() { super(Opcodes.ASM5); } @Override public void visit( int version, int access, String name, String signature, String superName, String[] interfaces) { checkArgument(BitFlags.isSet(access, Opcodes.ACC_INTERFACE)); checkState(stubbedInterfaceName == null); stubbedInterfaceName = name; } @Override public MethodVisitor visitMethod( int access, String name, String desc, String signature, String[] exceptions) { if (shouldStubAsDefaultMethod(access, name, desc)) { // Remember we stubbed this method in case it's also defined by subsequently visited // interfaces. javac would force the method to be defined explicitly if there any two // definitions conflict, but see stubMissingDefaultMethods() for how we deal with default // methods redefined in interfaces extending another. recordIfInstanceMethod(access, name, desc); depsCollector.assumeCompanionClass( internalName, InterfaceDesugaring.getCompanionClassName(stubbedInterfaceName)); // Add this method to the class we're desugaring and stub in a body to call the default // implementation in the interface's companion class. ijar omits these methods when setting // ACC_SYNTHETIC modifier, so don't. // Signatures can be wrong, e.g., when type variables are introduced, instantiated, or // refined in the class we're processing, so drop them. 
MethodVisitor stubMethod = DefaultMethodClassFixer.this.visitMethod(access, name, desc, (String) null, exceptions); int slot = 0; stubMethod.visitVarInsn(Opcodes.ALOAD, slot++); // load the receiver Type neededType = Type.getMethodType(desc); for (Type arg : neededType.getArgumentTypes()) { stubMethod.visitVarInsn(arg.getOpcode(Opcodes.ILOAD), slot); slot += arg.getSize(); } stubMethod.visitMethodInsn( Opcodes.INVOKESTATIC, InterfaceDesugaring.getCompanionClassName(stubbedInterfaceName), name, InterfaceDesugaring.companionDefaultMethodDescriptor(stubbedInterfaceName, desc), /*itf*/ false); stubMethod.visitInsn(neededType.getReturnType().getOpcode(Opcodes.IRETURN)); stubMethod.visitMaxs(0, 0); // rely on class writer to compute these stubMethod.visitEnd(); return null; } else if (shouldStubAsBridgeDefaultMethod(access, name, desc)) { recordIfInstanceMethod(access, name, desc); // For bridges we just copy their bodies instead of going through the companion class. // Meanwhile, we also need to desugar the copied method bodies, so that any calls to // interface methods are correctly handled. return new InterfaceDesugaring.InterfaceInvocationRewriter( DefaultMethodClassFixer.this.visitMethod(access, name, desc, (String) null, exceptions), stubbedInterfaceName, bootclasspath, depsCollector, internalName); } else { return null; // we don't care about the actual code in these methods } } } /** * Visitor for interfaces that recursively searches interfaces for default method declarations. 
*/ private class DefaultMethodFinder extends ClassVisitor { @SuppressWarnings("hiding") private ImmutableList<String> interfaces; private boolean found; public DefaultMethodFinder() { super(Opcodes.ASM5); } @Override public void visit( int version, int access, String name, String signature, String superName, String[] interfaces) { checkArgument(BitFlags.isSet(access, Opcodes.ACC_INTERFACE)); checkState(this.interfaces == null); this.interfaces = ImmutableList.copyOf(interfaces); } public boolean foundDefaultMethods() { return found; } @Override public void visitEnd() { if (!found) { found = defaultMethodsDefined(this.interfaces); } } @Override public MethodVisitor visitMethod( int access, String name, String desc, String signature, String[] exceptions) { if (!found && shouldStubAsDefaultMethod(access, name, desc)) { // Found a default method we're not ignoring (instanceMethods at this point contains methods // the top-level visited class implements itself). found = true; } return null; // we don't care about the actual code in these methods } } private class InstanceMethodRecorder extends ClassVisitor { public InstanceMethodRecorder() { super(Opcodes.ASM5); } @Override public void visit( int version, int access, String name, String signature, String superName, String[] interfaces) { checkArgument(BitFlags.noneSet(access, Opcodes.ACC_INTERFACE)); super.visit(version, access, name, signature, superName, interfaces); } @Override public MethodVisitor visitMethod( int access, String name, String desc, String signature, String[] exceptions) { recordIfInstanceMethod(access, name, desc); return null; } } /** * Detector to determine whether an interface needs to be initialized when it is loaded. * * <p>If the interface has a default method, and its <clinit> initializes any of its fields, then * this interface needs to be initialized. 
*/ private static class InterfaceInitializationNecessityDetector extends ClassVisitor { private final String internalName; private boolean hasFieldInitializedInClinit; private boolean hasDefaultMethods; public InterfaceInitializationNecessityDetector(String internalName) { super(Opcodes.ASM5); this.internalName = internalName; } public boolean needsToInitialize() { return hasDefaultMethods && hasFieldInitializedInClinit; } @Override public void visit( int version, int access, String name, String signature, String superName, String[] interfaces) { super.visit(version, access, name, signature, superName, interfaces); checkState( internalName.equals(name), "Inconsistent internal names: expected=%s, real=%s", internalName, name); checkArgument( BitFlags.isSet(access, Opcodes.ACC_INTERFACE), "This class visitor is only used for interfaces."); } @Override public MethodVisitor visitMethod( int access, String name, String desc, String signature, String[] exceptions) { if (!hasDefaultMethods) { hasDefaultMethods = isNonBridgeDefaultMethod(access); } if ("<clinit>".equals(name)) { return new MethodVisitor(Opcodes.ASM5) { @Override public void visitFieldInsn(int opcode, String owner, String name, String desc) { if (opcode == Opcodes.PUTSTATIC && internalName.equals(owner)) { hasFieldInitializedInClinit = true; } } }; } return null; // Do not care about the code. } } /** Comparator for interfaces that compares by whether interfaces extend one another. 
*/ enum InterfaceComparator implements Comparator<Class<?>> { INSTANCE; @Override public int compare(Class<?> o1, Class<?> o2) { checkArgument(o1.isInterface()); checkArgument(o2.isInterface()); if (o1 == o2) { return 0; } if (o1.isAssignableFrom(o2)) { // o1 is supertype of o2 return 1; // we want o1 to come after o2 } if (o2.isAssignableFrom(o1)) { // o2 is supertype of o1 return -1; // we want o2 to come after o1 } // o1 and o2 aren't comparable so arbitrarily impose lexicographical ordering return o1.getName().compareTo(o2.getName()); } } }
damienmg/bazel
src/tools/android/java/com/google/devtools/build/android/desugar/DefaultMethodClassFixer.java
Java
apache-2.0
24,887
/* * Copyright (C) 2010 JFrog Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jfrog.hudson.util; import org.apache.commons.lang.StringUtils; import org.jfrog.hudson.ArtifactoryServer; import org.jfrog.hudson.DeployerOverrider; import org.jfrog.hudson.ResolverOverrider; import org.jfrog.hudson.util.plugins.PluginsUtils; /** * A utility class the helps find the preferred credentials to use out of each setting and server * * @author Noam Y. Tenne */ public abstract class CredentialManager { private CredentialManager() { } /** * Decides and returns the preferred deployment credentials to use from this builder settings and selected server * * @param deployerOverrider Deploy-overriding capable builder * @param server Selected Artifactory server * @return Preferred deployment credentials */ public static Credentials getPreferredDeployer(DeployerOverrider deployerOverrider, ArtifactoryServer server) { if (deployerOverrider.isOverridingDefaultDeployer()) { String credentialsId = deployerOverrider.getDeployerCredentialsId(); return PluginsUtils.credentialsLookup(credentialsId); } if (server != null) { Credentials deployerCredentials = server.getDeployerCredentials(); if (deployerCredentials != null) { return deployerCredentials; } } return new Credentials(null, null); } public static Credentials getPreferredDeployer(String credentialsId, ArtifactoryServer server) { String username; String password; if(StringUtils.isBlank(credentialsId)) { Credentials deployedCredentials 
= server.getDeployerCredentials(); username = deployedCredentials.getUsername(); password = deployedCredentials.getPassword(); } else { return PluginsUtils.credentialsLookup(credentialsId); } return new Credentials(username, password); } /** * Decides and returns the preferred resolver credentials to use from this builder settings and selected server * Override priority: * 1) Job override resolver * 2) Plugin manage override resolver * 3) Plugin manage general * @param resolverOverrider Resolve-overriding capable builder * @param server Selected Artifactory server * @return Preferred resolver credentials */ public static Credentials getPreferredResolver(ResolverOverrider resolverOverrider, ArtifactoryServer server) { if (resolverOverrider != null && resolverOverrider.isOverridingDefaultResolver()) { String credentialsId = resolverOverrider.getResolverCredentialsId(); return PluginsUtils.credentialsLookup(credentialsId); } return server.getResolvingCredentials(); } public static Credentials getPreferredResolver(String credentialsId, ArtifactoryServer server) { String username; String password; if(StringUtils.isBlank(credentialsId)) { Credentials deployedCredentials = server.getResolvingCredentials(); username = deployedCredentials.getUsername(); password = deployedCredentials.getPassword(); } else { return PluginsUtils.credentialsLookup(credentialsId); } return new Credentials(username, password); } }
christ66/jenkins-artifactory-plugin
src/main/java/org/jfrog/hudson/util/CredentialManager.java
Java
apache-2.0
3,973
// Copyright 2014 Samsung Electronics Co., Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// ECMA-262 15.8.2.5: atan2(+0, x) for x < 0 (including -Infinity) is exactly +PI.
var actual = Math.atan2(+0, -Infinity);
assert(actual === Math.PI);
tilmannOSG/jerryscript
tests/jerry-test-suite/15/15.08/15.08.02/15.08.02.05/15.08.02.05-010.js
JavaScript
apache-2.0
653
/** @file

  A brief file description

  @section license License

  Licensed to the Apache Software Foundation (ASF) under one
  or more contributor license agreements.  See the NOTICE file
  distributed with this work for additional information
  regarding copyright ownership.  The ASF licenses this file
  to you under the Apache License, Version 2.0 (the
  "License"); you may not use this file except in compliance
  with the License.  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
 */

//////////////////////////////////////////////////////////////////////
//
// The EThread Class
//
/////////////////////////////////////////////////////////////////////
#include "P_EventSystem.h"

#if HAVE_EVENTFD
#include <sys/eventfd.h>
#endif

struct AIOCallback;

#define MAX_HEARTBEATS_MISSED 10
#define NO_HEARTBEAT -1
#define THREAD_MAX_HEARTBEAT_MSECONDS 60

// Global flag polled by every REGULAR event loop: once set true,
// EThread::execute() returns on its next iteration.
volatile bool shutdown_event_system = false;

// Default constructor: only zeroes the per-thread scratch area.
EThread::EThread()
{
  memset(thread_private, 0, PER_THREAD_DATA);
}

// Constructor for a REGULAR event thread with a numeric id. Sets up the
// cross-thread signalling mechanism (eventfd, Solaris port, or a pipe,
// depending on platform support).
EThread::EThread(ThreadType att, int anid) : id(anid), tt(att)
{
  ethreads_to_be_signalled = (EThread **)ats_malloc(MAX_EVENT_THREADS * sizeof(EThread *));
  memset(ethreads_to_be_signalled, 0, MAX_EVENT_THREADS * sizeof(EThread *));
  memset(thread_private, 0, PER_THREAD_DATA);
#if HAVE_EVENTFD
  evfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
  if (evfd < 0) {
    if (errno == EINVAL) { // flags invalid for kernel <= 2.6.26
      // Retry without flags; older kernels accept only eventfd(initval, 0).
      evfd = eventfd(0, 0);
      if (evfd < 0) {
        Fatal("EThread::EThread: %d=eventfd(0,0),errno(%d)", evfd, errno);
      }
    } else {
      Fatal("EThread::EThread: %d=eventfd(0,EFD_NONBLOCK | EFD_CLOEXEC),errno(%d)", evfd, errno);
    }
  }
#elif TS_USE_PORT
/* Solaris ports requires no crutches to do cross thread signaling.
 * We'll just port_send the event straight over the port.
 */
#else
  // Fallback: a non-blocking, close-on-exec self-pipe.
  ink_release_assert(pipe(evpipe) >= 0);
  fcntl(evpipe[0], F_SETFD, FD_CLOEXEC);
  fcntl(evpipe[0], F_SETFL, O_NONBLOCK);
  fcntl(evpipe[1], F_SETFD, FD_CLOEXEC);
  fcntl(evpipe[1], F_SETFL, O_NONBLOCK);
#endif
}

// Constructor for a DEDICATED thread that runs a single start event.
EThread::EThread(ThreadType att, Event *e) : tt(att), start_event(e)
{
  ink_assert(att == DEDICATED);
  memset(thread_private, 0, PER_THREAD_DATA);
}

// Provide a destructor so that SDK functions which create and destroy
// threads won't have to deal with EThread memory deallocation.
EThread::~EThread()
{
  if (n_ethreads_to_be_signalled > 0) {
    flush_signals(this);
  }
  ats_free(ethreads_to_be_signalled);
  // TODO: This can't be deleted ....
  // delete[]l1_hash;
}

// True when this thread services the given event type (bitmask test).
bool
EThread::is_event_type(EventType et)
{
  return (event_types & (1 << static_cast<int>(et))) != 0;
}

// Add the given event type to the set this thread services.
void
EThread::set_event_type(EventType et)
{
  event_types |= (1 << static_cast<int>(et));
}

// Run one event's continuation under its mutex. If the lock cannot be
// acquired the event is re-queued locally with a retry delay; periodic
// events are re-armed after the handler returns.
void
EThread::process_event(Event *e, int calling_code)
{
  ink_assert((!e->in_the_prot_queue && !e->in_the_priority_queue));
  MUTEX_TRY_LOCK_FOR(lock, e->mutex, this, e->continuation);
  if (!lock.is_locked()) {
    // Missed the continuation's lock: retry this event later.
    e->timeout_at = cur_time + DELAY_FOR_RETRY;
    EventQueueExternal.enqueue_local(e);
  } else {
    if (e->cancelled) {
      free_event(e);
      return;
    }
    Continuation *c_temp = e->continuation;
    e->continuation->handleEvent(calling_code, e);
    ink_assert(!e->in_the_priority_queue);
    ink_assert(c_temp == e->continuation);
    MUTEX_RELEASE(lock);
    if (e->period) {
      // Periodic event: re-schedule unless the handler already re-queued it.
      if (!e->in_the_prot_queue && !e->in_the_priority_queue) {
        if (e->period < 0) {
          // Negative period marks a poll ("negative") event; keep it negative.
          e->timeout_at = e->period;
        } else {
          this->get_hrtime_updated();
          e->timeout_at = cur_time + e->period;
          if (e->timeout_at < cur_time) {
            e->timeout_at = cur_time;
          }
        }
        EventQueueExternal.enqueue_local(e);
      }
    } else if (!e->in_the_prot_queue && !e->in_the_priority_queue) {
      // One-shot event that was not re-queued by its handler: done.
      free_event(e);
    }
  }
}

//
// void  EThread::execute()
//
// Execute loops forever on:
// Find the earliest event.
// Sleep until the event time or until an earlier event is inserted
// When its time for the event, try to get the appropriate continuation
// lock. If successful, call the continuation, otherwise put the event back
// into the queue.
//
void
EThread::execute()
{
  // Do the start event first.
  // coverity[lock]
  if (start_event) {
    MUTEX_TAKE_LOCK_FOR(start_event->mutex, this, start_event->continuation);
    start_event->continuation->handleEvent(EVENT_IMMEDIATE, start_event);
    MUTEX_UNTAKE_LOCK(start_event->mutex, this);
    free_event(start_event);
    start_event = nullptr;
  }

  switch (tt) {
  case REGULAR: {
    Event *e;
    Que(Event, link) NegativeQueue;
    ink_hrtime next_time = 0;

    // give priority to immediate events
    for (;;) {
      if (unlikely(shutdown_event_system == true)) {
        return;
      }
      // execute all the available external events that have
      // already been dequeued
      cur_time = Thread::get_hrtime_updated();
      while ((e = EventQueueExternal.dequeue_local())) {
        if (e->cancelled) {
          free_event(e);
        } else if (!e->timeout_at) { // IMMEDIATE
          ink_assert(e->period == 0);
          process_event(e, e->callback_event);
        } else if (e->timeout_at > 0) { // INTERVAL
          EventQueue.enqueue(e, cur_time);
        } else { // NEGATIVE
          // Insert in timeout order (most negative last) into the poll queue.
          Event *p = nullptr;
          Event *a = NegativeQueue.head;
          while (a && a->timeout_at > e->timeout_at) {
            p = a;
            a = a->link.next;
          }
          if (!a) {
            NegativeQueue.enqueue(e);
          } else {
            NegativeQueue.insert(e, p);
          }
        }
      }
      bool done_one;
      do {
        done_one = false;
        // execute all the eligible internal events
        EventQueue.check_ready(cur_time, this);
        while ((e = EventQueue.dequeue_ready(cur_time))) {
          ink_assert(e);
          ink_assert(e->timeout_at > 0);
          if (e->cancelled) {
            free_event(e);
          } else {
            done_one = true;
            process_event(e, e->callback_event);
          }
        }
      } while (done_one);
      // execute any negative (poll) events
      if (NegativeQueue.head) {
        if (n_ethreads_to_be_signalled) {
          flush_signals(this);
        }
        // dequeue all the external events and put them in a local
        // queue. If there are no external events available, don't
        // do a cond_timedwait.
        if (!INK_ATOMICLIST_EMPTY(EventQueueExternal.al)) {
          EventQueueExternal.dequeue_timed(cur_time, next_time, false);
        }
        while ((e = EventQueueExternal.dequeue_local())) {
          if (!e->timeout_at) {
            process_event(e, e->callback_event);
          } else {
            if (e->cancelled) {
              free_event(e);
            } else {
              // If its a negative event, it must be a result of
              // a negative event, which has been turned into a
              // timed-event (because of a missed lock), executed
              // before the poll. So, it must
              // be executed in this round (because you can't have
              // more than one poll between two executions of a
              // negative event)
              if (e->timeout_at < 0) {
                Event *p = nullptr;
                Event *a = NegativeQueue.head;
                while (a && a->timeout_at > e->timeout_at) {
                  p = a;
                  a = a->link.next;
                }
                if (!a) {
                  NegativeQueue.enqueue(e);
                } else {
                  NegativeQueue.insert(e, p);
                }
              } else {
                EventQueue.enqueue(e, cur_time);
              }
            }
          }
        }
        // execute poll events
        while ((e = NegativeQueue.dequeue())) {
          process_event(e, EVENT_POLL);
        }
        if (!INK_ATOMICLIST_EMPTY(EventQueueExternal.al)) {
          EventQueueExternal.dequeue_timed(cur_time, next_time, false);
        }
      } else { // Means there are no negative events
        // Sleep (bounded by the heartbeat interval) until the next timed
        // event is due or an external event wakes us up.
        next_time             = EventQueue.earliest_timeout();
        ink_hrtime sleep_time = next_time - cur_time;

        if (sleep_time > THREAD_MAX_HEARTBEAT_MSECONDS * HRTIME_MSECOND) {
          next_time = cur_time + THREAD_MAX_HEARTBEAT_MSECONDS * HRTIME_MSECOND;
        }
        // dequeue all the external events and put them in a local
        // queue. If there are no external events available, do a
        // cond_timedwait.
        if (n_ethreads_to_be_signalled) {
          flush_signals(this);
        }
        EventQueueExternal.dequeue_timed(cur_time, next_time, true);
      }
    }
  }

  case DEDICATED: {
    // The dedicated thread's single job (start_event) already ran above.
    break;
  }

  default:
    ink_assert(!"bad case value (execute)");
    break;
  } /* End switch */
  // coverity[missing_unlock]
}
rahmalik/trafficserver
iocore/eventsystem/UnixEThread.cc
C++
apache-2.0
9,183
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdio.h>
#include <stdlib.h>

#include <iostream>
#include <fstream>
#include <sstream>

#include <mesos/log/log.hpp>

#include <process/clock.hpp>
#include <process/future.hpp>
#include <process/process.hpp>
#include <process/time.hpp>

#include <stout/bytes.hpp>
#include <stout/error.hpp>
#include <stout/foreach.hpp>
#include <stout/os.hpp>
#include <stout/stopwatch.hpp>
#include <stout/strings.hpp>

#include <stout/os/read.hpp>

#include "log/tool/initialize.hpp"
#include "log/tool/benchmark.hpp"

#include "logging/logging.hpp"

using namespace process;

using std::cout;
using std::endl;
using std::ifstream;
using std::ofstream;
using std::string;
using std::vector;

using mesos::log::Log;

namespace mesos {
namespace internal {
namespace log {
namespace tool {

// Registers all command line flags accepted by the benchmark tool.
Benchmark::Flags::Flags()
{
  add(&Flags::quorum,
      "quorum",
      "Quorum size");

  add(&Flags::path,
      "path",
      "Path to the log");

  add(&Flags::servers,
      "servers",
      "ZooKeeper servers");

  add(&Flags::znode,
      "znode",
      "ZooKeeper znode");

  add(&Flags::input,
      "input",
      "Path to the input trace file. Each line in the trace file\n"
      "specifies the size of the append (e.g. 100B, 2MB, etc.)");

  add(&Flags::output,
      "output",
      "Path to the output file");

  add(&Flags::type,
      "type",
      "Type of data to be written (zero, one, random)\n"
      "  zero:   all bits are 0\n"
      "  one:    all bits are 1\n"
      "  random: all bits are randomly chosen\n",
      "random");

  add(&Flags::initialize,
      "initialize",
      "Whether to initialize the log",
      true);
}

// Parses flags, optionally initializes the log, replays the input trace of
// append sizes against a replicated-log writer, and records per-append
// latencies to the output file. Returns an Error for any flag/IO/log failure.
Try<Nothing> Benchmark::execute(int argc, char** argv)
{
  flags.setUsageMessage(
      "Usage: " + name() + " [options]\n"
      "\n"
      "This command is used to do performance test on the\n"
      "replicated log. It takes a trace file of write sizes\n"
      "and replay that trace to measure the latency of each\n"
      "write. The data to be written for each write can be\n"
      "specified using the --type flag.\n"
      "\n");

  // Configure the tool by parsing command line arguments.
  if (argc > 0 && argv != nullptr) {
    Try<flags::Warnings> load = flags.load(None(), argc, argv);
    if (load.isError()) {
      return Error(flags.usage(load.error()));
    }

    if (flags.help) {
      return Error(flags.usage());
    }

    process::initialize();
    logging::initialize(argv[0], false, flags);

    // Log any flag warnings (after logging is initialized).
    foreach (const flags::Warning& warning, load->warnings) {
      LOG(WARNING) << warning.message;
    }
  }

  // All of these options are mandatory; fail fast if any is missing.
  if (flags.quorum.isNone()) {
    return Error(flags.usage("Missing required option --quorum"));
  }

  if (flags.path.isNone()) {
    return Error(flags.usage("Missing required option --path"));
  }

  if (flags.servers.isNone()) {
    return Error(flags.usage("Missing required option --servers"));
  }

  if (flags.znode.isNone()) {
    return Error(flags.usage("Missing required option --znode"));
  }

  if (flags.input.isNone()) {
    return Error(flags.usage("Missing required option --input"));
  }

  if (flags.output.isNone()) {
    return Error(flags.usage("Missing required option --output"));
  }

  // Initialize the log.
  if (flags.initialize) {
    Initialize initialize;
    initialize.flags.path = flags.path;

    Try<Nothing> execution = initialize.execute();
    if (execution.isError()) {
      return Error(execution.error());
    }
  }

  // Create the log.
  Log log(
      flags.quorum.get(),
      flags.path.get(),
      flags.servers.get(),
      Seconds(10),
      flags.znode.get());

  // Create the log writer.
  Log::Writer writer(&log);

  Future<Option<Log::Position>> position = writer.start();

  if (!position.await(Seconds(15))) {
    return Error("Failed to start a log writer: timed out");
  } else if (!position.isReady()) {
    return Error("Failed to start a log writer: " +
                 (position.isFailed()
                  ? position.failure()
                  : "Discarded future"));
  }

  // Statistics to output.
  vector<Bytes> sizes;
  vector<Duration> durations;
  vector<Time> timestamps;

  // Read sizes from the input trace file.
  ifstream input(flags.input.get().c_str());

  if (!input.is_open()) {
    return Error("Failed to open the trace file " + flags.input.get());
  }

  string line;
  while (getline(input, line)) {
    Try<Bytes> size = Bytes::parse(strings::trim(line));
    if (size.isError()) {
      return Error("Failed to parse the trace file: " + size.error());
    }

    sizes.push_back(size.get());
  }

  input.close();

  // Generate the data to be written, one blob per trace entry.
  // NOTE: for "random", each blob repeats a single randomly chosen byte
  // (os::random() % 256), not independently random bytes.
  vector<string> data;
  for (size_t i = 0; i < sizes.size(); i++) {
    if (flags.type == "one") {
      data.push_back(string(sizes[i].bytes(), static_cast<char>(0xff)));
    } else if (flags.type == "random") {
      data.push_back(string(sizes[i].bytes(), os::random() % 256));
    } else {
      data.push_back(string(sizes[i].bytes(), 0));
    }
  }

  Stopwatch stopwatch;
  stopwatch.start(); // Measure the total time for the whole run.

  for (size_t i = 0; i < sizes.size(); i++) {
    // Inner stopwatch (shadows the outer one) times a single append.
    Stopwatch stopwatch;
    stopwatch.start();

    position = writer.append(data[i]);

    if (!position.await(Seconds(10))) {
      return Error("Failed to append: timed out");
    } else if (!position.isReady()) {
      return Error("Failed to append: " +
                   (position.isFailed()
                    ? position.failure()
                    : "Discarded future"));
    } else if (position.get().isNone()) {
      return Error("Failed to append: exclusive write promise lost");
    }

    durations.push_back(stopwatch.elapsed());
    timestamps.push_back(Clock::now());
  }

  cout << "Total number of appends: " << sizes.size() << endl;
  cout << "Total time used: " << stopwatch.elapsed() << endl;

  // Output statistics.
  ofstream output(flags.output.get().c_str());

  if (!output.is_open()) {
    return Error("Failed to open the output file " + flags.output.get());
  }

  for (size_t i = 0; i < sizes.size(); i++) {
    output << timestamps[i]
           << " Appended " << sizes[i].bytes() << " bytes"
           << " in " << durations[i].ms() << " ms" << endl;
  }

  return Nothing();
}

} // namespace tool {
} // namespace log {
} // namespace internal {
} // namespace mesos {
shakamunyi/mesos
src/log/tool/benchmark.cpp
C++
apache-2.0
7,142
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for
// license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.

namespace Microsoft.Azure.Management.Automation.Models
{
    using Microsoft.Azure;
    using Microsoft.Azure.Management;
    using Microsoft.Azure.Management.Automation;
    using Newtonsoft.Json;
    using System.Linq;

    /// <summary>
    /// The connection type property associated with the entity.
    /// </summary>
    public partial class ConnectionTypeAssociationProperty
    {
        /// <summary>
        /// Creates an instance with no connection type name set.
        /// </summary>
        public ConnectionTypeAssociationProperty()
        {
            CustomInit();
        }

        /// <summary>
        /// Creates an instance with the given connection type name.
        /// </summary>
        /// <param name="name">Gets or sets the name of the connection
        /// type.</param>
        public ConnectionTypeAssociationProperty(string name = default(string))
        {
            this.Name = name;
            CustomInit();
        }

        /// <summary>
        /// Hook for custom initialization logic; supplied by a partial
        /// class implementation, if any.
        /// </summary>
        partial void CustomInit();

        /// <summary>
        /// Gets or sets the name of the connection type.
        /// </summary>
        [JsonProperty(PropertyName = "name")]
        public string Name { get; set; }

    }
}
SiddharthChatrolaMs/azure-sdk-for-net
src/SDKs/Automation/Management.Automation/Generated/Models/ConnectionTypeAssociationProperty.cs
C#
apache-2.0
1,705
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.druid.query.groupby.epinephelinae;

import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Ordering;
import com.google.common.primitives.Ints;
import org.apache.druid.common.config.NullHandling;
import org.apache.druid.data.input.MapBasedRow;
import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.CountAggregatorFactory;
import org.apache.druid.query.aggregation.LongSumAggregatorFactory;
import org.apache.druid.query.groupby.epinephelinae.Grouper.Entry;
import org.junit.Assert;
import org.junit.Test;

import java.nio.ByteBuffer;
import java.util.Comparator;
import java.util.List;

/**
 * Unit tests for {@link BufferArrayGrouper}: aggregation over int keys and
 * the static buffer-capacity computation.
 */
public class BufferArrayGrouperTest
{
  /**
   * Aggregates a single row ("value" = 10) under several int keys and checks
   * that per-key sums and counts reflect how often each key was aggregated.
   */
  @Test
  public void testAggregate()
  {
    final TestColumnSelectorFactory columnSelectorFactory = GrouperTestUtil.newColumnSelectorFactory();
    final IntGrouper grouper = newGrouper(columnSelectorFactory, 1024);

    columnSelectorFactory.setRow(new MapBasedRow(0, ImmutableMap.of("value", 10L)));

    // Key 6 is aggregated 3 times, key 12 twice, key 10 once.
    grouper.aggregate(12);
    grouper.aggregate(6);
    grouper.aggregate(10);
    grouper.aggregate(6);
    grouper.aggregate(12);
    grouper.aggregate(6);

    final List<Entry<Integer>> expected = ImmutableList.of(
        new Grouper.Entry<>(6, new Object[]{30L, 3L}),
        new Grouper.Entry<>(10, new Object[]{10L, 1L}),
        new Grouper.Entry<>(12, new Object[]{20L, 2L})
    );
    final List<Entry<Integer>> unsortedEntries = Lists.newArrayList(grouper.iterator(false));

    // Iterator order is unspecified when sorted=false, so sort by key first.
    Assert.assertEquals(
        expected,
        Ordering.from((Comparator<Entry<Integer>>) (o1, o2) -> Ints.compare(o1.getKey(), o2.getKey()))
                .sortedCopy(unsortedEntries)
    );
  }

  /**
   * Builds a heap-buffer-backed grouper with a long-sum and a count
   * aggregator and a cardinality of 1000.
   */
  private BufferArrayGrouper newGrouper(
      TestColumnSelectorFactory columnSelectorFactory,
      int bufferSize
  )
  {
    final ByteBuffer buffer = ByteBuffer.allocate(bufferSize);

    final BufferArrayGrouper grouper = new BufferArrayGrouper(
        Suppliers.ofInstance(buffer),
        columnSelectorFactory,
        new AggregatorFactory[]{
            new LongSumAggregatorFactory("valueSum", "value"),
            new CountAggregatorFactory("count")
        },
        1000
    );
    grouper.init();
    return grouper;
  }

  /**
   * Checks the buffer capacity required for several cardinalities. The
   * expected byte counts differ between SQL-compatible null handling and
   * the default mode because nullability information needs extra space.
   */
  @Test
  public void testRequiredBufferCapacity()
  {
    int[] cardinalityArray = new int[]{1, 10, Integer.MAX_VALUE - 1};
    AggregatorFactory[] aggregatorFactories = new AggregatorFactory[]{
        new LongSumAggregatorFactory("sum", "sum")
    };

    long[] requiredSizes;

    if (NullHandling.sqlCompatible()) {
      // We need additional size to store nullability information.
      requiredSizes = new long[]{19, 101, 19058917368L};
    } else {
      requiredSizes = new long[]{17, 90, 16911433721L};
    }

    for (int i = 0; i < cardinalityArray.length; i++) {
      Assert.assertEquals(requiredSizes[i], BufferArrayGrouper.requiredBufferCapacity(
          cardinalityArray[i],
          aggregatorFactories
      ));
    }
  }
}
dkhwangbo/druid
processing/src/test/java/org/apache/druid/query/groupby/epinephelinae/BufferArrayGrouperTest.java
Java
apache-2.0
3,937
/* Copyright 2007 Brian Tanner brian@tannerpages.com http://brian.tannerpages.com Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package rlVizLib.visualization; import java.util.Observable; import rlVizLib.visualization.interfaces.AgentOnValueFunctionDataProvider; import rlVizLib.utilities.UtilityShop; import java.awt.Color; import java.awt.Graphics2D; import java.awt.geom.Rectangle2D; import java.util.Observer; import org.rlcommunity.rlglue.codec.types.Reward_observation_action_terminal; import rlVizLib.general.TinyGlue; import rlVizLib.visualization.interfaces.GlueStateProvider; public class AgentOnValueFunctionVizComponent implements SelfUpdatingVizComponent, Observer { private VizComponentChangeListener theChangeListener; private AgentOnValueFunctionDataProvider dataProvider; private boolean enabled = true; public AgentOnValueFunctionVizComponent(AgentOnValueFunctionDataProvider dataProvider, TinyGlue theGlueState) { this.dataProvider = dataProvider; theGlueState.addObserver(this); } public void setEnabled(boolean newEnableValue) { if (newEnableValue == false && this.enabled) { disable(); } if (newEnableValue == true && !this.enabled) { enable(); } } private void disable() { enabled = false; theChangeListener.vizComponentChanged(this); } private void enable() { enabled = true; } public void render(Graphics2D g) { if (!enabled) { Color myClearColor = new Color(0.0f, 0.0f, 0.0f, 0.0f); g.setColor(myClearColor); g.setBackground(myClearColor); g.clearRect(0, 0, 1, 1); return; } 
dataProvider.updateAgentState(); g.setColor(Color.BLUE); double transX = UtilityShop.normalizeValue(dataProvider.getCurrentStateInDimension(0), dataProvider.getMinValueForDim(0), dataProvider.getMaxValueForDim(0)); double transY = UtilityShop.normalizeValue(dataProvider.getCurrentStateInDimension(1), dataProvider.getMinValueForDim(1), dataProvider.getMaxValueForDim(1)); Rectangle2D agentRect = new Rectangle2D.Double(transX-.01, transY-.01, .02, .02); g.fill(agentRect); } public void setVizComponentChangeListener(VizComponentChangeListener theChangeListener) { this.theChangeListener = theChangeListener; } public void update(Observable o, Object theEvent) { if (theChangeListener != null) { theChangeListener.vizComponentChanged(this); } } }
cosmoharrigan/rl-viz
projects/rlVizLibJava/src/rlVizLib/visualization/AgentOnValueFunctionVizComponent.java
Java
apache-2.0
3,132
package junit.runner; /** * This class defines the current version of JUnit */ public class Version { private Version() { // don't instantiate } public static String id() { return "4.12"; } public static void main(String[] args) { System.out.println(id()); } }
tascape/th-junit4
src/main/java/junit/runner/Version.java
Java
apache-2.0
279
/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#include <aws/importexport/model/GetStatusResult.h>
#include <aws/core/utils/xml/XmlSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/logging/LogMacros.h>

#include <utility>

using namespace Aws::ImportExport::Model;
using namespace Aws::Utils::Xml;
using namespace Aws::Utils::Logging;
using namespace Aws::Utils;
using namespace Aws;

// Default-construct with sentinel values for the non-string members.
GetStatusResult::GetStatusResult() :
    m_jobType(JobType::NOT_SET),
    m_errorCount(0)
{
}

// Construct directly from a service response by delegating to operator=.
GetStatusResult::GetStatusResult(const Aws::AmazonWebServiceResult<XmlDocument>& result) :
    m_jobType(JobType::NOT_SET),
    m_errorCount(0)
{
  *this = result;
}

// Populates this result from the XML payload of a GetStatus response.
// Each child element is optional; members keep their defaults when the
// corresponding node is absent.
GetStatusResult& GetStatusResult::operator =(const Aws::AmazonWebServiceResult<XmlDocument>& result)
{
  const XmlDocument& xmlDocument = result.GetPayload();
  XmlNode rootNode = xmlDocument.GetRootElement();
  XmlNode resultNode = rootNode;
  // The payload may or may not be wrapped in a <GetStatusResult> element.
  if (!rootNode.IsNull() && (rootNode.GetName() != "GetStatusResult"))
  {
    resultNode = rootNode.FirstChild("GetStatusResult");
  }

  if(!resultNode.IsNull())
  {
    XmlNode jobIdNode = resultNode.FirstChild("JobId");
    if(!jobIdNode.IsNull())
    {
      m_jobId = Aws::Utils::Xml::DecodeEscapedXmlText(jobIdNode.GetText());
    }
    XmlNode jobTypeNode = resultNode.FirstChild("JobType");
    if(!jobTypeNode.IsNull())
    {
      m_jobType = JobTypeMapper::GetJobTypeForName(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(jobTypeNode.GetText()).c_str()).c_str());
    }
    XmlNode locationCodeNode = resultNode.FirstChild("LocationCode");
    if(!locationCodeNode.IsNull())
    {
      m_locationCode = Aws::Utils::Xml::DecodeEscapedXmlText(locationCodeNode.GetText());
    }
    XmlNode locationMessageNode = resultNode.FirstChild("LocationMessage");
    if(!locationMessageNode.IsNull())
    {
      m_locationMessage = Aws::Utils::Xml::DecodeEscapedXmlText(locationMessageNode.GetText());
    }
    XmlNode progressCodeNode = resultNode.FirstChild("ProgressCode");
    if(!progressCodeNode.IsNull())
    {
      m_progressCode = Aws::Utils::Xml::DecodeEscapedXmlText(progressCodeNode.GetText());
    }
    XmlNode progressMessageNode = resultNode.FirstChild("ProgressMessage");
    if(!progressMessageNode.IsNull())
    {
      m_progressMessage = Aws::Utils::Xml::DecodeEscapedXmlText(progressMessageNode.GetText());
    }
    XmlNode carrierNode = resultNode.FirstChild("Carrier");
    if(!carrierNode.IsNull())
    {
      m_carrier = Aws::Utils::Xml::DecodeEscapedXmlText(carrierNode.GetText());
    }
    XmlNode trackingNumberNode = resultNode.FirstChild("TrackingNumber");
    if(!trackingNumberNode.IsNull())
    {
      m_trackingNumber = Aws::Utils::Xml::DecodeEscapedXmlText(trackingNumberNode.GetText());
    }
    XmlNode logBucketNode = resultNode.FirstChild("LogBucket");
    if(!logBucketNode.IsNull())
    {
      m_logBucket = Aws::Utils::Xml::DecodeEscapedXmlText(logBucketNode.GetText());
    }
    XmlNode logKeyNode = resultNode.FirstChild("LogKey");
    if(!logKeyNode.IsNull())
    {
      m_logKey = Aws::Utils::Xml::DecodeEscapedXmlText(logKeyNode.GetText());
    }
    XmlNode errorCountNode = resultNode.FirstChild("ErrorCount");
    if(!errorCountNode.IsNull())
    {
      m_errorCount = StringUtils::ConvertToInt32(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(errorCountNode.GetText()).c_str()).c_str());
    }
    XmlNode signatureNode = resultNode.FirstChild("Signature");
    if(!signatureNode.IsNull())
    {
      m_signature = Aws::Utils::Xml::DecodeEscapedXmlText(signatureNode.GetText());
    }
    XmlNode signatureFileContentsNode = resultNode.FirstChild("SignatureFileContents");
    if(!signatureFileContentsNode.IsNull())
    {
      m_signatureFileContents = Aws::Utils::Xml::DecodeEscapedXmlText(signatureFileContentsNode.GetText());
    }
    XmlNode currentManifestNode = resultNode.FirstChild("CurrentManifest");
    if(!currentManifestNode.IsNull())
    {
      m_currentManifest = Aws::Utils::Xml::DecodeEscapedXmlText(currentManifestNode.GetText());
    }
    XmlNode creationDateNode = resultNode.FirstChild("CreationDate");
    if(!creationDateNode.IsNull())
    {
      m_creationDate = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(creationDateNode.GetText()).c_str()).c_str(), DateFormat::ISO_8601);
    }
    XmlNode artifactListNode = resultNode.FirstChild("ArtifactList");
    if(!artifactListNode.IsNull())
    {
      // Collect all <member> children of <ArtifactList>.
      XmlNode artifactListMember = artifactListNode.FirstChild("member");
      while(!artifactListMember.IsNull())
      {
        m_artifactList.push_back(artifactListMember);
        artifactListMember = artifactListMember.NextNode("member");
      }

    }
  }

  if (!rootNode.IsNull()) {
    // Capture the response metadata (request id) for logging/diagnostics.
    XmlNode responseMetadataNode = rootNode.FirstChild("ResponseMetadata");
    m_responseMetadata = responseMetadataNode;
    AWS_LOGSTREAM_DEBUG("Aws::ImportExport::Model::GetStatusResult", "x-amzn-request-id: " << m_responseMetadata.GetRequestId() );
  }

  return *this;
}
jt70471/aws-sdk-cpp
aws-cpp-sdk-importexport/source/model/GetStatusResult.cpp
C++
apache-2.0
5,140
/*
 * Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
using System.IO;
using System.Xml;
using System.Text;
using Amazon.S3.Util;
using Amazon.Runtime;
using Amazon.Runtime.Internal;
using Amazon.Runtime.Internal.Transform;
using System.Globalization;
using Amazon.Util;

#pragma warning disable 1591

namespace Amazon.S3.Model.Internal.MarshallTransformations
{
    /// <summary>
    /// Put Object Acl Request Marshaller
    /// </summary>
    public class PutACLRequestMarshaller : IMarshaller<IRequest, PutACLRequest> ,IMarshaller<IRequest,Amazon.Runtime.AmazonWebServiceRequest>
    {
        /// <summary>
        /// Untyped entry point required by the generic marshaller interface;
        /// simply casts and delegates to the typed overload.
        /// </summary>
        public IRequest Marshall(Amazon.Runtime.AmazonWebServiceRequest input)
        {
            return this.Marshall((PutACLRequest)input);
        }

        /// <summary>
        /// Builds the HTTP PUT ?acl request for a bucket or object: resource
        /// path, canned-ACL header, optional versionId, and (when an explicit
        /// AccessControlList is set) an AccessControlPolicy XML body with its
        /// Content-MD5 checksum.
        /// </summary>
        public IRequest Marshall(PutACLRequest putObjectAclRequest)
        {
            IRequest request = new DefaultRequest(putObjectAclRequest, "AmazonS3");

            request.HttpMethod = "PUT";

            if (putObjectAclRequest.IsSetCannedACL())
                request.Headers.Add(HeaderKeys.XAmzAclHeader, S3Transforms.ToStringValue(putObjectAclRequest.CannedACL));

            // if we are putting the acl onto the bucket, the keyname component will collapse to empty string
            request.ResourcePath = string.Format(CultureInfo.InvariantCulture, "/{0}/{1}",
                                                 S3Transforms.ToStringValue(putObjectAclRequest.BucketName),
                                                 S3Transforms.ToStringValue(putObjectAclRequest.Key));

            request.AddSubResource("acl");

            if (putObjectAclRequest.IsSetVersionId())
                request.AddSubResource("versionId", S3Transforms.ToStringValue(putObjectAclRequest.VersionId));

            var stringWriter = new StringWriter(System.Globalization.CultureInfo.InvariantCulture);
            // OmitXmlDeclaration: S3 expects the policy document without an XML prolog.
            using (var xmlWriter = XmlWriter.Create(stringWriter, new XmlWriterSettings() { Encoding = Encoding.UTF8, OmitXmlDeclaration = true }))
            {
                var accessControlPolicyAccessControlPolicy = putObjectAclRequest.AccessControlList;
                if (accessControlPolicyAccessControlPolicy != null)
                {
                    xmlWriter.WriteStartElement("AccessControlPolicy", "");
                    // Emit <AccessControlList> with one <Grant> per grant.
                    var accessControlPolicyAccessControlPolicygrantsList = accessControlPolicyAccessControlPolicy.Grants;
                    if (accessControlPolicyAccessControlPolicygrantsList != null && accessControlPolicyAccessControlPolicygrantsList.Count > 0)
                    {
                        xmlWriter.WriteStartElement("AccessControlList", "");
                        foreach (var accessControlPolicyAccessControlPolicygrantsListValue in accessControlPolicyAccessControlPolicygrantsList)
                        {
                            xmlWriter.WriteStartElement("Grant", "");
                            if (accessControlPolicyAccessControlPolicygrantsListValue != null)
                            {
                                // Grantee identification: type attribute plus whichever
                                // of DisplayName/EmailAddress/ID/URI are present.
                                var granteeGrantee = accessControlPolicyAccessControlPolicygrantsListValue.Grantee;
                                if (granteeGrantee != null)
                                {
                                    xmlWriter.WriteStartElement("Grantee", "");
                                    if (granteeGrantee.IsSetType())
                                    {
                                        xmlWriter.WriteAttributeString("xsi", "type", "http://www.w3.org/2001/XMLSchema-instance", granteeGrantee.Type.ToString());
                                    }
                                    if (granteeGrantee.IsSetDisplayName())
                                    {
                                        xmlWriter.WriteElementString("DisplayName", "", S3Transforms.ToXmlStringValue(granteeGrantee.DisplayName));
                                    }
                                    if (granteeGrantee.IsSetEmailAddress())
                                    {
                                        xmlWriter.WriteElementString("EmailAddress", "", S3Transforms.ToXmlStringValue(granteeGrantee.EmailAddress));
                                    }
                                    if (granteeGrantee.IsSetCanonicalUser())
                                    {
                                        xmlWriter.WriteElementString("ID", "", S3Transforms.ToXmlStringValue(granteeGrantee.CanonicalUser));
                                    }
                                    if (granteeGrantee.IsSetURI())
                                    {
                                        xmlWriter.WriteElementString("URI", "", S3Transforms.ToXmlStringValue(granteeGrantee.URI));
                                    }
                                    xmlWriter.WriteEndElement();
                                }
                                if (accessControlPolicyAccessControlPolicygrantsListValue.IsSetPermission())
                                {
                                    xmlWriter.WriteElementString("Permission", "", S3Transforms.ToXmlStringValue(accessControlPolicyAccessControlPolicygrantsListValue.Permission));
                                }
                            }
                            xmlWriter.WriteEndElement();
                        }
                        xmlWriter.WriteEndElement();

                        // Emit the bucket/object <Owner> element, when present.
                        var ownerOwner = accessControlPolicyAccessControlPolicy.Owner;
                        if (ownerOwner != null)
                        {
                            xmlWriter.WriteStartElement("Owner", "");
                            if (ownerOwner.IsSetDisplayName())
                            {
                                xmlWriter.WriteElementString("DisplayName", "", S3Transforms.ToXmlStringValue(ownerOwner.DisplayName));
                            }
                            if (ownerOwner.IsSetId())
                            {
                                xmlWriter.WriteElementString("ID", "", S3Transforms.ToXmlStringValue(ownerOwner.Id));
                            }
                            xmlWriter.WriteEndElement();
                        }
                    }
                    xmlWriter.WriteEndElement();
                }
            }

            try
            {
                // Attach the XML body and its MD5 checksum (required by S3 for ACL puts).
                var content = stringWriter.ToString();
                request.Content = Encoding.UTF8.GetBytes(content);
                request.Headers[HeaderKeys.ContentTypeHeader] = "application/xml";

                string checksum = AmazonS3Util.GenerateChecksumForContent(content, true);
                request.Headers[HeaderKeys.ContentMD5Header] = checksum;
            }
            catch (EncoderFallbackException e)
            {
                throw new AmazonServiceException("Unable to marshall request to XML", e);
            }

            return request;
        }
    }
}
rafd123/aws-sdk-net
sdk/src/Services/S3/Custom/Model/Internal/MarshallTransformations/PutACLRequestMarshaller.cs
C#
apache-2.0
8,804
/* * Copyright 2014 The Netty Project * * The Netty Project licenses this file to you under the Apache License, version 2.0 (the * "License"); you may not use this file except in compliance with the License. You may obtain a * copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package io.netty.handler.codec.http2; import java.util.Collection; /** * Manager for the state of an HTTP/2 connection with the remote end-point. */ public interface Http2Connection { /** * Listener for life-cycle events for streams in this connection. */ interface Listener { /** * Notifies the listener that the given stream was added to the connection. This stream may * not yet be active (i.e. open/half-closed). */ void streamAdded(Http2Stream stream); /** * Notifies the listener that the given stream was made active (i.e. open in at least one * direction). */ void streamActive(Http2Stream stream); /** * Notifies the listener that the given stream is now half-closed. The stream can be * inspected to determine which side is closed. */ void streamHalfClosed(Http2Stream stream); /** * Notifies the listener that the given stream is now closed in both directions. */ void streamInactive(Http2Stream stream); /** * Notifies the listener that the given stream has now been removed from the connection and * will no longer be returned via {@link Http2Connection#stream(int)}. The connection may * maintain inactive streams for some time before removing them. */ void streamRemoved(Http2Stream stream); /** * Notifies the listener that a priority tree parent change has occurred. This method will be invoked * in a top down order relative to the priority tree. 
This method will also be invoked after all tree * structure changes have been made and the tree is in steady state relative to the priority change * which caused the tree structure to change. * @param stream The stream which had a parent change (new parent and children will be steady state) * @param oldParent The old parent which {@code stream} used to be a child of (may be {@code null}) */ void priorityTreeParentChanged(Http2Stream stream, Http2Stream oldParent); /** * Notifies the listener that a parent dependency is about to change * This is called while the tree is being restructured and so the tree * structure is not necessarily steady state. * @param stream The stream which the parent is about to change to {@code newParent} * @param newParent The stream which will be the parent of {@code stream} */ void priorityTreeParentChanging(Http2Stream stream, Http2Stream newParent); /** * Notifies the listener that the weight has changed for {@code stream} * @param stream The stream which the weight has changed * @param oldWeight The old weight for {@code stream} */ void onWeightChanged(Http2Stream stream, short oldWeight); /** * Called when a GO_AWAY frame has either been sent or received for the connection. */ void goingAway(); } /** * A view of the connection from one endpoint (local or remote). */ interface Endpoint<F extends Http2FlowController> { /** * Returns the next valid streamId for this endpoint. If negative, the stream IDs are * exhausted for this endpoint an no further streams may be created. */ int nextStreamId(); /** * Indicates whether the given streamId is from the set of IDs used by this endpoint to * create new streams. */ boolean createdStreamId(int streamId); /** * Indicates whether or not this endpoint is currently accepting new streams. This will be * be false if {@link #numActiveStreams()} + 1 >= {@link #maxStreams()} or if the stream IDs * for this endpoint have been exhausted (i.e. {@link #nextStreamId()} < 0). 
*/ boolean acceptingNewStreams(); /** * Creates a stream initiated by this endpoint. This could fail for the following reasons: * <ul> * <li>The requested stream ID is not the next sequential ID for this endpoint.</li> * <li>The stream already exists.</li> * <li>The number of concurrent streams is above the allowed threshold for this endpoint.</li> * <li>The connection is marked as going away.</li> * </ul> * <p> * The caller is expected to {@link Http2Stream#open()} the stream. * @param streamId The ID of the stream * @see Http2Stream#open() * @see Http2Stream#open(boolean) */ Http2Stream createStream(int streamId) throws Http2Exception; /** * Creates a push stream in the reserved state for this endpoint and notifies all listeners. * This could fail for the following reasons: * <ul> * <li>Server push is not allowed to the opposite endpoint.</li> * <li>The requested stream ID is not the next sequential stream ID for this endpoint.</li> * <li>The number of concurrent streams is above the allowed threshold for this endpoint.</li> * <li>The connection is marked as going away.</li> * <li>The parent stream ID does not exist or is not open from the side sending the push * promise.</li> * <li>Could not set a valid priority for the new stream.</li> * </ul> * * @param streamId the ID of the push stream * @param parent the parent stream used to initiate the push stream. */ Http2Stream reservePushStream(int streamId, Http2Stream parent) throws Http2Exception; /** * Indicates whether or not this endpoint is the server-side of the connection. */ boolean isServer(); /** * Sets whether server push is allowed to this endpoint. */ void allowPushTo(boolean allow); /** * Gets whether or not server push is allowed to this endpoint. This is always false * for a server endpoint. */ boolean allowPushTo(); /** * Gets the number of currently active streams that were created by this endpoint. 
*/ int numActiveStreams(); /** * Gets the maximum number of concurrent streams allowed by this endpoint. */ int maxStreams(); /** * Sets the maximum number of concurrent streams allowed by this endpoint. */ void maxStreams(int maxStreams); /** * Gets the ID of the stream last successfully created by this endpoint. */ int lastStreamCreated(); /** * Gets the last stream created by this endpoint that is "known" by the opposite endpoint. * If a GOAWAY was received for this endpoint, this will be the last stream ID from the * GOAWAY frame. Otherwise, this will be same as {@link #lastStreamCreated()}. */ int lastKnownStream(); /** * Gets the flow controller for this endpoint. */ F flowController(); /** * Sets the flow controller for this endpoint. */ void flowController(F flowController); /** * Gets the {@link Endpoint} opposite this one. */ Endpoint<? extends Http2FlowController> opposite(); } /** * Adds a listener of stream life-cycle events. Adding the same listener multiple times has no effect. */ void addListener(Listener listener); /** * Removes a listener of stream life-cycle events. */ void removeListener(Listener listener); /** * Attempts to get the stream for the given ID. If it doesn't exist, throws. */ Http2Stream requireStream(int streamId) throws Http2Exception; /** * Gets the stream if it exists. If not, returns {@code null}. */ Http2Stream stream(int streamId); /** * Gets the stream object representing the connection, itself (i.e. stream zero). This object * always exists. */ Http2Stream connectionStream(); /** * Gets the number of streams that are currently either open or half-closed. */ int numActiveStreams(); /** * Gets all streams that are currently either open or half-closed. The returned collection is * sorted by priority. */ Collection<Http2Stream> activeStreams(); /** * Indicates whether or not the local endpoint for this connection is the server. */ boolean isServer(); /** * Gets a view of this connection from the local {@link Endpoint}. 
*/ Endpoint<Http2LocalFlowController> local(); /** * Creates a new stream initiated by the local endpoint * @see Endpoint#createStream(int) */ Http2Stream createLocalStream(int streamId) throws Http2Exception; /** * Gets a view of this connection from the remote {@link Endpoint}. */ Endpoint<Http2RemoteFlowController> remote(); /** * Creates a new stream initiated by the remote endpoint. * @see Endpoint#createStream(int) */ Http2Stream createRemoteStream(int streamId) throws Http2Exception; /** * Indicates whether or not a {@code GOAWAY} was received from the remote endpoint. */ boolean goAwayReceived(); /** * Indicates that a {@code GOAWAY} was received from the remote endpoint and sets the last known stream. */ void goAwayReceived(int lastKnownStream); /** * Indicates whether or not a {@code GOAWAY} was sent to the remote endpoint. */ boolean goAwaySent(); /** * Indicates that a {@code GOAWAY} was sent to the remote endpoint and sets the last known stream. */ void goAwaySent(int lastKnownStream); /** * Indicates whether or not either endpoint has received a GOAWAY. */ boolean isGoAway(); }
nat2013/netty
codec-http2/src/main/java/io/netty/handler/codec/http2/Http2Connection.java
Java
apache-2.0
10,653
/* * Copyright 2000-2009 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.util.io; import com.intellij.openapi.util.ThreadLocalCachedValue; import com.intellij.openapi.util.ThrowableComputable; import com.intellij.openapi.util.io.FileUtil; import com.intellij.openapi.vfs.CharsetToolkit; import com.intellij.util.SystemProperties; import org.jetbrains.annotations.NonNls; import org.jetbrains.annotations.NotNull; import java.io.*; import java.lang.reflect.Field; import java.nio.charset.Charset; public class IOUtil { public static final boolean ourByteBuffersUseNativeByteOrder = SystemProperties.getBooleanProperty("idea.bytebuffers.use.native.byte.order", true); private static final int STRING_HEADER_SIZE = 1; private static final int STRING_LENGTH_THRESHOLD = 255; @NonNls private static final String LONGER_THAN_64K_MARKER = "LONGER_THAN_64K"; private IOUtil() {} public static String readString(@NotNull DataInput stream) throws IOException { int length = stream.readInt(); if (length == -1) return null; if (length == 0) return ""; byte[] bytes = new byte[length*2]; stream.readFully(bytes); return new String(bytes, 0, length*2, CharsetToolkit.UTF_16BE_CHARSET); } public static void writeString(String s, @NotNull DataOutput stream) throws IOException { if (s == null) { stream.writeInt(-1); return; } stream.writeInt(s.length()); if (s.isEmpty()) { return; } char[] chars = s.toCharArray(); byte[] bytes = new byte[chars.length * 2]; for (int i = 0, i2 
= 0; i < chars.length; i++, i2 += 2) { char aChar = chars[i]; bytes[i2] = (byte)(aChar >>> 8 & 0xFF); bytes[i2 + 1] = (byte)(aChar & 0xFF); } stream.write(bytes); } public static void writeUTFTruncated(@NotNull DataOutput stream, @NotNull String text) throws IOException { // we should not compare number of symbols to 65635 -> it is number of bytes what should be compared // ? 4 bytes per symbol - rough estimation if (text.length() > 16383) { stream.writeUTF(text.substring(0, 16383)); } else { stream.writeUTF(text); } } private static final ThreadLocalCachedValue<byte[]> ourReadWriteBuffersCache = new ThreadLocalCachedValue<byte[]>() { @Override protected byte[] create() { return allocReadWriteUTFBuffer(); } }; public static void writeUTF(@NotNull DataOutput storage, @NotNull final String value) throws IOException { writeUTFFast(ourReadWriteBuffersCache.getValue(), storage, value); } public static String readUTF(@NotNull DataInput storage) throws IOException { return readUTFFast(ourReadWriteBuffersCache.getValue(), storage); } @NotNull public static byte[] allocReadWriteUTFBuffer() { return new byte[STRING_LENGTH_THRESHOLD + STRING_HEADER_SIZE]; } public static void writeUTFFast(@NotNull byte[] buffer, @NotNull DataOutput storage, @NotNull final String value) throws IOException { int len = value.length(); if (len < STRING_LENGTH_THRESHOLD) { buffer[0] = (byte)len; boolean isAscii = true; for (int i = 0; i < len; i++) { char c = value.charAt(i); if (c >= 128) { isAscii = false; break; } buffer[i + STRING_HEADER_SIZE] = (byte)c; } if (isAscii) { storage.write(buffer, 0, len + STRING_HEADER_SIZE); return; } } storage.writeByte((byte)0xFF); try { storage.writeUTF(value); } catch (UTFDataFormatException e) { storage.writeUTF(LONGER_THAN_64K_MARKER); writeString(value, storage); } } public static final Charset US_ASCII = Charset.forName("US-ASCII"); private static final ThreadLocalCachedValue<char[]> spareBufferLocal = new ThreadLocalCachedValue<char[]>() { @Override 
protected char[] create() { return new char[STRING_LENGTH_THRESHOLD]; } }; public static String readUTFFast(@NotNull byte[] buffer, @NotNull DataInput storage) throws IOException { int len = 0xFF & (int)storage.readByte(); if (len == 0xFF) { String result = storage.readUTF(); if (LONGER_THAN_64K_MARKER.equals(result)) { return readString(storage); } return result; } if (len == 0) return ""; storage.readFully(buffer, 0, len); char[] chars = spareBufferLocal.getValue(); for(int i = 0; i < len; ++i) chars[i] = (char)(buffer[i] &0xFF); return new String(chars, 0, len); } public static boolean isAscii(@NotNull String str) { for (int i = 0, length = str.length(); i < length; ++ i) { if (str.charAt(i) >= 128) return false; } return true; } public static boolean isAscii(char c) { return c < 128; } public static boolean deleteAllFilesStartingWith(@NotNull File file) { final String baseName = file.getName(); File parentFile = file.getParentFile(); final File[] files = parentFile != null ? parentFile.listFiles(new FileFilter() { @Override public boolean accept(final File pathname) { return pathname.getName().startsWith(baseName); } }): null; boolean ok = true; if (files != null) { for (File f : files) { ok &= FileUtil.delete(f); } } return ok; } public static void syncStream(OutputStream stream) throws IOException { stream.flush(); try { Field outField = FilterOutputStream.class.getDeclaredField("out"); outField.setAccessible(true); while (stream instanceof FilterOutputStream) { Object o = outField.get(stream); if (o instanceof OutputStream) { stream = (OutputStream)o; } else { break; } } if (stream instanceof FileOutputStream) { ((FileOutputStream)stream).getFD().sync(); } } catch (NoSuchFieldException e) { throw new RuntimeException(e); } catch (IllegalAccessException e) { throw new RuntimeException(e); } } public static <T> T openCleanOrResetBroken(@NotNull ThrowableComputable<T, IOException> factoryComputable, final File file) throws IOException { return 
openCleanOrResetBroken(factoryComputable, new Runnable() { @Override public void run() { deleteAllFilesStartingWith(file); } }); } public static <T> T openCleanOrResetBroken(@NotNull ThrowableComputable<T, IOException> factoryComputable, Runnable cleanupCallback) throws IOException { for(int i = 0; i < 2; ++i) { try { return factoryComputable.compute(); } catch (IOException ex) { if (i == 1) throw ex; cleanupCallback.run(); } } return null; } }
diorcety/intellij-community
platform/util/src/com/intellij/util/io/IOUtil.java
Java
apache-2.0
7,277
/* Copyright 2006 Jerry Huxtable Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.jhlabs.image; import java.awt.*; import java.awt.image.*; /** * A Filter to pixellate images. */ public class BlockFilter extends AbstractBufferedImageOp { private int blockSize = 2; /** * Construct a BlockFilter. */ public BlockFilter() { } /** * Construct a BlockFilter. * @param blockSize the number of pixels along each block edge */ public BlockFilter( int blockSize ) { this.blockSize = blockSize; } /** * Set the pixel block size. * @param blockSize the number of pixels along each block edge * @min-value 1 * @max-value 100+ * @see #getBlockSize */ public void setBlockSize(int blockSize) { this.blockSize = blockSize; } /** * Get the pixel block size. 
* @return the number of pixels along each block edge * @see #setBlockSize */ public int getBlockSize() { return blockSize; } public BufferedImage filter( BufferedImage src, BufferedImage dst ) { int width = src.getWidth(); int height = src.getHeight(); int type = src.getType(); WritableRaster srcRaster = src.getRaster(); if ( dst == null ) dst = createCompatibleDestImage( src, null ); int[] pixels = new int[blockSize * blockSize]; for ( int y = 0; y < height; y += blockSize ) { for ( int x = 0; x < width; x += blockSize ) { int w = Math.min( blockSize, width-x ); int h = Math.min( blockSize, height-y ); int t = w*h; getRGB( src, x, y, w, h, pixels ); int r = 0, g = 0, b = 0; int argb; int i = 0; for ( int by = 0; by < h; by++ ) { for ( int bx = 0; bx < w; bx++ ) { argb = pixels[i]; r += (argb >> 16) & 0xff; g += (argb >> 8) & 0xff; b += argb & 0xff; i++; } } argb = ((r/t) << 16) | ((g/t) << 8) | (b/t); i = 0; for ( int by = 0; by < h; by++ ) { for ( int bx = 0; bx < w; bx++ ) { pixels[i] = (pixels[i] & 0xff000000) | argb; i++; } } setRGB( dst, x, y, w, h, pixels ); } } return dst; } public String toString() { return "Pixellate/Mosaic..."; } }
svn2github/pixels
src/main/java/com/jhlabs/image/BlockFilter.java
Java
apache-2.0
3,064
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* $Id$ */ package org.apache.fop.pdf; import java.io.IOException; import java.io.OutputStream; import org.apache.commons.io.output.CountingOutputStream; /** * Class representing a PDF name object. */ public class PDFName extends PDFObject { private String name; /** * Creates a new PDF name object. * @param name the name value */ public PDFName(String name) { super(); this.name = escapeName(name); } private static final String ESCAPED_NAME_CHARS = "/()<>[]%#"; /** * Escapes a PDF name. It adds the leading slash and escapes characters as necessary. * @param name the name * @return the escaped name */ static String escapeName(String name) { StringBuilder sb = new StringBuilder(Math.min(16, name.length() + 4)); boolean skipFirst = false; sb.append('/'); if (name.startsWith("/")) { skipFirst = true; } for (int i = (skipFirst ? 
1 : 0), c = name.length(); i < c; i++) { char ch = name.charAt(i); if (ch < 33 || ch > 126 || ESCAPED_NAME_CHARS.indexOf(ch) >= 0) { sb.append('#'); toHex(ch, sb); } else { sb.append(ch); } } return sb.toString(); } private static final char[] DIGITS = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'}; private static void toHex(char ch, StringBuilder sb) { if (ch >= 256) { throw new IllegalArgumentException( "Only 8-bit characters allowed by this implementation"); } sb.append(DIGITS[ch >>> 4 & 0x0F]); sb.append(DIGITS[ch & 0x0F]); } /** {@inheritDoc} */ @Override public String toString() { return this.name; } /** * Returns the name without the leading slash. * @return the name without the leading slash */ public String getName() { return this.name.substring(1); } /** {@inheritDoc} */ public boolean equals(Object obj) { if (!(obj instanceof PDFName)) { return false; } PDFName other = (PDFName)obj; return this.name.equals(other.name); } /** {@inheritDoc} */ public int hashCode() { return name.hashCode(); } @Override public int output(OutputStream stream) throws IOException { CountingOutputStream cout = new CountingOutputStream(stream); StringBuilder textBuffer = new StringBuilder(64); textBuffer.append(toString()); PDFDocument.flushTextBuffer(textBuffer, cout); return cout.getCount(); } @Override public void outputInline(OutputStream out, StringBuilder textBuffer) throws IOException { if (hasObjectNumber()) { textBuffer.append(referencePDF()); } else { textBuffer.append(toString()); } } }
Distrotech/fop
src/java/org/apache/fop/pdf/PDFName.java
Java
apache-2.0
3,799
package gov.hhs.onc.sdcct.ws.impl; import gov.hhs.onc.sdcct.logging.impl.TxTaskExecutor; import org.apache.cxf.endpoint.DeferredConduitSelector; public class SdcctConduitSelector extends DeferredConduitSelector { private TxTaskExecutor taskExec; public SdcctConduitSelector(TxTaskExecutor taskExec) { super(); this.taskExec = taskExec; } public TxTaskExecutor getTaskExecutor() { return this.taskExec; } }
elizabethso/sdcct
sdcct-core/src/main/java/gov/hhs/onc/sdcct/ws/impl/SdcctConduitSelector.java
Java
apache-2.0
455
// // CipherReference.cs - CipherReference implementation for XML Encryption // http://www.w3.org/2001/04/xmlenc#sec-CipherReference // // Author: // Tim Coleman (tim@timcoleman.com) // // Copyright (C) Tim Coleman, 2004 // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
// #if NET_2_0 using System.Xml; namespace System.Security.Cryptography.Xml { public sealed class CipherReference : EncryptedReference { #region Constructors public CipherReference () : base () { } public CipherReference (string uri) : base (uri) { } public CipherReference (string uri, TransformChain tc) : base (uri, tc) { } #endregion // Constructors #region Methods public override XmlElement GetXml () { return GetXml (new XmlDocument ()); } internal override XmlElement GetXml (XmlDocument document) { XmlElement xel = document.CreateElement (XmlEncryption.ElementNames.CipherReference, EncryptedXml.XmlEncNamespaceUrl); xel.SetAttribute (XmlEncryption.AttributeNames.URI, Uri); if (TransformChain != null && TransformChain.Count > 0) { XmlElement xtr = document.CreateElement (XmlEncryption.ElementNames.Transforms, EncryptedXml.XmlEncNamespaceUrl); foreach (Transform t in TransformChain) xtr.AppendChild (document.ImportNode (t.GetXml (), true)); xel.AppendChild (xtr); } return xel; } public override void LoadXml (XmlElement value) { if (value == null) throw new ArgumentNullException ("value"); if ((value.LocalName != XmlEncryption.ElementNames.CipherReference) || (value.NamespaceURI != EncryptedXml.XmlEncNamespaceUrl)) throw new CryptographicException ("Malformed CipherReference element."); base.LoadXml (value); } #endregion // Methods } } #endif
symplified/Symplified.Auth
lib/mono/mcs/class/System.Security/System.Security.Cryptography.Xml/CipherReference.cs
C#
apache-2.0
2,802
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.curator.x.async.modeled.details; import org.apache.curator.x.async.AsyncStage; import org.apache.zookeeper.WatchedEvent; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; import java.util.concurrent.Executor; import java.util.function.BiConsumer; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; class ModelStage<T> extends CompletableFuture<T> implements AsyncStage<T> { private final CompletionStage<WatchedEvent> event; static <U> ModelStage<U> make() { return new ModelStage<>(null); } static <U> ModelStage<U> make(CompletionStage<WatchedEvent> event) { return new ModelStage<>(event); } static <U> ModelStage<U> completed(U value) { ModelStage<U> stage = new ModelStage<>(null); stage.complete(value); return stage; } static <U> ModelStage<U> exceptionally(Exception e) { ModelStage<U> stage = new ModelStage<>(null); stage.completeExceptionally(e); return stage; } static <U> ModelStage<U> async(Executor executor) { return new AsyncModelStage<>(executor); } static <U> ModelStage<U> asyncCompleted(U value, Executor executor) { ModelStage<U> stage = new AsyncModelStage<>(executor); 
stage.complete(value); return stage; } static <U> ModelStage<U> asyncExceptionally(Exception e, Executor executor) { ModelStage<U> stage = new AsyncModelStage<>(executor); stage.completeExceptionally(e); return stage; } @Override public CompletionStage<WatchedEvent> event() { return event; } private ModelStage(CompletionStage<WatchedEvent> event) { this.event = event; } private static class AsyncModelStage<U> extends ModelStage<U> { private final Executor executor; public AsyncModelStage(Executor executor) { super(null); this.executor = executor; } @Override public <U1> CompletableFuture<U1> thenApplyAsync(Function<? super U, ? extends U1> fn) { return super.thenApplyAsync(fn, executor); } @Override public CompletableFuture<Void> thenAcceptAsync(Consumer<? super U> action) { return super.thenAcceptAsync(action, executor); } @Override public CompletableFuture<Void> thenRunAsync(Runnable action) { return super.thenRunAsync(action, executor); } @Override public <U1, V> CompletableFuture<V> thenCombineAsync(CompletionStage<? extends U1> other, BiFunction<? super U, ? super U1, ? extends V> fn) { return super.thenCombineAsync(other, fn, executor); } @Override public <U1> CompletableFuture<Void> thenAcceptBothAsync(CompletionStage<? extends U1> other, BiConsumer<? super U, ? super U1> action) { return super.thenAcceptBothAsync(other, action, executor); } @Override public CompletableFuture<Void> runAfterBothAsync(CompletionStage<?> other, Runnable action) { return super.runAfterBothAsync(other, action, executor); } @Override public <U1> CompletableFuture<U1> applyToEitherAsync(CompletionStage<? extends U> other, Function<? super U, U1> fn) { return super.applyToEitherAsync(other, fn, executor); } @Override public CompletableFuture<Void> acceptEitherAsync(CompletionStage<? extends U> other, Consumer<? 
super U> action) { return super.acceptEitherAsync(other, action, executor); } @Override public CompletableFuture<Void> runAfterEitherAsync(CompletionStage<?> other, Runnable action) { return super.runAfterEitherAsync(other, action, executor); } @Override public <U1> CompletableFuture<U1> thenComposeAsync(Function<? super U, ? extends CompletionStage<U1>> fn) { return super.thenComposeAsync(fn, executor); } @Override public CompletableFuture<U> whenCompleteAsync(BiConsumer<? super U, ? super Throwable> action) { return super.whenCompleteAsync(action, executor); } @Override public <U1> CompletableFuture<U1> handleAsync(BiFunction<? super U, Throwable, ? extends U1> fn) { return super.handleAsync(fn, executor); } } }
apache/curator
curator-x-async/src/main/java/org/apache/curator/x/async/modeled/details/ModelStage.java
Java
apache-2.0
5,444
<?php
/**
 * Copyright 2016 Google Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

namespace Google\Cloud\Storage\Tests\System;

use Google\Cloud\Core\Exception\BadRequestException;
use Google\Cloud\Storage\Bucket;

/**
 * System tests for bucket management: create, list, update, lifecycle rules,
 * IAM policies, labels and location types.
 *
 * @group storage
 * @group storage-bucket
 */
class ManageBucketsTest extends StorageTestCase
{
    /**
     * Buckets created with the testing prefix must appear in a prefixed list.
     */
    public function testListsBuckets()
    {
        $foundBuckets = [];
        $bucketsToCreate = [
            uniqid(self::TESTING_PREFIX),
            uniqid(self::TESTING_PREFIX)
        ];

        foreach ($bucketsToCreate as $bucketToCreate) {
            self::createBucket(self::$client, $bucketToCreate);
        }

        $buckets = self::$client->buckets(['prefix' => self::TESTING_PREFIX]);

        foreach ($buckets as $bucket) {
            foreach ($bucketsToCreate as $key => $bucketToCreate) {
                if ($bucket->name() === $bucketToCreate) {
                    $foundBuckets[$key] = $bucket->name();
                }
            }
        }

        $this->assertEquals($bucketsToCreate, $foundBuckets);
    }

    /**
     * Creating a bucket with explicit options must echo those options back
     * in the bucket metadata.
     */
    public function testCreatesBucket()
    {
        $name = uniqid(self::TESTING_PREFIX);
        $options = [
            'location' => 'ASIA',
            'storageClass' => 'NEARLINE',
            'versioning' => [
                'enabled' => true
            ]
        ];
        $this->assertFalse(self::$client->bucket($name)->exists());

        $bucket = self::createBucket(self::$client, $name, $options);

        $this->assertTrue(self::$client->bucket($name)->exists());
        $this->assertEquals($name, $bucket->name());
        $this->assertEquals($options['location'], $bucket->info()['location']);
        $this->assertEquals($options['storageClass'], $bucket->info()['storageClass']);
        $this->assertEquals($options['versioning'], $bucket->info()['versioning']);
        // 'ASIA' is a multi-region location.
        $this->assertEquals('multi-region', $bucket->info()['locationType']);
    }

    /**
     * Updating website configuration must be reflected in the returned info.
     */
    public function testUpdateBucket()
    {
        $options = [
            'website' => [
                'mainPageSuffix' => 'index.html',
                'notFoundPage' => '404.html'
            ]
        ];
        $info = self::$bucket->update($options);

        $this->assertEquals($options['website'], $info['website']);
    }

    /**
     * @group storage-bucket-lifecycle
     * @dataProvider lifecycleRules
     */
    public function testCreateBucketWithLifecycleDeleteRule(array $rule, $isError = false)
    {
        if ($isError) {
            $this->setExpectedException(BadRequestException::class);
        }

        $lifecycle = Bucket::lifecycle();
        $lifecycle->addDeleteRule($rule);

        $bucket = self::createBucket(self::$client, uniqid(self::TESTING_PREFIX), [
            'lifecycle' => $lifecycle
        ]);

        $this->assertEquals($lifecycle->toArray(), $bucket->info()['lifecycle']);
    }

    /**
     * @group storage-bucket-lifecycle
     * @dataProvider lifecycleRules
     */
    public function testUpdateBucketWithLifecycleDeleteRule(array $rule, $isError = false)
    {
        if ($isError) {
            $this->setExpectedException(BadRequestException::class);
        }

        $lifecycle = Bucket::lifecycle();
        $lifecycle->addDeleteRule($rule);

        $bucket = self::createBucket(self::$client, uniqid(self::TESTING_PREFIX));
        $this->assertArrayNotHasKey('lifecycle', $bucket->info());

        $bucket->update([
            'lifecycle' => $lifecycle
        ]);

        $this->assertEquals($lifecycle->toArray(), $bucket->info()['lifecycle']);
    }

    /**
     * Data provider of delete-rule conditions; the second element marks rules
     * the service is expected to reject with a BadRequestException.
     *
     * Note: a previously duplicated `daysSinceNoncurrentTime => -5` error row
     * was removed — it ran the identical case twice.
     */
    public function lifecycleRules()
    {
        return [
            [['age' => 1000]],
            [['daysSinceNoncurrentTime' => 25]],
            [['daysSinceNoncurrentTime' => -5], true], // error case
            [['noncurrentTimeBefore' => (new \DateTime)->format("Y-m-d")]],
            [['noncurrentTimeBefore' => new \DateTime]],
            [['noncurrentTimeBefore' => 'this is not a timestamp'], true], // error case
            [['customTimeBefore' => (new \DateTime)->format("Y-m-d")]],
            [['customTimeBefore' => new \DateTime]],
            [['customTimeBefore' => 'this is not a timestamp'], true], // error case
        ];
    }

    /**
     * Adding a delete rule and then clearing it must remove the lifecycle
     * config entirely from bucket metadata.
     *
     * @group storage-bucket-lifecycle
     */
    public function testUpdateAndClearLifecycle()
    {
        $lifecycle = self::$bucket->currentLifecycle()
            ->addDeleteRule([
                'age' => 500
            ]);
        $info = self::$bucket->update(['lifecycle' => $lifecycle]);

        $this->assertEquals($lifecycle->toArray(), $info['lifecycle']);

        $lifecycle = self::$bucket->currentLifecycle()
            ->clearRules('Delete');

        $info = self::$bucket->update(['lifecycle' => $lifecycle]);

        $this->assertEmpty($lifecycle->toArray());
        $this->assertArrayNotHasKey('lifecycle', $info);
    }

    public function testReloadBucket()
    {
        $this->assertEquals('storage#bucket', self::$bucket->reload()['kind']);
    }

    /**
     * @group storageiam
     */
    public function testIam()
    {
        $iam = self::$bucket->iam();
        $policy = $iam->policy();
        // pop the version off the resourceId to make the assertion below more robust.
        $resourceId = explode('#', $policy['resourceId'])[0];
        $bucketName = self::$bucket->name();
        $this->assertEquals($resourceId, sprintf('projects/_/buckets/%s', $bucketName));

        $role = 'roles/storage.admin';
        $policy['bindings'][] = [
            'role' => $role,
            'members' => ['projectOwner:gcloud-php-integration-tests']
        ];

        $iam->setPolicy($policy);

        $policy = $iam->reload();
        $newBinding = array_filter($policy['bindings'], function ($binding) use ($role) {
            return ($binding['role'] === $role);
        });
        $this->assertCount(1, $newBinding);

        $permissions = ['storage.buckets.get'];
        $test = $iam->testPermissions($permissions);
        $this->assertEquals($permissions, $test);
    }

    /**
     * Labels can be set, overwritten, and removed by assigning null.
     */
    public function testLabels()
    {
        $bucket = self::$bucket;

        $bucket->update([
            'labels' => [
                'foo' => 'bar'
            ]
        ]);
        $bucket->reload();
        $this->assertEquals($bucket->info()['labels']['foo'], 'bar');

        $bucket->update([
            'labels' => [
                'foo' => 'bat'
            ]
        ]);
        $bucket->reload();
        $this->assertEquals($bucket->info()['labels']['foo'], 'bat');

        $bucket->update([
            'labels' => [
                'foo' => null
            ]
        ]);
        $bucket->reload();
        $this->assertFalse(isset($bucket->info()['labels']['foo']));
    }

    /**
     * locationType must be stable across create, get, update, list and
     * lock-retention-policy responses.
     *
     * @group storage-bucket-location
     * @dataProvider locationTypes
     */
    public function testBucketLocationType($storageClass, $location, $expectedLocationType, $updateStorageClass)
    {
        $bucketName = uniqid(self::TESTING_PREFIX);
        $bucket = self::createBucket(self::$client, $bucketName, [
            'storageClass' => $storageClass,
            'location' => $location,
            'retentionPolicy' => [
                'retentionPeriod' => 1
            ]
        ]);

        // Test create bucket response
        $this->assertEquals($expectedLocationType, $bucket->info()['locationType']);

        // Test get bucket response
        $this->assertEquals($expectedLocationType, $bucket->reload()['locationType']);

        // Test update bucket (round-trip the storage class).
        $bucket->update(['storageClass' => $updateStorageClass]);
        $bucket->update(['storageClass' => $storageClass]);
        $this->assertEquals($expectedLocationType, $bucket->info()['locationType']);

        // Test list bucket response
        $buckets = iterator_to_array(self::$client->buckets());
        $listBucketBucket = current(array_filter($buckets, function ($bucket) use ($bucketName) {
            return $bucket->name() === $bucketName;
        }));
        $this->assertEquals($expectedLocationType, $listBucketBucket->info()['locationType']);

        // Test lock retention policy response
        $bucket->lockRetentionPolicy();
        $this->assertEquals($expectedLocationType, $bucket->info()['locationType']);
    }

    /**
     * Data provider: [storageClass, location, expected locationType, storageClass to flip to].
     */
    public function locationTypes()
    {
        return [
            [
                'STANDARD',
                'us',
                'multi-region',
                'NEARLINE'
            ],
            [
                'STANDARD',
                'us-central1',
                'region',
                'NEARLINE'
            ],
            [
                'COLDLINE',
                'nam4',
                'dual-region',
                'STANDARD'
            ],
            [
                'ARCHIVE',
                'nam4',
                'dual-region',
                'STANDARD'
            ]
        ];
    }
}
googleapis/google-cloud-php-storage
tests/System/ManageBucketsTest.php
PHP
apache-2.0
9,520
package denominator;

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

import dagger.ObjectGraph;
import denominator.mock.MockProvider;

import static org.assertj.core.api.Assertions.assertThat;

/**
 * Tests the {@code Denominator.create(...)} entry points: the error messages
 * produced for misconfigured providers and the bindings produced for
 * well-formed ones.
 */
public class DenominatorTest {

  @Rule
  public final ExpectedException thrown = ExpectedException.none();

  /** An unregistered provider name should fail, listing what IS configured. */
  @Test
  public void niceMessageWhenProviderNotFound() {
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("provider foo not in set of configured providers: [mock]");

    Denominator.create("foo");
  }

  /** A provider without a static inner class named {@code Module} is rejected. */
  @Test
  public void illegalArgumentWhenMissingModule() {
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("NoModuleProvider should have a static inner class named Module");

    Denominator.create(new NoModuleProvider());
  }

  /** The provider's inner {@code Module} must have a no-args constructor. */
  @Test
  public void illegalArgumentWhenCtorHasArgs() {
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("Module has a no-args constructor");

    Denominator.create(new WrongCtorModuleProvider());
  }

  /** The provider passed to create() is the one exposed by the manager. */
  @Test
  public void providerBindsProperly() {
    Provider provider = Denominator.create(new FooProvider()).provider();
    assertThat(provider).isEqualTo(new FooProvider());
  }

  /** Covers the deprecated {@code Denominator.provider(...)} dagger-module path. */
  @Test
  @Deprecated
  public void providerReturnsSameInstance() {
    FooProvider provider = new FooProvider();
    DNSApiManager mgr = ObjectGraph.create(Denominator.provider(provider),
                                           new FooProvider.Module()).get(DNSApiManager.class);
    assertThat(mgr.provider()).isSameAs(provider);
  }

  /** Anonymous provider subclasses overriding name()/url() must still bind. */
  @Test
  public void anonymousProviderPermitted() {
    Provider provider = Denominator.create(new FooProvider() {
      @Override
      public String name() {
        return "bar";
      }

      @Override
      public String url() {
        return "http://bar";
      }
    }).provider();
    assertThat(provider.name()).isEqualTo("bar");
    assertThat(provider.url()).isEqualTo("http://bar");
  }

  // Fixture: provider with no inner Module at all.
  static class NoModuleProvider extends BasicProvider {

  }

  // Fixture: provider whose Module lacks a no-args constructor.
  static class WrongCtorModuleProvider extends BasicProvider {

    @dagger.Module(injects = DNSApiManager.class, includes = MockProvider.Module.class, complete = false)
    static class Module {

      Module(String name) {
      }
    }
  }

  // Fixture: minimal well-formed provider with an empty no-args Module.
  static class FooProvider extends BasicProvider {

    @dagger.Module(injects = DNSApiManager.class, includes = MockProvider.Module.class, complete = false)
    static class Module {

    }
  }
}
jdamick/denominator
core/src/test/java/denominator/DenominatorTest.java
Java
apache-2.0
2,477
// Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
// Copyright (c) 2011, 2012 Open Networking Foundation
// Copyright (c) 2012, 2013 Big Switch Networks, Inc.
// This library was generated by the LoxiGen Compiler.
// See the file LICENSE.txt which should have been included in the source distribution

// Automatically generated by LOXI from template of_virtual_class.java
// Do not modify

package org.projectfloodlight.openflow.protocol.ver15;

import org.projectfloodlight.openflow.protocol.*;
import org.projectfloodlight.openflow.protocol.action.*;
import org.projectfloodlight.openflow.protocol.actionid.*;
import org.projectfloodlight.openflow.protocol.bsntlv.*;
import org.projectfloodlight.openflow.protocol.errormsg.*;
import org.projectfloodlight.openflow.protocol.meterband.*;
import org.projectfloodlight.openflow.protocol.instruction.*;
import org.projectfloodlight.openflow.protocol.instructionid.*;
import org.projectfloodlight.openflow.protocol.match.*;
import org.projectfloodlight.openflow.protocol.stat.*;
import org.projectfloodlight.openflow.protocol.oxm.*;
import org.projectfloodlight.openflow.protocol.oxs.*;
import org.projectfloodlight.openflow.protocol.queueprop.*;
import org.projectfloodlight.openflow.types.*;
import org.projectfloodlight.openflow.util.*;
import org.projectfloodlight.openflow.exceptions.*;
import io.netty.buffer.ByteBuf;

// Generated virtual-class dispatcher for the OpenFlow 1.5 GROUP_MOD message
// family: validates the fixed header fields, peeks at the "command"
// discriminator, then delegates to the concrete per-command reader.
abstract class OFGroupModVer15 {
    // version: 1.5
    final static byte WIRE_VERSION = 6;
    final static int MINIMUM_LENGTH = 24;


    public final static OFGroupModVer15.Reader READER = new Reader();

    static class Reader implements OFMessageReader<OFGroupMod> {
        @Override
        public OFGroupMod readFrom(ByteBuf bb) throws OFParseError {
            // Not enough bytes buffered yet for even the smallest GROUP_MOD;
            // signal the caller to wait for more data.
            if(bb.readableBytes() < MINIMUM_LENGTH)
                return null;
            int start = bb.readerIndex();
            // fixed value property version == 6
            byte version = bb.readByte();
            if(version != (byte) 0x6)
                throw new OFParseError("Wrong version: Expected=OFVersion.OF_15(6), got="+version);
            // fixed value property type == 15
            byte type = bb.readByte();
            if(type != (byte) 0xf)
                throw new OFParseError("Wrong type: Expected=OFType.GROUP_MOD(15), got="+type);
            int length = U16.f(bb.readShort());
            if(length < MINIMUM_LENGTH)
                throw new OFParseError("Wrong length: Expected to be >= " + MINIMUM_LENGTH + ", was: " + length);
            // consume the 32-bit header field (xid) only to reach the discriminator
            U32.f(bb.readInt());
            short command = bb.readShort();
            // rewind: the concrete reader re-parses the message from the start
            bb.readerIndex(start);
            switch(command) {
               case (short) 0x0:
                   // discriminator value OFGroupModCommand.ADD=0 for class OFGroupAddVer15
                   return OFGroupAddVer15.READER.readFrom(bb);
               case (short) 0x2:
                   // discriminator value OFGroupModCommand.DELETE=2 for class OFGroupDeleteVer15
                   return OFGroupDeleteVer15.READER.readFrom(bb);
               case (short) 0x1:
                   // discriminator value OFGroupModCommand.MODIFY=1 for class OFGroupModifyVer15
                   return OFGroupModifyVer15.READER.readFrom(bb);
               case (short) 0x3:
                   // discriminator value OFGroupModCommand.INSERT_BUCKET=3 for class OFGroupInsertBucketVer15
                   return OFGroupInsertBucketVer15.READER.readFrom(bb);
               case (short) 0x5:
                   // discriminator value OFGroupModCommand.REMOVE_BUCKET=5 for class OFGroupRemoveBucketVer15
                   return OFGroupRemoveBucketVer15.READER.readFrom(bb);
               default:
                   throw new OFParseError("Unknown value for discriminator command of class OFGroupModVer15: " + command);
            }
        }
    }
}
floodlight/loxigen-artifacts
openflowj/gen-src/main/java/org/projectfloodlight/openflow/protocol/ver15/OFGroupModVer15.java
Java
apache-2.0
3,873
/* * DBeaver - Universal Database Manager * Copyright (C) 2010-2019 Serge Rider (serge@jkiss.org) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jkiss.dbeaver.registry.formatter; import org.jkiss.dbeaver.model.data.DBDDataFormatter; import org.jkiss.dbeaver.model.data.DBDDataFormatterProfile; import org.jkiss.dbeaver.model.impl.preferences.SimplePreferenceStore; import org.jkiss.dbeaver.model.preferences.DBPPreferenceListener; import org.jkiss.dbeaver.model.preferences.DBPPreferenceStore; import org.jkiss.dbeaver.model.preferences.DBPPropertyDescriptor; import org.jkiss.dbeaver.model.struct.DBSTypedObject; import org.jkiss.dbeaver.utils.PrefUtils; import org.jkiss.utils.CommonUtils; import java.io.IOException; import java.util.HashMap; import java.util.Locale; import java.util.Map; /** * DataFormatterProfile */ public class DataFormatterProfile implements DBDDataFormatterProfile, DBPPreferenceListener { private static final String PROP_LANGUAGE = "dataformat.profile.language"; //$NON-NLS-1$ private static final String PROP_COUNTRY = "dataformat.profile.country"; //$NON-NLS-1$ private static final String PROP_VARIANT = "dataformat.profile.variant"; //$NON-NLS-1$ public static final String DATAFORMAT_PREFIX = "dataformat."; //$NON-NLS-1$ public static final String DATAFORMAT_TYPE_PREFIX = DATAFORMAT_PREFIX + "type."; //$NON-NLS-1$ private DBPPreferenceStore store; private String name; private Locale locale; public DataFormatterProfile(String profileName, 
DBPPreferenceStore store) { this.name = profileName; this.store = store; loadProfile(); } private void loadProfile() { { String language = store.getString(PROP_LANGUAGE); String country = store.getString(PROP_COUNTRY); String variant = store.getString(PROP_VARIANT); if (CommonUtils.isEmpty(language)) { this.locale = Locale.getDefault(); } else if (CommonUtils.isEmpty(country)) { this.locale = new Locale(language); } else if (CommonUtils.isEmpty(variant)) { this.locale = new Locale(language, country); } else { this.locale = new Locale(language, country, variant); } } } @Override public void saveProfile() throws IOException { store.setValue(PROP_LANGUAGE, locale.getLanguage()); store.setValue(PROP_COUNTRY, locale.getCountry()); store.setValue(PROP_VARIANT, locale.getVariant()); PrefUtils.savePreferenceStore(store); } @Override public DBPPreferenceStore getPreferenceStore() { return store; } @Override public String getProfileName() { return name; } @Override public void setProfileName(String name) { this.name = name; } @Override public Locale getLocale() { return locale; } @Override public void setLocale(Locale locale) { this.locale = locale; } @Override public Map<Object, Object> getFormatterProperties(String typeId) { DataFormatterDescriptor formatter = DataFormatterRegistry.getInstance().getDataFormatter(typeId); Map<Object, Object> defaultProperties = formatter.getSample().getDefaultProperties(locale); Map<Object, Object> formatterProps = new HashMap<>(); for (DBPPropertyDescriptor prop : formatter.getProperties()) { Object defaultValue = defaultProperties.get(prop.getId()); Object propValue = PrefUtils.getPreferenceValue( store, DATAFORMAT_TYPE_PREFIX + formatter.getId() + "." 
+ prop.getId(), prop.getDataType()); if (propValue != null && !CommonUtils.equalObjects(defaultValue, propValue)) { formatterProps.put(prop.getId(), propValue); } } return formatterProps; } @Override public void setFormatterProperties(String typeId, Map<Object, Object> formatterProps) { DataFormatterDescriptor formatter = DataFormatterRegistry.getInstance().getDataFormatter(typeId); for (DBPPropertyDescriptor prop : formatter.getProperties()) { Object propValue = formatterProps == null ? null : formatterProps.get(prop.getId()); if (propValue != null) { PrefUtils.setPreferenceValue(store, DATAFORMAT_TYPE_PREFIX + formatter.getId() + "." + prop.getId(), propValue); } else { store.setToDefault(DATAFORMAT_TYPE_PREFIX + formatter.getId() + "." + prop.getId()); } } } @Override public boolean isOverridesParent() { if (store instanceof SimplePreferenceStore) { SimplePreferenceStore prefStore = (SimplePreferenceStore) store; if (prefStore.isSet(PROP_LANGUAGE) || prefStore.isSet(PROP_COUNTRY) || prefStore.isSet(PROP_VARIANT)) { return true; } for (DataFormatterDescriptor formatter : DataFormatterRegistry.getInstance().getDataFormatters()) { for (DBPPropertyDescriptor prop : formatter.getProperties()) { if (prefStore.isSet(DATAFORMAT_TYPE_PREFIX + formatter.getId() + "." + prop.getId())) { return true; } } } return false; } return true; } @Override public void reset() { if (store instanceof SimplePreferenceStore) { // Set all formatter properties to default store.setToDefault(PROP_LANGUAGE); store.setToDefault(PROP_COUNTRY); store.setToDefault(PROP_VARIANT); for (DataFormatterDescriptor formatter : DataFormatterRegistry.getInstance().getDataFormatters()) { for (DBPPropertyDescriptor prop : formatter.getProperties()) { store.setToDefault(DATAFORMAT_TYPE_PREFIX + formatter.getId() + "." 
+ prop.getId()); } } } loadProfile(); } @Override public DBDDataFormatter createFormatter(String typeId, DBSTypedObject type) throws IllegalAccessException, InstantiationException, IllegalArgumentException { DataFormatterDescriptor descriptor = DataFormatterRegistry.getInstance().getDataFormatter(typeId); if (descriptor == null) { throw new IllegalArgumentException("Formatter '" + typeId + "' not found"); } DBDDataFormatter formatter = descriptor.createFormatter(); Map<Object, Object> defProps = descriptor.getSample().getDefaultProperties(locale); Map<Object, Object> props = getFormatterProperties(typeId); Map<Object, Object> formatterProps = new HashMap<>(); if (defProps != null && !defProps.isEmpty()) { formatterProps.putAll(defProps); } if (props != null && !props.isEmpty()) { formatterProps.putAll(props); } formatter.init(type, locale, formatterProps); return formatter; } public static void initDefaultPreferences(DBPPreferenceStore store, Locale locale) { for (DataFormatterDescriptor formatter : DataFormatterRegistry.getInstance().getDataFormatters()) { Map<Object, Object> defaultProperties = formatter.getSample().getDefaultProperties(locale); Map<Object, Object> formatterProps = new HashMap<>(); for (DBPPropertyDescriptor prop : formatter.getProperties()) { Object defaultValue = defaultProperties.get(prop.getId()); if (defaultValue != null) { PrefUtils.setPreferenceDefaultValue(store, DATAFORMAT_TYPE_PREFIX + formatter.getId() + "." + prop.getId(), defaultValue); } } } } @Override public void preferenceChange(PreferenceChangeEvent event) { if (event.getProperty() != null && event.getProperty().startsWith(DATAFORMAT_PREFIX)) { // Reload this profile loadProfile(); } } }
liuyuanyuan/dbeaver
plugins/org.jkiss.dbeaver.model/src/org/jkiss/dbeaver/registry/formatter/DataFormatterProfile.java
Java
apache-2.0
8,702
package com.planet_ink.coffee_mud.Races; import com.planet_ink.coffee_mud.core.interfaces.*; import com.planet_ink.coffee_mud.core.*; import com.planet_ink.coffee_mud.core.collections.*; import com.planet_ink.coffee_mud.Abilities.interfaces.*; import com.planet_ink.coffee_mud.Areas.interfaces.*; import com.planet_ink.coffee_mud.Behaviors.interfaces.*; import com.planet_ink.coffee_mud.CharClasses.interfaces.*; import com.planet_ink.coffee_mud.Commands.interfaces.*; import com.planet_ink.coffee_mud.Common.interfaces.*; import com.planet_ink.coffee_mud.Exits.interfaces.*; import com.planet_ink.coffee_mud.Items.interfaces.*; import com.planet_ink.coffee_mud.Libraries.interfaces.*; import com.planet_ink.coffee_mud.Locales.interfaces.*; import com.planet_ink.coffee_mud.MOBS.interfaces.*; import com.planet_ink.coffee_mud.Races.interfaces.*; import java.util.*; /* Copyright 2004-2015 Bo Zimmerman Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ public class GiantAmphibian extends GreatAmphibian { @Override public String ID(){ return "GiantAmphibian"; } @Override public String name(){ return "Giant Amphibian"; } @Override public int shortestMale(){return 50;} @Override public int shortestFemale(){return 55;} @Override public int heightVariance(){return 20;} @Override public int lightestWeight(){return 1955;} @Override public int weightVariance(){return 405;} @Override public long forbiddenWornBits(){return ~(Wearable.WORN_EYES);} @Override public String racialCategory(){return "Amphibian";} protected static Vector<RawMaterial> resources=new Vector<RawMaterial>(); @Override public List<RawMaterial> myResources() { synchronized(resources) { if(resources.size()==0) { for(int i=0;i<25;i++) resources.addElement(makeResource ("some "+name().toLowerCase(),RawMaterial.RESOURCE_FISH)); for(int i=0;i<15;i++) resources.addElement(makeResource ("a "+name().toLowerCase()+" hide",RawMaterial.RESOURCE_HIDE)); resources.addElement(makeResource ("some "+name().toLowerCase()+" blood",RawMaterial.RESOURCE_BLOOD)); } } return resources; } }
MaxRau/CoffeeMud
com/planet_ink/coffee_mud/Races/GiantAmphibian.java
Java
apache-2.0
2,689
# Read-only JSON endpoints for platforms belonging to a design catalog
# (public catalog namespace first, then the private one).
class Catalog::PlatformsController < Base::PlatformsController
  before_filter :find_catalog_and_platform

  # GET index: platform CIs attached to the catalog through a 'ComposedOf'
  # relation, rendered as a JSON array of CIs.
  def index
    # NOTE(review): ':relatioShortnName' looks like a typo of ':relationShortName';
    # if the Cms API ignores unknown params, the 'ComposedOf' filter may silently
    # not be applied — confirm the expected key against Cms::Relation before renaming.
    @platforms = Cms::Relation.all(:params => {:ciId => @catalog.ciId, :direction => 'from', :targetClassName => 'catalog.Platform', :relatioShortnName => 'ComposedOf'}).map(&:toCi)
    render :json => @platforms
  end

  private

  # Locates the catalog CI (falling back from the shared to the private
  # namespace) and, when params[:id] is present, the platform CI inside the
  # catalog's namespace. Discards a located CI of the wrong class.
  def find_catalog_and_platform
    @catalog = Cms::Ci.locate(params[:catalog_id], catalogs_ns_path, 'account.Design') || Cms::Ci.locate(params[:catalog_id], private_catalogs_ns_path, 'account.Design')
    platform_id = params[:id]
    if platform_id.present?
      @platform = Cms::Ci.locate(platform_id, catalog_ns_path(@catalog), 'catalog.Platform')
      # Defensive: locate may return a CI of a different class.
      @platform = nil if @platform && @platform.ciClassName != 'catalog.Platform'
    end
  end
end
subaan/display
app/controllers/catalog/platforms_controller.rb
Ruby
apache-2.0
991
package com.mesosphere.sdk.specification.yaml;

import com.fasterxml.jackson.annotation.JsonProperty;

/**
 * Raw YAML port.
 *
 * Immutable holder populated by Jackson from a service spec's
 * {@code port} / {@code env-key} / {@code vip} YAML fields; any of them may be
 * absent and therefore null.
 */
public class RawPort {

    private final Integer port;
    private final String envKey;
    private final RawVip vip;

    private RawPort(@JsonProperty("port") Integer port,
                    @JsonProperty("env-key") String envKey,
                    @JsonProperty("vip") RawVip vip) {
        this.port = port;
        this.envKey = envKey;
        this.vip = vip;
    }

    /** The VIP definition attached to this port, if any. */
    public RawVip getVip() {
        return vip;
    }

    /** The environment variable name to expose the port under, if any. */
    public String getEnvKey() {
        return envKey;
    }

    /** The declared port number, if any. */
    public Integer getPort() {
        return port;
    }
}
adragomir/dcos-commons
sdk/scheduler/src/main/java/com/mesosphere/sdk/specification/yaml/RawPort.java
Java
apache-2.0
682
// Package customimagesearch implements the Azure ARM Customimagesearch service API version 1.0.
//
// The Bing Custom Image Search API lets you send an image search query to Bing and get back image search results
// customized to meet your custom search definition.
package customimagesearch

// Copyright (c) Microsoft and contributors.  All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

import (
	"github.com/Azure/go-autorest/autorest"
)

const (
	// DefaultBaseURI is the default URI used for the service Customimagesearch
	DefaultBaseURI = "https://api.cognitive.microsoft.com/bingcustomsearch/v7.0"
)

// BaseClient is the base client for Customimagesearch.
type BaseClient struct {
	// Embedded autorest HTTP client carrying the request pipeline.
	autorest.Client
	// BaseURI is the endpoint all requests are issued against.
	BaseURI string
}

// New creates an instance of the BaseClient client.
func New() BaseClient {
	return NewWithBaseURI(DefaultBaseURI)
}

// NewWithBaseURI creates an instance of the BaseClient client.
// Pass a non-default baseURI to target a different (e.g. regional) endpoint.
func NewWithBaseURI(baseURI string) BaseClient {
	return BaseClient{
		Client:  autorest.NewClientWithUserAgent(UserAgent()),
		BaseURI: baseURI,
	}
}
linzhaoming/origin
vendor/github.com/Azure/azure-sdk-for-go/services/cognitiveservices/v1.0/customimagesearch/client.go
GO
apache-2.0
1,734
#
#  Copyright (C) 2014 Dell, Inc.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import sys

# True on interpreters older than 2.7, where the stdlib unittest lacks
# features this test suite relies on; unittest2 backports them.
PYTHON26 = sys.version_info[:2] <= (2, 6)

if PYTHON26:
    import unittest2 as unittest
else:
    import unittest

# The test package re-exports the selected unittest implementation.
__all__ = ['unittest']
aidanhs/blockade
blockade/tests/__init__.py
Python
apache-2.0
731
"""Tests for fan platforms.""" import pytest from homeassistant.components.fan import FanEntity class BaseFan(FanEntity): """Implementation of the abstract FanEntity.""" def __init__(self): """Initialize the fan.""" def test_fanentity(): """Test fan entity methods.""" fan = BaseFan() assert fan.state == "off" assert len(fan.speed_list) == 0 assert fan.supported_features == 0 assert fan.capability_attributes == {} # Test set_speed not required with pytest.raises(NotImplementedError): fan.oscillate(True) with pytest.raises(NotImplementedError): fan.set_speed("slow") with pytest.raises(NotImplementedError): fan.turn_on() with pytest.raises(NotImplementedError): fan.turn_off()
tboyce021/home-assistant
tests/components/fan/test_init.py
Python
apache-2.0
781
/// Copyright (c) 2009 Microsoft Corporation
///
/// Redistribution and use in source and binary forms, with or without modification, are permitted provided
/// that the following conditions are met:
///    * Redistributions of source code must retain the above copyright notice, this list of conditions and
///      the following disclaimer.
///    * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
///      the following disclaimer in the documentation and/or other materials provided with the distribution.
///    * Neither the name of Microsoft nor the names of its contributors may be used to
///      endorse or promote products derived from this software without specific prior written permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
/// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
/// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
/// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
/// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
/// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
/// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

ES5Harness.registerTest({
    id: "15.10.4.1-2",

    path: "TestCases/chapter15/15.10/15.10.4/15.10.4.1-2.js",

    description: "RegExp - the thrown error is SyntaxError instead of RegExpError when the characters of 'P' do not have the syntactic form Pattern",

    test: function testcase() {
        // A lone backslash is not a syntactically valid Pattern, so the
        // constructor is required to throw; the test passes only when the
        // thrown value is specifically a SyntaxError.
        var thrown = null;

        try {
            new RegExp('\\');
        } catch (e) {
            thrown = e;
        }

        return thrown !== null && thrown instanceof SyntaxError;
    },

    precondition: function prereq() {
        return true;
    }
});
hnafar/IronJS
Src/Tests/ietestcenter/chapter15/15.10/15.10.4/15.10.4.1-2.js
JavaScript
apache-2.0
2,097
/*---------------------------------------------------------------------------------------------
 *  Copyright (c) Microsoft Corporation. All rights reserved.
 *  Licensed under the MIT License. See License.txt in the project root for license information.
 *--------------------------------------------------------------------------------------------*/

import { Disposable, Command, EventEmitter, Event, workspace, Uri } from 'vscode';
import { Repository, Operation } from './repository';
import { anyEvent, dispose, filterEvent } from './util';
import * as nls from 'vscode-nls';
import { Branch } from './api/git';

const localize = nls.loadMessageBundle();

// Produces the "checkout" status bar entry: the current branch label (with an
// optional localized "(Rebasing)" suffix) wired to the git.checkout command.
class CheckoutStatusBar {

	private _onDidChange = new EventEmitter<void>();
	get onDidChange(): Event<void> { return this._onDidChange.event; }
	private disposables: Disposable[] = [];

	constructor(private repository: Repository) {
		// Re-render this entry whenever a git status run completes.
		repository.onDidRunGitStatus(this._onDidChange.fire, this._onDidChange, this.disposables);
	}

	// Command rendered in the status bar; always defined for this entry.
	get command(): Command | undefined {
		const rebasing = !!this.repository.rebaseCommit;
		const title = `$(git-branch) ${this.repository.headLabel}${rebasing ? ` (${localize('rebasing', 'Rebasing')})` : ''}`;

		return {
			command: 'git.checkout',
			tooltip: `${this.repository.headLabel}`,
			title,
			arguments: [this.repository.sourceControl]
		};
	}

	dispose(): void {
		this.disposables.forEach(d => d.dispose());
	}
}

// Snapshot of everything the sync entry needs in order to render.
interface SyncStatusBarState {
	enabled: boolean;         // mirrors the git.enableStatusBarSync setting
	isSyncRunning: boolean;   // a Sync/Push/Pull operation is in flight
	hasRemotes: boolean;
	HEAD: Branch | undefined;
}

// Produces the "sync" status bar entry (sync / publish / spinner), derived
// from an immutable state snapshot that is replaced on every model change.
class SyncStatusBar {

	private static StartState: SyncStatusBarState = {
		enabled: true,
		isSyncRunning: false,
		hasRemotes: false,
		HEAD: undefined
	};

	private _onDidChange = new EventEmitter<void>();
	get onDidChange(): Event<void> { return this._onDidChange.event; }
	private disposables: Disposable[] = [];

	private _state: SyncStatusBarState = SyncStatusBar.StartState;
	private get state() { return this._state; }
	private set state(state: SyncStatusBarState) {
		// Every state replacement notifies listeners so the entry re-renders.
		this._state = state;
		this._onDidChange.fire();
	}

	constructor(private repository: Repository) {
		repository.onDidRunGitStatus(this.onModelChange, this, this.disposables);
		repository.onDidChangeOperations(this.onOperationsChange, this, this.disposables);

		// Track live changes to the git.enableStatusBarSync setting.
		const onEnablementChange = filterEvent(workspace.onDidChangeConfiguration, e => e.affectsConfiguration('git.enableStatusBarSync'));
		onEnablementChange(this.updateEnablement, this, this.disposables);
		this.updateEnablement();

		this._onDidChange.fire();
	}

	private updateEnablement(): void {
		const config = workspace.getConfiguration('git', Uri.file(this.repository.root));
		const enabled = config.get<boolean>('enableStatusBarSync', true);

		this.state = { ...this.state, enabled };
	}

	private onOperationsChange(): void {
		// Any of the three network operations counts as "sync running".
		const isSyncRunning = this.repository.operations.isRunning(Operation.Sync) ||
			this.repository.operations.isRunning(Operation.Push) ||
			this.repository.operations.isRunning(Operation.Pull);

		this.state = { ...this.state, isSyncRunning };
	}

	private onModelChange(): void {
		this.state = {
			...this.state,
			hasRemotes: this.repository.remotes.length > 0,
			HEAD: this.repository.HEAD
		};
	}

	// Command for the sync entry, or undefined to hide the entry entirely
	// (disabled by settings, or the repository has no remotes).
	get command(): Command | undefined {
		if (!this.state.enabled || !this.state.hasRemotes) {
			return undefined;
		}

		const HEAD = this.state.HEAD;
		let icon = '$(sync)';
		let text = '';
		let command = '';
		let tooltip = '';

		if (HEAD && HEAD.name && HEAD.commit) {
			if (HEAD.upstream) {
				// Branch tracks an upstream: offer sync (or rebase-style sync).
				if (HEAD.ahead || HEAD.behind) {
					text += this.repository.syncLabel;
				}

				const config = workspace.getConfiguration('git', Uri.file(this.repository.root));
				const rebaseWhenSync = config.get<string>('rebaseWhenSync');

				command = rebaseWhenSync ? 'git.syncRebase' : 'git.sync';
				tooltip = localize('sync changes', "Synchronize Changes");
			} else {
				// No upstream configured: offer to publish the branch instead.
				icon = '$(cloud-upload)';
				command = 'git.publish';
				tooltip = localize('publish changes', "Publish Changes");
			}
		} else {
			command = '';
			tooltip = '';
		}

		// An in-flight sync overrides the above: spinner icon, click disabled.
		if (this.state.isSyncRunning) {
			icon = '$(sync~spin)';
			command = '';
			tooltip = localize('syncing changes', "Synchronizing Changes...");
		}

		return {
			command,
			title: [icon, text].join(' ').trim(),
			tooltip,
			arguments: [this.repository.sourceControl]
		};
	}

	dispose(): void {
		this.disposables.forEach(d => d.dispose());
	}
}

// Aggregates the per-repository status bar entries (checkout + sync).
export class StatusBarCommands {

	private syncStatusBar: SyncStatusBar;
	private checkoutStatusBar: CheckoutStatusBar;
	private disposables: Disposable[] = [];

	constructor(repository: Repository) {
		this.syncStatusBar = new SyncStatusBar(repository);
		this.checkoutStatusBar = new CheckoutStatusBar(repository);
	}

	get onDidChange(): Event<void> {
		return anyEvent(
			this.syncStatusBar.onDidChange,
			this.checkoutStatusBar.onDidChange
		);
	}

	// Commands to render, in display order; hidden entries return undefined
	// from their command getter and are skipped here.
	get commands(): Command[] {
		const result: Command[] = [];

		const checkout = this.checkoutStatusBar.command;

		if (checkout) {
			result.push(checkout);
		}

		const sync = this.syncStatusBar.command;

		if (sync) {
			result.push(sync);
		}

		return result;
	}

	dispose(): void {
		this.syncStatusBar.dispose();
		this.checkoutStatusBar.dispose();
		this.disposables = dispose(this.disposables);
	}
}
leafclick/intellij-community
plugins/textmate/lib/bundles/git/src/statusbar.ts
TypeScript
apache-2.0
5,303
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
 * agreements. See the NOTICE file distributed with this work for additional information regarding
 * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License. You may obtain a
 * copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */
package org.apache.geode.distributed;

import static java.util.concurrent.TimeUnit.MINUTES;
import static org.apache.geode.distributed.AbstractLauncher.Status.STOPPED;
import static org.apache.geode.distributed.ConfigurationProperties.CLUSTER_CONFIGURATION_DIR;
import static org.apache.geode.distributed.ConfigurationProperties.DISABLE_AUTO_RECONNECT;
import static org.apache.geode.distributed.ConfigurationProperties.LOG_LEVEL;
import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT;
import static org.apache.geode.distributed.internal.ClusterConfigurationService.CLUSTER_CONFIG_DISK_DIR_PREFIX;
import static org.apache.geode.distributed.internal.DistributionConfig.GEMFIRE_PREFIX;
import static org.apache.geode.internal.AvailablePortHelper.getRandomAvailableTCPPorts;
import static org.apache.geode.internal.DistributionLocator.TEST_OVERRIDE_DEFAULT_PORT_PROPERTY;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertNotNull;

import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;

import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.ErrorCollector;

import org.apache.geode.distributed.AbstractLauncher.Status;
import org.apache.geode.distributed.LocatorLauncher.Builder;
import org.apache.geode.distributed.LocatorLauncher.LocatorState;
import org.apache.geode.internal.process.ProcessType;

/**
 * Abstract base class for integration tests of {@link LocatorLauncher}.
 *
 * <p>
 * Provides given/start/stop helpers around a single {@code launcher} instance, plus two random
 * free TCP ports and a per-test cluster-configuration directory set up before each test.
 *
 * @since GemFire 8.0
 */
public abstract class LocatorLauncherIntegrationTestCase extends LauncherIntegrationTestCase {

  // Random free port installed as the locator's default via
  // TEST_OVERRIDE_DEFAULT_PORT_PROPERTY in setUp.
  protected volatile int defaultLocatorPort;
  // A second random free port for tests that need a non-default locator port.
  protected volatile int nonDefaultLocatorPort;
  // Launcher under test; stopped in tearDown when non-null.
  protected volatile LocatorLauncher launcher;

  private volatile File clusterConfigDirectory;

  @Rule
  public ErrorCollector errorCollector = new ErrorCollector();

  @Before
  public void setUpAbstractLocatorLauncherIntegrationTestCase() throws Exception {
    // Disable multicast and give each test its own cluster-config directory.
    System.setProperty(GEMFIRE_PREFIX + MCAST_PORT, Integer.toString(0));
    clusterConfigDirectory =
        temporaryFolder.newFolder(CLUSTER_CONFIG_DISK_DIR_PREFIX + getUniqueName());

    int[] ports = getRandomAvailableTCPPorts(2);
    defaultLocatorPort = ports[0];
    nonDefaultLocatorPort = ports[1];
    System.setProperty(TEST_OVERRIDE_DEFAULT_PORT_PROPERTY, String.valueOf(defaultLocatorPort));
  }

  @After
  public void tearDownAbstractLocatorLauncherIntegrationTestCase() throws Exception {
    // Best-effort cleanup of whichever launcher the test left running.
    if (launcher != null) {
      launcher.stop();
    }
  }

  @Override
  protected ProcessType getProcessType() {
    return ProcessType.LOCATOR;
  }

  @Override
  protected void givenEmptyWorkingDirectory() {
    // "Empty" still contains exactly one entry: the cluster-config directory
    // created in setUp.
    File[] files = getWorkingDirectory().listFiles();
    assertThat(files).hasSize(1);
    assertThat(files[0]).isDirectory().isEqualTo(getClusterConfigDirectory());
  }

  /** Builds (but does not start) a launcher from the default test builder. */
  protected LocatorLauncher givenLocatorLauncher() {
    return givenLocatorLauncher(newBuilder());
  }

  private LocatorLauncher givenLocatorLauncher(final Builder builder) {
    return builder.build();
  }

  /** Builds and starts a locator from the default test builder. */
  protected LocatorLauncher givenRunningLocator() {
    return givenRunningLocator(newBuilder());
  }

  protected LocatorLauncher givenRunningLocator(final Builder builder) {
    return awaitStart(builder);
  }

  /** Blocks (up to 2 minutes) until the current launcher reports ONLINE. */
  protected LocatorLauncher awaitStart(final LocatorLauncher launcher) {
    await().atMost(2, MINUTES).until(() -> assertThat(isLauncherOnline()).isTrue());
    return launcher;
  }

  protected Locator getLocator() {
    return launcher.getLocator();
  }

  /**
   * Returns a new Builder with helpful defaults for safe testing. If you need a Builder in a test
   * without any of these defaults then simply use {@code new Builder()} instead.
   */
  protected Builder newBuilder() {
    return new Builder().setMemberName(getUniqueName()).setRedirectOutput(true)
        .setWorkingDirectory(getWorkingDirectoryPath())
        .set(CLUSTER_CONFIGURATION_DIR, getClusterConfigDirectoryPath())
        .set(DISABLE_AUTO_RECONNECT, "true").set(LOG_LEVEL, "config").set(MCAST_PORT, "0");
  }

  protected LocatorLauncher startLocator() {
    return awaitStart(newBuilder());
  }

  protected LocatorLauncher startLocator(final Builder builder) {
    return awaitStart(builder);
  }

  protected void stopLocator() {
    assertThat(launcher.stop().getStatus()).isEqualTo(STOPPED);
  }

  private LocatorLauncher awaitStart(final Builder builder) {
    // Builds, starts, asserts ONLINE, and records the launcher for tearDown.
    launcher = builder.build();
    assertThat(launcher.start().getStatus()).isEqualTo(Status.ONLINE);
    return awaitStart(launcher);
  }

  private File getClusterConfigDirectory() {
    return clusterConfigDirectory;
  }

  private String getClusterConfigDirectoryPath() {
    try {
      return clusterConfigDirectory.getCanonicalPath();
    } catch (IOException e) {
      // Surface as unchecked so callers (builder chains) need no throws clause.
      throw new UncheckedIOException(e);
    }
  }

  private boolean isLauncherOnline() {
    LocatorState locatorState = launcher.status();
    assertNotNull(locatorState);
    return Status.ONLINE.equals(locatorState.getStatus());
  }
}
charliemblack/geode
geode-core/src/test/java/org/apache/geode/distributed/LocatorLauncherIntegrationTestCase.java
Java
apache-2.0
5,865
/* Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.activiti5.engine.impl.json;

import java.io.Reader;
import java.io.Writer;

import org.activiti5.engine.impl.util.json.JSONObject;

/**
 * Base class for converters that map an object of type {@code T} to and from
 * its JSON representation. Subclasses supply the two abstract mappings; the
 * concrete {@code toJson} overloads delegate to {@link #toJsonObject(Object)}.
 *
 * @author Tom Baeyens
 */
public abstract class JsonObjectConverter<T> {

  /** Maps {@code object} to its JSON object representation. */
  public abstract JSONObject toJsonObject(T object);

  /** Reads a serialized object of type {@code T} back from {@code reader}. */
  public abstract T toObject(Reader reader);

  /** Serializes {@code object} and streams the JSON text to {@code writer}. */
  public void toJson(T object, Writer writer) {
    JSONObject json = toJsonObject(object);
    json.write(writer);
  }

  /** Serializes {@code object} to a compact JSON string. */
  public String toJson(T object) {
    JSONObject json = toJsonObject(object);
    return json.toString();
  }

  /** Serializes {@code object} to a JSON string indented by {@code indentFactor}. */
  public String toJson(T object, int indentFactor) {
    JSONObject json = toJsonObject(object);
    return json.toString(indentFactor);
  }
}
roberthafner/flowable-engine
modules/flowable5-engine/src/main/java/org/activiti5/engine/impl/json/JsonObjectConverter.java
Java
apache-2.0
1,184