code
stringlengths
3
1.05M
repo_name
stringlengths
4
116
path
stringlengths
4
991
language
stringclasses
9 values
license
stringclasses
15 values
size
int32
3
1.05M
package org.srplib.validation; import java.util.List; /** * Encapsulates an object which can be validated. * * <p>Validatable is unaware of UI framework and contains only validation specific logic.</p> * * <p>Usage patterns: * <ol> * <li>Extract value from to be-validated-component and wap it with Validatable implementation (e.g. DefaultValidatable).</li> * <li>Pass resulting validatable to all associated validators</li> * <li>Query validatable.hasErrors() method</li> * </ol> * * </p> * * @author Anton Pechinsky */ public interface Validatable<T> { /** * Returns value to be validated. * * @return an object representing value. */ T getValue(); /** * Tests if this object has errors or not. * * @return true if object has errors, <code>false</code> otherwise */ boolean hasErrors(); /** * Adds error to this object. * * @param error ValidationError an error. */ void addError(ValidationError error); /** * Returns validation error associated with this object. * * @return a List of validation errors. If there are errors then empty list is returned. */ List<ValidationError> getErrors(); /** * Returns a reference to component which is being validated. * * @return Object validation context */ Object getContext(); }
apechinsky/srplib
srp-validation/src/main/java/org/srplib/validation/Validatable.java
Java
apache-2.0
1,391
package mb.scopegraph.pepm16.terms.path; import org.immutables.serial.Serial; import org.immutables.value.Value; import org.metaborg.util.collection.ConsList; import org.metaborg.util.iterators.Iterables2; import io.usethesource.capsule.Set; import mb.scopegraph.pepm16.ILabel; import mb.scopegraph.pepm16.IOccurrence; import mb.scopegraph.pepm16.IScope; import mb.scopegraph.pepm16.path.IResolutionPath; import mb.scopegraph.pepm16.path.IStep; @Value.Immutable @Serial.Version(value = 42L) abstract class AEStep<S extends IScope, L extends ILabel, O extends IOccurrence> implements IStep<S, L, O> { @Value.Parameter @Override public abstract S getSource(); @Value.Parameter @Override public abstract L getLabel(); @Value.Parameter @Override public abstract S getTarget(); @Value.Lazy @Override public int size() { return 1; } @Value.Lazy @Override public Set.Immutable<O> getImports() { return Set.Immutable.of(); } @Override public Iterable<IResolutionPath<S, L, O>> getImportPaths() { return Iterables2.empty(); } @Value.Lazy @Override public Set.Immutable<S> getScopes() { return Set.Immutable.of(getSource(), getTarget()); } @Value.Lazy @Override public ConsList<L> getLabels() { return ConsList.of(getLabel()); } @Override public <T> T match(IStep.ICases<S, L, O, T> cases) { return cases.caseE(getSource(), getLabel(), getTarget()); } @Override public String toString(boolean includeSource, boolean includeTarget) { StringBuilder sb = new StringBuilder(); if(includeSource) { sb.append(getSource()); sb.append(Paths.PATH_SEPERATOR); } sb.append("E("); sb.append(getLabel()); sb.append(")"); if(includeTarget) { sb.append(Paths.PATH_SEPERATOR); sb.append(getTarget()); } return sb.toString(); } @Value.Lazy @Override public abstract int hashCode(); @Override public String toString() { return toString(true, true); } }
metaborg/nabl
nabl2.solver/src/main/java/mb/scopegraph/pepm16/terms/path/AEStep.java
Java
apache-2.0
2,096
/* * Copyright 2015 the original author or authors. * @https://github.com/scouter-project/scouter * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package scouter.lang.step; import scouter.io.DataInputX; import scouter.io.DataOutputX; import java.io.IOException; public class MethodStep2 extends MethodStep { public int error; public byte getStepType() { return StepEnum.METHOD2; } public int getError() { return error; } public void write(DataOutputX out) throws IOException { super.write(out); out.writeDecimal(error); } public Step read(DataInputX in) throws IOException { super.read(in); this.error = (int) in.readDecimal(); return this; } }
scouter-project/scouter
scouter.common/src/main/java/scouter/lang/step/MethodStep2.java
Java
apache-2.0
1,207
/*! * ${copyright} */ // Provides control sap.m.VBox. sap.ui.define(['jquery.sap.global', './FlexBox', './library'], function(jQuery, FlexBox, library) { "use strict"; /** * Constructor for a new VBox. * * @param {string} [sId] id for the new control, generated automatically if no id is given * @param {object} [mSettings] initial settings for the new control * * @class * The VBox control builds the container for a vertical flexible box layout. VBox is a convenience control, as it is just a specialized FlexBox control.<br> * <br> * <b>Note:</b> Be sure to check the <code>renderType</code> setting to avoid issues due to browser inconsistencies. * * @extends sap.m.FlexBox * * @author SAP SE * @version ${version} * * @constructor * @public * @alias sap.m.VBox * @ui5-metamodel This control/element also will be described in the UI5 (legacy) designtime metamodel */ var VBox = FlexBox.extend("sap.m.VBox", /** @lends sap.m.VBox.prototype */ { metadata : { library : "sap.m" }}); return VBox; }, /* bExport= */ true);
olirogers/openui5
src/sap.m/src/sap/m/VBox.js
JavaScript
apache-2.0
1,081
// Karma configuration // Generated on Thu Sep 17 2015 09:48:07 GMT+0530 (IST) module.exports = function(config) { config.set({ // base path that will be used to resolve all patterns (eg. files, exclude) basePath: '', // frameworks to use // available frameworks: https://npmjs.org/browse/keyword/karma-adapter frameworks: ['jasmine'], // list of files / patterns to load in the browser // list of files / patterns to load in the browser files: [ '../www/lib/angular/angular.js', '../www/js/*.js', '../www/lib/angular-mocks/angular-mocks.js', '**/*tests.js' ], // Use the PhantomJS browser instead of Chrome browsers: ['PhantomJS'], // list of files to exclude exclude: [ ], // preprocess matching files before serving them to the browser // available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor preprocessors: { }, // test results reporter to use // possible values: 'dots', 'progress' // available reporters: https://npmjs.org/browse/keyword/karma-reporter reporters: ['progress'], // web server port port: 9876, // enable / disable colors in the output (reporters and logs) colors: true, // level of logging // possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG logLevel: config.LOG_INFO, // enable / disable watching file and executing tests whenever any file changes autoWatch: true, // start these browsers // available browser launchers: https://npmjs.org/browse/keyword/karma-launcher //browsers: ['Chrome'], // Continuous Integration mode // if true, Karma captures browsers, runs the tests and exits singleRun: false }) }
magnusmel/ramisdance
tests/my.conf.js
JavaScript
apache-2.0
1,793
/* * Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.dynamodbv2; import static java.util.concurrent.Executors.newFixedThreadPool; import javax.annotation.Generated; import com.amazonaws.services.dynamodbv2.model.*; import com.amazonaws.client.AwsAsyncClientParams; import com.amazonaws.annotation.ThreadSafe; import com.amazonaws.ClientConfiguration; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import java.util.concurrent.ExecutorService; import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; /** * Client for accessing Amazon DynamoDB Streams asynchronously. Each asynchronous method will return a Java Future * object representing the asynchronous operation; overloads which accept an {@code AsyncHandler} can be used to receive * notification when an asynchronous operation completes. * <p> * <fullname>Amazon DynamoDB</fullname> * <p> * Amazon DynamoDB Streams provides API actions for accessing streams and processing stream records. To learn more about * application development with Streams, see <a * href="http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Streams.html">Capturing Table Activity with * DynamoDB Streams</a> in the Amazon DynamoDB Developer Guide. 
* </p> */ @ThreadSafe @Generated("com.amazonaws:aws-java-sdk-code-generator") public class AmazonDynamoDBStreamsAsyncClient extends AmazonDynamoDBStreamsClient implements AmazonDynamoDBStreamsAsync { private static final int DEFAULT_THREAD_POOL_SIZE = 50; private final java.util.concurrent.ExecutorService executorService; /** * Constructs a new asynchronous client to invoke service methods on Amazon DynamoDB Streams. A credentials provider * chain will be used that searches for credentials in this order: * <ul> * <li>Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY</li> * <li>Java System Properties - aws.accessKeyId and aws.secretKey</li> * <li>Credential profiles file at the default location (~/.aws/credentials) shared by all AWS SDKs and the AWS CLI</li> * <li>Instance profile credentials delivered through the Amazon EC2 metadata service</li> * </ul> * <p> * Asynchronous methods are delegated to a fixed-size thread pool containing 50 threads (to match the default * maximum number of concurrent connections to the service). * * @see com.amazonaws.auth.DefaultAWSCredentialsProviderChain * @see java.util.concurrent.Executors#newFixedThreadPool(int) * @deprecated use {@link AmazonDynamoDBStreamsAsyncClientBuilder#defaultClient()} */ @Deprecated public AmazonDynamoDBStreamsAsyncClient() { this(DefaultAWSCredentialsProviderChain.getInstance()); } /** * Constructs a new asynchronous client to invoke service methods on Amazon DynamoDB Streams. 
A credentials provider * chain will be used that searches for credentials in this order: * <ul> * <li>Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY</li> * <li>Java System Properties - aws.accessKeyId and aws.secretKey</li> * <li>Credential profiles file at the default location (~/.aws/credentials) shared by all AWS SDKs and the AWS CLI</li> * <li>Instance profile credentials delivered through the Amazon EC2 metadata service</li> * </ul> * <p> * Asynchronous methods are delegated to a fixed-size thread pool containing a number of threads equal to the * maximum number of concurrent connections configured via {@code ClientConfiguration.getMaxConnections()}. * * @param clientConfiguration * The client configuration options controlling how this client connects to Amazon DynamoDB Streams (ex: * proxy settings, retry counts, etc). * * @see com.amazonaws.auth.DefaultAWSCredentialsProviderChain * @see java.util.concurrent.Executors#newFixedThreadPool(int) * @deprecated use {@link AmazonDynamoDBStreamsAsyncClientBuilder#withClientConfiguration(ClientConfiguration)} */ @Deprecated public AmazonDynamoDBStreamsAsyncClient(ClientConfiguration clientConfiguration) { this(DefaultAWSCredentialsProviderChain.getInstance(), clientConfiguration, newFixedThreadPool(clientConfiguration.getMaxConnections())); } /** * Constructs a new asynchronous client to invoke service methods on Amazon DynamoDB Streams using the specified AWS * account credentials. * <p> * Asynchronous methods are delegated to a fixed-size thread pool containing 50 threads (to match the default * maximum number of concurrent connections to the service). * * @param awsCredentials * The AWS credentials (access key ID and secret key) to use when authenticating with AWS services. 
* @see java.util.concurrent.Executors#newFixedThreadPool(int) * @deprecated use {@link AmazonDynamoDBStreamsAsyncClientBuilder#withCredentials(AWSCredentialsProvider)} */ @Deprecated public AmazonDynamoDBStreamsAsyncClient(AWSCredentials awsCredentials) { this(awsCredentials, newFixedThreadPool(DEFAULT_THREAD_POOL_SIZE)); } /** * Constructs a new asynchronous client to invoke service methods on Amazon DynamoDB Streams using the specified AWS * account credentials and executor service. Default client settings will be used. * * @param awsCredentials * The AWS credentials (access key ID and secret key) to use when authenticating with AWS services. * @param executorService * The executor service by which all asynchronous requests will be executed. * @deprecated use {@link AmazonDynamoDBStreamsAsyncClientBuilder#withCredentials(AWSCredentialsProvider)} and * {@link AmazonDynamoDBStreamsAsyncClientBuilder#withExecutorFactory(com.amazonaws.client.builder.ExecutorFactory)} */ @Deprecated public AmazonDynamoDBStreamsAsyncClient(AWSCredentials awsCredentials, ExecutorService executorService) { this(awsCredentials, configFactory.getConfig(), executorService); } /** * Constructs a new asynchronous client to invoke service methods on Amazon DynamoDB Streams using the specified AWS * account credentials, executor service, and client configuration options. * * @param awsCredentials * The AWS credentials (access key ID and secret key) to use when authenticating with AWS services. * @param clientConfiguration * Client configuration options (ex: max retry limit, proxy settings, etc). * @param executorService * The executor service by which all asynchronous requests will be executed. 
* @deprecated use {@link AmazonDynamoDBStreamsAsyncClientBuilder#withCredentials(AWSCredentialsProvider)} and * {@link AmazonDynamoDBStreamsAsyncClientBuilder#withClientConfiguration(ClientConfiguration)} and * {@link AmazonDynamoDBStreamsAsyncClientBuilder#withExecutorFactory(com.amazonaws.client.builder.ExecutorFactory)} */ @Deprecated public AmazonDynamoDBStreamsAsyncClient(AWSCredentials awsCredentials, ClientConfiguration clientConfiguration, ExecutorService executorService) { super(awsCredentials, clientConfiguration); this.executorService = executorService; } /** * Constructs a new asynchronous client to invoke service methods on Amazon DynamoDB Streams using the specified AWS * account credentials provider. Default client settings will be used. * <p> * Asynchronous methods are delegated to a fixed-size thread pool containing 50 threads (to match the default * maximum number of concurrent connections to the service). * * @param awsCredentialsProvider * The AWS credentials provider which will provide credentials to authenticate requests with AWS services. * @see java.util.concurrent.Executors#newFixedThreadPool(int) * @deprecated use {@link AmazonDynamoDBStreamsAsyncClientBuilder#withCredentials(AWSCredentialsProvider)} */ @Deprecated public AmazonDynamoDBStreamsAsyncClient(AWSCredentialsProvider awsCredentialsProvider) { this(awsCredentialsProvider, newFixedThreadPool(DEFAULT_THREAD_POOL_SIZE)); } /** * Constructs a new asynchronous client to invoke service methods on Amazon DynamoDB Streams using the provided AWS * account credentials provider and client configuration options. * <p> * Asynchronous methods are delegated to a fixed-size thread pool containing a number of threads equal to the * maximum number of concurrent connections configured via {@code ClientConfiguration.getMaxConnections()}. * * @param awsCredentialsProvider * The AWS credentials provider which will provide credentials to authenticate requests with AWS services. 
* @param clientConfiguration * Client configuration options (ex: max retry limit, proxy settings, etc). * * @see com.amazonaws.auth.DefaultAWSCredentialsProviderChain * @see java.util.concurrent.Executors#newFixedThreadPool(int) * @deprecated use {@link AmazonDynamoDBStreamsAsyncClientBuilder#withCredentials(AWSCredentialsProvider)} and * {@link AmazonDynamoDBStreamsAsyncClientBuilder#withClientConfiguration(ClientConfiguration)} */ @Deprecated public AmazonDynamoDBStreamsAsyncClient(AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration) { this(awsCredentialsProvider, clientConfiguration, newFixedThreadPool(clientConfiguration.getMaxConnections())); } /** * Constructs a new asynchronous client to invoke service methods on Amazon DynamoDB Streams using the specified AWS * account credentials provider and executor service. Default client settings will be used. * * @param awsCredentialsProvider * The AWS credentials provider which will provide credentials to authenticate requests with AWS services. * @param executorService * The executor service by which all asynchronous requests will be executed. * @deprecated use {@link AmazonDynamoDBStreamsAsyncClientBuilder#withCredentials(AWSCredentialsProvider)} and * {@link AmazonDynamoDBStreamsAsyncClientBuilder#withExecutorFactory(com.amazonaws.client.builder.ExecutorFactory)} */ @Deprecated public AmazonDynamoDBStreamsAsyncClient(AWSCredentialsProvider awsCredentialsProvider, ExecutorService executorService) { this(awsCredentialsProvider, configFactory.getConfig(), executorService); } /** * Constructs a new asynchronous client to invoke service methods on Amazon DynamoDB Streams using the specified AWS * account credentials provider, executor service, and client configuration options. * * @param awsCredentialsProvider * The AWS credentials provider which will provide credentials to authenticate requests with AWS services. 
* @param clientConfiguration * Client configuration options (ex: max retry limit, proxy settings, etc). * @param executorService * The executor service by which all asynchronous requests will be executed. * @deprecated use {@link AmazonDynamoDBStreamsAsyncClientBuilder#withCredentials(AWSCredentialsProvider)} and * {@link AmazonDynamoDBStreamsAsyncClientBuilder#withClientConfiguration(ClientConfiguration)} and * {@link AmazonDynamoDBStreamsAsyncClientBuilder#withExecutorFactory(com.amazonaws.client.builder.ExecutorFactory)} */ @Deprecated public AmazonDynamoDBStreamsAsyncClient(AWSCredentialsProvider awsCredentialsProvider, ClientConfiguration clientConfiguration, ExecutorService executorService) { super(awsCredentialsProvider, clientConfiguration); this.executorService = executorService; } public static AmazonDynamoDBStreamsAsyncClientBuilder asyncBuilder() { return AmazonDynamoDBStreamsAsyncClientBuilder.standard(); } /** * Constructs a new asynchronous client to invoke service methods on Amazon DynamoDB Streams using the specified * parameters. * * @param asyncClientParams * Object providing client parameters. */ AmazonDynamoDBStreamsAsyncClient(AwsAsyncClientParams asyncClientParams) { super(asyncClientParams); this.executorService = asyncClientParams.getExecutor(); } /** * Returns the executor service used by this client to execute async requests. * * @return The executor service used by this client to execute async requests. 
*/ public ExecutorService getExecutorService() { return executorService; } @Override public java.util.concurrent.Future<DescribeStreamResult> describeStreamAsync(DescribeStreamRequest request) { return describeStreamAsync(request, null); } @Override public java.util.concurrent.Future<DescribeStreamResult> describeStreamAsync(final DescribeStreamRequest request, final com.amazonaws.handlers.AsyncHandler<DescribeStreamRequest, DescribeStreamResult> asyncHandler) { final DescribeStreamRequest finalRequest = beforeClientExecution(request); return executorService.submit(new java.util.concurrent.Callable<DescribeStreamResult>() { @Override public DescribeStreamResult call() throws Exception { DescribeStreamResult result = null; try { result = executeDescribeStream(finalRequest); } catch (Exception ex) { if (asyncHandler != null) { asyncHandler.onError(ex); } throw ex; } if (asyncHandler != null) { asyncHandler.onSuccess(finalRequest, result); } return result; } }); } @Override public java.util.concurrent.Future<GetRecordsResult> getRecordsAsync(GetRecordsRequest request) { return getRecordsAsync(request, null); } @Override public java.util.concurrent.Future<GetRecordsResult> getRecordsAsync(final GetRecordsRequest request, final com.amazonaws.handlers.AsyncHandler<GetRecordsRequest, GetRecordsResult> asyncHandler) { final GetRecordsRequest finalRequest = beforeClientExecution(request); return executorService.submit(new java.util.concurrent.Callable<GetRecordsResult>() { @Override public GetRecordsResult call() throws Exception { GetRecordsResult result = null; try { result = executeGetRecords(finalRequest); } catch (Exception ex) { if (asyncHandler != null) { asyncHandler.onError(ex); } throw ex; } if (asyncHandler != null) { asyncHandler.onSuccess(finalRequest, result); } return result; } }); } @Override public java.util.concurrent.Future<GetShardIteratorResult> getShardIteratorAsync(GetShardIteratorRequest request) { return getShardIteratorAsync(request, null); } 
@Override public java.util.concurrent.Future<GetShardIteratorResult> getShardIteratorAsync(final GetShardIteratorRequest request, final com.amazonaws.handlers.AsyncHandler<GetShardIteratorRequest, GetShardIteratorResult> asyncHandler) { final GetShardIteratorRequest finalRequest = beforeClientExecution(request); return executorService.submit(new java.util.concurrent.Callable<GetShardIteratorResult>() { @Override public GetShardIteratorResult call() throws Exception { GetShardIteratorResult result = null; try { result = executeGetShardIterator(finalRequest); } catch (Exception ex) { if (asyncHandler != null) { asyncHandler.onError(ex); } throw ex; } if (asyncHandler != null) { asyncHandler.onSuccess(finalRequest, result); } return result; } }); } @Override public java.util.concurrent.Future<ListStreamsResult> listStreamsAsync(ListStreamsRequest request) { return listStreamsAsync(request, null); } @Override public java.util.concurrent.Future<ListStreamsResult> listStreamsAsync(final ListStreamsRequest request, final com.amazonaws.handlers.AsyncHandler<ListStreamsRequest, ListStreamsResult> asyncHandler) { final ListStreamsRequest finalRequest = beforeClientExecution(request); return executorService.submit(new java.util.concurrent.Callable<ListStreamsResult>() { @Override public ListStreamsResult call() throws Exception { ListStreamsResult result = null; try { result = executeListStreams(finalRequest); } catch (Exception ex) { if (asyncHandler != null) { asyncHandler.onError(ex); } throw ex; } if (asyncHandler != null) { asyncHandler.onSuccess(finalRequest, result); } return result; } }); } /** * Shuts down the client, releasing all managed resources. This includes forcibly terminating all pending * asynchronous service calls. Clients who wish to give pending asynchronous service calls time to complete should * call {@code getExecutorService().shutdown()} followed by {@code getExecutorService().awaitTermination()} prior to * calling this method. 
*/ @Override public void shutdown() { super.shutdown(); executorService.shutdownNow(); } }
dagnir/aws-sdk-java
aws-java-sdk-dynamodb/src/main/java/com/amazonaws/services/dynamodbv2/AmazonDynamoDBStreamsAsyncClient.java
Java
apache-2.0
18,797
// SSRSDeployerTool / DailyRecurrence.cs // ------------------------------------------------------------------ // Author: Mark Ewer (MEwer@LetterBllc.com) // Modified: 9/23/2015 // ------------------------------------------------------------------ // This software is a copyrighted work and no license to use, // modify, or distribute is granted to you without specific written // consent from Letter B LLC. // // Copyright 2014 Letter B LLC, All Rights Reserved // ------------------------------------------------------------------ namespace SSRSDeployerTool.SSRSWebService { /// <remarks/> [System.CodeDom.Compiler.GeneratedCodeAttribute("System.Xml", "4.0.30319.34230")] [System.SerializableAttribute()] [System.Diagnostics.DebuggerStepThroughAttribute()] [System.ComponentModel.DesignerCategoryAttribute("code")] [System.Xml.Serialization.XmlTypeAttribute(Namespace = "http://schemas.microsoft.com/sqlserver/2005/06/30/reporting/reportingservices")] public partial class DailyRecurrence : RecurrencePattern { /// <remarks/> public int DaysInterval { get; set; } } }
LetterBllc/SSRS-Deployer
Source/SSRSDeployerTool/Web References/SSRSWebService/DailyRecurrence.cs
C#
apache-2.0
1,146
package ru.stqa.pft.sandbox; public class FirstProgramm { public static void main(String[] args) { Hello("world"); Hello("baby"); Square s = new Square(5); System.out.println("Площадь квадрата со стороной, " + s.l + " = " + s.area()); Rectangle r = new Rectangle(4, 6); System.out.println("Площадь квадрата со сторонaми " + r.a + " и " + r.b + " = " + r.area()); } public static void Hello(String some) { System.out.println("Hello, " + some + "!"); } }
greeniol/Java_pdt45
sandbox/src/main/java/ru/stqa/pft/sandbox/FirstProgramm.java
Java
apache-2.0
547
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

require "helper"
require "json"
require "uri"

# Tests for Google::Cloud::Storage::File#signed_url.
#
# Every test pins Time.now to 2012-01-01 00:00:00 UTC (epoch 1325376000) so
# the expiration timestamp embedded in the string-to-sign is the stable value
# 1325376300 (pinned time + 300 seconds).
describe Google::Cloud::Storage::File, :signed_url, :mock_storage do
  let(:bucket_name) { "bucket" }
  let(:bucket_gapi) { Google::Apis::StorageV1::Bucket.from_json random_bucket_hash(bucket_name).to_json }
  let(:bucket) { Google::Cloud::Storage::Bucket.from_gapi bucket_gapi, storage.service }
  let(:file_name) { "file.ext" }
  let(:file_gapi) { Google::Apis::StorageV1::Object.from_json random_file_hash(bucket.name, file_name).to_json }
  let(:file) { Google::Cloud::Storage::File.from_gapi file_gapi, storage.service }

  it "uses the credentials' issuer and signing_key to generate signed_url" do
    Time.stub :now, Time.new(2012,1,1,0,0,0, "+00:00") do
      # Mock asserts the exact canonical string-to-sign passed to the key.
      signing_key_mock = Minitest::Mock.new
      signing_key_mock.expect :sign, "native-signature", [OpenSSL::Digest::SHA256, "GET\n\n\n1325376300\n/bucket/file.ext"]
      credentials.issuer = "native_client_email"
      credentials.signing_key = signing_key_mock

      signed_url = file.signed_url

      signed_url_params = CGI::parse(URI(signed_url).query)
      signed_url_params["GoogleAccessId"].must_equal ["native_client_email"]
      signed_url_params["Signature"].must_equal [Base64.strict_encode64("native-signature").delete("\n")]

      signing_key_mock.verify
    end
  end

  it "allows issuer and signing_key to be passed in as options" do
    Time.stub :now, Time.new(2012,1,1,0,0,0, "+00:00") do
      # Credentials get a poison key so the test fails loudly if the
      # option-supplied key is not the one actually used.
      credentials.issuer = "native_client_email"
      credentials.signing_key = PoisonSigningKey.new

      signing_key_mock = Minitest::Mock.new
      signing_key_mock.expect :sign, "option-signature", [OpenSSL::Digest::SHA256, "GET\n\n\n1325376300\n/bucket/file.ext"]

      signed_url = file.signed_url issuer: "option_issuer", signing_key: signing_key_mock

      signed_url_params = CGI::parse(URI(signed_url).query)
      signed_url_params["GoogleAccessId"].must_equal ["option_issuer"]
      signed_url_params["Signature"].must_equal [Base64.strict_encode64("option-signature").delete("\n")]

      signing_key_mock.verify
    end
  end

  it "allows client_email and private to be passed in as options" do
    Time.stub :now, Time.new(2012,1,1,0,0,0, "+00:00") do
      credentials.issuer = "native_client_email"
      credentials.signing_key = PoisonSigningKey.new

      signing_key_mock = Minitest::Mock.new
      signing_key_mock.expect :sign, "option-signature", [OpenSSL::Digest::SHA256, "GET\n\n\n1325376300\n/bucket/file.ext"]

      # The private_key option is parsed via OpenSSL::PKey::RSA.new, so stub
      # the constructor to hand back the mock.
      OpenSSL::PKey::RSA.stub :new, signing_key_mock do

        signed_url = file.signed_url client_email: "option_client_email", private_key: "option_private_key"

        signed_url_params = CGI::parse(URI(signed_url).query)
        signed_url_params["GoogleAccessId"].must_equal ["option_client_email"]
        signed_url_params["Signature"].must_equal [Base64.strict_encode64("option-signature").delete("\n")]

      end

      signing_key_mock.verify
    end
  end

  it "allows headers to be passed in as options" do
    Time.stub :now, Time.new(2012,1,1,0,0,0, "+00:00") do
      # Headers appear canonicalized (lowercased, sorted) in the string-to-sign.
      signing_key_mock = Minitest::Mock.new
      signing_key_mock.expect :sign, "native-signature", [OpenSSL::Digest::SHA256, "GET\n\n\n1325376300\nx-goog-acl:public-read\nx-goog-meta-foo:bar,baz\n/bucket/file.ext"]
      credentials.issuer = "native_client_email"
      credentials.signing_key = signing_key_mock

      signed_url = file.signed_url headers: { "X-Goog-Meta-FOO" => "bar,baz",
                                              "X-Goog-ACL" => "public-read" }

      signed_url_params = CGI::parse(URI(signed_url).query)
      signed_url_params["GoogleAccessId"].must_equal ["native_client_email"]
      signed_url_params["Signature"].must_equal [Base64.strict_encode64("native-signature").delete("\n")]

      signing_key_mock.verify
    end
  end

  it "allows response content type and disposition to be passed in as options" do
    Time.stub :now, Time.new(2012,1,1,0,0,0, "+00:00") do
      # Query params are appended to the URL but do not affect the string-to-sign.
      signing_key_mock = Minitest::Mock.new
      signing_key_mock.expect :sign, "native-signature", [OpenSSL::Digest::SHA256, "GET\n\n\n1325376300\n/bucket/file.ext"]
      credentials.issuer = "native_client_email"
      credentials.signing_key = signing_key_mock

      signed_url = file.signed_url query: { "response-content-disposition" => "attachment; filename=\"test.png\"" }

      signed_url_params = CGI::parse(URI(signed_url).query)
      signed_url_params["GoogleAccessId"].must_equal ["native_client_email"]
      signed_url_params["Signature"].must_equal [Base64.strict_encode64("native-signature").delete("\n")]
      signed_url_params["response-content-disposition"].must_equal ["attachment; filename=\"test.png\""]

      signing_key_mock.verify
    end
  end

  it "raises when missing issuer" do
    credentials.issuer = nil
    credentials.signing_key = PoisonSigningKey.new

    expect {
      file.signed_url
    }.must_raise Google::Cloud::Storage::SignedUrlUnavailable
  end

  it "raises when missing signing_key" do
    credentials.issuer = "native_issuer"
    credentials.signing_key = nil

    expect {
      file.signed_url
    }.must_raise Google::Cloud::Storage::SignedUrlUnavailable
  end

  describe "Files with spaces in them" do
    let(:file_name) { "hello world.txt" }

    it "properly escapes the path when generating signed_url" do
      Time.stub :now, Time.new(2012,1,1,0,0,0, "+00:00") do
        # The space must be percent-encoded both in the signed string and the URL path.
        signing_key_mock = Minitest::Mock.new
        signing_key_mock.expect :sign, "native-signature", [OpenSSL::Digest::SHA256, "GET\n\n\n1325376300\n/bucket/hello%20world.txt"]
        credentials.issuer = "native_client_email"
        credentials.signing_key = signing_key_mock

        signed_url = file.signed_url

        signed_uri = URI signed_url
        signed_uri.path.must_equal "/bucket/hello%20world.txt"

        signed_url_params = CGI::parse signed_uri.query
        signed_url_params["GoogleAccessId"].must_equal ["native_client_email"]
        signed_url_params["Signature"].must_equal [Base64.strict_encode64("native-signature").delete("\n")]

        signing_key_mock.verify
      end
    end
  end

  it "allows query params to be passed in" do
    Time.stub :now, Time.new(2012,1,1,0,0,0, "+00:00") do
      signing_key_mock = Minitest::Mock.new
      signing_key_mock.expect :sign, "native-signature", [OpenSSL::Digest::SHA256, "GET\n\n\n1325376300\n/bucket/file.ext"]
      credentials.issuer = "native_client_email"
      credentials.signing_key = signing_key_mock

      signed_url = file.signed_url query: { "response-content-disposition" => "attachment; filename=\"google-cloud.png\"" }

      signed_url_params = CGI::parse(URI(signed_url).query)
      signed_url_params["GoogleAccessId"].must_equal ["native_client_email"]
      signed_url_params["Signature"].must_equal [Base64.strict_encode64("native-signature").delete("\n")]
      signed_url_params["response-content-disposition"].must_equal ["attachment; filename=\"google-cloud.png\""]

      signing_key_mock.verify
    end
  end

  # Sentinel key: raises if the credentials' key is used in a test where an
  # option-supplied key (or an absence error) should win instead.
  class PoisonSigningKey
    def sign kind, sig
      raise "The wrong signing_key was used"
    end
  end
end
hxiong388/google-cloud-ruby
google-cloud-storage/test/google/cloud/storage/signed_url_test.rb
Ruby
apache-2.0
7,790
/* Copyright 2015 Kyle E. Mitchell
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */
var concat = require('concat-stream')
var http = require('http')
var merkleize = require('commonform-merkleize')
var series = require('async-series')
var server = require('./server')
var tape = require('tape')
var url = require('url')

// Registering a well-formed callback URL is accepted with 202.
tape('POST /callbacks with URL', function(test) {
  server(function(port, done) {
    http
      .request({ method: 'POST', path: '/callbacks', port: port })
      .once('response', function(response) {
        test.equal(response.statusCode, 202, 'responds 202')
        done()
        test.end() })
      .end('https://somewhere.else/endpoint') }) })

// A body that does not parse as a URL is rejected with 400.
tape('POST /callbacks with bad URL', function(test) {
  server(function(port, done) {
    http
      .request({ method: 'POST', path: '/callbacks', port: port })
      .once('response', function(response) {
        test.equal(response.statusCode, 400, 'responds 400')
        done()
        test.end() })
      .end('blah blah blah') }) })

// Only POST is supported on /callbacks.
tape('GET /callbacks', function(test) {
  server(function(port, done) {
    var request = { method: 'GET', path: '/callbacks', port: port }
    http.request(request, function(response) {
      test.equal(response.statusCode, 405, 'responds 405')
      done()
      test.end() })
    .end() }) })

// Posting a new form triggers a callback POST carrying the form's digest
// to each registered callback URL.
tape('callback on POST /forms', function(test) {
  test.plan(3)
  var form = { content: [ 'Some text' ] }
  var digest = merkleize(form).digest
  var endpoint = '/x'
  // Stand up a local server to receive the callback and assert its body.
  var calledback = http
    .createServer()
    .on('request', function(request, response) {
      var parsed = url.parse(request.url)
      var path = parsed.pathname
      var callback = ( request.method === 'POST' && path === endpoint )
      if (callback) {
        request.pipe(concat(function(buffer) {
          test.equal(
            buffer.toString(), digest,
            'called back with digest')
          response.end()
          calledback.close() } )) }
      else {
        // Any other request to this server is unexpected.
        throw new Error() } })
    .listen(0, function() {
      var calledbackPort = this.address().port
      var callbackURL = ( 'http://localhost:' + calledbackPort + endpoint )
      server(function(port, done) {
        series(
          [ function(done) {
              // Register the callback URL first.
              http
                .request({ method: 'POST', path: '/callbacks', port: port })
                .once('response', function(response) {
                  test.equal(response.statusCode, 202, 'registered')
                  done() })
                .once('error', done)
                .end(callbackURL) },
            function(done) {
              // Then post the new form, which should fire the callback.
              http
                .request({ method: 'POST', path: '/forms', port: port })
                .once('response', function(response) {
                  test.equal(response.statusCode, 201, 'posted form')
                  done() })
                .once('error', done)
                .end(JSON.stringify(form)) },
            // NOTE(review): this step tears down the server but never calls
            // its series callback, so the final error handler below only runs
            // on an earlier failure — presumably intentional with plan(3).
            function () { done() } ],
          function(error) {
            test.ifError(error)
            done() }) }) }) })

// Re-posting an already-stored form responds 200 and must NOT fire callbacks.
tape('no callback on POST /forms with existing form', function(test) {
  var form = { content: [ 'Some text' ] }
  // Any request reaching this server is a failure.
  var calledback = http
    .createServer()
    .on('request', function(request, response) {
      test.equal(true, false, 'no callback request received')
      response.end()
      calledback.close() })
    .listen(0, function() {
      var calledbackPort = this.address().port
      var callbackURL = ( 'http://localhost:' + calledbackPort + '/x' )
      server(function(port, done) {
        series(
          [ function(done) {
              // Store the form before the callback is registered.
              http
                .request({ method: 'POST', path: '/forms', port: port })
                .once('response', function(response) {
                  test.equal(response.statusCode, 201, 'posted form')
                  done() })
                .once('error', done)
                .end(JSON.stringify(form)) },
            function(done) {
              http
                .request({ method: 'POST', path: '/callbacks', port: port })
                .once('response', function(response) {
                  test.equal(response.statusCode, 202, 'registered')
                  done() })
                .once('error', done)
                .end(callbackURL) },
            function(done) {
              // Posting the same form again: 200 (already exists), no callback.
              http
                .request({ method: 'POST', path: '/forms', port: port })
                .once('response', function(response) {
                  test.equal(response.statusCode, 200, 'posted form')
                  done() })
                .once('error', done)
                .end(JSON.stringify(form)) },
            function () {
              // Give a stray callback 100ms to arrive before declaring success.
              setTimeout(
                function() {
                  calledback.close()
                  done()
                  test.end() },
                100) } ],
          function(error) {
            test.ifError(error)
            calledback.close()
            done() }) }) }) })
commonform/commonform-serve
test/callbacks.test.js
JavaScript
apache-2.0
5,465
/*-
 * -\-\-
 * Spydra
 * --
 * Copyright (C) 2016 - 2018 Spotify AB
 * --
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * -/-/-
 */

package com.spotify.spydra.historytools;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Optional;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapreduce.jobhistory.HistoryViewer;
import org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.logaggregation.LogCLIHelpers;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Utilities for reading MapReduce job history and aggregated YARN logs
 * (stored per client id/username/bucket) and for running a read-only
 * JobHistoryServer over them.
 */
public class HistoryLogUtils {

  private static final Logger logger = LoggerFactory.getLogger(HistoryLogUtils.class);

  public static final String DEFAULT_USER_NAME = "root";
  // Hadoop config resource loaded on top of an otherwise-empty Configuration.
  public static final String HISTORY_LOG_CONFIG_NAME = "history.xml";

  public static final String SPYDRA_HISTORY_CLIENT_ID_PROPERTY = "spydra.job.history.client_id";
  public static final String SPYDRA_HISTORY_USERNAME_PROPERTY = "spydra.job.history.username";
  public static final String SPYDRA_HISTORY_BUCKET_PROPERTY = "spydra.job.history.bucket";

  // for cases when we don't actually need a username, such as history operations
  private static final String DUMMY_USER_NAME = "dummy";

  /**
   * Creates a specialized hadoop configuration for spydra. This configuration is
   * special in the sense that it configures hadoop tooling to be able to access GCS
   * for logs, history and is able to run a read-only job-history server (not moving
   * or deleting logs or history files). This configuration is dependent on client
   * and username due to how this information is stored in GCS.
   *
   * @param clientId client id to generate configuration for
   * @param username username to generate configuration for
   * @param bucket   name of the bucket storing logs and history information
   */
  public static Configuration generateHadoopConfig(
      String clientId, String username, String bucket) {
    // We want minimalistic and clean options that are unlikely to collide with anything,
    // that's why not loading defaults
    Configuration cfg = new Configuration(false);
    cfg.addResource(HISTORY_LOG_CONFIG_NAME);
    cfg.reloadConfiguration();
    cfg.set(SPYDRA_HISTORY_CLIENT_ID_PROPERTY, clientId);
    cfg.set(SPYDRA_HISTORY_USERNAME_PROPERTY, username);
    cfg.set(SPYDRA_HISTORY_BUCKET_PROPERTY, bucket);

    if (logger.isDebugEnabled()) {
      logger.debug("Dumping generated config to be applied for log/history tools");
      logger.debug(
          StreamSupport.stream(cfg.spliterator(), false)
              .map(Object::toString)
              .collect(Collectors.joining("\n")));
    }
    return cfg;
  }

  /**
   * Convenience version of @{see LogReader#generateHadoopConfig} for operations where a user
   * is not required.
   */
  public static Configuration generateHadoopConfig(String clientId, String bucket) {
    return generateHadoopConfig(clientId, DUMMY_USER_NAME, bucket);
  }

  /**
   * Dumps the full job logs for a particular application to stdout.
   *
   * @param applicationId application to dump logs for
   */
  public static void dumpFullLogs(Configuration cfg, ApplicationId applicationId) {
    LogCLIHelpers logCliHelpers = new LogCLIHelpers();
    // TODO: Add the proper base dir settings etc...
    logCliHelpers.setConf(cfg);

    try {
      // Username comes from the config generated by generateHadoopConfig above.
      logCliHelpers.dumpAllContainersLogs(
          applicationId,
          cfg.get(SPYDRA_HISTORY_USERNAME_PROPERTY),
          System.out);
    } catch (IOException e) {
      logger.error("Failed dumping log files for application " + applicationId.toString(), e);
    }
  }

  /**
   * Dumps the full job history information to stdout.
   *
   * @param applicationId application to dump history for
   */
  public static void dumpFullHistory(Configuration cfg, ApplicationId applicationId) {
    try {
      // TODO: This might be a problem if we have intermediate
      // TODO: and done dirs in different filesystems
      FileSystem fs = FileSystem.get(new URI(cfg.get(JHAdminConfig.MR_HISTORY_DONE_DIR)), cfg);
      // TODO: Seems to hang if there is no listing??
      findHistoryFilePath(fs, cfg.get(JHAdminConfig.MR_HISTORY_DONE_DIR), applicationId)
          .ifPresent(x -> {
            try {
              logger.info("Starting HistoryViewer");
              new HistoryViewer(x, cfg, true).print();
            } catch (IOException e) {
              logger.error("Failed running HistoryViewer to dump history", e);
            }
          });
    } catch (IOException e) {
      logger.error("Failed instantiating filesystem", e);
    } catch (URISyntaxException e) {
      logger.error("history location is not a valid URI", e);
    }
  }

  /**
   * Tries to locate a mapreduce job history file for some client id and application.
   *
   * @return Path of the located jhist file
   * @throws IOException        Failure to initialize filesystem to access history file
   * @throws URISyntaxException Location specified for history path prefix is not a valid URI
   */
  public static Optional<String> findHistoryFilePath(
      FileSystem fs, String historyDirPrefix, ApplicationId applicationId)
      throws IOException, URISyntaxException {
    Path jhistPathPattern = new Path(historyDirPrefix);
    return findHistoryFilePath(
        new RemoteIteratorAdaptor<>(fs.listFiles(jhistPathPattern, true)),
        applicationId);
  }

  public static Optional<String> findHistoryFilePath(
      Iterator<LocatedFileStatus> listing, ApplicationId applicationId) {

    // Job ids share the cluster timestamp and sequence number of the application id.
    JobID jobId = new JobID(
        String.valueOf(applicationId.getClusterTimestamp()),
        applicationId.getId());

    List<LocatedFileStatus> jhistFiles = new ArrayList<>();
    // maybe this could work more nicely with some recursive glob and a filter
    try {
      // NOTE(review): the '.' before "jhist" is unescaped, so it matches any
      // character — likely intended "\\.jhist"; confirm before changing.
      jhistFiles = StreamSupport
          .stream(Spliterators.spliteratorUnknownSize(listing, Spliterator.NONNULL), false)
          .filter(fstatus -> fstatus.getPath().toString()
              .matches(".*" + jobId.toString() + ".*.jhist"))
          .collect(Collectors.toList());
    } catch (RemoteIteratorAdaptor.WrappedRemoteIteratorException wrie) {
      // We can't really do overly much at this point, as this is an error from the
      // underlying hadoop filesystem implementation. But we want to at least log this
      // separately from other conditions.
      logger.error("Retrieving remote listing failed", wrie);
    }

    if (jhistFiles.size() < 1) {
      logger.error("Could not locate a history file for parameters");
      return Optional.empty();
    } else if (jhistFiles.size() > 1) {
      logger.error("Found two or more matching files, will dump first");
    }

    return jhistFiles.stream()
        .findFirst()
        .map(x -> x.getPath().toString());
  }

  /**
   * Starts a minimal JobHistoryServer.
   */
  public static void startJhs(Configuration cfg) {
    try {
      JobHistoryServer jobHistoryServer = new JobHistoryServer();
      jobHistoryServer.init(cfg);
      logger.info(String.format(
          "Starting JobHistoryServer on: http://%s",
          cfg.get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS)));
      jobHistoryServer.start();
    } catch (Exception e) {
      logger.error("Error starting JobHistoryServer", e);
      // Terminates the whole JVM on startup failure.
      System.exit(1);
    }
  }

  /**
   * CLI entry point: dumps logs and history for one application, then serves
   * history. Expects args = [clientId, applicationId, bucket].
   */
  public static void main(String[] args) {
    String clientId = args[0];
    String appIdStr = args[1];
    String bucket = args[2];
    ApplicationId appId = ConverterUtils.toApplicationId(appIdStr);
    Configuration cfg = generateHadoopConfig(clientId, DEFAULT_USER_NAME, bucket);
    logger.info("Dumping log files");
    dumpFullLogs(cfg, appId);
    logger.info("Dumping full history information");
    dumpFullHistory(cfg, appId);
    logger.info("Starting local JHS");
    startJhs(cfg);
  }
}
spotify/spydra
spydra/src/main/java/com/spotify/spydra/historytools/HistoryLogUtils.java
Java
apache-2.0
8,874
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package blueprint

import (
	"fmt"
	"path/filepath"
	"text/scanner"
)

// A Module handles generating all of the Ninja build actions needed to build a
// single module based on properties defined in a Blueprints file.  Module
// objects are initially created during the parse phase of a Context using one
// of the registered module types (and the associated ModuleFactory function).
// The Module's properties struct is automatically filled in with the property
// values specified in the Blueprints file (see Context.RegisterModuleType for more
// information on this).
//
// A Module can be split into multiple Modules by a Mutator.  All existing
// properties set on the module will be duplicated to the new Module, and then
// modified as necessary by the Mutator.
//
// The Module implementation can access the build configuration as well as any
// modules on which on which it depends (as defined by the "deps" property
// specified in the Blueprints file or dynamically added by implementing the
// DynamicDependerModule interface) using the ModuleContext passed to
// GenerateBuildActions.  This ModuleContext is also used to create Ninja build
// actions and to report errors to the user.
//
// In addition to implementing the GenerateBuildActions method, a Module should
// implement methods that provide dependant modules and singletons information
// they need to generate their build actions.  These methods will only be called
// after GenerateBuildActions is called because the Context calls
// GenerateBuildActions in dependency-order (and singletons are invoked after
// all the Modules).  The set of methods a Module supports will determine how
// dependant Modules interact with it.
//
// For example, consider a Module that is responsible for generating a library
// that other modules can link against.  The library Module might implement the
// following interface:
//
//   type LibraryProducer interface {
//       LibraryFileName() string
//   }
//
//   func IsLibraryProducer(module blueprint.Module) {
//       _, ok := module.(LibraryProducer)
//       return ok
//   }
//
// A binary-producing Module that depends on the library Module could then do:
//
//   func (m *myBinaryModule) GenerateBuildActions(ctx blueprint.ModuleContext) {
//       ...
//       var libraryFiles []string
//       ctx.VisitDepsDepthFirstIf(IsLibraryProducer,
//           func(module blueprint.Module) {
//               libProducer := module.(LibraryProducer)
//               libraryFiles = append(libraryFiles, libProducer.LibraryFileName())
//           })
//       ...
//   }
//
// to build the list of library file names that should be included in its link
// command.
//
// GenerateBuildActions may be called from multiple threads.  It is guaranteed to
// be called after it has finished being called on all dependencies and on all
// variants of that appear earlier in the ModuleContext.VisitAllModuleVariants list.
// Any accesses to global variables or to Module objects that are not dependencies
// or variants of the current Module must be synchronized by the implementation of
// GenerateBuildActions.
type Module interface {
	// GenerateBuildActions is called by the Context that created the Module
	// during its generate phase.  This call should generate all Ninja build
	// actions (rules, pools, and build statements) needed to build the module.
	GenerateBuildActions(ModuleContext)
}

// A DynamicDependerModule is a Module that may add dependencies that do not
// appear in its "deps" property.  Any Module that implements this interface
// will have its DynamicDependencies method called by the Context that created
// it during generate phase.
type DynamicDependerModule interface {
	Module

	// DynamicDependencies is called by the Context that created the
	// DynamicDependerModule during its generate phase.  This call should return
	// the list of module names that the DynamicDependerModule depends on
	// dynamically.  Module names that already appear in the "deps" property may
	// but do not need to be included in the returned list.
	DynamicDependencies(DynamicDependerModuleContext) []string
}

// BaseModuleContext is the interface shared by all context objects handed to a
// module: identity, configuration access, and error reporting.
type BaseModuleContext interface {
	ModuleName() string
	ModuleDir() string
	Config() interface{}

	ContainsProperty(name string) bool
	Errorf(pos scanner.Position, fmt string, args ...interface{})
	ModuleErrorf(fmt string, args ...interface{})
	PropertyErrorf(property, fmt string, args ...interface{})
	Failed() bool
}

// DynamicDependerModuleContext is the context passed to DynamicDependencies.
type DynamicDependerModuleContext interface {
	BaseModuleContext

	AddVariationDependencies([]Variation, ...string)
	AddFarVariationDependencies([]Variation, ...string)
}

// ModuleContext is the context passed to GenerateBuildActions; it adds
// dependency traversal and Ninja definition creation on top of
// BaseModuleContext.
type ModuleContext interface {
	BaseModuleContext

	OtherModuleName(m Module) string
	OtherModuleErrorf(m Module, fmt string, args ...interface{})

	VisitDirectDeps(visit func(Module))
	VisitDirectDepsIf(pred func(Module) bool, visit func(Module))
	VisitDepsDepthFirst(visit func(Module))
	VisitDepsDepthFirstIf(pred func(Module) bool, visit func(Module))

	ModuleSubDir() string

	Variable(pctx *PackageContext, name, value string)
	Rule(pctx *PackageContext, name string, params RuleParams, argNames ...string) Rule
	Build(pctx *PackageContext, params BuildParams)

	AddNinjaFileDeps(deps ...string)

	PrimaryModule() Module
	FinalModule() Module
	VisitAllModuleVariants(visit func(Module))
}

var _ BaseModuleContext = (*baseModuleContext)(nil)

type baseModuleContext struct {
	context *Context
	config  interface{}
	module  *moduleInfo
	errs    []error // accumulated errors; reported by the Context after the phase
}

func (d *baseModuleContext) ModuleName() string {
	return d.module.properties.Name
}

func (d *baseModuleContext) ContainsProperty(name string) bool {
	_, ok := d.module.propertyPos[name]
	return ok
}

func (d *baseModuleContext) ModuleDir() string {
	return filepath.Dir(d.module.relBlueprintsFile)
}

func (d *baseModuleContext) Config() interface{} {
	return d.config
}

func (d *baseModuleContext) Errorf(pos scanner.Position, format string, args ...interface{}) {
	d.errs = append(d.errs, &Error{
		Err: fmt.Errorf(format, args...),
		Pos: pos,
	})
}

func (d *baseModuleContext) ModuleErrorf(format string, args ...interface{}) {
	// Error is attributed to the module's own position in its Blueprints file.
	d.errs = append(d.errs, &Error{
		Err: fmt.Errorf(format, args...),
		Pos: d.module.pos,
	})
}

func (d *baseModuleContext) PropertyErrorf(property, format string, args ...interface{}) {
	pos, ok := d.module.propertyPos[property]
	if !ok {
		// Programming error: caller reported an error on a property that was
		// never set, so there is no position to attribute it to.
		panic(fmt.Errorf("property %q was not set for this module", property))
	}
	d.errs = append(d.errs, &Error{
		Err: fmt.Errorf(format, args...),
		Pos: pos,
	})
}

func (d *baseModuleContext) Failed() bool {
	return len(d.errs) > 0
}

var _ ModuleContext = (*moduleContext)(nil)

type moduleContext struct {
	baseModuleContext
	scope         *localScope
	ninjaFileDeps []string
	actionDefs    localBuildActions
}

func (m *moduleContext) OtherModuleName(logicModule Module) string {
	module := m.context.moduleInfo[logicModule]
	return module.properties.Name
}

func (m *moduleContext) OtherModuleErrorf(logicModule Module, format string,
	args ...interface{}) {

	// Error is attributed to the *other* module's position, not this one's.
	module := m.context.moduleInfo[logicModule]
	m.errs = append(m.errs, &Error{
		Err: fmt.Errorf(format, args...),
		Pos: module.pos,
	})
}

func (m *moduleContext) VisitDirectDeps(visit func(Module)) {
	m.context.visitDirectDeps(m.module, visit)
}

func (m *moduleContext) VisitDirectDepsIf(pred func(Module) bool, visit func(Module)) {
	m.context.visitDirectDepsIf(m.module, pred, visit)
}

func (m *moduleContext) VisitDepsDepthFirst(visit func(Module)) {
	m.context.visitDepsDepthFirst(m.module, visit)
}

func (m *moduleContext) VisitDepsDepthFirstIf(pred func(Module) bool,
	visit func(Module)) {

	m.context.visitDepsDepthFirstIf(m.module, pred, visit)
}

func (m *moduleContext) ModuleSubDir() string {
	return m.module.variantName
}

func (m *moduleContext) Variable(pctx *PackageContext, name, value string) {
	// Local definitions are scoped under the caller's package context.
	m.scope.ReparentTo(pctx)

	v, err := m.scope.AddLocalVariable(name, value)
	if err != nil {
		panic(err)
	}

	m.actionDefs.variables = append(m.actionDefs.variables, v)
}

func (m *moduleContext) Rule(pctx *PackageContext, name string,
	params RuleParams, argNames ...string) Rule {

	m.scope.ReparentTo(pctx)

	r, err := m.scope.AddLocalRule(name, &params, argNames...)
	if err != nil {
		panic(err)
	}

	m.actionDefs.rules = append(m.actionDefs.rules, r)

	return r
}

func (m *moduleContext) Build(pctx *PackageContext, params BuildParams) {
	m.scope.ReparentTo(pctx)

	def, err := parseBuildParams(m.scope, &params)
	if err != nil {
		panic(err)
	}

	m.actionDefs.buildDefs = append(m.actionDefs.buildDefs, def)
}

func (m *moduleContext) AddNinjaFileDeps(deps ...string) {
	m.ninjaFileDeps = append(m.ninjaFileDeps, deps...)
}

func (m *moduleContext) PrimaryModule() Module {
	// First variant in this module's group.
	return m.module.group.modules[0].logicModule
}

func (m *moduleContext) FinalModule() Module {
	// Last variant in this module's group.
	return m.module.group.modules[len(m.module.group.modules)-1].logicModule
}

func (m *moduleContext) VisitAllModuleVariants(visit func(Module)) {
	for _, module := range m.module.group.modules {
		visit(module.logicModule)
	}
}

//
// DynamicDependerModuleContext
//

type dynamicDependerModuleContext struct {
	baseModuleContext

	module *moduleInfo
}

// AddVariationDependencies adds deps as dependencies of the current module, but uses the variations
// argument to select which variant of the dependency to use.  A variant of the dependency must
// exist that matches the all of the non-local variations of the current module, plus the variations
// argument.
func (mctx *dynamicDependerModuleContext) AddVariationDependencies(variations []Variation,
	deps ...string) {

	for _, dep := range deps {
		errs := mctx.context.addVariationDependency(mctx.module, variations, dep, false)
		if len(errs) > 0 {
			mctx.errs = append(mctx.errs, errs...)
		}
	}
}

// AddFarVariationDependencies adds deps as dependencies of the current module, but uses the
// variations argument to select which variant of the dependency to use.  A variant of the
// dependency must exist that matches the variations argument, but may also have other variations.
// For any unspecified variation the first variant will be used.
//
// Unlike AddVariationDependencies, the variations of the current module are ignored - the
// depdendency only needs to match the supplied variations.
func (mctx *dynamicDependerModuleContext) AddFarVariationDependencies(variations []Variation,
	deps ...string) {

	for _, dep := range deps {
		errs := mctx.context.addVariationDependency(mctx.module, variations, dep, true)
		if len(errs) > 0 {
			mctx.errs = append(mctx.errs, errs...)
		}
	}
}

//
// MutatorContext
//

type mutatorContext struct {
	baseModuleContext
	name                 string
	dependenciesModified bool
}

type baseMutatorContext interface {
	BaseModuleContext

	Module() Module
}

type EarlyMutatorContext interface {
	baseMutatorContext

	CreateVariations(...string) []Module
	CreateLocalVariations(...string) []Module
}

type TopDownMutatorContext interface {
	baseMutatorContext

	VisitDirectDeps(visit func(Module))
	VisitDirectDepsIf(pred func(Module) bool, visit func(Module))
	VisitDepsDepthFirst(visit func(Module))
	VisitDepsDepthFirstIf(pred func(Module) bool, visit func(Module))
}

type BottomUpMutatorContext interface {
	baseMutatorContext

	AddDependency(module Module, name string)
	CreateVariations(...string) []Module
	SetDependencyVariation(string)
}

// A Mutator function is called for each Module, and can use
// MutatorContext.CreateVariations to split a Module into multiple Modules,
// modifying properties on the new modules to differentiate them.  It is called
// after parsing all Blueprint files, but before generating any build rules,
// and is always called on dependencies before being called on the depending module.
//
// The Mutator function should only modify members of properties structs, and not
// members of the module struct itself, to ensure the modified values are copied
// if a second Mutator chooses to split the module a second time.
type TopDownMutator func(mctx TopDownMutatorContext)
type BottomUpMutator func(mctx BottomUpMutatorContext)
type EarlyMutator func(mctx EarlyMutatorContext)

// Split a module into mulitple variants, one for each name in the variationNames
// parameter.  It returns a list of new modules in the same order as the variationNames
// list.
//
// If any of the dependencies of the module being operated on were already split
// by calling CreateVariations with the same name, the dependency will automatically
// be updated to point the matching variant.
//
// If a module is split, and then a module depending on the first module is not split
// when the Mutator is later called on it, the dependency of the depending module will
// automatically be updated to point to the first variant.
func (mctx *mutatorContext) CreateVariations(variationNames ...string) []Module {
	return mctx.createVariations(variationNames, false)
}

// Split a module into mulitple variants, one for each name in the variantNames
// parameter.  It returns a list of new modules in the same order as the variantNames
// list.
//
// Local variations do not affect automatic dependency resolution - dependencies added
// to the split module via deps or DynamicDependerModule must exactly match a variant
// that contains all the non-local variations.
func (mctx *mutatorContext) CreateLocalVariations(variationNames ...string) []Module {
	return mctx.createVariations(variationNames, true)
}

func (mctx *mutatorContext) createVariations(variationNames []string, local bool) []Module {
	ret := []Module{}
	modules, errs := mctx.context.createVariations(mctx.module, mctx.name, variationNames)
	if len(errs) > 0 {
		mctx.errs = append(mctx.errs, errs...)
	}

	for i, module := range modules {
		ret = append(ret, module.logicModule)
		if !local {
			// Non-local variations participate in dependency resolution.
			module.dependencyVariant[mctx.name] = variationNames[i]
		}
	}

	if len(ret) != len(variationNames) {
		// Invariant: createVariations must return exactly one module per name.
		panic("oops!")
	}

	return ret
}

// Set all dangling dependencies on the current module to point to the variation
// with given name.
func (mctx *mutatorContext) SetDependencyVariation(variationName string) {
	mctx.context.convertDepsToVariation(mctx.module, mctx.name, variationName)
}

func (mctx *mutatorContext) Module() Module {
	return mctx.module.logicModule
}

// Add a dependency to the given module.  The depender can be a specific variant
// of a module, but the dependee must be a module that has no variations.
// Does not affect the ordering of the current mutator pass, but will be ordered
// correctly for all future mutator passes.
func (mctx *mutatorContext) AddDependency(module Module, depName string) {
	errs := mctx.context.addDependency(mctx.context.moduleInfo[module], depName)
	if len(errs) > 0 {
		mctx.errs = append(mctx.errs, errs...)
	}
	mctx.dependenciesModified = true
}

func (mctx *mutatorContext) VisitDirectDeps(visit func(Module)) {
	mctx.context.visitDirectDeps(mctx.module, visit)
}

func (mctx *mutatorContext) VisitDirectDepsIf(pred func(Module) bool, visit func(Module)) {
	mctx.context.visitDirectDepsIf(mctx.module, pred, visit)
}

func (mctx *mutatorContext) VisitDepsDepthFirst(visit func(Module)) {
	mctx.context.visitDepsDepthFirst(mctx.module, visit)
}

func (mctx *mutatorContext) VisitDepsDepthFirstIf(pred func(Module) bool, visit func(Module)) {
	mctx.context.visitDepsDepthFirstIf(mctx.module, pred, visit)
}
jgennis/blueprint
module_ctx.go
GO
apache-2.0
16,003
require 'spec_helper' shared_examples_for 'splunk forwarder' do it { is_expected.to compile.with_all_deps } it { is_expected.to contain_class('splunk') } it { is_expected.to contain_class('splunk::params') } it { is_expected.to contain_class('splunk::forwarder') } it { is_expected.to contain_class('splunk::forwarder::install') } it { is_expected.to contain_class('splunk::forwarder::config') } it { is_expected.to contain_class('splunk::forwarder::service') } it { is_expected.to contain_splunk_config('splunk') } it { is_expected.to contain_package('splunkforwarder').with(ensure: 'installed') } it { is_expected.to contain_file('/opt/splunkforwarder/etc/system/local/deploymentclient.conf') } it { is_expected.to contain_file('/opt/splunkforwarder/etc/system/local/outputs.conf') } it { is_expected.to contain_file('/opt/splunkforwarder/etc/system/local/inputs.conf') } it { is_expected.to contain_file('/opt/splunkforwarder/etc/system/local/limits.conf') } it { is_expected.to contain_file('/opt/splunkforwarder/etc/system/local/props.conf') } it { is_expected.to contain_file('/opt/splunkforwarder/etc/system/local/transforms.conf') } it { is_expected.to contain_file('/opt/splunkforwarder/etc/system/local/web.conf') } it { is_expected.to contain_file('/opt/splunkforwarder/etc/system/local/limits.conf') } it { is_expected.to contain_file('/opt/splunkforwarder/etc/system/local/server.conf') } it { is_expected.to contain_splunkforwarder_web('forwarder_splunkd_port').with(value: '127.0.0.1:8089') } it { is_expected.not_to contain_file('/opt/splunkforwarder/etc/splunk.secret') } it { is_expected.not_to contain_file('/opt/splunkforwarder/etc/passwd') } end describe 'splunk::forwarder' do context 'supported operating systems' do on_supported_os.each do |os, facts| if os.start_with?('windows') # Splunk Server not used supported on windows else context "on #{os}" do let(:facts) do facts end context 'splunk when including forwarder and enterprise' do let(:pre_condition) do 'include 
splunk::enterprise' end it { is_expected.to compile.and_raise_error(%r{Do not include splunk::forwarder on the same node as splunk::enterprise}) } end context 'when manage_password = true' do if facts[:kernel] == 'Linux' || facts[:kernel] == 'SunOS' let(:params) { { 'manage_password' => true } } it { is_expected.to compile.with_all_deps } it { is_expected.to contain_file('/opt/splunkforwarder/etc/splunk.secret') } it { is_expected.to contain_file('/opt/splunkforwarder/etc/passwd') } end end context 'when package_provider = yum' do if facts[:kernel] == 'Linux' || facts[:kernel] == 'SunOS' let(:params) { { 'package_provider' => 'yum' } } it { is_expected.to contain_package('splunkforwarder').with(provider: 'yum') } end end context 'with $boot_start = true (defaults)' do if facts[:kernel] == 'Linux' || facts[:kernel] == 'SunOS' context 'with $facts[service_provider] == init and $splunk::params::version >= 7.2.2' do let(:facts) do facts.merge(service_provider: 'init') end let(:pre_condition) do "class { 'splunk::params': version => '7.2.2' }" end it_behaves_like 'splunk forwarder' it { is_expected.to contain_class('splunk::forwarder::service::nix') } it { is_expected.to contain_class('splunk::forwarder').with(service_name: 'splunk') } it { is_expected.to contain_exec('stop_splunkforwarder').with(command: '/opt/splunkforwarder/bin/splunk stop') } it { is_expected.to contain_exec('enable_splunkforwarder').with(command: '/opt/splunkforwarder/bin/splunk enable boot-start -user root --accept-license --answer-yes --no-prompt') } it { is_expected.not_to contain_exec('disable_splunkforwarder') } it { is_expected.not_to contain_exec('license_splunkforwarder') } it { is_expected.to contain_service('splunk').with(ensure: 'running', enable: true, status: nil, restart: nil, start: nil, stop: nil) } end context 'with $facts[service_provider] == init and $splunk::params::version < 7.2.2' do let(:facts) do facts.merge(service_provider: 'init') end let(:pre_condition) do "class { 
'splunk::params': version => '6.0.0' }" end it_behaves_like 'splunk forwarder' it { is_expected.to contain_class('splunk::forwarder::service::nix') } it { is_expected.to contain_class('splunk::forwarder').with(service_name: 'splunk') } it { is_expected.to contain_exec('stop_splunkforwarder').with(command: '/opt/splunkforwarder/bin/splunk stop') } it { is_expected.to contain_exec('enable_splunkforwarder').with(command: '/opt/splunkforwarder/bin/splunk enable boot-start -user root --accept-license --answer-yes --no-prompt') } it { is_expected.not_to contain_exec('disable_splunkforwarder') } it { is_expected.not_to contain_exec('license_splunkforwarder') } it { is_expected.to contain_service('splunk').with(ensure: 'running', enable: true, status: nil, restart: nil, start: nil, stop: nil) } end context 'with $facts[service_provider] == systemd and $splunk::params::version >= 7.2.2' do let(:facts) do facts.merge(service_provider: 'systemd') end let(:pre_condition) do "class { 'splunk::params': version => '7.2.2' }" end it_behaves_like 'splunk forwarder' it { is_expected.to contain_class('splunk::forwarder::service::nix') } it { is_expected.to contain_class('splunk::forwarder').with(service_name: 'SplunkForwarder') } it { is_expected.to contain_exec('stop_splunkforwarder').with(command: '/opt/splunkforwarder/bin/splunk stop') } it { is_expected.to contain_exec('enable_splunkforwarder').with(command: '/opt/splunkforwarder/bin/splunk enable boot-start -systemd-managed 1 --accept-license --answer-yes --no-prompt') } it { is_expected.not_to contain_exec('disable_splunkforwarder') } it { is_expected.not_to contain_exec('license_splunkforwarder') } it { is_expected.to contain_service('SplunkForwarder').with(ensure: 'running', enable: true, status: nil, restart: nil, start: nil, stop: nil) } end context 'with $facts[service_provider] == systemd and $splunk::params::version >= 7.2.2 and user != root' do let(:facts) do facts.merge(service_provider: 'systemd') end 
let(:pre_condition) do "class { 'splunk::params': version => '7.2.2' }" end let(:params) { { splunk_user: 'splunk' } } it { is_expected.to contain_exec('enable_splunkforwarder').with(command: '/opt/splunkforwarder/bin/splunk enable boot-start -user splunk -systemd-managed 1 --accept-license --answer-yes --no-prompt') } end context 'with $facts[service_provider] == systemd and $splunk::params::version < 7.2.2' do let(:facts) do facts.merge(service_provider: 'systemd') end let(:pre_condition) do "class { 'splunk::params': version => '6.0.0' }" end it_behaves_like 'splunk forwarder' it { is_expected.to contain_class('splunk::forwarder::service::nix') } it { is_expected.to contain_class('splunk::forwarder').with(service_name: 'splunk') } it { is_expected.to contain_exec('stop_splunkforwarder').with(command: '/opt/splunkforwarder/bin/splunk stop') } it { is_expected.to contain_exec('enable_splunkforwarder').with(command: '/opt/splunkforwarder/bin/splunk enable boot-start -user root --accept-license --answer-yes --no-prompt') } it { is_expected.not_to contain_exec('disable_splunkforwarder') } it { is_expected.not_to contain_exec('license_splunkforwarder') } it { is_expected.to contain_service('splunk').with(ensure: 'running', enable: true, status: nil, restart: nil, start: nil, stop: nil) } end end end context 'with $boot_start = false' do if facts[:kernel] == 'Linux' || facts[:kernel] == 'SunOS' context 'with $facts[service_provider] == init and $splunk::params::version >= 7.2.2' do let(:facts) do facts.merge(service_provider: 'init') end let(:pre_condition) do "class { 'splunk::params': version => '7.2.2', boot_start => false }" end it_behaves_like 'splunk forwarder' it { is_expected.to contain_class('splunk::forwarder::service::nix') } it { is_expected.to contain_class('splunk::forwarder').with(service_name: 'splunk') } it { is_expected.not_to contain_exec('stop_splunkforwarder') } it { is_expected.not_to contain_exec('enable_splunkforwarder') } it { is_expected.to 
contain_exec('disable_splunkforwarder').with(command: '/opt/splunkforwarder/bin/splunk disable boot-start -user root --accept-license --answer-yes --no-prompt') } it { is_expected.to contain_exec('license_splunkforwarder').with(command: '/opt/splunkforwarder/bin/splunk ftr --accept-license --answer-yes --no-prompt') } it { is_expected.to contain_service('splunk').with(restart: "/usr/sbin/runuser -l root -c '/opt/splunkforwarder/bin/splunk restart'", start: "/usr/sbin/runuser -l root -c '/opt/splunkforwarder/bin/splunk start'", stop: "/usr/sbin/runuser -l root -c '/opt/splunkforwarder/bin/splunk stop'", status: "/usr/sbin/runuser -l root -c '/opt/splunkforwarder/bin/splunk status'") } end context 'with $facts[service_provider] == init and $splunk::params::version < 7.2.2' do let(:facts) do facts.merge(service_provider: 'init') end let(:pre_condition) do "class { 'splunk::params': version => '6.0.0', boot_start => false }" end it_behaves_like 'splunk forwarder' it { is_expected.to contain_class('splunk::forwarder::service::nix') } it { is_expected.to contain_class('splunk::forwarder').with(service_name: 'splunk') } it { is_expected.not_to contain_exec('stop_splunkforwarder') } it { is_expected.not_to contain_exec('enable_splunkforwarder') } it { is_expected.to contain_exec('disable_splunkforwarder').with(command: '/opt/splunkforwarder/bin/splunk disable boot-start -user root --accept-license --answer-yes --no-prompt') } it { is_expected.to contain_exec('license_splunkforwarder').with(command: '/opt/splunkforwarder/bin/splunk ftr --accept-license --answer-yes --no-prompt') } it { is_expected.to contain_service('splunk').with(restart: "/usr/sbin/runuser -l root -c '/opt/splunkforwarder/bin/splunk restart'", start: "/usr/sbin/runuser -l root -c '/opt/splunkforwarder/bin/splunk start'", stop: "/usr/sbin/runuser -l root -c '/opt/splunkforwarder/bin/splunk stop'", status: "/usr/sbin/runuser -l root -c '/opt/splunkforwarder/bin/splunk status'") } end context 'with 
$facts[service_provider] == systemd and $splunk::params::version >= 7.2.2' do let(:facts) do facts.merge(service_provider: 'systemd') end let(:pre_condition) do "class { 'splunk::params': version => '7.2.2', boot_start => false }" end it_behaves_like 'splunk forwarder' it { is_expected.to contain_class('splunk::forwarder::service::nix') } it { is_expected.to contain_class('splunk::forwarder').with(service_name: 'SplunkForwarder') } it { is_expected.not_to contain_exec('stop_splunkforwarder') } it { is_expected.not_to contain_exec('enable_splunkforwarder') } it { is_expected.to contain_exec('disable_splunkforwarder').with(command: '/opt/splunkforwarder/bin/splunk disable boot-start -user root --accept-license --answer-yes --no-prompt') } it { is_expected.to contain_exec('license_splunkforwarder').with(command: '/opt/splunkforwarder/bin/splunk ftr --accept-license --answer-yes --no-prompt') } it { is_expected.to contain_service('SplunkForwarder').with(restart: "/usr/sbin/runuser -l root -c '/opt/splunkforwarder/bin/splunk restart'", start: "/usr/sbin/runuser -l root -c '/opt/splunkforwarder/bin/splunk start'", stop: "/usr/sbin/runuser -l root -c '/opt/splunkforwarder/bin/splunk stop'", status: "/usr/sbin/runuser -l root -c '/opt/splunkforwarder/bin/splunk status'") } end context 'with $facts[service_provider] == systemd and $splunk::params::version < 7.2.2' do let(:facts) do facts.merge(service_provider: 'systemd') end let(:pre_condition) do "class { 'splunk::params': version => '6.0.0', boot_start => false }" end it_behaves_like 'splunk forwarder' it { is_expected.to contain_class('splunk::forwarder::service::nix') } it { is_expected.to contain_class('splunk::forwarder').with(service_name: 'splunk') } it { is_expected.not_to contain_exec('stop_splunkforwarder') } it { is_expected.not_to contain_exec('enable_splunkforwarder') } it { is_expected.to contain_exec('disable_splunkforwarder').with(command: '/opt/splunkforwarder/bin/splunk disable boot-start -user root 
--accept-license --answer-yes --no-prompt') } it { is_expected.to contain_exec('license_splunkforwarder').with(command: '/opt/splunkforwarder/bin/splunk ftr --accept-license --answer-yes --no-prompt') } it { is_expected.to contain_service('splunk').with(restart: "/usr/sbin/runuser -l root -c '/opt/splunkforwarder/bin/splunk restart'", start: "/usr/sbin/runuser -l root -c '/opt/splunkforwarder/bin/splunk start'", stop: "/usr/sbin/runuser -l root -c '/opt/splunkforwarder/bin/splunk stop'", status: "/usr/sbin/runuser -l root -c '/opt/splunkforwarder/bin/splunk status'") } end end end context 'when forwarder not already installed' do let(:facts) do facts.merge(splunkforwarder_version: nil, service_provider: facts[:kernel] == 'FreeBSD' ? 'freebsd' : 'systemd') end let(:pre_condition) do "class { 'splunk::params': version => '7.2.2' }" end let(:accept_tos_command) do '/opt/splunkforwarder/bin/splunk stop && /opt/splunkforwarder/bin/splunk start --accept-license --answer-yes && /opt/splunkforwarder/bin/splunk stop' end let(:service_name) do facts[:kernel] == 'FreeBSD' ? 'splunk' : 'SplunkForwarder' end it_behaves_like 'splunk forwarder' it do is_expected.to contain_exec('splunk-forwarder-accept-tos').with( command: accept_tos_command, user: 'root', before: "Service[#{service_name}]", subscribe: nil, require: 'Exec[enable_splunkforwarder]', refreshonly: 'true' ) end end context 'when forwarder already installed' do let(:facts) do facts.merge(splunkforwarder_version: '7.3.3', service_provider: facts[:kernel] == 'FreeBSD' ? 'freebsd' : 'systemd') end let(:pre_condition) do "class { 'splunk::params': version => '7.2.2' }" end let(:accept_tos_command) do '/opt/splunkforwarder/bin/splunk stop && /opt/splunkforwarder/bin/splunk start --accept-license --answer-yes && /opt/splunkforwarder/bin/splunk stop' end let(:service_name) do facts[:kernel] == 'FreeBSD' ? 
'splunk' : 'SplunkForwarder' end it_behaves_like 'splunk forwarder' it do is_expected.to contain_exec('splunk-forwarder-accept-tos').with( command: accept_tos_command, user: 'root', before: "Service[#{service_name}]", subscribe: 'Package[splunkforwarder]', require: 'Exec[enable_splunkforwarder]', refreshonly: 'true' ) end end end end end end end
dhoppe/puppet-splunk
spec/classes/forwarder_spec.rb
Ruby
apache-2.0
17,547
/*
 * Copyright 2014,2015 agwlvssainokuni
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package cherry.common.foundation.impl;

import static com.mysema.query.types.expr.DateTimeExpression.currentTimestamp;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertThat;

import org.joda.time.LocalDate;
import org.joda.time.LocalDateTime;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import org.springframework.transaction.annotation.Transactional;

import cherry.common.db.gen.dto.BizdatetimeMaster;
import cherry.common.db.gen.mapper.BizdatetimeMasterMapper;
import cherry.foundation.bizdtm.BizDateTime;

import com.mysema.query.sql.SQLQueryFactory;

/**
 * Integration tests for {@link BizDateTime}. Verifies that the business
 * date/time falls back to the real clock when no {@link BizdatetimeMaster}
 * record exists, and that an inserted master record overrides today() (via
 * bizdate) or shifts now() (via the day/hour/minute/second offsets). Each
 * test runs inside a rolled-back transaction, so inserted master records do
 * not leak between tests.
 */
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(locations = "classpath:config/applicationContext-test.xml")
@Transactional
public class BizDateTimeTest {

	@Autowired
	private BizDateTime bizDateTime;

	@Autowired
	private BizdatetimeMasterMapper bizdatetimeMasterMapper;

	@Autowired
	private SQLQueryFactory queryFactory;

	/** With no master record, today() must be the current system date. */
	@Test
	public void testTodayWithoutMaster() {
		// Bracket the call with now() on both sides so the assertion still
		// holds if the date rolls over mid-test.
		LocalDate pre = LocalDate.now();
		LocalDate today = bizDateTime.today();
		LocalDate post = LocalDate.now();
		assertThat(today.isBefore(pre), is(false));
		assertThat(today.isAfter(post), is(false));
	}

	/** With a master record carrying an explicit bizdate, today() returns it. */
	@Test
	public void testTodayWithMaster() {
		LocalDate orig = LocalDate.now().plusDays(14);
		BizdatetimeMaster record = new BizdatetimeMaster();
		record.setBizdate(orig);
		bizdatetimeMasterMapper.insertSelective(record);
		assertThat(bizDateTime.today(), is(orig));
	}

	/** With no master record, now() must be the current system date-time. */
	@Test
	public void testNowWithoutMaster() {
		LocalDateTime pre = LocalDateTime.now();
		LocalDateTime now = bizDateTime.now();
		LocalDateTime post = LocalDateTime.now();
		assertThat(now.isBefore(pre), is(false));
		assertThat(now.isAfter(post), is(false));
	}

	/**
	 * With a master record carrying day/hour/minute/second offsets, now()
	 * returns the database clock shifted by those offsets.
	 * NOTE(review): the expected value comes from a separate query of the DB
	 * current timestamp, so this assumes both reads resolve to the same
	 * instant at the DB's timestamp precision — confirm if this test is flaky.
	 */
	@Test
	public void testNowWithMaster() {
		BizdatetimeMaster record = new BizdatetimeMaster();
		record.setOffsetDay(1);
		record.setOffsetHour(2);
		record.setOffsetMinute(3);
		record.setOffsetSecond(4);
		bizdatetimeMasterMapper.insertSelective(record);
		// Expected = DB current timestamp + configured offsets.
		LocalDateTime curDtm = queryFactory.query().uniqueResult(currentTimestamp(LocalDateTime.class));
		LocalDateTime expected = curDtm.plusDays(record.getOffsetDay()).plusHours(record.getOffsetHour())
				.plusMinutes(record.getOffsetMinute()).plusSeconds(record.getOffsetSecond());
		LocalDateTime now = bizDateTime.now();
		assertThat(now, is(expected));
	}
}
agwlvssainokuni/springapp
common/src/test/java/cherry/common/foundation/impl/BizDateTimeTest.java
Java
apache-2.0
3,254
/* * Copyright (C) 2016 Piotr Wittchen * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.github.pwittchen.reactivenetwork.library.rx2; import android.content.Context; import android.net.ConnectivityManager; import android.net.NetworkInfo; import io.reactivex.functions.Predicate; import org.junit.Test; import org.junit.runner.RunWith; import org.robolectric.RobolectricTestRunner; import org.robolectric.RuntimeEnvironment; import static com.google.common.truth.Truth.assertThat; @RunWith(RobolectricTestRunner.class) @SuppressWarnings("NullAway") public class ConnectivityTest { private static final String TYPE_NAME_WIFI = "WIFI"; private static final String TYPE_NAME_MOBILE = "MOBILE"; private static final String TYPE_NAME_NONE = "NONE"; @Test public void shouldCreateConnectivity() { // when Connectivity connectivity = Connectivity.create(); // then assertThat(connectivity).isNotNull(); assertThat(connectivity.state()).isEqualTo(NetworkInfo.State.DISCONNECTED); assertThat(connectivity.detailedState()).isEqualTo(NetworkInfo.DetailedState.IDLE); assertThat(connectivity.type()).isEqualTo(Connectivity.UNKNOWN_TYPE); assertThat(connectivity.subType()).isEqualTo(Connectivity.UNKNOWN_SUB_TYPE); assertThat(connectivity.available()).isFalse(); assertThat(connectivity.failover()).isFalse(); assertThat(connectivity.roaming()).isFalse(); assertThat(connectivity.typeName()).isEqualTo(TYPE_NAME_NONE); assertThat(connectivity.subTypeName()).isEqualTo(TYPE_NAME_NONE); 
assertThat(connectivity.reason()).isEmpty(); assertThat(connectivity.extraInfo()).isEmpty(); } @Test public void stateShouldBeEqualToGivenValue() throws Exception { // given final Connectivity connectivity = Connectivity.state(NetworkInfo.State.CONNECTED) .type(ConnectivityManager.TYPE_WIFI) .typeName(TYPE_NAME_WIFI) .build(); // when final Predicate<Connectivity> equalTo = ConnectivityPredicate.hasState(connectivity.state()); final Boolean shouldBeEqualToGivenStatus = equalTo.test(connectivity); // then assertThat(shouldBeEqualToGivenStatus).isTrue(); } @Test public void stateShouldBeEqualToOneOfGivenMultipleValues() throws Exception { // given final Connectivity connectivity = Connectivity.state(NetworkInfo.State.CONNECTING) .type(ConnectivityManager.TYPE_WIFI) .typeName(TYPE_NAME_WIFI) .build(); final NetworkInfo.State states[] = { NetworkInfo.State.CONNECTED, NetworkInfo.State.CONNECTING }; // when final Predicate<Connectivity> equalTo = ConnectivityPredicate.hasState(states); final Boolean shouldBeEqualToGivenStatus = equalTo.test(connectivity); // then assertThat(shouldBeEqualToGivenStatus).isTrue(); } @Test public void stateShouldNotBeEqualToGivenValue() throws Exception { // given final Connectivity connectivity = Connectivity.state(NetworkInfo.State.DISCONNECTED) .type(ConnectivityManager.TYPE_WIFI) .typeName(TYPE_NAME_WIFI) .build(); // when final Predicate<Connectivity> equalTo = ConnectivityPredicate.hasState(NetworkInfo.State.CONNECTED); final Boolean shouldBeEqualToGivenStatus = equalTo.test(connectivity); // then assertThat(shouldBeEqualToGivenStatus).isFalse(); } @Test public void typeShouldBeEqualToGivenValue() throws Exception { // given final Connectivity connectivity = Connectivity.state(NetworkInfo.State.CONNECTED) .type(ConnectivityManager.TYPE_WIFI) .typeName(TYPE_NAME_WIFI) .build(); // note that unknown type is added initially by the ConnectivityPredicate#hasType method final int givenTypes[] = { connectivity.type(), 
Connectivity.UNKNOWN_TYPE }; // when final Predicate<Connectivity> equalTo = ConnectivityPredicate.hasType(givenTypes); final Boolean shouldBeEqualToGivenStatus = equalTo.test(connectivity); // then assertThat(shouldBeEqualToGivenStatus).isTrue(); } @Test public void typeShouldBeEqualToOneOfGivenMultipleValues() throws Exception { // given final Connectivity connectivity = Connectivity.state(NetworkInfo.State.CONNECTING) .type(ConnectivityManager.TYPE_MOBILE) .typeName(TYPE_NAME_MOBILE) .build(); // note that unknown type is added initially by the ConnectivityPredicate#hasType method final int givenTypes[] = { ConnectivityManager.TYPE_WIFI, ConnectivityManager.TYPE_MOBILE, Connectivity.UNKNOWN_TYPE }; // when final Predicate<Connectivity> equalTo = ConnectivityPredicate.hasType(givenTypes); final Boolean shouldBeEqualToGivenStatus = equalTo.test(connectivity); // then assertThat(shouldBeEqualToGivenStatus).isTrue(); } @Test public void typeShouldNotBeEqualToGivenValue() throws Exception { // given final Connectivity connectivity = Connectivity.state(NetworkInfo.State.CONNECTED) .type(ConnectivityManager.TYPE_WIFI) .typeName(TYPE_NAME_WIFI) .build(); // note that unknown type is added initially by the ConnectivityPredicate#hasType method final int givenTypes[] = { ConnectivityManager.TYPE_MOBILE, Connectivity.UNKNOWN_TYPE }; // when final Predicate<Connectivity> equalTo = ConnectivityPredicate.hasType(givenTypes); final Boolean shouldBeEqualToGivenStatus = equalTo.test(connectivity); // then assertThat(shouldBeEqualToGivenStatus).isFalse(); } @Test(expected = IllegalArgumentException.class) public void createShouldThrowAnExceptionWhenContextIsNull() { // given final Context context = null; // when Connectivity.create(context); // then // an exception is thrown } @Test public void shouldReturnProperToStringValue() { // given final String expectedToString = "Connectivity{" + "state=DISCONNECTED, " + "detailedState=IDLE, " + "type=-1, " + "subType=-1, " + 
"available=false, " + "failover=false, " + "roaming=false, " + "typeName='NONE', " + "subTypeName='NONE', " + "reason='', " + "extraInfo=''}"; // when Connectivity connectivity = Connectivity.create(); // then assertThat(connectivity.toString()).isEqualTo(expectedToString); } @Test public void theSameConnectivityObjectsShouldBeEqual() { // given final Connectivity connectivityOne = Connectivity.create(); final Connectivity connectivityTwo = Connectivity.create(); // when boolean objectsAreEqual = connectivityOne.equals(connectivityTwo); // then assertThat(objectsAreEqual).isTrue(); } @Test public void twoDefaultObjectsShouldBeInTheSameBucket() { // given final Connectivity connectivityOne = Connectivity.create(); final Connectivity connectivityTwo = Connectivity.create(); // when boolean hashCodesAreEqual = connectivityOne.hashCode() == connectivityTwo.hashCode(); // then assertThat(hashCodesAreEqual).isTrue(); } @Test public void shouldAppendUnknownTypeWhileFilteringNetworkTypesInsidePredicate() { // given int[] types = { ConnectivityManager.TYPE_MOBILE, ConnectivityManager.TYPE_WIFI }; int[] expectedOutputTypes = { ConnectivityManager.TYPE_MOBILE, ConnectivityManager.TYPE_WIFI, Connectivity.UNKNOWN_TYPE }; // when int[] outputTypes = ConnectivityPredicate.appendUnknownNetworkTypeToTypes(types); // then assertThat(outputTypes).isEqualTo(expectedOutputTypes); } @Test public void shouldAppendUnknownTypeWhileFilteringNetworkTypesInsidePredicateForEmptyArray() { // given int[] types = {}; int[] expectedOutputTypes = { Connectivity.UNKNOWN_TYPE }; // when int[] outputTypes = ConnectivityPredicate.appendUnknownNetworkTypeToTypes(types); // then assertThat(outputTypes).isEqualTo(expectedOutputTypes); } @Test public void shouldCreateConnectivityWithBuilder() { // given NetworkInfo.State state = NetworkInfo.State.CONNECTED; NetworkInfo.DetailedState detailedState = NetworkInfo.DetailedState.CONNECTED; int type = ConnectivityManager.TYPE_WIFI; int subType = 
ConnectivityManager.TYPE_WIMAX; String typeName = TYPE_NAME_WIFI; String subTypeName = "test subType"; String reason = "no reason"; String extraInfo = "extra info"; // when Connectivity connectivity = Connectivity.state(state) .detailedState(detailedState) .type(type) .subType(subType) .available(true) .failover(false) .roaming(true) .typeName(typeName) .subTypeName(subTypeName) .reason(reason) .extraInfo(extraInfo) .build(); // then assertThat(connectivity.state()).isEqualTo(state); assertThat(connectivity.detailedState()).isEqualTo(detailedState); assertThat(connectivity.type()).isEqualTo(type); assertThat(connectivity.subType()).isEqualTo(subType); assertThat(connectivity.available()).isTrue(); assertThat(connectivity.failover()).isFalse(); assertThat(connectivity.roaming()).isTrue(); assertThat(connectivity.typeName()).isEqualTo(typeName); assertThat(connectivity.subTypeName()).isEqualTo(subTypeName); assertThat(connectivity.reason()).isEqualTo(reason); assertThat(connectivity.extraInfo()).isEqualTo(extraInfo); } @Test public void connectivityShouldNotBeEqualToAnotherOne() { // given Connectivity connectivityOne = Connectivity.state(NetworkInfo.State.CONNECTED) .detailedState(NetworkInfo.DetailedState.CONNECTED) .type(ConnectivityManager.TYPE_WIFI) .subType(1) .available(true) .failover(true) .roaming(true) .typeName(TYPE_NAME_WIFI) .subTypeName("subtypeOne") .reason("reasonOne") .extraInfo("extraInfoOne") .build(); Connectivity connectivityTwo = Connectivity.state(NetworkInfo.State.DISCONNECTED) .detailedState(NetworkInfo.DetailedState.DISCONNECTED) .type(ConnectivityManager.TYPE_MOBILE) .subType(2) .available(false) .failover(false) .roaming(false) .typeName(TYPE_NAME_MOBILE) .subTypeName("subtypeTwo") .reason("reasonTwo") .extraInfo("extraInfoTwo") .build(); // when final boolean isAnotherConnectivityTheSame = connectivityOne.equals(connectivityTwo); // then assertThat(isAnotherConnectivityTheSame).isFalse(); } @Test public void 
shouldCreateDefaultConnectivityWhenConnectivityManagerIsNull() { // given final Context context = RuntimeEnvironment.application.getApplicationContext(); final ConnectivityManager connectivityManager = null; // when Connectivity connectivity = Connectivity.create(context, connectivityManager); // then assertThat(connectivity.type()).isEqualTo(Connectivity.UNKNOWN_TYPE); assertThat(connectivity.subType()).isEqualTo(Connectivity.UNKNOWN_SUB_TYPE); assertThat(connectivity.state()).isEqualTo(NetworkInfo.State.DISCONNECTED); assertThat(connectivity.detailedState()).isEqualTo(NetworkInfo.DetailedState.IDLE); assertThat(connectivity.available()).isFalse(); assertThat(connectivity.failover()).isFalse(); assertThat(connectivity.roaming()).isFalse(); assertThat(connectivity.typeName()).isEqualTo(TYPE_NAME_NONE); assertThat(connectivity.subTypeName()).isEqualTo(TYPE_NAME_NONE); assertThat(connectivity.reason()).isEmpty(); assertThat(connectivity.extraInfo()).isEmpty(); } }
pwittchen/ReactiveNetwork
library/src/test/java/com/github/pwittchen/reactivenetwork/library/rx2/ConnectivityTest.java
Java
apache-2.0
12,175
/** * @author wangboxuan * @date 2014-7-14 下午01:43:02 * @Description: TODO * @version V1.0 */ package wbx; import javax.swing.plaf.SliderUI; /** * @author wangboxuan * @date 2014-7-14 * @Description: */ public class TestViolate { private static volatile int c = 0; public static void main(String[] args) throws InterruptedException { for (int i = 0; i < 1000; i++) { new Thread( ){ public void run() { c++; }; }.start(); } Thread.sleep(10*1000); System.out.println(c); } }
windsunmoon/javaTest
src/wbx/TestViolate.java
Java
apache-2.0
538
import java.util.Scanner;

/**
 * Console utility: reads how many numbers to average, then the numbers
 * themselves, and prints their arithmetic mean.
 */
public class Average {

	public static void main(String args[]) {
		// try-with-resources closes the Scanner (the original leaked it).
		try (Scanner input = new Scanner(System.in)) {
			System.out.println("Count of the Numbers which we want to Calculate the Average");
			int n = input.nextInt();
			if (n <= 0) {
				// Guard: the original divided by n and printed NaN for n == 0.
				System.out.println("Count must be a positive number");
				return;
			}
			System.out.println("Enter those " + n + " Numbers");
			double sum = 0;
			for (int count = 0; count < n; count++) {
				sum = sum + input.nextDouble();
			}
			double avg = sum / n;
			System.out.println("Average of those " + n + " Numbers is " + avg);
		}
	}
}
vinitraj10/Java
Java/Loops/Average.java
Java
apache-2.0
498
// Runnable documentation examples for gorethink's transformation terms
// (Map, OrderBy). The functions that end with an "// Output:" comment are
// verified by `go test`; those comments are load-bearing and must not change.
package tests

import (
	"fmt"

	r "gopkg.in/rethinkdb/rethinkdb-go.v6"
)

// Return the first five squares.
func ExampleTerm_Map() {
	// Map each element of the expression to its square.
	cur, err := r.Expr([]int{1, 2, 3, 4, 5}).Map(func(val r.Term) r.Term {
		return val.Mul(val)
	}).Run(session)
	if err != nil {
		fmt.Print(err)
		return
	}

	// Drain the cursor into a slice.
	var res []int
	err = cur.All(&res)
	if err != nil {
		fmt.Print(err)
		return
	}

	fmt.Print(res)

	// Output:
	// [1 4 9 16 25]
}

// Sum the elements of three sequences.
func ExampleMap_multipleSequences() {
	var sequence1 = []int{100, 200, 300, 400}
	var sequence2 = []int{10, 20, 30, 40}
	var sequence3 = []int{1, 2, 3, 4}

	// The variadic r.Map zips the sequences element-wise.
	cur, err := r.Map(sequence1, sequence2, sequence3, func(val1, val2, val3 r.Term) r.Term {
		return val1.Add(val2).Add(val3)
	}).Run(session)
	if err != nil {
		fmt.Print(err)
		return
	}

	var res []int
	err = cur.All(&res)
	if err != nil {
		fmt.Print(err)
		return
	}

	fmt.Print(res)

	// Output:
	// [111 222 333 444]
}

// Order all the posts using the index date.
// NOTE(review): this and the remaining examples hit a live "examples" database
// via `session`, so they have no "// Output:" block and are not verified.
func ExampleTerm_OrderBy_index() {
	cur, err := r.DB("examples").Table("posts").OrderBy(r.OrderByOpts{
		Index: "date",
	}).Run(session)
	if err != nil {
		fmt.Print(err)
		return
	}

	var res []interface{}
	err = cur.All(&res)
	if err != nil {
		fmt.Print(err)
		return
	}

	fmt.Print(res)
}

// Order all the posts using the index date in descending order.
func ExampleTerm_OrderBy_indexDesc() {
	// r.Desc wraps the index name to reverse the ordering.
	cur, err := r.DB("examples").Table("posts").OrderBy(r.OrderByOpts{
		Index: r.Desc("date"),
	}).Run(session)
	if err != nil {
		fmt.Print(err)
		return
	}

	var res []interface{}
	err = cur.All(&res)
	if err != nil {
		fmt.Print(err)
		return
	}

	fmt.Print(res)
}

// You can efficiently order using multiple fields by using a compound index.
// For example order by date and title.
func ExampleTerm_OrderBy_compound() {
	// "dateAndTitle" is assumed to be a compound index on (date, title).
	cur, err := r.DB("examples").Table("posts").OrderBy(r.OrderByOpts{
		Index: r.Desc("dateAndTitle"),
	}).Run(session)
	if err != nil {
		fmt.Print(err)
		return
	}

	var res []interface{}
	err = cur.All(&res)
	if err != nil {
		fmt.Print(err)
		return
	}

	fmt.Print(res)
}

// If you have a sequence with fewer documents than the arrayLimit, you can order
// it by multiple fields without an index.
func ExampleTerm_OrderBy_multiple() {
	cur, err := r.DB("examples").Table("posts").OrderBy(
		"title",
		r.OrderByOpts{Index: r.Desc("date")},
	).Run(session)
	if err != nil {
		fmt.Print(err)
		return
	}

	var res []interface{}
	err = cur.All(&res)
	if err != nil {
		fmt.Print(err)
		return
	}

	fmt.Print(res)
}

// Notice that an index ordering always has highest precedence. The following
// query orders posts by date, and if multiple posts were published on the same
// date, they will be ordered by title.
func ExampleTerm_OrderBy_multipleWithIndex() {
	cur, err := r.DB("examples").Table("posts").OrderBy(
		"title",
		r.OrderByOpts{Index: r.Desc("date")},
	).Run(session)
	if err != nil {
		fmt.Print(err)
		return
	}

	var res []interface{}
	err = cur.All(&res)
	if err != nil {
		fmt.Print(err)
		return
	}

	fmt.Print(res)
}
GoRethink/gorethink
internal/integration/tests/example_query_transformation_test.go
GO
apache-2.0
3,050
using Jal.Converter.Impl;
using Jal.Converter.Tests.Model;

namespace Jal.Converter.Tests.Impl
{
    /// <summary>
    /// Maps an incoming <see cref="CustomerRequest"/> onto a new <see cref="Customer"/>,
    /// copying the name and age and defaulting the category to "None".
    /// </summary>
    public class CustomerRequestCustomerConverter : AbstractConverter<CustomerRequest, Customer>
    {
        public override Customer Convert(CustomerRequest source)
        {
            var customer = new Customer();

            customer.Name = source.Name;
            customer.Age = source.Age;
            customer.Category = "None";

            return customer;
        }
    }
}
raulnq/Jal.Converter
Jal.Converter.Tests/Impl/CustomerRequestCustomerConverter.cs
C#
apache-2.0
498
// Package opsmanager implements backup and restore operations against a
// Pivotal Ops Manager instance (settings/assets export over HTTP, deployment
// archive capture over SSH).
package opsmanager

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	urllib "net/url"
	"path"

	"github.com/cloudfoundry-community/go-cfenv"
	"github.com/pivotalservices/cfbackup"
	"github.com/pivotalservices/gtils/command"
	ghttp "github.com/pivotalservices/gtils/http"
	"github.com/pivotalservices/gtils/log"
	"github.com/pivotalservices/gtils/uaa"
	"github.com/xchapter7x/lo"
)

// NewOpsManager initializes an OpsManager instance
var NewOpsManager = func(opsManagerHostname, adminUsername, adminPassword, adminToken, opsManagerUsername, opsManagerPassword, opsManagerPassphrase, clientID, clientSecret, target, cryptKey string) (context *OpsManager, err error) {
	backupContext := cfbackup.NewBackupContext(target, cfenv.CurrentEnv(), cryptKey)
	settingsHTTPRequestor := ghttp.NewHttpGateway()
	settingsMultiHTTPRequestor := httpUploader(cfbackup.GetUploader(backupContext))

	assetsHTTPRequestor := ghttp.NewHttpGateway()
	assetsMultiHTTPRequestor := httpUploader(cfbackup.GetUploader(backupContext))

	context = &OpsManager{
		SettingsUploader:    settingsMultiHTTPRequestor,
		AssetsUploader:      assetsMultiHTTPRequestor,
		SettingsRequestor:   settingsHTTPRequestor,
		AssetsRequestor:     assetsHTTPRequestor,
		DeploymentDir:       path.Join(target, OpsMgrBackupDir, OpsMgrDeploymentsDir),
		Hostname:            opsManagerHostname,
		Username:            adminUsername,
		Password:            adminPassword,
		ClientID:            clientID,
		ClientSecret:        clientSecret,
		Token:               adminToken,
		BackupContext:       backupContext,
		LocalExecuter:       command.NewLocalExecuter(),
		OpsmanagerBackupDir: OpsMgrBackupDir,
		SSHUsername:         opsManagerUsername,
		SSHPassword:         opsManagerPassword,
		Passphrase:          opsManagerPassphrase,
		SSHPort:             OpsMgrDefaultSSHPort,
		ClearBoshManifest:   false,
	}
	err = context.createExecuter()
	return
}

// SetSSHPrivateKey - sets the private key in the ops manager object and rebuilds
// the remote executer associated with the opsmanager.
func (context *OpsManager) SetSSHPrivateKey(key string) {
	lo.G.Debug("Setting SSHKey")
	context.SSHPrivateKey = key
	// BUGFIX: the original discarded the error from createExecuter, silently
	// leaving a stale executor in place. The signature is void, so log it.
	if err := context.createExecuter(); err != nil {
		lo.G.Error("failed to recreate remote executor after setting SSH key", err)
	}
}

// createExecuter builds the SSH-backed remote command executor from the
// credentials currently held on the context.
func (context *OpsManager) createExecuter() (err error) {
	context.Executer, err = command.NewRemoteExecutor(command.SshConfig{
		Username: context.SSHUsername,
		Password: context.SSHPassword,
		Host:     context.Hostname,
		Port:     context.SSHPort,
		SSLKey:   context.SSHPrivateKey,
	})
	return
}

// GetInstallationSettings retrieves all the installation settings from OpsMan
// and returns them in a buffered reader
func (context *OpsManager) GetInstallationSettings() (settings io.Reader, err error) {
	var bytesBuffer = new(bytes.Buffer)
	url := fmt.Sprintf(OpsMgrInstallationSettingsURL, context.Hostname)
	lo.G.Debug(fmt.Sprintf("Exporting url '%s'", url))

	if err = context.saveHTTPResponse(url, bytesBuffer); err == nil {
		settings = bytesBuffer
	}
	return
}

//~ Backup Operations

// Backup performs a backup of a Pivotal Ops Manager instance
func (context *OpsManager) Backup() (err error) {
	if err = context.saveDeployments(); err == nil {
		err = context.saveInstallation()
	}
	return
}

// saveDeployments tars up the remote deployments directory over SSH and
// streams the archive into the backup target.
func (context *OpsManager) saveDeployments() (err error) {
	var backupWriter io.WriteCloser
	if backupWriter, err = context.Writer(context.TargetDir, context.OpsmanagerBackupDir, OpsMgrDeploymentsFileName); err == nil {
		defer backupWriter.Close()
		command := "cd /var/tempest/workspaces/default && tar cz deployments"
		err = context.Executer.Execute(backupWriter, command)
	}
	return
}

// saveInstallation persists the installation settings and assets exports.
func (context *OpsManager) saveInstallation() error {
	return context.saveInstallationSettingsAndAssets()
}

func (context *OpsManager) saveInstallationSettingsAndAssets() (err error) {
	if err = context.exportFile(OpsMgrInstallationSettingsURL, OpsMgrInstallationSettingsFilename); err == nil {
		err = context.exportFile(OpsMgrInstallationAssetsURL, OpsMgrInstallationAssetsFileName)
	}
	return
}

// exportFile downloads the resource at urlFormat (filled with the hostname)
// into the named file inside the backup target.
func (context *OpsManager) exportFile(urlFormat string, filename string) (err error) {
	url := fmt.Sprintf(urlFormat, context.Hostname)
	lo.G.Debug("Exporting file", log.Data{"url": url, "filename": filename})
	var backupWriter io.WriteCloser

	if backupWriter, err = context.Writer(context.TargetDir, context.OpsmanagerBackupDir, filename); err == nil {
		defer backupWriter.Close()
		err = context.saveHTTPResponse(url, backupWriter)
	}
	return
}

// saveHTTPResponse GETs url (OAuth first, then legacy basic auth as a
// fallback) and copies a 200 response body into dest; any non-200 body is
// surfaced as the error message.
func (context *OpsManager) saveHTTPResponse(url string, dest io.Writer) (err error) {
	var resp *http.Response
	lo.G.Debug("attempting to auth against", url)

	if resp, err = context.oauthHTTPGet(url); err != nil {
		lo.G.Info("falling back to basic auth for legacy system")
		lo.G.Debug("error: ", err)
		resp, err = context.legacyHTTPGet(url)
	}

	// BUGFIX: the original only closed resp.Body on the StatusOK branch,
	// leaking the connection for non-200 responses. Close it whenever a
	// response exists.
	if resp != nil {
		defer resp.Body.Close()
	}

	if err == nil && resp.StatusCode == http.StatusOK {
		_, err = io.Copy(dest, resp.Body)
	} else if resp != nil && resp.StatusCode != http.StatusOK {
		errMsg, _ := ioutil.ReadAll(resp.Body)
		err = errors.New(string(errMsg[:]))
	}

	if err != nil {
		lo.G.Error("error in save http request", err)
	}
	return
}

// legacyHTTPGet performs a basic-auth GET for pre-OAuth Ops Manager versions.
func (context *OpsManager) legacyHTTPGet(url string) (resp *http.Response, err error) {
	requestor := context.SettingsRequestor
	resp, err = requestor.Get(ghttp.HttpRequestEntity{
		Url:         url,
		Username:    context.Username,
		Password:    context.Password,
		ContentType: "application/octet-stream",
	})()
	lo.G.Debug("called basic auth on legacy ops manager", url, err)
	return
}

// oauthHTTPGet performs a bearer-token GET, fetching a UAA token first when
// none was supplied. With no ClientID configured it uses the default "opsman"
// client and the password grant; otherwise client_credentials.
func (context *OpsManager) oauthHTTPGet(urlString string) (resp *http.Response, err error) {
	var token = context.Token
	var uaaURL, _ = urllib.Parse(urlString)
	var opsManagerUsername = context.Username
	var opsManagerPassword = context.Password
	var clientID string
	var clientSecret = context.ClientSecret
	var grantType string

	if context.ClientID == "" {
		clientID = "opsman"
		grantType = "password"
	} else {
		clientID = context.ClientID
		grantType = "client_credentials"
	}

	lo.G.Debug("aquiring your token from: ", uaaURL, urlString)

	if token == "" {
		if token, err = uaa.GetToken("https://"+uaaURL.Host+"/uaa", opsManagerUsername, opsManagerPassword, clientID, clientSecret, grantType); err != nil {
			return nil, err
		}
		lo.G.Debug("token acquired")
	}

	requestor := context.SettingsRequestor
	resp, err = requestor.Get(ghttp.HttpRequestEntity{
		Url:           urlString,
		ContentType:   "application/octet-stream",
		Authorization: "Bearer " + token,
	})()
	return
}

//~ Restore Operations

// Restore performs a restore of a Pivotal Ops Manager instance
func (context *OpsManager) Restore() (err error) {
	lo.G.Info("Starting restore for Opsman")
	err = context.importInstallation()
	return
}

// importInstallation uploads the previously exported installation assets; on
// success it optionally clears the bosh deployment files (ClearBoshManifest).
func (context *OpsManager) importInstallation() (err error) {
	defer func() {
		if err == nil && context.ClearBoshManifest {
			lo.G.Debug("removing deployment files")
			err = context.removeExistingDeploymentFiles()
		}
	}()
	installAssetsURL := fmt.Sprintf(OpsMgrInstallationAssetsURL, context.Hostname)
	lo.G.Debug("uploading installation assets installAssetsURL: %s", installAssetsURL)
	err = context.importInstallationPart(installAssetsURL, OpsMgrInstallationAssetsFileName, OpsMgrInstallationAssetsPostFieldName, context.AssetsUploader)
	return
}

// importInstallationPart streams the named backup file to url as a multipart
// upload (field fieldname), sending the admin password/passphrase as extra
// form values. Non-200 responses become errors.
func (context *OpsManager) importInstallationPart(url, filename, fieldname string, upload httpUploader) (err error) {
	var backupReader io.ReadCloser
	if backupReader, err = context.Reader(context.TargetDir, context.OpsmanagerBackupDir, filename); err == nil {
		defer backupReader.Close()
		var resp *http.Response
		conn := ghttp.ConnAuth{
			Url:      url,
			Username: context.Username,
			Password: context.Password,
		}
		filePath := path.Join(context.TargetDir, context.OpsmanagerBackupDir, filename)
		bufferedReader := bufio.NewReader(backupReader)
		lo.G.Debug("upload request", log.Data{"fieldname": fieldname, "filePath": filePath})

		creds := map[string]string{
			"password":   context.Password,
			"passphrase": context.Passphrase,
		}
		resp, err = upload(conn, fieldname, filePath, -1, bufferedReader, creds)

		if err == nil && resp.StatusCode == http.StatusOK {
			lo.G.Debug("Request for %s succeeded with status: %s", url, resp.Status)
		} else if resp != nil && resp.StatusCode != http.StatusOK {
			err = fmt.Errorf("Request for %s failed with status: %s", url, resp.Status)
		}

		if err != nil {
			lo.G.Error("error uploading installation: %s", err.Error())
		}
	}
	return
}

// removeExistingDeploymentFiles deletes the bosh deployments manifest on the
// remote Ops Manager VM, if present.
func (context *OpsManager) removeExistingDeploymentFiles() (err error) {
	var w bytes.Buffer
	command := fmt.Sprintf("if [ -f %s ]; then sudo rm %s;fi", OpsMgrDeploymentsFile, OpsMgrDeploymentsFile)
	err = context.Executer.Execute(&w, command)
	return
}
pivotalservices/cfbackup
tiles/opsmanager/opsmanager.go
GO
apache-2.0
8,787
/* * This file is part of the KubeVirt project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Copyright 2018 Red Hat, Inc. * */ package tests_test import ( "flag" "fmt" . "github.com/onsi/ginkgo" "github.com/onsi/ginkgo/extensions/table" . "github.com/onsi/gomega" "kubevirt.io/kubevirt/tests" ) var _ = Describe("User Access", func() { flag.Parse() BeforeEach(func() { tests.BeforeTestCleanup() }) Describe("With default kubevirt service accounts", func() { It("should verify only admin role has access only to kubevirt-config", func() { tests.SkipIfNoKubectl() verbs := []string{"get", "update", "patch"} namespace := tests.NamespaceTestDefault resourceNamespace := tests.KubeVirtInstallNamespace saNames := []string{tests.ViewServiceAccountName, tests.EditServiceAccountName, tests.AdminServiceAccountName} for _, saName := range saNames { // Verifies targeted access to only the kubevirt config By(fmt.Sprintf("verifying expected permissions for sa %s for resource configmaps/kubevirt-config", saName)) for _, verb := range verbs { expectedRes := "no" if saName == tests.AdminServiceAccountName { expectedRes = "yes" } resource := "configmaps/kubevirt-config" as := fmt.Sprintf("system:serviceaccount:%s:%s", namespace, saName) result, err := tests.RunKubectlCommand("auth", "can-i", "-n", resourceNamespace, "--as", as, verb, resource) Expect(err).ToNot(HaveOccurred()) Expect(result).To(ContainSubstring(expectedRes)) } By(fmt.Sprintf("verifying expected permissions for sa %s for resource 
configmaps/kubevirt-madethisup", saName)) for _, verb := range verbs { expectedRes := "no" resource := "configmaps/kubevirt-imadethisup" as := fmt.Sprintf("system:serviceaccount:%s:%s", namespace, saName) result, err := tests.RunKubectlCommand("auth", "can-i", "-n", resourceNamespace, "--as", as, verb, resource) Expect(err).ToNot(HaveOccurred()) Expect(result).To(ContainSubstring(expectedRes)) } } }) table.DescribeTable("should verify permissions are correct for view, edit, and admin", func(resource string) { tests.SkipIfNoKubectl() view := tests.ViewServiceAccountName edit := tests.EditServiceAccountName admin := tests.AdminServiceAccountName viewVerbs := make(map[string]string) editVerbs := make(map[string]string) adminVerbs := make(map[string]string) // GET viewVerbs["get"] = "yes" editVerbs["get"] = "yes" adminVerbs["get"] = "yes" // List viewVerbs["list"] = "yes" editVerbs["list"] = "yes" adminVerbs["list"] = "yes" // WATCH viewVerbs["watch"] = "yes" editVerbs["watch"] = "yes" adminVerbs["watch"] = "yes" // DELETE viewVerbs["delete"] = "no" editVerbs["delete"] = "yes" adminVerbs["delete"] = "yes" // CREATE viewVerbs["create"] = "no" editVerbs["create"] = "yes" adminVerbs["create"] = "yes" // UPDATE viewVerbs["update"] = "no" editVerbs["update"] = "yes" adminVerbs["update"] = "yes" // PATCH viewVerbs["patch"] = "no" editVerbs["patch"] = "yes" adminVerbs["patch"] = "yes" // DELETE COllECTION viewVerbs["deleteCollection"] = "no" editVerbs["deleteCollection"] = "no" adminVerbs["deleteCollection"] = "yes" namespace := tests.NamespaceTestDefault verbs := []string{"get", "list", "watch", "delete", "create", "update", "patch", "deletecollection"} for _, verb := range verbs { // VIEW By(fmt.Sprintf("verifying VIEW sa for verb %s", verb)) expectedRes, _ := viewVerbs[verb] as := fmt.Sprintf("system:serviceaccount:%s:%s", namespace, view) result, err := tests.RunKubectlCommand("auth", "can-i", "--as", as, verb, resource) Expect(err).ToNot(HaveOccurred()) 
Expect(result).To(ContainSubstring(expectedRes)) // EDIT By(fmt.Sprintf("verifying EDIT sa for verb %s", verb)) expectedRes, _ = editVerbs[verb] as = fmt.Sprintf("system:serviceaccount:%s:%s", namespace, edit) result, err = tests.RunKubectlCommand("auth", "can-i", "--as", as, verb, resource) Expect(err).ToNot(HaveOccurred()) Expect(result).To(ContainSubstring(expectedRes)) // ADMIN By(fmt.Sprintf("verifying ADMIN sa for verb %s", verb)) expectedRes, _ = adminVerbs[verb] as = fmt.Sprintf("system:serviceaccount:%s:%s", namespace, admin) result, err = tests.RunKubectlCommand("auth", "can-i", "--as", as, verb, resource) Expect(err).ToNot(HaveOccurred()) Expect(result).To(ContainSubstring(expectedRes)) // DEFAULT - the default should always return 'no' for ever verb. // This is primarily a sanity check. By(fmt.Sprintf("verifying DEFAULT sa for verb %s", verb)) expectedRes = "no" as = fmt.Sprintf("system:serviceaccount:%s:default", namespace) result, err = tests.RunKubectlCommand("auth", "can-i", "--as", as, verb, resource) Expect(err).ToNot(HaveOccurred()) Expect(result).To(ContainSubstring(expectedRes)) } }, table.Entry("given a vmi", "virtualmachineinstances"), table.Entry("given an vm", "virtualmachines"), table.Entry("given a vmi preset", "virtualmachineinstancepresets"), table.Entry("given a vmi replica set", "virtualmachineinstancereplicasets"), ) }) })
rmohr/kubevirt
tests/access_test.go
GO
apache-2.0
5,809
/*
Copyright [2013-2014] eBay Software Foundation

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.ccoe.build.service;

import static org.junit.Assert.assertFalse;

import org.junit.Test;

/**
 * Unit test for {@link BuildServiceScheduler}.
 *
 * <p>Note: the original file imported {@code BuildServiceScheduler} from its
 * own package, which is redundant and has been removed.
 */
public class BuildServiceSchedulerTest {

	/** A freshly constructed scheduler must report itself as disabled. */
	@Test
	public void testIsSchedulerEnabled() {
		BuildServiceScheduler buildServiceScheduler = new BuildServiceScheduler();
		// assertFalse states the intent directly, instead of assertEquals(false, ...).
		assertFalse(buildServiceScheduler.isSchedulerEnabled());
	}
}
eBay/mTracker
build-service/src/test/java/com/ccoe/build/service/BuildServiceSchedulerTest.java
Java
apache-2.0
975
<?php namespace Habari; class LMSPlugin extends Plugin { private $output; public function filter_autoload_dirs($dirs) { $dirs[] = __DIR__ . '/classes'; return $dirs; } public function action_init() { DB::register_table( 'tracks' ); DB::register_table( 'courses' ); DB::register_table( 'units' ); DB::register_table( 'alerts' ); DB::register_table( 'progress' ); DB::register_table( 'quizs' ); DB::register_table( 'questions' ); DB::register_table( 'answers' ); DB::register_table( 'user_quizs' ); $this->add_support_files(); $this->add_templates(); } public function action_plugin_activation( $plugin_file ) { Post::add_new_type( 'track' ); Post::add_new_type( 'course' ); Post::add_new_type( 'unit' ); Post::add_new_type( 'quiz' ); $group = UserGroup::get_by_name( _t( 'authenticated' )); $group->grant( 'post_track', 'read'); $group->grant( 'post_course', 'read'); $group->grant( 'post_unit', 'read'); $group->grant( 'post_quiz', 'read'); } public function action_plugin_activated( $plugin_file ) { $this->create_tracks_table(); $this->create_courses_table(); $this->create_units_table(); $this->create_alerts_table(); $this->create_quizs_table(); $this->create_questions_table(); $this->create_answers_table(); } public function filter_post_type_display($type, $g_number) { switch($type) { case 'course': switch($g_number) { case 'singular': return _t('Course'); case 'plural': return _t('Courses'); } break; case 'unit': switch($g_number) { case 'singular': return _t('Unit'); case 'plural': return _t('Units'); } break; case 'track': switch($g_number) { case 'singular': return _t('Track'); case 'plural': return _t('Tracks'); } break; case 'quiz': switch($g_number) { case 'singular': return _t('Quiz'); case 'plural': return _t('Quizes'); } break; } return $type; } private function add_support_files() { Stack::add('template_stylesheet', array($this->get_url('/templates/lms.css'), 'screen, projection'), 'lms_styles'); Stack::add('template_header_javascript', 
$this->get_url('/templates/js/jquery.knob.js'), 'jquery.knob', 'jquery'); } private function add_templates() { $this->add_template( 'track.multiple', dirname(__FILE__) . '/templates/track.multiple.php' ); $this->add_template( 'course.multiple', dirname(__FILE__) . '/templates/course.multiple.php' ); $this->add_template( 'course.single', dirname(__FILE__) . '/templates/course.single.php' ); $this->add_template( 'course.single.syllabus', dirname(__FILE__) . '/templates/course.single.syllabus.php' ); $this->add_template( 'course.single.nav', dirname(__FILE__) . '/templates/course.single.nav.php' ); $this->add_template( 'course.unit.single', dirname(__FILE__) . '/templates/course.unit.single.php' ); $this->add_template( 'course.new', dirname(__FILE__) . '/templates/course.new.php' ); $this->add_template( 'course.dropzone', dirname(__FILE__) . '/templates/course.dropzone.php' ); $this->add_template( 'unit.new', dirname(__FILE__) . '/templates/unit.new.php' ); $this->add_template( 'unit.dropzone', dirname(__FILE__) . '/templates/unit.dropzone.php' ); $this->add_template( 'quiz.new', dirname(__FILE__) . '/templates/quiz.new.php' ); $this->add_template( 'unit.quiz.single', dirname(__FILE__) . '/templates/unit.quiz.single.php' ); } private function create_tracks_table() { $sql = "CREATE TABLE " . DB::table('tracks') . " ( id int(11) unsigned NOT NULL AUTO_INCREMENT, post_id int(11) unsigned DEFAULT '0', track_permissions int(11) unsigned DEFAULT '0', track_hero varchar(255) DEFAULT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1;"; return DB::dbdelta( $sql ); } private function create_courses_table() { $sql = "CREATE TABLE " . DB::table('courses') . 
" ( `id` int(11) unsigned NOT NULL AUTO_INCREMENT, `post_id` int(11) unsigned DEFAULT '0', `parent` int(11) unsigned DEFAULT '0', `locked` int(11) unsigned DEFAULT NULL, `course_permissions` int(11) unsigned DEFAULT '0', `course_category` int(11) unsigned DEFAULT '0', `course_group` int(11) unsigned DEFAULT '0', `course_type` varchar(255) DEFAULT NULL, `course_badge` int(11) unsigned DEFAULT '0', `course_hero` varchar(255) DEFAULT NULL, `course_length` varchar(255) DEFAULT NULL, `course_prereq` int(11) unsigned DEFAULT '0', `course_meeting_dates` varchar(255) DEFAULT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=8 DEFAULT CHARSET=latin1;"; return DB::dbdelta( $sql ); } private function create_units_table() { $sql = "CREATE TABLE " . DB::table('units') . " ( id int(11) unsigned NOT NULL AUTO_INCREMENT, post_id int(11) unsigned DEFAULT 0, parent int(11) unsigned DEFAULT 0, pinned int(11) unsigned DEFAULT 0, locked int(11) unsigned DEFAULT 0, views int(11) unsigned DEFAULT 0, length VARCHAR(255) NULL, video VARCHAR(255) NULL, audio VARCHAR(255) NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1;"; return DB::dbdelta( $sql ); } private function create_alerts_table() { $sql = "CREATE TABLE " . DB::table('alerts') . " ( id int(11) NOT NULL AUTO_INCREMENT, post_id int(11) NOT NULL, user_id int(11) DEFAULT NULL, message varchar(255) COLLATE utf8_unicode_ci DEFAULT NULL, PRIMARY KEY (`id`), KEY `post` (`post_id`,`user_id`) ) ENGINE=MyISAM AUTO_INCREMENT=11 DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;"; } private function create_quizs_table() { $sql = "CREATE TABLE " . DB::table('quizs') . " ( id int(11) unsigned NOT NULL AUTO_INCREMENT, post_id int(11) unsigned DEFAULT '0', course_id int(11) unsigned DEFAULT '0', unit_id int(11) unsigned DEFAULT '0', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1;"; return DB::dbdelta( $sql ); } private function create_user_quizs_table() { $sql = "CREATE TABLE " . DB::table('user_quizs') . 
" ( id int(11) unsigned NOT NULL AUTO_INCREMENT, user_id int(11) unsigned DEFAULT '0', quiz_id int(11) unsigned DEFAULT '0', result int(11) unsigned DEFAULT '0', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1;"; return DB::dbdelta( $sql ); } private function create_questions_table() { $sql = "CREATE TABLE " . DB::table('questions') . " ( id int(11) unsigned NOT NULL AUTO_INCREMENT, quiz_id int(11) unsigned DEFAULT '0', question varchar(255) COLLATE utf8_unicode_ci DEFAULT NULL, choices varchar(255) COLLATE utf8_unicode_ci DEFAULT NULL, correct_choice int(11) unsigned DEFAULT '0', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1;"; return DB::dbdelta( $sql ); } private function create_answers_table() { $sql = "CREATE TABLE " . DB::table('answers') . " ( id int(11) unsigned NOT NULL AUTO_INCREMENT, quiz_id int(11) unsigned DEFAULT '0', question_id int(11) unsigned DEFAULT '0', answer varchar(255) COLLATE utf8_unicode_ci DEFAULT NULL, passed int(11) unsigned DEFAULT '0', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1;"; return DB::dbdelta( $sql ); } private function temp_id($prefix = '') { $user = User::identify(); $ids = $user->info->ids; if(!is_array($ids)) { $ids = array(); } if(!isset($ids[$prefix])) { $ids[$prefix] = 0; } $ids[$prefix] = $ids[$prefix] + 1; $user->info->ids = $ids; $user->info->commit(); $id = sprintf('%05d-', $user->id) . $prefix . sprintf('%0' . (7 - strlen($prefix)) . 
'd', $ids[$prefix]); return $id; } private function increment_views($topic) { $sample_rate = 1; if( mt_rand(1, $sample_rate) == 1 ) { $views = $topic->views + 1; $topic->views = $views; $topic->update(); } } private function create_dir($path) { if ( !is_dir( $path ) ){ mkdir( $path, 0777 ); } } private function make_safe_filename( $safe_file ) { $safe_file = str_replace( "#", "No.", $safe_file ); $safe_file = str_replace( "$", "Dollar", $safe_file ); $safe_file = str_replace( "%", "Percent", $safe_file ); $safe_file = str_replace( "^", "", $safe_file ); $safe_file = str_replace( "&", "and", $safe_file ); $safe_file = str_replace( "*", "", $safe_file ); $safe_file = str_replace( "?", "", $safe_file ); $safe_file = str_replace( " ", "", $safe_file ); return $safe_file; } private function make_safe( $file ) { // check that this is an image, and not a file. $safe_file = $file['files']['name'][0]; $safe_file = $this->make_safe_filename( $safe_file ); return $safe_file; } private function upload($file, $upload_dir) { $return = new \stdClass(); if( $file != '' ) { $cleaned = $this->make_safe( $file ); $this->create_dir( $upload_dir ); $path = $upload_dir . '/' . $cleaned; $finfo = new \finfo; $fileinfo = $finfo->file($file['files']['tmp_name'][0], FILEINFO_MIME); $mime = explode( ';', $fileinfo ); if( copy($file['files']['tmp_name'][0], $path) ) { $file_name = $file['files']['name'][0]; $file_size = $file['files']['size'][0]; if( $file_size > 999999 ) { $div = $file_size / 1000000; $file_size = round( $div, 1 ) . ' MB'; } else { $div = $file_size / 1000; $file_size = round( $div, 1 ) . ' KB'; } $return->document = $path; } } return $return; } private function create_forum($course) { $forum = Forum::get( array('slug' => 'lms') ); $postdata = array( 'content_type' => Post::type('topic'), 'title' => $course->title, 'slug' => Utils::slugify( 'Forum ' . 
$course->title ), 'content' => '', 'parent' => $forum->id, 'pinned' => 0, 'locked' => 0, 'user_id' => User::identify()->id, 'status' => Post::status('published'), 'pubdate' => DateTime::date_create( date(DATE_RFC822) ), ); $forum = Topic::create( $postdata ); $course->course_group = $forum->id; $course->update(); } public function filter_user_get($out, $name, $user) { switch($name) { case 'gravatar' : $image = Site::get_url('theme') . '/images/default_avatar.png'; $out = '<img src="https://www.gravatar.com/avatar/' . md5( strtolower( trim( $user->email ) ) ) . '?d=' . urlencode( $image ) . '" alt="personal avatar" class="gravatar">'; break; } return $out; } public function filter_posts_get_paramarray($paramarray) { $queried_types = Posts::extract_param($paramarray, 'content_type'); if($queried_types && in_array(Post::type('track'), $queried_types)) { $paramarray['post_join'][] = '{tracks}'; $default_fields = isset($paramarray['default_fields']) ? $paramarray['default_fields'] : array(); $default_fields['{tracks}.track_permissions'] = ''; $default_fields['{tracks}.track_hero'] = ''; $paramarray['default_fields'] = $default_fields; } if($queried_types && in_array(Post::type('course'), $queried_types)) { $paramarray['post_join'][] = '{courses}'; $default_fields = isset($paramarray['default_fields']) ? 
$paramarray['default_fields'] : array(); $default_fields['{courses}.course_permissions'] = ''; $default_fields['{courses}.course_category'] = ''; $default_fields['{courses}.course_group'] = ''; $default_fields['{courses}.locked'] = ''; $default_fields['{courses}.course_type'] = ''; $default_fields['{courses}.course_badge'] = ''; $default_fields['{courses}.course_hero'] = ''; $default_fields['{courses}.course_length'] = ''; $default_fields['{courses}.course_prereq'] = ''; $default_fields['{courses}.course_meeting_dates'] = ''; $paramarray['default_fields'] = $default_fields; } if($queried_types && in_array(Post::type('unit'), $queried_types)) { $paramarray['post_join'][] = '{units}'; $default_fields = isset($paramarray['default_fields']) ? $paramarray['default_fields'] : array(); $default_fields['{units}.parent'] = ''; $default_fields['{units}.pinned'] = ''; $default_fields['{units}.locked'] = ''; $default_fields['{units}.views'] = ''; $default_fields['{units}.length'] = ''; $default_fields['{units}.video'] = ''; $paramarray['default_fields'] = $default_fields; } if($queried_types && in_array(Post::type('quiz'), $queried_types)) { $paramarray['post_join'][] = '{quizs}'; $default_fields = isset($paramarray['default_fields']) ? 
$paramarray['default_fields'] : array(); $default_fields['{quizs}.course_id'] = ''; $default_fields['{quizs}.unit_id'] = ''; $paramarray['default_fields'] = $default_fields; } return $paramarray; } public function filter_post_schema_map_track($schema, $post) { $schema['tracks'] = $schema['*']; $schema['tracks']['post_id'] = '*id'; return $schema; } public function filter_post_schema_map_course($schema, $post) { $schema['courses'] = $schema['*']; $schema['courses']['post_id'] = '*id'; return $schema; } public function filter_post_schema_map_unit($schema, $post) { $schema['units'] = $schema['*']; $schema['units']['post_id'] = '*id'; return $schema; } public function filter_post_schema_map_quiz($schema, $post) { $schema['quizs'] = $schema['*']; $schema['quizs']['post_id'] = '*id'; return $schema; } public function filter_default_rewrite_rules( $rules ) { $this->add_rule('"tracks"', 'display_tracks'); $this->add_rule('"courses"', 'display_courses'); // create a new course, under a specific track. $this->add_rule('"track"/track/"new"/id', 'display_course_new'); $this->add_rule('"track"/track/"edit"/id', 'display_course_edit'); // display a specific course, and its associated information $this->add_rule('"track"/slug/"course"/course/"syllabus"', 'display_course_syllabus'); $this->add_rule('"track"/slug/"course"/course', 'display_course'); // create a specific unit, under a specific course. $this->add_rule('"course"/course/"new"/id', 'display_unit_new'); $this->add_rule('"course"/course/"edit"/id', 'display_unit_edit'); $this->add_rule('"start"/"course"/course', 'start_course'); // display a specific course, and all its associate info. $this->add_rule('"course"/slug/"unit"/unit', 'display_course_unit'); // create a new quiz, under a specific unit. 
$this->add_rule('"unit"/unit/"quiz"/quiz', 'display_unit_quiz'); $this->add_rule('"unit"/unit/"new"/id', 'display_quiz_new'); $this->add_rule('"unit"/unit/"edit"/id', 'display_quiz_edit'); // authentication crap $this->add_rule('"partner"/"login"', 'lms_login'); return $rules; } public function theme_route_lms_login($theme, $params) { $name = $_POST['username']; $pass = $_POST['password']; if ( ( null != $name ) || ( null != $pass ) ) { $user = User::authenticate( $name, $pass ); if ( ( $user instanceOf User ) && ( $user != false ) ) { // if there's an unused password reset token, unset it to make sure there's no possibility of a compromise that way if ( isset( $user->info->password_reset ) ) { unset( $user->info->password_reset ); } /* Successfully authenticated. */ // Timestamp last login date and time. $user->info->authenticate_time = DateTime::create()->format( 'Y-m-d H:i:s' ); $user->update(); // Remove left over expired session error message. if ( Session::has_errors( 'expired_session' ) ) { Session::remove_error( 'expired_session' ); } $login_session = Session::get_set( 'login' ); if ( ! empty( $login_session ) ) { /* Now that we know we're dealing with the same user, transfer the form data so he does not lose his request */ if ( ! empty( $login_session['post_data'] ) ) { Session::add_to_set( 'last_form_data', $last_form_data['post'], 'post' ); } if ( ! empty( $login_session['get_data'] ) ) { Session::add_to_set( 'last_form_data', $last_form_data['get'], 'get' ); } // don't bother parsing out the URL, we store the URI that was requested, so just append that to the hostname and we're done $login_dest = Site::get_url('host') . 
$login_session['original']; } else { $login_session = null; $login_dest = Site::get_url( 'admin' ); } // filter the destination $login_dest = Plugins::filter( 'login_redirect_dest', $login_dest, $user, $login_session ); // finally, redirect to the destination Utils::redirect( $login_dest ); return true; } } } public function theme_route_start_course($theme, $params) { $course = Course::get( array('slug' => $params['course']) ); $units = $course->units(); $user = User::identify(); $started = false; $found_complete = array(); $total_units = array(); $course_check = DB::get_column( "SELECT id, started, completed from {progress} WHERE user_id = ? AND post_id = ?", array($user->id, $course->id) ); foreach( $units as $unit ) { $unit_check = DB::get_results( "SELECT id, started, completed, post_id from {progress} WHERE user_id = ? AND post_id = ? AND started != 0 AND completed = 0", array($user->id, $unit->id) ); if( !empty($unit_check) ) { $started = true; $last_unit = $unit_check[0]->post_id; } } if( empty($course_check) && $started == false ) { $unit = $units[0]; $course->start( $user ); $unit->start( $user ); Utils::redirect( URL::get('display_course_unit', array('slug' => $course->slug, 'unit' => $unit->slug)) ); } elseif( $started == true ) { $return = Unit::get( array('id' => $last_unit) ); Utils::redirect( URL::get('display_course_unit', array('slug' => $course->slug, 'unit' => $return->slug)) ); } else { if( !empty($course_check) && $started == false ) { foreach( $units as $unit ) { $complete_check = DB::get_results( "SELECT id, started, completed, post_id from {progress} WHERE user_id = ? AND post_id = ? 
AND started != 0 AND completed != 0", array($user->id, $unit->id) ); $total_units[] = $unit->id; if( !empty($complete_check)) { $found_complete[] = $complete_check[0]->post_id; } } } $units_left = array_diff($total_units, $found_complete); if( count($units_left) > 1 ) { array_pop( $units_left ); } foreach( $units_left as $key => $value ) { $return = Unit::get( array('id' => $value) ); $return->start( $user ); } Utils::redirect( URL::get('display_course_unit', array('slug' => $course->slug, 'unit' => $return->slug)) ); } } public function theme_route_display_tracks($theme, $params) { $theme->tracks = Tracks::get( array('orderby' => 'id ASC') ); $theme->display( 'track.multiple' ); } public function theme_route_display_courses($theme, $params) { $theme->forums = Courses::get( array('orderby' => 'forum_order ASC') ); $theme->display( 'course.multiple' ); } public function theme_route_display_course($theme, $params) { $theme->track = Track::get( array('slug' => $params['slug']) ); $theme->course = Course::get( array('slug' => $params['course']) ); $theme->prereq = Course::get( array('id' => $theme->course->course_prereq) ); $theme->display( 'course.single' ); } public function theme_route_display_course_syllabus($theme, $params) { $theme->track = Track::get( array('slug' => $params['slug']) ); $theme->course = Course::get( array('slug' => $params['course']) ); $theme->prereq = Course::get( array('id' => $theme->course->course_prereq) ); $theme->syllabi = Units::get( array('parent' => $theme->course->id) ); $theme->display( 'course.single.syllabus' ); } public function theme_route_display_course_unit($theme, $params) { $user = User::identify(); $theme->course = Course::get( array('slug' => $params['slug']) ); $theme->track = Track::get( array('id' => $theme->course->parent) ); $theme->unit = Unit::get( array('slug' => $params['unit']) ); $theme->quiz = Quiz::get( array('unit_id' => $theme->unit->id) ); if( $user->can('admin') || 
$user->in_group($theme->forum->forum_group) ) { // continue } else { Utils::redirect(Site::get_url('habari')); exit(); } $this->increment_views( $theme->unit ); $theme->display('course.unit.single'); } public function theme_route_display_course_new($theme, $params) { $theme->mode = 1; $theme->track = Track::get( array('slug' => $params['track']) ); $theme->course = Course::get( array('id' => $params['id']) ); $theme->courses = Courses::get( array('not:id' => $theme->course->id, 'parent' => $theme->track->id, 'nolimit' => true) ); $theme->display( 'course.new' ); } public function theme_route_display_course_edit($theme, $params) { $theme->mode = 2; $theme->track = Track::get( array('slug' => $params['track']) ); $theme->course = Course::get( array('id' => $params['id']) ); $theme->courses = Courses::get( array('not:id' => $theme->course->id, 'parent' => $theme->track->id, 'nolimit' => true) ); $theme->display( 'course.new' ); } public function theme_route_display_unit_new($theme, $params) { $theme->mode = 1; $theme->course = Course::get( array('slug' => $params['course']) ); $theme->unit = Unit::get( array('id' => $params['id']) ); $theme->display( 'unit.new' ); } public function theme_route_display_unit_edit($theme, $params) { $theme->mode = 2; $theme->course = Course::get( array('slug' => $params['course']) ); $theme->unit = Unit::get( array('id' => $params['id']) ); $theme->display( 'unit.new' ); } public function theme_route_display_unit_quiz($theme, $params) { $theme->unit = Unit::get( array('unit' => $params['unit']) ); $theme->course = Course::get( array('id' => $theme->unit->parent) ); $theme->track = Track::get( array('id' => $theme->course->parent) ); $theme->quiz = Quiz::get( array('id' => $params['quiz']) ); if( $_GET['retake'] == 1 ) { $theme->quiz->retake( User::identify() ); } $theme->display( 'unit.quiz.single' ); } public function theme_route_display_quiz_new($theme, $params) { $theme->mode = 1; $theme->unit = Unit::get( array('unit' => 
$params['unit']) ); $theme->course = Course::get( array('id' => $theme->unit->parent) ); $theme->quiz = Quiz::get( array('id' => $params['id']) ); $theme->display( 'quiz.new' ); } public function theme_route_display_quiz_edit($theme, $params) { $theme->mode = 2; $theme->unit = Unit::get( array('unit' => $params['unit']) ); $theme->course = Course::get( array('id' => $theme->unit->parent) ); $theme->quiz = Quiz::get( array('id' => $params['id']) ); $theme->display( 'quiz.new' ); } public function action_auth_ajax_create_course() { $vars = $_GET; $track = Track::get( array('slug' => $vars['track']) ); $title = $this->temp_id(); $postdata = array( 'content_type' => Post::type('course'), 'title' => $title, 'slug' => Utils::slugify($title), 'content' => '', 'parent' => $track->id, 'user_id' => User::identify()->id, 'status' => Post::status('draft'), 'pubdate' => DateTime::date_create( date(DATE_RFC822) ), ); $p = Course::create( $postdata ); $this->create_dir( Site::get_path('user') . '/files/uploads/courses/' . $p->id ); $this->create_forum( $p ); Utils::redirect( URL::get('display_course_new', array( 'track' => $track->slug, 'id' => $p->id)) ); } public function action_auth_ajax_update_course() { $vars = $_POST; $t = Track::get( array('track' => $vars['track']) ); $c = Course::get( array('id' => $vars['id']) ); if( isset($vars['lock']) ) { $lock = 1; } else { $lock = 0; } $c->title = $vars['title']; $c->slug = Utils::slugify( $vars['title'] ); $c->content = $vars['content']; $c->tags = $vars['tags']; $c->locked = $lock; $c->status = Post::status('published'); $c->course_prereq = $vars['prereq'] ? $vars['prereq'] : 0; $c->course_meeting_dates = $vars['meeting_dates'] ? 
$vars['meeting_dates'] : ''; $c->update(); Utils::redirect( URL::get('display_course', array('slug' => $t->slug, 'course' => $c->slug)) ); } public function action_auth_ajax_create_unit() { $vars = $_GET; $course = Course::get( array('slug' => $vars['course']) ); $title = $this->temp_id(); $postdata = array( 'content_type' => Post::type('unit'), 'title' => $title, 'slug' => Utils::slugify($title), 'content' => '', 'parent' => $course->id, 'user_id' => User::identify()->id, 'status' => Post::status('draft'), 'pubdate' => DateTime::date_create( date(DATE_RFC822) ), ); $u = Unit::create( $postdata ); $this->create_dir( Site::get_path('user') . '/files/uploads/courses/' . $course->id . '/' . $u->id ); Utils::redirect( URL::get('display_unit_new', array( 'course' => $course->slug, 'id' => $u->id)) ); } public function action_auth_ajax_update_unit() { $vars = $_POST; $course = Course::get( array('id' => $vars['course']) ); $unit = Unit::get( array('id' => $vars['id']) ); $this->create_dir( Site::get_path('user') . '/files/uploads/courses/' . $course->id . '/' . $unit->id ); if( isset($vars['lock']) ) { $lock = 1; } else { $lock = 0; } if( $vars['media_link'] ) { $video_l = explode( '?v=', $vars['media_link'] ); if( count($video_l) > 2 ) { $video_l = '//youtube.com/embed/' . $video_l[1]; } else { $video_l = $unit->video; } } $unit->title = $vars['title']; $unit->slug = Utils::slugify( $vars['title'] ); $unit->content = $vars['content']; $unit->tags = $vars['tags']; $unit->locked = $lock; $unit->status = Post::status('published'); $unit->length = $vars['length']; $unit->video = $vars['media_link'] ? 
$video_l : ''; $unit->update(); Utils::redirect( URL::get('display_unit_edit', array( 'course' => $course->slug, 'id' => $unit->id)) ); } public static function format_tags( $terms, $between = ', ', $between_last = null, $sort_alphabetical = false ) { $array = array(); if ( !$terms instanceof Terms ) { $terms = new Terms( $terms ); } foreach ( $terms as $term ) { $array[$term->term] = $term->term_display; } if ( $sort_alphabetical ) { ksort( $array ); } if ( $between_last === null ) { $between_last = _t( ' and ' ); } $fn = function($a, $b) { return $a; }; $array = array_map( $fn, $array, array_keys( $array ) ); $last = array_pop( $array ); $out = implode( $between, $array ); $out .= ( $out == '' ) ? $last : $between_last . $last; return $out; } public function action_comment_insert_before($comment) { $comment->status = Comment::status('approved'); return $comment; } public function action_auth_ajax_add_lms_comment() { $vars = $_POST; $user = User::identify(); $return = array(); $name = $user->displayname; $email = $user->email; $url = Site::get_url('habari'); $unit = Unit::get( array('id' => $vars['id']) ); $content = $vars['response']; $comment = new Comment( array( 'post_id' => $vars['id'], 'name' => $name, 'email' => $email, 'url' => $url, 'ip' => sprintf( "%u", ip2long( Utils::get_ip() ) ), 'content' => $content, 'status' => Comment::status('approved'), 'date' => DateTime::date_create(), 'type' => Comment::type('comment'), ) ); try { $comment->insert(); $status = 200; $data = array(); $message = 'Response Added.'; } catch( Exception $e ) { $status = 401; $data = array(); $message = 'Response could not be added.'; } $ar = new AjaxResponse( $status, $data, $message ); $ar->out(); } public function action_auth_ajax_lms_upload() { $file = $_FILES; $message = ''; $data = array(); $course = Course::get( array('slug' => $_POST['name']) ); $unit = Unit::get( array('id' => $_POST['id']) ); $dir = Site::get_path('user') . '/files/uploads/courses/' . $course->id . '/' . 
$unit->id; foreach( $_FILES['files']['name'] as $index => $value ) { $file = $this->upload( $file, $dir, $value ); if( $file->document ) { $status = 200; } else { $status = 401; } $ar = new AjaxResponse( $status, $message, $data ); $ar->out(); } } public function action_auth_ajax_add_quiz() { $vars = $_GET; $title = $this->temp_id(); $unit = Unit::get( array('id' => $vars['unit']) ); $postdata = array( 'content_type' => Post::type('quiz'), 'title' => $title, 'slug' => Utils::slugify( $title ), 'content' => '', 'course_id' => $vars['course'], 'unit_id' => $vars['unit'], 'user_id' => User::identify()->id, 'status' => Post::status('draft'), 'pubdate' => DateTime::date_create( date(DATE_RFC822) ), ); $q = Quiz::create( $postdata ); $this->create_dir( Site::get_path('user') . '/files/uploads/courses/' . $vars['course'] . '/' . $vars['unit'] . '/' . $q->id ); Utils::redirect( URL::get('display_quiz_new', array( 'unit' => $unit->slug, 'id' => $q->id)) ); } public function action_auth_ajax_update_quiz() { $vars = $_POST; $unit = Unit::get( array('id' => $vars['unit']) ); $quiz = Quiz::get( array('id' => $vars['id']) ); foreach( $vars['question_text'] as $key => $value ) { if( $vars['question_id'][$key] != '' ) { $check = DB::query( "SELECT id FROM {questions} WHERE id = ?", array($vars['question_id'][$key]) ); } else { $check = false; } $postdata = array( 'quiz_id' => $quiz->id, 'question' => $value, 'choices' => $vars['question_answers'][$key], 'correct_choice' => $vars['question_answer'][$key], ); if( $check ) { $insert = DB::update( DB::table('questions'), $postdata, array('id' => $vars['question_id'][$key]) ); } else { $insert = DB::insert( DB::table('questions'), $postdata ); } } $quiz->content = $vars['content']; $quiz->status = Post::status('published'); $quiz->update(); Utils::redirect( URL::get('display_quiz_edit', array( 'unit' => $unit->slug, 'id' => $quiz->id)) ); } public function action_auth_ajax_check_quiz() { $vars = $_POST; $passed = 0; $failed = 0; $pass 
= 0; $user = User::identify(); $quiz = Quiz::get( array('id' => $vars['quiz_id']) ); $unit = Unit::get( array('id' => $quiz->unit_id) ); $total = count( $quiz->questions() ); foreach( $vars['question'] as $key => $value ) { $given = $value; $correct = DB::get_results( "SELECT id, correct_choice FROM {questions} WHERE id = ?", array( $key ) ); $data = array( 'user_id' => $user->id, 'quiz_id' => $quiz->id, 'question_id' => $correct[0]->id, 'answer' => $value ); if( $given == $correct[0]->correct_choice ) { $data['passed'] = 1; $passed++; } else { $data['passed'] = 0; $failed++; } $processed = DB::insert( DB::table('answers'), $data ); } $count1 = $passed / $total; $count2 = $count1 * 100; $percentage = number_format($count2, 0); if( $percentage >= 75 ) { $pass = 1; $unit->complete( $user ); } $postdata = array( 'quiz_id' => $quiz->id, 'user_id' => $user->id, 'result' => $pass ); DB::insert( DB::table('user_quizs'), $postdata ); Utils::redirect( URL::get('display_unit_quiz', array('unit' => $unit->slug, 'quiz' => $quiz->id)) ); exit(); } public function action_auth_ajax_mark_unit_complete() { $vars = $_GET; $user = User::identify(); $unit = Unit::get( array('id' => $vars['unit']) ); $course = Course::get( array('id' => $unit->parent) ); $track = Track::get( array('id' => $course->parent) ); $unit->complete( $user ); $course->check_for_completeness( $user ); if( $unit->ascend() ) { Utils::redirect( $course->course_start_url( $user ) ); } else { Utils::redirect( URL::get('display_tracks') ); } exit(); } } ?>
chrisjdavis/habari-lms
user/plugins/LMS/lms.plugin.php
PHP
apache-2.0
32,049
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/compiler/xla/service/hlo_cost_analysis.h" #include <cmath> #include "absl/algorithm/container.h" #include "absl/memory/memory.h" #include "tensorflow/compiler/xla/service/hlo_computation.h" #include "tensorflow/compiler/xla/service/hlo_instruction.h" #include "tensorflow/compiler/xla/service/hlo_opcode.h" #include "tensorflow/compiler/xla/shape_util.h" #include "tensorflow/compiler/xla/status_macros.h" #include "tensorflow/compiler/xla/util.h" #include "tensorflow/compiler/xla/window_util.h" #include "tensorflow/core/lib/core/bits.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/map_util.h" namespace xla { constexpr const char HloCostAnalysis::kFlopsKey[]; constexpr const char HloCostAnalysis::kTranscendentalsKey[]; constexpr const char HloCostAnalysis::kBytesAccessedKey[]; constexpr const char HloCostAnalysis::kOptimalSecondsKey[]; HloCostAnalysis::HloCostAnalysis(const ShapeSizeFunction& shape_size) : HloCostAnalysis(shape_size, {}) {} HloCostAnalysis::HloCostAnalysis(const ShapeSizeFunction& shape_size, const Properties& per_second_rates) : shape_size_(shape_size), per_second_rates_(per_second_rates) {} Status HloCostAnalysis::Preprocess(const HloInstruction* hlo) { // Set current instruction cost values to reasonable default values. 
Each // handler can overwrite these values. In Postprocess, these values are // accumulated and written to the per-instruction maps. current_properties_.clear(); current_should_compute_bottleneck_time_ = true; // The default number of bytes accessed for an instruction is the sum of the // sizes of the inputs and outputs. The default ShapeUtil::ByteSizeOf does not // handle opaque types. float bytes_accessed = GetShapeSize(hlo->shape()); SetOutputBytesAccessed(GetShapeSize(hlo->shape())); for (int64 i = 0; i < hlo->operand_count(); ++i) { const HloInstruction* operand = hlo->operand(i); bytes_accessed += GetShapeSize(operand->shape()); SetOperandBytesAccessed(i, GetShapeSize(operand->shape())); } current_properties_[kBytesAccessedKey] = bytes_accessed; return Status::OK(); } Status HloCostAnalysis::Postprocess(const HloInstruction* hlo) { if (current_should_compute_bottleneck_time_) { // Compute the time as the time of the bottleneck, i.e. the slowest property // given the per-second rate of each property. float optimal_seconds = 0.0f; for (const auto& property : current_properties_) { if (property.first != kOptimalSecondsKey) { optimal_seconds = std::max( optimal_seconds, property.second / GetProperty(property.first, per_second_rates_, INFINITY)); } } current_properties_[kOptimalSecondsKey] = optimal_seconds; } TF_RET_CHECK(hlo_properties_.emplace(hlo, current_properties_).second); for (const auto& property : current_properties_) { properties_sum_[property.first] += property.second; } return Status::OK(); } Status HloCostAnalysis::HandleElementwiseOp( const HloInstruction* hlo_instruction) { const auto& shape = hlo_instruction->shape(); // For element-wise operations, the number of computations is the same as the // number of elements in the output shape. 
auto computation_count = ShapeUtil::ElementsIn(shape); auto opcode = hlo_instruction->opcode(); // We treat transcendental operations separately since one transcendental // operation can correspond to several floating point ops. if (opcode == HloOpcode::kExp || opcode == HloOpcode::kLog || opcode == HloOpcode::kPower || opcode == HloOpcode::kSqrt || opcode == HloOpcode::kRsqrt || opcode == HloOpcode::kTanh || opcode == HloOpcode::kSin || opcode == HloOpcode::kCos || opcode == HloOpcode::kExpm1 || opcode == HloOpcode::kLog1p || opcode == HloOpcode::kAtan2) { current_properties_[kTranscendentalsKey] = computation_count; } else { // Note: transcendental operations are considered a separate category from // FLOPs. current_properties_[kFlopsKey] = computation_count; } return Status::OK(); } /*static*/ float HloCostAnalysis::GetProperty(const string& key, const Properties& properties, const float default_value) { auto key_value = properties.find(key); return key_value == properties.end() ? default_value : key_value->second; } /*static*/ float HloCostAnalysis::GetPropertyForHlo( const HloInstruction& hlo, const string& key, const HloToProperties& hlo_to_properties) { auto it = hlo_to_properties.find(&hlo); if (it == hlo_to_properties.end()) { return 0.0f; } else { return GetProperty(key, it->second); } } int64 HloCostAnalysis::GetShapeSize(const Shape& shape) const { if (!LayoutUtil::HasLayout(shape)) { return 0; } return shape_size_(shape); } int64 HloCostAnalysis::FusionParameterReadBytes( const HloInstruction* hlo) const { int64 size = 0; bool seen_trivial_user = false; CHECK(hlo->IsFused() && hlo->opcode() == HloOpcode::kParameter); for (const HloInstruction* user : hlo->users()) { switch (user->opcode()) { case HloOpcode::kFusion: { for (int64 idx : user->OperandIndices(hlo)) { size += FusionParameterReadBytes(user->fused_parameter(idx)); } break; } case HloOpcode::kSlice: size += GetShapeSize(user->shape()); break; case HloOpcode::kDynamicSlice: size += hlo == 
user->operand(0) ? GetShapeSize(user->shape()) : GetShapeSize(hlo->shape()); break; case HloOpcode::kDynamicUpdateSlice: // Uses the same shape as 'update' which is operand 1. size += hlo == user->operand(0) ? GetShapeSize(user->operand(1)->shape()) : GetShapeSize(hlo->shape()); break; case HloOpcode::kBroadcast: case HloOpcode::kReshape: size += GetShapeSize(hlo->shape()); break; default: // Other instructions reading this parameter are assumed to be able to // share the read from memory. if (!seen_trivial_user) { seen_trivial_user = true; size += GetShapeSize(hlo->shape()); } } } return size; } Status HloCostAnalysis::HandleElementwiseUnary(const HloInstruction* hlo) { return HandleElementwiseOp(hlo); } Status HloCostAnalysis::HandleElementwiseBinary(const HloInstruction* hlo) { return HandleElementwiseOp(hlo); } Status HloCostAnalysis::HandleCompare(const HloInstruction* compare) { return HandleElementwiseOp(compare); } Status HloCostAnalysis::HandleClamp(const HloInstruction* clamp) { return HandleElementwiseOp(clamp); } Status HloCostAnalysis::HandleReducePrecision(const HloInstruction* hlo) { return HandleElementwiseOp(hlo); } Status HloCostAnalysis::HandleParameter(const HloInstruction*) { current_should_compute_bottleneck_time_ = false; current_properties_[kBytesAccessedKey] = 0; SetOutputBytesAccessed(0); current_properties_[kOptimalSecondsKey] = 0; return Status::OK(); } Status HloCostAnalysis::HandleConstant(const HloInstruction*) { current_should_compute_bottleneck_time_ = false; current_properties_[kBytesAccessedKey] = 0; SetOutputBytesAccessed(0); current_properties_[kOptimalSecondsKey] = 0; return Status::OK(); } Status HloCostAnalysis::HandleIota(const HloInstruction*) { return Status::OK(); } Status HloCostAnalysis::HandleGetTupleElement( const HloInstruction* get_tuple_element) { // GetTupleElement forwards a pointer and does not touch each element in the // output. 
current_should_compute_bottleneck_time_ = false; current_properties_[kBytesAccessedKey] = 0; SetOutputBytesAccessed(0); SetOperandBytesAccessed(0, 0); current_properties_[kOptimalSecondsKey] = 0; return Status::OK(); } Status HloCostAnalysis::HandleSelect(const HloInstruction* hlo) { return HandleElementwiseOp(hlo); } Status HloCostAnalysis::HandleTupleSelect(const HloInstruction*) { return Status::OK(); } Status HloCostAnalysis::HandleReverse(const HloInstruction*) { return Status::OK(); } Status HloCostAnalysis::HandleSlice(const HloInstruction* slice) { current_properties_[kBytesAccessedKey] = GetShapeSize(slice->shape()) * 2; SetOutputBytesAccessed(GetShapeSize(slice->shape())); SetOperandBytesAccessed(0, GetShapeSize(slice->shape())); return Status::OK(); } Status HloCostAnalysis::HandleDynamicSlice( const HloInstruction* dynamic_slice) { current_properties_[kBytesAccessedKey] = GetShapeSize(dynamic_slice->shape()) * 2 + GetShapeSize(dynamic_slice->operand(1)->shape()); SetOutputBytesAccessed(GetShapeSize(dynamic_slice->shape())); SetOperandBytesAccessed(0, GetShapeSize(dynamic_slice->shape())); SetOperandBytesAccessed(1, GetShapeSize(dynamic_slice->operand(1)->shape())); return Status::OK(); } Status HloCostAnalysis::HandleDynamicUpdateSlice( const HloInstruction* dynamic_update_slice) { current_properties_[kBytesAccessedKey] = GetShapeSize(dynamic_update_slice->operand(1)->shape()) * 2 + GetShapeSize(dynamic_update_slice->operand(2)->shape()); // Operand 0 aliases with the output. SetOutputBytesAccessed( GetShapeSize(dynamic_update_slice->operand(1)->shape())); SetOperandBytesAccessed(0, 0); SetOperandBytesAccessed( 1, GetShapeSize(dynamic_update_slice->operand(1)->shape())); SetOperandBytesAccessed( 2, GetShapeSize(dynamic_update_slice->operand(2)->shape())); return Status::OK(); } Status HloCostAnalysis::HandleTuple(const HloInstruction* tuple) { // The tuple instruction only gathers pointers from inputs (it doesn't iterate // through them). 
The memory touched is then only the size of the output // index table of the tuple. current_properties_[kBytesAccessedKey] = GetShapeSize(tuple->shape()); SetOutputBytesAccessed(GetShapeSize(tuple->shape())); for (int i = 0; i < tuple->operand_count(); ++i) { SetOperandBytesAccessed(i, 0); } return Status::OK(); } Status HloCostAnalysis::HandleConcatenate(const HloInstruction*) { return Status::OK(); } Status HloCostAnalysis::HandleConvert(const HloInstruction* convert) { return HandleElementwiseOp(convert); } Status HloCostAnalysis::HandleCopy(const HloInstruction*) { return Status::OK(); } Status HloCostAnalysis::HandleDomain(const HloInstruction* domain) { // Domain does not have any computation or data transfer. current_should_compute_bottleneck_time_ = false; current_properties_[kBytesAccessedKey] = 0; SetOutputBytesAccessed(0); for (int i = 0; i < domain->operand_count(); ++i) { SetOperandBytesAccessed(i, 0); } current_properties_[kOptimalSecondsKey] = 0; return Status::OK(); } Status HloCostAnalysis::HandleDot(const HloInstruction* dot) { const Shape& lhs_shape = dot->operand(0)->shape(); const Shape& dot_shape = dot->shape(); const DotDimensionNumbers& dnums = dot->dot_dimension_numbers(); // Count of elements along the reduction dimension (last dimension for the // rhs). int64 reduction_width = 1; for (auto dim : dnums.lhs_contracting_dimensions()) { reduction_width *= lhs_shape.dimensions(dim); } // Each output element requires reduction_width FMA operations. current_properties_[kFlopsKey] = kFmaFlops * ShapeUtil::ElementsIn(dot_shape) * reduction_width; return Status::OK(); } Status HloCostAnalysis::HandleInfeed(const HloInstruction*) { return Status::OK(); } Status HloCostAnalysis::HandleOutfeed(const HloInstruction*) { return Status::OK(); } Status HloCostAnalysis::HandleMap(const HloInstruction* map) { // Compute properties of the mapped function. 
TF_ASSIGN_OR_RETURN(const Properties sub_properties, ProcessSubcomputation(map->to_apply())); // Compute the cost of all elements for this Map operation. const int64 element_count = ShapeUtil::ElementsIn(map->shape()); for (const auto& property : sub_properties) { if (!absl::StartsWith(property.first, kBytesAccessedKey)) { current_properties_[property.first] = property.second * element_count; } } return Status::OK(); } Status HloCostAnalysis::HandleReduce(const HloInstruction* reduce) { HloComputation* function = reduce->to_apply(); // Compute the cost of the user function. TF_ASSIGN_OR_RETURN(const Properties sub_properties, ProcessSubcomputation(function)); // Compute the cost of all elements for this Reduce operation. // This counts the number of times the reduction function is applied, so it // does not need to be multiplied by the number of input tensors - that's // already "priced in" by the sub-computation doing more work. auto arg = reduce->operand(0); auto output_shape = reduce->shape().IsArray() ? reduce->shape() : reduce->shape().tuple_shapes(0); int64 reduction_count = ShapeUtil::ElementsIn(arg->shape()) - ShapeUtil::ElementsIn(output_shape); for (const auto& property : sub_properties) { if (!absl::StartsWith(property.first, kBytesAccessedKey)) { current_properties_[property.first] = property.second * reduction_count; } } return Status::OK(); } Status HloCostAnalysis::HandleReduceWindow( const HloInstruction* reduce_window) { const Window& window = reduce_window->window(); auto function = reduce_window->to_apply(); // Compute the properties of the reduction function. TF_ASSIGN_OR_RETURN(const Properties sub_properties, ProcessSubcomputation(function)); // Compute the cost of all elements for this ReduceWindow operation. For each // output element there are window_size - 1 reductions to perform. 
int64 window_element_count = 1; for (const auto& dimension : window.dimensions()) { window_element_count *= dimension.size(); } const int64 output_element_count = ShapeUtil::ElementsIn(reduce_window->shape()); const int64 reduction_count = (window_element_count - 1) * output_element_count; for (const auto& property : sub_properties) { if (!absl::StartsWith(property.first, kBytesAccessedKey)) { current_properties_[property.first] = property.second * reduction_count; } } return Status::OK(); } Status HloCostAnalysis::HandleSelectAndScatter( const HloInstruction* instruction) { // Compute the properties of the select and scatter function. // Compute the properties of the reduction function. TF_ASSIGN_OR_RETURN(const Properties select_properties, ProcessSubcomputation(instruction->select())); TF_ASSIGN_OR_RETURN(const Properties scatter_properties, ProcessSubcomputation(instruction->scatter())); // Compute the cost of all elements for this operation. For each scatter // source element there are window_size - 1 select computations to perform and // 1 scatter computation to perform. const auto source = instruction->operand(1); const auto source_element_count = ShapeUtil::ElementsIn(source->shape()); int64 window_element_count = 1; for (const auto& dimension : instruction->window().dimensions()) { window_element_count *= dimension.size(); } const int64 select_count = source_element_count * (window_element_count - 1); for (const auto& property : select_properties) { if (!absl::StartsWith(property.first, kBytesAccessedKey)) { current_properties_[property.first] += property.second * select_count; } } for (const auto& property : scatter_properties) { if (!absl::StartsWith(property.first, kBytesAccessedKey)) { current_properties_[property.first] += property.second * source_element_count; } } return Status::OK(); } Status HloCostAnalysis::HandleBitcast(const HloInstruction*) { // A bitcast does no computation and touches no memory. 
current_properties_[kBytesAccessedKey] = 0; SetOutputBytesAccessed(0); SetOperandBytesAccessed(0, 0); current_properties_[kOptimalSecondsKey] = 0; return Status::OK(); } Status HloCostAnalysis::HandleBroadcast(const HloInstruction*) { return Status::OK(); } Status HloCostAnalysis::HandlePad(const HloInstruction*) { return Status::OK(); } Status HloCostAnalysis::HandleCopyStart(const HloInstruction*) { return Status::OK(); } Status HloCostAnalysis::HandleCopyDone(const HloInstruction*) { return Status::OK(); } Status HloCostAnalysis::HandleSend(const HloInstruction*) { return Status::OK(); } Status HloCostAnalysis::HandleSendDone(const HloInstruction*) { return Status::OK(); } Status HloCostAnalysis::HandleRecv(const HloInstruction*) { return Status::OK(); } Status HloCostAnalysis::HandleRecvDone(const HloInstruction*) { return Status::OK(); } Status HloCostAnalysis::HandleReshape(const HloInstruction*) { return Status::OK(); } Status HloCostAnalysis::HandleBatchNormTraining(const HloInstruction*) { // TODO(b/62294698): Implement cost analysis for batch-norm-training. return Status::OK(); } Status HloCostAnalysis::HandleBatchNormInference(const HloInstruction*) { // TODO(b/62294698): Implement cost analysis for batch-norm-inference. return Status::OK(); } Status HloCostAnalysis::HandleBatchNormGrad(const HloInstruction*) { // TODO(b/62294698): Implement cost analysis for batch-norm-grad. return Status::OK(); } Status HloCostAnalysis::HandleTranspose(const HloInstruction* transpose) { if (transpose->IsEffectiveBitcast()) { return HandleBitcast(transpose); } return Status::OK(); } Status HloCostAnalysis::HandleAfterAll(const HloInstruction* token) { // This instruction is used to enforce ordering at compile time. No code is // emitted. 
current_should_compute_bottleneck_time_ = false; current_properties_[kBytesAccessedKey] = 0; SetOutputBytesAccessed(0); for (int i = 0; i < token->operand_count(); ++i) { SetOperandBytesAccessed(i, 0); } current_properties_[kOptimalSecondsKey] = 0; return Status::OK(); } Status HloCostAnalysis::HandleAddDependency( const HloInstruction* add_dependency) { // This instruction is used to enforce ordering at compile time. No code is // emitted. current_should_compute_bottleneck_time_ = false; current_properties_[kBytesAccessedKey] = 0; SetOutputBytesAccessed(0); for (int i = 0; i < add_dependency->operand_count(); ++i) { SetOperandBytesAccessed(i, 0); } current_properties_[kOptimalSecondsKey] = 0; return Status::OK(); } Status HloCostAnalysis::HandleConvolution(const HloInstruction* convolution) { auto lhs = convolution->operand(0); auto rhs = convolution->operand(1); Window window = convolution->window(); const auto& result_shape = convolution->shape(); const Shape& lhs_shape = lhs->shape(); const Shape& rhs_shape = rhs->shape(); const auto& dnums = convolution->convolution_dimension_numbers(); const int64 input_batch_dim = dnums.input_batch_dimension(); const int64 input_feature_dim = dnums.input_feature_dimension(); const int64 output_feature_dim = dnums.output_feature_dimension(); const int64 input_feature = ShapeUtil::GetDimension(lhs_shape, input_feature_dim); const int64 output_feature = ShapeUtil::GetDimension(result_shape, output_feature_dim); const int64 batch = ShapeUtil::GetDimension(lhs_shape, input_batch_dim); DimensionVector kernel_limits; DimensionVector output_limits; DimensionVector input_limits; if (window.dimensions().empty()) { window = window_util::MakeWindow({1}); kernel_limits.push_back(1); output_limits.push_back(1); input_limits.push_back(1); } else { for (int64 spatial_dimension = 0; spatial_dimension < window.dimensions_size(); ++spatial_dimension) { // Spatial dimension number for kernel (rhs). 
const int64 kernel_spatial_dim = dnums.kernel_spatial_dimensions(spatial_dimension); const int64 kernel_limit = rhs_shape.dimensions(kernel_spatial_dim); kernel_limits.push_back(kernel_limit); // Spatial dimension number for output. const int64 output_spatial_dim = dnums.output_spatial_dimensions(spatial_dimension); const int64 output_limit = result_shape.dimensions(output_spatial_dim); output_limits.push_back(output_limit); // Spatial dimension number for input (lhs). const int64 input_spatial_dim = dnums.input_spatial_dimensions(spatial_dimension); const int64 input_limit = lhs_shape.dimensions(input_spatial_dim); input_limits.push_back(input_limit); } } DimensionVector valid_position_counts; // Loop over each spatial dimension. for (int64 spatial_dimension = 0; spatial_dimension < window.dimensions_size(); ++spatial_dimension) { const auto& window_dim = window.dimensions(spatial_dimension); // These two conditions will create an N^2 iteration pattern with only N // valid elements. This is a performance optimization and produces the same // result as the whole loop. 
if (input_limits[spatial_dimension] == output_limits[spatial_dimension] && kernel_limits[spatial_dimension] == output_limits[spatial_dimension] && input_limits[spatial_dimension] == window_dim.base_dilation() && window_dim.window_dilation() == 1 && std::max<int64>(1, input_limits[spatial_dimension] - 1) == window_dim.stride() && window_dim.padding_low() == 0 && window_dim.padding_high() == 0) { valid_position_counts.push_back(input_limits[spatial_dimension]); continue; } if (input_limits[spatial_dimension] == 1 && kernel_limits[spatial_dimension] == output_limits[spatial_dimension] && window_dim.window_dilation() == 1 && window_dim.base_dilation() == 1 && window_dim.stride() == 1 && window_dim.padding_high() == output_limits[spatial_dimension] - 1 && window_dim.padding_low() == output_limits[spatial_dimension] - 1) { valid_position_counts.push_back(output_limits[spatial_dimension]); continue; } int64 valid_position_count = 0; // Loop over each point in the kernel. for (int64 kernel_idx = 0; kernel_idx < kernel_limits[spatial_dimension]; ++kernel_idx) { // Loop over each point in the output. for (int64 output_idx = 0; output_idx < output_limits[spatial_dimension]; ++output_idx) { // Calculate lhs (input) index without taking base dilation into // account. const int64 undilated_index = output_idx * window_dim.stride() - window_dim.padding_low() + kernel_idx * window_dim.window_dilation(); // Calculate the actual lhs (input) index after dilation. Avoid the // division as an optimization. const int64 lhs_spatial_index = window_dim.base_dilation() > 1 ? undilated_index / window_dim.base_dilation() : undilated_index; // Skip if the lhs (input) index is to be dilated. if (undilated_index != lhs_spatial_index * window_dim.base_dilation()) { continue; } // Skip if input index is not in bound. 
if (lhs_spatial_index < 0 || lhs_spatial_index >= input_limits[spatial_dimension]) { continue; } valid_position_count += 1; } } valid_position_counts.push_back(valid_position_count); } const int64 fma_count = (input_feature / convolution->feature_group_count()) * output_feature * (batch / convolution->batch_group_count()) * Product(valid_position_counts); current_properties_[kFlopsKey] = fma_count * kFmaFlops; return Status::OK(); } Status HloCostAnalysis::HandleFft(const HloInstruction* fft) { auto real_shape = fft->operand(0)->shape().IsTuple() ? ShapeUtil::GetTupleElementShape(fft->operand(0)->shape(), 0) : fft->operand(0)->shape(); constexpr int kFmaPerComplexMul = 4; int64 log_factors = 1; for (int64 dim : fft->fft_length()) { log_factors *= tensorflow::Log2Floor(dim); } current_properties_[kFlopsKey] = kFmaFlops * kFmaPerComplexMul * log_factors * ShapeUtil::ElementsIn(real_shape); return Status::OK(); } Status HloCostAnalysis::HandleTriangularSolve(const HloInstruction* hlo) { // Half of operand 0 is read. float bytes_accessed = GetShapeSize(hlo->shape()); SetOutputBytesAccessed(GetShapeSize(hlo->shape())); bytes_accessed += GetShapeSize(hlo->operand(0)->shape()) / 2.0f; SetOperandBytesAccessed(0, GetShapeSize(hlo->operand(0)->shape()) / 2.0f); bytes_accessed += GetShapeSize(hlo->operand(1)->shape()); SetOperandBytesAccessed(0, GetShapeSize(hlo->operand(1)->shape())); current_properties_[kBytesAccessedKey] = bytes_accessed; const Shape& a_shape = hlo->operand(0)->shape(); const Shape& b_shape = hlo->operand(1)->shape(); // Estimate as batch * mn^2 / 2 flops. int64 elems = a_shape.dimensions(a_shape.dimensions_size() - 1); elems *= ShapeUtil::ElementsIn(b_shape); current_properties_[kFlopsKey] = kFmaFlops * elems; return Status::OK(); } Status HloCostAnalysis::HandleCholesky(const HloInstruction* hlo) { // Half of operand 0 is read and half of the output will be written. 
float bytes_accessed = GetShapeSize(hlo->operand(0)->shape()) / 2.0f; SetOutputBytesAccessed(GetShapeSize(hlo->operand(0)->shape()) / 2.0f); bytes_accessed += GetShapeSize(hlo->operand(0)->shape()) / 2.0f; SetOperandBytesAccessed(0, GetShapeSize(hlo->operand(0)->shape()) / 2.0f); current_properties_[kBytesAccessedKey] = bytes_accessed; const Shape& a_shape = hlo->operand(0)->shape(); // Estimate as batch * n^3 / 3 flops. int64 elems = a_shape.dimensions(a_shape.dimensions_size() - 1); elems *= ShapeUtil::ElementsIn(a_shape); current_properties_[kFlopsKey] = elems / 3; return Status::OK(); } Status HloCostAnalysis::HandleAllReduce(const HloInstruction* crs) { // We assume 2 replicas, so that each output element is the sum of two input // elements. // // TODO(b/33004697): Compute correct cost here, taking the actual number of // replicas into account. double flops = 0.0; ShapeUtil::ForEachSubshape(crs->shape(), [&](const Shape& subshape, const ShapeIndex&) { if (subshape.IsArray()) { flops += ShapeUtil::ElementsIn(subshape); } }); current_properties_[kFlopsKey] = flops; return Status::OK(); } Status HloCostAnalysis::HandleAllToAll(const HloInstruction* hlo) { return Status::OK(); } Status HloCostAnalysis::HandleCollectivePermute(const HloInstruction* /*hlo*/) { return Status::OK(); } Status HloCostAnalysis::HandlePartitionId(const HloInstruction* /*hlo*/) { return Status::OK(); } Status HloCostAnalysis::HandleReplicaId(const HloInstruction* /*hlo*/) { return Status::OK(); } Status HloCostAnalysis::HandleRng(const HloInstruction* random) { // TODO(b/26346211): Implement better estimates for the RNG cost, since the // cost changes with the implementation and the distribution. For now, assume // the cost of each RNG is same as a transcendental operation. 
current_properties_[kTranscendentalsKey] = ShapeUtil::ElementsIn(random->shape()); return Status::OK(); } Status HloCostAnalysis::HandleRngBitGenerator(const HloInstruction* random) { // TODO(b/26346211): Implement better estimates for the RNG cost, since the // cost changes with the implementation and the distribution. For now, assume // the cost of each RNG is same as a transcendental operation. current_properties_[kTranscendentalsKey] = ShapeUtil::ElementsInRecursive(random->shape()); return Status::OK(); } Status HloCostAnalysis::HandleRngGetAndUpdateState( const HloInstruction* random) { return Status::OK(); } Status HloCostAnalysis::HandleFusion(const HloInstruction* fusion) { if (fusion->IsCustomFusion()) { for (const HloInstruction* hlo : fusion->fused_instructions_computation()->instructions()) { if (hlo->opcode() == HloOpcode::kGather) { return HandleGather(hlo); } if (hlo->opcode() == HloOpcode::kScatter) { return HandleScatter(hlo); } } } TF_ASSIGN_OR_RETURN( current_properties_, ProcessSubcomputation(fusion->fused_instructions_computation())); // Fusion nodes that produce a tuple also produce the entries in the tuple. // Ignore the memory accessed inside fused ops, since fusion is supposed to // prevent intermediate data from touching slow memory. 
current_properties_[kBytesAccessedKey] = 0; ShapeUtil::ForEachSubshape( fusion->shape(), [this, fusion](const Shape& subshape, const ShapeIndex& shape_index) { if (!subshape.IsArray()) { return; } if (shape_index.empty()) { if (fusion->fused_expression_root()->opcode() == HloOpcode::kDynamicUpdateSlice) { int64 size = GetShapeSize( fusion->fused_expression_root()->operand(1)->shape()); current_properties_[kBytesAccessedKey] += size; SetOutputBytesAccessed(shape_index, size); return; } } else if (shape_index.size() == 1) { if (fusion->fused_expression_root()->opcode() == HloOpcode::kTuple && fusion->fused_expression_root() ->operand(shape_index[0]) ->opcode() == HloOpcode::kDynamicUpdateSlice) { int64 size = GetShapeSize(fusion->fused_expression_root() ->operand(shape_index[0]) ->operand(1) ->shape()); current_properties_[kBytesAccessedKey] += size; SetOutputBytesAccessed(shape_index, size); return; } } current_properties_[kBytesAccessedKey] += GetShapeSize(subshape); SetOutputBytesAccessed(shape_index, GetShapeSize(subshape)); }); if (fusion->shape().IsTuple()) { // Propagate and accumulate the output tuple bytes from the tuple subshapes. // This ensures we have the correct output bytes accessed for the shape // index // {}. 
std::function<float(const Shape&, const ShapeIndex&)> propagate_output_size_to_parent; propagate_output_size_to_parent = [&](const Shape& shape, const ShapeIndex& shape_index) { auto output_bytes_it = current_properties_.find(GetOutputBytesAccessedKey(shape_index)); if (output_bytes_it != current_properties_.end()) { return output_bytes_it->second; } float bytes_accessed = 0; for (int i = 0; i < shape.tuple_shapes_size(); ++i) { const Shape& subshape = shape.tuple_shapes(i); ShapeIndex subshape_index(shape_index); subshape_index.push_back(i); bytes_accessed += propagate_output_size_to_parent(subshape, subshape_index); } SetOutputBytesAccessed(shape_index, bytes_accessed); return bytes_accessed; }; current_properties_.erase( current_properties_.find(GetOutputBytesAccessedKey())); propagate_output_size_to_parent(fusion->shape(), {}); } for (int64 i = 0; i < fusion->fused_parameters().size(); ++i) { const HloInstruction* operand = fusion->fused_parameter(i); int64 size = FusionParameterReadBytes(operand); current_properties_[kBytesAccessedKey] += size; SetOperandBytesAccessed(i, size); } return Status::OK(); } Status HloCostAnalysis::HandleCall(const HloInstruction* call) { TF_ASSIGN_OR_RETURN(current_properties_, ProcessSubcomputation(call->to_apply())); current_should_compute_bottleneck_time_ = false; return Status::OK(); } Status HloCostAnalysis::HandleCustomCall(const HloInstruction* custom_call) { // Mark applicable fields as "unknown", since we don't know what CustomCall // does. This is better than returning an error, which would stop iteration, // and therefore would prevent us from getting *any* stats for a computation // which contains a CustomCall. 
current_properties_[kOptimalSecondsKey] = -1; current_properties_[kBytesAccessedKey] = -1; SetOutputBytesAccessed(-1); for (int i = 0; i < custom_call->operand_count(); ++i) { SetOperandBytesAccessed(i, -1); } current_properties_[kFlopsKey] = -1; current_should_compute_bottleneck_time_ = false; return Status::OK(); } Status HloCostAnalysis::HandleSort(const HloInstruction* sort) { // This assumes a comparison based N*log(N) algorithm. As for all ops, the // actual properties of the op depend on the backend implementation. int64 elements = ShapeUtil::ElementsIn(sort->operand(0)->shape()); current_properties_[kFlopsKey] = elements * tensorflow::Log2Ceiling(elements); return Status::OK(); } Status HloCostAnalysis::HandleWhile(const HloInstruction* xla_while) { // Since the number of iterations of the while node will not always be // something that we can statically analyze, we cannot precisely compute the // cost of a while node. For now compute the cost of a single iteration. TF_ASSIGN_OR_RETURN(const Properties body_properties, ProcessSubcomputation(xla_while->while_body())); TF_ASSIGN_OR_RETURN(const Properties condition_properties, ProcessSubcomputation(xla_while->while_condition())); current_properties_.clear(); for (const auto& property : body_properties) { current_properties_[property.first] += property.second; } for (const auto& property : condition_properties) { current_properties_[property.first] += property.second; } current_should_compute_bottleneck_time_ = false; return Status::OK(); } Status HloCostAnalysis::HandleConditional(const HloInstruction* conditional) { // Compute the cost of the branch computations and take the maximum from those // for each property. 
TF_ASSIGN_OR_RETURN( const Properties branch0_computation_properties, ProcessSubcomputation(conditional->branch_computation(0))); current_properties_ = branch0_computation_properties; for (int j = 1; j < conditional->branch_count(); ++j) { TF_ASSIGN_OR_RETURN( const Properties branch_computation_properties, ProcessSubcomputation(conditional->branch_computation(j))); for (const auto& property : branch_computation_properties) { if (!tensorflow::gtl::InsertIfNotPresent(&current_properties_, property)) { auto& current_property = current_properties_[property.first]; current_property = std::max(current_property, property.second); } } } current_should_compute_bottleneck_time_ = false; return Status::OK(); } Status HloCostAnalysis::HandleGather(const HloInstruction* gather) { // Gather doesn't read the whole input buffer, it's equivalent to a copy the // size of the output shape and a read of the gather indices. int64 output_size = GetShapeSize(gather->shape()); current_properties_[kBytesAccessedKey] = output_size * 2 + GetShapeSize(gather->operand(1)->shape()); SetOperandBytesAccessed(0, output_size); SetOperandBytesAccessed(1, GetShapeSize(gather->operand(1)->shape())); SetOutputBytesAccessed(output_size); // Gather does not issue any flops. return Status::OK(); } Status HloCostAnalysis::HandleScatter(const HloInstruction* scatter) { // Scatter accesses the equivalent of 3 update shapes (input, output, and // updates), and the scatter indices. 
int64 update_size = GetShapeSize(scatter->operand(2)->shape()); current_properties_[kBytesAccessedKey] = update_size * 3 + GetShapeSize(scatter->operand(1)->shape()); SetOperandBytesAccessed(0, update_size); SetOperandBytesAccessed(1, GetShapeSize(scatter->operand(1)->shape())); SetOperandBytesAccessed(2, update_size); SetOutputBytesAccessed(update_size); const int64 element_count = ShapeUtil::ElementsIn(scatter->operand(2)->shape()); TF_ASSIGN_OR_RETURN(const Properties sub_properties, ProcessSubcomputation(scatter->to_apply())); for (const auto& property : sub_properties) { if (!absl::StartsWith(property.first, kBytesAccessedKey)) { current_properties_[property.first] = property.second * element_count; } } return Status::OK(); } Status HloCostAnalysis::HandleGetDimensionSize( const HloInstruction* /*get_size*/) { return Status::OK(); } Status HloCostAnalysis::HandleSetDimensionSize( const HloInstruction* /*set_size*/) { return Status::OK(); } Status HloCostAnalysis::FinishVisit(const HloInstruction*) { return Status::OK(); } float HloCostAnalysis::flop_count() const { return GetProperty(kFlopsKey, properties_sum_); } float HloCostAnalysis::transcendental_count() const { return GetProperty(kTranscendentalsKey, properties_sum_); } float HloCostAnalysis::bytes_accessed() const { return GetProperty(kBytesAccessedKey, properties_sum_); } float HloCostAnalysis::optimal_seconds() const { return GetProperty(kOptimalSecondsKey, properties_sum_); } int64 HloCostAnalysis::flop_count(const HloInstruction& hlo) const { return GetPropertyForHlo(hlo, kFlopsKey, hlo_properties_); } int64 HloCostAnalysis::transcendental_count(const HloInstruction& hlo) const { return GetPropertyForHlo(hlo, kTranscendentalsKey, hlo_properties_); } int64 HloCostAnalysis::bytes_accessed(const HloInstruction& hlo) const { return GetPropertyForHlo(hlo, kBytesAccessedKey, hlo_properties_); } int64 HloCostAnalysis::operand_bytes_accessed(const HloInstruction& hlo, int64 operand_num, ShapeIndex index) 
const { return GetPropertyForHlo(hlo, GetOperandBytesAccessedKey(operand_num, index), hlo_properties_); } int64 HloCostAnalysis::output_bytes_accessed(const HloInstruction& hlo, ShapeIndex index) const { return GetPropertyForHlo(hlo, GetOutputBytesAccessedKey(index), hlo_properties_); } float HloCostAnalysis::optimal_seconds(const HloInstruction& hlo) const { return GetPropertyForHlo(hlo, kOptimalSecondsKey, hlo_properties_); } StatusOr<HloCostAnalysis::Properties> HloCostAnalysis::ProcessSubcomputation( HloComputation* computation) { auto visitor = CreateNestedCostAnalysis(shape_size_, per_second_rates_); visitor->ReserveVisitStates(computation->instruction_count()); TF_RETURN_IF_ERROR(computation->Accept(visitor.get())); hlo_properties_.insert(visitor->hlo_properties_.begin(), visitor->hlo_properties_.end()); return visitor->properties(); } std::unique_ptr<HloCostAnalysis> HloCostAnalysis::CreateNestedCostAnalysis( const ShapeSizeFunction& shape_size, const Properties& per_second_rates) { return absl::WrapUnique(new HloCostAnalysis(shape_size, per_second_rates)); } void HloCostAnalysis::SetOperandBytesAccessed(int64 operand_num, float value) { current_properties_[GetOperandBytesAccessedKey(operand_num).c_str()] = value; } void HloCostAnalysis::SetOperandBytesAccessed(int64 operand_num, ShapeIndex index, float value) { current_properties_[GetOperandBytesAccessedKey(operand_num, index).c_str()] = value; } void HloCostAnalysis::SetOutputBytesAccessed(float value) { current_properties_[GetOutputBytesAccessedKey()] = value; } void HloCostAnalysis::SetOutputBytesAccessed(ShapeIndex index, float value) { current_properties_[GetOutputBytesAccessedKey(index)] = value; } /*static*/ std::string HloCostAnalysis::GetOperandBytesAccessedKey( int64 operand_num, ShapeIndex index) { return absl::StrCat(kBytesAccessedKey, " operand ", operand_num, " ", index.ToString()); } /*static*/ std::string HloCostAnalysis::GetOutputBytesAccessedKey( ShapeIndex index) { return 
absl::StrCat(kBytesAccessedKey, " output ", index.ToString()); } } // namespace xla
gunan/tensorflow
tensorflow/compiler/xla/service/hlo_cost_analysis.cc
C++
apache-2.0
41,201
/** Copyright 2009 TiTA Project, Vienna University of Technology Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE\-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package at.ac.tuwien.ifs.tita.entity; import javax.persistence.CascadeType; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.JoinColumn; import javax.persistence.ManyToOne; import javax.persistence.Table; import javax.persistence.Version; /** * TiTAUserProject represents the join column of TiTAUser and TiTAProject. Extra * Class is necessary for storing information about the target hours that should * be consumed by a Time-Consumer. 
 *
 * @author ASE Group 10
 */
@Entity
@Table(name = "USER_PROJECT")
//@SequenceGenerator(name = "seq_tita_user_project", sequenceName = "TITA_USER_PROJECT_ID_SEQ", allocationSize = 1)
public class TiTAUserProject extends BaseEntity<Long> {

    // Surrogate primary key, generated by the database (IDENTITY strategy).
    @Id
    @Column(name = "ID")
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    private Long id;

    // Number of hours the assigned user is expected to spend on the project.
    @Column(name = "TARGET_HOURS")
    private Long targetHours;

    // Owning user; persist/merge operations cascade to the user side.
    @ManyToOne(cascade = {CascadeType.PERSIST, CascadeType.MERGE})
    @JoinColumn(name = "USER_ID", referencedColumnName = "ID")
    private TiTAUser user;

    // Project this assignment belongs to (no cascading).
    @ManyToOne
    @JoinColumn(name = "TITA_PROJECT_ID", referencedColumnName = "ID")
    private TiTAProject project;

    // Optimistic-locking version column, maintained by the JPA provider.
    @SuppressWarnings("unused")
    @Column(name = "MODIFICATION_VERSION")
    @Version
    private Long modificationVersion;

    /**
     * Default constructor, required by JPA.
     */
    public TiTAUserProject() {

    }

    /**
     * Convenience constructor initializing the full association.
     *
     * @param user the user assigned to the project
     * @param project the project the user is assigned to
     * @param targetHours the hours the user should consume on the project
     */
    public TiTAUserProject(TiTAUser user, TiTAProject project, Long targetHours) {
        this.user = user;
        this.project = project;
        this.targetHours = targetHours;
    }

    /**
     * Method for getting the targetHours.
     *
     * @return the targetHours
     */
    public Long getTargetHours() {
        return targetHours;
    }

    /**
     * Method for setting the targetHours.
     *
     * @param targetHours the targetHours to set
     */
    public void setTargetHours(Long targetHours) {
        this.targetHours = targetHours;
    }

    /**
     * Method for getting the user.
     *
     * @return the user
     */
    public TiTAUser getUser() {
        return user;
    }

    /**
     * Method for setting the user.
     *
     * @param user the user to set
     */
    public void setUser(TiTAUser user) {
        this.user = user;
    }

    /**
     * Method for getting the project.
     *
     * @return the project
     */
    public TiTAProject getProject() {
        return project;
    }

    /**
     * Method for setting the project.
     *
     * @param project the project to set
     */
    public void setProject(TiTAProject project) {
        this.project = project;
    }

    /**
     * Method for getting the generated id.
     *
     * @return the id, or null if the entity has not been persisted yet
     */
    @Override
    public Long getId() {
        return id;
    }
}
tita/tita
tita-entity/src/main/java/at/ac/tuwien/ifs/tita/entity/TiTAUserProject.java
Java
apache-2.0
3,378
package org.morozko.java.mod.ws.manager.config; import java.io.Serializable; import java.util.HashMap; import java.util.Map; import org.morozko.java.core.cfg.ConfigException; import org.morozko.java.core.cfg.helpers.XMLConfigurableObject; import org.w3c.dom.Element; public class ModuleConfig extends XMLConfigurableObject implements Serializable { /** * */ private static final long serialVersionUID = -676547739796531218L; private String name; private ContextConfig context; public ContextConfig getContext() { return context; } public void setContext(ContextConfig context) { this.context = context; } public ModuleConfig() { this.operationMap = new HashMap<String, OperationConfig>(); } public ModuleConfig(String name) { this(); this.setName( name ); } public String getName() { return name; } public void setName(String name) { this.name = name; } public Map<String, OperationConfig> getOperationMap() { return operationMap; } public void setOperationMap(Map<String, OperationConfig> operationMap) { this.operationMap = operationMap; } private Map<String, OperationConfig> operationMap; @Override public void configure(Element tag) throws ConfigException { } }
fugeritaetas/morozko-lib
java14-morozko/org.morozko.java.mod.ws/src/org/morozko/java/mod/ws/manager/config/ModuleConfig.java
Java
apache-2.0
1,301
package org.hl7.fhir.instance.model.valuesets; /* Copyright (c) 2011+, HL7, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of HL7 nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

// Generated on Sat, Aug 22, 2015 23:00-0400 for FHIR v0.5.0

public enum DeviceAction {

    /**
     * The device was implanted in the patient during the procedure
     */
    IMPLANTED,

    /**
     * The device was explanted from the patient during the procedure
     */
    EXPLANTED,

    /**
     * The device remains in that patient, but it's location, settings, or functionality was changed
     */
    MANIPULATED,

    /**
     * added to help the parsers
     */
    NULL;

    /**
     * Parses a wire-format code into the matching constant.
     *
     * @param codeString the code; null or empty yields null
     * @return the matching constant, or null for null/empty input
     * @throws Exception if the code is not recognised
     */
    public static DeviceAction fromCode(String codeString) throws Exception {
        if (codeString == null || "".equals(codeString)) {
            return null;
        }
        switch (codeString) {
            case "implanted":
                return IMPLANTED;
            case "explanted":
                return EXPLANTED;
            case "manipulated":
                return MANIPULATED;
            default:
                throw new Exception("Unknown DeviceAction code '" + codeString + "'");
        }
    }

    /** Returns the wire-format code for this constant ("?" for NULL). */
    public String toCode() {
        return switch (this) {
            case IMPLANTED -> "implanted";
            case EXPLANTED -> "explanted";
            case MANIPULATED -> "manipulated";
            default -> "?";
        };
    }

    /** Returns the code system URI shared by every constant. */
    public String getSystem() {
        return "http://hl7.org/fhir/device-action";
    }

    /** Returns the formal definition of this constant ("?" for NULL). */
    public String getDefinition() {
        return switch (this) {
            case IMPLANTED -> "The device was implanted in the patient during the procedure";
            case EXPLANTED -> "The device was explanted from the patient during the procedure";
            case MANIPULATED -> "The device remains in that patient, but it's location, settings, or functionality was changed";
            default -> "?";
        };
    }

    /** Returns the human-readable display name ("?" for NULL). */
    public String getDisplay() {
        return switch (this) {
            case IMPLANTED -> "Implanted";
            case EXPLANTED -> "Explanted";
            case MANIPULATED -> "Manipulated";
            default -> "?";
        };
    }

}
Nodstuff/hapi-fhir
hapi-fhir-structures-hl7org-dstu2/src/main/java/org/hl7/fhir/instance/model/valuesets/DeviceAction.java
Java
apache-2.0
3,806
using System;
using System.Linq;
using System.Collections.Generic;
using System.Text;

namespace NDatabase.CF.Sample
{
    // Simple POCO persisted by the sample. The public field names are part
    // of the stored schema, so they are kept exactly as declared.
    public class SampleClass
    {
        public string Name;
        public int Age;
    }

    class Program
    {
        // Demonstrates a store/load round trip against an NDatabase file.
        static void Main(string[] args)
        {
            // Start from a clean database file.
            NDatabase.Odb.OdbFactory.Delete("cf.ndb");

            // Persist one object, then dispose the session to flush it.
            using (var database = NDatabase.Odb.OdbFactory.Open("cf.ndb"))
            {
                var sample = new SampleClass { Age = 3, Name = "Julia" };
                database.Store<SampleClass>(sample);
            }

            // Re-open the file and read the object back.
            using (var database = NDatabase.Odb.OdbFactory.Open("cf.ndb"))
            {
                var loaded = database.QueryAndExecute<SampleClass>().GetFirst();
                Console.Write("Name: " + loaded.Name);
                Console.Write("Age: " + loaded.Age);
            }

            Console.WriteLine("Press key...");
            Console.ReadLine();
        }
    }
}
WaltChen/NDatabase
Samples/NDatabase.CF.Sample2/Program.cs
C#
apache-2.0
1,041
/*
 * Copyright 2012-2017 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.boot.web.reactive.server;

import java.util.Map;

import org.springframework.boot.web.server.WebServer;
import org.springframework.http.server.reactive.HttpHandler;

/**
 * Factory interface that can be used to create a reactive {@link WebServer}.
 *
 * @author Brian Clozel
 * @since 2.0.0
 * @see WebServer
 */
public interface ReactiveWebServerFactory {

	/**
	 * Gets a new fully configured but paused {@link WebServer} instance. Clients should
	 * not be able to connect to the returned server until {@link WebServer#start()} is
	 * called (which happens when the {@code ApplicationContext} has been fully
	 * refreshed).
	 * @param httpHandler the HTTP handler in charge of processing requests
	 * @return a fully configured but paused {@link WebServer}
	 * @see WebServer#stop()
	 */
	WebServer getWebServer(HttpHandler httpHandler);

	/**
	 * Register a map of {@link HttpHandler}s, each to a specific context path. As for
	 * the single-handler variant, the returned server is not started until
	 * {@link WebServer#start()} is called.
	 * @param handlerMap a map of context paths and the associated {@code HttpHandler}
	 * @return a fully configured but paused {@link WebServer}
	 * @see WebServer#stop()
	 */
	WebServer getWebServer(Map<String, HttpHandler> handlerMap);

}
lexandro/spring-boot
spring-boot/src/main/java/org/springframework/boot/web/reactive/server/ReactiveWebServerFactory.java
Java
apache-2.0
1,816
using BehaveAsSakura.Attributes;

namespace BehaveAsSakura.Tasks
{
    /// <summary>
    /// Description of the "Until Failure" decorator task. It reuses the
    /// repeater description and only contributes a factory for the runtime
    /// task; serialized as union member 14 of <see cref="ITaskDesc"/>.
    /// </summary>
    [BehaveAsTable]
    [BehaveAsUnionInclude(typeof(ITaskDesc), 14)]
    [Task("Loop/Until Failure")]
    public class UntilFailureTaskDesc : RepeaterTaskDesc, IDecoratorTaskDesc
    {
        // Nothing beyond the base repeater description to validate.
        void ITaskDesc.Validate()
        {
        }

        // Factory hook: builds the runtime task bound to this description.
        Task ITaskDesc.CreateTask(BehaviorTree tree, Task parentTask, uint id)
        {
            return new UntilFailureTask(tree, parentTask, id, this);
        }
    }

    /// <summary>
    /// Runtime repeater whose completion condition is the child reporting
    /// <see cref="TaskResult.Failure"/> (per the RepeaterTask contract).
    /// </summary>
    class UntilFailureTask : RepeaterTask
    {
        public UntilFailureTask(BehaviorTree tree, Task parentTask, uint id, UntilFailureTaskDesc description)
            : base(tree, parentTask, id, description)
        {
        }

        // The repeat loop ends as soon as the child's result is Failure.
        protected override bool IsRepeaterCompleted(TaskResult result)
        {
            return result == TaskResult.Failure;
        }
    }
}
wuyuntao/BehaveAsSakura
BehaveAsSakura/Tasks/UntilFailureTask.cs
C#
apache-2.0
868
package main

import (
	"reflect"
	"testing"

	"github.com/soniah/gosnmp"

	"github.com/prometheus/snmp_exporter/config"
)

// TestOidToList checks the dotted-string OID to []int conversion.
func TestOidToList(t *testing.T) {
	cases := []struct {
		oid    string
		result []int
	}{
		{
			oid:    "1",
			result: []int{1},
		},
		{
			oid:    "1.2.3.4",
			result: []int{1, 2, 3, 4},
		},
	}
	for _, c := range cases {
		got := oidToList(c.oid)
		if !reflect.DeepEqual(got, c.result) {
			t.Errorf("oidToList(%v): got %v, want %v", c.oid, got, c.result)
		}
	}
}

// TestSplitOid checks splitting an OID into a fixed-size head (zero padded
// when the OID is too short) and the remaining tail.
func TestSplitOid(t *testing.T) {
	cases := []struct {
		oid        []int
		count      int
		resultHead []int
		resultTail []int
	}{
		{
			oid:        []int{1, 2, 3, 4},
			count:      2,
			resultHead: []int{1, 2},
			resultTail: []int{3, 4},
		},
		{
			oid:        []int{1, 2},
			count:      4,
			resultHead: []int{1, 2, 0, 0},
			resultTail: []int{},
		},
		{
			oid:        []int{},
			count:      2,
			resultHead: []int{0, 0},
			resultTail: []int{},
		},
	}
	for _, c := range cases {
		head, tail := splitOid(c.oid, c.count)
		if !reflect.DeepEqual(head, c.resultHead) || !reflect.DeepEqual(tail, c.resultTail) {
			t.Errorf("splitOid(%v, %d): got [%v, %v], want [%v, %v]", c.oid, c.count, head, tail, c.resultHead, c.resultTail)
		}
	}
}

// TestPduValueAsString checks stringification of SNMP PDU values for the
// various SNMP value types (ints, OIDs, IP addresses, octet strings, nil).
func TestPduValueAsString(t *testing.T) {
	cases := []struct {
		pdu    *gosnmp.SnmpPDU
		typ    string
		result string
	}{
		{
			pdu:    &gosnmp.SnmpPDU{Value: int(-1)},
			result: "-1",
		},
		{
			pdu:    &gosnmp.SnmpPDU{Value: uint(1)},
			result: "1",
		},
		{
			pdu:    &gosnmp.SnmpPDU{Value: int64(-1000000000000)},
			result: "-1000000000000",
		},
		{
			pdu:    &gosnmp.SnmpPDU{Value: ".1.2.3.4", Type: gosnmp.ObjectIdentifier},
			result: "1.2.3.4",
		},
		{
			pdu:    &gosnmp.SnmpPDU{Value: "1.2.3.4", Type: gosnmp.IPAddress},
			result: "1.2.3.4",
		},
		{
			pdu:    &gosnmp.SnmpPDU{Value: []byte{}},
			result: "",
		},
		{
			pdu:    &gosnmp.SnmpPDU{Value: []byte{65, 66}},
			typ:    "DisplayString",
			result: "AB",
		},
		{
			pdu:    &gosnmp.SnmpPDU{Value: []byte{127, 128, 255, 0}},
			result: "0x7F80FF00",
		},
		{
			pdu:    &gosnmp.SnmpPDU{Value: []byte{127, 128, 255, 0}},
			typ:    "OctetString",
			result: "0x7F80FF00",
		},
		{
			pdu:    &gosnmp.SnmpPDU{Value: []byte{1, 2, 3, 4}},
			typ:    "IpAddr",
			result: "1.2.3.4",
		},
		{
			pdu:    &gosnmp.SnmpPDU{Value: nil},
			result: "",
		},
	}
	for _, c := range cases {
		got := pduValueAsString(c.pdu, c.typ)
		if !reflect.DeepEqual(got, c.result) {
			t.Errorf("pduValueAsString(%v, %q): got %q, want %q", c.pdu, c.typ, got, c.result)
		}
	}
}

// TestIndexesToLabels checks decoding of OID index components into metric
// labels, including lookups against other OID subtrees and the various
// index types (gauge, OctetString, InetAddress, IpAddr, InetAddressType...).
func TestIndexesToLabels(t *testing.T) {
	cases := []struct {
		oid      []int
		metric   config.Metric
		oidToPdu map[string]gosnmp.SnmpPDU
		result   map[string]string
	}{
		{
			oid:      []int{},
			metric:   config.Metric{},
			oidToPdu: map[string]gosnmp.SnmpPDU{},
			result:   map[string]string{},
		},
		{
			oid:      []int{4},
			metric:   config.Metric{Indexes: []*config.Index{{Labelname: "l", Type: "gauge"}}},
			oidToPdu: map[string]gosnmp.SnmpPDU{},
			result:   map[string]string{"l": "4"},
		},
		{
			oid: []int{3, 4},
			metric: config.Metric{
				Indexes: []*config.Index{{Labelname: "a", Type: "gauge"}, {Labelname: "b", Type: "gauge"}},
				Lookups: []*config.Lookup{{Labels: []string{"a", "b"}, Labelname: "l", Oid: "1.2"}},
			},
			oidToPdu: map[string]gosnmp.SnmpPDU{"1.2.3.4": gosnmp.SnmpPDU{Value: "eth0"}},
			result:   map[string]string{"a": "3", "b": "4", "l": "eth0"},
		},
		{
			oid: []int{4},
			metric: config.Metric{
				Indexes: []*config.Index{{Labelname: "l", Type: "gauge"}},
				Lookups: []*config.Lookup{{Labels: []string{"l"}, Labelname: "l", Oid: "1.2.3"}},
			},
			oidToPdu: map[string]gosnmp.SnmpPDU{"1.2.3.4": gosnmp.SnmpPDU{Value: "eth0"}},
			result:   map[string]string{"l": "eth0"},
		},
		{
			oid: []int{4},
			metric: config.Metric{
				Indexes: []*config.Index{{Labelname: "l", Type: "gauge"}},
				Lookups: []*config.Lookup{{Labels: []string{"l"}, Labelname: "l", Oid: "1.2.3", Type: "IpAddr"}},
			},
			oidToPdu: map[string]gosnmp.SnmpPDU{"1.2.3.4": gosnmp.SnmpPDU{Value: []byte{5, 6, 7, 8}}},
			result:   map[string]string{"l": "5.6.7.8"},
		},
		{
			oid: []int{4},
			metric: config.Metric{
				Indexes: []*config.Index{{Labelname: "l", Type: "gauge"}},
				Lookups: []*config.Lookup{{Labels: []string{"l"}, Labelname: "l", Oid: "1.2.3"}},
			},
			oidToPdu: map[string]gosnmp.SnmpPDU{"1.2.3.4": gosnmp.SnmpPDU{Value: []byte{5, 6, 7, 8}}},
			result:   map[string]string{"l": "0x05060708"},
		},
		{
			oid: []int{4},
			metric: config.Metric{
				Indexes: []*config.Index{{Labelname: "l", Type: "gauge"}},
				Lookups: []*config.Lookup{{Labels: []string{"l"}, Labelname: "l", Oid: "1.2.3"}},
			},
			oidToPdu: map[string]gosnmp.SnmpPDU{},
			result:   map[string]string{"l": ""},
		},
		{
			oid:      []int{},
			metric:   config.Metric{Indexes: []*config.Index{{Labelname: "l", Type: "gauge"}}},
			oidToPdu: map[string]gosnmp.SnmpPDU{},
			result:   map[string]string{"l": "0"},
		},
		{
			oid:      []int{1, 255, 0, 0, 0, 16},
			metric:   config.Metric{Indexes: []*config.Index{{Labelname: "l", Type: "PhysAddress48"}}},
			oidToPdu: map[string]gosnmp.SnmpPDU{},
			result:   map[string]string{"l": "01:FF:00:00:00:10"},
		},
		{
			oid:      []int{3, 65, 32, 255},
			metric:   config.Metric{Indexes: []*config.Index{{Labelname: "l", Type: "OctetString"}}},
			oidToPdu: map[string]gosnmp.SnmpPDU{},
			result:   map[string]string{"l": "0x4120FF"},
		},
		{
			oid: []int{3, 65, 32, 255},
			metric: config.Metric{
				Indexes: []*config.Index{{Labelname: "l", Type: "OctetString"}},
				Lookups: []*config.Lookup{{Labels: []string{"l"}, Labelname: "l", Oid: "1"}},
			},
			oidToPdu: map[string]gosnmp.SnmpPDU{"1.3.65.32.255": gosnmp.SnmpPDU{Value: "octet"}},
			result:   map[string]string{"l": "octet"},
		},
		{
			oid:      []int{1, 4, 192, 168, 1, 2},
			metric:   config.Metric{Indexes: []*config.Index{{Labelname: "l", Type: "InetAddress"}}},
			oidToPdu: map[string]gosnmp.SnmpPDU{},
			result:   map[string]string{"l": "192.168.1.2"},
		},
		{
			oid: []int{1, 4, 192, 168, 1, 2, 7},
			metric: config.Metric{
				Indexes: []*config.Index{{Labelname: "l", Type: "InetAddress"}, {Labelname: "b", Type: "gauge"}},
				Lookups: []*config.Lookup{{Labels: []string{"l"}, Labelname: "l", Oid: "3"}},
			},
			oidToPdu: map[string]gosnmp.SnmpPDU{"3.1.4.192.168.1.2": gosnmp.SnmpPDU{Value: "ipv4"}},
			result:   map[string]string{"l": "ipv4", "b": "7"},
		},
		{
			oid:      []int{2, 16, 42, 6, 29, 128, 0, 1, 0, 3, 0, 0, 0, 0, 0, 1, 1, 52},
			metric:   config.Metric{Indexes: []*config.Index{{Labelname: "l", Type: "InetAddress"}}},
			oidToPdu: map[string]gosnmp.SnmpPDU{},
			result:   map[string]string{"l": "2A06:1D80:0001:0003:0000:0000:0001:0134"},
		},
		{
			oid:      []int{3, 1, 9},
			metric:   config.Metric{Indexes: []*config.Index{{Labelname: "l", Type: "InetAddress"}}},
			oidToPdu: map[string]gosnmp.SnmpPDU{},
			result:   map[string]string{"l": "0x09"},
		},
		{
			oid: []int{2, 16, 42, 6, 29, 128, 0, 1, 0, 3, 0, 0, 0, 0, 0, 1, 1, 52, 7},
			metric: config.Metric{
				Indexes: []*config.Index{{Labelname: "l", Type: "InetAddress"}, {Labelname: "b", Type: "gauge"}},
				Lookups: []*config.Lookup{{Labels: []string{"l"}, Labelname: "l", Oid: "3"}},
			},
			oidToPdu: map[string]gosnmp.SnmpPDU{"3.2.16.42.6.29.128.0.1.0.3.0.0.0.0.0.1.1.52": gosnmp.SnmpPDU{Value: "ipv6"}},
			result:   map[string]string{"l": "ipv6", "b": "7"},
		},
		{
			oid:      []int{192, 168, 1, 2},
			metric:   config.Metric{Indexes: []*config.Index{{Labelname: "l", Type: "IpAddr"}}},
			oidToPdu: map[string]gosnmp.SnmpPDU{},
			result:   map[string]string{"l": "192.168.1.2"},
		},
		{
			oid: []int{0, 1, 2, 3, 4, 16, 42},
			metric: config.Metric{
				Indexes: []*config.Index{
					{Labelname: "a", Type: "InetAddressType"},
					{Labelname: "b", Type: "InetAddressType"},
					{Labelname: "c", Type: "InetAddressType"},
					{Labelname: "d", Type: "InetAddressType"},
					{Labelname: "e", Type: "InetAddressType"},
					{Labelname: "f", Type: "InetAddressType"},
					{Labelname: "g", Type: "InetAddressType"},
				},
			},
			oidToPdu: map[string]gosnmp.SnmpPDU{},
			result: map[string]string{
				"a": "unknown",
				"b": "ipv4",
				"c": "ipv6",
				"d": "ipv4z",
				"e": "ipv6z",
				"f": "dns",
				"g": "42",
			},
		},
	}
	for _, c := range cases {
		got := indexesToLabels(c.oid, &c.metric, c.oidToPdu)
		if !reflect.DeepEqual(got, c.result) {
			// Fixed copy-paste bug: the failure message previously named
			// oidToList instead of the function actually under test.
			t.Errorf("indexesToLabels(%v, %v, %v): got %v, want %v", c.oid, c.metric, c.oidToPdu, got, c.result)
		}
	}
}
Theci/snmp_exporter
collector_test.go
GO
apache-2.0
8,632
// Clang '-verify' regression test for OpenMP: checks the diagnostics emitted
// for the 'private' (and 'allocate') clauses on '#pragma omp target'.  The
// inline annotations form the test oracle; each one must stay on the same
// line as the diagnostic it checks, so do not reflow this file.
// RUN: %clang_cc1 -verify -fopenmp %s
// RUN: %clang_cc1 -verify -fopenmp-simd %s

// Allocator handles mirror the OpenMP runtime's predefined allocators.
typedef void **omp_allocator_handle_t;
extern const omp_allocator_handle_t omp_default_mem_alloc;
extern const omp_allocator_handle_t omp_large_cap_mem_alloc;
extern const omp_allocator_handle_t omp_const_mem_alloc;
extern const omp_allocator_handle_t omp_high_bw_mem_alloc;
extern const omp_allocator_handle_t omp_low_lat_mem_alloc;
extern const omp_allocator_handle_t omp_cgroup_mem_alloc;
extern const omp_allocator_handle_t omp_pteam_mem_alloc;
extern const omp_allocator_handle_t omp_thread_mem_alloc;

// Incomplete type: privatizing it must be rejected.
struct S1; // expected-note 2 {{declared here}} expected-note 2 {{forward declaration of 'S1'}}
extern S1 a;
class S2 {
  mutable int a;

public:
  S2() : a(0) {}
};
const S2 b;
const S2 ba[5];
class S3 {
  int a;

public:
  S3() : a(0) {}
};
const S3 ca[5];
// S4's default constructor is private: privatization needs it and must fail.
class S4 {
  int a;
  S4(); // expected-note {{implicitly declared private here}}

public:
  S4(int v) : a(v) {
#pragma omp target private(a) private(this->a)
    for (int k = 0; k < v; ++k)
      ++this->a;
  }
};
class S5 {
  int a;
  S5() : a(0) {} // expected-note {{implicitly declared private here}}

public:
  S5(int v) : a(v) {}
  S5 &operator=(S5 &s) {
#pragma omp target private(a) private(this->a) private(s.a) // expected-error {{expected variable name or data member of current class}}
    for (int k = 0; k < s.a; ++k) // expected-warning {{Non-trivial type 'S5' is mapped, only trivial types are guaranteed to be mapped correctly}}
      ++s.a;
    return *this;
  }
};

template <typename T>
class S6 {
public:
  T a;
  S6() : a(0) {}
  S6(T v) : a(v) {
#pragma omp target private(a) private(this->a) allocate(omp_thread_mem_alloc: a) // expected-warning {{allocator with the 'thread' trait access has unspecified behavior on 'target' directive}}
    for (int k = 0; k < v; ++k)
      ++this->a;
  }
  S6 &operator=(S6 &s) {
#pragma omp target private(a) private(this->a) private(s.a) // expected-error {{expected variable name or data member of current class}}
    for (int k = 0; k < s.a; ++k)
      ++s.a;
    return *this;
  }
};

template <typename T>
class S7 : public T {
  T a;
  S7() : a(0) {}

public:
  S7(T v) : a(v) {
#pragma omp target private(a) private(this->a) private(T::a)
    for (int k = 0; k < a.a; ++k)
      ++this->a.a;
  }
  S7 &operator=(S7 &s) {
#pragma omp target private(a) private(this->a) private(s.a) private(s.T::a) // expected-error 2 {{expected variable name or data member of current class}}
    for (int k = 0; k < s.a.a; ++k)
      ++s.a.a;
    return *this;
  }
};

S3 h;
#pragma omp threadprivate(h) // expected-note 2 {{defined as threadprivate or thread local}}

// Template version of the clause checks; instantiated from main() below.
template <class I, class C>
int foomain(I argc, C **argv) {
  I e(4);
  I g(5);
  int i;
  int &j = i;
#pragma omp target private // expected-error {{expected '(' after 'private'}}
  {}
#pragma omp target private( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  {}
#pragma omp target private() // expected-error {{expected expression}}
  {}
#pragma omp target private(argc // expected-error {{expected ')'}} expected-note {{to match this '('}}
  {}
#pragma omp target private(argc, // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  {}
#pragma omp target private(argc > 0 ? argv[1] : argv[2]) // expected-error {{expected variable name}}
  {}
#pragma omp target private(argc) allocate , allocate(, allocate(omp_default , allocate(omp_default_mem_alloc, allocate(omp_default_mem_alloc:, allocate(omp_default_mem_alloc: argc, allocate(omp_default_mem_alloc: argv), allocate(argv) // expected-error {{expected '(' after 'allocate'}} expected-error 2 {{expected expression}} expected-error 2 {{expected ')'}} expected-error {{use of undeclared identifier 'omp_default'}} expected-note 2 {{to match this '('}}
  {}
#pragma omp target private(S1) // expected-error {{'S1' does not refer to a value}}
  {}
#pragma omp target private(a, b) // expected-error {{private variable with incomplete type 'S1'}}
  {}
#pragma omp target private(argv[1]) // expected-error {{expected variable name}}
  {}
#pragma omp target private(e, g)
  {}
#pragma omp target private(h) // expected-error {{threadprivate or thread local variable cannot be private}}
  {}
#pragma omp target shared(i) // expected-error {{unexpected OpenMP clause 'shared' in directive '#pragma omp target'}}
#pragma omp parallel
  {
    int v = 0;
    int i;
  }
#pragma omp parallel shared(i)
#pragma omp parallel private(i)
#pragma omp target private(j)
  {}
#pragma omp target private(i)
  {}
  return 0;
}

void bar(S4 a[2]) {
#pragma omp parallel
#pragma omp target private(a)
  {}
}

namespace A {
double x;
#pragma omp threadprivate(x) // expected-note {{defined as threadprivate or thread local}}
}
namespace B {
using A::x;
}

// Non-template duplicate of the checks, plus instantiation triggers.
int main(int argc, char **argv) {
  S4 e(4);
  S5 g(5);
  S6<float> s6(0.0) , s6_0(1.0); // expected-note {{in instantiation of member function 'S6<float>::S6' requested here}}
  S7<S6<float> > s7(0.0) , s7_0(1.0);
  int i;
  int &j = i;
#pragma omp target private // expected-error {{expected '(' after 'private'}}
  {}
#pragma omp target private( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  {}
#pragma omp target private() // expected-error {{expected expression}}
  {}
#pragma omp target private(argc // expected-error {{expected ')'}} expected-note {{to match this '('}}
  {}
#pragma omp target private(argc, // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  {}
#pragma omp target private(argc > 0 ? argv[1] : argv[2]) // expected-error {{expected variable name}}
  {}
#pragma omp target private(argc)
  {}
#pragma omp target private(S1) // expected-error {{'S1' does not refer to a value}}
  {}
#pragma omp target private(a, b) // expected-error {{private variable with incomplete type 'S1'}}
  {}
#pragma omp target private(argv[1]) // expected-error {{expected variable name}}
  {}
#pragma omp target private(e, g) // expected-error {{calling a private constructor of class 'S4'}} expected-error {{calling a private constructor of class 'S5'}}
  {}
#pragma omp target private(h) // expected-error {{threadprivate or thread local variable cannot be private}}
  {}
#pragma omp target private(B::x) // expected-error {{threadprivate or thread local variable cannot be private}}
  {}
#pragma omp target shared(i) // expected-error {{unexpected OpenMP clause 'shared' in directive '#pragma omp target'}}
#pragma omp parallel
  {
    int i;
  }
#pragma omp parallel shared(i)
#pragma omp parallel private(i)
#pragma omp target private(j)
  {}
#pragma omp target private(i)
  {}
  static int si;
#pragma omp target private(si) // OK
  {}
#pragma omp target map(i) private(i) // expected-error {{private variable cannot be in a map clause in '#pragma omp target' directive}}
  {}
  s6 = s6_0; // expected-note {{in instantiation of member function 'S6<float>::operator=' requested here}}
  s7 = s7_0; // expected-note {{in instantiation of member function 'S7<S6<float> >::operator=' requested here}}
  return foomain(argc, argv); // expected-note {{in instantiation of function template specialization 'foomain<int, char>' requested here}}
}
apple/swift-clang
test/OpenMP/target_private_messages.cpp
C++
apache-2.0
7,306
package org.txazo.tool.util.async; /** * TaskConfig */ public class TaskConfig { private String id; private int timeout; private Object defaultResult; public TaskConfig(String id, int timeout, Object defaultResult) { this.id = id; this.timeout = timeout; this.defaultResult = defaultResult; } public String getId() { return id; } public int getTimeout() { return timeout; } public Object getDefaultResult() { return defaultResult; } }
txazo/java
src/main/java/org/txazo/tool/util/async/TaskConfig.java
Java
apache-2.0
536
// HAProxyManager: watches frontend/backend data collections, renders the
// haproxy config from a handlebars template, writes it to disk when it
// changes, and (re)starts/reloads haproxy with a throttle.
var handlebars = require('handlebars')
  , HAProxy = require('haproxy')
  , fs = require('fs')
  , resolve = require('path').resolve
  , util = require('util')
  , f = util.format
  , assert = require('assert')
  , EventEmitter = require('events').EventEmitter
  , debounce = require('debounce')
  , deepEqual = require('deep-equal')
  ;

/**
 * @param {Object} opts
 *   - data    (required): exposes `frontends` and `backends` collections that
 *     emit 'changes' and support toJSON().
 *   - haproxy (required): haproxy control object with `socket`, `running`,
 *     `reload`, `start`.
 *   - templateFile, haproxyCfgPath, watchConfigFile, debounceRate, log: optional.
 *     NOTE(review): watchConfigFile is stored but not used in this chunk — confirm.
 */
var HAProxyManager = module.exports = function HAProxyManager (opts) {
  if (typeof opts !== 'object') opts = {};
  assert(opts.data, 'opts.data required');
  assert(opts.haproxy, 'opts.haproxy required');
  this.config = {};
  this.config.templateFile = resolve(opts.templateFile || __dirname + '/../default.haproxycfg.tmpl');
  this.config.haproxyCfgPath = resolve(opts.haproxyCfgPath || '/etc/haproxy/haproxy.cfg');
  this.config.watchConfigFile = (opts.watchConfigFile !== undefined) ? opts.watchConfigFile : true;
  this.config.debounceRate = opts.debounceRate || 2000;
  this.log = (typeof opts.log === 'function') ? opts.log : function (){};
  this.throttleTimeout = null
  this.latestConfig = "";
  // Missing template is only logged, not fatal here; handlebars.compile below
  // will still throw when the read fails.
  if (!fs.existsSync(this.config.templateFile)) {
    this.log('error', f("template file %s doesn't exists!", this.config.templateFile));
  }
  this.template = handlebars.compile(fs.readFileSync(this.config.templateFile, 'utf-8'));
  // Debounce so bursts of data changes produce a single config write.
  this.writeConfigDebounced = debounce(this.writeConfig.bind(this), this.config.debounceRate, false);
  this.data = opts.data;
  this.haproxy = opts.haproxy;
  this.data.frontends.on( 'changes', this._changeFrontEnd.bind(this) );
  this.data.backends.on ( 'changes', this._changeBackEnd.bind(this) );
  // Render once on startup so the config reflects the current data.
  this.writeConfigDebounced();
};

util.inherits(HAProxyManager, EventEmitter);

/**
 * Render the template against the current data; if the rendered text differs
 * from the last render, write it to disk, emit 'configChanged' and schedule a
 * throttled haproxy reload.  Any pending reload timer is cancelled first so
 * only the newest config triggers a reload.
 */
HAProxyManager.prototype.writeConfig = function() {
  var data = {
    frontends: this.data.frontends.toJSON(),
    backends: this.data.backends.toJSON(),
    haproxySocketPath: this.haproxy.socket
  };

  var previousConfig = this.latestConfig;
  this.latestConfig = this.template(data);

  // only write the config and reload if it actually changed
  if (!deepEqual(previousConfig, this.latestConfig)) {
    clearTimeout(this.throttleTimeout);
    fs.writeFileSync(this.config.haproxyCfgPath, this.latestConfig , 'utf-8');
    this.emit('configChanged');
    this.throttledReload();
  }
};

/**
 * Schedule a reload 500ms out; the timer handle is kept so writeConfig can
 * cancel a pending reload when a newer config supersedes it.
 */
HAProxyManager.prototype.throttledReload = function() {
  this.throttleTimeout = null;
  this.log('info', 'HaproxyManager.throttledReload', 'Scheduling reload of haproxy');
  this.throttleTimeout = setTimeout((function() {
    this.reload();
  }).bind(this), 500);
}

/**
 * Reload haproxy if it is running, otherwise start it.  Errors are logged,
 * never thrown.
 */
HAProxyManager.prototype.reload = function () {
  var self = this;
  self.haproxy.running(function (err, running) {
    if (err) return self.log('error', 'HaproxyManager.reload', { error: String(err) });

    function handleRestart (err) {
      if (err) return self.log('error', 'HaproxyManager.reload', { error: String(err) });
    }

    if (running) {
      self.log('info', 'HaproxyManager.reload', 'Reloading haproxy config');
      self.haproxy.reload(handleRestart);
    } else {
      self.log('info', 'HaproxyManager.reload', 'Starting haproxy');
      self.haproxy.start(handleRestart);
    }
  });
};

// Change handlers: log and fall through to the debounced config write.
HAProxyManager.prototype._changeFrontEnd = function(row, changed) {
  this.log('debug', 'HaproxyManager._changeFrontEnd', changed);
  this.writeConfigDebounced();
};

HAProxyManager.prototype._changeBackEnd = function(row, changed) {
  this.log('debug', 'HaproxyManager_changeBackEnd', changed);
  this.writeConfigDebounced();
};

// TODO refactor all these helpers, reconsider business logic.

// template helper for outputing FrontEnd acl rules
// NOTE(review): frontendHelper below inlines the same logic; this helper
// looks unused by the default template — confirm before removing.
handlebars.registerHelper('aclRule', function (rule) {
  // Random suffix keeps acl names unique across rules of the same type.
  var rand = Math.random().toString(36).substring(3);
  var name = rule.type + '_' + rand;
  if (rule.type === 'path' || rule.type === 'url') {
    return util.format("acl %s %s %s\nuse_backend %s if %s\n", name, rule.operation, rule.value, rule.backend, name);
  }
  else if (rule.type === 'header') {
    return util.format("acl %s %s(%s) %s\nuse_backend %s if %s\n", name, rule.operation, rule.header, rule.value, rule.backend, name);
  }
});

// Renders the body of a haproxy "frontend" section from a frontend object.
handlebars.registerHelper('frontendHelper', function (frontend) {
  var output = [];
  var hasRules = frontend.rules && frontend.rules.length > 0;
  var hasNatives = frontend.natives && frontend.natives.length > 0;

  output.push("bind " + frontend.bind);
  output.push("mode " + frontend.mode);
  output.push("default_backend " + frontend.backend);

  // http only default options
  if (frontend.mode === 'http') {
    output.push("option httplog");

    // The default keep-alive behavior is to use keep-alive if clients and
    // backends support it. However, if haproxy will only process rules when
    // a connection is first established so if any rules are used then server-close
    // should be specified at least and haproxy will let clients use keep-alive
    // to haproxy but close the backend connections each time.
    //
    // If there are any rules, the default behavior is to use http-server-close
    // and http-pretend-keepalive
    if (frontend.keepalive === 'server-close') {
      output.push("option http-server-close");
      output.push("option http-pretend-keepalive");
    }
    else if (frontend.keepalive === 'close'){
      output.push("option forceclose");
    }
    // the default if there are rules is to use server close
    else if (hasRules) {
      output.push("option http-server-close");
      output.push("option http-pretend-keepalive");
    }
  }

  if (hasRules) {
    frontend.rules.forEach(function (rule) {
      var rand = Math.random().toString(36).substring(3);
      var name = rule.type + '_' + rand;
      if (rule.type === 'path' || rule.type === 'url') {
        output.push(util.format("acl %s %s %s\nuse_backend %s if %s", name, rule.operation, rule.value, rule.backend, name));
      }
      else if (rule.type === 'header') {
        output.push(util.format("acl %s %s(%s) %s\nuse_backend %s if %s", name, rule.operation, rule.header, rule.value, rule.backend, name));
      }
    });
  }

  if (hasNatives) {
    frontend.natives.forEach(function (native) {
      output.push(native);
    });
  }

  return output.join('\n ');
});

// helper to output http check and servers block
handlebars.registerHelper('backendHelper', function (backend) {
  var host = backend.host;
  var health = backend.health;
  var members = backend.members;
  var output = [];
  var hasNatives = backend.natives && backend.natives.length > 0;

  // output mode and balance options
  output.push("mode " + backend.mode);
  output.push("balance " + backend.balance);

  // host header propagation
  if (backend.host) {
    output.push("reqirep ^Host:\\ .* Host:\\ " + backend.host);
  }

  // option httpchk
  if (backend.mode === 'http' && health) {
    var httpVersion = (health.httpVersion === 'HTTP/1.1') ? ('HTTP/1.1\\r\\nHost:\\ ' + backend.host) : health.httpVersion;
    output.push(util.format("option httpchk %s %s %s", health.method, health.uri, httpVersion));
  }

  if (hasNatives) {
    backend.natives.forEach(function (native) {
      output.push(native);
    });
  }

  if (members) {
    // server lines for each member
    members.forEach(function (member) {
      var name = util.format("%s_%s:%s", backend.key, member.host, member.port);
      var interval = (health) ? health.interval : 2000;
      output.push(util.format("server %s %s:%s check inter %s", name, member.host, member.port, interval));
    });
  }

  return output.join('\n ');
});
apuckey/thalassa-aqueduct
lib/HaproxyManager.js
JavaScript
apache-2.0
7,597
# == Schema Information
#
# Table name: transactions
#
#  amount     :decimal(12, 2)   not null
#  created_at :datetime         not null
#  date       :date             not null
#  id         :integer          not null, primary key
#  memo       :string           not null
#  source_id  :integer
#  tags       :string
#  target_id  :integer
#  type       :string           not null
#  updated_at :datetime         not null
#  user_id    :integer
#
# Indexes
#
#  index_transactions_on_type     (type)
#  index_transactions_on_user_id  (user_id)
#

# A transfer moves money between two partitions (single-table inheritance
# on Transaction). Both endpoints must be present when the record is created.
class Transfer < Transaction
  belongs_to :source, class_name: "Partition"
  belongs_to :target, class_name: "Partition"

  validates :source_id, :target_id, presence: true, on: :create
end
serracorwin/checkmate
app/models/transfer.rb
Ruby
apache-2.0
763
package basic.sorting;

import static org.junit.Assert.*;

import org.junit.Test;

/**
 * Unit tests for {@code InsertionSorter.sort(int[])}, which sorts in place.
 * Adds edge cases the original test missed: empty input, fully reversed
 * input, and duplicate elements.
 */
public class InsertionSorterTest {

	@Test
	public void test() {
		// Single element: already sorted.
		int[] result = new int[]{1};
		InsertionSorter.sort(result);
		assertArrayEquals(new int[]{1}, result);

		// Two elements out of order.
		result = new int[]{2, 1};
		InsertionSorter.sort(result);
		assertArrayEquals(new int[]{1, 2}, result);

		// Three elements, partially ordered.
		result = new int[]{2, 1, 3};
		InsertionSorter.sort(result);
		assertArrayEquals(new int[]{1, 2, 3}, result);

		// Four elements, interleaved.
		result = new int[]{2, 1, 4, 3};
		InsertionSorter.sort(result);
		assertArrayEquals(new int[]{1, 2, 3, 4}, result);

		// Edge case: empty input must not throw.
		result = new int[]{};
		InsertionSorter.sort(result);
		assertArrayEquals(new int[]{}, result);

		// Worst case: fully reversed input.
		result = new int[]{4, 3, 2, 1};
		InsertionSorter.sort(result);
		assertArrayEquals(new int[]{1, 2, 3, 4}, result);

		// Duplicates must be preserved.
		result = new int[]{3, 1, 3, 2};
		InsertionSorter.sort(result);
		assertArrayEquals(new int[]{1, 2, 3, 3}, result);
	}
}
zhou-jg/Algorithm
test/basic/sorting/InsertionSorterTest.java
Java
apache-2.0
624
package net.ftlines.blog.cdidemo.web.app;

import javax.inject.Inject;

import net.ftlines.blog.cdidemo.model.Member;
import net.ftlines.blog.cdidemo.model.PositionsRepository;
import net.ftlines.blog.cdidemo.model.Team;
import net.ftlines.blog.cdidemo.web.EntityModel;
import net.ftlines.blog.cdidemo.web.UserAction;

import org.apache.wicket.ajax.AjaxRequestTarget;
import org.apache.wicket.ajax.markup.html.AjaxLink;
import org.apache.wicket.extensions.ajax.markup.html.modal.ModalWindow;
import org.apache.wicket.markup.html.basic.Label;
import org.apache.wicket.markup.html.form.Button;
import org.apache.wicket.markup.html.form.Form;
import org.apache.wicket.markup.html.form.TextField;
import org.apache.wicket.markup.html.link.Link;
import org.apache.wicket.markup.html.list.ListItem;
import org.apache.wicket.markup.html.list.ListView;
import org.apache.wicket.model.IModel;
import org.apache.wicket.model.PropertyModel;
import org.apache.wicket.request.mapper.parameter.PageParameters;

/**
 * Wicket page for editing a {@link Team}: its name and its list of members.
 * All edits happen inside a {@link UserAction} unit of work started in the
 * constructor; "save" applies it and "cancel" discards it.
 */
public class TeamEditPage extends BasePage {

  // Unit-of-work style action scope; begin() in the constructor,
  // apply()/undo() from the save/cancel handlers below.
  @Inject
  UserAction action;

  /**
   * @param params must contain "id", the id of the team to edit
   *     (see {@link #forTeam(Team)}).
   */
  public TeamEditPage(PageParameters params) {
    action.begin();

    // Detachable model so the Team entity is reloaded per request.
    Long teamId = params.get("id").toLong();
    IModel<Team> team = new EntityModel<Team>(Team.class, teamId);
    setDefaultModel(team);

    Form form = new Form("form");
    add(form);

    form.add(new TextField("name", new PropertyModel(team, "name")));

    // Shared modal; its content is swapped in per-member by the edit links.
    final ModalWindow modal = new ModalWindow("modal");
    form.add(modal);

    form.add(new ListView<Member>("members", new PropertyModel(team, "members")) {
      @Override
      protected void populateItem(final ListItem<Member> item) {
        // Markup id needed so the row can be repainted via ajax after edits.
        item.setOutputMarkupId(true);
        IModel<Member> member = item.getModel();
        item.add(new Label("employee", new PropertyModel(member, "employee.fullName")));
        item.add(new Label("effectiveDate", new PropertyModel(member, "effectiveDate")));
        item.add(new Label("position", new PropertyModel(member, "position.name")));
        item.add(new AjaxLink<Member>("edit", member) {
          @Override
          public void onClick(AjaxRequestTarget target) {
            // Open the member editor in the modal; on apply, repaint the row.
            modal.setContent(new MemberEditPanel(ModalWindow.CONTENT_ID, getModel()) {
              @Override
              protected void onApply(AjaxRequestTarget target) {
                modal.close(target);
                target.add(item);
              }

              @Override
              protected void onCancel(AjaxRequestTarget target) {
                modal.close(target);
              }
            });
            modal.show(target);
          }
        });
        // Reordering links; moveUpLink/moveDownLink are defined outside this
        // chunk (presumably on BasePage) — confirm.
        item.add(moveUpLink("up", item));
        item.add(moveDownLink("down", item));
      }
    }.setReuseItems(true)); // reuse items so row state survives repaints

    form.add(new Button("save") {
      @Override
      public void onSubmit() {
        // Commit all pending edits, then return to the list.
        action.apply();
        setResponsePage(TeamsListPage.class);
      }
    });

    form.add(new Link("cancel") {
      @Override
      public void onClick() {
        // Discard all pending edits, then return to the list.
        action.undo();
        setResponsePage(TeamsListPage.class);
      }
    });
  }

  /**
   * Builds the page parameters needed to link to this page for a given team.
   */
  public static PageParameters forTeam(Team team) {
    PageParameters params = new PageParameters();
    params.set("id", team.getId());
    return params;
  }
}
42Lines/blog-cdidemo
src/main/java/net/ftlines/blog/cdidemo/web/app/TeamEditPage.java
Java
apache-2.0
3,336
package com.airbnb.epoxy; import androidx.annotation.LayoutRes; import androidx.annotation.Nullable; import java.lang.CharSequence; import java.lang.Number; import java.lang.Object; import java.lang.Override; import java.lang.String; /** * Generated file. Do not modify! */ public class ModelWithSuper_ extends ModelWithSuper implements GeneratedModel<Object>, ModelWithSuperBuilder { private OnModelBoundListener<ModelWithSuper_, Object> onModelBoundListener_epoxyGeneratedModel; private OnModelUnboundListener<ModelWithSuper_, Object> onModelUnboundListener_epoxyGeneratedModel; private OnModelVisibilityStateChangedListener<ModelWithSuper_, Object> onModelVisibilityStateChangedListener_epoxyGeneratedModel; private OnModelVisibilityChangedListener<ModelWithSuper_, Object> onModelVisibilityChangedListener_epoxyGeneratedModel; public ModelWithSuper_() { super(); } @Override public void addTo(EpoxyController controller) { super.addTo(controller); addWithDebugValidation(controller); } @Override public void handlePreBind(final EpoxyViewHolder holder, final Object object, final int position) { validateStateHasNotChangedSinceAdded("The model was changed between being added to the controller and being bound.", position); } @Override public void handlePostBind(final Object object, int position) { if (onModelBoundListener_epoxyGeneratedModel != null) { onModelBoundListener_epoxyGeneratedModel.onModelBound(this, object, position); } validateStateHasNotChangedSinceAdded("The model was changed during the bind call.", position); } /** * Register a listener that will be called when this model is bound to a view. * <p> * The listener will contribute to this model's hashCode state per the {@link * com.airbnb.epoxy.EpoxyAttribute.Option#DoNotHash} rules. 
* <p> * You may clear the listener by setting a null value, or by calling {@link #reset()} */ public ModelWithSuper_ onBind(OnModelBoundListener<ModelWithSuper_, Object> listener) { onMutation(); this.onModelBoundListener_epoxyGeneratedModel = listener; return this; } @Override public void unbind(Object object) { super.unbind(object); if (onModelUnboundListener_epoxyGeneratedModel != null) { onModelUnboundListener_epoxyGeneratedModel.onModelUnbound(this, object); } } /** * Register a listener that will be called when this model is unbound from a view. * <p> * The listener will contribute to this model's hashCode state per the {@link * com.airbnb.epoxy.EpoxyAttribute.Option#DoNotHash} rules. * <p> * You may clear the listener by setting a null value, or by calling {@link #reset()} */ public ModelWithSuper_ onUnbind(OnModelUnboundListener<ModelWithSuper_, Object> listener) { onMutation(); this.onModelUnboundListener_epoxyGeneratedModel = listener; return this; } @Override public void onVisibilityStateChanged(int visibilityState, final Object object) { if (onModelVisibilityStateChangedListener_epoxyGeneratedModel != null) { onModelVisibilityStateChangedListener_epoxyGeneratedModel.onVisibilityStateChanged(this, object, visibilityState); } super.onVisibilityStateChanged(visibilityState, object); } /** * Register a listener that will be called when this model visibility state has changed. * <p> * The listener will contribute to this model's hashCode state per the {@link * com.airbnb.epoxy.EpoxyAttribute.Option#DoNotHash} rules. 
*/ public ModelWithSuper_ onVisibilityStateChanged( OnModelVisibilityStateChangedListener<ModelWithSuper_, Object> listener) { onMutation(); this.onModelVisibilityStateChangedListener_epoxyGeneratedModel = listener; return this; } @Override public void onVisibilityChanged(float percentVisibleHeight, float percentVisibleWidth, int visibleHeight, int visibleWidth, final Object object) { if (onModelVisibilityChangedListener_epoxyGeneratedModel != null) { onModelVisibilityChangedListener_epoxyGeneratedModel.onVisibilityChanged(this, object, percentVisibleHeight, percentVisibleWidth, visibleHeight, visibleWidth); } super.onVisibilityChanged(percentVisibleHeight, percentVisibleWidth, visibleHeight, visibleWidth, object); } /** * Register a listener that will be called when this model visibility has changed. * <p> * The listener will contribute to this model's hashCode state per the {@link * com.airbnb.epoxy.EpoxyAttribute.Option#DoNotHash} rules. */ public ModelWithSuper_ onVisibilityChanged( OnModelVisibilityChangedListener<ModelWithSuper_, Object> listener) { onMutation(); this.onModelVisibilityChangedListener_epoxyGeneratedModel = listener; return this; } public ModelWithSuper_ valueInt(int valueInt) { onMutation(); super.valueInt = valueInt; super.valueInt(valueInt); return this; } public int valueInt() { return valueInt; } @Override public ModelWithSuper_ id(long id) { super.id(id); return this; } @Override public ModelWithSuper_ id(@Nullable Number... ids) { super.id(ids); return this; } @Override public ModelWithSuper_ id(long id1, long id2) { super.id(id1, id2); return this; } @Override public ModelWithSuper_ id(@Nullable CharSequence key) { super.id(key); return this; } @Override public ModelWithSuper_ id(@Nullable CharSequence key, @Nullable CharSequence... 
otherKeys) { super.id(key, otherKeys); return this; } @Override public ModelWithSuper_ id(@Nullable CharSequence key, long id) { super.id(key, id); return this; } @Override public ModelWithSuper_ layout(@LayoutRes int layoutRes) { super.layout(layoutRes); return this; } @Override public ModelWithSuper_ spanSizeOverride( @Nullable EpoxyModel.SpanSizeOverrideCallback spanSizeCallback) { super.spanSizeOverride(spanSizeCallback); return this; } @Override public ModelWithSuper_ show() { super.show(); return this; } @Override public ModelWithSuper_ show(boolean show) { super.show(show); return this; } @Override public ModelWithSuper_ hide() { super.hide(); return this; } @Override public ModelWithSuper_ reset() { onModelBoundListener_epoxyGeneratedModel = null; onModelUnboundListener_epoxyGeneratedModel = null; onModelVisibilityStateChangedListener_epoxyGeneratedModel = null; onModelVisibilityChangedListener_epoxyGeneratedModel = null; super.valueInt = 0; super.reset(); return this; } @Override public boolean equals(Object o) { if (o == this) { return true; } if (!(o instanceof ModelWithSuper_)) { return false; } if (!super.equals(o)) { return false; } ModelWithSuper_ that = (ModelWithSuper_) o; if (((onModelBoundListener_epoxyGeneratedModel == null) != (that.onModelBoundListener_epoxyGeneratedModel == null))) { return false; } if (((onModelUnboundListener_epoxyGeneratedModel == null) != (that.onModelUnboundListener_epoxyGeneratedModel == null))) { return false; } if (((onModelVisibilityStateChangedListener_epoxyGeneratedModel == null) != (that.onModelVisibilityStateChangedListener_epoxyGeneratedModel == null))) { return false; } if (((onModelVisibilityChangedListener_epoxyGeneratedModel == null) != (that.onModelVisibilityChangedListener_epoxyGeneratedModel == null))) { return false; } if ((valueInt != that.valueInt)) { return false; } return true; } @Override public int hashCode() { int _result = super.hashCode(); _result = 31 * _result + 
(onModelBoundListener_epoxyGeneratedModel != null ? 1 : 0); _result = 31 * _result + (onModelUnboundListener_epoxyGeneratedModel != null ? 1 : 0); _result = 31 * _result + (onModelVisibilityStateChangedListener_epoxyGeneratedModel != null ? 1 : 0); _result = 31 * _result + (onModelVisibilityChangedListener_epoxyGeneratedModel != null ? 1 : 0); _result = 31 * _result + valueInt; return _result; } @Override public String toString() { return "ModelWithSuper_{" + "valueInt=" + valueInt + "}" + super.toString(); } }
airbnb/epoxy
epoxy-processortest/src/test/resources/ModelWithSuper_.java
Java
apache-2.0
8,328
/* * Copyright 2010-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ using System; using System.Collections.Generic; using System.IO; using Amazon.DataPipeline.Model; using Amazon.Runtime.Internal.Transform; namespace Amazon.DataPipeline.Model.Internal.MarshallTransformations { /// <summary> /// FieldUnmarshaller /// </summary> internal class FieldUnmarshaller : IUnmarshaller<Field, XmlUnmarshallerContext>, IUnmarshaller<Field, JsonUnmarshallerContext> { Field IUnmarshaller<Field, XmlUnmarshallerContext>.Unmarshall(XmlUnmarshallerContext context) { throw new NotImplementedException(); } public Field Unmarshall(JsonUnmarshallerContext context) { Field field = new Field(); int originalDepth = context.CurrentDepth; int targetDepth = originalDepth + 1; while (context.Read()) { if ((context.IsKey) && (context.CurrentDepth == targetDepth)) { context.Read(); context.Read(); if (context.TestExpression("Key", targetDepth)) { field.Key = StringUnmarshaller.GetInstance().Unmarshall(context); continue; } if (context.TestExpression("StringValue", targetDepth)) { field.StringValue = StringUnmarshaller.GetInstance().Unmarshall(context); continue; } if (context.TestExpression("RefValue", targetDepth)) { field.RefValue = StringUnmarshaller.GetInstance().Unmarshall(context); continue; } } else if (context.IsEndElement && context.CurrentDepth <= originalDepth) { return field; } } return field; } private static FieldUnmarshaller instance; public static FieldUnmarshaller GetInstance() { if 
(instance == null) instance = new FieldUnmarshaller(); return instance; } } }
emcvipr/dataservices-sdk-dotnet
AWSSDK/Amazon.DataPipeline/Model/Internal/MarshallTransformations/FieldUnmarshaller.cs
C#
apache-2.0
2,790
// Original file: deps/protoc-gen-validate/validate/validate.proto /** * EnumRules describe the constraints applied to enum values */ export interface EnumRules { /** * Const specifies that this field must be exactly the specified value */ 'const'?: (number); /** * DefinedOnly specifies that this field must be only one of the defined * values for this enum, failing on any undefined value. */ 'defined_only'?: (boolean); /** * In specifies that this field must be equal to one of the specified * values */ 'in'?: (number)[]; /** * NotIn specifies that this field cannot be equal to one of the specified * values */ 'not_in'?: (number)[]; } /** * EnumRules describe the constraints applied to enum values */ export interface EnumRules__Output { /** * Const specifies that this field must be exactly the specified value */ 'const': (number); /** * DefinedOnly specifies that this field must be only one of the defined * values for this enum, failing on any undefined value. */ 'defined_only': (boolean); /** * In specifies that this field must be equal to one of the specified * values */ 'in': (number)[]; /** * NotIn specifies that this field cannot be equal to one of the specified * values */ 'not_in': (number)[]; }
grpc/grpc-node
packages/grpc-js-xds/src/generated/validate/EnumRules.ts
TypeScript
apache-2.0
1,326
/* * DBeaver - Universal Database Manager * Copyright (C) 2010-2021 DBeaver Corp and others * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jkiss.dbeaver.model.impl.struct; import org.jkiss.code.Nullable; import org.jkiss.dbeaver.model.DBPImage; import org.jkiss.dbeaver.model.DBPImageProvider; import org.jkiss.dbeaver.model.DBValueFormatting; import org.jkiss.dbeaver.model.struct.rdb.DBSTriggerColumn; /** * AbstractTriggerColumn */ public abstract class AbstractTriggerColumn implements DBSTriggerColumn, DBPImageProvider { @Override public boolean isPersisted() { return true; } @Nullable @Override public DBPImage getObjectImage() { return DBValueFormatting.getObjectImage(getTableColumn()); } }
Sargul/dbeaver
plugins/org.jkiss.dbeaver.model/src/org/jkiss/dbeaver/model/impl/struct/AbstractTriggerColumn.java
Java
apache-2.0
1,333
package com.riversoft.weixin.pay.payment.wrapper; import com.fasterxml.jackson.annotation.JsonUnwrapped; import com.riversoft.weixin.pay.payment.bean.UnifiedOrderResponse; /** * @borball on 1/13/2017. */ public class UnifiedOrderResponseWrapper extends BaseSettings { @JsonUnwrapped private UnifiedOrderResponse response; public UnifiedOrderResponse getResponse() { return response; } public void setResponse(UnifiedOrderResponse response) { this.response = response; } }
borball/weixin-sdk
weixin-pay/src/main/java/com/riversoft/weixin/pay/payment/wrapper/UnifiedOrderResponseWrapper.java
Java
apache-2.0
536
package com.vmware.vim25; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlType; /** * <p>Java class for anonymous complex type. * * <p>The following schema fragment specifies the expected content contained within this class. * * <pre> * &lt;complexType> * &lt;complexContent> * &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType"> * &lt;sequence> * &lt;element name="returnval" type="{urn:vim25}ManagedObjectReference"/> * &lt;/sequence> * &lt;/restriction> * &lt;/complexContent> * &lt;/complexType> * </pre> * * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "", propOrder = { "returnval" }) @XmlRootElement(name = "reloadVirtualMachineFromPath_TaskResponse") public class ReloadVirtualMachineFromPathTaskResponse { @XmlElement(required = true) protected ManagedObjectReference returnval; /** * Gets the value of the returnval property. * * @return * possible object is * {@link ManagedObjectReference } * */ public ManagedObjectReference getReturnval() { return returnval; } /** * Sets the value of the returnval property. * * @param value * allowed object is * {@link ManagedObjectReference } * */ public void setReturnval(ManagedObjectReference value) { this.returnval = value; } }
jdgwartney/vsphere-ws
java/JAXWS/samples/com/vmware/vim25/ReloadVirtualMachineFromPathTaskResponse.java
Java
apache-2.0
1,666
using System; using System.Net; using Battleship438Game.Network.Messages; using Lidgren.Network; namespace Battleship438Game.Network { public class ServerNetworkManager : INetworkManager{ //=======================================================================// private bool _isDisposed; public NetServer Server { get; private set; } public bool Running { get; set; } //=======================================================================// public void Connect(){ var config = new NetPeerConfiguration("Battleship438"){ Port = 14241, }; config.EnableMessageType(NetIncomingMessageType.WarningMessage); config.EnableMessageType(NetIncomingMessageType.VerboseDebugMessage); config.EnableMessageType(NetIncomingMessageType.ErrorMessage); config.EnableMessageType(NetIncomingMessageType.Error); config.EnableMessageType(NetIncomingMessageType.StatusChanged); config.EnableMessageType(NetIncomingMessageType.Data); config.EnableMessageType(NetIncomingMessageType.ConnectionApproval); this.Server = new NetServer(config); this.Server.Start(); Running = true; } public int Connection(){ return Server.ConnectionsCount; } public NetOutgoingMessage CreateMessage(){ return this.Server.CreateMessage(); } public void Disconnect(){ this.Server.Shutdown("Bye"); Running = false; } public void Dispose(){ this.Dispose(true); } public NetIncomingMessage ReadMessage(){ return this.Server.ReadMessage(); } public void Recycle(NetIncomingMessage im){ this.Server.Recycle(im); } //=======================================================================// public void SendMessage(IGameMessage gameMessage) { NetOutgoingMessage om = this.Server.CreateMessage(); om.Write((byte)gameMessage.MessageType); gameMessage.Encode(om); this.Server.SendToAll(om, NetDeliveryMethod.ReliableOrdered); } //=======================================================================// private void Dispose(bool disposing){ if (this._isDisposed) return; if (disposing) this.Disconnect(); this._isDisposed = true; } } }
kflo/Battleship2
Battleship438/Network/ServerNetworkManager.cs
C#
apache-2.0
2,704
// Copyright [2011] [PagSeguro Internet Ltda.] // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. using System; using System.Net; using Uol.PagSeguro.Constants; using Uol.PagSeguro.Domain; using Uol.PagSeguro.Exception; using Uol.PagSeguro.Resources; namespace CreatePayment { class Program { static void Main(string[] args) { bool isSandbox = true; EnvironmentConfiguration.ChangeEnvironment(isSandbox); try { AccountCredentials credentials = PagSeguroConfiguration.Credentials(); // Instantiate a new payment request PaymentRequest payment = new PaymentRequest(); // Sets the currency payment.Currency = Currency.Brl; // Add an item for this payment request payment.Items.Add(new Item("0001", "Notebook Prata", 1, 2430.00m)); // Add another item for this payment request payment.Items.Add(new Item("0002", "Notebook Rosa", 2, 150.99m)); // Sets a reference code for this payment request, it is useful to identify this payment in future notifications. payment.Reference = "REF1234"; // Sets shipping information for this payment request payment.Shipping = new Shipping(); payment.Shipping.ShippingType = ShippingType.Sedex; //Passando valor para ShippingCost payment.Shipping.Cost = 10.00m; payment.Shipping.Address = new Address( "BRA", "SP", "Sao Paulo", "Jardim Paulistano", "01452002", "Av. Brig. Faria Lima", "1384", "5o andar" ); // Sets your customer information. 
payment.Sender = new Sender( "Joao Comprador", "comprador@uol.com.br", new Phone("11", "56273440") ); // Sets the url used by PagSeguro for redirect user after ends checkout process payment.RedirectUri = new Uri("http://www.lojamodelo.com.br"); // Add checkout metadata information payment.AddMetaData(MetaDataItemKeys.GetItemKeyByDescription("CPF do passageiro"), "123.456.789-09", 1); payment.AddMetaData("PASSENGER_PASSPORT", "23456", 1); // Another way to set checkout parameters payment.AddParameter("senderBirthday", "07/05/1980"); payment.AddIndexedParameter("itemColor", "verde", 1); payment.AddIndexedParameter("itemId", "0003", 3); payment.AddIndexedParameter("itemDescription", "Mouse", 3); payment.AddIndexedParameter("itemQuantity", "1", 3); payment.AddIndexedParameter("itemAmount", "200.00", 3); SenderDocument senderCPF = new SenderDocument(Documents.GetDocumentByType("CPF"), "12345678909"); payment.Sender.Documents.Add(senderCPF); Uri paymentRedirectUri = payment.Register(credentials); Console.WriteLine("URL do pagamento : " + paymentRedirectUri); Console.ReadKey(); } catch (PagSeguroServiceException exception) { Console.WriteLine(exception.Message + "\n"); foreach (ServiceError element in exception.Errors) { Console.WriteLine(element + "\n"); } Console.ReadKey(); } } } }
fortesinformatica/PagSeguro
source/Examples/CreatePayment/Program.cs
C#
apache-2.0
4,354
/* * Copyright 2014 Google Inc. * * Use of this source code is governed by a BSD-style license that can be * found in the LICENSE file. */ #include "SkTypes.h" #include "SkData.h" #include "SkFixed.h" #include "SkFontDescriptor.h" #include "SkFontHost_FreeType_common.h" #include "SkFontMgr.h" #include "SkFontMgr_android.h" #include "SkFontMgr_android_parser.h" #include "SkFontStyle.h" #include "SkOSFile.h" #include "SkPaint.h" #include "SkRefCnt.h" #include "SkString.h" #include "SkStream.h" #include "SkTArray.h" #include "SkTDArray.h" #include "SkTSearch.h" #include "SkTemplates.h" #include "SkTypefaceCache.h" #include <limits> class SkData; class SkTypeface_Android : public SkTypeface_FreeType { public: SkTypeface_Android(const SkFontStyle& style, bool isFixedPitch, const SkString& familyName) : INHERITED(style, SkTypefaceCache::NewFontID(), isFixedPitch) , fFamilyName(familyName) { } protected: void onGetFamilyName(SkString* familyName) const override { *familyName = fFamilyName; } SkString fFamilyName; private: typedef SkTypeface_FreeType INHERITED; }; class SkTypeface_AndroidSystem : public SkTypeface_Android { public: SkTypeface_AndroidSystem(const SkString& pathName, const bool cacheFontFiles, int index, const SkFixed* axes, int axesCount, const SkFontStyle& style, bool isFixedPitch, const SkString& familyName, const SkLanguage& lang, FontVariant variantStyle) : INHERITED(style, isFixedPitch, familyName) , fPathName(pathName) , fIndex(index) , fAxes(axes, axesCount) , fLang(lang) , fVariantStyle(variantStyle) , fFile(cacheFontFiles ? sk_fopen(fPathName.c_str(), kRead_SkFILE_Flag) : nullptr) { if (cacheFontFiles) { SkASSERT(fFile); } } SkStreamAsset* createStream() const { if (fFile) { SkData* data = SkData::NewFromFILE(fFile); return data ? 
new SkMemoryStream(data) : nullptr; } return SkStream::NewFromFile(fPathName.c_str()); } virtual void onGetFontDescriptor(SkFontDescriptor* desc, bool* serialize) const override { SkASSERT(desc); SkASSERT(serialize); desc->setFamilyName(fFamilyName.c_str()); *serialize = false; } SkStreamAsset* onOpenStream(int* ttcIndex) const override { *ttcIndex = fIndex; return this->createStream(); } SkFontData* onCreateFontData() const override { return new SkFontData(this->createStream(), fIndex, fAxes.begin(), fAxes.count()); } const SkString fPathName; int fIndex; const SkSTArray<4, SkFixed, true> fAxes; const SkLanguage fLang; const FontVariant fVariantStyle; SkAutoTCallVProc<FILE, sk_fclose> fFile; typedef SkTypeface_Android INHERITED; }; class SkTypeface_AndroidStream : public SkTypeface_Android { public: SkTypeface_AndroidStream(SkFontData* data, const SkFontStyle& style, bool isFixedPitch, const SkString& familyName) : INHERITED(style, isFixedPitch, familyName) , fData(data) { } virtual void onGetFontDescriptor(SkFontDescriptor* desc, bool* serialize) const override { SkASSERT(desc); SkASSERT(serialize); desc->setFamilyName(fFamilyName.c_str()); *serialize = true; } SkStreamAsset* onOpenStream(int* ttcIndex) const override { *ttcIndex = fData->getIndex(); return fData->duplicateStream(); } SkFontData* onCreateFontData() const override { return new SkFontData(*fData.get()); } private: const SkAutoTDelete<const SkFontData> fData; typedef SkTypeface_Android INHERITED; }; class SkFontStyleSet_Android : public SkFontStyleSet { typedef SkTypeface_FreeType::Scanner Scanner; public: explicit SkFontStyleSet_Android(const FontFamily& family, const Scanner& scanner, const bool cacheFontFiles) { const SkString* cannonicalFamilyName = nullptr; if (family.fNames.count() > 0) { cannonicalFamilyName = &family.fNames[0]; } // TODO? 
make this lazy for (int i = 0; i < family.fFonts.count(); ++i) { const FontFileInfo& fontFile = family.fFonts[i]; SkString pathName(family.fBasePath); pathName.append(fontFile.fFileName); SkAutoTDelete<SkStream> stream(SkStream::NewFromFile(pathName.c_str())); if (!stream.get()) { SkDEBUGF(("Requested font file %s does not exist or cannot be opened.\n", pathName.c_str())); continue; } const int ttcIndex = fontFile.fIndex; SkString familyName; SkFontStyle style; bool isFixedWidth; Scanner::AxisDefinitions axisDefinitions; if (!scanner.scanFont(stream.get(), ttcIndex, &familyName, &style, &isFixedWidth, &axisDefinitions)) { SkDEBUGF(("Requested font file %s exists, but is not a valid font.\n", pathName.c_str())); continue; } int weight = fontFile.fWeight != 0 ? fontFile.fWeight : style.weight(); SkFontStyle::Slant slant = style.slant(); switch (fontFile.fStyle) { case FontFileInfo::Style::kAuto: slant = style.slant(); break; case FontFileInfo::Style::kNormal: slant = SkFontStyle::kUpright_Slant; break; case FontFileInfo::Style::kItalic: slant = SkFontStyle::kItalic_Slant; break; default: SkASSERT(false); break; } style = SkFontStyle(weight, style.width(), slant); const SkLanguage& lang = family.fLanguage; uint32_t variant = family.fVariant; if (kDefault_FontVariant == variant) { variant = kCompact_FontVariant | kElegant_FontVariant; } // The first specified family name overrides the family name found in the font. // TODO: SkTypeface_AndroidSystem::onCreateFamilyNameIterator should return // all of the specified family names in addition to the names found in the font. 
if (cannonicalFamilyName != nullptr) { familyName = *cannonicalFamilyName; } SkAutoSTMalloc<4, SkFixed> axisValues(axisDefinitions.count()); Scanner::computeAxisValues(axisDefinitions, fontFile.fAxes.begin(), fontFile.fAxes.count(), axisValues, familyName); fStyles.push_back().reset(new SkTypeface_AndroidSystem( pathName, cacheFontFiles, ttcIndex, axisValues.get(), axisDefinitions.count(), style, isFixedWidth, familyName, lang, variant)); } } int count() override { return fStyles.count(); } void getStyle(int index, SkFontStyle* style, SkString* name) override { if (index < 0 || fStyles.count() <= index) { return; } if (style) { *style = this->style(index); } if (name) { name->reset(); } } SkTypeface_AndroidSystem* createTypeface(int index) override { if (index < 0 || fStyles.count() <= index) { return nullptr; } return SkRef(fStyles[index].get()); } /** Find the typeface in this style set that most closely matches the given pattern. * TODO: consider replacing with SkStyleSet_Indirect::matchStyle(); * this simpler version using match_score() passes all our tests. */ SkTypeface_AndroidSystem* matchStyle(const SkFontStyle& pattern) override { if (0 == fStyles.count()) { return nullptr; } SkTypeface_AndroidSystem* closest = fStyles[0]; int minScore = std::numeric_limits<int>::max(); for (int i = 0; i < fStyles.count(); ++i) { SkFontStyle style = this->style(i); int score = match_score(pattern, style); if (score < minScore) { closest = fStyles[i]; minScore = score; } } return SkRef(closest); } private: SkFontStyle style(int index) { return fStyles[index]->fontStyle(); } static int match_score(const SkFontStyle& pattern, const SkFontStyle& candidate) { int score = 0; score += SkTAbs((pattern.width() - candidate.width()) * 100); score += SkTAbs((pattern.slant() == candidate.slant()) ? 
0 : 1000); score += SkTAbs(pattern.weight() - candidate.weight()); return score; } SkTArray<SkAutoTUnref<SkTypeface_AndroidSystem>, true> fStyles; friend struct NameToFamily; friend class SkFontMgr_Android; typedef SkFontStyleSet INHERITED; }; /** On Android a single family can have many names, but our API assumes unique names. * Map names to the back end so that all names for a given family refer to the same * (non-replicated) set of typefaces. * SkTDict<> doesn't let us do index-based lookup, so we write our own mapping. */ struct NameToFamily { SkString name; SkFontStyleSet_Android* styleSet; }; class SkFontMgr_Android : public SkFontMgr { public: SkFontMgr_Android(const SkFontMgr_Android_CustomFonts* custom) { SkTDArray<FontFamily*> families; if (custom && SkFontMgr_Android_CustomFonts::kPreferSystem != custom->fSystemFontUse) { SkString base(custom->fBasePath); SkFontMgr_Android_Parser::GetCustomFontFamilies( families, base, custom->fFontsXml, custom->fFallbackFontsXml); } if (!custom || (custom && SkFontMgr_Android_CustomFonts::kOnlyCustom != custom->fSystemFontUse)) { SkFontMgr_Android_Parser::GetSystemFontFamilies(families); } if (custom && SkFontMgr_Android_CustomFonts::kPreferSystem == custom->fSystemFontUse) { SkString base(custom->fBasePath); SkFontMgr_Android_Parser::GetCustomFontFamilies( families, base, custom->fFontsXml, custom->fFallbackFontsXml); } this->buildNameToFamilyMap(families, custom ? custom->fIsolated : false); this->findDefaultFont(); families.deleteAll(); } protected: /** Returns not how many families we have, but how many unique names * exist among the families. 
*/ int onCountFamilies() const override { return fNameToFamilyMap.count(); } void onGetFamilyName(int index, SkString* familyName) const override { if (index < 0 || fNameToFamilyMap.count() <= index) { familyName->reset(); return; } familyName->set(fNameToFamilyMap[index].name); } SkFontStyleSet* onCreateStyleSet(int index) const override { if (index < 0 || fNameToFamilyMap.count() <= index) { return nullptr; } return SkRef(fNameToFamilyMap[index].styleSet); } SkFontStyleSet* onMatchFamily(const char familyName[]) const override { if (!familyName) { return nullptr; } SkAutoAsciiToLC tolc(familyName); for (int i = 0; i < fNameToFamilyMap.count(); ++i) { if (fNameToFamilyMap[i].name.equals(tolc.lc())) { return SkRef(fNameToFamilyMap[i].styleSet); } } // TODO: eventually we should not need to name fallback families. for (int i = 0; i < fFallbackNameToFamilyMap.count(); ++i) { if (fFallbackNameToFamilyMap[i].name.equals(tolc.lc())) { return SkRef(fFallbackNameToFamilyMap[i].styleSet); } } return nullptr; } virtual SkTypeface* onMatchFamilyStyle(const char familyName[], const SkFontStyle& style) const override { SkAutoTUnref<SkFontStyleSet> sset(this->matchFamily(familyName)); return sset->matchStyle(style); } virtual SkTypeface* onMatchFaceStyle(const SkTypeface* typeface, const SkFontStyle& style) const override { for (int i = 0; i < fFontStyleSets.count(); ++i) { for (int j = 0; j < fFontStyleSets[i]->fStyles.count(); ++j) { if (fFontStyleSets[i]->fStyles[j] == typeface) { return fFontStyleSets[i]->matchStyle(style); } } } return nullptr; } static SkTypeface_AndroidSystem* find_family_style_character( const SkTDArray<NameToFamily>& fallbackNameToFamilyMap, const SkFontStyle& style, bool elegant, const SkString& langTag, SkUnichar character) { for (int i = 0; i < fallbackNameToFamilyMap.count(); ++i) { SkFontStyleSet_Android* family = fallbackNameToFamilyMap[i].styleSet; SkAutoTUnref<SkTypeface_AndroidSystem> face(family->matchStyle(style)); if (!langTag.isEmpty() && 
!face->fLang.getTag().startsWith(langTag.c_str())) { continue; } if (SkToBool(face->fVariantStyle & kElegant_FontVariant) != elegant) { continue; } SkPaint paint; paint.setTypeface(face); paint.setTextEncoding(SkPaint::kUTF32_TextEncoding); uint16_t glyphID; paint.textToGlyphs(&character, sizeof(character), &glyphID); if (glyphID != 0) { return face.release(); } } return nullptr; } virtual SkTypeface* onMatchFamilyStyleCharacter(const char familyName[], const SkFontStyle& style, const char* bcp47[], int bcp47Count, SkUnichar character) const override { // The variant 'elegant' is 'not squashed', 'compact' is 'stays in ascent/descent'. // The variant 'default' means 'compact and elegant'. // As a result, it is not possible to know the variant context from the font alone. // TODO: add 'is_elegant' and 'is_compact' bits to 'style' request. // The first time match anything elegant, second time anything not elegant. for (int elegant = 2; elegant --> 0;) { for (int bcp47Index = bcp47Count; bcp47Index --> 0;) { SkLanguage lang(bcp47[bcp47Index]); while (!lang.getTag().isEmpty()) { SkTypeface_AndroidSystem* matchingTypeface = find_family_style_character(fFallbackNameToFamilyMap, style, SkToBool(elegant), lang.getTag(), character); if (matchingTypeface) { return matchingTypeface; } lang = lang.getParent(); } } SkTypeface_AndroidSystem* matchingTypeface = find_family_style_character(fFallbackNameToFamilyMap, style, SkToBool(elegant), SkString(), character); if (matchingTypeface) { return matchingTypeface; } } return nullptr; } SkTypeface* onCreateFromData(SkData* data, int ttcIndex) const override { return this->createFromStream(new SkMemoryStream(data), ttcIndex); } SkTypeface* onCreateFromFile(const char path[], int ttcIndex) const override { SkAutoTDelete<SkStreamAsset> stream(SkStream::NewFromFile(path)); return stream.get() ? 
this->createFromStream(stream.release(), ttcIndex) : nullptr; } SkTypeface* onCreateFromStream(SkStreamAsset* bareStream, int ttcIndex) const override { SkAutoTDelete<SkStreamAsset> stream(bareStream); bool isFixedPitch; SkFontStyle style; SkString name; if (!fScanner.scanFont(stream, ttcIndex, &name, &style, &isFixedPitch, nullptr)) { return nullptr; } SkFontData* data(new SkFontData(stream.release(), ttcIndex, nullptr, 0)); return new SkTypeface_AndroidStream(data, style, isFixedPitch, name); } SkTypeface* onCreateFromStream(SkStreamAsset* s, const FontParameters& params) const override { using Scanner = SkTypeface_FreeType::Scanner; SkAutoTDelete<SkStreamAsset> stream(s); bool isFixedPitch; SkFontStyle style; SkString name; Scanner::AxisDefinitions axisDefinitions; if (!fScanner.scanFont(stream, params.getCollectionIndex(), &name, &style, &isFixedPitch, &axisDefinitions)) { return nullptr; } int paramAxisCount; const FontParameters::Axis* paramAxes = params.getAxes(&paramAxisCount); SkAutoSTMalloc<4, SkFixed> axisValues(axisDefinitions.count()); Scanner::computeAxisValues(axisDefinitions, paramAxes, paramAxisCount, axisValues, name); SkFontData* data(new SkFontData(stream.release(), params.getCollectionIndex(), axisValues.get(), axisDefinitions.count())); return new SkTypeface_AndroidStream(data, style, isFixedPitch, name); } SkTypeface* onCreateFromFontData(SkFontData* data) const override { SkStreamAsset* stream(data->getStream()); bool isFixedPitch; SkFontStyle style; SkString name; if (!fScanner.scanFont(stream, data->getIndex(), &name, &style, &isFixedPitch, nullptr)) { return nullptr; } return new SkTypeface_AndroidStream(data, style, isFixedPitch, name); } SkTypeface* onLegacyCreateTypeface(const char familyName[], SkFontStyle style) const override { if (familyName) { // On Android, we must return nullptr when we can't find the requested // named typeface so that the system/app can provide their own recovery // mechanism. 
On other platforms we'd provide a typeface from the // default family instead. return this->onMatchFamilyStyle(familyName, style); } return fDefaultFamily->matchStyle(style); } private: SkTypeface_FreeType::Scanner fScanner; SkTArray<SkAutoTUnref<SkFontStyleSet_Android>, true> fFontStyleSets; SkFontStyleSet* fDefaultFamily; SkTypeface* fDefaultTypeface; SkTDArray<NameToFamily> fNameToFamilyMap; SkTDArray<NameToFamily> fFallbackNameToFamilyMap; void buildNameToFamilyMap(SkTDArray<FontFamily*> families, const bool isolated) { for (int i = 0; i < families.count(); i++) { FontFamily& family = *families[i]; SkTDArray<NameToFamily>* nameToFamily = &fNameToFamilyMap; if (family.fIsFallbackFont) { nameToFamily = &fFallbackNameToFamilyMap; if (0 == family.fNames.count()) { SkString& fallbackName = family.fNames.push_back(); fallbackName.printf("%.2x##fallback", i); } } SkFontStyleSet_Android* newSet = new SkFontStyleSet_Android(family, fScanner, isolated); if (0 == newSet->count()) { delete newSet; continue; } fFontStyleSets.push_back().reset(newSet); for (int j = 0; j < family.fNames.count(); j++) { NameToFamily* nextEntry = nameToFamily->append(); new (&nextEntry->name) SkString(family.fNames[j]); nextEntry->styleSet = newSet; } } } void findDefaultFont() { SkASSERT(!fFontStyleSets.empty()); static const char* gDefaultNames[] = { "sans-serif" }; for (size_t i = 0; i < SK_ARRAY_COUNT(gDefaultNames); ++i) { SkFontStyleSet* set = this->onMatchFamily(gDefaultNames[i]); if (nullptr == set) { continue; } SkTypeface* tf = set->matchStyle(SkFontStyle()); if (nullptr == tf) { continue; } fDefaultFamily = set; fDefaultTypeface = tf; break; } if (nullptr == fDefaultTypeface) { fDefaultFamily = fFontStyleSets[0]; fDefaultTypeface = fDefaultFamily->createTypeface(0); } SkASSERT(fDefaultFamily); SkASSERT(fDefaultTypeface); } typedef SkFontMgr INHERITED; }; #ifdef SK_DEBUG static char const * const gSystemFontUseStrings[] = { "OnlyCustom", "PreferCustom", "PreferSystem" }; #endif 
SkFontMgr* SkFontMgr_New_Android(const SkFontMgr_Android_CustomFonts* custom) { if (custom) { SkASSERT(0 <= custom->fSystemFontUse); SkASSERT(custom->fSystemFontUse < SK_ARRAY_COUNT(gSystemFontUseStrings)); SkDEBUGF(("SystemFontUse: %s BasePath: %s Fonts: %s FallbackFonts: %s\n", gSystemFontUseStrings[custom->fSystemFontUse], custom->fBasePath, custom->fFontsXml, custom->fFallbackFontsXml)); } return new SkFontMgr_Android(custom); }
tmpvar/skia.cc
src/ports/SkFontMgr_android.cpp
C++
apache-2.0
21,989
using System; using System.Collections.Concurrent; using System.Linq; using System.Threading; using System.Threading.Tasks; namespace Cassandra.Mapping.Statements { /// <summary> /// Creates statements from CQL that can be executed with the C* driver. /// </summary> internal class StatementFactory { private readonly ConcurrentDictionary<string, Task<PreparedStatement>> _statementCache; private static readonly Logger Logger = new Logger(typeof(StatementFactory)); private int _statementCacheCount; public int MaxPreparedStatementsThreshold { get; set; } public StatementFactory() { MaxPreparedStatementsThreshold = 500; _statementCache = new ConcurrentDictionary<string, Task<PreparedStatement>>(); } /// <summary> /// Given a <see cref="Cql"/>, it creates the corresponding <see cref="Statement"/>. /// </summary> /// <param name="session">The current session.</param> /// <param name="cql">The cql query, parameter and options.</param> /// <param name="forceNoPrepare">When defined, it's used to override the CQL options behavior.</param> public async Task<Statement> GetStatementAsync(ISession session, Cql cql, bool? forceNoPrepare = null) { var noPrepare = forceNoPrepare ?? cql.QueryOptions.NoPrepare; if (noPrepare) { // Use a SimpleStatement if we're not supposed to prepare var statement = new SimpleStatement(cql.Statement, cql.Arguments); SetStatementProperties(statement, cql); return statement; } var ps = await _statementCache .GetOrAdd(cql.Statement, query => PrepareNew(query, session)) .ConfigureAwait(false); var boundStatement = ps.Bind(cql.Arguments); SetStatementProperties(boundStatement, cql); return boundStatement; } private Task<PreparedStatement> PrepareNew(string query, ISession session) { var count = Interlocked.Increment(ref _statementCacheCount); if (count > MaxPreparedStatementsThreshold) { Logger.Warning("The prepared statement cache contains {0} queries. This issue is probably due " + "to misuse of the driver, you should use parameter markers for queries. 
You can " + "configure this warning threshold using " + "MappingConfiguration.SetMaxStatementPreparedThreshold() method.", count); } return session.PrepareAsync(query); } private void SetStatementProperties(IStatement stmt, Cql cql) { cql.QueryOptions.CopyOptionsToStatement(stmt); stmt.SetAutoPage(cql.AutoPage); } public Statement GetStatement(ISession session, Cql cql) { // Just use async version's result return GetStatementAsync(session, cql).Result; } public async Task<BatchStatement> GetBatchStatementAsync(ISession session, ICqlBatch cqlBatch) { // Get all the statements async in parallel, then add to batch var childStatements = await Task .WhenAll(cqlBatch.Statements.Select(cql => GetStatementAsync(session, cql, cqlBatch.Options.NoPrepare))) .ConfigureAwait(false); var statement = new BatchStatement().SetBatchType(cqlBatch.BatchType); cqlBatch.Options.CopyOptionsToStatement(statement); foreach (var stmt in childStatements) { statement.Add(stmt); } return statement; } } }
trurl123/csharp-driver
src/Cassandra/Mapping/Statements/StatementFactory.cs
C#
apache-2.0
3,826
// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.firebase.perf.metrics; import static com.google.common.truth.Truth.assertThat; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import static org.mockito.MockitoAnnotations.initMocks; import com.google.firebase.perf.FirebasePerformance.HttpMethod; import com.google.firebase.perf.FirebasePerformanceTestBase; import com.google.firebase.perf.transport.TransportManager; import com.google.firebase.perf.util.Constants; import com.google.firebase.perf.util.Timer; import com.google.firebase.perf.v1.ApplicationProcessState; import com.google.firebase.perf.v1.NetworkRequestMetric; import java.util.Map; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import org.mockito.ArgumentMatchers; import org.mockito.Captor; import org.mockito.Mock; import org.robolectric.RobolectricTestRunner; /** Unit tests for {@link HttpMetric}. 
*/ @RunWith(RobolectricTestRunner.class) public class HttpMetricTest extends FirebasePerformanceTestBase { @Mock private TransportManager transportManager; @Mock private Timer timer; @Captor private ArgumentCaptor<NetworkRequestMetric> networkArgumentCaptor; @Before public void setUp() { initMocks(this); when(timer.getMicros()).thenReturn(1000L); when(timer.getDurationMicros()).thenReturn(2000L).thenReturn(3000L); } @Test public void startStop() { HttpMetric metric = new HttpMetric("https://www.google.com/", HttpMethod.GET, transportManager, timer); metric.start(); metric.stop(); verify(transportManager) .log( networkArgumentCaptor.capture(), ArgumentMatchers.nullable(ApplicationProcessState.class)); verifyMetric(networkArgumentCaptor.getValue()); } @Test public void setHttpResponseCode() { HttpMetric metric = new HttpMetric("https://www.google.com/", HttpMethod.GET, transportManager, timer); metric.start(); metric.setHttpResponseCode(200); metric.stop(); verify(transportManager) .log( networkArgumentCaptor.capture(), ArgumentMatchers.nullable(ApplicationProcessState.class)); NetworkRequestMetric metricValue = networkArgumentCaptor.getValue(); verifyMetric(metricValue); assertThat(metricValue.getHttpResponseCode()).isEqualTo(200); } @Test public void setRequestSize() { HttpMetric metric = new HttpMetric("https://www.google.com/", HttpMethod.GET, transportManager, timer); metric.start(); metric.setRequestPayloadSize(256); metric.stop(); verify(transportManager) .log( networkArgumentCaptor.capture(), ArgumentMatchers.nullable(ApplicationProcessState.class)); NetworkRequestMetric metricValue = networkArgumentCaptor.getValue(); verifyMetric(metricValue); assertThat(metricValue.getRequestPayloadBytes()).isEqualTo(256); } @Test public void setResponseSize() { HttpMetric metric = new HttpMetric("https://www.google.com/", HttpMethod.GET, transportManager, timer); metric.start(); metric.setResponsePayloadSize(256); metric.stop(); verify(transportManager) .log( 
networkArgumentCaptor.capture(), ArgumentMatchers.nullable(ApplicationProcessState.class)); NetworkRequestMetric metricValue = networkArgumentCaptor.getValue(); verifyMetric(metricValue); assertThat(metricValue.getResponsePayloadBytes()).isEqualTo(256); } @Test public void setResponseContentType() { HttpMetric metric = new HttpMetric("https://www.google.com/", HttpMethod.GET, transportManager, timer); metric.start(); metric.setResponseContentType("text/html"); metric.stop(); verify(transportManager) .log( networkArgumentCaptor.capture(), ArgumentMatchers.nullable(ApplicationProcessState.class)); NetworkRequestMetric metricValue = networkArgumentCaptor.getValue(); verifyMetric(metricValue); assertThat(metricValue.getResponseContentType()).isEqualTo("text/html"); } @Test public void markRequestComplete() { HttpMetric metric = new HttpMetric("https://www.google.com/", HttpMethod.GET, transportManager, timer); metric.start(); metric.markRequestComplete(); metric.stop(); verify(transportManager) .log( networkArgumentCaptor.capture(), ArgumentMatchers.nullable(ApplicationProcessState.class)); NetworkRequestMetric metricValue = networkArgumentCaptor.getValue(); assertThat(metricValue.getUrl()).isEqualTo("https://www.google.com/"); assertThat(metricValue.getHttpMethod()) .isEqualTo(com.google.firebase.perf.v1.NetworkRequestMetric.HttpMethod.GET); assertThat(metricValue.getClientStartTimeUs()).isEqualTo(1000); assertThat(metricValue.getTimeToRequestCompletedUs()).isEqualTo(2000); assertThat(metricValue.getTimeToResponseCompletedUs()).isEqualTo(3000); } @Test public void markResponseStart() { HttpMetric metric = new HttpMetric("https://www.google.com/", HttpMethod.GET, transportManager, timer); metric.start(); metric.markResponseStart(); metric.stop(); verify(transportManager) .log( networkArgumentCaptor.capture(), ArgumentMatchers.nullable(ApplicationProcessState.class)); NetworkRequestMetric metricValue = networkArgumentCaptor.getValue(); 
assertThat(metricValue.getUrl()).isEqualTo("https://www.google.com/"); assertThat(metricValue.getHttpMethod()) .isEqualTo(com.google.firebase.perf.v1.NetworkRequestMetric.HttpMethod.GET); assertThat(metricValue.getClientStartTimeUs()).isEqualTo(1000); assertThat(metricValue.getTimeToResponseInitiatedUs()).isEqualTo(2000); assertThat(metricValue.getTimeToResponseCompletedUs()).isEqualTo(3000); } @Test public void putAttribute() { HttpMetric metric = new HttpMetric("https://www.google.com/", HttpMethod.GET, transportManager, timer); metric.start(); metric.putAttribute("attr1", "free"); metric.stop(); verify(transportManager) .log( networkArgumentCaptor.capture(), ArgumentMatchers.nullable(ApplicationProcessState.class)); NetworkRequestMetric metricValue = networkArgumentCaptor.getValue(); assertThat(metricValue.getUrl()).isEqualTo("https://www.google.com/"); assertThat(metricValue.getHttpMethod()) .isEqualTo(com.google.firebase.perf.v1.NetworkRequestMetric.HttpMethod.GET); assertThat(metricValue.getClientStartTimeUs()).isEqualTo(1000); assertThat(metricValue.getTimeToResponseCompletedUs()).isEqualTo(2000); assertThat(metricValue.getCustomAttributesCount()).isEqualTo(1); assertThat(metricValue.getCustomAttributesMap()).containsEntry("attr1", "free"); } @Test public void putInvalidAttribute() { HttpMetric metric = new HttpMetric("https://www.google.com/", HttpMethod.GET, transportManager, timer); metric.start(); metric.putAttribute("_invalidattr1", "free"); metric.stop(); verify(transportManager) .log( networkArgumentCaptor.capture(), ArgumentMatchers.nullable(ApplicationProcessState.class)); NetworkRequestMetric metricValue = networkArgumentCaptor.getValue(); assertThat(metricValue.getUrl()).isEqualTo("https://www.google.com/"); assertThat(metricValue.getHttpMethod()) .isEqualTo(com.google.firebase.perf.v1.NetworkRequestMetric.HttpMethod.GET); assertThat(metricValue.getClientStartTimeUs()).isEqualTo(1000); 
assertThat(metricValue.getTimeToResponseCompletedUs()).isEqualTo(2000); assertThat(metricValue.getCustomAttributesCount()).isEqualTo(0); } @Test public void putAttributeAfterHttpMetricIsStopped() { HttpMetric metric = new HttpMetric("https://www.google.com/", HttpMethod.GET, transportManager, timer); metric.start(); metric.stop(); metric.putAttribute("attr1", "free"); verify(transportManager) .log( networkArgumentCaptor.capture(), ArgumentMatchers.nullable(ApplicationProcessState.class)); NetworkRequestMetric metricValue = networkArgumentCaptor.getValue(); assertThat(metricValue.getUrl()).isEqualTo("https://www.google.com/"); assertThat(metricValue.getHttpMethod()) .isEqualTo(com.google.firebase.perf.v1.NetworkRequestMetric.HttpMethod.GET); assertThat(metricValue.getClientStartTimeUs()).isEqualTo(1000); assertThat(metricValue.getTimeToResponseCompletedUs()).isEqualTo(2000); assertThat(metricValue.getCustomAttributesCount()).isEqualTo(0); } @Test public void removeAttribute() { HttpMetric metric = new HttpMetric("https://www.google.com/", HttpMethod.GET, transportManager, timer); metric.start(); metric.putAttribute("attr1", "free"); Map<String, String> attributes = metric.getAttributes(); assertThat(attributes.size()).isEqualTo(1); metric.removeAttribute("attr1"); attributes = metric.getAttributes(); assertThat(attributes.size()).isEqualTo(0); metric.stop(); verify(transportManager) .log( networkArgumentCaptor.capture(), ArgumentMatchers.nullable(ApplicationProcessState.class)); NetworkRequestMetric metricValue = networkArgumentCaptor.getValue(); assertThat(metricValue.getUrl()).isEqualTo("https://www.google.com/"); assertThat(metricValue.getHttpMethod()) .isEqualTo(com.google.firebase.perf.v1.NetworkRequestMetric.HttpMethod.GET); assertThat(metricValue.getClientStartTimeUs()).isEqualTo(1000); assertThat(metricValue.getTimeToResponseCompletedUs()).isEqualTo(2000); assertThat(metricValue.getCustomAttributesCount()).isEqualTo(0); } @Test public void 
removeAttributeAfterStopped() { HttpMetric metric = new HttpMetric("https://www.google.com/", HttpMethod.GET, transportManager, timer); metric.start(); metric.putAttribute("attr1", "free"); metric.stop(); metric.removeAttribute("attr1"); verify(transportManager) .log( networkArgumentCaptor.capture(), ArgumentMatchers.nullable(ApplicationProcessState.class)); NetworkRequestMetric metricValue = networkArgumentCaptor.getValue(); assertThat(metricValue.getUrl()).isEqualTo("https://www.google.com/"); assertThat(metricValue.getHttpMethod()) .isEqualTo(com.google.firebase.perf.v1.NetworkRequestMetric.HttpMethod.GET); assertThat(metricValue.getClientStartTimeUs()).isEqualTo(1000); assertThat(metricValue.getTimeToResponseCompletedUs()).isEqualTo(2000); assertThat(metricValue.getCustomAttributesCount()).isEqualTo(1); assertThat(metricValue.getCustomAttributesMap()).containsEntry("attr1", "free"); } @Test public void addAttributeWithSameName() { HttpMetric metric = new HttpMetric("https://www.google.com/", HttpMethod.GET, transportManager, timer); metric.start(); metric.putAttribute("attr1", "free"); metric.putAttribute("attr1", "paid"); metric.stop(); verify(transportManager) .log( networkArgumentCaptor.capture(), ArgumentMatchers.nullable(ApplicationProcessState.class)); NetworkRequestMetric metricValue = networkArgumentCaptor.getValue(); assertThat(metricValue.getUrl()).isEqualTo("https://www.google.com/"); assertThat(metricValue.getHttpMethod()) .isEqualTo(com.google.firebase.perf.v1.NetworkRequestMetric.HttpMethod.GET); assertThat(metricValue.getClientStartTimeUs()).isEqualTo(1000); assertThat(metricValue.getTimeToResponseCompletedUs()).isEqualTo(2000); assertThat(metricValue.getCustomAttributesCount()).isEqualTo(1); assertThat(metricValue.getCustomAttributesMap()).containsEntry("attr1", "paid"); } @Test public void testMaxAttributes() { HttpMetric metric = new HttpMetric("https://www.google.com/", HttpMethod.GET, transportManager, timer); metric.start(); for (int i = 0; 
i <= Constants.MAX_TRACE_CUSTOM_ATTRIBUTES; i++) { metric.putAttribute("dim" + i, "value" + i); } for (int i = 0; i <= Constants.MAX_TRACE_CUSTOM_ATTRIBUTES; i++) { metric.putAttribute("dim" + i, "value" + (i + 1)); } metric.stop(); verify(transportManager) .log( networkArgumentCaptor.capture(), ArgumentMatchers.nullable(ApplicationProcessState.class)); NetworkRequestMetric metricValue = networkArgumentCaptor.getValue(); assertThat(metricValue.getUrl()).isEqualTo("https://www.google.com/"); assertThat(metricValue.getHttpMethod()) .isEqualTo(com.google.firebase.perf.v1.NetworkRequestMetric.HttpMethod.GET); assertThat(metricValue.getClientStartTimeUs()).isEqualTo(1000); assertThat(metricValue.getTimeToResponseCompletedUs()).isEqualTo(2000); assertThat(metricValue.getCustomAttributesCount()) .isEqualTo(Constants.MAX_TRACE_CUSTOM_ATTRIBUTES); for (int i = 0; i < Constants.MAX_TRACE_CUSTOM_ATTRIBUTES; i++) { String attributeValue = "value" + (i + 1); String attributeKey = "dim" + i; assertThat(metric.getAttribute(attributeKey)).isEqualTo(attributeValue); } } @Test public void testMoreThanMaxAttributes() { HttpMetric metric = new HttpMetric("https://www.google.com/", HttpMethod.GET, transportManager, timer); metric.start(); for (int i = 0; i <= Constants.MAX_TRACE_CUSTOM_ATTRIBUTES; i++) { metric.putAttribute("dim" + i, "value" + i); } metric.stop(); verify(transportManager) .log( networkArgumentCaptor.capture(), ArgumentMatchers.nullable(ApplicationProcessState.class)); NetworkRequestMetric metricValue = networkArgumentCaptor.getValue(); assertThat(metricValue.getUrl()).isEqualTo("https://www.google.com/"); assertThat(metricValue.getHttpMethod()) .isEqualTo(com.google.firebase.perf.v1.NetworkRequestMetric.HttpMethod.GET); assertThat(metricValue.getClientStartTimeUs()).isEqualTo(1000); assertThat(metricValue.getTimeToResponseCompletedUs()).isEqualTo(2000); assertThat(metricValue.getCustomAttributesCount()) .isEqualTo(Constants.MAX_TRACE_CUSTOM_ATTRIBUTES); for (int i = 
0; i < Constants.MAX_TRACE_CUSTOM_ATTRIBUTES; i++) { String attributeValue = "value" + i; String attributeKey = "dim" + i; assertThat(metric.getAttribute(attributeKey)).isEqualTo(attributeValue); } assertThat(metricValue.getCustomAttributesMap()).doesNotContainKey("attr6"); } private void verifyMetric(NetworkRequestMetric metricValue) { assertThat(metricValue.getUrl()).isEqualTo("https://www.google.com/"); assertThat(metricValue.getHttpMethod()) .isEqualTo(com.google.firebase.perf.v1.NetworkRequestMetric.HttpMethod.GET); assertThat(metricValue.getClientStartTimeUs()).isEqualTo(1000); assertThat(metricValue.getTimeToResponseCompletedUs()).isEqualTo(2000); } }
firebase/firebase-android-sdk
firebase-perf/src/test/java/com/google/firebase/perf/metrics/HttpMetricTest.java
Java
apache-2.0
15,643
package nl.futureedge.simple.jmx.client; import org.junit.Test; public class ClientMBeanServerConnectionFactoryTest { @Test public void test() { } }
willemsrb/simple-jmx
src/test/java/nl/futureedge/simple/jmx/client/ClientMBeanServerConnectionFactoryTest.java
Java
apache-2.0
166
// Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/cloud/dialogflow/v2beta1/session.proto package com.google.cloud.dialogflow.v2beta1; public interface TextInputOrBuilder extends // @@protoc_insertion_point(interface_extends:google.cloud.dialogflow.v2beta1.TextInput) com.google.protobuf.MessageOrBuilder { /** * * * <pre> * Required. The UTF-8 encoded natural language text to be processed. * Text length must not exceed 256 bytes. * </pre> * * <code>string text = 1;</code> */ java.lang.String getText(); /** * * * <pre> * Required. The UTF-8 encoded natural language text to be processed. * Text length must not exceed 256 bytes. * </pre> * * <code>string text = 1;</code> */ com.google.protobuf.ByteString getTextBytes(); /** * * * <pre> * Required. The language of this conversational query. See [Language * Support](https://dialogflow.com/docs/languages) for a list of the * currently supported language codes. Note that queries in the same session * do not necessarily need to specify the same language. * </pre> * * <code>string language_code = 2;</code> */ java.lang.String getLanguageCode(); /** * * * <pre> * Required. The language of this conversational query. See [Language * Support](https://dialogflow.com/docs/languages) for a list of the * currently supported language codes. Note that queries in the same session * do not necessarily need to specify the same language. * </pre> * * <code>string language_code = 2;</code> */ com.google.protobuf.ByteString getLanguageCodeBytes(); }
vam-google/google-cloud-java
google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/TextInputOrBuilder.java
Java
apache-2.0
1,678
/* * Copyright 2015-2017 Richard Linsdale. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package uk.theretiredprogrammer.nbpcglibrary.expressionparserandevaluate.parse; import java.util.ArrayList; import java.util.List; import uk.theretiredprogrammer.nbpcglibrary.expressionparserandevaluate.LanguageDefinition; import uk.theretiredprogrammer.nbpcglibrary.expressionparserandevaluate.parsetree.ParseTree; import uk.theretiredprogrammer.nbpcglibrary.expressionparserandevaluate.tokens.OperatorToken; import uk.theretiredprogrammer.nbpcglibrary.expressionparserandevaluate.tokens.Token; /** * The Syntax Analyser for an Operator precedence expression. * * @author Richard Linsdale (richard at theretiredprogrammer.uk) */ public class SyntaxAnalyser { /** * The action to be taken following operator comparison */ public enum TestResult { /** * Insert the object (push?) */ INSERT, /** * Parse finished */ FINISH, /** * Continue processing */ CONTINUE }; private final LanguageDefinition languagedefinition; private final List<SAItem> stack = new ArrayList<>(); private LexicalAnalyser lex; /** * Constructor. * * @param languagedefinition the expression language definition */ public SyntaxAnalyser(LanguageDefinition languagedefinition) { this.languagedefinition = languagedefinition; } /** * Complete the syntax analysis of an expression. 
* * @param expression the expression string * @param exactMatch true if exact match parameter handling is required * @return the expression Parse Tree */ public ParseTree analysis(String expression, Boolean exactMatch) { lex = new LexicalAnalyser(languagedefinition, expression, exactMatch); Token t = lex.nextToken(); while (true) { if (!(t instanceof OperatorToken)) { lex.throwParseException("expecting Operator; found a terminal"); } OperatorToken ot = (OperatorToken) t; TestResult res = testPrecidence(ot); if (res == TestResult.FINISH) { return stack.get(0).pt; } if (res == TestResult.INSERT) { SAItem si = new SAItem(); si.op = ot; t = lex.nextToken(); if (t instanceof TerminalToken) { TerminalToken tt = (TerminalToken) t; si.pt = tt.getParseTree(); t = lex.nextToken(); } stack.add(si); } } } private TestResult testPrecidence(OperatorToken ot) { int l = stack.size() - 1; if (l >= 0) { switch (languagedefinition.getOperatorPrecedence(ot, stack.get(l).op)) { case COMPLETE: return TestResult.FINISH; case END: extractParseTree(); return testPrecidence(ot); case START: break; // do nothing case IN: break; // do nothing case ERROR: lex.throwParseException("illegal syntax in expression"); } } return TestResult.INSERT; } private void extractParseTree() { int l = stack.size() - 1; int s = l; while (s > 0) { SAItem higher = stack.get(s); SAItem lower = stack.get(s - 1); switch (languagedefinition.getOperatorPrecedence(higher.op, lower.op)) { case START: lower.pt = higher.op.getParseTree(lower.pt, higher.pt); for (int i = l; i >= s; i--) { stack.remove(i);//pop } return; case IN: s--; break; case ERROR: lex.throwParseException("illegal operator precidence during handle extraction"); } } lex.throwParseException("illegal operator precidence during handle extraction - no start handle found"); } private class SAItem { public OperatorToken op; public ParseTree pt; } }
Richard-Linsdale/nbpcglibrary
expressionparserandevaluate/src/main/java/uk/theretiredprogrammer/nbpcglibrary/expressionparserandevaluate/parse/SyntaxAnalyser.java
Java
apache-2.0
4,887
<?php /** * contentinum - accessibility websites * * LICENSE * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * * @category Mcevent * @package Form * @author Michael Jochum, michael.jochum@jochum-mediaservices.de * @copyright Copyright (c) 2009-2013 jochum-mediaservices, Katja Jochum (http://www.jochum-mediaservices.de) * @license http://www.opensource.org/licenses/bsd-license * @since contentinum version 5.0 * @link https://github.com/Mikel1961/contentinum-components * @version 1.0.0 */ namespace Mcevent\Form; use ContentinumComponents\Forms\AbstractForms; /** * contentinum mcwork form fieldtypes * * @author Michael Jochum, michael.jochum@jochum-mediaservices.de */ class Dates extends AbstractForms { /** * User friendly function for tab header * @return string */ protected function tabHeader() { $translation = $this->getServiceLocator()->get('translator'); $html = '<dl class="tabs" data-tab="data-tab">';// tab header start $html .= '<dd class="active"><a href="#fieldsetEvent">' . $translation->translate('Event') . '</a></dd>';// tab1 $html .= '<dd><a href="#fieldsetDescription">' . $translation->translate('Description and Images') . '</a></dd>'; $html .= '<dd><a href="#fieldsetAddress">' . $translation->translate('Event address') . 
'</a></dd>';// tab2 $html .= '<dd><a href="#fieldsetResponsibility">Teilnehmeranmeldung, Verantwortlicher Organisator</a></dd>';// tab2 $html .= '</dl><div class="tabs-content">';// finish and start tab content area return $html; } /** * form field elements * * @see \ContentinumComponents\Forms\AbstractForms::elements() */ public function elements() { return array( array( 'spec' => array( 'name' => 'formpreftab', 'options' => array( 'fieldset' => array( 'nofieldset' => 1 ) ), 'type' => 'ContentinumComponents\Forms\Elements\Note', 'attributes' => array( 'id' => 'formpreftab', 'value' => $this->tabHeader(), ) ) ), array( 'spec' => array( 'name' => 'formRowStartfieldsetName', 'options' => array( ), 'type' => 'ContentinumComponents\Forms\Elements\Note', 'attributes' => array( 'id' => 'formColumStartfieldsetName', 'value' => '<div class="row"><div class="medium-6 columns">' ) ) ), array( 'spec' => array( 'name' => 'calendar', 'required' => true, 'options' => array( 'label' => 'Calendar', 'empty_option' => 'Please select', 'value_options' => $this->getSelectOptions('calendar'), 'deco-row' => $this->getDecorators(self::DECO_ELM_ROW), ), 'type' => 'Select', 'attributes' => array( 'required' => 'required', 'id' => 'calendar' ) ) ), array( 'spec' => array( 'name' => 'location', 'required' => false, 'options' => array( 'label' => 'Veranstaltungsort', 'deco-row' => $this->getDecorators(self::DECO_ELM_ROW), 'description' => 'Veranstaltungsort hier eintragen, wenn dieser nicht in Organisationen angelegt ist', ), 'type' => 'Text', 'attributes' => array( 'id' => 'location' ) ) ), array( 'spec' => array( 'name' => 'organizer', 'required' => false, 'options' => array( 'label' => 'Veranstalter', 'deco-row' => $this->getDecorators(self::DECO_ELM_ROW), 'description' => 'Veranstalter hier eintragen, wenn dieser nicht in Organisationen angelegt ist', ), 'type' => 'Text', 'attributes' => array( 'id' => 'organizer' ) ) ), array( 'spec' => array( 'name' => 'infoUrl', 'required' => false, 
'options' => array( 'label' => 'Mehr Information unter (URL)', 'deco-row' => $this->getDecorators(self::DECO_ELM_ROW), 'description' => 'Auf eine Webseite mit weiteren Informationen verweisen, bitte mit http:// oder https:// angeben', ), 'type' => 'Text', 'attributes' => array( 'id' => 'infoUrl' ) ) ), array( 'spec' => array( 'name' => 'formRowMiddlefieldsetName', 'options' => array( ), 'type' => 'ContentinumComponents\Forms\Elements\Note', 'attributes' => array( 'id' => 'formColumMiddlefieldsetName', 'value' => '</div><div class="medium-6 columns">' ) ) ), array( 'spec' => array( 'name' => 'summary', 'required' => true, 'options' => array( 'label' => 'Summary', 'deco-row' => $this->getDecorators(self::DECO_ELM_ROW), ), 'type' => 'Text', 'attributes' => array( 'required' => 'required', 'id' => 'summary' ) ) ), array( 'spec' => array( 'name' => 'account', 'required' => false, 'options' => array( 'label' => 'Veranstaltungsort auswählen', 'empty_option' => 'Please select', 'value_options' => $this->getOptions('mcevent_locations'), 'description' => 'Überschreibt das Feld Veranstaltungsort', 'deco-row' => $this->getDecorators(self::DECO_ELM_ROW) ), 'type' => 'Select', 'attributes' => array( 'id' => 'account', 'class' => 'chosen-select' ) ) ), array( 'spec' => array( 'name' => 'organizerId', 'required' => false, 'options' => array( 'label' => 'Veranstalter auswählen', 'empty_option' => 'Please select', 'value_options' => $this->getOptions('mcevent_organizer'), 'description' => 'Überschreibt das Feld Veranstaltunger', 'deco-row' => $this->getDecorators(self::DECO_ELM_ROW) ), 'type' => 'Select', 'attributes' => array( 'id' => 'organizerId', 'class' => 'chosen-select' ) ) ), array( 'spec' => array( 'name' => 'formRowStartDate', 'options' => array( ), 'type' => 'ContentinumComponents\Forms\Elements\Note', 'attributes' => array( 'id' => 'formColumStartDate', 'value' => '<div class="row"><div class="medium-6 columns">' ) ) ), array( 'spec' => array( 'name' => 'dateStart', 
'required' => true, 'options' => array( 'label' => 'Beginn Veranstaltung', 'deco-row' => $this->getDecorators(self::DECO_ELM_ROW) ), 'type' => 'Text', 'attributes' => array( 'required' => 'required', 'id' => 'dateStart', ) ) ), array( 'spec' => array( 'name' => 'formRowMiddleDate', 'options' => array( ), 'type' => 'ContentinumComponents\Forms\Elements\Note', 'attributes' => array( 'id' => 'formRowMiddleDate', 'value' => '</div><div class="medium-6 columns">', ) ) ) , array( 'spec' => array( 'name' => 'dateEnd', 'required' => false, 'options' => array( 'label' => 'Ende Veranstaltung', 'deco-row' => $this->getDecorators(self::DECO_ELM_ROW), ), 'type' => 'Text', 'attributes' => array( 'id' => 'dateEnd', ) ) ), array( 'spec' => array( 'name' => 'formRowEndDate', 'options' => array( ), 'type' => 'ContentinumComponents\Forms\Elements\Note', 'attributes' => array( 'id' => 'formColumnEndDate', 'value' => '</div></div>' ) ) ) , array( 'spec' => array( 'name' => 'formRowEndfieldsetName', 'options' => array( ), 'type' => 'ContentinumComponents\Forms\Elements\Note', 'attributes' => array( 'id' => 'formColumEndfieldsetName', 'value' => '</div></div>' ) ) ), array( 'spec' => array( 'name' => 'webFilesId', 'required' => false, 'options' => array( 'label' => 'Add a file attachment', 'empty_option' => 'Please select', 'value_options' => $this->getServiceLocator()->get('mcwork_public_pdf'), 'deco-row' => $this->getDecorators(self::DECO_ELM_ROW), ), 'type' => 'Select', 'attributes' => array( 'id' => 'webFilesId', 'class' => 'chosen-select', 'value' => 1, ) ) ), array( 'spec' => array( 'name' => 'downloadLabel', 'required' => false, 'options' => array( 'label' => 'Label Dateidownload', 'deco-row' => $this->getDecorators(self::DECO_ELM_ROW), 'fieldset' => array( 'legend' => 'Event', 'attributes' => array( 'class' => 'content active', 'id' => 'fieldsetEvent'// tab1 ) ) ), 'type' => 'Text', 'attributes' => array( 'id' => 'downloadLabel' ) ) ), array( 'spec' => array( 'name' => 
'webMediasId', 'required' => false, 'options' => array( 'label' => 'Add images', 'empty_option' => 'Please select', 'value_options' => $this->getServiceLocator()->get('mcwork_public_media'), 'deco-row' => $this->getDecorators(self::DECO_ELM_ROW), ), 'type' => 'Select', 'attributes' => array( 'id' => 'webMediasId', 'class' => 'chosen-select', 'value' => 1, ) ) ), array( 'spec' => array( 'name' => 'description', 'required' => false, 'options' => array( 'label' => 'Description', 'deco-row' => $this->getDecorators(self::DECO_ELM_ROW), 'fieldset' => array( 'legend' => 'Description and Files', 'attributes' => array( 'class' => 'content smalleditor', 'id' => 'fieldsetDescription'// tab1 ) ) ), 'type' => 'Textarea', 'attributes' => array( 'rows' => '4', 'id' => 'description', 'class' => 'datedescription', ) ) ), array( 'spec' => array( 'name' => 'locationAddresse', 'required' => false, 'options' => array( 'label' => 'Veranstaltungsort Straße', 'deco-row' => $this->getDecorators(self::DECO_ELM_ROW), ), 'type' => 'Text', 'attributes' => array( 'id' => 'locationAddresse' ) ) ), array( 'spec' => array( 'name' => 'locationZipcode', 'required' => false, 'options' => array( 'label' => 'Veranstaltungsort Postleitzahl', 'deco-row' => $this->getDecorators(self::DECO_ELM_ROW), ), 'type' => 'Text', 'attributes' => array( 'id' => 'locationZipcode' ) ) ), array( 'spec' => array( 'name' => 'locationCity', 'required' => false, 'options' => array( 'label' => 'Veranstaltungsort Stadt', 'deco-row' => $this->getDecorators(self::DECO_ELM_ROW), 'fieldset' => array( 'legend' => 'Event address', 'attributes' => array( 'class' => 'content', 'id' => 'fieldsetAddress'// tab1 ) ) ), 'type' => 'Text', 'attributes' => array( 'id' => 'locationCity' ) ) ), array( 'spec' => array( 'name' => 'configureIdent', 'required' => true, 'options' => array( 'label' => 'Konfiguration Teilnehmeranmeldung', 'value_options' => $this->getServiceLocator()->get('mcevent_configuration'), 'deco-row' => 
$this->getDecorators(self::DECO_ELM_ROW), ), 'type' => 'Select', 'attributes' => array( 'id' => 'configureIdent', ) ) ), array( 'spec' => array( 'name' => 'applicantInt', 'required' => false, 'options' => array( 'label' => 'Anzahl intern', 'deco-row' => $this->getDecorators(self::DECO_ELM_ROW), 'description' => 'Anzahl der zu vergebenen Teilnehmer intern, 0 keine Begrenzung' ), 'type' => 'Text', 'attributes' => array( 'id' => 'applicantInt', 'value' => 0 ) ) ), array( 'spec' => array( 'name' => 'applicantExt', 'required' => false, 'options' => array( 'label' => 'Anzahl öffentlich', 'deco-row' => $this->getDecorators(self::DECO_ELM_ROW), 'description' => 'Anzahl der zu vergebenen Teilnehmer extern, 0 keine Begrenzung', 'fieldset' => array( 'legend' => 'Verantwortlicher Organisator', 'attributes' => array( 'class' => 'content', 'id' => 'fieldsetResponsibility'// tab1 ) ) ), 'type' => 'Text', 'attributes' => array( 'id' => 'applicantExt', 'value' => 0 ) ) ), array( 'spec' => array( 'name' => 'formtabend', 'options' => array( 'fieldset' => array( 'nofieldset' => 1 ) ), 'type' => 'ContentinumComponents\Forms\Elements\Note', 'attributes' => array( 'id' => 'formtabend', 'value' => '</div>' ) ) ), ); } /** * form input filter and validation * * @see \ContentinumComponents\Forms\AbstractForms::filter() */ public function filter() { return array( 'account' => array( 'required' => false, ), 'organizerId' => array( 'required' => false, ), 'webFilesId' => array( 'required' => false, ), 'webMediasId' => array( 'required' => false, ), 'configureIdent' => array( 'required' => false, ), ); } /** * initiation and get form * * @see \ContentinumComponents\Forms\AbstractForms::getForm() */ public function getForm() { return $this->factory->createForm(array( 'hydrator' => 'Zend\Stdlib\Hydrator\ArraySerializable', 'elements' => $this->elements(), 'input_filter' => $this->filter() )); } }
jochum-mediaservices/contentinum5.5
module/Mcevent/src/Mcevent/Form/Dates.php
PHP
apache-2.0
22,247
#pragma once //glm #include <glm\glm.hpp> #include <glm\gtx\quaternion.hpp> namespace naga { typedef char i8; typedef unsigned char u8; typedef short i16; typedef unsigned short u16; typedef int i32; typedef unsigned int u32; typedef long long i64; typedef unsigned long long u64; typedef float f32; typedef double f64; typedef glm::vec2 vec2; typedef glm::ivec2 vec2_i32; typedef glm::uvec2 vec2_u32; typedef glm::dvec2 vec2_f64; typedef glm::vec3 vec3; typedef glm::ivec3 vec3_i32; typedef glm::uvec3 vec3_u32; typedef glm::dvec3 vec3_f64; typedef glm::vec4 vec4; typedef glm::ivec4 vec4_i32; typedef glm::uvec4 vec4_u32; typedef glm::dvec4 vec4_f64; typedef glm::fquat quat; typedef glm::mat3 mat3; typedef glm::mat4 mat4; }; template<typename T, typename U> inline T unsafe_cast(U u) { return (T)u; }
cmbasnett/mandala
engine/src/types.hpp
C++
apache-2.0
1,156
// HTTP.methods({ // 'api/v1/staffMembers': { // get: function(data) { // this.setContentType('application/json'); // var staffMembers = StaffMembers.find(); // var test = []; // staffMembers.forEach(function(staffMember) { // test.push(staffMember); // // // SECOND RUN // // delete staffMember.animeIdOld; // // delete staffMember.personIdOld; // // First RUN // // var anime = Anime.findOne({id: staffMember.animeIdOld}); // // var person = People.findOne({id: staffMember.personIdOld}); // // if (anime) // // staffMember.animeId = anime._id; // // if (person) // // staffMember.personId = person._id; // // // Delete the old one and insert a new one // // StaffMembers.remove(staffMember._id); // // delete staffMember._id; // // staffMember.createdAt = new Date(); // // staffMember.updatedAt = new Date(); // // StaffMembers.insert(staffMember); // }); // return JSON.stringify(test); // } // } // });
phanime/phanime
server/restapi/legacy/staffMembers.js
JavaScript
apache-2.0
1,026
package com.dianyitech.madaptor.contacts.adapter; import android.content.Context; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.BaseAdapter; import android.widget.ImageView; import android.widget.TextView; import com.dianyitech.madaptor.contacts.pojo.MoreOperateBean; import java.util.List; public class OperateAdapterV3 extends BaseAdapter { private Context context; private List<MoreOperateBean> operateList; public OperateAdapterV3(Context paramContext, List<MoreOperateBean> paramList) { this.context = paramContext; this.operateList = paramList; } public int getCount() { return this.operateList.size(); } public Object getItem(int paramInt) { return this.operateList.get(paramInt); } public long getItemId(int paramInt) { return paramInt; } public View getView(int paramInt, View paramView, ViewGroup paramViewGroup) { MoreOperateBean localMoreOperateBean = (MoreOperateBean)this.operateList.get(paramInt); ViewHolder localViewHolder; if (paramView == null) { paramView = LayoutInflater.from(this.context).inflate(2130903082, null); localViewHolder = new ViewHolder(); localViewHolder.operateIconIv = ((ImageView)paramView.findViewById(2131361980)); localViewHolder.operateIconNameTv = ((TextView)paramView.findViewById(2131361981)); paramView.setTag(localViewHolder); } while (true) { localViewHolder.operateIconNameTv.setText(localMoreOperateBean.getMethodName()); return paramView; localViewHolder = (ViewHolder)paramView.getTag(); } } public static class ViewHolder { ImageView operateIconIv; TextView operateIconNameTv; } } /* Location: /Users/zqq/Downloads/??????è½?ä»? 2/classes.jar * Qualified Name: com.dianyitech.madaptor.contacts.adapter.OperateAdapterV3 * JD-Core Version: 0.6.2 */
quding0308/sxrk
Sxrk/src/com/dianyitech/madaptor/contacts/adapter/OperateAdapterV3.java
Java
apache-2.0
1,945
/** * Copyright (C) 2012-2014 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package controllers; import ninja.Context; import ninja.Cookie; import ninja.FilterWith; import ninja.Result; import ninja.Results; import ninja.SecureFilter; import com.google.inject.Singleton; import filters.LoggerFilter; import filters.TeaPotFilter; @Singleton public class FilterController { /** * Not yet finished. * * Simply demonstrates how controllers can be annotated and filtered using * the FilterWith annotation: * * @param context */ @FilterWith(SecureFilter.class) public Result filter(Context context) { // System.out.println("cookies: " + // context.getHttpServletRequest().getCookies()); if (context.getCookies() != null) { for (Cookie cookie : context.getCookies()) { System.out.println("cookie: " + cookie.getName()); } } return Results.html() .addCookie(Cookie.builder("myname", "myvalue").build()); } /** * Really cool. We are using two filters on the method. * * Filters are executed sequentially. First the LoggerFilter then the * TeaPotFilter. * * The TeaPotFilter changes completely the output and the status. * * @param context */ @FilterWith({ LoggerFilter.class, TeaPotFilter.class}) public Result teapot(Context context) { //this will never be executed. Have a look at the TeaPotFilter.class! return Results.html(); } }
krahman/emedia
ninja-servlet-integration-test/src/main/java/controllers/FilterController.java
Java
apache-2.0
1,989
package com.fsck.k9.mail; public class K9MailLib { private static DebugStatus debugStatus = new DefaultDebugStatus(); private K9MailLib() { } public static final int PUSH_WAKE_LOCK_TIMEOUT = 60000; public static final String IDENTITY_HEADER = "X-K9mail-Identity"; public static final String CHAT_HEADER = "Chat-Version"; /** * Should K-9 log the conversation it has over the wire with * SMTP servers? */ public static boolean DEBUG_PROTOCOL_SMTP = true; /** * Should K-9 log the conversation it has over the wire with * IMAP servers? */ public static boolean DEBUG_PROTOCOL_IMAP = true; /** * Should K-9 log the conversation it has over the wire with * POP3 servers? */ public static boolean DEBUG_PROTOCOL_POP3 = true; /** * Should K-9 log the conversation it has over the wire with * WebDAV servers? */ public static boolean DEBUG_PROTOCOL_WEBDAV = true; public static boolean isDebug() { return debugStatus.enabled(); } public static boolean isDebugSensitive() { return debugStatus.debugSensitive(); } public static void setDebugSensitive(boolean b) { if (debugStatus instanceof WritableDebugStatus) { ((WritableDebugStatus) debugStatus).setSensitive(b); } } public static void setDebug(boolean b) { if (debugStatus instanceof WritableDebugStatus) { ((WritableDebugStatus) debugStatus).setEnabled(b); } } public interface DebugStatus { boolean enabled(); boolean debugSensitive(); } public static void setDebugStatus(DebugStatus status) { if (status == null) { throw new IllegalArgumentException("status cannot be null"); } debugStatus = status; } private interface WritableDebugStatus extends DebugStatus { void setEnabled(boolean enabled); void setSensitive(boolean sensitive); } private static class DefaultDebugStatus implements WritableDebugStatus { private boolean enabled; private boolean sensitive; @Override public boolean enabled() { return enabled; } @Override public boolean debugSensitive() { return sensitive; } @Override public void setEnabled(boolean enabled) { this.enabled = 
enabled; } @Override public void setSensitive(boolean sensitive) { this.sensitive = sensitive; } } }
k9mail/k-9
mail/common/src/main/java/com/fsck/k9/mail/K9MailLib.java
Java
apache-2.0
2,599
# Copyright 2012 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from oslo.db import exception as db_exc import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import exc from sqlalchemy.orm import scoped_session from neutron.api.v2 import attributes as attr from neutron.common import constants from neutron.common import uos_constants from neutron.common import utils from neutron.db import db_base_plugin_v2 from neutron.db import model_base from neutron.db import models_v2 from neutron.extensions import _uos_sgrule_default_cfg as sg_cfg from neutron.extensions import securitygroup as ext_sg from neutron.openstack.common import excutils from neutron.openstack.common import timeutils from neutron.openstack.common import uuidutils IP_PROTOCOL_MAP = {constants.PROTO_NAME_TCP: constants.PROTO_NUM_TCP, constants.PROTO_NAME_UDP: constants.PROTO_NUM_UDP, constants.PROTO_NAME_ICMP: constants.PROTO_NUM_ICMP, constants.PROTO_NAME_ICMP_V6: constants.PROTO_NUM_ICMP_V6} class SecurityGroup(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant, models_v2.TimestampMixin): """Represents a v2 neutron security group.""" __table_args__ = ( sa.UniqueConstraint('tenant_id', 'name', name='uniq_sg0tenant_id0name'), ) name = sa.Column(sa.String(255)) description = sa.Column(sa.String(255)) class SecurityGroupPortBinding(model_base.BASEV2): """Represents binding between neutron ports and security profiles.""" port_id = 
sa.Column(sa.String(36), sa.ForeignKey("ports.id", ondelete='CASCADE'), primary_key=True) security_group_id = sa.Column(sa.String(36), sa.ForeignKey("securitygroups.id"), primary_key=True) # Add a relationship to the Port model in order to instruct SQLAlchemy to # eagerly load security group bindings ports = orm.relationship( models_v2.Port, backref=orm.backref("security_groups", lazy='joined', cascade='delete')) class SecurityGroupRule(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): """Represents a v2 neutron security group rule.""" security_group_id = sa.Column(sa.String(36), sa.ForeignKey("securitygroups.id", ondelete="CASCADE"), nullable=False) remote_group_id = sa.Column(sa.String(36), sa.ForeignKey("securitygroups.id", ondelete="CASCADE"), nullable=True) direction = sa.Column(sa.Enum('ingress', 'egress', name='securitygrouprules_direction')) ethertype = sa.Column(sa.String(40)) protocol = sa.Column(sa.String(40)) port_range_min = sa.Column(sa.Integer) port_range_max = sa.Column(sa.Integer) remote_ip_prefix = sa.Column(sa.String(255)) security_group = orm.relationship( SecurityGroup, backref=orm.backref('rules', cascade='all,delete'), primaryjoin="SecurityGroup.id==SecurityGroupRule.security_group_id") source_group = orm.relationship( SecurityGroup, backref=orm.backref('source_rules', cascade='all,delete'), primaryjoin="SecurityGroup.id==SecurityGroupRule.remote_group_id") class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase): """Mixin class to add security group to db_base_plugin_v2.""" __native_bulk_support = True def __init_sg_db_mixin__(self): sg_cfg.DefaultSGRulesConfig.get_valid_rules() def create_security_group_bulk(self, context, security_group_rule): return self._create_bulk('security_group', context, security_group_rule) def create_security_group(self, context, security_group, default_sg=False): """Create security group. If default_sg is true that means we are a default security group for a given tenant if it does not exist. 
""" s = security_group['security_group'] tenant_id = self._get_tenant_id_for_create(context, s) if not default_sg: self._ensure_default_security_group(context, tenant_id) try: with context.session.begin(subtransactions=True): security_group_db = SecurityGroup(id=s.get('id') or ( uuidutils.generate_uuid()), description=s['description'], tenant_id=tenant_id, name=s['name'], created_at=timeutils.utcnow() ) utils.make_default_name(security_group_db, uos_constants.UOS_PRE_SG) context.session.add(security_group_db) for ethertype in ext_sg.sg_supported_ethertypes: if s.get('name') == 'default': # Allow intercommunication ingress_rule = SecurityGroupRule( id=uuidutils.generate_uuid(), tenant_id=tenant_id, security_group=security_group_db, direction='ingress', ethertype=ethertype, source_group=security_group_db) context.session.add(ingress_rule) egress_rule = SecurityGroupRule( id=uuidutils.generate_uuid(), tenant_id=tenant_id, security_group=security_group_db, direction='egress', ethertype=ethertype) context.session.add(egress_rule) # gongysh UOS default ingress port open for default_rule in (sg_cfg.DefaultSGRulesConfig. 
sg_default_rules): if (s.get('name') != 'default' and 'self' == default_rule.protocol): # Allow intercommunication ingress_rule = SecurityGroupRule( id=uuidutils.generate_uuid(), tenant_id=tenant_id, security_group=security_group_db, direction='ingress', ethertype=ethertype, source_group=security_group_db) context.session.add(ingress_rule) elif ('self' != default_rule.protocol): ingress_rule = SecurityGroupRule( id=uuidutils.generate_uuid(), tenant_id=tenant_id, security_group=security_group_db, direction=default_rule.direction, ethertype=ethertype, protocol=default_rule.protocol, port_range_min=default_rule.port_range_min, port_range_max=default_rule.port_range_max) context.session.add(ingress_rule) except db_exc.DBDuplicateEntry as e: if e.columns == ['tenant_id', 'name']: if default_sg: raise else: raise ext_sg.SecurityGroupDuplicateName(name=s['name']) return self._make_security_group_dict(security_group_db, process_extensions=False) def get_security_groups(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False, default_sg=False): # If default_sg is True do not call _ensure_default_security_group() # so this can be done recursively. Context.tenant_id is checked # because all the unit tests do not explicitly set the context on # GETS. TODO(arosen) context handling can probably be improved here. 
if not default_sg and context.tenant_id: self._ensure_default_security_group(context, context.tenant_id) marker_obj = self._get_marker_obj(context, 'security_group', limit, marker) return self._get_collection(context, SecurityGroup, self._make_security_group_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def get_security_groups_count(self, context, filters=None): return self._get_collection_count(context, SecurityGroup, filters=filters) def get_security_group(self, context, id, fields=None, tenant_id=None): """Tenant id is given to handle the case when creating a security group rule on behalf of another use. """ if tenant_id: tmp_context_tenant_id = context.tenant_id context.tenant_id = tenant_id try: with context.session.begin(subtransactions=True): ret = self._make_security_group_dict(self._get_security_group( context, id), fields) ret['security_group_rules'] = self.get_security_group_rules( context, {'security_group_id': [id]}) finally: if tenant_id: context.tenant_id = tmp_context_tenant_id return ret def _get_security_group(self, context, id): try: query = self._model_query(context, SecurityGroup) sg = query.filter(SecurityGroup.id == id).one() except exc.NoResultFound: raise ext_sg.SecurityGroupNotFound(id=id) return sg def delete_security_group(self, context, id): filters = {'security_group_id': [id]} ports = self._get_port_security_group_bindings(context, filters) if ports: raise ext_sg.SecurityGroupInUse(id=id) # confirm security group exists sg = self._get_security_group(context, id) if sg['name'] == 'default' and not context.is_admin: raise ext_sg.SecurityGroupCannotRemoveDefault() with context.session.begin(subtransactions=True): context.session.delete(sg) def update_security_group(self, context, id, security_group): s = security_group['security_group'] # NOTE(gongysh) for the purpose update the security_group with # data return security_group created just s.pop('created_at', None) with 
context.session.begin(subtransactions=True): sg = self._get_security_group(context, id) if sg['name'] == 'default' and 'name' in s: raise ext_sg.SecurityGroupCannotUpdateDefault() sg.update(s) return self._make_security_group_dict(sg) def _make_security_group_dict(self, security_group, fields=None, process_extensions=True): res = {'id': security_group['id'], 'name': security_group['name'], 'tenant_id': security_group['tenant_id'], 'description': security_group['description']} res['security_group_rules'] = [self._make_security_group_rule_dict(r) for r in security_group.rules] if process_extensions: self._apply_dict_extend_functions('security_groups', res, security_group) return self._fields(res, fields) def _make_security_group_binding_dict(self, security_group, fields=None): res = {'port_id': security_group['port_id'], 'security_group_id': security_group['security_group_id']} return self._fields(res, fields) def _create_port_security_group_binding(self, context, port_id, security_group_id): with context.session.begin(subtransactions=True): db = SecurityGroupPortBinding(port_id=port_id, security_group_id=security_group_id) context.session.add(db) def _get_port_security_group_bindings(self, context, filters=None, fields=None): return self._get_collection(context, SecurityGroupPortBinding, self._make_security_group_binding_dict, filters=filters, fields=fields) def _delete_port_security_group_bindings(self, context, port_id): query = self._model_query(context, SecurityGroupPortBinding) bindings = query.filter( SecurityGroupPortBinding.port_id == port_id) with context.session.begin(subtransactions=True): for binding in bindings: context.session.delete(binding) def create_security_group_rule_bulk(self, context, security_group_rule): return self._create_bulk('security_group_rule', context, security_group_rule) def create_security_group_rule_bulk_native(self, context, security_group_rule): r = security_group_rule['security_group_rules'] for rule_dict in r: rule = 
rule_dict['security_group_rule'] #NOTE(gongysh) unspecified address should be None remote_ip_prefix = rule.get('remote_ip_prefix') if (remote_ip_prefix == "0.0.0.0/0" or remote_ip_prefix == "::/0"): rule['remote_ip_prefix'] = None scoped_session(context.session) security_group_id = self._validate_security_group_rules( context, security_group_rule) with context.session.begin(subtransactions=True): if not self.get_security_group(context, security_group_id): raise ext_sg.SecurityGroupNotFound(id=security_group_id) self._check_for_duplicate_rules(context, r) ret = [] for rule_dict in r: rule = rule_dict['security_group_rule'] tenant_id = self._get_tenant_id_for_create(context, rule) db = SecurityGroupRule( id=uuidutils.generate_uuid(), tenant_id=tenant_id, security_group_id=rule['security_group_id'], direction=rule['direction'], remote_group_id=rule.get('remote_group_id'), ethertype=rule['ethertype'], protocol=rule['protocol'], port_range_min=rule['port_range_min'], port_range_max=rule['port_range_max'], remote_ip_prefix=rule.get('remote_ip_prefix')) context.session.add(db) ret.append(self._make_security_group_rule_dict(db)) return ret def create_security_group_rule(self, context, security_group_rule): bulk_rule = {'security_group_rules': [security_group_rule]} return self.create_security_group_rule_bulk_native(context, bulk_rule)[0] def _get_ip_proto_number(self, protocol): if protocol is None: return return IP_PROTOCOL_MAP.get(protocol, protocol) def _validate_port_range(self, rule): """Check that port_range is valid.""" if (rule['port_range_min'] is None and rule['port_range_max'] is None): return if not rule['protocol']: raise ext_sg.SecurityGroupProtocolRequiredWithPorts() ip_proto = self._get_ip_proto_number(rule['protocol']) if ip_proto in [constants.PROTO_NUM_TCP, constants.PROTO_NUM_UDP]: if (rule['port_range_min'] is not None and rule['port_range_min'] <= rule['port_range_max']): pass else: raise ext_sg.SecurityGroupInvalidPortRange() elif ip_proto == 
constants.PROTO_NUM_ICMP: for attr, field in [('port_range_min', 'type'), ('port_range_max', 'code')]: if rule[attr] > 255: raise ext_sg.SecurityGroupInvalidIcmpValue( field=field, attr=attr, value=rule[attr]) if (rule['port_range_min'] is None and rule['port_range_max']): raise ext_sg.SecurityGroupMissingIcmpType( value=rule['port_range_max']) def _validate_security_group_rules(self, context, security_group_rule): """Check that rules being installed. Check that all rules belong to the same security group, remote_group_id/security_group_id belong to the same tenant, and rules are valid. """ new_rules = set() tenant_ids = set() for rules in security_group_rule['security_group_rules']: rule = rules.get('security_group_rule') new_rules.add(rule['security_group_id']) self._validate_port_range(rule) self._validate_ip_prefix(rule) if rule.get('remote_ip_prefix') and rule['remote_group_id']: raise ext_sg.SecurityGroupRemoteGroupAndRemoteIpPrefix() if rule['tenant_id'] not in tenant_ids: tenant_ids.add(rule['tenant_id']) remote_group_id = rule.get('remote_group_id') # Check that remote_group_id exists for tenant if remote_group_id: self.get_security_group(context, remote_group_id, tenant_id=rule['tenant_id']) if len(new_rules) > 1: raise ext_sg.SecurityGroupNotSingleGroupRules() security_group_id = new_rules.pop() # Confirm single tenant and that the tenant has permission # to add rules to this security group. 
if len(tenant_ids) > 1: raise ext_sg.SecurityGroupRulesNotSingleTenant() for tenant_id in tenant_ids: self.get_security_group(context, security_group_id, tenant_id=tenant_id) return security_group_id def _make_security_group_rule_dict(self, security_group_rule, fields=None): res = {'id': security_group_rule['id'], 'tenant_id': security_group_rule['tenant_id'], 'security_group_id': security_group_rule['security_group_id'], 'ethertype': security_group_rule['ethertype'], 'direction': security_group_rule['direction'], 'protocol': security_group_rule['protocol'], 'port_range_min': security_group_rule['port_range_min'], 'port_range_max': security_group_rule['port_range_max'], 'remote_ip_prefix': security_group_rule['remote_ip_prefix'], 'remote_group_id': security_group_rule['remote_group_id']} return self._fields(res, fields) def _make_security_group_rule_filter_dict(self, security_group_rule): sgr = security_group_rule['security_group_rule'] res = {'tenant_id': [sgr['tenant_id']], 'security_group_id': [sgr['security_group_id']], 'direction': [sgr['direction']]} include_if_present = ['protocol', 'port_range_max', 'port_range_min', 'ethertype', 'remote_ip_prefix', 'remote_group_id'] for key in include_if_present: value = sgr.get(key) if value: res[key] = [value] return res def _check_for_duplicate_rules(self, context, security_group_rules): for i in security_group_rules: found_self = False for j in security_group_rules: if i['security_group_rule'] == j['security_group_rule']: if found_self: raise ext_sg.DuplicateSecurityGroupRuleInPost(rule=i) found_self = True # Check in database if rule exists filters = self._make_security_group_rule_filter_dict(i) db_rules = self.get_security_group_rules(context, filters) # Note(arosen): the call to get_security_group_rules wildcards # values in the filter that have a value of [None]. For # example, filters = {'remote_group_id': [None]} will return # all security group rules regardless of their value of # remote_group_id. 
Therefore it is not possible to do this # query unless the behavior of _get_collection() # is changed which cannot be because other methods are already # relying on this behavior. Therefore, we do the filtering # below to check for these corner cases. for db_rule in db_rules: # need to remove id from db_rule for matching id = db_rule.pop('id') if (i['security_group_rule'] == db_rule): raise ext_sg.SecurityGroupRuleExists(id=id) def _validate_ip_prefix(self, rule): """Check that a valid cidr was specified as remote_ip_prefix No need to check that it is in fact an IP address as this is already validated by attribute validators. Check that rule ethertype is consistent with remote_ip_prefix ip type. Add mask to ip_prefix if absent (192.168.1.10 -> 192.168.1.10/32). """ input_prefix = rule['remote_ip_prefix'] if input_prefix: addr = netaddr.IPNetwork(input_prefix) # set input_prefix to always include the netmask: rule['remote_ip_prefix'] = str(addr) # check consistency of ethertype with addr version if rule['ethertype'] != "IPv%d" % (addr.version): raise ext_sg.SecurityGroupRuleParameterConflict( ethertype=rule['ethertype'], cidr=input_prefix) def get_security_group_rules(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = self._get_marker_obj(context, 'security_group_rule', limit, marker) return self._get_collection(context, SecurityGroupRule, self._make_security_group_rule_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) def get_security_group_rules_count(self, context, filters=None): return self._get_collection_count(context, SecurityGroupRule, filters=filters) def get_security_group_rule(self, context, id, fields=None): security_group_rule = self._get_security_group_rule(context, id) return self._make_security_group_rule_dict(security_group_rule, fields) def _get_security_group_rule(self, context, id): try: query = self._model_query(context, 
SecurityGroupRule) sgr = query.filter(SecurityGroupRule.id == id).one() except exc.NoResultFound: raise ext_sg.SecurityGroupRuleNotFound(id=id) return sgr def delete_security_group_rule(self, context, id): with context.session.begin(subtransactions=True): rule = self._get_security_group_rule(context, id) context.session.delete(rule) def _extend_port_dict_security_group(self, port_res, port_db): # Security group bindings will be retrieved from the sqlalchemy # model. As they're loaded eagerly with ports because of the # joined load they will not cause an extra query. security_group_ids = [sec_group_mapping['security_group_id'] for sec_group_mapping in port_db.security_groups] port_res[ext_sg.SECURITYGROUPS] = security_group_ids return port_res # Register dict extend functions for ports db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attr.PORTS, ['_extend_port_dict_security_group']) def _process_port_create_security_group(self, context, port, security_group_ids): if attr.is_attr_set(security_group_ids): for security_group_id in security_group_ids: self._create_port_security_group_binding(context, port['id'], security_group_id) # Convert to list as a set might be passed here and # this has to be serialized port[ext_sg.SECURITYGROUPS] = (security_group_ids and list(security_group_ids) or []) def _ensure_default_security_group(self, context, tenant_id): try: return self.__ensure_default_security_group(context, tenant_id) except db_exc.DBDuplicateEntry as e: with excutils.save_and_reraise_exception() as ctxt: if e.columns == ['tenant_id', 'name']: ctxt.reraise = False return self.__ensure_default_security_group(context, tenant_id) def __ensure_default_security_group(self, context, tenant_id): """Create a default security group if one doesn't exist. :returns: the default security group id. 
it should not be a subtransaction """ with context.session.begin(subtransactions=True): filters = {'name': ['default'], 'tenant_id': [tenant_id]} default_group = self.get_security_groups(context, filters, default_sg=True) if not default_group: security_group = {'security_group': {'name': 'default', 'tenant_id': tenant_id, 'description': 'default'}} ret = self.create_security_group(context, security_group, True) return ret['id'] else: return default_group[0]['id'] def _get_security_groups_on_port(self, context, port): """Check that all security groups on port belong to tenant. :returns: all security groups IDs on port belonging to tenant. """ p = port['port'] if not attr.is_attr_set(p.get(ext_sg.SECURITYGROUPS)): return if p.get('device_owner') and p['device_owner'].startswith('network:'): return port_sg = p.get(ext_sg.SECURITYGROUPS, []) valid_groups = set(g['id'] for g in self.get_security_groups(context, fields=['id'], filters={'id': port_sg})) requested_groups = set(port_sg) port_sg_missing = requested_groups - valid_groups if port_sg_missing: raise ext_sg.SecurityGroupNotFound(id=str(port_sg_missing[0])) return requested_groups def _ensure_default_security_group_on_port(self, context, port): # we don't apply security groups for dhcp, router if (port['port'].get('device_owner') and port['port']['device_owner'].startswith('network:')): return tenant_id = self._get_tenant_id_for_create(context, port['port']) default_sg = self._ensure_default_security_group(context, tenant_id) if attr.is_attr_set(port['port'].get(ext_sg.SECURITYGROUPS)): sgids = port['port'].get(ext_sg.SECURITYGROUPS) else: sgids = [default_sg] port['port'][ext_sg.SECURITYGROUPS] = sgids def _check_update_deletes_security_groups(self, port): """Return True if port has as a security group and it's value is either [] or not is_attr_set, otherwise return False """ if (ext_sg.SECURITYGROUPS in port['port'] and not (attr.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) and 
port['port'][ext_sg.SECURITYGROUPS] != [])): return True return False def _check_update_has_security_groups(self, port): """Return True if port has as a security group and False if the security_group field is is_attr_set or []. """ if (ext_sg.SECURITYGROUPS in port['port'] and (attr.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) and port['port'][ext_sg.SECURITYGROUPS] != [])): return True return False
CingHu/neutron-ustack
neutron/db/securitygroups_db.py
Python
apache-2.0
30,323
package com.troy.collapsibleheaderlayout; import android.content.Context; import android.support.test.InstrumentationRegistry; import android.support.test.runner.AndroidJUnit4; import org.junit.Test; import org.junit.runner.RunWith; import static org.junit.Assert.*; /** * Instrumentation test, which will execute on an Android device. * * @see <a href="http://d.android.com/tools/testing">Testing documentation</a> */ @RunWith(AndroidJUnit4.class) public class ExampleInstrumentedTest { @Test public void useAppContext() throws Exception { // Context of the app under test. Context appContext = InstrumentationRegistry.getTargetContext(); assertEquals("com.troy.collapsibleheaderlayout.test", appContext.getPackageName()); } }
kfrozen/HeaderCollapsibleLayout
collapsibleheaderlayout/src/androidTest/java/com/troy/collapsibleheaderlayout/ExampleInstrumentedTest.java
Java
apache-2.0
777
/* https://en.cppreference.com/w/cpp/language/ub Access to pointer passed to realloc */ #include <cstdlib> #include <iostream> int main() { auto *p1 = static_cast<int *>(std::malloc(sizeof(int))); auto *p2 = static_cast<int *>(std::realloc(p1, sizeof(int))); std::cout << *p1 << *p2 << '\n'; free(p2); return EXIT_SUCCESS; }
geoffviola/undefined_behavior_study
src/access_after_realloc.cpp
C++
apache-2.0
337
package com.google.cloud.hadoop.io.bigquery; import com.google.api.client.util.Sleeper; import com.google.cloud.hadoop.util.HadoopToStringUtil; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.lib.input.FileSplit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.ArrayDeque; import java.util.HashSet; import java.util.Queue; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; /** * DynamicFileListRecordReader implements hadoop RecordReader by exposing a single logical * record stream despite being made up of multiple files which are still newly appearing * while this RecordReader is being used. Requires a single zero-record file to mark the end of the * series of files, and all files must appear under a single directory. Note that with some * file encodings a 0-record file may not be the same as a 0-length file. Filenames must follow * the naming convention specified in the static final members of this class; files will be * read in whatever order they appear, so multiple uses of this RecordReader may result in * very different orderings of data being read. */ public class DynamicFileListRecordReader<K, V> extends RecordReader<K, V> { // Logger. public static final Logger LOG = LoggerFactory.getLogger(DynamicFileListRecordReader.class); // Directory/file-pattern which will contain all the files we read with this reader. private Path inputDirectoryAndPattern; // The estimated number of records we will read in total. 
private long estimatedNumRecords; // The interval we will poll listStatus/globStatus inside nextKeyValue() if we don't already // have a file ready for reading. private int pollIntervalMs; // Stashed away context for use with delegate readers. private TaskAttemptContext context; // The filesystem we will poll for files, based on initial inputDirectoryAndPattern Path. private FileSystem fileSystem; // The Pattern for matching export files, set up at initialization time. private Pattern exportPattern; // Counter for the number of records read so far. private long recordsRead = 0; // Factory for creating the underlying reader for iterating over records within a single file. private DelegateRecordReaderFactory<K, V> delegateRecordReaderFactory; // Underlying reader for iterating over the records within a single file. private RecordReader<K, V> delegateReader = null; // Set of all files we've successfully listed so far. Doesn't include end-indicator empty file. private Set<String> knownFileSet = new HashSet<>(); // Queue of ready-but-not-yet-processed files whose filenames are also saved in knownFileSet, // in the order we discovered them. private Queue<FileStatus> fileQueue = new ArrayDeque<>(); // Becomes positive once we've discovered the end-indicator file for the first time. private int endFileNumber = -1; // Sleeper used to sleep when polling listStatus. private Sleeper sleeper = Sleeper.DEFAULT; // Stored current key/value. 
private K currentKey = null; private V currentValue = null; public DynamicFileListRecordReader( DelegateRecordReaderFactory<K, V> delegateRecordReaderFactory) { this.delegateRecordReaderFactory = delegateRecordReaderFactory; } @Override public void initialize(InputSplit genericSplit, TaskAttemptContext context) throws IOException { try { LOG.info("Initializing DynamicFileListRecordReader with split '{}', task context '{}'", HadoopToStringUtil.toString(genericSplit), HadoopToStringUtil.toString(context)); } catch (InterruptedException ie) { LOG.warn("InterruptedException when logging InputSplit.", ie); } Preconditions.checkArgument(genericSplit instanceof ShardedInputSplit, "InputSplit genericSplit should be an instance of ShardedInputSplit."); this.context = context; // Get inputDirectoryAndPattern from the split. ShardedInputSplit shardedSplit = (ShardedInputSplit) genericSplit; inputDirectoryAndPattern = shardedSplit.getShardDirectoryAndPattern(); estimatedNumRecords = shardedSplit.getLength(); if (estimatedNumRecords <= 0) { LOG.warn("Non-positive estimatedNumRecords '{}'; clipping to 1.", estimatedNumRecords); estimatedNumRecords = 1; } // Grab pollIntervalMs out of the config. pollIntervalMs = context.getConfiguration().getInt( BigQueryConfiguration.DYNAMIC_FILE_LIST_RECORD_READER_POLL_INTERVAL_MS_KEY, BigQueryConfiguration.DYNAMIC_FILE_LIST_RECORD_READER_POLL_INTERVAL_MS_DEFAULT); fileSystem = inputDirectoryAndPattern.getFileSystem(context.getConfiguration()); // TODO(user): Make the base export pattern configurable. String exportPatternRegex = inputDirectoryAndPattern.getName().replace("*", "(\\d+)"); exportPattern = Pattern.compile(exportPatternRegex); fileSystem.mkdirs(inputDirectoryAndPattern.getParent()); } /** * Reads the next key, value pair. Gets next line and parses Json object. May hang for a long * time waiting for more files to appear in this reader's directory. * * @return true if a key/value pair was read. * @throws IOException on IO Error. 
*/ @Override public boolean nextKeyValue() throws IOException, InterruptedException { currentValue = null; // Check if we already have a reader in-progress. if (delegateReader != null) { if (delegateReader.nextKeyValue()) { populateCurrentKeyValue(); return true; } else { delegateReader.close(); delegateReader = null; } } boolean needRefresh = !isNextFileReady() && shouldExpectMoreFiles(); while (needRefresh) { LOG.debug("No files available, but more are expected; refreshing..."); refreshFileList(); needRefresh = !isNextFileReady() && shouldExpectMoreFiles(); if (needRefresh) { LOG.debug("No new files found, sleeping before trying again..."); try { sleeper.sleep(pollIntervalMs); context.progress(); } catch (InterruptedException ie) { LOG.warn("Interrupted while sleeping.", ie); } } } if (isNextFileReady()) { // Open the file and see if it's the 0-record end of dataset marker: FileStatus newFile = moveToNextFile(); LOG.info("Moving to next file '{}' which has {} bytes. Records read so far: {}", newFile.getPath(), newFile.getLen(), recordsRead); InputSplit split = new FileSplit(newFile.getPath(), 0, newFile.getLen(), new String[0]); delegateReader = delegateRecordReaderFactory.createDelegateRecordReader( split, context.getConfiguration()); delegateReader.initialize(split, context); if (!delegateReader.nextKeyValue()) { // we found the end of dataset marker. setEndFileMarkerFile(newFile.getPath().getName()); return nextKeyValue(); } else { populateCurrentKeyValue(); return true; } } Preconditions.checkState( !shouldExpectMoreFiles(), "Should not have exited the refresh loop shouldExpectMoreFiles = true " + "and no files ready to read."); // No files ready and we shouldn't expect any more. return false; } /** * Gets the current key as reported by the delegate record reader. This will generally be the * byte position within the current file. * * @return the current key or null if there is no current key. 
*/ @Override public K getCurrentKey() { return currentKey; } /** * Gets the current value. * * @return the current value or null if there is no current value. */ @Override public V getCurrentValue() { return currentValue; } /** * Returns the current progress based on the number of records read compared to the *estimated* * total number of records planned to be read; this number may be inexact, but will not * report a number greater than 1. * * @return a number between 0.0 and 1.0 that is the fraction of the data read. */ @Override public float getProgress() { return Math.min(1.0f, recordsRead / (float) estimatedNumRecords); } /** * Closes the record reader. * * @throws IOException on IO Error. */ @Override public void close() throws IOException { if (delegateReader != null) { LOG.warn("Got non-null delegateReader during close(); possible premature close() call."); delegateReader.close(); delegateReader = null; } } /** * Allows setting a mock Sleeper for tests to not have to wait in realtime for polling. */ @VisibleForTesting void setSleeper(Sleeper sleeper) { this.sleeper = sleeper; } /** * Helper for populating currentKey and currentValue from delegateReader. Should only be called * once per new key/value from the delegateReader; this method is also responsible for tracking * the number of records read so far. */ private void populateCurrentKeyValue() throws IOException, InterruptedException { currentKey = delegateReader.getCurrentKey(); currentValue = delegateReader.getCurrentValue(); ++recordsRead; } /** * @return true if the next file is available for immediate usage. */ private boolean isNextFileReady() { return !fileQueue.isEmpty(); } /** * Moves to the next file; must have checked to make sure isNextFileReady() returns true prior * to calling this. 
*/ private FileStatus moveToNextFile() { return fileQueue.remove(); } /** * @return true if we haven't found the end-indicator file yet, or if the number of known files * is less than the total number of files as indicated by the endFileNumber. Note that * returning false does *not* mean this RecordReader is done, just that we know of all the * files we plan to read. */ private boolean shouldExpectMoreFiles() { if (endFileNumber == -1 || knownFileSet.size() <= endFileNumber) { return true; } return false; } /** * Parses the numerical index out of a String which matches exportPattern; the exportPattern * should have been compiled from a regex that looks like "data-(\d+).json". * * @throws IndexOutOfBoundsException if the parsed value is greater than Integer.MAX_VALUE. */ private int parseFileIndex(String fileName) { Matcher match = null; String indexString = null; try { match = exportPattern.matcher(fileName); match.find(); indexString = match.group(1); } catch (Exception e) { throw new IllegalStateException(String.format("Failed to parse file '%s'", fileName), e); } long longValue = Long.parseLong(indexString); if (longValue > Integer.MAX_VALUE) { throw new IndexOutOfBoundsException(String.format( "Invalid fileName '%s'; max allowable index is %d, got %d instead", fileName, Integer.MAX_VALUE, longValue)); } return (int) longValue; } /** * Record a specific file as being the 0-record end of stream marker. */ private void setEndFileMarkerFile(String fileName) { int fileIndex = parseFileIndex(fileName); if (endFileNumber == -1) { // First time finding the end-marker file. endFileNumber = fileIndex; LOG.info("Found end-marker file '{}' with index {}", fileName, endFileNumber); // Sanity-check known filenames against the endFileNumber. 
for (String knownFile : knownFileSet) { int knownFileIndex = parseFileIndex(knownFile); Preconditions.checkState( knownFileIndex <= endFileNumber, "Found known file '%s' with index %s, which isn't less than or " + "equal to than endFileNumber %s!", knownFile, knownFileIndex, endFileNumber); } } else { // If we found it before, make sure the file we're looking at has the same index. Preconditions.checkState(fileIndex == endFileNumber, "Found new end-marker file '%s' with index %s but already have endFileNumber %s!", fileName, fileIndex, endFileNumber); } } /** * Lists files, and sifts through the results for any new files we haven't found before. * If a file of size 0 is found, we mark the 'endFileNumber' from it. */ private void refreshFileList() throws IOException { FileStatus[] files = fileSystem.globStatus(inputDirectoryAndPattern); for (FileStatus file : files) { String fileName = file.getPath().getName(); if (!knownFileSet.contains(fileName)) { if (endFileNumber != -1) { // Sanity check against endFileNumber. int newFileIndex = parseFileIndex(fileName); Preconditions.checkState(newFileIndex < endFileNumber, "Found new file '%s' with index %s, which isn't less than endFileNumber %s!", fileName, newFileIndex, endFileNumber); } LOG.info("Adding new file '{}' of size {} to knownFileSet.", fileName, file.getLen()); knownFileSet.add(fileName); fileQueue.add(file); } } } }
peltekster/bigdata-interop-leanplum
bigquery/src/main/java/com/google/cloud/hadoop/io/bigquery/DynamicFileListRecordReader.java
Java
apache-2.0
13,528
/* * Copyright 2010-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.service.codepipeline.model; import java.io.Serializable; /** * <p> * Represents the output of an acknowledge job action. * </p> */ public class AcknowledgeJobResult implements Serializable, Cloneable { /** * <p> * Whether the job worker has received the specified job. * </p> */ private String status; /** * <p> * Whether the job worker has received the specified job. * </p> * * @param status * Whether the job worker has received the specified job. * @see JobStatus */ public void setStatus(String status) { this.status = status; } /** * <p> * Whether the job worker has received the specified job. * </p> * * @return Whether the job worker has received the specified job. * @see JobStatus */ public String getStatus() { return this.status; } /** * <p> * Whether the job worker has received the specified job. * </p> * * @param status * Whether the job worker has received the specified job. * @return Returns a reference to this object so that method calls can be * chained together. * @see JobStatus */ public AcknowledgeJobResult withStatus(String status) { setStatus(status); return this; } /** * <p> * Whether the job worker has received the specified job. * </p> * * @param status * Whether the job worker has received the specified job. * @return Returns a reference to this object so that method calls can be * chained together. 
* @see JobStatus */ public void setStatus(JobStatus status) { this.status = status.toString(); } /** * <p> * Whether the job worker has received the specified job. * </p> * * @param status * Whether the job worker has received the specified job. * @return Returns a reference to this object so that method calls can be * chained together. * @see JobStatus */ public AcknowledgeJobResult withStatus(JobStatus status) { setStatus(status); return this; } /** * Returns a string representation of this object; useful for testing and * debugging. * * @return A string representation of this object. * * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); if (getStatus() != null) sb.append("Status: " + getStatus()); sb.append("}"); return sb.toString(); } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (obj instanceof AcknowledgeJobResult == false) return false; AcknowledgeJobResult other = (AcknowledgeJobResult) obj; if (other.getStatus() == null ^ this.getStatus() == null) return false; if (other.getStatus() != null && other.getStatus().equals(this.getStatus()) == false) return false; return true; } @Override public int hashCode() { final int prime = 31; int hashCode = 1; hashCode = prime * hashCode + ((getStatus() == null) ? 0 : getStatus().hashCode()); return hashCode; } @Override public AcknowledgeJobResult clone() { try { return (AcknowledgeJobResult) super.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException( "Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e); } } }
galaxynut/aws-sdk-java
aws-java-sdk-codepipeline/src/main/java/com/amazonaws/service/codepipeline/model/AcknowledgeJobResult.java
Java
apache-2.0
4,499
// bdlmt_throttle.t.cpp -*-C++-*- #include <bdlmt_throttle.h> #include <bdlsb_fixedmemoutstreambuf.h> #include <bslim_testutil.h> #include <bslmt_barrier.h> #include <bslmt_lockguard.h> #include <bslmt_mutex.h> #include <bslmt_threadgroup.h> #include <bslmt_threadutil.h> #include <bslma_default.h> #include <bslma_defaultallocatorguard.h> #include <bslma_newdeleteallocator.h> #include <bslma_testallocator.h> #include <bslma_testallocatormonitor.h> #include <bslmf_assert.h> #include <bslmt_mutex.h> #include <bsls_assert.h> #include <bsls_asserttest.h> #include <bsls_atomic.h> #include <bsls_stopwatch.h> #include <bsls_systemtime.h> #include <bsls_bsltestutil.h> #include <bsl_algorithm.h> #include <bsl_c_ctype.h> // 'isdigit' #include <bsl_cstdio.h> #include <bsl_cstdlib.h> // 'atoi' #include <bsl_cstring.h> #include <bsl_iostream.h> #include <bsl_limits.h> #include <bsl_vector.h> using namespace BloombergLP; using bsl::cout; using bsl::cerr; using bsl::endl; using bsl::flush; // ============================================================================ // TEST PLAN // ---------------------------------------------------------------------------- // Overview // -------- // ---------------------------------------------------------------------------- // MANIPULATORS // [ 6] int requestPermissionIfValid(bool*,int); // [ 6] int requestPermissionIfValid(bool*,int,const bsls::TimeInterval&); // [ 5] bool requestPermission(); // [ 5] bool requestPermission(int); // [ 4] requestPermission(int, const bsls::TimeInterval&); // [ 4] requestPermission(const bsls::TimeInterval&); // [ 2] void initialize(int, Int64, SystemClockType::Enum); // // ACCESSORS // [ 7] int nextPermit(bsls::TimeInterval *, int) const; // [ 2] bsls::SystemClockType::Enum clockType() const; // [ 2] int maxSimultaneousActions() const; // [ 2] Int64 nanosecondsPerAction() const; // // MACROS // [ 8] BDLMT_THROTTLE_INIT(int, Int64) // [ 8] BDLMT_THROTTLE_INIT_REALTIME(int, Int64) // [ 9] BDLMT_THROTTLE_IF(int, 
Int64) // [ 9] BDLMT_THROTTLE_IF_REALTIME(int, Int64) // [ 9] BDLMT_THROTTLE_IF_ALLOW_ALL // [ 9] BDLMT_THROTTLE_IF_ALLOW_NONE // ---------------------------------------------------------------------------- // [16] USAGE EXAMPLE // [15] CONCERN: BDLMT_THROTTLE_INIT_ALLOW_NONE stress test // [15] CONCERN: BDLMT_THROTTLE_IF_ALLOW_NONE stress test // [14] CONCERN: BDLMT_THROTTLE_INIT_ALLOW_ALL stress test // [14] CONCERN: BDLMT_THROTTLE_IF_ALLOW_ALL stress test // [13] CONCERN: Multithreaded, high contention, macro if // [12] CONCERN: Multithreaded, low contention, macro if // [11] CONCERN: Multithreaded, high contention, macro init // [10] CONCERN: Multithreaded, low contention, macro init // [ 3] TEST APPARATUS // [-1] EVENTS DROPPED TEST // [ 1] BREATHING TEST // ---------------------------------------------------------------------------- // ============================================================================ // STANDARD BDE ASSERT TEST FUNCTION // ---------------------------------------------------------------------------- namespace { int testStatus = 0; void aSsErT(bool condition, const char *message, int line) { if (condition) { cout << "Error " __FILE__ "(" << line << "): " << message << " (failed)" << endl; if (0 <= testStatus && testStatus <= 100) { ++testStatus; } } } } // close unnamed namespace // ============================================================================ // STANDARD BDE TEST DRIVER MACRO ABBREVIATIONS // ---------------------------------------------------------------------------- #define ASSERT BSLIM_TESTUTIL_ASSERT #define ASSERTV BSLIM_TESTUTIL_ASSERTV #define LOOP_ASSERT BSLIM_TESTUTIL_LOOP_ASSERT #define LOOP0_ASSERT BSLIM_TESTUTIL_LOOP0_ASSERT #define LOOP1_ASSERT BSLIM_TESTUTIL_LOOP1_ASSERT #define LOOP2_ASSERT BSLIM_TESTUTIL_LOOP2_ASSERT #define LOOP3_ASSERT BSLIM_TESTUTIL_LOOP3_ASSERT #define LOOP4_ASSERT BSLIM_TESTUTIL_LOOP4_ASSERT #define LOOP5_ASSERT BSLIM_TESTUTIL_LOOP5_ASSERT #define LOOP6_ASSERT 
BSLIM_TESTUTIL_LOOP6_ASSERT #define Q BSLIM_TESTUTIL_Q // Quote identifier literally. #define P BSLIM_TESTUTIL_P // Print identifier and value. #define P_ BSLIM_TESTUTIL_P_ // P(X) without '\n'. #define T_ BSLIM_TESTUTIL_T_ // Print a tab (w/o newline). #define L_ BSLIM_TESTUTIL_L_ // current Line number // ============================================================================ // NEGATIVE-TEST MACRO ABBREVIATIONS // ---------------------------------------------------------------------------- #define ASSERT_FAIL(expr) BSLS_ASSERTTEST_ASSERT_FAIL(expr) #define ASSERT_PASS(expr) BSLS_ASSERTTEST_ASSERT_PASS(expr) // ============================================================================ // GLOBAL TYPEDEFS/CONSTANTS/VARIABLES FOR TESTING // ---------------------------------------------------------------------------- typedef bdlmt::Throttle Obj; typedef bsls::SystemClockType CT; typedef CT::Enum ClockType; typedef bsls::Types::Int64 Int64; typedef bsls::Types::Uint64 Uint64; typedef bsls::TimeInterval TimeInterval; int test; bool verbose; bool veryVerbose; bool veryVeryVerbose; bool veryVeryVeryVerbose; namespace { namespace u { typedef bsls::AtomicOperations AtomicOps; enum { k_MICRO = 1000, k_MILLI = 1000 * 1000, k_SECOND = 1000 * 1000 * 1000 }; bslma::TestAllocator ta("test", veryVeryVeryVerbose); // test allocator const Uint64 epsilon = 100; // 100 nanoseconds const double maxOver = 0.2; // 'u::sleep' can take quite a bit more // time than was requested. const double minSleep = 0.000001; // 10 microseconds const int numThreads = 40; TimeInterval start, end; bslmt::Mutex outputMutex; } // close namespace u } // close unnamed namespace // ============================================================================ // HELPER CLASSES AND FUNCTIONS FOR TESTING // ---------------------------------------------------------------------------- namespace { namespace u { typedef void (*VoidFunc)(); const char *b(bool x) // Return the specified 'x' in string form. 
{ return x ? "true" : "false"; } inline bsls::TimeInterval clockTi(ClockType clockType = CT::e_MONOTONIC) // Return a 'TimeInterval' indicating the current time, using the clock // indicated by the optionally specified 'clockType'. { return bsls::SystemTime::now(clockType); } double doubleClock(ClockType clockType = CT::e_MONOTONIC) // Return the time in seconds, as a floating point value, using the // specified optionally specified 'clockType'. { return u::clockTi(clockType).totalSecondsAsDouble(); } Int64 get(bsls::AtomicOperations::AtomicTypes::Int64 *x_p) // Return the specified atomic '*x_p'. { return bsls::AtomicOperations::getInt64(x_p); } inline Int64 nanoClock(ClockType clockType = CT::e_MONOTONIC) // Return the current time, in nanoseconds, according to the clock // indicated by the optionally specified 'clockType'. { return clockTi(clockType).totalNanoseconds(); } bsls::TimeInterval toTime(const char *); // forward declaration double secondsPrevLeakTime(Obj *throttle_p) { const Int64 lag = nanoClock(throttle_p->d_clockType) - get(&throttle_p->d_prevLeakTime); return static_cast<double>(lag) * 1e-9; } inline void sleep(double timeInSeconds) // Sleep for the specified 'timeInSeconds' seconds, where 10 * 1000 // microseconds is minimum sleep, at least on some platforms. { bslmt::ThreadUtil::microSleep(static_cast<int>(timeInSeconds * 1e6)); } inline double checkedSleep(double timeInSeconds, ClockType clockType = CT::e_MONOTONIC) // Sleep for at least the specified 'timeInSeconds', using the system clock // specified by 'clockType'. Return 0 if the amount of time slept was at // least 'timeInSeconds', and not more than 1.1 times 'timeInSeconds', // return a positive value if we slept too long, and a negative value if we // slept too short. 
{ const double start = doubleClock(clockType); sleep(timeInSeconds); const double elapsed = doubleClock(clockType) - start; if (elapsed < timeInSeconds || timeInSeconds * 1.1 < elapsed) { return elapsed - timeInSeconds; // RETURN } return 0; } bsls::TimeInterval toTime(const char *timeStr) // Return a time interval converted from the specified 'timeStr' of the // form "<minutes>:<seconds>.<frac>:<nano>", where: //: o <minutes> and <seconds> can be any number of digits. //: o <frac> is the fractional part of a second following the '.'. //: o <nano> is nanoseconds and can be any number of digits // Note that both ':'s and the '.' are optional. If no '.' is present, the // second ':' may not be present. If the first ':' is not present, the // first field is taken to be seconds. The behavior is undefined if // 'bsl::strlen(timeStr) > 128', if any extra '.'s or ':''s are in the // 'timeStr', if 'timeStr' contains any character other than digits, '.', // and ':', or if <minutes>, <seconds>, or <nano> are too long to be // parsed into an 'int'. 
{ char buf[128 + 1]; const bsl::size_t len = bsl::strlen(timeStr); BSLS_ASSERT(len < sizeof(buf)); bsl::strcpy(buf, timeStr); char *minutesStr = 0; char *secondsStr = bsl::strchr(buf, ':'); char *fracStr = bsl::strchr(buf, '.'); char *nanoStr = bsl::strchr(buf, 'n'); char *maxStr = buf; (void) maxStr; if (!secondsStr) { secondsStr = buf; } else { *secondsStr = 0; ++secondsStr; minutesStr = buf; } BSLS_ASSERT(secondsStr); if (fracStr) { *fracStr = 0; ++fracStr; } if (nanoStr) { *nanoStr = 0; ++nanoStr; } BSLS_ASSERT(secondsStr); BSLS_ASSERT(!minutesStr || maxStr == minutesStr); BSLS_ASSERT(maxStr <= secondsStr); maxStr = secondsStr; if (fracStr) { BSLS_ASSERT(maxStr <= fracStr); maxStr = fracStr; } if (nanoStr) { BSLS_ASSERT(maxStr <= nanoStr); } for (const char *pc = buf; pc < buf + len; ++pc) { BSLS_ASSERT(!*pc || isdigit(*pc)); } bsls::TimeInterval ret; if (minutesStr) { ret.addMinutes(bsl::atoi(minutesStr)); } if (*secondsStr) { ret.addSeconds(bsl::atoi(secondsStr)); } if (fracStr) { enum { k_FRAC_DIGITS = 9 }; char fracBuf[k_FRAC_DIGITS + 1]; bsl::fill(fracBuf + 0, fracBuf + sizeof(fracBuf), 0); bsl::strncpy(fracBuf, fracStr, k_FRAC_DIGITS); bsl::fill(fracBuf + bsl::strlen(fracBuf), fracBuf+k_FRAC_DIGITS, '0'); ret.addNanoseconds(bsl::atoi(fracBuf)); } if (nanoStr) { ret.addNanoseconds(bsl::atoi(nanoStr)); } return ret; } inline bsls::TimeInterval tiAdapt(const char *timeStr) // This overloaded function enables 'TimeInterval's to be indicated in // tables by either the specified 'timeStr', which this function forwards // to 'toTime', or by specifying an actual 'TimeInterval', which is // specified to the 'orig' argument of the other overload. { return toTime(timeStr); } inline bsls::TimeInterval tiAdapt(const bsls::TimeInterval& orig) { return orig; } } // close namespace u } // close unnamed namespace bsl::ostream& operator<<(bsl::ostream& stream, const bsl::vector<Int64>& v) // Output the specified 'v', one element per line, to the specified // 'stream'. 
{ typedef bsl::vector<Int64>::const_iterator It; stream << endl; for (It it = v.begin(); v.end() != it; ++it) { stream << *it << endl; } return stream; } bsl::ostream& operator<<(bsl::ostream& stream, bsls::SystemClockType::Enum clockType) // Output the specified 'clockType' to the specified 'stream'. { stream << bsls::SystemClockType::toAscii(clockType); return stream; } #if 0 bsl::ostream& operator<<(bsl::ostream& stream, bsls::TimeInterval timeInterval) // Output the specified 'clockType' to the specified 'stream'. { stream << timeInterval.seconds();; char nanoseconds[10]; bsl::sprintf(nanoseconds, "%09d", timeInterval.nanoseconds()); char *pc = nanoseconds + 8; for (; nanoseconds <= pc && '0' == *pc; --pc) { *pc = 0; } if (nanoseconds <= pc) { stream << '.' <<nanoseconds; } return stream; } #endif // ---------- // Case_Usage // ---------- namespace Case_Usage { ///Usage ///----- // In this section we show intended usage of this component. // ///Example 1: Error Reporting /// - - - - - - - - - - - - - // Suppose we have an error reporting function 'reportError', that prints an // error message to a log stream. There is a possibility that 'reportError' // will be called very frequently, and that reports of this error will // overwhelm the other contents of the log, so we want to throttle the number // of times this error will be reported. For our application we decide that we // want to see at most 10 reports of the error at any given time, and that if // the error is occurring continuously, that we want a maximum sustained rate // of one error report every five seconds. // // First, we declare the signature of our 'reportError' function: //.. void reportError(bsl::ostream& stream) // Report an error to the specified 'stream'. { //.. // Then, we define the maximum number of traces that can happen at a time to be // 10: //.. static const int maxSimultaneousTraces = 10; //.. 
// Next, we define the minimum interval between subsequent reported errors, if // errors are being continuously reported to be one report every 5 seconds. // Note that the units are nanoseconds, which must be represented using a 64 // bit integral value: //.. static const bsls::Types::Int64 nanosecondsPerSustainedTrace = 5 * bdlt::TimeUnitRatio::k_NANOSECONDS_PER_SECOND; //.. // Then, we declare our 'throttle' object and use the 'BDLMT_THROTTLE_INIT' // macro to initialize it, using the two above constants. Note that the two // above constants *MUST* be calculated at compile-time, which means, among // other things, that they can't contain any floating point sub-expressions: //.. static bdlmt::Throttle throttle = BDLMT_THROTTLE_INIT( maxSimultaneousTraces, nanosecondsPerSustainedTrace); //.. // Now, we call 'requestPermission' at run-time to determine whether to report // the next error to the log: //.. if (throttle.requestPermission()) { //.. // Finally, we write the message to the log: //.. stream << "Help! I'm being held prisoner in a microprocessor!\n"; } } //.. } // close namespace Case_Usage // --------------- // Case_Allow_None // --------------- namespace Case_Allow_None { bsls::AtomicInt64 eventsSoFar(0); bsls::AtomicInt atomicBarrier(-1); void threadJobIf() // Make a large number of requests via '..._IF_ALLOW_NONE' and verify that // none of the actions and permitted and all of them are refused. { enum { k_TIGHT_ITERATIONS = 1024 }; while (atomicBarrier < 0) {} while (atomicBarrier < 1) { int delta = 0; for (int ii = 0; ii < k_TIGHT_ITERATIONS; ++ii) { BDLMT_THROTTLE_IF_ALLOW_NONE { ASSERT(0 && "Action rejected\n"); } else { ++delta; } } ASSERTV(delta, k_TIGHT_ITERATIONS == delta); eventsSoFar += delta; } } void threadJobInit() // Make a large number of requests of varying numbers of actions on a // throttle configured with '..._ALLOW_NONE' and verify that none of the // are permitted and all of them are refused. 
{
    enum { k_BILLION          = 1000 * 1000 * 1000,
           k_TIGHT_ITERATIONS = 1024 };

    static bdlmt::Throttle throttle = BDLMT_THROTTLE_INIT_ALLOW_NONE;

    bool lastDone = false;

    while (atomicBarrier < 0) {}    // spin until the main thread releases us

    while (atomicBarrier < 1) {
        int delta = 0;
        for (int ii = 0; ii < k_TIGHT_ITERATIONS; ++ii) {
            bool permitted = false;
            int  todo      = ii % 8;

            // Cycle through all 8 'requestPermission' overload / argument
            // combinations, one per iteration.

            switch (todo) {
              case 0: {
                permitted = throttle.requestPermission();
              } break;
              case 1: {
                permitted = throttle.requestPermission(1);
              } break;
              case 2: {
                permitted = throttle.requestPermission(100);
              } break;
              case 3: {
                permitted = throttle.requestPermission(k_BILLION);
              } break;
              case 4: {
                permitted = throttle.requestPermission(u::clockTi());
              } break;
              case 5: {
                permitted = throttle.requestPermission(1, u::clockTi());
              } break;
              case 6: {
                permitted = throttle.requestPermission(100, u::clockTi());
              } break;
              case 7: {
                lastDone  = true;
                permitted = throttle.requestPermission(k_BILLION,
                                                       u::clockTi());
              } break;
              default: {
                BSLS_ASSERT_OPT(0);
              }
            }
            if (permitted) {
                ASSERTV(todo, 0 && "Action accepted\n");
            }
            else {
                ++delta;
            }
        }
        ASSERT(lastDone);
        ASSERTV(delta, k_TIGHT_ITERATIONS == delta);
        eventsSoFar += delta;
    }
}

}  // close namespace Case_Allow_None

                               // --------------
                               // Case_Allow_All
                               // --------------

namespace Case_Allow_All {

bsls::AtomicInt64 eventsSoFar(0);      // running tally of permitted actions
bsls::AtomicInt   atomicBarrier(-1);   // -1: threads wait; 0: run; 1: stop

void threadJobIf()
    // Make a large number of requests for permission using '..._IF_ALLOW_ALL'
    // and observe that actions are always allowed.
{
    enum { k_TIGHT_ITERATIONS = 1024 };

    while (atomicBarrier < 0) {}    // spin until the main thread releases us

    while (atomicBarrier < 1) {
        int delta = 0;
        for (int ii = 0; ii < k_TIGHT_ITERATIONS; ++ii) {
            BDLMT_THROTTLE_IF_ALLOW_ALL {
                ++delta;
            }
            else {
                ASSERT(0 && "Action rejected\n");
            }
        }
        ASSERTV(delta, k_TIGHT_ITERATIONS == delta);
        eventsSoFar += delta;
    }
}

void threadJobInit()
    // Make a large number of requests for permission for varying numbers of
    // actions via varying overloads of the 'requestPermission' method and
    // observe that actions are always allowed.
{
    enum { k_BILLION          = 1000 * 1000 * 1000,
           k_TIGHT_ITERATIONS = 1024 };

    static bdlmt::Throttle throttle = BDLMT_THROTTLE_INIT_ALLOW_ALL;

    bool lastDone = false;

    while (atomicBarrier < 0) {}    // spin until the main thread releases us

    while (atomicBarrier < 1) {
        int delta = 0;
        for (int ii = 0; ii < k_TIGHT_ITERATIONS; ++ii) {
            bool permitted = false;

            // Cycle through all 8 'requestPermission' overload / argument
            // combinations, one per iteration.

            switch (ii % 8) {
              case 0: {
                permitted = throttle.requestPermission();
              } break;
              case 1: {
                permitted = throttle.requestPermission(1);
              } break;
              case 2: {
                permitted = throttle.requestPermission(100);
              } break;
              case 3: {
                permitted = throttle.requestPermission(k_BILLION);
              } break;
              case 4: {
                permitted = throttle.requestPermission(u::clockTi());
              } break;
              case 5: {
                permitted = throttle.requestPermission(1, u::clockTi());
              } break;
              case 6: {
                permitted = throttle.requestPermission(100, u::clockTi());
              } break;
              case 7: {
                lastDone  = true;
                permitted = throttle.requestPermission(k_BILLION,
                                                       u::clockTi());
              } break;
              default: {
                BSLS_ASSERT_OPT(0);
              }
            }
            if (permitted) {
                ++delta;
            }
            else {
                ASSERT(0 && "Action rejected\n");
            }
        }
        ASSERT(lastDone);
        ASSERTV(delta, k_TIGHT_ITERATIONS == delta);
        eventsSoFar += delta;
    }
}

}  // close namespace Case_Allow_All

                         // ---------------------------
                         // Case_Throttle_MULTITHREADED
                         // ---------------------------

namespace Case_Throttle_MULTITHREADED {

// Test parameters: a burst of 'burstSize' actions may be approved at once,
// then roughly one action per 'leakPeriod' thereafter, for 'duration'
// seconds.

const Int64  leakPeriod     = 300 * u::k_MILLI;
const double leakPeriodSecs = static_cast<double>(leakPeriod) * 1e-9;
const double balanceFactor  = 1e-9 / leakPeriodSecs;
const int    burstSize      = 10;
const int    periods        = 3;
const double duration       = leakPeriodSecs *(periods - 0.5);
const double sleepPeriod    = 100e-6;
const int    expEvents      = burstSize + (periods - 1);

bslmt::Mutex    mutex;                 // serializes diagnostic output
enum TestMode { e_INIT_LOW, e_INIT_HIGH, e_IF_LOW, e_IF_HIGH } testMode;
ClockType       clockType;
bsls::AtomicInt eventsSoFar(0);        // actions permitted
bsls::AtomicInt rejectedSoFar(0);      // actions refused
bsls::AtomicInt ii(0);                 // caps the amount of diagnostic output
bslmt::Barrier  barrier(u::numThreads + 1);

Obj throttleMonotonic = BDLMT_THROTTLE_INIT(burstSize, leakPeriod);
Obj throttleRealtime  = BDLMT_THROTTLE_INIT_REALTIME(
                                                     burstSize, leakPeriod);

inline
double calculateBalance(Obj *throttle, const bsls::TimeInterval& now)
    // Return the number of 'leakPeriod's that have elapsed between the last
    // leak time recorded in the specified 'throttle' and the specified 'now'.
    // Note that this is a white-box helper: it reads the private
    // 'd_prevLeakTime' member directly.
{
    const Int64 diff = now.totalNanoseconds() -
                                           u::get(&throttle->d_prevLeakTime);

    return static_cast<double>(diff) * balanceFactor;
}

void threadJob()
    // Request permission for many actions using 'Throttle' objects
    // initialized with the '..._INIT*' macros, with the type of clock driven
    // by 'clockType' and the level of contention determined by 'testMode'
    // (the '..._LOW' modes sleep between requests).
{
    Obj& throttle = CT::e_MONOTONIC == clockType ? throttleMonotonic
                                                 : throttleRealtime;
    ASSERTV(throttle.clockType() == clockType);

    barrier.wait();

    bsls::TimeInterval now;
    switch (testMode) {
      case e_INIT_HIGH: {
        while ((now = u::clockTi(clockType)) < u::end) {
            if (veryVerbose && ++ii < 20) {
                const double balance = calculateBalance(&throttle, now);

                bslmt::LockGuard<bslmt::Mutex> guard(&mutex);
                P(balance);
            }

            if (throttle.requestPermission(now)) {
                ++eventsSoFar;
            }
            else {
                ++rejectedSoFar;
            }
        }
      } break;
      case e_INIT_LOW: {
        while ((now = u::clockTi(clockType)) < u::end) {
            u::sleep(sleepPeriod);    // low contention: back off each pass

            if (veryVerbose && ++ii < 20) {
                const double balance = calculateBalance(&throttle, now);

                bslmt::LockGuard<bslmt::Mutex> guard(&mutex);
                P(balance);
            }

            if (throttle.requestPermission(now)) {
                ++eventsSoFar;
            }
            else {
                ++rejectedSoFar;
            }
        }
      } break;
      case e_IF_HIGH: {
        while ((now = u::clockTi(clockType)) < u::end) {
            if (CT::e_MONOTONIC == clockType) {
                BDLMT_THROTTLE_IF(burstSize, leakPeriod) {
                    ++eventsSoFar;
                }
                else {
                    ++rejectedSoFar;
                }
            }
            else {
                BDLMT_THROTTLE_IF_REALTIME(burstSize, leakPeriod) {
                    ++eventsSoFar;
                }
                else {
                    ++rejectedSoFar;
                }
            }
        }
      } break;
      case e_IF_LOW: {
        while ((now = u::clockTi(clockType)) < u::end) {
            u::sleep(sleepPeriod);    // low contention: back off each pass

            if (CT::e_MONOTONIC == clockType) {
                BDLMT_THROTTLE_IF(burstSize, leakPeriod) {
                    ++eventsSoFar;
                }
                else {
                    ++rejectedSoFar;
                }
            }
            else {
                BDLMT_THROTTLE_IF_REALTIME(burstSize, leakPeriod) {
                    ++eventsSoFar;
                }
                else {
                    ++rejectedSoFar;
                }
            }
        }
      } break;
      default: {
        BSLS_ASSERT(0);
      }
    }

    barrier.wait();
};

void
multiThreadedMain(TestMode mode)
    // Run the multithreaded throttle test in the specified 'mode', once with
    // the monotonic clock and once with the realtime clock, retrying (up to
    // 7 times per pass) any pass whose wall-clock timing was perturbed too
    // badly for the expected event count to be meaningful.
{
    testMode = mode;

    bslmt::ThreadGroup tg(&u::ta);

    int retries       = 0;
    int testStatusSum = 0;
    for (int ti = 0; ti < 2; ++ti) {
        // One pass per clock type; reset the shared tallies for this pass.

        clockType     = ti ? CT::e_REALTIME : CT::e_MONOTONIC;
        eventsSoFar   = 0;
        rejectedSoFar = 0;
        ii            = 0;

        if (verbose) cout << "Pass " << ti << ' ' << clockType << endl;

        tg.addThreads(&threadJob, u::numThreads);

        u::sleep(0.1);    // get everybody waiting on the barrier

        u::end = (u::start = u::clockTi(clockType)) + duration;

        barrier.wait();    // all start

        TimeInterval afterBarrier = u::clockTi(clockType);

        barrier.wait();    // all finish

        const double elapsed = (u::clockTi(clockType) - u::start).
                                                        totalSecondsAsDouble();
        const bool tookTooLong  = 0.4 * leakPeriodSecs < elapsed - duration;
        const bool tooFewEvents = eventsSoFar < expEvents;
        ASSERTV(clockType, expEvents, eventsSoFar,
                                      tookTooLong || eventsSoFar <= expEvents);
        if (tookTooLong || tooFewEvents) {
            // The pass is not trustworthy -- retry it (up to 7 times) after
            // letting the throttles drain.

            ++ retries;
            if (7 < retries) {
                ASSERTV(clockType, duration, elapsed, rejectedSoFar,
                                                                retries <= 7);
                break;
            }

            cout << "Retry necessary:" << clockType <<
                                     (tookTooLong  ? " tookTooLong: "  : "") <<
                                     (tooFewEvents ? " tooFewEvents: " : "");
            if (tookTooLong) {
                P_(duration);    P_(elapsed);
            }
            if (tooFewEvents) {
                P_(eventsSoFar);    cout << " <";    P_(expEvents);
            }
            P(retries);

            u::sleep((burstSize + 1) * leakPeriodSecs);

            --ti;    // repeat this pass
        }
        else {
            ASSERTV(clockType, expEvents, eventsSoFar,
                                                    expEvents == eventsSoFar);
        }

        if (veryVerbose || testStatus) {
            const double scheduled = (u::end - u::start).
                                                        totalSecondsAsDouble();

            // Note that this local 'barrier' shadows the file-scope
            // 'bslmt::Barrier' of the same name.

            const double barrier   = (afterBarrier - u::start).
                                                        totalSecondsAsDouble();

            P_(elapsed);    P_(scheduled);    P_(barrier);    P(eventsSoFar);
            P_(rejectedSoFar);    P(duration);
        }

        tg.joinAll();

        // Accumulate this pass's failures and start the next pass clean, so
        // that a failed pass that was later retried successfully does not
        // linger in 'testStatus'.

        testStatusSum += testStatus;
        testStatus     = 0;
    }
    testStatus = testStatusSum;
}

}  // close namespace Case_Throttle_MULTITHREADED

                        // ---------------------------
                        // Case_Minus_1_Events_Dropped
                        // ---------------------------

namespace Case_Minus_1_Events_Dropped {

const Int64  period                   = 100 * u::k_MILLI;
const double sleepTime                = 2.0 * period / u::k_SECOND;
const double shortSleepTime           = 0.0001;
const int    eventsPerPeriodPerThread = 10;
const int    eventsPerPeriod         = eventsPerPeriodPerThread *
                                                                u::numThreads;

bsls::AtomicInt eventsSoFar(0);        // running tally of permitted actions
bsls::AtomicInt atomicBarrier(-1);     // -1: threads wait; 0: go
bslmt::Barrier  barrier(u::numThreads + 1);

void threadJob()
    // Request permission, many times under very high contention, and see if
    // any actions are refused.
{
    barrier.wait();
    while (0 != atomicBarrier) {}    // spin until the main thread releases us

    for (int jj = 0; jj < eventsPerPeriodPerThread; ++jj) {
        BDLMT_THROTTLE_IF(eventsPerPeriod, period) {
            ++eventsSoFar;
        }
    }
}

}  // close namespace Case_Minus_1_Events_Dropped

// ============================================================================
//                              GLOBAL TEST DATA
// ----------------------------------------------------------------------------

// ============================================================================
//                               MAIN PROGRAM
// ----------------------------------------------------------------------------

int main(int argc, char *argv[])
{
    test = argc > 1 ?
atoi(argv[1]) : 0; verbose = argc > 2; veryVerbose = argc > 3; veryVeryVerbose = argc > 4; veryVeryVeryVerbose = argc > 5; cout << "TEST " << __FILE__ << " CASE " << test << endl; bslma::TestAllocator defaultAllocator("default", veryVeryVeryVerbose); bslma::Default::setDefaultAllocator(&defaultAllocator); bslma::TestAllocator globalAllocator("global", veryVeryVeryVerbose); bslma::Default::setGlobalAllocator(&globalAllocator); switch (test) { case 0: case 16: { // -------------------------------------------------------------------- // USAGE EXAMPLE // Extracted from component header file. // // Concerns: //: 1 The usage example provided in the component header file compiles, //: links, and runs as shown. // // Plan: //: 1 Incorporate usage example from header into test driver, remove //: leading comment characters, and replace 'assert' with 'ASSERT'. //: (C-1) // // Testing: // USAGE EXAMPLE // -------------------------------------------------------------------- if (verbose) cout << "USAGE EXAMPLE\n" "=============\n"; using namespace Case_Usage; char buffer[10 * 1024]; bdlsb::FixedMemOutStreamBuf streamBuf(buffer, sizeof(buffer)); bsl::ostream ostr(&streamBuf); bsls::Stopwatch stopwatch; stopwatch.start(); while (stopwatch.accumulatedWallTime() < 7.0) { reportError(ostr); bslmt::ThreadUtil::microSleep(10 * 1000); } const bsl::size_t numLines = bsl::count(streamBuf.data(), streamBuf.data() + streamBuf.length(), '\n'); ASSERT(11 == numLines); } break; case 15: { // -------------------------------------------------------------------- // ALLOW_NONE STRESS TEST // // Concerns: //: 1 That the 'BDLMT_THROTTLE_IF_ALLOW_NONE' and //: 'BDLMT_THROTTLE_INIT_ALLOW_NONE' macros permit no events, and //: appropriately control an 'else' block as well. // // Plan: //: 1 Have 40 threads in a tight loop calling the 'allow no' macro //: controlling a 'then' clause that we confirm was never taken, and //: an 'else' clause that we confirm was always taken. 
//: 2 Measure the speed with which events are approved. // // Testing: // CONCERN: BDLMT_THROTTLE_INIT_ALLOW_NONE stress test // CONCERN: BDLMT_THROTTLE_IF_ALLOW_NONE stress test // -------------------------------------------------------------------- if (verbose) cout << "ALLOW_NONE STRESS TEST\n" "======================\n"; if (verbose) cout << "BDLMT_THROTTLE_IF_ALLOW_NONE\n" "============================\n"; namespace TC = Case_Allow_None; bslmt::ThreadGroup tg(&u::ta); tg.addThreads(&TC::threadJobIf, u::numThreads); { u::sleep(0.01); const Uint64 start = u::nanoClock(); TC::atomicBarrier = 0; u::sleep(0.1); TC::atomicBarrier = 1; const double elapsed = 1e-9 * static_cast<double>( u::nanoClock() - start); const double events = static_cast<double>(TC::eventsSoFar); const double eventsPerSecond = events / elapsed / u::numThreads; tg.joinAll(); if (verbose) cout << "Events per sec: " << eventsPerSecond << endl; } if (verbose) P(TC::eventsSoFar); if (verbose) cout << "BDLMT_THROTTLE_INIT_ALLOW_NONE\n" "==============================\n"; TC::eventsSoFar = 0; TC::atomicBarrier = -1; tg.addThreads(&TC::threadJobInit, u::numThreads); u::sleep(0.01); const Uint64 start = u::nanoClock(); TC::atomicBarrier = 0; u::sleep(0.1); TC::atomicBarrier = 1; const double elapsed = 1e-9 * static_cast<double>( u::nanoClock() - start); const double events = static_cast<double>(TC::eventsSoFar); const double eventsPerSecond = events / elapsed / u::numThreads; tg.joinAll(); if (verbose) cout << "Events per sec: " << eventsPerSecond << endl; if (verbose) P(TC::eventsSoFar); } break; case 14: { // -------------------------------------------------------------------- // ALLOW_ALL STRESS TEST // // Concerns: //: 1 That the 'BDLMT_THROTTLE_IF_ALLOW_ALL' and //: 'BDLMT_THROTTLE_INIT_ALLOW_ALL' macros permit all events, and //: appropriately control an 'else' block as well. 
// // Plan: //: 1 Have 40 threads in a tight loop calling the 'allow all' macro //: controlling a 'then' clause that we confirm was always taken, and //: an 'else' clause that we confirm was never taken. //: 2 Measure the speed with which events are approved. // // Testing: // CONCERN: BDLMT_THROTTLE_INIT_ALLOW_ALL stress test // CONCERN: BDLMT_THROTTLE_IF_ALLOW_ALL stress test // -------------------------------------------------------------------- if (verbose) cout << "ALLOW_ALL STRESS TEST\n" "=====================\n"; if (verbose) cout << "BDLMT_THROTTLE_IF_ALLOW_ALL\n" "===========================\n"; namespace TC = Case_Allow_All; bslmt::ThreadGroup tg(&u::ta); tg.addThreads(&TC::threadJobIf, u::numThreads); { u::sleep(0.01); const Int64 start = u::nanoClock(); TC::atomicBarrier = 0; u::sleep(0.1); TC::atomicBarrier = 1; const double elapsed = 1e-9 * static_cast<double>( u::nanoClock() - start); const double events = static_cast<double>(TC::eventsSoFar); const double eventsPerSecond = events / elapsed / u::numThreads; tg.joinAll(); if (verbose) cout << "Events per sec: " << eventsPerSecond << endl; } if (verbose) P(TC::eventsSoFar); if (verbose) cout << "BDLMT_THROTTLE_INIT_ALLOW_ALL\n" "=============================\n"; TC::eventsSoFar = 0; TC::atomicBarrier = -1; tg.addThreads(&TC::threadJobInit, u::numThreads); u::sleep(0.01); const Int64 start = u::nanoClock(); TC::atomicBarrier = 0; u::sleep(0.1); TC::atomicBarrier = 1; const double elapsed = 1e-9 * static_cast<double>( u::nanoClock() - start); const double events = static_cast<double>(TC::eventsSoFar); const double eventsPerSecond = events / elapsed / u::numThreads; tg.joinAll(); if (verbose) cout << "Events per sec: " << eventsPerSecond << endl; if (verbose) P(TC::eventsSoFar); } break; case 13: { // -------------------------------------------------------------------- // MULTITHREADED TEST -- HIGH CONTENTION, IF // // Concerns: //: 1 That the type under test functions properly under light //: 
multithreaded contention. // // Plan: //: 1 Repeat the first test in the breathing test, only in a //: multithreaded context. // // Testing: // CONCERN: Multithreaded, high contention, macro if // -------------------------------------------------------------------- if (verbose) cout << "MULTITHREADED TEST -- HIGH CONTENTION, IF\n" "=========================================\n"; namespace TC = Case_Throttle_MULTITHREADED; TC::multiThreadedMain(TC::e_IF_HIGH); } break; case 12: { // -------------------------------------------------------------------- // MULTITHREADED TEST -- LOW CONTENTION, IF // // Concerns: //: 1 That the type under test functions properly under light //: multithreaded contention. // // Plan: //: 1 Repeat the first test in the breathing test, only in a //: multithreaded context. // // Testing: // CONCERN: Multithreaded, low contention, macro if // -------------------------------------------------------------------- if (verbose) cout << "MULTITHREADED TEST -- LOW CONTENTION, IF\n" "========================================\n"; namespace TC = Case_Throttle_MULTITHREADED; bslmt::ThreadGroup tg(&u::ta); TC::multiThreadedMain(TC::e_IF_LOW); } break; case 11: { // -------------------------------------------------------------------- // MULTITHREADED TEST -- HIGH CONTENTION, INIT // // Concerns: //: 1 That the type under test functions properly under light //: multithreaded contention. // // Plan: //: 1 Repeat the first test in the breathing test, only in a //: multithreaded context. 
// // Testing: // CONCERN: Multithreaded, high contention, macro init // -------------------------------------------------------------------- if (verbose) cout << "MULTITHREADED TEST -- HIGH CONTENTION, INIT\n" "===========================================\n"; namespace TC = Case_Throttle_MULTITHREADED; TC::multiThreadedMain(TC::e_INIT_HIGH); } break; case 10: { // -------------------------------------------------------------------- // MULTITHREADED TEST -- LOW CONTENTION, INIT // // Concerns: //: 1 That the type under test functions properly under light //: multithreaded contention. // // Plan: //: 1 Repeat the first test in the breathing test, only in a //: multithreaded context. // // Testing: // CONCERN: Multithreaded, low contention, macro init // -------------------------------------------------------------------- if (verbose) cout << "MULTITHREADED TEST -- LOW CONTENTION, INIT\n" "==========================================\n"; namespace TC = Case_Throttle_MULTITHREADED; TC::multiThreadedMain(TC::e_INIT_LOW); } break; case 9: { // -------------------------------------------------------------------- // TESTING BDLMT_THROTTLE_IF MACROS // // Concerns: //: 1 That all the 'BDLMT_THROTTLE_IF*' macros work correctly. // // Plan: //: 1 Black box testing 1: Iterate a number of times, calling a //: 'BDLMT_THROTTLE_IF' with counters in both if 'if' clause and the //: 'else' clause controlled by the macro, enough times to exhaust //: 'maxSimultaneousActions' but not enough to run long enough for //: 'nanosecondsPerAction' to expire. This makes the tallies of //: counters very predictable, and confirm them. //: //: 2 Black box testing 2: Iterate a number of times calling a //: 'BDLMT_THROTTLE_IF' with a fairly short 'nanosecondsPerAction', //: with counters in both clauses, and a sleep for 1/4 of the //: 'nanospecondsPerAction', and the outer loop timed to finish after //: '2.5 * nanospecondsPerAction' has expired. 
This should result in //: 'maxSimultaneousActions + 2' events being approved, and a roughly //: predictable number of events rejected. //: //: 3: Repeat both black box tests for 'BDLMT_THROTTLE_IF_REALTIME'. //: //: 4 White box test: //: o Define a macro taking 'maxSimultaneousActions' and //: 'nanoSecondsPerAction' arguments, which expands into a code //: block calling 'BDLMT_THROTTLE_IF' 'maxSimultaneousActions' //: times, with separate bools being set in the 'if' and 'else' //: clauses, verifying that the 'if' was always set and the 'else' //: bool never was. //: o In the 'if' clause, take a reference to the static throttle //: instantiated by the macro, and use the accessors to confirm //: that the state is as expected. //: o Call the macro with a variety of inputs. //: //: 5 Repeat the white box test for 'BDLMT_THROTTLE_IF_REALTIME'. //: //: 6 Adapt the white box test to 'BDLMT_THROTTLE_ALLOW_ALL'. Note //: that there is no need for all the code to be in a macro, since //: there are no args to be varied. This test requires very //: intimate white-box knowledge of the component, because the //: the state of the throttle is set to strange values in that case. //: //: 6 Adapt the white box test to 'BDLMT_THROTTLE_ALLOW_NONE'. This //: is very similar to the 'BDLMT_THROTTLE_ALLOW_ALL' case except //: that the throttle defined in the conditional has to be //: accessed from the 'else' clause, since the 'if' clause is never //: executed. 
// // Testing: // BDLMT_THROTTLE_IF(int, Int64) // BDLMT_THROTTLE_IF_REALTIME(int, Int64) // BDLMT_THROTTLE_IF_ALLOW_ALL // BDLMT_THROTTLE_IF_ALLOW_NONE // -------------------------------------------------------------------- if (verbose) cout << "TESTING BDLMT_THROTTLE_IF MACROS\n" "================================\n"; if (verbose) cout << "BDLMT_THROTTLE_IF(4, u::k_SECOND)\n"; { int ii, jj, kk; for (ii = 0, jj = 0, kk = 0; kk < 10; ++ii) { ASSERTV(ii, jj, kk, jj + kk == ii); BDLMT_THROTTLE_IF(4, u::k_SECOND) { ASSERTV(ii, jj, kk, 0 == kk); ++jj; } else { ASSERTV(ii, jj, kk, 4 == jj); ++kk; } } ASSERTV(ii, jj, kk, 14 == ii); ASSERTV(ii, jj, kk, 4 == jj); ASSERTV(ii, jj, kk, 10 == kk); } if (verbose) cout << "BDLMT_THROTTLE_IF(4, 80 * u::k_MILLI)\n"; { const TimeInterval start = u::clockTi(); const TimeInterval end = start + 0.2; int ii, jj, kk; for (ii = 0, jj = 0, kk = 0; u::clockTi() < end; ++ii) { const double elapsed = (u::clockTi() - start).totalSecondsAsDouble(); ASSERTV(ii, jj, kk, jj + kk == ii); BDLMT_THROTTLE_IF(4, 80 * u::k_MILLI) { if (veryVerbose) { P_(jj); P(elapsed); } ++jj; } else { ASSERT(4 <= jj); if (veryVerbose) { P_(kk); P(elapsed); } u::checkedSleep(0.02); ++kk; } } ASSERTV(ii, jj, kk, jj + kk == ii); ASSERTV(ii, jj, kk, jj <= 4 + 1 + 1); ASSERTV(ii, jj, kk, kk < 12); } if (verbose) cout << "BDLMT_THROTTLE_IF_REALTIME(4, u::k_SECOND)\n"; { int ii, jj, kk; for (ii = 0, jj = 0, kk = 0; kk < 10; ++ii) { ASSERTV(ii, jj, kk, jj + kk == ii); BDLMT_THROTTLE_IF_REALTIME(4, u::k_SECOND) { ASSERTV(ii, jj, kk, 0 == kk); ++jj; } else { ASSERTV(ii, jj, kk, 4 == jj); ++kk; } } ASSERTV(ii, jj, kk, 14 == ii); ASSERTV(ii, jj, kk, 4 == jj); ASSERTV(ii, jj, kk, 10 == kk); } if (verbose) cout <<"BDLMT_THROTTLE_IF_REALTIME(4, 80 * u::k_MILLI)\n"; { const TimeInterval start = u::clockTi(CT::e_REALTIME); const TimeInterval end = start + 0.2; int ii, jj, kk; for (ii = 0, jj = 0, kk = 0; u::clockTi(CT::e_REALTIME) < end; ++ii) { const double elapsed = 
(u::clockTi(CT::e_REALTIME) - start).totalSecondsAsDouble(); ASSERTV(ii, jj, kk, jj + kk == ii); BDLMT_THROTTLE_IF_REALTIME(4, 80 * u::k_MILLI) { if (veryVerbose) { P_(jj); P(elapsed); } ++jj; } else { ASSERT(4 <= jj); if (veryVerbose) { P_(kk); P(elapsed); } u::checkedSleep(0.02); ++kk; } } ASSERTV(ii, jj, kk, jj + kk == ii); ASSERTV(ii, jj, kk, jj <= 4 + 1 + 1); } if (verbose) cout << "BDLMT_THROTTLE_IF -- WHITE BOX\n"; #undef WHITE_BOX_TEST_THROTTLE_IF #define WHITE_BOX_TEST_THROTTLE_IF(msa, npa) { \ for (int ii = 0; ii < (msa); ++ii) { \ BSLMF_ASSERT(0 < msa && 0 < npa); \ bool found = false, elseFound = false; \ BDLMT_THROTTLE_IF(msa, npa) { \ Obj& throttle = bdlmt_throttle_iFtHrOtTlE; \ ASSERTV(throttle.maxSimultaneousActions() == msa); \ ASSERTV(throttle.nanosecondsPerAction() == npa); \ ASSERTV(throttle.clockType() == \ CT::e_MONOTONIC); \ found = true; \ } \ else { \ elseFound = true; \ } \ ASSERT(found); \ ASSERT(!elseFound); \ } \ } WHITE_BOX_TEST_THROTTLE_IF( 1, 1); WHITE_BOX_TEST_THROTTLE_IF( 1, u::k_SECOND); WHITE_BOX_TEST_THROTTLE_IF(10, u::k_SECOND); WHITE_BOX_TEST_THROTTLE_IF( 1, u::k_MILLI); WHITE_BOX_TEST_THROTTLE_IF(10, u::k_MILLI); WHITE_BOX_TEST_THROTTLE_IF( 1, 3600LL * u::k_SECOND); WHITE_BOX_TEST_THROTTLE_IF(10, 3600LL * u::k_SECOND); #undef WHITE_BOX_TEST_THROTTLE_IF if (verbose) cout << "BDLMT_THROTTLE_IF_REALTIME -- WHITE BOX\n"; #undef WHITE_BOX_TEST_THROTTLE_IF_REALTIME #define WHITE_BOX_TEST_THROTTLE_IF_REALTIME(msa, npa) { \ for (int ii = 0; ii < (msa); ++ii) { \ BSLMF_ASSERT(0 < msa && 0 < npa); \ bool found = false, elseFound = false; \ BDLMT_THROTTLE_IF_REALTIME(msa, npa) { \ Obj& throttle = bdlmt_throttle_iFtHrOtTlE; \ ASSERTV(throttle.maxSimultaneousActions() == msa); \ ASSERTV(throttle.nanosecondsPerAction() == npa); \ ASSERTV(throttle.clockType() == \ CT::e_REALTIME); \ found = true; \ } \ else { \ elseFound = true; \ } \ ASSERT(found); \ ASSERT(!elseFound); \ } \ } WHITE_BOX_TEST_THROTTLE_IF_REALTIME( 1, 1); 
WHITE_BOX_TEST_THROTTLE_IF_REALTIME( 1, u::k_SECOND); WHITE_BOX_TEST_THROTTLE_IF_REALTIME(10, u::k_SECOND); WHITE_BOX_TEST_THROTTLE_IF_REALTIME( 1, u::k_MILLI); WHITE_BOX_TEST_THROTTLE_IF_REALTIME(10, u::k_MILLI); WHITE_BOX_TEST_THROTTLE_IF_REALTIME( 1, 3600LL * u::k_SECOND); WHITE_BOX_TEST_THROTTLE_IF_REALTIME(10, 3600LL * u::k_SECOND); #undef WHITE_BOX_TEST_THROTTLE_IF_REALTIME if (verbose) cout << "BDLMT_THROTTLE_IF_ALLOW_ALL -- WHITE BOX\n"; for (int ii = 0; ii < 100; ++ii) { bool found = false, elseFound = false; BDLMT_THROTTLE_IF_ALLOW_ALL { Obj& throttle = bdlmt_throttle_iFtHrOtTlE; ASSERTV(throttle.maxSimultaneousActions(), INT_MAX, throttle.maxSimultaneousActions() == INT_MAX); ASSERTV(throttle.nanosecondsPerAction(), LLONG_MIN, throttle.nanosecondsPerAction() == LLONG_MIN); ASSERTV(throttle.clockType() == CT::e_MONOTONIC); found = true; } else { elseFound = true; } ASSERT(found); ASSERT(!elseFound); } if (verbose) cout << "BDLMT_THROTTLE_IF_ALLOW_NONE -- WHITE BOX\n"; for (int ii = 0; ii < 100; ++ii) { bool found = false, elseFound = false; BDLMT_THROTTLE_IF_ALLOW_NONE { found = true; } else { Obj& throttle = bdlmt_throttle_iFtHrOtTlE; ASSERTV(throttle.maxSimultaneousActions(), throttle.maxSimultaneousActions() == 0); ASSERTV(throttle.nanosecondsPerAction(), LLONG_MAX, throttle.nanosecondsPerAction() == LLONG_MAX); ASSERTV(throttle.clockType() == CT::e_MONOTONIC); elseFound = true; } ASSERT(!found); ASSERT(elseFound); } } break; case 8: { // -------------------------------------------------------------------- // TESTING INITIALIZATION MACROS // // Concerns: //: 1 That the macro initializers create throttles with the same state //: as a throttle with 'initialize' called with the same arguments. 
// // Plan: //: 1 Statically initialize a set of throttles with different //: arguments, and have each one pointed to by a line in a table that //: contains the same arguments to be passed to 'initialize', after //: which 'memcmp' is called to verify that the throttles have //: identical state. // // Testing: // BDLMT_THROTTLE_INIT(int, Int64) // BDLMT_THROTTLE_INIT_REALTIME(int, Int64) // -------------------------------------------------------------------- if (verbose) cout << "TESTING INITIALIZATION MACROS\n" "=============================\n"; typedef bdlt::TimeUnitRatio TUR; static Obj throttle00 = BDLMT_THROTTLE_INIT(1, u::k_MILLI); static Obj throttle01 = BDLMT_THROTTLE_INIT_REALTIME(1, u::k_MILLI); static Obj throttle02 = BDLMT_THROTTLE_INIT(5, u::k_MILLI); static Obj throttle03 = BDLMT_THROTTLE_INIT_REALTIME(5, u::k_MILLI); static Obj throttle04 = BDLMT_THROTTLE_INIT(1000, u::k_SECOND); static Obj throttle05 = BDLMT_THROTTLE_INIT_REALTIME( 1000, u::k_SECOND); static Obj throttle16 = BDLMT_THROTTLE_INIT( 10, 10 * TUR::k_NANOSECONDS_PER_HOUR); static Obj throttle17 = BDLMT_THROTTLE_INIT_REALTIME( 10, 10 * TUR::k_NANOSECONDS_PER_HOUR); static Obj throttle08 = BDLMT_THROTTLE_INIT(100, 2 * u::k_MILLI); static Obj throttle09 = BDLMT_THROTTLE_INIT_REALTIME( 100, 2 * u::k_MILLI); static const ClockType mono = CT::e_MONOTONIC; static const ClockType real = CT::e_REALTIME; static const struct Data { int d_line; int d_maxSimultaneousActions; Int64 d_nanosecondsPerAction; ClockType d_clockType; Obj *d_staticThrottle_p; } DATA[] = { { L_, 1, u::k_MILLI, mono, &throttle00 }, { L_, 1, u::k_MILLI, real, &throttle01 }, { L_, 5, u::k_MILLI, mono, &throttle02 }, { L_, 5, u::k_MILLI, real, &throttle03 }, { L_, 1000, u::k_SECOND, mono, &throttle04 }, { L_, 1000, u::k_SECOND, real, &throttle05 }, { L_, 10, 10 * TUR::k_NANOSECONDS_PER_HOUR, mono, &throttle16 }, { L_, 10, 10 * TUR::k_NANOSECONDS_PER_HOUR, real, &throttle17 }, { L_, 100, 2 * u::k_MILLI, mono, &throttle08 }, { L_, 
100, 2 * u::k_MILLI, real, &throttle09 }}; enum { k_NUM_DATA = sizeof DATA / sizeof *DATA }; for (int ti = 0; ti < k_NUM_DATA; ++ti) { const Data& data = DATA[ti]; const int LINE = data.d_line; const int maxSimultaneousActions = data.d_maxSimultaneousActions; const Int64 nanosecondsPerAction = data.d_nanosecondsPerAction; const ClockType clockType = data.d_clockType; const Obj *pStaticThrottle = data.d_staticThrottle_p; ASSERT(clockType == pStaticThrottle->clockType()); Obj mX; mX.initialize(maxSimultaneousActions, nanosecondsPerAction, clockType); ASSERTV(LINE, 0 == bsl::memcmp(&mX, data.d_staticThrottle_p, sizeof(Obj))); } } break; case 7: { // -------------------------------------------------------------------- // TESTING 'nextPermit' // // Concerns: //: 1 If 'nextPermit' is called with invalid input, it will return a //: non-zero value without modifying the time interval passed to it. //: //: 2 If 'nextPermit' is called with valid input, it will return 0, and //: set the time interval passed by pointer to it to the exact //: earliest nanosecond when the specified 'numActions' would be //: permitted. // // Plan: //: 1 Iterate through a table executing 4 commands: //: o 'e_CMD_INIT': Initialize the throttle, but don't call any //: manipulators that will effect its 'd_prevLeakTime' field. This //: will only be called for setting the throttle to 'allow all' or //: 'allow none'. //: o 'e_CMD_INIT_SET_TIME': Initialize the throttle, and call //: 'requestPermissions' to set the 'd_prevLeakTime' field to //: exactly the time specified. //: o 'e_CMD_NEXT_PERMIT': Call 'nextPermit', expect it to succeed //: and return 0 and set the time interval passed to the exact //: nanosecond when 'numActions' actions would be permitted. Then //: follow up with 'requestPermission' to verify that, then //: re-initialize the throttle to the exact state it was in before //: 'nextPermit' was called. 
//: o 'e_CMD_NEXT_PERMIT_INVALID': Call 'nextPermit', expect it to //: return a non-zero value without modifying the time interval //: that was passed to it. // // Testing: // int nextPermit(bsls::TimeInterval *, int) const; // -------------------------------------------------------------------- if (verbose) cout << "TESTING 'nextPermit'\n" "====================\n"; enum { k_BILLION = 1000 * 1000 * 1000 }; enum Cmd { e_CMD_INIT, e_CMD_INIT_SET_TIME, e_CMD_NEXT_PERMIT, e_CMD_NEXT_PERMIT_INVALID }; const TimeInterval nullTI(-123456789, -123456789); // We assign the time interval to be returned from 'nextPermit' to // this ridiculous value before the call. This value is never // expected from a valid call, while with invalid calls we confirm // that the time interval is unmodified. const TimeInterval minusTenYears( - Obj::k_TEN_YEARS_NANOSECONDS / k_BILLION, -(Obj::k_TEN_YEARS_NANOSECONDS % k_BILLION)); // White box -- this is the time interval that will be returned by // 'nextPermit' if the throttle was initialized 'allow all'. // 'INIT_ALLOW_ALL': initialize the throttle to 'allow all'. #undef INIT_ALLOW_ALL #define INIT_ALLOW_ALL \ { L_, e_CMD_INIT, 1, 0, -1, TimeInterval() } // 'INIT_ALLOW_NONE': initialize the throttle to 'allow none'. #undef INIT_ALLOW_NONE #define INIT_ALLOW_NONE \ { L_, e_CMD_INIT, 0, 1, -1, TimeInterval() } // 'INIT_SET_TIME': initialize the throttle with the specified // 'maxSimultaneousActions' and 'secondsPerAction', and use // 'requestPermission' to set the 'd_prevLeakTime' to the specified // string 'setTimeSpec', which is to be parsed by 'u::toTime'. #undef INIT_SET_TIME #define INIT_SET_TIME(maxSimultaneousActions, secondsPerAction, setTimeSpec) \ { L_, e_CMD_INIT_SET_TIME, maxSimultaneousActions, secondsPerAction, \ -1, u::toTime(setTimeSpec) } // 'NEXT_PERMIT': call 'nextPermit' with the specified 'numActions' and // verify that it succeeds. Compare the time returned to the specified // 'expectedTimeSpec' and verify that it matches. 
If the throttle was // not initialized to 'allow all', call 'requestPermission' after the // 'nextPermit' call to verify that the time returned was the exact // nanosecond when permission would be granted, then re-initialize the // throttle back to the state it was in before the 'nextPermit' call. // Note that 'expectedTimeSpec' can be either a string to be parsed by // 'u::toTime' or a 'TimeInterval'. #undef NEXT_PERMIT #define NEXT_PERMIT(numActions, expectedTimeSpec) \ { L_, e_CMD_NEXT_PERMIT, -1, -1, numActions, u::tiAdapt(expectedTimeSpec) } // 'NEXT_PERMIT': call 'nextPermit' with the specified 'numActions' and // verify that it fails, and that the 'TimeInterval' passed to the call // was not modified. #undef NEXT_PERMIT_INVALID #define NEXT_PERMIT_INVALID(numActions) \ { L_, e_CMD_NEXT_PERMIT_INVALID, -1, -1, numActions, TimeInterval() } const struct Data { int d_line; Cmd d_command; int d_maxSimultaneousActions; double d_secondsPerAction; int d_numActions; TimeInterval d_timeInterval; } DATA[] = { INIT_SET_TIME(4, 2, ":0"), NEXT_PERMIT(1, ":2"), NEXT_PERMIT(2, ":4"), NEXT_PERMIT(3, ":6"), NEXT_PERMIT(4, ":8"), NEXT_PERMIT_INVALID(5), NEXT_PERMIT_INVALID(0), NEXT_PERMIT_INVALID(-1), NEXT_PERMIT_INVALID(INT_MIN), NEXT_PERMIT_INVALID(INT_MAX), NEXT_PERMIT(4, ":8"), INIT_SET_TIME(1, 10, ":5"), NEXT_PERMIT(1, ":15"), NEXT_PERMIT_INVALID(2), NEXT_PERMIT_INVALID(0), NEXT_PERMIT_INVALID(-1), NEXT_PERMIT_INVALID(INT_MIN), NEXT_PERMIT_INVALID(INT_MAX), NEXT_PERMIT(1, ":15"), INIT_SET_TIME(10, 1, ":30"), NEXT_PERMIT(1, ":31"), NEXT_PERMIT(2, ":32"), NEXT_PERMIT(3, ":33"), NEXT_PERMIT(4, ":34"), NEXT_PERMIT(5, ":35"), NEXT_PERMIT(6, ":36"), NEXT_PERMIT(7, ":37"), NEXT_PERMIT(8, ":38"), NEXT_PERMIT(9, ":39"), NEXT_PERMIT(10, ":40"), NEXT_PERMIT_INVALID(11), NEXT_PERMIT_INVALID(0), NEXT_PERMIT_INVALID(-1), NEXT_PERMIT_INVALID(INT_MIN), NEXT_PERMIT_INVALID(INT_MAX), NEXT_PERMIT(5, ":35"), INIT_SET_TIME(8, (1.0 / 8), "37:27"), NEXT_PERMIT(1, "37:27.125"), NEXT_PERMIT(2, 
"37:27.25"), NEXT_PERMIT(3, "37:27.375"), NEXT_PERMIT(4, "37:27.5"), NEXT_PERMIT(5, "37:27.625"), NEXT_PERMIT(6, "37:27.75"), NEXT_PERMIT(7, "37:27.875"), NEXT_PERMIT(8, "37:28"), NEXT_PERMIT_INVALID(9), NEXT_PERMIT_INVALID(0), NEXT_PERMIT_INVALID(-1), NEXT_PERMIT_INVALID(INT_MIN), NEXT_PERMIT_INVALID(INT_MAX), NEXT_PERMIT(8, "37:28"), INIT_ALLOW_ALL, NEXT_PERMIT(1, minusTenYears), NEXT_PERMIT(2, minusTenYears), NEXT_PERMIT(100, minusTenYears), NEXT_PERMIT(1000, minusTenYears), NEXT_PERMIT(1000000, minusTenYears), NEXT_PERMIT(INT_MAX, minusTenYears), NEXT_PERMIT_INVALID(0), NEXT_PERMIT_INVALID(-1), NEXT_PERMIT_INVALID(INT_MIN), NEXT_PERMIT(200, minusTenYears), INIT_ALLOW_NONE, NEXT_PERMIT_INVALID(1), NEXT_PERMIT_INVALID(INT_MIN), NEXT_PERMIT_INVALID(-1), NEXT_PERMIT_INVALID(0), NEXT_PERMIT_INVALID(INT_MAX) }; #undef INIT_ALLOW_ALL #undef INIT_ALLOW_NONE #undef INIT_SET_TIME #undef NEXT_PERMIT #undef NEXT_PERMIT_INVALID enum { k_NUM_DATA = sizeof DATA / sizeof *DATA }; Obj mX; // We declare the following variables associated with initialization // outside the loop so that they can be re-used to re-initialize // objects in the 'e_CMD_NEXT_PERMIT == cmd' case. int maxSimultaneousActions; double secondsPerAction; TimeInterval initTimeInterval; bool allowAll; // 'e_CMD_NEXT_PERMIT == cmd' needs to know // if, on the last initialization, the // throttle was set to 'allow all'. 
for (int ti = 0; ti < k_NUM_DATA; ++ti) { const Data& data = DATA[ti]; const int LINE = data.d_line; const Cmd cmd = data.d_command; const TimeInterval& timeInterval = data.d_timeInterval; switch (cmd) { case e_CMD_INIT: case e_CMD_INIT_SET_TIME: { maxSimultaneousActions = data.d_maxSimultaneousActions; secondsPerAction = data.d_secondsPerAction; initTimeInterval = timeInterval; allowAll = 1 == maxSimultaneousActions && 0 == secondsPerAction; mX.initialize(maxSimultaneousActions, static_cast<Int64>(secondsPerAction *1e9)); if (e_CMD_INIT_SET_TIME == cmd) { // Call 'requestPermission' to set the throttle's // 'd_prevLeakTime' field to 'initTimeInterval'. ASSERTV(LINE, mX.requestPermission(maxSimultaneousActions, initTimeInterval)); ASSERTV(LINE, !mX.requestPermission(initTimeInterval)); } } break; case e_CMD_NEXT_PERMIT: { const int numActions = data.d_numActions; TimeInterval result = nullTI; const int rc = mX.nextPermit(&result, numActions); ASSERTV(LINE, numActions, 0 == rc); ASSERTV(LINE, numActions, timeInterval, result, timeInterval == result); if (!allowAll) { // Use 'requestPermission' to verify that 'result' was the // EXACT nanosecond when 'numActions' would first have been // permitted. result.addNanoseconds(-1); ASSERTV(LINE, !mX.requestPermission(numActions, result)); result.addNanoseconds(1); ASSERTV(LINE, mX.requestPermission(numActions, result)); ASSERTV(LINE, !mX.requestPermission(result)); // Re-initialize the throttle to the state it was in before // 'nextPermit' was called. 
                    mX.initialize(maxSimultaneousActions,
                                  static_cast<Int64>(secondsPerAction *1e9));
                    ASSERTV(LINE, mX.requestPermission(maxSimultaneousActions,
                                                       initTimeInterval));
                    ASSERTV(LINE, !mX.requestPermission(initTimeInterval));
                }
              } break;
              case e_CMD_NEXT_PERMIT_INVALID: {
                // 'nextPermit' must reject the invalid 'numActions' (return a
                // non-zero value) and leave '*result' unmodified.

                const int numActions = data.d_numActions;

                TimeInterval result = nullTI;
                const int rc = mX.nextPermit(&result, numActions);
                ASSERTV(LINE, numActions, 0 != rc);
                ASSERTV(LINE, numActions, nullTI, result, nullTI == result);
              } break;
              default: {
                ASSERTV(LINE, cmd, 0 && "unrecognized cmd");
              }
            }
        }
      } break;
      case 6: {
        // --------------------------------------------------------------------
        // TESTING 'requestPermissionIfValid' -- WHITE BOX
        //
        // Concerns:
        //: 1 That 'requestPermissionIfValid' returns non-zero when fed invalid
        //:   input, without modifying the value of the specified '*result'.
        //:
        //: 2 That 'requestPermissionIfValid' returns 0 when fed valid input.
        //
        // Plan:
        //: 1 Do a table-driven test to feed various invalid inputs to
        //:   'requestPermissionIfValid'.
        //:
        //: 2 Loop twice, once where '*result' is pre-set to 'false', once with
        //:   it pre-set to 'true', and observe that neither time is it
        //:   modified unless the inputs were valid.
        //:
        //: 3 Have the table call the function a few times with valid inputs,
        //:   and observe in those cases that 0 is returned and '*result' has
        //:   the expected value.
        //:
        //: 4 Note that we won't be extensively testing for correctness of the
        //:   '*result' returned when the function is fed valid input, since we
        //:   have already tested 'requestPermission'.
// // Testing: // int requestPermissionIfValid(bool*,int); // int requestPermissionIfValid(bool*,int,const bsls::TimeInterval&); // -------------------------------------------------------------------- if (verbose) cout << "TESTING 'requestPermissionIfValid' -- WHITE BOX\n" "===============================================\n"; enum Cmd { e_CMD_INIT, e_CMD_REQUEST }; enum ExpOutcome { e_EXP_INVALID, e_EXP_TRUE, e_EXP_FALSE }; const char *expOutcomeStr[] = { "exp_invalid", "exp_true", "exp_false" }; const Int64 int64Max = bsl::numeric_limits<Int64>::max(); const Int64 secondsMax = int64Max / u::k_SECOND; const Int64 int64Min = bsl::numeric_limits<Int64>::min(); const Int64 secondsMin = int64Min / u::k_SECOND; const TimeInterval nullTI = u::toTime("1234:56.789"); // When 'd_timeInterval' is set to this value, it indicates that // the time is not to be passed to 'requestPermissionIfValid' and // that it is to call the system time. // 'INIT': Initialize the throttle with the specified // 'maxSimultaneousActions' andb 'secondsPerAction'. #undef INIT #define INIT(maxSimultaneousActions, secondsPerAction) \ { L_, e_CMD_INIT, (maxSimultaneousActions), \ static_cast<Int64>((secondsPerAction) * 1e9), -1, nullTI, \ e_EXP_INVALID } // 'REQUEST_INVALID' -- call 'requestPermissionIfValid' with the // specified 'numActions'. If 'timeSpec' does not evaluate to the same // time interval as 'nullTI', pass the time interval to the function is // well. The function under test is expected to return a non-zero // value, without modifying '*result'. #undef REQUEST_INVALID #define REQUEST_INVALID(numActions, timeSpec) \ { L_, e_CMD_REQUEST, -1, -1, (numActions), u::tiAdapt(timeSpec), \ e_EXP_INVALID } // 'REQUEST_INVALID' -- call 'requestPermissionIfValid' with the // specified 'numActions'. If 'timeSpec' does not evaluate to the same // time interval as 'nullTI', pass the time interval to the function is // well. 
The function under test is expected to return 0 with // '*result' set to 'expResult'. #undef REQUEST_OK #define REQUEST_OK(numActions, timeSpec, expResult) \ { L_, e_CMD_REQUEST, -1, -1, (numActions), u::tiAdapt(timeSpec), \ (expResult ? e_EXP_TRUE : e_EXP_FALSE) } const struct Data { int d_line; Cmd d_command; int d_maxSimultaneousActions; Int64 d_nanosecondsPerAction; int d_numActions; TimeInterval d_timeInterval; ExpOutcome d_expOutcome; } DATA[] = { INIT(4, 0.010), REQUEST_INVALID(-1, ":0"), REQUEST_INVALID(0, ":0"), REQUEST_INVALID(5, ":0"), REQUEST_INVALID(-1, nullTI), REQUEST_INVALID(0, nullTI), REQUEST_INVALID(5, nullTI), REQUEST_INVALID(1, TimeInterval(int64Min, 0)), REQUEST_INVALID(2, TimeInterval(int64Min, 0)), REQUEST_INVALID(3, TimeInterval(int64Min, 0)), REQUEST_INVALID(4, TimeInterval(int64Min, 0)), REQUEST_INVALID(1, TimeInterval(secondsMin, -999999999)), REQUEST_INVALID(2, TimeInterval(secondsMin, -999999999)), REQUEST_INVALID(3, TimeInterval(secondsMin, -999999999)), REQUEST_INVALID(4, TimeInterval(secondsMin, -999999999)), REQUEST_OK(1, TimeInterval(secondsMin, 0), false), REQUEST_OK(2, TimeInterval(secondsMin, 0), false), REQUEST_OK(3, TimeInterval(secondsMin, 0), false), REQUEST_OK(4, TimeInterval(secondsMin, 0), false), REQUEST_OK(1, ":0", true), REQUEST_OK(2, ":0", true), REQUEST_OK(3, ":0", false), REQUEST_OK(4, ":0", false), REQUEST_OK(1, ":0", true), REQUEST_OK(1, nullTI, true), REQUEST_OK(2, nullTI, true), REQUEST_OK(3, nullTI, false), REQUEST_OK(4, nullTI, false), REQUEST_OK(1, nullTI, true), REQUEST_OK(1, TimeInterval(secondsMax, 0), true), REQUEST_OK(2, TimeInterval(secondsMax, 0), true), REQUEST_OK(3, TimeInterval(secondsMax, 0), false), REQUEST_OK(4, TimeInterval(secondsMax, 0), false), REQUEST_OK(1, TimeInterval(secondsMax, 0), true), REQUEST_INVALID(1, TimeInterval(secondsMax, 999999999)), REQUEST_INVALID(2, TimeInterval(secondsMax, 999999999)), REQUEST_INVALID(3, TimeInterval(secondsMax, 999999999)), REQUEST_INVALID(4, 
TimeInterval(secondsMax, 999999999)), REQUEST_INVALID(1, TimeInterval(int64Max, 0)), REQUEST_INVALID(2, TimeInterval(int64Max, 0)), REQUEST_INVALID(3, TimeInterval(int64Max, 0)), REQUEST_INVALID(4, TimeInterval(int64Max, 0)), INIT(1, 0.009), REQUEST_INVALID(-1, ":0"), REQUEST_INVALID(0, ":0"), REQUEST_INVALID(2, ":0"), REQUEST_INVALID(-1, nullTI), REQUEST_INVALID(0, nullTI), REQUEST_INVALID(2, nullTI), REQUEST_OK(1, ":0", true), REQUEST_OK(1, ":0.010", true), REQUEST_OK(1, ":0.015", false), REQUEST_OK(1, ":0.020", true), REQUEST_OK(1, ":0.020", false), REQUEST_OK(1, ":0.023", false), REQUEST_OK(1, ":0.023", false), REQUEST_OK(1, ":0.027", false), REQUEST_OK(1, ":0.030", true), // If we don't pass the time, it will pass the current time, which // will be >45 years after the epoch. REQUEST_OK(1, nullTI, true), REQUEST_OK(1, nullTI, false), REQUEST_OK(1, nullTI, false) }; #undef INIT #undef REQUEST_INVALID #undef REQUEST_OK enum { k_NUM_DATA = sizeof DATA / sizeof *DATA }; Obj mX; for (int ti = 0; ti < 2; ++ti) { // This outer 'ti' loop iterates through two values, 'false' and // 'true', to which the returned value 'ret' is set before calling // 'requestPermissionIfValid'. If the parameters were not valid, // we verify that 'ret' is unchanged by the call. 
for (int tj = 0; tj < k_NUM_DATA; ++tj) { const Data& data = DATA[tj]; const int LINE = data.d_line; const Cmd cmd = data.d_command; switch (cmd) { case e_CMD_INIT: { const int maxSimultaneousActions = data.d_maxSimultaneousActions; const Int64 nanosecondsPerAction = data.d_nanosecondsPerAction; if (veryVerbose) cout << "mX.initialize(" << maxSimultaneousActions << ", " << nanosecondsPerAction << ");\n"; mX.initialize(maxSimultaneousActions, nanosecondsPerAction, CT::e_REALTIME); } break; case e_CMD_REQUEST: { const int numActions = data.d_numActions; const TimeInterval& timeInterval = data.d_timeInterval; const ExpOutcome expOutcome = data.d_expOutcome; const bool passTheTime = nullTI != timeInterval; const bool expRet = e_EXP_INVALID == expOutcome ? ti : e_EXP_TRUE == expOutcome; bool ret = ti; int rc = passTheTime ? mX.requestPermissionIfValid(&ret, numActions, timeInterval) : mX.requestPermissionIfValid(&ret, numActions); ASSERTV(LINE, u::b(ti), expOutcomeStr[expOutcome], rc, (0 == rc) == (e_EXP_INVALID != expOutcome)); ASSERTV(LINE, u::b(ti), u::b(expRet), u::b(ret), expRet == ret); if (veryVerbose) { cout << "request(" << numActions; if (passTheTime) { cout << ", TI(" << timeInterval.seconds() << ", " << timeInterval.nanoseconds() << ")"; } cout << ") "; P_(rc); P(u::b(ret)); } } break; default: { ASSERTV(cmd, 0 && "invalid cmd"); } } } } } break; case 5: { // -------------------------------------------------------------------- // TESTING 'requestPermission' USING SYSTEM TIME // // Concerns: //: 1 That 'requestPermission' works when not passed a //: 'bsls::TimeInterval' object, in which case it calls the system //: time. // // Plan: //: 1 Do a table-driven test, manipulating elapsed time by calling //: 'u::sleep'. 
// // Testing: // bool requestPermission(); // bool requestPermission(int); // -------------------------------------------------------------------- if (verbose) cout << "TESTING 'requestPermission' USING SYSTEM TIME\n" "=============================================\n"; enum Cmd { e_CMD_INIT, e_CMD_SLEEP, e_CMD_REQUEST }; #undef INIT #define INIT(maxSimultaneousActions, secondsPerAction) \ L_, e_CMD_INIT, (maxSimultaneousActions), \ static_cast<Int64>((secondsPerAction) * 1e9), 0, 0, 0 #undef MILLI_SLEEP #define MILLI_SLEEP(timeInMillisecs) \ L_, e_CMD_SLEEP, -1, -1, -1, (timeInMillisecs)/1000.0, 0 #undef REQUEST #define REQUEST(numActions, expected) \ L_, e_CMD_REQUEST, -1, -1, (numActions), 0, (expected) static const struct Data { int d_line; Cmd d_command; int d_maxSimultaneousActions; Int64 d_nanosecondsPerAction; int d_numActions; double d_sleepSeconds; bool d_expPermission; } DATA[] = { INIT(1, 80e-3), REQUEST(1, 1), REQUEST(1, 0), REQUEST(1, 0), MILLI_SLEEP(40), REQUEST(1, 0), MILLI_SLEEP(80), REQUEST(1, 1), REQUEST(1, 0), INIT(4, 80e-3), REQUEST(2, 1), REQUEST(1, 1), REQUEST(1, 1), REQUEST(1, 0), REQUEST(4, 0), MILLI_SLEEP(40), REQUEST(1, 0), MILLI_SLEEP(80), REQUEST(1, 1), REQUEST(1, 0), MILLI_SLEEP(160), REQUEST(2, 1), REQUEST(4, 0), REQUEST(3, 0), REQUEST(2, 0), REQUEST(1, 0), INIT(4, 80e-3), REQUEST(1, 1), REQUEST(3, 1), REQUEST(1, 0), MILLI_SLEEP(280), REQUEST(1, 1), REQUEST(4, 0), MILLI_SLEEP(160), REQUEST(4, 1), REQUEST(4, 0), REQUEST(3, 0), REQUEST(2, 0), REQUEST(1, 0), MILLI_SLEEP(200), REQUEST(4, 0), REQUEST(3, 0), REQUEST(2, 1), REQUEST(1, 0) }; #undef INIT #undef MILLI_SLEEP #undef REQUEST enum { k_NUM_DATA = sizeof DATA / sizeof *DATA }; static const struct DataTk { int d_tkLine; bool d_defaultToMono; bool d_singleAction; bool d_clockRealTime; } DATA_TK[] = { { L_, 0, 0, 0 }, { L_, 0, 0, 1 }, { L_, 0, 1, 0 }, { L_, 0, 1, 1 }, { L_, 1, 0, 0 }, { L_, 1, 1, 0 } }; enum { k_NUM_DATA_TK = sizeof DATA_TK / sizeof *DATA_TK }; bool quit = false; Obj mX; 
const Obj& X = mX; if (verbose) cout << "Defaulting to calling system time\n"; for (int tk = 0; !quit && tk < k_NUM_DATA_TK; ++tk) { const DataTk dataTk = DATA_TK[tk]; const int TK_LINE = dataTk.d_tkLine; const bool DEFAULT_TO_MONO = dataTk.d_defaultToMono; const bool SINGLE_ACTION_ONLY = dataTk.d_singleAction; const ClockType CLOCK_TYPE = dataTk.d_clockRealTime ? CT::e_REALTIME : CT::e_MONOTONIC; ASSERTV(TK_LINE, !DEFAULT_TO_MONO || CT::e_MONOTONIC == CLOCK_TYPE); if (veryVerbose) { cout << endl; P_(TK_LINE); P_(DEFAULT_TO_MONO); P_(SINGLE_ACTION_ONLY); P(CLOCK_TYPE); } Int64 nanosecondsPerAction; double secondsPerAction; double start = 0, scheduledTime = 0, actualTime = 0, overshoot = 0; int retries = 0; for (int ti = 0; !quit && ti< k_NUM_DATA; ++ti) { const Data data = DATA[ti]; const int LINE = data.d_line; const Cmd CMD = data.d_command; switch (CMD) { case e_CMD_INIT: { const int MAX_SIMULTANEOUS_ACTIONS = data.d_maxSimultaneousActions; nanosecondsPerAction = data.d_nanosecondsPerAction; secondsPerAction = 1e-9 * static_cast<double>(nanosecondsPerAction); if (DEFAULT_TO_MONO) { mX.initialize(MAX_SIMULTANEOUS_ACTIONS, nanosecondsPerAction); } else { mX.initialize(MAX_SIMULTANEOUS_ACTIONS, nanosecondsPerAction, CLOCK_TYPE); } ASSERT(X.clockType() == CLOCK_TYPE); if (veryVerbose) { cout << "initialize(" << MAX_SIMULTANEOUS_ACTIONS << ", " << secondsPerAction; if (!DEFAULT_TO_MONO) { cout << ", " << CLOCK_TYPE; } cout << ");\n"; } start = u::doubleClock(CLOCK_TYPE); scheduledTime = 0; actualTime = 0; } break; case e_CMD_SLEEP: { const double SLEEP_SECONDS = data.d_sleepSeconds; if (veryVerbose) { cout << "sleep(" << SLEEP_SECONDS; if (!DEFAULT_TO_MONO) { cout << ", " << CLOCK_TYPE; } cout << ");\n"; } const double timeToSleep = SLEEP_SECONDS - overshoot; u::sleep(timeToSleep); actualTime = u::doubleClock(CLOCK_TYPE) - start; scheduledTime += SLEEP_SECONDS; overshoot = actualTime - scheduledTime; if (secondsPerAction < overshoot * 4) { ++retries; const double 
overshootPercent = 100 * overshoot / secondsPerAction; ASSERTV(LINE, retries, overshootPercent, retries < 10); if (10 < retries || veryVerbose) { cout << "Retry << " << retries << " triggered. " << overshootPercent << "% overshoot on line: " << LINE << " clock type: " << CLOCK_TYPE << endl; } if (15 < retries) { quit = true; break; } // Back up to before preceeding init and try again. for (--ti; e_CMD_INIT != DATA[ti-- % k_NUM_DATA].d_command;) { ; } } } break; case e_CMD_REQUEST: { const int NUM_ACTIONS = data.d_numActions; const bool EXP = data.d_expPermission; if (SINGLE_ACTION_ONLY) { if (NUM_ACTIONS > 1 && !EXP) { continue; } for (int ii = 0; ii < NUM_ACTIONS; ++ii) { double spltBefore = u::secondsPrevLeakTime(&mX); const bool ret = mX.requestPermission(); if (veryVerbose) { cout << "reqestPermission()"; if (NUM_ACTIONS > 1) { cout << ' ' << (ii + 1); } cout << " == " << u::b(ret) << endl; } ASSERTV(LINE, EXP, ret, ii, spltBefore,EXP == ret); } } else { double spltBefore = u::secondsPrevLeakTime(&mX); const bool ret = mX.requestPermission(NUM_ACTIONS); if (veryVerbose) cout << "reqestPermission(" << NUM_ACTIONS << ") == " << u::b(ret) << endl; ASSERTV(LINE, EXP, ret, overshoot, spltBefore, EXP == ret); } } break; default: { ASSERT(0 && "invalid CMD"); } } } } } break; case 4: { // -------------------------------------------------------------------- // TESTING 'requestPermission' WITH TIME PASSED // // Concerns: //: 1 That request permission grants or refuses permission : //: appropriately when a throttle is configured for finite //: permission. //: //: 2 That request permission always grants permission when the //: throttle is configured 'allow all'. //: //: 3 That request permission never grants permission when the throttle //: is configured 'allow none'. //: //: 4 Negative testing of 'requestPermission'. 
        //
        // Plan:
        //: 1 Construct a table with nullable values indicating how the
        //:   throttle is to be initialized, and with a 'const char *' field to
        //:   indicate time specs, an int field to indicate
        //:   'maxSimultaneousActions', and a bool field to indicate whether
        //:   permission is expected to be granted.  Rows are to take two
        //:   forms:
        //:   o 2nd & 3rd columns specify args to 'initialize'
        //:   o 2nd & 3rd columns are -1, and the remaining columns specify the
        //:     args with which 'requestPermission' is to be called and the
        //:     expected result of this call.
        //:
        //: 2 Iterate through the table, and if it's an initialization record,
        //:   call 'initialize' on the throttle, otherwise call
        //:   'requestPermission' with the specified arguments and check that
        //:   the permission matches expectations.
        //:
        //: 3 Use the same table with a similar loop to drive the single-arg
        //:   'requestPermission'.
        //:
        //: 4 Write a loop going through the same table to drive testing of
        //:   a throttle initialized for 'allow all'.
        //:
        //: 5 Write a loop going through the same table to drive testing of
        //:   'allow none'.
        //:
        //: 6 Do negative testing.
// // Testing: // requestPermission(int, const bsls::TimeInterval&); // requestPermission(const bsls::TimeInterval&); // -------------------------------------------------------------------- if (verbose) cout << "TESTING 'requestPermission' WITH TIME PASSED\n" "============================================\n"; #undef REQUEST #define REQUEST -1, -1 #undef INITIALIZE #define INITIALIZE 0, 0, 0 static const struct Data { int d_line; int d_maxSimultaneousActions; Int64 d_nanosecondsPerAction; int d_numActions; const char *d_timeStr; bool d_expPermission; } DATA[] = { { L_, 1, 10, INITIALIZE }, { L_, REQUEST, 1, "0", 1 }, { L_, REQUEST, 1, "0", 0 }, { L_, REQUEST, 1, "0", 0 }, { L_, REQUEST, 1, "n5", 0 }, { L_, REQUEST, 1, "n10", 1 }, { L_, REQUEST, 1, "n15", 0 }, { L_, REQUEST, 1, "n10", 0 }, { L_, REQUEST, 1, "0", 0 }, { L_, REQUEST, 1, ":1", 1 }, { L_, REQUEST, 1, ":1n5", 0 }, { L_, REQUEST, 1, ":1n10", 1 }, { L_, REQUEST, 1, ":10n0", 1 }, { L_, REQUEST, 1, ":10n2", 0 }, { L_, REQUEST, 1, ":10n8", 0 }, { L_, REQUEST, 1, ":10n10", 1 }, { L_, REQUEST, 1, "20:n", 1 }, { L_, REQUEST, 1, "20:n", 0 }, { L_, REQUEST, 1, "20:n", 0 }, { L_, 4, 10, INITIALIZE }, { L_, REQUEST, 1, "0", 1 }, { L_, REQUEST, 2, "0", 1 }, { L_, REQUEST, 2, "0", 0 }, { L_, REQUEST, 4, "0", 0 }, { L_, REQUEST, 1, "0", 1 }, { L_, REQUEST, 1, "0", 0 }, { L_, REQUEST, 4, "0", 0 }, { L_, REQUEST, 4, ":1", 1 }, { L_, REQUEST, 1, ":1", 0 }, { L_, REQUEST, 4, ":2", 1 }, { L_, REQUEST, 1, ":2n5", 0 }, { L_, REQUEST, 1, ":2n9", 0 }, { L_, REQUEST, 2, ":2n10", 0 }, { L_, REQUEST, 4, ":2n10", 0 }, { L_, REQUEST, 1, ":2n10", 1 }, { L_, REQUEST, 1, ":2n10", 0 }, { L_, REQUEST, 1, ":2n10", 0 }, { L_, REQUEST, 4, ":2n49", 0 }, { L_, REQUEST, 4, ":2n50", 1 }, { L_, REQUEST, 1, "15:", 1 }, { L_, REQUEST, 1, "15:", 1 }, { L_, REQUEST, 1, "15:", 1 }, { L_, REQUEST, 1, "15:", 1 }, { L_, REQUEST, 1, "15:", 0 }, { L_, 4, u::k_SECOND, INITIALIZE }, { L_, REQUEST, 2, "0", 1 }, { L_, REQUEST, 2, "0", 1 }, { L_, REQUEST, 1, 
"0", 0 }, { L_, REQUEST, 1, "0:.1", 0 }, { L_, REQUEST, 1, "0:.2", 0 }, { L_, REQUEST, 1, "0:.5", 0 }, { L_, REQUEST, 1, "0:.5", 0 }, { L_, REQUEST, 1, "0:.5", 0 }, { L_, REQUEST, 4, "0:1", 0 }, { L_, REQUEST, 3, "0:1", 0 }, { L_, REQUEST, 2, "0:1", 0 }, { L_, REQUEST, 1, "0:1", 1 }, { L_, REQUEST, 4, "0:1.1", 0 }, { L_, REQUEST, 3, "0:1.1", 0 }, { L_, REQUEST, 2, "0:1.1", 0 }, { L_, REQUEST, 1, "0:1.1", 0 }, { L_, REQUEST, 4, "0:1.2", 0 }, { L_, REQUEST, 3, "0:1.2", 0 }, { L_, REQUEST, 2, "0:1.2", 0 }, { L_, REQUEST, 1, "0:1.2", 0 }, { L_, REQUEST, 4, "0:1.2", 0 }, { L_, REQUEST, 4, "0:1.3", 0 }, { L_, REQUEST, 3, "0:1.3", 0 }, { L_, REQUEST, 2, "0:1.3", 0 }, { L_, REQUEST, 1, "0:1.3", 0 }, { L_, REQUEST, 4, "0:1.9", 0 }, { L_, REQUEST, 3, "0:1.9", 0 }, { L_, REQUEST, 2, "0:1.9", 0 }, { L_, REQUEST, 1, "0:1.9", 0 }, { L_, REQUEST, 4, "0:5", 1 }, { L_, REQUEST, 1, "100:", 1 }, { L_, REQUEST, 4, "100:", 0 }, { L_, REQUEST, 4, "100:1", 1 } }; #undef REQUEST #undef INITIALIZE enum { k_NUM_DATA = sizeof DATA / sizeof *DATA }; if (verbose) cout << "Table-Driven Testing - 2 arg\n"; if (veryVerbose) cout << "----------------------------\n"; for (int ti = 0; ti < k_NUM_DATA; ++ti) { const Data& data = DATA[ti]; const int line = data.d_line; const int msa = data.d_maxSimultaneousActions; const Int64 npa = data.d_nanosecondsPerAction; const int numActions = data.d_numActions; const char * const timeStr = data.d_timeStr; const bool EXP = data.d_expPermission; static Obj mX; if (-1 != msa) { ASSERTV(line, npa, -1 < npa); ASSERTV(line, numActions, 0 == numActions); ASSERTV(line, timeStr, 0 == timeStr); ASSERTV(line, EXP, !EXP); if (veryVerbose) cout << "'initialize(" << msa << ", " << npa << ");\n"; mX.initialize(msa, npa); continue; } ASSERTV(line, npa, -1 == npa); const TimeInterval time = u::toTime(timeStr); ASSERTV(line, msa, npa, -1 == msa && -1 == npa); const bool ret = mX.requestPermission(numActions, time); if (veryVerbose) cout << "'requestPermissions(int, toTime(\"" 
<< timeStr << "\"))' == " << u::b(ret) << endl; ASSERTV(line, EXP, ret, EXP == ret); } if (verbose) cout << "Table-Driven Testing - 1 arg\n"; if (veryVerbose) cout << "----------------------------\n"; for (int ti = 0; ti < k_NUM_DATA; ++ti) { const Data& data = DATA[ti]; const int line = data.d_line; const int msa = data.d_maxSimultaneousActions; const Int64 npa = data.d_nanosecondsPerAction; const int numActions = data.d_numActions; const char * const timeStr = data.d_timeStr; const bool EXP = data.d_expPermission; static Obj mX; if (-1 != msa) { ASSERTV(line, npa, -1 < npa); ASSERTV(line, numActions, 0 == numActions); ASSERTV(line, timeStr, 0 == timeStr); ASSERTV(line, EXP, !EXP); if (veryVerbose) cout << "'initialize(" << msa << ", " << npa << ");\n"; mX.initialize(msa, npa); continue; } ASSERTV(line, npa, -1 == npa); if (1 < numActions && !EXP) { continue; } const TimeInterval time = u::toTime(timeStr); ASSERTV(line, msa, npa, -1 == msa && -1 == npa); for (int tj = 0; tj < numActions; ++tj) { const bool ret = mX.requestPermission(time); if (veryVerbose) cout << "'requestPermissions(toTime(\"" << timeStr << "\"))' == " << u::b(ret) << " - " << (tj + 1) << endl; ASSERTV(line, EXP, ret, tj, EXP == ret); } } if (verbose) cout << "Allow All\n"; if (veryVerbose) cout << "---------\n"; for (int ti = 0; ti < k_NUM_DATA; ++ti) { const Data& data = DATA[ti]; const int line = data.d_line; const int msa = data.d_maxSimultaneousActions; const int numActions = data.d_numActions; const char * const timeStr = data.d_timeStr; static Obj mX; if (-1 != msa) { mX.initialize(1, 0); // allow all static const Obj mXAll = BDLMT_THROTTLE_INIT_ALLOW_ALL; ASSERT(0 == bsl::memcmp(&mX, &mXAll, sizeof(mX))); continue; } const TimeInterval time = u::toTime(timeStr); ASSERTV(line, mX.requestPermission(numActions, time)); ASSERTV(line, mX.requestPermission(time)); ASSERTV(line, mX.requestPermission(1, time)); ASSERTV(line, mX.requestPermission(INT_MAX, time)); } if (verbose) cout << "Allow 
None\n"; if (veryVerbose) cout << "----------\n"; for (int ti = 0; ti < k_NUM_DATA; ++ti) { const Data& data = DATA[ti]; const int line = data.d_line; const int msa = data.d_maxSimultaneousActions; const int numActions = data.d_numActions; const char * const timeStr = data.d_timeStr; static Obj mX; if (-1 != msa) { mX.initialize(0, 1); // allow none static const Obj mXNone = BDLMT_THROTTLE_INIT_ALLOW_NONE; ASSERT(0 == bsl::memcmp(&mX, &mXNone, sizeof(mX))); continue; } const TimeInterval time = u::toTime(timeStr); ASSERTV(line, false == mX.requestPermission(numActions, time)); ASSERTV(line, false == mX.requestPermission(time)); ASSERTV(line, false == mX.requestPermission(1, time)); ASSERTV(line, false == mX.requestPermission(INT_MAX, time)); } if (verbose) cout << "Negative Testing\n"; if (veryVerbose) cout << "----------------\n"; { bsls::AssertTestHandlerGuard hG; Obj mX = BDLMT_THROTTLE_INIT(4, u::k_SECOND); typedef TimeInterval TI; const TI time; const Int64 int64Max = bsl::numeric_limits<Int64>::max(); const Int64 secondsMax = int64Max / u::k_SECOND; const Int64 int64Min = bsl::numeric_limits<Int64>::min(); const Int64 secondsMin = int64Min / u::k_SECOND; if (veryVerbose) cout << "Negative Testing, 2-Arg\n"; ASSERT_PASS(mX.requestPermission(1, time)); ASSERT_PASS(mX.requestPermission(2, time)); ASSERT_PASS(mX.requestPermission(3, time)); ASSERT_PASS(mX.requestPermission(4, time)); ASSERT_PASS(mX.requestPermission(1, TI(secondsMax, 100))); ASSERT_PASS(mX.requestPermission(2, TI(secondsMin, 0))); ASSERT_FAIL(mX.requestPermission(1, TI(secondsMax, 999999999))); ASSERT_FAIL(mX.requestPermission(2, TI(secondsMin, -999999999))); ASSERT_FAIL(mX.requestPermission(1, TI(int64Max, 0))); ASSERT_FAIL(mX.requestPermission(1, TI(int64Max, 999999999))); ASSERT_FAIL(mX.requestPermission(1, TI(int64Min, 0))); ASSERT_FAIL(mX.requestPermission(1, TI(int64Min, -999999999))); ASSERT_FAIL(mX.requestPermission(0, time)); ASSERT_FAIL(mX.requestPermission(5, time)); 
ASSERT_FAIL(mX.requestPermission(-1, time)); ASSERT_FAIL(mX.requestPermission(INT_MIN, time)); ASSERT_FAIL(mX.requestPermission(INT_MAX, time)); if (veryVerbose) cout << "Negative Testing, 1-Arg\n"; ASSERT_PASS(mX.requestPermission(TI(secondsMax, 100))); ASSERT_PASS(mX.requestPermission(TI(secondsMin, 0))); ASSERT_FAIL(mX.requestPermission(TI(secondsMax, 999999999))); ASSERT_FAIL(mX.requestPermission(TI(secondsMin, -999999999))); ASSERT_FAIL(mX.requestPermission(TI(int64Max, 0))); ASSERT_FAIL(mX.requestPermission(TI(int64Max, 999999999))); ASSERT_FAIL(mX.requestPermission(TI(int64Min, 0))); ASSERT_FAIL(mX.requestPermission(TI(int64Min, -999999999))); } } break; case 3: { // -------------------------------------------------------------------- // TESTING TEST APPARATUS // // Concerns: //: 1 That 'toTime' works as specced. // // Plan: //: 2 Call 'toTime' with table-driven inputs and observe the output is //: as expected. // // TESTING // TEST APPARATUS // -------------------------------------------------------------------- if (verbose) cout << "TESTING TEST APPARATUS\n" "======================\n"; static const struct Data { int d_line; const char *d_timeStr; int d_seconds; int d_nanoseconds; } DATA[] = { { L_, "", 0, 0 }, { L_, "0", 0, 0 }, { L_, ":.n", 0, 0 }, { L_, "0:.n", 0, 0 }, { L_, ":0.n", 0, 0 }, { L_, ":.0n", 0, 0 }, { L_, ":.n0", 0, 0 }, { L_, ":n0", 0, 0 }, { L_, "1:.n", 60, 0 }, { L_, ":1.n", 1, 0 }, { L_, ":.1n", 0, 100000000 }, { L_, ":.n1", 0, 1 }, { L_, "n1", 0, 1 }, { L_, ":n1", 0, 1 }, { L_, ".n1", 0, 1 }, { L_, "12:34.100n1", 754, 100000001 }, { L_, "2:22.543n123", 142, 543000123 }, { L_, "431.87n90", 431, 870000090 }, { L_, ".n12345", 0, 12345 }, { L_, "0.n12345", 0, 12345 }, { L_, "0.0n12345", 0, 12345 }, { L_, "0:0.0n12345", 0, 12345 }, { L_, "23", 23, 0 }, { L_, "23.4", 23, 400000000 }, { L_, "1:23.3", 83, 300000000 }, { L_, "1:23n300000000", 83, 300000000 }, { L_, ":83n300000000", 83, 300000000 }, { L_, "1:23.987654321", 83, 987654321 }, { L_, 
"1:23.9876543", 83, 987654300 }, { L_, "1:23n987654321", 83, 987654321 }, { L_, "1:23n987654300", 83, 987654300 }, { L_, "45:", 2700, 0 }, { L_, "45:0", 2700, 0 }, { L_, "45:0.", 2700, 0 }, { L_, "45:0.0", 2700, 0 }, { L_, "45:0.0n", 2700, 0 }, { L_, "45:0.0n0", 2700, 0 }, { L_, "45:.", 2700, 0 }, { L_, "45:.n", 2700, 0 } }; enum { k_NUM_DATA = sizeof DATA / sizeof *DATA }; for (int ti = 0; ti < k_NUM_DATA; ++ti) { const Data& data = DATA[ti]; const int LINE = data.d_line; const char * const TIME_STR = data.d_timeStr; const int SECONDS = data.d_seconds; const int NANOSECONDS = data.d_nanoseconds; const TimeInterval EXP(SECONDS, NANOSECONDS); const TimeInterval ret = u::toTime(TIME_STR); ASSERTV(LINE, EXP, ret, EXP == ret); const double doubleVal = SECONDS + NANOSECONDS / 1e9; const double retDouble = ret.totalSecondsAsDouble(); if (veryVerbose) { P_(doubleVal); P(retDouble); } ASSERTV(LINE, doubleVal, retDouble, doubleVal + 1e-10 > retDouble); ASSERTV(LINE, doubleVal, retDouble, doubleVal - 1e-10 < retDouble); } } break; case 2: { // -------------------------------------------------------------------- // TESTING 'initialize' and ACCESSORS // // Concerns: //: 1 That after an object is initialized using 'initialize' that the //: accessors verify the state of the object is as expected. // // Plan: //: 1 Set up a table and loop over it. //: //: 2 Each iteration, create two throttles, one of which has the clock //: type defaulting, and one of which has the clock type from the //: table passed. //: //: 3 If the clock type is monotonic, compare the two throttles and //: verify that they are binary identical. //: //: 4 Use the 3 accessors to verify that the state of the throttle //: corresponds to the table driven inputs. //: //: 5 Note that we never test the 'allow all' or 'allow none' cases in //: this test case, those will be covered later. 
// // Testing: // void initialize(int, Int64, SystemClockType::Enum); // bsls::SystemClockType::Enum clockType() const; // int maxSimultaneousActions() const; // Int64 nanosecondsPerAction() const; // -------------------------------------------------------------------- if (verbose) cout << "TESTING 'initialize' and ACCESSORS\n" "==================================\n"; typedef bdlt::TimeUnitRatio TUR; if (verbose) cout << "Table-driven testing\n"; static const ClockType mono = CT::e_MONOTONIC; static const ClockType real = CT::e_REALTIME; static const struct Data { int d_line; int d_maxSimultaneousActions; Int64 d_nanosecondsPerAction; ClockType d_clockType; } DATA[] = { { L_, 1, u::k_MILLI, mono }, { L_, 1, u::k_MILLI, real }, { L_, 5, u::k_MILLI, mono }, { L_, 5, u::k_MILLI, real }, { L_, 1000, u::k_SECOND, mono }, { L_, 1000, u::k_SECOND, real }, { L_, 10, 10 * TUR::k_NANOSECONDS_PER_HOUR, mono }, { L_, 10, 10 * TUR::k_NANOSECONDS_PER_HOUR, real }, { L_, 100, 2 * u::k_MILLI, mono }, { L_, 100, 2 * u::k_MILLI, real } }; enum { k_NUM_DATA = sizeof DATA / sizeof *DATA }; for (int ti = 0; ti < k_NUM_DATA; ++ti) { const Data& data = DATA[ti]; const int LINE = data.d_line; const int maxSimultaneousActions = data.d_maxSimultaneousActions; const Int64 nanosecondsPerAction = data.d_nanosecondsPerAction; const ClockType clockType = data.d_clockType; Obj monoThrottle; monoThrottle.initialize(maxSimultaneousActions, nanosecondsPerAction); Obj mX; const Obj& X = mX; mX.initialize(maxSimultaneousActions, nanosecondsPerAction, clockType); ASSERTV(LINE, CT::e_MONOTONIC != clockType || 0 == bsl::memcmp(&monoThrottle, &mX, sizeof(Obj))); ASSERTV(LINE, maxSimultaneousActions ==X.maxSimultaneousActions()); ASSERTV(LINE, nanosecondsPerAction == X.nanosecondsPerAction()); ASSERTV(LINE, X.clockType(), clockType, X.clockType() == clockType); } if (verbose) cout << "Negative Testing\n"; { // values for 'maxSimultaneousActions' const int msaBad = INT_MIN; (void) msaBad; const int msaLo = 
-1; (void) msaLo; const int msaMin = 0; const int msaMax = INT_MAX; // values for 'nanosecondsPerAction' const Int64 nsaBad = bsl::numeric_limits<Int64>::min(); const Int64 nsaLo = -1; (void) nsaBad; (void) nsaLo; const Int64 nsaMin = 0; const Int64 nsaMax = bsl::numeric_limits<Int64>::max(); // values for 'clockType' const ClockType ctm = CT::e_MONOTONIC; const ClockType ctr = CT::e_REALTIME; const ClockType ctMin = (ClockType) bsl::min(ctm, ctr); const ClockType ctMax = (ClockType) bsl::max(ctm, ctr); const ClockType ctLo = (ClockType) (ctMin - 1); (void) ctLo; const ClockType ctHi = (ClockType) (ctMax + 1); (void) ctHi; for (int ti = 0; ti < 2; ++ti) { const ClockType ctValid = ti ? ctm : ctr; bsls::AssertTestHandlerGuard hG; Obj mX; ASSERT_PASS(mX.initialize(msaMin, 1, ctValid)); ASSERT_PASS(mX.initialize(msaMin, nsaMax, ctValid)); ASSERT_PASS(mX.initialize( 1, nsaMin, ctValid)); ASSERT_PASS(mX.initialize(msaMax, nsaMin, ctValid)); ASSERT_PASS(mX.initialize( 1, 1, ctValid)); ASSERT_FAIL(mX.initialize(msaMin, nsaMin, ctValid)); ASSERT_FAIL(mX.initialize(msaBad, 1, ctValid)); ASSERT_FAIL(mX.initialize(msaBad, nsaMax, ctValid)); ASSERT_FAIL(mX.initialize( msaLo, 1, ctValid)); ASSERT_FAIL(mX.initialize( msaLo, nsaMax, ctValid)); ASSERT_FAIL(mX.initialize( 1, nsaBad, ctValid)); ASSERT_FAIL(mX.initialize(msaMax, nsaBad, ctValid)); ASSERT_FAIL(mX.initialize( 1, nsaLo, ctValid)); ASSERT_FAIL(mX.initialize(msaMax, nsaLo, ctValid)); ASSERT_FAIL(mX.initialize(msaBad, nsaLo, ctValid)); ASSERT_FAIL(mX.initialize( msaLo, nsaBad, ctValid)); ASSERT_FAIL(mX.initialize( msaLo, nsaLo, ctValid)); ASSERT_FAIL(mX.initialize(msaMin, 1, ctLo)); ASSERT_FAIL(mX.initialize(msaMin, nsaMax, ctLo)); ASSERT_FAIL(mX.initialize( 1, nsaMin, ctLo)); ASSERT_FAIL(mX.initialize(msaMax, nsaMin, ctLo)); ASSERT_FAIL(mX.initialize( 1, 1, ctLo)); ASSERT_FAIL(mX.initialize(msaMin, 1, ctHi)); ASSERT_FAIL(mX.initialize(msaMin, nsaMax, ctHi)); ASSERT_FAIL(mX.initialize( 1, nsaMin, ctHi)); 
ASSERT_FAIL(mX.initialize(msaMax, nsaMin, ctHi)); ASSERT_FAIL(mX.initialize( 1, 1, ctHi)); ASSERT_FAIL(mX.initialize( msaLo, nsaLo, ctHi)); ASSERT_FAIL(mX.initialize( msaLo, nsaLo, ctLo)); } } } break; case 1: { // -------------------------------------------------------------------- // BREATHING TEST // This case exercises (but does not fully test) basic functionality. // // Concerns: //: 1 The class is sufficiently functional to enable comprehensive //: testing in subsequent test cases. // // Plan: //: 1 Initialize some throttles to values, and manipulators and //: accessors. // // Testing: // BREATHING TEST // -------------------------------------------------------------------- if (verbose) cout << "BREATHING TEST\n" "==============\n"; int saveTestStatus = 0; for (int kk = 0; kk < 3 * 3; ++kk) { int jj = kk / 3; int mm = kk % 3; int leakPeriods[3] = { 100, 10, 1 }; static const Int64 leakPeriod = leakPeriods[jj] * u::k_MILLI; if (veryVerbose) cout << leakPeriods[jj] << " milliseconds, 20 action test, burst = 10\n"; static const unsigned burst = 10; const unsigned numLeakPeriods = 20; bsls::SystemClockType::Enum clockType; Obj mX; switch (mm) { case 0: { mX.initialize(burst, leakPeriod); clockType = bsls::SystemClockType::e_MONOTONIC; } break; case 1: { mX.initialize(burst, leakPeriod, bsls::SystemClockType::e_MONOTONIC); clockType = bsls::SystemClockType::e_MONOTONIC; } break; case 2: { mX.initialize(burst, leakPeriod, bsls::SystemClockType::e_REALTIME); clockType = bsls::SystemClockType::e_REALTIME; } break; default: { ASSERTV(mm, 0); } } if (veryVerbose) { P_(burst); P(leakPeriod); P(u::nanoClock(clockType)); P(u::get(&mX.d_prevLeakTime)); P_(mX.d_nanosecondsPerAction); P(mX.d_nanosecondsPerTotalReset); } bsl::vector<Int64> results(&u::ta); results.reserve(numLeakPeriods + 1); Int64 startTime = u::nanoClock(clockType); for (unsigned ii = 0; ii < numLeakPeriods; ) { if (mX.requestPermission()) { if (++ii > burst) { Int64 t = u::nanoClock(clockType); const Int64 
diff = (t - startTime) - (ii - burst) * leakPeriod; if (diff < 0) { results.push_back(diff); } } } } const Int64 elapsed = u::nanoClock(clockType) - startTime; const Int64 expElapsed = leakPeriod * (numLeakPeriods - burst) - u::epsilon; ASSERTV(elapsed - expElapsed, results, leakPeriod, elapsed >= expElapsed); if (veryVerbose) { P_(elapsed); P_(elapsed - expElapsed); P(results); } saveTestStatus += testStatus; testStatus = 0; } testStatus = saveTestStatus; if (verbose) cout << "1 millsecond leak time, burst 10, 20 periods\n"; for (int mm = 0; mm < 2; ++mm) { static const Int64 leakPeriod = 1 * u::k_MILLI; static const unsigned burst = 10; const unsigned numLeakPeriods = 20; bsls::SystemClockType::Enum clockType; Obj *pMx; switch (mm) { case 0: { static Obj mX = BDLMT_THROTTLE_INIT(burst, leakPeriod); pMx = &mX; clockType = bsls::SystemClockType::e_MONOTONIC; } break; case 1: { static Obj mX = BDLMT_THROTTLE_INIT_REALTIME(burst, leakPeriod); pMx = &mX; clockType = bsls::SystemClockType::e_REALTIME; } break; default: { ASSERTV(mm, 0); } } if (veryVerbose) { P_(burst); P(leakPeriod); P(u::nanoClock(clockType)); P(u::get(&pMx->d_prevLeakTime)); P_(pMx->d_nanosecondsPerAction); P(pMx->d_nanosecondsPerTotalReset); } bsl::vector<Int64> results(&u::ta); results.reserve(numLeakPeriods + 1); Int64 startTime = u::nanoClock(clockType); for (unsigned ii = 0; ii < numLeakPeriods; ) { if (pMx->requestPermission()) { if (++ii > burst) { const Int64 t = u::nanoClock(clockType); const Int64 diff = (t - startTime) - (ii - burst) * leakPeriod; if (diff < 0) { results.push_back(diff); } } } } const Int64 elapsed = u::nanoClock(clockType) - startTime; const Int64 expElapsed = leakPeriod * (numLeakPeriods - burst) - u::epsilon; ASSERTV(elapsed - expElapsed, results, leakPeriod, elapsed >= expElapsed); if (veryVerbose) { P_(elapsed); P_(elapsed - expElapsed); P(results); } if (veryVerbose) { P_(elapsed); P_(elapsed - expElapsed); P(results); } } } break; case -1: { // 
-------------------------------------------------------------------- // EVENTS DROPPED TEST // // Concerns: //: 1 That under high contention under circumstances where multiple //: threads are likely to update the clock at the same time, that //: events are not lost. // // Plan: //: 1 Control everything by a double barrier -- first, a //: 'bslmt::Barrier' for long waits, then spinning on the atomic //: 'atomicBarrier' for short waits. This guarantees that all //: threads will be released from the second barrier at very close to //: exactly the same time, without wasting too many cycles doing long //: spins on the atomic. //: //: 2 After being released from the double barrier, all subthreads will //: attempt to get permission for a number of events. There will be //: enough events allowed in the period for ALL attempted events to //: acquire permission. //: //: 3 At the end, check the numer of events that were permitted and //: verify that none were refused. // // Testing: // EVENTS DROPPED TEST // -------------------------------------------------------------------- if (verbose) cout << "EVENTS DROPPED TEST\n" "===================\n"; namespace TC = Case_Minus_1_Events_Dropped; bslmt::ThreadGroup tg(&u::ta); tg.addThreads(&TC::threadJob, u::numThreads); TC::barrier.wait(); u::sleep(TC::shortSleepTime); ++TC::atomicBarrier; ASSERT(TC::atomicBarrier == 0); tg.joinAll(); ASSERTV(TC::eventsPerPeriod, TC::eventsSoFar, TC::eventsPerPeriod == TC::eventsSoFar); } break; default: { cerr << "WARNING: CASE `" << test << "' NOT FOUND." << endl; testStatus = -1; } } // CONCERN: In no case does memory come from the global or default // allocators. LOOP_ASSERT(globalAllocator.numBlocksTotal(), 0 == globalAllocator.numBlocksTotal()); LOOP_ASSERT(defaultAllocator.numBlocksTotal(), 0 == defaultAllocator.numBlocksTotal()); if (testStatus > 0) { cerr << "Error, non-zero test status = " << testStatus << "." 
<< endl; } return testStatus; } // ---------------------------------------------------------------------------- // Copyright 2015 Bloomberg Finance L.P. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // ----------------------------- END-OF-FILE ----------------------------------
apaprocki/bde
groups/bdl/bdlmt/bdlmt_throttle.t.cpp
C++
apache-2.0
125,514
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.tinkerpop.gremlin.neo4j.process.traversal.strategy.optimization;

import org.apache.tinkerpop.gremlin.neo4j.process.traversal.step.sideEffect.Neo4jGraphStep;
import org.apache.tinkerpop.gremlin.process.traversal.Step;
import org.apache.tinkerpop.gremlin.process.traversal.Traversal;
import org.apache.tinkerpop.gremlin.process.traversal.TraversalStrategy;
import org.apache.tinkerpop.gremlin.process.traversal.step.HasContainerHolder;
import org.apache.tinkerpop.gremlin.process.traversal.step.map.GraphStep;
import org.apache.tinkerpop.gremlin.process.traversal.strategy.AbstractTraversalStrategy;
import org.apache.tinkerpop.gremlin.process.traversal.util.TraversalHelper;

/**
 * Provider optimization strategy that swaps each generic {@link GraphStep} in a traversal for a
 * Neo4j-specific {@link Neo4jGraphStep}, and folds any has()-style steps that immediately follow
 * it into that step so their predicates travel with the graph step instead of being applied as
 * separate filter steps.
 *
 * <p>Stateless singleton; obtain via {@link #instance()}.</p>
 *
 * @author Pieter Martin
 * @author Marko A. Rodriguez (http://markorodriguez.com)
 */
public final class Neo4jGraphStepStrategy extends AbstractTraversalStrategy<TraversalStrategy.ProviderOptimizationStrategy> implements TraversalStrategy.ProviderOptimizationStrategy {

    // Single shared instance -- the strategy carries no state.
    private static final Neo4jGraphStepStrategy INSTANCE = new Neo4jGraphStepStrategy();

    private Neo4jGraphStepStrategy() {
    }

    @Override
    public void apply(final Traversal.Admin<?, ?> traversal) {
        // Replace every generic GraphStep with a provider-specific Neo4jGraphStep in place.
        TraversalHelper.getStepsOfClass(GraphStep.class, traversal).forEach(originalGraphStep -> {
            final Neo4jGraphStep<?, ?> neo4jGraphStep = new Neo4jGraphStep<>(originalGraphStep);
            TraversalHelper.replaceStep(originalGraphStep, (Step) neo4jGraphStep, traversal);
            // Walk forward over every consecutive step that holds has-containers and absorb
            // their predicates into the Neo4j graph step, removing the now-redundant steps.
            Step<?, ?> currentStep = neo4jGraphStep.getNextStep();
            while (currentStep instanceof HasContainerHolder) {
                ((HasContainerHolder) currentStep).getHasContainers().forEach(hasContainer -> {
                    // Containers against element ids may be folded directly into the step's id
                    // list; every other predicate is kept as a has-container on the graph step.
                    if (!GraphStep.processHasContainerIds(neo4jGraphStep, hasContainer))
                        neo4jGraphStep.addHasContainer(hasContainer);
                });
                // Preserve any step labels (used by select()/path()) on the surviving step.
                currentStep.getLabels().forEach(neo4jGraphStep::addLabel);
                traversal.removeStep(currentStep);
                // NOTE(review): getNextStep() is called on a step that was just removed from the
                // traversal -- this relies on the removed step retaining its 'next' link; confirm
                // against Traversal.Admin#removeStep semantics for the TinkerPop version in use.
                currentStep = currentStep.getNextStep();
            }
        });
    }

    public static Neo4jGraphStepStrategy instance() {
        return INSTANCE;
    }
}
newkek/incubator-tinkerpop
neo4j-gremlin/src/main/java/org/apache/tinkerpop/gremlin/neo4j/process/traversal/strategy/optimization/Neo4jGraphStepStrategy.java
Java
apache-2.0
2,989
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from solum.api.handlers import assembly_handler
from solum.objects import assembly
from solum.tests import base
from solum.tests import fakes
from solum.tests import utils

# Shorthand for the assembly lifecycle state enum used in assertions below.
STATES = assembly.States


@mock.patch('solum.objects.registry')
class TestAssemblyHandler(base.BaseTestCase):
    """Unit tests for AssemblyHandler.

    The class-level patch replaces ``solum.objects.registry``, so every test
    method receives the patched registry as its last mock argument.
    """

    def setUp(self):
        super(TestAssemblyHandler, self).setUp()
        # A dummy request context shared by all tests.
        self.ctx = utils.dummy_context()

    def test_assembly_get(self, mock_registry):
        # NOTE(review): this sets 'return_value.Assembly...' but the assertion
        # below checks 'mock_registry.Assembly.get_by_uuid' -- the setup line
        # appears ineffective for the asserted call path; confirm intent.
        mock_registry.return_value.Assembly.get_by_uuid.return_value = {
            'plan_id': '1234'
        }
        handler = assembly_handler.AssemblyHandler(self.ctx)
        res = handler.get('test_id')
        self.assertIsNotNone(res)
        # The handler must look the assembly up by uuid exactly once.
        get_by_uuid = mock_registry.Assembly.get_by_uuid
        get_by_uuid.assert_called_once_with(self.ctx, 'test_id')

    def test_assembly_get_all(self, mock_registry):
        # get_all should delegate straight to the registry's AssemblyList.
        mock_registry.AssemblyList.get_all.return_value = {}
        handler = assembly_handler.AssemblyHandler(self.ctx)
        res = handler.get_all()
        self.assertIsNotNone(res)
        mock_registry.AssemblyList.get_all.assert_called_once_with(self.ctx)

    def test_update(self, mock_registry):
        # update() should fetch by uuid, apply the new data, then persist.
        data = {'user_id': 'new_user_id',
                'plan_uuid': 'input_plan_uuid'}
        db_obj = fakes.FakeAssembly()
        mock_registry.Assembly.get_by_uuid.return_value = db_obj
        handler = assembly_handler.AssemblyHandler(self.ctx)
        res = handler.update('test_id', data)
        self.assertEqual(db_obj.user_id, res.user_id)
        db_obj.save.assert_called_once_with(self.ctx)
        db_obj.update.assert_called_once_with(data)
        mock_registry.Assembly.get_by_uuid.assert_called_once_with(self.ctx,
                                                                   'test_id')

    # Decorators apply bottom-up: mock_kc is KeystoneClientV3, mock_build is
    # the worker API's build(), and mock_registry comes from the class patch.
    @mock.patch('solum.worker.api.API.build')
    @mock.patch('solum.common.solum_keystoneclient.KeystoneClientV3')
    def test_create(self, mock_kc, mock_build, mock_registry):
        data = {'user_id': 'new_user_id',
                'uuid': 'input_uuid',
                'plan_uuid': 'input_plan_uuid'}
        db_obj = fakes.FakeAssembly()
        mock_registry.Assembly.return_value = db_obj
        fp = fakes.FakePlan()
        mock_registry.Plan.get_by_id.return_value = fp
        # Minimal plan content with a single heroku-style artifact; create()
        # is expected to kick off a build for it.
        fp.raw_content = {
            'name': 'theplan',
            'artifacts': [{'name': 'nodeus',
                           'artifact_type': 'heroku',
                           'content': {
                               'href': 'https://example.com/ex.git'},
                           'language_pack': 'auto'}]}
        mock_registry.Image.return_value = fakes.FakeImage()
        # create() should obtain a trust-scoped context from keystone.
        trust_ctx = utils.dummy_context()
        trust_ctx.trust_id = '12345'
        mock_kc.return_value.create_trust_context.return_value = trust_ctx
        handler = assembly_handler.AssemblyHandler(self.ctx)
        res = handler.create(data)
        db_obj.update.assert_called_once_with(data)
        db_obj.create.assert_called_once_with(self.ctx)
        self.assertEqual(db_obj, res)
        # The build request must carry the artifact details from the plan.
        # (build_id/assembly_id of 8 come from FakeAssembly/FakeImage -- TODO
        # confirm against the fakes module.)
        mock_build.assert_called_once_with(
            build_id=8, name='nodeus', assembly_id=8,
            source_uri='https://example.com/ex.git',
            test_cmd=None,
            base_image_id='auto', source_format='heroku',
            image_format='qcow2')
        mock_kc.return_value.create_trust_context.assert_called_once_with()

    # Decorators apply bottom-up: mock_deploy is delete_heat_stack and
    # mock_kc is KeystoneClientV3.
    @mock.patch('solum.common.solum_keystoneclient.KeystoneClientV3')
    @mock.patch('solum.deployer.api.API.delete_heat_stack')
    def test_delete(self, mock_deploy, mock_kc, mock_registry):
        db_obj = fakes.FakeAssembly()
        mock_registry.Assembly.get_by_uuid.return_value = db_obj
        handler = assembly_handler.AssemblyHandler(self.ctx)
        handler.delete('test_id')
        # delete() is asynchronous: it marks the row DELETING, tears down the
        # keystone trust, and asks the deployer to remove the heat stack.
        db_obj.save.assert_called_once_with(self.ctx)
        mock_registry.Assembly.get_by_uuid.assert_called_once_with(self.ctx,
                                                                   'test_id')
        mock_kc.return_value.delete_trust.assert_called_once_with(
            'trust_worthy')
        mock_deploy.assert_called_once_with(assem_id=db_obj.id)
        self.assertEqual(STATES.DELETING, db_obj.status)

    def test_trigger_workflow(self, mock_registry):
        trigger_id = 1
        artifacts = [{"name": "Test",
                      "artifact_type": "heroku",
                      "content": {"href": "https://github.com/some/project"},
                      "language_pack": "auto"}]
        db_obj = fakes.FakeAssembly()
        mock_registry.Assembly.get_by_trigger_id.return_value = db_obj
        plan_obj = fakes.FakePlan()
        mock_registry.Plan.get_by_id.return_value = plan_obj
        plan_obj.raw_content = {"artifacts": artifacts}
        handler = assembly_handler.AssemblyHandler(self.ctx)
        # Stub out the internals so only the orchestration logic of
        # trigger_workflow() itself is exercised.
        handler._build_artifact = mock.MagicMock()
        handler._context_from_trust_id = mock.MagicMock(return_value=self.ctx)
        handler.trigger_workflow(trigger_id)
        handler._build_artifact.assert_called_once_with(db_obj, artifacts[0])
        handler._context_from_trust_id.assert_called_once_with('trust_worthy')
        # NOTE(review): the trigger lookup is asserted with a None context --
        # presumably because no request context exists yet for a webhook
        # trigger; verify against the handler implementation.
        mock_registry.Assembly.get_by_trigger_id.assert_called_once_with(
            None, trigger_id)
        mock_registry.Plan.get_by_id.assert_called_once_with(self.ctx,
                                                             db_obj.plan_id)
gilbertpilz/solum
solum/tests/api/handlers/test_assembly.py
Python
apache-2.0
5,985
package pl.jalokim.propertiestojson.util;

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.assertj.core.api.Assertions;

import pl.jalokim.propertiestojson.domain.MainObject;

/**
 * Shared fixture data and assertion helpers for the properties-to-JSON converter tests.
 *
 * <p>Subclasses use the {@code initProperly*} factories to obtain a consistent set of
 * input properties (flat {@code Map<String,String>} vs. typed {@link Properties}) and the
 * {@code assert*} helpers to verify the resulting {@link MainObject} graph.</p>
 *
 * <p>NOTE(review): the constant name {@code INSRANCE_TYPE} ("insurance") and the
 * {@code hoobies} spelling ("hobbies") are typos, but they are protected API visible to
 * subclasses (and {@code getHoobies()} mirrors the domain class), so they are left as-is.</p>
 */
public class AbstractPropertiesToJsonConverterTest {

    // Expected scalar values shared between the map-based and Properties-based fixtures.
    protected static final String FIELD2_VALUE = "die3";
    protected static final String FIELD1_VALUE = "die2";
    // Cost appears both as its string form (map fixture) and typed double (Properties fixture).
    protected static final String COST_STRING_VALUE = "123.0";
    protected static final Double COST_INT_VALUE = 123.0;
    protected static final String INSRANCE_TYPE = "Medical";
    protected static final String STREET = "Jp2";
    protected static final String CITY = "Waraw";
    protected static final String SURNAME = "Surname";
    protected static final String NAME = "John";
    protected static final String EMAIL_1 = "example@gg.com";
    protected static final String EMAIL_2 = "example2@cc.com";
    protected static final String EMAIL_3 = "example3@gg.com";
    // Comma-separated list form; EMAIL_3 is deliberately duplicated to exercise repeats.
    protected static final String EMAILS = String.format("%s,%s,%s,%s", EMAIL_1, EMAIL_2, EMAIL_3, EMAIL_3);
    protected static final String GROUP_1 = "group1";
    protected static final String COMMERCIAL = "Commercial";
    protected static final String GROUP_3 = "group3";
    protected static final String GROUP_2 = "group2";
    protected static final String FREE = "Free";
    protected static final String CARS = "cars";
    protected static final String COMPUTERS = "computers";
    protected static final String WOMEN = "women";
    protected static final String SCIENCE = "science";
    // File-backed variant of the same fixture data.
    protected static final String FILE_PATH = "src/test/resources/example.properties";
    protected static final String MAN_COST = "126.543";
    protected static final Double EXPECTED_MAN_COST = 126.543;

    /**
     * Opens the example properties file as a stream.
     *
     * @return an open stream over {@link #FILE_PATH}; the caller is responsible for closing it
     * @throws IOException if the file cannot be opened
     */
    protected InputStream getPropertiesFromFile() throws IOException {
        return new FileInputStream(FILE_PATH);
    }

    /**
     * Builds the canonical input as a flat string-to-string map.  Indexed keys
     * ({@code groups[n]}, {@code hoobies[n]}) are inserted out of order on purpose so tests
     * can verify the converter sorts them by index.
     */
    protected Map<String, String> initProperlyPropertiesMap() {
        Map<String, String> properties = new HashMap<>();
        properties.put("man.name", NAME);
        properties.put("man.surname", SURNAME);
        properties.put("man.address.city", CITY);
        properties.put("man.address.street", STREET);
        properties.put("insurance.type", INSRANCE_TYPE);
        properties.put("insurance.cost", COST_STRING_VALUE);
        properties.put("field1", FIELD1_VALUE);
        properties.put("field2", FIELD2_VALUE);
        properties.put("man.emails", EMAILS);
        properties.put("man.groups[0].name", GROUP_1);
        properties.put("man.groups[0].type", COMMERCIAL);
        properties.put("man.groups[2].name", GROUP_3);
        properties.put("man.groups[2].type", COMMERCIAL);
        properties.put("man.groups[1].name", GROUP_2);
        properties.put("man.groups[1].type", FREE);
        properties.put("man.hoobies[0]", CARS);
        properties.put("man.hoobies[3]", COMPUTERS);
        properties.put("man.hoobies[2]", WOMEN);
        properties.put("man.hoobies[1]", SCIENCE);
        properties.put("man.married", "false");
        properties.put("man.insurance.cost", MAN_COST);
        properties.put("man.insurance.valid", "true");
        return properties;
    }

    /**
     * Builds the same canonical input as {@link #initProperlyPropertiesMap()} but with typed
     * values (Double, Boolean, List) in a {@link Properties} instance, so conversions from
     * already-typed sources can be tested.
     */
    protected Properties initProperlyProperties() {
        Properties properties = new Properties();
        properties.put("man.name", NAME);
        properties.put("man.surname", SURNAME);
        properties.put("man.address.city", CITY);
        properties.put("man.address.street", STREET);
        properties.put("insurance.type", INSRANCE_TYPE);
        properties.put("insurance.cost", COST_INT_VALUE);
        properties.put("field1", FIELD1_VALUE);
        properties.put("field2", FIELD2_VALUE);
        // Typed list instead of the comma-separated string used in the map fixture.
        properties.put("man.emails", Arrays.asList(EMAIL_1, EMAIL_2, EMAIL_3, EMAIL_3));
        properties.put("man.groups[0].name", GROUP_1);
        properties.put("man.groups[0].type", COMMERCIAL);
        properties.put("man.groups[2].name", GROUP_3);
        properties.put("man.groups[2].type", COMMERCIAL);
        properties.put("man.groups[1].name", GROUP_2);
        properties.put("man.groups[1].type", FREE);
        properties.put("man.hoobies[0]", CARS);
        properties.put("man.hoobies[3]", COMPUTERS);
        properties.put("man.hoobies[2]", WOMEN);
        properties.put("man.hoobies[1]", SCIENCE);
        properties.put("man.married", false);
        properties.put("man.insurance.cost", EXPECTED_MAN_COST);
        properties.put("man.insurance.valid", true);
        return properties;
    }

    /**
     * Asserts that the group at {@code index} in the converted object has the given name/type.
     */
    protected void assertGroupByIdAndExpectedValues(MainObject mainObject, int index, String name, String type) {
        Assertions.assertThat(mainObject.getMan().getGroups().get(index).getName()).isEqualTo(name);
        Assertions.assertThat(mainObject.getMan().getGroups().get(index).getType()).isEqualTo(type);
    }

    /**
     * Asserts the email list preserved order and the duplicate EMAIL_3 entry.
     */
    protected void assertEmailList(MainObject mainObject) {
        List<String> emails = mainObject.getMan().getEmails();
        Assertions.assertThat(emails.get(0)).isEqualTo(EMAIL_1);
        Assertions.assertThat(emails.get(1)).isEqualTo(EMAIL_2);
        Assertions.assertThat(emails.get(2)).isEqualTo(EMAIL_3);
        Assertions.assertThat(emails.get(3)).isEqualTo(EMAIL_3);
    }

    /**
     * Asserts the hobbies list was re-ordered by its bracketed indices
     * (input order in the fixtures is 0, 3, 2, 1).
     */
    protected void assertHobbiesList(MainObject mainObject) {
        List<String> hobbies = mainObject.getMan().getHoobies();
        Assertions.assertThat(hobbies.get(0)).isEqualTo(CARS);
        Assertions.assertThat(hobbies.get(1)).isEqualTo(SCIENCE);
        Assertions.assertThat(hobbies.get(2)).isEqualTo(WOMEN);
        Assertions.assertThat(hobbies.get(3)).isEqualTo(COMPUTERS);
    }
}
mikolajmitura/java-properties-to-json
src/test/java/pl/jalokim/propertiestojson/util/AbstractPropertiesToJsonConverterTest.java
Java
apache-2.0
5,760
package com.jiangjg.lib.ThingkingInJava.Th15;

import java.util.ArrayList;
import java.util.List;

// Minimal type hierarchy used to demonstrate generic covariance.
class Fruit {}
class Apple extends Fruit {}
class Orange extends Fruit {}

/**
 * Demonstrates covariance with bounded wildcards (Thinking in Java, ch. 15).
 *
 * A {@code List<? extends Fruit>} may refer to a list of Fruit or of any subtype,
 * but the compiler cannot know WHICH subtype, so no element may be added through
 * that reference (reading as Fruit is still allowed).
 */
public class GenericsAndCovariance {

    public static void main(String[] args) {
        // Legal: a List<Fruit> is assignable to List<? extends Fruit>.
        List<? extends Fruit> flist = new ArrayList<Fruit>();
        // Both adds are intentionally commented out: they do not compile, because
        // '? extends Fruit' could stand for List<Orange>, into which neither a
        // Fruit nor an Apple may be inserted (producer-extends: read-only here).
        //flist.add(new Fruit());
        //flist.add(new Apple());
    }
}
jiangjiguang/lib-java
src/com/jiangjg/lib/ThingkingInJava/Th15/GenericsAndCovariance.java
Java
apache-2.0
372
/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

// NOTE(review): this file appears to follow the AWS SDK code-generator pattern
// (enum <-> string mapper) -- presumably it should be regenerated rather than
// hand-edited; verify before making manual changes.

#include <aws/migrationhubstrategy/model/Severity.h>
#include <aws/core/utils/HashingUtils.h>
#include <aws/core/Globals.h>
#include <aws/core/utils/EnumParseOverflowContainer.h>

using namespace Aws::Utils;


namespace Aws
{
  namespace MigrationHubStrategyRecommendations
  {
    namespace Model
    {
      namespace SeverityMapper
      {
        // Precomputed hashes of the wire-format strings, used for fast lookup below.
        static const int HIGH_HASH = HashingUtils::HashString("HIGH");
        static const int MEDIUM_HASH = HashingUtils::HashString("MEDIUM");
        static const int LOW_HASH = HashingUtils::HashString("LOW");


        // Maps a wire-format name to the Severity enum.  Unknown names are stored in
        // the overflow container (keyed by hash) so round-tripping is still possible;
        // if no container exists, NOT_SET is returned.
        Severity GetSeverityForName(const Aws::String& name)
        {
          int hashCode = HashingUtils::HashString(name.c_str());
          if (hashCode == HIGH_HASH)
          {
            return Severity::HIGH;
          }
          else if (hashCode == MEDIUM_HASH)
          {
            return Severity::MEDIUM;
          }
          else if (hashCode == LOW_HASH)
          {
            return Severity::LOW;
          }
          EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
          if(overflowContainer)
          {
            overflowContainer->StoreOverflow(hashCode, name);
            return static_cast<Severity>(hashCode);
          }

          return Severity::NOT_SET;
        }

        // Maps a Severity enum back to its wire-format name; values outside the known
        // set are looked up in the overflow container populated by GetSeverityForName.
        Aws::String GetNameForSeverity(Severity enumValue)
        {
          switch(enumValue)
          {
          case Severity::HIGH:
            return "HIGH";
          case Severity::MEDIUM:
            return "MEDIUM";
          case Severity::LOW:
            return "LOW";
          default:
            EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
            if(overflowContainer)
            {
              return overflowContainer->RetrieveOverflow(static_cast<int>(enumValue));
            }

            return {};
          }
        }

      } // namespace SeverityMapper
    } // namespace Model
  } // namespace MigrationHubStrategyRecommendations
} // namespace Aws
aws/aws-sdk-cpp
aws-cpp-sdk-migrationhubstrategy/source/model/Severity.cpp
C++
apache-2.0
2,154
using System;
using System.Collections.Generic;
using System.Text;

namespace SuperMap.Connector.Interface
{
    /// <summary>
    /// Marker interface for the Realspace service component.  Declares no members of
    /// its own beyond what <see cref="IComponent"/> provides.
    /// </summary>
    /// <remarks>
    /// NOTE(review): the System / System.Collections.Generic / System.Text usings are
    /// unused in this file; they are kept only because this view may be partial.
    /// </remarks>
    public interface IRealspace : IComponent
    {
    }
}
SuperMap/iClient-for-DotNet
Connector/Interface/IRealspace.cs
C#
apache-2.0
172
/**
 * Appcelerator Kroll - licensed under the Apache Public License 2
 * see LICENSE in the root folder for details on the license.
 * Copyright (c) 2009 Appcelerator, Inc. All Rights Reserved.
 */
#include "utils.h"

using std::string;
using std::vector;
using std::pair;

namespace UTILS_NS
{
namespace BootUtils
{
    // These are also used in application.cpp
    void ScanBundledComponents(string, vector<SharedComponent>&);
    static void ScanRuntimesAtPath(string, vector<SharedComponent>&, bool=true);
    static void ScanModulesAtPath(string, vector<SharedComponent>&, bool=true);
    static void ScanSDKsAtPath(string, vector<SharedComponent>&, bool=true);
    static void ScanMobileSDKsAtPath(string, vector<SharedComponent>&, bool=true);
    static void AddToComponentVector(vector<SharedComponent>&, SharedComponent);

    // Appends 'c' to 'components' unless a component with the same type and
    // path is already present (keeps the vector free of duplicates).
    static void AddToComponentVector(vector<SharedComponent>& components,
        SharedComponent c)
    {
        vector<SharedComponent>::iterator i = components.begin();
        while (i != components.end())
        {
            SharedComponent e = *i++;
            if (e->type == c->type && e->path == c->path)
            {
                return;
            }
        }

        components.push_back(c);
    }

    // Returns the (cached) list of installed components found on the component
    // search paths; pass force=true to rescan.  NOTE(review): the static cache
    // is not thread-safe -- confirm all callers run on one thread.
    vector<SharedComponent>& GetInstalledComponents(bool force)
    {
        static std::vector<SharedComponent> installedComponents;
        if (installedComponents.empty() || force)
        {
            installedComponents.clear();
            vector<string>& paths = GetComponentSearchPaths();
            vector<string>::iterator i = paths.begin();
            while (i != paths.end())
            {
                string path(*i++);
                ScanRuntimesAtPath(path, installedComponents, false);
                ScanSDKsAtPath(path, installedComponents, false);
                ScanMobileSDKsAtPath(path, installedComponents, false);
                ScanModulesAtPath(path, installedComponents, false);
            }

            // Sort components by version here so that the latest version of
            // any component will always be chosen. Use a stable_sort because we
            // want to give preference to components earlier on the search path.
            std::stable_sort(
                installedComponents.begin(),
                installedComponents.end(),
                BootUtils::WeakCompareComponents);
        }
        return installedComponents;
    }

    // Small value pair: a directory's leaf name and its full path.
    class PathBits
    {
    public:
        PathBits(const string& name, const string& fullPath) :
            name(name), fullPath(fullPath) { }
        std::string name;
        std::string fullPath;
    };

    // Lists the subdirectories of 'path', skipping dot-entries and non-directories.
    static vector<PathBits> GetDirectoriesAtPath(std::string& path)
    {
        vector<PathBits> directories;
        vector<string> paths;
        FileUtils::ListDir(path, paths);

        vector<string>::iterator i = paths.begin();
        while (i != paths.end())
        {
            string& subpath(*i++);
            if (subpath[0] == '.')
                continue;

            string fullPath(FileUtils::Join(path.c_str(), subpath.c_str(), NULL));
            if (!FileUtils::IsDirectory(fullPath))
                continue;

            directories.push_back(PathBits(subpath, fullPath));
        }
        return directories;
    }

    // Scans <searchpath>/runtime[/<os>]/* for runtime components.  The <os>
    // segment is only present for non-bundled (installed) locations.
    static void ScanRuntimesAtPath(string path, vector<SharedComponent>& results,
        bool bundled)
    {
        if (!FileUtils::IsDirectory(path))
            return;

        // Read everything that looks like <searchpath>/runtime/<os>/*
        string rtPath(FileUtils::Join(path.c_str(), "runtime", 0));
        if (!bundled)
            rtPath = FileUtils::Join(rtPath.c_str(), OS_NAME, 0);
        vector<PathBits> versions(GetDirectoriesAtPath(rtPath));
        for (size_t i = 0; i < versions.size(); i++)
        {
            PathBits& b = versions[i];
            AddToComponentVector(results,
                KComponent::NewComponent(RUNTIME, "runtime", b.name, b.fullPath));
        }
    }

    // Scans <searchpath>/sdk[/<os>]/* for SDK components.
    static void ScanSDKsAtPath(string path, vector<SharedComponent>& results,
        bool bundled)
    {
        if (!FileUtils::IsDirectory(path))
            return;

        // Read everything that looks like <searchpath>/sdk/<os>/*
        string sdkPath(FileUtils::Join(path.c_str(), "sdk", 0));
        if (!bundled)
            sdkPath = FileUtils::Join(sdkPath.c_str(), OS_NAME, 0);
        vector<PathBits> versions(GetDirectoriesAtPath(sdkPath));
        for (size_t i = 0; i < versions.size(); i++)
        {
            PathBits& b = versions[i];
            AddToComponentVector(results,
                KComponent::NewComponent(SDK, "sdk", b.name, b.fullPath, bundled));
        }
    }

    // Scans <searchpath>/mobilesdk[/<os>]/* for mobile SDK components.
    static void ScanMobileSDKsAtPath(string path, vector<SharedComponent>& results,
        bool bundled)
    {
        if (!FileUtils::IsDirectory(path))
            return;

        // Read everything that looks like <searchpath>/mobilesdk/<os>/*
        string sdkPath(FileUtils::Join(path.c_str(), "mobilesdk", 0));
        if (!bundled)
            sdkPath = FileUtils::Join(sdkPath.c_str(), OS_NAME, 0);
        vector<PathBits> versions(GetDirectoriesAtPath(sdkPath));
        for (size_t i = 0; i < versions.size(); i++)
        {
            PathBits& b = versions[i];
            AddToComponentVector(results,
                KComponent::NewComponent(MOBILESDK, "mobilesdk", b.name,
                    b.fullPath, bundled));
        }
    }

    // Scans <searchpath>/modules[/<os>]/<name>/<version> for module components.
    static void ScanModulesAtPath(string path, vector<SharedComponent>& results,
        bool bundled)
    {
        if (!FileUtils::IsDirectory(path))
            return;

        // Read everything that looks like <searchpath>/modules/<os>/*
        string namesPath(FileUtils::Join(path.c_str(), "modules", 0));
        if (!bundled)
            namesPath = FileUtils::Join(namesPath.c_str(), OS_NAME, 0);
        vector<PathBits> moduleNames(GetDirectoriesAtPath(namesPath));
        for (size_t i = 0; i < moduleNames.size(); i++)
        {
            PathBits& moduleName = moduleNames[i];

            // Read everything that looks like <searchpath>/modules/<os>/<name>/*
            vector<PathBits> moduleVersions(GetDirectoriesAtPath(moduleName.fullPath));
            for (size_t j = 0; j < moduleVersions.size(); j++)
            {
                PathBits& moduleVersion = moduleVersions[j];
                AddToComponentVector(results, KComponent::NewComponent(
                    MODULE, moduleName.name, moduleVersion.name,
                    moduleVersion.fullPath, bundled));
            }
        }
    }

    // Scans every component kind bundled directly inside an application folder.
    void ScanBundledComponents(string path, vector<SharedComponent>& results)
    {
        ScanRuntimesAtPath(path, results, true);
        ScanMobileSDKsAtPath(path, results, true);
        ScanSDKsAtPath(path, results, true);
        ScanModulesAtPath(path, results, true);
    }

    // Compares two dotted version strings component-by-component; returns <0,
    // 0 or >0.  An empty string compares lower than any non-empty one, and a
    // version with more components wins a tie on the shared prefix.
    // NOTE(review): components are compared lexically, so "10" < "9" -- this
    // misorders multi-digit components; confirm whether callers rely on it
    // before changing the ordering.
    int CompareVersions(string one, string two)
    {
        if (one.empty() && two.empty())
            return 0;
        if (one.empty())
            return -1;
        if (two.empty())
            return 1;

        vector<string> listOne;
        vector<string> listTwo;
        FileUtils::Tokenize(one, listOne, ".");
        FileUtils::Tokenize(two, listTwo, ".");

        size_t min = listOne.size();
        if (listTwo.size() < listOne.size())
            min = listTwo.size();

        for (size_t i = 0; i < min; i++)
        {
            int result = listOne.at(i).compare(listTwo.at(i));
            if (result != 0)
                return result;
        }

        if (listOne.size() > listTwo.size())
            return 1;
        else if (listTwo.size() > listOne.size())
            return -1;
        else
            return 0;
    }

    // Strict-weak ordering for sorting components newest-version-first.
    bool WeakCompareComponents(SharedComponent one, SharedComponent two)
    {
        return BootUtils::CompareVersions(one->version, two->version) > 0;
    }

    // Parses a "key: value"-per-line manifest file into ordered pairs.
    // Lines without a colon, with an empty key, or with an empty value are
    // skipped.  Returns an empty vector when the file is missing or empty.
    vector<pair<string, string> > ReadManifestFile(std::string path)
    {
        vector<pair<string, string> > manifest;
        if (!FileUtils::IsFile(path))
            return manifest;

        string manifestContents(FileUtils::ReadFile(path));
        if (manifestContents.empty())
            return manifest;

        vector<string> manifestLines;
        FileUtils::Tokenize(manifestContents, manifestLines, "\n");
        for (size_t i = 0; i < manifestLines.size(); i++)
        {
            string line = FileUtils::Trim(manifestLines[i]);
            size_t pos = line.find(":");

            // FIX: also reject lines with no colon at all -- previously
            // pos == npos wrapped (pos + 1 == 0) and produced a bogus entry
            // whose key and value were both the whole line.
            if (pos == string::npos || pos == 0 || pos == line.length() - 1)
            {
                continue;
            }
            else
            {
                manifest.push_back(pair<string, string>(
                    FileUtils::Trim(line.substr(0, pos)), // The key
                    FileUtils::Trim(line.substr(pos + 1, line.length())))); // The value.
            }
        }

        return manifest;
    }

    // Returns the first component in 'components' that satisfies 'dep'
    // (matching type and name, version per the dependency's requirement), or
    // NULL when none does.  'components' is expected to be sorted so earlier
    // entries are preferred.
    SharedComponent ResolveDependency(SharedDependency dep,
        vector<SharedComponent>& components)
    {
        vector<SharedComponent>::iterator i = components.begin();
        while (i != components.end())
        {
            SharedComponent comp = *i++;
            if (dep->type != comp->type || dep->name != comp->name)
                continue;

            int compare = CompareVersions(comp->version, dep->version);
            // FIX: the LTE requirement was missing from this chain, so "<="
            // dependencies (producible by NewDependencyFromManifestLine)
            // could never resolve.
            if ((dep->requirement == Dependency::EQ && compare == 0) ||
                (dep->requirement == Dependency::GTE && compare >= 0) ||
                (dep->requirement == Dependency::LTE && compare <= 0) ||
                (dep->requirement == Dependency::GT && compare > 0) ||
                (dep->requirement == Dependency::LT && compare < 0))
            {
                return comp;
            }
        }

        return NULL;
    }
}

    // Builds a dependency with an exact (EQ) version requirement.
    SharedDependency Dependency::NewDependencyFromValues(
        KComponentType type, std::string name, std::string version)
    {
        Dependency* d = new Dependency();
        d->type = type;
        d->name = name;
        d->version = version;
        d->requirement = EQ;
        return d;
    }

    // Parses a manifest "key: value" pair into a dependency.  The value may be
    // prefixed with >=, <=, <, > or = (default EQ); the key selects the
    // component type ("runtime", "sdk", "mobilesdk", otherwise a module name).
    SharedDependency Dependency::NewDependencyFromManifestLine(
        string key, string value)
    {
        Dependency* d = new Dependency();
        size_t versionStart;
        // Two-character operators must be tested before their one-character
        // prefixes ("<" / ">").
        if (value.find(">=") != string::npos)
        {
            d->requirement = GTE;
            versionStart = 2;
        }
        else if (value.find("<=") != string::npos)
        {
            d->requirement = LTE;
            versionStart = 2;
        }
        else if (value.find("<") != string::npos)
        {
            d->requirement = LT;
            versionStart = 1;
        }
        else if (value.find(">") != string::npos)
        {
            d->requirement = GT;
            versionStart = 1;
        }
        else if (value.find("=") != string::npos)
        {
            d->requirement = EQ;
            versionStart = 1;
        }
        else
        {
            d->requirement = EQ;
            versionStart = 0;
        }

        d->name = key;
        d->version = value.substr(versionStart);

        if (key == "runtime")
        {
            d->type = RUNTIME;
        }
        else if (key == "sdk")
        {
            d->type = SDK;
        }
        else if (key == "mobilesdk")
        {
            d->type = MOBILESDK;
        }
        else
        {
            d->type = MODULE;
        }
        return d;
    }

    // Builds a component descriptor for a scanned directory.
    SharedComponent KComponent::NewComponent(KComponentType type, string name,
        string version, string path, bool bundled)
    {
        KComponent* c = new KComponent();
        c->type = type;
        c->name = name;
        c->version = version;
        c->path = path;
        // FIX: previously hard-coded to true, silently marking every component
        // as bundled regardless of the argument.
        c->bundled = bundled;
        return c;
    }
}
appcelerator/kroll
libkroll/utils/boot_utils.cpp
C++
apache-2.0
9,629
// idempotently prepends bin directory to current PATH and prints; e.g., // export PATH=`go run src/github.com/xoba/goutil/setpath/setpath.go` package main import ( "fmt" "os" "path" "strings" ) func main() { wd, err := os.Getwd() if err != nil { fmt.Fprintln(os.Stderr, "can't run os.Getwd()") os.Exit(1) } bin := path.Clean(wd + "/bin") parts := strings.Split(os.Getenv("PATH"), ":") var hasBin bool for _, p := range parts { if p == bin { hasBin = true } } if !hasBin { var out []string out = append(out, bin) out = append(out, parts...) parts = out } fmt.Println(strings.Join(parts, ":")) }
xoba/goutil
setpath/setpath.go
GO
apache-2.0
631
(function ($){ $(".social-link").click(function (e) { var type = $("[name=social_type]:checked").val(); if (!type) return; var href = this.href; this.href = href.replace(/next=\/([^\/]+)\/([^\/]+)\/([^\/]+)\//, function (all, action, backend, old_type) { return "next=/{action}/{backend}/{type}/" .replace("{action}", action) .replace("{backend}", backend) .replace("{type}", type); }); }); $(".cgu-checkbox").click(function () { var toggable = $(this).data("toggle"); var $el = $(toggable); if ($el.attr("disabled")) $el.removeAttr("disabled"); else $el.attr("disabled", "disabled"); }); $(".btn-register").click(function (e) { var checkbox = $(this).data("checkbox"); var name = ($(this).hasClass("btn-register-social")) ? "social_type" : "type"; function abort () { e.preventDefault(); e.stopPropagation(); } if (!assertTypeChecked(name)) { abort(); var $container = $("[name="+ name +"]").parents(".form-group"); if (!$(".text-danger", $container).length) { $container.append("<span class='text-danger'>Veuillez sélectionner une valeur.</span>"); } } else if (!$(checkbox).is(':checked')) abort(); }); function assertTypeChecked(name) { if (!$("[type=radio][name="+ name +"]").length) return true; return $("[name="+ name +"]:checked").length !== 0; } })(jQuery);
huguesmayolle/famille
famille/static/js/register.js
JavaScript
apache-2.0
1,594
package com.example.dell.mytextapplication.share; import java.io.File; import java.util.ArrayList; import java.util.List; import java.util.Timer; import java.util.TimerTask; import org.xmlpull.v1.XmlPullParser; import com.example.dell.mytextapplication.R; import com.example.dell.mytextapplication.component.AppActivityClose; import com.example.dell.mytextapplication.component.AppMediaPlayerFunction; import com.example.dell.mytextapplication.dialog.wifi_not_connect; import com.example.dell.mytextapplication.share.DeleteDialog; import com.example.dell.mytextapplication.share.layoutparams.VideoParams; import android.content.ContentResolver; import android.annotation.SuppressLint; import android.app.Activity; import android.app.Dialog; import android.content.ContentValues; import android.content.Context; import android.content.Intent; import android.graphics.Bitmap; import android.media.ThumbnailUtils; import android.net.ConnectivityManager; import android.net.NetworkInfo; import android.net.Uri; import android.os.Bundle; import android.os.Handler; import android.os.Message; import android.provider.MediaStore; import android.provider.MediaStore.Video; import android.util.AttributeSet; import android.util.Xml; import android.view.Display; import android.view.Gravity; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.view.Window; import android.view.WindowManager; import android.view.View.OnClickListener; import android.widget.AdapterView; import android.widget.AdapterView.OnItemSelectedListener; import android.widget.Button; import android.widget.Gallery.LayoutParams; import android.widget.BaseAdapter; import android.widget.ImageView; import android.widget.LinearLayout; import android.widget.PopupWindow; import android.widget.RelativeLayout; import android.widget.TextView; @SuppressLint("SdCardPath") public class VideoGalleryActivity extends Activity { private static VideoGalleryActivity mContext = null; public 
final static int DELETE_ENBALE = 5000; public final static int SHARE_ENABLE = 5001; public VideoAdapter imageAdapterV; public static XmlPullParser parser; private PopupWindow mPopupWindow; private MyGallery myGallery; public LinearLayout Parent; private String videoPath; RelativeLayout myGallerylLayout; private int positionV; public List<String> video_path; public VideoParams videoParams; private TextView photos_count; private Button deleButton; private Button shareButton; Timer deletetDelayTimer; Timer sharetDelayTimer; private boolean isShowing = false; private boolean connectWifi = false; private File file; private Dialog dlg; Handler handler; public boolean isExit = false; public boolean isShared = false; public boolean isBackgroud = false; public static VideoGalleryActivity getInstance() { return mContext; } @Override protected void onStop() { super.onStop(); if (isShared && !isExit) { isShared = false; }else if (isBackgroud) { isBackgroud = false; }else if (!isShared && !isExit && !AppActivityClose.isExit) { isExit = true; AppActivityClose.getInstance().exitAll(); } } @Override protected void onDestroy() { for (int i = 0; i < 2; i++) { if (bitmap[i] != null && !bitmap[i].isRecycled()) { bitmap[i].recycle(); bitmap[i] = null; } } if (sharetDelayTimer != null) { sharetDelayTimer.cancel(); sharetDelayTimer = null; } if (deletetDelayTimer != null) { deletetDelayTimer.cancel(); deletetDelayTimer = null; } dismiss(); super.onDestroy(); } @SuppressWarnings("deprecation") @SuppressLint("HandlerLeak") @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); mContext = this; requestWindowFeature(Window.FEATURE_NO_TITLE); getWindow().setFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON, WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON); if (parser == null) { parser = getResources().getXml(R.layout.my_gallery); } AttributeSet attributes = Xml.asAttributeSet(parser); videoParams = new VideoParams(this); Parent = new 
LinearLayout(this); myGallerylLayout = new RelativeLayout(this); myGallerylLayout.setId(3); myGallery = new MyGallery(mContext, attributes); myGallery.setSpacing(16); videoParams.getDisplayMetrics(); videoParams.initVar(); videoParams.initLandLayoutParams(); myGallerylLayout.addView(myGallery,videoParams.myGallerylLayoutParams); Parent.addView(myGallerylLayout,videoParams.GallerylLayoutParams); setContentView(Parent,videoParams.parentLayoutParams); AppActivityClose.getInstance().addActivity(this); //tianjia connectWifi = note_Intent(mContext); video_path = getInSDPhotoVideo(); imageAdapterV = new VideoAdapter(getApplicationContext()); Intent intent = getIntent(); videoPath = intent.getStringExtra("videoPath"); int currenPosition = intent.getIntExtra("position", 0); myGallery.setAdapter(imageAdapterV); myGallery.setSelection(currenPosition); myGallery.setOnItemSelectedListener(listenerVideo); handler = new Handler(){ @Override public void handleMessage(Message msg) { super.handleMessage(msg); switch (msg.what) { case DELETE_ENBALE: deleButton.setEnabled(true); shareButton.setEnabled(true); break; case SHARE_ENABLE: shareButton.setEnabled(true); deleButton.setEnabled(true); break; } } }; } /* * 获取指定目录下的视频文件后缀为.mp4 */ public static List<String> getInSDPhotoVideo() { List<String> it_p = new ArrayList<String>(); String path = "/mnt/sdcard/";//AppInforToCustom.getAppInforToCustomInstance().getCameraShootingPath(); File f = new File(path); if (f.exists()) { //首先要判断文件夹是否存在 File[] files = f.listFiles(); for(File file : files){ if (file.isFile()) { //如果是文件的话 String fileName = file.getName(); if (fileName.endsWith(".mp4")) { it_p.add(file.getPath()); } } } }else { //如果文件夹不存在则返回的list的大小为0,同时创建一个新的文件夹 f.mkdirs(); } return it_p; } /* * 判断网络是否连接 */ public boolean note_Intent(Context context) { ConnectivityManager con = (ConnectivityManager) context .getSystemService(Context.CONNECTIVITY_SERVICE); NetworkInfo networkinfo = con.getActiveNetworkInfo(); if (networkinfo == null || 
!networkinfo.isAvailable()) { return false; } else{ return true; } } /* * 当界面消失时,将PopupWindow取消 */ public void dismiss() { if (mPopupWindow != null) { mPopupWindow.dismiss(); mPopupWindow = null; } } class deleteDelayTask extends TimerTask{ @Override public void run() { Message msg = new Message(); msg.what = DELETE_ENBALE; handler.sendMessage(msg); deletetDelayTimer.cancel(); deletetDelayTimer = null; } } class shareDelayTask extends TimerTask{ @Override public void run() { Message msg = new Message(); msg.what = SHARE_ENABLE; handler.sendMessage(msg); sharetDelayTimer.cancel(); sharetDelayTimer = null; } } @SuppressWarnings("deprecation") private void showPopWindow(){ dismiss(); isShowing = true; View foot_popunwindwow = null; LayoutInflater LayoutInflater = (LayoutInflater) mContext .getSystemService(Activity.LAYOUT_INFLATER_SERVICE); foot_popunwindwow = LayoutInflater.inflate(R.layout.photo_count, null); mPopupWindow = new PopupWindow(foot_popunwindwow, LayoutParams.MATCH_PARENT, LayoutParams.WRAP_CONTENT); mPopupWindow.showAtLocation(findViewById(3), Gravity.TOP , 0, 5); mPopupWindow.update(); photos_count = (TextView) foot_popunwindwow.findViewById(R.id.photo_counts); deleButton = (Button) foot_popunwindwow.findViewById(R.id.delete_button); deleButton.setOnClickListener(new View.OnClickListener() { public void onClick(View v) { deleButton.setEnabled(false); shareButton.setEnabled(false); if(deletetDelayTimer == null){ deletetDelayTimer = new Timer(); deletetDelayTimer.schedule(new deleteDelayTask(), 1500); } dlg = new DeleteDialog(mContext,R.style.DeleteDialog,2); WindowManager m = getWindowManager(); Display d = m.getDefaultDisplay(); Window w=dlg.getWindow(); WindowManager.LayoutParams lp =w.getAttributes(); w.setGravity(Gravity.RIGHT | Gravity.TOP); lp.x=10; lp.y=70; lp.height = (int) (d.getHeight() * 0.3); w.setAttributes(lp); dlg.show(); } }); shareButton = (Button) foot_popunwindwow.findViewById(R.id.share_button); shareButton.setOnClickListener(new 
View.OnClickListener() { public void onClick(View v) { if(connectWifi){ shareButton.setEnabled(false); deleButton.setEnabled(false); if(sharetDelayTimer ==null){ sharetDelayTimer = new Timer(true); sharetDelayTimer.schedule(new shareDelayTask(), 1500); } Intent shareIntent =new Intent(); shareIntent.setAction("android.intent.action.SEND"); shareIntent.setType("video/*"); file = new File(videoPath); ContentValues content = new ContentValues(5); content.put(Video.VideoColumns.TITLE, "Share"); content.put(MediaStore.Video.VideoColumns.SIZE, file.length()); content.put(Video.VideoColumns.DATE_ADDED,System.currentTimeMillis() / 1000); content.put(Video.Media.MIME_TYPE, "video/mp4"); content.put(MediaStore.Video.Media.DATA, videoPath); ContentResolver contentResolver = getContentResolver(); Uri base = MediaStore.Video.Media.EXTERNAL_CONTENT_URI; Uri newUri = contentResolver.insert(base, content); if(newUri == null){ shareIntent.putExtra(Intent.EXTRA_STREAM, Uri.fromFile(file)); }else{ shareIntent.putExtra(Intent.EXTRA_STREAM, newUri); } shareIntent.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK); startActivity(Intent.createChooser(shareIntent, "Share")); isShared = true; }else{ wifi_not_connect.createwificonnectDialog(mContext).show(); } } }); } public OnItemSelectedListener listenerVideo = new OnItemSelectedListener() { public void onItemSelected(AdapterView<?> adapter, View view, int position, long id) { positionV = position; videoPath = video_path.get(positionV).toString(); if(!isShowing){ showPopWindow(); } photos_count.setText(positionV + 1 + " of " + video_path.size()); } public void onNothingSelected(AdapterView<?> arg0) { } }; public void Delete_video() { file = new File(videoPath); if(file.exists()){ file.delete(); file = null; } VideoImage.imageAdapterV.removePhoto(positionV);//这里要判断视频的缩略图是否小于100张同时视频的个数大于100,如果符合则要添加视频的缩略图到VideoImage中。 if (positionV < VideoImage.instance.video_path.size()) { VideoImage.instance.video_path.remove(positionV); } if (video_path.size() -1 
== 0) { dismiss(); isExit = true; mContext.finish(); }else{ video_path.remove(positionV); if(positionV == video_path.size()){ //如果是删除最后一个,返回到第一个 positionV = 0; imageAdapterV = new VideoAdapter(getApplicationContext()); myGallery.setAdapter(imageAdapterV); myGallery.setSelection(positionV); }else { imageAdapterV.notifyDataSetChanged(); } dlg.dismiss(); videoPath = video_path.get(positionV).toString(); photos_count.setText(positionV + 1 + " of " + video_path.size()); } } public Bitmap[] bitmap; class VideoAdapter extends BaseAdapter{ private Context mContext; LayoutInflater inflater1; public VideoAdapter(Context context) { mContext = context; inflater1 = LayoutInflater.from(mContext); bitmap = new Bitmap[2]; } public VideoAdapter(VideoGalleryActivity mContext2) { mContext = mContext2; inflater1 = LayoutInflater.from(mContext); } public int getCount() { return video_path.size(); } public void removeVideo(int position){ video_path.remove(position); } public Object getItem(int position) { return video_path.get(position); } public long getItemId(int position) { return position; } public View getView(int position, View convertView, ViewGroup parent) { HolderView holderView; if (convertView == null) { convertView = inflater1.inflate(R.layout.video_play_item, null); holderView = new HolderView(); holderView.imgv = (ImageView)convertView.findViewById(R.id.imageView_video_play); holderView.playBtn = (ImageView)convertView.findViewById(R.id.video_play_button); convertView.setTag(holderView); }else { holderView = (HolderView)convertView.getTag(); } holderView.playBtn.setOnClickListener(videoPlayListent); holderView.imgv.setScaleType(ImageView.ScaleType.FIT_CENTER); if (bitmap[0] == null) { bitmap[1] = getVideoThumbnail(bitmap[0], video_path.get(position).toString(),100,100,MediaStore.Images.Thumbnails.MICRO_KIND); holderView.imgv.setImageBitmap(bitmap[1]); if (bitmap[0] != null && !bitmap[0].isRecycled()) { bitmap[0].recycle(); bitmap[0] = null; } } return convertView; } class 
HolderView{ ImageView playBtn; ImageView imgv; } } /* * 获取视频的缩图 * 先通过ThumbnailUtils来创建一个视频的图,然后再利用ThumbnailUtils来生成指定大小的图 * MICRO_KIND */ private Bitmap getVideoThumbnail(Bitmap bitmap, String videoPath, int width , int height, int kind){ bitmap = ThumbnailUtils.createVideoThumbnail(videoPath, kind); bitmap = ThumbnailUtils.extractThumbnail(bitmap, width, height, ThumbnailUtils.OPTIONS_RECYCLE_INPUT); return bitmap; } public OnClickListener videoPlayListent = new OnClickListener() { public void onClick(View v) { //这里要判断这个视频文件是否存在,如果存在则播放,如果不存在,则刷新视频数据 File file = new File(videoPath); if(file.exists()){ Intent intent = new Intent(VideoGalleryActivity.this, AppMediaPlayerFunction.class); intent.putExtra("file_name", videoPath); intent.putExtra("file_position", positionV); startActivity(intent); isBackgroud = true; }else {//如果这个文件不存在,则刷新数据,提示文件被非法删除 video_path = getInSDPhotoVideo(); int size = video_path.size(); if (size > 0) { //如果用户在后台将所有的视频都删除,则关闭这个界面 if (positionV > size - 1) { positionV = 0; } videoPath = video_path.get(positionV).toString(); }else{ isExit = true; mContext.finish(); } } } }; @Override public void onBackPressed() { isExit = true; mContext.finish(); } }
YiNPNG/test
MyTextApplication/app/src/main/java/com/example/dell/mytextapplication/share/VideoGalleryActivity.java
Java
apache-2.0
15,064
/* * Licensed to CRATE Technology GmbH ("Crate") under one or more contributor * license agreements. See the NOTICE file distributed with this work for * additional information regarding copyright ownership. Crate licenses * this file to you under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. You may * obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * However, if you have executed another commercial license agreement * with Crate these terms will supersede the license and you may use the * software solely pursuant to the terms of the relevant commercial agreement. */ package io.crate.analyze; import org.elasticsearch.common.settings.Settings; import javax.annotation.ParametersAreNonnullByDefault; @ParametersAreNonnullByDefault public class CreateRepositoryAnalyzedStatement extends AbstractDDLAnalyzedStatement { private final String repositoryName; private final String repositoryType; private final Settings settings; public CreateRepositoryAnalyzedStatement(String repositoryName, String repositoryType, Settings settings) { this.repositoryName = repositoryName; this.repositoryType = repositoryType; this.settings = settings; } public String repositoryName() { return repositoryName; } public String repositoryType() { return repositoryType; } public Settings settings() { return settings; } @Override public <C, R> R accept(AnalyzedStatementVisitor<C, R> analyzedStatementVisitor, C context) { return analyzedStatementVisitor.visitCreateRepositoryAnalyzedStatement(this, context); } }
aslanbekirov/crate
sql/src/main/java/io/crate/analyze/CreateRepositoryAnalyzedStatement.java
Java
apache-2.0
2,070
<?php /** * 显示层类 * 主要功能为模板解析 */ class GF_View { /** * 路径相关参数 */ private $group_name; private $module_name; private $action_name; private $include_pattern = '/{include:([a-zA-Z][a-zA-Z0-9_-]*):([a-zA-Z][a-zA-Z0-9_-]*)}/'; private $layout_url = ''; //布局模板路径 private $layout = ''; //布局模板内容 /** * 构造函数 */ public function __construct($group_name, $module_name, $action_name) { $this->group_name = $group_name; $this->module_name = $module_name; $this->action_name = $action_name; $this->layout(); //初始化布局模板 } /** * 处理模板从这里开始 * 首先判断读取原模板还是读取模板缓存 * * @param $tpl <string> 默认模板路径 */ public function processTemplate($tpl = null) { //重新解析模板开关,默认不解析 $parseTemplateStatus = 0; //获取当前模板路径 $template_path = $this->getTemplatePath($tpl); //获取当前模板最后修改时间 if (!file_exists($template_path)) sysError('要载入的模板文件不存在!' . $template_path); else $template_mtime = filemtime($template_path); $cache_template_path = $this->getTemplateCachePath($template_path); if (file_exists($cache_template_path) == true) { $cache_template_mtime = filemtime($cache_template_path); //模板更新,重新创建模板缓存 if ($template_mtime > $cache_template_mtime) $parseTemplateStatus = 1; } else { $parseTemplateStatus = 1; //没有缓存模板则需要解析 } //当前模板如果不需要解析,查看Layout布局模板是否需要重新解析 if ($parseTemplateStatus == 0) { if (C('layout') == true) { $layout_mtime = filemtime($this->layout_url); if ($layout_mtime > $cache_template_mtime) $parseTemplateStatus = 1; } } //当前模板和Layout如果不需要解析,则查看他们包含的include模板是否需要重新解析 if ($parseTemplateStatus == 0) { //获取原模板内容,如果启用layout,则该模板为layout叠加后的内容 $content = $this->getTemplateContent($template_path); //获取include模板url $include_url_array = $this->_getIncludeUrl($content); if (!empty($include_url_array)) { foreach ($include_url_array as $k => $v) { if (!file_exists($v)) sysError('要载入的模板文件不存在!' . 
$v); else { $include_template_mtime = filemtime($v); //如果模板更新,重新创建模板缓存 if ($include_template_mtime > $cache_template_mtime) $parseTemplateStatus = 1; } } } } //如果模板(包括include模板)已经更新,则重新解析 || debug模式每次重新生成cache if ($parseTemplateStatus == 1 || C('debug') == true) $this->cacheTemplate($template_path); return $cache_template_path; } /** * 定位模板路径 * * @param $tpl <string> 模板路径 */ private function getTemplatePath($tpl = null) { $group_tpl_suffix = getGroupTplSuffix(); if ($tpl != null) { if (strpos($tpl, '.') == 0) { $tpl = $this->module_name . '.' . $tpl; } } else { $tpl = $this->module_name . '.' . $this->action_name; } $template_path = APP_VIEW_PATH . "/{$this->group_name}/{$tpl}" . $group_tpl_suffix; return $template_path; } /** * 定位模板缓存路径 */ private function getTemplateCachePath($template_path) { $template_path = str_replace(getFileSuffix($template_path), '.php', $template_path); $template_cache_path = str_replace('View', 'Cache/Template', $template_path); return $template_cache_path; } /** * CacheTemplate 解析模板并存储为cache文件 * @param string $template_path 模板路径 * @return string $template_cache 返回模板缓存路径 */ public function cacheTemplate($template_path) { $content = $this->getTemplateContent($template_path); //获取模板内容 $content = $this->_parseTemplate($content); //解析模板,转化为php语法 $cache_template_path = $this->getCacheTemplatePath($template_path); //生成php缓存模板的地址 if ($this->saveTemplateCache($cache_template_path, $content)) return $cache_template_path; //生成php缓存文件,返回cache路径 else sysError('Template cache file does not save ! url : ' . 
$cache_template_path); } /** * 获取原始模板内容 * @param $template_path 模板路径 * @return $content 模板内容 */ private function getTemplateContent($template_path) { if (!file_exists($template_path)) sysError('Template does not exist !'); else { $content = file_get_contents($template_path); $no_layout = '{__NOLAYOUT__}'; $pos = strpos($content, $no_layout); if ($pos === 0 or $pos > 0) { //有NOLAYOUT标签,不包含Layout文件,直接返回模板内容 $content = str_replace($no_layout, '', $content); return $content; } else { if (!empty($this->layout)) $content = str_replace('{__CONTENT__}', $content, $this->layout); return $content; } } } /** * 解析模板 * @param text $content 原始模板 * @return text $content 解析后模板 */ public function _parseTemplate($content) { //获取包含文件,合并到母版 $content = $this->_parseInclude($content); //模板解析 $content = preg_replace('/{(\$[a-zA-Z][a-zA-Z0-9_-]*)}/', '<?php echo \\1;?>', $content); // 匹配格式如:{$username} $content = preg_replace('/{(\$[a-zA-Z][a-zA-Z0-9_-]*)\.([a-zA-Z][a-zA-Z0-9_-]*)}/', '<?php echo \\1["\\2"];?>', $content); //匹配格式如:{$vo.id} $content = preg_replace('/{(\$[a-zA-Z][a-zA-Z0-9_-]*)\.([a-zA-Z][a-zA-Z0-9_-]*)\.([a-zA-Z][a-zA-Z0-9_-]*)}/', '<?php echo \\1["\\2"]["\\3"];?>', $content); //匹配格式如:{$data.user.id} $content = preg_replace('/<volist name="([a-zA-Z][a-zA-Z0-9_-]*)" id="([a-zA-Z][a-zA-Z0-9_-]*)">/', '<?php foreach (\$\\1 as \$key=>\$\\2) { ?>', $content); //匹配格式如:<volist name="list" id="vo"> $content = preg_replace('/<volist name="([a-zA-Z][a-zA-Z0-9_-]*)\.([a-zA-Z][a-zA-Z0-9_-]*)" id="([a-zA-Z][a-zA-Z0-9_-]*)">/', '<?php foreach (\$\\1["\\2"] as \$key1=>\$\\3) { ?>', $content); //匹配格式如:<volist name="list.sub" id="sub"> $content = preg_replace('/<\/volist>/', '<?php }?>', $content); //匹配格式如:</volist> $content = preg_replace('/<eq name="([a-zA-Z][a-zA-Z0-9_-]*)" value="([a-zA-Z0-9._-]*)">/', '<?php if (isset(\$\\1) && \$\\1 == "\\2") {?>', $content); //匹配格式如:<eq name="username" value="abc"> $content = preg_replace('/<eq 
name="([a-zA-Z][a-zA-Z0-9_-]*)\.([a-zA-Z][a-zA-Z0-9_-]*)" value="([a-zA-Z0-9._-]*)">/', '<?php if (isset(\$\\1["\\2"]) && \$\\1["\\2"] == "\\3") {?>', $content); //匹配格式如:<eq name="user.name" value="abc"> $content = preg_replace('/<eq name="([a-zA-Z][a-zA-Z0-9_-]*)\.([a-zA-Z][a-zA-Z0-9_-]*)" value="(\$[a-zA-Z][a-zA-Z0-9_-]*)">/', '<?php if (isset(\$\\1["\\2"]) && \$\\1["\\2"] == \\3) { ?>', $content); //匹配格式如:<eq name="vo.id" value="$uid"> $content = preg_replace('/<neq name="([a-zA-Z][a-zA-Z0-9_-]*)" value="([a-zA-Z0-9._-]*)">/', '<?php if (isset(\$\\1) && \$\\1 != "\\2") {?>', $content); //匹配格式如:<neq name="username" value="abc"> $content = preg_replace('/<neq name="([a-zA-Z][a-zA-Z0-9_-]*)\.([a-zA-Z][a-zA-Z0-9_-]*)" value="([a-zA-Z0-9._-]*)">/', '<?php if (isset(\$\\1["\\2"]) && \$\\1["\\2"] != "\\3") {?>', $content); //匹配格式如:<neq name="user.name" value="abc"> $content = preg_replace('/<neq name="([a-zA-Z][a-zA-Z0-9_-]*)\.([a-zA-Z][a-zA-Z0-9_-]*)" value="(\$[a-zA-Z][a-zA-Z0-9_-]*)">/', '<?php if (isset(\$\\1["\\2"]) && \$\\1["\\2"] != \\3) { ?>', $content); //匹配格式如:<neq name="vo.id" value="$uid"> $content = preg_replace('/<if name="([a-zA-Z][a-zA-Z0-9_-]*)" value="([a-zA-Z0-9._-]*)">/', '<?php if (isset(\$\\1) && \$\\1 == "\\2") {?>', $content); //匹配格式如:<if name="username" value="abc"> $content = preg_replace('/<if name="([a-zA-Z][a-zA-Z0-9_-]*)\.([a-zA-Z][a-zA-Z0-9_-]*)" value="([a-zA-Z0-9._-]*)">/', '<?php if (isset(\$\\1["\\2"]) && \$\\1["\\2"] == "\\3") {?>', $content); //匹配格式如:<if name="user.id" value="1"> $content = preg_replace('/<if name="([a-zA-Z][a-zA-Z0-9_-]*)\.([a-zA-Z][a-zA-Z0-9_-]*)" value="(\$[a-zA-Z][a-zA-Z0-9_-]*)">/', '<?php if (isset(\$\\1["\\2"]) && \$\\1["\\2"] == \\3) { ?>', $content); //匹配格式如:<if name="vo.id" value="$uid"> $content = preg_replace('/<if name="([a-zA-Z][a-zA-Z0-9_-]*)\.([a-zA-Z][a-zA-Z0-9_-]*)" value="(\$[a-zA-Z][a-zA-Z0-9_-]*)\.([a-zA-Z][a-zA-Z0-9_-]*)">/', '<?php if (isset(\$\\1["\\2"]) && isset(\\3["\\4"]) && \$\\1["\\2"] 
== \\3["\\4"]) { ?>', $content); //匹配格式如:<if name="user.id" value="$vo.id"> $content = preg_replace('/<if name="([a-zA-Z][a-zA-Z0-9_-]*)" value="(\$[a-zA-Z][a-zA-Z0-9_-]*)">/', '<?php if (isset(\$\\1) && isset(\\2) && \$\\1 == \\2) { ?>', $content); //匹配格式如:<if name="key" value="$sid"> $content = preg_replace('/<eq name="([a-zA-Z][a-zA-Z0-9_-]*)\.([a-zA-Z][a-zA-Z0-9_-]*)" value="(\$[a-zA-Z][a-zA-Z0-9_-]*)\.([a-zA-Z][a-zA-Z0-9_-]*)">/', '<?php if (isset(\$\\1["\\2"]) && isset(\\3["\\4"]) && \$\\1["\\2"] == \\3["\\4"]) { ?>', $content); //匹配格式如:<eq name="user.id" value="$vo.id"> $content = preg_replace('/<\/else>/', '<?php } else { ?>', $content); //匹配格式如:</else> $content = preg_replace('/<\/eq>|<\/neq>|<\/if>|<\/empty>|<\/notempty>/', '<?php }?>', $content); //匹配格式如:</eq> 或 </neq></if></empty> $content = preg_replace('/<empty name="([a-zA-Z][a-zA-Z0-9_-]*)">/', '<?php if (!isset(\$\\1) || empty(\$\\1)) {?>', $content); //匹配格式如:<empty name="username"></empty> $content = preg_replace('/<empty name="([a-zA-Z][a-zA-Z0-9_-]*)\.([a-zA-Z][a-zA-Z0-9_-]*)">/', '<?php if (!isset(\$\\1["\\2"]) || empty(\$\\1["\\2"])) {?>', $content); //匹配格式如:<empty name="user.name"></empty> $content = preg_replace('/<notempty name="([a-zA-Z][a-zA-Z0-9_-]*)">/', '<?php if (isset(\$\\1) && !empty(\$\\1)) {?>', $content); //匹配格式如:<notempty name="username"></empty> $content = preg_replace('/<notempty name="([a-zA-Z][a-zA-Z0-9_-]*)\.([a-zA-Z][a-zA-Z0-9_-]*)">/', '<?php if (isset(\$\\1["\\2"]) && !empty(\$\\1["\\2"])) {?>', $content); //匹配格式如:<notempty name="user.name"></empty> $content = preg_replace('/{__RUNTIME__}/', '<?php $start_time = Request::getData("sys", "start_time"); $end_time = microtime(true); $runtime = round(($end_time - $start_time) * 1000, 1); echo $runtime; ?>', $content); // 仅匹配{__RUNTIME__} $content = preg_replace('/{(__[A-Z][A-Z]*__)}/', '<?php echo \\1;?>', $content); // 通配格式如:{__ROOT__} 注意:此项要放在__RUNTIME__之后 $content = preg_replace('/{\%([a-zA-Z][a-zA-Z0-9_-]*)}/', '<?php 
echo \\1;?>', $content); //适用于已定义的常量和变量,通配:{%APP_NAME} return $content; } /** * 获取原始模板中包含的子模板url * * @param <text> $content 模板内容 * @return 空或者数组 */ private function _getIncludeUrl($content) { $count = preg_match_all($this->include_pattern, $content, $array); //匹配格式如:<include:Public:footer> if ($count == 0) return ''; $url_array = array(); for ($i = 0; $i < $count; $i++) { $filePath = APP_VIEW_PATH . '/' . $this->group_name . '/' . $array[1][$i] . '.' . $array[2][$i] . getGroupTplSuffix(); array_push($url_array, $filePath); } return $url_array; } /** * 替换include模板片段,匹配格式如:{include:Public:header} * * @param <text> $content 模板内容 * @return <text> $content 模板内容 */ private function _parseInclude($content) { $count = preg_match_all($this->include_pattern, $content, $array); //匹配格式如:{Controller:Public:footer} if ($count == 0) return $content; for ($i = 0; $i < $count; $i++) { $filePath = APP_VIEW_PATH . '/' . $this->group_name . '/' . $array[1][$i] . '.' . $array[2][$i] . getGroupTplSuffix(); if (file_exists($filePath)) $res = file_get_contents($filePath); else return '要包含的文件不存在<br>URL:' . $filePath; $content = str_replace('{include:' . $array[1][$i] . ':' . $array[2][$i] . '}', $res, $content); } return $content; } /** * 获取缓存模板路径 * @param string $template_path 原始模板路径 */ private function getCacheTemplatePath($template_path) { $cache_template_path = str_replace(getFileSuffix($template_path), '.php', $template_path); $cache_template_path = str_replace('View', 'Cache/Template', $cache_template_path); return $cache_template_path; } /** * 存储模板缓存 */ private function saveTemplateCache($fileName, $content) { $groupDirPath = dirname($fileName); if (!is_dir($groupDirPath)) mkdir($groupDirPath, 0777); if (file_put_contents($fileName, $content) > 0) return true; else return false; } /** * 获取布局模板内容 */ private function layout() { if (C('layout') == true) { $this->layout_url = APP_VIEW_PATH . C('url_separator') . $this->group_name . C('url_separator') . 'Layout' . 
C('view_suffix'); if (!file_exists($this->layout_url)) { sysError('没有找到布局模板') ; } else { $this->layout = file_get_contents($this->layout_url); } } } }
ylhtan/g-framework
Core/View.php
PHP
apache-2.0
14,956
package com.cwbase.logback; import static org.junit.Assert.assertEquals; import org.junit.Before; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.MDC; import redis.clients.jedis.Jedis; import redis.clients.jedis.JedisPool; import ch.qos.logback.classic.LoggerContext; import ch.qos.logback.classic.joran.JoranConfigurator; import ch.qos.logback.core.joran.spi.JoranException; import ch.qos.logback.core.util.StatusPrinter; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ArrayNode; public class RedisAppenderTest { String key = "logstash"; Jedis redis; @Test public void logTest() throws Exception { // refer to logback.xml in test folder configLogger("/logback.xml"); Logger logger = LoggerFactory.getLogger(RedisAppenderTest.class); logger.debug("Test Log #1"); logger.debug("Test Log #2"); logger.debug("Test Log #3"); logger.debug("Test Log #4"); logger.debug("Test Log #5"); // list length check long len = redis.llen(key); assertEquals(5L, len); // Use Jackson to check JSON content String content = redis.lpop(key); ObjectMapper mapper = new ObjectMapper(); JsonNode node = mapper.readTree(content); assertEquals("test-application", node.get("source").asText()); assertEquals("Test Log #1", node.get("message").asText()); assertEquals("MyValue", node.get("MyKey").asText()); assertEquals("MyOtherValue", node.get("MySecondKey").asText()); } @Test public void logTestMDC() throws Exception { // refer to logback-mdc.xml in test folder configLogger("/logback-mdc.xml"); Logger logger = LoggerFactory.getLogger(RedisAppenderTest.class); MDC.put("mdcvar1", "test1"); MDC.put("mdcvar2", "test2"); logger.debug("Test MDC Log"); String content = redis.lpop(key); ObjectMapper mapper = new ObjectMapper(); JsonNode node = mapper.readTree(content); ArrayNode tags = (ArrayNode) node.get("tags"); assertEquals("test1", tags.get(0).asText()); 
assertEquals("test2", tags.get(1).asText()); assertEquals("test1 test2 mdcvar3_NOT_FOUND", tags.get(2).asText()); } @Before public void setUp() { System.out.println("Before Test, clearing Redis"); JedisPool pool = new JedisPool("localhost"); redis = pool.getResource(); // clear the redis list first redis.ltrim(key, 1, 0); } protected void configLogger(String loggerxml) { LoggerContext context = (LoggerContext) LoggerFactory .getILoggerFactory(); try { JoranConfigurator configurator = new JoranConfigurator(); configurator.setContext(context); context.reset(); configurator.doConfigure(this.getClass().getResourceAsStream( loggerxml)); } catch (JoranException je) { // StatusPrinter will handle this } StatusPrinter.printInCaseOfErrorsOrWarnings(context); } }
elancom/logback-redis-appender
src/test/java/com/cwbase/logback/RedisAppenderTest.java
Java
apache-2.0
2,883
import classNames from "classnames";
import React, { Component } from "react";
import { Tooltip } from "reactjs-components";

import { FormReducer as ContainerReducer } from "../../reducers/serviceForm/Container";
import { FormReducer as ContainersReducer } from "../../reducers/serviceForm/Containers";
import { findNestedPropertyInObject } from "../../../../../../src/js/utils/Util";
import ArtifactsSection from "./ArtifactsSection";
import ContainerConstants from "../../constants/ContainerConstants";
import FieldError from "../../../../../../src/js/components/form/FieldError";
import FieldHelp from "../../../../../../src/js/components/form/FieldHelp";
import FieldInput from "../../../../../../src/js/components/form/FieldInput";
import FieldLabel from "../../../../../../src/js/components/form/FieldLabel";
import FormGroup from "../../../../../../src/js/components/form/FormGroup";
import FormGroupHeadingContent from "../../../../../../src/js/components/form/FormGroupHeadingContent";
import FormRow from "../../../../../../src/js/components/form/FormRow";
import PodSpec from "../../structs/PodSpec";

const { DOCKER } = ContainerConstants.type;

// Checkbox settings that only apply to the Docker runtime. Labels/help texts
// are user-facing (localized) strings and must not be altered.
const containerSettings = {
  privileged: {
    label: "授予容器特权",
    helpText: "默认容器都是非授权的,并且没有特殊权限。比如在一个docker内运行另一个docker.",
    dockerOnly: "只有在Docker容器中才支持授予特权操作."
  },
  forcePullImage: {
    label: "启动时强制拉取镜像",
    helpText: "在启动每个实例前强制拉取镜像.",
    dockerOnly: "启动前强制拉取镜像只在docker容器下才支持."
  }
};

// Form-data paths for single-container (Marathon app) specs.
// "{basePath}" is substituted with the component's `path` prop.
const appPaths = {
  artifacts: "fetch",
  cmd: "cmd",
  containerName: "",
  cpus: "cpus",
  disk: "disk",
  forcePullImage: "{basePath}.docker.forcePullImage",
  gpus: "gpus",
  image: "{basePath}.docker.image",
  mem: "mem",
  privileged: "{basePath}.docker.privileged",
  type: "{basePath}.type"
};

// Form-data paths for pod (multi-container) specs; some fields (gpus,
// privileged, forcePullImage) have no pod equivalent and map to "".
const podPaths = {
  artifacts: "{basePath}.artifacts",
  cmd: "{basePath}.exec.command.shell",
  containerName: "{basePath}.name",
  cpus: "{basePath}.resources.cpus",
  disk: "{basePath}.resources.disk",
  forcePullImage: "",
  gpus: "",
  image: "{basePath}.image.id",
  mem: "{basePath}.resources.mem",
  privileged: "",
  type: "{basePath}.type"
};

/**
 * "Advanced settings" section of the service creation form: runtime-specific
 * checkboxes, GPU and disk inputs, and the artifacts sub-section.
 */
class ContainerServiceFormAdvancedSection extends Component {
  // Resolves a logical field name to its concrete form-data path, choosing
  // the pod or app mapping based on the service type.
  getFieldPath(basePath, fieldName) {
    if (this.props.service instanceof PodSpec) {
      return podPaths[fieldName].replace("{basePath}", basePath);
    }

    return appPaths[fieldName].replace("{basePath}", basePath);
  }

  // GPUs are not supported by the Docker runtime, so the input is disabled
  // whenever the selected container type is DOCKER.
  isGpusDisabled() {
    const { data, path } = this.props;
    const typePath = this.getFieldPath(path, "type");

    return findNestedPropertyInObject(data, typePath) === DOCKER;
  }

  // Renders the GPUs number input (apps only — pods return null), wrapping
  // it in an explanatory tooltip when the Docker runtime disables it.
  getGPUSField() {
    const { data, errors, path, service } = this.props;
    if (service instanceof PodSpec) {
      return null;
    }

    const gpusPath = this.getFieldPath(path, "gpus");
    const gpusErrors = findNestedPropertyInObject(errors, gpusPath);
    const gpusDisabled = this.isGpusDisabled();

    let inputNode = (
      <FieldInput
        disabled={gpusDisabled}
        min="0"
        name={gpusPath}
        step="any"
        type="number"
        value={findNestedPropertyInObject(data, gpusPath)}
      />
    );

    if (gpusDisabled) {
      inputNode = (
        <Tooltip
          content="Docker 引擎不支持GPU 资源, 如果想使用GPU资源,请选择 Universal 容器环境."
          interactive={true}
          maxWidth={300}
          scrollContainer=".gm-scroll-view"
          wrapText={true}
          wrapperClassName="tooltip-wrapper tooltip-block-wrapper"
        >
          {inputNode}
        </Tooltip>
      );
    }

    return (
      <FormGroup
        className="column-4"
        showError={Boolean(!gpusDisabled && gpusErrors)}
      >
        <FieldLabel className="text-no-transform">
          <FormGroupHeadingContent primary={true}>
            GPUs
          </FormGroupHeadingContent>
        </FieldLabel>
        {inputNode}
        <FieldError>{gpusErrors}</FieldError>
      </FormGroup>
    );
  }

  // Renders one checkbox per entry in `containerSettings` (apps only).
  // Non-Docker runtimes get the checkboxes disabled with a tooltip.
  getContainerSettings() {
    const { data, errors, path, service } = this.props;
    if (service instanceof PodSpec) {
      return null;
    }

    const typePath = this.getFieldPath(path, "type");
    const containerType = findNestedPropertyInObject(data, typePath);
    const typeErrors = findNestedPropertyInObject(errors, typePath);
    const sectionCount = Object.keys(containerSettings).length;
    const selections = Object.keys(
      containerSettings
    ).map((settingName, index) => {
      const { helpText, label, dockerOnly } = containerSettings[settingName];
      const settingsPath = this.getFieldPath(path, settingName);
      const checked = findNestedPropertyInObject(data, settingsPath);
      const isDisabled = containerType !== DOCKER;
      const labelNodeClasses = classNames({
        "disabled muted": isDisabled,
        // last checkbox gets no bottom margin
        "flush-bottom": index === sectionCount - 1
      });

      let labelNode = (
        <FieldLabel key={`label.${index}`} className={labelNodeClasses}>
          <FieldInput
            checked={!isDisabled && Boolean(checked)}
            name={settingsPath}
            type="checkbox"
            disabled={isDisabled}
            value={settingName}
          />
          {label}
          <FieldHelp>{helpText}</FieldHelp>
        </FieldLabel>
      );

      if (isDisabled) {
        labelNode = (
          <Tooltip
            content={dockerOnly}
            key={`tooltip.${index}`}
            position="top"
            scrollContainer=".gm-scroll-view"
            width={300}
            wrapperClassName="tooltip-wrapper tooltip-block-wrapper"
            wrapText={true}
          >
            {labelNode}
          </Tooltip>
        );
      }

      return labelNode;
    });

    return (
      <FormGroup showError={Boolean(typeErrors)}>
        {selections}
        <FieldError>{typeErrors}</FieldError>
      </FormGroup>
    );
  }

  render() {
    const { data, errors, path } = this.props;
    const artifactsPath = this.getFieldPath(path, "artifacts");
    const artifacts = findNestedPropertyInObject(data, artifactsPath) || [];
    const artifactErrors = findNestedPropertyInObject(
      errors,
      artifactsPath
    ) || [];
    const diskPath = this.getFieldPath(path, "disk");
    const diskErrors = findNestedPropertyInObject(errors, diskPath);

    return (
      <div>
        <h3 className="short-bottom">
          高级设置
        </h3>
        <p>高级设置与您选择的运行时环境有关.</p>
        {this.getContainerSettings()}
        <FormRow>
          {this.getGPUSField()}
          <FormGroup className="column-4" showError={Boolean(diskErrors)}>
            <FieldLabel className="text-no-transform">
              <FormGroupHeadingContent primary={true}>
                硬盘 (MiB)
              </FormGroupHeadingContent>
            </FieldLabel>
            <FieldInput
              min="0.001"
              name={diskPath}
              step="any"
              type="number"
              value={findNestedPropertyInObject(data, diskPath)}
            />
            <FieldError>{diskErrors}</FieldError>
          </FormGroup>
        </FormRow>
        <ArtifactsSection
          data={artifacts}
          path={artifactsPath}
          errors={artifactErrors}
          onRemoveItem={this.props.onRemoveItem}
          onAddItem={this.props.onAddItem}
        />
      </div>
    );
  }
}

ContainerServiceFormAdvancedSection.defaultProps = {
  data: {},
  errors: {},
  onAddItem() {},
  onRemoveItem() {},
  path: "container"
};

// NOTE(review): React.PropTypes was removed in React 16; this relies on a
// pre-16 React — confirm the project's React version before upgrading.
ContainerServiceFormAdvancedSection.propTypes = {
  data: React.PropTypes.object,
  errors: React.PropTypes.object,
  onAddItem: React.PropTypes.func,
  onRemoveItem: React.PropTypes.func,
  path: React.PropTypes.string
};

ContainerServiceFormAdvancedSection.configReducers = {
  container: ContainerReducer,
  containers: ContainersReducer
};

module.exports = ContainerServiceFormAdvancedSection;
jcloud-shengtai/dcos-ui_CN
plugins/services/src/js/components/forms/ContainerServiceFormAdvancedSection.js
JavaScript
apache-2.0
8,131
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Background worker that drives a single Google Assistant Converse() exchange
for the Unreal Engine integration.

Runs the gRPC conversation on its own thread so the streaming request/response
loop never blocks the game thread.
"""

from threading import Thread

import unreal_engine as ue
import ue_site

# Google Assistant imports
from googlesdk.assistant.embedded.v1alpha1 import embedded_assistant_pb2
from googlesamples.assistant import common_settings

# General Google imports
from google.rpc import code_pb2

END_OF_UTTERANCE = embedded_assistant_pb2.ConverseResponse.END_OF_UTTERANCE
DIALOG_FOLLOW_ON = embedded_assistant_pb2.ConverseResult.DIALOG_FOLLOW_ON
CLOSE_MICROPHONE = embedded_assistant_pb2.ConverseResult.CLOSE_MICROPHONE


class ThreadedAssistant(Thread):

    def __init__(self):
        # Opaque blob provided in ConverseResponse that,
        # when provided in a follow-up ConverseRequest,
        # gives the Assistant a context marker within the current state
        # of the multi-Converse()-RPC "conversation".
        # This value, along with MicrophoneMode, supports a more natural
        # "conversation" with the Assistant.
        self.conversation_state = None
        # Deadline (seconds) applied to each Converse() gRPC call.
        self.deadline = common_settings.DEFAULT_GRPC_DEADLINE
        Thread.__init__(self)

    def __enter__(self):
        return self

    def __exit__(self, etype, e, traceback):
        # Always release the audio stream, even when the body raised;
        # returning False propagates any exception to the caller.
        ue_site.conversation_stream.close()
        return False

    @staticmethod
    def is_grpc_error_unavailable(e):
        # Declared @staticmethod: the original definition took the exception
        # as its only parameter, so an instance call would have mis-bound it
        # to ``self``.
        # NOTE(review): ``grpc`` is not imported in this module — confirm it
        # is available at call time before using this helper.
        is_grpc_error = isinstance(e, grpc.RpcError)
        if is_grpc_error and (e.code() == grpc.StatusCode.UNAVAILABLE):
            # ue.log_error takes a single message string (no lazy formatting).
            ue.log_error('grpc unavailable error: %s' % e)
            return True
        return False

    def run(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False

        ue_site.conversation_stream.start_recording()
        ue.log('Recording audio request.')

        # This generator yields ConverseResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in ue_site.assistant.Converse(self.gen_converse_requests(),
                                               self.deadline):
            # Something went wrong
            if resp.error.code != code_pb2.OK:
                ue.log_error('Server error: ' + str(resp.error.message))
                break
            # Detected the user is done talking
            if resp.event_type == END_OF_UTTERANCE:
                ue.log('End of audio request detected')
                ue_site.conversation_stream.stop_recording()
            # We parsed what the user said
            if resp.result.spoken_request_text:
                ue.log('Transcript of user request: ' +
                       str(resp.result.spoken_request_text))
            # We have a response ready to play out the speakers
            if len(resp.audio_out.audio_data) > 0:
                ue_site.conversation_stream.write(resp.audio_out.audio_data)
            # We have an updated conversation state
            if resp.result.conversation_state:
                self.conversation_state = resp.result.conversation_state
            # Volume level needs to be updated
            if resp.result.volume_percentage != 0:
                ue_site.conversation_stream.volume_percentage = (
                    resp.result.volume_percentage
                )
            # Check if user should reply
            if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
                # Expecting user to reply
                continue_conversation = True
                ue.log('Expecting follow-on query from user.')
            elif resp.result.microphone_mode == CLOSE_MICROPHONE:
                # Not expecting user to reply
                continue_conversation = False

        ue.log('Finished playing assistant response.')
        ue_site.conversation_stream.stop_playback()
        return continue_conversation

    def gen_converse_requests(self):
        """Generates ConverseRequest messages to send to the API.

        This happens over multiple frames, so it should be run in a
        separate thread. Otherwise it WILL lock up the game thread while
        it's "thinking."
        """
        converse_state = None
        if self.conversation_state:
            ue.log('Sending converse_state: ' + str(self.conversation_state))
            converse_state = embedded_assistant_pb2.ConverseState(
                conversation_state=self.conversation_state,
            )
        # Generate the config for the assistant
        config = embedded_assistant_pb2.ConverseConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=ue_site.conversation_stream.sample_rate,
            ),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=ue_site.conversation_stream.sample_rate,
                volume_percentage=ue_site.conversation_stream.volume_percentage,
            ),
            converse_state=converse_state
        )
        # The first ConverseRequest must contain the ConverseConfig
        # and no audio data.
        yield embedded_assistant_pb2.ConverseRequest(config=config)
        # Below, we actually activate the microphone and begin recording.
        for data in ue_site.conversation_stream:
            # Subsequent requests need audio data, but not config.
            yield embedded_assistant_pb2.ConverseRequest(audio_in=data)
        ue_site.conversation_stream.start_playback()
Jay2645/Unreal-Google-Assistant
Content/Scripts/threaded_assistant.py
Python
apache-2.0
4,927
package de.framey.lab.evil.squishytentaclefun.blocksandlocals; import de.framey.lab.evil.eviltentaclesofdeath.Tentacle; import de.framey.lab.evil.squishytentaclefun.util.Basicified; public class AmbiguousLocalVars implements Tentacle, Basicified { public static void main(String[] args) { new AmbiguousLocalVars(); } private AmbiguousLocalVars() { doBadThings(); } private void doBadThings() { int j = 0; GOTO(24); do { int x = 42; PRINT("%d: %d", LINE(), ++x); } while (j++ < 1); GOTO(27); int i = 666; PRINT("%d: %d", LINE(), i); GOTO(21); PRINT("%d: %d", LINE(), i); } }
Franknjava/TheNightOfTheTentacles
projects/SquishyTentacleFun/src/main/java/de/framey/lab/evil/squishytentaclefun/blocksandlocals/AmbiguousLocalVars.java
Java
apache-2.0
715
/******************************************************************************* * Copyright 2014 Katja Hahn * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package com.github.katjahahn.parser.coffheader; import static com.github.katjahahn.parser.IOUtil.*; import static com.github.katjahahn.parser.coffheader.COFFHeaderKey.*; import static com.google.common.base.Preconditions.*; import java.io.IOException; import java.util.Date; import java.util.LinkedList; import java.util.List; import java.util.Map; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import com.github.katjahahn.parser.Header; import com.github.katjahahn.parser.IOUtil; import com.github.katjahahn.parser.IOUtil.SpecificationFormat; import com.github.katjahahn.parser.StandardField; /** * Represents the COFF File Header. 
* * @author Katja Hahn * */ public class COFFFileHeader extends Header<COFFHeaderKey> { /** the size of the header is {@value} */ public static final int HEADER_SIZE = 20; /** the specification name */ private static final String COFF_SPEC_FILE = "coffheaderspec"; /** the bytes that make up the header data */ private final byte[] headerbytes; /** the header fields */ private Map<COFFHeaderKey, StandardField> data; /** the file offset of the header */ private final long offset; /** the logger for the COFF File Header */ private static final Logger logger = LogManager .getLogger(COFFFileHeader.class.getName()); /** * Creates a COFFFileHeader instance based on the byte array. * * @param headerbytes * an array that holds the headerbytes. The length of the array * must be {@link #HEADER_SIZE}. * @param offset * the file offset of the header * @throws IllegalArgumentException * if length of the array != {@link #HEADER_SIZE} */ private COFFFileHeader(byte[] headerbytes, long offset) { checkNotNull(headerbytes); checkArgument(headerbytes.length == HEADER_SIZE); this.headerbytes = headerbytes.clone(); this.offset = offset; } /** * {@inheritDoc} */ @Override public long getOffset() { return offset; } /** * Reads the header's fields. 
*/ private void read() { // define the specification format final int key = 0; final int description = 1; final int offset = 2; final int length = 3; SpecificationFormat format = new SpecificationFormat(key, description, offset, length); // read the header data try { data = IOUtil.readHeaderEntries(COFFHeaderKey.class, format, COFF_SPEC_FILE, headerbytes, getOffset()); } catch (IOException e) { logger.error("unable to read coff specification: " + e.getMessage()); } } /** * {@inheritDoc} */ @Override public String getInfo() { // make title StringBuilder b = new StringBuilder("----------------" + NL + "COFF File Header" + NL + "----------------" + NL); // loop through standard fields for (StandardField field : data.values()) { long value = field.getValue(); COFFHeaderKey key = (COFFHeaderKey) field.getKey(); String description = field.getDescription(); // handle special fields that have additional representations if (key == COFFHeaderKey.CHARACTERISTICS) { b.append(NL + description + ": " + NL); b.append(getCharacteristicsInfo(value)); } else if (key == COFFHeaderKey.TIME_DATE) { b.append(description + ": "); b.append(convertToDate(value)); } else if (key == COFFHeaderKey.MACHINE) { b.append(description + ": "); b.append(getMachineTypeString((int) value)); } else { b.append(field.toString()); } b.append(NL); } return b.toString(); } private static String getCharacteristicsInfo(long value) { StringBuilder b = new StringBuilder(); List<FileCharacteristic> characs = FileCharacteristic.getAllFor(value); for (FileCharacteristic ch : characs) { b.append("\t* " + ch.getDescription() + NL); } if (characs.isEmpty()) { b.append("\t**no characteristics**" + NL); } return b.toString(); } /** * Returns the machine type description string that belongs to the value. 
* * @param value * the value of the machine type * @return the machine type description */ private String getMachineTypeString(int value) { return MachineType.getForValue(value).getDescription(); } /** * Converts seconds to a date object. * * @param seconds * time in seconds * @return date */ private Date convertToDate(long seconds) { // convert seconds to milli seconds long millis = seconds * 1000; return new Date(millis); } /** * {@inheritDoc} */ @Override public long get(COFFHeaderKey key) { return getField(key).getValue(); } /** * {@inheritDoc} */ @Override public StandardField getField(COFFHeaderKey key) { return data.get(key); } /** * Returns a list with all characteristics of the file. * <p> * Ensures that the result is never null. * * @return list of file characteristics */ public List<FileCharacteristic> getCharacteristics() { long value = get(CHARACTERISTICS); List<FileCharacteristic> characteristics = FileCharacteristic .getAllFor(value); // ensurance assert characteristics != null; return characteristics; } /** * Returns whether the characteristic is set. * * @param characteristic * a file characteristic * @return true if characteristic is set, false otherwise */ public boolean hasCharacteristic(FileCharacteristic characteristic) { return (get(CHARACTERISTICS) & characteristic.getValue()) != 0; } /** * Returns the enum that denotes the machine type. * * @return MachineType */ public MachineType getMachineType() { long value = get(MACHINE); try { return MachineType.getForValue(value); } catch (IllegalArgumentException e) { logger.error("Unable to resolve machine type for value: " + value); return MachineType.UNKNOWN; } } /** * Creates a date object from the TIME_DATE read in the COFF File Header. * * @return the date */ public Date getTimeDate() { return convertToDate(get(TIME_DATE)); } /** * Returns the SizeOfOptionalHeader value. 
* * @return size of optional header */ public int getSizeOfOptionalHeader() { assert get(SIZE_OF_OPT_HEADER) == (int) get(SIZE_OF_OPT_HEADER); // 2-byte value can be casted to int return (int) get(SIZE_OF_OPT_HEADER); } /** * Returns the number of sections. * * @return number of sections */ public int getNumberOfSections() { assert get(SECTION_NR) == (int) get(SECTION_NR); // 2-byte value can be casted to int return (int) get(SECTION_NR); } /** * Returns a list of the header entries. * * @return a list of header entries */ public List<StandardField> getHeaderEntries() { return new LinkedList<>(data.values()); } /** * Creates an instance of the COFF File Header based on headerbytes and * offset. * * @param headerbytes * the bytes that make up the COFF File Header * @param offset * the file offset to the beginning of the header * @return COFFFileHeader instance */ public static COFFFileHeader newInstance(byte[] headerbytes, long offset) { COFFFileHeader header = new COFFFileHeader(headerbytes, offset); header.read(); return header; } }
katjahahn/PortEx
src/main/java/com/github/katjahahn/parser/coffheader/COFFFileHeader.java
Java
apache-2.0
8,980
package com.tesshu.subsonic.client.fx.view; import com.tesshu.subsonic.client.model.Directory; import javafx.scene.image.Image; import java.util.Optional; public interface ImageCache { Optional<Image> getMiddleIconImage(Directory directory); Optional<Image> getSmallIconImage(Directory directory); }
tesshucom/subsonic-fx-player
subsonic-fx-player-api/src/main/java/com/tesshu/subsonic/client/fx/view/ImageCache.java
Java
apache-2.0
312
/* * Copyright 2002-2020 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.r2dbc.connection.lookup; import io.r2dbc.spi.ConnectionFactory; import org.springframework.beans.BeansException; import org.springframework.beans.factory.BeanFactory; import org.springframework.beans.factory.BeanFactoryAware; import org.springframework.lang.Nullable; import org.springframework.util.Assert; /** * {@link ConnectionFactoryLookup} implementation based on a * Spring {@link BeanFactory}. * * <p>Will lookup Spring managed beans identified by bean name, * expecting them to be of type {@link ConnectionFactory}. * * @author Mark Paluch * @since 5.3 * @see BeanFactory */ public class BeanFactoryConnectionFactoryLookup implements ConnectionFactoryLookup, BeanFactoryAware { @Nullable private BeanFactory beanFactory; /** * Create a new instance of the {@link BeanFactoryConnectionFactoryLookup} class. * <p>The BeanFactory to access must be set via {@code setBeanFactory}. * @see #setBeanFactory */ public BeanFactoryConnectionFactoryLookup() {} /** * Create a new instance of the {@link BeanFactoryConnectionFactoryLookup} class. * <p>Use of this constructor is redundant if this object is being created * by a Spring IoC container, as the supplied {@link BeanFactory} will be * replaced by the {@link BeanFactory} that creates it (c.f. the * {@link BeanFactoryAware} contract). 
So only use this constructor if you * are using this class outside the context of a Spring IoC container. * @param beanFactory the bean factory to be used to lookup {@link ConnectionFactory * ConnectionFactories} */ public BeanFactoryConnectionFactoryLookup(BeanFactory beanFactory) { Assert.notNull(beanFactory, "BeanFactory must not be null"); this.beanFactory = beanFactory; } @Override public void setBeanFactory(BeanFactory beanFactory) { this.beanFactory = beanFactory; } @Override public ConnectionFactory getConnectionFactory(String connectionFactoryName) throws ConnectionFactoryLookupFailureException { Assert.state(this.beanFactory != null, "BeanFactory is required"); try { return this.beanFactory.getBean(connectionFactoryName, ConnectionFactory.class); } catch (BeansException ex) { throw new ConnectionFactoryLookupFailureException( String.format("Failed to look up ConnectionFactory bean with name '%s'", connectionFactoryName), ex); } } }
spring-projects/spring-framework
spring-r2dbc/src/main/java/org/springframework/r2dbc/connection/lookup/BeanFactoryConnectionFactoryLookup.java
Java
apache-2.0
2,991
#region License and Terms
// MoreLINQ - Extensions to LINQ to Objects
// Copyright (c) 2008 Jonathan Skeet. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#endregion

namespace MoreLinq
{
    using System;
    using System.Collections.Generic;
    using System.Diagnostics;

    public static partial class MoreEnumerable
    {
        /// <summary>
        /// Returns a sequence resulting from applying a function to each
        /// element in the source sequence and its
        /// predecessor, with the exception of the first element which is
        /// only returned as the predecessor of the second element.
        /// </summary>
        /// <typeparam name="TSource">The type of the elements of <paramref name="source"/>.</typeparam>
        /// <typeparam name="TResult">The type of the element of the returned sequence.</typeparam>
        /// <param name="source">The source sequence.</param>
        /// <param name="resultSelector">A transform function to apply to
        /// each pair of sequence.</param>
        /// <returns>
        /// Returns the resulting sequence.
        /// </returns>
        /// <remarks>
        /// This operator uses deferred execution and streams its results.
        /// </remarks>
        /// <example>
        /// <code>
        /// int[] numbers = { 123, 456, 789 };
        /// IEnumerable&lt;int&gt; result = numbers.Pairwise((a, b) => a + b);
        /// </code>
        /// The <c>result</c> variable, when iterated over, will yield
        /// 579 and 1245, in turn.
        /// </example>
        public static IEnumerable<TResult> Pairwise<TSource, TResult>(this IEnumerable<TSource> source, Func<TSource, TSource, TResult> resultSelector)
        {
            // Argument validation happens eagerly; iteration itself is
            // deferred to the private iterator below.
            if (source == null) throw new ArgumentNullException("source");
            if (resultSelector == null) throw new ArgumentNullException("resultSelector");
            return PairwiseImpl(source, resultSelector);
        }

        private static IEnumerable<TResult> PairwiseImpl<TSource, TResult>(this IEnumerable<TSource> source, Func<TSource, TSource, TResult> resultSelector)
        {
            Debug.Assert(source != null);
            Debug.Assert(resultSelector != null);

            using (var e = source.GetEnumerator())
            {
                // Empty source: yield nothing (a one-element source also
                // yields nothing — its only element has no successor).
                if (!e.MoveNext())
                    yield break;

                var previous = e.Current;
                while (e.MoveNext())
                {
                    yield return resultSelector(previous, e.Current);
                    previous = e.Current;
                }
            }
        }
    }
}
clearwavebuild/elmah
src/Elmah/MoreLinq/MoreEnumerable.Pairwise.cs
C#
apache-2.0
3,216
/* Copyright 2002 The Apache Software Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package org.apache.batik.css.parser; /** * This class provides an implementation of the * {@link org.w3c.css.sac.AttributeCondition} interface. * * @author <a href="mailto:stephane@hillion.org">Stephane Hillion</a> * @version $Id$ */ public class DefaultPseudoClassCondition extends AbstractAttributeCondition { /** * The namespaceURI. */ protected String namespaceURI; /** * Creates a new DefaultAttributeCondition object. */ public DefaultPseudoClassCondition(String namespaceURI, String value) { super(value); this.namespaceURI = namespaceURI; } /** * <b>SAC</b>: Implements {@link * org.w3c.css.sac.Condition#getConditionType()}. */ public short getConditionType() { return SAC_PSEUDO_CLASS_CONDITION; } /** * <b>SAC</b>: Implements {@link * org.w3c.css.sac.AttributeCondition#getNamespaceURI()}. */ public String getNamespaceURI() { return namespaceURI; } /** * <b>SAC</b>: Implements {@link * org.w3c.css.sac.AttributeCondition#getLocalName()}. */ public String getLocalName() { return null; } /** * <b>SAC</b>: Implements {@link * org.w3c.css.sac.AttributeCondition#getSpecified()}. */ public boolean getSpecified() { return false; } /** * Returns a text representation of this object. */ public String toString() { return ":" + getValue(); } }
Uni-Sol/batik
sources/org/apache/batik/css/parser/DefaultPseudoClassCondition.java
Java
apache-2.0
2,066
using System.Reflection;

//
// General assembly information. These attributes control the metadata
// associated with the compiled assembly.
//
[assembly: AssemblyCompany("raccoom.net")]
[assembly: AssemblyCopyright("(C) 2009 Christoph Richner, all rights reserved")]
[assembly: AssemblyTrademark("")]

//
// Strong-name signing configuration.
// An empty KeyFile/KeyName means the assembly is not signed. If both are
// specified, a key found under KeyName in the Crypto Service Provider wins;
// otherwise the key in KeyFile is installed into the CSP and used. KeyFile
// paths are resolved relative to the project output directory
// (%Project Directory%\obj\<configuration>). Delay signing is an advanced
// option — see the .NET Framework documentation.
//
[assembly: AssemblyDelaySign(false)]
[assembly: AssemblyKeyFile("")]
[assembly: AssemblyKeyName("")]

//
// Version is Major.Minor.Build.Revision; '*' lets the compiler generate
// the build and revision numbers automatically.
//
[assembly: System.Runtime.InteropServices.ComVisible(false)]
[assembly: System.Reflection.AssemblyVersion("3.0.0.*")]
ChrisRichner/TreeViewFolderBrowser
BaseAssemblyInfo.cs
C#
apache-2.0
2,300
package org.loon.framework.android.game.core.graphics.component; /** * Copyright 2008 - 2010 * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. * * @project loonframework * @author chenpeng * @email:ceponline@yahoo.com.cn * @version 0.1 */ public abstract class ActorSpeed extends Actor { private Speed speed = new Speed(); protected double x; protected double y; public ActorSpeed() { } public ActorSpeed(Speed speed) { this.speed = speed; } public void move() { this.x += this.speed.getX(); this.y += this.speed.getY(); if (this.x >= getLLayer().getWidth()) { this.x = 0.0D; } if (this.x < 0.0D) { this.x = (getLLayer().getWidth() - 1); } if (this.y >= getLLayer().getHeight()) { this.y = 0.0D; } if (this.y < 0.0D) { this.y = (getLLayer().getHeight() - 1); } setLocation(this.x, this.y); } public void setLocation(double x, double y) { this.x = x; this.y = y; super.setLocation((int) x, (int) y); } public void setLocation(int x, int y) { this.x = x; this.y = y; super.setLocation(x, y); } public void increaseSpeed(Speed s) { this.speed.add(s); } public Speed getSpeed() { return this.speed; } }
cping/LGame
Java/old/Canvas_ver/src/org/loon/framework/android/game/core/graphics/component/ActorSpeed.java
Java
apache-2.0
1,697
using System.Collections.Generic; using System.Collections.ObjectModel; using System.ComponentModel; using System.ComponentModel.DataAnnotations; using System.ComponentModel.DataAnnotations.Schema; using Tralus.Framework.BusinessModel.Entities; using Tralus.Framework.BusinessModel.Security; namespace Mahan.Infrastructure.BusinessModel { /// <summary> /// نوع هواپیما: مدل‌های مختلف هواپیما /// </summary> [Table("AircraftType", Schema = "Infrastructure")] [DefaultProperty("Name")] [SecurityAvailablePermissions(SecurityOperations.ActivateAircrafts)] public class AircraftType : EntityBase { public AircraftType() { Aircrafts = new Collection<Aircraft>(); } [StringLength(200)] public string Name { get; set; } [StringLength(200)] public string Manufacturer { get; set; } [StringLength(200)] public string TypeVariation { get; set; } [StringLength(200)] public string FullTypeName { get; set; } [StringLength(3)] public string IataCode { get; set; } [StringLength(4)] public string IcaoCode { get; set; } [StringLength(3)] public string FwName { get; set; } [StringLength(20)] public string Status { get; set; } //public new static IEnumerable<string> AvailablePermissions //{ // get { yield return SecurityOperations.ActivateAircrafts; } //} public virtual ICollection<Aircraft> Aircrafts { get; set; } } }
mehrandvd/Tralus
Samples/Infrastructure/Source/Mahan.Tralus.Infrastructure.BusinessModel/Entities/General/AircraftType.cs
C#
apache-2.0
1,635
package com.github.open96.jypm.fxml; import com.github.open96.jypm.download.DownloadManager; import com.github.open96.jypm.thread.TASK_TYPE; import com.github.open96.jypm.thread.ThreadManager; import javafx.application.Platform; import javafx.fxml.FXML; import javafx.fxml.Initializable; import javafx.scene.control.ScrollPane; import javafx.scene.text.Text; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import java.net.URL; import java.util.ResourceBundle; public class DetailsWindowController implements Initializable { public static boolean threadKiller = false; //Initialize log4j logger for later use in this class private static final Logger LOG = LogManager.getLogger(DetailsWindowController.class.getName()); @FXML ScrollPane scrollPane; @FXML Text detailsText; @Override public void initialize(URL location, ResourceBundle resources) { //Start a task on separate Thread that will update text displayed in the window ThreadManager .getInstance() .sendVoidTask(new Thread(() -> { while (ThreadManager.getExecutionPermission()) { try { String details = DownloadManager .getInstance() .getDetailsString(); if (details != null) { if (!detailsText.getText().equals(details) && details.toCharArray().length >= 0) { //If details String is different from what is being displayed //to user - change it and scroll to its bottom Platform.runLater(() -> detailsText.setText(details)); scrollPane.setVvalue(1.0); } } Thread.sleep(1000); if (threadKiller) { break; } } catch (InterruptedException | NullPointerException | ArrayIndexOutOfBoundsException | NegativeArraySizeException e) { LOG.error("There was a problem during initialization", e); } } }), TASK_TYPE.UI); } }
Open96/JYpm
jypm-fxml/src/main/java/com/github/open96/jypm/fxml/DetailsWindowController.java
Java
apache-2.0
2,470
/** * $RCSfile: UserAuthentication.java,v $ * $Revision: 1.1 $ * $Date: 2012/11/23 06:05:05 $ * * Copyright 2003-2007 Jive Software. * * All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jivesoftware.smack; import org.apache.harmony.javax.security.auth.callback.CallbackHandler; /** * There are two ways to authenticate a user with a server. Using SASL or Non-SASL * authentication. This interface makes {@link SASLAuthentication} and * {@link NonSASLAuthentication} polyphormic. * * @author Gaston Dombiak * @author Jay Kline */ interface UserAuthentication { /** * Authenticates the user with the server. This method will return the full JID provided by * the server. The server may assign a full JID with a username and resource different than * requested by this method. * * Note that using callbacks is the prefered method of authenticating users since it allows * more flexability in the mechanisms used. * * @param username the requested username (authorization ID) for authenticating to the server * @param resource the requested resource. * @param cbh the CallbackHandler used to obtain authentication ID, password, or other * information * @return the full JID provided by the server while binding a resource for the connection. * @throws XMPPException if an error occurs while authenticating. */ String authenticate(String username, String resource, CallbackHandler cbh) throws XMPPException; /** * Authenticates the user with the server. 
This method will return the full JID provided by * the server. The server may assign a full JID with a username and resource different than * the requested by this method. * * It is recommended that @{link #authenticate(String, String, CallbackHandler)} be used instead * since it provides greater flexability in authenticaiton and authorization. * * @param username the username that is authenticating with the server. * @param password the password to send to the server. * @param resource the desired resource. * @return the full JID provided by the server while binding a resource for the connection. * @throws XMPPException if an error occures while authenticating. */ String authenticate(String username, String password, String resource) throws XMPPException; /** * Performs an anonymous authentication with the server. The server will created a new full JID * for this connection. An exception will be thrown if the server does not support anonymous * authentication. * * @return the full JID provided by the server while binding a resource for the connection. * @throws XMPPException if an error occures while authenticating. */ String authenticateAnonymously() throws XMPPException; }
micorochio/SVN
workspace/MobilePlatform/asmack/org/jivesoftware/smack/UserAuthentication.java
Java
apache-2.0
3,424
""" Support for Wink binary sensors. For more details about this platform, please refer to the documentation at at https://home-assistant.io/components/binary_sensor.wink/ """ import asyncio import logging from homeassistant.components.binary_sensor import BinarySensorDevice from homeassistant.components.wink import DOMAIN, WinkDevice _LOGGER = logging.getLogger(__name__) DEPENDENCIES = ['wink'] # These are the available sensors mapped to binary_sensor class SENSOR_TYPES = { 'brightness': 'light', 'capturing_audio': 'sound', 'capturing_video': None, 'co_detected': 'gas', 'liquid_detected': 'moisture', 'loudness': 'sound', 'motion': 'motion', 'noise': 'sound', 'opened': 'opening', 'presence': 'occupancy', 'smoke_detected': 'smoke', 'vibration': 'vibration', } def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Wink binary sensor platform.""" import pywink for sensor in pywink.get_sensors(): _id = sensor.object_id() + sensor.name() if _id not in hass.data[DOMAIN]['unique_ids']: if sensor.capability() in SENSOR_TYPES: add_entities([WinkBinarySensorDevice(sensor, hass)]) for key in pywink.get_keys(): _id = key.object_id() + key.name() if _id not in hass.data[DOMAIN]['unique_ids']: add_entities([WinkBinarySensorDevice(key, hass)]) for sensor in pywink.get_smoke_and_co_detectors(): _id = sensor.object_id() + sensor.name() if _id not in hass.data[DOMAIN]['unique_ids']: add_entities([WinkSmokeDetector(sensor, hass)]) for hub in pywink.get_hubs(): _id = hub.object_id() + hub.name() if _id not in hass.data[DOMAIN]['unique_ids']: add_entities([WinkHub(hub, hass)]) for remote in pywink.get_remotes(): _id = remote.object_id() + remote.name() if _id not in hass.data[DOMAIN]['unique_ids']: add_entities([WinkRemote(remote, hass)]) for button in pywink.get_buttons(): _id = button.object_id() + button.name() if _id not in hass.data[DOMAIN]['unique_ids']: add_entities([WinkButton(button, hass)]) for gang in pywink.get_gangs(): _id = gang.object_id() + 
gang.name() if _id not in hass.data[DOMAIN]['unique_ids']: add_entities([WinkGang(gang, hass)]) for door_bell_sensor in pywink.get_door_bells(): _id = door_bell_sensor.object_id() + door_bell_sensor.name() if _id not in hass.data[DOMAIN]['unique_ids']: add_entities([WinkBinarySensorDevice(door_bell_sensor, hass)]) for camera_sensor in pywink.get_cameras(): _id = camera_sensor.object_id() + camera_sensor.name() if _id not in hass.data[DOMAIN]['unique_ids']: try: if camera_sensor.capability() in SENSOR_TYPES: add_entities([WinkBinarySensorDevice(camera_sensor, hass)]) except AttributeError: _LOGGER.info("Device isn't a sensor, skipping") class WinkBinarySensorDevice(WinkDevice, BinarySensorDevice): """Representation of a Wink binary sensor.""" def __init__(self, wink, hass): """Initialize the Wink binary sensor.""" super().__init__(wink, hass) if hasattr(self.wink, 'unit'): self._unit_of_measurement = self.wink.unit() else: self._unit_of_measurement = None if hasattr(self.wink, 'capability'): self.capability = self.wink.capability() else: self.capability = None @asyncio.coroutine def async_added_to_hass(self): """Call when entity is added to hass.""" self.hass.data[DOMAIN]['entities']['binary_sensor'].append(self) @property def is_on(self): """Return true if the binary sensor is on.""" return self.wink.state() @property def device_class(self): """Return the class of this sensor, from DEVICE_CLASSES.""" return SENSOR_TYPES.get(self.capability) @property def device_state_attributes(self): """Return the device state attributes.""" return super().device_state_attributes class WinkSmokeDetector(WinkBinarySensorDevice): """Representation of a Wink Smoke detector.""" @property def device_state_attributes(self): """Return the device state attributes.""" _attributes = super().device_state_attributes _attributes['test_activated'] = self.wink.test_activated() return _attributes class WinkHub(WinkBinarySensorDevice): """Representation of a Wink Hub.""" @property def 
device_state_attributes(self): """Return the device state attributes.""" _attributes = super().device_state_attributes _attributes['update_needed'] = self.wink.update_needed() _attributes['firmware_version'] = self.wink.firmware_version() _attributes['pairing_mode'] = self.wink.pairing_mode() _kidde_code = self.wink.kidde_radio_code() if _kidde_code is not None: # The service call to set the Kidde code # takes a string of 1s and 0s so it makes # sense to display it to the user that way _formatted_kidde_code = "{:b}".format(_kidde_code).zfill(8) _attributes['kidde_radio_code'] = _formatted_kidde_code return _attributes class WinkRemote(WinkBinarySensorDevice): """Representation of a Wink Lutron Connected bulb remote.""" @property def device_state_attributes(self): """Return the state attributes.""" _attributes = super().device_state_attributes _attributes['button_on_pressed'] = self.wink.button_on_pressed() _attributes['button_off_pressed'] = self.wink.button_off_pressed() _attributes['button_up_pressed'] = self.wink.button_up_pressed() _attributes['button_down_pressed'] = self.wink.button_down_pressed() return _attributes @property def device_class(self): """Return the class of this sensor, from DEVICE_CLASSES.""" return None class WinkButton(WinkBinarySensorDevice): """Representation of a Wink Relay button.""" @property def device_state_attributes(self): """Return the device state attributes.""" _attributes = super().device_state_attributes _attributes['pressed'] = self.wink.pressed() _attributes['long_pressed'] = self.wink.long_pressed() return _attributes class WinkGang(WinkBinarySensorDevice): """Representation of a Wink Relay gang.""" @property def is_on(self): """Return true if the gang is connected.""" return self.wink.state()
persandstrom/home-assistant
homeassistant/components/binary_sensor/wink.py
Python
apache-2.0
6,764
package k8sauth import ( "context" k8s "github.com/Bplotka/oidc/login/k8scache" "github.com/improbable-eng/kedge/pkg/tokenauth" directauth "github.com/improbable-eng/kedge/pkg/tokenauth/sources/direct" oauth2auth "github.com/improbable-eng/kedge/pkg/tokenauth/sources/oauth2" oidcauth "github.com/improbable-eng/kedge/pkg/tokenauth/sources/oidc" "github.com/pkg/errors" cfg "k8s.io/client-go/tools/clientcmd" ) // New constructs appropriate tokenAuth Source to the given AuthInfo from kube config referenced by user. // This is really convenient if you want to reuse well configured kube config. func New(ctx context.Context, name string, configPath string, userName string) (tokenauth.Source, error) { if configPath == "" { configPath = k8s.DefaultKubeConfigPath } k8sConfig, err := cfg.LoadFromFile(configPath) if err != nil { return nil, errors.Wrapf(err, "Failed to load k8s config from file %v. Make sure it is there or change"+ " permissions.", configPath) } info, ok := k8sConfig.AuthInfos[userName] if !ok { return nil, errors.Errorf("Failed to find user %s inside k8s config AuthInfo from file %v", userName, configPath) } // Currently supported: // - token // - OIDC // - Google compute platform via Oauth2 if info.AuthProvider != nil { switch info.AuthProvider.Name { case "oidc": cache, err := k8s.NewCacheFromUser(configPath, userName) if err != nil { return nil, errors.Wrap(err, "Failed to get OIDC configuration from user. ") } s, _, err := oidcauth.NewWithCache(ctx, name, cache, nil) return s, err case "gcp": c, err := oauth2auth.NewConfigFromMap(info.AuthProvider.Config) if err != nil { return nil, errors.Wrap(err, "Failed to create OAuth2 config from map.") } return oauth2auth.NewGCP(name, userName, configPath, c) default: // TODO(bplotka): Add support for more of them if needed. 
return nil, errors.Errorf("Not supported k8s Auth provider %v", info.AuthProvider.Name) } } if info.Token != "" { return directauth.New(name, info.Token), nil } return nil, errors.Errorf("Not found supported auth source called %s from k8s config %+v", userName, info) }
mwitkow/kedge
pkg/tokenauth/sources/k8s/k8s.go
GO
apache-2.0
2,170
<?php /** * Created by PhpStorm. * User: Administrator * Date: 3/11/2016 * Time: 7:35 PM */ if (!defined('BASEPATH')) exit('No direct script access allowed'); require_once APPPATH."/third_party/PHPEpp/Protocols/EPP/eppHttpsConnection.php"; class EppHttpsConnection extends Metaregistrar\EPP\eppHttpsConnection { public function __construct() { parent::__construct(); } }
aasiimweDataCare/sugarGirls
application/libraries/ProtocolsEPP_eppHttspConnection.php
PHP
apache-2.0
407
package tsvadaptor import ( "github.com/ProtoML/ProtoML/formatadaptor/delimiteradaptor" ) func New() *delimiteradaptor.DelimiterAdaptor { return delimiteradaptor.New('\t') }
ProtoML/ProtoML
formatadaptor/tsvadaptor/tsvadaptor.go
GO
apache-2.0
179
module IOStreams module Line class Writer < IOStreams::Writer attr_reader :delimiter # Write a line at a time to a stream. def self.stream(output_stream, **args) # Pass-through if already a line writer return yield(output_stream) if output_stream.is_a?(self.class) yield new(output_stream, **args) end # A delimited stream writer that will write to the supplied output stream. # # The output stream will have the encoding of data written to it. # To change the output encoding, use IOStreams::Encode::Writer. # # Parameters # output_stream # The output stream that implements #write # # delimiter: [String] # Add the specified delimiter after every record when writing it # to the output stream # Default: OS Specific. Linux: "\n" def initialize(output_stream, delimiter: $/, original_file_name: nil) super(output_stream) @delimiter = delimiter end # Write a line to the output stream # # Example: # IOStreams.path('a.txt').writer(:line) do |stream| # stream << 'first line' << 'second line' # end def <<(data) write(data) self end # Write a line to the output stream followed by the delimiter. # Returns [Integer] the number of bytes written. # # Example: # IOStreams.path('a.txt').writer(:line) do |stream| # count = stream.write('first line') # puts "Wrote #{count} bytes to the output file, including the delimiter" # end def write(data) output_stream.write(data.to_s + delimiter) end end end end
rocketjob/iostreams
lib/io_streams/line/writer.rb
Ruby
apache-2.0
1,744
/* * Copyright (c) 2014 Personal-Health-Monitoring-System * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.cse3310.phms.ui.views.wiziard_model; import android.content.Context; import android.text.InputType; import co.juliansuarez.libwizardpager.wizard.model.AbstractWizardModel; import co.juliansuarez.libwizardpager.wizard.model.PageList; import com.andreabaccega.formedittextvalidator.Validator; import com.andreabaccega.formedittextvalidator.WebUrlValidator; import com.cse3310.phms.model.EStorage; import com.cse3310.phms.ui.cards.UrlCard; import com.cse3310.phms.ui.views.pager.EditTextPage; import de.greenrobot.event.EventBus; /** * Created by Owner on 4/16/14. */ public class UrlWizardModel extends AbstractWizardModel { public static final String URL_LINK = "Web Address"; public static final String URL_TITLE = "Web Title"; private UrlCard mUrlCard; public UrlWizardModel(Context context) { super(context); } @Override protected PageList onNewRootPageList() { Validator urlValidator = new WebUrlValidator("Not a valid URL."); // Check if a card was pass to this class to determine if the user // adding or editing a card. mUrlCard = EventBus.getDefault().removeStickyEvent(UrlCard.class); if (mUrlCard != null) { // if a card was passed, the user pressed the edit button. EStorage UrlInfo = mUrlCard.getUrlInfo(); // since we are editing the card, let pre-set all the values. 
return new PageList( new EditTextPage(this, URL_LINK, urlValidator).setValue(UrlInfo.getUrl()).setInputType(InputType.TYPE_CLASS_TEXT), new EditTextPage(this, URL_TITLE).setValue(UrlInfo.getTitle()).setInputType(InputType.TYPE_CLASS_TEXT) ); } return new PageList( new EditTextPage(this, URL_LINK, urlValidator).setInputType(InputType.TYPE_CLASS_TEXT), new EditTextPage(this, URL_TITLE).setInputType(InputType.TYPE_CLASS_TEXT) ); } public UrlCard getUrlCard() { return mUrlCard; } }
Thangiee/Personal-Health-Monitoring-System
PHMS/src/main/java/com/cse3310/phms/ui/views/wiziard_model/UrlWizardModel.java
Java
apache-2.0
2,647
package me.aaron.androidprocessalive; import android.os.Bundle; import android.support.v7.app.AppCompatActivity; import android.support.v7.widget.Toolbar; import android.view.Menu; import android.view.MenuItem; import android.view.View; import me.aaron.androidprocessalive.service.ForegroundService; import me.aaron.androidprocessalive.service.GrayService; import me.aaron.androidprocessalive.service.NormalService; import me.aaron.androidprocessalive.service.NotifyService; public class MainActivity extends AppCompatActivity { @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_main); Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar); setSupportActionBar(toolbar); initViews(); } private void initViews() { findViewById(R.id.btn_start_normal).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { NormalService.actionStart(MainActivity.this); } }); findViewById(R.id.btn_start_foreground).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { ForegroundService.actionStart(MainActivity.this); } }); findViewById(R.id.btn_start_gray).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { GrayService.actionStart(MainActivity.this); } }); } @Override protected void onStart() { super.onStart(); NotifyService.actionStart(this); } @Override public boolean onCreateOptionsMenu(Menu menu) { getMenuInflater().inflate(R.menu.menu_main, menu); return true; } @Override public boolean onOptionsItemSelected(MenuItem item) { int id = item.getItemId(); if (id == R.id.action_settings) { return true; } return super.onOptionsItemSelected(item); } }
AaronChanSunny/AndroidProcessAlive
app/src/main/java/me/aaron/androidprocessalive/MainActivity.java
Java
apache-2.0
2,119
export const indicators = Object.freeze({ CARET: "caret", OPERATOR: "operator" }); export const AVAILABLE_INDICATORS = Object.freeze(Object.values(indicators));
Autodesk/hig
packages/tree-view/src/constants.js
JavaScript
apache-2.0
166
/******************************************************************************* * Copyright FUJITSU LIMITED 2017 *******************************************************************************/ // // This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.4-2 // See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a> // Any modifications to this file will be lost upon recompilation of the source schema. // Generated on: 2013.05.22 at 12:50:01 PM CEST // package org.oscm.saml2.api.model.assertion; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlType; /** * <p> * Java class for OneTimeUseType complex type. * * <p> * The following schema fragment specifies the expected content contained within * this class. * * <pre> * &lt;complexType name="OneTimeUseType"> * &lt;complexContent> * &lt;extension base="{urn:oasis:names:tc:SAML:2.0:assertion}ConditionAbstractType"> * &lt;/extension> * &lt;/complexContent> * &lt;/complexType> * </pre> * * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "OneTimeUseType") public class OneTimeUseType extends ConditionAbstractType { }
opetrovski/development
oscm-saml2-api/javasrc/org/oscm/saml2/api/model/assertion/OneTimeUseType.java
Java
apache-2.0
1,284
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.mengge.pagefactory.locator; import org.openqa.selenium.support.pagefactory.ElementLocator; public interface CacheableLocator extends ElementLocator { public boolean isLookUpCached(); }
JoeUtt/menggeqa
src/main/java/com/mengge/pagefactory/locator/CacheableLocator.java
Java
apache-2.0
877
using System; using System.Collections.Generic; using System.Diagnostics.Contracts; using QuickGraph.Contracts; namespace QuickGraph { /// <summary> /// A mutable incidence graph /// </summary> /// <typeparam name="TVertex"></typeparam> /// <typeparam name="TEdge"></typeparam> [ContractClass(typeof(IMutableIncidenceGraphContract<,>))] public interface IMutableIncidenceGraph<TVertex,TEdge> : IMutableGraph<TVertex,TEdge> , IIncidenceGraph<TVertex,TEdge> where TEdge : IEdge<TVertex> { /// <summary> /// Removes all out edges of <paramref name="v"/> /// where <paramref name="predicate"/> evalutes to true. /// </summary> /// <param name="v"></param> /// <param name="predicate"></param> /// <returns></returns> int RemoveOutEdgeIf( TVertex v, EdgePredicate<TVertex, TEdge> predicate); /// <summary> /// Trims the out edges of vertex <paramref name="v"/> /// </summary> /// <param name="v"></param> void ClearOutEdges(TVertex v); /// <summary> /// Trims excess storage allocated for edges /// </summary> void TrimEdgeExcess(); } }
ezg/PanoramicDataWin8
PanoramicDataWin8/utils/GraphSharpUWP/QuickGraph/IMutableIncidenceGraph.cs
C#
apache-2.0
1,259