code
stringlengths
3
1.05M
repo_name
stringlengths
4
116
path
stringlengths
4
991
language
stringclasses
9 values
license
stringclasses
15 values
size
int32
3
1.05M
var a = require('A/a'); a.A();
wangyanxing/TypeScript
tests/baselines/reference/projectOutput/relativePaths/node/app.js
JavaScript
apache-2.0
33
(function () { 'use strict'; /** * Provides AddMe dialog to add/remove a user to a contact collection. * * @requires * emailMeDialog.html and ui.bootstrap module */ var controllerId1 = 'emailMeController'; var controllerId2 = 'emailMeModalController'; angular.module('app').controller(controllerId1, ['$scope', '$uibModal', 'common', emailMeController]); angular.module('app').controller(controllerId2, ['$rootScope', '$uibModalInstance', 'profile', 'common', 'config', 'profileCollectionService',emailMeModalController]); /** * Parent controller, it passes profile to the child controller. * * @example * To use this <a class="btn" * ng-controller="emailMeController" * ng-click="openEmailMeModal(vm.profile)"> */ function emailMeController($scope, $uibModal, common) { var getLogFn = common.logger.getLogFn; var log = getLogFn(controllerId1); /** * Opens the AddMe modal dialog for the authenticated user in attempt * to add/remove the target user. * The button that has this method attached to should NOT show up if * the request is not authenticated. * * @param * profile: the user profile who is being added or removed by the * authenticated user. */ $scope.openEmailMeModal = function (profile) { var modalInstance = $uibModal.open({ templateUrl: '/wwwroot/app/profile/emailMe/emailMeDialog.html', controller: 'emailMeModalController as vm', resolve: { // passing to child controller below profile: function () { return profile; }, } }); }; } /** * Child controller used by the AddMeDialog.html, if a user is not authenticated * the user should not see this modal. 
* * @param * $rootScope: needed for $broadcast * $modalInstance: needed for close and dismiss of the dialog * profile: the target user profile * common: for logging * config: for events names */ function emailMeModalController($rootScope, $uibModalInstance, profile, common, config, profileCollectionService) { var getLogFn = common.logger.getLogFn; var log = getLogFn(controllerId2); var logError = getLogFn(controllerId2, 'error'); var logSuccess = getLogFn(controllerId2, 'success'); /* ---------------------------------------------------------- view model */ var vm = this; vm.message = ""; vm.profile = profile; vm.disableButton = false; vm.send = send; vm.cancel = cancel; vm.btnText = "Send"; /* ---------------------------------------------------------- public methods */ // todo: disable send button once clicked function send() { vm.disableButton = true; vm.btnText = "Sending..."; profileCollectionService.emailMe(profile.userName, vm.message).then(success, failed); function success() { $uibModalInstance.dismiss('cancel'); } function failed(err) { logError(err); vm.disableButton = false; vm.btnText = "Send"; } } function cancel() { $uibModalInstance.dismiss('cancel'); }; } })();
FanrayMedia/Chef.me
Projects/Chef.Web/wwwroot/app/profile/emailMe/emailMeDialog.js
JavaScript
apache-2.0
3,593
/** * Copyright (C) 2015 Valkyrie RCP * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.valkyriercp.binding.value.support; import org.valkyriercp.convert.ConversionExecutor; import org.valkyriercp.binding.value.DerivedValueModel; import org.valkyriercp.binding.value.ValueChangeDetector; import org.valkyriercp.binding.value.ValueModel; import org.valkyriercp.rules.closure.Closure; import org.valkyriercp.util.ValkyrieRepository; import java.beans.PropertyChangeListener; /** * A value model wrapper that supports converting the wrapped value to and from * another type using the supplied conversion Closures. * * @author Keith Donald * @author Oliver Hutchison */ public class TypeConverter extends AbstractValueModelWrapper implements DerivedValueModel { private final Closure convertTo; private final Closure convertFrom; /** * Convenience constructor using conversionExecutors. * * @param wrappedModel the inner valueModel * @param convertTo conversion to use when setting a value. * @param convertFrom conversion to use when getting a value. * * @see #TypeConverter(ValueModel, Closure, Closure) */ public TypeConverter(ValueModel wrappedModel, ConversionExecutor convertTo, ConversionExecutor convertFrom) { this(wrappedModel, new ConversionExecutorClosure(convertTo), new ConversionExecutorClosure(convertFrom)); } /** * Constructor which uses Closure blocks to convert between values. 
* * @param wrappedModel the inner valueModel * @param convertTo Closure to execute when setting a value. * @param convertFrom Closure to execute when getting a value. */ public TypeConverter(ValueModel wrappedModel, Closure convertTo, Closure convertFrom) { super(wrappedModel); this.convertTo = convertFrom; this.convertFrom = convertTo; } /** * {@inheritDoc} * * Value from inner model will be converted using the supplied convertFrom closure. */ public Object getValue() throws IllegalArgumentException { return convertFrom.call(super.getValue()); } public void setValueSilently(Object value, PropertyChangeListener listenerToSkip) throws IllegalArgumentException { // only set the convertTo value if the convertFrom value has changed if (getValueChangeDetector().hasValueChanged(getValue(), value)) { super.setValueSilently(convertTo.call(value), listenerToSkip); } } public ValueModel[] getSourceValueModels() { return new ValueModel[] { getWrappedValueModel() }; } public boolean isReadOnly() { return false; } protected ValueChangeDetector getValueChangeDetector() { return ValkyrieRepository.getInstance().getApplicationConfig().valueChangeDetector(); } /** * Helper class wrapping ConversionExecutors in a Closure. */ private static class ConversionExecutorClosure implements Closure { private final ConversionExecutor conversionExecutor; public ConversionExecutorClosure(ConversionExecutor conversionExecutor) { this.conversionExecutor = conversionExecutor; } public Object call(Object argument) { return conversionExecutor.execute(argument); } } }
lievendoclo/Valkyrie-RCP
valkyrie-rcp-core/src/main/java/org/valkyriercp/binding/value/support/TypeConverter.java
Java
apache-2.0
3,600
package org.ovirt.engine.core.dao; import java.util.List; import org.ovirt.engine.core.common.businessentities.network; import org.ovirt.engine.core.compat.Guid; /** * <code>NetworkDAO</code> defines a type for performing CRUD operations on instances of {@link network}. * * */ public interface NetworkDAO extends GenericDao<network, Guid> { /** * Retrieves the network with the specified name. * * @param name * the network name * @return the network */ network getByName(String name); /** * Retrieves all networks for the given data center. * * @param id * the data center * @return the list of networks */ List<network> getAllForDataCenter(Guid id); /** * Retrieves all networks for the given cluster. * * @param id * the cluster * @return the list of networks */ List<network> getAllForCluster(Guid id); /** * Retrieves all networks for the given cluster with optional permission filtering. * * @param id * the cluster * @param userID * the ID of the user requesting the information * @param isFiltered * Whether the results should be filtered according to the user's permissions * @return the list of networks */ List<network> getAllForCluster(Guid id, Guid userID, boolean isFiltered); }
Dhandapani/gluster-ovirt
backend/manager/modules/dal/src/main/java/org/ovirt/engine/core/dao/NetworkDAO.java
Java
apache-2.0
1,442
/********************************************************************** * * Name: cpl_atomic_ops.cpp * Project: CPL - Common Portability Library * Purpose: Atomic operation functions. * Author: Even Rouault, <even dot rouault at mines dash paris dot org> * ********************************************************************** * Copyright (c) 2009-2010, Even Rouault <even dot rouault at mines-paris dot org> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. ****************************************************************************/ #include "cpl_atomic_ops.h" #include "cpl_config.h" // TODO: If C++11, use #include <atomic>. 
CPL_CVSID("$Id: cpl_atomic_ops.cpp 9a53d222083f9d406142013c04cfccf2bb75655c 2018-02-13 16:44:50Z Even Rouault $") #if defined(_MSC_VER) #include <windows.h> int CPLAtomicAdd(volatile int* ptr, int increment) { return InterlockedExchangeAdd((volatile LONG*)(ptr), (LONG)(increment)) + increment; } int CPLAtomicCompareAndExchange(volatile int* ptr, int oldval, int newval) { return (LONG)InterlockedCompareExchange((volatile LONG*)(ptr), (LONG)newval, (LONG)oldval) == (LONG)oldval; } #elif defined(__MINGW32__) && defined(__i386__) #include <windows.h> int CPLAtomicAdd(volatile int* ptr, int increment) { return InterlockedExchangeAdd((LONG*)(ptr), (LONG)(increment)) + increment; } int CPLAtomicCompareAndExchange(volatile int* ptr, int oldval, int newval) { return (LONG)InterlockedCompareExchange((LONG*)(ptr), (LONG)newval, (LONG)oldval) == (LONG)oldval; } #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) int CPLAtomicAdd(volatile int* ptr, int increment) { int temp = increment; __asm__ __volatile__("lock; xaddl %0,%1" : "+r" (temp), "+m" (*ptr) : : "memory"); return temp + increment; } int CPLAtomicCompareAndExchange(volatile int* ptr, int oldval, int newval) { unsigned char ret; __asm__ __volatile__ ( " lock; cmpxchgl %2,%1\n" " sete %0\n" : "=q" (ret), "=m" (*ptr) : "r" (newval), "m" (*ptr), "a" (oldval) : "memory"); return static_cast<int>(ret); } #elif defined(HAVE_GCC_ATOMIC_BUILTINS) // Starting with GCC 4.1.0, built-in functions for atomic memory access are // provided. See: // http://gcc.gnu.org/onlinedocs/gcc-4.1.0/gcc/Atomic-Builtins.html // We use a ./configure test to determine whether this builtins are available. // as it appears that the GCC 4.1 version used on debian etch is broken when // linking such instructions. 
int CPLAtomicAdd( volatile int* ptr, int increment ) { if( increment > 0 ) return __sync_add_and_fetch(ptr, increment); return __sync_sub_and_fetch(ptr, -increment); } int CPLAtomicCompareAndExchange( volatile int* ptr, int oldval, int newval ) { return __sync_bool_compare_and_swap (ptr, oldval, newval); } #elif defined(__MACH__) && defined(__APPLE__) #include <libkern/OSAtomic.h> int CPLAtomicAdd(volatile int* ptr, int increment) { return OSAtomicAdd32(increment, (int*)(ptr)); } int CPLAtomicCompareAndExchange(volatile int* ptr, int oldval, int newval) { return OSAtomicCompareAndSwap32(oldval, newval, (int*)(ptr)); } #elif !defined(CPL_MULTIPROC_PTHREAD) #warning "Needs real lock API to implement properly atomic increment" // Dummy implementation. int CPLAtomicAdd(volatile int* ptr, int increment) { (*ptr) += increment; return *ptr; } int CPLAtomicCompareAndExchange( volatile int* ptr, int oldval, int newval ) { if( *ptr == oldval ) { *ptr = newval; return TRUE; } return FALSE; } #else #include "cpl_multiproc.h" static CPLLock *hAtomicOpLock = nullptr; // Slow, but safe, implementation using a mutex. int CPLAtomicAdd(volatile int* ptr, int increment) { CPLLockHolderD(&hAtomicOpLock, LOCK_SPIN); (*ptr) += increment; return *ptr; } int CPLAtomicCompareAndExchange(volatile int* ptr, int oldval, int newval) { CPLLockHolderD(&hAtomicOpLock, LOCK_SPIN); if( *ptr == oldval ) { *ptr = newval; return TRUE; } return FALSE; } #endif
naturalatlas/node-gdal
deps/libgdal/gdal/port/cpl_atomic_ops.cpp
C++
apache-2.0
5,249
/* * Copyright 2016 The Bazel Authors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.idea.blaze.base.lang.projectview; import static com.google.common.truth.Truth.assertThat; import com.google.common.base.Joiner; import com.google.idea.blaze.base.lang.projectview.completion.ProjectViewKeywordCompletionContributor; import com.google.idea.blaze.base.model.primitives.WorkspacePath; import com.google.idea.blaze.base.model.primitives.WorkspaceType; import com.google.idea.blaze.base.projectview.section.SectionParser; import com.google.idea.blaze.base.projectview.section.sections.Sections; import com.intellij.codeInsight.lookup.Lookup; import com.intellij.codeInsight.lookup.LookupElement; import com.intellij.psi.PsiFile; import java.util.Arrays; import java.util.stream.Collectors; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; /** Tests auto-complete in project view files */ @RunWith(JUnit4.class) public class ProjectViewCompletionTest extends ProjectViewIntegrationTestCase { private PsiFile setInput(String... fileContents) { return testFixture.configureByText(".blazeproject", Joiner.on("\n").join(fileContents)); } private void assertResult(String... 
resultingFileContents) { testFixture.getFile().getText(); testFixture.checkResult(Joiner.on("\n").join(resultingFileContents)); } @Test public void testSectionTypeKeywords() { setInput("<caret>"); String[] keywords = editorTest.getCompletionItemsAsStrings(); assertThat(keywords) .asList() .containsAllIn( Sections.getUndeprecatedParsers() .stream() .filter(ProjectViewKeywordCompletionContributor::handledSectionType) .map(SectionParser::getName) .collect(Collectors.toList())); } @Test public void testColonAndNewLineAndIndentInsertedAfterListSection() { setInput("sync_fla<caret>"); assertThat(editorTest.completeIfUnique()).isTrue(); assertResult("sync_flags:", " <caret>"); } @Test public void testWhitespaceDividerInsertedAfterScalarSection() { setInput("impo<caret>"); LookupElement[] completionItems = testFixture.completeBasic(); assertThat(completionItems[0].getLookupString()).isEqualTo("import"); testFixture.getLookup().setCurrentItem(completionItems[0]); testFixture.finishLookup(Lookup.NORMAL_SELECT_CHAR); assertResult("import <caret>"); } @Test public void testColonDividerAndSpaceInsertedAfterScalarSection() { setInput("works<caret>"); assertThat(editorTest.completeIfUnique()).isTrue(); assertResult("workspace_type: <caret>"); } @Test public void testNoKeywordCompletionInListItem() { setInput("directories:", " <caret>"); String[] completionItems = editorTest.getCompletionItemsAsStrings(); if (completionItems == null) { Assert.fail("Spurious completion. New file contents: " + testFixture.getFile().getText()); } assertThat(completionItems).isEmpty(); } @Test public void testNoKeywordCompletionAfterKeyword() { setInput("import <caret>"); String[] completionItems = editorTest.getCompletionItemsAsStrings(); if (completionItems == null) { Assert.fail("Spurious completion. 
New file contents: " + testFixture.getFile().getText()); } assertThat(completionItems).isEmpty(); } @Test public void testWorkspaceTypeCompletion() { setInput("workspace_type: <caret>"); String[] types = editorTest.getCompletionItemsAsStrings(); assertThat(types) .asList() .containsAllIn( Arrays.stream(WorkspaceType.values()) .map(WorkspaceType::getName) .collect(Collectors.toList())); } @Test public void testUniqueDirectoryCompleted() { setInput("import <caret>"); workspace.createDirectory(new WorkspacePath("java")); String[] completionItems = editorTest.getCompletionItemsAsStrings(); assertThat(completionItems).isNull(); assertResult("import java<caret>"); } @Test public void testUniqueMultiSegmentDirectoryCompleted() { setInput("import <caret>"); workspace.createDirectory(new WorkspacePath("java/com/google")); String[] completionItems = editorTest.getCompletionItemsAsStrings(); assertThat(completionItems).isNull(); assertResult("import java/com/google<caret>"); } @Test public void testNonDirectoriesIgnoredForDirectoryOnlySection() { setInput("directories:", " <caret>"); workspace.createDirectory(new WorkspacePath("java/com/google")); workspace.createFile(new WorkspacePath("java/IgnoredFile.java")); String[] completionItems = editorTest.getCompletionItemsAsStrings(); assertThat(completionItems).isNull(); assertResult("directories:", " java/com/google<caret>"); } @Test public void testNonDirectoriesIncludedForSectionsAcceptingFiles() { setInput("import java<caret>"); workspace.createFile(new WorkspacePath("java/.blazeproject")); String[] completionItems = editorTest.getCompletionItemsAsStrings(); assertThat(completionItems).isNull(); assertResult("import java/.blazeproject<caret>"); } @Test public void testMultipleDirectoryOptions() { workspace.createDirectory(new WorkspacePath("foo")); workspace.createDirectory(new WorkspacePath("bar")); workspace.createDirectory(new WorkspacePath("other")); workspace.createDirectory(new WorkspacePath("ostrich/foo")); 
workspace.createDirectory(new WorkspacePath("ostrich/fooz")); setInput("targets:", " //o<caret>"); String[] completionItems = editorTest.getCompletionItemsAsSuggestionStrings(); assertThat(completionItems).asList().containsExactly("other", "ostrich"); editorTest.performTypingAction(testFixture.getEditor(), 's'); completionItems = editorTest.getCompletionItemsAsStrings(); assertThat(completionItems).isNull(); assertResult("targets:", " //ostrich<caret>"); } @Test public void testTargetCompletion() { workspace.createFile(new WorkspacePath("BUILD"), "java_library(name = 'lib')"); setInput("targets:", " //:<caret>"); String[] completionItems = editorTest.getCompletionItemsAsSuggestionStrings(); assertThat(completionItems).isNull(); assertResult("targets:", " //:lib<caret>"); } }
bazelbuild/intellij
base/tests/integrationtests/com/google/idea/blaze/base/lang/projectview/ProjectViewCompletionTest.java
Java
apache-2.0
6,931
/******************************************************************************* * DARPA XDATA licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Copyright 2013 Raytheon BBN Technologies Corp. All Rights Reserved. * ******************************************************************************/ package com.bbn.xdata.twitter; public class TwitterDataRecord { private TwitterDataSchema schema = new TwitterDataSchema(); private String[] rawData = new String[schema.size()]; public TwitterDataRecord( ) { } public TwitterDataRecord( TwitterDataSchema schema ) { this.schema = schema; rawData = new String[schema.size()]; } public boolean set( TwitterDataColumn col, String data ) { int index = schema.getColIndex( col ); if( index >= 0 ) { rawData[index] = data; return true; } else { throw new IllegalArgumentException( "Column " + col + " is not part of the schema for this record. Data not inserted." ); } } public String get( TwitterDataColumn col ) { int index = schema.getColIndex( col ); if( index >= 0 ) { return rawData[index]; } else { throw new IllegalArgumentException( "Column " + col + " is not part of the schema for this record. Cannot get data." ); } } public String get( int index ) { if( index >= 0 && index < rawData.length ) { return rawData[index]; } else { throw new IllegalArgumentException( "Invalid index " + index + " for this record. Cannot get data." 
); } } public String[] getRawData() { return rawData.clone(); } public String toCSVString() { return toDelimitedString( "," ); } public String toTSVString() { return toDelimitedString( "\t" ); } public String toDelimitedString(String delim) { StringBuffer sb = new StringBuffer(); boolean first = true; for( String data : rawData ) { if( first ) { sb.append( data ); first = false; } else { sb.append( delim ); sb.append( data ); } } return sb.toString(); } /** * @param args */ public static void main(String[] args) { // TODO Auto-generated method stub } }
plamenbbn/XDATA
pint/TwitterData/src/com/bbn/xdata/twitter/TwitterDataRecord.java
Java
apache-2.0
2,641
/** * */ package org.javahispano.jfootball.core; /** * @author alfonso * */ public class PlayerBase extends MovingEntity { private String name; private int number; private double velocity; private double power; private double accuracy; public PlayerBase() { this.name = "No name"; this.number = 0; this.velocity = 0; this.power = 0; this.accuracy = 0; } public PlayerBase(String name, int number, double velocity, double power, double accuracy) { this.name = name; this.number = number; this.velocity = velocity; this.power = power; this.accuracy = accuracy; } /** * @return the name */ public String getName() { return name; } /** * @param name the name to set */ public void setName(String name) { this.name = name; } /** * @return the number */ public int getNumber() { return number; } /** * @param number the number to set */ public void setNumber(int number) { this.number = number; } /** * @return the velocity */ public double getVelocity() { return velocity; } /** * @param velocity the velocity to set */ public void setVelocity(double velocity) { this.velocity = velocity; } /** * @return the power */ public double getPower() { return power; } /** * @param power the power to set */ public void setPower(double power) { this.power = power; } /** * @return the accuracy */ public double getAccuracy() { return accuracy; } /** * @param accuracy the accuracy to set */ public void setAccuracy(double accuracy) { this.accuracy = accuracy; } }
alfonsodou/jfootball
core/src/org/javahispano/jfootball/core/PlayerBase.java
Java
apache-2.0
1,593
# coding=utf-8 # Copyright 2022 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for tree_utils.""" import pytest from tensorflow_datasets.core.utils import tree_utils def test_tree_parallel_map(): assert tree_utils.parallel_map(lambda x: x * 10, { 'a': [1, 2, 3], 'b': [4, 5] }) == { 'a': [10, 20, 30], 'b': [40, 50] } def test_tree_parallel_map_reraise(): def fn(x): raise ValueError('Bad value') with pytest.raises(ValueError, match='Bad value'): tree_utils.parallel_map(fn, [1])
tensorflow/datasets
tensorflow_datasets/core/utils/tree_utils_test.py
Python
apache-2.0
1,072
/** * Tests for the "ConvertList2Array" and "ConvertMatrix2List" tasks. * * @author Dmitrii Eskov (eskovdmi@gmail.com) * @version 1.0 * @since 22.01.2019 */ package ru.job4j.list;
DmitriiEskov/deskov
chapter_003/src/test/java/ru/job4j/list/package-info.java
Java
apache-2.0
185
<?php namespace OpenCloud\Zf2\Enum; /** * Provider enumerated types * * @package OpenCloud\Zf2\Enum */ class Provider { const RACKSPACE = 'Rackspace'; const OPENSTACK = 'OpenStack'; }
jamiehannaford/php-opencloud-zf2
src/Enum/Provider.php
PHP
apache-2.0
199
// *********************************************************************************** // Created by zbw911 // 创建于:2013年06月03日 16:48 // // 修改于:2013年06月03日 17:25 // 文件名:CASServer/WebApp.Test/UnitTestApi.cs // // 如果有更好的建议或意见请邮件至 zbw911#gmail.com // *********************************************************************************** using System; using System.Globalization; using System.IO; using System.Text; using Microsoft.VisualStudio.TestTools.UnitTesting; namespace WebApp.Test { [TestClass] public class UnitTestApi { #region Instance Methods [TestMethod] public void MyTestMethod_GetReg() { var url = "http://localhost:34382/api/User/GetRegDateTime?uid=111111111"; var result = Dev.Comm.Net.Http.GetUrl(url); Console.WriteLine(result); var time = Dev.Comm.JsonConvert.ToJsonObject<DateTime?>(result); Console.WriteLine(time ?? System.DateTime.MinValue); } [TestMethod] public void TestMethod1() { var url = "http://localhost:34382/api/User/GetUserInfo?uid=10011838"; var result = Dev.Comm.Net.Http.GetUrl(url); Console.WriteLine(result); } [TestMethod] public void TestMethod12() { var url = "http://localhost:34382/api/User/GetUserInfoByNickname?nickname=张保维"; var result = Dev.Comm.Net.Http.GetUrl(url); Console.WriteLine(result); } [TestMethod] public void TestMethod1GetUserInfoList() { var url = "http://localhost:34382/api/User/GetUserInfoList?uids=10011838&uids=10011839"; var result = Dev.Comm.Net.Http.GetUrl(url); Console.WriteLine(result); } [TestMethod] public void TestMethodCheckByNickNames() { var url = "http://localhost:34382/api/User/CheckNick?nickname=张保维"; var result = Dev.Comm.Net.Http.GetUrl(url); Console.WriteLine(result); } [TestMethod] public void TestMethodCheckByNickNames2() { var url = "http://localhost:34382/api/User/CheckNick?nickname=张保维1"; var result = Dev.Comm.Net.Http.GetUrl(url); Console.WriteLine(result); } [TestMethod] public void TestMethodGetUserInfoByUserName() { var url = 
"http://localhost:34382/api/User/GetUserInfoByUserName?username=zbw911@qq.com"; var result = Dev.Comm.Net.Http.GetUrl(url); Console.WriteLine(result); } [TestMethod] public void TestMethodGetUserProfileByNickNames() { var url = "http://localhost:34382/api/User/GetUserInfoListByNickNames?nicknames=张保维&nicknames=zbw911"; var result = Dev.Comm.Net.Http.GetUrl(url); Console.WriteLine(result); } [TestMethod] public void MyTestMethod() { EntryPointNotFoundException entryPointNotFoundException = new EntryPointNotFoundException(); StringBuilder builder = new StringBuilder(); builder.ToString().Split(",".ToCharArray()); var fileName = ""; Stream stream = new FileStream(fileName, FileMode.Open); StreamReader reader = new StreamReader(stream); EntryPointNotFoundException exception = new EntryPointNotFoundException(fileName); Stream inputStream = File.OpenRead(fileName); } #endregion } }
zbw911/CasServer
Presentation/WebApp.Test/UnitTestApi.cs
C#
apache-2.0
3,831
/* * Copyright 2012-2013 inBloom, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.slc.sli.modeling.rest.helpers; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.util.ArrayList; import java.util.List; import java.util.Stack; import org.junit.Test; import org.slc.sli.modeling.rest.Param; import org.slc.sli.modeling.rest.ParamStyle; import org.slc.sli.modeling.rest.Resource; /** * JUnit test for RestHelper class. 
* * @author wscott * */ public class TestRestHelper { private static final String R1P1_ID = "r1p1"; private static final String R2P1_ID = "r2p1"; private static final String R2P2_ID = "r2p2"; private static final String R3P1_ID = "r3p1"; private static final String R3P2_ID = "r3p2"; @Test public void testConstructor() { new RestHelper(); } @Test public void testComputeRequestTemplateParamsEmptyAncestors() { Stack<Resource> resources = new Stack<Resource>(); Param r1p1 = mock(Param.class); when(r1p1.getStyle()).thenReturn(ParamStyle.TEMPLATE); when(r1p1.getId()).thenReturn(R1P1_ID); List<Param> r1Params = new ArrayList<Param>(1); r1Params.add(r1p1); Resource r1 = mock(Resource.class); when(r1.getParams()).thenReturn(r1Params); List<Param> templateParams = RestHelper.computeRequestTemplateParams(r1, resources); assertEquals(1, templateParams.size()); assertEquals(R1P1_ID, templateParams.get(0).getId()); } @Test public void testComputeRequestTemplateParams() { Stack<Resource> resources = new Stack<Resource>(); // mock first resource Param r1p1 = mock(Param.class); when(r1p1.getStyle()).thenReturn(ParamStyle.TEMPLATE); when(r1p1.getId()).thenReturn(R1P1_ID); List<Param> r1Params = new ArrayList<Param>(1); r1Params.add(r1p1); Resource r1 = mock(Resource.class); when(r1.getParams()).thenReturn(r1Params); // mock second resource Param r2p1 = mock(Param.class); when(r2p1.getStyle()).thenReturn(ParamStyle.TEMPLATE); when(r2p1.getId()).thenReturn(R2P1_ID); Param r2p2 = mock(Param.class); when(r2p2.getStyle()).thenReturn(ParamStyle.QUERY); when(r2p2.getId()).thenReturn(R2P2_ID); List<Param> r2Params = new ArrayList<Param>(2); r2Params.add(r2p1); r2Params.add(r2p2); Resource r2 = mock(Resource.class); when(r2.getParams()).thenReturn(r2Params); // mock third resource Param r3p1 = mock(Param.class); when(r3p1.getStyle()).thenReturn(ParamStyle.TEMPLATE); when(r3p1.getId()).thenReturn(R3P1_ID); Param r3p2 = mock(Param.class); when(r3p2.getStyle()).thenReturn(ParamStyle.TEMPLATE); 
when(r3p2.getId()).thenReturn(R3P2_ID); List<Param> r3Params = new ArrayList<Param>(2); r3Params.add(r3p1); r3Params.add(r3p2); Resource r3 = mock(Resource.class); when(r3.getParams()).thenReturn(r3Params); resources.push(r2); resources.push(r3); List<Param> templateParams = RestHelper.computeRequestTemplateParams(r1, resources); assertEquals(4, templateParams.size()); assertEquals(R3P1_ID, templateParams.get(0).getId()); assertEquals(R3P2_ID, templateParams.get(1).getId()); assertEquals(R2P1_ID, templateParams.get(2).getId()); assertEquals(R1P1_ID, templateParams.get(3).getId()); } @Test public void testReverse() { List<Integer> numbers = new ArrayList<Integer>(3); numbers.add(1); numbers.add(2); numbers.add(3); List<Integer> reversedNumbers = RestHelper.reverse(numbers); Integer crntNumber = 3; assertEquals(3, reversedNumbers.size()); for (Integer i : reversedNumbers) { assertEquals(crntNumber--, i); } } @Test public void testReverseEmpty() { List<Integer> numbers = new ArrayList<Integer>(0); List<Integer> reversedNumbers = RestHelper.reverse(numbers); assertEquals(0, reversedNumbers.size()); } @Test public void testReverseOne() { List<Integer> numbers = new ArrayList<Integer>(1); numbers.add(9); List<Integer> reversedNumbers = RestHelper.reverse(numbers); assertEquals(1, reversedNumbers.size()); assertEquals((Integer) 9, (Integer) reversedNumbers.get(0)); } }
inbloom/secure-data-service
sli/modeling/rest/src/test/java/org/slc/sli/modeling/rest/helpers/TestRestHelper.java
Java
apache-2.0
5,136
/******************************************************************************* * * Copyright (C) 2015-2022 the BBoxDB project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * *******************************************************************************/ package org.bboxdb.storage.tuplestore.manager; class DistributionRegionEntity { /** * The distribution group name */ private final String distributionGroupName; /** * The region id */ private final long regionId; public DistributionRegionEntity(final String distributionGroupName, final long regionId) { this.distributionGroupName = distributionGroupName; this.regionId = regionId; } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((distributionGroupName == null) ? 
0 : distributionGroupName.hashCode()); result = prime * result + (int) (regionId ^ (regionId >>> 32)); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (getClass() != obj.getClass()) return false; DistributionRegionEntity other = (DistributionRegionEntity) obj; if (distributionGroupName == null) { if (other.distributionGroupName != null) return false; } else if (!distributionGroupName.equals(other.distributionGroupName)) return false; if (regionId != other.regionId) return false; return true; } @Override public String toString() { return "DistributionRegionEntity [distributionGroupName=" + distributionGroupName + ", regionId=" + regionId + "]"; } public String getDistributionGroupName() { return distributionGroupName; } public long getRegionId() { return regionId; } }
jnidzwetzki/bboxdb
bboxdb-server/src/main/java/org/bboxdb/storage/tuplestore/manager/DistributionRegionEntity.java
Java
apache-2.0
2,285
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "URITest.h" #include <decaf/net/URI.h> #include <decaf/lang/Integer.h> #include <decaf/lang/Boolean.h> using namespace std; using namespace decaf; using namespace decaf::net; using namespace decaf::lang; using namespace decaf::lang::exceptions; //////////////////////////////////////////////////////////////////////////////// URITest::URITest() { } namespace decaf{ namespace net{ //////////////////////////////////////////////////////////////////////////////// std::vector<URI> getUris() { std::vector<URI> uris; uris.push_back( URI( "http://user%60%20info@host/a%20path?qu%60%20ery#fr%5E%20ag" ) ); // escaped octets for illegal chars uris.push_back( URI( "http", "user%60%20info", "host", 80, "/a%20path", "qu%60%20ery", "fr%5E%20ag" ) ); // escaped octets for illegal uris.push_back( URI( "http", "user%C3%9F%C2%A3info", "host", -1, "/a%E2%82%ACpath", "qu%C2%A9%C2%AEery", "fr%C3%A4%C3%A8g" ) ); // Illegal Chars uris.push_back( URI( "http", "user` info", "host", 81, "/a path", "qu` ery", "fr^ ag" ) ); // % as illegal char, not escaped octet uris.push_back( URI( "http", "user%info", "host", 0, "/a%path", "que%ry", "f%rag" ) ); uris.push_back( URI( "mailto", "user@domain.com", "" ) ); // no host, 
path, query or fragment uris.push_back( URI( "../adirectory/file.html#" ) ); // relative path with empty fragment; uris.push_back( URI( "news", "comp.infosystems.www.servers.unix", "" ) ); uris.push_back( URI( "", "", "", "fragment" ) ); uris.push_back( URI( "telnet://server.org" ) ); uris.push_back( URI( "http://reg:istry?query" ) ); uris.push_back( URI( "file:///c:/temp/calculate.pl?" ) ); return uris; } }} //////////////////////////////////////////////////////////////////////////////// void URITest::testConstructorOneString() { std::vector<std::string> constructorTests; constructorTests.push_back( "http://user@www.google.com:45/search?q=helpinfo#somefragment" ); // http with authority, query and fragment constructorTests.push_back( "ftp://ftp.is.co.za/rfc/rfc1808.txt" ); // ftp constructorTests.push_back( "gopher://spinaltap.micro.umn.edu/00/Weather/California/Los%20Angeles" ); // gopher constructorTests.push_back( "mailto:mduerst@ifi.unizh.ch" ); // mailto constructorTests.push_back( "news:comp.infosystems.www.servers.unix" ); // news constructorTests.push_back( "telnet://melvyl.ucop.edu/" ); // telnet constructorTests.push_back( "http://123.24.17.98/test" ); // IPv4 authority constructorTests.push_back( "http://www.google.com:80/test" );// domain name authority constructorTests.push_back( "http://joe@[3ffe:2a00:100:7031::1]:80/test" ); // IPv6 authority, with userinfo and port constructorTests.push_back( "/relative" ); // relative starting with / constructorTests.push_back( "//relative" ); // relative starting with // constructorTests.push_back( "relative" ); // relative with no / constructorTests.push_back( "#fragment" );// relative just with fragment constructorTests.push_back( "http://user@host:80" ); // UI, host,port constructorTests.push_back( "http://user@host" ); // ui, host constructorTests.push_back( "http://host" ); // host constructorTests.push_back( "http://host:80" ); // host,port constructorTests.push_back( "http://joe@:80" ); // ui, port 
(becomes registry-based) constructorTests.push_back( "file:///foo/bar" ); // empty authority, non empty path constructorTests.push_back( "ht?tp://hoe@host:80" ); // miscellaneous tests constructorTests.push_back( "mai/lto:hey?joe#man" ); constructorTests.push_back( "http://host/a%20path#frag" ); // path with an escaped octet for space char constructorTests.push_back( "http://host/a%E2%82%ACpath#frag" ); // path with escaped octet for unicode char, not USASCII // constructorTests.push_back( "http://host/a\u20ACpath#frag" ); // path with unicode char, not USASCII equivalent to constructorTests.push_back( "http://host%20name/" ); // escaped octets in host (becomes registry based) constructorTests.push_back( "http://host\u00DFname/" ); // unicodechar in host (becomes registry based) // equivalent to = "http://host\u00dfname/", constructorTests.push_back( "ht123-+tp://www.google.com:80/test" ); // legal chars in scheme for( unsigned int i = 0; i < constructorTests.size(); i++ ) { CPPUNIT_ASSERT_NO_THROW_MESSAGE( string( "Failed to construct URI for: " ) + constructorTests[i], URI( constructorTests.at(i) ) ); } std::vector<const char*> constructorTestsInvalid; // space char in path, not in escaped constructorTestsInvalid.push_back( "http:///a path#frag" ); // octet form, with no host constructorTestsInvalid.push_back( "http://host/a[path#frag" ); // an illegal char, not in escaped octet form, should throw an exception // invalid escape sequence in path constructorTestsInvalid.push_back( "http://host/a%path#frag" ); // incomplete escape sequence in path constructorTestsInvalid.push_back( "http://host/a%#frag" ); // space char in fragment, not in constructorTestsInvalid.push_back( "http://host#a frag" ); // escaped octet form, no path // illegal char in fragment constructorTestsInvalid.push_back( "http://host/a#fr#ag" ); // invalid escape sequence in fragment, constructorTestsInvalid.push_back( "http:///path#fr%ag" ); // with no host // incomplete escape sequence in 
fragment constructorTestsInvalid.push_back( "http://host/path#frag%" ); // space char in query, not in escaped octet form constructorTestsInvalid.push_back( "http://host/path?a query#frag" ); // invalid escape sequence in query, no path constructorTestsInvalid.push_back( "http://host?query%ag" ); // incomplete escape sequence in query, with no host constructorTestsInvalid.push_back( "http:///path?query%" ); // invalid char in scheme specific part constructorTestsInvalid.push_back( "mailto:user^name@fklkf.com" ); for( unsigned int i = 0; i < constructorTestsInvalid.size(); i++ ) { CPPUNIT_ASSERT_THROW_MESSAGE( string( "URI not caught as invalid: " ) + constructorTestsInvalid[i], URI( constructorTestsInvalid.at(i) ), URISyntaxException ); } std::vector<const char*> constructorTestsInvalid2; // authority validation constructorTestsInvalid2.push_back( "http://user@[3ffe:2x00:100:7031::1]:80/test" );// malformed // IPv6 authority constructorTestsInvalid2.push_back( "http://[ipv6address]/apath#frag" ); // malformed ipv6 address constructorTestsInvalid2.push_back( "http://[ipv6address/apath#frag" ); // malformed ipv6 address constructorTestsInvalid2.push_back( "http://ipv6address]/apath#frag" ); // illegal char in host name constructorTestsInvalid2.push_back( "http://ipv6[address/apath#frag" ); constructorTestsInvalid2.push_back( "http://ipv6addr]ess/apath#frag" ); constructorTestsInvalid2.push_back( "http://ipv6address[]/apath#frag" ); // illegal char in username... 
constructorTestsInvalid2.push_back( "http://us[]er@host/path?query#frag" ); constructorTestsInvalid2.push_back( "http://host name/path" ); // illegal // char in authority constructorTestsInvalid2.push_back( "http://host^name#fragment" ); // illegal char in authority constructorTestsInvalid2.push_back( "telnet://us er@hostname/" ); // illegal char in authority // missing components constructorTestsInvalid2.push_back( "//" ); // Authority expected constructorTestsInvalid2.push_back( "ascheme://" ); // Authority expected constructorTestsInvalid2.push_back( "ascheme:" ); // Scheme-specific part expected // scheme validation constructorTestsInvalid2.push_back( "a scheme://reg/" ); // illegal char constructorTestsInvalid2.push_back( "1scheme://reg/" ); // non alpha char as 1st char constructorTestsInvalid2.push_back( "asche\u00dfme:ssp" ); // unicode char , not USASCII constructorTestsInvalid2.push_back( "asc%20heme:ssp" );// escape octets for( unsigned int i = 0; i < constructorTestsInvalid2.size(); i++ ) { CPPUNIT_ASSERT_THROW_MESSAGE( string( "URI not caught as invalid: " ) + constructorTestsInvalid2[i], URI( constructorTestsInvalid2.at(i) ), URISyntaxException ); } try { URI("%3"); CPPUNIT_FAIL( "Assert 0: URI constructor failed to throw exception on invalid input." ); } catch( URISyntaxException& e ) { CPPUNIT_ASSERT_MESSAGE( "Assert 1: Wrong index in URISyntaxException.", 0 == e.getIndex() ); } // Regression test for HARMONY-25 // if port value is negative, the authority should be considered // registry-based. 
URI uri("http://host:-8096/path/index.html"); CPPUNIT_ASSERT_MESSAGE( "Assert 2: returned wrong port value,", -1 == uri.getPort() ); CPPUNIT_ASSERT_MESSAGE( "Assert 3: returned wrong host value,", uri.getHost() == "" ); CPPUNIT_ASSERT_THROW_MESSAGE( "Assert 4: Expected URISyntaxException: ", uri.parseServerAuthority(), URISyntaxException ); URI uri2( "http", "//myhost:-8096", "" ); CPPUNIT_ASSERT_MESSAGE( "Assert 5: returned wrong port value,", -1 == uri2.getPort() ); CPPUNIT_ASSERT_MESSAGE( "Assert 6: returned wrong host value,", uri2.getHost() == "" ); CPPUNIT_ASSERT_THROW_MESSAGE( "Assert 7: Expected URISyntaxException: ", uri.parseServerAuthority(), URISyntaxException ); } //////////////////////////////////////////////////////////////////////////////// void URITest::testURIString() { CPPUNIT_ASSERT_NO_THROW_MESSAGE( "Should Not Throw an Exception URI( \"\\\" )", URI uri( "/" ) ); try { URI myUri( ":abc@mymail.com" ); CPPUNIT_FAIL("TestA, URISyntaxException expected, but not received."); } catch( URISyntaxException& e ) { CPPUNIT_ASSERT_MESSAGE( "TestA, Wrong URISyntaxException index, ", 0 == e.getIndex()); } try { URI uri( "path[one" ); CPPUNIT_FAIL("TestB, URISyntaxException expected, but not received."); } catch( URISyntaxException& e1 ) { CPPUNIT_ASSERT_MESSAGE( "TestB, Wrong URISyntaxException index, ", 4 == e1.getIndex()); } try { URI uri( " " ); CPPUNIT_FAIL("TestC, URISyntaxException expected, but not received."); } catch( URISyntaxException& e2 ) { CPPUNIT_ASSERT_MESSAGE( "TestC, Wrong URISyntaxException index, ", 0 == e2.getIndex()); } } //////////////////////////////////////////////////////////////////////////////// void URITest::testConstructorThreeString() { URI uri( "mailto", "mduerst@ifi.unizh.ch", "" ); CPPUNIT_ASSERT_MESSAGE( "wrong userinfo", uri.getUserInfo() == "" ); CPPUNIT_ASSERT_MESSAGE( "wrong hostname", uri.getHost() == "" ); CPPUNIT_ASSERT_MESSAGE( "wrong authority", uri.getAuthority() == "" ); CPPUNIT_ASSERT_MESSAGE( "wrong port 
number", -1 == uri.getPort() ); CPPUNIT_ASSERT_MESSAGE( "wrong path", uri.getPath() == "" ); CPPUNIT_ASSERT_MESSAGE( "wrong query", uri.getQuery() == "" ); CPPUNIT_ASSERT_MESSAGE( "wrong fragment", uri.getFragment() == "" ); CPPUNIT_ASSERT_MESSAGE( "wrong SchemeSpecificPart", uri.getSchemeSpecificPart() == "mduerst@ifi.unizh.ch" ); // scheme specific part can not be null CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException: ", URI( "mailto", "", "" ), URISyntaxException ); // scheme needs to start with an alpha char CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException: ", URI( "3scheme", "//authority/path", "fragment" ), URISyntaxException ); // scheme can not be a colon CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException: ", URI( ":", "//authority/path", "fragment" ), URISyntaxException ); } //////////////////////////////////////////////////////////////////////////////// void URITest::testConstructorStringPlusInts() { // check for URISyntaxException for invalid Server Authority CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException: ", URI( "http", "user", "host\u00DFname", -1, "/file", "query", "fragment" ), URISyntaxException ); // escaped octets in host name CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException: ", URI( "http", "user", "host%20name", -1, "/file", "query", "fragment" ), URISyntaxException ); // illegal char in host name CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException: ", URI( "http", "user", "host name", -1, "/file", "query", "fragment" ), URISyntaxException ); // illegal char in host name CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException: ", URI( "http", "user", "host]name", -1, "/file", "query", "fragment" ), URISyntaxException ); // missing host name CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException: ", URI( "http", "user", "", 80, "/file", "query", "fragment" ), URISyntaxException ); // missing host name CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException: ", URI( "http", 
"user", "", -1, "/file", "query", "fragment" ), URISyntaxException ); // malformed ipv4 address CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException: ", URI( "telnet", "", "256.197.221.200", -1, "", "", "" ), URISyntaxException ); // malformed ipv4 address CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException: ", URI( "ftp", "", "198.256.221.200", -1, "", "", "" ), URISyntaxException ); // check for URISyntaxException for invalid scheme // escaped octets in scheme CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException: ", URI( "ht%20tp", "user", "hostname", -1, "/file", "query", "fragment" ), URISyntaxException ); // illegal char in scheme CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException: ", URI( "ht tp", "user", "hostname", -1, "/file", "query", "fragment" ), URISyntaxException ); // illegal char in scheme CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException: ", URI( "ht]tp", "user", "hostname", -1, "/file", "query", "fragment" ), URISyntaxException ); // relative path with scheme CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException: ", URI( "http", "user", "hostname", -1, "relative", "query", "fragment" ), URISyntaxException ); // functional test try { URI uri( "http", "us:e@r", "hostname", 85, "/file/dir#/qu?e/", "qu?er#y", "frag#me?nt" ); CPPUNIT_ASSERT_MESSAGE( "wrong userinfo", uri.getUserInfo() == "us:e@r" ); CPPUNIT_ASSERT_MESSAGE( "wrong hostname", uri.getHost() == "hostname" ); CPPUNIT_ASSERT_MESSAGE( "wrong port number", 85 == uri.getPort() ); CPPUNIT_ASSERT_MESSAGE( "wrong path", uri.getPath() =="/file/dir#/qu?e/" ); CPPUNIT_ASSERT_MESSAGE( "wrong query", uri.getQuery() == "qu?er#y" ); CPPUNIT_ASSERT_MESSAGE( "wrong fragment", uri.getFragment() == "frag#me?nt" ); CPPUNIT_ASSERT_MESSAGE( "wrong SchemeSpecificPart", uri.getSchemeSpecificPart() == "//us:e@r@hostname:85/file/dir#/qu?e/?qu?er#y" ); } catch( URISyntaxException& e ) { CPPUNIT_FAIL( "Unexpected Exception: " + e.getMessage() ); } } 
//////////////////////////////////////////////////////////////////////////////// void URITest::testConstructorFourString() { // relative path CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException: ", URI( "http", "www.joe.com", "relative", "jimmy" ), URISyntaxException ); // valid parameters for this constructor { URI uri( "http", "www.joe.com", "/path", "jimmy" ); } // illegal char in path { URI uri( "http", "www.host.com", "/path?q", "somefragment" ); } // empty fragment { URI uri( "ftp", "ftp.is.co.za", "/rfc/rfc1808.txt", "" ); } // path with escaped octet for unicode char, not USASCII { URI uri( "http", "host", "/a%E2%82%ACpath", "frag" ); } // All fields empty { URI uri( "", "", "", "" ); } // CPPUNIT_ASSERT_THROW_MESSAGE( // "Expected URISyntaxException: ", // URI( "http", ":2:3:4:5:6:7:8", "/apath", "\u20ACfrag" ), // URISyntaxException ); } //////////////////////////////////////////////////////////////////////////////// void URITest::testConstructorFiveString() { // URISyntaxException on relative path CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException: ", URI( "http", "www.joe.com", "relative", "query", "jimmy" ), URISyntaxException ); // test if empty authority is parsed into undefined host, userinfo and port // and if escaped octets in components are preserved, illegal chars are quoted URI uri("ht12-3+tp", "", "/p#a%E2%82%ACth", "q^u%25ery", "fragment"); CPPUNIT_ASSERT_MESSAGE( "wrong scheme", uri.getScheme() == "ht12-3+tp" ); CPPUNIT_ASSERT_MESSAGE( "wrong port number", -1 == uri.getPort() ); CPPUNIT_ASSERT_MESSAGE( "wrong path", uri.getPath() == "/p#a%E2%82%ACth" ); CPPUNIT_ASSERT_MESSAGE( "wrong query", uri.getQuery() == "q^u%25ery" ); CPPUNIT_ASSERT_MESSAGE( "wrong fragment", uri.getFragment() == "fragment" ); CPPUNIT_ASSERT_MESSAGE( string( "wrong SchemeSpecificPart: " ) + uri.getSchemeSpecificPart(), uri.getSchemeSpecificPart() == "///p#a%E2%82%ACth?q^u%25ery" ); CPPUNIT_ASSERT_MESSAGE( string( "wrong RawSchemeSpecificPart" ) + 
uri.getRawSchemeSpecificPart(), uri.getRawSchemeSpecificPart() == "///p%23a%25E2%2582%25ACth?q%5Eu%2525ery" ); CPPUNIT_ASSERT_MESSAGE( string( "incorrect toString()" ) + uri.toString(), uri.toString() == "ht12-3+tp:/p%23a%25E2%2582%25ACth?q%5Eu%2525ery#fragment" ); } //////////////////////////////////////////////////////////////////////////////// void URITest::testConstructorFiveString2() { // accept [] as part of valid ipv6 host name URI uri( "ftp", "[0001:1234::0001]", "/dir1/dir2", "query", "frag" ); CPPUNIT_ASSERT_MESSAGE( "Returned incorrect host", uri.getHost() == "[0001:1234::0001]" ); // do not accept [] as part of invalid ipv6 address CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException for invalid ipv6 address", URI( "ftp", "[www.abc.com]", "/dir1/dir2", "query", "frag" ), URISyntaxException ); // do not accept [] as part of user info CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException for invalid user info", URI( "ftp", "[user]@host", "/dir1/dir2", "query", "frag" ), URISyntaxException ); } //////////////////////////////////////////////////////////////////////////////// void URITest::testCompareToOne() { // compareTo tests const char* compareToData[34][2] = { // scheme tests { "http:test", "" }, // scheme null, scheme not null { "", "http:test" }, // reverse { "http:test", "ftp:test" }, // schemes different { "/test", "/test" }, // schemes null { "http://joe", "http://joe" }, // schemes same { "http://joe", "hTTp://joe" }, // schemes same ignoring case // opacity : one opaque, the other not { "http:opaque", "http://nonopaque" }, { "http://nonopaque", "http:opaque" }, { "mailto:abc", "mailto:abc" }, // same ssp { "mailto:abC", "mailto:Abc" }, // different, by case { "mailto:abc", "mailto:def" }, // different by letter { "mailto:abc#ABC", "mailto:abc#DEF" }, { "mailto:abc#ABC", "mailto:abc#ABC" }, { "mailto:abc#DEF", "mailto:abc#ABC" }, // hierarchical tests.. 
// different authorities { "//www.test.com/test", "//www.test2.com/test" }, { "/nullauth", "//nonnullauth/test" }, // one null authority { "//nonnull", "/null" }, { "/hello", "/hello" }, // both authorities null // different userinfo { "http://joe@test.com:80", "http://test.com" }, { "http://jim@test.com", "http://james@test.com" }, // different hostnames { "http://test.com", "http://toast.com" }, { "http://test.com:80", "test.com:87" }, // different ports { "http://test.com", "http://test.com:80" }, // different paths { "http://test.com:91/dir1", "http://test.com:91/dir2" }, // one null host { "http:/hostless", "http://hostfilled.com/hostless" }, // queries { "http://test.com/dir?query", "http://test.com/dir?koory" }, { "/test?query", "/test" }, { "/test", "/test?query" }, { "/test", "/test" }, // fragments { "ftp://test.com/path?query#frag", "ftp://test.com/path?query" }, { "ftp://test.com/path?query", "ftp://test.com/path?query#frag" }, { "#frag", "#frag" }, { "p", "" }, { "http://www.google.com", "#test" } // miscellaneous }; int compareToResults[34] = { 1, -1, 1, 0, 0, 0, 1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 1, 0, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1, 0, 1, -1, 0, 1, 1 }; // test compareTo functionality for( unsigned int i = 0; i < 34; i++ ) { URI b( compareToData[i][0] ); URI r( compareToData[i][1] ); if( b.compareTo( r ) != compareToResults[i] ) { CPPUNIT_FAIL( std::string("Test ") + decaf::lang::Integer::toString(i) + std::string(": ") + compareToData[i][0] + " compared to " + compareToData[i][1] + " -> " + decaf::lang::Integer::toString( b.compareTo(r) ) + " rather than " + decaf::lang::Integer::toString( compareToResults[i] ) ); } } } //////////////////////////////////////////////////////////////////////////////// void URITest::testCompareToTwo() { { // test URIs with host names with different casing URI uri1( "http://AbC.cOm/root/news" ); URI uri2( "http://aBc.CoM/root/news" ); CPPUNIT_ASSERT_MESSAGE( "TestA", 0 == uri1.compareTo( uri2 ) ); 
CPPUNIT_ASSERT_MESSAGE( "TestB", 0 == uri2.compareTo( uri1 ) ); } { // test URIs with one undefined component URI uri1( "http://abc.com:80/root/news" ); URI uri2( "http://abc.com/root/news" ); CPPUNIT_ASSERT_MESSAGE( "TestC", uri1.compareTo( uri2 ) > 0 ); CPPUNIT_ASSERT_MESSAGE( "TestD", uri2.compareTo( uri1 ) < 0 ); } { // test URIs with one undefined component URI uri1( "http://user@abc.com/root/news" ); URI uri2( "http://abc.com/root/news" ); CPPUNIT_ASSERT_MESSAGE( "TestE", uri1.compareTo( uri2 ) > 0 ); CPPUNIT_ASSERT_MESSAGE( "TestF", uri2.compareTo( uri1 ) < 0 ); } } //////////////////////////////////////////////////////////////////////////////// void URITest::testCreate() { CPPUNIT_ASSERT_THROW_MESSAGE( "IllegalArgumentException expected but not received.", URI::create( "a scheme://reg/" ), IllegalArgumentException ); } //////////////////////////////////////////////////////////////////////////////// void URITest::testEquals() { const char* equalsData[30][2] = { { "", "" }, // null frags { "/path", "/path#frag" }, { "#frag", "#frag2" }, { "#frag", "#FRag" }, // case insensitive on hex escapes { "#fr%4F", "#fr%4f" }, { "scheme:test", "scheme2:test" }, // scheme stuff { "test", "http:test" }, { "http:test", "test" }, { "SCheme:test", "schEMe:test" }, // hierarchical/opaque mismatch { "mailto:jim", "mailto://jim" }, { "mailto://test", "mailto:test" }, // opaque { "mailto:name", "mailto:name" }, { "mailtO:john", "mailto:jim" }, // test hex case insensitivity on ssp { "mailto:te%4Fst", "mailto:te%4fst" }, { "mailto:john#frag", "mailto:john#frag2" }, // hierarchical { "/test", "/test" }, // paths { "/te%F4st", "/te%f4st" }, { "/TEst", "/teSt" }, { "", "/test" }, // registry based because they don't resolve properly to // server-based add more tests here { "//host.com:80err", "//host.com:80e" }, { "//host.com:81e%Abrr", "//host.com:81e%abrr" }, { "/test", "//auth.com/test" }, { "//test.com", "/test" }, { "//test.com", "//test.com" }, // hosts // case insensitivity 
for hosts { "//HoSt.coM/", "//hOsT.cOm/" }, { "//te%ae.com", "//te%aE.com" }, { "//test.com:80", "//test.com:81" }, { "//joe@test.com:80", "//test.com:80" }, { "//jo%3E@test.com:82", "//jo%3E@test.com:82" }, { "//test@test.com:85", "//test@test.com" }, }; bool equalsResults[30] = { true, false, false, false, true, false, false, false, true, false, false, true, false, true, false, true, true, false, false, false, true, false, false, true, true, true, false, false, true, false, }; // test equals functionality for( int i = 0; i < 30; i++ ) { URI b( equalsData[i][0] ); URI r( equalsData[i][1] ); if( b.equals( r ) != equalsResults[i] ) { CPPUNIT_FAIL( std::string( "Error: " ) + equalsData[i][0] + std::string( " == " ) + equalsData[i][1] + "? -> " + Boolean::toString( b.equals( r ) ) + " expected " + Boolean::toString( equalsResults[i] ) ); } } } //////////////////////////////////////////////////////////////////////////////// void URITest::testEquals2() { { // test URIs with empty string authority URI uri1( "http:///~/dictionary" ); URI uri2( uri1.getScheme(), uri1.getAuthority(), uri1.getPath(), uri1.getQuery(), uri1.getFragment() ); CPPUNIT_ASSERT( uri2.equals( uri1 ) ); } { // test URIs with port number URI uri1( "http://abc.com%E2%82%AC:88/root/news" ); URI uri2( "http://abc.com%E2%82%AC/root/news" ); CPPUNIT_ASSERT( !uri1.equals( uri2 ) ); CPPUNIT_ASSERT( !uri2.equals( uri1 ) ); } { // test URIs with host names with different casing URI uri1( "http://AbC.cOm/root/news" ); URI uri2( "http://aBc.CoM/root/news" ); CPPUNIT_ASSERT( uri1.equals( uri2 ) ); CPPUNIT_ASSERT( uri2.equals( uri1 ) ); } } //////////////////////////////////////////////////////////////////////////////// void URITest::testGetAuthority() { std::vector<URI> uris = getUris(); const char* getAuthorityResults[] = { "user` info@host", "user%60%20info@host:80", "user%C3%9F%C2%A3info@host", "user` info@host:81", "user%info@host:0", "", "", "", "", "server.org", "reg:istry", "" }; for( unsigned int i = 0; i 
< uris.size(); i++) { std::string result = uris[i].getAuthority(); if( getAuthorityResults[i] != result ) { CPPUNIT_FAIL( std::string( "Error: For URI \"" ) + uris[i].toString() + std::string( "\"," ) + "\n" + string( "getAuthority() returned: " ) + result + "\n" + string( "expected: ") + getAuthorityResults[i] ); } } CPPUNIT_ASSERT( URI( "", "", "", 127, "", "", "" ).getAuthority() == "" ); } //////////////////////////////////////////////////////////////////////////////// void URITest::testGetAuthority2() { { // tests for URIs with empty string authority component URI uri( "file:///tmp/" ); CPPUNIT_ASSERT_MESSAGE( string( "Authority not empty for URI: " ) + uri.toString(), uri.getAuthority() == "" ); CPPUNIT_ASSERT_MESSAGE( string( "Host not empty for URI " ) + uri.toString(), uri.getHost() == "" ); CPPUNIT_ASSERT_MESSAGE( "testA, toString() returned incorrect value", string( "file:///tmp/" ) == uri.toString() ); } { URI uri( "file", "", "/tmp", "frag" ); CPPUNIT_ASSERT_MESSAGE( string( "Authority not null for URI: " ) + uri.toString(), uri.getAuthority() == "" ); CPPUNIT_ASSERT_MESSAGE( string( "Host not null for URI " ) + uri.toString(), uri.getHost() == "" ); CPPUNIT_ASSERT_MESSAGE( string( "testB, toString() returned incorrect value:" ) + uri.toString(), string( "file:/tmp#frag" ) == uri.toString() ); } { URI uri( "file", "", "/tmp", "query", "frag" ); CPPUNIT_ASSERT_MESSAGE( string( "Authority not null for URI: " ) + uri.toString(), uri.getAuthority() == "" ); CPPUNIT_ASSERT_MESSAGE( "Host not null for URI " + uri.toString(), uri.getHost() == "" ); CPPUNIT_ASSERT_MESSAGE( "test C, toString() returned incorrect value", string( "file:/tmp?query#frag" ) == uri.toString() ); } // after normalization the host string info may be lost since the // uri string is reconstructed URI uri( "file", "www.google.com", "/tmp/a/../b/c", "query", "frag" ); URI uri2 = uri.normalize(); CPPUNIT_ASSERT_MESSAGE( string( "Authority not null for URI: " ) + uri2.toString(), 
uri.getAuthority() == "www.google.com" ); CPPUNIT_ASSERT_MESSAGE( string( "Host not null for URI " ) + uri2.toString(), uri.getHost() == "www.google.com" ); CPPUNIT_ASSERT_MESSAGE( "test D, toString() returned incorrect value: " + uri.toString(), string( "file://www.google.com/tmp/a/../b/c?query#frag" ) == uri.toString() ); CPPUNIT_ASSERT_MESSAGE( "test E, toString() returned incorrect value: " + uri2.toString(), string( "file://www.google.com/tmp/b/c?query#frag" ) == uri2.toString() ); // the empty string host will give URISyntaxException // for the 7 arg constructor CPPUNIT_ASSERT_THROW_MESSAGE( "URISyntaxException expected but not received.", URI("file", "user", "", 80, "/path", "query", "frag"), URISyntaxException ); } //////////////////////////////////////////////////////////////////////////////// void URITest::testGetFragment() { std::vector<URI> uris = getUris(); const char* getFragmentResults[] = { "fr^ ag", "fr%5E%20ag", "fr%C3%A4%C3%A8g", "fr^ ag", "f%rag", "", "", "", "fragment", "", "", "" }; for( unsigned int i = 0; i < uris.size(); i++ ) { string result = uris[i].getFragment(); if( getFragmentResults[i] != result ) { CPPUNIT_FAIL( string( "Error: For URI \"" ) + uris[i].toString() + string( "\", getFragment() returned: " ) + result + string( ", expected: " ) + getFragmentResults[i] ); } } } //////////////////////////////////////////////////////////////////////////////// void URITest::testGetHost() { std::vector<URI> uris = getUris(); const char* getHostResults[] = { "host", "host", "host", "host", "host", "", "", "", "", "server.org", "", "" }; for( unsigned int i = 0; i < uris.size(); i++ ) { string result = uris[i].getHost(); if( getHostResults[i] != result ) { CPPUNIT_FAIL( string( "Error: For URI \"" ) + uris[i].toString() + string( "\", getHost() returned: " ) + result + string( ", expected: " ) + getHostResults[i] ); } } } //////////////////////////////////////////////////////////////////////////////// void URITest::testGetPath() { 
std::vector<URI> uris = getUris(); const char* getPathResults[] = { "/a path", "/a%20path", "/a%E2%82%ACpath", "/a path", "/a%path", "", "../adirectory/file.html", "", "", "", "", "/c:/temp/calculate.pl" }; for( unsigned int i = 0; i < uris.size(); i++ ) { string result = uris[i].getPath(); if( getPathResults[i] != result ) { CPPUNIT_FAIL( string( "Error: For URI \"" ) + uris[i].toString() + string( "\", getPath() returned: " ) + result + string( ", expected: " ) + getPathResults[i] ); } } } //////////////////////////////////////////////////////////////////////////////// void URITest::testGetPort() { std::vector<URI> uris = getUris(); int getPortResults[] = { -1, 80, -1, 81, 0, -1, -1, -1, -1, -1, -1, -1 }; for( unsigned int i = 0; i < uris.size(); i++ ) { int result = uris[i].getPort(); CPPUNIT_ASSERT_MESSAGE( string( "Error: For URI \"" ) + uris[i].toString() + string( "\", getPort() returned: " ) + Integer::toString( result ) + string( ", expected: " ) + Integer::toString( getPortResults[i] ), result == getPortResults[i] ); } } //////////////////////////////////////////////////////////////////////////////// void URITest::testGetPort2() { // if port value is negative, the authority should be considered registry based. 
URI uri( "http://myhost:-8096/site/index.html" ); CPPUNIT_ASSERT_MESSAGE( "TestA, returned wrong port value,", -1 == uri.getPort() ); CPPUNIT_ASSERT_MESSAGE( "TestA, returned wrong host value,", uri.getHost() == "" ); CPPUNIT_ASSERT_THROW_MESSAGE( "TestA - URISyntaxException expected but not received.", uri.parseServerAuthority(), URISyntaxException ); URI uri2( "http", "//myhost:-8096", "" ); CPPUNIT_ASSERT_MESSAGE( "TestB returned wrong port value,", -1 == uri2.getPort() ); CPPUNIT_ASSERT_MESSAGE( "TestB returned wrong host value,", uri2.getHost() == "" ); CPPUNIT_ASSERT_THROW_MESSAGE( "TestB - URISyntaxException expected but not received.", uri2.parseServerAuthority(), URISyntaxException ); } //////////////////////////////////////////////////////////////////////////////// void URITest::testGetQuery() { std::vector<URI> uris = getUris(); const char* getQueryResults[] = { "qu` ery", "qu%60%20ery", "qu%C2%A9%C2%AEery", "qu` ery", "que%ry", "", "", "", "", "", "query", "" }; for( unsigned int i = 0; i < uris.size(); i++ ) { string result = uris[i].getQuery(); if( getQueryResults[i] != result ) { CPPUNIT_FAIL( string( "Error: For URI \"" ) + uris[i].toString() + string( "\", getQuery() returned: " ) + result + string( ", expected: " ) + getQueryResults[i] ); } } } //////////////////////////////////////////////////////////////////////////////// void URITest::testGetRawAuthority() { std::vector<URI> uris = getUris(); const char* getRawAuthorityResults[] = { "user%60%20info@host", "user%2560%2520info@host:80", "user%25C3%259F%25C2%25A3info@host", "user%60%20info@host:81", "user%25info@host:0", "", "", "", "", "server.org", "reg:istry", "" }; for( unsigned int i = 0; i < uris.size(); i++ ) { string result = uris[i].getRawAuthority(); if( getRawAuthorityResults[i] != result ) { CPPUNIT_FAIL( string( "Error: For URI \"" ) + uris[i].toString() + string( "\", getRawAuthority() returned: " ) + result + string( ", expected: " ) + getRawAuthorityResults[i] ); } } } 
//////////////////////////////////////////////////////////////////////////////// void URITest::testGetRawFragment() { std::vector<URI> uris = getUris(); const char* getRawFragmentResults[] = { "fr%5E%20ag", "fr%255E%2520ag", "fr%25C3%25A4%25C3%25A8g", "fr%5E%20ag", "f%25rag", "", "", "", "fragment", "", "", "" }; for( unsigned int i = 0; i < uris.size(); i++ ) { string result = uris[i].getRawFragment(); if( getRawFragmentResults[i] != result ) { CPPUNIT_FAIL( string( "Error: For URI \"" ) + uris[i].toString() + string( "\", getRawFragment() returned: " ) + result + string( ", expected: " ) + getRawFragmentResults[i] ); } } } //////////////////////////////////////////////////////////////////////////////// void URITest::testGetRawPath() { std::vector<URI> uris = getUris(); const char* getRawPathResults[] = { "/a%20path", "/a%2520path", "/a%25E2%2582%25ACpath", "/a%20path", "/a%25path", "", "../adirectory/file.html", "", "", "", "", "/c:/temp/calculate.pl" }; for( unsigned int i = 0; i < uris.size(); i++ ) { string result = uris[i].getRawPath(); if( getRawPathResults[i] != result ) { CPPUNIT_FAIL( string( "Error: For URI \"" ) + uris[i].toString() + string( "\", getRawPath() returned: " ) + result + string( ", expected: " ) + getRawPathResults[i] ); } } } //////////////////////////////////////////////////////////////////////////////// void URITest::testGetRawQuery() { std::vector<URI> uris = getUris(); const char* getRawQueryResults[] = { "qu%60%20ery", "qu%2560%2520ery", "qu%25C2%25A9%25C2%25AEery", "qu%60%20ery", "que%25ry", "", "", "", "", "", "query", "" }; for( unsigned int i = 0; i < uris.size(); i++ ) { string result = uris[i].getRawQuery(); if( getRawQueryResults[i] != result ) { CPPUNIT_FAIL( string( "Error: For URI \"" ) + uris[i].toString() + string( "\", getRawQuery() returned: " ) + result + string( ", expected: " ) + getRawQueryResults[i] ); } } } //////////////////////////////////////////////////////////////////////////////// void 
URITest::testGetRawSchemeSpecificPart() { std::vector<URI> uris = getUris(); const char* getRawSspResults[] = { "//user%60%20info@host/a%20path?qu%60%20ery", "//user%2560%2520info@host:80/a%2520path?qu%2560%2520ery", "//user%25C3%259F%25C2%25A3info@host/a%25E2%2582%25ACpath?qu%25C2%25A9%25C2%25AEery", "//user%60%20info@host:81/a%20path?qu%60%20ery", "//user%25info@host:0/a%25path?que%25ry", "user@domain.com", "../adirectory/file.html", "comp.infosystems.www.servers.unix", "", "//server.org", "//reg:istry?query", "///c:/temp/calculate.pl?" }; for( unsigned int i = 0; i < uris.size(); i++ ) { string result = uris[i].getRawSchemeSpecificPart(); if( getRawSspResults[i] != result ) { CPPUNIT_FAIL( string( "Error: For URI[" ) + Integer::toString( i ) + string( "] \"" ) + uris[i].toString() + string( "\", getRawSchemeSpecificPart() returned: " ) + result + string( ", expected: " ) + getRawSspResults[i] ); } } } //////////////////////////////////////////////////////////////////////////////// void URITest::testGetRawUserInfo() { std::vector<URI> uris = getUris(); const char* getRawUserInfoResults[] = { "user%60%20info", "user%2560%2520info", "user%25C3%259F%25C2%25A3info", "user%60%20info", "user%25info", "", "", "", "", "", "", "" }; for( unsigned int i = 0; i < uris.size(); i++ ) { string result = uris[i].getRawUserInfo(); if( getRawUserInfoResults[i] != result ) { CPPUNIT_FAIL( string( "Error: For URI[" ) + Integer::toString( i ) + string( "] \"" ) + uris[i].toString() + string( "\", getRawUserInfo() returned: " ) + result + string( ", expected: " ) + getRawUserInfoResults[i] ); } } } //////////////////////////////////////////////////////////////////////////////// void URITest::testGetScheme() { std::vector<URI> uris = getUris(); const char* getSchemeResults[] = { "http", "http", "http", "http", "http", "mailto", "", "news", "", "telnet", "http", "file" }; for( unsigned int i = 0; i < uris.size(); i++ ) { string result = uris[i].getScheme(); if( getSchemeResults[i] != 
result ) { CPPUNIT_FAIL( string( "Error: For URI[" ) + Integer::toString( i ) + string( "] \"" ) + uris[i].toString() + string( "\", getScheme() returned: " ) + result + string( ", expected: " ) + getSchemeResults[i] ); } } } //////////////////////////////////////////////////////////////////////////////// void URITest::testGetSchemeSpecificPart() { std::vector<URI> uris = getUris(); const char* getSspResults[] = { "//user` info@host/a path?qu` ery", "//user%60%20info@host:80/a%20path?qu%60%20ery", "//user%C3%9F%C2%A3info@host/a%E2%82%ACpath?qu%C2%A9%C2%AEery", "//user` info@host:81/a path?qu` ery", "//user%info@host:0/a%path?que%ry", "user@domain.com", "../adirectory/file.html", "comp.infosystems.www.servers.unix", "", "//server.org", "//reg:istry?query", "///c:/temp/calculate.pl?" }; for( unsigned int i = 0; i < uris.size(); i++ ) { string result = uris[i].getSchemeSpecificPart(); if( getSspResults[i] != result ) { CPPUNIT_FAIL( string( "Error: For URI[" ) + Integer::toString( i ) + string( "] \"" ) + uris[i].toString() + string( "\", getSchemeSpecificPart() returned: " ) + result + string( ", expected: " ) + getSspResults[i] ); } } } //////////////////////////////////////////////////////////////////////////////// void URITest::testGetUserInfo() { std::vector<URI> uris = getUris(); const char* getUserInfoResults[] = { "user` info", "user%60%20info", "user%C3%9F%C2%A3info", "user` info", "user%info", "", "", "", "", "", "", "" }; for( unsigned int i = 0; i < uris.size(); i++ ) { string result = uris[i].getUserInfo(); if( getUserInfoResults[i] != result ) { CPPUNIT_FAIL( string( "Error: For URI[" ) + Integer::toString( i ) + string( "] \"" ) + uris[i].toString() + string( "\", getUserInfo() returned: " ) + result + string( ", expected: " ) + getUserInfoResults[i] ); } } } //////////////////////////////////////////////////////////////////////////////// void URITest::testIsAbsolute() { const char* isAbsoluteData[] = { "mailto:user@ca.ibm.com", "urn:isbn:123498989h", 
"news:software.ibm.com", "http://www.amazon.ca", "file:///d:/temp/results.txt", "scheme:ssp", "calculate.pl?isbn=123498989h", "?isbn=123498989h", "//www.amazon.ca", "a.html", "#top", "//pc1/", "//user@host/path/file" }; bool results[] = { true, true, true, true, true, true, false, false, false, false, false, false, false }; for( unsigned int i = 0; i < 13; i++) { bool result = URI( isAbsoluteData[i] ).isAbsolute(); CPPUNIT_ASSERT_MESSAGE( string( "URI(" ) + isAbsoluteData[i] + string( ").isAbsolute() = " ) + Boolean::toString( result ), results[i] == result ); } } //////////////////////////////////////////////////////////////////////////////// void URITest::testIsOpaque() { const char* isOpaqueData[] = { "mailto:user@ca.ibm.com", "urn:isbn:123498989h", "news:software.ibm.com", "http://www.amazon.ca", "file:///d:/temp/results.txt", "scheme:ssp", "calculate.pl?isbn=123498989h", "?isbn=123498989h", "//www.amazon.ca", "a.html", "#top", "//pc1/", "//user@host/path/file" }; bool results[] = { true, true, true, false, false, true, false, false, false, false, false, false, false }; for( unsigned int i = 0; i < 13; i++) { bool result = URI( isOpaqueData[i] ).isOpaque(); CPPUNIT_ASSERT_MESSAGE( string( "URI(" ) + isOpaqueData[i] + string( ").isOpaque() = " ) + Boolean::toString( result ), results[i] == result ); } } //////////////////////////////////////////////////////////////////////////////// void URITest::testNormalize() { const char* normalizeData[] = { // normal "/", "/a", "/a/b", "/a/b/c", // single, '.' "/.", "/./", "/./.", "/././", "/./a", "/./a/", "/././a", "/././a/", "/a/.", "/a/./", "/a/./.", "/a/./b", // double, '..' 
"/a/..", "/a/../", "/a/../b", "/a/../b/..", "/a/../b/../", "/a/../b/../c", "/..", "/../", "/../..", "/../../", "/../a", "/../a/", "/../../a", "/../../a/", "/a/b/../../c", "/a/b/../..", "/a/b/../../", "/a/b/../../c", "/a/b/c/../../../d", "/a/b/..", "/a/b/../", "/a/b/../c", // miscellaneous "/a/b/.././../../c/./d/../e", "/a/../../.c././../././c/d/../g/..", // '.' in the middle of segments "/a./b", "/.a/b", "/a.b/c", "/a/b../c", "/a/..b/c", "/a/b..c/d", // no leading slash, miscellaneous "", "a", "a/b", "a/b/c", "../", ".", "..", "../g", "g/a/../../b/c/./g", "a/b/.././../../c/./d/../e", "a/../../.c././../././c/d/../g/..", }; const char* normalizeResults[] = { "/", "/a", "/a/b", "/a/b/c", "/", "/", "/", "/", "/a", "/a/", "/a", "/a/", "/a/", "/a/", "/a/", "/a/b", "/", "/", "/b", "/", "/", "/c", "/..", "/../", "/../..", "/../../", "/../a", "/../a/", "/../../a", "/../../a/", "/c", "/", "/", "/c", "/d", "/a/", "/a/", "/a/c", "/../c/e", "/../c/", "/a./b", "/.a/b", "/a.b/c", "/a/b../c", "/a/..b/c", "/a/b..c/d", "", "a", "a/b", "a/b/c", "../", "", "..", "../g", "b/c/g", "../c/e", "../c/", }; for( unsigned int i = 0; i < 57; i++) { URI test( normalizeData[i] ); std::string result = test.normalize().toString(); CPPUNIT_ASSERT_MESSAGE( string( "URI(" ) + normalizeData[i] + string( ") normalized incorrectly to := " ) + result, result == normalizeResults[i] ); } } //////////////////////////////////////////////////////////////////////////////// void URITest::testNormalize2() { URI uri1( "file:/D:/one/two/../../three" ); URI uri2 = uri1.normalize(); CPPUNIT_ASSERT_MESSAGE( "Normalized to incorrect URI: " + uri2.toString(), string( "file:/D:/three" ) == uri2.toString() ); CPPUNIT_ASSERT_MESSAGE( "Resolved URI is not absolute", uri2.isAbsolute() ); CPPUNIT_ASSERT_MESSAGE( "Resolved URI is opaque", !uri2.isOpaque() ); CPPUNIT_ASSERT_MESSAGE( "Resolved URI has incorrect scheme specific part", string( "/D:/three" ) == uri2.getRawSchemeSpecificPart() ); } 
//////////////////////////////////////////////////////////////////////////////// void URITest::testNormalize3() { // return same URI if it has a normalized path already URI uri1( "http://host/D:/one/two/three" ); URI uri2 = uri1.normalize(); CPPUNIT_ASSERT_MESSAGE( "Failed to return same URI after normalization", uri1.toString() == uri2.toString() ); // try with empty path URI uri3( "http", "host", "", "fragment" ); URI uri4 = uri3.normalize(); CPPUNIT_ASSERT_MESSAGE( "Failed to return same URI after normalization", uri3.toString() == uri4.toString() ); } //////////////////////////////////////////////////////////////////////////////// void URITest::testParseServerAuthority() { // registry based uris std::vector<URI> uris; // port number not digits uris.push_back( URI( "http://foo:bar/file#fragment" ) ); uris.push_back( URI( "http", "//foo:bar/file", "fragment" ) ); // escaped octets in host name uris.push_back( URI( "http://host%20name/" ) ); uris.push_back( URI( "http", "//host%20name/", "" ) ); // missing host name, port number uris.push_back( URI( "http://joe@:80" ) ); // missing host name, no port number uris.push_back( URI( "http://user@/file?query#fragment" ) ); uris.push_back( URI( "//host.com:80err") ); // malformed port number uris.push_back( URI( "//host.com:81e%Abrr") ); // malformed ipv4 address uris.push_back( URI( "telnet", "//256.197.221.200", "" ) ); uris.push_back( URI( "telnet://198.256.221.200") ); uris.push_back( URI( "//te%ae.com") ); // misc .. 
uris.push_back( URI( "//:port") ); uris.push_back( URI( "//:80" ) ); // last label has to start with alpha char uris.push_back( URI( "//fgj234fkgj.jhj.123.") ); uris.push_back( URI( "//fgj234fkgj.jhj.123") ); // '-' cannot be first or last character in a label uris.push_back( URI( "//-domain.name" ) ); uris.push_back( URI( "//domain.name-" ) ); uris.push_back( URI( "//domain-") ); // illegal char in host name uris.push_back( URI( "//doma*in") ); // host expected uris.push_back( URI( "http://:80/") ); uris.push_back( URI( "http://user@/" ) ); // ipv6 address not enclosed in "[]" uris.push_back( URI( "http://3ffe:2a00:100:7031:22:1:80:89/" ) ); // expected ipv6 addresses to be enclosed in "[]" uris.push_back( URI( "http", "34::56:78", "/path", "query", "fragment" ) ); // expected host uris.push_back( URI( "http", "user@", "/path", "query", "fragment" ) ); // these URIs do not have valid server based authorities, // but single arg, 3 and 5 arg constructors // parse them as valid registry based authorities // exception should occur when parseServerAuthority is // requested on these uris for( unsigned int i = 0; i < uris.size(); i++ ) { try { URI uri = uris[i].parseServerAuthority(); CPPUNIT_FAIL( string( "URISyntaxException expected but not received for URI: " ) + uris[i].toString()); } catch( URISyntaxException& e ) { // Expected } } // valid Server based authorities URI( "http", "3ffe:2a00:100:7031:2e:1:80:80", "/path", "fragment" ).parseServerAuthority(); URI( "http", "host:80", "/path", "query", "fragment" ).parseServerAuthority(); URI( "http://[::3abc:4abc]:80/" ).parseServerAuthority(); URI( "http", "34::56:78", "/path", "fragment" ).parseServerAuthority(); URI( "http", "[34:56::78]:80", "/path", "fragment" ).parseServerAuthority(); // invalid authorities (neither server nor registry) CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException for URI ", URI( "http://us[er@host:80/" ), URISyntaxException ); CPPUNIT_ASSERT_THROW_MESSAGE( "Expected 
URISyntaxException for URI ", URI( "http://[ddd::hgghh]/" ), URISyntaxException ); CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException for URI ", URI( "http", "[3ffe:2a00:100:7031:2e:1:80:80]a:80", "/path", "fragment" ), URISyntaxException ); CPPUNIT_ASSERT_THROW_MESSAGE( "Expected URISyntaxException for URI ", URI( "http", "host:80", "/path", "fragment" ), URISyntaxException ); CPPUNIT_ASSERT_NO_THROW( URI::create("file://C:/1.txt").parseServerAuthority() ); } //////////////////////////////////////////////////////////////////////////////// void URITest::testRelativizeLURI() { // relativization tests const char* relativizeData[][2] = { // first is base, second is the one to relativize { "http://www.google.com/dir1/dir2", "mailto:test" }, // rel = opaque { "mailto:test", "http://www.google.com" }, // base = opaque // different authority { "http://www.eclipse.org/dir1", "http://www.google.com/dir1/dir2" }, // different scheme { "http://www.google.com", "ftp://www.google.com" }, { "http://www.google.com/dir1/dir2/", "http://www.google.com/dir3/dir4/file.txt" }, { "http://www.google.com/dir1/", "http://www.google.com/dir1/dir2/file.txt" }, { "./dir1/", "./dir1/hi" }, { "/dir1/./dir2", "/dir1/./dir2/hi" }, { "/dir1/dir2/..", "/dir1/dir2/../hi" }, { "/dir1/dir2/..", "/dir1/dir2/hi" }, { "/dir1/dir2/", "/dir1/dir3/../dir2/text" }, { "//www.google.com", "//www.google.com/dir1/file" }, { "/dir1", "/dir1/hi" }, { "/dir1/", "/dir1/hi" }, }; // expected results const char* relativizeResults[] = { "mailto:test", "http://www.google.com", "http://www.google.com/dir1/dir2", "ftp://www.google.com", "http://www.google.com/dir3/dir4/file.txt", "dir2/file.txt", "hi", "hi", "hi", "dir2/hi", "text", "dir1/file", "hi", "hi", }; for( unsigned int i = 0; i < 14; i++ ) { try { URI b( relativizeData[i][0] ); URI r( relativizeData[i][1] ); if( ( b.relativize( r ) ).toString() != relativizeResults[i] ) { CPPUNIT_FAIL( string( "Error: relativize, " ) + relativizeData[i][0] + string( ", " 
) + relativizeData[i][1] + " returned: " + b.relativize( r ).toString() + ", expected:" + relativizeResults[i] ); } } catch( URISyntaxException& e ) { CPPUNIT_FAIL( string( "Exception on relativize test on data " ) + relativizeData[i][0] + ", " + relativizeData[i][1] + string( ": " ) + e.getMessage() ); } } { URI a( "http://host/dir" ); URI b( "http://host/dir/file?query" ); CPPUNIT_ASSERT_MESSAGE( string( "Assert 0: URI relativized incorrectly: " ) + a.relativize( b ).toString(), URI( "file?query" ).equals( a.relativize( b ) ) ); } { // One URI with empty host URI a( "file:///~/first" ); URI b( "file://tools/~/first" ); CPPUNIT_ASSERT_MESSAGE("Assert 1: URI relativized incorrectly,", URI( "file://tools/~/first" ).equals( a.relativize( b ) ) ); CPPUNIT_ASSERT_MESSAGE("Assert 2: URI relativized incorrectly,", URI( "file:///~/first" ).equals( b.relativize( a ) ) ); } { // Both URIs with empty hosts URI a( "file:///~/first" ); URI b( "file:///~/second" ); CPPUNIT_ASSERT_MESSAGE( "Assert 3: URI relativized incorrectly,", URI( "file:///~/second").equals( a.relativize( b ) ) ); CPPUNIT_ASSERT_MESSAGE("Assert 4: URI relativized incorrectly,", URI( "file:///~/first").equals( b.relativize( a ) ) ); } } //////////////////////////////////////////////////////////////////////////////// void URITest::testRelativize2() { { URI a( "http://host/dir" ); URI b( "http://host/dir/file?query" ); CPPUNIT_ASSERT_MESSAGE( "relativized incorrectly,", URI( "file?query" ).equals( a.relativize( b ) ) ); } { // one URI with empty host URI a( "file:///~/dictionary" ); URI b( "file://tools/~/dictionary" ); CPPUNIT_ASSERT_MESSAGE( "relativized incorrectly,", URI( "file://tools/~/dictionary" ).equals( a.relativize( b ) ) ); CPPUNIT_ASSERT_MESSAGE("relativized incorrectly,", URI( "file:///~/dictionary" ).equals( b.relativize( a ) ) ); } { // two URIs with empty hosts URI a( "file:///~/dictionary" ); URI b( "file:///~/therasus" ); CPPUNIT_ASSERT_MESSAGE( "relativized incorrectly,", URI( 
"file:///~/therasus" ).equals( a.relativize( b ) ) ); CPPUNIT_ASSERT_MESSAGE( "relativized incorrectly,", URI( "file:///~/dictionary").equals( b.relativize( a ) ) ); } { URI one( "file:/C:/test/ws" ); URI two( "file:/C:/test/ws" ); URI empty(""); CPPUNIT_ASSERT( empty.equals( one.relativize( two ) ) ); } { URI one( "file:/C:/test/ws" ); URI two( "file:/C:/test/ws/p1" ); URI result( "p1" ); CPPUNIT_ASSERT( result.equals( one.relativize( two ) ) ); } { URI one( "file:/C:/test/ws/" ); URI two( "file:/C:/test/ws/p1" ); URI result( "p1" ); CPPUNIT_ASSERT( result.equals( one.relativize( two ) ) ); } { URI uri( "file", "", "/test/location", "" ); URI base( "file", "", "/test", "" ); URI relative = base.relativize( uri ); CPPUNIT_ASSERT( string( "location" ) == relative.getSchemeSpecificPart() ); CPPUNIT_ASSERT( relative.getScheme() == "" ); } } //////////////////////////////////////////////////////////////////////////////// void URITest::testResolve() { URI uri1( "file:/D:/one/two/three" ); URI uri2 = uri1.resolve( URI( ".." 
) ); CPPUNIT_ASSERT_MESSAGE( string( "Resolved to incorrect URI: " ) + uri2.toString(), string( "file:/D:/one/" ) == uri2.toString() ); CPPUNIT_ASSERT_MESSAGE( "Resolved URI is not absolute", uri2.isAbsolute()); CPPUNIT_ASSERT_MESSAGE( "Resolved URI is opaque", !uri2.isOpaque() ); CPPUNIT_ASSERT_MESSAGE( string( "Resolved URI has incorrect scheme specific part" ) + uri2.getRawSchemeSpecificPart(), string( "/D:/one/" ) == uri2.getRawSchemeSpecificPart() ); } //////////////////////////////////////////////////////////////////////////////// void URITest::testResolveURI() { // resolution tests const char* resolveData[12][2] = { // authority in given URI { "http://www.test.com/dir", "//www.test.com/hello?query#fragment" }, // no authority, absolute path { "http://www.test.com/dir", "/abspath/file.txt" }, // no authority, relative paths { "/", "dir1/file.txt" }, { "/dir1", "dir2/file.txt" }, { "/dir1/", "dir2/file.txt" }, { "", "dir1/file.txt" }, { "dir1", "dir2/file.txt" }, { "dir1/", "dir2/file.txt" }, // normalization required { "/dir1/dir2/../dir3/./", "dir4/./file.txt" }, // allow a standalone fragment to be resolved { "http://www.google.com/hey/joe?query#fragment", "#frag2" }, // return given when base is opaque { "mailto:idontexist@uk.ibm.com", "dir1/dir2" }, // return given when given is absolute { "http://www.google.com/hi/joe", "http://www.oogle.com" }, }; // expected results const char* resolveResults[] = { "http://www.test.com/hello?query#fragment", "http://www.test.com/abspath/file.txt", "/dir1/file.txt", "/dir2/file.txt", "/dir1/dir2/file.txt", "dir1/file.txt", "dir2/file.txt", "dir1/dir2/file.txt", "/dir1/dir3/dir4/file.txt", "http://www.google.com/hey/joe?query#frag2", "dir1/dir2", "http://www.oogle.com", }; for( unsigned int i = 0; i < 12; i++ ) { try { URI b( resolveData[i][0] ); URI r( resolveData[i][1] ); URI result = b.resolve( r ); if( result.toString() != resolveResults[i] ) { CPPUNIT_FAIL( string( "Error: resolve at iteration: " ) + 
Integer::toString( i ) + ": \n" + resolveData[i][0] + ", " + resolveData[i][1] + string( "\nreturned: " ) + b.resolve( r ).toString() + string( "\nexpected: " ) + resolveResults[i] ); } if( !b.isOpaque() ) { CPPUNIT_ASSERT_MESSAGE( b.toString() + " and " + result.toString() + " incorrectly differ in absoluteness", b.isAbsolute() == result.isAbsolute() ); } } catch( URISyntaxException& e ) { CPPUNIT_FAIL( string( "Exception on resolve test on data " ) + resolveData[i][0] + ", " + resolveData[i][1] + ": " + e.getMessage() ); } } } //////////////////////////////////////////////////////////////////////////////// void URITest::testToString() { std::vector<URI> uris = getUris(); const char* toStringResults[] = { "http://user%60%20info@host/a%20path?qu%60%20ery#fr%5E%20ag", "http://user%2560%2520info@host:80/a%2520path?qu%2560%2520ery#fr%255E%2520ag", "http://user%25C3%259F%25C2%25A3info@host/a%25E2%2582%25ACpath?qu%25C2%25A9%25C2%25AEery#fr%25C3%25A4%25C3%25A8g", "http://user%60%20info@host:81/a%20path?qu%60%20ery#fr%5E%20ag", "http://user%25info@host:0/a%25path?que%25ry#f%25rag", "mailto:user@domain.com", "../adirectory/file.html#", "news:comp.infosystems.www.servers.unix", "#fragment", "telnet://server.org", "http://reg:istry?query", "file:///c:/temp/calculate.pl?" 
}; for( unsigned int i = 0; i < uris.size(); i++ ) { std::string result = uris[i].toString(); CPPUNIT_ASSERT_MESSAGE( string( "Error: For URI \"" ) + uris[i].toString() + string( "\", toString() returned: \n" ) + result + string( "\n, expected: \n" ) + toStringResults[i], result == toStringResults[i] ); } } //////////////////////////////////////////////////////////////////////////////// void URITest::testToURL() { const char* absoluteuris[] = { "mailto:noreply@apache.org", "urn:isbn:123498989h", "news:software.ibm.com", "http://www.apache.org", "file:///d:/temp/results.txt", "scheme:ssp", }; const char* relativeuris[] = { "calculate.pl?isbn=123498989h", "?isbn=123498989h", "//www.apache.org", "a.html", "#top", "//pc1/", "//user@host/path/file" }; for( int i = 0; i < 6; i++ ) { try { URI( absoluteuris[i] ).toURL(); } catch( MalformedURLException& e ) { // not all the URIs can be translated into valid URLs } } for( int i = 0; i < 7; i++ ) { try { URI( relativeuris[i] ).toURL(); CPPUNIT_FAIL( "Expected IllegalArgumentException not thrown" ); } catch( IllegalArgumentException& e ) { // Expected } } }
cedricvidal/activemq-cpp-debian
src/test/decaf/net/URITest.cpp
C++
apache-2.0
65,521
<?php defined('BASEPATH') or exit('No direct script access allowed'); $lang['streams:integer.name'] = "Entier";
sanayaCorp/bpkad
system/cms/modules/streams_core/field_types/integer/language/french/integer_lang.php
PHP
apache-2.0
118
/* * Waltz - Enterprise Architecture * Copyright (C) 2016, 2017, 2018, 2019 Waltz open source project * See README.md for more information * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific * */ import _ from "lodash"; import {CORE_API} from "../../../common/services/core-api-utils"; import {buildHierarchies, doSearch, prepareSearchNodes} from "../../../common/hierarchy-utils"; import template from "./data-type-usage-count-tree.html"; import {mkAuthoritativeRatingSchemeItems} from "../../../ratings/rating-utils"; const bindings = { onSelection: "<" }; function ratingToRag(r) { switch(r){ case "PRIMARY": return "G"; case "SECONDARY": return "A"; case "DISCOURAGED": return "R"; case "NO_OPINION": return "Z"; default: return r; } } function prepareTree(dataTypes = [], usageCounts = []) { const dataTypesById = _.keyBy(dataTypes, "id"); _.chain(usageCounts) .filter(uc => uc.decoratorEntityReference.kind === "DATA_TYPE") .filter(uc => ! 
_.isNil(dataTypesById[uc.decoratorEntityReference.id])) .forEach(uc => { const dtId = uc.decoratorEntityReference.id; const dt = dataTypesById[dtId]; const rag = ratingToRag(uc.rating); dt.directCounts = Object.assign( {}, dt.directCounts, { [rag] : uc.count }); }) .value(); const hierarchy = buildHierarchies(_.values(dataTypesById), false); const sumBy = (rating, n) => { if (!n) return 0; const childTotals = _.sum(_.map(n.children, c => sumBy(rating, c))); const total = childTotals + _.get(n, `directCounts.${rating}`, 0); n.cumulativeCounts = Object.assign({}, n.cumulativeCounts, { [rating] : total }); return total; }; _.forEach(hierarchy, root => { const R = sumBy("R", root); const A = sumBy("A", root); const G = sumBy("G", root); const Z = sumBy("Z", root); root.cumulativeCounts = { R, A, G, Z, total: R + A + G + Z }; }); return hierarchy; } function prepareExpandedNodes(hierarchy = []) { return hierarchy.length < 6 // pre-expand small trees ? _.clone(hierarchy) : []; } function controller(displayNameService, serviceBroker) { const vm = this; vm.$onInit = () => { vm.ratingSchemeItems = mkAuthoritativeRatingSchemeItems(displayNameService); serviceBroker .loadAppData(CORE_API.DataTypeStore.findAll, []) .then(r => { vm.dataTypes = r.data; vm.searchNodes = prepareSearchNodes(vm.dataTypes); }) .then(() => serviceBroker.loadViewData(CORE_API.LogicalFlowDecoratorStore.summarizeInboundForAll)) .then(r => { vm.hierarchy = prepareTree(vm.dataTypes, r.data); vm.maxTotal = _ .chain(vm.hierarchy) .map("cumulativeCounts.total") .max() .value(); }); }; vm.treeOptions = { nodeChildren: "children", dirSelectable: true, equality: (a, b) => a && b && a.id === b.id }; vm.searchTermsChanged = (termStr = "") => { const matchingNodes = doSearch(termStr, vm.searchNodes); vm.hierarchy = prepareTree(matchingNodes); vm.expandedNodes = prepareExpandedNodes(vm.hierarchy); }; vm.clearSearch = () => { vm.searchTermsChanged(""); vm.searchTerms = ""; }; } controller.$inject = [ 
"DisplayNameService", "ServiceBroker" ]; const component = { bindings, template, controller }; const id = "waltzDataTypeUsageCountTree"; export default { id, component }
rovats/waltz
waltz-ng/client/data-types/components/usage-count-tree/data-type-usage-count-tree.js
JavaScript
apache-2.0
4,378
#include <iostream> #include <stdlib.h> #include <stdio.h> #include <vector> #include <map> #include <algorithm> #include <set> #include <string> #include <cstring> using namespace std; int main() { string line, res; int index = 1; while(cin >> line, line != "*") { if(line == "Hajj") res = "Hajj-e-Akbar"; else res = "Hajj-e-Asghar"; cout << "Case " << index++ << ": " << res << endl; } return 0; }
diugalde/CompetitiveProgramming
uva/12557_hajjAkbar.cpp
C++
apache-2.0
445
package sk.upjs.calltree; import java.awt.*; /** * Configuration settings for call tree visualization. Any changes of * configuration must be realized before building a call tree. */ public class Config { /** * Collection of precomputed colors for a basic color. */ private static class ColorCollection { final Color basicColor; final Color lighter; final Color darker; public ColorCollection(Color color) { basicColor = color; float hsbVals[] = Color.RGBtoHSB(color.getRed(), color.getGreen(), color.getBlue(), null); lighter = Color.getHSBColor(hsbVals[0], hsbVals[1], 0.5f * (1f + hsbVals[2])); darker = Color.getHSBColor(hsbVals[0], hsbVals[1], 0.8f * hsbVals[2]); } } /** * Padding of box displaying a method call and its execution. */ private int boxPadding = 7; /** * Horizontal space between neighboring call subtrees. */ private int hSpace = 9; /** * Vertical space between box and child call subtrees. */ private int vSpace = 30; /** * Padding of the visualization pane. */ private int globalPadding = 10; /** * Font for text in the visualization pane. */ private Font font = new Font(Font.SANS_SERIF, Font.PLAIN, 14); /** * List of colors used to visually distinguish different methods in a call * tree. */ private ColorCollection[] methodColors; /** * Color collection for selected node. */ private ColorCollection selectedColor; /** * Color for printing returned values. */ private Color returnValueColor; /** * Indicates that configuration changes are not allowed. */ private boolean locked = false; /** * Constructs default configuration. */ public Config() { setReturnValueColor(Color.blue); setSelectedColor(Color.GRAY); setMethodColors(new Color[] { new Color(244, 244, 244), new Color(222, 184, 135), new Color(255, 246, 143), new Color(245, 245, 220), new Color(127, 255, 212) }); } /** * Checks whether configuration changes are allowed. 
*/ private void checkLock() { if (locked) throw new RuntimeException("Configuration can be changed only before building of a call tree."); } /** * Locks any configuration changes. */ synchronized void lockChanges() { locked = true; } /** * Returns padding of box displaying a method call and its execution. * * @return the box padding in pixels. */ public synchronized int getBoxPadding() { return boxPadding; } /** * Sets padding of box displaying a method call and its execution. * * @param boxPadding * the desired padding. */ public synchronized void setBoxPadding(int boxPadding) { checkLock(); this.boxPadding = boxPadding; } /** * Returns horizontal space between neighboring call subtrees. * * @return the horizontal space in pixels. */ public synchronized int getHSpace() { return hSpace; } /** * Sets horizontal space between neighboring call subtrees. * * @param hSpace * the desired horizontal space. */ public synchronized void setHSpace(int hSpace) { checkLock(); this.hSpace = hSpace; } /** * Returns vertical space between box and child call subtrees. * * @return the vertical space in pixels. */ public synchronized int getVSpace() { return vSpace; } /** * Sets vertical space between box and child call subtrees. * * @param vSpace * the desired vertical space. */ public synchronized void setVSpace(int vSpace) { checkLock(); this.vSpace = vSpace; } /** * Returns padding of the visualization pane. * * @return the padding. */ public synchronized int getGlobalPadding() { return globalPadding; } /** * Sets padding of the visualization pane. * * @param globalPadding * the desired padding. */ public synchronized void setGlobalPadding(int globalPadding) { checkLock(); this.globalPadding = globalPadding; } /** * Returns font used for texts in the visualization pane. * * @return the font. */ public synchronized Font getFont() { return font; } /** * Sets font for texts in the visualization pane. * * @param font * the desired font. 
*/ public synchronized void setFont(Font font) { if (font == null) { throw new RuntimeException("Font cannot be null."); } checkLock(); this.font = font; } /** * Sets colors used to distinguish different methods in call trees. * * @param colors * non-empty array of collors */ public synchronized void setMethodColors(Color[] colors) { if ((colors == null) || (colors.length == 0)) { throw new RuntimeException("Colors array must contain at least one color."); } for (Color c : colors) if (c == null) { throw new RuntimeException("Color cannot be null."); } checkLock(); methodColors = new ColorCollection[colors.length]; for (int i = 0; i < methodColors.length; i++) methodColors[i] = new ColorCollection(colors[i]); } /** * Returns colors used to distinguish different methods in call trees. * * @return array of colors */ public synchronized Color[] getMethodColors() { Color[] colors = new Color[methodColors.length]; int idx = 0; for (ColorCollection cc : methodColors) { colors[idx] = cc.basicColor; idx++; } return colors; } /** * Sets color used for selected nodes. * * @param c * the color */ public synchronized void setSelectedColor(Color c) { if (c == null) { throw new RuntimeException("Color cannot be null."); } checkLock(); selectedColor = new ColorCollection(c); } /** * Returns color used for selected nodes. * * @return color for selected nodes */ public synchronized Color getSelectedColor() { return selectedColor.basicColor; } /** * Returns color of returned values. * * @return the color of returned values. */ public synchronized Color getReturnValueColor() { return returnValueColor; } /** * Sets color of returned values. * * @param c * the color of returned values. */ public synchronized void setReturnValueColor(Color c) { if (c == null) { throw new RuntimeException("Color cannot be null."); } checkLock(); this.returnValueColor = c; } /** * Creates a background paint for box of a method call with given * "category". * * @return the paint. 
*/ synchronized Paint createMethodCallBgPaint(int methodIdx, Rectangle methodCallBox) { if (methodIdx >= 0) { methodIdx = methodIdx % methodColors.length; ColorCollection cc = methodColors[methodIdx]; return new GradientPaint(0, methodCallBox.y, cc.lighter, 0, methodCallBox.y + methodCallBox.height, cc.darker); } else { return Color.white; } } /** * Creates a background paint for box of a method call with given "category" * for preview drawing. * * @return the paint. */ synchronized Paint createMethodCallBgPreviewPaint(int methodIdx, Rectangle methodCallBox) { if (methodIdx >= 0) { methodIdx = methodIdx % methodColors.length; ColorCollection cc = methodColors[methodIdx]; return cc.basicColor; } else { return Color.white; } } /** * Creates a background paint for selected box of a method call. * * @return the paint. */ synchronized Paint createSelectedBgPaint(Rectangle methodCallBox) { return new GradientPaint(0, methodCallBox.y, selectedColor.lighter, 0, methodCallBox.y + methodCallBox.height, selectedColor.darker); } }
ics-upjs/CallTree
main/src/main/java/sk/upjs/calltree/Config.java
Java
apache-2.0
7,807
package org.zkoss.zss.essential.events;

import org.zkoss.zk.ui.Component;
import org.zkoss.zk.ui.select.SelectorComposer;
import org.zkoss.zk.ui.select.annotation.Listen;
import org.zkoss.zk.ui.select.annotation.Wire;
import org.zkoss.zss.ui.event.HeaderMouseEvent;
import org.zkoss.zul.Menupopup;

/**
 * Demonstrates handling of spreadsheet header mouse events by popping up a
 * context menu at the click location.
 * @author Hawk
 *
 */
@SuppressWarnings("serial")
public class MouseEventsComposer extends SelectorComposer<Component> {

	@Wire
	private Menupopup topHeaderMenu;

	@Wire
	private Menupopup leftHeaderMenu;

	/**
	 * Opens the column menu for a right-click on a column header and the row
	 * menu for a right-click on a row header; other header types are ignored.
	 */
	@Listen("onHeaderRightClick = spreadsheet")
	public void onHeaderRightClick(HeaderMouseEvent event) {
		// First pick the menu that matches the clicked header, then open it
		// once at the mouse position.
		final Menupopup contextMenu;
		switch (event.getType()) {
		case COLUMN:
			contextMenu = topHeaderMenu;
			break;
		case ROW:
			contextMenu = leftHeaderMenu;
			break;
		default:
			// Same as the original: nothing to do for other header types.
			return;
		}
		contextMenu.open(event.getClientx(), event.getClienty());
	}
}
zkoss/zssessentials
src/main/java/org/zkoss/zss/essential/events/MouseEventsComposer.java
Java
apache-2.0
860
/**
 * Copyright (c) 2009, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.wso2.carbon.analytics.hive.dto;

import java.util.ArrayList;
import java.util.List;

/**
 * Accumulates the outcome of executing a Hive script: the result of each
 * individual query, in the order the queries were added, plus an optional
 * error message describing a failure.
 */
public class ScriptResult {

    /** Per-query results collected so far, in insertion order. */
    private final List<QueryResult> collectedResults = new ArrayList<QueryResult>();

    /** Description of a failure, or null if no error has been recorded. */
    private String errorMessage;

    /**
     * Appends the result of a single query to this script result.
     *
     * @param result the query result to record
     */
    public void addQueryResult(QueryResult result) {
        collectedResults.add(result);
    }

    /**
     * Returns all recorded query results.
     *
     * @return array of query results in the order they were added
     */
    public QueryResult[] getQueryResults() {
        return collectedResults.toArray(new QueryResult[collectedResults.size()]);
    }

    /**
     * Records an error message for this script run.
     *
     * @param errorMessage description of the failure
     */
    public void setErrorMessage(String errorMessage) {
        this.errorMessage = errorMessage;
    }

    /**
     * Returns the recorded error message.
     *
     * @return the error message, or null if none was set
     */
    public String getErrorMessage() {
        return this.errorMessage;
    }
}
maheshika/carbon-analytics
components/analytics/org.wso2.carbon.analytics.hive/src/main/java/org/wso2/carbon/analytics/hive/dto/ScriptResult.java
Java
apache-2.0
1,263
# Copyright 2012 Kevin Minnick
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
Record interface
"""

from dnsclient import base


class Record(base.Resource):
    """
    A DNS record resource.
    """
    HUMAN_ID = False
    NAME_ATTR = 'name'

    def __repr__(self):
        # BUG FIX: the original format string "<Record: %s" was missing the
        # closing ">" and produced an unbalanced repr.
        return "<Record: %s>" % self.label

    def delete(self):
        """Delete this record through its manager."""
        self.manager.delete(self)


class RecordManager(base.ManagerWithFind):
    """
    Manage :class:`Record` resources.
    """
    resource_class = Record

    def list(self, domainId):
        """
        Get a list of all records for the domain.

        :param domainId: the domain (or its id) whose records are listed
        :rtype: list of :class:`Record`.
        """
        return self._list("/domains/%s/records" % base.getid(domainId),
                          "records")

    def create(self, args, domainId):
        """
        Create a record in the dns system.  The following parameters are
        required: type, name, data.  PTR records are created through the
        separate /rdns endpoint and additionally require ``args.server_href``.

        :param type: str
        :param name: str
        :param ttl: int
        :param data: str
        :param priority: int
        :param comment: str
        :rtype: list of :class:`Record`
        """
        body = {"records": [{
            "name": args.name,
            "comment": args.comment,
            "ttl": int(args.ttl),
            "type": args.type,
            "data": args.data,
            "priority": args.priority
        }]}
        url = '/domains/%s/records' % base.getid(domainId)
        if args.type == "PTR":
            # Reverse-DNS records are posted to /rdns and must carry a link
            # to the cloud server they resolve; priority does not apply.
            url = '/rdns'
            body = {
                "recordsList": {
                    "records": [{
                        "name": args.name,
                        "comment": args.comment,
                        "ttl": int(args.ttl),
                        "type": args.type,
                        "data": args.data
                    }]
                },
                "link": {
                    "content": "",
                    "href": args.server_href,
                    "rel": "cloudServersOpenStack"
                }
            }
        return self._create_async(url, body, return_raw=False, response_key="")

    def modify(self, args, domainId):
        """
        Modify a record in the dns system.  The following parameters are
        required: record_id and name.

        :param record_id: str
        :param domain: str
        :param name: str
        :param ttl: int
        :param data: str
        :param priority: int
        :param comment: str
        :rtype: list of :class:`Record`
        """
        body = {
            "name": args.name,
            "comment": args.comment,
            "ttl": int(args.ttl),
            "data": args.data,
            "priority": args.priority
        }
        url = '/domains/%s/records/%s' % (base.getid(domainId),
                                          base.getid(args.record_id))
        # ``type`` is optional for modify; only PTR records are re-routed to
        # the /rdns endpoint, which also needs the record id and server link.
        if hasattr(args, 'type'):
            if args.type == "PTR":
                url = '/rdns'
                body = {
                    "recordsList": {
                        "records": [{
                            "name": args.name,
                            "id": args.record_id,
                            "comment": args.comment,
                            "ttl": int(args.ttl),
                            "type": args.type,
                            "data": args.data
                        }]
                    },
                    "link": {
                        "content": "",
                        "href": args.server_href,
                        "rel": "cloudServersOpenStack"
                    }
                }
        return self._update(url, body, return_raw=False, response_key="")

    def delete(self, domainId, recordId):
        """
        Delete a specific record.

        :param domainId: The ID of the :class:`Domain` to delete from.
        :param recordId: The ID of the :class:`Record` to delete.
        """
        self._delete("/domains/%s/records/%s" % (base.getid(domainId),
                                                 base.getid(recordId)))

    def rdns_list(self, href):
        """
        List all PTR records configured for the specified Cloud device.

        :param href: The href of the device to get.
        :rtype: :class:`Record`
        """
        return self._list("/rdns/cloudServersOpenStack?href=%s" % href,
                          "records")

    def rdns_delete(self, href, ip):
        """
        Remove one or all PTR records associated with a Rackspace Cloud
        device.  Use the optional ip query parameter to specify a specific
        record to delete.  Omitting this parameter removes all PTR records
        associated with the specified device.

        :param href: The ID of the device to delete.
        :param ip: The ip of the specific record to delete.
        """
        self._delete("/rdns/cloudServersOpenStack?href=%s&ip=%s" % (href, ip))
kwminnick/rackspace-dns-cli
dnsclient/v1_0/records.py
Python
apache-2.0
5,748
<?php
/*
 * Copyright 2021 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * GENERATED CODE WARNING
 * Generated by gapic-generator-php from the file
 * https://github.com/google/googleapis/blob/master/google/ads/googleads/v9/services/customer_label_service.proto
 * Updates to the above are reflected here through a refresh process.
 *
 * NOTE(review): do not hand-edit behavior in this file; changes will be lost
 * on the next generator refresh.
 */

namespace Google\Ads\GoogleAds\V9\Services\Gapic;

use Google\Ads\GoogleAds\V9\Resources\CustomerLabel;
use Google\Ads\GoogleAds\V9\Services\CustomerLabelOperation;
use Google\Ads\GoogleAds\V9\Services\GetCustomerLabelRequest;
use Google\Ads\GoogleAds\V9\Services\MutateCustomerLabelsRequest;
use Google\Ads\GoogleAds\V9\Services\MutateCustomerLabelsResponse;
use Google\ApiCore\ApiException;
use Google\ApiCore\CredentialsWrapper;
use Google\ApiCore\GapicClientTrait;
use Google\ApiCore\PathTemplate;
use Google\ApiCore\RequestParamsHeaderDescriptor;
use Google\ApiCore\RetrySettings;
use Google\ApiCore\Transport\TransportInterface;
use Google\ApiCore\ValidationException;
use Google\Auth\FetchAuthTokenInterface;

/**
 * Service Description: Service to manage labels on customers.
 *
 * This class provides the ability to make remote calls to the backing service through method
 * calls that map to API methods. Sample code to get started:
 *
 * ```
 * $customerLabelServiceClient = new CustomerLabelServiceClient();
 * try {
 *     $formattedResourceName = $customerLabelServiceClient->customerLabelName('[CUSTOMER_ID]', '[LABEL_ID]');
 *     $response = $customerLabelServiceClient->getCustomerLabel($formattedResourceName);
 * } finally {
 *     $customerLabelServiceClient->close();
 * }
 * ```
 *
 * Many parameters require resource names to be formatted in a particular way. To
 * assist with these names, this class includes a format method for each type of
 * name, and additionally a parseName method to extract the individual identifiers
 * contained within formatted names that are returned by the API.
 */
class CustomerLabelServiceGapicClient
{
    use GapicClientTrait;

    /**
     * The name of the service.
     */
    const SERVICE_NAME = 'google.ads.googleads.v9.services.CustomerLabelService';

    /**
     * The default address of the service.
     */
    const SERVICE_ADDRESS = 'googleads.googleapis.com';

    /**
     * The default port of the service.
     */
    const DEFAULT_SERVICE_PORT = 443;

    /**
     * The name of the code generator, to be included in the agent header.
     */
    const CODEGEN_NAME = 'gapic';

    /**
     * The default scopes required by the service.
     */
    public static $serviceScopes = [
        'https://www.googleapis.com/auth/adwords',
    ];

    // Lazily-built path template for customer-label resource names.
    private static $customerLabelNameTemplate;

    // Lazily-built map of template name => PathTemplate, used by parseName().
    private static $pathTemplateMap;

    /**
     * Default client options: service endpoint, config file locations, and
     * credential scopes consumed by GapicClientTrait::buildClientOptions().
     */
    private static function getClientDefaults()
    {
        return [
            'serviceName' => self::SERVICE_NAME,
            'serviceAddress' => self::SERVICE_ADDRESS . ':' . self::DEFAULT_SERVICE_PORT,
            'clientConfig' => __DIR__ . '/../resources/customer_label_service_client_config.json',
            'descriptorsConfigPath' => __DIR__ . '/../resources/customer_label_service_descriptor_config.php',
            'gcpApiConfigPath' => __DIR__ . '/../resources/customer_label_service_grpc_config.json',
            'credentialsConfig' => [
                'defaultScopes' => self::$serviceScopes,
            ],
            'transportConfig' => [
                'rest' => [
                    'restClientConfigPath' => __DIR__ . '/../resources/customer_label_service_rest_client_config.php',
                ],
            ],
        ];
    }

    // Builds (once) and returns the customer-label name template.
    private static function getCustomerLabelNameTemplate()
    {
        if (self::$customerLabelNameTemplate == null) {
            self::$customerLabelNameTemplate = new PathTemplate('customers/{customer_id}/customerLabels/{label_id}');
        }

        return self::$customerLabelNameTemplate;
    }

    // Builds (once) and returns the map of all supported path templates.
    private static function getPathTemplateMap()
    {
        if (self::$pathTemplateMap == null) {
            self::$pathTemplateMap = [
                'customerLabel' => self::getCustomerLabelNameTemplate(),
            ];
        }

        return self::$pathTemplateMap;
    }

    /**
     * Formats a string containing the fully-qualified path to represent a
     * customer_label resource.
     *
     * @param string $customerId
     * @param string $labelId
     *
     * @return string The formatted customer_label resource.
     */
    public static function customerLabelName($customerId, $labelId)
    {
        return self::getCustomerLabelNameTemplate()->render([
            'customer_id' => $customerId,
            'label_id' => $labelId,
        ]);
    }

    /**
     * Parses a formatted name string and returns an associative array of the components in the name.
     * The following name formats are supported:
     * Template: Pattern
     * - customerLabel: customers/{customer_id}/customerLabels/{label_id}
     *
     * The optional $template argument can be supplied to specify a particular pattern,
     * and must match one of the templates listed above. If no $template argument is
     * provided, or if the $template argument does not match one of the templates
     * listed, then parseName will check each of the supported templates, and return
     * the first match.
     *
     * @param string $formattedName The formatted name string
     * @param string $template      Optional name of template to match
     *
     * @return array An associative array from name component IDs to component values.
     *
     * @throws ValidationException If $formattedName could not be matched.
     */
    public static function parseName($formattedName, $template = null)
    {
        $templateMap = self::getPathTemplateMap();
        if ($template) {
            // Caller requested a specific template: it must exist and match.
            if (!isset($templateMap[$template])) {
                throw new ValidationException("Template name $template does not exist");
            }

            return $templateMap[$template]->match($formattedName);
        }

        // No template given: try each known template in turn.
        foreach ($templateMap as $templateName => $pathTemplate) {
            try {
                return $pathTemplate->match($formattedName);
            } catch (ValidationException $ex) {
                // Swallow the exception to continue trying other path templates
            }
        }

        throw new ValidationException("Input did not match any known format. Input: $formattedName");
    }

    /**
     * Constructor.
     *
     * @param array $options {
     *     Optional. Options for configuring the service API wrapper.
     *
     *     @type string $serviceAddress
     *           The address of the API remote host. May optionally include the port, formatted
     *           as "<uri>:<port>". Default 'googleads.googleapis.com:443'.
     *     @type string|array|FetchAuthTokenInterface|CredentialsWrapper $credentials
     *           The credentials to be used by the client to authorize API calls. This option
     *           accepts either a path to a credentials file, or a decoded credentials file as a
     *           PHP array.
     *           *Advanced usage*: In addition, this option can also accept a pre-constructed
     *           {@see \Google\Auth\FetchAuthTokenInterface} object or
     *           {@see \Google\ApiCore\CredentialsWrapper} object. Note that when one of these
     *           objects are provided, any settings in $credentialsConfig will be ignored.
     *     @type array $credentialsConfig
     *           Options used to configure credentials, including auth token caching, for the
     *           client. For a full list of supporting configuration options, see
     *           {@see \Google\ApiCore\CredentialsWrapper::build()} .
     *     @type bool $disableRetries
     *           Determines whether or not retries defined by the client configuration should be
     *           disabled. Defaults to `false`.
     *     @type string|array $clientConfig
     *           Client method configuration, including retry settings. This option can be either
     *           a path to a JSON file, or a PHP array containing the decoded JSON data. By
     *           default this settings points to the default client config file, which is
     *           provided in the resources folder.
     *     @type string|TransportInterface $transport
     *           The transport used for executing network requests. May be either the string
     *           `rest` or `grpc`. Defaults to `grpc` if gRPC support is detected on the system.
     *           *Advanced usage*: Additionally, it is possible to pass in an already
     *           instantiated {@see \Google\ApiCore\Transport\TransportInterface} object. Note
     *           that when this object is provided, any settings in $transportConfig, and any
     *           $serviceAddress setting, will be ignored.
     *     @type array $transportConfig
     *           Configuration options that will be used to construct the transport. Options for
     *           each supported transport type should be passed in a key for that transport. For
     *           example:
     *           $transportConfig = [
     *               'grpc' => [...],
     *               'rest' => [...],
     *           ];
     *           See the {@see \Google\ApiCore\Transport\GrpcTransport::build()} and
     *           {@see \Google\ApiCore\Transport\RestTransport::build()} methods for the
     *           supported options.
     *     @type callable $clientCertSource
     *           A callable which returns the client cert as a string. This can be used to
     *           provide a certificate and private key to the transport layer for mTLS.
     * }
     *
     * @throws ValidationException
     */
    public function __construct(array $options = [])
    {
        $clientOptions = $this->buildClientOptions($options);
        $this->setClientOptions($clientOptions);
    }

    /**
     * Returns the requested customer-label relationship in full detail.
     *
     * List of thrown errors:
     * [AuthenticationError]()
     * [AuthorizationError]()
     * [HeaderError]()
     * [InternalError]()
     * [QuotaError]()
     * [RequestError]()
     *
     * Sample code:
     * ```
     * $customerLabelServiceClient = new CustomerLabelServiceClient();
     * try {
     *     $formattedResourceName = $customerLabelServiceClient->customerLabelName('[CUSTOMER_ID]', '[LABEL_ID]');
     *     $response = $customerLabelServiceClient->getCustomerLabel($formattedResourceName);
     * } finally {
     *     $customerLabelServiceClient->close();
     * }
     * ```
     *
     * @param string $resourceName Required. The resource name of the customer-label relationship to fetch.
     * @param array  $optionalArgs {
     *     Optional.
     *
     *     @type RetrySettings|array $retrySettings
     *           Retry settings to use for this call. Can be a
     *           {@see Google\ApiCore\RetrySettings} object, or an associative array of retry
     *           settings parameters. See the documentation on
     *           {@see Google\ApiCore\RetrySettings} for example usage.
     * }
     *
     * @return \Google\Ads\GoogleAds\V9\Resources\CustomerLabel
     *
     * @throws ApiException if the remote call fails
     */
    public function getCustomerLabel($resourceName, array $optionalArgs = [])
    {
        $request = new GetCustomerLabelRequest();
        $requestParamHeaders = [];
        $request->setResourceName($resourceName);
        // Routing header so the backend can dispatch on the resource name.
        $requestParamHeaders['resource_name'] = $resourceName;
        $requestParams = new RequestParamsHeaderDescriptor($requestParamHeaders);
        $optionalArgs['headers'] = isset($optionalArgs['headers']) ? array_merge($requestParams->getHeader(), $optionalArgs['headers']) : $requestParams->getHeader();
        return $this->startCall('GetCustomerLabel', CustomerLabel::class, $optionalArgs, $request)->wait();
    }

    /**
     * Creates and removes customer-label relationships.
     * Operation statuses are returned.
     *
     * List of thrown errors:
     * [AuthenticationError]()
     * [AuthorizationError]()
     * [DatabaseError]()
     * [HeaderError]()
     * [InternalError]()
     * [LabelError]()
     * [MutateError]()
     * [QuotaError]()
     * [RequestError]()
     *
     * Sample code:
     * ```
     * $customerLabelServiceClient = new CustomerLabelServiceClient();
     * try {
     *     $customerId = 'customer_id';
     *     $operations = [];
     *     $response = $customerLabelServiceClient->mutateCustomerLabels($customerId, $operations);
     * } finally {
     *     $customerLabelServiceClient->close();
     * }
     * ```
     *
     * @param string                   $customerId   Required. ID of the customer whose customer-label relationships are being modified.
     * @param CustomerLabelOperation[] $operations   Required. The list of operations to perform on customer-label relationships.
     * @param array                    $optionalArgs {
     *     Optional.
     *
     *     @type bool $partialFailure
     *           If true, successful operations will be carried out and invalid
     *           operations will return errors. If false, all operations will be carried
     *           out in one transaction if and only if they are all valid.
     *           Default is false.
     *     @type bool $validateOnly
     *           If true, the request is validated but not executed. Only errors are
     *           returned, not results.
     *     @type RetrySettings|array $retrySettings
     *           Retry settings to use for this call. Can be a
     *           {@see Google\ApiCore\RetrySettings} object, or an associative array of retry
     *           settings parameters. See the documentation on
     *           {@see Google\ApiCore\RetrySettings} for example usage.
     * }
     *
     * @return \Google\Ads\GoogleAds\V9\Services\MutateCustomerLabelsResponse
     *
     * @throws ApiException if the remote call fails
     */
    public function mutateCustomerLabels($customerId, $operations, array $optionalArgs = [])
    {
        $request = new MutateCustomerLabelsRequest();
        $requestParamHeaders = [];
        $request->setCustomerId($customerId);
        $request->setOperations($operations);
        // Routing header so the backend can dispatch on the customer id.
        $requestParamHeaders['customer_id'] = $customerId;
        if (isset($optionalArgs['partialFailure'])) {
            $request->setPartialFailure($optionalArgs['partialFailure']);
        }

        if (isset($optionalArgs['validateOnly'])) {
            $request->setValidateOnly($optionalArgs['validateOnly']);
        }

        $requestParams = new RequestParamsHeaderDescriptor($requestParamHeaders);
        $optionalArgs['headers'] = isset($optionalArgs['headers']) ? array_merge($requestParams->getHeader(), $optionalArgs['headers']) : $requestParams->getHeader();
        return $this->startCall('MutateCustomerLabels', MutateCustomerLabelsResponse::class, $optionalArgs, $request)->wait();
    }
}
googleads/google-ads-php
src/Google/Ads/GoogleAds/V9/Services/Gapic/CustomerLabelServiceGapicClient.php
PHP
apache-2.0
15,563
using System.Collections.Generic;
using System.ComponentModel.DataAnnotations;
using Microsoft.AspNet.Identity;
using Microsoft.Owin.Security;

namespace WebDev.Models
{
    /// <summary>
    /// Summary of the signed-in user's security settings shown on the
    /// account-management index page.
    /// </summary>
    public class IndexViewModel
    {
        public bool HasPassword { get; set; }
        public IList<UserLoginInfo> Logins { get; set; }
        public string PhoneNumber { get; set; }
        public bool TwoFactor { get; set; }
        public bool BrowserRemembered { get; set; }
    }

    /// <summary>
    /// External logins already linked to the account plus the providers that
    /// are still available to link.
    /// </summary>
    public class ManageLoginsViewModel
    {
        public IList<UserLoginInfo> CurrentLogins { get; set; }
        public IList<AuthenticationDescription> OtherLogins { get; set; }
    }

    /// <summary>
    /// Carries the purpose string of a two-factor authentication provider.
    /// </summary>
    public class FactorViewModel
    {
        public string Purpose { get; set; }
    }

    /// <summary>
    /// Form model for setting an initial local password (no current password
    /// required).
    /// </summary>
    public class SetPasswordViewModel
    {
        [Required]
        [StringLength(100, ErrorMessage = "The {0} must be at least {2} characters long.", MinimumLength = 6)]
        [DataType(DataType.Password)]
        [Display(Name = "New password")]
        public string NewPassword { get; set; }

        [DataType(DataType.Password)]
        [Display(Name = "Confirm new password")]
        [Compare("NewPassword", ErrorMessage = "The new password and confirmation password do not match.")]
        public string ConfirmPassword { get; set; }
    }

    /// <summary>
    /// Form model for changing an existing local password; requires the
    /// current password in addition to the new one.
    /// </summary>
    public class ChangePasswordViewModel
    {
        [Required]
        [DataType(DataType.Password)]
        [Display(Name = "Current password")]
        public string OldPassword { get; set; }

        [Required]
        [StringLength(100, ErrorMessage = "The {0} must be at least {2} characters long.", MinimumLength = 6)]
        [DataType(DataType.Password)]
        [Display(Name = "New password")]
        public string NewPassword { get; set; }

        [DataType(DataType.Password)]
        [Display(Name = "Confirm new password")]
        [Compare("NewPassword", ErrorMessage = "The new password and confirmation password do not match.")]
        public string ConfirmPassword { get; set; }
    }

    /// <summary>
    /// Form model for registering a phone number for SMS verification.
    /// </summary>
    public class AddPhoneNumberViewModel
    {
        [Required]
        [Phone]
        [Display(Name = "Phone Number")]
        public string Number { get; set; }
    }

    /// <summary>
    /// Form model for confirming a phone number with the code sent via SMS.
    /// </summary>
    public class VerifyPhoneNumberViewModel
    {
        [Required]
        [Display(Name = "Code")]
        public string Code { get; set; }

        [Required]
        [Phone]
        [Display(Name = "Phone Number")]
        public string PhoneNumber { get; set; }
    }

    /// <summary>
    /// Choice of two-factor provider together with the list of available
    /// providers rendered as select-list items.
    /// </summary>
    public class ConfigureTwoFactorViewModel
    {
        public string SelectedProvider { get; set; }
        public ICollection<System.Web.Mvc.SelectListItem> Providers { get; set; }
    }
}
ISEPTrabalhos/BizzKit
WebDev/WebDev/Models/ManageViewModels.cs
C#
apache-2.0
2,652
package topology.principal.components;

import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.generated.AlreadyAliveException;
import backtype.storm.generated.InvalidTopologyException;
import backtype.storm.generated.StormTopology;
import backtype.storm.tuple.Fields;
import bolt.ml.state.pca.create.WindowedPcaFactory;
import bolt.ml.state.pca.query.PrincipalComponentsAggregator;
import bolt.ml.state.pca.query.PrincipalComponentsQuery;
import bolt.ml.state.pca.update.PrincipalComponentUpdater;
import spout.ml.MlStormSpout;
import spout.ml.sensor.SensorStreamingSpout;
import storm.trident.Stream;
import storm.trident.TridentState;
import storm.trident.TridentTopology;
import storm.trident.spout.ITridentSpout;
import storm.trident.spout.RichSpoutBatchExecutor;
import storm.trident.state.StateFactory;
import utils.MlStormConfig;
import utils.fields.FieldTemplate;
import utils.fields.MlStormFieldTemplate;

/*
 * Copyright 2013-2015 Lakshmisha Bhat
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Run PCA algorithm on storm. This provides horizontal scaling.
 * Another IncrementalPCATopology is coming soon that's completely distributed.
 */
public class PcaTopology {
    // The minimum no. of samples required to run the PCA algorithm.
    private static final int PCA_SAMPLE_SIZE = 100;
    // The number of principal components output by the algorithm.
    private static final int PRINCIPAL_COMPONENTS = 5;

    /**
     * Assembles the Trident topology: a sensor stream feeds a windowed PCA
     * state, and a DRPC stream queries/aggregates the resulting components.
     *
     * @param mlStormSpout           source of feature vectors
     * @param parallelism            parallelism hint for the persist stage
     * @param pcaRowWidth            window size (rows) fed to the PCA factory
     * @param numPrincipalComponents number of components to retain
     * @param template               field-name template shared by spout/bolts
     * @return the built StormTopology, ready for submission
     */
    private static StormTopology buildTopology(final MlStormSpout mlStormSpout,
                                               final int parallelism,
                                               final int pcaRowWidth,
                                               final int numPrincipalComponents,
                                               final FieldTemplate template) {
        final TridentTopology topology = new TridentTopology();
        final Stream sensorStream = topology.newStream(FieldTemplate.FieldConstants.PCA.PCA, mlStormSpout);
        final StateFactory pcaFactory = new WindowedPcaFactory(pcaRowWidth, numPrincipalComponents, template);

        // Persist incoming (key, feature-vector) tuples into the windowed PCA state.
        final TridentState principalComponents =
                sensorStream
                        .partitionPersist(pcaFactory, new Fields(template.getKeyField(), template.getFeatureVectorField()), new PrincipalComponentUpdater(template))
                        .parallelismHint(parallelism);

        // DRPC query path: broadcast the query to every partition, collect each
        // partition's components, then aggregate them into eigen output.
        topology.newDRPCStream(FieldTemplate.FieldConstants.PCA.PCA_DRPC)
                .broadcast()
                .stateQuery(principalComponents, new Fields(FieldTemplate.FieldConstants.ARGS), new PrincipalComponentsQuery(), new Fields(FieldTemplate.FieldConstants.PCA.PCA_COMPONENTS))
                .project(new Fields(FieldTemplate.FieldConstants.PCA.PCA_COMPONENTS))
                .aggregate(new Fields(FieldTemplate.FieldConstants.PCA.PCA_COMPONENTS), new PrincipalComponentsAggregator(), new Fields(FieldTemplate.FieldConstants.PCA.PCA_EIGEN))
                .project(new Fields(FieldTemplate.FieldConstants.PCA.PCA_EIGEN));

        return topology.build();
    }

    // This is all the code you need to write to run PCA algorithm. Create your own Spout that reads and emits MlStormFeatureVector.
    // Look at spout.ml.sensor.SensorStreamingSpout or spout.ml.weka.AustralianElectricityPricingSpout for example.
    public static void main(String[] args) throws AlreadyAliveException, InvalidTopologyException {
        final FieldTemplate fieldTemplate = new MlStormFieldTemplate();
        final MlStormSpout spout = new SensorStreamingSpout(fieldTemplate);
        // Optional first CLI argument is the parallelism; default is 1 (local mode).
        final int parallelism = args.length > 0 ? Integer.valueOf(args[0]) : 1;
        final StormTopology stormTopology = buildTopology(spout, parallelism, PCA_SAMPLE_SIZE, PRINCIPAL_COMPONENTS, fieldTemplate);

        // parallelism == 1 runs in an in-process LocalCluster; anything higher
        // submits to a real cluster via StormSubmitter.
        if (parallelism == 1) {
            final LocalCluster cluster = new LocalCluster();
            cluster.submitTopology(FieldTemplate.FieldConstants.PCA.PCA, MlStormConfig.getDefaultMlStormConfig(parallelism), stormTopology);
        } else {
            StormSubmitter.submitTopology(FieldTemplate.FieldConstants.PCA.PCA, MlStormConfig.getDefaultMlStormConfig(parallelism), stormTopology);
        }
    }
}
LakkiB/mlstorm
src/com.mlstorm.jvm/streaming/topology/principal/components/PcaTopology.java
Java
apache-2.0
4,758
/**
 * Copyright 2013 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @providesModule ReactDOMIDOperations
 * @typechecks static-only
 */

/*jslint evil: true */

"use strict";

var CSSPropertyOperations = require("./CSSPropertyOperations");
var DOMChildrenOperations = require("./DOMChildrenOperations");
var DOMPropertyOperations = require("./DOMPropertyOperations");
var ReactID = require("./ReactID");
var getTextContentAccessor = require("./getTextContentAccessor");
var invariant = require("./invariant");

/**
 * Errors for properties that should not be updated with `updatePropertyById()`.
 *
 * @type {object}
 * @private
 */
var INVALID_PROPERTY_ERRORS = {
  dangerouslySetInnerHTML:
    '`dangerouslySetInnerHTML` must be set using `updateInnerHTMLByID()`.',
  style: '`style` must be set using `updateStylesByID()`.'
};

/**
 * The DOM property to use when setting text content.
 *
 * NOTE(review): the 'NA' fallback means an unknown accessor silently writes to
 * a bogus `node.NA` property instead of failing loudly — confirm this is the
 * intended environment-detection behavior.
 *
 * @type {string}
 * @private
 */
var textContentAccessor = getTextContentAccessor() || 'NA';

/**
 * Operations used to process updates to DOM nodes. This is made injectable via
 * `ReactComponent.DOMIDOperations`.
 */
var ReactDOMIDOperations = {

  /**
   * Updates a DOM node with new property values. This should only be used to
   * update DOM properties in `DOMProperty`.
   *
   * @param {string} id ID of the node to update.
   * @param {string} name A valid property name, see `DOMProperty`.
   * @param {*} value New value of the property.
   * @internal
   */
  updatePropertyByID: function(id, name, value) {
    var node = ReactID.getNode(id);
    // Surface the specific guidance message when a reserved property is used.
    invariant(
      !INVALID_PROPERTY_ERRORS.hasOwnProperty(name),
      INVALID_PROPERTY_ERRORS[name]
    );
    DOMPropertyOperations.setValueForProperty(node, name, value);
  },

  /**
   * Updates a DOM node to remove a property. This should only be used to remove
   * DOM properties in `DOMProperty`.
   *
   * @param {string} id ID of the node to update.
   * @param {string} name A property name to remove, see `DOMProperty`.
   * @internal
   */
  deletePropertyByID: function(id, name, value) {
    var node = ReactID.getNode(id);
    invariant(
      !INVALID_PROPERTY_ERRORS.hasOwnProperty(name),
      INVALID_PROPERTY_ERRORS[name]
    );
    DOMPropertyOperations.deleteValueForProperty(node, name, value);
  },

  /**
   * This should almost never be used instead of `updatePropertyByID()` due to
   * the extra object allocation required by the API. That said, this is useful
   * for batching up several operations across worker thread boundaries.
   *
   * @param {string} id ID of the node to update.
   * @param {object} properties A mapping of valid property names.
   * @internal
   * @see {ReactDOMIDOperations.updatePropertyByID}
   */
  updatePropertiesByID: function(id, properties) {
    for (var name in properties) {
      if (!properties.hasOwnProperty(name)) {
        continue;
      }
      // BUG FIX: this previously recursed into updatePropertiesByID with an
      // (id, name, value) argument list that method does not accept, looping
      // forever instead of applying each property. Delegate to the
      // single-property updater, matching the @see contract above.
      ReactDOMIDOperations.updatePropertyByID(id, name, properties[name]);
    }
  },

  /**
   * Updates a DOM node with new style values. If a value is specified as '',
   * the corresponding style property will be unset.
   *
   * @param {string} id ID of the node to update.
   * @param {object} styles Mapping from styles to values.
   * @internal
   */
  updateStylesByID: function(id, styles) {
    var node = ReactID.getNode(id);
    CSSPropertyOperations.setValueForStyles(node, styles);
  },

  /**
   * Updates a DOM node's innerHTML set by `props.dangerouslySetInnerHTML`.
   *
   * @param {string} id ID of the node to update.
   * @param {object} html An HTML object with the `__html` property.
   * @internal
   */
  updateInnerHTMLByID: function(id, html) {
    var node = ReactID.getNode(id);
    // HACK: IE8- normalize whitespace in innerHTML, removing leading spaces.
    // @see quirksmode.org/bugreports/archives/2004/11/innerhtml_and_t.html
    node.innerHTML = (html && html.__html || '').replace(/^ /g, '&nbsp;');
  },

  /**
   * Updates a DOM node's text content set by `props.content`.
   *
   * @param {string} id ID of the node to update.
   * @param {string} content Text content.
   * @internal
   */
  updateTextContentByID: function(id, content) {
    var node = ReactID.getNode(id);
    node[textContentAccessor] = content;
  },

  /**
   * Replaces a DOM node that exists in the document with markup.
   *
   * @param {string} id ID of child to be replaced.
   * @param {string} markup Dangerous markup to inject in place of child.
   * @internal
   * @see {Danger.dangerouslyReplaceNodeWithMarkup}
   */
  dangerouslyReplaceNodeWithMarkupByID: function(id, markup) {
    var node = ReactID.getNode(id);
    DOMChildrenOperations.dangerouslyReplaceNodeWithMarkup(node, markup);
  },

  /**
   * TODO: We only actually *need* to purge the cache when we remove elements.
   * Detect if any elements were removed instead of blindly purging.
   */
  manageChildrenByParentID: function(parentID, domOperations) {
    var parent = ReactID.getNode(parentID);
    DOMChildrenOperations.manageChildren(parent, domOperations);
  }

};

module.exports = ReactDOMIDOperations;
jordwalke/npm-react-core
modules/ReactDOMIDOperations.js
JavaScript
apache-2.0
5,510
from django.core.mail import send_mail
from django.core.handlers.base import BaseHandler
from django.test.client import RequestFactory, FakePayload

# collections.abc is required: the bare collections.Mapping/Iterable aliases
# were deprecated in Python 3.3 and removed in Python 3.10.
import collections.abc

#####################################################
### Utility functions


def flat_attr(attrs, level=0):
    '''Flattens the attribute map to a string ready to be put into a start
    tag.  The map can have embedded maps and/or lists, such as a style
    attribute with multiple items.

    At level 0 mappings render as HTML attributes (k="v"); at deeper levels
    they render as CSS declarations (k: v;).  Falsy values are skipped.
    '''
    if attrs is None:
        return ''
    elif isinstance(attrs, str):
        # Strings are Iterable, so this test must come before the
        # Mapping/Iterable checks below.
        return attrs
    elif isinstance(attrs, collections.abc.Mapping) and level == 0:
        return ' '.join('%s="%s"' % (k, flat_attr(v, level + 1)) for k, v in attrs.items() if v)
    elif isinstance(attrs, collections.abc.Mapping) and level > 0:
        return ' '.join('%s: %s;' % (k, flat_attr(v, level + 1)) for k, v in attrs.items() if v)
    elif isinstance(attrs, collections.abc.Iterable):  # list, tuple, ...
        return ' '.join(flat_attr(v, level + 1) for v in attrs if v)
    else:
        return str(attrs)


########################################################
### Helper methods for running celery tasks

# Types that survive JSON serialization unchanged; used to filter request.META.
JSON_SERIALIZABLE = (dict, list, tuple, str, bytes, int, float, bool, type(None))

# Key under which the request body is cached inside the META dict.
BODY_KEY = 'island_body_cached'


def get_fake_request(meta):
    '''Retrieves a fake request using the given request.META.  This allows
    celery tasks to have a "request" to use in code.

    Raises Exception if any request middleware returns a response.
    '''
    # if the body was cached in the meta, put it back as the wsgi.input
    if BODY_KEY in meta:
        meta['wsgi.input'] = FakePayload(meta[BODY_KEY])

    # create a basic request using the Django testing framework
    request = RequestFactory().request(**meta)

    # run middleware on it
    handler = BaseHandler()
    handler.load_middleware()
    for middleware_method in handler._request_middleware:
        response = middleware_method(request)
        if response:
            raise Exception("Middleware cannot return a response with a FakeRequest.")

    # return the request
    return request


def prepare_fake_meta(request, include_body=False):
    '''Removes any values in the dictionary that can't be serialized.
    This is done in preparation for sending the request.META to a celery task.

    Returns a plain dict (empty when request is None).
    '''
    if request is None:
        return {}

    meta = {k: v for k, v in request.META.items() if isinstance(v, JSON_SERIALIZABLE)}

    # save the body so we can make it the input when getting the fake request
    if include_body and request.body:
        meta[BODY_KEY] = request.body

    return meta
lanesawyer/island
lib/__init__.py
Python
apache-2.0
2,512
// Copyright 2020, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package protocol // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol"

import (
	"sort"
	"time"

	"go.opentelemetry.io/collector/model/pdata"
	"gonum.org/v1/gonum/stat"
)

var (
	// Percentiles reported for statsd timers/histograms when none are configured.
	statsDDefaultPercentiles = []float64{0, 10, 50, 90, 95, 100}
)

// buildCounterMetric converts a parsed statsd counter into a delta Sum metric
// spanning [lastIntervalTime, timeNow]; statsd tags become data-point attributes.
func buildCounterMetric(parsedMetric statsDMetric, isMonotonicCounter bool, timeNow, lastIntervalTime time.Time) pdata.InstrumentationLibraryMetrics {
	ilm := pdata.NewInstrumentationLibraryMetrics()
	nm := ilm.Metrics().AppendEmpty()
	nm.SetName(parsedMetric.description.name)
	if parsedMetric.unit != "" {
		nm.SetUnit(parsedMetric.unit)
	}

	nm.SetDataType(pdata.MetricDataTypeSum)
	nm.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta)
	nm.Sum().SetIsMonotonic(isMonotonicCounter)

	dp := nm.Sum().DataPoints().AppendEmpty()
	dp.SetIntVal(parsedMetric.counterValue())
	dp.SetStartTimestamp(pdata.NewTimestampFromTime(lastIntervalTime))
	dp.SetTimestamp(pdata.NewTimestampFromTime(timeNow))
	for i := parsedMetric.description.attrs.Iter(); i.Next(); {
		dp.Attributes().InsertString(string(i.Attribute().Key), i.Attribute().Value.AsString())
	}

	return ilm
}

// buildGaugeMetric converts a parsed statsd gauge into a Gauge metric with a
// single double data point stamped at timeNow.
func buildGaugeMetric(parsedMetric statsDMetric, timeNow time.Time) pdata.InstrumentationLibraryMetrics {
	ilm := pdata.NewInstrumentationLibraryMetrics()
	nm := ilm.Metrics().AppendEmpty()
	nm.SetName(parsedMetric.description.name)
	if parsedMetric.unit != "" {
		nm.SetUnit(parsedMetric.unit)
	}
	nm.SetDataType(pdata.MetricDataTypeGauge)
	dp := nm.Gauge().DataPoints().AppendEmpty()
	dp.SetDoubleVal(parsedMetric.gaugeValue())
	dp.SetTimestamp(pdata.NewTimestampFromTime(timeNow))
	for i := parsedMetric.description.attrs.Iter(); i.Next(); {
		dp.Attributes().InsertString(string(i.Attribute().Key), i.Attribute().Value.AsString())
	}

	return ilm
}

// buildSummaryMetric appends a Summary metric to ilm from an accumulated set of
// weighted observation points, emitting one quantile value per requested
// percentile (percentiles are given on a 0-100 scale).
func buildSummaryMetric(desc statsDMetricDescription, summary summaryMetric, startTime, timeNow time.Time, percentiles []float64, ilm pdata.InstrumentationLibraryMetrics) {
	nm := ilm.Metrics().AppendEmpty()
	nm.SetName(desc.name)
	nm.SetDataType(pdata.MetricDataTypeSummary)

	dp := nm.Summary().DataPoints().AppendEmpty()

	// Weighted count and sum over all observations.
	count := float64(0)
	sum := float64(0)
	for i := range summary.points {
		c := summary.weights[i]
		count += c
		sum += summary.points[i] * c
	}

	// Note: count is rounded here, see note in counterValue().
	dp.SetCount(uint64(count))
	dp.SetSum(sum)
	dp.SetStartTimestamp(pdata.NewTimestampFromTime(startTime))
	dp.SetTimestamp(pdata.NewTimestampFromTime(timeNow))
	for i := desc.attrs.Iter(); i.Next(); {
		dp.Attributes().InsertString(string(i.Attribute().Key), i.Attribute().Value.AsString())
	}

	// stat.Quantile requires sorted values; weights are permuted in lockstep.
	sort.Sort(dualSorter{summary.points, summary.weights})
	for _, pct := range percentiles {
		eachQuantile := dp.QuantileValues().AppendEmpty()
		eachQuantile.SetQuantile(pct / 100)
		eachQuantile.SetValue(stat.Quantile(pct/100, stat.Empirical, summary.points, summary.weights))
	}
}

// counterValue returns the sample-rate-adjusted counter value as an integer.
func (s statsDMetric) counterValue() int64 {
	x := s.asFloat
	// Note statds counters are always represented as integers.
	// There is no statsd specification that says what should or
	// shouldn't be done here. Rounding may occur for sample
	// rates that are not integer reciprocals. Recommendation:
	// use integer reciprocal sampling rates.
	if 0 < s.sampleRate && s.sampleRate < 1 {
		x = x / s.sampleRate
	}
	return int64(x)
}

// gaugeValue returns the raw gauge value.
func (s statsDMetric) gaugeValue() float64 {
	// sampleRate does not have effect for gauge points.
	return s.asFloat
}

// summaryValue returns the observation with its sample-rate-derived weight.
func (s statsDMetric) summaryValue() summaryRaw {
	count := 1.0
	if 0 < s.sampleRate && s.sampleRate < 1 {
		count /= s.sampleRate
	}
	return summaryRaw{
		value: s.asFloat,
		count: count,
	}
}

// dualSorter sorts values ascending while keeping weights aligned index-wise.
type dualSorter struct {
	values, weights []float64
}

func (d dualSorter) Len() int {
	return len(d.values)
}

func (d dualSorter) Swap(i, j int) {
	d.values[i], d.values[j] = d.values[j], d.values[i]
	d.weights[i], d.weights[j] = d.weights[j], d.weights[i]
}

func (d dualSorter) Less(i, j int) bool {
	return d.values[i] < d.values[j]
}
open-telemetry/opentelemetry-collector-contrib
receiver/statsdreceiver/protocol/metric_translator.go
GO
apache-2.0
4,638
// web/js/app/views/disabled-toggle-row.js
/* global Ember, App */
'use strict';

// A table-row view for a disabled toggle: renders the 'disabledtogglerow'
// template inside a <tr> carrying the no-hover class.
var rowOptions = {
  tagName: 'tr',
  templateName: 'disabledtogglerow',
  classNames: ['no-hover']
};

App.DisabledToggleRowView = Ember.View.extend(rowOptions);
gophronesis/penny-sae
old/web/js/app/views/disabled-toggle-row.js
JavaScript
apache-2.0
216
/* -----------------------------------------------------------------------------
 * Rule_cmdMonitorEnter.java
 * -----------------------------------------------------------------------------
 *
 * Producer : com.parse2.aparse.Parser 2.3
 * Produced : Fri Apr 12 10:40:21 MUT 2013
 *
 * NOTE: this file is machine-generated by aparse from the grammar; do not
 * hand-edit the parsing logic — regenerate it from the grammar instead.
 *
 * -----------------------------------------------------------------------------
 */

package com.litecoding.smali2java.parser.cmd;

import java.util.ArrayList;

import com.litecoding.smali2java.builder.Visitor;
import com.litecoding.smali2java.parser.ParserContext;
import com.litecoding.smali2java.parser.Rule;
import com.litecoding.smali2java.parser.Terminal_StringValue;
import com.litecoding.smali2java.parser.smali.Rule_codeRegister;
import com.litecoding.smali2java.parser.smali.Rule_commentSequence;
import com.litecoding.smali2java.parser.smali.Rule_optPadding;
import com.litecoding.smali2java.parser.smali.Rule_padding;
import com.litecoding.smali2java.parser.text.Rule_CRLF;

/**
 * Grammar rule for a smali "monitor-enter" instruction line:
 * optPadding "monitor-enter" padding codeRegister optPadding
 * [padding commentSequence] CRLF.
 */
final public class Rule_cmdMonitorEnter extends Rule {
    private Rule_cmdMonitorEnter(String spelling, ArrayList<Rule> rules) {
        super(spelling, rules);
    }

    public Object accept(Visitor visitor) {
        return visitor.visit(this);
    }

    /**
     * Attempts to parse a cmdMonitorEnter rule at the current context index.
     * Returns the matched rule, or null after restoring the index on failure.
     */
    public static Rule_cmdMonitorEnter parse(ParserContext context) {
        context.push("cmdMonitorEnter");

        boolean parsed = true;
        int s0 = context.index;
        ArrayList<Rule> e0 = new ArrayList<Rule>();
        Rule rule;

        parsed = false;
        if (!parsed) {
            {
                ArrayList<Rule> e1 = new ArrayList<Rule>();
                int s1 = context.index;
                parsed = true;
                // optPadding (exactly once)
                if (parsed) {
                    boolean f1 = true;
                    int c1 = 0;
                    for (int i1 = 0; i1 < 1 && f1; i1++) {
                        rule = Rule_optPadding.parse(context);
                        if ((f1 = rule != null)) {
                            e1.add(rule);
                            c1++;
                        }
                    }
                    parsed = c1 == 1;
                }
                // literal "monitor-enter" (exactly once)
                if (parsed) {
                    boolean f1 = true;
                    int c1 = 0;
                    for (int i1 = 0; i1 < 1 && f1; i1++) {
                        rule = Terminal_StringValue.parse(context, "monitor-enter");
                        if ((f1 = rule != null)) {
                            e1.add(rule);
                            c1++;
                        }
                    }
                    parsed = c1 == 1;
                }
                // padding (exactly once)
                if (parsed) {
                    boolean f1 = true;
                    int c1 = 0;
                    for (int i1 = 0; i1 < 1 && f1; i1++) {
                        rule = Rule_padding.parse(context);
                        if ((f1 = rule != null)) {
                            e1.add(rule);
                            c1++;
                        }
                    }
                    parsed = c1 == 1;
                }
                // codeRegister (exactly once)
                if (parsed) {
                    boolean f1 = true;
                    int c1 = 0;
                    for (int i1 = 0; i1 < 1 && f1; i1++) {
                        rule = Rule_codeRegister.parse(context);
                        if ((f1 = rule != null)) {
                            e1.add(rule);
                            c1++;
                        }
                    }
                    parsed = c1 == 1;
                }
                // optPadding (exactly once)
                if (parsed) {
                    boolean f1 = true;
                    int c1 = 0;
                    for (int i1 = 0; i1 < 1 && f1; i1++) {
                        rule = Rule_optPadding.parse(context);
                        if ((f1 = rule != null)) {
                            e1.add(rule);
                            c1++;
                        }
                    }
                    parsed = c1 == 1;
                }
                // optional trailing comment: [padding commentSequence]
                // (0..1 occurrences — parsed is forced back to true below)
                if (parsed) {
                    boolean f1 = true;
                    @SuppressWarnings("unused")
                    int c1 = 0;
                    for (int i1 = 0; i1 < 1 && f1; i1++) {
                        int g1 = context.index;
                        parsed = false;
                        if (!parsed) {
                            {
                                ArrayList<Rule> e2 = new ArrayList<Rule>();
                                int s2 = context.index;
                                parsed = true;
                                if (parsed) {
                                    boolean f2 = true;
                                    int c2 = 0;
                                    for (int i2 = 0; i2 < 1 && f2; i2++) {
                                        rule = Rule_padding.parse(context);
                                        if ((f2 = rule != null)) {
                                            e2.add(rule);
                                            c2++;
                                        }
                                    }
                                    parsed = c2 == 1;
                                }
                                if (parsed) {
                                    boolean f2 = true;
                                    int c2 = 0;
                                    for (int i2 = 0; i2 < 1 && f2; i2++) {
                                        rule = Rule_commentSequence.parse(context);
                                        if ((f2 = rule != null)) {
                                            e2.add(rule);
                                            c2++;
                                        }
                                    }
                                    parsed = c2 == 1;
                                }
                                if (parsed) e1.addAll(e2);
                                else context.index = s2;
                            }
                        }
                        f1 = context.index > g1;
                        if (parsed) c1++;
                    }
                    parsed = true;
                }
                // line terminator (exactly once)
                if (parsed) {
                    boolean f1 = true;
                    int c1 = 0;
                    for (int i1 = 0; i1 < 1 && f1; i1++) {
                        rule = Rule_CRLF.parse(context);
                        if ((f1 = rule != null)) {
                            e1.add(rule);
                            c1++;
                        }
                    }
                    parsed = c1 == 1;
                }
                if (parsed) e0.addAll(e1);
                else context.index = s1;
            }
        }

        // On success wrap the consumed text; on failure rewind to the start.
        rule = null;
        if (parsed) rule = new Rule_cmdMonitorEnter(context.text.substring(s0, context.index), e0);
        else context.index = s0;

        context.pop("cmdMonitorEnter", parsed);
        return (Rule_cmdMonitorEnter)rule;
    }
}

/* -----------------------------------------------------------------------------
 * eof
 * -----------------------------------------------------------------------------
 */
Crysty-Yui/smali2java
src/main/java/com/litecoding/smali2java/parser/cmd/Rule_cmdMonitorEnter.java
Java
apache-2.0
5,903
package integration

import (
	"testing"

	kapi "k8s.io/kubernetes/pkg/apis/core"

	testutil "github.com/openshift/origin/test/util"
	testserver "github.com/openshift/origin/test/util/server"
)

// TestAlwaysPullImagesOn verifies that with the AlwaysPullImages admission
// plugin enabled, a pod created with ImagePullPolicy PullNever is mutated to
// PullAlways by the API server.
func TestAlwaysPullImagesOn(t *testing.T) {
	masterConfig, err := testserver.DefaultMasterOptions()
	if err != nil {
		t.Fatalf("error creating config: %v", err)
	}
	defer testserver.CleanupMasterEtcd(t, masterConfig)
	// Enable the admission plugin under test on top of the default set.
	masterConfig.KubernetesMasterConfig.APIServerArguments["enable-admission-plugins"] = append(
		masterConfig.KubernetesMasterConfig.APIServerArguments["enable-admission-plugins"],
		"AlwaysPullImages")
	kubeConfigFile, err := testserver.StartConfiguredMaster(masterConfig)
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}

	kubeClientset, err := testutil.GetClusterAdminKubeInternalClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}

	ns := &kapi.Namespace{}
	ns.Name = testutil.Namespace()
	_, err = kubeClientset.Core().Namespaces().Create(ns)
	if err != nil {
		t.Fatalf("error creating namespace: %v", err)
	}
	// Pod creation requires the default service accounts to exist.
	if err := testserver.WaitForPodCreationServiceAccounts(kubeClientset, testutil.Namespace()); err != nil {
		t.Fatalf("error getting client config: %v", err)
	}

	testPod := &kapi.Pod{}
	testPod.GenerateName = "test"
	testPod.Spec.Containers = []kapi.Container{
		{
			Name:            "container",
			Image:           "openshift/origin-pod:notlatest",
			ImagePullPolicy: kapi.PullNever,
		},
	}

	actualPod, err := kubeClientset.Core().Pods(testutil.Namespace()).Create(testPod)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Admission should have rewritten the pull policy.
	if actualPod.Spec.Containers[0].ImagePullPolicy != kapi.PullAlways {
		t.Errorf("expected %v, got %v", kapi.PullAlways, actualPod.Spec.Containers[0].ImagePullPolicy)
	}
}

// TestAlwaysPullImagesOff is the control case: with the default master (plugin
// not enabled) the pod's PullNever policy must be left untouched.
// NOTE(review): the namespace/pod setup duplicates TestAlwaysPullImagesOn;
// extracting a shared helper would need the concrete client type, which is not
// visible here — consider it when touching this file with full context.
func TestAlwaysPullImagesOff(t *testing.T) {
	masterConfig, kubeConfigFile, err := testserver.StartTestMaster()
	if err != nil {
		t.Fatalf("error starting server: %v", err)
	}
	defer testserver.CleanupMasterEtcd(t, masterConfig)

	kubeClientset, err := testutil.GetClusterAdminKubeInternalClient(kubeConfigFile)
	if err != nil {
		t.Fatalf("error getting client: %v", err)
	}

	ns := &kapi.Namespace{}
	ns.Name = testutil.Namespace()
	_, err = kubeClientset.Core().Namespaces().Create(ns)
	if err != nil {
		t.Fatalf("error creating namespace: %v", err)
	}
	if err := testserver.WaitForPodCreationServiceAccounts(kubeClientset, testutil.Namespace()); err != nil {
		t.Fatalf("error getting client config: %v", err)
	}

	testPod := &kapi.Pod{}
	testPod.GenerateName = "test"
	testPod.Spec.Containers = []kapi.Container{
		{
			Name:            "container",
			Image:           "openshift/origin-pod:notlatest",
			ImagePullPolicy: kapi.PullNever,
		},
	}

	actualPod, err := kubeClientset.Core().Pods(testutil.Namespace()).Create(testPod)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if actualPod.Spec.Containers[0].ImagePullPolicy != kapi.PullNever {
		t.Errorf("expected %v, got %v", kapi.PullNever, actualPod.Spec.Containers[0].ImagePullPolicy)
	}
}
linzhaoming/origin
test/integration/admissionconfig_test.go
GO
apache-2.0
3,061
using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Web.UI;
using System.Web.UI.WebControls;
using IbaMonitoring;
using safnet.iba.Business.AppFacades;
using System.Data;
using safnet.iba.Business.Entities;
using safnet.iba.Static;
using safnet.iba.Business.DataTransferObjects;
using safnet.iba.Web;

/// <summary>
/// Code-behind for the site results page: shows a species-by-week pivot table
/// and supplemental observations for one site and a selected year.
/// </summary>
public partial class SiteResults : IbaPage
{
    // Site whose results are displayed; parsed from the "siteId" query string.
    private Guid siteId;

    protected void Page_Load(object sender, EventArgs e)
    {
        IbaMasterPage.ExceptionHandler((IbaMasterPage)Master, () =>
        {
            addHandlers();
            GetSiteId();
            if (!Page.IsPostBack)
            {
                // First load: build the pivot table for the selected year and
                // bind the site header.
                this.State.SiteResultsTable = ResultsFacade.SpeciesSitePivotTable(siteId, int.Parse(this.State.SelectedYear));
                bindSite(siteId);
                //bindMap(siteId);
            }
            // Grid columns are dynamic, so the grid is (re)bound on every request.
            bindSiteTable();
        });
    }

    /// <summary>
    /// Parses the siteId query-string value; falls back to Guid.Empty when it
    /// is missing, malformed, or not a known site.
    /// </summary>
    private void GetSiteId()
    {
        string siteIdString = @Request.QueryString["siteId"];
        if (!Guid.TryParse(siteIdString, out siteId) || !GlobalMap.SiteList.Exists(x => x.Id.Equals(siteId)))
        {
            siteId = Guid.Empty;
        }
    }

    // Wires all control event handlers; called on every page load.
    private void addHandlers()
    {
        this.AvailableYears.ErrorOccurred += new EventHandler<ErrorEventArgs>(AvailableYears_ErrorOccurred);
        this.AvailableYears.YearChanged += new EventHandler(AvailableYears_YearChanged);
        this.SupplementalDataSource.Selecting += new ObjectDataSourceSelectingEventHandler(SupplementalDataSource_Selecting);
        this.SupplementalDataSource.Selected += new ObjectDataSourceStatusEventHandler(SupplementalDataSource_Selected);
        this.SpeciesWeekHistogramGrid.RowDataBound += new GridViewRowEventHandler(SpeciesWeekHistogramGrid_RowDataBound);
    }

    // Rebuilds the supplemental list and pivot table when the year changes.
    private void AvailableYears_YearChanged(object sender, EventArgs e)
    {
        IbaMasterPage.ExceptionHandler((IbaMasterPage)Master, () =>
        {
            //SupplementalDataSource.Select();
            SupplementalRepeater.DataBind();

            this.State.SiteResultsTable = ResultsFacade.SpeciesSitePivotTable(siteId, int.Parse(this.State.SelectedYear));
            bindSiteTable();
        });
    }

    // Re-raises year-control errors inside the master page's handler so they
    // are reported consistently.
    private void AvailableYears_ErrorOccurred(object sender, ErrorEventArgs e)
    {
        IbaMasterPage.ExceptionHandler((IbaMasterPage)Master, () =>
        {
            throw e.Exception;
        });
    }

    // Colors each data cell, skipping the trailing Grand Total column.
    // NOTE(review): cssClass is always empty here — confirm AssignCellColor
    // derives the class itself, otherwise this parameter looks vestigial.
    private void SpeciesWeekHistogramGrid_RowDataBound(object sender, GridViewRowEventArgs e)
    {
        if (e.Row.RowType == DataControlRowType.DataRow)
        {
            string cssClass = string.Empty;
            foreach (TableCell cell in e.Row.Cells)
            {
                if (e.Row.Cells.GetCellIndex(cell) == e.Row.Cells.Count - 1)
                {
                    break; // this is the Grand Total cell and it should be skipped
                }
                base.AssignCellColor(cssClass, cell);
            }
        }
    }

    // Shows the "no supplementals" message when the data source returns nothing.
    private void SupplementalDataSource_Selected(object sender, ObjectDataSourceStatusEventArgs e)
    {
        IbaMasterPage.ExceptionHandler((IbaMasterPage)Master, () =>
        {
            if (e.ReturnValue == null)
            {
                this.NoSupplementals.Visible = true;
            }
        });
    }

    // Supplies the siteId/year parameters to the supplemental data source.
    private void SupplementalDataSource_Selecting(object sender, ObjectDataSourceSelectingEventArgs e)
    {
        IbaMasterPage.ExceptionHandler((IbaMasterPage)Master, () =>
        {
            e.InputParameters.Add("siteId", siteId.ToString());
            e.InputParameters.Add("year", int.Parse(this.State.SelectedYear));
        });
    }

    // Binds the site's display name from the global site list.
    protected void bindSite(Guid siteId)
    {
        Site sp = GlobalMap.SiteList.Single(x => x.Id.Equals(siteId));
        this.SiteName.Text = sp.Name;
    }

    /// <summary>
    /// Rebuilds the histogram grid columns from the pivot table: week columns
    /// in the middle, "Species Name" first and "Grand Total" last, then binds.
    /// </summary>
    protected void bindSiteTable()
    {
        SpeciesWeekHistogramGrid.Columns.Clear();

        foreach (DataColumn column in this.State.SiteResultsTable.Columns)
        {
            if (column.ColumnName != "Species Name" && column.ColumnName != "Grand Total")
            {
                BoundField newField = new BoundField()
                {
                    AccessibleHeaderText = column.ColumnName,
                    DataField = column.ColumnName,
                    SortExpression = column.ColumnName,
                    ShowHeader = true,
                    HeaderText = column.ColumnName
                };
                SpeciesWeekHistogramGrid.Columns.Add(newField);
            }
        }

        // Species Name needs to be the first column, and Grand Total as the last column
        SpeciesWeekHistogramGrid.Columns.Insert(0, new BoundField()
        {
            AccessibleHeaderText = "Species Name",
            DataField = "Species Name",
            SortExpression = "Species Name",
            ShowHeader = true,
            HeaderText = "Species Name"
        });
        SpeciesWeekHistogramGrid.Columns.Add(new BoundField()
        {
            AccessibleHeaderText = "Grand Total",
            DataField = "Grand Total",
            SortExpression = "Grand Total",
            ShowHeader = true,
            HeaderText = "Grand Total"
        });

        SpeciesWeekHistogramGrid.DataSource = this.State.SiteResultsTable;
        SpeciesWeekHistogramGrid.DataBind();
    }

    // NOTE(review): appears unused within this file — possibly referenced by
    // the designer/markup; verify before removing.
    private SortedDictionary<string, int> _chartValues = new SortedDictionary<string, int>();
}
stephenfuqua/IbaMonitoring
IbaMonitoring/results/Site.aspx.cs
C#
apache-2.0
5,537
/*- * Copyright (C) 2013-2014 The JBromo Authors. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ package org.jbromo.dao.common; import java.io.Serializable; /** * Define a DAO interface. * @author qjafcunuas */ public interface IDao extends Serializable { }
qjafcunuas/jbromo
jbromo-dao/jbromo-dao-lib/src/main/java/org/jbromo/dao/common/IDao.java
Java
apache-2.0
1,297
// defaultable require() wrapper tests // // Copyright 2011 Jason Smith, Jarrett Cruger and contributors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. var test = require('tap').test , defaultable = require('../defaultable') , D = defaultable ; var m0dule = module; test('require function', function(t) { var _require = require; defaultable({}, function(mod, exp, DEFS, require) { t.ok(require, 'Defaultable provides a require paramenter') t.type(require, 'function', 'provided require is a function') t.equal(require.length, _require.length, 'Proved require() has the correct arity') t.equal(require.name, _require.name, 'Provided require() is named correctly') t.isNot(require, _require, 'Provided require() is not the normal require()') }) t.end(); }) test('Exporting required modules', function(t) { t.doesNotThrow(go, 'No problems with a defaultable re-exporting another defaultable'); function go() { var mod; mod = require('./mod/defaultable_reexporter'); t.type(mod.defaults, 'function', 'Re-exported defaults exists') t.ok(mod.defaults._defaultable, 'Re-exporteed .defaults are mine') t.equal(mod.defaultable_example, 'Defaultable dependency example', 'Re-exported defaults works') mod = mod.defaults({'value': 'New value'}) t.type(mod.defaults, 'function', 'Re-exported re-defaulted defaults exists') t.ok(mod.defaults._defaultable, 'Re-exporteed re-defaulted .defaults are mine') t.equal(mod.defaultable_example, 'New value', 'Re-exported defaults override works') } t.end(); }) 
test('requiring defaultable modules passes defaults to them', function(t) { function i_require_stuff(_mod, exps, _DEF, require) { exps.is = require('./mod/is_defaultable'); exps.is_not = require('./mod/is_not_defaultable'); exps.legacy = require('./mod/legacy_defaults'); exps.fresh = require('./mod/fresh_defaultable'); } var mod; var defs = { 'should': 'first' }; t.doesNotThrow(function() { mod = D(m0dule, defs, i_require_stuff) }, 'Defaultable and non-defaultable modules are usable') check_mod('first'); mod = mod.defaults({should:'second'}); check_mod('second'); mod = mod.defaults({should:'third'}); check_mod('third'); t.end(); function check_mod(should_val) { t.type(mod.legacy.defaults, 'function', 'Legacy modules can export .defaults()') t.notOk(mod.legacy.defaults._defaultable, 'Legacy modules .defaults are not mine') t.throws(mod.legacy.defaults, 'Legacy .defaults() function runs like it always has') t.type(mod.is_not.get, 'function', 'Normal modules still export normally') t.equal(mod.is_not.get(), 'normal', 'Normal modules export normal stuff') t.notOk(mod.is_not.defaults, 'Normal modules do not have a defaults() function') t.equal(Object.keys(mod.is_not).length, 2, 'Normal modules export the same exact stuff') t.notOk(mod.is_not.req._defaultable, 'Normal modules require is not special') t.type(mod.is.get, 'function', 'Defaultable modules export normally') t.equal(mod.is.get('original'), 'value', 'Defaultable module still has its defaults') t.equal(mod.is.get('should'), should_val, 'Defaultable module inherits defaults with require() ' + should_val) t.type(mod.is.defaults, 'function', 'Defaultable modules still have defaults() functions') t.ok(mod.is.defaults._defaultable, 'Defaultable modules default() functions are recognizable') t.equal(Object.keys(mod.is).length, 3+1, 'Defaultable modules export the same stuff, plus defaults()') t.ok(mod.is.req._defaultable, 'Defaultable modules get the special require') t.equal(mod.is.dep(), 'Example dependency', 
'Defaultable module can require stuff from node_modules/') t.type(mod.fresh.get, 'function', 'Fresh defaultable module still exports normally') t.type(mod.fresh.defaults, 'function', 'Fresh defaultable module still has defaults() function') t.ok(mod.fresh.defaults._defaultable, 'Fresh defautlable module defaults() is recognizable') t.equal(mod.fresh.get('should'), 'always fresh', 'Fresh defaultable module defauts not changed by require') var fresh2 = mod.fresh.defaults({'should':should_val}); t.equal(fresh2.get('should'), should_val, 'Fresh defaultable module can set defaults normally') } })
nodejitsu/defaultable
t/require.js
JavaScript
apache-2.0
4,847
<html> <body bgcolor="white" text="black" link="#666699" alink=#000000 vlink="#333333"> <title>Condor Warnings Graph Generator</title> <center><h2>Condor Warnings Graph Generator</h2> <?php # TODO - clean these up and provide a config $base_dir = "/home/cndrauto/warnings"; $branch_data_file = "$base_dir/data/branches"; $platforms_data_file = "$base_dir/data/platforms"; $platforms[] = ""; $fr = fopen ($platforms_data_file, "r"); if ($fr){ while ($results = fgets($fr, 4096)){ $platforms = split(" ", $results); } } fclose($fr); $branches[] = ""; $fr = fopen ($branch_data_file, "r"); if ($fr){ while ($results = fgets($fr, 4096)){ $branches = split(" ", $results); } } fclose($fr); ?> <br><br> <form method="get" action="condor-warnings-graph.php"> <table width=600 border=0 bgcolor=white> <tr bgcolor=white width=600> <!-- Branch Area --> <td width=200 align=center> <SELECT name="branch"> <?php foreach($branches as $branch) { echo "<OPTION VALUE=$branch> $branch </OPTION>"; } ?> </SELECT> Branch </td> <!-- Platform Area --> <td width=200 align=center> <SELECT name="platform"> <?php foreach($platforms as $platform) { echo "<OPTION VALUE=$platform> $platform </OPTION>"; } echo "<OPTION VALUE=all SELECTED > all </OPTION>"; echo "<OPTION VALUE=total> total </OPTION>"; echo "<OPTION VALUE=all,total> all,total </OPTION>"; ?> </SELECT> Platform </td> <!-- Warning Type Area --> <td width=200 align=center> <SELECT name="warning_type"> <?php echo "<OPTION VALUE=total> total </OPTION>"; echo "<OPTION VALUE=unique> unique </OPTION>"; ?> </SELECT> Warning Type </td> </tr> </table> <br> <input type="submit" name="search" value="Submit Search"></input> </form> <hr> </body> </html>
clalancette/condor-dcloud
nmi_tools/www/results/condor-warnings.php
PHP
apache-2.0
1,876
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handles all processes relating to instances (guest vms). The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that handles RPC calls relating to creating instances. It is responsible for building a disk image, launching it via the underlying virtualization driver, responding to calls to check its state, attaching persistent storage, and terminating it. 
""" import base64 import contextlib import functools import socket import sys import time import traceback import uuid from cinderclient import exceptions as cinder_exception import eventlet.event from eventlet import greenthread import eventlet.semaphore import eventlet.timeout from keystoneclient import exceptions as keystone_exception from oslo_config import cfg import oslo_messaging as messaging from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import strutils from oslo_utils import timeutils import six from nova import block_device from nova.cells import rpcapi as cells_rpcapi from nova.cloudpipe import pipelib from nova import compute from nova.compute import build_results from nova.compute import power_state from nova.compute import resource_tracker from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states from nova import conductor from nova import consoleauth import nova.context from nova import exception from nova import hooks from nova.i18n import _ from nova.i18n import _LE from nova.i18n import _LI from nova.i18n import _LW from nova import image from nova.image import glance from nova import manager from nova import network from nova.network import model as network_model from nova.network.security_group import openstack_driver from nova import objects from nova.objects import base as obj_base from nova.openstack.common import log as logging from nova.openstack.common import periodic_task from nova import paths from nova import rpc from nova import safe_utils from nova.scheduler import rpcapi as scheduler_rpcapi from nova import utils from nova.virt import block_device as driver_block_device from nova.virt import driver from nova.virt import event as virtevent from nova.virt import storage_users from nova.virt import virtapi from nova import volume from nova.volume import encryptors compute_opts = [ 
cfg.StrOpt('console_host', default=socket.gethostname(), help='Console proxy host to use to connect ' 'to instances on this host.'), cfg.StrOpt('default_access_ip_network_name', help='Name of network to use to set access IPs for instances'), cfg.BoolOpt('defer_iptables_apply', default=False, help='Whether to batch up the application of IPTables rules' ' during a host restart and apply all at the end of the' ' init phase'), cfg.StrOpt('instances_path', default=paths.state_path_def('instances'), help='Where instances are stored on disk'), cfg.BoolOpt('instance_usage_audit', default=False, help="Generate periodic compute.instance.exists" " notifications"), cfg.IntOpt('live_migration_retry_count', default=30, help="Number of 1 second retries needed in live_migration"), cfg.BoolOpt('resume_guests_state_on_host_boot', default=False, help='Whether to start guests that were running before the ' 'host rebooted'), cfg.IntOpt('network_allocate_retries', default=0, help="Number of times to retry network allocation on failures"), cfg.IntOpt('max_concurrent_builds', default=10, help='Maximum number of instance builds to run concurrently'), cfg.IntOpt('block_device_allocate_retries', default=60, help='Number of times to retry block device' ' allocation on failures') ] interval_opts = [ cfg.IntOpt('bandwidth_poll_interval', default=600, help='Interval to pull network bandwidth usage info. Not ' 'supported on all hypervisors. Set to -1 to disable. ' 'Setting this to 0 will run at the default rate.'), cfg.IntOpt('sync_power_state_interval', default=600, help='Interval to sync power states between the database and ' 'the hypervisor. Set to -1 to disable. 
' 'Setting this to 0 will run at the default rate.'), cfg.IntOpt("heal_instance_info_cache_interval", default=60, help="Number of seconds between instance info_cache self " "healing updates"), cfg.IntOpt('reclaim_instance_interval', default=0, help='Interval in seconds for reclaiming deleted instances'), cfg.IntOpt('volume_usage_poll_interval', default=0, help='Interval in seconds for gathering volume usages'), cfg.IntOpt('shelved_poll_interval', default=3600, help='Interval in seconds for polling shelved instances to ' 'offload. Set to -1 to disable.' 'Setting this to 0 will run at the default rate.'), cfg.IntOpt('shelved_offload_time', default=0, help='Time in seconds before a shelved instance is eligible ' 'for removing from a host. -1 never offload, 0 offload ' 'when shelved'), cfg.IntOpt('instance_delete_interval', default=300, help='Interval in seconds for retrying failed instance file ' 'deletes. Set to -1 to disable. ' 'Setting this to 0 will run at the default rate.'), cfg.IntOpt('block_device_allocate_retries_interval', default=3, help='Waiting time interval (seconds) between block' ' device allocation retries on failures') ] timeout_opts = [ cfg.IntOpt("reboot_timeout", default=0, help="Automatically hard reboot an instance if it has been " "stuck in a rebooting state longer than N seconds. " "Set to 0 to disable."), cfg.IntOpt("instance_build_timeout", default=0, help="Amount of time in seconds an instance can be in BUILD " "before going into ERROR status. " "Set to 0 to disable."), cfg.IntOpt("rescue_timeout", default=0, help="Automatically unrescue an instance after N seconds. " "Set to 0 to disable."), cfg.IntOpt("resize_confirm_window", default=0, help="Automatically confirm resizes after N seconds. 
" "Set to 0 to disable."), cfg.IntOpt("shutdown_timeout", default=60, help="Total amount of time to wait in seconds for an instance " "to perform a clean shutdown."), ] running_deleted_opts = [ cfg.StrOpt("running_deleted_instance_action", default="reap", help="Action to take if a running deleted instance is detected." " Valid options are 'noop', 'log', 'shutdown', or 'reap'. " "Set to 'noop' to take no action."), cfg.IntOpt("running_deleted_instance_poll_interval", default=1800, help="Number of seconds to wait between runs of the cleanup " "task."), cfg.IntOpt("running_deleted_instance_timeout", default=0, help="Number of seconds after being deleted when a running " "instance should be considered eligible for cleanup."), ] instance_cleaning_opts = [ cfg.IntOpt('maximum_instance_delete_attempts', default=5, help='The number of times to attempt to reap an instance\'s ' 'files.'), ] CONF = cfg.CONF CONF.register_opts(compute_opts) CONF.register_opts(interval_opts) CONF.register_opts(timeout_opts) CONF.register_opts(running_deleted_opts) CONF.register_opts(instance_cleaning_opts) CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api') CONF.import_opt('console_topic', 'nova.console.rpcapi') CONF.import_opt('host', 'nova.netconf') CONF.import_opt('my_ip', 'nova.netconf') CONF.import_opt('vnc_enabled', 'nova.vnc') CONF.import_opt('enabled', 'nova.spice', group='spice') CONF.import_opt('enable', 'nova.cells.opts', group='cells') CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache') CONF.import_opt('image_cache_manager_interval', 'nova.virt.imagecache') CONF.import_opt('enabled', 'nova.rdp', group='rdp') CONF.import_opt('html5_proxy_base_url', 'nova.rdp', group='rdp') CONF.import_opt('enabled', 'nova.console.serial', group='serial_console') CONF.import_opt('base_url', 'nova.console.serial', group='serial_console') LOG = logging.getLogger(__name__) get_notifier = functools.partial(rpc.get_notifier, service='compute') wrap_exception = 
functools.partial(exception.wrap_exception, get_notifier=get_notifier) @utils.expects_func_args('migration') def errors_out_migration(function): """Decorator to error out migration on failure.""" @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): try: return function(self, context, *args, **kwargs) except Exception: with excutils.save_and_reraise_exception(): migration = kwargs['migration'] status = migration.status if status not in ['migrating', 'post-migrating']: return migration.status = 'error' try: with migration.obj_as_admin(): migration.save() except Exception: LOG.debug('Error setting migration status ' 'for instance %s.', migration.instance_uuid, exc_info=True) return decorated_function @utils.expects_func_args('instance') def reverts_task_state(function): """Decorator to revert task_state on failure.""" @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): try: return function(self, context, *args, **kwargs) except exception.UnexpectedTaskStateError as e: # Note(maoy): unexpected task state means the current # task is preempted. Do not clear task state in this # case. with excutils.save_and_reraise_exception(): LOG.info(_LI("Task possibly preempted: %s"), e.format_message()) except Exception: with excutils.save_and_reraise_exception(): try: self._instance_update(context, kwargs['instance']['uuid'], task_state=None) except Exception: pass return decorated_function @utils.expects_func_args('instance') def wrap_instance_fault(function): """Wraps a method to catch exceptions related to instances. This decorator wraps a method to catch any exceptions having to do with an instance that may get thrown. It then logs an instance fault in the db. 
""" @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): try: return function(self, context, *args, **kwargs) except exception.InstanceNotFound: raise except Exception as e: # NOTE(gtt): If argument 'instance' is in args rather than kwargs, # we will get a KeyError exception which will cover up the real # exception. So, we update kwargs with the values from args first. # then, we can get 'instance' from kwargs easily. kwargs.update(dict(zip(function.func_code.co_varnames[2:], args))) with excutils.save_and_reraise_exception(): compute_utils.add_instance_fault_from_exc(context, kwargs['instance'], e, sys.exc_info()) return decorated_function @utils.expects_func_args('instance') def wrap_instance_event(function): """Wraps a method to log the event taken on the instance, and result. This decorator wraps a method to log the start and result of an event, as part of an action taken on an instance. """ @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): wrapped_func = utils.get_wrapped_function(function) keyed_args = safe_utils.getcallargs(wrapped_func, context, *args, **kwargs) instance_uuid = keyed_args['instance']['uuid'] event_name = 'compute_{0}'.format(function.func_name) with compute_utils.EventReporter(context, event_name, instance_uuid): return function(self, context, *args, **kwargs) return decorated_function @utils.expects_func_args('image_id', 'instance') def delete_image_on_error(function): """Used for snapshot related method to ensure the image created in compute.api is deleted when an error occurs. 
""" @functools.wraps(function) def decorated_function(self, context, image_id, instance, *args, **kwargs): try: return function(self, context, image_id, instance, *args, **kwargs) except Exception: with excutils.save_and_reraise_exception(): LOG.debug("Cleaning up image %s", image_id, exc_info=True, instance=instance) try: self.image_api.delete(context, image_id) except Exception: LOG.exception(_LE("Error while trying to clean up " "image %s"), image_id, instance=instance) return decorated_function # TODO(danms): Remove me after Icehouse # NOTE(mikal): if the method being decorated has more than one decorator, then # put this one first. Otherwise the various exception handling decorators do # not function correctly. def object_compat(function): """Wraps a method that expects a new-world instance This provides compatibility for callers passing old-style dict instances. """ @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): def _load_instance(instance_or_dict): if isinstance(instance_or_dict, dict): instance = objects.Instance._from_db_object( context, objects.Instance(), instance_or_dict, expected_attrs=metas) instance._context = context return instance return instance_or_dict metas = ['metadata', 'system_metadata'] try: kwargs['instance'] = _load_instance(kwargs['instance']) except KeyError: args = (_load_instance(args[0]),) + args[1:] migration = kwargs.get('migration') if isinstance(migration, dict): migration = objects.Migration._from_db_object( context.elevated(), objects.Migration(), migration) kwargs['migration'] = migration return function(self, context, *args, **kwargs) return decorated_function # TODO(danms): Remove me after Icehouse def aggregate_object_compat(function): """Wraps a method that expects a new-world aggregate.""" @functools.wraps(function) def decorated_function(self, context, *args, **kwargs): aggregate = kwargs.get('aggregate') if isinstance(aggregate, dict): aggregate = objects.Aggregate._from_db_object( 
context.elevated(), objects.Aggregate(), aggregate) kwargs['aggregate'] = aggregate return function(self, context, *args, **kwargs) return decorated_function class InstanceEvents(object): def __init__(self): self._events = {} @staticmethod def _lock_name(instance): return '%s-%s' % (instance.uuid, 'events') def prepare_for_instance_event(self, instance, event_name): """Prepare to receive an event for an instance. This will register an event for the given instance that we will wait on later. This should be called before initiating whatever action will trigger the event. The resulting eventlet.event.Event object should be wait()'d on to ensure completion. :param instance: the instance for which the event will be generated :param event_name: the name of the event we're expecting :returns: an event object that should be wait()'d on """ @utils.synchronized(self._lock_name(instance)) def _create_or_get_event(): if instance.uuid not in self._events: self._events.setdefault(instance.uuid, {}) return self._events[instance.uuid].setdefault( event_name, eventlet.event.Event()) LOG.debug('Preparing to wait for external event %(event)s', {'event': event_name}, instance=instance) return _create_or_get_event() def pop_instance_event(self, instance, event): """Remove a pending event from the wait list. This will remove a pending event from the wait list so that it can be used to signal the waiters to wake up. 
:param instance: the instance for which the event was generated :param event: the nova.objects.external_event.InstanceExternalEvent that describes the event :returns: the eventlet.event.Event object on which the waiters are blocked """ no_events_sentinel = object() no_matching_event_sentinel = object() @utils.synchronized(self._lock_name(instance)) def _pop_event(): events = self._events.get(instance.uuid) if not events: return no_events_sentinel _event = events.pop(event.key, None) if not events: del self._events[instance.uuid] if _event is None: return no_matching_event_sentinel return _event result = _pop_event() if result == no_events_sentinel: LOG.debug('No waiting events found dispatching %(event)s', {'event': event.key}, instance=instance) return None elif result == no_matching_event_sentinel: LOG.debug('No event matching %(event)s in %(events)s', {'event': event.key, 'events': self._events.get(instance.uuid, {}).keys()}, instance=instance) return None else: return result def clear_events_for_instance(self, instance): """Remove all pending events for an instance. This will remove all events currently pending for an instance and return them (indexed by event name). 
:param instance: the instance for which events should be purged :returns: a dictionary of {event_name: eventlet.event.Event} """ @utils.synchronized(self._lock_name(instance)) def _clear_events(): # NOTE(danms): Use getitem syntax for the instance until # all the callers are using objects return self._events.pop(instance['uuid'], {}) return _clear_events() class ComputeVirtAPI(virtapi.VirtAPI): def __init__(self, compute): super(ComputeVirtAPI, self).__init__() self._compute = compute def provider_fw_rule_get_all(self, context): return self._compute.conductor_api.provider_fw_rule_get_all(context) def _default_error_callback(self, event_name, instance): raise exception.NovaException(_('Instance event failed')) @contextlib.contextmanager def wait_for_instance_event(self, instance, event_names, deadline=300, error_callback=None): """Plan to wait for some events, run some code, then wait. This context manager will first create plans to wait for the provided event_names, yield, and then wait for all the scheduled events to complete. Note that this uses an eventlet.timeout.Timeout to bound the operation, so callers should be prepared to catch that failure and handle that situation appropriately. If the event is not received by the specified timeout deadline, eventlet.timeout.Timeout is raised. If the event is received but did not have a 'completed' status, a NovaException is raised. If an error_callback is provided, instead of raising an exception as detailed above for the failure case, the callback will be called with the event_name and instance, and can return True to continue waiting for the rest of the events, False to stop processing, or raise an exception which will bubble up to the waiter. :param instance: The instance for which an event is expected :param event_names: A list of event names. Each element can be a string event name or tuple of strings to indicate (name, tag). 
:param deadline: Maximum number of seconds we should wait for all of the specified events to arrive. :param error_callback: A function to be called if an event arrives """ if error_callback is None: error_callback = self._default_error_callback events = {} for event_name in event_names: if isinstance(event_name, tuple): name, tag = event_name event_name = objects.InstanceExternalEvent.make_key( name, tag) events[event_name] = ( self._compute.instance_events.prepare_for_instance_event( instance, event_name)) yield with eventlet.timeout.Timeout(deadline): for event_name, event in events.items(): actual_event = event.wait() if actual_event.status == 'completed': continue decision = error_callback(event_name, instance) if decision is False: break class ComputeManager(manager.Manager): """Manages the running instances from creation to destruction.""" target = messaging.Target(version='3.38') # How long to wait in seconds before re-issuing a shutdown # signal to a instance during power off. The overall # time to wait is set by CONF.shutdown_timeout. 
SHUTDOWN_RETRY_INTERVAL = 10 def __init__(self, compute_driver=None, *args, **kwargs): """Load configuration options and connect to the hypervisor.""" self.virtapi = ComputeVirtAPI(self) self.network_api = network.API() self.volume_api = volume.API() self.image_api = image.API() self._last_host_check = 0 self._last_bw_usage_poll = 0 self._bw_usage_supported = True self._last_bw_usage_cell_update = 0 self.compute_api = compute.API() self.compute_rpcapi = compute_rpcapi.ComputeAPI() self.conductor_api = conductor.API() self.compute_task_api = conductor.ComputeTaskAPI() self.is_neutron_security_groups = ( openstack_driver.is_neutron_security_groups()) self.consoleauth_rpcapi = consoleauth.rpcapi.ConsoleAuthAPI() self.cells_rpcapi = cells_rpcapi.CellsAPI() self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self._resource_tracker_dict = {} self.instance_events = InstanceEvents() self._sync_power_pool = eventlet.GreenPool() self._syncs_in_progress = {} if CONF.max_concurrent_builds != 0: self._build_semaphore = eventlet.semaphore.Semaphore( CONF.max_concurrent_builds) else: self._build_semaphore = compute_utils.UnlimitedSemaphore() super(ComputeManager, self).__init__(service_name="compute", *args, **kwargs) # NOTE(russellb) Load the driver last. It may call back into the # compute manager via the virtapi, so we want it to be fully # initialized before that happens. 
self.driver = driver.load_compute_driver(self.virtapi, compute_driver) self.use_legacy_block_device_info = \ self.driver.need_legacy_block_device_info def _get_resource_tracker(self, nodename): rt = self._resource_tracker_dict.get(nodename) if not rt: if not self.driver.node_is_available(nodename): raise exception.NovaException( _("%s is not a valid node managed by this " "compute host.") % nodename) rt = resource_tracker.ResourceTracker(self.host, self.driver, nodename) self._resource_tracker_dict[nodename] = rt return rt def _update_resource_tracker(self, context, instance): """Let the resource tracker know that an instance has changed state.""" if (instance['host'] == self.host and self.driver.node_is_available(instance['node'])): rt = self._get_resource_tracker(instance.get('node')) rt.update_usage(context, instance) def _instance_update(self, context, instance_uuid, **kwargs): """Update an instance in the database using kwargs as value.""" instance_ref = self.conductor_api.instance_update(context, instance_uuid, **kwargs) self._update_resource_tracker(context, instance_ref) return instance_ref def _set_instance_error_state(self, context, instance): instance_uuid = instance['uuid'] try: self._instance_update(context, instance_uuid, vm_state=vm_states.ERROR) except exception.InstanceNotFound: LOG.debug('Instance has been destroyed from under us while ' 'trying to set it to ERROR', instance_uuid=instance_uuid) def _set_instance_obj_error_state(self, context, instance): try: instance.vm_state = vm_states.ERROR instance.save() except exception.InstanceNotFound: LOG.debug('Instance has been destroyed from under us while ' 'trying to set it to ERROR', instance=instance) def _get_instances_on_driver(self, context, filters=None): """Return a list of instance records for the instances found on the hypervisor which satisfy the specified filters. If filters=None return a list of instance records for all the instances found on the hypervisor. 
""" if not filters: filters = {} try: driver_uuids = self.driver.list_instance_uuids() if len(driver_uuids) == 0: # Short circuit, don't waste a DB call return objects.InstanceList() filters['uuid'] = driver_uuids local_instances = objects.InstanceList.get_by_filters( context, filters, use_slave=True) return local_instances except NotImplementedError: pass # The driver doesn't support uuids listing, so we'll have # to brute force. driver_instances = self.driver.list_instances() instances = objects.InstanceList.get_by_filters(context, filters, use_slave=True) name_map = {instance.name: instance for instance in instances} local_instances = [] for driver_instance in driver_instances: instance = name_map.get(driver_instance) if not instance: continue local_instances.append(instance) return local_instances def _destroy_evacuated_instances(self, context): """Destroys evacuated instances. While nova-compute was down, the instances running on it could be evacuated to another host. Check that the instances reported by the driver are still associated with this host. If they are not, destroy them, with the exception of instances which are in the MIGRATING, RESIZE_MIGRATING, RESIZE_MIGRATED, RESIZE_FINISH task state or RESIZED vm state. 
""" our_host = self.host filters = {'deleted': False} local_instances = self._get_instances_on_driver(context, filters) for instance in local_instances: if instance.host != our_host: if (instance.task_state in [task_states.MIGRATING, task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH] or instance.vm_state in [vm_states.RESIZED]): LOG.debug('Will not delete instance as its host (' '%(instance_host)s) is not equal to our ' 'host (%(our_host)s) but its task state is ' '(%(task_state)s) and vm state is ' '(%(vm_state)s)', {'instance_host': instance.host, 'our_host': our_host, 'task_state': instance.task_state, 'vm_state': instance.vm_state}, instance=instance) continue LOG.info(_LI('Deleting instance as its host (' '%(instance_host)s) is not equal to our ' 'host (%(our_host)s).'), {'instance_host': instance.host, 'our_host': our_host}, instance=instance) try: network_info = self._get_instance_nw_info(context, instance) bdi = self._get_instance_block_device_info(context, instance) destroy_disks = not (self._is_instance_storage_shared( context, instance)) except exception.InstanceNotFound: network_info = network_model.NetworkInfo() bdi = {} LOG.info(_LI('Instance has been marked deleted already, ' 'removing it from the hypervisor.'), instance=instance) # always destroy disks if the instance was deleted destroy_disks = True self.driver.destroy(context, instance, network_info, bdi, destroy_disks) def _is_instance_storage_shared(self, context, instance): shared_storage = True data = None try: data = self.driver.check_instance_shared_storage_local(context, instance) if data: shared_storage = (self.compute_rpcapi. 
check_instance_shared_storage(context, instance, data)) except NotImplementedError: LOG.warning(_LW('Hypervisor driver does not support ' 'instance shared storage check, ' 'assuming it\'s not on shared storage'), instance=instance) shared_storage = False except Exception: LOG.exception(_LE('Failed to check if instance shared'), instance=instance) finally: if data: self.driver.check_instance_shared_storage_cleanup(context, data) return shared_storage def _complete_partial_deletion(self, context, instance): """Complete deletion for instances in DELETED status but not marked as deleted in the DB """ instance.destroy() bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) quotas = objects.Quotas(context) project_id, user_id = objects.quotas.ids_from_instance(context, instance) quotas.reserve(context, project_id=project_id, user_id=user_id, instances=-1, cores=-instance.vcpus, ram=-instance.memory_mb) self._complete_deletion(context, instance, bdms, quotas, instance.system_metadata) def _complete_deletion(self, context, instance, bdms, quotas, system_meta): if quotas: quotas.commit() # ensure block device mappings are not leaked for bdm in bdms: bdm.destroy() self._notify_about_instance_usage(context, instance, "delete.end", system_metadata=system_meta) if CONF.vnc_enabled or CONF.spice.enabled: if CONF.cells.enable: self.cells_rpcapi.consoleauth_delete_tokens(context, instance.uuid) else: self.consoleauth_rpcapi.delete_tokens_for_instance(context, instance.uuid) def _init_instance(self, context, instance): '''Initialize this instance during service init.''' # Instances that are shut down, or in an error state can not be # initialized and are not attempted to be recovered. The exception # to this are instances that are in RESIZE_MIGRATING or DELETING, # which are dealt with further down. 
if (instance.vm_state == vm_states.SOFT_DELETED or (instance.vm_state == vm_states.ERROR and instance.task_state not in (task_states.RESIZE_MIGRATING, task_states.DELETING))): LOG.debug("Instance is in %s state.", instance.vm_state, instance=instance) return if instance.vm_state == vm_states.DELETED: try: self._complete_partial_deletion(context, instance) except Exception: # we don't want that an exception blocks the init_host msg = _LE('Failed to complete a deletion') LOG.exception(msg, instance=instance) return if (instance.vm_state == vm_states.BUILDING or instance.task_state in [task_states.SCHEDULING, task_states.BLOCK_DEVICE_MAPPING, task_states.NETWORKING, task_states.SPAWNING]): # NOTE(dave-mcnally) compute stopped before instance was fully # spawned so set to ERROR state. This is safe to do as the state # may be set by the api but the host is not so if we get here the # instance has already been scheduled to this particular host. LOG.debug("Instance failed to spawn correctly, " "setting to ERROR state", instance=instance) instance.task_state = None instance.vm_state = vm_states.ERROR instance.save() return if (instance.vm_state in [vm_states.ACTIVE, vm_states.STOPPED] and instance.task_state in [task_states.REBUILDING, task_states.REBUILD_BLOCK_DEVICE_MAPPING, task_states.REBUILD_SPAWNING]): # NOTE(jichenjc) compute stopped before instance was fully # spawned so set to ERROR state. 
This is consistent to BUILD LOG.debug("Instance failed to rebuild correctly, " "setting to ERROR state", instance=instance) instance.task_state = None instance.vm_state = vm_states.ERROR instance.save() return if (instance.vm_state != vm_states.ERROR and instance.task_state in [task_states.IMAGE_SNAPSHOT_PENDING, task_states.IMAGE_PENDING_UPLOAD, task_states.IMAGE_UPLOADING, task_states.IMAGE_SNAPSHOT]): LOG.debug("Instance in transitional state %s at start-up " "clearing task state", instance.task_state, instance=instance) try: self._post_interrupted_snapshot_cleanup(context, instance) except Exception: # we don't want that an exception blocks the init_host msg = _LE('Failed to cleanup snapshot.') LOG.exception(msg, instance=instance) instance.task_state = None instance.save() if instance.task_state == task_states.DELETING: try: LOG.info(_LI('Service started deleting the instance during ' 'the previous run, but did not finish. Restarting' ' the deletion now.'), instance=instance) instance.obj_load_attr('metadata') instance.obj_load_attr('system_metadata') bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) # FIXME(comstud): This needs fixed. We should be creating # reservations and updating quotas, because quotas # wouldn't have been updated for this instance since it is # still in DELETING. See bug 1296414. # # Create a dummy quota object for now. 
quotas = objects.Quotas.from_reservations( context, None, instance=instance) self._delete_instance(context, instance, bdms, quotas) except Exception: # we don't want that an exception blocks the init_host msg = _LE('Failed to complete a deletion') LOG.exception(msg, instance=instance) self._set_instance_error_state(context, instance) return try_reboot, reboot_type = self._retry_reboot(context, instance) current_power_state = self._get_power_state(context, instance) if try_reboot: LOG.debug("Instance in transitional state (%(task_state)s) at " "start-up and power state is (%(power_state)s), " "triggering reboot", {'task_state': instance.task_state, 'power_state': current_power_state}, instance=instance) self.compute_rpcapi.reboot_instance(context, instance, block_device_info=None, reboot_type=reboot_type) return elif (current_power_state == power_state.RUNNING and instance.task_state in [task_states.REBOOT_STARTED, task_states.REBOOT_STARTED_HARD, task_states.PAUSING, task_states.UNPAUSING]): LOG.warning(_LW("Instance in transitional state " "(%(task_state)s) at start-up and power state " "is (%(power_state)s), clearing task state"), {'task_state': instance.task_state, 'power_state': current_power_state}, instance=instance) instance.task_state = None instance.vm_state = vm_states.ACTIVE instance.save() elif (current_power_state == power_state.PAUSED and instance.task_state == task_states.UNPAUSING): LOG.warning(_LW("Instance in transitional state " "(%(task_state)s) at start-up and power state " "is (%(power_state)s), clearing task state " "and unpausing the instance"), {'task_state': instance.task_state, 'power_state': current_power_state}, instance=instance) try: self.unpause_instance(context, instance) except NotImplementedError: # Some virt driver didn't support pause and unpause pass except Exception: LOG.exception(_LE('Failed to unpause instance'), instance=instance) return if instance.task_state == task_states.POWERING_OFF: try: LOG.debug("Instance in 
transitional state %s at start-up " "retrying stop request", instance.task_state, instance=instance) self.stop_instance(context, instance) except Exception: # we don't want that an exception blocks the init_host msg = _LE('Failed to stop instance') LOG.exception(msg, instance=instance) return if instance.task_state == task_states.POWERING_ON: try: LOG.debug("Instance in transitional state %s at start-up " "retrying start request", instance.task_state, instance=instance) self.start_instance(context, instance) except Exception: # we don't want that an exception blocks the init_host msg = _LE('Failed to start instance') LOG.exception(msg, instance=instance) return net_info = compute_utils.get_nw_info_for_instance(instance) try: self.driver.plug_vifs(instance, net_info) except NotImplementedError as e: LOG.debug(e, instance=instance) except exception.VirtualInterfacePlugException: # we don't want an exception to block the init_host LOG.exception(_LE("Vifs plug failed"), instance=instance) self._set_instance_error_state(context, instance) return if instance.task_state == task_states.RESIZE_MIGRATING: # We crashed during resize/migration, so roll back for safety try: # NOTE(mriedem): check old_vm_state for STOPPED here, if it's # not in system_metadata we default to True for backwards # compatibility power_on = (instance.system_metadata.get('old_vm_state') != vm_states.STOPPED) block_dev_info = self._get_instance_block_device_info(context, instance) self.driver.finish_revert_migration(context, instance, net_info, block_dev_info, power_on) except Exception as e: LOG.exception(_LE('Failed to revert crashed migration'), instance=instance) finally: LOG.info(_LI('Instance found in migrating state during ' 'startup. Resetting task_state'), instance=instance) instance.task_state = None instance.save() if instance.task_state == task_states.MIGRATING: # Live migration did not complete, but instance is on this # host, so reset the state. 
instance.task_state = None instance.save(expected_task_state=[task_states.MIGRATING]) db_state = instance.power_state drv_state = self._get_power_state(context, instance) expect_running = (db_state == power_state.RUNNING and drv_state != db_state) LOG.debug('Current state is %(drv_state)s, state in DB is ' '%(db_state)s.', {'drv_state': drv_state, 'db_state': db_state}, instance=instance) if expect_running and CONF.resume_guests_state_on_host_boot: LOG.info(_LI('Rebooting instance after nova-compute restart.'), instance=instance) block_device_info = \ self._get_instance_block_device_info(context, instance) try: self.driver.resume_state_on_host_boot( context, instance, net_info, block_device_info) except NotImplementedError: LOG.warning(_LW('Hypervisor driver does not support ' 'resume guests'), instance=instance) except Exception: # NOTE(vish): The instance failed to resume, so we set the # instance to error and attempt to continue. LOG.warning(_LW('Failed to resume instance'), instance=instance) self._set_instance_error_state(context, instance) elif drv_state == power_state.RUNNING: # VMwareAPI drivers will raise an exception try: self.driver.ensure_filtering_rules_for_instance( instance, net_info) except NotImplementedError: LOG.warning(_LW('Hypervisor driver does not support ' 'firewall rules'), instance=instance) def _retry_reboot(self, context, instance): current_power_state = self._get_power_state(context, instance) current_task_state = instance.task_state retry_reboot = False reboot_type = compute_utils.get_reboot_type(current_task_state, current_power_state) pending_soft = (current_task_state == task_states.REBOOT_PENDING and instance.vm_state in vm_states.ALLOW_SOFT_REBOOT) pending_hard = (current_task_state == task_states.REBOOT_PENDING_HARD and instance.vm_state in vm_states.ALLOW_HARD_REBOOT) started_not_running = (current_task_state in [task_states.REBOOT_STARTED, task_states.REBOOT_STARTED_HARD] and current_power_state != power_state.RUNNING) if 
pending_soft or pending_hard or started_not_running: retry_reboot = True return retry_reboot, reboot_type def handle_lifecycle_event(self, event): LOG.info(_LI("VM %(state)s (Lifecycle Event)"), {'state': event.get_name()}, instance_uuid=event.get_instance_uuid()) context = nova.context.get_admin_context(read_deleted='yes') instance = objects.Instance.get_by_uuid(context, event.get_instance_uuid(), expected_attrs=[]) vm_power_state = None if event.get_transition() == virtevent.EVENT_LIFECYCLE_STOPPED: vm_power_state = power_state.SHUTDOWN elif event.get_transition() == virtevent.EVENT_LIFECYCLE_STARTED: vm_power_state = power_state.RUNNING elif event.get_transition() == virtevent.EVENT_LIFECYCLE_PAUSED: vm_power_state = power_state.PAUSED elif event.get_transition() == virtevent.EVENT_LIFECYCLE_RESUMED: vm_power_state = power_state.RUNNING else: LOG.warning(_LW("Unexpected power state %d"), event.get_transition()) if vm_power_state is not None: LOG.debug('Synchronizing instance power state after lifecycle ' 'event "%(event)s"; current vm_state: %(vm_state)s, ' 'current task_state: %(task_state)s, current DB ' 'power_state: %(db_power_state)s, VM power_state: ' '%(vm_power_state)s', dict(event=event.get_name(), vm_state=instance.vm_state, task_state=instance.task_state, db_power_state=instance.power_state, vm_power_state=vm_power_state), instance_uuid=instance.uuid) self._sync_instance_power_state(context, instance, vm_power_state) def handle_events(self, event): if isinstance(event, virtevent.LifecycleEvent): try: self.handle_lifecycle_event(event) except exception.InstanceNotFound: LOG.debug("Event %s arrived for non-existent instance. 
The " "instance was probably deleted.", event) else: LOG.debug("Ignoring event %s", event) def init_virt_events(self): self.driver.register_event_listener(self.handle_events) def init_host(self): """Initialization for a standalone compute service.""" self.driver.init_host(host=self.host) context = nova.context.get_admin_context() instances = objects.InstanceList.get_by_host( context, self.host, expected_attrs=['info_cache']) if CONF.defer_iptables_apply: self.driver.filter_defer_apply_on() self.init_virt_events() try: # checking that instance was not already evacuated to other host self._destroy_evacuated_instances(context) for instance in instances: self._init_instance(context, instance) finally: if CONF.defer_iptables_apply: self.driver.filter_defer_apply_off() def cleanup_host(self): self.driver.cleanup_host(host=self.host) def pre_start_hook(self): """After the service is initialized, but before we fully bring the service up by listening on RPC queues, make sure to update our available resources (and indirectly our available nodes). """ self.update_available_resource(nova.context.get_admin_context()) def _get_power_state(self, context, instance): """Retrieve the power state for the given instance.""" LOG.debug('Checking state', instance=instance) try: return self.driver.get_info(instance).state except exception.InstanceNotFound: return power_state.NOSTATE def get_console_topic(self, context): """Retrieves the console host for a project on this host. Currently this is just set in the flags for each compute host. """ # TODO(mdragon): perhaps make this variable by console_type? return '%s.%s' % (CONF.console_topic, CONF.console_host) def get_console_pool_info(self, context, console_type): return self.driver.get_console_pool_info(console_type) @wrap_exception() def refresh_security_group_rules(self, context, security_group_id): """Tell the virtualization driver to refresh security group rules. Passes straight through to the virtualization driver. 
""" return self.driver.refresh_security_group_rules(security_group_id) @wrap_exception() def refresh_security_group_members(self, context, security_group_id): """Tell the virtualization driver to refresh security group members. Passes straight through to the virtualization driver. """ return self.driver.refresh_security_group_members(security_group_id) @wrap_exception() def refresh_instance_security_rules(self, context, instance): """Tell the virtualization driver to refresh security rules for an instance. Passes straight through to the virtualization driver. Synchronise the call because we may still be in the middle of creating the instance. """ @utils.synchronized(instance['uuid']) def _sync_refresh(): try: return self.driver.refresh_instance_security_rules(instance) except NotImplementedError: LOG.warning(_LW('Hypervisor driver does not support ' 'security groups.'), instance=instance) return _sync_refresh() @wrap_exception() def refresh_provider_fw_rules(self, context): """This call passes straight through to the virtualization driver.""" return self.driver.refresh_provider_fw_rules() def _get_instance_nw_info(self, context, instance, use_slave=False): """Get a list of dictionaries of network data of an instance.""" if (not hasattr(instance, 'system_metadata') or len(instance['system_metadata']) == 0): # NOTE(danms): Several places in the code look up instances without # pulling system_metadata for performance, and call this function. # If we get an instance without it, re-fetch so that the call # to network_api (which requires it for instance_type) will # succeed. attrs = ['system_metadata'] instance = objects.Instance.get_by_uuid(context, instance['uuid'], expected_attrs=attrs, use_slave=use_slave) network_info = self.network_api.get_instance_nw_info(context, instance) return network_info def _await_block_device_map_created(self, context, vol_id): # TODO(yamahata): creating volume simultaneously # reduces creation time? 
# TODO(yamahata): eliminate dumb polling start = time.time() retries = CONF.block_device_allocate_retries if retries < 0: LOG.warning(_LW("Treating negative config value (%(retries)s) for " "'block_device_retries' as 0."), {'retries': retries}) # (1) treat negative config value as 0 # (2) the configured value is 0, one attempt should be made # (3) the configured value is > 0, then the total number attempts # is (retries + 1) attempts = 1 if retries >= 1: attempts = retries + 1 for attempt in range(1, attempts + 1): volume = self.volume_api.get(context, vol_id) volume_status = volume['status'] if volume_status not in ['creating', 'downloading']: if volume_status != 'available': LOG.warning(_LW("Volume id: %s finished being created but " "was not set as 'available'"), vol_id) return attempt greenthread.sleep(CONF.block_device_allocate_retries_interval) # NOTE(harlowja): Should only happen if we ran out of attempts raise exception.VolumeNotCreated(volume_id=vol_id, seconds=int(time.time() - start), attempts=attempts) def _decode_files(self, injected_files): """Base64 decode the list of files to inject.""" if not injected_files: return [] def _decode(f): path, contents = f try: decoded = base64.b64decode(contents) return path, decoded except TypeError: raise exception.Base64Exception(path=path) return [_decode(f) for f in injected_files] def _run_instance(self, context, request_spec, filter_properties, requested_networks, injected_files, admin_password, is_first_time, node, instance, legacy_bdm_in_spec): """Launch a new instance with specified options.""" extra_usage_info = {} def notify(status, msg="", fault=None, **kwargs): """Send a create.{start,error,end} notification.""" type_ = "create.%(status)s" % dict(status=status) info = extra_usage_info.copy() info['message'] = msg self._notify_about_instance_usage(context, instance, type_, extra_usage_info=info, fault=fault, **kwargs) try: self._prebuild_instance(context, instance) if request_spec and 
request_spec.get('image'): image_meta = request_spec['image'] else: image_meta = {} extra_usage_info = {"image_name": image_meta.get('name', '')} notify("start") # notify that build is starting instance, network_info = self._build_instance(context, request_spec, filter_properties, requested_networks, injected_files, admin_password, is_first_time, node, instance, image_meta, legacy_bdm_in_spec) notify("end", msg=_("Success"), network_info=network_info) except exception.RescheduledException as e: # Instance build encountered an error, and has been rescheduled. notify("error", fault=e) except exception.BuildAbortException as e: # Instance build aborted due to a non-failure LOG.info(e) notify("end", msg=e.format_message()) # notify that build is done except Exception as e: # Instance build encountered a non-recoverable error: with excutils.save_and_reraise_exception(): self._set_instance_error_state(context, instance) notify("error", fault=e) # notify that build failed def _prebuild_instance(self, context, instance): self._check_instance_exists(context, instance) try: self._start_building(context, instance) except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError): msg = _("Instance disappeared before we could start it") # Quickly bail out of here raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) def _validate_instance_group_policy(self, context, instance, filter_properties): # NOTE(russellb) Instance group policy is enforced by the scheduler. # However, there is a race condition with the enforcement of # anti-affinity. Since more than one instance may be scheduled at the # same time, it's possible that more than one instance with an # anti-affinity policy may end up here. This is a validation step to # make sure that starting the instance here doesn't violate the policy. 
        scheduler_hints = filter_properties.get('scheduler_hints') or {}
        group_hint = scheduler_hints.get('group')
        if not group_hint:
            return

        # Serialize on the group hint so two concurrent builds of the same
        # group on this host can't both pass the check below.
        @utils.synchronized(group_hint)
        def _do_validation(context, instance, group_hint):
            group = objects.InstanceGroup.get_by_hint(context, group_hint)
            if 'anti-affinity' not in group.policies:
                return

            group_hosts = group.get_hosts(context, exclude=[instance.uuid])
            if self.host in group_hosts:
                msg = _("Anti-affinity instance group policy was violated.")
                raise exception.RescheduledException(
                        instance_uuid=instance.uuid,
                        reason=msg)

        _do_validation(context, instance, group_hint)

    def _build_instance(self, context, request_spec, filter_properties,
            requested_networks, injected_files, admin_password, is_first_time,
            node, instance, image_meta, legacy_bdm_in_spec):
        # Claim resources, allocate networking and block devices, then
        # spawn the instance on the hypervisor; on failure either clean
        # up and reschedule or raise to put the instance into ERROR.
        original_context = context
        context = context.elevated()

        # NOTE(danms): This method is deprecated, but could be called,
        # and if it is, it will have an old megatuple for requested_networks.
        if requested_networks is not None:
            requested_networks_obj = objects.NetworkRequestList(
                objects=[objects.NetworkRequest.from_tuple(t)
                         for t in requested_networks])
        else:
            requested_networks_obj = None

        # If neutron security groups pass requested security
        # groups to allocate_for_instance()
        if request_spec and self.is_neutron_security_groups:
            security_groups = request_spec.get('security_group')
        else:
            security_groups = []

        if node is None:
            node = self.driver.get_available_nodes(refresh=True)[0]
            LOG.debug("No node specified, defaulting to %s", node)

        network_info = None
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)

        # b64 decode the files to inject:
        injected_files_orig = injected_files
        injected_files = self._decode_files(injected_files)

        rt = self._get_resource_tracker(node)
        try:
            limits = filter_properties.get('limits', {})
            with rt.instance_claim(context, instance, limits) as inst_claim:
                # NOTE(russellb) It's important that this validation be done
                # *after* the resource tracker instance claim, as that is where
                # the host is set on the instance.
                self._validate_instance_group_policy(context, instance,
                        filter_properties)
                macs = self.driver.macs_for_instance(instance)
                dhcp_options = self.driver.dhcp_options_for_instance(instance)

                network_info = self._allocate_network(original_context,
                        instance, requested_networks_obj, macs,
                        security_groups, dhcp_options)

                # Verify that all the BDMs have a device_name set and assign a
                # default to the ones missing it with the help of the driver.
                self._default_block_device_names(context, instance, image_meta,
                                                 bdms)

                instance.vm_state = vm_states.BUILDING
                instance.task_state = task_states.BLOCK_DEVICE_MAPPING
                instance.numa_topology = inst_claim.claimed_numa_topology
                instance.save()

                block_device_info = self._prep_block_device(
                        context, instance, bdms)

                set_access_ip = (is_first_time and
                                 not instance.access_ip_v4 and
                                 not instance.access_ip_v6)

                flavor = None
                if filter_properties is not None:
                    flavor = filter_properties.get('instance_type')

                instance = self._spawn(context, instance, image_meta,
                                       network_info, block_device_info,
                                       injected_files, admin_password,
                                       set_access_ip=set_access_ip,
                                       flavor=flavor)
        except (exception.InstanceNotFound,
                exception.UnexpectedDeletingTaskStateError):
            # the instance got deleted during the spawn
            # Make sure the async call finishes
            if network_info is not None:
                network_info.wait(do_raise=False)
            try:
                self._deallocate_network(context, instance)
            except Exception:
                msg = _LE('Failed to dealloc network '
                          'for deleted instance')
                LOG.exception(msg, instance=instance)
            raise exception.BuildAbortException(
                instance_uuid=instance.uuid,
                reason=_("Instance disappeared during build"))
        except (exception.UnexpectedTaskStateError,
                exception.VirtualInterfaceCreateException) as e:
            # Don't try to reschedule, just log and reraise.
            with excutils.save_and_reraise_exception():
                LOG.debug(e.format_message(), instance=instance)
                # Make sure the async call finishes
                if network_info is not None:
                    network_info.wait(do_raise=False)
        except exception.InvalidBDM:
            with excutils.save_and_reraise_exception():
                if network_info is not None:
                    network_info.wait(do_raise=False)
                try:
                    self._deallocate_network(context, instance)
                except Exception:
                    msg = _LE('Failed to dealloc network '
                              'for failed instance')
                    LOG.exception(msg, instance=instance)
        except Exception:
            exc_info = sys.exc_info()
            # try to re-schedule instance:
            # Make sure the async call finishes
            if network_info is not None:
                network_info.wait(do_raise=False)
            rescheduled = self._reschedule_or_error(original_context, instance,
                    exc_info, requested_networks, admin_password,
                    injected_files_orig, is_first_time, request_spec,
                    filter_properties, bdms, legacy_bdm_in_spec)
            if rescheduled:
                # log the original build error
                self._log_original_error(exc_info, instance.uuid)
                raise exception.RescheduledException(
                        instance_uuid=instance.uuid,
                        reason=six.text_type(exc_info[1]))
            else:
                # not re-scheduling, go to error:
                raise exc_info[0], exc_info[1], exc_info[2]

        # spawn success
        return instance, network_info

    def _log_original_error(self, exc_info, instance_uuid):
        # Log the first (root-cause) build error before any reschedule noise.
        LOG.error(_LE('Error: %s'), exc_info[1], instance_uuid=instance_uuid,
                  exc_info=exc_info)

    def _reschedule_or_error(self, context, instance, exc_info,
            requested_networks, admin_password, injected_files, is_first_time,
            request_spec, filter_properties, bdms=None,
            legacy_bdm_in_spec=True):
        """Try to re-schedule the build or re-raise the original build error to
        error out the instance.
        """
        original_context = context
        context = context.elevated()

        instance_uuid = instance.uuid
        rescheduled = False

        compute_utils.add_instance_fault_from_exc(context,
                instance, exc_info[1], exc_info=exc_info)
        self._notify_about_instance_usage(context, instance,
                'instance.create.error', fault=exc_info[1])

        try:
            LOG.debug("Clean up resource before rescheduling.",
                      instance=instance)
            if bdms is None:
                bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                        context, instance.uuid)

            self._shutdown_instance(context, instance,
                                    bdms, requested_networks)
            self._cleanup_volumes(context, instance.uuid, bdms)
        except Exception:
            # do not attempt retry if clean up failed:
            with excutils.save_and_reraise_exception():
                self._log_original_error(exc_info, instance_uuid)

        try:
            method_args = (request_spec, admin_password, injected_files,
                    requested_networks, is_first_time, filter_properties,
                    legacy_bdm_in_spec)
            task_state = task_states.SCHEDULING

            rescheduled = self._reschedule(original_context, request_spec,
                    filter_properties, instance,
                    self.scheduler_rpcapi.run_instance, method_args,
                    task_state, exc_info)
        except Exception:
            rescheduled = False
            LOG.exception(_LE("Error trying to reschedule"),
                          instance_uuid=instance_uuid)

        return rescheduled

    def _reschedule(self, context, request_spec, filter_properties,
            instance, reschedule_method, method_args, task_state,
            exc_info=None):
        """Attempt to re-schedule a compute operation."""

        instance_uuid = instance.uuid
        retry = filter_properties.get('retry', None)
        if not retry:
            # no retry information, do not reschedule.
LOG.debug("Retry info not present, will not reschedule", instance_uuid=instance_uuid) return if not request_spec: LOG.debug("No request spec, will not reschedule", instance_uuid=instance_uuid) return LOG.debug("Re-scheduling %(method)s: attempt %(num)d", {'method': reschedule_method.func_name, 'num': retry['num_attempts']}, instance_uuid=instance_uuid) # reset the task state: self._instance_update(context, instance_uuid, task_state=task_state) if exc_info: # stringify to avoid circular ref problem in json serialization: retry['exc'] = traceback.format_exception_only(exc_info[0], exc_info[1]) reschedule_method(context, *method_args) return True @periodic_task.periodic_task def _check_instance_build_time(self, context): """Ensure that instances are not stuck in build.""" timeout = CONF.instance_build_timeout if timeout == 0: return filters = {'vm_state': vm_states.BUILDING, 'host': self.host} building_insts = objects.InstanceList.get_by_filters(context, filters, expected_attrs=[], use_slave=True) for instance in building_insts: if timeutils.is_older_than(instance['created_at'], timeout): self._set_instance_error_state(context, instance) LOG.warning(_LW("Instance build timed out. Set to error " "state."), instance=instance) def _check_instance_exists(self, context, instance): """Ensure an instance with the same name is not already present.""" if self.driver.instance_exists(instance): raise exception.InstanceExists(name=instance.name) def _start_building(self, context, instance): """Save the host and launched_on fields and log appropriately.""" LOG.audit(_('Starting instance...'), context=context, instance=instance) self._instance_update(context, instance.uuid, vm_state=vm_states.BUILDING, task_state=None, expected_task_state=(task_states.SCHEDULING, None)) def _allocate_network_async(self, context, instance, requested_networks, macs, security_groups, is_vpn, dhcp_options): """Method used to allocate networks in the background. Broken out for testing. 
""" LOG.debug("Allocating IP information in the background.", instance=instance) retries = CONF.network_allocate_retries if retries < 0: LOG.warning(_LW("Treating negative config value (%(retries)s) for " "'network_allocate_retries' as 0."), {'retries': retries}) retries = 0 attempts = retries + 1 retry_time = 1 for attempt in range(1, attempts + 1): try: nwinfo = self.network_api.allocate_for_instance( context, instance, vpn=is_vpn, requested_networks=requested_networks, macs=macs, security_groups=security_groups, dhcp_options=dhcp_options) LOG.debug('Instance network_info: |%s|', nwinfo, instance=instance) sys_meta = instance.system_metadata sys_meta['network_allocated'] = 'True' self._instance_update(context, instance.uuid, system_metadata=sys_meta) return nwinfo except Exception: exc_info = sys.exc_info() log_info = {'attempt': attempt, 'attempts': attempts} if attempt == attempts: LOG.exception(_LE('Instance failed network setup ' 'after %(attempts)d attempt(s)'), log_info) raise exc_info[0], exc_info[1], exc_info[2] LOG.warning(_LW('Instance failed network setup ' '(attempt %(attempt)d of %(attempts)d)'), log_info, instance=instance) time.sleep(retry_time) retry_time *= 2 if retry_time > 30: retry_time = 30 # Not reached. def _build_networks_for_instance(self, context, instance, requested_networks, security_groups): # If we're here from a reschedule the network may already be allocated. if strutils.bool_from_string( instance.system_metadata.get('network_allocated', 'False')): # NOTE(alex_xu): The network_allocated is True means the network # resource already allocated at previous scheduling, and the # network setup is cleanup at previous. After rescheduling, the # network resource need setup on the new host. 
self.network_api.setup_instance_network_on_host( context, instance, instance.host) return self._get_instance_nw_info(context, instance) if not self.is_neutron_security_groups: security_groups = [] macs = self.driver.macs_for_instance(instance) dhcp_options = self.driver.dhcp_options_for_instance(instance) network_info = self._allocate_network(context, instance, requested_networks, macs, security_groups, dhcp_options) if not instance.access_ip_v4 and not instance.access_ip_v6: # If CONF.default_access_ip_network_name is set, grab the # corresponding network and set the access ip values accordingly. # Note that when there are multiple ips to choose from, an # arbitrary one will be chosen. network_name = CONF.default_access_ip_network_name if not network_name: return network_info for vif in network_info: if vif['network']['label'] == network_name: for ip in vif.fixed_ips(): if ip['version'] == 4: instance.access_ip_v4 = ip['address'] if ip['version'] == 6: instance.access_ip_v6 = ip['address'] instance.save() break return network_info def _allocate_network(self, context, instance, requested_networks, macs, security_groups, dhcp_options): """Start network allocation asynchronously. Return an instance of NetworkInfoAsyncWrapper that can be used to retrieve the allocated networks when the operation has finished. """ # NOTE(comstud): Since we're allocating networks asynchronously, # this task state has little meaning, as we won't be in this # state for very long. 
        instance.vm_state = vm_states.BUILDING
        instance.task_state = task_states.NETWORKING
        instance.save(expected_task_state=[None])
        self._update_resource_tracker(context, instance)

        is_vpn = pipelib.is_vpn_image(instance.image_ref)
        # Return a wrapper that kicks off _allocate_network_async and lets the
        # caller fetch the network_info later, once allocation has finished.
        return network_model.NetworkInfoAsyncWrapper(
                self._allocate_network_async, context, instance,
                requested_networks, macs, security_groups, is_vpn,
                dhcp_options)

    def _default_root_device_name(self, instance, image_meta, root_bdm):
        # Ask the virt driver for its preferred root device name; fall back
        # to the generic next-device-name helper if the driver does not
        # implement the hook.
        try:
            return self.driver.default_root_device_name(instance,
                                                        image_meta,
                                                        root_bdm)
        except NotImplementedError:
            return compute_utils.get_next_device_name(instance, [])

    def _default_device_names_for_instance(self, instance,
                                           root_device_name,
                                           *block_device_lists):
        # Let the virt driver assign device names if it can; otherwise use
        # the generic compute_utils implementation.
        try:
            self.driver.default_device_names_for_instance(instance,
                                                          root_device_name,
                                                          *block_device_lists)
        except NotImplementedError:
            compute_utils.default_device_names_for_instance(
                instance, root_device_name, *block_device_lists)

    def _default_block_device_names(self, context, instance,
                                    image_meta, block_devices):
        """Verify that all the devices have the device_name set. If not,
        provide a default name.

        It also ensures that there is a root_device_name and is set to the
        first block device in the boot sequence (boot_index=0).
""" root_bdm = block_device.get_root_bdm(block_devices) if not root_bdm: return # Get the root_device_name from the root BDM or the instance root_device_name = None update_root_bdm = False if root_bdm.device_name: root_device_name = root_bdm.device_name instance.root_device_name = root_device_name elif instance.root_device_name: root_device_name = instance.root_device_name root_bdm.device_name = root_device_name update_root_bdm = True else: root_device_name = self._default_root_device_name(instance, image_meta, root_bdm) instance.root_device_name = root_device_name root_bdm.device_name = root_device_name update_root_bdm = True if update_root_bdm: root_bdm.save() ephemerals = filter(block_device.new_format_is_ephemeral, block_devices) swap = filter(block_device.new_format_is_swap, block_devices) block_device_mapping = filter( driver_block_device.is_block_device_mapping, block_devices) self._default_device_names_for_instance(instance, root_device_name, ephemerals, swap, block_device_mapping) def _prep_block_device(self, context, instance, bdms, do_check_attach=True): """Set up the block device for an instance with error logging.""" try: block_device_info = { 'root_device_name': instance['root_device_name'], 'swap': driver_block_device.convert_swap(bdms), 'ephemerals': driver_block_device.convert_ephemerals(bdms), 'block_device_mapping': ( driver_block_device.attach_block_devices( driver_block_device.convert_volumes(bdms), context, instance, self.volume_api, self.driver, do_check_attach=do_check_attach) + driver_block_device.attach_block_devices( driver_block_device.convert_snapshots(bdms), context, instance, self.volume_api, self.driver, self._await_block_device_map_created, do_check_attach=do_check_attach) + driver_block_device.attach_block_devices( driver_block_device.convert_images(bdms), context, instance, self.volume_api, self.driver, self._await_block_device_map_created, do_check_attach=do_check_attach) + driver_block_device.attach_block_devices( 
driver_block_device.convert_blanks(bdms), context, instance, self.volume_api, self.driver, self._await_block_device_map_created, do_check_attach=do_check_attach)) } if self.use_legacy_block_device_info: for bdm_type in ('swap', 'ephemerals', 'block_device_mapping'): block_device_info[bdm_type] = \ driver_block_device.legacy_block_devices( block_device_info[bdm_type]) # Get swap out of the list block_device_info['swap'] = driver_block_device.get_swap( block_device_info['swap']) return block_device_info except exception.OverQuota: msg = _LW('Failed to create block device for instance due to ' 'being over volume resource quota') LOG.warn(msg, instance=instance) raise exception.InvalidBDM() except Exception: LOG.exception(_LE('Instance failed block device setup'), instance=instance) raise exception.InvalidBDM() @object_compat def _spawn(self, context, instance, image_meta, network_info, block_device_info, injected_files, admin_password, set_access_ip=False, flavor=None): """Spawn an instance with error logging and update its power state.""" instance.vm_state = vm_states.BUILDING instance.task_state = task_states.SPAWNING instance.save(expected_task_state=task_states.BLOCK_DEVICE_MAPPING) try: self.driver.spawn(context, instance, image_meta, injected_files, admin_password, network_info, block_device_info, flavor=flavor) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_LE('Instance failed to spawn'), instance=instance) instance.power_state = self._get_power_state(context, instance) instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.launched_at = timeutils.utcnow() def _set_access_ip_values(): """Add access ip values for a given instance. If CONF.default_access_ip_network_name is set, this method will grab the corresponding network and set the access ip values accordingly. Note that when there are multiple ips to choose from, an arbitrary one will be chosen. 
""" network_name = CONF.default_access_ip_network_name if not network_name: return for vif in network_info: if vif['network']['label'] == network_name: for ip in vif.fixed_ips(): if ip['version'] == 4: instance.access_ip_v4 = ip['address'] if ip['version'] == 6: instance.access_ip_v6 = ip['address'] return if set_access_ip: _set_access_ip_values() network_info.wait(do_raise=True) instance.info_cache.network_info = network_info instance.save(expected_task_state=task_states.SPAWNING) return instance def _notify_about_instance_usage(self, context, instance, event_suffix, network_info=None, system_metadata=None, extra_usage_info=None, fault=None): compute_utils.notify_about_instance_usage( self.notifier, context, instance, event_suffix, network_info=network_info, system_metadata=system_metadata, extra_usage_info=extra_usage_info, fault=fault) def _deallocate_network(self, context, instance, requested_networks=None): LOG.debug('Deallocating network for instance', instance=instance) self.network_api.deallocate_for_instance( context, instance, requested_networks=requested_networks) def _get_instance_block_device_info(self, context, instance, refresh_conn_info=False, bdms=None): """Transform block devices to the driver block_device format.""" if not bdms: bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance['uuid']) swap = driver_block_device.convert_swap(bdms) ephemerals = driver_block_device.convert_ephemerals(bdms) block_device_mapping = ( driver_block_device.convert_volumes(bdms) + driver_block_device.convert_snapshots(bdms) + driver_block_device.convert_images(bdms)) if not refresh_conn_info: # if the block_device_mapping has no value in connection_info # (returned as None), don't include in the mapping block_device_mapping = [ bdm for bdm in block_device_mapping if bdm.get('connection_info')] else: block_device_mapping = driver_block_device.refresh_conn_infos( block_device_mapping, context, instance, self.volume_api, self.driver) if 
self.use_legacy_block_device_info: swap = driver_block_device.legacy_block_devices(swap) ephemerals = driver_block_device.legacy_block_devices(ephemerals) block_device_mapping = driver_block_device.legacy_block_devices( block_device_mapping) # Get swap out of the list swap = driver_block_device.get_swap(swap) root_device_name = instance.get('root_device_name') return {'swap': swap, 'root_device_name': root_device_name, 'ephemerals': ephemerals, 'block_device_mapping': block_device_mapping} # NOTE(mikal): No object_compat wrapper on this method because its # callers all pass objects already @wrap_exception() @reverts_task_state @wrap_instance_fault def build_and_run_instance(self, context, instance, image, request_spec, filter_properties, admin_password=None, injected_files=None, requested_networks=None, security_groups=None, block_device_mapping=None, node=None, limits=None): # NOTE(danms): Remove this in v4.0 of the RPC API if (requested_networks and not isinstance(requested_networks, objects.NetworkRequestList)): requested_networks = objects.NetworkRequestList( objects=[objects.NetworkRequest.from_tuple(t) for t in requested_networks]) # NOTE(melwitt): Remove this in v4.0 of the RPC API flavor = filter_properties.get('instance_type') if flavor and not isinstance(flavor, objects.Flavor): # Code downstream may expect extra_specs to be populated since it # is receiving an object, so lookup the flavor to ensure this. flavor = objects.Flavor.get_by_id(context, flavor['id']) filter_properties = dict(filter_properties, instance_type=flavor) @utils.synchronized(instance.uuid) def _locked_do_build_and_run_instance(*args, **kwargs): # NOTE(danms): We grab the semaphore with the instance uuid # locked because we could wait in line to build this instance # for a while and we want to make sure that nothing else tries # to do anything with this instance while we wait. 
with self._build_semaphore: self._do_build_and_run_instance(*args, **kwargs) # NOTE(danms): We spawn here to return the RPC worker thread back to # the pool. Since what follows could take a really long time, we don't # want to tie up RPC workers. utils.spawn_n(_locked_do_build_and_run_instance, context, instance, image, request_spec, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping, node, limits) @hooks.add_hook('build_instance') @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def _do_build_and_run_instance(self, context, instance, image, request_spec, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping, node=None, limits=None): try: LOG.audit(_('Starting instance...'), context=context, instance=instance) instance.vm_state = vm_states.BUILDING instance.task_state = None instance.save(expected_task_state= (task_states.SCHEDULING, None)) except exception.InstanceNotFound: msg = 'Instance disappeared before build.' LOG.debug(msg, instance=instance) return build_results.FAILED except exception.UnexpectedTaskStateError as e: LOG.debug(e.format_message(), instance=instance) return build_results.FAILED # b64 decode the files to inject: decoded_files = self._decode_files(injected_files) if limits is None: limits = {} if node is None: node = self.driver.get_available_nodes(refresh=True)[0] LOG.debug('No node specified, defaulting to %s', node, instance=instance) try: self._build_and_run_instance(context, instance, image, decoded_files, admin_password, requested_networks, security_groups, block_device_mapping, node, limits, filter_properties) return build_results.ACTIVE except exception.RescheduledException as e: LOG.debug(e.format_message(), instance=instance) retry = filter_properties.get('retry', None) if not retry: # no retry information, do not reschedule. 
LOG.debug("Retry info not present, will not reschedule", instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) compute_utils.add_instance_fault_from_exc(context, instance, e, sys.exc_info()) self._set_instance_error_state(context, instance) return build_results.FAILED retry['exc'] = traceback.format_exception(*sys.exc_info()) # NOTE(comstud): Deallocate networks if the driver wants # us to do so. if self.driver.deallocate_networks_on_reschedule(instance): self._cleanup_allocated_networks(context, instance, requested_networks) else: # NOTE(alex_xu): Network already allocated and we don't # want to deallocate them before rescheduling. But we need # cleanup those network resource setup on this host before # rescheduling. self.network_api.cleanup_instance_network_on_host( context, instance, self.host) instance.task_state = task_states.SCHEDULING instance.save() self.compute_task_api.build_instances(context, [instance], image, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping) return build_results.RESCHEDULED except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError): msg = 'Instance disappeared during build.' LOG.debug(msg, instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) return build_results.FAILED except exception.BuildAbortException as e: LOG.exception(e.format_message(), instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) self._cleanup_volumes(context, instance.uuid, block_device_mapping, raise_exc=False) compute_utils.add_instance_fault_from_exc(context, instance, e, sys.exc_info()) self._set_instance_error_state(context, instance) return build_results.FAILED except Exception as e: # Should not reach here. 
msg = _LE('Unexpected build failure, not rescheduling build.') LOG.exception(msg, instance=instance) self._cleanup_allocated_networks(context, instance, requested_networks) self._cleanup_volumes(context, instance.uuid, block_device_mapping, raise_exc=False) compute_utils.add_instance_fault_from_exc(context, instance, e, sys.exc_info()) self._set_instance_error_state(context, instance) return build_results.FAILED def _build_and_run_instance(self, context, instance, image, injected_files, admin_password, requested_networks, security_groups, block_device_mapping, node, limits, filter_properties): image_name = image.get('name') self._notify_about_instance_usage(context, instance, 'create.start', extra_usage_info={'image_name': image_name}) try: rt = self._get_resource_tracker(node) with rt.instance_claim(context, instance, limits) as inst_claim: # NOTE(russellb) It's important that this validation be done # *after* the resource tracker instance claim, as that is where # the host is set on the instance. 
self._validate_instance_group_policy(context, instance, filter_properties) with self._build_resources(context, instance, requested_networks, security_groups, image, block_device_mapping) as resources: instance.vm_state = vm_states.BUILDING instance.task_state = task_states.SPAWNING instance.numa_topology = inst_claim.claimed_numa_topology instance.save(expected_task_state= task_states.BLOCK_DEVICE_MAPPING) block_device_info = resources['block_device_info'] network_info = resources['network_info'] flavor = None if filter_properties is not None: flavor = filter_properties.get('instance_type') self.driver.spawn(context, instance, image, injected_files, admin_password, network_info=network_info, block_device_info=block_device_info, flavor=flavor) except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError) as e: with excutils.save_and_reraise_exception(): self._notify_about_instance_usage(context, instance, 'create.end', fault=e) except exception.ComputeResourcesUnavailable as e: LOG.debug(e.format_message(), instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) raise exception.RescheduledException( instance_uuid=instance.uuid, reason=e.format_message()) except exception.BuildAbortException as e: with excutils.save_and_reraise_exception(): LOG.debug(e.format_message(), instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) except (exception.FixedIpLimitExceeded, exception.NoMoreNetworks, exception.NoMoreFixedIps) as e: LOG.warning(_LW('No more network or fixed IP to be allocated'), instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) msg = _('Failed to allocate the network(s) with error %s, ' 'not rescheduling.') % e.format_message() raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) except (exception.VirtualInterfaceCreateException, exception.VirtualInterfaceMacAddressException) as e: 
LOG.exception(_LE('Failed to allocate network(s)'), instance=instance) self._notify_about_instance_usage(context, instance, 'create.error', fault=e) msg = _('Failed to allocate the network(s), not rescheduling.') raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) except (exception.FlavorDiskTooSmall, exception.FlavorMemoryTooSmall, exception.ImageNotActive, exception.ImageUnacceptable) as e: self._notify_about_instance_usage(context, instance, 'create.error', fault=e) raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=e.format_message()) except Exception as e: self._notify_about_instance_usage(context, instance, 'create.error', fault=e) raise exception.RescheduledException( instance_uuid=instance.uuid, reason=six.text_type(e)) # NOTE(alaski): This is only useful during reschedules, remove it now. instance.system_metadata.pop('network_allocated', None) instance.power_state = self._get_power_state(context, instance) instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.launched_at = timeutils.utcnow() try: instance.save(expected_task_state=task_states.SPAWNING) except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError) as e: with excutils.save_and_reraise_exception(): self._notify_about_instance_usage(context, instance, 'create.end', fault=e) self._notify_about_instance_usage(context, instance, 'create.end', extra_usage_info={'message': _('Success')}, network_info=network_info) @contextlib.contextmanager def _build_resources(self, context, instance, requested_networks, security_groups, image, block_device_mapping): resources = {} network_info = None try: network_info = self._build_networks_for_instance(context, instance, requested_networks, security_groups) resources['network_info'] = network_info except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError): raise except exception.UnexpectedTaskStateError as e: raise 
exception.BuildAbortException(instance_uuid=instance.uuid, reason=e.format_message()) except Exception: # Because this allocation is async any failures are likely to occur # when the driver accesses network_info during spawn(). LOG.exception(_LE('Failed to allocate network(s)'), instance=instance) msg = _('Failed to allocate the network(s), not rescheduling.') raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) try: # Verify that all the BDMs have a device_name set and assign a # default to the ones missing it with the help of the driver. self._default_block_device_names(context, instance, image, block_device_mapping) instance.vm_state = vm_states.BUILDING instance.task_state = task_states.BLOCK_DEVICE_MAPPING instance.save() block_device_info = self._prep_block_device(context, instance, block_device_mapping) resources['block_device_info'] = block_device_info except (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError): with excutils.save_and_reraise_exception() as ctxt: # Make sure the async call finishes if network_info is not None: network_info.wait(do_raise=False) except exception.UnexpectedTaskStateError as e: # Make sure the async call finishes if network_info is not None: network_info.wait(do_raise=False) raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=e.format_message()) except Exception: LOG.exception(_LE('Failure prepping block device'), instance=instance) # Make sure the async call finishes if network_info is not None: network_info.wait(do_raise=False) msg = _('Failure prepping block device.') raise exception.BuildAbortException(instance_uuid=instance.uuid, reason=msg) try: yield resources except Exception as exc: with excutils.save_and_reraise_exception() as ctxt: if not isinstance(exc, (exception.InstanceNotFound, exception.UnexpectedDeletingTaskStateError)): LOG.exception(_LE('Instance failed to spawn'), instance=instance) # Make sure the async call finishes if network_info is not 
None: network_info.wait(do_raise=False) try: self._shutdown_instance(context, instance, block_device_mapping, requested_networks, try_deallocate_networks=False) except Exception: ctxt.reraise = False msg = _('Could not clean up failed build,' ' not rescheduling') raise exception.BuildAbortException( instance_uuid=instance.uuid, reason=msg) def _cleanup_allocated_networks(self, context, instance, requested_networks): try: self._deallocate_network(context, instance, requested_networks) except Exception: msg = _LE('Failed to deallocate networks') LOG.exception(msg, instance=instance) return instance.system_metadata['network_allocated'] = 'False' try: instance.save() except exception.InstanceNotFound: # NOTE(alaski): It's possible that we're cleaning up the networks # because the instance was deleted. If that's the case then this # exception will be raised by instance.save() pass @object_compat @messaging.expected_exceptions(exception.BuildAbortException, exception.UnexpectedTaskStateError, exception.VirtualInterfaceCreateException, exception.RescheduledException) @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def run_instance(self, context, instance, request_spec, filter_properties, requested_networks, injected_files, admin_password, is_first_time, node, legacy_bdm_in_spec): # NOTE(alaski) This method should be deprecated when the scheduler and # compute rpc interfaces are bumped to 4.x, and slated for removal in # 5.x as it is no longer used. 
if filter_properties is None: filter_properties = {} @utils.synchronized(instance.uuid) def do_run_instance(): self._run_instance(context, request_spec, filter_properties, requested_networks, injected_files, admin_password, is_first_time, node, instance, legacy_bdm_in_spec) do_run_instance() def _try_deallocate_network(self, context, instance, requested_networks=None): try: # tear down allocated network structure self._deallocate_network(context, instance, requested_networks) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Failed to deallocate network for instance.'), instance=instance) self._set_instance_error_state(context, instance) def _get_power_off_values(self, context, instance, clean_shutdown): """Get the timing configuration for powering down this instance.""" if clean_shutdown: timeout = compute_utils.get_value_from_system_metadata(instance, key='image_os_shutdown_timeout', type=int, default=CONF.shutdown_timeout) retry_interval = self.SHUTDOWN_RETRY_INTERVAL else: timeout = 0 retry_interval = 0 return timeout, retry_interval def _power_off_instance(self, context, instance, clean_shutdown=True): """Power off an instance on this host.""" timeout, retry_interval = self._get_power_off_values(context, instance, clean_shutdown) self.driver.power_off(instance, timeout, retry_interval) def _shutdown_instance(self, context, instance, bdms, requested_networks=None, notify=True, try_deallocate_networks=True): """Shutdown an instance on this host. 
:param:context: security context :param:instance: a nova.objects.Instance object :param:bdms: the block devices for the instance to be torn down :param:requested_networks: the networks on which the instance has ports :param:notify: true if a final usage notification should be emitted :param:try_deallocate_networks: false if we should avoid trying to teardown networking """ context = context.elevated() LOG.audit(_('%(action_str)s instance') % {'action_str': 'Terminating'}, context=context, instance=instance) if notify: self._notify_about_instance_usage(context, instance, "shutdown.start") network_info = compute_utils.get_nw_info_for_instance(instance) # NOTE(vish) get bdms before destroying the instance vol_bdms = [bdm for bdm in bdms if bdm.is_volume] block_device_info = self._get_instance_block_device_info( context, instance, bdms=bdms) # NOTE(melwitt): attempt driver destroy before releasing ip, may # want to keep ip allocated for certain failures try: self.driver.destroy(context, instance, network_info, block_device_info) except exception.InstancePowerOffFailure: # if the instance can't power off, don't release the ip with excutils.save_and_reraise_exception(): pass except Exception: with excutils.save_and_reraise_exception(): # deallocate ip and fail without proceeding to # volume api calls, preserving current behavior if try_deallocate_networks: self._try_deallocate_network(context, instance, requested_networks) if try_deallocate_networks: self._try_deallocate_network(context, instance, requested_networks) for bdm in vol_bdms: try: # NOTE(vish): actual driver detach done in driver.destroy, so # just tell cinder that we are done with it. 
                connector = self.driver.get_volume_connector(instance)
                self.volume_api.terminate_connection(context,
                                                     bdm.volume_id,
                                                     connector)
                self.volume_api.detach(context, bdm.volume_id)
            except exception.DiskNotFound as exc:
                LOG.debug('Ignoring DiskNotFound: %s', exc,
                          instance=instance)
            except exception.VolumeNotFound as exc:
                LOG.debug('Ignoring VolumeNotFound: %s', exc,
                          instance=instance)
            except (cinder_exception.EndpointNotFound,
                    keystone_exception.EndpointNotFound) as exc:
                LOG.warning(_LW('Ignoring EndpointNotFound: %s'), exc,
                            instance=instance)

        if notify:
            self._notify_about_instance_usage(context, instance,
                                              "shutdown.end")

    def _cleanup_volumes(self, context, instance_uuid, bdms, raise_exc=True):
        # Best-effort deletion of delete_on_termination volumes: every volume
        # is attempted, failures are logged, and (when raise_exc is True) the
        # last captured exception is re-raised after the loop completes.
        exc_info = None

        for bdm in bdms:
            LOG.debug("terminating bdm %s", bdm,
                      instance_uuid=instance_uuid)
            if bdm.volume_id and bdm.delete_on_termination:
                try:
                    self.volume_api.delete(context, bdm.volume_id)
                except Exception as exc:
                    exc_info = sys.exc_info()
                    LOG.warning(_LW('Failed to delete volume: %(volume_id)s '
                                    'due to %(exc)s'),
                                {'volume_id': bdm.volume_id, 'exc': exc})
        if exc_info is not None and raise_exc:
            six.reraise(exc_info[0], exc_info[1], exc_info[2])

    @hooks.add_hook("delete_instance")
    def _delete_instance(self, context, instance, bdms, quotas):
        """Delete an instance on this host.  Commit or rollback quotas
        as necessary.

        :param context: nova request context
        :param instance: nova.objects.instance.Instance object
        :param bdms: nova.objects.block_device.BlockDeviceMappingList object
        :param quotas: nova.objects.quotas.Quotas object
        """
        was_soft_deleted = instance.vm_state == vm_states.SOFT_DELETED
        if was_soft_deleted:
            # Instances in SOFT_DELETED vm_state have already had quotas
            # decremented.
try: quotas.rollback() except Exception: pass try: events = self.instance_events.clear_events_for_instance(instance) if events: LOG.debug('Events pending at deletion: %(events)s', {'events': ','.join(events.keys())}, instance=instance) instance.info_cache.delete() self._notify_about_instance_usage(context, instance, "delete.start") self._shutdown_instance(context, instance, bdms) # NOTE(vish): We have already deleted the instance, so we have # to ignore problems cleaning up the volumes. It # would be nice to let the user know somehow that # the volume deletion failed, but it is not # acceptable to have an instance that can not be # deleted. Perhaps this could be reworked in the # future to set an instance fault the first time # and to only ignore the failure if the instance # is already in ERROR. self._cleanup_volumes(context, instance.uuid, bdms, raise_exc=False) # if a delete task succeeded, always update vm state and task # state without expecting task state to be DELETING instance.vm_state = vm_states.DELETED instance.task_state = None instance.power_state = power_state.NOSTATE instance.terminated_at = timeutils.utcnow() instance.save() self._update_resource_tracker(context, instance) system_meta = instance.system_metadata instance.destroy() except Exception: with excutils.save_and_reraise_exception(): quotas.rollback() self._complete_deletion(context, instance, bdms, quotas, system_meta) @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def terminate_instance(self, context, instance, bdms, reservations): """Terminate an instance on this host.""" # NOTE (ndipanov): If we get non-object BDMs, just get them from the # db again, as this means they are sent in the old format and we want # to avoid converting them back when we can just get them. 
# Remove this when we bump the RPC major version to 4.0 if (bdms and any(not isinstance(bdm, obj_base.NovaObject) for bdm in bdms)): bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) quotas = objects.Quotas.from_reservations(context, reservations, instance=instance) @utils.synchronized(instance.uuid) def do_terminate_instance(instance, bdms): try: self._delete_instance(context, instance, bdms, quotas) except exception.InstanceNotFound: LOG.info(_LI("Instance disappeared during terminate"), instance=instance) except Exception: # As we're trying to delete always go to Error if something # goes wrong that _delete_instance can't handle. with excutils.save_and_reraise_exception(): LOG.exception(_LE('Setting instance vm_state to ERROR'), instance=instance) self._set_instance_error_state(context, instance) do_terminate_instance(instance, bdms) # NOTE(johannes): This is probably better named power_off_instance # so it matches the driver method, but because of other issues, we # can't use that name in grizzly. @wrap_exception() @reverts_task_state @wrap_instance_event @wrap_instance_fault def stop_instance(self, context, instance, clean_shutdown=True): """Stopping an instance on this host.""" @utils.synchronized(instance.uuid) def do_stop_instance(): current_power_state = self._get_power_state(context, instance) LOG.debug('Stopping instance; current vm_state: %(vm_state)s, ' 'current task_state: %(task_state)s, current DB ' 'power_state: %(db_power_state)s, current VM ' 'power_state: %(current_power_state)s', dict(vm_state=instance.vm_state, task_state=instance.task_state, db_power_state=instance.power_state, current_power_state=current_power_state), instance_uuid=instance.uuid) # NOTE(mriedem): If the instance is already powered off, we are # possibly tearing down and racing with other operations, so we can # expect the task_state to be None if something else updates the # instance and we're not locking it. 
expected_task_state = [task_states.POWERING_OFF] # The list of power states is from _sync_instance_power_state. if current_power_state in (power_state.NOSTATE, power_state.SHUTDOWN, power_state.CRASHED): LOG.info(_LI('Instance is already powered off in the ' 'hypervisor when stop is called.'), instance=instance) expected_task_state.append(None) self._notify_about_instance_usage(context, instance, "power_off.start") self._power_off_instance(context, instance, clean_shutdown) instance.power_state = self._get_power_state(context, instance) instance.vm_state = vm_states.STOPPED instance.task_state = None instance.save(expected_task_state=expected_task_state) self._notify_about_instance_usage(context, instance, "power_off.end") do_stop_instance() def _power_on(self, context, instance): network_info = self._get_instance_nw_info(context, instance) block_device_info = self._get_instance_block_device_info(context, instance) self.driver.power_on(context, instance, network_info, block_device_info) # NOTE(johannes): This is probably better named power_on_instance # so it matches the driver method, but because of other issues, we # can't use that name in grizzly. 
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def start_instance(self, context, instance):
        """Starting an instance on this host.

        Powers the instance on, then records vm_state ACTIVE and clears the
        POWERING_ON task state.
        """
        self._notify_about_instance_usage(context, instance, "power_on.start")
        self._power_on(context, instance)
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.save(expected_task_state=task_states.POWERING_ON)
        self._notify_about_instance_usage(context, instance, "power_on.end")

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def soft_delete_instance(self, context, instance, reservations):
        """Soft delete an instance on this host.

        Quotas are committed only on success; any failure rolls back the
        reservations and re-raises.
        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        try:
            self._notify_about_instance_usage(context, instance,
                                              "soft_delete.start")
            try:
                self.driver.soft_delete(instance)
            except NotImplementedError:
                # Fallback to just powering off the instance if the
                # hypervisor doesn't implement the soft_delete method
                self.driver.power_off(instance)
            instance.power_state = self._get_power_state(context, instance)
            instance.vm_state = vm_states.SOFT_DELETED
            instance.task_state = None
            instance.save(expected_task_state=[task_states.SOFT_DELETING])
        except Exception:
            with excutils.save_and_reraise_exception():
                quotas.rollback()
        quotas.commit()
        self._notify_about_instance_usage(context, instance, "soft_delete.end")

    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def restore_instance(self, context, instance):
        """Restore a soft-deleted instance on this host."""
        self._notify_about_instance_usage(context, instance, "restore.start")
        try:
            self.driver.restore(instance)
        except NotImplementedError:
            # Fallback to just powering on the instance if the hypervisor
            # doesn't implement the restore method
            self._power_on(context, instance)
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.save(expected_task_state=task_states.RESTORING)
        self._notify_about_instance_usage(context, instance, "restore.end")

    def _rebuild_default_impl(self, context, instance, image_meta,
                              injected_files, admin_password, bdms,
                              detach_block_devices, attach_block_devices,
                              network_info=None,
                              recreate=False, block_device_info=None,
                              preserve_ephemeral=False):
        # Generic rebuild path used when the driver does not implement its
        # own rebuild(): detach volumes, destroy the old guest (unless
        # recreating on a new host), re-attach block devices, then spawn
        # from the (possibly new) image.  Task state transitions
        # REBUILDING -> REBUILD_BLOCK_DEVICE_MAPPING -> REBUILD_SPAWNING
        # are persisted along the way.
        if preserve_ephemeral:
            # The default code path does not support preserving ephemeral
            # partitions.
            raise exception.PreserveEphemeralNotSupported()

        detach_block_devices(context, bdms)

        if not recreate:
            self.driver.destroy(context, instance, network_info,
                                block_device_info=block_device_info)

        instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
        instance.save(expected_task_state=[task_states.REBUILDING])

        new_block_device_info = attach_block_devices(context, instance, bdms)

        instance.task_state = task_states.REBUILD_SPAWNING
        instance.save(
            expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])

        self.driver.spawn(context, instance, image_meta, injected_files,
                          admin_password, network_info=network_info,
                          block_device_info=new_block_device_info)

    @object_compat
    @messaging.expected_exceptions(exception.PreserveEphemeralNotSupported)
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
                         injected_files, new_pass, orig_sys_metadata,
                         bdms, recreate, on_shared_storage,
                         preserve_ephemeral=False):
        """Destroy and re-make this instance.

        A 'rebuild' effectively purges all existing data from the system and
        remakes the VM with given 'metadata' and 'personalities'.
        :param context: `nova.RequestContext` object
        :param instance: Instance object
        :param orig_image_ref: Original image_ref before rebuild
        :param image_ref: New image_ref for rebuild
        :param injected_files: Files to inject
        :param new_pass: password to set on rebuilt instance
        :param orig_sys_metadata: instance system metadata from pre-rebuild
        :param bdms: block-device-mappings to use for rebuild
        :param recreate: True if the instance is being recreated (e.g. the
            hypervisor it was on failed) - cleanup of old state will be
            skipped.
        :param on_shared_storage: True if instance files on shared storage
        :param preserve_ephemeral: True if the default ephemeral storage
                                   partition must be preserved on rebuild
        """
        context = context.elevated()
        # NOTE (ndipanov): If we get non-object BDMs, just get them from the
        # db again, as this means they are sent in the old format and we want
        # to avoid converting them back when we can just get them.
        # Remove this on the next major RPC version bump
        if (bdms and
            any(not isinstance(bdm, obj_base.NovaObject)
                for bdm in bdms)):
            bdms = None

        # Remember the pre-rebuild vm_state so a STOPPED instance can be
        # returned to that state after the rebuild completes.
        orig_vm_state = instance.vm_state
        with self._error_out_instance_on_exception(context, instance):
            LOG.audit(_("Rebuilding instance"), context=context,
                      instance=instance)

            if recreate:
                # Evacuate path: the instance is being re-created on this
                # (new) host because its original host failed.
                if not self.driver.capabilities["supports_recreate"]:
                    raise exception.InstanceRecreateNotSupported

                self._check_instance_exists(context, instance)

                # To cover case when admin expects that instance files are on
                # shared storage, but not accessible and vice versa
                if on_shared_storage != self.driver.instance_on_disk(instance):
                    raise exception.InvalidSharedStorage(
                            _("Invalid state of instance files on shared"
                              " storage"))

                if on_shared_storage:
                    LOG.info(_LI('disk on shared storage, recreating using'
                                 ' existing disk'))
                else:
                    image_ref = orig_image_ref = instance.image_ref
                    LOG.info(_LI("disk not on shared storage, rebuilding from:"
                                 " '%s'"), str(image_ref))

            # NOTE(mriedem): On a recreate (evacuate), we need to update
            # the instance's host and node properties to reflect it's
            # destination node for the recreate.
            node_name = None
            try:
                compute_node = self._get_compute_info(context, self.host)
                node_name = compute_node.hypervisor_hostname
            except exception.ComputeHostNotFound:
                LOG.exception(_LE('Failed to get compute_info for %s'),
                              self.host)
            finally:
                instance.host = self.host
                instance.node = node_name
                instance.save()

            if image_ref:
                image_meta = self.image_api.get(context, image_ref)
            else:
                image_meta = {}

            # This instance.exists message should contain the original
            # image_ref, not the new one.  Since the DB has been updated
            # to point to the new one... we have to override it.
            # TODO(jaypipes): Move generate_image_url() into the nova.image.api
            orig_image_ref_url = glance.generate_image_url(orig_image_ref)
            extra_usage_info = {'image_ref_url': orig_image_ref_url}
            self.conductor_api.notify_usage_exists(
                    context, instance, current_period=True,
                    system_metadata=orig_sys_metadata,
                    extra_usage_info=extra_usage_info)

            # This message should contain the new image_ref
            extra_usage_info = {'image_name': image_meta.get('name', '')}
            self._notify_about_instance_usage(
                    context, instance, "rebuild.start",
                    extra_usage_info=extra_usage_info)

            instance.power_state = self._get_power_state(context, instance)
            instance.task_state = task_states.REBUILDING
            instance.save(expected_task_state=[task_states.REBUILDING])

            if recreate:
                self.network_api.setup_networks_on_host(
                        context, instance, self.host)

            network_info = compute_utils.get_nw_info_for_instance(instance)
            if bdms is None:
                bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                        context, instance.uuid)

            block_device_info = \
                self._get_instance_block_device_info(
                        context, instance, bdms=bdms)

            def detach_block_devices(context, bdms):
                # Detach only volume-backed mappings; local disks are handled
                # by the driver's destroy/spawn.
                for bdm in bdms:
                    if bdm.is_volume:
                        self.volume_api.detach(context, bdm.volume_id)

            files = self._decode_files(injected_files)

            kwargs = dict(
                context=context,
                instance=instance,
                image_meta=image_meta,
                injected_files=files,
                admin_password=new_pass,
                bdms=bdms,
                detach_block_devices=detach_block_devices,
                attach_block_devices=self._prep_block_device,
                block_device_info=block_device_info,
                network_info=network_info,
                preserve_ephemeral=preserve_ephemeral,
                recreate=recreate)
            try:
                self.driver.rebuild(**kwargs)
            except NotImplementedError:
                # NOTE(rpodolyaka): driver doesn't provide specialized version
                # of rebuild, fall back to the default implementation
                self._rebuild_default_impl(**kwargs)
            instance.power_state = self._get_power_state(context, instance)
            instance.vm_state = vm_states.ACTIVE
            instance.task_state = None
            instance.launched_at = timeutils.utcnow()
            instance.save(expected_task_state=[task_states.REBUILD_SPAWNING])

            if orig_vm_state == vm_states.STOPPED:
                # Rebuild leaves the guest running; power it back off to
                # honor the pre-rebuild STOPPED state.
                LOG.info(_LI("bringing vm to original state: '%s'"),
                         orig_vm_state, instance=instance)
                instance.vm_state = vm_states.ACTIVE
                instance.task_state = task_states.POWERING_OFF
                instance.progress = 0
                instance.save()
                self.stop_instance(context, instance)

            self._notify_about_instance_usage(
                    context, instance, "rebuild.end",
                    network_info=network_info,
                    extra_usage_info=extra_usage_info)

    def _handle_bad_volumes_detached(self, context, instance, bad_devices,
                                     block_device_info):
        """Handle cases where the virt-layer had to detach non-working volumes
        in order to complete an operation.
        """
        for bdm in block_device_info['block_device_mapping']:
            if bdm.get('mount_device') in bad_devices:
                try:
                    volume_id = bdm['connection_info']['data']['volume_id']
                except KeyError:
                    continue

                # NOTE(sirp): ideally we'd just call
                # `compute_api.detach_volume` here but since that hits the
                # DB directly, that's off limits from within the
                # compute-manager.
                #
                # API-detach
                LOG.info(_LI("Detaching from volume api: %s"), volume_id)
                volume = self.volume_api.get(context, volume_id)
                self.volume_api.check_detach(context, volume)
                self.volume_api.begin_detaching(context, volume_id)

                # Manager-detach
                self.detach_volume(context, volume_id, instance)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def reboot_instance(self, context, instance, block_device_info,
                        reboot_type):
        """Reboot an instance on this host."""
        # acknowledge the request made it to the manager
        if reboot_type == "SOFT":
            instance.task_state = task_states.REBOOT_PENDING
            expected_states = (task_states.REBOOTING,
                               task_states.REBOOT_PENDING,
                               task_states.REBOOT_STARTED)
        else:
            instance.task_state = task_states.REBOOT_PENDING_HARD
            expected_states = (task_states.REBOOTING_HARD,
                               task_states.REBOOT_PENDING_HARD,
                               task_states.REBOOT_STARTED_HARD)
        context = context.elevated()
        LOG.audit(_("Rebooting instance"), context=context, instance=instance)

        # Note the passed-in block_device_info is deliberately refreshed
        # from the current DB state here.
        block_device_info = self._get_instance_block_device_info(context,
                                                                 instance)

        network_info = self._get_instance_nw_info(context, instance)

        self._notify_about_instance_usage(context, instance, "reboot.start")

        instance.power_state = self._get_power_state(context, instance)
        instance.save(expected_task_state=expected_states)

        if instance.power_state != power_state.RUNNING:
            state = instance.power_state
            running = power_state.RUNNING
            LOG.warning(_LW('trying to reboot a non-running instance:'
                            ' (state: %(state)s expected: %(running)s)'),
                        {'state': state, 'running': running},
                        context=context, instance=instance)

        def bad_volumes_callback(bad_devices):
            self._handle_bad_volumes_detached(
                    context, instance, bad_devices, block_device_info)

        try:
            # Don't change it out of rescue mode
            if instance.vm_state == vm_states.RESCUED:
                new_vm_state = vm_states.RESCUED
            else:
                new_vm_state = vm_states.ACTIVE
            new_power_state = None
            if reboot_type == "SOFT":
                instance.task_state = task_states.REBOOT_STARTED
                expected_state = task_states.REBOOT_PENDING
            else:
                instance.task_state = task_states.REBOOT_STARTED_HARD
                expected_state = task_states.REBOOT_PENDING_HARD
            instance.save(expected_task_state=expected_state)
            self.driver.reboot(context, instance,
                               network_info,
                               reboot_type,
                               block_device_info=block_device_info,
                               bad_volumes_callback=bad_volumes_callback)

        except Exception as error:
            with excutils.save_and_reraise_exception() as ctxt:
                exc_info = sys.exc_info()
                # if the reboot failed but the VM is running don't
                # put it into an error state
                new_power_state = self._get_power_state(context, instance)
                if new_power_state == power_state.RUNNING:
                    LOG.warning(_LW('Reboot failed but instance is running'),
                                context=context, instance=instance)
                    compute_utils.add_instance_fault_from_exc(context,
                            instance, error, exc_info)
                    self._notify_about_instance_usage(context, instance,
                            'reboot.error', fault=error)
                    # Guest survived the failed reboot; swallow the error.
                    ctxt.reraise = False
                else:
                    LOG.error(_LE('Cannot reboot instance: %s'), error,
                              context=context, instance=instance)
                    self._set_instance_obj_error_state(context, instance)

        if not new_power_state:
            new_power_state = self._get_power_state(context, instance)
        try:
            instance.power_state = new_power_state
            instance.vm_state = new_vm_state
            instance.task_state = None
            instance.save()
        except exception.InstanceNotFound:
            LOG.warning(_LW("Instance disappeared during reboot"),
                        context=context, instance=instance)

        self._notify_about_instance_usage(context, instance, "reboot.end")

    @delete_image_on_error
    def _do_snapshot_instance(self, context, image_id, instance, rotation):
        # Backup-flavored snapshot: rotation must be non-negative.
        if rotation < 0:
            raise exception.RotationRequiredForBackup()
        self._snapshot_instance(context, image_id, instance,
                                task_states.IMAGE_BACKUP)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def backup_instance(self, context, image_id, instance, backup_type,
                        rotation):
        """Backup an instance on this host.
        :param backup_type: daily | weekly
        :param rotation: int representing how many backups to keep around
        """
        self._do_snapshot_instance(context, image_id, instance, rotation)
        self._rotate_backups(context, instance, backup_type, rotation)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    @delete_image_on_error
    def snapshot_instance(self, context, image_id, instance):
        """Snapshot an instance on this host.

        :param context: security context
        :param instance: a nova.objects.instance.Instance object
        :param image_id: glance.db.sqlalchemy.models.Image.Id
        """
        # NOTE(dave-mcnally) the task state will already be set by the api
        # but if the compute manager has crashed/been restarted prior to the
        # request getting here the task state may have been cleared so we set
        # it again and things continue normally
        try:
            instance.task_state = task_states.IMAGE_SNAPSHOT
            instance.save(
                        expected_task_state=task_states.IMAGE_SNAPSHOT_PENDING)
        except exception.InstanceNotFound:
            # possibility instance no longer exists, no point in continuing
            LOG.debug("Instance not found, could not set state %s "
                      "for instance.",
                      task_states.IMAGE_SNAPSHOT, instance=instance)
            return

        except exception.UnexpectedDeletingTaskStateError:
            LOG.debug("Instance being deleted, snapshot cannot continue",
                      instance=instance)
            return

        self._snapshot_instance(context, image_id, instance,
                                task_states.IMAGE_SNAPSHOT)

    def _snapshot_instance(self, context, image_id, instance,
                           expected_task_state):
        # Common snapshot/backup implementation: drives the virt driver's
        # snapshot() with a callback that persists task-state transitions,
        # and cleans up the half-built glance image if the instance
        # disappears mid-snapshot.
        context = context.elevated()

        instance.power_state = self._get_power_state(context, instance)
        try:
            instance.save()

            LOG.audit(_('instance snapshotting'), context=context,
                  instance=instance)

            if instance.power_state != power_state.RUNNING:
                state = instance.power_state
                running = power_state.RUNNING
                LOG.warning(_LW('trying to snapshot a non-running instance: '
                            '(state: %(state)s expected: %(running)s)'),
                            {'state': state, 'running': running},
                            instance=instance)

            self._notify_about_instance_usage(
                context, instance, "snapshot.start")

            def update_task_state(task_state,
                                  expected_state=expected_task_state):
                instance.task_state = task_state
                instance.save(expected_task_state=expected_state)

            self.driver.snapshot(context, instance, image_id,
                                 update_task_state)

            instance.task_state = None
            instance.save(expected_task_state=task_states.IMAGE_UPLOADING)

            self._notify_about_instance_usage(context, instance,
                                              "snapshot.end")
        except (exception.InstanceNotFound,
                exception.UnexpectedDeletingTaskStateError):
            # the instance got deleted during the snapshot
            # Quickly bail out of here
            msg = 'Instance disappeared during snapshot'
            LOG.debug(msg, instance=instance)
            try:
                image_service = glance.get_default_image_service()
                image = image_service.show(context, image_id)
                if image['status'] != 'active':
                    # Best-effort delete of the incomplete image.
                    image_service.delete(context, image_id)
            except Exception:
                LOG.warning(_LW("Error while trying to clean up image %s"),
                            image_id, instance=instance)
        except exception.ImageNotFound:
            instance.task_state = None
            instance.save()
            msg = _("Image not found during snapshot")
            # NOTE(review): LOG.warn is a deprecated alias of LOG.warning;
            # left as-is to keep this change documentation-only.
            LOG.warn(msg, instance=instance)

    def _post_interrupted_snapshot_cleanup(self, context, instance):
        # Thin passthrough to the driver's interrupted-snapshot cleanup.
        self.driver.post_interrupted_snapshot_cleanup(context, instance)

    @object_compat
    @messaging.expected_exceptions(NotImplementedError)
    def volume_snapshot_create(self, context, instance, volume_id,
                               create_info):
        # Driver passthrough; NotImplementedError is an expected RPC fault.
        self.driver.volume_snapshot_create(context, instance, volume_id,
                                           create_info)

    @object_compat
    @messaging.expected_exceptions(NotImplementedError)
    def volume_snapshot_delete(self, context, instance, volume_id,
                               snapshot_id, delete_info):
        # Driver passthrough; NotImplementedError is an expected RPC fault.
        self.driver.volume_snapshot_delete(context, instance, volume_id,
                                           snapshot_id, delete_info)

    @wrap_instance_fault
    def _rotate_backups(self, context, instance, backup_type, rotation):
        """Delete excess backups associated to an instance.

        Instances are allowed a fixed number of backups (the rotation number);
        this method deletes the oldest backups that exceed the rotation
        threshold.
        :param context: security context
        :param instance: Instance dict
        :param backup_type: daily | weekly
        :param rotation: int representing how many backups to keep around;
            None if rotation shouldn't be used (as in the case of snapshots)
        """
        filters = {'property-image_type': 'backup',
                   'property-backup_type': backup_type,
                   'property-instance_uuid': instance.uuid}

        # Newest first, so pop() below removes the oldest backups.
        images = self.image_api.get_all(context, filters=filters,
                                        sort_key='created_at', sort_dir='desc')
        num_images = len(images)
        LOG.debug("Found %(num_images)d images (rotation: %(rotation)d)",
                  {'num_images': num_images, 'rotation': rotation},
                  instance=instance)

        if num_images > rotation:
            # NOTE(sirp): this deletes all backups that exceed the rotation
            # limit
            excess = len(images) - rotation
            LOG.debug("Rotating out %d backups", excess,
                      instance=instance)
            for i in xrange(excess):
                image = images.pop()
                image_id = image['id']
                LOG.debug("Deleting image %s", image_id,
                          instance=instance)
                self.image_api.delete(context, image_id)

    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def set_admin_password(self, context, instance, new_pass):
        """Set the root/admin password for an instance on this host.

        This is generally only called by API password resets after an
        image has been built.

        @param context: Nova auth context.
        @param instance: Nova instance object.
        @param new_pass: The admin password for the instance.
        """

        context = context.elevated()
        if new_pass is None:
            # Generate a random password
            new_pass = utils.generate_password()

        current_power_state = self._get_power_state(context, instance)
        expected_state = power_state.RUNNING

        if current_power_state != expected_state:
            # Guest must be running for the agent/driver to set a password;
            # clear the task state and report failure to the caller.
            instance.task_state = None
            instance.save(expected_task_state=task_states.UPDATING_PASSWORD)
            _msg = _('instance %s is not running') % instance.uuid
            raise exception.InstancePasswordSetFailed(
                instance=instance.uuid, reason=_msg)

        try:
            self.driver.set_admin_password(instance, new_pass)
            LOG.audit(_("Root password set"), instance=instance)
            instance.task_state = None
            instance.save(
                expected_task_state=task_states.UPDATING_PASSWORD)
        except NotImplementedError:
            LOG.warning(_LW('set_admin_password is not implemented '
                            'by this driver or guest instance.'),
                        instance=instance)
            instance.task_state = None
            instance.save(
                expected_task_state=task_states.UPDATING_PASSWORD)
            raise NotImplementedError(_('set_admin_password is not '
                                        'implemented by this driver or guest '
                                        'instance.'))
        except exception.UnexpectedTaskStateError:
            # interrupted by another (most likely delete) task
            # do not retry
            raise
        except Exception as e:
            # Catch all here because this could be anything.
            LOG.exception(_LE('set_admin_password failed: %s'), e,
                          instance=instance)
            self._set_instance_obj_error_state(context, instance)
            # We create a new exception here so that we won't
            # potentially reveal password information to the
            # API caller.  The real exception is logged above
            _msg = _('error setting admin password')
            raise exception.InstancePasswordSetFailed(
                instance=instance.uuid, reason=_msg)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def inject_file(self, context, path, file_contents, instance):
        """Write a file to the specified path in an instance on this host."""
        # NOTE(russellb) Remove this method, as well as the underlying virt
        # driver methods, when the compute rpc interface is bumped to 4.x
        # as it is no longer used.
        context = context.elevated()
        current_power_state = self._get_power_state(context, instance)
        expected_state = power_state.RUNNING
        if current_power_state != expected_state:
            # Warn but proceed; the driver may still be able to inject.
            LOG.warning(_LW('trying to inject a file into a non-running '
                            '(state: %(current_state)s expected: '
                            '%(expected_state)s)'),
                        {'current_state': current_power_state,
                         'expected_state': expected_state},
                        instance=instance)
        LOG.audit(_('injecting file to %s'), path,
                  instance=instance)
        self.driver.inject_file(instance, path, file_contents)

    def _get_rescue_image(self, context, instance, rescue_image_ref=None):
        """Determine what image should be used to boot the rescue VM."""
        # 1. If rescue_image_ref is passed in, use that for rescue.
        # 2. Else, use the base image associated with instance's current image.
        #    The idea here is to provide the customer with a rescue
        #    environment which they are familiar with.
        #    So, if they built their instance off of a Debian image,
        #    their rescue VM will also be Debian.
        # 3. As a last resort, use instance's current image.
        if not rescue_image_ref:
            system_meta = utils.instance_sys_meta(instance)
            rescue_image_ref = system_meta.get('image_base_image_ref')

        if not rescue_image_ref:
            LOG.warning(_LW('Unable to find a different image to use for '
                            'rescue VM, using instance\'s current image'),
                        instance=instance)
            rescue_image_ref = instance.image_ref

        image_meta = compute_utils.get_image_metadata(context, self.image_api,
                                                      rescue_image_ref,
                                                      instance)
        # NOTE(belliott) bug #1227350 - xenapi needs the actual image id
        image_meta['id'] = rescue_image_ref
        return image_meta

    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def rescue_instance(self, context, instance, rescue_password,
                        rescue_image_ref=None, clean_shutdown=True):
        # Power off the guest and boot a rescue VM (from the chosen rescue
        # image) with the instance's disks attached; on success the instance
        # moves to vm_state RESCUED.
        context = context.elevated()
        LOG.audit(_('Rescuing'), context=context, instance=instance)

        admin_password = (rescue_password if rescue_password else
                      utils.generate_password())

        network_info = self._get_instance_nw_info(context, instance)

        rescue_image_meta = self._get_rescue_image(context, instance,
                                                   rescue_image_ref)

        extra_usage_info = {'rescue_image_name':
                            rescue_image_meta.get('name', '')}
        self._notify_about_instance_usage(context, instance,
                "rescue.start", extra_usage_info=extra_usage_info,
                network_info=network_info)

        try:
            self._power_off_instance(context, instance, clean_shutdown)

            self.driver.rescue(context, instance,
                               network_info,
                               rescue_image_meta, admin_password)
        except Exception as e:
            LOG.exception(_LE("Error trying to Rescue Instance"),
                          instance=instance)
            raise exception.InstanceNotRescuable(
                instance_id=instance.uuid,
                reason=_("Driver Error: %s") % e)

        self.conductor_api.notify_usage_exists(context, instance,
                                               current_period=True)

        instance.vm_state = vm_states.RESCUED
        instance.task_state = None
        instance.power_state = self._get_power_state(context, instance)
        instance.launched_at = timeutils.utcnow()
        instance.save(expected_task_state=task_states.RESCUING)

        self._notify_about_instance_usage(context, instance,
                "rescue.end",
                extra_usage_info=extra_usage_info,
                network_info=network_info)

    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def unrescue_instance(self, context, instance):
        # Tear down the rescue VM and return the instance to ACTIVE.
        context = context.elevated()
        LOG.audit(_('Unrescuing'), context=context, instance=instance)

        network_info = self._get_instance_nw_info(context, instance)
        self._notify_about_instance_usage(context, instance,
                "unrescue.start", network_info=network_info)
        with self._error_out_instance_on_exception(context, instance):
            self.driver.unrescue(instance,
                                 network_info)

        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.power_state = self._get_power_state(context, instance)
        instance.save(expected_task_state=task_states.UNRESCUING)

        self._notify_about_instance_usage(context,
                                          instance,
                                          "unrescue.end",
                                          network_info=network_info)

    @object_compat
    @wrap_exception()
    @wrap_instance_fault
    def change_instance_metadata(self, context, diff, instance):
        """Update the metadata published to the instance."""
        LOG.debug("Changing instance metadata according to %r",
                  diff, instance=instance)
        self.driver.change_instance_metadata(context, instance, diff)

    def _cleanup_stored_instance_types(self, instance, restore_old=False):
        """Clean up "old" and "new" instance_type information stored in
        instance's system_metadata. Optionally update the "current"
        instance_type to the saved old one first.

        Returns the updated system_metadata as a dict, the
        post-cleanup current instance type and the to-be dropped
        instance type.
        """
        sys_meta = instance.system_metadata
        if restore_old:
            # Revert path: make the stashed 'old' flavor current and drop
            # the one the instance was resized to.
            instance_type = instance.get_flavor('old')
            drop_instance_type = instance.get_flavor()
            instance.set_flavor(instance_type)
        else:
            # Confirm path: keep the current flavor, drop the stashed 'old'.
            instance_type = instance.get_flavor()
            drop_instance_type = instance.get_flavor('old')

        instance.delete_flavor('old')
        instance.delete_flavor('new')

        return sys_meta, instance_type, drop_instance_type

    @wrap_exception()
    @wrap_instance_event
    @wrap_instance_fault
    def confirm_resize(self, context, instance, reservations, migration):
        # Finalize a resize/migration: re-reads migration and instance from
        # the DB under a per-instance lock so a duplicate or stale request
        # becomes a no-op (rolling back the quota reservations).

        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)

        @utils.synchronized(instance['uuid'])
        def do_confirm_resize(context, instance, migration_id):
            # NOTE(wangpan): Get the migration status from db, if it has been
            #                confirmed, we do nothing and return here
            LOG.debug("Going to confirm migration %s", migration_id,
                      context=context, instance=instance)
            try:
                # TODO(russellb) Why are we sending the migration object just
                # to turn around and look it up from the db again?
                migration = objects.Migration.get_by_id(
                        context.elevated(), migration_id)
            except exception.MigrationNotFound:
                LOG.error(_LE("Migration %s is not found during confirmation"),
                          migration_id, context=context, instance=instance)
                quotas.rollback()
                return

            if migration.status == 'confirmed':
                # Duplicate confirmation request; release reservations.
                LOG.info(_LI("Migration %s is already confirmed"),
                         migration_id, context=context, instance=instance)
                quotas.rollback()
                return
            elif migration.status not in ('finished', 'confirming'):
                LOG.warning(_LW("Unexpected confirmation status '%(status)s' "
                                "of migration %(id)s, exit confirmation "
                                "process"),
                            {"status": migration.status, "id": migration_id},
                            context=context, instance=instance)
                quotas.rollback()
                return

            # NOTE(wangpan): Get the instance from db, if it has been
            #                deleted, we do nothing and return here
            expected_attrs = ['metadata', 'system_metadata', 'flavor']
            try:
                instance = objects.Instance.get_by_uuid(
                        context, instance.uuid,
                        expected_attrs=expected_attrs)
            except exception.InstanceNotFound:
                LOG.info(_LI("Instance is not found during confirmation"),
                         context=context, instance=instance)
                quotas.rollback()
                return

            self._confirm_resize(context, instance, quotas,
                                 migration=migration)

        do_confirm_resize(context, instance, migration.id)

    def _confirm_resize(self, context, instance, quotas,
                        migration=None):
        """Destroys the source instance."""
        self._notify_about_instance_usage(context, instance,
                                          "resize.confirm.start")

        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            # NOTE(danms): delete stashed migration information
            sys_meta, instance_type, old_instance_type = (
                self._cleanup_stored_instance_types(instance))
            sys_meta.pop('old_vm_state', None)

            instance.system_metadata = sys_meta
            instance.save()

            # NOTE(tr3buchet): tear down networks on source host
            self.network_api.setup_networks_on_host(context, instance,
                               migration.source_compute, teardown=True)

            network_info = self._get_instance_nw_info(context, instance)
            self.driver.confirm_migration(migration, instance,
                                          network_info)

            migration.status = 'confirmed'
            with migration.obj_as_admin():
                migration.save()

            # Release the resources claimed on the source node for the
            # old flavor now that the resize is final.
            rt = self._get_resource_tracker(migration.source_node)
            rt.drop_resize_claim(context, instance, old_instance_type)

            # NOTE(mriedem): The old_vm_state could be STOPPED but the user
            # might have manually powered up the instance to confirm the
            # resize/migrate, so we need to check the current power state
            # on the instance and set the vm_state appropriately. We default
            # to ACTIVE because if the power state is not SHUTDOWN, we
            # assume _sync_instance_power_state will clean it up.
            p_state = instance.power_state
            vm_state = None
            if p_state == power_state.SHUTDOWN:
                vm_state = vm_states.STOPPED
                LOG.debug("Resized/migrated instance is powered off. "
                          "Setting vm_state to '%s'.", vm_state,
                          instance=instance)
            else:
                vm_state = vm_states.ACTIVE

            instance.vm_state = vm_state
            instance.task_state = None
            instance.save(expected_task_state=[None, task_states.DELETING])

            self._notify_about_instance_usage(
                context, instance, "resize.confirm.end",
                network_info=network_info)

            quotas.commit()

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def revert_resize(self, context, instance, migration, reservations):
        """Destroys the new instance on the destination machine.

        Reverts the model changes, and powers on the old instance on the
        source machine.

        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)

        # NOTE(comstud): A revert_resize is essentially a resize back to
        # the old size, so we need to send a usage event here.
        self.conductor_api.notify_usage_exists(
                context, instance, current_period=True)

        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            # NOTE(tr3buchet): tear down networks on destination host
            self.network_api.setup_networks_on_host(context, instance,
                                                    teardown=True)

            migration_p = obj_base.obj_to_primitive(migration)
            self.network_api.migrate_instance_start(context,
                                                    instance,
                                                    migration_p)

            network_info = self._get_instance_nw_info(context, instance)
            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                    context, instance.uuid)
            block_device_info = self._get_instance_block_device_info(
                                context, instance, bdms=bdms)

            # Only wipe local disks if instance storage is not shared
            # between source and destination.
            destroy_disks = not self._is_instance_storage_shared(context,
                                                                 instance)
            self.driver.destroy(context, instance, network_info,
                                block_device_info, destroy_disks)

            self._terminate_volume_connections(context, instance, bdms)

            migration.status = 'reverted'
            with migration.obj_as_admin():
                migration.save()

            rt = self._get_resource_tracker(instance.node)
            rt.drop_resize_claim(context, instance)

            # Hand off to the source host to finish the revert there.
            self.compute_rpcapi.finish_revert_resize(context, instance,
                    migration, migration.source_compute,
                    quotas.reservations)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def finish_revert_resize(self, context, instance, reservations, migration):
        """Finishes the second half of reverting a resize.

        Bring the original source instance state back (active/shutoff) and
        revert the resized attributes in the database.
        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)

        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            network_info = self._get_instance_nw_info(context, instance)

            self._notify_about_instance_usage(
                    context, instance, "resize.revert.start")

            sys_meta, instance_type, drop_instance_type = (
                self._cleanup_stored_instance_types(instance, True))

            # NOTE(mriedem): delete stashed old_vm_state information; we
            # default to ACTIVE for backwards compatibility if old_vm_state
            # is not set
            old_vm_state = sys_meta.pop('old_vm_state', vm_states.ACTIVE)

            # Restore the pre-resize flavor attributes and relocate the
            # instance record back to the source host/node.
            instance.system_metadata = sys_meta
            instance.memory_mb = instance_type['memory_mb']
            instance.vcpus = instance_type['vcpus']
            instance.root_gb = instance_type['root_gb']
            instance.ephemeral_gb = instance_type['ephemeral_gb']
            instance.instance_type_id = instance_type['id']
            instance.host = migration.source_compute
            instance.node = migration.source_node
            instance.save()

            migration.dest_compute = migration.source_compute
            with migration.obj_as_admin():
                migration.save()

            self.network_api.setup_networks_on_host(context, instance,
                                            migration.source_compute)

            block_device_info = self._get_instance_block_device_info(
                    context, instance, refresh_conn_info=True)

            power_on = old_vm_state != vm_states.STOPPED
            self.driver.finish_revert_migration(context, instance,
                                       network_info,
                                       block_device_info, power_on)

            instance.launched_at = timeutils.utcnow()
            instance.save(expected_task_state=task_states.RESIZE_REVERTING)

            migration_p = obj_base.obj_to_primitive(migration)
            self.network_api.migrate_instance_finish(context,
                                                     instance,
                                                     migration_p)

            # if the original vm state was STOPPED, set it back to STOPPED
            LOG.info(_LI("Updating instance to original state: '%s'"),
                     old_vm_state)
            if power_on:
                instance.vm_state = vm_states.ACTIVE
                instance.task_state = None
                instance.save()
            else:
                instance.task_state = task_states.POWERING_OFF
                instance.save()
                self.stop_instance(context, instance=instance)
            self._notify_about_instance_usage(
                    context, instance, "resize.revert.end")
        # Commit only after everything above succeeded; an exception inside
        # the context manager rolls the reservations back instead.
        quotas.commit()

    def _prep_resize(self, context, image, instance, instance_type,
            quotas, request_spec, filter_properties, node,
            clean_shutdown=True):
        """Validate a resize request and claim resources on this host.

        Verifies the instance has a source host and that a same-host resize
        is allowed, stashes the new flavor and old vm_state, takes a resize
        claim on the resource tracker, then casts ``resize_instance`` to the
        source compute.

        :raises: exception.MigrationError if the instance has no source host
                 or the destination equals the source while
                 CONF.allow_resize_to_same_host is disabled.
        """
        if not filter_properties:
            filter_properties = {}

        if not instance['host']:
            self._set_instance_error_state(context, instance)
            msg = _('Instance has no source host')
            raise exception.MigrationError(reason=msg)

        same_host = instance['host'] == self.host
        if same_host and not CONF.allow_resize_to_same_host:
            self._set_instance_error_state(context, instance)
            msg = _('destination same as source!')
            raise exception.MigrationError(reason=msg)

        # NOTE(danms): Stash the new instance_type to avoid having to
        # look it up in the database later
        instance.set_flavor(instance_type, 'new')

        # NOTE(mriedem): Stash the old vm_state so we can set the
        # resized/reverted instance back to the same state later.
        vm_state = instance['vm_state']
        LOG.debug('Stashing vm_state: %s', vm_state, instance=instance)
        instance.system_metadata['old_vm_state'] = vm_state
        instance.save()

        limits = filter_properties.get('limits', {})
        rt = self._get_resource_tracker(node)
        # The claim context manager aborts the claim if the cast below fails.
        with rt.resize_claim(context, instance, instance_type,
                             image_meta=image, limits=limits) as claim:
            LOG.audit(_('Migrating'), context=context, instance=instance)
            self.compute_rpcapi.resize_instance(
                    context, instance, claim.migration, image,
                    instance_type, quotas.reservations,
                    clean_shutdown)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def prep_resize(self, context, image, instance, instance_type,
                    reservations, request_spec, filter_properties, node,
                    clean_shutdown=True):
        """Initiates the process of moving a running instance to another host.

        Possibly changes the RAM and disk size in the process.
        """
        if node is None:
            node = self.driver.get_available_nodes(refresh=True)[0]
            LOG.debug("No node specified, defaulting to %s", node,
                      instance=instance)

        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            self.conductor_api.notify_usage_exists(
                    context, instance, current_period=True)
            self._notify_about_instance_usage(
                    context, instance, "resize.prep.start")
            try:
                self._prep_resize(context, image, instance,
                                  instance_type, quotas,
                                  request_spec, filter_properties,
                                  node, clean_shutdown)
            # NOTE(dgenin): This is thrown in LibvirtDriver when the
            # instance to be migrated is backed by LVM.
            # Remove when LVM migration is implemented.
            except exception.MigrationPreCheckError:
                raise
            except Exception:
                # try to re-schedule the resize elsewhere:
                exc_info = sys.exc_info()
                self._reschedule_resize_or_reraise(context, image, instance,
                        exc_info, instance_type, quotas, request_spec,
                        filter_properties)
            finally:
                # Emit the end notification regardless of outcome so
                # listeners always see a matching start/end pair.
                extra_usage_info = dict(
                        new_instance_type=instance_type['name'],
                        new_instance_type_id=instance_type['id'])

                self._notify_about_instance_usage(
                    context, instance, "resize.prep.end",
                    extra_usage_info=extra_usage_info)

    def _reschedule_resize_or_reraise(self, context, image, instance, exc_info,
            instance_type, quotas, request_spec, filter_properties):
        """Try to re-schedule the resize or re-raise the original error to
        error out the instance.
        """
        if not request_spec:
            request_spec = {}
        if not filter_properties:
            filter_properties = {}

        rescheduled = False
        instance_uuid = instance['uuid']

        try:
            reschedule_method = self.compute_task_api.resize_instance
            scheduler_hint = dict(filter_properties=filter_properties)
            method_args = (instance, None, scheduler_hint, instance_type,
                           quotas.reservations)
            task_state = task_states.RESIZE_PREP

            rescheduled = self._reschedule(context, request_spec,
                    filter_properties, instance, reschedule_method,
                    method_args, task_state, exc_info)
        except Exception as error:
            # Rescheduling itself failed; record the new fault but fall
            # through to re-raise the *original* error below.
            rescheduled = False
            LOG.exception(_LE("Error trying to reschedule"),
                          instance_uuid=instance_uuid)
            compute_utils.add_instance_fault_from_exc(context,
                    instance, error,
                    exc_info=sys.exc_info())
            self._notify_about_instance_usage(context, instance,
                    'resize.error', fault=error)

        if rescheduled:
            # Log and record the original failure; the resize will be
            # retried on another host.
            self._log_original_error(exc_info, instance_uuid)
            compute_utils.add_instance_fault_from_exc(context,
                    instance, exc_info[1], exc_info=exc_info)
            self._notify_about_instance_usage(context, instance,
                    'resize.error', fault=exc_info[1])
        else:
            # not re-scheduling
            # NOTE(review): Python 2 three-expression raise; preserves the
            # original traceback captured in exc_info.
            raise exc_info[0], exc_info[1], exc_info[2]

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @errors_out_migration
    @wrap_instance_fault
    def resize_instance(self, context, instance, image,
                        reservations, migration, instance_type,
                        clean_shutdown=True):
        """Starts the migration of a running instance to another host."""

        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        with self._error_out_instance_on_exception(context, instance,
                                                   quotas=quotas):
            # Older callers may not pass the flavor; look it up from the
            # migration record in that case.
            if not instance_type:
                instance_type = objects.Flavor.get_by_id(
                    context, migration['new_instance_type_id'])

            network_info = self._get_instance_nw_info(context, instance)

            migration.status = 'migrating'
            with migration.obj_as_admin():
                migration.save()

            instance.task_state = task_states.RESIZE_MIGRATING
            instance.save(expected_task_state=task_states.RESIZE_PREP)

            self._notify_about_instance_usage(
                context, instance,
                "resize.start", network_info=network_info)

            bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                    context, instance.uuid)
            block_device_info = self._get_instance_block_device_info(
                    context, instance, bdms=bdms)

            timeout, retry_interval = self._get_power_off_values(context,
                                            instance, clean_shutdown)
            # The driver powers the guest off and copies/moves its disk to
            # the destination host; disk_info is passed on to finish_resize.
            disk_info = self.driver.migrate_disk_and_power_off(
                    context, instance, migration.dest_host,
                    instance_type, network_info,
                    block_device_info,
                    timeout, retry_interval)

            self._terminate_volume_connections(context, instance, bdms)

            # Older network RPC interfaces expect a primitive, not an object.
            migration_p = obj_base.obj_to_primitive(migration)
            self.network_api.migrate_instance_start(context,
                                                    instance,
                                                    migration_p)

            migration.status = 'post-migrating'
            with migration.obj_as_admin():
                migration.save()

            # Re-point the instance at the destination before casting
            # finish_resize there.
            instance.host = migration.dest_compute
            instance.node = migration.dest_node
            instance.task_state = task_states.RESIZE_MIGRATED
            instance.save(expected_task_state=task_states.RESIZE_MIGRATING)

            self.compute_rpcapi.finish_resize(context, instance,
                    migration, image, disk_info,
                    migration.dest_compute, reservations=quotas.reservations)

            self._notify_about_instance_usage(context, instance, "resize.end",
                                              network_info=network_info)
            self.instance_events.clear_events_for_instance(instance)

    def _terminate_volume_connections(self, context, instance, bdms):
        # Tear down this host's connections to every attached volume so the
        # destination host can initialize its own.
        connector = self.driver.get_volume_connector(instance)
        for bdm in bdms:
            if bdm.is_volume:
                self.volume_api.terminate_connection(context, bdm.volume_id,
                                                     connector)

    @staticmethod
    def _set_instance_info(instance, instance_type):
        # Copy the flavor's sizing attributes onto the instance and set it
        # as the current flavor (not saved here; callers save).
        instance.instance_type_id = instance_type['id']
        instance.memory_mb = instance_type['memory_mb']
        instance.vcpus = instance_type['vcpus']
        instance.root_gb = instance_type['root_gb']
        instance.ephemeral_gb = instance_type['ephemeral_gb']
        instance.set_flavor(instance_type)

    def _finish_resize(self, context, instance, migration, disk_info,
                       image):
        """Apply the new flavor and spawn the instance on this (destination)
        host; the core of finish_resize.
        """
        resize_instance = False
        old_instance_type_id = migration['old_instance_type_id']
        new_instance_type_id = migration['new_instance_type_id']
old_instance_type = instance.get_flavor() # NOTE(mriedem): Get the old_vm_state so we know if we should # power on the instance. If old_vm_state is not set we need to default # to ACTIVE for backwards compatibility old_vm_state = instance.system_metadata.get('old_vm_state', vm_states.ACTIVE) instance.set_flavor(old_instance_type, 'old') if old_instance_type_id != new_instance_type_id: instance_type = instance.get_flavor('new') self._set_instance_info(instance, instance_type) resize_instance = True # NOTE(tr3buchet): setup networks on destination host self.network_api.setup_networks_on_host(context, instance, migration['dest_compute']) migration_p = obj_base.obj_to_primitive(migration) self.network_api.migrate_instance_finish(context, instance, migration_p) network_info = self._get_instance_nw_info(context, instance) instance.task_state = task_states.RESIZE_FINISH instance.save(expected_task_state=task_states.RESIZE_MIGRATED) self._notify_about_instance_usage( context, instance, "finish_resize.start", network_info=network_info) block_device_info = self._get_instance_block_device_info( context, instance, refresh_conn_info=True) # NOTE(mriedem): If the original vm_state was STOPPED, we don't # automatically power on the instance after it's migrated power_on = old_vm_state != vm_states.STOPPED try: self.driver.finish_migration(context, migration, instance, disk_info, network_info, image, resize_instance, block_device_info, power_on) except Exception: with excutils.save_and_reraise_exception(): if resize_instance: self._set_instance_info(instance, old_instance_type) migration.status = 'finished' with migration.obj_as_admin(): migration.save() instance.vm_state = vm_states.RESIZED instance.task_state = None instance.launched_at = timeutils.utcnow() instance.save(expected_task_state=task_states.RESIZE_FINISH) self._notify_about_instance_usage( context, instance, "finish_resize.end", network_info=network_info) @wrap_exception() @reverts_task_state @wrap_instance_event 
    @errors_out_migration
    @wrap_instance_fault
    def finish_resize(self, context, disk_info, image, instance,
                      reservations, migration):
        """Completes the migration process.

        Sets up the newly transferred disk and turns on the instance at its
        new host machine.

        """
        quotas = objects.Quotas.from_reservations(context,
                                                  reservations,
                                                  instance=instance)
        try:
            self._finish_resize(context, instance, migration,
                                disk_info, image)
            quotas.commit()
        except Exception:
            LOG.exception(_LE('Setting instance vm_state to ERROR'),
                          instance=instance)
            with excutils.save_and_reraise_exception():
                try:
                    quotas.rollback()
                except Exception as qr_error:
                    # Best-effort rollback: log and continue so the instance
                    # is still put into ERROR below.
                    LOG.exception(_LE("Failed to rollback quota for failed "
                                      "finish_resize: %s"),
                                  qr_error, instance=instance)
                self._set_instance_error_state(context, instance)

    @object_compat
    @wrap_exception()
    @wrap_instance_fault
    def add_fixed_ip_to_instance(self, context, network_id, instance):
        """Calls network_api to add new fixed_ip to instance
        then injects the new network info and resets instance networking.

        """
        self._notify_about_instance_usage(
                context, instance, "create_ip.start")

        network_info = self.network_api.add_fixed_ip_to_instance(context,
                                                                 instance,
                                                                 network_id)
        self._inject_network_info(context, instance, network_info)
        self.reset_network(context, instance)

        # NOTE(russellb) We just want to bump updated_at.  See bug 1143466.
        instance.updated_at = timeutils.utcnow()
        instance.save()

        self._notify_about_instance_usage(
            context, instance, "create_ip.end", network_info=network_info)

    @object_compat
    @wrap_exception()
    @wrap_instance_fault
    def remove_fixed_ip_from_instance(self, context, address, instance):
        """Calls network_api to remove existing fixed_ip from instance
        by injecting the altered network info and resetting
        instance networking.
        """
        self._notify_about_instance_usage(
                context, instance, "delete_ip.start")

        network_info = self.network_api.remove_fixed_ip_from_instance(context,
                                                                      instance,
                                                                      address)
        self._inject_network_info(context, instance, network_info)
        self.reset_network(context, instance)

        # NOTE(russellb) We just want to bump updated_at.  See bug 1143466.
        instance.updated_at = timeutils.utcnow()
        instance.save()

        self._notify_about_instance_usage(
            context, instance, "delete_ip.end", network_info=network_info)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def pause_instance(self, context, instance):
        """Pause an instance on this host."""
        context = context.elevated()
        LOG.audit(_('Pausing'), context=context, instance=instance)
        self._notify_about_instance_usage(context, instance, 'pause.start')
        self.driver.pause(instance)
        # Record the actual hypervisor power state, not an assumed one.
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.PAUSED
        instance.task_state = None
        instance.save(expected_task_state=task_states.PAUSING)
        self._notify_about_instance_usage(context, instance, 'pause.end')

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def unpause_instance(self, context, instance):
        """Unpause a paused instance on this host."""
        context = context.elevated()
        LOG.audit(_('Unpausing'), context=context, instance=instance)
        self._notify_about_instance_usage(context, instance, 'unpause.start')
        self.driver.unpause(instance)
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.save(expected_task_state=task_states.UNPAUSING)
        self._notify_about_instance_usage(context, instance, 'unpause.end')

    @wrap_exception()
    def host_power_action(self, context, action):
        """Reboots, shuts down or powers up the host."""
        return self.driver.host_power_action(action)

    @wrap_exception()
    def host_maintenance_mode(self, context, host, mode):
        """Start/Stop host maintenance window.
        On start, it triggers guest VMs evacuation.
        """
        return self.driver.host_maintenance_mode(host, mode)

    @wrap_exception()
    def set_host_enabled(self, context, enabled):
        """Sets the specified host's ability to accept new instances."""
        return self.driver.set_host_enabled(enabled)

    @wrap_exception()
    def get_host_uptime(self, context):
        """Returns the result of calling "uptime" on the target host."""
        return self.driver.get_host_uptime()

    @object_compat
    @wrap_exception()
    @wrap_instance_fault
    def get_diagnostics(self, context, instance):
        """Retrieve diagnostics for an instance on this host."""
        current_power_state = self._get_power_state(context, instance)
        # Diagnostics are only meaningful for a running guest.
        if current_power_state == power_state.RUNNING:
            LOG.audit(_("Retrieving diagnostics"), context=context,
                      instance=instance)
            return self.driver.get_diagnostics(instance)
        else:
            raise exception.InstanceInvalidState(
                attr='power_state',
                instance_uuid=instance.uuid,
                state=instance.power_state,
                method='get_diagnostics')

    @object_compat
    @wrap_exception()
    @wrap_instance_fault
    def get_instance_diagnostics(self, context, instance):
        """Retrieve diagnostics for an instance on this host."""
        current_power_state = self._get_power_state(context, instance)
        if current_power_state == power_state.RUNNING:
            LOG.audit(_("Retrieving diagnostics"), context=context,
                      instance=instance)
            # Standardized (versioned) diagnostics, serialized for RPC.
            diags = self.driver.get_instance_diagnostics(instance)
            return diags.serialize()
        else:
            raise exception.InstanceInvalidState(
                attr='power_state',
                instance_uuid=instance.uuid,
                state=instance.power_state,
                method='get_diagnostics')

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def suspend_instance(self, context, instance):
        """Suspend the given instance."""
        context = context.elevated()

        # Store the old state
        instance.system_metadata['old_vm_state'] = instance.vm_state
        self._notify_about_instance_usage(context, instance, 'suspend.start')

        with self._error_out_instance_on_exception(context, instance,
             instance_state=instance.vm_state):
            self.driver.suspend(instance)
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.SUSPENDED
        instance.task_state = None
        instance.save(expected_task_state=task_states.SUSPENDING)
        self._notify_about_instance_usage(context, instance, 'suspend.end')

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def resume_instance(self, context, instance):
        """Resume the given suspended instance."""
        context = context.elevated()
        LOG.audit(_('Resuming'), context=context, instance=instance)

        self._notify_about_instance_usage(context, instance, 'resume.start')
        network_info = self._get_instance_nw_info(context, instance)
        block_device_info = self._get_instance_block_device_info(
                            context, instance)

        with self._error_out_instance_on_exception(context, instance,
             instance_state=instance.vm_state):
            self.driver.resume(context, instance, network_info,
                               block_device_info)

        instance.power_state = self._get_power_state(context, instance)

        # We default to the ACTIVE state for backwards compatibility
        # ('old_vm_state' was stashed by suspend_instance; pop removes it).
        instance.vm_state = instance.system_metadata.pop('old_vm_state',
                                                         vm_states.ACTIVE)

        instance.task_state = None
        instance.save(expected_task_state=task_states.RESUMING)
        self._notify_about_instance_usage(context, instance, 'resume.end')

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def shelve_instance(self, context, instance, image_id,
                        clean_shutdown=True):
        """Shelve an instance.

        This should be used when you want to take a snapshot of the instance.
        It also adds system_metadata that can be used by a periodic task to
        offload the shelved instance after a period of time.

        :param context: request context
        :param instance: an Instance object
        :param image_id: an image id to snapshot to.
        :param clean_shutdown: give the GuestOS a chance to stop
        """
        self.conductor_api.notify_usage_exists(
            context, instance, current_period=True)
        self._notify_about_instance_usage(context, instance, 'shelve.start')

        def update_task_state(task_state, expected_state=task_states.SHELVING):
            # Map the generic snapshot task states reported by the driver
            # onto their SHELVING_* equivalents before saving.
            shelving_state_map = {
                    task_states.IMAGE_PENDING_UPLOAD:
                        task_states.SHELVING_IMAGE_PENDING_UPLOAD,
                    task_states.IMAGE_UPLOADING:
                        task_states.SHELVING_IMAGE_UPLOADING,
                    task_states.SHELVING: task_states.SHELVING}
            task_state = shelving_state_map[task_state]
            expected_state = shelving_state_map[expected_state]
            instance.task_state = task_state
            instance.save(expected_task_state=expected_state)

        self._power_off_instance(context, instance, clean_shutdown)
        self.driver.snapshot(context, instance, image_id, update_task_state)

        # Record where/when/what was shelved so unshelve and the periodic
        # offload task can find it.
        instance.system_metadata['shelved_at'] = timeutils.strtime()
        instance.system_metadata['shelved_image_id'] = image_id
        instance.system_metadata['shelved_host'] = self.host
        instance.vm_state = vm_states.SHELVED
        instance.task_state = None
        # shelved_offload_time == 0 means offload immediately.
        if CONF.shelved_offload_time == 0:
            instance.task_state = task_states.SHELVING_OFFLOADING
        instance.power_state = self._get_power_state(context, instance)
        instance.save(expected_task_state=[
                task_states.SHELVING,
                task_states.SHELVING_IMAGE_UPLOADING])

        self._notify_about_instance_usage(context, instance, 'shelve.end')

        if CONF.shelved_offload_time == 0:
            # Guest is already powered off above, so skip a second shutdown.
            self.shelve_offload_instance(context, instance,
                                         clean_shutdown=False)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def shelve_offload_instance(self, context, instance, clean_shutdown=True):
        """Remove a shelved instance from the hypervisor.

        This frees up those resources for use by other instances, but may lead
        to slower unshelve times for this instance.  This method is used by
        volume backed instances since restoring them doesn't involve the
        potentially large download of an image.
        :param context: request context
        :param instance: nova.objects.instance.Instance
        :param clean_shutdown: give the GuestOS a chance to stop
        """
        self._notify_about_instance_usage(context, instance,
                'shelve_offload.start')

        self._power_off_instance(context, instance, clean_shutdown)
        current_power_state = self._get_power_state(context, instance)

        self.network_api.cleanup_instance_network_on_host(context, instance,
                                                          instance.host)
        network_info = self._get_instance_nw_info(context, instance)
        block_device_info = self._get_instance_block_device_info(context,
                                                                 instance)
        self.driver.destroy(context, instance, network_info,
                block_device_info)

        # The instance no longer lives on any host/node until unshelved.
        instance.power_state = current_power_state
        instance.host = None
        instance.node = None
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.task_state = None
        instance.save(expected_task_state=[task_states.SHELVING,
                                           task_states.SHELVING_OFFLOADING])
        self._notify_about_instance_usage(context, instance,
                'shelve_offload.end')

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def unshelve_instance(self, context, instance, image,
                          filter_properties=None, node=None):
        """Unshelve the instance.

        :param context: request context
        :param instance: a nova.objects.instance.Instance object
        :param image: an image to build from.  If None we assume a
            volume backed instance.
        :param filter_properties: dict containing limits, retry info etc.
        :param node: target compute node
        """
        if filter_properties is None:
            filter_properties = {}

        # Serialize per-instance so concurrent unshelve requests cannot race.
        @utils.synchronized(instance['uuid'])
        def do_unshelve_instance():
            self._unshelve_instance(context, instance, image,
                                    filter_properties, node)
        do_unshelve_instance()

    def _unshelve_instance_key_scrub(self, instance):
        """Remove data from the instance that may cause side effects."""
        cleaned_keys = dict(
                key_data=instance.key_data,
                auto_disk_config=instance.auto_disk_config)
        instance.key_data = None
        instance.auto_disk_config = False
        return cleaned_keys

    def _unshelve_instance_key_restore(self, instance, keys):
        """Restore previously scrubbed keys before saving the instance."""
        instance.update(keys)

    def _unshelve_instance(self, context, instance, image, filter_properties,
                           node):
        """Claim resources on this host and spawn the shelved instance.

        ``image`` is the shelve snapshot to boot from, or None for a
        volume-backed instance.
        """
        self._notify_about_instance_usage(context, instance, 'unshelve.start')
        instance.task_state = task_states.SPAWNING
        instance.save()

        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)
        block_device_info = self._prep_block_device(context, instance, bdms,
                                                    do_check_attach=False)
        scrubbed_keys = self._unshelve_instance_key_scrub(instance)

        if node is None:
            node = self.driver.get_available_nodes()[0]
            LOG.debug('No node specified, defaulting to %s', node,
                      instance=instance)

        rt = self._get_resource_tracker(node)
        limits = filter_properties.get('limits', {})

        if image:
            # Temporarily boot from the shelve snapshot; the original
            # image_ref is restored after spawn.
            shelved_image_ref = instance.image_ref
            instance.image_ref = image['id']

        self.network_api.setup_instance_network_on_host(context, instance,
                                                        self.host)
        network_info = self._get_instance_nw_info(context, instance)
        try:
            with rt.instance_claim(context, instance, limits):
                flavor = None
                if filter_properties is not None:
                    flavor = filter_properties.get('instance_type')
                self.driver.spawn(context, instance, image,
                                  injected_files=[],
                                  admin_password=None,
                                  network_info=network_info,
                                  block_device_info=block_device_info,
                                  flavor=flavor)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Instance failed to spawn'),
                              instance=instance)

        if image:
            # Spawn succeeded: restore the original image ref and delete the
            # now-unneeded shelve snapshot.
            instance.image_ref = shelved_image_ref
            self.image_api.delete(context, image['id'])

        self._unshelve_instance_key_restore(instance, scrubbed_keys)
        instance.power_state = self._get_power_state(context, instance)
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.launched_at = timeutils.utcnow()
        instance.save(expected_task_state=task_states.SPAWNING)
        self._notify_about_instance_usage(context, instance, 'unshelve.end')

    @messaging.expected_exceptions(NotImplementedError)
    @wrap_instance_fault
    def reset_network(self, context, instance):
        """Reset networking on the given instance."""
        LOG.debug('Reset network', context=context, instance=instance)
        self.driver.reset_network(instance)

    def _inject_network_info(self, context, instance, network_info):
        """Inject network info for the given instance."""
        LOG.debug('Inject network info', context=context, instance=instance)
        LOG.debug('network_info to inject: |%s|', network_info,
                  instance=instance)

        self.driver.inject_network_info(instance,
                                        network_info)

    @wrap_instance_fault
    def inject_network_info(self, context, instance):
        """Inject network info, but don't return the info."""
        network_info = self._get_instance_nw_info(context, instance)
        self._inject_network_info(context, instance, network_info)

    @object_compat
    @messaging.expected_exceptions(NotImplementedError,
                                   exception.InstanceNotFound)
    @wrap_exception()
    @wrap_instance_fault
    def get_console_output(self, context, instance, tail_length):
        """Send the console output for the given instance."""
        context = context.elevated()
        LOG.audit(_("Get console output"), context=context,
                  instance=instance)
        output = self.driver.get_console_output(context, instance)

        if tail_length is not None:
            output = self._tail_log(output, tail_length)

        # Replace any bytes that do not survive utf-8 decode / ascii encode
        # so the result is always safe to return over RPC.
        return output.decode('utf-8', 'replace').encode('ascii', 'replace')

    def _tail_log(self, log, length):
        """Return the last ``length`` lines of ``log``.

        A non-numeric or zero ``length`` yields the empty string.
        """
        try:
            length = int(length)
        except ValueError:
            length = 0

        if length == 0:
            return ''
        else:
            return '\n'.join(log.split('\n')[-int(length):])

    @messaging.expected_exceptions(exception.ConsoleTypeInvalid,
                                   exception.InstanceNotReady,
                                   exception.InstanceNotFound,
                                   exception.ConsoleTypeUnavailable,
                                   NotImplementedError)
    @object_compat
    @wrap_exception()
    @wrap_instance_fault
    def get_vnc_console(self, context, console_type, instance):
        """Return connection information for a vnc console."""
        context = context.elevated()
        LOG.debug("Getting vnc console", instance=instance)
        # Fresh one-time token authorizing this console connection.
        token = str(uuid.uuid4())

        if not CONF.vnc_enabled:
            raise exception.ConsoleTypeUnavailable(console_type=console_type)

        if console_type == 'novnc':
            # For essex, novncproxy_base_url must include the full path
            # including the html file (like http://myhost/vnc_auto.html)
            access_url = '%s?token=%s' % (CONF.novncproxy_base_url, token)
        elif console_type == 'xvpvnc':
            access_url = '%s?token=%s' % (CONF.xvpvncproxy_base_url, token)
        else:
            raise exception.ConsoleTypeInvalid(console_type=console_type)

        try:
            # Retrieve connect info from driver, and then decorate with our
            # access info token
            console = self.driver.get_vnc_console(context, instance)
            connect_info = console.get_connection_info(token, access_url)
        except exception.InstanceNotFound:
            # A still-building instance has no console yet; report NotReady
            # rather than NotFound.
            if instance['vm_state'] != vm_states.BUILDING:
                raise
            raise exception.InstanceNotReady(instance_id=instance['uuid'])

        return connect_info

    @object_compat
    @messaging.expected_exceptions(exception.ConsoleTypeInvalid,
                                   exception.InstanceNotReady,
                                   exception.InstanceNotFound,
                                   exception.ConsoleTypeUnavailable)
    @wrap_exception()
    @wrap_instance_fault
    def get_spice_console(self, context, console_type, instance):
        """Return connection information for a spice console."""
        context = context.elevated()
        LOG.debug("Getting spice console", instance=instance)
        token = str(uuid.uuid4())

        if not CONF.spice.enabled:
            raise exception.ConsoleTypeUnavailable(console_type=console_type)

        if console_type == 'spice-html5':
            # For essex, spicehtml5proxy_base_url must include the full path
            # including the html file (like http://myhost/spice_auto.html)
            access_url = '%s?token=%s' % (CONF.spice.html5proxy_base_url,
                                          token)
        else:
            raise exception.ConsoleTypeInvalid(console_type=console_type)

        try:
            # Retrieve connect info from driver, and then decorate with our
            # access info token
            console = self.driver.get_spice_console(context, instance)
            connect_info = console.get_connection_info(token, access_url)
        except exception.InstanceNotFound:
            if instance['vm_state'] != vm_states.BUILDING:
                raise
            raise exception.InstanceNotReady(instance_id=instance['uuid'])

        return connect_info

    @object_compat
    @messaging.expected_exceptions(exception.ConsoleTypeInvalid,
                                   exception.InstanceNotReady,
                                   exception.InstanceNotFound,
                                   exception.ConsoleTypeUnavailable,
                                   NotImplementedError)
    @wrap_exception()
    @wrap_instance_fault
    def get_rdp_console(self, context, console_type, instance):
        """Return connection information for a RDP console."""
        context = context.elevated()
        LOG.debug("Getting RDP console", instance=instance)
        token = str(uuid.uuid4())

        if not CONF.rdp.enabled:
            raise exception.ConsoleTypeUnavailable(console_type=console_type)

        if console_type == 'rdp-html5':
            access_url = '%s?token=%s' % (CONF.rdp.html5_proxy_base_url,
                                          token)
        else:
            raise exception.ConsoleTypeInvalid(console_type=console_type)

        try:
            # Retrieve connect info from driver, and then decorate with our
            # access info token
            console = self.driver.get_rdp_console(context, instance)
            connect_info = console.get_connection_info(token, access_url)
        except exception.InstanceNotFound:
            if instance['vm_state'] != vm_states.BUILDING:
                raise
            raise exception.InstanceNotReady(instance_id=instance['uuid'])

        return connect_info

    @messaging.expected_exceptions(
        exception.ConsoleTypeInvalid,
        exception.InstanceNotReady,
        exception.InstanceNotFound,
        exception.ConsoleTypeUnavailable,
        exception.SocketPortRangeExhaustedException,
        exception.ImageSerialPortNumberInvalid,
        exception.ImageSerialPortNumberExceedFlavorValue,
        NotImplementedError)
    @wrap_exception()
    @wrap_instance_fault
    def get_serial_console(self, context, console_type, instance):
        """Returns connection information for a serial console."""

        LOG.debug("Getting serial console", instance=instance)

        if not CONF.serial_console.enabled:
            raise exception.ConsoleTypeUnavailable(console_type=console_type)

        context = context.elevated()

        token = str(uuid.uuid4())
        access_url = '%s?token=%s' % (CONF.serial_console.base_url, token)

        try:
            # Retrieve connect info from driver, and then decorate with our
            # access info token
            console = self.driver.get_serial_console(context, instance)
            connect_info = console.get_connection_info(token, access_url)
        except exception.InstanceNotFound:
            if instance.vm_state != vm_states.BUILDING:
                raise
            raise exception.InstanceNotReady(instance_id=instance['uuid'])

        return connect_info

    @messaging.expected_exceptions(exception.ConsoleTypeInvalid,
                                   exception.InstanceNotReady,
                                   exception.InstanceNotFound)
    @object_compat
    @wrap_exception()
    @wrap_instance_fault
    def validate_console_port(self, ctxt, instance, port, console_type):
        # Confirm that the given port matches what the driver currently
        # reports for this console type (used by the console proxies).
        if console_type == "spice-html5":
            console_info = self.driver.get_spice_console(ctxt, instance)
        elif console_type == "rdp-html5":
            console_info = self.driver.get_rdp_console(ctxt, instance)
        elif console_type == "serial":
            console_info = self.driver.get_serial_console(ctxt, instance)
        else:
            console_info = self.driver.get_vnc_console(ctxt, instance)

        return console_info.port == port

    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def reserve_block_device_name(self, context, instance, device,
                                  volume_id, disk_bus=None, device_type=None,
                                  return_bdm_object=False):
        # NOTE(ndipanov): disk_bus and device_type will be set to None if not
        # passed (by older clients) and defaulted by the virt driver. Remove
        # default values on the next major RPC version bump.
        # Serialize per-instance so two concurrent reservations cannot pick
        # the same device name.
        @utils.synchronized(instance['uuid'])
        def do_reserve():
            bdms = (
                objects.BlockDeviceMappingList.get_by_instance_uuid(
                    context, instance.uuid))

            device_name = compute_utils.get_device_name_for_instance(
                    context, instance, bdms, device)

            # NOTE(vish): create bdm here to avoid race condition
            bdm = objects.BlockDeviceMapping(
                    context=context,
                    source_type='volume', destination_type='volume',
                    instance_uuid=instance.uuid,
                    volume_id=volume_id or 'reserved',
                    device_name=device_name,
                    disk_bus=disk_bus, device_type=device_type)
            bdm.create()

            if return_bdm_object:
                return bdm
            else:
                return device_name

        return do_reserve()

    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def attach_volume(self, context, volume_id, mountpoint,
                      instance, bdm=None):
        """Attach a volume to an instance."""
        if not bdm:
            bdm = objects.BlockDeviceMapping.get_by_volume_id(
                    context, volume_id)
        driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm)

        # Serialize with other volume operations on the same instance; on
        # failure the reserved BDM row is removed again.
        @utils.synchronized(instance.uuid)
        def do_attach_volume(context, instance, driver_bdm):
            try:
                return self._attach_volume(context, instance, driver_bdm)
            except Exception:
                with excutils.save_and_reraise_exception():
                    bdm.destroy()

        do_attach_volume(context, instance, driver_bdm)

    def _attach_volume(self, context, instance, bdm):
        """Do the actual attach via the BDM object and emit notifications."""
        context = context.elevated()
        LOG.audit(_('Attaching volume %(volume_id)s to %(mountpoint)s'),
                  {'volume_id': bdm.volume_id,
                   'mountpoint': bdm['mount_device']},
                  context=context, instance=instance)
        try:
            bdm.attach(context, instance, self.volume_api, self.driver,
                       do_check_attach=False, do_driver_attach=True)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Failed to attach %(volume_id)s "
                                  "at %(mountpoint)s"),
                              {'volume_id': bdm.volume_id,
                               'mountpoint': bdm['mount_device']},
                              context=context, instance=instance)
                # Give the reservation back to Cinder before re-raising.
                self.volume_api.unreserve_volume(context, bdm.volume_id)

        info = {'volume_id': bdm.volume_id}
        self._notify_about_instance_usage(
            context, instance, "volume.attach", extra_usage_info=info)

    def _detach_volume(self, context, instance, bdm):
        """Do the actual driver detach using block device mapping."""
        mp = bdm.device_name
        volume_id = bdm.volume_id

        LOG.audit(_('Detach volume %(volume_id)s from mountpoint %(mp)s'),
                  {'volume_id': volume_id, 'mp': mp},
                  context=context, instance=instance)

        connection_info = jsonutils.loads(bdm.connection_info)
        # NOTE(vish): We currently don't use the serial when disconnecting,
        #             but added for completeness in case we ever do.
        if connection_info and 'serial' not in connection_info:
            connection_info['serial'] = volume_id
        try:
            if not self.driver.instance_exists(instance):
                # Best effort: still try the detach even if the hypervisor
                # no longer knows the instance.
                LOG.warning(_LW('Detaching volume from unknown instance'),
                            context=context, instance=instance)

            encryption = encryptors.get_encryption_metadata(
                context, self.volume_api, volume_id, connection_info)

            self.driver.detach_volume(connection_info,
                                      instance,
                                      mp,
                                      encryption=encryption)
        except exception.DiskNotFound as err:
            # Disk already gone from the guest; treat as success.
            LOG.warning(_LW('Ignoring DiskNotFound exception while detaching '
                            'volume %(volume_id)s from %(mp)s: %(err)s'),
                        {'volume_id': volume_id, 'mp': mp, 'err': err},
                        instance=instance)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Failed to detach volume %(volume_id)s '
                                  'from %(mp)s'),
                              {'volume_id': volume_id, 'mp': mp},
                              context=context, instance=instance)
                # Tell Cinder the detach failed so the volume state rolls
                # back from 'detaching'.
                self.volume_api.roll_detaching(context, volume_id)

    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def detach_volume(self, context, volume_id, instance):
        """Detach a volume from an instance."""
        bdm = objects.BlockDeviceMapping.get_by_volume_id(
                context, volume_id)
        if CONF.volume_usage_poll_interval > 0:
            # Record final usage totals before the device goes away.
            vol_stats = []
            mp = bdm.device_name
            # Handle bootable volumes which will not contain /dev/
            if '/dev/' in mp:
                mp = mp[5:]
            try:
                vol_stats = self.driver.block_stats(instance, mp)
            except NotImplementedError:
                pass

            if vol_stats:
                LOG.debug("Updating volume usage cache with totals",
                          instance=instance)
                rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
                self.conductor_api.vol_usage_update(context, volume_id,
                                                    rd_req, rd_bytes,
                                                    wr_req, wr_bytes,
                                                    instance,
                                                    update_totals=True)

        self._detach_volume(context, instance, bdm)
        connector = self.driver.get_volume_connector(instance)
        self.volume_api.terminate_connection(context, volume_id, connector)
        bdm.destroy()
        info = dict(volume_id=volume_id)
        self._notify_about_instance_usage(
            context, instance, "volume.detach", extra_usage_info=info)
        self.volume_api.detach(context.elevated(), volume_id)

    def _init_volume_connection(self, context, new_volume_id,
                                old_volume_id, connector, instance, bdm):
        """Initialize the new volume's connection, copying the 'serial'
        over from the old connection info so the guest device identity is
        preserved across the swap.
        """
        new_cinfo = self.volume_api.initialize_connection(context,
                                                          new_volume_id,
                                                          connector)
        old_cinfo = jsonutils.loads(bdm['connection_info'])
        if old_cinfo and 'serial' not in old_cinfo:
            old_cinfo['serial'] = old_volume_id
        new_cinfo['serial'] = old_cinfo['serial']
        return (old_cinfo, new_cinfo)

    def _swap_volume(self, context, instance, bdm, connector, old_volume_id,
                     new_volume_id):
        """Connect the new volume and ask the driver to swap the disks.

        Continues in the finally/cleanup section below this chunk.
        """
        mountpoint = bdm['device_name']
        failed = False
        new_cinfo = None
        resize_to = 0
        try:
            old_cinfo, new_cinfo = self._init_volume_connection(context,
                                                                new_volume_id,
                                                                old_volume_id,
                                                                connector,
                                                                instance,
                                                                bdm)
            # If the new volume is larger, grow the guest disk to match.
            old_vol_size = self.volume_api.get(context, old_volume_id)['size']
            new_vol_size = self.volume_api.get(context, new_volume_id)['size']
            if new_vol_size > old_vol_size:
                resize_to = new_vol_size
            self.driver.swap_volume(old_cinfo, new_cinfo, instance,
                                    mountpoint, resize_to)
        except Exception:
            failed = True
            with excutils.save_and_reraise_exception():
                if new_cinfo:
                    # Connection succeeded, the swap itself failed.
                    msg = _LE("Failed to swap volume %(old_volume_id)s "
                              "for %(new_volume_id)s")
                    LOG.exception(msg, {'old_volume_id': old_volume_id,
                                        'new_volume_id': new_volume_id},
                                  context=context,
                                  instance=instance)
                else:
                    # We never even connected the new volume.
                    msg = _LE("Failed to connect to volume %(volume_id)s "
                              "with volume at %(mountpoint)s")
                    LOG.exception(msg, {'volume_id': new_volume_id,
                                        'mountpoint': bdm['device_name']},
                                  context=context,
                                  instance=instance)
                # NOTE(review): continuation of _swap_volume's error path —
                # the try/except opening is above this chunk.  Roll both
                # volumes' Cinder states back before re-raising.
                self.volume_api.roll_detaching(context, old_volume_id)
                self.volume_api.unreserve_volume(context, new_volume_id)
        finally:
            # Always terminate the connection of whichever volume is no
            # longer in use (the new one on failure, the old one on success).
            conn_volume = new_volume_id if failed else old_volume_id
            if new_cinfo:
                self.volume_api.terminate_connection(context,
                                                     conn_volume,
                                                     connector)
            # If Cinder initiated the swap, it will keep
            # the original ID
            comp_ret = self.volume_api.migrate_volume_completion(
                                                      context,
                                                      old_volume_id,
                                                      new_volume_id,
                                                      error=failed)

        return (comp_ret, new_cinfo)

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def swap_volume(self, context, old_volume_id, new_volume_id, instance):
        """Swap volume for an instance."""
        context = context.elevated()

        bdm = objects.BlockDeviceMapping.get_by_volume_id(
                context, old_volume_id, instance_uuid=instance.uuid)
        connector = self.driver.get_volume_connector(instance)
        comp_ret, new_cinfo = self._swap_volume(context, instance,
                                                bdm,
                                                connector,
                                                old_volume_id,
                                                new_volume_id)

        # Cinder tells us which volume id survives the swap.
        save_volume_id = comp_ret['save_volume_id']

        # Update bdm
        values = {
            'connection_info': jsonutils.dumps(new_cinfo),
            'delete_on_termination': False,
            'source_type': 'volume',
            'destination_type': 'volume',
            'snapshot_id': None,
            'volume_id': save_volume_id,
            'volume_size': None,
            'no_device': None}
        bdm.update(values)
        bdm.save()

    @wrap_exception()
    def remove_volume_connection(self, context, volume_id, instance):
        """Remove a volume connection using the volume api."""
        # NOTE(vish): We don't want to actually mark the volume
        #             detached, or delete the bdm, just remove the
        #             connection from this host.

        # NOTE(PhilDay): Can't use object_compat decorator here as
        #                instance is not the second parameter
        if isinstance(instance, dict):
            metas = ['metadata', 'system_metadata']
            instance = objects.Instance._from_db_object(
                    context, objects.Instance(), instance,
                    expected_attrs=metas)
            instance._context = context
        try:
            bdm = objects.BlockDeviceMapping.get_by_volume_id(
                    context, volume_id)
            self._detach_volume(context, instance, bdm)
            connector = self.driver.get_volume_connector(instance)
            self.volume_api.terminate_connection(context, volume_id,
                                                 connector)
        except exception.NotFound:
            # Best effort: the mapping may already be gone.
            pass

    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def attach_interface(self, context, instance, network_id, port_id,
                         requested_ip):
        """Use hotplug to add an network adapter to an instance."""
        network_info = self.network_api.allocate_port_for_instance(
            context, instance, port_id, network_id, requested_ip)
        if len(network_info) != 1:
            # We asked for exactly one port; anything else is a failure.
            LOG.error(_LE('allocate_port_for_instance returned %(ports)s '
                          'ports'), dict(ports=len(network_info)))
            raise exception.InterfaceAttachFailed(
                    instance_uuid=instance.uuid)
        image_ref = instance.get('image_ref')
        image_meta = compute_utils.get_image_metadata(
            context, self.image_api, image_ref, instance)

        self.driver.attach_interface(instance, image_meta, network_info[0])
        return network_info[0]

    @object_compat
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def detach_interface(self, context, instance, port_id):
        """Detach an network adapter from an instance."""
        network_info = instance.info_cache.network_info
        condemned = None
        for vif in network_info:
            if vif['id'] == port_id:
                condemned = vif
                break
        if condemned is None:
            raise exception.PortNotFound(_("Port %s is not "
                                           "attached") % port_id)

        self.network_api.deallocate_port_for_instance(context, instance,
                                                      port_id)
        self.driver.detach_interface(instance, condemned)

    def _get_compute_info(self, context, host):
        """Return the first ComputeNode record for ``host``."""
        return objects.ComputeNode.get_first_node_by_host_for_old_compat(
            context, host)
    @wrap_exception()
    def check_instance_shared_storage(self, ctxt, instance, data):
        """Check if the instance files are shared

        :param ctxt: security context
        :param instance: dict of instance data
        :param data: result of driver.check_instance_shared_storage_local

        Returns True if instance disks located on shared storage and
        False otherwise.
        """
        return self.driver.check_instance_shared_storage_remote(ctxt, data)

    @wrap_exception()
    @wrap_instance_fault
    def check_can_live_migrate_destination(self, ctxt, instance,
                                           block_migration, disk_over_commit):
        """Check if it is possible to execute live migration.

        This runs checks on the destination host, and then calls
        back to the source host to check the results.

        :param context: security context
        :param instance: dict of instance data
        :param block_migration: if true, prepare for block migration
        :param disk_over_commit: if true, allow disk over commit
        :returns: a dict containing migration info
        """
        src_compute_info = obj_base.obj_to_primitive(
            self._get_compute_info(ctxt, instance.host))
        dst_compute_info = obj_base.obj_to_primitive(
            self._get_compute_info(ctxt, CONF.host))
        dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,
            instance, src_compute_info, dst_compute_info,
            block_migration, disk_over_commit)
        migrate_data = {}
        try:
            # Ask the source host to validate against our findings.
            migrate_data = self.compute_rpcapi.check_can_live_migrate_source(
                ctxt, instance, dest_check_data)
        finally:
            # Always undo any temporary state the destination check created.
            self.driver.check_can_live_migrate_destination_cleanup(ctxt,
                    dest_check_data)
        if 'migrate_data' in dest_check_data:
            migrate_data.update(dest_check_data['migrate_data'])
        return migrate_data

    @wrap_exception()
    @wrap_instance_fault
    def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
        """Check if it is possible to execute live migration.

        This checks if the live migration can succeed, based on the
        results from check_can_live_migrate_destination.

        :param ctxt: security context
        :param instance: dict of instance data
        :param dest_check_data: result of check_can_live_migrate_destination
        :returns: a dict containing migration info
        """
        is_volume_backed = self.compute_api.is_volume_backed_instance(
            ctxt, instance)
        dest_check_data['is_volume_backed'] = is_volume_backed
        block_device_info = self._get_instance_block_device_info(
                            ctxt, instance, refresh_conn_info=True)
        return self.driver.check_can_live_migrate_source(ctxt, instance,
                                                         dest_check_data,
                                                         block_device_info)

    @object_compat
    @wrap_exception()
    @wrap_instance_fault
    def pre_live_migration(self, context, instance, block_migration, disk,
                           migrate_data):
        """Preparations for live migration at dest host.

        :param context: security context
        :param instance: dict of instance data
        :param block_migration: if true, prepare for block migration
        :param migrate_data: if not None, it is a dict which holds data
                             required for live migration without shared
                             storage.
        """
        block_device_info = self._get_instance_block_device_info(
                            context, instance, refresh_conn_info=True)

        network_info = self._get_instance_nw_info(context, instance)
        self._notify_about_instance_usage(
                     context, instance, "live_migration.pre.start",
                     network_info=network_info)

        pre_live_migration_data = self.driver.pre_live_migration(context,
                                       instance,
                                       block_device_info,
                                       network_info,
                                       disk,
                                       migrate_data)

        # NOTE(tr3buchet): setup networks on destination host
        self.network_api.setup_networks_on_host(context, instance,
                                                self.host)

        # Creating filters to hypervisors and firewalls.
        # An example is that nova-instance-instance-xxx,
        # which is written to libvirt.xml(Check "virsh nwfilter-list")
        # This nwfilter is necessary on the destination host.
        # In addition, this method is creating filtering rule
        # onto destination host.
        # NOTE(review): continuation of pre_live_migration — the method
        # header and notifications are above this chunk.
        self.driver.ensure_filtering_rules_for_instance(instance,
                                                        network_info)

        self._notify_about_instance_usage(
                     context, instance, "live_migration.pre.end",
                     network_info=network_info)

        return pre_live_migration_data

    @wrap_exception()
    @wrap_instance_fault
    def live_migration(self, context, dest, instance, block_migration,
                       migrate_data):
        """Executing live migration.

        :param context: security context
        :param instance: a nova.objects.instance.Instance object
        :param dest: destination host
        :param block_migration: if true, prepare for block migration
        :param migrate_data: implementation specific params

        """
        # NOTE(danms): since instance is not the first parameter, we can't
        # use @object_compat on this method. Since this is the only example,
        # we do this manually instead of complicating the decorator
        if not isinstance(instance, obj_base.NovaObject):
            expected = ['metadata', 'system_metadata',
                        'security_groups', 'info_cache']
            instance = objects.Instance._from_db_object(
                context, objects.Instance(), instance,
                expected_attrs=expected)

        # Create a local copy since we'll be modifying the dictionary
        migrate_data = dict(migrate_data or {})
        try:
            if block_migration:
                # Block migration needs the disk layout to replicate at dest.
                block_device_info = self._get_instance_block_device_info(
                    context, instance)
                disk = self.driver.get_instance_disk_info(
                    instance, block_device_info=block_device_info)
            else:
                disk = None

            pre_migration_data = self.compute_rpcapi.pre_live_migration(
                context, instance,
                block_migration, disk, dest, migrate_data)
            migrate_data['pre_live_migration_result'] = pre_migration_data

        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Pre live migration failed at %s'),
                              dest, instance=instance)
                self._rollback_live_migration(context, instance, dest,
                                              block_migration, migrate_data)

        # Executing live migration
        # live_migration might raises exceptions, but
        # nothing must be recovered in this version.
        # NOTE(review): continuation of live_migration — the driver performs
        # the actual migration and invokes the post/rollback callbacks.
        self.driver.live_migration(context, instance, dest,
                                   self._post_live_migration,
                                   self._rollback_live_migration,
                                   block_migration, migrate_data)

    def _live_migration_cleanup_flags(self, block_migration, migrate_data):
        """Determine whether disks or instance path need to be cleaned up after
        live migration (at source on success, at destination on rollback)

        Block migration needs empty image at destination host
        before migration starts, so if any failure occurs,
        any empty images has to be deleted.

        Also Volume backed live migration w/o shared storage needs to delete
        newly created instance-xxx dir on the destination as a part of its
        rollback process

        :param block_migration: if true, it was a block migration
        :param migrate_data: implementation specific data
        :returns: (bool, bool) -- do_cleanup, destroy_disks
        """
        # NOTE(angdraug): block migration wouldn't have been allowed if either
        # block storage or instance path were shared
        is_shared_block_storage = not block_migration
        is_shared_instance_path = not block_migration
        if migrate_data:
            # Driver-provided flags override the defaults when present.
            is_shared_block_storage = migrate_data.get(
                    'is_shared_block_storage', is_shared_block_storage)
            is_shared_instance_path = migrate_data.get(
                    'is_shared_instance_path', is_shared_instance_path)

        # No instance booting at source host, but instance dir
        # must be deleted for preparing next block migration
        # must be deleted for preparing next live migration w/o shared storage
        do_cleanup = block_migration or not is_shared_instance_path
        destroy_disks = not is_shared_block_storage

        return (do_cleanup, destroy_disks)

    @wrap_exception()
    @wrap_instance_fault
    def _post_live_migration(self, ctxt, instance,
                             dest, block_migration=False, migrate_data=None):
        """Post operations for live migration.

        This method is called from live_migration
        and mainly updating database record.

        :param ctxt: security context
        :param instance: instance dict
        :param dest: destination host
        :param block_migration: if true, prepare for block migration
        :param migrate_data: if not None, it is a dict which has data
        required for live migration without shared storage

        """
        LOG.info(_LI('_post_live_migration() is started..'),
                 instance=instance)

        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                ctxt, instance['uuid'])

        # Cleanup source host post live-migration
        block_device_info = self._get_instance_block_device_info(
                            ctxt, instance, bdms=bdms)
        self.driver.post_live_migration(ctxt, instance, block_device_info,
                                        migrate_data)

        # Detaching volumes.
        connector = self.driver.get_volume_connector(instance)
        for bdm in bdms:
            # NOTE(vish): We don't want to actually mark the volume
            #             detached, or delete the bdm, just remove the
            #             connection from this host.

            # remove the volume connection without detaching from hypervisor
            # because the instance is not running anymore on the current host
            if bdm.is_volume:
                self.volume_api.terminate_connection(ctxt, bdm.volume_id,
                                                     connector)

        # Releasing vlan.
        # (not necessary in current implementation?)

        network_info = self._get_instance_nw_info(ctxt, instance)

        self._notify_about_instance_usage(ctxt, instance,
                                          "live_migration._post.start",
                                          network_info=network_info)
        # Releasing security group ingress rule.
        self.driver.unfilter_instance(instance,
                                      network_info)

        migration = {'source_compute': self.host,
                     'dest_compute': dest, }
        self.network_api.migrate_instance_start(ctxt,
                                                instance,
                                                migration)

        destroy_vifs = False
        try:
            self.driver.post_live_migration_at_source(ctxt, instance,
                                                      network_info)
        except NotImplementedError as ex:
            LOG.debug(ex, instance=instance)
            # For all hypervisors other than libvirt, there is a possibility
            # they are unplugging networks from source node in the cleanup
            # method
            destroy_vifs = True

        # Define domain at destination host, without doing it,
        # pause/suspend/terminate do not work.
        # NOTE(review): continuation of _post_live_migration — hand over to
        # the destination host so it can finalize the instance there.
        self.compute_rpcapi.post_live_migration_at_destination(ctxt,
                instance, block_migration, dest)

        do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
                block_migration, migrate_data)

        if do_cleanup:
            self.driver.cleanup(ctxt, instance, network_info,
                                destroy_disks=destroy_disks,
                                migrate_data=migrate_data,
                                destroy_vifs=destroy_vifs)

        # NOTE(tr3buchet): tear down networks on source host
        self.network_api.setup_networks_on_host(ctxt, instance,
                                                self.host, teardown=True)
        self.instance_events.clear_events_for_instance(instance)

        # NOTE(timello): make sure we update available resources on source
        # host even before next periodic task.
        self.update_available_resource(ctxt)

        self._notify_about_instance_usage(ctxt, instance,
                                          "live_migration._post.end",
                                          network_info=network_info)
        LOG.info(_LI('Migrating instance to %s finished successfully.'),
                 dest, instance=instance)
        LOG.info(_LI("You may see the error \"libvirt: QEMU error: "
                     "Domain not found: no domain with matching name.\" "
                     "This error can be safely ignored."),
                 instance=instance)

        # Invalidate console auth tokens that point at the old host.
        if CONF.vnc_enabled or CONF.spice.enabled or CONF.rdp.enabled:
            if CONF.cells.enable:
                self.cells_rpcapi.consoleauth_delete_tokens(ctxt,
                        instance['uuid'])
            else:
                self.consoleauth_rpcapi.delete_tokens_for_instance(ctxt,
                        instance['uuid'])

    @object_compat
    @wrap_exception()
    @wrap_instance_fault
    def post_live_migration_at_destination(self, context, instance,
                                           block_migration):
        """Post operations for live migration.

        :param context: security context
        :param instance: Instance dict
        :param block_migration: if true, prepare for block migration

        """
        LOG.info(_LI('Post operation of migration started'),
                 instance=instance)

        # NOTE(tr3buchet): setup networks on destination host
        #                  this is called a second time because
        #                  multi_host does not create the bridge in
        #                  plug_vifs
        self.network_api.setup_networks_on_host(context, instance,
                                                self.host)
        migration = {'source_compute': instance['host'],
                     'dest_compute': self.host, }
        self.network_api.migrate_instance_finish(context,
                                                 instance,
                                                 migration)

        network_info = self._get_instance_nw_info(context, instance)
        self._notify_about_instance_usage(
                     context, instance, "live_migration.post.dest.start",
                     network_info=network_info)
        block_device_info = self._get_instance_block_device_info(context,
                                                                 instance)

        self.driver.post_live_migration_at_destination(context, instance,
                                            network_info,
                                            block_migration, block_device_info)
        # Restore instance state
        current_power_state = self._get_power_state(context, instance)
        node_name = None
        try:
            compute_node = self._get_compute_info(context, self.host)
            node_name = compute_node.hypervisor_hostname
        except exception.ComputeHostNotFound:
            LOG.exception(_LE('Failed to get compute_info for %s'), self.host)
        finally:
            # Even if node lookup failed, the instance now lives here;
            # record that and clear the migrating task state.
            instance.host = self.host
            instance.power_state = current_power_state
            instance.vm_state = vm_states.ACTIVE
            instance.task_state = None
            instance.node = node_name
            instance.save(expected_task_state=task_states.MIGRATING)

        # NOTE(vish): this is necessary to update dhcp
        self.network_api.setup_networks_on_host(context, instance, self.host)
        self._notify_about_instance_usage(
                     context, instance, "live_migration.post.dest.end",
                     network_info=network_info)

    @wrap_exception()
    @wrap_instance_fault
    def _rollback_live_migration(self, context, instance,
                                 dest, block_migration, migrate_data=None):
        """Recovers Instance/volume state from migrating -> running.

        :param context: security context
        :param instance: nova.db.sqlalchemy.models.Instance
        :param dest:
            This method is called from live migration src host.
            This param specifies destination host.
        :param block_migration: if true, prepare for block migration
        :param migrate_data:
            if not none, contains implementation specific data.

        """
        instance.vm_state = vm_states.ACTIVE
        instance.task_state = None
        instance.save(expected_task_state=[task_states.MIGRATING])

        # NOTE(tr3buchet): setup networks on source host (really it's re-setup)
        self.network_api.setup_networks_on_host(context, instance, self.host)

        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance['uuid'])
        for bdm in bdms:
            if bdm.is_volume:
                # Drop the half-established volume connections on the dest.
                self.compute_rpcapi.remove_volume_connection(
                        context, instance, bdm.volume_id, dest)

        self._notify_about_instance_usage(context, instance,
                                          "live_migration._rollback.start")

        do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
                block_migration, migrate_data)

        if do_cleanup:
            self.compute_rpcapi.rollback_live_migration_at_destination(
                    context, instance, dest, destroy_disks=destroy_disks,
                    migrate_data=migrate_data)

        self._notify_about_instance_usage(context, instance,
                                          "live_migration._rollback.end")

    @object_compat
    @wrap_exception()
    @wrap_instance_fault
    def rollback_live_migration_at_destination(self, context, instance,
                                               destroy_disks=True,
                                               migrate_data=None):
        """Cleaning up image directory that is created pre_live_migration.

        :param context: security context
        :param instance: a nova.objects.instance.Instance object sent over rpc
        """
        network_info = self._get_instance_nw_info(context, instance)
        self._notify_about_instance_usage(
                      context, instance, "live_migration.rollback.dest.start",
                      network_info=network_info)

        # NOTE(tr3buchet): tear down networks on destination host
        self.network_api.setup_networks_on_host(context, instance,
                                                self.host, teardown=True)

        # NOTE(vish): The mapping is passed in so the driver can disconnect
        #             from remote volumes if necessary
        block_device_info = self._get_instance_block_device_info(context,
                                                                 instance)
        self.driver.rollback_live_migration_at_destination(
                        context, instance, network_info, block_device_info,
                        destroy_disks=destroy_disks, migrate_data=migrate_data)
        self._notify_about_instance_usage(
                        context, instance, "live_migration.rollback.dest.end",
                        network_info=network_info)

    @periodic_task.periodic_task(
        spacing=CONF.heal_instance_info_cache_interval)
    def _heal_instance_info_cache(self, context):
        """Called periodically.  On every call, try to update the
        info_cache's network information for another instance by
        calling to the network manager.

        This is implemented by keeping a cache of uuids of instances
        that live on this host.  On each call, we pop one off of a
        list, pull the DB record, and try the call to the network API.
        If anything errors don't fail, as it's possible the instance
        has been deleted, etc.
        """
        heal_interval = CONF.heal_instance_info_cache_interval
        if not heal_interval:
            return

        instance_uuids = getattr(self, '_instance_uuids_to_heal', [])
        instance = None

        LOG.debug('Starting heal instance info cache')

        if not instance_uuids:
            # The list of instances to heal is empty so rebuild it
            LOG.debug('Rebuilding the list of instances to heal')
            db_instances = objects.InstanceList.get_by_host(
                context, self.host, expected_attrs=[], use_slave=True)
            for inst in db_instances:
                # We don't want to refresh the cache for instances
                # which are building or deleting so don't put them
                # in the list. If they are building they will get
                # added to the list next time we build it.
                if (inst.vm_state == vm_states.BUILDING):
                    LOG.debug('Skipping network cache update for instance '
                              'because it is Building.', instance=inst)
                    continue
                if (inst.task_state == task_states.DELETING):
                    LOG.debug('Skipping network cache update for instance '
                              'because it is being deleted.', instance=inst)
                    continue

                if not instance:
                    # Save the first one we find so we don't
                    # have to get it again
                    instance = inst
                else:
                    instance_uuids.append(inst['uuid'])

            self._instance_uuids_to_heal = instance_uuids
        else:
            # Find the next valid instance on the list
            while instance_uuids:
                try:
                    inst = objects.Instance.get_by_uuid(
                            context, instance_uuids.pop(0),
                            expected_attrs=['system_metadata', 'info_cache'],
                            use_slave=True)
                except exception.InstanceNotFound:
                    # Instance is gone.  Try to grab another.
                    continue

                # Check the instance hasn't been migrated
                if inst.host != self.host:
                    LOG.debug('Skipping network cache update for instance '
                              'because it has been migrated to another '
                              'host.', instance=inst)
                # Check the instance isn't being deleting
                elif inst.task_state == task_states.DELETING:
                    LOG.debug('Skipping network cache update for instance '
                              'because it is being deleted.', instance=inst)
                else:
                    instance = inst
                    break

        if instance:
            # We have an instance now to refresh
            try:
                # Call to network API to get instance info.. this will
                # force an update to the instance's info_cache
                self._get_instance_nw_info(context, instance, use_slave=True)
                LOG.debug('Updated the network info_cache for instance',
                          instance=instance)
            except exception.InstanceNotFound:
                # Instance is gone.
                LOG.debug('Instance no longer exists. Unable to refresh',
                          instance=instance)
                return
            except Exception:
                LOG.error(_LE('An error occurred while refreshing the network '
                              'cache.'), instance=instance, exc_info=True)
        else:
            LOG.debug("Didn't find any instances for network info cache "
                      "update.")

    @periodic_task.periodic_task
    def _poll_rebooting_instances(self, context):
        """Periodic task: ask the driver to poll instances whose reboot has
        been pending longer than CONF.reboot_timeout.
        """
        if CONF.reboot_timeout > 0:
            filters = {'task_state':
                       [task_states.REBOOTING,
                        task_states.REBOOT_STARTED,
                        task_states.REBOOT_PENDING],
                       'host': self.host}
            rebooting = objects.InstanceList.get_by_filters(
                context, filters, expected_attrs=[], use_slave=True)

            to_poll = []
            for instance in rebooting:
                if timeutils.is_older_than(instance['updated_at'],
                                           CONF.reboot_timeout):
                    to_poll.append(instance)

            self.driver.poll_rebooting_instances(CONF.reboot_timeout, to_poll)

    @periodic_task.periodic_task
    def _poll_rescued_instances(self, context):
        """Periodic task: unrescue instances that have been in RESCUED state
        longer than CONF.rescue_timeout.
        """
        if CONF.rescue_timeout > 0:
            filters = {'vm_state': vm_states.RESCUED,
                       'host': self.host}
            rescued_instances = objects.InstanceList.get_by_filters(
                context, filters, expected_attrs=["system_metadata"],
                use_slave=True)

            to_unrescue = []
            for instance in rescued_instances:
                if timeutils.is_older_than(instance['launched_at'],
                                           CONF.rescue_timeout):
                    to_unrescue.append(instance)

            for instance in to_unrescue:
                self.compute_api.unrescue(context, instance)

    @periodic_task.periodic_task
    def _poll_unconfirmed_resizes(self, context):
        """Periodic task: auto-confirm resizes that have stayed unconfirmed
        longer than CONF.resize_confirm_window (0 disables the task).
        """
        if CONF.resize_confirm_window == 0:
            return

        migrations = objects.MigrationList.get_unconfirmed_by_dest_compute(
                context, CONF.resize_confirm_window, self.host,
                use_slave=True)

        migrations_info = dict(migration_count=len(migrations),
                confirm_window=CONF.resize_confirm_window)

        if migrations_info["migration_count"] > 0:
            # NOTE(review): continuation of _poll_unconfirmed_resizes — the
            # migrations list and the enclosing 'if' are above this chunk.
            LOG.info(_LI("Found %(migration_count)d unconfirmed migrations "
                         "older than %(confirm_window)d seconds"),
                     migrations_info)

        def _set_migration_to_error(migration, reason, **kwargs):
            # Mark a migration as failed and persist it with admin rights.
            LOG.warning(_LW("Setting migration %(migration_id)s to error: "
                            "%(reason)s"),
                        {'migration_id': migration['id'], 'reason': reason},
                        **kwargs)
            migration.status = 'error'
            with migration.obj_as_admin():
                migration.save()

        for migration in migrations:
            instance_uuid = migration.instance_uuid
            LOG.info(_LI("Automatically confirming migration "
                         "%(migration_id)s for instance %(instance_uuid)s"),
                     {'migration_id': migration.id,
                      'instance_uuid': instance_uuid})
            expected_attrs = ['metadata', 'system_metadata']
            try:
                instance = objects.Instance.get_by_uuid(context,
                            instance_uuid, expected_attrs=expected_attrs,
                            use_slave=True)
            except exception.InstanceNotFound:
                reason = (_("Instance %s not found") %
                          instance_uuid)
                _set_migration_to_error(migration, reason)
                continue
            if instance.vm_state == vm_states.ERROR:
                reason = _("In ERROR state")
                _set_migration_to_error(migration, reason,
                                        instance=instance)
                continue
            # race condition: The instance in DELETING state should not be
            # set the migration state to error, otherwise the instance in
            # to be deleted which is in RESIZED state
            # will not be able to confirm resize
            if instance.task_state in [task_states.DELETING,
                                       task_states.SOFT_DELETING]:
                msg = ("Instance being deleted or soft deleted during resize "
                       "confirmation. Skipping.")
                LOG.debug(msg, instance=instance)
                continue

            # race condition: This condition is hit when this method is
            # called between the save of the migration record with a status of
            # finished and the save of the instance object with a state of
            # RESIZED. The migration record should not be set to error.
            if instance.task_state == task_states.RESIZE_FINISH:
                msg = ("Instance still resizing during resize "
                       "confirmation. Skipping.")
                LOG.debug(msg, instance=instance)
                continue

            vm_state = instance.vm_state
            task_state = instance.task_state
            if vm_state != vm_states.RESIZED or task_state is not None:
                reason = (_("In states %(vm_state)s/%(task_state)s, not "
                            "RESIZED/None") %
                          {'vm_state': vm_state,
                           'task_state': task_state})
                _set_migration_to_error(migration, reason,
                                        instance=instance)
                continue
            try:
                self.compute_api.confirm_resize(context, instance,
                                                migration=migration)
            except Exception as e:
                # Leave the migration for the next periodic run.
                LOG.info(_LI("Error auto-confirming resize: %s. "
                             "Will retry later."),
                         e, instance=instance)

    @periodic_task.periodic_task(spacing=CONF.shelved_poll_interval)
    def _poll_shelved_instances(self, context):
        """Periodic task: offload shelved instances that have been shelved
        longer than CONF.shelved_offload_time.
        """
        filters = {'vm_state': vm_states.SHELVED,
                   'host': self.host}
        shelved_instances = objects.InstanceList.get_by_filters(
            context, filters=filters, expected_attrs=['system_metadata'],
            use_slave=True)

        to_gc = []
        for instance in shelved_instances:
            sys_meta = instance.system_metadata
            shelved_at = timeutils.parse_strtime(sys_meta['shelved_at'])
            if timeutils.is_older_than(shelved_at, CONF.shelved_offload_time):
                to_gc.append(instance)

        for instance in to_gc:
            try:
                instance.task_state = task_states.SHELVING_OFFLOADING
                instance.save()
                self.shelve_offload_instance(context, instance,
                                             clean_shutdown=False)
            except Exception:
                # One bad instance must not stop the rest of the sweep.
                LOG.exception(_LE('Periodic task failed to offload instance.'),
                        instance=instance)

    @periodic_task.periodic_task
    def _instance_usage_audit(self, context):
        """Periodic task: emit usage-exists notifications for all instances
        active in the last completed audit period (if auditing is enabled
        and has not already run for that period).
        """
        if not CONF.instance_usage_audit:
            return

        if compute_utils.has_audit_been_run(context,
                                            self.conductor_api,
                                            self.host):
            return

        begin, end = utils.last_completed_audit_period()
        instances = objects.InstanceList.get_active_by_window_joined(
            context, begin, end, host=self.host,
            expected_attrs=['system_metadata', 'info_cache', 'metadata'],
            use_slave=True)
        num_instances = len(instances)
        errors = 0
        successes = 0
        LOG.info(_LI("Running instance usage audit for"
                     " host %(host)s from %(begin_time)s to "
                     "%(end_time)s. %(number_instances)s"
                     " instances."),
                 dict(host=self.host,
                      begin_time=begin,
                      end_time=end,
                      number_instances=num_instances))
        start_time = time.time()
        compute_utils.start_instance_usage_audit(context,
                                      self.conductor_api,
                                      begin, end,
                                      self.host, num_instances)
        for instance in instances:
            try:
                self.conductor_api.notify_usage_exists(
                    context, instance,
                    ignore_missing_network_data=False)
                successes += 1
            except Exception:
                LOG.exception(_LE('Failed to generate usage '
                                  'audit for instance '
                                  'on host %s'), self.host,
                              instance=instance)
                errors += 1
        compute_utils.finish_instance_usage_audit(context,
                                      self.conductor_api,
                                      begin, end,
                                      self.host, errors,
                                      "Instance usage audit ran "
                                      "for host %s, %s instances "
                                      "in %s seconds." % (
                                      self.host,
                                      num_instances,
                                      time.time() - start_time))

    @periodic_task.periodic_task(spacing=CONF.bandwidth_poll_interval)
    def _poll_bandwidth_usage(self, context):
        """Periodic task: collect per-VIF bandwidth counters from the driver
        and persist deltas into the bandwidth usage cache.
        """
        if not self._bw_usage_supported:
            return

        prev_time, start_time = utils.last_completed_audit_period()

        curr_time = time.time()
        if (curr_time - self._last_bw_usage_poll >
                CONF.bandwidth_poll_interval):
            self._last_bw_usage_poll = curr_time
            LOG.info(_LI("Updating bandwidth usage cache"))
            cells_update_interval = CONF.cells.bandwidth_update_interval
            if (cells_update_interval > 0 and
                   curr_time - self._last_bw_usage_cell_update >
                           cells_update_interval):
                self._last_bw_usage_cell_update = curr_time
                update_cells = True
            else:
                update_cells = False

            instances = objects.InstanceList.get_by_host(context,
                                                         self.host,
                                                         use_slave=True)
            try:
                bw_counters = self.driver.get_all_bw_counters(instances)
            except NotImplementedError:
                # NOTE(mdragon): Not all hypervisors have bandwidth polling
                # implemented yet.  If they don't it doesn't break anything,
                # they just don't get the info in the usage events.
                # NOTE(PhilDay): Record that its not supported so we can
                # skip fast on future calls rather than waste effort getting
                # the list of instances.
LOG.warning(_LW("Bandwidth usage not supported by " "hypervisor.")) self._bw_usage_supported = False return refreshed = timeutils.utcnow() for bw_ctr in bw_counters: # Allow switching of greenthreads between queries. greenthread.sleep(0) bw_in = 0 bw_out = 0 last_ctr_in = None last_ctr_out = None usage = objects.BandwidthUsage.get_by_instance_uuid_and_mac( context, bw_ctr['uuid'], bw_ctr['mac_address'], start_period=start_time, use_slave=True) if usage: bw_in = usage.bw_in bw_out = usage.bw_out last_ctr_in = usage.last_ctr_in last_ctr_out = usage.last_ctr_out else: usage = (objects.BandwidthUsage. get_by_instance_uuid_and_mac( context, bw_ctr['uuid'], bw_ctr['mac_address'], start_period=prev_time, use_slave=True)) if usage: last_ctr_in = usage.last_ctr_in last_ctr_out = usage.last_ctr_out if last_ctr_in is not None: if bw_ctr['bw_in'] < last_ctr_in: # counter rollover bw_in += bw_ctr['bw_in'] else: bw_in += (bw_ctr['bw_in'] - last_ctr_in) if last_ctr_out is not None: if bw_ctr['bw_out'] < last_ctr_out: # counter rollover bw_out += bw_ctr['bw_out'] else: bw_out += (bw_ctr['bw_out'] - last_ctr_out) objects.BandwidthUsage(context=context).create( bw_ctr['uuid'], bw_ctr['mac_address'], bw_in, bw_out, bw_ctr['bw_in'], bw_ctr['bw_out'], start_period=start_time, last_refreshed=refreshed, update_cells=update_cells) def _get_host_volume_bdms(self, context, use_slave=False): """Return all block device mappings on a compute host.""" compute_host_bdms = [] instances = objects.InstanceList.get_by_host(context, self.host, use_slave=use_slave) for instance in instances: bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid, use_slave=use_slave) instance_bdms = [bdm for bdm in bdms if bdm.is_volume] compute_host_bdms.append(dict(instance=instance, instance_bdms=instance_bdms)) return compute_host_bdms def _update_volume_usage_cache(self, context, vol_usages): """Updates the volume usage cache table with a list of stats.""" for usage in vol_usages: # 
Allow switching of greenthreads between queries. greenthread.sleep(0) self.conductor_api.vol_usage_update(context, usage['volume'], usage['rd_req'], usage['rd_bytes'], usage['wr_req'], usage['wr_bytes'], usage['instance']) @periodic_task.periodic_task(spacing=CONF.volume_usage_poll_interval) def _poll_volume_usage(self, context, start_time=None): if CONF.volume_usage_poll_interval == 0: return if not start_time: start_time = utils.last_completed_audit_period()[1] compute_host_bdms = self._get_host_volume_bdms(context, use_slave=True) if not compute_host_bdms: return LOG.debug("Updating volume usage cache") try: vol_usages = self.driver.get_all_volume_usage(context, compute_host_bdms) except NotImplementedError: return self._update_volume_usage_cache(context, vol_usages) @periodic_task.periodic_task(spacing=CONF.sync_power_state_interval, run_immediately=True) def _sync_power_states(self, context): """Align power states between the database and the hypervisor. To sync power state data we make a DB call to get the number of virtual machines known by the hypervisor and if the number matches the number of virtual machines known by the database, we proceed in a lazy loop, one database record at a time, checking if the hypervisor has the same power state as is in the database. """ db_instances = objects.InstanceList.get_by_host(context, self.host, expected_attrs=[], use_slave=True) num_vm_instances = self.driver.get_num_instances() num_db_instances = len(db_instances) if num_vm_instances != num_db_instances: LOG.warning(_LW("While synchronizing instance power states, found " "%(num_db_instances)s instances in the database " "and %(num_vm_instances)s instances on the " "hypervisor."), {'num_db_instances': num_db_instances, 'num_vm_instances': num_vm_instances}) def _sync(db_instance): # NOTE(melwitt): This must be synchronized as we query state from # two separate sources, the driver and the database. # They are set (in stop_instance) and read, in sync. 
@utils.synchronized(db_instance.uuid) def query_driver_power_state_and_sync(): self._query_driver_power_state_and_sync(context, db_instance) try: query_driver_power_state_and_sync() except Exception: LOG.exception(_LE("Periodic sync_power_state task had an " "error while processing an instance."), instance=db_instance) self._syncs_in_progress.pop(db_instance.uuid) for db_instance in db_instances: # process syncs asynchronously - don't want instance locking to # block entire periodic task thread uuid = db_instance.uuid if uuid in self._syncs_in_progress: LOG.debug('Sync already in progress for %s' % uuid) else: LOG.debug('Triggering sync for uuid %s' % uuid) self._syncs_in_progress[uuid] = True self._sync_power_pool.spawn_n(_sync, db_instance) def _query_driver_power_state_and_sync(self, context, db_instance): if db_instance.task_state is not None: LOG.info(_LI("During sync_power_state the instance has a " "pending task (%(task)s). Skip."), {'task': db_instance.task_state}, instance=db_instance) return # No pending tasks. Now try to figure out the real vm_power_state. try: vm_instance = self.driver.get_info(db_instance) vm_power_state = vm_instance.state except exception.InstanceNotFound: vm_power_state = power_state.NOSTATE # Note(maoy): the above get_info call might take a long time, # for example, because of a broken libvirt driver. try: self._sync_instance_power_state(context, db_instance, vm_power_state, use_slave=True) except exception.InstanceNotFound: # NOTE(hanlind): If the instance gets deleted during sync, # silently ignore. pass def _sync_instance_power_state(self, context, db_instance, vm_power_state, use_slave=False): """Align instance power state between the database and hypervisor. If the instance is not found on the hypervisor, but is in the database, then a stop() API will be called on the instance. """ # We re-query the DB to get the latest instance info to minimize # (not eliminate) race condition. 
db_instance.refresh(use_slave=use_slave) db_power_state = db_instance.power_state vm_state = db_instance.vm_state if self.host != db_instance.host: # on the sending end of nova-compute _sync_power_state # may have yielded to the greenthread performing a live # migration; this in turn has changed the resident-host # for the VM; However, the instance is still active, it # is just in the process of migrating to another host. # This implies that the compute source must relinquish # control to the compute destination. LOG.info(_LI("During the sync_power process the " "instance has moved from " "host %(src)s to host %(dst)s"), {'src': db_instance.host, 'dst': self.host}, instance=db_instance) return elif db_instance.task_state is not None: # on the receiving end of nova-compute, it could happen # that the DB instance already report the new resident # but the actual VM has not showed up on the hypervisor # yet. In this case, let's allow the loop to continue # and run the state sync in a later round LOG.info(_LI("During sync_power_state the instance has a " "pending task (%(task)s). Skip."), {'task': db_instance.task_state}, instance=db_instance) return if vm_power_state != db_power_state: # power_state is always updated from hypervisor to db db_instance.power_state = vm_power_state db_instance.save() db_power_state = vm_power_state # Note(maoy): Now resolve the discrepancy between vm_state and # vm_power_state. We go through all possible vm_states. if vm_state in (vm_states.BUILDING, vm_states.RESCUED, vm_states.RESIZED, vm_states.SUSPENDED, vm_states.ERROR): # TODO(maoy): we ignore these vm_state for now. pass elif vm_state == vm_states.ACTIVE: # The only rational power state should be RUNNING if vm_power_state in (power_state.SHUTDOWN, power_state.CRASHED): LOG.warning(_LW("Instance shutdown by itself. Calling the " "stop API. 
Current vm_state: %(vm_state)s, " "current task_state: %(task_state)s, " "current DB power_state: %(db_power_state)s, " "current VM power_state: %(vm_power_state)s"), {'vm_state': vm_state, 'task_state': db_instance.task_state, 'db_power_state': db_power_state, 'vm_power_state': vm_power_state}, instance=db_instance) try: # Note(maoy): here we call the API instead of # brutally updating the vm_state in the database # to allow all the hooks and checks to be performed. if db_instance.shutdown_terminate: self.compute_api.delete(context, db_instance) else: self.compute_api.stop(context, db_instance) except Exception: # Note(maoy): there is no need to propagate the error # because the same power_state will be retrieved next # time and retried. # For example, there might be another task scheduled. LOG.exception(_LE("error during stop() in " "sync_power_state."), instance=db_instance) elif vm_power_state == power_state.SUSPENDED: LOG.warning(_LW("Instance is suspended unexpectedly. Calling " "the stop API."), instance=db_instance) try: self.compute_api.stop(context, db_instance) except Exception: LOG.exception(_LE("error during stop() in " "sync_power_state."), instance=db_instance) elif vm_power_state == power_state.PAUSED: # Note(maoy): a VM may get into the paused state not only # because the user request via API calls, but also # due to (temporary) external instrumentations. # Before the virt layer can reliably report the reason, # we simply ignore the state discrepancy. In many cases, # the VM state will go back to running after the external # instrumentation is done. See bug 1097806 for details. LOG.warning(_LW("Instance is paused unexpectedly. Ignore."), instance=db_instance) elif vm_power_state == power_state.NOSTATE: # Occasionally, depending on the status of the hypervisor, # which could be restarting for example, an instance may # not be found. Therefore just log the condition. LOG.warning(_LW("Instance is unexpectedly not found. 
Ignore."), instance=db_instance) elif vm_state == vm_states.STOPPED: if vm_power_state not in (power_state.NOSTATE, power_state.SHUTDOWN, power_state.CRASHED): LOG.warning(_LW("Instance is not stopped. Calling " "the stop API. Current vm_state: %(vm_state)s," " current task_state: %(task_state)s, " "current DB power_state: %(db_power_state)s, " "current VM power_state: %(vm_power_state)s"), {'vm_state': vm_state, 'task_state': db_instance.task_state, 'db_power_state': db_power_state, 'vm_power_state': vm_power_state}, instance=db_instance) try: # NOTE(russellb) Force the stop, because normally the # compute API would not allow an attempt to stop a stopped # instance. self.compute_api.force_stop(context, db_instance) except Exception: LOG.exception(_LE("error during stop() in " "sync_power_state."), instance=db_instance) elif vm_state == vm_states.PAUSED: if vm_power_state in (power_state.SHUTDOWN, power_state.CRASHED): LOG.warning(_LW("Paused instance shutdown by itself. Calling " "the stop API."), instance=db_instance) try: self.compute_api.force_stop(context, db_instance) except Exception: LOG.exception(_LE("error during stop() in " "sync_power_state."), instance=db_instance) elif vm_state in (vm_states.SOFT_DELETED, vm_states.DELETED): if vm_power_state not in (power_state.NOSTATE, power_state.SHUTDOWN): # Note(maoy): this should be taken care of periodically in # _cleanup_running_deleted_instances(). LOG.warning(_LW("Instance is not (soft-)deleted."), instance=db_instance) @periodic_task.periodic_task def _reclaim_queued_deletes(self, context): """Reclaim instances that are queued for deletion.""" interval = CONF.reclaim_instance_interval if interval <= 0: LOG.debug("CONF.reclaim_instance_interval <= 0, skipping...") return # TODO(comstud, jichenjc): Dummy quota object for now See bug 1296414. # The only case that the quota might be inconsistent is # the compute node died between set instance state to SOFT_DELETED # and quota commit to DB. 
When compute node starts again # it will have no idea the reservation is committed or not or even # expired, since it's a rare case, so marked as todo. quotas = objects.Quotas.from_reservations(context, None) filters = {'vm_state': vm_states.SOFT_DELETED, 'task_state': None, 'host': self.host} instances = objects.InstanceList.get_by_filters( context, filters, expected_attrs=objects.instance.INSTANCE_DEFAULT_FIELDS, use_slave=True) for instance in instances: if self._deleted_old_enough(instance, interval): bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) LOG.info(_LI('Reclaiming deleted instance'), instance=instance) try: self._delete_instance(context, instance, bdms, quotas) except Exception as e: LOG.warning(_LW("Periodic reclaim failed to delete " "instance: %s"), e, instance=instance) @periodic_task.periodic_task def update_available_resource(self, context): """See driver.get_available_resource() Periodic process that keeps that the compute host's understanding of resource availability and usage in sync with the underlying hypervisor. 
:param context: security context """ new_resource_tracker_dict = {} nodenames = set(self.driver.get_available_nodes()) for nodename in nodenames: rt = self._get_resource_tracker(nodename) rt.update_available_resource(context) new_resource_tracker_dict[nodename] = rt # Delete orphan compute node not reported by driver but still in db compute_nodes_in_db = self._get_compute_nodes_in_db(context, use_slave=True) for cn in compute_nodes_in_db: if cn.hypervisor_hostname not in nodenames: LOG.audit(_("Deleting orphan compute node %s") % cn.id) cn.destroy() self._resource_tracker_dict = new_resource_tracker_dict def _get_compute_nodes_in_db(self, context, use_slave=False): try: return objects.ComputeNodeList.get_all_by_host(context, self.host, use_slave=use_slave) except exception.NotFound: LOG.error(_LE("No compute node record for host %s"), self.host) return [] @periodic_task.periodic_task( spacing=CONF.running_deleted_instance_poll_interval) def _cleanup_running_deleted_instances(self, context): """Cleanup any instances which are erroneously still running after having been deleted. Valid actions to take are: 1. noop - do nothing 2. log - log which instances are erroneously running 3. reap - shutdown and cleanup any erroneously running instances 4. shutdown - power off *and disable* any erroneously running instances The use-case for this cleanup task is: for various reasons, it may be possible for the database to show an instance as deleted but for that instance to still be running on a host machine (see bug https://bugs.launchpad.net/nova/+bug/911366). This cleanup task is a cross-hypervisor utility for finding these zombied instances and either logging the discrepancy (likely what you should do in production), or automatically reaping the instances (more appropriate for dev environments). 
""" action = CONF.running_deleted_instance_action if action == "noop": return # NOTE(sirp): admin contexts don't ordinarily return deleted records with utils.temporary_mutation(context, read_deleted="yes"): for instance in self._running_deleted_instances(context): if action == "log": LOG.warning(_LW("Detected instance with name label " "'%s' which is marked as " "DELETED but still present on host."), instance['name'], instance=instance) elif action == 'shutdown': LOG.info(_LI("Powering off instance with name label " "'%s' which is marked as " "DELETED but still present on host."), instance['name'], instance=instance) try: try: # disable starting the instance self.driver.set_bootable(instance, False) except NotImplementedError: LOG.warning(_LW("set_bootable is not implemented " "for the current driver")) # and power it off self.driver.power_off(instance) except Exception: msg = _("Failed to power off instance") LOG.warn(msg, instance=instance, exc_info=True) elif action == 'reap': LOG.info(_LI("Destroying instance with name label " "'%s' which is marked as " "DELETED but still present on host."), instance['name'], instance=instance) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid, use_slave=True) self.instance_events.clear_events_for_instance(instance) try: self._shutdown_instance(context, instance, bdms, notify=False) self._cleanup_volumes(context, instance['uuid'], bdms) except Exception as e: LOG.warning(_LW("Periodic cleanup failed to delete " "instance: %s"), e, instance=instance) else: raise Exception(_("Unrecognized value '%s'" " for CONF.running_deleted_" "instance_action") % action) def _running_deleted_instances(self, context): """Returns a list of instances nova thinks is deleted, but the hypervisor thinks is still running. 
""" timeout = CONF.running_deleted_instance_timeout filters = {'deleted': True, 'soft_deleted': False, 'host': self.host} instances = self._get_instances_on_driver(context, filters) return [i for i in instances if self._deleted_old_enough(i, timeout)] def _deleted_old_enough(self, instance, timeout): deleted_at = instance['deleted_at'] if isinstance(instance, obj_base.NovaObject) and deleted_at: deleted_at = deleted_at.replace(tzinfo=None) return (not deleted_at or timeutils.is_older_than(deleted_at, timeout)) @contextlib.contextmanager def _error_out_instance_on_exception(self, context, instance, quotas=None, instance_state=vm_states.ACTIVE): instance_uuid = instance['uuid'] try: yield except NotImplementedError as error: with excutils.save_and_reraise_exception(): if quotas: quotas.rollback() LOG.info(_LI("Setting instance back to %(state)s after: " "%(error)s"), {'state': instance_state, 'error': error}, instance_uuid=instance_uuid) self._instance_update(context, instance_uuid, vm_state=instance_state, task_state=None) except exception.InstanceFaultRollback as error: if quotas: quotas.rollback() LOG.info(_LI("Setting instance back to ACTIVE after: %s"), error, instance_uuid=instance_uuid) self._instance_update(context, instance_uuid, vm_state=vm_states.ACTIVE, task_state=None) raise error.inner_exception except Exception: LOG.exception(_LE('Setting instance vm_state to ERROR'), instance_uuid=instance_uuid) with excutils.save_and_reraise_exception(): if quotas: quotas.rollback() self._set_instance_error_state(context, instance) @aggregate_object_compat @wrap_exception() def add_aggregate_host(self, context, aggregate, host, slave_info): """Notify hypervisor of change (for hypervisor pools).""" try: self.driver.add_to_aggregate(context, aggregate, host, slave_info=slave_info) except NotImplementedError: LOG.debug('Hypervisor driver does not support ' 'add_aggregate_host') except exception.AggregateError: with excutils.save_and_reraise_exception(): 
self.driver.undo_aggregate_operation( context, aggregate.delete_host, aggregate, host) @aggregate_object_compat @wrap_exception() def remove_aggregate_host(self, context, host, slave_info, aggregate): """Removes a host from a physical hypervisor pool.""" try: self.driver.remove_from_aggregate(context, aggregate, host, slave_info=slave_info) except NotImplementedError: LOG.debug('Hypervisor driver does not support ' 'remove_aggregate_host') except (exception.AggregateError, exception.InvalidAggregateAction) as e: with excutils.save_and_reraise_exception(): self.driver.undo_aggregate_operation( context, aggregate.add_host, aggregate, host, isinstance(e, exception.AggregateError)) def _process_instance_event(self, instance, event): _event = self.instance_events.pop_instance_event(instance, event) if _event: LOG.debug('Processing event %(event)s', {'event': event.key}, instance=instance) _event.send(event) @wrap_exception() def external_instance_event(self, context, instances, events): # NOTE(danms): Some event types are handled by the manager, such # as when we're asked to update the instance's info_cache. If it's # not one of those, look for some thread(s) waiting for the event and # unblock them if so. 
for event in events: instance = [inst for inst in instances if inst.uuid == event.instance_uuid][0] LOG.debug('Received event %(event)s', {'event': event.key}, instance=instance) if event.name == 'network-changed': self.network_api.get_instance_nw_info(context, instance) else: self._process_instance_event(instance, event) @periodic_task.periodic_task(spacing=CONF.image_cache_manager_interval, external_process_ok=True) def _run_image_cache_manager_pass(self, context): """Run a single pass of the image cache manager.""" if not self.driver.capabilities["has_imagecache"]: return # Determine what other nodes use this storage storage_users.register_storage_use(CONF.instances_path, CONF.host) nodes = storage_users.get_storage_users(CONF.instances_path) # Filter all_instances to only include those nodes which share this # storage path. # TODO(mikal): this should be further refactored so that the cache # cleanup code doesn't know what those instances are, just a remote # count, and then this logic should be pushed up the stack. 
filters = {'deleted': False, 'soft_deleted': True, 'host': nodes} filtered_instances = objects.InstanceList.get_by_filters(context, filters, expected_attrs=[], use_slave=True) self.driver.manage_image_cache(context, filtered_instances) @periodic_task.periodic_task(spacing=CONF.instance_delete_interval) def _run_pending_deletes(self, context): """Retry any pending instance file deletes.""" LOG.debug('Cleaning up deleted instances') filters = {'deleted': True, 'soft_deleted': False, 'host': CONF.host, 'cleaned': False} attrs = ['info_cache', 'security_groups', 'system_metadata'] with utils.temporary_mutation(context, read_deleted='yes'): instances = objects.InstanceList.get_by_filters( context, filters, expected_attrs=attrs, use_slave=True) LOG.debug('There are %d instances to clean', len(instances)) for instance in instances: attempts = int(instance.system_metadata.get('clean_attempts', '0')) LOG.debug('Instance has had %(attempts)s of %(max)s ' 'cleanup attempts', {'attempts': attempts, 'max': CONF.maximum_instance_delete_attempts}, instance=instance) if attempts < CONF.maximum_instance_delete_attempts: success = self.driver.delete_instance_files(instance) instance.system_metadata['clean_attempts'] = str(attempts + 1) if success: instance.cleaned = True with utils.temporary_mutation(context, read_deleted='yes'): instance.save()
sajeeshcs/nested_quota_final
nova/compute/manager.py
Python
apache-2.0
289,419
/* * 3D City Database - The Open Source CityGML Database * https://www.3dcitydb.org/ * * Copyright 2013 - 2021 * Chair of Geoinformatics * Technical University of Munich, Germany * https://www.lrg.tum.de/gis/ * * The 3D City Database is jointly developed with the following * cooperation partners: * * Virtual City Systems, Berlin <https://vc.systems/> * M.O.S.S. Computer Grafik Systeme GmbH, Taufkirchen <http://www.moss.de/> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.citydb.gui.components.checkboxtree; import javax.swing.*; /** * The model for a quadristate CheckBox. Available states are UNCHECKED, * CHECKED, GREY_CHECKED, GREY_UNCHECKED * * @author boldrini */ public class QuadristateButtonModel extends DefaultButtonModel { public enum State { CHECKED, GREY_CHECKED, GREY_UNCHECKED, UNCHECKED } public QuadristateButtonModel() { super(); setState(State.UNCHECKED); } /** * The current state is embedded in the selection / armed state of the * model. We return the CHECKED state when the checkbox is selected but not * armed, GREY_CHECKED state when the checkbox is selected and armed (grey) * and UNCHECKED when the checkbox is deselected. 
*/ public State getState() { if (isSelected() && !isArmed()) { // CHECKED return State.CHECKED; } else if (isSelected() && isArmed()) { // GREY_CHECKED return State.GREY_CHECKED; } else if (!isSelected() && isArmed()) { // GREY_UNCHECKED return State.GREY_UNCHECKED; } else { // (!isSelected() && !isArmed()){ // UNCHECKED return State.UNCHECKED; } } // public void setSelected(boolean b) { // if (b) { // setState(State.CHECKED); // } else { // setState(State.UNCHECKED); // } // } /** * We rotate between UNCHECKED, CHECKED, GREY_UNCHECKED, GREY_CHECKED. */ public void nextState() { switch (getState()) { case UNCHECKED: setState(State.CHECKED); break; case CHECKED: setState(State.GREY_UNCHECKED); break; case GREY_UNCHECKED: setState(State.GREY_CHECKED); break; case GREY_CHECKED: setState(State.UNCHECKED); break; } } /** * Filter: No one may change the armed status except us. */ @Override public void setArmed(boolean b) { } public void setState(State state) { switch (state) { case UNCHECKED: super.setArmed(false); setPressed(false); setSelected(false); break; case CHECKED: super.setArmed(false); setPressed(false); setSelected(true); break; case GREY_UNCHECKED: super.setArmed(true); setPressed(true); setSelected(false); break; case GREY_CHECKED: super.setArmed(true); setPressed(true); setSelected(true); break; } } }
3dcitydb/importer-exporter
impexp-client-gui/src/main/java/org/citydb/gui/components/checkboxtree/QuadristateButtonModel.java
Java
apache-2.0
3,849
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ambari.server.controller.internal; import java.text.MessageFormat; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import com.google.inject.persist.Transactional; import org.apache.ambari.server.AmbariException; import org.apache.ambari.server.ObjectNotFoundException; import org.apache.ambari.server.api.resources.OperatingSystemResourceDefinition; import org.apache.ambari.server.api.resources.RepositoryResourceDefinition; import org.apache.ambari.server.api.services.AmbariMetaInfo; import org.apache.ambari.server.controller.spi.NoSuchParentResourceException; import org.apache.ambari.server.controller.spi.NoSuchResourceException; import org.apache.ambari.server.controller.spi.Predicate; import org.apache.ambari.server.controller.spi.Request; import org.apache.ambari.server.controller.spi.RequestStatus; import org.apache.ambari.server.controller.spi.Resource; import org.apache.ambari.server.controller.spi.Resource.Type; import org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException; import org.apache.ambari.server.controller.spi.SystemException; import 
org.apache.ambari.server.controller.spi.UnsupportedPropertyException; import org.apache.ambari.server.controller.utilities.PropertyHelper; import org.apache.ambari.server.orm.dao.ClusterVersionDAO; import org.apache.ambari.server.orm.dao.RepositoryVersionDAO; import org.apache.ambari.server.orm.dao.StackDAO; import org.apache.ambari.server.orm.entities.ClusterVersionEntity; import org.apache.ambari.server.orm.entities.OperatingSystemEntity; import org.apache.ambari.server.orm.entities.RepositoryEntity; import org.apache.ambari.server.orm.entities.RepositoryVersionEntity; import org.apache.ambari.server.orm.entities.StackEntity; import org.apache.ambari.server.state.OperatingSystemInfo; import org.apache.ambari.server.state.RepositoryVersionState; import org.apache.ambari.server.state.StackId; import org.apache.ambari.server.state.StackInfo; import org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper; import org.apache.commons.lang.ObjectUtils; import org.apache.commons.lang.StringUtils; import com.google.common.collect.Lists; import com.google.gson.Gson; import com.google.inject.Inject; /** * Resource provider for repository versions resources. 
*/ public class RepositoryVersionResourceProvider extends AbstractResourceProvider { // ----- Property ID constants --------------------------------------------- public static final String REPOSITORY_VERSION_ID_PROPERTY_ID = PropertyHelper.getPropertyId("RepositoryVersions", "id"); public static final String REPOSITORY_VERSION_STACK_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("RepositoryVersions", "stack_name"); public static final String REPOSITORY_VERSION_STACK_VERSION_PROPERTY_ID = PropertyHelper.getPropertyId("RepositoryVersions", "stack_version"); public static final String REPOSITORY_VERSION_REPOSITORY_VERSION_PROPERTY_ID = PropertyHelper.getPropertyId("RepositoryVersions", "repository_version"); public static final String REPOSITORY_VERSION_DISPLAY_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("RepositoryVersions", "display_name"); public static final String REPOSITORY_VERSION_UPGRADE_PACK_PROPERTY_ID = PropertyHelper.getPropertyId("RepositoryVersions", "upgrade_pack"); public static final String SUBRESOURCE_OPERATING_SYSTEMS_PROPERTY_ID = new OperatingSystemResourceDefinition().getPluralName(); public static final String SUBRESOURCE_REPOSITORIES_PROPERTY_ID = new RepositoryResourceDefinition().getPluralName(); @SuppressWarnings("serial") private static Set<String> pkPropertyIds = new HashSet<String>() { { add(REPOSITORY_VERSION_ID_PROPERTY_ID); } }; @SuppressWarnings("serial") public static Set<String> propertyIds = new HashSet<String>() { { add(REPOSITORY_VERSION_ID_PROPERTY_ID); add(REPOSITORY_VERSION_REPOSITORY_VERSION_PROPERTY_ID); add(REPOSITORY_VERSION_DISPLAY_NAME_PROPERTY_ID); add(REPOSITORY_VERSION_STACK_NAME_PROPERTY_ID); add(REPOSITORY_VERSION_STACK_VERSION_PROPERTY_ID); add(REPOSITORY_VERSION_UPGRADE_PACK_PROPERTY_ID); add(SUBRESOURCE_OPERATING_SYSTEMS_PROPERTY_ID); } }; @SuppressWarnings("serial") public static Map<Type, String> keyPropertyIds = new HashMap<Type, String>() { { put(Type.Stack, REPOSITORY_VERSION_STACK_NAME_PROPERTY_ID); 
put(Type.StackVersion, REPOSITORY_VERSION_STACK_VERSION_PROPERTY_ID); put(Type.RepositoryVersion, REPOSITORY_VERSION_ID_PROPERTY_ID); } }; @Inject private Gson gson; @Inject private RepositoryVersionDAO repositoryVersionDAO; @Inject private ClusterVersionDAO clusterVersionDAO; @Inject private AmbariMetaInfo ambariMetaInfo; @Inject private RepositoryVersionHelper repositoryVersionHelper; /** * Data access object used for lookup up stacks. */ @Inject private StackDAO stackDAO; /** * Create a new resource provider. * */ public RepositoryVersionResourceProvider() { super(propertyIds, keyPropertyIds); } @Override public RequestStatus createResources(final Request request) throws SystemException, UnsupportedPropertyException, ResourceAlreadyExistsException, NoSuchParentResourceException { for (final Map<String, Object> properties : request.getProperties()) { createResources(new Command<Void>() { @Override public Void invoke() throws AmbariException { final String[] requiredProperties = { REPOSITORY_VERSION_DISPLAY_NAME_PROPERTY_ID, SUBRESOURCE_OPERATING_SYSTEMS_PROPERTY_ID, REPOSITORY_VERSION_STACK_NAME_PROPERTY_ID, REPOSITORY_VERSION_STACK_VERSION_PROPERTY_ID, REPOSITORY_VERSION_REPOSITORY_VERSION_PROPERTY_ID }; for (String propertyName: requiredProperties) { if (properties.get(propertyName) == null) { throw new AmbariException("Property " + propertyName + " should be provided"); } } final RepositoryVersionEntity entity = toRepositoryVersionEntity(properties); if (repositoryVersionDAO.findByDisplayName(entity.getDisplayName()) != null) { throw new AmbariException("Repository version with name " + entity.getDisplayName() + " already exists"); } if (repositoryVersionDAO.findByStackAndVersion(entity.getStack(), entity.getVersion()) != null) { throw new AmbariException("Repository version for stack " + entity.getStack() + " and version " + entity.getVersion() + " already exists"); } validateRepositoryVersion(entity); repositoryVersionDAO.create(entity); 
notifyCreate(Resource.Type.RepositoryVersion, request); return null; } }); } return getRequestStatus(null); } @Override public Set<Resource> getResources(Request request, Predicate predicate) throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException { final Set<Resource> resources = new HashSet<Resource>(); final Set<String> requestedIds = getRequestPropertyIds(request, predicate); final Set<Map<String, Object>> propertyMaps = getPropertyMaps(predicate); List<RepositoryVersionEntity> requestedEntities = new ArrayList<RepositoryVersionEntity>(); for (Map<String, Object> propertyMap: propertyMaps) { final StackId stackId = getStackInformationFromUrl(propertyMap); if (stackId != null && propertyMaps.size() == 1 && propertyMap.get(REPOSITORY_VERSION_ID_PROPERTY_ID) == null) { requestedEntities.addAll(repositoryVersionDAO.findByStack(stackId)); } else { final Long id; try { id = Long.parseLong(propertyMap.get(REPOSITORY_VERSION_ID_PROPERTY_ID).toString()); } catch (Exception ex) { throw new SystemException("Repository version should have numerical id"); } final RepositoryVersionEntity entity = repositoryVersionDAO.findByPK(id); if (entity == null) { throw new NoSuchResourceException("There is no repository version with id " + id); } else { requestedEntities.add(entity); } } } for (RepositoryVersionEntity entity: requestedEntities) { final Resource resource = new ResourceImpl(Resource.Type.RepositoryVersion); setResourceProperty(resource, REPOSITORY_VERSION_ID_PROPERTY_ID, entity.getId(), requestedIds); setResourceProperty(resource, REPOSITORY_VERSION_STACK_NAME_PROPERTY_ID, entity.getStackName(), requestedIds); setResourceProperty(resource, REPOSITORY_VERSION_STACK_VERSION_PROPERTY_ID, entity.getStackVersion(), requestedIds); setResourceProperty(resource, REPOSITORY_VERSION_DISPLAY_NAME_PROPERTY_ID, entity.getDisplayName(), requestedIds); setResourceProperty(resource, REPOSITORY_VERSION_UPGRADE_PACK_PROPERTY_ID, 
entity.getUpgradePackage(), requestedIds); setResourceProperty(resource, REPOSITORY_VERSION_REPOSITORY_VERSION_PROPERTY_ID, entity.getVersion(), requestedIds); resources.add(resource); } return resources; } @Override @Transactional public RequestStatus updateResources(Request request, Predicate predicate) throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException { final Set<Map<String, Object>> propertyMaps = request.getProperties(); modifyResources(new Command<Void>() { @Override public Void invoke() throws AmbariException { for (Map<String, Object> propertyMap : propertyMaps) { final Long id; try { id = Long.parseLong(propertyMap.get(REPOSITORY_VERSION_ID_PROPERTY_ID).toString()); } catch (Exception ex) { throw new AmbariException("Repository version should have numerical id"); } final RepositoryVersionEntity entity = repositoryVersionDAO.findByPK(id); if (entity == null) { throw new ObjectNotFoundException("There is no repository version with id " + id); } if (StringUtils.isNotBlank(ObjectUtils.toString(propertyMap.get(REPOSITORY_VERSION_UPGRADE_PACK_PROPERTY_ID)))) { StackEntity stackEntity = entity.getStack(); String stackName = stackEntity.getStackName(); String stackVersion = stackEntity.getStackVersion(); final List<ClusterVersionEntity> clusterVersionEntities = clusterVersionDAO.findByStackAndVersion( stackName, stackVersion, entity.getVersion()); if (!clusterVersionEntities.isEmpty()) { final ClusterVersionEntity firstClusterVersion = clusterVersionEntities.get(0); throw new AmbariException("Upgrade pack can't be changed for repository version which is " + firstClusterVersion.getState().name() + " on cluster " + firstClusterVersion.getClusterEntity().getClusterName()); } final String upgradePackage = propertyMap.get(REPOSITORY_VERSION_UPGRADE_PACK_PROPERTY_ID).toString(); entity.setUpgradePackage(upgradePackage); } List<OperatingSystemEntity> operatingSystemEntities = null; if 
(StringUtils.isNotBlank(ObjectUtils.toString(propertyMap.get(SUBRESOURCE_OPERATING_SYSTEMS_PROPERTY_ID)))) { final Object operatingSystems = propertyMap.get(SUBRESOURCE_OPERATING_SYSTEMS_PROPERTY_ID); final String operatingSystemsJson = gson.toJson(operatingSystems); try { operatingSystemEntities = repositoryVersionHelper.parseOperatingSystems(operatingSystemsJson); } catch (Exception ex) { throw new AmbariException("Json structure for operating systems is incorrect", ex); } entity.setOperatingSystems(operatingSystemsJson); } if (StringUtils.isNotBlank(ObjectUtils.toString(propertyMap.get(REPOSITORY_VERSION_DISPLAY_NAME_PROPERTY_ID)))) { entity.setDisplayName(propertyMap.get(REPOSITORY_VERSION_DISPLAY_NAME_PROPERTY_ID).toString()); } validateRepositoryVersion(entity); repositoryVersionDAO.merge(entity); // // Update metaInfo table as well // if (operatingSystemEntities != null) { String stackName = entity.getStackName(); String stackVersion = entity.getStackVersion(); for (OperatingSystemEntity osEntity : operatingSystemEntities) { List<RepositoryEntity> repositories = osEntity.getRepositories(); for (RepositoryEntity repository : repositories) { ambariMetaInfo.updateRepoBaseURL(stackName, stackVersion, osEntity.getOsType(), repository.getRepositoryId(), repository.getBaseUrl()); } } } } return null; } }); return getRequestStatus(null); } @Override public RequestStatus deleteResources(Predicate predicate) throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException { final Set<Map<String, Object>> propertyMaps = getPropertyMaps(predicate); final List<RepositoryVersionEntity> entitiesToBeRemoved = new ArrayList<RepositoryVersionEntity>(); for (Map<String, Object> propertyMap : propertyMaps) { final Long id; try { id = Long.parseLong(propertyMap.get(REPOSITORY_VERSION_ID_PROPERTY_ID).toString()); } catch (Exception ex) { throw new SystemException("Repository version should have numerical id"); } final 
RepositoryVersionEntity entity = repositoryVersionDAO.findByPK(id); if (entity == null) { throw new NoSuchResourceException("There is no repository version with id " + id); } StackEntity stackEntity = entity.getStack(); String stackName = stackEntity.getStackName(); String stackVersion = stackEntity.getStackVersion(); final List<ClusterVersionEntity> clusterVersionEntities = clusterVersionDAO.findByStackAndVersion( stackName, stackVersion, entity.getVersion()); final List<RepositoryVersionState> forbiddenToDeleteStates = Lists.newArrayList( RepositoryVersionState.CURRENT, RepositoryVersionState.INSTALLED, RepositoryVersionState.INSTALLING, RepositoryVersionState.UPGRADED, RepositoryVersionState.UPGRADING); for (ClusterVersionEntity clusterVersionEntity : clusterVersionEntities) { if (clusterVersionEntity.getRepositoryVersion().getId().equals(id) && forbiddenToDeleteStates.contains(clusterVersionEntity.getState())) { throw new SystemException("Repository version can't be deleted as it is " + clusterVersionEntity.getState().name() + " on cluster " + clusterVersionEntity.getClusterEntity().getClusterName()); } } entitiesToBeRemoved.add(entity); } for (RepositoryVersionEntity entity: entitiesToBeRemoved) { repositoryVersionDAO.remove(entity); } return getRequestStatus(null); } @Override protected Set<String> getPKPropertyIds() { return pkPropertyIds; } /** * Validates newly created repository versions to contain actual information. 
* * @param repositoryVersion repository version * @throws AmbariException exception with error message */ protected void validateRepositoryVersion(RepositoryVersionEntity repositoryVersion) throws AmbariException { final StackId requiredStack = new StackId(repositoryVersion.getStack()); final String stackName = requiredStack.getStackName(); final String stackMajorVersion = requiredStack.getStackVersion(); final String stackFullName = requiredStack.getStackId(); // check that stack exists final StackInfo stackInfo = ambariMetaInfo.getStack(stackName, stackMajorVersion); if (stackInfo.getUpgradePacks() == null) { throw new AmbariException("Stack " + stackFullName + " doesn't have upgrade packages"); } // List of all repo urls that are already added at stack Set<String> existingRepoUrls = new HashSet<String>(); List<RepositoryVersionEntity> existingRepoVersions = repositoryVersionDAO.findByStack(requiredStack); for (RepositoryVersionEntity existingRepoVersion : existingRepoVersions) { for (OperatingSystemEntity operatingSystemEntity : existingRepoVersion.getOperatingSystems()) { for (RepositoryEntity repositoryEntity : operatingSystemEntity.getRepositories()) { if (! repositoryEntity.getRepositoryId().startsWith("HDP-UTILS") && // HDP-UTILS is shared between repo versions ! 
existingRepoVersion.getId().equals(repositoryVersion.getId())) { // Allow modifying already defined repo version existingRepoUrls.add(repositoryEntity.getBaseUrl()); } } } } // check that repositories contain only supported operating systems final Set<String> osSupported = new HashSet<String>(); for (OperatingSystemInfo osInfo: ambariMetaInfo.getOperatingSystems(stackName, stackMajorVersion)) { osSupported.add(osInfo.getOsType()); } final Set<String> osRepositoryVersion = new HashSet<String>(); for (OperatingSystemEntity os: repositoryVersion.getOperatingSystems()) { osRepositoryVersion.add(os.getOsType()); for (RepositoryEntity repositoryEntity : os.getRepositories()) { String baseUrl = repositoryEntity.getBaseUrl(); if (existingRepoUrls.contains(baseUrl)) { throw new AmbariException("Base url " + baseUrl + " is already defined for another repository version. " + "Setting up base urls that contain the same versions of components will cause rolling upgrade to fail."); } } } if (osRepositoryVersion.isEmpty()) { throw new AmbariException("At least one set of repositories for OS should be provided"); } for (String os: osRepositoryVersion) { if (!osSupported.contains(os)) { throw new AmbariException("Operating system type " + os + " is not supported by stack " + stackFullName); } } if (!RepositoryVersionEntity.isVersionInStack(repositoryVersion.getStackId(), repositoryVersion.getVersion())) { throw new AmbariException(MessageFormat.format("Version {0} needs to belong to stack {1}", repositoryVersion.getVersion(), repositoryVersion.getStackName() + "-" + repositoryVersion.getStackVersion())); } } /** * Transforms map of json properties to repository version entity. 
* * @param properties json map * @return constructed entity * @throws AmbariException if some properties are missing or json has incorrect structure */ protected RepositoryVersionEntity toRepositoryVersionEntity(Map<String, Object> properties) throws AmbariException { final RepositoryVersionEntity entity = new RepositoryVersionEntity(); final String stackName = properties.get(REPOSITORY_VERSION_STACK_NAME_PROPERTY_ID).toString(); final String stackVersion = properties.get(REPOSITORY_VERSION_STACK_VERSION_PROPERTY_ID).toString(); StackEntity stackEntity = stackDAO.find(stackName, stackVersion); entity.setDisplayName(properties.get(REPOSITORY_VERSION_DISPLAY_NAME_PROPERTY_ID).toString()); entity.setStack(stackEntity); entity.setVersion(properties.get(REPOSITORY_VERSION_REPOSITORY_VERSION_PROPERTY_ID).toString()); final Object operatingSystems = properties.get(SUBRESOURCE_OPERATING_SYSTEMS_PROPERTY_ID); final String operatingSystemsJson = gson.toJson(operatingSystems); try { repositoryVersionHelper.parseOperatingSystems(operatingSystemsJson); } catch (Exception ex) { throw new AmbariException("Json structure for operating systems is incorrect", ex); } entity.setOperatingSystems(operatingSystemsJson); entity.setUpgradePackage(repositoryVersionHelper.getUpgradePackageName(stackName, stackVersion, entity.getVersion())); return entity; } protected StackId getStackInformationFromUrl(Map<String, Object> propertyMap) { if (propertyMap.containsKey(REPOSITORY_VERSION_STACK_NAME_PROPERTY_ID) && propertyMap.containsKey(REPOSITORY_VERSION_STACK_VERSION_PROPERTY_ID)) { return new StackId(propertyMap.get(REPOSITORY_VERSION_STACK_NAME_PROPERTY_ID).toString(), propertyMap.get(REPOSITORY_VERSION_STACK_VERSION_PROPERTY_ID).toString()); } return null; } }
zouzhberk/ambaridemo
demo-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java
Java
apache-2.0
21,584
//Spanform.h //Part of Spanform, by James Halliday, copyright 2000, all rights reserved #if !defined(AFX_SPANFORM_H__B9125B04_DA5C_11D2_A630_D5FD7E869135__INCLUDED_) #define AFX_SPANFORM_H__B9125B04_DA5C_11D2_A630_D5FD7E869135__INCLUDED_ #if _MSC_VER > 1000 #pragma once #endif // _MSC_VER > 1000 #ifndef __AFXWIN_H__ #error include 'stdafx.h' before including this file for PCH #endif #include "resource.h" // main symbols class CSpanformApp : public CWinApp { public: virtual BOOL PreTranslateMessage(MSG* pMsg); //my dummy function void MyDummyNew(); CSpanformApp(); //pitch arrays int pitch[9]; CString spitch[9]; //true if audio file is playing at the moment BOOL weplaying; //true if ANY file audio is open, since we only open one at a time BOOL weopen; // Overrides // ClassWizard generated virtual function overrides //{{AFX_VIRTUAL(CSpanformApp) public: virtual BOOL InitInstance(); //}}AFX_VIRTUAL // Implementation //{{AFX_MSG(CSpanformApp) afx_msg void OnAppAbout(); afx_msg void OnFileCreate(); afx_msg void OnFileOpen(); //}}AFX_MSG DECLARE_MESSAGE_MAP() }; ///////////////////////////////////////////////////////////////////////////// //{{AFX_INSERT_LOCATION}} // Microsoft Visual C++ will insert additional declarations immediately before the previous line. #endif // !defined(AFX_SPANFORM_H__B9125B04_DA5C_11D2_A630_D5FD7E869135__INCLUDED_)
malictus/spanform
source/SPANFORM.H
C++
apache-2.0
1,453
/* * Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.cloudtrail.model; import javax.annotation.Generated; /** * <p> * The number of tags per trail has exceeded the permitted amount. Currently, the limit is 50. * </p> */ @Generated("com.amazonaws:aws-java-sdk-code-generator") public class TagsLimitExceededException extends com.amazonaws.services.cloudtrail.model.AWSCloudTrailException { private static final long serialVersionUID = 1L; /** * Constructs a new TagsLimitExceededException with the specified error message. * * @param message * Describes the error encountered. */ public TagsLimitExceededException(String message) { super(message); } }
jentfoo/aws-sdk-java
aws-java-sdk-cloudtrail/src/main/java/com/amazonaws/services/cloudtrail/model/TagsLimitExceededException.java
Java
apache-2.0
1,274
package com.ibm.streamsx.sparkmllib.tree; import org.apache.spark.SparkContext; import org.apache.spark.mllib.linalg.Vector; import org.apache.spark.mllib.tree.model.GradientBoostedTreesModel; import com.ibm.streams.operator.model.InputPortSet; import com.ibm.streams.operator.model.OutputPortSet; import com.ibm.streamsx.sparkmllib.AbstractSparkMLlibListToDoubleOperator; //@PrimitiveOperator(description="This operator provides support for analysis of incoming tuple data against Apache Spark's decision tree ensembles machine learning library.") @InputPortSet(cardinality=1,description="This input port is required. The operator expects an attribute of type list<float64> that will be used as input to the gradient boosted trees algorithm.") @OutputPortSet(cardinality=1,description="This output port is required. The operator passes through all attributes on the input port as-is to the output port. In addition, it expects an attribute called 'analysisResult' of type float64.") public class SparkEnsembleGradientBoostedTrees extends AbstractSparkMLlibListToDoubleOperator<GradientBoostedTreesModel> { @Override protected GradientBoostedTreesModel loadModel(SparkContext sc, String modelPath) { return GradientBoostedTreesModel.load(sc, modelPath); } @Override protected double performOperation(Vector features) { return getModel().predict(features); } }
jibaro/streamsx.sparkMLLib
com.ibm.streamsx.sparkmllib/impl/java/src/com/ibm/streamsx/sparkmllib/tree/SparkEnsembleGradientBoostedTrees.java
Java
apache-2.0
1,380
/* * Copyright 2016-present Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package com.facebook.buck.haskell; import com.facebook.buck.rules.CommandTool; import com.facebook.buck.rules.ConstantToolProvider; import com.facebook.buck.rules.ToolProvider; import com.facebook.buck.util.immutables.BuckStyleImmutable; import com.google.common.collect.ImmutableList; import org.immutables.value.Value; @Value.Immutable @BuckStyleImmutable abstract class AbstractFakeHaskellConfig implements HaskellConfig { public static final FakeHaskellConfig DEFAULT = FakeHaskellConfig.builder().build(); @Override @Value.Default public ToolProvider getCompiler() { return new ConstantToolProvider(new CommandTool.Builder().build()); } @Override public abstract ImmutableList<String> getCompilerFlags(); @Override @Value.Default public ToolProvider getLinker() { return new ConstantToolProvider(new CommandTool.Builder().build()); } @Override public abstract ImmutableList<String> getLinkerFlags(); @Override @Value.Default public boolean shouldCacheLinks() { return true; } }
Dominator008/buck
test/com/facebook/buck/haskell/AbstractFakeHaskellConfig.java
Java
apache-2.0
1,646
$(document).ready(function() { $('#fieldrent_maintain').DataTable( { "aLengthMenu" : [ 2, 4, 6, 8, 10 ], //动态指定分页后每页显示的记录数。 "lengthChange" : true, //是否启用改变每页显示多少条数据的控件 "bSort" : false, "iDisplayLength" : 8, //默认每页显示多少条记录 "dom" : 'ftipr<"bottom"l>', "ajax" : { "url" : "landRentInfo.do", "type" : "POST" }, "aoColumns" : [ { "mData" : "id", "orderable" : true, // 禁用排序 "sDefaultContent" : "", "sWidth" : "6%", }, { "mData" : "startTime", "orderable" : true, // 禁用排序 "sDefaultContent" : "", "sWidth" : "6%" }, { "mData" : "endTime", "orderable" : true, // 禁用排序 "sDefaultContent" : "", "sWidth" : "6%" }, { "mData" : "bname", "orderable" : false, // 禁用排序 "sDefaultContent" : "", "sWidth" : "10%" }, { "mData" : "lid", "orderable" : true, // 禁用排序 "sDefaultContent" : "", "sWidth" : "8%" }, { "mData" : "name", "orderable" : true, // 禁用排序 "sDefaultContent" : "", "sWidth" : "8%" }, { "mData" : "deptName", "orderable" : true, // 禁用排序 "sDefaultContent" : "", "sWidth" : "8%" }, { "mData" : "times", "orderable" : true, // 禁用排序 "sDefaultContent" : "", "sWidth" : "8%" }, { "mData" : "planting", "orderable" : true, // 禁用排序 "sDefaultContent" : "", "sWidth" : "8%" }, { "mData" : "lr_id", "orderable" : false, // 禁用排序 "sDefaultContent" : '', "sWidth" : "5%", "render" : function(data, type, row) { //render改变该列样式,4个参数,其中参数数量是可变的。 return data = '<span class="glyphicon glyphicon-pencil" data-id='+data+' data-toggle="modal" data-target="#myModal3"></span>'; } } //data指该行获取到的该列数据 //row指该行,可用row.name或row[2]获取第3列字段名为name的值 //type调用数据类型,可用类型“filter”,"display","type","sort",具体用法还未研究 //meta包含请求行索引,列索引,tables各参数等信息 ], "columnDefs" : [{ "orderable" : false, // 禁用排序 "targets" : [0], // 指定的列 "data" : "id", "render" : function(data, type, row) { data=row.lr_id; return '<input type="checkbox" value="'+ data + '" name="idname" />'; } }], "language" : { "lengthMenu" : "每页 _MENU_ 条记录", "zeroRecords" : "没有找到记录", "info" : "第 _PAGE_ 页 ( 总共 _PAGES_ 页 )", "infoEmpty" : "无记录", "infoFiltered" : "(从 _MAX_ 条记录过滤)", 
"sSearch" : "模糊查询:", "oPaginate" : { "sFirst" : "首页", "sPrevious" : " 上一页 ", "sNext" : " 下一页 ", "sLast" : " 尾页 " } } }); }); /* 全选,反选按钮 */ /* * allCkBox2(); function allCkBox2(id){ var tableBox = * document.getElementById(id||"fieldrent_maintain"), ck = * tableBox.getElementsByClassName("ck"), ckAll = * tableBox.getElementsByClassName("ck-all")[0], ckRe = * tableBox.getElementsByClassName("ck-re")[0]; ckAll.onchange = function(){ * allCk(this.checked); }; ckRe.onchange = function(){ reCk(); }; function * allCk(bool){ for(var i =0; i<ck.length;i++){ ck[i].checked = bool; } } * * function reCk(){ for(var i =0; i<ck.length;i++){ ck[i].checked ? * ck[i].checked = false : ck[i].checked = true; } } } */
pange123/PB_Management
前台界面/pbweb/js/myNeed/rentMaintain.js
JavaScript
apache-2.0
3,837
package org.cavebeetle.stream.impl; public interface StreamState<T> { T head(); StreamState<T> tail(); boolean isEmpty(); }
Hilco-Wijbenga/smarter-maven
maven-pom-plugin/src/main/java/org/cavebeetle/stream/impl/StreamState.java
Java
apache-2.0
139
// Copyright (C) 2008, by David W. Jeske // All Rights Reserved. using System; using System.IO; using System.Reflection; // This class makes it easy to deal with serialization and deserialization by using reflection.... // // HOWEVER, MSDN says: // // The GetFields method does not return fields in a particular order, such as alphabetical or // declaration order. Your code must not depend on the order in which fields are returned, because // that order varies. // // Which mwans depending on this code is bad, and is violating some possibility in .NET design. // // http://msdn.microsoft.com/en-us/library/6ztex2dc.aspx // // // ALSO, this implementation does not see internal members. namespace Bend { public static partial class Util { // how do we copy from struct to the Stream ? // all the solutions seem bad... // http://www.codeproject.com/KB/cs/C__Poiter.aspx // http://dotnetjunkies.com/WebLog/chris.taylor/articles/9016.aspx // // here is a better solution using reflection.. // http://www.megasolutions.net/cSharp/Using-reflection-to-show-all-fields-values-of-a-struct_-69838.aspx public static void writeStruct<E>(E obj, BinaryWriter w) where E : struct { Type otype = obj.GetType(); // MemberInfo[] members = otype.GetMembers(); FieldInfo[] fields = otype.GetFields(); foreach (FieldInfo f in fields) { Type t = f.FieldType; if (t == typeof(System.UInt32)) { w.Write((System.UInt32)f.GetValue(obj)); } else if (t == typeof(System.Int32)) { w.Write((System.Int32)f.GetValue(obj)); } else if (t == typeof(System.Int64)) { w.Write((System.Int64)f.GetValue(obj)); } else { throw new Exception("BinStruct unimplemented type: " + t.ToString()); } } } public static void writeStruct<E>(E obj, Stream output) where E : struct{ BinaryWriter w = new BinaryWriter(output); writeStruct<E>(obj, w); } public static void writeStruct<E>(E obj, out byte[] outbuf) where E : struct { MemoryStream ms = new MemoryStream(); writeStruct(obj, ms); outbuf = ms.ToArray(); } // 
http://mmarinov.blogspot.com/2007/01/reflection-modify-value-types-by.html public static E readStruct<E>(BinaryReader r) where E: struct { E val = new E(); // TypedReference vref = __makeref(val); Object oval = (Object)val; Type vtype = typeof(E); FieldInfo[] fields = vtype.GetFields(); foreach (FieldInfo f in fields) { Type t = f.FieldType; if (t == typeof(System.UInt32)) { // f.SetValueDirect(vref, r.ReadUInt32()); f.SetValue(oval, r.ReadUInt32()); } else if (t == typeof(System.Int32)) { // f.SetValueDirect(vref, r.ReadInt32()); f.SetValue(oval, r.ReadInt32()); } else if (t == typeof(System.Int64)) { f.SetValue(oval, r.ReadInt64()); } else { throw new Exception("BinStruct unimplemented type: " + t.ToString()); } } // return val; return (E)oval; } public static E readStruct<E>(Stream input) where E : struct { BinaryReader r = new BinaryReader(input); return readStruct<E>(r); } public static int structSize<T>(ref T stobj) where T : struct { int size = 0; Type otype = stobj.GetType(); // MemberInfo[] members = otype.GetMembers(); FieldInfo[] fields = otype.GetFields(); foreach (FieldInfo f in fields) { Type t = f.FieldType; if (t == typeof(System.UInt32)) { size += sizeof(System.UInt32); } else if (t == typeof(System.Int32)) { size += sizeof(System.Int32); } else if (t == typeof(System.Int64)) { size += sizeof(System.Int64); } else { throw new Exception("BinStruct unimplemented type: " + t.ToString()); } } return size; } } } namespace BendTests { using Bend; using NUnit.Framework; public partial class A00_UtilTest { struct Test { public uint a; public uint b; } [Test] public void T00_structWriteRead() { // test struct write/read Test st; st.a = 1; st.b = 2; MemoryStream stout = new MemoryStream(); Util.writeStruct(st, stout); byte[] data = stout.ToArray(); // dump the struct if (true) { System.Console.WriteLine("struct conversion size: " + data.Length); System.Console.WriteLine("contents: "); foreach (byte b in data) { System.Console.Write((int)b); 
System.Console.Write(" "); } } Test st2; MemoryStream msin = new MemoryStream(data); st2 = Util.readStruct<Test>(msin); Assert.AreEqual(msin.Position, msin.Length, "struct conversion didn't consume entire buffer"); Assert.AreEqual(st, st2, "struct conversion test failed"); } } }
jeske/StepsDB-alpha
Bend/Util/BinStruct.cs
C#
apache-2.0
5,900
/* * Copyright 2009 ZXing authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.oakzmm.demoapp.zxing.camera; import android.graphics.Bitmap; import com.google.zxing.LuminanceSource; /** * This object extends LuminanceSource around an array of YUV data returned from the camera driver, * with the option to crop to a rectangle within the full data. This can be used to exclude * superfluous pixels around the perimeter and speed up decoding. * * It works for any pixel format where the Y channel is planar and appears first, including * YCbCr_420_SP and YCbCr_422_SP. 
* * @author dswitkin@google.com (Daniel Switkin) */ public final class PlanarYUVLuminanceSource extends LuminanceSource { private final byte[] yuvData; private final int dataWidth; private final int dataHeight; private final int left; private final int top; public PlanarYUVLuminanceSource(byte[] yuvData, int dataWidth, int dataHeight, int left, int top, int width, int height) { super(width, height); if (left + width > dataWidth || top + height > dataHeight) { throw new IllegalArgumentException("Crop rectangle does not fit within image data."); } this.yuvData = yuvData; this.dataWidth = dataWidth; this.dataHeight = dataHeight; this.left = left; this.top = top; } @Override public byte[] getRow(int y, byte[] row) { if (y < 0 || y >= getHeight()) { throw new IllegalArgumentException("Requested row is outside the image: " + y); } int width = getWidth(); if (row == null || row.length < width) { row = new byte[width]; } int offset = (y + top) * dataWidth + left; System.arraycopy(yuvData, offset, row, 0, width); return row; } @Override public byte[] getMatrix() { int width = getWidth(); int height = getHeight(); // If the caller asks for the entire underlying image, save the copy and give them the // original data. The docs specifically warn that result.length must be ignored. if (width == dataWidth && height == dataHeight) { return yuvData; } int area = width * height; byte[] matrix = new byte[area]; int inputOffset = top * dataWidth + left; // If the width matches the full width of the underlying data, perform a single copy. if (width == dataWidth) { System.arraycopy(yuvData, inputOffset, matrix, 0, area); return matrix; } // Otherwise copy one cropped row at a time. 
byte[] yuv = yuvData; for (int y = 0; y < height; y++) { int outputOffset = y * width; System.arraycopy(yuv, inputOffset, matrix, outputOffset, width); inputOffset += dataWidth; } return matrix; } @Override public boolean isCropSupported() { return true; } public int getDataWidth() { return dataWidth; } public int getDataHeight() { return dataHeight; } public Bitmap renderCroppedGreyscaleBitmap() { int width = getWidth(); int height = getHeight(); int[] pixels = new int[width * height]; byte[] yuv = yuvData; int inputOffset = top * dataWidth + left; for (int y = 0; y < height; y++) { int outputOffset = y * width; for (int x = 0; x < width; x++) { int grey = yuv[inputOffset + x] & 0xff; pixels[outputOffset + x] = 0xFF000000 | (grey * 0x00010101); } inputOffset += dataWidth; } Bitmap bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888); bitmap.setPixels(pixels, 0, width, 0, 0, width, height); return bitmap; } }
macouen/lunzi
app/src/main/java/com/oakzmm/demoapp/zxing/camera/PlanarYUVLuminanceSource.java
Java
apache-2.0
4,082
'use strict';

// Integration tests for the simple_service module: live request proxying,
// stored-render retrieval, and configuration-validation error paths.

// mocha defines to avoid JSHint breakage
/* global describe, it, before, beforeEach, after, afterEach */

var assert = require('../utils/assert.js');
var server = require('../utils/server.js');
var preq = require('preq');
var P = require('bluebird');
var simple_service = require('../../mods/simple_service');

describe('simple_service', function () {
    this.timeout(20000);

    // Start the test server once for the whole suite.
    before(function () { return server.start(); });

    // A test page that includes the current date, so that it changes if
    // re-rendered more than a second apart.
    var testPage = server.config.baseURL + '/service/test/User:GWicke%2fDate';

    function hasTextContentType(res) {
        assert.contentType(res, 'text/html');
    }

    // Log-stream slice used to detect whether outgoing (remote) requests
    // were made during a given window.
    var slice;

    it('retrieve content from backend service', function () {
        var tid1;
        var tid2;
        return preq.get({ uri: testPage })
        .then(function (res) {
            assert.deepEqual(res.status, 200);
            tid1 = res.headers.etag;
            hasTextContentType(res);
            // Delay for 1s to make sure that the content differs on
            // re-render, then force a re-render and check that it happened.
            slice = server.config.logStream.slice();
            return P.delay(1100)
            .then(function() {
                return preq.get({
                    uri: testPage,
                    headers: { 'cache-control': 'no-cache' }
                });
            });
        })
        .then(function (res) {
            tid2 = res.headers.etag;
            // A forced re-render must yield a new, defined etag, and it
            // must have hit the remote backend.
            assert.notDeepEqual(tid2, tid1);
            assert.notDeepEqual(tid2, undefined);
            hasTextContentType(res);
            slice.halt();
            assert.remoteRequests(slice, true);
            // delay for 1s to let the content change on re-render
            slice = server.config.logStream.slice();
            // Check retrieval of a stored render
            return P.delay(1100)
            .then(function() {
                return preq.get({
                    uri: testPage,
                });
            });
        })
        .then(function (res) {
            var tid3 = res.headers.etag;
            // The stored render is served unchanged (same etag as the
            // previous render).
            assert.deepEqual(tid3, tid2);
            assert.notDeepEqual(tid3, undefined);
            // Check that there were no remote requests
            slice.halt();
            assert.remoteRequests(slice, false);
            hasTextContentType(res);
        });
    });

    it('validates config: checks parallel returning requests', function() {
        return P.try(function() {
            simple_service({
                paths: {
                    test_path: {
                        get: {
                            on_request: [
                                {
                                    // Two returning requests in the same
                                    // parallel step are not allowed.
                                    get_one: {
                                        request: {
                                            uri: 'http://en.wikipedia.org/wiki/One'
                                        },
                                        return: '{$.get_one}'
                                    },
                                    get_two: {
                                        request: {
                                            uri: 'http://en.wikipedia.org/wiki/Two'
                                        },
                                        return: '{$.get_two}'
                                    }
                                }
                            ]
                        }
                    }
                }
            })
        })
        .then(function() {
            throw new Error('Should throw error');
        }, function(e) {
            // Error expected
            assert.deepEqual(/^Invalid spec\. Returning requests cannot be parallel\..*/.test(e.message), true);
        });
    });

    it('validates config: requires either return or request', function() {
        return P.try(function() {
            simple_service({
                paths: {
                    test_path: {
                        get: {
                            on_request: [
                                {
                                    // Neither request nor return: invalid.
                                    get_one: {}
                                }
                            ]
                        }
                    }
                }
            })
        })
        .then(function() {
            throw new Error('Should throw error');
        }, function(e) {
            // Error expected
            assert.deepEqual(/^Invalid spec\. Either request or return must be specified\..*/.test(e.message), true);
        });
    });

    it('validates config: requires request for return_if', function() {
        return P.try(function() {
            simple_service({
                paths: {
                    test_path: {
                        get: {
                            on_request: [
                                {
                                    // return_if without a request: invalid.
                                    get_one: {
                                        return_if: { status: '5xx' },
                                        return: '$.request'
                                    }
                                }
                            ]
                        }
                    }
                }
            })
        })
        .then(function() {
            throw new Error('Should throw error');
        }, function(e) {
            // Error expected
            assert.deepEqual(/^Invalid spec\. return_if should have a matching request\..*/.test(e.message), true);
        });
    });

    it('validates config: requires request for catch', function() {
        return P.try(function() {
            simple_service({
                paths: {
                    test_path: {
                        get: {
                            on_request: [
                                {
                                    // catch without a request: invalid.
                                    get_one: {
                                        catch: { status: '5xx' },
                                        return: '$.request'
                                    }
                                }
                            ]
                        }
                    }
                }
            })
        })
        .then(function() {
            throw new Error('Should throw error');
        }, function(e) {
            // Error expected
            assert.deepEqual(/^Invalid spec\. catch should have a matching request\..*/.test(e.message), true);
        });
    });
});
physikerwelt/restbase
test/features/simple_service.js
JavaScript
apache-2.0
6,685
/* * Copyright 2017 Keval Patel. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.kevalpatel2106.smartlens; import android.content.Intent; import android.os.Bundle; import android.support.annotation.Nullable; import android.support.v7.app.AppCompatActivity; import com.kevalpatel2106.smartlens.dashboard.Dashboard; /** * Created by Keval on 30-Jan-17. * * @author {@link 'https://github.com/kevalpatel2106'} */ public class SplashScreen extends AppCompatActivity { @Override protected void onCreate(@Nullable Bundle savedInstanceState) { super.onCreate(savedInstanceState); startActivity(new Intent(this, Dashboard.class)); } }
kevalpatel2106/smart-lens
app/src/main/java/com/kevalpatel2106/smartlens/SplashScreen.java
Java
apache-2.0
1,188
package fr.inria.oak.effisto.Loader.StorageHandler; //import java.util.ArrayList; public class OracleNoSQLStorageHandler extends Handler{ }
YifanLi/Effisto
src/fr/inria/oak/effisto/Loader/StorageHandler/OracleNoSQLStorageHandler.java
Java
apache-2.0
146
// Copyright 2019 The Oppia Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** * @fileoverview Service to handle common code for suggestion modal display. */ import { Injectable } from '@angular/core'; import { downgradeInjectable } from '@angular/upgrade/static'; export interface IParamDict { action: string; audioUpdateRequired?: boolean; commitMessage?: string; reviewMessage: string; } @Injectable({ providedIn: 'root' }) export class SuggestionModalService { SUGGESTION_ACCEPTED_MSG: string = ( 'This suggestion has already been accepted.'); SUGGESTION_REJECTED_MSG: string = ( 'This suggestion has already been rejected.'); SUGGESTION_INVALID_MSG: string = ( 'This suggestion was made for a state that no longer exists.' + ' It cannot be accepted.'); UNSAVED_CHANGES_MSG: string = ( 'You have unsaved changes to this exploration. Please save/discard your ' + 'unsaved changes if you wish to accept.'); ACTION_ACCEPT_SUGGESTION: string = 'accept'; ACTION_REJECT_SUGGESTION: string = 'reject'; ACTION_RESUBMIT_SUGGESTION: string = 'resubmit'; SUGGESTION_ACCEPTED: string = 'accepted'; SUGGESTION_REJECTED: string = 'rejected'; // TODO(YashJipkate): Replace 'any' with the exact type. This has been kept as // 'any' since '$uibModalInstance' is a AngularJS native object and does not // have a TS interface. 
acceptSuggestion($uibModalInstance: any, paramDict: IParamDict): void { $uibModalInstance.close(paramDict); } // TODO(YashJipkate): Replace 'any' with the exact type. This has been kept as // 'any' since '$uibModalInstance' is a AngularJS native object and does not // have a TS interface. rejectSuggestion($uibModalInstance: any, paramDict: IParamDict): void { $uibModalInstance.close(paramDict); } // TODO(YashJipkate): Replace 'any' with the exact type. This has been kept as // 'any' since '$uibModalInstance' is a AngularJS native object and does not // have a TS interface. cancelSuggestion($uibModalInstance: any): void { $uibModalInstance.dismiss('cancel'); } } angular.module('oppia').factory( 'SuggestionModalService', downgradeInjectable(SuggestionModalService));
souravbadami/oppia
core/templates/dev/head/services/SuggestionModalService.ts
TypeScript
apache-2.0
2,735
/*
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.optaplanner.examples.coachshuttlegathering.domain;

import org.optaplanner.examples.coachshuttlegathering.domain.location.RoadLocation;
import org.optaplanner.examples.common.domain.AbstractPersistable;

import com.thoughtworks.xstream.annotations.XStreamAlias;
import com.thoughtworks.xstream.annotations.XStreamInclude;

/**
 * Abstract base class for a vehicle in the coach shuttle gathering example.
 * Concrete subclasses are {@link Coach} and {@link Shuttle}.
 * A bus has a departure location, a capacity, a per-distance cost, and a
 * chain of {@link BusStop}s reachable through {@link #getNextStop()}.
 */
@XStreamAlias("CsgBus")
@XStreamInclude({
        Coach.class,
        Shuttle.class
})
public abstract class Bus extends AbstractPersistable implements BusOrStop {

    protected String name;
    protected RoadLocation departureLocation;
    protected int capacity;
    // Cost per unit of distance travelled by this bus.
    protected int mileageCost;

    // Shadow variables
    protected BusStop nextStop;

    // Running total of passengers assigned to this bus; defaults to 0.
    private int passengerQuantityTotal = 0;

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public RoadLocation getDepartureLocation() {
        return departureLocation;
    }

    public void setDepartureLocation(RoadLocation departureLocation) {
        this.departureLocation = departureLocation;
    }

    public int getCapacity() {
        return capacity;
    }

    public void setCapacity(int capacity) {
        this.capacity = capacity;
    }

    public int getMileageCost() {
        return mileageCost;
    }

    public void setMileageCost(int mileageCost) {
        this.mileageCost = mileageCost;
    }

    /** First stop of this bus's route chain (shadow variable), or null. */
    @Override
    public BusStop getNextStop() {
        return nextStop;
    }

    @Override
    public void setNextStop(BusStop nextStop) {
        this.nextStop = nextStop;
    }

    public Integer getPassengerQuantityTotal() {
        return passengerQuantityTotal;
    }

    /**
     * Sets the passenger total; a null argument is normalised to 0 so the
     * internal primitive field never observes a null.
     */
    public void setPassengerQuantityTotal(final Integer passengerQuantityTotal) {
        this.passengerQuantityTotal = passengerQuantityTotal == null ? 0 : passengerQuantityTotal;
    }

    // ************************************************************************
    // Complex methods
    // ************************************************************************

    /** Fixed cost incurred when this bus is used at all (subclass-specific). */
    public abstract int getSetupCost();

    /** A bus's location (as a BusOrStop) is its departure location. */
    @Override
    public RoadLocation getLocation() {
        return departureLocation;
    }

    /** A bus is its own bus in the BusOrStop chain. */
    @Override
    public Bus getBus() {
        return this;
    }

    public abstract int getDistanceFromTo(RoadLocation sourceLocation, RoadLocation targetLocation);

    public abstract int getDurationFromTo(RoadLocation sourceLocation, RoadLocation targetLocation);

    public abstract StopOrHub getDestination();

    @Override
    public String toString() {
        return name;
    }

}
droolsjbpm/optaplanner
optaplanner-examples/src/main/java/org/optaplanner/examples/coachshuttlegathering/domain/Bus.java
Java
apache-2.0
3,204
package com.alibaba.fastjson.parser.deserializer;

import java.lang.reflect.Constructor;
import java.lang.reflect.Type;
import java.util.HashMap;
import java.util.Map;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONException;
import com.alibaba.fastjson.parser.DefaultJSONParser;
import com.alibaba.fastjson.parser.Feature;
import com.alibaba.fastjson.parser.JSONLexer;
import com.alibaba.fastjson.parser.JSONToken;
import com.alibaba.fastjson.parser.ParserConfig;
import com.alibaba.fastjson.util.TypeUtils;

/**
 * Deserializer that reconstructs {@link Throwable} instances from JSON
 * objects. It understands the special keys {@code @type} (exception class),
 * {@code message}, {@code cause} (parsed recursively) and
 * {@code stackTrace}; any other keys are collected but not applied.
 */
public class ThrowableDeserializer extends JavaBeanDeserializer {

    public ThrowableDeserializer(ParserConfig mapping, Class<?> clazz){
        super(mapping, clazz, clazz);
    }

    /**
     * Parses a JSON object (or null) into a Throwable.
     * <p>
     * If no concrete exception class can be resolved — neither from the
     * declared {@code type} nor from an {@code @type} key — a plain
     * {@link Exception} carrying the message and cause is produced.
     *
     * @param parser    the active parser whose lexer is positioned at the value
     * @param type      expected target type; used when it is a Throwable subclass
     * @param fieldName name of the field being populated (context only)
     * @return the reconstructed Throwable, or null for JSON null
     * @throws JSONException on malformed input or instantiation failure
     */
    @SuppressWarnings("unchecked")
    public <T> T deserialze(DefaultJSONParser parser, Type type, Object fieldName) {
        JSONLexer lexer = parser.lexer;
        // JSON null maps to a null Throwable.
        if (lexer.token() == JSONToken.NULL) {
            lexer.nextToken();
            return null;
        }

        if (parser.getResolveStatus() == DefaultJSONParser.TypeNameRedirect) {
            // The '{' was already consumed during type-name redirection.
            parser.setResolveStatus(DefaultJSONParser.NONE);
        } else {
            if (lexer.token() != JSONToken.LBRACE) {
                throw new JSONException("syntax error");
            }
        }

        Throwable cause = null;
        Class<?> exClass = null;

        // Use the statically declared type when it is a Throwable subclass.
        if (type != null && type instanceof Class) {
            Class<?> clazz = (Class<?>) type;
            if (Throwable.class.isAssignableFrom(clazz)) {
                exClass = clazz;
            }
        }

        String message = null;
        StackTraceElement[] stackTrace = null;
        // NOTE(review): extra keys are collected here but never applied to
        // the resulting exception (see TODO below).
        Map<String, Object> otherValues = new HashMap<String, Object>();

        for (;;) {
            // lexer.scanSymbol
            String key = lexer.scanSymbol(parser.getSymbolTable());

            if (key == null) {
                // Either the closing brace or a stray comma (tolerated when
                // AllowArbitraryCommas is enabled).
                if (lexer.token() == JSONToken.RBRACE) {
                    lexer.nextToken(JSONToken.COMMA);
                    break;
                }
                if (lexer.token() == JSONToken.COMMA) {
                    if (lexer.isEnabled(Feature.AllowArbitraryCommas)) {
                        continue;
                    }
                }
            }

            lexer.nextTokenWithColon(JSONToken.LITERAL_STRING);
            if (JSON.DEFAULT_TYPE_KEY.equals(key)) {
                // "@type": explicit exception class name overrides the
                // statically declared type.
                if (lexer.token() == JSONToken.LITERAL_STRING) {
                    String exClassName = lexer.stringVal();
                    exClass = TypeUtils.loadClass(exClassName, parser.getConfig().getDefaultClassLoader());
                } else {
                    throw new JSONException("syntax error");
                }
                lexer.nextToken(JSONToken.COMMA);
            } else if ("message".equals(key)) {
                if (lexer.token() == JSONToken.NULL) {
                    message = null;
                } else if (lexer.token() == JSONToken.LITERAL_STRING) {
                    message = lexer.stringVal();
                } else {
                    throw new JSONException("syntax error");
                }
                lexer.nextToken();
            } else if ("cause".equals(key)) {
                // Recurse to parse the nested cause exception.
                cause = deserialze(parser, null, "cause");
            } else if ("stackTrace".equals(key)) {
                stackTrace = parser.parseObject(StackTraceElement[].class);
            } else {
                // TODO: apply otherValues to the exception's bean properties.
                otherValues.put(key, parser.parse());
            }

            if (lexer.token() == JSONToken.RBRACE) {
                lexer.nextToken(JSONToken.COMMA);
                break;
            }
        }

        Throwable ex = null;
        if (exClass == null) {
            // No concrete class known: fall back to a generic Exception.
            ex = new Exception(message, cause);
        } else {
            try {
                ex = createException(message, cause, exClass);
                if (ex == null) {
                    ex = new Exception(message, cause);
                }
            } catch (Exception e) {
                throw new JSONException("create instance error", e);
            }
        }

        if (stackTrace != null) {
            ex.setStackTrace(stackTrace);
        }

        return (T) ex;
    }

    /**
     * Instantiates {@code exClass} via the best available public constructor,
     * preferring (String, Throwable), then (String), then the no-arg
     * constructor. Returns null when none of the three shapes exist.
     */
    private Throwable createException(String message, Throwable cause, Class<?> exClass) throws Exception {
        Constructor<?> defaultConstructor = null;
        Constructor<?> messageConstructor = null;
        Constructor<?> causeConstructor = null;
        for (Constructor<?> constructor : exClass.getConstructors()) {
            Class<?>[] types = constructor.getParameterTypes();
            if (types.length == 0) {
                defaultConstructor = constructor;
                continue;
            }
            if (types.length == 1 && types[0] == String.class) {
                messageConstructor = constructor;
                continue;
            }
            if (types.length == 2 && types[0] == String.class && types[1] == Throwable.class) {
                causeConstructor = constructor;
                continue;
            }
        }

        if (causeConstructor != null) {
            return (Throwable) causeConstructor.newInstance(message, cause);
        }
        if (messageConstructor != null) {
            return (Throwable) messageConstructor.newInstance(message);
        }
        if (defaultConstructor != null) {
            return (Throwable) defaultConstructor.newInstance();
        }
        return null;
    }

    /** Throwables are always serialized as JSON objects, so match on '{'. */
    public int getFastMatchToken() {
        return JSONToken.LBRACE;
    }
}
coraldane/fastjson
src/main/java/com/alibaba/fastjson/parser/deserializer/ThrowableDeserializer.java
Java
apache-2.0
5,555
#!/usr/bin/env python2.7 # Copyright 2015 Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. """ Demonstrate how to obtain routing information of one network device. 1. Select a configured device from the inventory. 2. Execute the command and print the output. 3. Print the command syntax and output field descriptions. """ from __future__ import print_function from inspect import cleandoc from logging import log, WARN from nxapi.http import cli_show, connect, disconnect, print_command_reference, session_device_url from nxapi.context import sys_exit, EX_OK, EX_TEMPFAIL from nxapi.render import print_table from example import inventory_config from collections import OrderedDict command = 'sh routing' def demonstrate(session): """ Execute a command, print the output, return 'true' if successful. 
""" response = cli_show(session, command) for c in response: print('Output for command:', c) output = response[c] table_vrf = output['TABLE_vrf'] display_table = [] rows_vrf = table_vrf['ROW_vrf'] if not isinstance(rows_vrf, list): rows_vrf = [rows_vrf] for row_vrf in rows_vrf: display_vrf = OrderedDict() keys = [k for k in row_vrf if not k.startswith('TABLE')] for k in sorted(keys): display_vrf[k] = row_vrf[k] table_addrf = row_vrf['TABLE_addrf'] rows_addrf = table_addrf['ROW_addrf'] if not isinstance(rows_addrf, list): rows_addrf = [rows_addrf] for row_addrf in rows_addrf: display_addrf = OrderedDict(display_vrf) keys = [k for k in row_addrf if not k.startswith('TABLE')] for k in sorted(keys): display_addrf[k] = row_addrf[k] table_prefix = row_addrf['TABLE_prefix'] rows_prefix = table_prefix['ROW_prefix'] if not isinstance(rows_prefix, list): rows_prefix = [rows_prefix] for row_prefix in rows_prefix: display_prefix = OrderedDict(display_addrf) keys = [k for k in row_prefix if not k.startswith('TABLE')] for k in sorted(keys): display_prefix[k] = row_prefix[k] table_path = row_prefix['TABLE_path'] rows_path = table_path['ROW_path'] if not isinstance(rows_path, list): rows_path = [rows_path] for row_path in rows_path: display_path = OrderedDict(display_prefix) keys = [k for k in row_path if not k.startswith('TABLE')] for k in sorted(keys): display_path[k] = row_path[k] display_table.append(display_path) print_table(display_table) print() return True def main(): """ Oversee the sequence of tasks as per the documentation of this script. 
""" print(cleandoc(__doc__)) print() print('Select an appropriate device from those available.') print_table(inventory_config) print() for device_config in inventory_config: try: http_session = connect(**device_config) try: print('Connected to', session_device_url(http_session)) print() demonstrate(http_session) return EX_OK if print_command_reference(http_session, command) else EX_TEMPFAIL finally: disconnect(http_session) except IOError: log(WARN, 'Unable to connect to Nexus device %s', str(device_config)) continue print("There are no suitable network devices. Demonstration cancelled.") return EX_TEMPFAIL if __name__ == "__main__": sys_exit(main())
SivagnanamCiena/nxapi-learning-labs
python/example/show_routing.py
Python
apache-2.0
4,441
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.sling.replication.transport;

import org.apache.sling.replication.communication.ReplicationEndpoint;
import org.apache.sling.replication.queue.ReplicationQueueProcessor;
import org.apache.sling.replication.serialization.ReplicationPackage;
import org.apache.sling.replication.transport.authentication.TransportAuthenticationProvider;

/**
 * A <code>TransportHandler</code> is responsible for implementing the transport of
 * {@link ReplicationPackage}s to / from another instance described by a
 * {@link ReplicationEndpoint}. Implementations may additionally feed transport
 * responses back to a per-agent {@link ReplicationQueueProcessor}.
 */
public interface TransportHandler {

    /**
     * Executes the transport of a given {@link ReplicationPackage} to a specific
     * {@link ReplicationEndpoint} using this transport and the supplied
     * {@link TransportAuthenticationProvider} for authenticating the endpoint.
     *
     * @param agentName          a replication agent name
     * @param replicationPackage a {@link ReplicationPackage} to transport
     * @throws ReplicationTransportException if any error occurs during the transport
     */
    void transport(String agentName, ReplicationPackage replicationPackage)
            throws ReplicationTransportException;

    /**
     * Enables response processing for this <code>TransportHandler</code> for a certain
     * <code>ReplicationAgent</code>.
     *
     * @param agentName         the name of the <code>ReplicationAgent</code>
     * @param responseProcessor a <code>ReplicationQueueProcessor</code> that is called by the
     *                          <code>TransportHandler</code> whenever a response is received
     */
    void enableProcessing(String agentName, ReplicationQueueProcessor responseProcessor);

    /**
     * Disables response processing for this <code>TransportHandler</code>.
     *
     * @param agentName the name of the <code>ReplicationAgent</code>
     */
    void disableProcessing(String agentName);

}
MRivas-XumaK/slingBuild
contrib/extensions/replication/core/src/main/java/org/apache/sling/replication/transport/TransportHandler.java
Java
apache-2.0
2,686
// Copyright (c) 2018 Cisco and/or its affiliates. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ksr import ( "encoding/json" "fmt" "sync" "testing" "time" "github.com/contiv/vpp/plugins/ksr/model/ksrapi" "github.com/ligato/cn-infra/health/statuscheck/model/status" "github.com/onsi/gomega" coreV1 "k8s.io/api/core/v1" networkingV1 "k8s.io/api/networking/v1" metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" "github.com/contiv/vpp/plugins/ksr/model/policy" "github.com/ligato/cn-infra/logging" ) type PolicyTestVars struct { k8sListWatch *mockK8sListWatch mockKvBroker *mockKeyProtoValBroker policyReflector *PolicyReflector policyTestData []networkingV1.NetworkPolicy reflectorRegistry ReflectorRegistry } var policyTestVars PolicyTestVars func TestPolicyReflector(t *testing.T) { gomega.RegisterTestingT(t) policyTestVars.k8sListWatch = &mockK8sListWatch{} policyTestVars.mockKvBroker = newMockKeyProtoValBroker() policyTestVars.reflectorRegistry = ReflectorRegistry{ reflectors: make(map[string]*Reflector), lock: sync.RWMutex{}, } policyTestVars.policyReflector = &PolicyReflector{ Reflector: Reflector{ Log: logging.ForPlugin("policy-reflector"), K8sClientset: &kubernetes.Clientset{}, K8sListWatch: policyTestVars.k8sListWatch, Broker: policyTestVars.mockKvBroker, dsSynced: false, objType: policyObjType, ReflectorRegistry: &policyTestVars.reflectorRegistry, }, } var pprotTCP coreV1.Protocol = "TCP" 
policyTestVars.policyTestData = []networkingV1.NetworkPolicy{ // Test data 0: mocks a new object to be added or a "pre-existing" // object that is updated during sync { ObjectMeta: metaV1.ObjectMeta{ Name: "test-network-policy", Namespace: "default", SelfLink: "/apis/networking/v1/namespaces/default/networkpolicies/test-network-policy", UID: "44a9312f-f99f-11e7-b9b5-0800271d72be", ResourceVersion: "692693", Generation: 1, CreationTimestamp: metaV1.Date(2018, 01, 14, 18, 53, 37, 0, time.FixedZone("PST", -800)), }, Spec: networkingV1.NetworkPolicySpec{ PodSelector: metaV1.LabelSelector{ MatchLabels: map[string]string{"role": "db"}, MatchExpressions: []metaV1.LabelSelectorRequirement{}, }, Ingress: []networkingV1.NetworkPolicyIngressRule{ { Ports: []networkingV1.NetworkPolicyPort{ { Protocol: &pprotTCP, Port: &intstr.IntOrString{ Type: intstr.Int, IntVal: 6372, }, }, }, From: []networkingV1.NetworkPolicyPeer{ { IPBlock: &networkingV1.IPBlock{ CIDR: "172.17.0.0/16", Except: []string{ "172.17.1.0/24", "172.17.3.0/24", }, }, }, { NamespaceSelector: &metaV1.LabelSelector{ MatchLabels: map[string]string{"project": "myproject"}, MatchExpressions: []metaV1.LabelSelectorRequirement{}, }, }, { PodSelector: &metaV1.LabelSelector{ MatchLabels: map[string]string{"role": "frontend"}, MatchExpressions: []metaV1.LabelSelectorRequirement{}, }, }, }, }, }, Egress: []networkingV1.NetworkPolicyEgressRule{ { Ports: []networkingV1.NetworkPolicyPort{ { Protocol: &pprotTCP, Port: &intstr.IntOrString{ Type: intstr.Int, IntVal: 5978, }, }, }, To: []networkingV1.NetworkPolicyPeer{ { IPBlock: &networkingV1.IPBlock{ CIDR: "10.0.0.0/24", }, }, }, }, }, PolicyTypes: []networkingV1.PolicyType{ "Ingress", "Egress", }, }, }, // Test data 1: mocks a pre-existing object in the data store that is // updated during the mark-and-sweep synchronization test because its // counterpart in the K8s cache has changed. 
{ ObjectMeta: metaV1.ObjectMeta{ Name: "access-nginx", Namespace: "default", SelfLink: "/apis/networking/v1/namespaces/default/networkpolicies/access-nginx", UID: "4c4a8d72-f9bc-11e7-b9b5-0800271d72be", ResourceVersion: "706490", Generation: 1, CreationTimestamp: metaV1.Date(2018, 01, 14, 18, 53, 37, 0, time.FixedZone("PST", -800)), }, Spec: networkingV1.NetworkPolicySpec{ PodSelector: metaV1.LabelSelector{ MatchLabels: map[string]string{"run": "nginx"}, MatchExpressions: []metaV1.LabelSelectorRequirement{}, }, Ingress: []networkingV1.NetworkPolicyIngressRule{ { From: []networkingV1.NetworkPolicyPeer{ { PodSelector: &metaV1.LabelSelector{ MatchLabels: map[string]string{}, MatchExpressions: []metaV1.LabelSelectorRequirement{}, }, }, }, }, }, Egress: []networkingV1.NetworkPolicyEgressRule{ { Ports: []networkingV1.NetworkPolicyPort{ { Protocol: &pprotTCP, Port: &intstr.IntOrString{ Type: intstr.Int, IntVal: 5978, }, }, }, To: []networkingV1.NetworkPolicyPeer{ { NamespaceSelector: &metaV1.LabelSelector{ MatchLabels: map[string]string{"name": "name"}, MatchExpressions: []metaV1.LabelSelectorRequirement{}, }, }, { PodSelector: &metaV1.LabelSelector{ MatchLabels: map[string]string{"run": "nginx"}, MatchExpressions: []metaV1.LabelSelectorRequirement{}, }, }, }, }, }, PolicyTypes: []networkingV1.PolicyType{ "Ingress", "Egress", }, }, }, // Test data 2: mocks a pre-existing "stale" object in the data store // that is deleted during the mark-and-sweep synchronization test // because its counterpart no longer exists in the K8s cache. 
{ ObjectMeta: metaV1.ObjectMeta{ Name: "redis-allow-services", Namespace: "default", SelfLink: "/apis/networking/v1/namespaces/default/networkpolicies/redis-allow-services", UID: "5a091b3c-f9c1-11e7-b9b5-0800271d72be", ResourceVersion: "708875", Generation: 1, CreationTimestamp: metaV1.Date(2018, 01, 14, 18, 53, 37, 0, time.FixedZone("PST", -800)), }, Spec: networkingV1.NetworkPolicySpec{ PodSelector: metaV1.LabelSelector{ MatchLabels: map[string]string{"app": "bookstore", "role": "db"}, MatchExpressions: []metaV1.LabelSelectorRequirement{}, }, Ingress: []networkingV1.NetworkPolicyIngressRule{ { From: []networkingV1.NetworkPolicyPeer{ { PodSelector: &metaV1.LabelSelector{ MatchLabels: map[string]string{"app": "bookstore", "role": "db"}, MatchExpressions: []metaV1.LabelSelectorRequirement{}, }, }, { PodSelector: &metaV1.LabelSelector{ MatchLabels: map[string]string{"app": "bookstore", "role": "api"}, MatchExpressions: []metaV1.LabelSelectorRequirement{}, }, }, { PodSelector: &metaV1.LabelSelector{ MatchLabels: map[string]string{"app": "inventory", "role": "web"}, MatchExpressions: []metaV1.LabelSelectorRequirement{}, }, }, }, }, }, PolicyTypes: []networkingV1.PolicyType{ "Ingress", }, }, }, } // The mock function returns two K8s mock endpoints instances: // - a new endpoints instance to be added to the data store // - a modified endpoints instance, where and existing instance in the // data store is to be updated MockK8sCache.ListFunc = func() []interface{} { return []interface{}{ // Updated value mock &policyTestVars.policyTestData[0], // New value mock &policyTestVars.policyTestData[1], } } // Pre-populate the mock data store with pre-existing data that is supposed // to be updated during resync. 
k8sPolicy1 := &policyTestVars.policyTestData[1] protoPolicy1 := policyTestVars.policyReflector.policyToProto(k8sPolicy1) checkPolicyToProtoTranslation(t, protoPolicy1, k8sPolicy1) protoPolicy1.Pods.MatchLabel = append(protoPolicy1.Pods.MatchLabel, &policy.Policy_Label{Key: "key", Value: "value"}) policyTestVars.mockKvBroker.Put(policy.Key(k8sPolicy1.GetName(), k8sPolicy1.GetNamespace()), protoPolicy1) // Pre-populate the mock data store with "stale" data that is supposed to // be deleted during resync. k8sPolicy2 := &policyTestVars.policyTestData[2] protoPolicy2 := policyTestVars.policyReflector.policyToProto(k8sPolicy2) checkPolicyToProtoTranslation(t, protoPolicy2, k8sPolicy2) policyTestVars.mockKvBroker.Put(policy.Key(k8sPolicy2.GetName(), k8sPolicy2.GetNamespace()), protoPolicy2) statsBefore := *policyTestVars.policyReflector.GetStats() // Clear the reflector list (i.e. apply the policy resync tests only to // the policy reflector) stopCh := make(chan struct{}) var wg sync.WaitGroup err := policyTestVars.policyReflector.Init(stopCh, &wg) gomega.Expect(err).To(gomega.BeNil()) policyTestVars.policyReflector.startDataStoreResync() // Wait for the initial sync to finish for { if policyTestVars.policyReflector.HasSynced() { break } time.Sleep(time.Millisecond * 100) } statsAfter := *policyTestVars.policyReflector.GetStats() gomega.Expect(policyTestVars.mockKvBroker.ds).Should(gomega.HaveLen(2)) gomega.Expect(statsBefore.Adds + 1).Should(gomega.BeNumerically("==", statsAfter.Adds)) gomega.Expect(statsBefore.Updates + 1).Should(gomega.BeNumerically("==", statsAfter.Updates)) gomega.Expect(statsBefore.Deletes + 1).Should(gomega.BeNumerically("==", statsAfter.Deletes)) policyTestVars.mockKvBroker.ClearDs() t.Run("addDeletePolicy", testAddDeletePolicy) policyTestVars.mockKvBroker.ClearDs() t.Run("updatePolicy", testUpdatePolicy) // The following tests check the KSR resync feature under various failure // scenarios. These tests exercise mostly the core KSR Reflector code. 
We // only perform them on policies, as the core KSR reflector code is common // for all reflectors. policyTestVars.mockKvBroker.ClearDs() t.Run("testResyncPolicyAddFail", testResyncPolicyAddFail) policyTestVars.mockKvBroker.ClearDs() t.Run("testResyncPolicySingleDeleteFail", testResyncPolicyDeleteFail) policyTestVars.mockKvBroker.ClearDs() t.Run("testResyncPolicyUpdateFail", testResyncPolicyUpdateFail) policyTestVars.mockKvBroker.ClearDs() t.Run("testResyncPolicyAddFailAndDataStoreDown", testResyncPolicyAddFailAndDataStoreDown) policyTestVars.mockKvBroker.ClearDs() t.Run("testResyncPolicyDataStoreDownThenAdd", testResyncPolicyDataStoreDownThenAdd) policyTestVars.mockKvBroker.ClearDs() t.Run("testResyncPolicyTransientDsError", testResyncPolicyTransientDsError) } func testAddDeletePolicy(t *testing.T) { // Test the policy add operation for _, k8sPolicy := range policyTestVars.policyTestData { // Take a snapshot of counters adds := policyTestVars.policyReflector.GetStats().Adds argErrs := policyTestVars.policyReflector.GetStats().ArgErrors // Test add with wrong argument type policyTestVars.k8sListWatch.Add(k8sPolicy) gomega.Expect(argErrs + 1).To(gomega.Equal(policyTestVars.policyReflector.GetStats().ArgErrors)) gomega.Expect(adds).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Adds)) // Test add where everything should be good policyTestVars.k8sListWatch.Add(&k8sPolicy) key := policy.Key(k8sPolicy.GetName(), k8sPolicy.GetNamespace()) protoPolicy := &policy.Policy{} found, _, err := policyTestVars.mockKvBroker.GetValue(key, protoPolicy) gomega.Expect(found).To(gomega.BeTrue()) gomega.Expect(err).To(gomega.BeNil()) gomega.Expect(adds + 1).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Adds)) gomega.Expect(protoPolicy).NotTo(gomega.BeNil()) checkPolicyToProtoTranslation(t, protoPolicy, &k8sPolicy) } // Test the policy delete operation for _, k8sPolicy := range policyTestVars.policyTestData { // Take a snapshot of counters dels := 
	// NOTE(review): this chunk begins mid-function; the expression below is the
	// tail of a deletes-counter snapshot whose start lies outside this view.
	policyTestVars.policyReflector.GetStats().Deletes
	argErrs := policyTestVars.policyReflector.GetStats().ArgErrors

	// Test delete with wrong argument type (value instead of pointer): only
	// the ArgErrors counter may move, Deletes must stay unchanged.
	policyTestVars.k8sListWatch.Delete(k8sPolicy)
	gomega.Expect(argErrs + 1).To(gomega.Equal(policyTestVars.policyReflector.GetStats().ArgErrors))
	gomega.Expect(dels).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Deletes))

	// Test delete where everything should be good
	policyTestVars.k8sListWatch.Delete(&k8sPolicy)
	gomega.Expect(dels + 1).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Deletes))

	// The deleted policy must no longer be present in the mock data store.
	key := policy.Key(k8sPolicy.GetName(), k8sPolicy.GetNamespace())
	protoPolicy := &policy.Policy{}
	found, _, err := policyTestVars.mockKvBroker.GetValue(key, protoPolicy)
	gomega.Expect(found).To(gomega.BeFalse())
	gomega.Ω(err).Should(gomega.Succeed())
	}

	policyTestVars.policyReflector.Log.Infof("%s: data sync done, gauges: %+v",
		policyTestVars.policyReflector.objType, policyTestVars.policyReflector.stats)
}

// testUpdatePolicy exercises the reflector's Update path: a wrong argument
// type must only bump ArgErrors, an unchanged object must not trigger an
// update, and a genuine change must be translated and written to the data
// store.
func testUpdatePolicy(t *testing.T) {
	// Prepare test data: deep-copy an existing policy via a JSON round-trip.
	k8sPolicyOld := &policyTestVars.policyTestData[0]
	tmpBuf, err := json.Marshal(k8sPolicyOld)
	gomega.Ω(err).Should(gomega.Succeed())
	k8sPolicyNew := &networkingV1.NetworkPolicy{}
	err = json.Unmarshal(tmpBuf, k8sPolicyNew)
	gomega.Ω(err).Should(gomega.Succeed())

	// Take a snapshot of counters
	upds := policyTestVars.policyReflector.GetStats().Updates
	argErrs := policyTestVars.policyReflector.GetStats().ArgErrors

	// Test update with wrong argument type (values instead of pointers)
	policyTestVars.k8sListWatch.Update(*k8sPolicyOld, *k8sPolicyNew)
	gomega.Expect(argErrs + 1).To(gomega.Equal(policyTestVars.policyReflector.GetStats().ArgErrors))
	gomega.Expect(upds).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Updates))

	// Ensure that there is no update if old and new values are the same
	policyTestVars.k8sListWatch.Update(k8sPolicyOld, k8sPolicyNew)
	gomega.Expect(upds).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Updates))

	// Test update where everything should be good: make the new policy differ
	// from the old one by appending an egress rule.
	k8sPolicyNew.Spec.Egress = append(k8sPolicyNew.Spec.Egress, networkingV1.NetworkPolicyEgressRule{
		Ports: []networkingV1.NetworkPolicyPort{
			{
				Port: &intstr.IntOrString{
					Type:   intstr.String,
					StrVal: "my_name",
				},
			},
		},
		To: []networkingV1.NetworkPolicyPeer{
			{
				NamespaceSelector: &metaV1.LabelSelector{
					MatchLabels:      map[string]string{"key1": "name1"},
					MatchExpressions: []metaV1.LabelSelectorRequirement{},
				},
			},
			{
				PodSelector: &metaV1.LabelSelector{
					MatchLabels:      map[string]string{"key2": "name2"},
					MatchExpressions: []metaV1.LabelSelectorRequirement{},
				},
			},
		},
	})
	policyTestVars.k8sListWatch.Update(k8sPolicyOld, k8sPolicyNew)
	gomega.Expect(upds + 1).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Updates))

	// The data store must now hold the translation of the updated policy.
	key := policy.Key(k8sPolicyOld.GetName(), k8sPolicyOld.GetNamespace())
	protoPolicyNew := &policy.Policy{}
	found, _, err := policyTestVars.mockKvBroker.GetValue(key, protoPolicyNew)
	gomega.Expect(found).To(gomega.BeTrue())
	gomega.Ω(err).Should(gomega.Succeed())

	checkPolicyToProtoTranslation(t, protoPolicyNew, k8sPolicyNew)

	policyTestVars.policyReflector.Log.Infof("%s: data sync done, gauges: %+v",
		policyTestVars.policyReflector.objType, policyTestVars.policyReflector.stats)
}

// testResyncPolicyAddFail verifies that an Add which hits injected broker /
// Lister errors triggers data-store resyncs, and that the reflector's error
// counters account for every injected failure.
func testResyncPolicyAddFail(t *testing.T) {
	// Set the mock K8s cache to expect 3 values.
	MockK8sCache.ListFunc = func() []interface{} {
		return []interface{}{
			&policyTestVars.policyTestData[0],
			&policyTestVars.policyTestData[1],
			&policyTestVars.policyTestData[2],
		}
	}

	// Take a snapshot of reflector counters
	sSnap := *policyTestVars.policyReflector.GetStats()

	// Add two elements
	policyTestVars.k8sListWatch.Add(&policyTestVars.policyTestData[0])
	policyTestVars.k8sListWatch.Add(&policyTestVars.policyTestData[1])
	gomega.Expect(policyTestVars.mockKvBroker.ds).Should(gomega.HaveLen(2))
	gomega.Expect(sSnap.Adds + 2).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Adds))
	gomega.Expect(sSnap.AddErrors).To(gomega.Equal(policyTestVars.policyReflector.GetStats().AddErrors))

	// Injecting two errors into the broker and one error in the Lister will test
	// the data sync good path and all error paths
	policyTestVars.mockKvBroker.injectListError(fmt.Errorf("%s", "Lister test error"), 1)
	policyTestVars.mockKvBroker.injectReadWriteError(fmt.Errorf("%s", "Read/write test error"), 2)

	policyTestVars.k8sListWatch.Add(&policyTestVars.policyTestData[2])

	// Wait for the resync to finish
	for {
		if policyTestVars.policyReflector.HasSynced() {
			break
		}
		time.Sleep(time.Millisecond * 100)
	}

	policyTestVars.policyReflector.Log.Infof("*** data sync done:\nsSnap: %+v\ngauges: %+v",
		sSnap, policyTestVars.policyReflector.stats)

	// Three adds total; each injected error shows up as one AddError and one
	// ResError, and each retry bumps Resyncs.
	gomega.Expect(sSnap.Adds + 3).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Adds))
	gomega.Expect(sSnap.Updates).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Updates))
	gomega.Expect(sSnap.Deletes).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Deletes))
	gomega.Expect(sSnap.Resyncs + 3).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Resyncs))
	gomega.Expect(sSnap.AddErrors + 2).To(gomega.Equal(policyTestVars.policyReflector.GetStats().AddErrors))
	gomega.Expect(sSnap.UpdErrors).To(gomega.Equal(policyTestVars.policyReflector.GetStats().UpdErrors))
	gomega.Expect(sSnap.DelErrors).To(gomega.Equal(policyTestVars.policyReflector.GetStats().DelErrors))
	gomega.Expect(sSnap.ResErrors + 2).To(gomega.Equal(policyTestVars.policyReflector.GetStats().ResErrors))

	gomega.Expect(policyTestVars.mockKvBroker.ds).Should(gomega.HaveLen(3))
	key := policy.Key(policyTestVars.policyTestData[2].GetName(), policyTestVars.policyTestData[2].GetNamespace())
	protoPolicy := &policy.Policy{}
	found, _, err := policyTestVars.mockKvBroker.GetValue(key, protoPolicy)
	gomega.Expect(found).To(gomega.BeTrue())
	gomega.Ω(err).Should(gomega.Succeed())
	checkPolicyToProtoTranslation(t, protoPolicy, &policyTestVars.policyTestData[2])
}

// testResyncPolicyDeleteFail verifies that a Delete which hits injected
// broker / Lister errors still converges: the element is removed from the
// data store after the resync retries.
func testResyncPolicyDeleteFail(t *testing.T) {
	// Set the mock K8s cache to expect 3 values.
	MockK8sCache.ListFunc = func() []interface{} {
		return []interface{}{
			&policyTestVars.policyTestData[0],
			&policyTestVars.policyTestData[2],
		}
	}

	// Take a snapshot of reflector counters
	sSnap := *policyTestVars.policyReflector.GetStats()

	// Add three elements
	policyTestVars.k8sListWatch.Add(&policyTestVars.policyTestData[0])
	policyTestVars.k8sListWatch.Add(&policyTestVars.policyTestData[1])
	policyTestVars.k8sListWatch.Add(&policyTestVars.policyTestData[2])
	gomega.Expect(sSnap.Adds + 3).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Adds))
	gomega.Expect(len(policyTestVars.mockKvBroker.ds)).To(gomega.Equal(3))

	// Injecting two errors into the broker and one error in the Lister will test
	// the data sync good path and all error paths
	policyTestVars.mockKvBroker.injectListError(fmt.Errorf("%s", "Lister test error"), 1)
	policyTestVars.mockKvBroker.injectReadWriteError(fmt.Errorf("%s", "Read/write test error"), 2)

	// Delete an element, write error happens during delete
	policyTestVars.k8sListWatch.Delete(&policyTestVars.policyTestData[1])

	// Wait for the resync to finish
	for {
		if policyTestVars.policyReflector.HasSynced() {
			break
		}
		time.Sleep(time.Millisecond * 100)
	}

	policyTestVars.policyReflector.Log.Infof("*** data sync done:\nsSnap: %+v\ngauges: %+v",
		sSnap, policyTestVars.policyReflector.stats)

	gomega.Expect(sSnap.Adds + 3).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Adds))
	gomega.Expect(sSnap.Updates).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Updates))
	gomega.Expect(sSnap.Deletes + 1).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Deletes))
	gomega.Expect(sSnap.Resyncs + 3).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Resyncs))
	gomega.Expect(sSnap.AddErrors).To(gomega.Equal(policyTestVars.policyReflector.GetStats().AddErrors))
	gomega.Expect(sSnap.UpdErrors).To(gomega.Equal(policyTestVars.policyReflector.GetStats().UpdErrors))
	gomega.Expect(sSnap.DelErrors + 2).To(gomega.Equal(policyTestVars.policyReflector.GetStats().DelErrors))
	gomega.Expect(sSnap.ResErrors + 2).To(gomega.Equal(policyTestVars.policyReflector.GetStats().ResErrors))
	gomega.Expect(policyTestVars.mockKvBroker.ds).Should(gomega.HaveLen(2))
	// NOTE(review): name comes from testData[1] but namespace from testData[2];
	// this looks like a copy-paste slip — it only works if all test policies
	// share the same namespace. TODO confirm against the test fixtures.
	key := policy.Key(policyTestVars.policyTestData[1].GetName(), policyTestVars.policyTestData[2].GetNamespace())
	protoPolicy := &policy.Policy{}
	found, _, err := policyTestVars.mockKvBroker.GetValue(key, protoPolicy)
	gomega.Expect(found).To(gomega.BeFalse())
	gomega.Ω(err).Should(gomega.Succeed())
}

// testResyncPolicyUpdateFail verifies that an Update which hits injected
// broker / Lister errors converges to the updated value in the data store.
func testResyncPolicyUpdateFail(t *testing.T) {
	// Deep copy an existing (old) policy into an updated (new) policy
	k8sPolicyOld := &policyTestVars.policyTestData[0]
	tmpBuf, err := json.Marshal(k8sPolicyOld)
	gomega.Ω(err).Should(gomega.Succeed())
	k8sPolicyNew := &networkingV1.NetworkPolicy{}
	err = json.Unmarshal(tmpBuf, k8sPolicyNew)
	gomega.Ω(err).Should(gomega.Succeed())

	// Take a snapshot of reflector counters
	sSnap := *policyTestVars.policyReflector.GetStats()

	// Add three elements
	policyTestVars.k8sListWatch.Add(k8sPolicyOld)
	policyTestVars.k8sListWatch.Add(&policyTestVars.policyTestData[1])
	policyTestVars.k8sListWatch.Add(&policyTestVars.policyTestData[2])
	gomega.Expect(sSnap.Adds + 3).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Adds))

	// Test update where everything should be good: make the new policy differ
	// by appending an egress rule.
	k8sPolicyNew.Spec.Egress = append(k8sPolicyNew.Spec.Egress, networkingV1.NetworkPolicyEgressRule{
		Ports: []networkingV1.NetworkPolicyPort{
			{
				Port: &intstr.IntOrString{
					Type:   intstr.String,
					StrVal: "my_name",
				},
			},
		},
		To: []networkingV1.NetworkPolicyPeer{
			{
				NamespaceSelector: &metaV1.LabelSelector{
					MatchLabels:      map[string]string{"key1": "name1"},
					MatchExpressions: []metaV1.LabelSelectorRequirement{},
				},
			},
			{
				PodSelector: &metaV1.LabelSelector{
					MatchLabels:      map[string]string{"key2": "name2"},
					MatchExpressions: []metaV1.LabelSelectorRequirement{},
				},
			},
		},
	})

	// Set the mock K8s cache to expect 3 values.
	MockK8sCache.ListFunc = func() []interface{} {
		return []interface{}{
			k8sPolicyNew,
			&policyTestVars.policyTestData[2],
			&policyTestVars.policyTestData[1],
		}
	}

	// Injecting two errors into the broker and one error in the Lister will test
	// the data sync good path and all error paths
	policyTestVars.mockKvBroker.injectListError(fmt.Errorf("%s", "Lister test error"), 1)
	policyTestVars.mockKvBroker.injectReadWriteError(fmt.Errorf("%s", "Read/write test error"), 2)

	// Update an element; a write error happens during the update
	policyTestVars.k8sListWatch.Update(k8sPolicyOld, k8sPolicyNew)

	// Wait for the resync to finish
	for {
		if policyTestVars.policyReflector.HasSynced() {
			break
		}
		time.Sleep(time.Millisecond * 100)
	}

	policyTestVars.policyReflector.Log.Infof("*** data sync done:\nsSnap: %+v\ngauges: %+v",
		sSnap, policyTestVars.policyReflector.stats)

	gomega.Expect(sSnap.Adds + 3).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Adds))
	gomega.Expect(sSnap.Deletes).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Deletes))
	gomega.Expect(policyTestVars.policyReflector.GetStats().Updates).To(gomega.Equal(sSnap.Updates + 1))
	gomega.Expect(sSnap.Resyncs + 3).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Resyncs))
	gomega.Expect(sSnap.AddErrors).To(gomega.Equal(policyTestVars.policyReflector.GetStats().AddErrors))
	gomega.Expect(sSnap.DelErrors).To(gomega.Equal(policyTestVars.policyReflector.GetStats().DelErrors))
	gomega.Expect(sSnap.UpdErrors + 2).To(gomega.Equal(policyTestVars.policyReflector.GetStats().UpdErrors))
	gomega.Expect(sSnap.ResErrors + 2).To(gomega.Equal(policyTestVars.policyReflector.GetStats().ResErrors))
	gomega.Expect(policyTestVars.mockKvBroker.ds).Should(gomega.HaveLen(3))
	key := policy.Key(k8sPolicyNew.GetName(), k8sPolicyNew.GetNamespace())
	protoPolicy := &policy.Policy{}
	found, _, err := policyTestVars.mockKvBroker.GetValue(key, protoPolicy)
	gomega.Expect(found).To(gomega.BeTrue())
	gomega.Ω(err).Should(gomega.Succeed())
	checkPolicyToProtoTranslation(t, protoPolicy, k8sPolicyNew)
}

// testResyncPolicyAddFailAndDataStoreDown verifies that the resync retry loop
// is aborted by a 'data store down' signal and resumed after 'data store up',
// eventually converging on the expected data-store contents.
func testResyncPolicyAddFailAndDataStoreDown(t *testing.T) {
	// Set the mock K8s cache to expect 3 values.
	MockK8sCache.ListFunc = func() []interface{} {
		return []interface{}{
			&policyTestVars.policyTestData[0],
			&policyTestVars.policyTestData[1],
			&policyTestVars.policyTestData[2],
		}
	}

	// Take a snapshot of counters
	sSnap := *policyTestVars.policyReflector.GetStats()

	// Add two elements
	policyTestVars.k8sListWatch.Add(&policyTestVars.policyTestData[0])
	policyTestVars.k8sListWatch.Add(&policyTestVars.policyTestData[1])
	gomega.Expect(policyTestVars.mockKvBroker.ds).Should(gomega.HaveLen(2))
	gomega.Expect(sSnap.Adds + 2).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Adds))
	gomega.Expect(sSnap.AddErrors).To(gomega.Equal(policyTestVars.policyReflector.GetStats().AddErrors))

	// Injecting an "infinite number" of errors into the broker will keep
	// data sync in the mark-and-sweep retry loop so that we can inject the
	// 'data store down' signal and thus abort the loop.
	// Injecting two errors into the broker and one error in the Lister will test
	// the data sync good path and all error paths
	policyTestVars.mockKvBroker.injectListError(fmt.Errorf("%s", "Lister test error"), 1)
	policyTestVars.mockKvBroker.injectReadWriteError(fmt.Errorf("%s", "Read/write test error"), 1000)

	policyTestVars.k8sListWatch.Add(&policyTestVars.policyTestData[2])

	// Emulate the data store down/up sequence
	go func() {
		time.Sleep(time.Second)
		policyTestVars.reflectorRegistry.dataStoreDownEvent()
		time.Sleep(time.Second)
		policyTestVars.mockKvBroker.clearReadWriteError()
		policyTestVars.reflectorRegistry.dataStoreUpEvent()
	}()

	// Wait for the resync to finish
	for {
		if policyTestVars.policyReflector.HasSynced() {
			break
		}
		time.Sleep(time.Millisecond * 100)
	}

	policyTestVars.policyReflector.Log.Infof("*** data sync done:\nsSnap: %+v\ngauges: %+v",
		sSnap, policyTestVars.policyReflector.stats)

	gomega.Expect(sSnap.Adds + 3).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Adds))
	gomega.Expect(sSnap.Updates).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Updates))
	// The exact number of retries is timing-dependent; only assert that add
	// errors and resync errors grew by the same amount.
	gomega.Expect(policyTestVars.policyReflector.GetStats().AddErrors - sSnap.AddErrors).
		To(gomega.Equal(policyTestVars.policyReflector.GetStats().ResErrors - sSnap.ResErrors))

	gomega.Expect(policyTestVars.mockKvBroker.ds).Should(gomega.HaveLen(3))
	key := policy.Key(policyTestVars.policyTestData[2].GetName(), policyTestVars.policyTestData[2].GetNamespace())
	protoPolicy := &policy.Policy{}
	found, _, err := policyTestVars.mockKvBroker.GetValue(key, protoPolicy)
	gomega.Expect(found).To(gomega.BeTrue())
	gomega.Ω(err).Should(gomega.Succeed())
	checkPolicyToProtoTranslation(t, protoPolicy, &policyTestVars.policyTestData[2])
}

// testResyncPolicyDataStoreDownThenAdd verifies that an Add performed while
// the data store is marked down is reconciled by the resync triggered on the
// subsequent 'data store up' event.
func testResyncPolicyDataStoreDownThenAdd(t *testing.T) {
	// Set the mock K8s cache to expect 3 values.
	MockK8sCache.ListFunc = func() []interface{} {
		return []interface{}{
			&policyTestVars.policyTestData[2],
			&policyTestVars.policyTestData[0],
			&policyTestVars.policyTestData[1],
		}
	}

	// Take a snapshot of reflector counters
	sSnap := *policyTestVars.policyReflector.GetStats()

	// Add two elements
	policyTestVars.k8sListWatch.Add(&policyTestVars.policyTestData[0])
	policyTestVars.k8sListWatch.Add(&policyTestVars.policyTestData[1])
	gomega.Expect(policyTestVars.mockKvBroker.ds).Should(gomega.HaveLen(2))
	gomega.Expect(sSnap.Adds + 2).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Adds))
	gomega.Expect(sSnap.AddErrors).To(gomega.Equal(policyTestVars.policyReflector.GetStats().AddErrors))

	etcdMonitor := EtcdMonitor{
		status:  status.OperationalState_OK,
		lastRev: 0,
		broker:  policyTestVars.mockKvBroker,
		rr:      &policyTestVars.reflectorRegistry,
	}

	// Emulate a 'data store down' event
	etcdMonitor.processEtcdMonitorEvent(status.OperationalState_ERROR)

	policyTestVars.k8sListWatch.Add(&policyTestVars.policyTestData[2])

	// Emulate a 'data store up' event
	etcdMonitor.processEtcdMonitorEvent(status.OperationalState_OK)

	// Wait for the resync to finish
	for {
		if policyTestVars.policyReflector.HasSynced() {
			break
		}
		time.Sleep(time.Millisecond * 100)
	}

	policyTestVars.policyReflector.Log.Infof("*** data sync done:\nsSnap: %+v\ngauges: %+v",
		sSnap, policyTestVars.policyReflector.stats)

	gomega.Expect(sSnap.Adds + 3).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Adds))
	gomega.Expect(sSnap.Updates).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Updates))
	gomega.Expect(sSnap.Deletes).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Deletes))
	gomega.Expect(sSnap.Resyncs + 1).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Resyncs))
	gomega.Expect(sSnap.AddErrors).To(gomega.Equal(policyTestVars.policyReflector.GetStats().AddErrors))
	gomega.Expect(sSnap.UpdErrors).To(gomega.Equal(policyTestVars.policyReflector.GetStats().UpdErrors))
	gomega.Expect(sSnap.DelErrors).To(gomega.Equal(policyTestVars.policyReflector.GetStats().DelErrors))
	gomega.Expect(sSnap.ResErrors).To(gomega.Equal(policyTestVars.policyReflector.GetStats().ResErrors))
	gomega.Expect(policyTestVars.mockKvBroker.ds).Should(gomega.HaveLen(3))
	key := policy.Key(policyTestVars.policyTestData[2].GetName(), policyTestVars.policyTestData[2].GetNamespace())
	protoPolicy := &policy.Policy{}
	found, _, err := policyTestVars.mockKvBroker.GetValue(key, protoPolicy)
	gomega.Expect(found).To(gomega.BeTrue())
	gomega.Ω(err).Should(gomega.Succeed())
	checkPolicyToProtoTranslation(t, protoPolicy, &policyTestVars.policyTestData[2])
}

// testResyncPolicyTransientDsError verifies that the etcd monitor's periodic
// transient-error check detects a data-store wipe and triggers a resync that
// restores the data.
func testResyncPolicyTransientDsError(t *testing.T) {
	// Set the mock K8s cache to expect 3 values.
	MockK8sCache.ListFunc = func() []interface{} {
		return []interface{}{
			&policyTestVars.policyTestData[2],
			&policyTestVars.policyTestData[1],
			&policyTestVars.policyTestData[0],
		}
	}

	// Take a snapshot of reflector counters
	sSnap := *policyTestVars.policyReflector.GetStats()

	// Add three elements
	policyTestVars.k8sListWatch.Add(&policyTestVars.policyTestData[0])
	policyTestVars.k8sListWatch.Add(&policyTestVars.policyTestData[1])
	policyTestVars.k8sListWatch.Add(&policyTestVars.policyTestData[2])
	gomega.Expect(sSnap.Adds + 3).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Adds))
	gomega.Expect(len(policyTestVars.mockKvBroker.ds)).To(gomega.Equal(3))

	etcdMonitor := EtcdMonitor{
		status:  status.OperationalState_OK,
		lastRev: 0,
		broker:  policyTestVars.mockKvBroker,
		rr:      &policyTestVars.reflectorRegistry,
	}

	// Do the initial check for transient errors; each check writes the KSR
	// stats record, bumping its revision.
	etcdMonitor.checkEtcdTransientError()
	stsKey := ksrapi.Key("gauges")
	ksrStats := ksrapi.Stats{}
	found, rev, err := policyTestVars.mockKvBroker.GetValue(stsKey, &ksrStats)
	gomega.Expect(found).To(gomega.BeTrue())
	gomega.Expect(err).To(gomega.BeNil())
	gomega.Expect(rev).Should(gomega.BeNumerically("==", 1))

	// Do another check for transient errors
	etcdMonitor.checkEtcdTransientError()
	found, rev, err = policyTestVars.mockKvBroker.GetValue(stsKey, &ksrStats)
	gomega.Expect(found).To(gomega.BeTrue())
	gomega.Expect(err).To(gomega.BeNil())
	gomega.Expect(rev).Should(gomega.BeNumerically("==", 2))

	// Emulate a transient data store error with data loss
	policyTestVars.mockKvBroker.ClearDs()
	gomega.Expect(len(policyTestVars.mockKvBroker.ds)).To(gomega.Equal(0))

	// Do another check for transient errors which should detect a transient
	// error and trigger a data store resync.
	etcdMonitor.checkEtcdTransientError()

	// Wait for data store to finish
	for {
		if policyTestVars.policyReflector.HasSynced() {
			break
		}
		time.Sleep(time.Millisecond * 100)
	}

	policyTestVars.policyReflector.Log.Infof("*** data sync done:\nsSnap: %+v\ngauges: %+v",
		sSnap, policyTestVars.policyReflector.stats)

	// Check that the data store resync happened and that we have the
	// data back in the data store
	gomega.Expect(sSnap.Adds + 6).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Adds))
	gomega.Expect(len(policyTestVars.mockKvBroker.ds)).To(gomega.Equal(3))
	gomega.Expect(sSnap.Resyncs + 1).To(gomega.Equal(policyTestVars.policyReflector.GetStats().Resyncs))
}

// checkPolicyToProtoTranslation checks whether the translation of a K8s policy
// into the Contiv-VPP protobuf format is correct.
func checkPolicyToProtoTranslation(t *testing.T, protoNp *policy.Policy, k8sNp *networkingV1.NetworkPolicy) { gomega.Expect(protoNp.Name).To(gomega.Equal(k8sNp.GetName())) gomega.Expect(protoNp.Namespace).To(gomega.Equal(k8sNp.GetNamespace())) gomega.Expect(len(protoNp.Label)).To(gomega.Equal(len(k8sNp.Labels))) // Check labels for _, lbl := range protoNp.Label { gomega.Expect(lbl.Value).To(gomega.Equal(k8sNp.Labels[lbl.Key])) } // Check pod selectors checkLabelSelector(protoNp.Pods, &k8sNp.Spec.PodSelector) // Check policy type checkPolicyType(protoNp.PolicyType, k8sNp.Spec.PolicyTypes) // Check ingress rules gomega.Expect(len(protoNp.IngressRule)).To(gomega.Equal(len(k8sNp.Spec.Ingress))) for i, rule := range protoNp.IngressRule { // Check port translations checkRulePorts(rule.Port, k8sNp.Spec.Ingress[i].Ports) // Check peer translations checkRulePeers(rule.From, k8sNp.Spec.Ingress[i].From) } // Check egress rules gomega.Expect(len(protoNp.EgressRule)).To(gomega.Equal(len(k8sNp.Spec.Egress))) for i, rule := range protoNp.EgressRule { // Check port translations checkRulePorts(rule.Port, k8sNp.Spec.Egress[i].Ports) // Check peer translations checkRulePeers(rule.To, k8sNp.Spec.Egress[i].To) } } // checkLabelSelector checks whether the translation of K8s label selector // into the Contiv-VPP protobuf format is correct. 
func checkLabelSelector(protoLbl *policy.Policy_LabelSelector, k8sLbl *metaV1.LabelSelector) { gomega.Expect(len(protoLbl.MatchLabel)).To(gomega.Equal(len(k8sLbl.MatchLabels))) for _, lbl := range protoLbl.MatchLabel { gomega.Expect(lbl.Value).To(gomega.Equal(k8sLbl.MatchLabels[lbl.Key])) } gomega.Expect(len(protoLbl.MatchExpression)).To(gomega.Equal(len(k8sLbl.MatchExpressions))) for i, expr := range protoLbl.MatchExpression { k8sExpr := k8sLbl.MatchExpressions[i] gomega.Expect(expr.Key).To(gomega.Equal(k8sExpr.Key)) gomega.Expect(expr.Value).To(gomega.BeEquivalentTo(k8sExpr.Values)) gomega.Expect(expr.Operator.String()).To(gomega.BeEquivalentTo(k8sExpr.Operator)) } } // checkRulePorts checks whether the translation of K8s policy rules ports // into the Contiv-VPP protobuf format is correct. func checkRulePorts(protoPorts []*policy.Policy_Port, k8sPorts []networkingV1.NetworkPolicyPort) { gomega.Expect(len(protoPorts)).To(gomega.Equal(len(k8sPorts))) for j, protoPort := range protoPorts { switch protoPort.Port.Type { case policy.Policy_Port_PortNameOrNumber_NUMBER: gomega.Expect(k8sPorts[j].Port.Type).To(gomega.Equal(intstr.Int)) gomega.Expect(protoPort.Port.Number).To(gomega.Equal(k8sPorts[j].Port.IntVal)) case policy.Policy_Port_PortNameOrNumber_NAME: gomega.Expect(k8sPorts[j].Port.Type).To(gomega.Equal(intstr.String)) gomega.Expect(protoPort.Port.Name).To(gomega.Equal(k8sPorts[j].Port.StrVal)) default: gomega.Panic() } if k8sPorts[j].Protocol == nil { gomega.Expect(protoPort.Protocol).To(gomega.BeNumerically("==", policy.Policy_Port_TCP)) } else { gomega.Expect(protoPort.Protocol.String()).To(gomega.BeEquivalentTo(*k8sPorts[j].Protocol)) } } } // checkRulePeers checks whether the translation of K8s policy rules peers // into the Contiv-VPP protobuf format is correct. 
func checkRulePeers(protoPeers []*policy.Policy_Peer, k8sPeers []networkingV1.NetworkPolicyPeer) { gomega.Expect(len(protoPeers)).To(gomega.Equal(len(k8sPeers))) for j, protoPeer := range protoPeers { k8sPeer := k8sPeers[j] // Check pod selector translation if protoPeer.Pods != nil { checkLabelSelector(protoPeer.Pods, k8sPeer.PodSelector) } else { gomega.Expect(k8sPeer.PodSelector).Should(gomega.BeNil()) } // Check Namespace selector translation if protoPeer.Namespaces != nil { checkLabelSelector(protoPeer.Namespaces, k8sPeer.NamespaceSelector) } else { gomega.Expect(k8sPeer.NamespaceSelector).Should(gomega.BeNil()) } // Check IP Block translation if protoPeer.IpBlock != nil { gomega.Expect(protoPeer.IpBlock.Cidr).To(gomega.Equal(k8sPeer.IPBlock.CIDR)) gomega.Expect(protoPeer.IpBlock.Except).To(gomega.BeEquivalentTo(k8sPeer.IPBlock.Except)) } else { gomega.Expect(k8sPeer.IPBlock).Should(gomega.BeNil()) } } } // checkPolicyType checks whether the translation of K8s policy type into // the Contiv-VPP protobuf format is correct. func checkPolicyType(protoPtype policy.Policy_PolicyType, k8sPtypes []networkingV1.PolicyType) { switch protoPtype { case policy.Policy_INGRESS: gomega.Expect(len(k8sPtypes)).To(gomega.Equal(1)) gomega.Expect(k8sPtypes[0]).To(gomega.BeEquivalentTo(networkingV1.PolicyTypeIngress)) case policy.Policy_EGRESS: gomega.Expect(len(k8sPtypes)).To(gomega.Equal(1)) gomega.Expect(k8sPtypes[0]).To(gomega.BeEquivalentTo(networkingV1.PolicyTypeEgress)) case policy.Policy_INGRESS_AND_EGRESS: gomega.Expect(len(k8sPtypes)).To(gomega.Equal(2)) gomega.Expect(stringsInSlice([]networkingV1.PolicyType{ networkingV1.PolicyTypeEgress, networkingV1.PolicyTypeIngress, }, k8sPtypes)).To(gomega.BeTrue()) case policy.Policy_DEFAULT: gomega.Expect(len(k8sPtypes)).To(gomega.Equal(0)) } } // stringsInSlice ensures that K8sPolicyTypes contains all policy types // listed in 'pd'. 
func stringsInSlice(pd []networkingV1.PolicyType, K8sPolicyTypes []networkingV1.PolicyType) bool { loop: for _, s := range pd { for _, v := range K8sPolicyTypes { if v == s { continue loop } } return false } return true }
rastislavszabo/vpp
plugins/ksr/policy_reflector_test.go
GO
apache-2.0
39,020
/**
 * Copyright (C) 2015 The Gravitee team (http://gravitee.io)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *         http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.gravitee.rest.api.service.impl;

import io.gravitee.rest.api.service.ApiKeyGenerator;
import java.util.UUID;
import org.springframework.stereotype.Component;

/**
 * {@link ApiKeyGenerator} implementation backed by randomly generated
 * (type 4) UUIDs.
 *
 * @author David BRASSELY (david.brassely at graviteesource.com)
 * @author GraviteeSource Team
 */
@Component
public class UUIDApiKeyGenerator implements ApiKeyGenerator {

    /**
     * Generates a fresh API key.
     *
     * @return the canonical string representation of a random UUID
     */
    @Override
    public String generate() {
        final UUID apiKey = UUID.randomUUID();
        return apiKey.toString();
    }
}
gravitee-io/gravitee-management-rest-api
gravitee-rest-api-service/src/main/java/io/gravitee/rest/api/service/impl/UUIDApiKeyGenerator.java
Java
apache-2.0
1,077
package com.sfl.pms.services.payment.method.group;

import com.sfl.pms.services.payment.method.AbstractPaymentMethodDefinitionService;
import com.sfl.pms.services.payment.method.AbstractPaymentMethodDefinitionServiceIntegrationTest;
import com.sfl.pms.services.payment.method.dto.group.GroupPaymentMethodDefinitionDto;
import com.sfl.pms.services.payment.method.exception.PaymentMethodDefinitionAlreadyExistsException;
import com.sfl.pms.services.payment.method.model.group.GroupPaymentMethodDefinition;
import org.junit.Test;
import org.springframework.beans.factory.annotation.Autowired;

import static org.junit.Assert.*;

/**
 * Integration tests for the group payment method definition service:
 * lookup by parameters, existence checks and creation (including the
 * duplicate-creation error path). State is verified both before and after
 * a persistence-context flush/clear to catch first-level-cache artifacts.
 *
 * User: Ruben Dilanyan
 * Company: SFL LLC
 * Date: 7/25/15
 * Time: 2:19 PM
 */
public class GroupPaymentMethodDefinitionServiceIntegrationTest extends AbstractPaymentMethodDefinitionServiceIntegrationTest<GroupPaymentMethodDefinition> {

    /* Dependencies */
    @Autowired
    private GroupPaymentMethodDefinitionService groupPaymentMethodDefinitionService;

    /* Constructors */
    public GroupPaymentMethodDefinitionServiceIntegrationTest() {
    }

    /* Test methods */

    // Lookup must return the created definition, both from the persistence
    // context and after a flush/clear (i.e. straight from the database).
    @Test
    public void testGetPaymentMethodDefinitionForLookupParameters() {
        // Prepare data
        final GroupPaymentMethodDefinitionDto paymentMethodDefinitionDto = getServicesTestHelper().createGroupPaymentMethodDefinitionDto();
        final GroupPaymentMethodDefinition paymentMethodDefinition = groupPaymentMethodDefinitionService.createPaymentMethodDefinition(paymentMethodDefinitionDto);
        // Try to load payment method for look up parameters
        GroupPaymentMethodDefinition result = groupPaymentMethodDefinitionService.getPaymentMethodDefinitionForLookupParameters(paymentMethodDefinition.getPaymentMethodGroupType(), paymentMethodDefinitionDto.getCurrency(), paymentMethodDefinitionDto.getPaymentProviderType());
        assertEquals(paymentMethodDefinition, result);
        // Flush, clear and assert again
        flushAndClear();
        result = groupPaymentMethodDefinitionService.getPaymentMethodDefinitionForLookupParameters(paymentMethodDefinition.getPaymentMethodGroupType(), paymentMethodDefinitionDto.getCurrency(), paymentMethodDefinitionDto.getPaymentProviderType());
        assertEquals(paymentMethodDefinition, result);
    }

    // Existence check must flip from false to true once the definition is
    // created, and must remain true after a flush/clear.
    @Test
    public void testCheckIfPaymentMethodDefinitionExistsForLookupParameters() {
        // Prepare data
        final GroupPaymentMethodDefinitionDto paymentMethodDefinitionDto = getServicesTestHelper().createGroupPaymentMethodDefinitionDto();
        // Check if payment method definition exists
        boolean result = groupPaymentMethodDefinitionService.checkIfPaymentMethodDefinitionExistsForLookupParameters(paymentMethodDefinitionDto.getPaymentMethodGroupType(), paymentMethodDefinitionDto.getCurrency(), paymentMethodDefinitionDto.getPaymentProviderType());
        assertFalse(result);
        // Create payment method definition
        final GroupPaymentMethodDefinition paymentMethodDefinition = groupPaymentMethodDefinitionService.createPaymentMethodDefinition(paymentMethodDefinitionDto);
        assertNotNull(paymentMethodDefinition);
        result = groupPaymentMethodDefinitionService.checkIfPaymentMethodDefinitionExistsForLookupParameters(paymentMethodDefinitionDto.getPaymentMethodGroupType(), paymentMethodDefinitionDto.getCurrency(), paymentMethodDefinitionDto.getPaymentProviderType());
        assertTrue(result);
        // Flush, clear, reload and assert again
        flushAndClear();
        result = groupPaymentMethodDefinitionService.checkIfPaymentMethodDefinitionExistsForLookupParameters(paymentMethodDefinitionDto.getPaymentMethodGroupType(), paymentMethodDefinitionDto.getCurrency(), paymentMethodDefinitionDto.getPaymentProviderType());
        assertTrue(result);
    }

    // Creation must persist a definition matching the DTO, verified both
    // in-context and after reload by id.
    @Test
    public void testCreatePaymentMethodDefinition() {
        // Prepare data
        final GroupPaymentMethodDefinitionDto paymentMethodDefinitionDto = getServicesTestHelper().createGroupPaymentMethodDefinitionDto();
        // Create payment method definition
        GroupPaymentMethodDefinition paymentMethodDefinition = groupPaymentMethodDefinitionService.createPaymentMethodDefinition(paymentMethodDefinitionDto);
        getServicesTestHelper().assertGroupPaymentMethodDefinition(paymentMethodDefinition, paymentMethodDefinitionDto);
        // Flush, clear, reload and assert
        flushAndClear();
        paymentMethodDefinition = groupPaymentMethodDefinitionService.getPaymentMethodDefinitionById(paymentMethodDefinition.getId());
        getServicesTestHelper().assertGroupPaymentMethodDefinition(paymentMethodDefinition, paymentMethodDefinitionDto);
    }

    // Creating the same definition twice must raise
    // PaymentMethodDefinitionAlreadyExistsException, before and after a
    // flush/clear.
    @Test
    public void testCreatePaymentMethodDefinitionWhenItAlreadyExists() {
        // Prepare data
        final GroupPaymentMethodDefinitionDto paymentMethodDefinitionDto = getServicesTestHelper().createGroupPaymentMethodDefinitionDto();
        // Create payment method definition
        final GroupPaymentMethodDefinition paymentMethodDefinition = groupPaymentMethodDefinitionService.createPaymentMethodDefinition(paymentMethodDefinitionDto);
        assertNotNull(paymentMethodDefinition);
        // Try to create again
        try {
            groupPaymentMethodDefinitionService.createPaymentMethodDefinition(paymentMethodDefinitionDto);
            fail("Exception should be thrown");
        } catch (final PaymentMethodDefinitionAlreadyExistsException ex) {
            // Expected
        }
        // Flush, clear and try again
        flushAndClear();
        try {
            groupPaymentMethodDefinitionService.createPaymentMethodDefinition(paymentMethodDefinitionDto);
            fail("Exception should be thrown");
        } catch (final PaymentMethodDefinitionAlreadyExistsException ex) {
            // Expected
        }
    }

    /* Utility methods */

    // Supplies a persisted instance for the generic base-class tests.
    @Override
    protected GroupPaymentMethodDefinition getInstance() {
        return getServicesTestHelper().createGroupPaymentMethodDefinition();
    }

    // Exposes the concrete service to the generic base-class tests.
    @Override
    protected AbstractPaymentMethodDefinitionService<GroupPaymentMethodDefinition> getService() {
        return groupPaymentMethodDefinitionService;
    }
}
sflpro/ms_payment
services/services_integrationtest/src/test/java/com/sfl/pms/services/payment/method/group/GroupPaymentMethodDefinitionServiceIntegrationTest.java
Java
apache-2.0
6,210
package org.zaproxy.zap.authentication;

import java.awt.GridBagLayout;
import java.awt.Insets;
import java.net.InetAddress;
import java.net.URI;
import java.net.UnknownHostException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import javax.swing.JLabel;

import net.sf.json.JSONObject;

import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.httpclient.NTCredentials;
import org.apache.commons.httpclient.auth.AuthScope;
import org.apache.log4j.Logger;
import org.parosproxy.paros.Constant;
import org.parosproxy.paros.db.DatabaseException;
import org.parosproxy.paros.db.RecordContext;
import org.parosproxy.paros.extension.ExtensionHook;
import org.parosproxy.paros.model.Session;
import org.zaproxy.zap.authentication.UsernamePasswordAuthenticationCredentials.UsernamePasswordAuthenticationCredentialsOptionsPanel;
import org.zaproxy.zap.extension.api.ApiDynamicActionImplementor;
import org.zaproxy.zap.extension.api.ApiException;
import org.zaproxy.zap.extension.api.ApiResponse;
import org.zaproxy.zap.extension.authentication.AuthenticationAPI;
import org.zaproxy.zap.model.Context;
import org.zaproxy.zap.session.SessionManagementMethod;
import org.zaproxy.zap.session.WebSession;
import org.zaproxy.zap.users.User;
import org.zaproxy.zap.utils.ApiUtils;
import org.zaproxy.zap.utils.ZapPortNumberSpinner;
import org.zaproxy.zap.utils.ZapTextField;
import org.zaproxy.zap.view.LayoutHelper;

/**
 * The implementation for an {@link AuthenticationMethodType} where the Users are authenticated
 * through HTTP Authentication.
 *
 * @see <a href="http://www.w3.org/Protocols/HTTP/1.0/spec.html#AA">HTTP/1.0 - Access
 *      Authentication</a>
 */
public class HttpAuthenticationMethodType extends AuthenticationMethodType {

    /** Keys used when exporting the method to / importing it from a context configuration. */
    public static final String CONTEXT_CONFIG_AUTH_HTTP = AuthenticationMethod.CONTEXT_CONFIG_AUTH + ".http";
    public static final String CONTEXT_CONFIG_AUTH_HTTP_HOSTNAME = CONTEXT_CONFIG_AUTH_HTTP + ".hostname";
    public static final String CONTEXT_CONFIG_AUTH_HTTP_REALM = CONTEXT_CONFIG_AUTH_HTTP + ".realm";
    public static final String CONTEXT_CONFIG_AUTH_HTTP_PORT = CONTEXT_CONFIG_AUTH_HTTP + ".port";

    private static final Logger log = Logger.getLogger(HttpAuthenticationMethodType.class);

    /** The unique identifier of the method. */
    private static final int METHOD_IDENTIFIER = 3;

    /** The human readable Authentication method's name. */
    private static final String METHOD_NAME = Constant.messages.getString("authentication.method.http.name");

    /** The Authentication method's name used in the API. */
    private static final String API_METHOD_NAME = "httpAuthentication";

    /**
     * The {@link AuthenticationMethod} implementation: stores the target hostname, port and realm
     * and, on {@link #authenticate}, installs the user's credentials into the session's HTTP state
     * so the HTTP client answers authentication challenges for that scope automatically.
     */
    public static class HttpAuthenticationMethod extends AuthenticationMethod {

        public HttpAuthenticationMethod() {
            super();
        }

        protected String hostname;
        protected int port = 80;
        protected String realm;

        public void setHostname(String hostname) {
            this.hostname = hostname;
        }

        public void setPort(int port) {
            this.port = port;
        }

        public void setRealm(String realm) {
            this.realm = realm;
        }

        @Override
        public boolean isConfigured() {
            // NOTE(review): a non-empty realm is required here, although authenticate() accepts
            // an empty realm and falls back to AuthScope.ANY_REALM -- confirm this is intended.
            return hostname != null && !hostname.isEmpty() && realm != null && !realm.isEmpty();
        }

        @Override
        protected AuthenticationMethod duplicate() {
            HttpAuthenticationMethod method = new HttpAuthenticationMethod();
            method.hostname = this.hostname;
            method.port = this.port;
            method.realm = this.realm;
            return method;
        }

        @Override
        public AuthenticationCredentials createAuthenticationCredentials() {
            return new UsernamePasswordAuthenticationCredentials();
        }

        @Override
        public AuthenticationMethodType getType() {
            return new HttpAuthenticationMethodType();
        }

        @Override
        public WebSession authenticate(SessionManagementMethod sessionManagementMethod,
                AuthenticationCredentials credentials, User user)
                throws UnsupportedAuthenticationCredentialsException {

            WebSession session = user.getAuthenticatedSession();
            if (session == null)
                session = sessionManagementMethod.createEmptyWebSession();

            // type check
            if (!(credentials instanceof UsernamePasswordAuthenticationCredentials)) {
                // Fixed copy/paste defect: the message previously said "Form based
                // authentication method" although this is the HTTP authentication method
                // (compare exportData()/importData() below).
                throw new UnsupportedAuthenticationCredentialsException(
                        "HTTP based authentication method only supports "
                                + UsernamePasswordAuthenticationCredentials.class.getSimpleName());
            }
            UsernamePasswordAuthenticationCredentials userCredentials = (UsernamePasswordAuthenticationCredentials) credentials;

            AuthScope stateAuthScope = null;
            NTCredentials stateCredentials = null;
            try {
                // Empty/missing realm matches any realm on the target host:port.
                stateAuthScope = new AuthScope(this.hostname, this.port,
                        (this.realm == null || this.realm.isEmpty()) ? AuthScope.ANY_REALM : this.realm);
                // NTCredentials (rather than plain UsernamePasswordCredentials) so NTLM
                // challenges are supported as well; the local host name is only used for NTLM.
                stateCredentials = new NTCredentials(userCredentials.getUsername(),
                        userCredentials.getPassword(), InetAddress.getLocalHost().getCanonicalHostName(),
                        this.realm);
                session.getHttpState().setCredentials(stateAuthScope, stateCredentials);
            } catch (UnknownHostException e1) {
                log.error(e1.getMessage(), e1);
            }
            return session;
        }

        @Override
        public ApiResponse getApiResponseRepresentation() {
            Map<String, String> values = new HashMap<>();
            values.put("methodName", API_METHOD_NAME);
            values.put("host", this.hostname);
            values.put("port", Integer.toString(this.port));
            values.put("realm", this.realm);
            return new AuthMethodApiResponseRepresentation<>(values);
        }
    }

    /**
     * The Options Panel used for configuring a {@link HttpAuthenticationMethod}.
     */
    private static class HttpAuthenticationMethodOptionsPanel extends AbstractAuthenticationMethodOptionsPanel {

        private static final long serialVersionUID = 4341092284683481288L;

        private static final String HOSTNAME_LABEL = Constant.messages
                .getString("authentication.method.http.field.label.hostname");
        private static final String PORT_LABEL = Constant.messages
                .getString("authentication.method.http.field.label.port");
        private static final String REALM_LABEL = Constant.messages
                .getString("authentication.method.http.field.label.realm");

        private ZapTextField hostnameField;
        private ZapTextField realmField;
        private ZapPortNumberSpinner portNumberSpinner;

        /** The method currently bound to (and saved from) this panel. */
        private HttpAuthenticationMethod method;

        public HttpAuthenticationMethodOptionsPanel() {
            super();
            initialize();
        }

        /** Lays out hostname/port on the first row and the realm field on the second. */
        private void initialize() {
            this.setLayout(new GridBagLayout());

            this.add(new JLabel(HOSTNAME_LABEL), LayoutHelper.getGBC(0, 0, 1, 0.0d));
            this.hostnameField = new ZapTextField();
            this.add(this.hostnameField, LayoutHelper.getGBC(1, 0, 1, 1.0d, new Insets(0, 0, 0, 10)));

            this.add(new JLabel(PORT_LABEL), LayoutHelper.getGBC(2, 0, 1, 0.0d));
            this.portNumberSpinner = new ZapPortNumberSpinner(80);
            this.add(this.portNumberSpinner, LayoutHelper.getGBC(3, 0, 1, 0.0d));

            this.add(new JLabel(REALM_LABEL), LayoutHelper.getGBC(0, 1, 1, 0.0d));
            this.realmField = new ZapTextField();
            this.add(this.realmField, LayoutHelper.getGBC(1, 1, 1, 1.0d, new Insets(0, 0, 0, 10)));
        }

        @Override
        public void validateFields() throws IllegalStateException {
            try {
                // Only a syntactic check: any string that parses as a URI is accepted.
                new URI(hostnameField.getText());
            } catch (Exception ex) {
                hostnameField.requestFocusInWindow();
                throw new IllegalStateException(
                        Constant.messages.getString("authentication.method.http.dialog.error.url.text"));
            }
        }

        @Override
        public void saveMethod() {
            getMethod().hostname = hostnameField.getText();
            getMethod().port = portNumberSpinner.getValue();
            getMethod().realm = realmField.getText();
        }

        @Override
        public void bindMethod(AuthenticationMethod method) throws UnsupportedAuthenticationMethodException {
            this.method = (HttpAuthenticationMethod) method;
            this.hostnameField.setText(this.method.hostname);
            this.portNumberSpinner.setValue(this.method.port);
            this.realmField.setText(this.method.realm);
        }

        @Override
        public HttpAuthenticationMethod getMethod() {
            return method;
        }
    }

    @Override
    public HttpAuthenticationMethod createAuthenticationMethod(int contextId) {
        return new HttpAuthenticationMethod();
    }

    @Override
    public String getName() {
        return METHOD_NAME;
    }

    @Override
    public int getUniqueIdentifier() {
        return METHOD_IDENTIFIER;
    }

    @Override
    public AbstractAuthenticationMethodOptionsPanel buildOptionsPanel(Context uiSharedContext) {
        return new HttpAuthenticationMethodOptionsPanel();
    }

    @Override
    public boolean hasOptionsPanel() {
        return true;
    }

    @Override
    public AbstractCredentialsOptionsPanel<? extends AuthenticationCredentials> buildCredentialsOptionsPanel(
            AuthenticationCredentials credentials, Context uiSharedContext) {
        return new UsernamePasswordAuthenticationCredentialsOptionsPanel(
                (UsernamePasswordAuthenticationCredentials) credentials);
    }

    @Override
    public boolean hasCredentialsOptionsPanel() {
        return true;
    }

    @Override
    public boolean isTypeForMethod(AuthenticationMethod method) {
        return (method instanceof HttpAuthenticationMethod);
    }

    @Override
    public void hook(ExtensionHook extensionHook) {
        // Nothing to hook
    }

    @Override
    public AuthenticationMethod loadMethodFromSession(Session session, int contextId) throws DatabaseException {
        HttpAuthenticationMethod method = createAuthenticationMethod(contextId);

        // Hostname, realm and port are persisted in the generic auth method fields 1-3.
        List<String> hostnames = session.getContextDataStrings(contextId, RecordContext.TYPE_AUTH_METHOD_FIELD_1);
        if (hostnames != null && hostnames.size() > 0) {
            method.hostname = hostnames.get(0);
        }
        List<String> realms = session.getContextDataStrings(contextId, RecordContext.TYPE_AUTH_METHOD_FIELD_2);
        if (realms != null && realms.size() > 0) {
            method.realm = realms.get(0);
        }

        List<String> ports = session.getContextDataStrings(contextId, RecordContext.TYPE_AUTH_METHOD_FIELD_3);
        if (ports != null && ports.size() > 0) {
            try {
                method.port = Integer.parseInt(ports.get(0));
            } catch (Exception ex) {
                // Keep the default port (80) when the stored value is not a number.
                log.error("Unable to load HttpAuthenticationMethod. ", ex);
            }
        }

        return method;
    }

    @Override
    public void persistMethodToSession(Session session, int contextId, AuthenticationMethod authMethod)
            throws UnsupportedAuthenticationMethodException, DatabaseException {
        if (!(authMethod instanceof HttpAuthenticationMethod))
            throw new UnsupportedAuthenticationMethodException("Http Authentication type only supports: "
                    + HttpAuthenticationMethod.class);

        HttpAuthenticationMethod method = (HttpAuthenticationMethod) authMethod;
        session.setContextData(contextId, RecordContext.TYPE_AUTH_METHOD_FIELD_1, method.hostname);
        session.setContextData(contextId, RecordContext.TYPE_AUTH_METHOD_FIELD_2, method.realm);
        session.setContextData(contextId, RecordContext.TYPE_AUTH_METHOD_FIELD_3, Integer.toString(method.port));
    }

    /* API related constants and methods. */
    private static final String PARAM_HOSTNAME = "hostname";
    private static final String PARAM_REALM = "realm";
    private static final String PARAM_PORT = "port";

    @Override
    public AuthenticationCredentials createAuthenticationCredentials() {
        return new UsernamePasswordAuthenticationCredentials();
    }

    @Override
    public ApiDynamicActionImplementor getSetMethodForContextApiAction() {
        return new ApiDynamicActionImplementor(API_METHOD_NAME, new String[] { PARAM_HOSTNAME, PARAM_REALM },
                new String[] { PARAM_PORT }) {

            @Override
            public void handleAction(JSONObject params) throws ApiException {
                Context context = ApiUtils.getContextByParamId(params, AuthenticationAPI.PARAM_CONTEXT_ID);

                HttpAuthenticationMethod method = createAuthenticationMethod(context.getIndex());
                method.hostname = ApiUtils.getNonEmptyStringParam(params, PARAM_HOSTNAME);
                try {
                    new URI(method.hostname);
                } catch (Exception ex) {
                    throw new ApiException(ApiException.Type.ILLEGAL_PARAMETER, PARAM_HOSTNAME);
                }

                if (params.containsKey(PARAM_REALM))
                    method.realm = params.getString(PARAM_REALM);

                if (params.containsKey(PARAM_PORT))
                    try {
                        String portString = params.getString(PARAM_PORT);
                        method.port = Integer.parseInt(portString);
                    } catch (Exception ex) {
                        throw new ApiException(ApiException.Type.ILLEGAL_PARAMETER, PARAM_PORT);
                    }

                if (!context.getAuthenticationMethod().isSameType(method))
                    apiChangedAuthenticationMethodForContext(context.getIndex());
                context.setAuthenticationMethod(method);
            }
        };
    }

    @Override
    public ApiDynamicActionImplementor getSetCredentialsForUserApiAction() {
        return UsernamePasswordAuthenticationCredentials.getSetCredentialsForUserApiAction(this);
    }

    @Override
    public void exportData(Configuration config, AuthenticationMethod authMethod) {
        if (!(authMethod instanceof HttpAuthenticationMethod)) {
            throw new UnsupportedAuthenticationMethodException(
                    "HTTP based authentication type only supports: " + HttpAuthenticationMethod.class.getName());
        }
        HttpAuthenticationMethod method = (HttpAuthenticationMethod) authMethod;
        config.setProperty(CONTEXT_CONFIG_AUTH_HTTP_HOSTNAME, method.hostname);
        config.setProperty(CONTEXT_CONFIG_AUTH_HTTP_REALM, method.realm);
        config.setProperty(CONTEXT_CONFIG_AUTH_HTTP_PORT, method.port);
    }

    @Override
    public void importData(Configuration config, AuthenticationMethod authMethod) throws ConfigurationException {
        if (!(authMethod instanceof HttpAuthenticationMethod)) {
            throw new UnsupportedAuthenticationMethodException(
                    "HTTP based authentication type only supports: " + HttpAuthenticationMethod.class.getName());
        }
        HttpAuthenticationMethod method = (HttpAuthenticationMethod) authMethod;
        method.hostname = config.getString(CONTEXT_CONFIG_AUTH_HTTP_HOSTNAME);
        method.realm = config.getString(CONTEXT_CONFIG_AUTH_HTTP_REALM);
        method.port = config.getInt(CONTEXT_CONFIG_AUTH_HTTP_PORT);
    }
}
zapbot/zaproxy
src/org/zaproxy/zap/authentication/HttpAuthenticationMethodType.java
Java
apache-2.0
14,019
/*
 * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.route53recoveryreadiness.model.transform;

import java.util.Map;
import java.util.List;
import javax.annotation.Generated;

import com.amazonaws.SdkClientException;
import com.amazonaws.services.route53recoveryreadiness.model.*;
import com.amazonaws.protocol.*;
import com.amazonaws.annotation.SdkInternalApi;

/**
 * RecoveryGroupOutputMarshaller
 *
 * <p>Stateless singleton that marshalls a {@code RecoveryGroupOutput} model object into a request
 * payload via the field bindings declared below. Generated by the AWS Java SDK code generator
 * (see {@code @Generated}) -- do not edit by hand.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class RecoveryGroupOutputMarshaller {

    // Binding for the "cells" list in the JSON payload.
    private static final MarshallingInfo<List> CELLS_BINDING = MarshallingInfo.builder(MarshallingType.LIST).marshallLocation(MarshallLocation.PAYLOAD)
            .marshallLocationName("cells").build();
    // Binding for the "recoveryGroupArn" string in the JSON payload.
    private static final MarshallingInfo<String> RECOVERYGROUPARN_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("recoveryGroupArn").build();
    // Binding for the "recoveryGroupName" string in the JSON payload.
    private static final MarshallingInfo<String> RECOVERYGROUPNAME_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
            .marshallLocation(MarshallLocation.PAYLOAD).marshallLocationName("recoveryGroupName").build();
    // Binding for the "tags" map in the JSON payload.
    private static final MarshallingInfo<Map> TAGS_BINDING = MarshallingInfo.builder(MarshallingType.MAP).marshallLocation(MarshallLocation.PAYLOAD)
            .marshallLocationName("tags").build();

    // Shared, stateless instance; the class holds no mutable state.
    private static final RecoveryGroupOutputMarshaller instance = new RecoveryGroupOutputMarshaller();

    public static RecoveryGroupOutputMarshaller getInstance() {
        return instance;
    }

    /**
     * Marshall the given parameter object.
     *
     * @param recoveryGroupOutput the model object to marshall; must not be {@code null}
     * @param protocolMarshaller the protocol marshaller that receives each bound field
     * @throws SdkClientException if {@code recoveryGroupOutput} is {@code null} or any field
     *         fails to marshall (the original exception is wrapped as the cause)
     */
    public void marshall(RecoveryGroupOutput recoveryGroupOutput, ProtocolMarshaller protocolMarshaller) {

        if (recoveryGroupOutput == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }

        try {
            protocolMarshaller.marshall(recoveryGroupOutput.getCells(), CELLS_BINDING);
            protocolMarshaller.marshall(recoveryGroupOutput.getRecoveryGroupArn(), RECOVERYGROUPARN_BINDING);
            protocolMarshaller.marshall(recoveryGroupOutput.getRecoveryGroupName(), RECOVERYGROUPNAME_BINDING);
            protocolMarshaller.marshall(recoveryGroupOutput.getTags(), TAGS_BINDING);
        } catch (Exception e) {
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
aws/aws-sdk-java
aws-java-sdk-route53recoveryreadiness/src/main/java/com/amazonaws/services/route53recoveryreadiness/model/transform/RecoveryGroupOutputMarshaller.java
Java
apache-2.0
3,021
package net.jgp.labs.spark.l250_map.l000;

import java.io.Serializable;

import org.apache.spark.api.java.function.ForeachFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

/**
 * Loads books.csv into a Dataset and prints one line per book using
 * {@code Dataset#foreach(ForeachFunction)}.
 */
public class ForEachBookApp implements Serializable {
  private static final long serialVersionUID = -4250231621481140775L;

  /**
   * Row-printing action shipped to the executors by foreach().
   *
   * <p>Declared {@code static} so that serializing the function does not drag
   * the enclosing ForEachBookApp instance along with it (the original
   * non-static inner class forced the outer class to be Serializable too).
   */
  private static final class BookPrinter implements ForeachFunction<Row> {
    private static final long serialVersionUID = -3680381094052442862L;

    @Override
    public void call(Row r) throws Exception {
      // Assumes column 2 is the book title and column 4 the purchase link in
      // books.csv -- TODO confirm against the data file's header.
      System.out.println(r.getString(2) + " can be bought at " + r.getString(
          4));
    }
  }

  public static void main(String[] args) {
    ForEachBookApp app = new ForEachBookApp();
    app.start();
  }

  /** Builds the local session, loads the CSV, shows it, and prints each row. */
  private void start() {
    SparkSession spark = SparkSession.builder().appName("For Each Book").master(
        "local").getOrCreate();

    try {
      String filename = "data/books.csv";
      Dataset<Row> df = spark.read().format("csv")
          .option("inferSchema", "true")
          .option("header", "true")
          .load(filename);
      df.show();

      df.foreach(new BookPrinter());
    } finally {
      // Was missing: release the local Spark context when the app finishes.
      spark.stop();
    }
  }
}
jgperrin/net.jgp.labs.spark
src/main/java/net/jgp/labs/spark/l250_map/l000/ForEachBookApp.java
Java
apache-2.0
1,162
package acceptance

import (
	"gopkg.in/yaml.v2"
	"log"
	"os"

	"github.com/onsi/gomega/gexec"
	"github.com/pivotal-cf/go-pivnet/v7"
	"github.com/pivotal-cf/go-pivnet/v7/logshim"
	"github.com/pivotal-cf/pivnet-resource/v3/gp"
	"github.com/robdimsdale/sanitizer"

	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// Package-level state shared by the specs; populated on every parallel node
// by the second SynchronizedBeforeSuite function.
var (
	inPath    string
	checkPath string
	outPath   string

	endpoint string

	refreshToken string

	productSlug string

	artifactName   string
	artifactPath   string
	artifactDigest string

	pivnetClient *gp.Client

	additionalSynchronizedBeforeSuite func(SuiteEnv)
)

func TestAcceptance(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Acceptance Suite")
}

// SuiteEnv is serialized (as YAML) on node 1 and broadcast to all nodes.
type SuiteEnv struct {
	InPath    string
	CheckPath string
	OutPath   string

	Endpoint string

	RefreshToken string

	ProductSlug string

	ArtifactName   string
	ArtifactPath   string
	ArtifactDigest string
}

var _ = SynchronizedBeforeSuite(func() []byte {
	suiteEnv := SuiteEnv{}
	var err error

	By("Getting product slug from environment variables")
	suiteEnv.ProductSlug = os.Getenv("PRODUCT_SLUG")
	Expect(suiteEnv.ProductSlug).NotTo(BeEmpty(), "$PRODUCT_SLUG must be provided")

	By("Getting artifact name from environment variables")
	suiteEnv.ArtifactName = os.Getenv("ARTIFACT_NAME")
	Expect(suiteEnv.ArtifactName).NotTo(BeEmpty(), "$ARTIFACT_NAME must be provided")

	By("Getting artifact path from environment variables")
	suiteEnv.ArtifactPath = os.Getenv("ARTIFACT_PATH")
	Expect(suiteEnv.ArtifactPath).NotTo(BeEmpty(), "$ARTIFACT_PATH must be provided")

	By("Getting artifact digest from environment variables")
	suiteEnv.ArtifactDigest = os.Getenv("ARTIFACT_DIGEST")
	Expect(suiteEnv.ArtifactDigest).NotTo(BeEmpty(), "$ARTIFACT_DIGEST must be provided")

	By("Getting endpoint from environment variables")
	suiteEnv.Endpoint = os.Getenv("PIVNET_ENDPOINT")
	Expect(suiteEnv.Endpoint).NotTo(BeEmpty(), "$PIVNET_ENDPOINT must be provided")

	By("Getting refresh token from environment variables")
	suiteEnv.RefreshToken = os.Getenv("PIVNET_RESOURCE_REFRESH_TOKEN")
	Expect(suiteEnv.RefreshToken).NotTo(BeEmpty(), "$PIVNET_RESOURCE_REFRESH_TOKEN must be provided")

	By("Compiling check binary")
	suiteEnv.CheckPath, err = gexec.Build("github.com/pivotal-cf/pivnet-resource/v3/cmd/check", "-race")
	Expect(err).NotTo(HaveOccurred())

	By("Compiling out binary")
	suiteEnv.OutPath, err = gexec.Build("github.com/pivotal-cf/pivnet-resource/v3/cmd/out", "-race")
	Expect(err).NotTo(HaveOccurred())

	By("Compiling in binary")
	suiteEnv.InPath, err = gexec.Build("github.com/pivotal-cf/pivnet-resource/v3/cmd/in")
	Expect(err).NotTo(HaveOccurred())

	By("Sanitizing suite setup output")
	// BUG FIX: this used to call getLogShim() with no argument, which read the
	// package-level refreshToken var -- still empty at this point (only
	// suiteEnv.RefreshToken has been set), so the sanitizer was keyed on ""
	// and the real token was NOT scrubbed from suite-setup output. Pass the
	// freshly read token explicitly.
	ls := getLogShim(suiteEnv.RefreshToken)

	By("Creating pivnet client for suite setup")
	pivnetClient = getClient(ls, suiteEnv.Endpoint, suiteEnv.RefreshToken)

	if additionalSynchronizedBeforeSuite != nil {
		additionalSynchronizedBeforeSuite(suiteEnv)
	}

	envBytes, err := yaml.Marshal(suiteEnv)
	Expect(err).ShouldNot(HaveOccurred())

	return envBytes
}, func(envBytes []byte) {
	suiteEnv := SuiteEnv{}
	err := yaml.Unmarshal(envBytes, &suiteEnv)
	Expect(err).ShouldNot(HaveOccurred())

	inPath = suiteEnv.InPath
	checkPath = suiteEnv.CheckPath
	outPath = suiteEnv.OutPath
	endpoint = suiteEnv.Endpoint
	refreshToken = suiteEnv.RefreshToken
	productSlug = suiteEnv.ProductSlug
	artifactName = suiteEnv.ArtifactName
	artifactPath = suiteEnv.ArtifactPath
	artifactDigest = suiteEnv.ArtifactDigest

	By("Sanitizing acceptance test output")
	ls := getLogShim(refreshToken)

	By("Creating pivnet client (for out-of-band operations)")
	pivnetClient = getClient(ls, suiteEnv.Endpoint, suiteEnv.RefreshToken)
})

// getLogShim builds a logger whose output has the given refresh token replaced
// with a placeholder, and routes GinkgoWriter through the same sanitizer.
func getLogShim(refreshToken string) *logshim.LogShim {
	sanitized := map[string]string{
		refreshToken: "***sanitized-refresh-token***",
	}
	sanitizedWriter := sanitizer.NewSanitizer(sanitized, GinkgoWriter)
	GinkgoWriter = sanitizedWriter

	testLogger := log.New(sanitizedWriter, "", log.LstdFlags)
	verbose := true
	return logshim.NewLogShim(testLogger, testLogger, verbose)
}

// getClient builds a pivnet client against the given endpoint, authenticating
// with the given refresh token and logging through the provided log shim.
func getClient(ls *logshim.LogShim, endpoint string, refreshToken string) *gp.Client {
	clientConfig := pivnet.ClientConfig{
		Host:      endpoint,
		UserAgent: "pivnet-resource/integration-test",
	}
	return gp.NewClient(pivnet.NewAccessTokenOrLegacyToken(refreshToken, endpoint, false), clientConfig, ls)
}

var _ = SynchronizedAfterSuite(func() {
}, func() {
	gexec.CleanupBuildArtifacts()
})
pivotal-cf-experimental/pivnet-resource
acceptance/acceptance_suite_test.go
GO
apache-2.0
4,448
/**
 * Copyright © 2013 enioka. All rights reserved
 * Authors: Marc-Antoine GOUILLART (marc-antoine.gouillart@enioka.com)
 * Pierre COPPEE (pierre.coppee@enioka.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.enioka.jqm.model;

import java.io.Serializable;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

import com.enioka.jqm.jdbc.DatabaseException;
import com.enioka.jqm.jdbc.DbConn;

/**
 * Model bean for a single permission row: a surrogate id, the permission name,
 * and the id of the role that holds it.
 */
public class RPermission implements Serializable {
    private static final long serialVersionUID = 1234354709423603792L;

    private Integer id;
    private String name;
    private int role;

    public Integer getId() {
        return id;
    }

    void setId(Integer id) {
        this.id = id;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public int getRole() {
        return role;
    }

    public void setRole(int role) {
        this.role = role;
    }

    /**
     * Runs the named query and maps every returned row onto an RPermission.
     * Columns are expected in the order: id, name, role id.
     *
     * @throws DatabaseException wrapping any SQLException raised while reading the results
     */
    public static List<RPermission> select(DbConn cnx, String query_key, Object... args) {
        List<RPermission> permissions = new ArrayList<>();
        ResultSet resultSet = null;
        try {
            resultSet = cnx.runSelect(query_key, args);
            while (resultSet.next()) {
                permissions.add(fromRow(resultSet));
            }
        } catch (SQLException e) {
            throw new DatabaseException(e);
        } finally {
            // Always release the result set, even when mapping fails.
            cnx.closeQuietly(resultSet);
        }
        return permissions;
    }

    /** Maps the current row of the result set onto a new bean instance. */
    private static RPermission fromRow(ResultSet resultSet) throws SQLException {
        RPermission permission = new RPermission();
        permission.id = resultSet.getInt(1);
        permission.name = resultSet.getString(2);
        permission.role = resultSet.getInt(3);
        return permission;
    }
}
enioka/jqm
jqm-all/jqm-model/src/main/java/com/enioka/jqm/model/RPermission.java
Java
apache-2.0
2,300
# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the EC2 Credentials service. This service allows the creation of access/secret credentials used for the ec2 interop layer of OpenStack. A user can create as many access/secret pairs, each of which is mapped to a specific project. This is required because OpenStack supports a user belonging to multiple projects, whereas the signatures created on ec2-style requests don't allow specification of which project the user wishes to act upon. To complete the cycle, we provide a method that OpenStack services can use to validate a signature and get a corresponding OpenStack token. This token allows method calls to other services within the context the access/secret was created. As an example, Nova requests Keystone to validate the signature of a request, receives a token, and then makes a request to Glance to list images needed to perform the requested task. 
""" import abc import sys import uuid from keystoneclient.contrib.ec2 import utils as ec2_utils from oslo_serialization import jsonutils import six from six.moves import http_client from keystone.common import controller from keystone.common import dependency from keystone.common import utils from keystone.common import wsgi from keystone import exception from keystone.i18n import _ CRED_TYPE_EC2 = 'ec2' @dependency.requires('assignment_api', 'catalog_api', 'credential_api', 'identity_api', 'resource_api', 'role_api', 'token_provider_api') @six.add_metaclass(abc.ABCMeta) class Ec2ControllerCommon(object): def check_signature(self, creds_ref, credentials): signer = ec2_utils.Ec2Signer(creds_ref['secret']) signature = signer.generate(credentials) # NOTE(davechen): credentials.get('signature') is not guaranteed to # exist, we need check it explicitly. if credentials.get('signature'): if utils.auth_str_equal(credentials['signature'], signature): return True # NOTE(vish): Some client libraries don't use the port when signing # requests, so try again without port. elif ':' in credentials['host']: hostname, _port = credentials['host'].split(':') credentials['host'] = hostname # NOTE(davechen): we need reinitialize 'signer' to avoid # contaminated status of signature, this is similar with # other programming language libraries, JAVA for example. signer = ec2_utils.Ec2Signer(creds_ref['secret']) signature = signer.generate(credentials) if utils.auth_str_equal(credentials['signature'], signature): return True raise exception.Unauthorized( message=_('Invalid EC2 signature.')) else: raise exception.Unauthorized( message=_('EC2 signature not supplied.')) # Raise the exception when credentials.get('signature') is None else: raise exception.Unauthorized( message=_('EC2 signature not supplied.')) @abc.abstractmethod def authenticate(self, context, credentials=None, ec2Credentials=None): """Validate a signed EC2 request and provide a token. 
Other services (such as Nova) use this **admin** call to determine if a request they signed received is from a valid user. If it is a valid signature, an OpenStack token that maps to the user/tenant is returned to the caller, along with all the other details returned from a normal token validation call. The returned token is useful for making calls to other OpenStack services within the context of the request. :param context: standard context :param credentials: dict of ec2 signature :param ec2Credentials: DEPRECATED dict of ec2 signature :returns: token: OpenStack token equivalent to access key along with the corresponding service catalog and roles """ raise exception.NotImplemented() def _authenticate(self, credentials=None, ec2credentials=None): """Common code shared between the V2 and V3 authenticate methods. :returns: user_ref, tenant_ref, roles_ref, catalog_ref """ # FIXME(ja): validate that a service token was used! # NOTE(termie): backwards compat hack if not credentials and ec2credentials: credentials = ec2credentials if 'access' not in credentials: raise exception.Unauthorized( message=_('EC2 signature not supplied.')) creds_ref = self._get_credentials(credentials['access']) self.check_signature(creds_ref, credentials) # TODO(termie): don't create new tokens every time # TODO(termie): this is copied from TokenController.authenticate tenant_ref = self.resource_api.get_project(creds_ref['tenant_id']) user_ref = self.identity_api.get_user(creds_ref['user_id']) # Validate that the auth info is valid and nothing is disabled try: self.identity_api.assert_user_enabled( user_id=user_ref['id'], user=user_ref) self.resource_api.assert_domain_enabled( domain_id=user_ref['domain_id']) self.resource_api.assert_project_enabled( project_id=tenant_ref['id'], project=tenant_ref) except AssertionError as e: six.reraise(exception.Unauthorized, exception.Unauthorized(e), sys.exc_info()[2]) roles = self.assignment_api.get_roles_for_user_and_project( user_ref['id'], 
tenant_ref['id'] ) if not roles: raise exception.Unauthorized( message=_('User not valid for tenant.')) roles_ref = [self.role_api.get_role(role_id) for role_id in roles] catalog_ref = self.catalog_api.get_catalog( user_ref['id'], tenant_ref['id']) return user_ref, tenant_ref, roles_ref, catalog_ref def create_credential(self, request, user_id, tenant_id): """Create a secret/access pair for use with ec2 style auth. Generates a new set of credentials that map the user/tenant pair. :param request: current request :param user_id: id of user :param tenant_id: id of tenant :returns: credential: dict of ec2 credential """ self.identity_api.get_user(user_id) self.resource_api.get_project(tenant_id) blob = {'access': uuid.uuid4().hex, 'secret': uuid.uuid4().hex, 'trust_id': request.context.trust_id} credential_id = utils.hash_access_key(blob['access']) cred_ref = {'user_id': user_id, 'project_id': tenant_id, 'blob': jsonutils.dumps(blob), 'id': credential_id, 'type': CRED_TYPE_EC2} self.credential_api.create_credential(credential_id, cred_ref) return {'credential': self._convert_v3_to_ec2_credential(cred_ref)} def get_credentials(self, user_id): """List all credentials for a user. :param user_id: id of user :returns: credentials: list of ec2 credential dicts """ self.identity_api.get_user(user_id) credential_refs = self.credential_api.list_credentials_for_user( user_id, type=CRED_TYPE_EC2) return {'credentials': [self._convert_v3_to_ec2_credential(credential) for credential in credential_refs]} def get_credential(self, user_id, credential_id): """Retrieve a user's access/secret pair by the access key. Grab the full access/secret pair for a given access key. :param user_id: id of user :param credential_id: access key for credentials :returns: credential: dict of ec2 credential """ self.identity_api.get_user(user_id) return {'credential': self._get_credentials(credential_id)} def delete_credential(self, user_id, credential_id): """Delete a user's access/secret pair. 
Used to revoke a user's access/secret pair :param user_id: id of user :param credential_id: access key for credentials :returns: bool: success """ self.identity_api.get_user(user_id) self._get_credentials(credential_id) ec2_credential_id = utils.hash_access_key(credential_id) return self.credential_api.delete_credential(ec2_credential_id) @staticmethod def _convert_v3_to_ec2_credential(credential): # Prior to bug #1259584 fix, blob was stored unserialized # but it should be stored as a json string for compatibility # with the v3 credentials API. Fall back to the old behavior # for backwards compatibility with existing DB contents try: blob = jsonutils.loads(credential['blob']) except TypeError: blob = credential['blob'] return {'user_id': credential.get('user_id'), 'tenant_id': credential.get('project_id'), 'access': blob.get('access'), 'secret': blob.get('secret'), 'trust_id': blob.get('trust_id')} def _get_credentials(self, credential_id): """Return credentials from an ID. :param credential_id: id of credential :raises keystone.exception.Unauthorized: when credential id is invalid or when the credential type is not ec2 :returns: credential: dict of ec2 credential. """ ec2_credential_id = utils.hash_access_key(credential_id) cred = self.credential_api.get_credential(ec2_credential_id) if not cred or cred['type'] != CRED_TYPE_EC2: raise exception.Unauthorized( message=_('EC2 access key not found.')) return self._convert_v3_to_ec2_credential(cred) def render_token_data_response(self, token_id, token_data): """Render token data HTTP response. Stash token ID into the X-Subject-Token header. 
""" status = (http_client.OK, http_client.responses[http_client.OK]) headers = [('X-Subject-Token', token_id)] return wsgi.render_response(body=token_data, status=status, headers=headers) @dependency.requires('policy_api', 'token_provider_api') class Ec2Controller(Ec2ControllerCommon, controller.V2Controller): @controller.v2_ec2_deprecated def authenticate(self, request, credentials=None, ec2Credentials=None): (user_ref, tenant_ref, metadata_ref, roles_ref, catalog_ref) = self._authenticate(credentials=credentials, ec2credentials=ec2Credentials) # NOTE(morganfainberg): Make sure the data is in correct form since it # might be consumed external to Keystone and this is a v2.0 controller. # The token provider does not explicitly care about user_ref version # in this case, but the data is stored in the token itself and should # match the version user_ref = self.v3_to_v2_user(user_ref) auth_token_data = dict(user=user_ref, tenant=tenant_ref, metadata=metadata_ref, id='placeholder') (token_id, token_data) = self.token_provider_api.issue_v2_token( auth_token_data, roles_ref, catalog_ref) return token_data @controller.v2_ec2_deprecated def get_credential(self, request, user_id, credential_id): if not self._is_admin(request): self._assert_identity(request.context_dict, user_id) return super(Ec2Controller, self).get_credential(user_id, credential_id) @controller.v2_ec2_deprecated def get_credentials(self, request, user_id): if not self._is_admin(request): self._assert_identity(request.context_dict, user_id) return super(Ec2Controller, self).get_credentials(user_id) @controller.v2_ec2_deprecated def create_credential(self, request, user_id, tenant_id): if not self._is_admin(request): self._assert_identity(request.context_dict, user_id) return super(Ec2Controller, self).create_credential( request, user_id, tenant_id) @controller.v2_ec2_deprecated def delete_credential(self, request, user_id, credential_id): if not self._is_admin(request): 
self._assert_identity(request.context_dict, user_id) self._assert_owner(user_id, credential_id) return super(Ec2Controller, self).delete_credential(user_id, credential_id) def _assert_identity(self, context, user_id): """Check that the provided token belongs to the user. :param context: standard context :param user_id: id of user :raises keystone.exception.Forbidden: when token is invalid """ token_ref = utils.get_token_ref(context) if token_ref.user_id != user_id: raise exception.Forbidden(_('Token belongs to another user')) def _is_admin(self, request): """Wrap admin assertion error return statement. :param context: standard context :returns: bool: success """ try: # NOTE(morganfainberg): policy_api is required for assert_admin # to properly perform policy enforcement. self.assert_admin(request) return True except (exception.Forbidden, exception.Unauthorized): return False def _assert_owner(self, user_id, credential_id): """Ensure the provided user owns the credential. :param user_id: expected credential owner :param credential_id: id of credential object :raises keystone.exception.Forbidden: on failure """ ec2_credential_id = utils.hash_access_key(credential_id) cred_ref = self.credential_api.get_credential(ec2_credential_id) if user_id != cred_ref['user_id']: raise exception.Forbidden(_('Credential belongs to another user')) @dependency.requires('policy_api', 'token_provider_api') class Ec2ControllerV3(Ec2ControllerCommon, controller.V3Controller): collection_name = 'credentials' member_name = 'credential' def _check_credential_owner_and_user_id_match(self, request, prep_info, user_id, credential_id): # NOTE(morganfainberg): this method needs to capture the arguments of # the method that is decorated with @controller.protected() (with # exception of the first argument ('context') since the protected # method passes in *args, **kwargs. 
In this case, it is easier to see # the expected input if the argspec is `user_id` and `credential_id` # explicitly (matching the :class:`.ec2_delete_credential()` method # below). ref = {} credential_id = utils.hash_access_key(credential_id) ref['credential'] = self.credential_api.get_credential(credential_id) # NOTE(morganfainberg): policy_api is required for this # check_protection to properly be able to perform policy enforcement. self.check_protection(request, prep_info, ref) def authenticate(self, context, credentials=None, ec2Credentials=None): (user_ref, project_ref, roles_ref, catalog_ref) = self._authenticate( credentials=credentials, ec2credentials=ec2Credentials ) method_names = ['ec2credential'] token_id, token_data = self.token_provider_api.issue_token( user_ref['id'], method_names, project_id=project_ref['id']) return self.render_token_data_response(token_id, token_data) @controller.protected(callback=_check_credential_owner_and_user_id_match) def ec2_get_credential(self, request, user_id, credential_id): ref = super(Ec2ControllerV3, self).get_credential(user_id, credential_id) return Ec2ControllerV3.wrap_member(request.context_dict, ref['credential']) @controller.protected() def ec2_list_credentials(self, request, user_id): refs = super(Ec2ControllerV3, self).get_credentials(user_id) return Ec2ControllerV3.wrap_collection(request.context_dict, refs['credentials']) @controller.protected() def ec2_create_credential(self, request, user_id, tenant_id): ref = super(Ec2ControllerV3, self).create_credential( request, user_id, tenant_id) return Ec2ControllerV3.wrap_member(request.context_dict, ref['credential']) @controller.protected(callback=_check_credential_owner_and_user_id_match) def ec2_delete_credential(self, request, user_id, credential_id): return super(Ec2ControllerV3, self).delete_credential(user_id, credential_id) @classmethod def _add_self_referential_link(cls, context, ref): path = '/users/%(user_id)s/credentials/OS-EC2/%(credential_id)s' url 
= cls.base_url(context, path) % { 'user_id': ref['user_id'], 'credential_id': ref['access']} ref.setdefault('links', {}) ref['links']['self'] = url
ilay09/keystone
keystone/contrib/ec2/controllers.py
Python
apache-2.0
18,455
<?php /** * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. * A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ namespace Amazon\Pay\Helper; use Magento\Framework\App\Helper\AbstractHelper; class Email extends AbstractHelper { /** * @var \Magento\Store\Model\StoreManagerInterface */ private $storeManager; /** * @var \Magento\Framework\Mail\Template\TransportBuilder */ private $transportBuilder; /** * @var \Amazon\Pay\Logger\AsyncIpnLogger */ private $asyncLogger; /** * @param Context $context * @param TransportBuilder $transportBuilder * @param StoreManagerInterface $storeManager * @param \Amazon\Pay\Logger\AsyncIpnLogger $asyncLogger */ public function __construct( \Magento\Framework\App\Helper\Context $context, \Magento\Framework\Mail\Template\TransportBuilder $transportBuilder, \Magento\Store\Model\StoreManagerInterface $storeManager, \Amazon\Pay\Logger\AsyncIpnLogger $asyncLogger ) { parent::__construct($context); $this->transportBuilder = $transportBuilder; $this->storeManager = $storeManager; $this->asyncLogger = $asyncLogger; } /** * @param Order $order * * @return void */ public function sendPaymentDeclinedEmail(\Magento\Sales\Model\Order $order) { try { $storeName = $this->getStoreName($order->getStoreId()); $transport = $this->transportBuilder ->setTemplateIdentifier('amazon_pay_payment_declined') ->setTemplateOptions( [ 'area' => \Magento\Framework\App\Area::AREA_FRONTEND, 'store' => $order->getStoreId() ] ) ->setFrom('general') ->setTemplateVars(['storeName' => $storeName, 'incrementId' => 
$order->getIncrementId()]) ->addTo($order->getCustomerEmail(), $order->getCustomerName()) ->getTransport(); $transport->sendMessage(); $this->asyncLogger->info('Payment declined email sent for Order #' . $order->getIncrementId()); } catch (\Exception $e) { $error = $order->getIncrementId() . '-' . $e->getMessage(); $this->asyncLogger->info('Cannot send payment declined email for Order #' . $error); } } protected function getStoreName($storeId) { $store = $this->storeManager->getStore($storeId); return $store->getFrontendName(); } }
amzn/amazon-payments-magento-2-plugin
Helper/Email.php
PHP
apache-2.0
3,079
import React, { useEffect, useState, forwardRef, useImperativeHandle, ForwardRefRenderFunction, useCallback, } from 'react'; import * as WorkflowJobNodes from './JobNodes'; import { ChartNodeType, JobNode, ChartNodeStatus, ChartNodes, ChartElements, JobNodeRawData, } from './types'; import { convertExecutionStateToStatus, convertToChartElements, RawDataCol } from './helpers'; import ReactFlow, { Background, BackgroundVariant, isNode, OnLoadParams, FlowElement, useStoreActions, useStoreState, Controls, ReactFlowState, } from 'react-flow-renderer'; import { Container } from './elements'; import { ChartWorkflowConfig } from 'typings/workflow'; import { cloneDeep } from 'lodash'; import { useResizeObserver } from 'hooks'; import { Side } from 'typings/app'; import { WORKFLOW_JOB_NODE_CHANNELS } from './JobNodes/shared'; import PubSub from 'pubsub-js'; type Props = { workflowConfig: ChartWorkflowConfig; nodeType: ChartNodeType; nodeInitialStatus?: ChartNodeStatus; side?: Side; // NOTE: When the nodeType is 'fork', side is required selectable?: boolean; onJobClick?: (node: JobNode) => void; onCanvasClick?: () => void; }; type UpdateInheritanceParams = { id: string; whetherInherit: boolean; }; type UpdateStatusParams = { id: string; status: ChartNodeStatus; }; type UpdateDisabledParams = { id: string; disabled: boolean; }; export type ChartExposedRef = { nodes: ChartNodes; updateNodeStatusById: (params: UpdateStatusParams) => void; updateNodeDisabledById: (params: UpdateDisabledParams) => void; updateNodeInheritanceById: (params: UpdateInheritanceParams) => void; setSelectedNodes: (nodes: ChartNodes) => void; }; const WorkflowJobsCanvas: ForwardRefRenderFunction<ChartExposedRef | undefined, Props> = ( { workflowConfig, nodeType, side, selectable = true, nodeInitialStatus = ChartNodeStatus.Pending, onJobClick, onCanvasClick, }, parentRef, ) => { const isForkMode = nodeType === 'fork'; if (isForkMode && !side) { console.error( "[WorkflowJobsCanvas]: Detect that current type 
is FORK but the `side` prop has't been assigned", ); } const [chartInstance, setChartInstance] = useState<OnLoadParams>(); const [elements, setElements] = useState<ChartElements>([]); // ☢️ WARNING: since we using react-flow hooks here, // an ReactFlowProvider is REQUIRED to wrap this component inside const jobNodes = (useStoreState((store: ReactFlowState) => store.nodes) as unknown) as ChartNodes; const setSelectedElements = useStoreActions((actions) => actions.setSelectedElements); // To decide if need to re-generate jobElements, look out that re-gen elements // will lead all nodes loose it's status!!💣 // // Q: why not put workflowConfig directly as the dependent? // A: At workflowConfig's inner may contains variables' value // and will change during user configuring, but we do not want that lead // re-generate chart elements const workflowIdentifyString = workflowConfig.job_definitions .map((item) => item.name + item.mark || '') .concat(workflowConfig.variables?.map((item) => item.name) || []) .join('|'); useEffect(() => { const jobElements = convertToChartElements( { /** * In workflow detail page workflowConfig.job_definitions are not only job_definitions * they will contain execution details as well */ jobs: workflowConfig.job_definitions, variables: workflowConfig.variables || [], data: { side, status({ raw }: RawDataCol) { if (nodeType === 'execution') { /** If it's execution detail page, show job state always */ return convertExecutionStateToStatus((raw as JobNodeRawData).state); } return nodeInitialStatus; }, }, }, { type: nodeType, selectable }, ); setElements(jobElements); // eslint-disable-next-line }, [nodeType, selectable, workflowIdentifyString]); const resizeHandler = useCallback(() => { chartInstance?.fitView(); }, [chartInstance]); const resizeTargetRef = useResizeObserver(resizeHandler); useImperativeHandle(parentRef, () => { return { nodes: jobNodes, updateNodeStatusById: updateNodeStatus, updateNodeDisabledById: updateNodeDisabled, 
updateNodeInheritanceById: updateNodeInheritance, setSelectedNodes: setSelectedElements, }; }); return ( <Container ref={resizeTargetRef as any}> <ReactFlow elements={elements} onLoad={onLoad} onElementClick={(_, element: FlowElement) => onElementsClick(element)} onPaneClick={onCanvasClick} nodesDraggable={false} zoomOnScroll={false} zoomOnDoubleClick={false} minZoom={1} maxZoom={1} nodeTypes={WorkflowJobNodes} > <Background variant={BackgroundVariant.Dots} gap={12} size={1} color="#E1E6ED" /> <Controls showZoom={false} showInteractive={false} /> </ReactFlow> </Container> ); function onElementsClick(element: FlowElement) { if (isNode(element)) { onJobClick && onJobClick(element as JobNode); } } function onLoad(_reactFlowInstance: OnLoadParams) { setChartInstance(_reactFlowInstance!); // Fit view at next tick // TODO: implement nextTick setImmediate(() => { _reactFlowInstance!.fitView(); }); } function updateNodeStatus(params: UpdateStatusParams) { if (!params.id) return; setElements((els) => { return (els as ChartElements).map((el) => { if (el.id === params.id) { el.data = { ...el.data, status: params.status, }; } return el; }); }); } function updateNodeDisabled(params: UpdateDisabledParams) { if (!params.id) return; setElements((els) => { return (els as ChartElements).map((el) => { if (el.id === params.id) { el.data = { ...el.data, disabled: params.disabled, }; } return el; }); }); } function updateNodeInheritance({ id, whetherInherit }: UpdateInheritanceParams) { if (nodeType !== 'fork' || !id) { return; } const nextElements = cloneDeep(elements as JobNode[]); const target = nextElements.find((item) => item.id === id); if (!target) return; /** * If a node choose reuse flag, * all nodes it DEPEND on should be flagged as reused as well */ if (whetherInherit === true) { target.data.inherited = true; const itDependsOn = target?.data.raw.dependencies.map((item) => item.source); for (let i = nextElements.length - 1; i--; i >= 0) { const item = nextElements[i]; if 
(!isNode(item) || item.data.isGlobal) continue; if (itDependsOn.includes(item.id)) { if (item.data.raw.is_federated) { PubSub.publish(WORKFLOW_JOB_NODE_CHANNELS.change_inheritance, { id: item.id, data: item.data, whetherInherit, }); } item.data.inherited = true; itDependsOn.push(...item.data.raw.dependencies.map((item) => item.source)); } } } /** * If a node choose non-reuse flag, * all nodes DEPEND on it should be flagged as non-reuse too */ if (whetherInherit === false) { target.data.inherited = false; // Collect dependent chain const depsChainCollected: string[] = []; depsChainCollected.push(target?.id); nextElements.forEach((item) => { if (!isNode(item) || item.data.isGlobal) return; const hasAnyDependentOnPrevs = item.data.raw.dependencies.find((dep) => { return depsChainCollected.includes(dep.source); }); if (hasAnyDependentOnPrevs) { if (item.data.raw.is_federated) { PubSub.publish(WORKFLOW_JOB_NODE_CHANNELS.change_inheritance, { id: item.id, data: item.data, whetherInherit, }); } item.data.inherited = false; depsChainCollected.push(item.id); } }); } setElements(nextElements); } }; export default forwardRef(WorkflowJobsCanvas);
bytedance/fedlearner
web_console_v2/client/src/components/WorkflowJobsCanvas/index.tsx
TypeScript
apache-2.0
8,401
import {downgradeInjectable} from '@angular/upgrade/static'; import {Http} from '@angular/http'; import 'rxjs/add/operator/toPromise'; import {API} from '../../../constants'; import {Injectable} from "@angular/core"; declare var angular : any; @Injectable() class CommandService { constructor(private http : Http) { } command(params : {db, language? : any, query : any, limit?}) { let startTime = new Date().getTime(); params.limit = params.limit || 20; params.language = params.language || 'sql'; let url = API + 'command/' + params.db + "/" + params.language + "/-/" + params.limit + '?format=rid,type,version,class,graph'; params.query = params.query.trim(); return this.http.post(url, params.query).toPromise(); } } angular.module('command.services', []).factory( `CommandService`, downgradeInjectable(CommandService)); export {CommandService};
orientechnologies/orientdb-studio
src/app/core/services/command.service.ts
TypeScript
apache-2.0
893
/* * Copyright (c) 2016 Mastermay * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.lostg.spectre.annotation; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; /** * 修饰方法中的参数,用于接收路径中通配符的参数。 * * @author Mastermay * @see RequestMapping */ @Target({ ElementType.PARAMETER }) @Retention(RetentionPolicy.RUNTIME) public @interface Param { int value() default 0; }
mastermay/Spectre
src/main/java/com/lostg/spectre/annotation/Param.java
Java
apache-2.0
1,052
/* * Copyright (C) 2017-2019 Dremio Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.dremio.common; import java.util.Arrays; /** * Convenient way of obtaining and manipulating stack traces for debugging. */ public class StackTrace { private final StackTraceElement[] stackTraceElements; /** * Constructor. Captures the current stack trace. */ public StackTrace() { // skip over the first element so that we don't include this constructor call final StackTraceElement[] stack = Thread.currentThread().getStackTrace(); stackTraceElements = Arrays.copyOfRange(stack, 1, stack.length - 1); } /** * Write the stack trace to a StringBuilder. * @param sb * where to write it * @param indent * how many double spaces to indent each line */ public void writeToBuilder(final StringBuilder sb, final int indent) { // create the indentation string final char[] indentation = new char[indent * 2]; Arrays.fill(indentation, ' '); // write the stack trace in standard Java format for(StackTraceElement ste : stackTraceElements) { sb.append(indentation) .append("at ") .append(ste.getClassName()) .append('.') .append(ste.getMethodName()) .append('(') .append(ste.getFileName()) .append(':') .append(Integer.toString(ste.getLineNumber())) .append(")\n"); } } @Override public String toString() { final StringBuilder sb = new StringBuilder(); writeToBuilder(sb, 0); return sb.toString(); } }
dremio/dremio-oss
common/src/main/java/com/dremio/common/StackTrace.java
Java
apache-2.0
2,127
/** * Copyright 2014 Tomas Rodriguez * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.github.talberto.easybeans.gen; import static org.hamcrest.CoreMatchers.equalTo; import static org.junit.Assert.assertThat; import java.util.List; import java.util.Set; import org.junit.Test; import com.github.talberto.easybeans.gen.MetaImport; import com.github.talberto.easybeans.gen.MetaType; import com.google.common.collect.Sets; import com.google.common.reflect.TypeToken; /** * * @author Tomas Rodriguez (rodriguez@progiweb.com) * */ public class MetaTypeTest { static final MetaImport sListImport = new MetaImport("java.util.List"); static final MetaImport sStringImport = new MetaImport("java.lang.String"); @Test public void listOfString() { @SuppressWarnings("serial") MetaType type = MetaType.of(new TypeToken<List<String>>() {}); Set<MetaImport> expectedImports = Sets.newHashSet(sListImport, sStringImport); assertThat("The MetaType.name is wrong", type.getName(), equalTo("List<String>")); assertThat("The MetaType.getImports are wrong", type.getImports(), equalTo(expectedImports)); } @Test public void string() { @SuppressWarnings("serial") MetaType type = MetaType.of(new TypeToken<String>() {}); Set<MetaImport> expectedImports = Sets.newHashSet(sStringImport); assertThat("The MetaType.name is wrong", type.getName(), equalTo("String")); assertThat("The MetaType.getImports are wrong", type.getImports(), equalTo(expectedImports)); } }
talberto/easybeans
gen/src/test/java/com/github/talberto/easybeans/gen/MetaTypeTest.java
Java
apache-2.0
2,073
/*global QUnit */ sap.ui.define([ "sap/m/Button", "sap/m/CheckBox", "sap/m/OverflowToolbar", "sap/m/OverflowToolbarButton", "sap/m/Panel", "sap/ui/dt/DesignTime", "sap/ui/dt/OverlayRegistry", "sap/ui/fl/write/api/ChangesWriteAPI", "sap/ui/rta/command/CommandFactory", "sap/ui/rta/plugin/Combine", "sap/ui/rta/Utils", "sap/ui/thirdparty/sinon-4", "test-resources/sap/ui/rta/qunit/RtaQunitUtils", "sap/ui/core/mvc/View", "sap/ui/core/Core" ], function( Button, CheckBox, OverflowToolbar, OverflowToolbarButton, Panel, DesignTime, OverlayRegistry, ChangesWriteAPI, CommandFactory, CombinePlugin, Utils, sinon, RtaQunitUtils, View, oCore ) { "use strict"; var DEFAULT_DTM = "default"; var oMockedAppComponent = RtaQunitUtils.createAndStubAppComponent(sinon, "Dummy"); var sandbox = sinon.createSandbox(); var fnSetOverlayDesigntimeMetadata = function (oOverlay, oDesignTimeMetadata, bEnabled) { bEnabled = bEnabled === undefined || bEnabled === null ? true : bEnabled; if (oDesignTimeMetadata === DEFAULT_DTM) { oDesignTimeMetadata = { actions: { combine: { changeType: "combineStuff", changeOnRelevantContainer: true, isEnabled: bEnabled } } }; } oOverlay.setDesignTimeMetadata(oDesignTimeMetadata); }; //Designtime Metadata with fake isEnabled function (returns false) var oDesignTimeMetadata1 = { actions: { combine: { changeType: "combineStuff", changeOnRelevantContainer: true, isEnabled: function() { return false; } } } }; //Designtime Metadata with fake isEnabled function (returns true) var oDesignTimeMetadata2 = { actions: { combine: { changeType: "combineStuff", changeOnRelevantContainer: true, isEnabled: function() { return true; } } } }; // DesignTime Metadata without changeType var oDesignTimeMetadata3 = { actions: { combine: { changeOnRelevantContainer: true, isEnabled: true } } }; // DesignTime Metadata without changeOnRelevantContainer var oDesigntimeMetadata4 = { actions: { combine: { changeType: "combineStuff", isEnabled: function() { return true; } } } }; //DesignTime 
Metadata with different changeType var oDesignTimeMetadata5 = { actions: { combine: { changeType: "combineOtherStuff", changeOnRelevantContainer: true, isEnabled: true } } }; QUnit.module("Given a designTime and combine plugin are instantiated", { beforeEach: function(assert) { var done = assert.async(); sandbox.stub(ChangesWriteAPI, "getChangeHandler").resolves(); this.oCommandFactory = new CommandFactory(); this.oCombinePlugin = new CombinePlugin({ commandFactory: this.oCommandFactory }); this.oButton1 = new Button("button1"); this.oButton2 = new Button("button2"); this.oButton3 = new Button("button3"); this.oButton4 = new Button("button4"); this.oButton5 = new Button("button5"); this.oPanel = new Panel("panel", { content: [ this.oButton1, this.oButton2, this.oButton3, this.oButton4 ] }); this.oPanel2 = new Panel("panel2", { content: [ this.oButton5 ] }); this.oOverflowToolbarButton1 = new OverflowToolbarButton("owerflowbutton1"); this.oButton6 = new Button("button6"); this.oCheckBox1 = new CheckBox("checkbox1"); this.OverflowToolbar = new OverflowToolbar("OWFlToolbar", { content: [ this.oOverflowToolbarButton1, this.oButton6, this.oCheckBox1 ] }); this.oView = new View({ content: [ this.oPanel, this.oPanel2, this.OverflowToolbar ] }).placeAt("qunit-fixture"); oCore.applyChanges(); this.oDesignTime = new DesignTime({ rootElements: [this.oPanel, this.oPanel2, this.OverflowToolbar], plugins: [this.oCombinePlugin], designTimeMetadata: { "sap.m.Button": { actions: { combine: { changeType: "combineStuff", changeOnRelevantContainer: true, isEnabled: true } } }, "sap.m.OverflowToolbarButton": { actions: { combine: { changeType: "combineStuff", changeOnRelevantContainer: true, isEnabled: true } } }, "sap.m.CheckBox": { actions: { combine: { changeType: "combineOtherStuff", changeOnRelevantContainer: true, isEnabled: true } } } } }); this.oDesignTime.attachEventOnce("synced", function() { this.oButton1Overlay = OverlayRegistry.getOverlay(this.oButton1); 
this.oButton2Overlay = OverlayRegistry.getOverlay(this.oButton2); this.oButton3Overlay = OverlayRegistry.getOverlay(this.oButton3); this.oButton4Overlay = OverlayRegistry.getOverlay(this.oButton4); this.oButton5Overlay = OverlayRegistry.getOverlay(this.oButton5); this.oButton6Overlay = OverlayRegistry.getOverlay(this.oButton6); this.oPanelOverlay = OverlayRegistry.getOverlay(this.oPanel); this.oPanel2Overlay = OverlayRegistry.getOverlay(this.oPanel2); this.oOverflowToolbarButton1Overlay = OverlayRegistry.getOverlay(this.oOverflowToolbarButton1); this.oCheckBox1Overlay = OverlayRegistry.getOverlay(this.oCheckBox1); this.OverflowToolbarOverlay = OverlayRegistry.getOverlay(this.OverflowToolbar); done(); }.bind(this)); }, afterEach: function() { sandbox.restore(); this.oDesignTime.destroy(); this.oPanel.destroy(); this.oPanel2.destroy(); this.OverflowToolbar.destroy(); } }, function() { QUnit.test("when an overlay has no combine action in designTime metadata", function(assert) { fnSetOverlayDesigntimeMetadata(this.oButton1Overlay, {}); fnSetOverlayDesigntimeMetadata(this.oButton2Overlay, {}); assert.strictEqual( this.oCombinePlugin.isAvailable([this.oButton1Overlay]), false, "isAvailable is called and returns false" ); assert.strictEqual( this.oCombinePlugin.isEnabled([this.oButton1Overlay]), false, "isEnabled is called and returns false" ); return Promise.resolve() .then(this.oCombinePlugin._isEditable.bind(this.oCombinePlugin, this.oButton1Overlay)) .then(function(bEditable) { assert.strictEqual( bEditable, false, "then the overlay is not editable" ); }); }); QUnit.test("when an overlay has a combine action in designTime metadata", function(assert) { fnSetOverlayDesigntimeMetadata(this.oButton1Overlay, DEFAULT_DTM); fnSetOverlayDesigntimeMetadata(this.oButton2Overlay, oDesignTimeMetadata2); sandbox.stub(Utils, "checkSourceTargetBindingCompatibility").returns(true); sandbox.stub(this.oCombinePlugin, "hasChangeHandler").resolves(true); assert.strictEqual( 
this.oCombinePlugin.isAvailable([this.oButton1Overlay, this.oButton2Overlay]), true, "isAvailable is called and returns true" ); assert.strictEqual( this.oCombinePlugin.isEnabled([this.oButton1Overlay, this.oButton2Overlay]), true, "isEnabled is called and returns true" ); return this.oCombinePlugin._isEditable(this.oButton1Overlay) .then(function(bEditable) { assert.strictEqual( bEditable, true, "then the overlay is editable" ); }); }); QUnit.test("when two elements have different binding context", function(assert) { sandbox.stub(Utils, "checkSourceTargetBindingCompatibility").returns(false); assert.strictEqual( this.oCombinePlugin.isAvailable([this.oButton1Overlay, this.oButton2Overlay]), true, "isAvailable is called and returns true" ); assert.strictEqual( this.oCombinePlugin.isEnabled([this.oButton1Overlay, this.oButton2Overlay]), false, "isEnabled is called and returns false" ); }); QUnit.test("when only one control is specified", function(assert) { fnSetOverlayDesigntimeMetadata(this.oButton1Overlay, DEFAULT_DTM); assert.strictEqual( this.oCombinePlugin.isAvailable([this.oButton1Overlay]), false, "isAvailable is called and returns false" ); assert.strictEqual( this.oCombinePlugin.isEnabled([this.oButton1Overlay]), false, "isEnabled is called and returns false" ); }); QUnit.test("when controls which enabled function delivers false are specified", function(assert) { fnSetOverlayDesigntimeMetadata(this.oButton1Overlay, oDesignTimeMetadata1); fnSetOverlayDesigntimeMetadata(this.oButton2Overlay, oDesignTimeMetadata1); assert.strictEqual( this.oCombinePlugin.isAvailable([this.oButton1Overlay, this.oButton2Overlay]), true, "isAvailable is called and returns true" ); assert.strictEqual( this.oCombinePlugin.isEnabled([this.oButton1Overlay, this.oButton2Overlay]), false, "isEnabled is called and returns false" ); }); QUnit.test("when a control without change type is specified", function(assert) { fnSetOverlayDesigntimeMetadata(this.oButton1Overlay, DEFAULT_DTM); 
fnSetOverlayDesigntimeMetadata(this.oButton4Overlay, oDesignTimeMetadata3); assert.strictEqual( this.oCombinePlugin.isAvailable([this.oButton1Overlay]), false, "isAvailable is called and returns false" ); assert.strictEqual( this.oCombinePlugin.isEnabled([this.oButton1Overlay]), false, "isEnabled is called and returns false" ); }); QUnit.test("when controls from different relevant containers are specified", function(assert) { fnSetOverlayDesigntimeMetadata(this.oButton1Overlay, DEFAULT_DTM); fnSetOverlayDesigntimeMetadata(this.oButton5Overlay, DEFAULT_DTM); assert.strictEqual( this.oCombinePlugin.isAvailable([this.oButton1Overlay]), false, "isAvailable is called and returns false" ); assert.strictEqual( this.oCombinePlugin.isEnabled([this.oButton1Overlay]), false, "isEnabled is called and returns false" ); }); QUnit.test("when handleCombine is called with two elements, being triggered on the second element", function(assert) { var oFireElementModifiedSpy = sandbox.spy(this.oCombinePlugin, "fireElementModified"); var oGetCommandForSpy = sandbox.spy(this.oCommandFactory, "getCommandFor"); fnSetOverlayDesigntimeMetadata(this.oButton1Overlay, DEFAULT_DTM); fnSetOverlayDesigntimeMetadata(this.oButton2Overlay, DEFAULT_DTM); return this.oCombinePlugin.handleCombine([this.oButton1Overlay, this.oButton2Overlay], this.oButton2) .then(function() { assert.ok(oFireElementModifiedSpy.calledOnce, "fireElementModified is called once"); assert.ok(oGetCommandForSpy.calledWith(this.oButton2), "command creation is triggered with correct context element"); }.bind(this)) .catch(function (oError) { assert.ok(false, "catch must never be called - Error: " + oError); }); }); QUnit.test("when an overlay has a combine action designTime metadata which has no changeOnRelevantContainer", function(assert) { fnSetOverlayDesigntimeMetadata(this.oButton1Overlay, oDesigntimeMetadata4); return Promise.resolve() .then(this.oCombinePlugin._isEditable.bind(this.oCombinePlugin, this.oButton1Overlay)) 
.then(function(bEditable) { assert.strictEqual(bEditable, false, "then the overlay is not editable"); }); }); QUnit.test("when Controls of different type with same change type are specified", function (assert) { assert.expect(9); fnSetOverlayDesigntimeMetadata(this.oOverflowToolbarButton1Overlay, DEFAULT_DTM); fnSetOverlayDesigntimeMetadata(this.oButton6Overlay, DEFAULT_DTM); sandbox.stub(Utils, "checkSourceTargetBindingCompatibility").returns(true); assert.strictEqual( this.oCombinePlugin.isAvailable([this.oOverflowToolbarButton1Overlay, this.oButton6Overlay]), true, "isAvailable is called and returns true" ); assert.strictEqual( this.oCombinePlugin.isEnabled([this.oOverflowToolbarButton1Overlay, this.oButton6Overlay]), true, "isEnabled is called and returns true" ); var bIsAvailable = true; sinon.stub(this.oCombinePlugin, "isAvailable").callsFake(function (aElementOverlays) { assert.equal(aElementOverlays[0].getId(), this.oButton6Overlay.getId(), "the 'available' function calls isAvailable with the correct overlay"); return bIsAvailable; }.bind(this)); sinon.stub(this.oCombinePlugin, "handleCombine").callsFake(function (aElementOverlays, oCombineElement) { assert.equal(aElementOverlays[0].getId(), this.oButton6Overlay.getId(), "the 'handler' method is called with the right overlay"); assert.equal(oCombineElement.getId(), this.oButton6.getId(), "the 'handler' method is called with the right combine element"); }.bind(this)); var aMenuItems = this.oCombinePlugin.getMenuItems([this.oButton6Overlay]); assert.equal(aMenuItems[0].id, "CTX_GROUP_FIELDS", "'getMenuItems' returns the context menu item for the plugin"); aMenuItems[0].handler([this.oButton6Overlay], { contextElement: this.oButton6 }); aMenuItems[0].enabled([this.oButton6Overlay]); bIsAvailable = false; assert.equal(this.oCombinePlugin.getMenuItems([this.oButton6Overlay]).length, 0, "and if plugin is not available for the overlay, no menu items are returned"); }); QUnit.test("when Controls of different type 
with different change type are specified", function(assert) { fnSetOverlayDesigntimeMetadata(this.oOverflowToolbarButton1Overlay, DEFAULT_DTM); fnSetOverlayDesigntimeMetadata(this.oCheckBox1Overlay, oDesignTimeMetadata5); assert.strictEqual( this.oCombinePlugin.isAvailable([this.oOverflowToolbarButton1Overlay, this.oCheckBox1Overlay]), false, "isAvailable is called and returns false" ); assert.strictEqual( this.oCombinePlugin.isEnabled([this.oOverflowToolbarButton1Overlay, this.oCheckBox1Overlay]), false, "isEnabled is called and returns false" ); }); QUnit.test("when the relevant container does not have a stable id", function(assert) { fnSetOverlayDesigntimeMetadata(this.oOverflowToolbarButton1Overlay, DEFAULT_DTM); sandbox.stub(this.oCombinePlugin, "hasStableId").callsFake(function(oOverlay) { if (oOverlay === this.OverflowToolbarOverlay) { return false; } return true; }.bind(this)); return this.oCombinePlugin._isEditable(this.oOverflowToolbarButton1Overlay) .then(function(bEditable) { assert.strictEqual( bEditable, false, "_isEditable returns false" ); }); }); }); QUnit.done(function() { oMockedAppComponent.destroy(); jQuery("#qunit-fixture").hide(); }); });
SAP/openui5
src/sap.ui.rta/test/sap/ui/rta/qunit/plugin/Combine.qunit.js
JavaScript
apache-2.0
14,533
using System;
using System.IO;
using System.Linq;
using System.Web.Mvc;

namespace FileCounterMVC.Controllers
{
    /// <summary>
    /// Serves the home page, whose model is the total number of files
    /// under the site's physical root folder.
    /// </summary>
    public class HomeController : Controller
    {
        /// <summary>
        /// Counts every file (recursively) under the site root and passes the
        /// count to the view as its model.
        /// </summary>
        /// <returns>The Index view with an <c>int</c> file count as the model.</returns>
        public ActionResult Index()
        {
            string siteFolder;

            // The "home" environment variable is present when running on Azure
            // App Service; its wwwroot subfolder is the deployed site content.
            if (Environment.GetEnvironmentVariable("home") != null)
            {
                // Maps to the physical path of your site in Azure.
                siteFolder = Environment.ExpandEnvironmentVariables(@"%HOME%\site\wwwroot");
            }
            else
            {
                // Maps to the current site's root physical path. Allows us to run locally.
                siteFolder = Server.MapPath("/");
            }

            // EnumerateFiles streams entries lazily instead of materializing the
            // whole path array (GetFiles) just to read its Length — same count,
            // far less allocation on large deployments.
            int fileCount = Directory.EnumerateFiles(siteFolder, "*.*", SearchOption.AllDirectories).Count();

            return View(model: fileCount);
        }
    }
}
projectkudu/FileCounterMVC
FileCounterMVC/Controllers/HomeController.cs
C#
apache-2.0
891
/*
 * Copyright 2008-2009 The Kuali Foundation
 *
 * Licensed under the Educational Community License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.opensource.org/licenses/ecl2.php
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.kuali.kfs.module.purap.document.validation.impl;

import java.util.List;

import org.apache.commons.lang.StringUtils;
import org.kuali.kfs.module.purap.PurapKeyConstants;
import org.kuali.kfs.module.purap.PurapPropertyConstants;
import org.kuali.kfs.module.purap.businessobject.PurchaseOrderVendorStipulation;
import org.kuali.kfs.module.purap.document.PurchaseOrderDocument;
import org.kuali.kfs.sys.KFSConstants;
import org.kuali.kfs.sys.document.validation.GenericValidation;
import org.kuali.kfs.sys.document.validation.event.AttributedDocumentEvent;
import org.kuali.rice.krad.util.GlobalVariables;

/**
 * Validates the Stipulation tab of a purchase order document: every vendor
 * stipulation must carry a non-blank description. A field-level error is
 * recorded in the global message map for each blank description found.
 */
public class PurchaseOrderProcessVendorStipulationValidation extends GenericValidation {

    /**
     * Checks each vendor stipulation on the purchase order for a description.
     *
     * @param event the validation event carrying the purchase order document
     * @return false if any vendor stipulation description is blank, true otherwise
     */
    public boolean validate(AttributedDocumentEvent event) {
        PurchaseOrderDocument document = (PurchaseOrderDocument) event.getDocument();
        List<PurchaseOrderVendorStipulation> stipulations = document.getPurchaseOrderVendorStipulations();

        boolean allDescriptionsPresent = true;
        for (int index = 0; index < stipulations.size(); index++) {
            PurchaseOrderVendorStipulation stipulation = stipulations.get(index);
            if (StringUtils.isBlank(stipulation.getVendorStipulationDescription())) {
                // Error key addresses the specific collection element, e.g.
                // document.purchaseOrderVendorStipulations[2].vendorStipulationDescription
                String errorPath = KFSConstants.DOCUMENT_PROPERTY_NAME + "."
                        + PurapPropertyConstants.VENDOR_STIPULATION + "[" + index + "]."
                        + PurapPropertyConstants.VENDOR_STIPULATION_DESCRIPTION;
                GlobalVariables.getMessageMap().putError(errorPath, PurapKeyConstants.ERROR_STIPULATION_DESCRIPTION);
                allDescriptionsPresent = false;
            }
        }
        return allDescriptionsPresent;
    }
}
Ariah-Group/Finance
af_webapp/src/main/java/org/kuali/kfs/module/purap/document/validation/impl/PurchaseOrderProcessVendorStipulationValidation.java
Java
apache-2.0
2,352
package org.asteriskjava.manager.internal;

import org.asteriskjava.manager.event.UserEvent;
import org.asteriskjava.util.AstUtil;
import org.asteriskjava.util.Log;
import org.asteriskjava.util.LogFactory;
import org.asteriskjava.util.ReflectionUtil;

import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * Abstract base class for reflection based builders.
 * <p>
 * Subclasses use {@link #setAttributes(Object, Map, Set)} to copy a map of
 * raw attribute name/value pairs onto a target object by locating matching
 * setter methods via reflection and converting values to the setter's
 * parameter type.
 */
abstract class AbstractBuilder
{
    protected final Log logger = LogFactory.getLog(getClass());

    /**
     * Copies each entry of {@code attributes} onto {@code target} by invoking
     * a matching setter found through {@link ReflectionUtil#getSetters}.
     * <p>
     * Conversion rules, in order: Boolean setters get {@link AstUtil#isTrue};
     * String-compatible setters get the raw value (normalized to {@code null}
     * via {@link AstUtil#isNull}); Map-compatible setters get a map parsed
     * from "key=value" lines; anything else is constructed through a
     * single-String-argument constructor of the parameter type.
     *
     * @param target            object whose setters are invoked
     * @param attributes        raw attribute name to value pairs
     * @param ignoredAttributes attribute names to skip entirely, may be null
     */
    @SuppressWarnings("unchecked")
    protected void setAttributes(Object target, Map<String, Object> attributes, Set<String> ignoredAttributes)
    {
        Map<String, Method> setters;
        setters = ReflectionUtil.getSetters(target.getClass());
        for (Map.Entry<String, Object> entry : attributes.entrySet())
        {
            Object value;
            final Class<?> dataType;
            Method setter;
            String setterName;

            if (ignoredAttributes != null && ignoredAttributes.contains(entry.getKey()))
            {
                continue;
            }

            setterName = ReflectionUtil.stripIllegalCharacters(entry.getKey());

            /*
             * The source property needs special handling as it is already
             * defined in java.util.EventObject (the base class of
             * ManagerEvent), so we have to translate it.
             */
            if ("source".equals(setterName))
            {
                setterName = "src";
            }

            /*
             * The class property needs to be renamed. It is used in MusicOnHoldEvent.
             */
            if ("class".equals(setterName))
            {
                setterName = "classname";
            }

            setter = setters.get(setterName);
            if (setter == null && !setterName.endsWith("s")) // no exact match => try plural
            {
                setter = setters.get(setterName + "s");
                // but only for maps: a plural fallback only makes sense when the
                // setter accepts a Map parameter; otherwise discard the match
                if (setter != null && !(setter.getParameterTypes()[0].isAssignableFrom(Map.class)))
                {
                    setter = null;
                }
            }

            // it seems silly to warn if it's a user event -- maybe it was intentional
            if (setter == null && !(target instanceof UserEvent))
            {
                logger.debug("Unable to set property '" + entry.getKey() + "' to '" + entry.getValue() + "' on "
                        + target.getClass().getName() + ": no setter. Please fix");
            }

            // no usable setter found: skip this attribute
            if (setter == null)
            {
                continue;
            }

            dataType = setter.getParameterTypes()[0];

            if (dataType == Boolean.class)
            {
                value = AstUtil.isTrue(entry.getValue());
            }
            else if (dataType.isAssignableFrom(String.class))
            {
                value = entry.getValue();
                if (AstUtil.isNull(value))
                {
                    value = null;
                }
            }
            else if (dataType.isAssignableFrom(Map.class))
            {
                // Map-valued attributes arrive either as a list of "k=v" lines
                // or as a single "k=v" string; anything else maps to null.
                if (entry.getValue() instanceof List)
                {
                    List<String> list = (List<String>) entry.getValue();
                    value = buildMap(list.toArray(new String[list.size()]));
                }
                else if (entry.getValue() instanceof String)
                {
                    value = buildMap((String) entry.getValue());
                }
                else
                {
                    value = null;
                }
            }
            else
            {
                // Fallback: convert via the target type's String constructor,
                // e.g. Integer(String). Failures are logged and the attribute
                // is skipped rather than aborting the whole build.
                try
                {
                    Constructor<?> constructor = dataType.getConstructor(new Class[]{String.class});
                    value = constructor.newInstance(entry.getValue());
                }
                catch (Exception e)
                {
                    logger.error("Unable to convert value '" + entry.getValue() + "' of property '" + entry.getKey()
                            + "' on " + target.getClass().getName() + " to required type " + dataType, e);
                    continue;
                }
            }

            try
            {
                setter.invoke(target, value);
            }
            catch (Exception e)
            {
                logger.error("Unable to set property '" + entry.getKey() + "' to '" + entry.getValue() + "' on "
                        + target.getClass().getName(), e);
            }
        }
    }

    /**
     * Parses "key=value" lines into an insertion-ordered map.
     * Lines without an '=' past the first character are logged and skipped.
     *
     * @param lines the raw lines, may be null
     * @return the parsed map, or null if {@code lines} is null
     */
    private Map<String, String> buildMap(String... lines)
    {
        if (lines == null)
        {
            return null;
        }

        // LinkedHashMap preserves the order the lines were received in
        final Map<String, String> map = new LinkedHashMap<String, String>();
        for (String line : lines)
        {
            final int index = line.indexOf('=');
            // index > 0 also rejects lines starting with '=' (empty key)
            if (index > 0)
            {
                final String key = line.substring(0, index);
                final String value = line.substring(index + 1, line.length());
                map.put(key, value);
            }
            else
            {
                logger.warn("Malformed line '" + line + "' for a map property");
            }
        }
        return map;
    }
}
xvart/asterisk-java
src/main/java/org/asteriskjava/manager/internal/AbstractBuilder.java
Java
apache-2.0
5,390
package com.eeontheway.android.applocker.main; import android.os.Bundle; import android.app.Fragment; import android.util.Log; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import com.eeontheway.android.applocker.R; /** * A simple {@link Fragment} subclass. */ public class SummaryFragment extends Fragment { public SummaryFragment() { // Required empty public constructor } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { // Inflate the layout for this fragment return inflater.inflate(R.layout.fragment_summary, container, false); } }
lhzheng880828/AndroidApp
AppLockPro/app/src/main/java/com/eeontheway/android/applocker/main/SummaryFragment.java
Java
apache-2.0
730
'use strict';

/**
 * A location.
 *
 * @param options.address string The top address line of the delivery pickup options.
 * @param options.address_2 string The second address line of the delivery pickup options
 *   such as the apartment number. This field is optional.
 * @param options.city string The city of the delivery pickup options.
 * @param options.state string The state of the delivery pickup options such as "CA".
 * @param options.postal_code string The postal code of the delivery pickup options.
 * @param options.country string The country of the delivery pickup options such as "US".
 *   Must be a two-character (ISO 3166-1 alpha-2) code; invalid values are ignored.
 */
class Location {
  constructor(options = {}) {
    for (const [key, value] of Object.entries(options)) {
      // Reject country codes that are not exactly two characters.
      // BUG FIX: the previous lodash-based loop did `return false` here,
      // which *breaks* `_.each` iteration and silently dropped every
      // option processed after an invalid country. We skip only the
      // offending key and keep assigning the rest.
      if (key === 'country' && (!value || value.length !== 2)) {
        continue;
      }
      this[key] = value;
    }
  }
}

// Guarded export so the class also evaluates cleanly in ES-module contexts
// (where `module` is undefined); CommonJS consumers are unaffected.
if (typeof module !== 'undefined' && module.exports !== undefined) {
  module.exports = Location;
}
mjk/uber-rush
lib/Location.js
JavaScript
apache-2.0
871
/**
 * hub-detect
 *
 * Copyright (C) 2019 Black Duck Software, Inc.
 * http://www.blackducksoftware.com/
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package com.blackducksoftware.integration.hub.detect.detector.sbt;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.synopsys.integration.bdio.graph.MutableDependencyGraph;
import com.synopsys.integration.bdio.graph.MutableMapDependencyGraph;
import com.synopsys.integration.bdio.model.dependency.Dependency;
import com.synopsys.integration.bdio.model.externalid.ExternalId;
import com.synopsys.integration.bdio.model.externalid.ExternalIdFactory;

/**
 * Converts a parsed SBT resolution report into a dependency graph module.
 * <p>
 * Each dependency revision in the report records its "callers" (the modules
 * that pulled it in); a caller matching the report's own root coordinates
 * attaches the dependency directly to the graph root, any other caller is
 * added as a parent/child edge.
 */
public class SbtDependencyResolver {
    private final Logger logger = LoggerFactory.getLogger(SbtDependencyResolver.class);

    // Factory used to build Maven-style (group:artifact:version) external ids.
    public ExternalIdFactory externalIdFactory;

    public SbtDependencyResolver(final ExternalIdFactory externalIdFactory) {
        this.externalIdFactory = externalIdFactory;
    }

    /**
     * Builds an {@link SbtDependencyModule} (name, version, org, configuration
     * and populated graph) from a single SBT report.
     *
     * @param report the parsed SBT resolution report
     * @return a module whose graph mirrors the report's caller relationships
     */
    public SbtDependencyModule resolveReport(final SbtReport report) {
        // The report's own coordinates act as the graph root.
        final ExternalId rootId = externalIdFactory.createMavenExternalId(report.getOrganisation(), report.getModule(), report.getRevision());
        logger.debug("Created external id: " + rootId.toString());

        final MutableDependencyGraph graph = new MutableMapDependencyGraph();

        logger.debug("Dependencies found: " + report.getDependencies().size());
        report.getDependencies().forEach(module -> {
            logger.debug("Revisions found: " + module.getRevisions().size());
            module.getRevisions().forEach(revision -> {
                logger.debug("Callers found: " + revision.getCallers().size());
                final ExternalId id = externalIdFactory.createMavenExternalId(module.getOrganisation(), module.getName(), revision.getName());
                final Dependency child = new Dependency(module.getName(), revision.getName(), id);
                revision.getCallers().forEach(caller -> {
                    final ExternalId parentId = externalIdFactory.createMavenExternalId(caller.getOrganisation(), caller.getName(), caller.getRevision());
                    final Dependency parent = new Dependency(caller.getName(), caller.getRevision(), parentId);
                    logger.debug("Caller id: " + parentId.toString());
                    // A caller equal to the root means this dependency is a
                    // direct dependency of the module being resolved.
                    if (rootId.equals(parentId)) {
                        graph.addChildToRoot(child);
                    } else {
                        graph.addParentWithChild(parent, child);
                    }
                });
            });
        });

        final SbtDependencyModule module = new SbtDependencyModule();
        module.name = report.getModule();
        module.version = report.getRevision();
        module.org = report.getOrganisation();

        module.graph = graph;
        module.configuration = report.getConfiguration();
        return module;
    }
}
blackducksoftware/hub-detect
hub-detect/src/main/groovy/com/blackducksoftware/integration/hub/detect/detector/sbt/SbtDependencyResolver.java
Java
apache-2.0
3,713
#!/usr/bin/env python import os import sys import time sys.path.append(os.path.join(os.path.dirname(__file__), "../../pox")) import argparse from collections import defaultdict import networkx as nx from pox.lib.packet.ethernet import ethernet from pox.openflow.libopenflow_01 import ofp_flow_mod_command_rev_map from pox.openflow.libopenflow_01 import OFPT_HELLO from pox.openflow.libopenflow_01 import OFPT_FEATURES_REQUEST from pox.openflow.libopenflow_01 import OFPT_FEATURES_REPLY from pox.openflow.libopenflow_01 import OFPT_SET_CONFIG from pox.openflow.libopenflow_01 import OFPFC_DELETE_STRICT from pox.openflow.libopenflow_01 import OFPT_STATS_REQUEST from pox.openflow.libopenflow_01 import OFPT_VENDOR from pox.openflow.libopenflow_01 import OFPT_GET_CONFIG_REQUEST from pox.openflow.libopenflow_01 import OFPT_GET_CONFIG_REPLY from pox.openflow.libopenflow_01 import OFPT_STATS_REPLY from hb_utils import pkt_info from hb_shadow_table import ShadowFlowTable from hb_race_detector import RaceDetector from hb_race_detector import predecessor_types # To make sure all events are registered from hb_json_event import * from hb_events import * from hb_sts_events import * from hb_utils import dfs_edge_filter from hb_utils import just_mid_iter from hb_utils import pretty_match # # Do not import any STS types! We would like to be able to run this offline # from a trace file without having to depend on STS. 
# OFP_COMMANDS = {v: k for k, v in ofp_flow_mod_command_rev_map.iteritems()} # OF Message types to skip from the trace SKIP_MSGS = [OFPT_HELLO, OFPT_VENDOR, OFPT_FEATURES_REQUEST, OFPT_FEATURES_REPLY, OFPT_SET_CONFIG, OFPT_GET_CONFIG_REQUEST, OFPT_GET_CONFIG_REPLY, OFPT_STATS_REQUEST, OFPT_STATS_REPLY] class HappensBeforeGraph(object): def __init__(self, results_dir=None, add_hb_time=False, rw_delta=5, ww_delta=1, filter_rw=False, ignore_ethertypes=None, no_race=False, alt_barr=False, disable_path_cache=True, data_deps=False, verify_and_minimize_only=False, is_minimized=False): self.results_dir = results_dir self.g = nx.DiGraph() self.disable_path_cache = disable_path_cache self._cached_paths = None self._cached_reverse_paths = None self.events_by_id = dict() self.events_with_reads_writes = list() self.events_by_pid_out = defaultdict(list) self.events_by_mid_out = defaultdict(list) # events that have a mid_in/mid_in and are still looking for a pid_out/mid_out to match self.events_pending_pid_in = defaultdict(list) self.events_pending_mid_in = defaultdict(list) # for barrier pre rule self.events_before_next_barrier = defaultdict(list) # for barrier post rule self.most_recent_barrier = dict() # for races self.race_detector = RaceDetector( self, filter_rw=filter_rw, add_hb_time=add_hb_time, ww_delta=ww_delta, rw_delta=rw_delta) self.ww_delta = ww_delta self.rw_delta = rw_delta # Only mark time edges in the RaceDetetcor self.add_hb_time = False # Just to keep track of how many HB edges where added based on time self._time_hb_rw_edges_counter = 0 self._time_hb_ww_edges_counter = 0 self.ignore_ethertypes = check_list(ignore_ethertypes) self.no_race = no_race self.packet_traces = None self.host_sends = {} # Handled messages from the controller to the switch self.msg_handles = {} # Messages from the switch to the controller self.msgs = {} self.alt_barr = alt_barr self.versions = {} # add read-after-write dependency edges self.data_deps = data_deps self.shadow_tables = 
dict() self.covered_races = dict() self.verify_and_minimize_only = verify_and_minimize_only self.is_minimized = is_minimized @property def events(self): for _, data in self.g.nodes_iter(True): yield data['event'] @property def predecessors(self): """Get predecessor events for all events. """ for eid, data in self.g.nodes(data=True): this_predecessors = set() for pred in self.g.predecessors_iter(eid): this_predecessors.add(self.g.node[pred]['event']) yield (data['event'],this_predecessors) def _add_to_lookup_tables(self, event): if hasattr(event, 'pid_out'): for x in event.pid_out: self.events_by_pid_out[x].append(event) if hasattr(event, 'mid_out'): for x in event.mid_out: self.events_by_mid_out[x].append(event) self.lookup_tables = [ #( field name, # condition to be included, # search key #), (self.events_pending_pid_in, lambda x: hasattr(x, 'pid_in'), lambda x: x.pid_in ), (self.events_pending_mid_in, lambda x: hasattr(x, 'mid_in'), lambda x: x.mid_in ), ] for entry in self.lookup_tables: table, condition, key = entry if condition(event): table[key(event)].append(event) def _update_event_is_linked_pid_in(self, event): if event in self.events_pending_pid_in[event.pid_in]: self.events_pending_pid_in[event.pid_in].remove(event) def _update_event_is_linked_mid_in(self, event): if event in self.events_pending_mid_in[event.mid_in]: self.events_pending_mid_in[event.mid_in].remove(event) def update_path_cache(self): print "Updating has_path path cache..." 
self._cached_paths = nx.all_pairs_shortest_path_length(self.g) def has_path(self, src_eid, dst_eid, bidirectional=True, use_path_cache=True): if self.disable_path_cache or not use_path_cache: return nx.has_path(self.g, src_eid, dst_eid) or (bidirectional and nx.has_path(self.g, dst_eid, src_eid)) else: if self._cached_paths is None: self.update_path_cache() if dst_eid in self._cached_paths[src_eid]: return True if bidirectional: if src_eid in self._cached_paths[dst_eid]: return True return False def _add_edge(self, before, after, sanity_check=True, update_path_cache=True, **attrs): if sanity_check and before.type not in predecessor_types[after.type]: print "Warning: Not a valid HB edge: "+before.typestr+" ("+str(before.eid)+") < "+after.typestr+" ("+str(after.eid)+")" assert False src, dst = before.eid, after.eid if self.g.has_edge(src, dst): rel = self.g.edge[src][dst]['rel'] # Allow edge to be added multiple times because of the same relation # This is useful for time based edges if rel != attrs['rel']: raise ValueError( "Edge already added %d->%d and relation: %s" % (src, dst, rel)) self.g.add_edge(before.eid, after.eid, attrs) if update_path_cache: # TODO(jm): do incremental update later. But for now, this is sufficient. 
self._cached_paths = None def _rule_01_pid(self, event): # pid_out -> pid_in if hasattr(event, 'pid_in'): if event.pid_in in self.events_by_pid_out: for other in self.events_by_pid_out[event.pid_in]: self._add_edge(other, event, rel='pid') self._update_event_is_linked_pid_in(event) # TODO(jm): remove by reordering first # recheck events added in an order different from the trace order if hasattr(event, 'pid_out'): for pid_out in event.pid_out: if pid_out in self.events_pending_pid_in: for other in self.events_pending_pid_in[pid_out][:]: # copy list [:], so we can remove from it self._add_edge(event, other, rel='pid') self._update_event_is_linked_pid_in(other) def _rule_02_mid(self, event): # mid_out -> mid_in if hasattr(event, 'mid_in'): if event.mid_in in self.events_by_mid_out: for other in self.events_by_mid_out[event.mid_in]: self._add_edge(other, event, rel='mid') self._update_event_is_linked_mid_in(event) # TODO(jm): remove by reordering first # recheck events added in an order different from the trace order (mainly controller events as they are asynchronously logged) if hasattr(event, 'mid_out'): for mid_out in event.mid_out: if mid_out in self.events_pending_mid_in: for other in self.events_pending_mid_in[mid_out][:]: # copy list [:], so we can remove from it self._add_edge(event, other, rel='mid') self._update_event_is_linked_mid_in(other) def _rule_03_barrier_pre(self, event): if event.type == 'HbMessageHandle': if event.msg_type_str == "OFPT_BARRIER_REQUEST": for other in self.events_before_next_barrier[event.dpid]: self._add_edge(other, event, rel='barrier_pre') del self.events_before_next_barrier[event.dpid] else: self.events_before_next_barrier[event.dpid].append(event) def _rule_04_barrier_post(self, event): if event.type == 'HbMessageHandle': if event.msg_type_str == "OFPT_BARRIER_REQUEST": self.most_recent_barrier[event.dpid] = event else: if event.dpid in self.most_recent_barrier: other = self.most_recent_barrier[event.dpid] self._add_edge(other, 
event, rel='barrier_post') def _find_triggering_HbControllerHandle_for_alternative_barrier(self, event): """ Returns the HbControllerHandle that is responsible for triggering this event event (HbMessageHandle) <- (HbControllerSend) <- trigger (HbControllerHandle) """ preds = self.g.predecessors(event.eid) if len(preds) > 0: candidates = filter(lambda x: self.g.node[x]['event'].type == "HbControllerSend", preds) assert len(candidates) <= 1 # at most one HbControllerSend exists if len(candidates) == 1: send_event_eid = candidates[0] assert self.g.node[send_event_eid]['event'].type == "HbControllerSend" preds = self.g.predecessors(send_event_eid) candidates = filter(lambda x: self.g.node[x]['event'].type == "HbControllerHandle", preds) assert len(candidates) <= 1 # at most one HbControllerHandle exists if len(candidates) == 1: handle_event_eid = candidates[0] assert self.g.node[handle_event_eid]['event'].type == "HbControllerHandle" return handle_event_eid return None def _rule_03b_alternative_barrier_pre(self, event): """ Instead of using the dpid for barriers, this uses the eid of the predecessor HbControllerSend (if it exists). """ if event.type == 'HbMessageHandle': ctrl_handle_eid = self._find_triggering_HbControllerHandle_for_alternative_barrier(event) if ctrl_handle_eid is not None: if event.msg_type_str == "OFPT_BARRIER_REQUEST": for other in self.events_before_next_barrier[ctrl_handle_eid]: self._add_edge(other, event, rel='barrier_pre') del self.events_before_next_barrier[ctrl_handle_eid] else: self.events_before_next_barrier[ctrl_handle_eid].append(event) elif event.type == 'HbControllerSend': succ = self.g.successors(event.eid) for i in succ: self._rule_03b_alternative_barrier_pre(self.g.node[i]['event']) self._rule_04b_alternative_barrier_post(self.g.node[i]['event']) def _rule_04b_alternative_barrier_post(self, event): """ Instead of using the dpid for barriers, this uses the eid of the predecessor HbControllerSend (if it exists). 
""" if event.type == 'HbMessageHandle': ctrl_handle_eid = self._find_triggering_HbControllerHandle_for_alternative_barrier(event) if ctrl_handle_eid is not None: if event.msg_type_str == "OFPT_BARRIER_REQUEST": self.most_recent_barrier[ctrl_handle_eid] = event else: if ctrl_handle_eid in self.most_recent_barrier: other = self.most_recent_barrier[ctrl_handle_eid] self._add_edge(other, event, rel='barrier_post') elif event.type == 'HbControllerSend': succ = self.g.successors(event.eid) for i in succ: self._rule_03b_alternative_barrier_pre(self.g.node[i]['event']) self._rule_04b_alternative_barrier_post(self.g.node[i]['event']) def _rule_05_flow_removed(self, event): if isinstance(event, HbAsyncFlowExpiry): assert len(event.operations) == 1 expiry = event.operations[0] flow_table = expiry.flow_table # the flow table before the removal flow_mod = expiry.flow_mod # the removed entry reason = expiry.reason # Either idle or hard timeout. Deletes are not handled duration = expiry.duration_sec*10^9 + expiry.duration_nsec # TODO(JM): Handle deletes a different way? Currently deletes are recorded # to the trace as async switch events, same as timeouts. This means # that the instrumentation does NOT add a HB edge between the delete # operation itself and the async delete notification to the controller. # We might want to add such an edge, to do this we need the hb_logger # to link the two events already during instrumentation, as this is # almost impossible to do here as we do not have enough information # and the events might be recorded out of order in the trace. # TODO(jm): We should implement read-after-write data dependency edges # also for flow expiry messages, i.e. flows expire *after* they # have been written. This information is already partially # available in the hb_shadow_table module, but not currently # used for flow expiry. 
# create "dummy" operation that acts as a strict delete class DummyObject(object): pass dummy_event = DummyObject() dummy_op = DummyObject() dummy_event.eid = event.eid dummy_op.flow_mod = ofp_flow_mod(match=flow_mod.match,priority=flow_mod.priority,command=OFPFC_DELETE_STRICT) # Find other write events in the graph. for e in self.events: if e == event: continue # Skip none switch event if type(e) != HbMessageHandle: continue kw_ops = [] kr_ops = [] # Find the write ops for op in e.operations: if type(op) == TraceSwitchFlowTableWrite: kw_ops.append(op) elif type(op) == TraceSwitchFlowTableRead: kr_ops.append(op) if (not kw_ops) and (not kr_ops): continue # Make the edge for kw_op in kw_ops: # Skip if events commute anyway if self.race_detector.commutativity_checker.check_commutativity_ww( e, kw_op, dummy_event, dummy_op): continue delta = abs(expiry.t - kw_op.t) if delta > self.ww_delta: self._time_hb_ww_edges_counter += 1 self._add_edge(e, event, sanity_check=False, rel='time') break for kr_op in kr_ops: # Skip if events commute anyway if self.race_detector.commutativity_checker.check_commutativity_rw( e, kr_op, dummy_event, dummy_op): continue delta = abs(expiry.t - kr_op.t) if delta > self.rw_delta: self._time_hb_rw_edges_counter += 1 self._add_edge(e, event, sanity_check=False, rel='time') break def _rule_06_time_rw(self, event): if type(event) not in [HbPacketHandle]: return packet_match = ofp_match.from_packet(event.packet, event.in_port) operations = [] # Get all the read operations in the event # For OF 1.0 should be only one op, but more for OF1.3 for op in event.operations: if type(op) == TraceSwitchFlowTableRead: operations.append(op) for e in self.events: if type(e) != HbMessageHandle: continue for op in e.operations: if type(op) != TraceSwitchFlowTableWrite: continue if not op.flow_mod.match.matches_with_wildcards(packet_match, consider_other_wildcards=False): continue delta = abs(op.t - operations[0].t) if (delta > self.rw_delta): 
self._time_hb_rw_edges_counter += 1 self._add_edge(e, event, sanity_check=False, rel='time') break def _rule_07_time_ww(self, event): if type(event) not in [HbMessageHandle]: return i_ops = [] # Get all the write operations in the event # For OF 1.0 should be only one op, but more for OF1.3 for op in event.operations: if type(op) == TraceSwitchFlowTableWrite: i_ops.append(op) # No write operations in the event, just skip if not i_ops: return # Find other write events in the graph. for e in self.events: if e == event: continue # Skip none switch event if type(e) != HbMessageHandle: continue k_ops = [] # Find the write ops for op in e.operations: if type(op) == TraceSwitchFlowTableWrite: k_ops.append(op) if not k_ops: continue # Make the edge for i_op in i_ops: for k_op in k_ops: # Skip if events commute anyway if self.race_detector.commutativity_checker.check_commutativity_ww( event, i_op, e, k_op): continue delta = abs(i_op.t - k_op.t) if delta > self.ww_delta: self._time_hb_ww_edges_counter += 1 self._add_edge(e, event, sanity_check=False, rel='time') break def _update_edges(self, event): self._rule_01_pid(event) self._rule_02_mid(event) if self.alt_barr: self._rule_03b_alternative_barrier_pre(event) self._rule_04b_alternative_barrier_post(event) else: self._rule_03_barrier_pre(event) self._rule_04_barrier_post(event) self._rule_05_flow_removed(event) if self.add_hb_time: self._rule_06_time_rw(event) self._rule_07_time_ww(event) def _update_shadow_tables(self, event): if event.dpid not in self.shadow_tables: self.shadow_tables[event.dpid] = ShadowFlowTable(event.dpid, self.is_minimized) self.shadow_tables[event.dpid].apply_event(event) def unpack_line(self, line): # Skip empty lines and the ones start with '#' if not line or line.startswith('#'): return # TODO(jm): I did some tests to see why loading events is so slow. # JsonEvent.from_json is the slow part, everything else # (including json.loads()) is blazing fast. # We might want to speed that up a bit. 
event = JsonEvent.from_json(json.loads(line)) return event def add_line(self, line): event = self.unpack_line(line) if event: self.add_event(event) def add_event(self, event): assert event.eid not in self.events_by_id if self.ignore_ethertypes: packet = None if hasattr(event, 'packet'): packet = event.packet if type(event) == HbMessageHandle and getattr(event.msg, 'data', None): packet = ethernet(event.msg.data) if packet and packet.type in self.ignore_ethertypes: # print "Filtered PKT in ignore_ethertypes" return msg_type = getattr(event, 'msg_type', None) if msg_type in SKIP_MSGS: return self.g.add_node(event.eid, event=event) self.events_by_id[event.eid] = event self._add_to_lookup_tables(event) if hasattr(event, 'operations'): for op in event.operations: if type(op) in [TraceSwitchFlowTableRead, TraceSwitchFlowTableWrite]: # TODO(jm): Add TraceSwitchFlowTableEntryExpiry events here as well. # But before we can do that, we need to assign monotonicially increasing # eids to the expiry events as well in hb_logger self.events_with_reads_writes.append(event.eid) break def _handle_HbAsyncFlowExpiry(event): if self.data_deps: self._update_shadow_tables(event) self._update_edges(event) def _handle_HbPacketHandle(event): if self.data_deps: self._update_shadow_tables(event) self._update_edges(event) def _handle_HbPacketSend(event): self._update_edges(event) def _handle_HbMessageHandle(event): if self.data_deps: self._update_shadow_tables(event) self._update_edges(event) self.msg_handles[event.eid] = event def _handle_HbMessageSend(event): self._update_edges(event) self.msgs[event.eid] = event def _handle_HbHostHandle(event): self._update_edges(event) def _handle_HbHostSend(event): self._update_edges(event) self.host_sends[event.eid] = event def _handle_HbControllerHandle(event): self._update_edges(event) def _handle_HbControllerSend(event): self._update_edges(event) def _handle_default(event): assert False pass handlers = {'HbAsyncFlowExpiry': _handle_HbAsyncFlowExpiry, 
      # Tail of the event-type -> handler dispatch table; the dict literal and
      # the enclosing method begin before this chunk.
      'HbPacketHandle': _handle_HbPacketHandle,
      'HbPacketSend': _handle_HbPacketSend,
      'HbMessageHandle': _handle_HbMessageHandle,
      'HbMessageSend': _handle_HbMessageSend,
      'HbHostHandle': _handle_HbHostHandle,
      'HbHostSend': _handle_HbHostSend,
      'HbControllerHandle': _handle_HbControllerHandle,
      'HbControllerSend': _handle_HbControllerSend,
    }
    # Unknown event types fall back to _handle_default.
    handlers.get(event.type, _handle_default)(event)

  def load_trace(self, filename):
    """Read a JSON-lines trace file, unpack each line into an event object and
    add every event to a fresh happens-before DiGraph (self.g)."""
    self.g = nx.DiGraph()
    self.events_by_id = dict()
    unpacked_events = list()
    with open(filename) as f:
      for line in f:
        event = self.unpack_line(line)
        # unpack_line may return None for lines we do not model; skip those.
        if event:
          unpacked_events.append(event)
    print "Read " + str(len(unpacked_events)) + " events."
    for event in unpacked_events:
      self.add_event(event)
    print "Added " + str(len(list(self.events))) + " events."

  def verify_and_minimize_trace(self, filename):
    """Re-serialize a trace to <filename>.min, updating the shadow flow tables
    along the way and stripping per-operation 'flow_table' attributes so the
    minimized trace is smaller."""
    unpacked_events = 0
    outfilename = filename + ".min"
    with open(filename + ".min", 'w') as fout:
      with open(filename) as f:
        for line in f:
          event = self.unpack_line(line)
          if event:
            unpacked_events += 1
            # NOTE(review): has_reads_writes is computed but never used below
            # in this chunk — possibly leftover from an earlier filter.
            has_reads_writes = False
            if hasattr(event, 'operations'):
              for op in event.operations:
                if type(op) in [TraceSwitchFlowTableRead, TraceSwitchFlowTableWrite, TraceSwitchFlowTableEntryExpiry]:
                  has_reads_writes = True
                  break
            if type(event) in [HbAsyncFlowExpiry, HbPacketHandle, HbMessageHandle]:
              self._update_shadow_tables(event)
            # cleanup operations
            if hasattr(event, 'operations'):
              for op in event.operations:
                if hasattr(op, "flow_table"):
                  delattr(op, "flow_table")
            # cleanup attributes
            fout.write(str(event.to_json()) + '\n')
            fout.flush()
    print "Verified, minimized, and wrote " + str(unpacked_events) + " events to " + str(outfilename)

  def store_graph(self, filename="hb.dot", print_packets=False):
    """Annotate the full HB graph for Graphviz and write it as a .dot file.
    NOTE(review): results_dir is joined onto filename twice (once here, once in
    the write_dot call); harmless only when the first join yields an absolute
    path — TODO confirm."""
    if self.results_dir is not None:
      filename = os.path.join(self.results_dir, filename)
    self.prep_draw(self.g, print_packets)
    nx.write_dot(self.g, os.path.join(self.results_dir, filename))

  @staticmethod
  def prep_draw(g, print_packets, allow_none_event=False):
    """ Adds proper annotation for the graph to make drawing it more pleasant.

    Builds a Graphviz 'label' and 'shape' for every node from the attached
    event, and styles edges by their 'rel' attribute (race/covered/etc.).
    Mutates g in place.
    """
    for eid, data in g.nodes_iter(data=True):
      event = data.get('event', None)
      # Synthetic nodes (no event) are only allowed when explicitly requested.
      if not event and allow_none_event:
        label = "N %s" % eid
        shape = "oval"
        g.node[eid]['label'] = label
        g.node[eid]['shape'] = shape
        continue
      label = "ID %d \\n %s" % (eid, event.type)
      if hasattr(event, 'hid'):
        label += "\\nHID: " + str(event.hid)
      if hasattr(event, 'dpid'):
        label += "\\nDPID: " + str(event.dpid)
      shape = "oval"
      op = None
      if hasattr(event, 'operations'):
        # Only the first read or write operation determines the node styling.
        for x in event.operations:
          if x.type == 'TraceSwitchFlowTableWrite':
            op = "FlowTableWrite"
            op += "\\nCMD: " + OFP_COMMANDS[x.flow_mod.command]
            op += "\\nMatch: " + pretty_match(x.flow_mod.match)
            op += "\\nActions: " + str(x.flow_mod.actions)
            label += "\\nt: " + repr(x.t)
            shape = 'box'
            g.node[eid]['style'] = 'bold'
            break
          if x.type == 'TraceSwitchFlowTableRead':
            op = "FlowTableRead"
            label += "\\nt: " + repr(x.t)
            shape = 'box'
            break
      if hasattr(event, 'msg') and getattr(event.msg, 'actions', None):
        op = "\\nActions: " + str(event.msg.actions)
      cmd_type = data.get('cmd_type')
      if cmd_type:
        label += "\\n%s" % cmd_type
      if op:
        label += "\\nOp: %s" % op
      if hasattr(event, 'msg_type'):
        label += "\\nMsgType: " + event.msg_type_str
      if getattr(event, 'msg', None):
        label += "\\nXID: %d" % event.msg.xid
      if hasattr(event, 'in_port'):
        label += "\\nInPort: " + str(event.in_port)
      if hasattr(event, 'out_port') and not isinstance(event.out_port, basestring):
        label += "\\nOut Port: " + str(event.out_port)
      if hasattr(event, 'buffer_id'):
        label += "\\nBufferId: " + str(event.buffer_id)
      if print_packets and hasattr(event, 'packet'):
        pkt = pkt_info(event.packet)
        label += "\\nPkt: " + pkt
      if print_packets and getattr(event, 'msg', None):
        if getattr(event.msg, 'data', None):
          pkt = pkt_info(ethernet(event.msg.data))
          label += "\\nPkt: " + pkt
      g.node[eid]['label'] = label
      g.node[eid]['shape'] = shape
    for src, dst, data in g.edges_iter(data=True):
      g.edge[src][dst]['label'] = data['rel']
      if data['rel'] == 'race':
        if data['harmful']:
          g.edge[src][dst]['color'] = 'red'
          g.edge[src][dst]['style'] = 'bold'
        else:
          g.edge[src][dst]['style'] = 'dotted'
      elif data['rel'] == 'covered':
        g.edge[src][dst]['color'] = 'blue'
        g.edge[src][dst]['style'] = 'bold'

  def extract_traces(self, g):
    """
    Given HB graph g, this method return a list of subgraph starting from a
    HostSend event and all the subsequent nodes that happened after it.
    This method will exclude all the nodes connected because of time and the
    nodes connected after HostHandle.
    """
    traces = []
    # Sort host sends by eid, this will make the output follow the trace order
    eids = self.host_sends.keys()
    eids = sorted(eids)
    for eid in eids:
      nodes = list(nx.dfs_preorder_nodes(g, eid))
      # Remove other HostSends
      # NOTE(review): removing from `nodes` while iterating over it can skip
      # elements; consider iterating over a copy — TODO confirm intent.
      for node in nodes:
        if eid != node and isinstance(g.node[node]['event'], HbHostSend):
          nodes.remove(node)
      subg = nx.DiGraph(g.subgraph(nodes), host_send=g.node[eid]['event'])
      traces.append(subg)
    for i, subg in enumerate(traces):
      # Drop time/race edges so only true causal (packet) edges remain.
      for src, dst, data in subg.edges(data=True):
        if data['rel'] in ['time', 'race']:
          subg.remove_edge(src, dst)
      # Remove disconnected subgraph
      host_send = subg.graph['host_send']
      nodes = nx.dfs_preorder_nodes(subg, host_send.eid)
      traces[i] = nx.DiGraph(subg.subgraph(nodes), host_send=host_send)
    self.packet_traces = traces
    return traces

  def store_traces(self, results_dir, print_packets=True, subgraphs=None):
    """Write each extracted packet trace as its own .dot file, named by the
    packet's src/dst addresses and the HostSend eid."""
    if not subgraphs:
      subgraphs = self.extract_traces(self.g)
    for i in range(len(subgraphs)):
      subg = subgraphs[i]
      send = subg.graph['host_send']
      HappensBeforeGraph.prep_draw(subg, print_packets)
      nx.write_dot(subg, "%s/trace_%s_%s_%04d.dot" % (results_dir, str(send.packet.src), str(send.packet.dst), send.eid))

  def get_racing_events(self, trace, ignore_other_traces=True):
    """
    For a given packet trace, return all the races that races with its events
    """
    # Set of all events that are part of a harmful race
    all_harmful = set([event.eid for event in self.race_detector.racing_events_harmful])
    # Set of event ids of a packet trace
    eids = set(trace.nodes())
    # All events in packet trace that are also part of a race
    racing_eids = sorted(list(eids.intersection(all_harmful)))
    # Get the actual reported race;
    # will get us the other event that has been part of the race
    rw_races_with_trace = list()
    for race in self.race_detector.races_harmful_with_covered:
      if race.rtype == 'r/w':
        # i_event is read, k_event is write
        if race.i_event.eid in racing_eids or race.k_event.eid in racing_eids:
          # We don't care about write on the packet trace that races with reads
          # on other packet traces. The other traces will be reported anyway.
          # logical implication: ignore_other_traces ==> race.i_event.eid in racing_eids
          if (not ignore_other_traces) or (race.i_event.eid in racing_eids):
            rw_races_with_trace.append(race)
    # make sure the races are sorted first by read, then by write. The default
    # sort on the namedtuple already does this
    return sorted(rw_races_with_trace)

  def get_all_packet_traces_with_races(self):
    """
    Finds all the races related each packet trace
    """
    races = list()
    for trace in self.packet_traces:
      racing_events = self.get_racing_events(trace, True)
      if len(racing_events) > 0:
        races.append((trace, racing_events,))
    return races

  def summarize_per_packet_inconsistent(self, traces_races):
    """
    If two packets are inconsistent, but they race with the same set of writes,
    then only one will be reported
    """
    # TODO(jm): This does not take into account the order of the writes or the path the packets took. Do we care?
    result = {}
    # NOTE(review): `removed` collects the duplicates but is never returned or
    # read — presumably kept for debugging.
    removed = defaultdict(list)
    for trace, races, versions in traces_races:
      # First get the writes
      writes = []
      for race in races:
        if isinstance(race.i_op, TraceSwitchFlowTableWrite):
          writes.append(race.i_op.eid)
        if isinstance(race.k_op, TraceSwitchFlowTableWrite):
          writes.append(race.k_op.eid)
      # Key traces by the (sorted) set of racing writes; first trace wins.
      key = (tuple(sorted(writes)))
      if key in result:
        removed[key].append((trace, races, versions))
      else:
        result[key] = (trace, races, versions)
    return result.values()

  def print_racing_packet_trace(self, trace, races, label, show_covered=True):
    """
    first is the trace
    second is the list of races

    Builds a copy of the packet trace augmented with the racing events (and,
    for covered races, the covering HB paths) and writes it to a .dot file
    named <label>_<src>_<dst>_<eid>.dot in results_dir.
    """
    host_send = trace.graph['host_send']
    g = nx.DiGraph(trace, host_send=host_send)
    for race in races:
      # Ensure both endpoints of the race are present in the output graph.
      if not g.has_node(race.i_event.eid):
        g.add_node(race.i_event.eid, event=race.i_event)
      if not g.has_node(race.k_event.eid):
        g.add_node(race.k_event.eid, event=race.k_event)
      if show_covered and race in self.covered_races:
        # Copy over every HB path between the two racing events so the
        # "covered" ordering is visible in the drawing.
        for path in nx.all_simple_paths(self.g, race.i_event.eid, race.k_event.eid):
          for src, dst in zip(path, path[1:]):
            g.node[src] = self.g.node[src]
            g.node[dst] = self.g.node[dst]
            g.add_edge(src, dst, self.g.edge[src][dst])
        for path in nx.all_simple_paths(self.g, race.k_event.eid, race.i_event.eid):
          for src, dst in zip(path, path[1:]):
            g.node[src] = self.g.node[src]
            g.node[dst] = self.g.node[dst]
            g.add_edge(src, dst, self.g.edge[src][dst])
        g.add_edge(race.i_event.eid, race.k_event.eid, rel='covered', harmful=True)
      else:
        #if not g.has_edge(race.i_event.eid, race.k_event.eid) and not \
        #    g.has_edge(race.k_event.eid, race.i_event.eid):
        g.add_edge(race.i_event.eid, race.k_event.eid, rel='race', harmful=True)
    # NOTE(review): the second argument of prep_draw is `print_packets` (a
    # bool); passing the TraceSwitchPacketUpdateBegin class is merely truthy
    # here — looks unintentional, TODO confirm.
    self.prep_draw(g, TraceSwitchPacketUpdateBegin)
    src = str(host_send.packet.src)
    dst = str(host_send.packet.dst)
    name = "%s_%s_%s_%s.dot" % (label, src, dst, host_send.eid)
    name = os.path.join(self.results_dir, name)
    print "Storing packet %s for %s->%s in %s " % (label, src, dst, name)
    nx.write_dot(g, name)

  def races_graph(self):
    """Build a standalone DiGraph containing only the harmful races, one
    'race' edge per reported race."""
    races = self.race_detector.races_harmful
    races_graph = nx.DiGraph()
    for rtype, i_event, i_op, k_event, k_op in races:
      races_graph.add_node(i_event.eid, event=i_event)
      races_graph.add_node(k_event.eid, event=k_event)
      races_graph.add_edge(i_event.eid, k_event.eid, rel='race', harmful=True)
    return races_graph

  def save_races_graph(self, print_pkts=True, name=None):
    """Write the harmful-races-only graph to a .dot file in results_dir."""
    if not name:
      name = "just_races.dot"
    graph = self.races_graph()
    self.prep_draw(graph, print_pkts)
    print "Saving all races graph in", name
    nx.write_dot(graph, os.path.join(self.results_dir, name))

  def find_covered_races(self):
    """
    Go through events in trace order, add a RaW dependency and then check if
    there are any races that are part of:
      - the set of predecessors of W, and
      - the set of successors of R
    These are now ordered so we can add them to the list.

    Returns a dict mapping each newly-covered race to the (read eid, write eid)
    pair whose data dependency covered it. Memoized in self.covered_races.
    """
    if self.covered_races:
      return self.covered_races
    covered_races = dict()
    data_dep_races = set()
    time_races = set()
    remaining_harmful_races = set()
    # remove all races that were already removed due to time based rules
    for r in self.race_detector.races_harmful_with_covered:
      if self.has_path(r.i_event.eid, r.k_event.eid, bidirectional=True):
        # race is not a race anymore
        time_races.add(r)
      else:
        # race is still a race and can become covered when adding data deps
        remaining_harmful_races.add(r)
    # check for monotonically increasing eids, i.e. the list must be sorted
    assert all(x <= y for x, y in zip(self.events_with_reads_writes, self.events_with_reads_writes[1:]))
    for eid in self.events_with_reads_writes:
      event = self.events_by_id[eid]
      dpid = event.dpid
      shadow_table = self.shadow_tables[dpid]
      if hasattr(event, 'operations'):
        has_reads = False
        for op in event.operations:
          if type(op) in [TraceSwitchFlowTableRead]:
            has_reads = True
        if has_reads:
          # add RaW dependencies (HB edge from event containing W -> event containing R)
          for write_eid in shadow_table.data_deps[event.eid]:
            write_event = self.events_by_id[write_eid]
            if self.g.has_edge(write_event.eid, event.eid):
              # An existing edge must be a 'time' edge; the RaW edge would be
              # redundant in that case.
              assert self.g.get_edge_data(write_event.eid, event.eid)['rel'] == 'time'
            else:
              self._add_edge(write_event, event, sanity_check=False, rel='dep_raw')
            # Should we check this after adding *all* dependencies or after each. E.g. for events with a read and a write.
            # includes write_eid itself
            write_succs = set(nx.dfs_preorder_nodes(self.g, write_eid))
            for r in remaining_harmful_races:
              # TODO(jm): get rid of this loop here, lots of unnecessary looping
              # is there a path from our write to the the race
              if r.i_event.eid in write_succs or r.k_event.eid in write_succs:
                # ignore races that we just removed using the data dep edge.
                if (r.i_event == event and r.k_event == write_event) or (r.i_event == write_event and r.k_event == event):
                  data_dep_races.add(r)
                else:
                  # only add a covered race the first time
                  if r not in covered_races and r not in data_dep_races:
                    if self.has_path(r.i_event.eid, r.k_event.eid, bidirectional=True, use_path_cache=False):
                      # race is not a race anymore
                      self.race_detector._races_harmful.remove(r)
                      self.race_detector.covered_races.append(r)
                      covered_races[r] = (eid, write_eid)
    self.covered_races = covered_races
    return self.covered_races

  def _get_versions_for_races(self, races):
    """Map each race to the set of update versions whose commands contain one
    of the race's endpoints. Requires self.versions to be populated."""
    # assume races is ordered!
    assert all(races[i] < races[i + 1] for i in xrange(len(races) - 1))
    versions_for_race = defaultdict(set)
    for race in races:
      # get versions for each race
      for version, cmds in self.versions.iteritems():
        if race.i_event.eid in cmds or race.k_event.eid in cmds:
          versions_for_race[race].add(version)
    return versions_for_race

  def _is_inconsistent_packet_entry_version(self, trace, race, dpids_affected):
    """True iff none of the switches the packet visits *before* the racing
    switch are affected by the update — i.e. the race only touches the
    packet's entry point into the updated region."""
    trace_nodes = nx.dfs_preorder_nodes(trace, trace.graph['host_send'].eid)
    trace_dpids = [getattr(self.g.node[node]['event'], 'dpid', None) for node in trace_nodes]
    racing_dpid = race.i_event.dpid
    # which switches/nodes does the packet traverse before hitting this 1 uncovered race?
    none_racing_dpids = set([x for x in trace_dpids[:trace_dpids.index(racing_dpid)] if x is not None])
    return not dpids_affected.intersection(none_racing_dpids)

  def find_per_packet_inconsistent(self, covered_races=None, summarize=True):
    """
    Returns the following sets of packet traces.
      1) all packet traces that race with a write event
      2) all per-packet TRUE inconsistent traces
      3) Covered packet traces (trace with races cannot happen because of HB)
      4) Packet traces with races with first switch on version update
      5) Summarize traces after removing covered and trimming traces that races
         with the same writes

    all packet traces = TRUE inconsistent traces + covered + entry switch races
    summazied = all per-packet inconsistent traces - repeatd all per-packet inconsistent traces

    NOTE(review): the covered_races=None default would raise a TypeError at
    `race not in covered_races`; callers appear to always pass a dict — TODO
    confirm (e.g. pass {} by default).
    """
    # list of (trace, races), ordered by trace order
    packet_races = self.get_all_packet_traces_with_races()
    inconsistent_packet_traces = []
    consistent_packet_traces_covered = []
    consistent_packet_entry_version = []
    summarized = []
    dpids_for_version = {}
    for version, cmds in self.versions.iteritems():
      dpids_for_version[version] = set([getattr(self.g.node[cmd]['event'], 'dpid', None) for cmd in cmds])
    for trace, races in packet_races:
      uncovered_races = [race for race in races if race not in covered_races]
      uncovered_races_dpids = list(set([race.i_event.dpid for race in uncovered_races]))
      versions_for_race = self._get_versions_for_races(uncovered_races)
      racing_versions = sorted(list(set(versions_for_race.keys())))
      # check if all the races are actually covered
      if not uncovered_races:
        consistent_packet_traces_covered.append((trace, races, racing_versions))
      elif len(uncovered_races_dpids) == 1:
        # check entry
        is_entry = True
        for race in uncovered_races:
          version = list(versions_for_race[race])[0]
          affected_dpids = dpids_for_version[version]
          is_entry = self._is_inconsistent_packet_entry_version(trace, race, affected_dpids)
          # If only one of the races is not entry then even though the races
          # are one switch, one of them makes this trace inconsistent.
          if not is_entry:
            break
        has_covered = len(races) > len(uncovered_races)
        if is_entry:
          if has_covered:
            consistent_packet_traces_covered.append((trace, races, racing_versions))
          else:
            consistent_packet_entry_version.append((trace, races, racing_versions))
        else:
          inconsistent_packet_traces.append((trace, races, racing_versions))
      else:
        inconsistent_packet_traces.append((trace, races, racing_versions))
    if summarize:
      summarized = self.summarize_per_packet_inconsistent(inconsistent_packet_traces)
    # Every racing trace must land in exactly one of the three buckets.
    assert len(packet_races) == len(inconsistent_packet_traces) + \
        len(consistent_packet_entry_version) + \
        len(consistent_packet_traces_covered)
    return packet_races, inconsistent_packet_traces, \
        consistent_packet_traces_covered, \
        consistent_packet_entry_version, summarized

  def find_barrier_replies(self):
    """For each OFPT_BARRIER_REPLY message, collect the HbMessageHandle events
    reachable from it (via just_mid_iter DFS), deduplicated and sorted by the
    time of their first operation. Returns [(reply_msg, handles), ...]."""
    barrier_replies = []
    for eid in self.msgs:
      if self.msgs[eid].msg_type_str != 'OFPT_BARRIER_REPLY':
        continue
      nodes = []
      # TODO(jm): Are we sure just_mid_iter is correct? What about packets sent
      # out by a PACKET_OUT that then trigger a PACKET_IN -> ... -> BARRIER_REPLY?find_barrier_replies
      edges = dfs_edge_filter(self.g, eid, just_mid_iter)
      for src, dst in edges:
        src_event = self.g.node[src]['event']
        dst_event = self.g.node[dst]['event']
        if isinstance(src_event, HbMessageHandle):
          nodes.append(src_event)
          #self.g.node[src]['cmd_type'] = "Reactive to %d" % eid
        if isinstance(dst_event, HbMessageHandle):
          nodes.append(dst_event)
          #self.g.node[dst]['cmd_type'] = "Reactive to %d" % eid
      # Get unique and sort by time
      nodes = sorted(list(set(nodes)), key=lambda n: n.operations[0].t if n.operations else 0)
      barrier_replies.append((self.msgs[eid], nodes))
    return barrier_replies

  def find_reactive_versions2(self):
    """Alternative reactive-version detection: walks messages in eid order and
    marks each HbMessageHandle as reactive to the first message whose DFS
    reaches it, tracking already-considered eids to avoid double counting."""
    considered = []
    cmds = []
    ordered_msgs = OrderedDict()
    #sorted_msgs = sorted(self.msgs.values(), key=lambda m: m.operations[0].t if getattr(m, 'operations', None) else 0)
    sorted_msgs = sorted(self.msgs.values(), key=lambda m: m.eid)
    for m in sorted_msgs:
      ordered_msgs[m.eid] = m
    for eid in ordered_msgs:
      if self.msgs[eid].msg_type_str == 'OFPT_BARRIER_REPLY':
        continue
      if eid in considered:
        continue
      else:
        considered.append(eid)
      nodes = []
      # TODO(jm): Are we sure just_mid_iter is correct? What about packets sent
      # out by a PACKET_OUT that then trigger a PACKET_IN -> ... -> BARRIER_REPLY?find_barrier_replies
      #edges = dfs_edge_filter(self.g, eid, just_mid_iter, filter_msg_type='OFPT_PACKET_IN')
      edges = dfs_edge_filter(self.g, eid, just_mid_iter)
      for src, dst in edges:
        src_event = self.g.node[src]['event']
        dst_event = self.g.node[dst]['event']
        if isinstance(dst_event, HbMessageSend):
          considered.append(dst_event.eid)
        if isinstance(src_event, HbMessageHandle) and src_event.eid not in considered:
          nodes.append(src_event)
          self.g.node[src]['cmd_type'] = "Reactive to %d" % eid
        if isinstance(dst_event, HbMessageHandle) and dst_event.eid not in considered:
          nodes.append(dst_event)
          self.g.node[dst]['cmd_type'] = "Reactive to %d" % eid
      # Get unique and sort by time
      nodes = sorted(list(set(nodes)), key=lambda n: n.operations[0].t if n.operations else 0)
      for n in nodes:
        considered.append(n.eid)
      cmds.append((self.msgs[eid], nodes))
    # NOTE(review): this asserts the groups DO overlap, whereas
    # find_reactive_versions asserts they do NOT — one of the two looks
    # inverted; TODO confirm which invariant is intended.
    for l, (x, i) in enumerate(cmds):
      for k, (y, j) in enumerate(cmds):
        if l == k:
          continue
        assert set(i).intersection(j), "l %s and k %s" % (l, k)
    return cmds

  def find_reactive_versions(self):
    """Group controller commands (HbMessageHandle events) by the message that
    triggered them; returns [(trigger_msg, handles)] with the invariant that
    no handle appears in two groups."""
    cmds = []
    considered = []
    cv = dict()
    for eid in self.msgs:
      if self.msgs[eid].msg_type_str == 'OFPT_BARRIER_REPLY':
        continue
      nodes = []
      # TODO(jm): Are we sure just_mid_iter is correct? What about packets sent
      # out by a PACKET_OUT that then trigger a PACKET_IN -> ... -> BARRIER_REPLY?find_barrier_replies
      edges = dfs_edge_filter(self.g, eid, just_mid_iter, filter_msg_type=HbMessageSend)
      for src, dst in edges:
        src_event = self.g.node[src]['event']
        dst_event = self.g.node[dst]['event']
        if isinstance(src_event, HbMessageHandle):
          nodes.append(src_event)
          self.g.node[src]['cmd_type'] = "Reactive to %d" % eid
        if isinstance(dst_event, HbMessageHandle):
          nodes.append(dst_event)
          self.g.node[dst]['cmd_type'] = "Reactive to %d" % eid
      # Get unique and sort by time
      nodes = sorted(list(set(nodes)), key=lambda n: n.operations[0].t if n.operations else 0)
      for n in nodes:
        assert n.eid not in considered, "For event %d at eid %d it was considered at %d" % (n.eid, eid, cv[n.eid])
        considered.append(n.eid)
        cv[n.eid] = eid
        # NOTE(review): n.eid is appended to `considered` twice — the second
        # append looks redundant; TODO confirm.
        considered.append(n.eid)
      cmds.append((self.msgs[eid], nodes))
    for l, (x, i) in enumerate(cmds):
      for k, (y, j) in enumerate(cmds):
        if l == k:
          continue
        assert not set(i).intersection(j), "l %s and k%s" % (l, k)
    return cmds

  def find_proactive_cmds(self, reactive_versions=None):
    """
    Proactive is all the cmds that were not in the reactive set
    """
    # TODO(jm): At the end of the trace, some of the controller instrumentation might not be there, so some of the commands at the very end could be reactive. Cut them off somehow?
    if not reactive_versions:
      reactive_versions = self.find_reactive_versions()
    reactive_cmds = []
    for msgs, cmds in reactive_versions:
      for cmd in cmds:
        reactive_cmds.append(cmd.eid)
    proactive_eid = set(self.msg_handles.keys()).difference(set(reactive_cmds))
    proactive = [self.g.node[eid]['event'] for eid in list(proactive_eid)]
    for cmd in proactive:
      self.g.node[cmd.eid]['cmd_type'] = 'Proactive'
    # Sort by the time of each command's first operation.
    proactive.sort(key=lambda n: n.operations[0].t)
    return proactive

  def cluster_cmds(self, cmds):
    """
    Cluster the update commands by time.

    Uses hierarchical clustering on the first-operation timestamps; clusters
    are re-keyed 0..n-1 in chronological order and cached in
    self.clustered_cmds.
    """
    # Cluster by time
    from scipy.cluster.hierarchy import fclusterdata
    # TODO(jm): Should we add a setting for the threshold, or use STS rounds instead of time?
    features = [[e.operations[0].t] for e in cmds]
    result = fclusterdata(features, 0.8, criterion="distance")
    clustered = defaultdict(list)
    for i in range(len(cmds)):
      clustered[result[i]].append(cmds[i])
    # just trying to order the versions
    ordered = sorted(clustered.keys(), key=lambda i: clustered[i][0].operations[0].t)
    clustered_ordered = dict()
    for i in range(len(ordered)):
      clustered_ordered[i] = clustered[ordered[i]]
    self.clustered_cmds = clustered_ordered
    return clustered_ordered

  def find_versions(self):
    """
    Find all versions, reactive or proactive

    Proactive versions are keyed by cluster index; reactive versions are keyed
    by the triggering message. Versions linked by a barrier request/reply pair
    are merged. Memoized in self.versions.
    """
    if self.versions:
      return self.versions
    reactive = self.find_reactive_versions()
    proactive = self.find_proactive_cmds(reactive)
    self.cluster_cmds(proactive)
    # Consider all proactive and reactive versions
    versions = {}
    for version, events in self.clustered_cmds.iteritems():
      versions[version] = list(set([event.eid for event in events]))
    for pktin, events in reactive:
      versions[pktin] = list(set([event.eid for event in events]))
    # Now merge versions if one contains a response to a barrier request
    # from previous version
    # TODO(jm): Perhaps we should not just consider barrier replies, but also flow removed messages for explicit deletes? Are there more such replies?
    barrier_replies = self.find_barrier_replies()
    replies_by_xid = {}  # (dpid, xid) -> cmds
    replies_by_xid_versions = {}  # (dpid, xid) -> versions
    requests_by_xid = {}  # (dpid, xid) -> version
    # Sort replies by dpid and xid
    for rep, cmds in barrier_replies:
      key = (rep.dpid, rep.msg.xid)
      replies_by_xid[key] = [event.eid for event in cmds]
      replies_by_xid_versions[key] = []
      reactive_cmds = set(replies_by_xid[key])
      for v, v_cmds in versions.iteritems():
        if reactive_cmds.intersection(v_cmds):
          replies_by_xid_versions[key].append(v)
    # Sort requests by dpid and xid
    for v, v_cmds in versions.iteritems():
      for v_cmd in v_cmds:
        event = self.g.node[v_cmd]['event']
        if event.msg_type_str == 'OFPT_BARRIER_REQUEST':
          requests_by_xid[(event.dpid, event.msg.xid)] = v
    for key, version in requests_by_xid.iteritems():
      if version not in versions:
        continue  # already merged
      if key not in replies_by_xid:
        continue
      new_cmds = versions[version]
      for v in replies_by_xid_versions[key]:
        if v == version:
          continue  # we already considered the first version
        if v not in versions:
          continue  # already merged
        new_cmds += versions[v]
        del versions[v]
    # Sort cmds by time, just to make it nicer
    for version in versions:
      versions[version].sort(key=lambda x: self.g.node[x]['event'].operations[0].t)
    # Drop now-empty versions.
    versions = dict([k, v] for k, v in versions.iteritems() if v)
    self.versions = versions
    return versions

  def find_inconsistent_updates(self):
    """Try to find if two versions race with each other"""
    versions = self.find_versions()
    # TODO(jm): Could we check the races directly instead of creating the ww_races variable?
    racing_versions_tuples = []
    racing_versions_dict = {}
    # Map each event to the events it has a write/write race with (both ways).
    ww_races = defaultdict(list)
    for race in self.race_detector.races_harmful_with_covered:
      if race.rtype == 'w/w':
        ww_races[race.i_event.eid].append(race.k_event.eid)
        ww_races[race.k_event.eid].append(race.i_event.eid)
    racing_events = []
    for version, cmds in versions.iteritems():
      for cmd in cmds:
        if cmd in ww_races:
          for other in ww_races[cmd]:
            # Only races that cross version boundaries matter here.
            if other not in cmds:
              racing_events.append((cmd, other))
    racing_versions = []
    for eid1, eid2 in racing_events:
      # Find which version each racing eid belongs to.
      v1 = None
      v2 = None
      for version, cmds in versions.iteritems():
        if eid1 in cmds:
          v1 = version
        if eid2 in cmds:
          v2 = version
        if v1 and v2 and v1 != v2:
          break
      racing_versions.append((v1, v2, (eid1, eid2), (versions[v1], versions[v2])))
      if set([v1, v2]) not in racing_versions_tuples:
        racing_versions_tuples.append(set([v1, v2]))
      # Normalize the pair ordering so (v1, v2) and (v2, v1) share one entry.
      ordered_versions = (v1, v2)
      er1 = eid1
      er2 = eid2
      if ordered_versions not in racing_versions_dict:
        ordered_versions = (v2, v1)
        er1 = eid2
        er2 = eid1
      if ordered_versions not in racing_versions_dict:
        racing_versions_dict[ordered_versions] = [[], []]
      if er1 not in racing_versions_dict[ordered_versions][0] and \
          er2 not in racing_versions_dict[ordered_versions][1]:
        racing_versions_dict[ordered_versions][0].append(er1)
        racing_versions_dict[ordered_versions][1].append(er2)
    return racing_versions, racing_versions_tuples, racing_versions_dict

  def print_versions(self, versions, selected_versions=[]):
    """Pretty-print the commands of each (selected) version to stdout.
    NOTE(review): mutable default argument; harmless here because it is only
    read, but a tuple default would be safer."""
    # Printing versions
    if not selected_versions:
      selected_versions = versions.keys()
    for v, cmds in versions.iteritems():
      if v not in selected_versions:
        continue
      print "IN Version", v
      if isinstance(v, HbMessageSend):
        print "React to Msg: ", v.msg_type_str
      for cmd in cmds:
        node = self.g.node[cmd]['event']
        # NOTE(review): `match` is computed but not used below (pretty_match is
        # used instead) — possibly leftover.
        match = ''
        if getattr(node.msg, 'match', None):
          match = node.msg.show().replace('\n', ' ')
        of_cmd = ''
        if hasattr(node.msg, 'command'):
          of_cmd = OFP_COMMANDS[node.msg.command]
        print "\t eid", node.eid, " dpid:", node.dpid, " xid:", node.msg.xid, \
            " cmd:", node.msg_type_str, of_cmd, ' ', \
            pretty_match(getattr(node.msg, 'match', None)), \
            getattr(node.msg, 'actions', None)

  def print_covered_races(self):
    """Print each covered race and write a subgraph of the covering HB paths
    (plus 'covered' edges) to covered_races.dot."""
    print "Covered races:"
    eids = []
    race_edges = []
    nodes_on_path = []
    for r, v in self.covered_races.iteritems():
      print "Race (r/w): ", r.rtype, r.i_event.eid, r.k_event.eid, ", covered by data dep w -> r: ", v
      eids.append(r.i_event.eid)
      eids.append(r.k_event.eid)
      race_edges.append((r.i_event.eid, r.k_event.eid))
      eids.append(v[0])
      eids.append(v[1])
      for path in nx.all_simple_paths(self.g, r.i_event.eid, r.k_event.eid):
        nodes_on_path.extend(path)
      for path in nx.all_simple_paths(self.g, r.k_event.eid, r.i_event.eid):
        nodes_on_path.extend(path)
    nodes_on_path = list(set(nodes_on_path))
    sub_nodes = nodes_on_path + eids
    subg = self.g.subgraph(list(set(sub_nodes)))
    for i, k in race_edges:
      subg.add_edge(k, i, rel='covered')
    self.prep_draw(subg, True)
    nx.write_dot(subg, os.path.join(self.results_dir, 'covered_races.dot'))

  def racing_versions_graph(self, v1, cmd1, v2, cmd2):
    """Build a drawable subgraph for two racing versions: their commands,
    synthetic 'ProactiveN' anchor nodes for proactive versions, 'version'
    edges from anchor to command, and bidirectional 'race' edges between
    racing commands."""
    nodes = []
    extra_nodes = []
    extra_edges = []
    nodes.extend(cmd1)
    nodes.extend(cmd2)
    # Reactive versions are anchored at their triggering event; proactive
    # versions get a synthetic node since they have no event in self.g.
    if hasattr(v1, 'eid') and self.g.has_node(v1.eid):
      nodes.append(v1.eid)
      for eid in cmd1:
        nodes.append(eid)
        extra_edges.append((v1.eid, eid))
    else:
      vid = 'Proactive%d' % v1
      extra_nodes.append(vid)
      for eid in cmd1:
        extra_edges.append((vid, eid))
    if hasattr(v2, 'eid') and self.g.has_node(v2.eid):
      nodes.append(v2.eid)
      for eid in cmd2:
        nodes.append(eid)
        extra_edges.append((v2.eid, eid))
    else:
      vid = 'Proactive%d' % v2
      extra_nodes.append(vid)
      for eid in cmd2:
        extra_edges.append((vid, eid))
    vg = self.g.subgraph(nodes)
    for n in extra_nodes:
      vg.add_node(n)
    for src, dst in extra_edges:
      vg.add_edge(src, dst, rel='version')
    races = self.race_detector.races_harmful
    for rtype, i_event, i_op, k_event, k_op in races:
      if i_event.eid in nodes and k_event.eid in nodes:
        vg.add_edge(i_event.eid, k_event.eid, rel='race', harmful=True)
        vg.add_edge(k_event.eid, i_event.eid, rel='race', harmful=True)
    self.prep_draw(vg, True, allow_none_event=True)
    return vg


class Main(object):
  """Command-line driver: loads a trace, runs race detection and all the
  consistency analyses, and writes the reports/.dot files."""

  def __init__(self, filename, print_pkt, add_hb_time=True, rw_delta=5, ww_delta=5,
               filter_rw=False, ignore_ethertypes=None, no_race=False, alt_barr=False,
               verbose=True, ignore_first=False, disable_path_cache=False,
               data_deps=False, no_dot_files=False, verify_and_minimize_only=False,
               is_minimized=False):
    # Store configuration; all analysis outputs go next to the input trace.
    self.filename = os.path.realpath(filename)
    self.results_dir = os.path.dirname(self.filename)
    self.output_filename = self.results_dir + "/" + "hb.dot"
    self.print_pkt = print_pkt
    self.add_hb_time = add_hb_time
    self.rw_delta = rw_delta
    self.ww_delta = ww_delta
    self.filter_rw = filter_rw
    self.ignore_ethertypes = ignore_ethertypes
    self.no_race = no_race
    self.alt_barr = alt_barr
    self.verbose = verbose
    self.ignore_first = ignore_first
    self.disable_path_cache = disable_path_cache
    self.data_deps = data_deps
    self.no_dot_files = no_dot_files
    self.verify_and_minimize_only = verify_and_minimize_only
    self.is_minimized = is_minimized

  def run(self):
    # NOTE: this method continues past the end of this chunk; code below is
    # documented as-is.
    self.graph = HappensBeforeGraph(results_dir=self.results_dir,
                                    add_hb_time=self.add_hb_time,
                                    rw_delta=self.rw_delta,
                                    ww_delta=self.ww_delta,
                                    filter_rw=self.filter_rw,
                                    ignore_ethertypes=self.ignore_ethertypes,
                                    no_race=self.no_race,
                                    alt_barr=self.alt_barr,
                                    disable_path_cache=self.disable_path_cache,
                                    data_deps=self.data_deps,
                                    verify_and_minimize_only=self.verify_and_minimize_only,
                                    is_minimized=self.is_minimized)
    import resource
    # from guppy import hpy
    # import objgraph
    import gc
    #gc.collect()
    #print 'Memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    t0 = time.time()
    if self.verify_and_minimize_only:
      self.graph.verify_and_minimize_trace(self.filename)
      #gc.collect()
      #print 'Memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    else:
      # Full analysis pipeline; t1..t8 bracket each phase for timing stats.
      self.graph.load_trace(self.filename)
      #gc.collect()
      #print 'Memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
      t1 = time.time()
      self.graph.race_detector.detect_races(verbose=True)
      #gc.collect()
      #print 'Memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
      self.graph.update_path_cache()  # the race detector doesn't do it, so we do it ourself.
      #gc.collect()
      #print 'Memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
      self.graph.race_detector.print_races(self.verbose)
      #gc.collect()
      #print 'Memory usage: %s (kb)' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
      t2 = time.time()
      packet_traces = self.graph.extract_traces(self.graph.g)
      t3 = time.time()
      reactive_cmds = self.graph.find_reactive_versions()
      t4 = time.time()
      proactive_cmds = self.graph.find_proactive_cmds(reactive_cmds)
      versions = self.graph.find_versions()
      t5 = time.time()
      if self.data_deps:
        covered_races = self.graph.find_covered_races()
      else:
        covered_races = dict()
      t6 = time.time()
      packet_races, inconsistent_packet_traces, \
          inconsistent_packet_traces_covered, \
          inconsistent_packet_entry_version, summarized = \
          self.graph.find_per_packet_inconsistent(covered_races, True)
      t7 = time.time()
      racing_versions, racing_versions_tuples, racing_versions_tuples_dict = self.graph.find_inconsistent_updates()
      t8 = time.time()
      if not self.no_dot_files:
        self.graph.store_traces(self.results_dir, print_packets=True, subgraphs=packet_traces)
        print "Saving HB graph to:", self.output_filename
        self.graph.store_graph(self.output_filename, self.print_pkt)
        # Print traces
        for trace, races in packet_races:
          self.graph.print_racing_packet_trace(trace, races, label='incoherent', show_covered=False)
        for trace, races, _ in inconsistent_packet_traces:
          self.graph.print_racing_packet_trace(trace, races, label='incoherent_remaining')
        for trace, races, _ in inconsistent_packet_traces_covered:
          self.graph.print_racing_packet_trace(trace, races, label='covered')
        for trace, races, _ in inconsistent_packet_entry_version:
          self.graph.print_racing_packet_trace(trace, races, label='entry')
        for trace, races, _ in summarized:
          #self.graph.print_racing_packet_trace(trace, races, label='summarized')
          pass
        self.graph.save_races_graph(self.print_pkt)
        # self.graph.print_versions(versions)
        # self.graph.print_covered_races()
      # Gather summary statistics for the report below.
      num_writes = len(self.graph.race_detector.write_operations)
      num_read = len(self.graph.race_detector.read_operations)
      num_ops = num_writes + num_read
      num_harmful = self.graph.race_detector.total_harmful
      num_commute = self.graph.race_detector.total_commute
      num_races = self.graph.race_detector.total_races
      num_time_filtered_races = self.graph.race_detector.total_time_filtered_races
      num_covered = self.graph.race_detector.total_covered
      num_time_edges = self.graph.race_detector.time_edges_counter
      num_per_pkt_races = len(packet_races)
      num_per_pkt_inconsistent = len(inconsistent_packet_traces)
      num_per_pkt_inconsistent_covered = len(inconsistent_packet_traces_covered)
      num_per_pkt_entry_version_race = len(inconsistent_packet_entry_version)
      num_per_pkt_inconsistent_no_repeat = len(summarized)
      # Per-phase wall-clock durations.
      load_time = t1 - t0
      detect_races_time = t2 - t1
      extract_traces_time = t3 - t2
      find_reactive_cmds_time = t4 - t3
      find_proactive_cmds_time = t5 - t4
      find_covered_races_time = t6 - t5
      per_packet_inconsistent_time = t7 - t6
      find_inconsistent_update_time = t8 - t7
      ##### Final time, everything else is just print statements
      t_final = time.time()
      total_time = t_final - t0
      print "\n######## Update isolation violations ########"
      for counter, (v1, v2) in enumerate(racing_versions_tuples_dict):
        if not self.no_dot_files:
          rvg = self.graph.racing_versions_graph(v1, racing_versions_tuples_dict[(v1, v2)][0],
                                                 v2, racing_versions_tuples_dict[(v1, v2)][1])
          rvg_path = os.path.join(self.results_dir, 'isolation_violation_%d.dot' % counter)
          print "Saving update isolation violation graph to %s" % rvg_path
          nx.write_dot(rvg, rvg_path)
        # Reactive versions are described by their trigger event; proactive
        # versions by their cluster index.
        if hasattr(v1, 'eid'):
          pv1 = "React to event %s, %s" % (v1.eid, getattr(v1, 'msg_type_str', ''))
        else:
          pv1 = "Practive version %d" % v1
        if hasattr(v2, 'eid'):
          pv2 = "React to event %d" % v2.eid
        else:
          pv2 = "Practive version %d" % v2
        print "V1:{}".format(pv1)
        print "\tEventing racing: {}".format(racing_versions_tuples_dict[(v1, v2)][0])
        print "V2:{}".format(pv2)
        print "\tEventing racing: {}".format(racing_versions_tuples_dict[(v1, v2)][1])
        print ""
      print "\n########## Summary ###########"
      print "* Race analysis *"
      print "\tTotal number of events in the trace:", self.graph.g.number_of_nodes()
      print "\tTotal number of events with read operations:", num_read
      print "\tTotal number of events with write operations:", num_writes
      print "\tTotal number of events with read or write operations:", num_ops
      print "\tTotal number of observed races without any filters:", num_races
      print "\tTotal number of commuting races:", num_commute
      print "\tTotal number of races filtered by Time HB edges:", num_time_filtered_races
      print "\tTotal number of races covered by data dependency:", num_covered
      print "\tRemaining number of races after applying all enabled filters: %d (%.02f%%)" % (num_harmful, (num_harmful / float(num_races) * 100))
      print "\n\n"
      print "* Properties analysis *"
      print "\tNumber of observed network updates:", len(versions)
      print "\tNumber of update isolation violations:", len(racing_versions_tuples)
      print ""
      print "\tTotal number of packets in the traces:", len(self.graph.host_sends)
      print "\tNumber of packet coherence violations:", len(packet_races)
      print "\tNumber of packet coherence violations filtered due covered races: ", len(inconsistent_packet_traces_covered)
      print "\tNumber of packet coherence but only on the first switch in the update: ", len(inconsistent_packet_entry_version)
      print "\tNumber of packet coherence violations after filtering covered races: ", len(inconsistent_packet_traces)
      #print "\tNumber of packet inconsistencies after trimming repeated races: ", len(summarized)
      #print "\tNumber of packet inconsistent updates: ", len(racing_versions)
      #print "\tNumber of races: ", self.graph.race_detector.total_races
      #print "\tNumber of races
filtered by time: ", self.graph.race_detector.total_time_filtered_races #print "\tNumber of commuting races: ", len(self.graph.race_detector.races_commute) #print "\tNumber of harmful races: ", len(self.graph.race_detector.races_harmful) #print "\tNumber of covered races: ", self.graph.race_detector.total_covered #print "Number of versions:", len(versions) print "* Timing information *" print "\tDone. Time elapsed:",total_time,"s" print "\tload_trace:", load_time, "s" print "\tdetect_races:", detect_races_time, "s" print "\textract_traces_time:", extract_traces_time, "s" print "\tfind_reactive_cmds_time:", find_reactive_cmds_time, "s" print "\tfind_proactive_cmds_time:", find_proactive_cmds_time, "s" print "\tfind_covered_races_time:", find_covered_races_time, "s" print "\tper_packet_inconsistent_time:", per_packet_inconsistent_time, "s" print "\tfind_inconsistent_update_time:", find_inconsistent_update_time, "s" #print "print_races:"+(str(t3-t2))+"s" #print "store_graph:"+(str(t4-t3))+"s" #print "Extracting Packet traces time: "+ (str(t5 - t4)) + "s" #print "Finding inconsistent traces time: "+ (str(t6 - t5)) + "s" # Printing dat file hbt = self.add_hb_time rw_delta = self.rw_delta if self.add_hb_time else 'inf' ww_delta = self.ww_delta if self.add_hb_time else 'inf' file_name = "results_hbt_%s_altbarr_%s_dep_%s_rw_%s_ww_%s.dat" % (hbt, self.alt_barr, self.data_deps, rw_delta, ww_delta) file_name = os.path.join(self.results_dir, file_name) timings_file_name = "timings_hbt_%s_altbarr_%s_dep_%s_rw_%s_ww_%s.dat" % (hbt, self.alt_barr, self.data_deps, rw_delta, ww_delta) timings_file_name = os.path.join(self.results_dir, timings_file_name) def write_general_info_to_file(f): # General info f.write('key,value\n') f.write('rw_delta,%s\n' % rw_delta) f.write('ww_delta,%s\n' % ww_delta) f.write('alt_barr,%s\n' % self.alt_barr) f.write('data_deps,%s\n' % self.data_deps) with open(file_name, 'w') as f: write_general_info_to_file(f) # Operations f.write('num_events,%d\n' % 
self.graph.g.number_of_nodes()) f.write('num_edges,%d\n' % self.graph.g.number_of_edges()) f.write('num_read,%d\n' % num_read) f.write('num_writes,%d\n' % num_writes) f.write('num_ops,%d\n' % num_ops) # HB time edges f.write('num_time_edges,%d\n' % num_time_edges) # Races info # One last check assert num_races == num_commute + num_covered + num_harmful + num_time_filtered_races f.write('num_races,%d\n' % num_races) f.write('num_harmful,%d\n' % num_harmful) f.write('num_commute,%d\n' % num_commute) f.write('num_time_filtered_races,%d\n' % num_time_filtered_races) f.write('num_covered,%d\n' % num_covered) # Inconsistency f.write('num_pkts,%d\n' % len(self.graph.host_sends)) assert len(self.graph.host_sends) >= num_per_pkt_races assert num_per_pkt_races == num_per_pkt_inconsistent + num_per_pkt_inconsistent_covered + num_per_pkt_entry_version_race f.write('num_per_pkt_races,%d\n' % num_per_pkt_races) f.write('num_per_pkt_inconsistent,%d\n' % num_per_pkt_inconsistent) f.write('num_per_pkt_inconsistent_covered,%d\n' % num_per_pkt_inconsistent_covered) f.write('num_per_pkt_entry_version_race,%d\n' % num_per_pkt_entry_version_race) f.write('num_per_pkt_inconsistent_no_repeat,%d\n' % num_per_pkt_inconsistent_no_repeat) f.write('num_versions,%d\n' % len(versions)) f.write('num_racing_versions,%d\n' % len(racing_versions_tuples)) with open(timings_file_name, 'w') as f: write_general_info_to_file(f) # Times f.write('total_time_sec,%f\n'% total_time) f.write('load_time_sec,%f\n' % load_time ) f.write('detect_races_time_sec,%f\n' % detect_races_time ) f.write('extract_traces_time_sec,%f\n' % extract_traces_time ) f.write('find_reactive_cmds_time_sec,%f\n' % find_reactive_cmds_time ) f.write('find_proactive_cmds_time_sec,%f\n' % find_proactive_cmds_time ) f.write('find_covered_races_time,%f\n' % find_covered_races_time ) f.write('per_packet_inconsistent_time_sec,%f\n' % per_packet_inconsistent_time ) f.write('find_inconsistent_update_time_sec,%f\n' % 
find_inconsistent_update_time ) def auto_int(x): return int(x, 0) if __name__ == '__main__': empty_delta = 1000000 parser = argparse.ArgumentParser() parser.add_argument('trace_file', help='Trace file produced by the instrumented sts, usually "hb.json"') parser.add_argument('--no-hbt', dest='no_hbt', action='store_true', default=False, help="Don't add HB edges based on time") parser.add_argument('--time-delta', dest='delta', default=2, type=int, help="delta time (in secs) for adding HB edges based on time") parser.add_argument('--pkt', dest='print_pkt', action='store_true', default=False, help="Print packet headers in the produced dot files") parser.add_argument('--rw_delta', dest='rw_delta', default=2, type=int, help="delta time (in secs) for adding HB edges based on time") parser.add_argument('--ww_delta', dest='ww_delta', default=2, type=int, help="delta time (in secs) for adding HB edges based on time") parser.add_argument('--filter_rw', dest='filter_rw', action='store_true', default=False, help="Filter Read/Write operations with HB relations") parser.add_argument('--ignore-ethertypes', dest='ignore_ethertypes', nargs='*', type=auto_int, default=0, help='Ether types to ignore from the graph') parser.add_argument('--no-race', dest='no_race', action='store_true', default=False, help="Don't add edge between racing events in the visualized graph") parser.add_argument('--alt-barr', dest='alt_barr', action='store_true', default=False, help="Use alternative barrier rules for purely reactive controllers") parser.add_argument('-v', dest='verbose', action='store_true', default=False, help="Print all commute and harmful races") parser.add_argument('--ignore-first', dest='ignore_first', action='store_true', default=False, help="Ignore the first race for per-packet consistency check") parser.add_argument('--disable-path-cache', dest='disable_path_cache', action='store_true', default=False, help="Disable using all_pairs_shortest_path_length() preprocessing.") 
parser.add_argument('--data-deps', dest='data_deps', action='store_true', default=False, help="Use shadow tables for adding data dependency edges between reads/writes.") parser.add_argument('--no-dot-files', dest='no_dot_files', action='store_true', default=False, help="Do not write any .dot files to the disk.") parser.add_argument('--verify-and-minimize-only', dest='verify_and_minimize_only', action='store_true', default=False, help="Verify the input trace, then write out a minimized version.") parser.add_argument('--is-minimized', dest='is_minimized', action='store_true', default=False, help="Process a minimized trace.") # TODO(jm): Make option naming consistent (use _ everywhere, not a mixture of - and _). args = parser.parse_args() if not args.no_hbt: if args.delta == empty_delta: assert args.rw_delta == args.ww_delta else: args.rw_delta = args.ww_delta = args.delta main = Main(args.trace_file, print_pkt=args.print_pkt, add_hb_time=not args.no_hbt, rw_delta=args.rw_delta, ww_delta=args.ww_delta, filter_rw=args.filter_rw, ignore_ethertypes=args.ignore_ethertypes, no_race=args.no_race, alt_barr=args.alt_barr, verbose=args.verbose, ignore_first=args.ignore_first, disable_path_cache=args.disable_path_cache, data_deps=args.data_deps, no_dot_files=args.no_dot_files, verify_and_minimize_only=args.verify_and_minimize_only, is_minimized=args.is_minimized) main.run()
jmiserez/sts
sts/happensbefore/hb_graph.py
Python
apache-2.0
72,338
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import os import threading import urlparse from collections import namedtuple from six.moves import range from pants.base.build_environment import get_buildroot from pants.cache.artifact_cache import ArtifactCacheError from pants.cache.local_artifact_cache import LocalArtifactCache, TempLocalArtifactCache from pants.cache.pinger import BestUrlSelector, Pinger from pants.cache.resolver import NoopResolver, Resolver, RESTfulResolver from pants.cache.restful_artifact_cache import RESTfulArtifactCache from pants.subsystem.subsystem import Subsystem class EmptyCacheSpecError(ArtifactCacheError): pass class LocalCacheSpecRequiredError(ArtifactCacheError): pass class CacheSpecFormatError(ArtifactCacheError): pass class InvalidCacheSpecError(ArtifactCacheError): pass class RemoteCacheSpecRequiredError(ArtifactCacheError): pass class TooManyCacheSpecsError(ArtifactCacheError): pass CacheSpec = namedtuple('CacheSpec', ['local', 'remote']) class CacheSetup(Subsystem): options_scope = 'cache' @classmethod def register_options(cls, register): super(CacheSetup, cls).register_options(register) default_cache = [os.path.join(get_buildroot(), '.cache')] register('--read', type=bool, default=True, help='Read build artifacts from cache, if available.') register('--write', type=bool, default=True, help='Write build artifacts to cache, if available.') register('--overwrite', advanced=True, type=bool, help='If writing build artifacts to cache, overwrite existing artifacts ' 'instead of skipping them.') register('--resolver', advanced=True, choices=['none', 'rest'], default='none', help='Select which resolver strategy to use for discovering URIs that access ' 'artifact caches. 
none: use URIs from static config options, i.e. ' '--read-from, --write-to. rest: look up URIs by querying a RESTful ' 'URL, which is a remote address from --read-from, --write-to.') register('--read-from', advanced=True, type=list, default=default_cache, help='The URIs of artifact caches to read directly from. Each entry is a URL of ' 'a RESTful cache, a path of a filesystem cache, or a pipe-separated list of ' 'alternate caches to choose from. This list is also used as input to ' 'the resolver. When resolver is \'none\' list is used as is.') register('--write-to', advanced=True, type=list, default=default_cache, help='The URIs of artifact caches to write directly to. Each entry is a URL of' 'a RESTful cache, a path of a filesystem cache, or a pipe-separated list of ' 'alternate caches to choose from. This list is also used as input to ' 'the resolver. When resolver is \'none\' list is used as is.') register('--compression-level', advanced=True, type=int, default=5, help='The gzip compression level (0-9) for created artifacts.') register('--max-entries-per-target', advanced=True, type=int, default=8, help='Maximum number of old cache files to keep per task target pair') register('--pinger-timeout', advanced=True, type=float, default=0.5, help='number of seconds before pinger times out') register('--pinger-tries', advanced=True, type=int, default=2, help='number of times pinger tries a cache') @classmethod def create_cache_factory_for_task(cls, task, pinger=None, resolver=None): return CacheFactory(cls.scoped_instance(task).get_options(), task.context.log, task.stable_name(), pinger=pinger, resolver=resolver) class CacheFactory(object): def __init__(self, options, log, stable_name, pinger=None, resolver=None): """Create a cache factory from settings. :param options: Task's scoped options. :param log: Task's context log. :param stable_name: Task's stable name. :param pinger: Pinger to choose the best remote artifact cache URL. 
:param resolver: Resolver to look up remote artifact cache URLs. :return: cache factory. """ self._options = options self._log = log self._stable_name = stable_name # Created on-demand. self._read_cache = None self._write_cache = None # Protects local filesystem setup, and assignment to the references above. self._cache_setup_lock = threading.Lock() # Caches are supposed to be close, and we don't want to waste time pinging on no-op builds. # So we ping twice with a short timeout. # TODO: Make lazy. self._pinger = pinger or Pinger(timeout=self._options.pinger_timeout, tries=self._options.pinger_tries) # resolver is also close but failing to resolve might have broader impact than # single ping failure, therefore use a higher timeout with more retries. if resolver: self._resolver = resolver elif self._options.resolver == 'rest': self._resolver = RESTfulResolver(timeout=1.0, tries=3) else: self._resolver = NoopResolver() def read_cache_available(self): return self._options.read and bool(self._options.read_from) and self.get_read_cache() def write_cache_available(self): return self._options.write and bool(self._options.write_to) and self.get_write_cache() def overwrite(self): return self._options.overwrite def get_read_cache(self): """Returns the read cache for this setup, creating it if necessary. Returns None if no read cache is configured. """ if self._options.read_from and not self._read_cache: cache_spec = self._resolve(self._sanitize_cache_spec(self._options.read_from)) if cache_spec: with self._cache_setup_lock: self._read_cache = self._do_create_artifact_cache(cache_spec, 'will read from') return self._read_cache def get_write_cache(self): """Returns the write cache for this setup, creating it if necessary. Returns None if no read cache is configured. 
""" if self._options.write_to and not self._write_cache: cache_spec = self._resolve(self._sanitize_cache_spec(self._options.write_to)) if cache_spec: with self._cache_setup_lock: self._write_cache = self._do_create_artifact_cache(cache_spec, 'will write to') return self._write_cache # VisibleForTesting def _sanitize_cache_spec(self, spec): if not isinstance(spec, (list, tuple)): raise InvalidCacheSpecError('Invalid artifact cache spec type: {0} ({1})'.format( type(spec), spec)) if not spec: raise EmptyCacheSpecError() if len(spec) > 2: raise TooManyCacheSpecsError('Too many artifact cache specs: ({0})'.format(spec)) local_specs = [s for s in spec if self.is_local(s)] remote_specs = [s for s in spec if self.is_remote(s)] if not local_specs and not remote_specs: raise CacheSpecFormatError('Invalid cache spec: {0}, must be either local or remote' .format(spec)) if len(spec) == 2: if not local_specs: raise LocalCacheSpecRequiredError('One of two cache specs must be a local cache path.') if not remote_specs: raise RemoteCacheSpecRequiredError('One of two cache specs must be a remote spec.') local_spec = local_specs[0] if len(local_specs) > 0 else None remote_spec = remote_specs[0] if len(remote_specs) > 0 else None return CacheSpec(local=local_spec, remote=remote_spec) # VisibleForTesting def _resolve(self, spec): """Attempt resolving cache URIs when a remote spec is provided. 
""" if not spec.remote: return spec try: resolved_urls = self._resolver.resolve(spec.remote) if resolved_urls: # keep the bar separated list of URLs convention return CacheSpec(local=spec.local, remote='|'.join(resolved_urls)) # no-op return spec except Resolver.ResolverError as e: self._log.warn('Error while resolving from {0}: {1}'.format(spec.remote, str(e))) # If for some reason resolver fails we continue to use local cache if spec.local: return CacheSpec(local=spec.local, remote=None) # resolver fails but there is no local cache return None @staticmethod def is_local(string_spec): return string_spec.startswith('/') or string_spec.startswith('~') @staticmethod def is_remote(string_spec): # both artifact cache and resolver use REST, add new protocols here once they are supported return string_spec.startswith('http://') or string_spec.startswith('https://') def get_available_urls(self, urls): """Return reachable urls sorted by their ping times.""" netloc_to_url = {urlparse.urlparse(url).netloc: url for url in urls} pingtimes = self._pinger.pings(netloc_to_url.keys()) # List of pairs (host, time in ms). self._log.debug('Artifact cache server ping times: {}' .format(', '.join(['{}: {:.6f} secs'.format(*p) for p in pingtimes]))) sorted_pingtimes = sorted(pingtimes, key=lambda x: x[1]) available_urls = [netloc_to_url[netloc] for netloc, pingtime in sorted_pingtimes if pingtime < Pinger.UNREACHABLE] self._log.debug('Available cache servers: {0}'.format(available_urls)) return available_urls def _do_create_artifact_cache(self, spec, action): """Returns an artifact cache for the specified spec. spec can be: - a path to a file-based cache root. - a URL of a RESTful cache root. - a bar-separated list of URLs, where we'll pick the one with the best ping times. 
- A list or tuple of two specs, local, then remote, each as described above """ compression = self._options.compression_level if compression not in range(10): raise ValueError('compression_level must be an integer 0-9: {}'.format(compression)) artifact_root = self._options.pants_workdir def create_local_cache(parent_path): path = os.path.join(parent_path, self._stable_name) self._log.debug('{0} {1} local artifact cache at {2}' .format(self._stable_name, action, path)) return LocalArtifactCache(artifact_root, path, compression, self._options.max_entries_per_target) def create_remote_cache(remote_spec, local_cache): urls = self.get_available_urls(remote_spec.split('|')) if len(urls) > 0: best_url_selector = BestUrlSelector(['{}/{}'.format(url.rstrip('/'), self._stable_name) for url in urls]) local_cache = local_cache or TempLocalArtifactCache(artifact_root, compression) return RESTfulArtifactCache(artifact_root, best_url_selector, local_cache) local_cache = create_local_cache(spec.local) if spec.local else None remote_cache = create_remote_cache(spec.remote, local_cache) if spec.remote else None if remote_cache: return remote_cache return local_cache
dbentley/pants
src/python/pants/cache/cache_setup.py
Python
apache-2.0
11,380
package org.sagebionetworks.repo.manager.backup.daemon; import java.io.File; import java.io.IOException; import java.util.List; import org.sagebionetworks.repo.manager.backup.Progress; import org.sagebionetworks.repo.model.UserInfo; import org.sagebionetworks.repo.model.migration.MigrationType; /** * Abstraction for a backup driver. * @author jmhill * */ public interface BackupDriver { /** * Write the objects identified by the passed list to the provided zip file. * * @param user * @param destination * @param progress * @param type * @param idsToBackup * @return * @throws IOException * @throws InterruptedException */ public boolean writeBackup(UserInfo user, File destination, Progress progress, MigrationType type, List<Long> idsToBackup) throws IOException, InterruptedException; /** * * @param user * @param source * @param progress * @param type * @return * @throws IOException * @throws InterruptedException * @throws Exception */ public boolean restoreFromBackup(UserInfo user, File source, Progress progress) throws IOException, InterruptedException, Exception; }
hhu94/Synapse-Repository-Services
services/repository-managers/src/main/java/org/sagebionetworks/repo/manager/backup/daemon/BackupDriver.java
Java
apache-2.0
1,183
# -*- coding: utf-8 -*- ''' The match module allows for match routines to be run and determine target specs ''' # Import python libs import logging # Import salt libs import salt.minion __func_alias__ = { 'list_': 'list' } log = logging.getLogger(__name__) def compound(tgt): ''' Return True if the minion matches the given compound target CLI Example: .. code-block:: bash salt '*' match.compound 'L@cheese,foo and *' ''' __opts__['grains'] = __grains__ matcher = salt.minion.Matcher(__opts__, __salt__) try: return matcher.compound_match(tgt) except Exception as exc: log.exception(exc) return False def ipcidr(tgt): ''' Return True if the minion matches the given ipcidr target CLI Example: .. code-block:: bash salt '*' match.ipcidr '192.168.44.0/24' ''' matcher = salt.minion.Matcher({'grains': __grains__}, __salt__) try: return matcher.ipcidr_match(tgt) except Exception as exc: log.exception(exc) return False def pillar(tgt, delim=':'): ''' Return True if the minion matches the given pillar target. The ``delim`` argument can be used to specify a different delimiter. CLI Example: .. code-block:: bash salt '*' match.pillar 'cheese:foo' salt '*' match.pillar 'clone_url|https://github.com/saltstack/salt.git' delim='|' .. versionchanged:: 0.16.4 ``delim`` argument added ''' matcher = salt.minion.Matcher({'pillar': __pillar__}, __salt__) try: return matcher.pillar_match(tgt, delim=delim) except Exception as exc: log.exception(exc) return False def data(tgt): ''' Return True if the minion matches the given data target CLI Example: .. code-block:: bash salt '*' match.data 'spam:eggs' ''' matcher = salt.minion.Matcher(__opts__, __salt__) try: return matcher.data_match(tgt) except Exception as exc: log.exception(exc) return False def grain_pcre(tgt, delim=':'): ''' Return True if the minion matches the given grain_pcre target. The ``delim`` argument can be used to specify a different delimiter. CLI Example: .. 
code-block:: bash salt '*' match.grain_pcre 'os:Fedo.*' salt '*' match.grain_pcre 'ipv6|2001:.*' delim='|' .. versionchanged:: 0.16.4 ``delim`` argument added ''' matcher = salt.minion.Matcher({'grains': __grains__}, __salt__) try: return matcher.grain_pcre_match(tgt, delim=delim) except Exception as exc: log.exception(exc) return False def grain(tgt, delim=':'): ''' Return True if the minion matches the given grain target. The ``delim`` argument can be used to specify a different delimiter. CLI Example: .. code-block:: bash salt '*' match.grain 'os:Ubuntu' salt '*' match.grain_pcre 'ipv6|2001:db8::ff00:42:8329' delim='|' .. versionchanged:: 0.16.4 ``delim`` argument added ''' matcher = salt.minion.Matcher({'grains': __grains__}, __salt__) try: return matcher.grain_match(tgt, delim=delim) except Exception as exc: log.exception(exc) return False def list_(tgt): ''' Return True if the minion matches the given list target CLI Example: .. code-block:: bash salt '*' match.list 'server1,server2' ''' matcher = salt.minion.Matcher(__opts__, __salt__) try: return matcher.list_match(tgt) except Exception as exc: log.exception(exc) return False def pcre(tgt): ''' Return True if the minion matches the given pcre target CLI Example: .. code-block:: bash salt '*' match.pcre '.*' ''' matcher = salt.minion.Matcher(__opts__, __salt__) try: return matcher.pcre_match(tgt) except Exception as exc: log.exception(exc) return False def glob(tgt): ''' Return True if the minion matches the given glob target CLI Example: .. code-block:: bash salt '*' match.glob '*' ''' matcher = salt.minion.Matcher(__opts__, __salt__) try: return matcher.glob_match(tgt) except Exception as exc: log.exception(exc) return False
victorywang80/Maintenance
saltstack/src/salt/modules/match.py
Python
apache-2.0
4,350
/* * Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/cloud/orgpolicy/v2/constraint.proto package com.google.cloud.orgpolicy.v2; /** * * * <pre> * A `constraint` describes a way to restrict resource's configuration. For * example, you could enforce a constraint that controls which cloud services * can be activated across an organization, or whether a Compute Engine instance * can have serial port connections established. `Constraints` can be configured * by the organization's policy administrator to fit the needs of the * organization by setting a `policy` that includes `constraints` at different * locations in the organization's resource hierarchy. Policies are inherited * down the resource hierarchy from higher levels, but can also be overridden. * For details about the inheritance rules please read about * [`policies`][google.cloud.OrgPolicy.v2.Policy]. * `Constraints` have a default behavior determined by the `constraint_default` * field, which is the enforcement behavior that is used in the absence of a * `policy` being defined or inherited for the resource in question. 
* </pre> * * Protobuf type {@code google.cloud.orgpolicy.v2.Constraint} */ public final class Constraint extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:google.cloud.orgpolicy.v2.Constraint) ConstraintOrBuilder { private static final long serialVersionUID = 0L; // Use Constraint.newBuilder() to construct. private Constraint(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { super(builder); } private Constraint() { name_ = ""; displayName_ = ""; description_ = ""; constraintDefault_ = 0; } @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance(UnusedPrivateParameter unused) { return new Constraint(); } @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private Constraint( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 10: { java.lang.String s = input.readStringRequireUtf8(); name_ = s; break; } case 18: { java.lang.String s = input.readStringRequireUtf8(); displayName_ = s; break; } case 26: { java.lang.String s = input.readStringRequireUtf8(); description_ = s; break; } case 32: { int rawValue = input.readEnum(); constraintDefault_ = rawValue; break; } case 42: { com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.Builder subBuilder = null; if (constraintTypeCase_ == 5) { subBuilder = ((com.google.cloud.orgpolicy.v2.Constraint.ListConstraint) constraintType_) .toBuilder(); } constraintType_ = input.readMessage( 
com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.parser(), extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom( (com.google.cloud.orgpolicy.v2.Constraint.ListConstraint) constraintType_); constraintType_ = subBuilder.buildPartial(); } constraintTypeCase_ = 5; break; } case 50: { com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.Builder subBuilder = null; if (constraintTypeCase_ == 6) { subBuilder = ((com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) constraintType_) .toBuilder(); } constraintType_ = input.readMessage( com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.parser(), extensionRegistry); if (subBuilder != null) { subBuilder.mergeFrom( (com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) constraintType_); constraintType_ = subBuilder.buildPartial(); } constraintTypeCase_ = 6; break; } default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.orgpolicy.v2.ConstraintProto .internal_static_google_cloud_orgpolicy_v2_Constraint_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.orgpolicy.v2.ConstraintProto .internal_static_google_cloud_orgpolicy_v2_Constraint_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.orgpolicy.v2.Constraint.class, com.google.cloud.orgpolicy.v2.Constraint.Builder.class); } /** * * * <pre> * Specifies the default behavior in the absence of any `Policy` for the * `Constraint`. 
This must not be `CONSTRAINT_DEFAULT_UNSPECIFIED`. * Immutable after creation. * </pre> * * Protobuf enum {@code google.cloud.orgpolicy.v2.Constraint.ConstraintDefault} */ public enum ConstraintDefault implements com.google.protobuf.ProtocolMessageEnum { /** * * * <pre> * This is only used for distinguishing unset values and should never be * used. * </pre> * * <code>CONSTRAINT_DEFAULT_UNSPECIFIED = 0;</code> */ CONSTRAINT_DEFAULT_UNSPECIFIED(0), /** * * * <pre> * Indicate that all values are allowed for list constraints. * Indicate that enforcement is off for boolean constraints. * </pre> * * <code>ALLOW = 1;</code> */ ALLOW(1), /** * * * <pre> * Indicate that all values are denied for list constraints. * Indicate that enforcement is on for boolean constraints. * </pre> * * <code>DENY = 2;</code> */ DENY(2), UNRECOGNIZED(-1), ; /** * * * <pre> * This is only used for distinguishing unset values and should never be * used. * </pre> * * <code>CONSTRAINT_DEFAULT_UNSPECIFIED = 0;</code> */ public static final int CONSTRAINT_DEFAULT_UNSPECIFIED_VALUE = 0; /** * * * <pre> * Indicate that all values are allowed for list constraints. * Indicate that enforcement is off for boolean constraints. * </pre> * * <code>ALLOW = 1;</code> */ public static final int ALLOW_VALUE = 1; /** * * * <pre> * Indicate that all values are denied for list constraints. * Indicate that enforcement is on for boolean constraints. * </pre> * * <code>DENY = 2;</code> */ public static final int DENY_VALUE = 2; public final int getNumber() { if (this == UNRECOGNIZED) { throw new java.lang.IllegalArgumentException( "Can't get the number of an unknown enum value."); } return value; } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. * @deprecated Use {@link #forNumber(int)} instead. 
*/ @java.lang.Deprecated public static ConstraintDefault valueOf(int value) { return forNumber(value); } /** * @param value The numeric wire value of the corresponding enum entry. * @return The enum associated with the given numeric wire value. */ public static ConstraintDefault forNumber(int value) { switch (value) { case 0: return CONSTRAINT_DEFAULT_UNSPECIFIED; case 1: return ALLOW; case 2: return DENY; default: return null; } } public static com.google.protobuf.Internal.EnumLiteMap<ConstraintDefault> internalGetValueMap() { return internalValueMap; } private static final com.google.protobuf.Internal.EnumLiteMap<ConstraintDefault> internalValueMap = new com.google.protobuf.Internal.EnumLiteMap<ConstraintDefault>() { public ConstraintDefault findValueByNumber(int number) { return ConstraintDefault.forNumber(number); } }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { if (this == UNRECOGNIZED) { throw new java.lang.IllegalStateException( "Can't get the descriptor of an unrecognized enum value."); } return getDescriptor().getValues().get(ordinal()); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { return getDescriptor(); } public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { return com.google.cloud.orgpolicy.v2.Constraint.getDescriptor().getEnumTypes().get(0); } private static final ConstraintDefault[] VALUES = values(); public static ConstraintDefault valueOf( com.google.protobuf.Descriptors.EnumValueDescriptor desc) { if (desc.getType() != getDescriptor()) { throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); } if (desc.getIndex() == -1) { return UNRECOGNIZED; } return VALUES[desc.getIndex()]; } private final int value; private ConstraintDefault(int value) { this.value = value; } // @@protoc_insertion_point(enum_scope:google.cloud.orgpolicy.v2.Constraint.ConstraintDefault) } public interface ListConstraintOrBuilder 
extends // @@protoc_insertion_point(interface_extends:google.cloud.orgpolicy.v2.Constraint.ListConstraint) com.google.protobuf.MessageOrBuilder { /** * * * <pre> * Indicates whether values grouped into categories can be used in * `Policy.allowed_values` and `Policy.denied_values`. For example, * `"in:Python"` would match any value in the 'Python' group. * </pre> * * <code>bool supports_in = 1;</code> * * @return The supportsIn. */ boolean getSupportsIn(); /** * * * <pre> * Indicates whether subtrees of Cloud Resource Manager resource hierarchy * can be used in `Policy.allowed_values` and `Policy.denied_values`. For * example, `"under:folders/123"` would match any resource under the * 'folders/123' folder. * </pre> * * <code>bool supports_under = 2;</code> * * @return The supportsUnder. */ boolean getSupportsUnder(); } /** * * * <pre> * A `Constraint` that allows or disallows a list of string values, which are * configured by an Organization's policy administrator with a `Policy`. * </pre> * * Protobuf type {@code google.cloud.orgpolicy.v2.Constraint.ListConstraint} */ public static final class ListConstraint extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:google.cloud.orgpolicy.v2.Constraint.ListConstraint) ListConstraintOrBuilder { private static final long serialVersionUID = 0L; // Use ListConstraint.newBuilder() to construct. 
private ListConstraint(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { super(builder); } private ListConstraint() {} @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance(UnusedPrivateParameter unused) { return new ListConstraint(); } @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private ListConstraint( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { supportsIn_ = input.readBool(); break; } case 16: { supportsUnder_ = input.readBool(); break; } default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.orgpolicy.v2.ConstraintProto .internal_static_google_cloud_orgpolicy_v2_Constraint_ListConstraint_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.orgpolicy.v2.ConstraintProto .internal_static_google_cloud_orgpolicy_v2_Constraint_ListConstraint_fieldAccessorTable .ensureFieldAccessorsInitialized( 
com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.class, com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.Builder.class); } public static final int SUPPORTS_IN_FIELD_NUMBER = 1; private boolean supportsIn_; /** * * * <pre> * Indicates whether values grouped into categories can be used in * `Policy.allowed_values` and `Policy.denied_values`. For example, * `"in:Python"` would match any value in the 'Python' group. * </pre> * * <code>bool supports_in = 1;</code> * * @return The supportsIn. */ @java.lang.Override public boolean getSupportsIn() { return supportsIn_; } public static final int SUPPORTS_UNDER_FIELD_NUMBER = 2; private boolean supportsUnder_; /** * * * <pre> * Indicates whether subtrees of Cloud Resource Manager resource hierarchy * can be used in `Policy.allowed_values` and `Policy.denied_values`. For * example, `"under:folders/123"` would match any resource under the * 'folders/123' folder. * </pre> * * <code>bool supports_under = 2;</code> * * @return The supportsUnder. 
*/ @java.lang.Override public boolean getSupportsUnder() { return supportsUnder_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (supportsIn_ != false) { output.writeBool(1, supportsIn_); } if (supportsUnder_ != false) { output.writeBool(2, supportsUnder_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (supportsIn_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(1, supportsIn_); } if (supportsUnder_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(2, supportsUnder_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof com.google.cloud.orgpolicy.v2.Constraint.ListConstraint)) { return super.equals(obj); } com.google.cloud.orgpolicy.v2.Constraint.ListConstraint other = (com.google.cloud.orgpolicy.v2.Constraint.ListConstraint) obj; if (getSupportsIn() != other.getSupportsIn()) return false; if (getSupportsUnder() != other.getSupportsUnder()) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (37 * hash) + SUPPORTS_IN_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getSupportsIn()); hash = (37 * hash) + SUPPORTS_UNDER_FIELD_NUMBER; hash = (53 * hash) + 
com.google.protobuf.Internal.hashBoolean(getSupportsUnder()); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static com.google.cloud.orgpolicy.v2.Constraint.ListConstraint parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.orgpolicy.v2.Constraint.ListConstraint parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.orgpolicy.v2.Constraint.ListConstraint parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.orgpolicy.v2.Constraint.ListConstraint parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.orgpolicy.v2.Constraint.ListConstraint parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.orgpolicy.v2.Constraint.ListConstraint parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.orgpolicy.v2.Constraint.ListConstraint parseFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static com.google.cloud.orgpolicy.v2.Constraint.ListConstraint parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.orgpolicy.v2.Constraint.ListConstraint parseDelimitedFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } public static com.google.cloud.orgpolicy.v2.Constraint.ListConstraint parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.orgpolicy.v2.Constraint.ListConstraint parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static com.google.cloud.orgpolicy.v2.Constraint.ListConstraint parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder( com.google.cloud.orgpolicy.v2.Constraint.ListConstraint prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * * * <pre> * A `Constraint` that allows or disallows a list of string values, which are * configured by an Organization's policy administrator with a `Policy`. * </pre> * * Protobuf type {@code google.cloud.orgpolicy.v2.Constraint.ListConstraint} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements // @@protoc_insertion_point(builder_implements:google.cloud.orgpolicy.v2.Constraint.ListConstraint) com.google.cloud.orgpolicy.v2.Constraint.ListConstraintOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.orgpolicy.v2.ConstraintProto .internal_static_google_cloud_orgpolicy_v2_Constraint_ListConstraint_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.orgpolicy.v2.ConstraintProto .internal_static_google_cloud_orgpolicy_v2_Constraint_ListConstraint_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.class, com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.Builder.class); } // Construct using com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} } @java.lang.Override public Builder clear() { super.clear(); supportsIn_ = false; supportsUnder_ = false; return this; } @java.lang.Override public com.google.protobuf.Descriptors.Descriptor 
getDescriptorForType() { return com.google.cloud.orgpolicy.v2.ConstraintProto .internal_static_google_cloud_orgpolicy_v2_Constraint_ListConstraint_descriptor; } @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint.ListConstraint getDefaultInstanceForType() { return com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.getDefaultInstance(); } @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint.ListConstraint build() { com.google.cloud.orgpolicy.v2.Constraint.ListConstraint result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint.ListConstraint buildPartial() { com.google.cloud.orgpolicy.v2.Constraint.ListConstraint result = new com.google.cloud.orgpolicy.v2.Constraint.ListConstraint(this); result.supportsIn_ = supportsIn_; result.supportsUnder_ = supportsUnder_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof 
com.google.cloud.orgpolicy.v2.Constraint.ListConstraint) { return mergeFrom((com.google.cloud.orgpolicy.v2.Constraint.ListConstraint) other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(com.google.cloud.orgpolicy.v2.Constraint.ListConstraint other) { if (other == com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.getDefaultInstance()) return this; if (other.getSupportsIn() != false) { setSupportsIn(other.getSupportsIn()); } if (other.getSupportsUnder() != false) { setSupportsUnder(other.getSupportsUnder()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { com.google.cloud.orgpolicy.v2.Constraint.ListConstraint parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (com.google.cloud.orgpolicy.v2.Constraint.ListConstraint) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private boolean supportsIn_; /** * * * <pre> * Indicates whether values grouped into categories can be used in * `Policy.allowed_values` and `Policy.denied_values`. For example, * `"in:Python"` would match any value in the 'Python' group. * </pre> * * <code>bool supports_in = 1;</code> * * @return The supportsIn. */ @java.lang.Override public boolean getSupportsIn() { return supportsIn_; } /** * * * <pre> * Indicates whether values grouped into categories can be used in * `Policy.allowed_values` and `Policy.denied_values`. For example, * `"in:Python"` would match any value in the 'Python' group. 
* </pre> * * <code>bool supports_in = 1;</code> * * @param value The supportsIn to set. * @return This builder for chaining. */ public Builder setSupportsIn(boolean value) { supportsIn_ = value; onChanged(); return this; } /** * * * <pre> * Indicates whether values grouped into categories can be used in * `Policy.allowed_values` and `Policy.denied_values`. For example, * `"in:Python"` would match any value in the 'Python' group. * </pre> * * <code>bool supports_in = 1;</code> * * @return This builder for chaining. */ public Builder clearSupportsIn() { supportsIn_ = false; onChanged(); return this; } private boolean supportsUnder_; /** * * * <pre> * Indicates whether subtrees of Cloud Resource Manager resource hierarchy * can be used in `Policy.allowed_values` and `Policy.denied_values`. For * example, `"under:folders/123"` would match any resource under the * 'folders/123' folder. * </pre> * * <code>bool supports_under = 2;</code> * * @return The supportsUnder. */ @java.lang.Override public boolean getSupportsUnder() { return supportsUnder_; } /** * * * <pre> * Indicates whether subtrees of Cloud Resource Manager resource hierarchy * can be used in `Policy.allowed_values` and `Policy.denied_values`. For * example, `"under:folders/123"` would match any resource under the * 'folders/123' folder. * </pre> * * <code>bool supports_under = 2;</code> * * @param value The supportsUnder to set. * @return This builder for chaining. */ public Builder setSupportsUnder(boolean value) { supportsUnder_ = value; onChanged(); return this; } /** * * * <pre> * Indicates whether subtrees of Cloud Resource Manager resource hierarchy * can be used in `Policy.allowed_values` and `Policy.denied_values`. For * example, `"under:folders/123"` would match any resource under the * 'folders/123' folder. * </pre> * * <code>bool supports_under = 2;</code> * * @return This builder for chaining. 
*/ public Builder clearSupportsUnder() { supportsUnder_ = false; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:google.cloud.orgpolicy.v2.Constraint.ListConstraint) } // @@protoc_insertion_point(class_scope:google.cloud.orgpolicy.v2.Constraint.ListConstraint) private static final com.google.cloud.orgpolicy.v2.Constraint.ListConstraint DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new com.google.cloud.orgpolicy.v2.Constraint.ListConstraint(); } public static com.google.cloud.orgpolicy.v2.Constraint.ListConstraint getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser<ListConstraint> PARSER = new com.google.protobuf.AbstractParser<ListConstraint>() { @java.lang.Override public ListConstraint parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new ListConstraint(input, extensionRegistry); } }; public static com.google.protobuf.Parser<ListConstraint> parser() { return PARSER; } @java.lang.Override public com.google.protobuf.Parser<ListConstraint> getParserForType() { return PARSER; } @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint.ListConstraint getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface BooleanConstraintOrBuilder extends // @@protoc_insertion_point(interface_extends:google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) com.google.protobuf.MessageOrBuilder {} /** * * * <pre> * A `Constraint` that is either enforced or not. 
* For example a constraint `constraints/compute.disableSerialPortAccess`. * If it is enforced on a VM instance, serial port connections will not be * opened to that instance. * </pre> * * Protobuf type {@code google.cloud.orgpolicy.v2.Constraint.BooleanConstraint} */ public static final class BooleanConstraint extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) BooleanConstraintOrBuilder { private static final long serialVersionUID = 0L; // Use BooleanConstraint.newBuilder() to construct. private BooleanConstraint(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { super(builder); } private BooleanConstraint() {} @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance(UnusedPrivateParameter unused) { return new BooleanConstraint(); } @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private BooleanConstraint( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final 
com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.orgpolicy.v2.ConstraintProto .internal_static_google_cloud_orgpolicy_v2_Constraint_BooleanConstraint_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.orgpolicy.v2.ConstraintProto .internal_static_google_cloud_orgpolicy_v2_Constraint_BooleanConstraint_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.class, com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.Builder.class); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint)) { return super.equals(obj); } com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint other = (com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) obj; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint 
parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint parseFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint 
parseDelimitedFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } public static com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder( com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * * * <pre> * A `Constraint` that is either enforced or not. * For example a constraint `constraints/compute.disableSerialPortAccess`. * If it is enforced on a VM instance, serial port connections will not be * opened to that instance. 
* </pre> * * Protobuf type {@code google.cloud.orgpolicy.v2.Constraint.BooleanConstraint} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements // @@protoc_insertion_point(builder_implements:google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraintOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.orgpolicy.v2.ConstraintProto .internal_static_google_cloud_orgpolicy_v2_Constraint_BooleanConstraint_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.orgpolicy.v2.ConstraintProto .internal_static_google_cloud_orgpolicy_v2_Constraint_BooleanConstraint_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.class, com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.Builder.class); } // Construct using com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} } @java.lang.Override public Builder clear() { super.clear(); return this; } @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return com.google.cloud.orgpolicy.v2.ConstraintProto .internal_static_google_cloud_orgpolicy_v2_Constraint_BooleanConstraint_descriptor; } @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint getDefaultInstanceForType() { return com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.getDefaultInstance(); } @java.lang.Override public 
com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint build() { com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint buildPartial() { com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint result = new com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint(this); onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) { return mergeFrom((com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint other) { if (other == com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.getDefaultInstance()) return this; this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } 
@java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) } // @@protoc_insertion_point(class_scope:google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) private static final com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint(); } public static com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser<BooleanConstraint> PARSER = new com.google.protobuf.AbstractParser<BooleanConstraint>() { @java.lang.Override public BooleanConstraint parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new BooleanConstraint(input, extensionRegistry); } }; public static 
com.google.protobuf.Parser<BooleanConstraint> parser() { return PARSER; } @java.lang.Override public com.google.protobuf.Parser<BooleanConstraint> getParserForType() { return PARSER; } @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private int constraintTypeCase_ = 0; private java.lang.Object constraintType_; public enum ConstraintTypeCase implements com.google.protobuf.Internal.EnumLite, com.google.protobuf.AbstractMessage.InternalOneOfEnum { LIST_CONSTRAINT(5), BOOLEAN_CONSTRAINT(6), CONSTRAINTTYPE_NOT_SET(0); private final int value; private ConstraintTypeCase(int value) { this.value = value; } /** * @param value The number of the enum to look for. * @return The enum associated with the given number. * @deprecated Use {@link #forNumber(int)} instead. */ @java.lang.Deprecated public static ConstraintTypeCase valueOf(int value) { return forNumber(value); } public static ConstraintTypeCase forNumber(int value) { switch (value) { case 5: return LIST_CONSTRAINT; case 6: return BOOLEAN_CONSTRAINT; case 0: return CONSTRAINTTYPE_NOT_SET; default: return null; } } public int getNumber() { return this.value; } }; public ConstraintTypeCase getConstraintTypeCase() { return ConstraintTypeCase.forNumber(constraintTypeCase_); } public static final int NAME_FIELD_NUMBER = 1; private volatile java.lang.Object name_; /** * * * <pre> * Immutable. The resource name of the Constraint. Must be in one of * the following forms: * * `projects/{project_number}/constraints/{constraint_name}` * * `folders/{folder_id}/constraints/{constraint_name}` * * `organizations/{organization_id}/constraints/{constraint_name}` * For example, "/projects/123/constraints/compute.disableSerialPortAccess". * </pre> * * <code>string name = 1 [(.google.api.field_behavior) = IMMUTABLE];</code> * * @return The name. 
*/ @java.lang.Override public java.lang.String getName() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); name_ = s; return s; } } /** * * * <pre> * Immutable. The resource name of the Constraint. Must be in one of * the following forms: * * `projects/{project_number}/constraints/{constraint_name}` * * `folders/{folder_id}/constraints/{constraint_name}` * * `organizations/{organization_id}/constraints/{constraint_name}` * For example, "/projects/123/constraints/compute.disableSerialPortAccess". * </pre> * * <code>string name = 1 [(.google.api.field_behavior) = IMMUTABLE];</code> * * @return The bytes for name. */ @java.lang.Override public com.google.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); name_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } public static final int DISPLAY_NAME_FIELD_NUMBER = 2; private volatile java.lang.Object displayName_; /** * * * <pre> * The human readable name. * Mutable. * </pre> * * <code>string display_name = 2;</code> * * @return The displayName. */ @java.lang.Override public java.lang.String getDisplayName() { java.lang.Object ref = displayName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); displayName_ = s; return s; } } /** * * * <pre> * The human readable name. * Mutable. * </pre> * * <code>string display_name = 2;</code> * * @return The bytes for displayName. 
*/ @java.lang.Override public com.google.protobuf.ByteString getDisplayNameBytes() { java.lang.Object ref = displayName_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); displayName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } public static final int DESCRIPTION_FIELD_NUMBER = 3; private volatile java.lang.Object description_; /** * * * <pre> * Detailed description of what this `Constraint` controls as well as how and * where it is enforced. * Mutable. * </pre> * * <code>string description = 3;</code> * * @return The description. */ @java.lang.Override public java.lang.String getDescription() { java.lang.Object ref = description_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); description_ = s; return s; } } /** * * * <pre> * Detailed description of what this `Constraint` controls as well as how and * where it is enforced. * Mutable. * </pre> * * <code>string description = 3;</code> * * @return The bytes for description. */ @java.lang.Override public com.google.protobuf.ByteString getDescriptionBytes() { java.lang.Object ref = description_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); description_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } public static final int CONSTRAINT_DEFAULT_FIELD_NUMBER = 4; private int constraintDefault_; /** * * * <pre> * The evaluation behavior of this constraint in the absence of 'Policy'. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ConstraintDefault constraint_default = 4;</code> * * @return The enum numeric value on the wire for constraintDefault. 
*/ @java.lang.Override public int getConstraintDefaultValue() { return constraintDefault_; } /** * * * <pre> * The evaluation behavior of this constraint in the absence of 'Policy'. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ConstraintDefault constraint_default = 4;</code> * * @return The constraintDefault. */ @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint.ConstraintDefault getConstraintDefault() { @SuppressWarnings("deprecation") com.google.cloud.orgpolicy.v2.Constraint.ConstraintDefault result = com.google.cloud.orgpolicy.v2.Constraint.ConstraintDefault.valueOf(constraintDefault_); return result == null ? com.google.cloud.orgpolicy.v2.Constraint.ConstraintDefault.UNRECOGNIZED : result; } public static final int LIST_CONSTRAINT_FIELD_NUMBER = 5; /** * * * <pre> * Defines this constraint as being a ListConstraint. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ListConstraint list_constraint = 5;</code> * * @return Whether the listConstraint field is set. */ @java.lang.Override public boolean hasListConstraint() { return constraintTypeCase_ == 5; } /** * * * <pre> * Defines this constraint as being a ListConstraint. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ListConstraint list_constraint = 5;</code> * * @return The listConstraint. */ @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint.ListConstraint getListConstraint() { if (constraintTypeCase_ == 5) { return (com.google.cloud.orgpolicy.v2.Constraint.ListConstraint) constraintType_; } return com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.getDefaultInstance(); } /** * * * <pre> * Defines this constraint as being a ListConstraint. 
* </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ListConstraint list_constraint = 5;</code> */ @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint.ListConstraintOrBuilder getListConstraintOrBuilder() { if (constraintTypeCase_ == 5) { return (com.google.cloud.orgpolicy.v2.Constraint.ListConstraint) constraintType_; } return com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.getDefaultInstance(); } public static final int BOOLEAN_CONSTRAINT_FIELD_NUMBER = 6; /** * * * <pre> * Defines this constraint as being a BooleanConstraint. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint boolean_constraint = 6;</code> * * @return Whether the booleanConstraint field is set. */ @java.lang.Override public boolean hasBooleanConstraint() { return constraintTypeCase_ == 6; } /** * * * <pre> * Defines this constraint as being a BooleanConstraint. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint boolean_constraint = 6;</code> * * @return The booleanConstraint. */ @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint getBooleanConstraint() { if (constraintTypeCase_ == 6) { return (com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) constraintType_; } return com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.getDefaultInstance(); } /** * * * <pre> * Defines this constraint as being a BooleanConstraint. 
* </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint boolean_constraint = 6;</code> */ @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraintOrBuilder getBooleanConstraintOrBuilder() { if (constraintTypeCase_ == 6) { return (com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) constraintType_; } return com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.getDefaultInstance(); } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); } if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(displayName_)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 2, displayName_); } if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(description_)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 3, description_); } if (constraintDefault_ != com.google.cloud.orgpolicy.v2.Constraint.ConstraintDefault.CONSTRAINT_DEFAULT_UNSPECIFIED .getNumber()) { output.writeEnum(4, constraintDefault_); } if (constraintTypeCase_ == 5) { output.writeMessage( 5, (com.google.cloud.orgpolicy.v2.Constraint.ListConstraint) constraintType_); } if (constraintTypeCase_ == 6) { output.writeMessage( 6, (com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) constraintType_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(name_)) { size += 
com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); } if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(displayName_)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, displayName_); } if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(description_)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, description_); } if (constraintDefault_ != com.google.cloud.orgpolicy.v2.Constraint.ConstraintDefault.CONSTRAINT_DEFAULT_UNSPECIFIED .getNumber()) { size += com.google.protobuf.CodedOutputStream.computeEnumSize(4, constraintDefault_); } if (constraintTypeCase_ == 5) { size += com.google.protobuf.CodedOutputStream.computeMessageSize( 5, (com.google.cloud.orgpolicy.v2.Constraint.ListConstraint) constraintType_); } if (constraintTypeCase_ == 6) { size += com.google.protobuf.CodedOutputStream.computeMessageSize( 6, (com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) constraintType_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof com.google.cloud.orgpolicy.v2.Constraint)) { return super.equals(obj); } com.google.cloud.orgpolicy.v2.Constraint other = (com.google.cloud.orgpolicy.v2.Constraint) obj; if (!getName().equals(other.getName())) return false; if (!getDisplayName().equals(other.getDisplayName())) return false; if (!getDescription().equals(other.getDescription())) return false; if (constraintDefault_ != other.constraintDefault_) return false; if (!getConstraintTypeCase().equals(other.getConstraintTypeCase())) return false; switch (constraintTypeCase_) { case 5: if (!getListConstraint().equals(other.getListConstraint())) return false; break; case 6: if (!getBooleanConstraint().equals(other.getBooleanConstraint())) return false; break; case 0: default: } if (!unknownFields.equals(other.unknownFields)) return false; return true; } 
@java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); hash = (37 * hash) + DISPLAY_NAME_FIELD_NUMBER; hash = (53 * hash) + getDisplayName().hashCode(); hash = (37 * hash) + DESCRIPTION_FIELD_NUMBER; hash = (53 * hash) + getDescription().hashCode(); hash = (37 * hash) + CONSTRAINT_DEFAULT_FIELD_NUMBER; hash = (53 * hash) + constraintDefault_; switch (constraintTypeCase_) { case 5: hash = (37 * hash) + LIST_CONSTRAINT_FIELD_NUMBER; hash = (53 * hash) + getListConstraint().hashCode(); break; case 6: hash = (37 * hash) + BOOLEAN_CONSTRAINT_FIELD_NUMBER; hash = (53 * hash) + getBooleanConstraint().hashCode(); break; case 0: default: } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static com.google.cloud.orgpolicy.v2.Constraint parseFrom(java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.orgpolicy.v2.Constraint parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.orgpolicy.v2.Constraint parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.orgpolicy.v2.Constraint parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.orgpolicy.v2.Constraint parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return 
PARSER.parseFrom(data); } public static com.google.cloud.orgpolicy.v2.Constraint parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.orgpolicy.v2.Constraint parseFrom(java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static com.google.cloud.orgpolicy.v2.Constraint parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.orgpolicy.v2.Constraint parseDelimitedFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } public static com.google.cloud.orgpolicy.v2.Constraint parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.orgpolicy.v2.Constraint parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static com.google.cloud.orgpolicy.v2.Constraint parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public 
static Builder newBuilder(com.google.cloud.orgpolicy.v2.Constraint prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * * * <pre> * A `constraint` describes a way to restrict resource's configuration. For * example, you could enforce a constraint that controls which cloud services * can be activated across an organization, or whether a Compute Engine instance * can have serial port connections established. `Constraints` can be configured * by the organization's policy administrator to fit the needs of the * organization by setting a `policy` that includes `constraints` at different * locations in the organization's resource hierarchy. Policies are inherited * down the resource hierarchy from higher levels, but can also be overridden. * For details about the inheritance rules please read about * [`policies`][google.cloud.OrgPolicy.v2.Policy]. * `Constraints` have a default behavior determined by the `constraint_default` * field, which is the enforcement behavior that is used in the absence of a * `policy` being defined or inherited for the resource in question. 
* </pre> * * Protobuf type {@code google.cloud.orgpolicy.v2.Constraint} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements // @@protoc_insertion_point(builder_implements:google.cloud.orgpolicy.v2.Constraint) com.google.cloud.orgpolicy.v2.ConstraintOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.orgpolicy.v2.ConstraintProto .internal_static_google_cloud_orgpolicy_v2_Constraint_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.orgpolicy.v2.ConstraintProto .internal_static_google_cloud_orgpolicy_v2_Constraint_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.orgpolicy.v2.Constraint.class, com.google.cloud.orgpolicy.v2.Constraint.Builder.class); } // Construct using com.google.cloud.orgpolicy.v2.Constraint.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} } @java.lang.Override public Builder clear() { super.clear(); name_ = ""; displayName_ = ""; description_ = ""; constraintDefault_ = 0; constraintTypeCase_ = 0; constraintType_ = null; return this; } @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return com.google.cloud.orgpolicy.v2.ConstraintProto .internal_static_google_cloud_orgpolicy_v2_Constraint_descriptor; } @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint getDefaultInstanceForType() { return com.google.cloud.orgpolicy.v2.Constraint.getDefaultInstance(); } @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint build() { com.google.cloud.orgpolicy.v2.Constraint 
result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint buildPartial() { com.google.cloud.orgpolicy.v2.Constraint result = new com.google.cloud.orgpolicy.v2.Constraint(this); result.name_ = name_; result.displayName_ = displayName_; result.description_ = description_; result.constraintDefault_ = constraintDefault_; if (constraintTypeCase_ == 5) { if (listConstraintBuilder_ == null) { result.constraintType_ = constraintType_; } else { result.constraintType_ = listConstraintBuilder_.build(); } } if (constraintTypeCase_ == 6) { if (booleanConstraintBuilder_ == null) { result.constraintType_ = constraintType_; } else { result.constraintType_ = booleanConstraintBuilder_.build(); } } result.constraintTypeCase_ = constraintTypeCase_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof com.google.cloud.orgpolicy.v2.Constraint) { return mergeFrom((com.google.cloud.orgpolicy.v2.Constraint) 
other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(com.google.cloud.orgpolicy.v2.Constraint other) { if (other == com.google.cloud.orgpolicy.v2.Constraint.getDefaultInstance()) return this; if (!other.getName().isEmpty()) { name_ = other.name_; onChanged(); } if (!other.getDisplayName().isEmpty()) { displayName_ = other.displayName_; onChanged(); } if (!other.getDescription().isEmpty()) { description_ = other.description_; onChanged(); } if (other.constraintDefault_ != 0) { setConstraintDefaultValue(other.getConstraintDefaultValue()); } switch (other.getConstraintTypeCase()) { case LIST_CONSTRAINT: { mergeListConstraint(other.getListConstraint()); break; } case BOOLEAN_CONSTRAINT: { mergeBooleanConstraint(other.getBooleanConstraint()); break; } case CONSTRAINTTYPE_NOT_SET: { break; } } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { com.google.cloud.orgpolicy.v2.Constraint parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (com.google.cloud.orgpolicy.v2.Constraint) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int constraintTypeCase_ = 0; private java.lang.Object constraintType_; public ConstraintTypeCase getConstraintTypeCase() { return ConstraintTypeCase.forNumber(constraintTypeCase_); } public Builder clearConstraintType() { constraintTypeCase_ = 0; constraintType_ = null; onChanged(); return this; } private java.lang.Object name_ = ""; /** * * * <pre> * Immutable. The resource name of the Constraint. 
Must be in one of * the following forms: * * `projects/{project_number}/constraints/{constraint_name}` * * `folders/{folder_id}/constraints/{constraint_name}` * * `organizations/{organization_id}/constraints/{constraint_name}` * For example, "/projects/123/constraints/compute.disableSerialPortAccess". * </pre> * * <code>string name = 1 [(.google.api.field_behavior) = IMMUTABLE];</code> * * @return The name. */ public java.lang.String getName() { java.lang.Object ref = name_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); name_ = s; return s; } else { return (java.lang.String) ref; } } /** * * * <pre> * Immutable. The resource name of the Constraint. Must be in one of * the following forms: * * `projects/{project_number}/constraints/{constraint_name}` * * `folders/{folder_id}/constraints/{constraint_name}` * * `organizations/{organization_id}/constraints/{constraint_name}` * For example, "/projects/123/constraints/compute.disableSerialPortAccess". * </pre> * * <code>string name = 1 [(.google.api.field_behavior) = IMMUTABLE];</code> * * @return The bytes for name. */ public com.google.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); name_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * * * <pre> * Immutable. The resource name of the Constraint. Must be in one of * the following forms: * * `projects/{project_number}/constraints/{constraint_name}` * * `folders/{folder_id}/constraints/{constraint_name}` * * `organizations/{organization_id}/constraints/{constraint_name}` * For example, "/projects/123/constraints/compute.disableSerialPortAccess". * </pre> * * <code>string name = 1 [(.google.api.field_behavior) = IMMUTABLE];</code> * * @param value The name to set. 
* @return This builder for chaining. */ public Builder setName(java.lang.String value) { if (value == null) { throw new NullPointerException(); } name_ = value; onChanged(); return this; } /** * * * <pre> * Immutable. The resource name of the Constraint. Must be in one of * the following forms: * * `projects/{project_number}/constraints/{constraint_name}` * * `folders/{folder_id}/constraints/{constraint_name}` * * `organizations/{organization_id}/constraints/{constraint_name}` * For example, "/projects/123/constraints/compute.disableSerialPortAccess". * </pre> * * <code>string name = 1 [(.google.api.field_behavior) = IMMUTABLE];</code> * * @return This builder for chaining. */ public Builder clearName() { name_ = getDefaultInstance().getName(); onChanged(); return this; } /** * * * <pre> * Immutable. The resource name of the Constraint. Must be in one of * the following forms: * * `projects/{project_number}/constraints/{constraint_name}` * * `folders/{folder_id}/constraints/{constraint_name}` * * `organizations/{organization_id}/constraints/{constraint_name}` * For example, "/projects/123/constraints/compute.disableSerialPortAccess". * </pre> * * <code>string name = 1 [(.google.api.field_behavior) = IMMUTABLE];</code> * * @param value The bytes for name to set. * @return This builder for chaining. */ public Builder setNameBytes(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); name_ = value; onChanged(); return this; } private java.lang.Object displayName_ = ""; /** * * * <pre> * The human readable name. * Mutable. * </pre> * * <code>string display_name = 2;</code> * * @return The displayName. 
*/ public java.lang.String getDisplayName() { java.lang.Object ref = displayName_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); displayName_ = s; return s; } else { return (java.lang.String) ref; } } /** * * * <pre> * The human readable name. * Mutable. * </pre> * * <code>string display_name = 2;</code> * * @return The bytes for displayName. */ public com.google.protobuf.ByteString getDisplayNameBytes() { java.lang.Object ref = displayName_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); displayName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * * * <pre> * The human readable name. * Mutable. * </pre> * * <code>string display_name = 2;</code> * * @param value The displayName to set. * @return This builder for chaining. */ public Builder setDisplayName(java.lang.String value) { if (value == null) { throw new NullPointerException(); } displayName_ = value; onChanged(); return this; } /** * * * <pre> * The human readable name. * Mutable. * </pre> * * <code>string display_name = 2;</code> * * @return This builder for chaining. */ public Builder clearDisplayName() { displayName_ = getDefaultInstance().getDisplayName(); onChanged(); return this; } /** * * * <pre> * The human readable name. * Mutable. * </pre> * * <code>string display_name = 2;</code> * * @param value The bytes for displayName to set. * @return This builder for chaining. */ public Builder setDisplayNameBytes(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); displayName_ = value; onChanged(); return this; } private java.lang.Object description_ = ""; /** * * * <pre> * Detailed description of what this `Constraint` controls as well as how and * where it is enforced. * Mutable. 
* </pre> * * <code>string description = 3;</code> * * @return The description. */ public java.lang.String getDescription() { java.lang.Object ref = description_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); description_ = s; return s; } else { return (java.lang.String) ref; } } /** * * * <pre> * Detailed description of what this `Constraint` controls as well as how and * where it is enforced. * Mutable. * </pre> * * <code>string description = 3;</code> * * @return The bytes for description. */ public com.google.protobuf.ByteString getDescriptionBytes() { java.lang.Object ref = description_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); description_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * * * <pre> * Detailed description of what this `Constraint` controls as well as how and * where it is enforced. * Mutable. * </pre> * * <code>string description = 3;</code> * * @param value The description to set. * @return This builder for chaining. */ public Builder setDescription(java.lang.String value) { if (value == null) { throw new NullPointerException(); } description_ = value; onChanged(); return this; } /** * * * <pre> * Detailed description of what this `Constraint` controls as well as how and * where it is enforced. * Mutable. * </pre> * * <code>string description = 3;</code> * * @return This builder for chaining. */ public Builder clearDescription() { description_ = getDefaultInstance().getDescription(); onChanged(); return this; } /** * * * <pre> * Detailed description of what this `Constraint` controls as well as how and * where it is enforced. * Mutable. * </pre> * * <code>string description = 3;</code> * * @param value The bytes for description to set. * @return This builder for chaining. 
*/ public Builder setDescriptionBytes(com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); description_ = value; onChanged(); return this; } private int constraintDefault_ = 0; /** * * * <pre> * The evaluation behavior of this constraint in the absence of 'Policy'. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ConstraintDefault constraint_default = 4;</code> * * @return The enum numeric value on the wire for constraintDefault. */ @java.lang.Override public int getConstraintDefaultValue() { return constraintDefault_; } /** * * * <pre> * The evaluation behavior of this constraint in the absence of 'Policy'. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ConstraintDefault constraint_default = 4;</code> * * @param value The enum numeric value on the wire for constraintDefault to set. * @return This builder for chaining. */ public Builder setConstraintDefaultValue(int value) { constraintDefault_ = value; onChanged(); return this; } /** * * * <pre> * The evaluation behavior of this constraint in the absence of 'Policy'. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ConstraintDefault constraint_default = 4;</code> * * @return The constraintDefault. */ @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint.ConstraintDefault getConstraintDefault() { @SuppressWarnings("deprecation") com.google.cloud.orgpolicy.v2.Constraint.ConstraintDefault result = com.google.cloud.orgpolicy.v2.Constraint.ConstraintDefault.valueOf(constraintDefault_); return result == null ? com.google.cloud.orgpolicy.v2.Constraint.ConstraintDefault.UNRECOGNIZED : result; } /** * * * <pre> * The evaluation behavior of this constraint in the absence of 'Policy'. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ConstraintDefault constraint_default = 4;</code> * * @param value The constraintDefault to set. * @return This builder for chaining. 
*/ public Builder setConstraintDefault( com.google.cloud.orgpolicy.v2.Constraint.ConstraintDefault value) { if (value == null) { throw new NullPointerException(); } constraintDefault_ = value.getNumber(); onChanged(); return this; } /** * * * <pre> * The evaluation behavior of this constraint in the absence of 'Policy'. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ConstraintDefault constraint_default = 4;</code> * * @return This builder for chaining. */ public Builder clearConstraintDefault() { constraintDefault_ = 0; onChanged(); return this; } private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.orgpolicy.v2.Constraint.ListConstraint, com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.Builder, com.google.cloud.orgpolicy.v2.Constraint.ListConstraintOrBuilder> listConstraintBuilder_; /** * * * <pre> * Defines this constraint as being a ListConstraint. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ListConstraint list_constraint = 5;</code> * * @return Whether the listConstraint field is set. */ @java.lang.Override public boolean hasListConstraint() { return constraintTypeCase_ == 5; } /** * * * <pre> * Defines this constraint as being a ListConstraint. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ListConstraint list_constraint = 5;</code> * * @return The listConstraint. */ @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint.ListConstraint getListConstraint() { if (listConstraintBuilder_ == null) { if (constraintTypeCase_ == 5) { return (com.google.cloud.orgpolicy.v2.Constraint.ListConstraint) constraintType_; } return com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.getDefaultInstance(); } else { if (constraintTypeCase_ == 5) { return listConstraintBuilder_.getMessage(); } return com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.getDefaultInstance(); } } /** * * * <pre> * Defines this constraint as being a ListConstraint. 
* </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ListConstraint list_constraint = 5;</code> */ public Builder setListConstraint( com.google.cloud.orgpolicy.v2.Constraint.ListConstraint value) { if (listConstraintBuilder_ == null) { if (value == null) { throw new NullPointerException(); } constraintType_ = value; onChanged(); } else { listConstraintBuilder_.setMessage(value); } constraintTypeCase_ = 5; return this; } /** * * * <pre> * Defines this constraint as being a ListConstraint. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ListConstraint list_constraint = 5;</code> */ public Builder setListConstraint( com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.Builder builderForValue) { if (listConstraintBuilder_ == null) { constraintType_ = builderForValue.build(); onChanged(); } else { listConstraintBuilder_.setMessage(builderForValue.build()); } constraintTypeCase_ = 5; return this; } /** * * * <pre> * Defines this constraint as being a ListConstraint. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ListConstraint list_constraint = 5;</code> */ public Builder mergeListConstraint( com.google.cloud.orgpolicy.v2.Constraint.ListConstraint value) { if (listConstraintBuilder_ == null) { if (constraintTypeCase_ == 5 && constraintType_ != com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.getDefaultInstance()) { constraintType_ = com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.newBuilder( (com.google.cloud.orgpolicy.v2.Constraint.ListConstraint) constraintType_) .mergeFrom(value) .buildPartial(); } else { constraintType_ = value; } onChanged(); } else { if (constraintTypeCase_ == 5) { listConstraintBuilder_.mergeFrom(value); } listConstraintBuilder_.setMessage(value); } constraintTypeCase_ = 5; return this; } /** * * * <pre> * Defines this constraint as being a ListConstraint. 
* </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ListConstraint list_constraint = 5;</code> */ public Builder clearListConstraint() { if (listConstraintBuilder_ == null) { if (constraintTypeCase_ == 5) { constraintTypeCase_ = 0; constraintType_ = null; onChanged(); } } else { if (constraintTypeCase_ == 5) { constraintTypeCase_ = 0; constraintType_ = null; } listConstraintBuilder_.clear(); } return this; } /** * * * <pre> * Defines this constraint as being a ListConstraint. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ListConstraint list_constraint = 5;</code> */ public com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.Builder getListConstraintBuilder() { return getListConstraintFieldBuilder().getBuilder(); } /** * * * <pre> * Defines this constraint as being a ListConstraint. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ListConstraint list_constraint = 5;</code> */ @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint.ListConstraintOrBuilder getListConstraintOrBuilder() { if ((constraintTypeCase_ == 5) && (listConstraintBuilder_ != null)) { return listConstraintBuilder_.getMessageOrBuilder(); } else { if (constraintTypeCase_ == 5) { return (com.google.cloud.orgpolicy.v2.Constraint.ListConstraint) constraintType_; } return com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.getDefaultInstance(); } } /** * * * <pre> * Defines this constraint as being a ListConstraint. 
* </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.ListConstraint list_constraint = 5;</code> */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.orgpolicy.v2.Constraint.ListConstraint, com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.Builder, com.google.cloud.orgpolicy.v2.Constraint.ListConstraintOrBuilder> getListConstraintFieldBuilder() { if (listConstraintBuilder_ == null) { if (!(constraintTypeCase_ == 5)) { constraintType_ = com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.getDefaultInstance(); } listConstraintBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.orgpolicy.v2.Constraint.ListConstraint, com.google.cloud.orgpolicy.v2.Constraint.ListConstraint.Builder, com.google.cloud.orgpolicy.v2.Constraint.ListConstraintOrBuilder>( (com.google.cloud.orgpolicy.v2.Constraint.ListConstraint) constraintType_, getParentForChildren(), isClean()); constraintType_ = null; } constraintTypeCase_ = 5; onChanged(); ; return listConstraintBuilder_; } private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint, com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.Builder, com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraintOrBuilder> booleanConstraintBuilder_; /** * * * <pre> * Defines this constraint as being a BooleanConstraint. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint boolean_constraint = 6;</code> * * @return Whether the booleanConstraint field is set. */ @java.lang.Override public boolean hasBooleanConstraint() { return constraintTypeCase_ == 6; } /** * * * <pre> * Defines this constraint as being a BooleanConstraint. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint boolean_constraint = 6;</code> * * @return The booleanConstraint. 
*/ @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint getBooleanConstraint() { if (booleanConstraintBuilder_ == null) { if (constraintTypeCase_ == 6) { return (com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) constraintType_; } return com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.getDefaultInstance(); } else { if (constraintTypeCase_ == 6) { return booleanConstraintBuilder_.getMessage(); } return com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.getDefaultInstance(); } } /** * * * <pre> * Defines this constraint as being a BooleanConstraint. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint boolean_constraint = 6;</code> */ public Builder setBooleanConstraint( com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint value) { if (booleanConstraintBuilder_ == null) { if (value == null) { throw new NullPointerException(); } constraintType_ = value; onChanged(); } else { booleanConstraintBuilder_.setMessage(value); } constraintTypeCase_ = 6; return this; } /** * * * <pre> * Defines this constraint as being a BooleanConstraint. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint boolean_constraint = 6;</code> */ public Builder setBooleanConstraint( com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.Builder builderForValue) { if (booleanConstraintBuilder_ == null) { constraintType_ = builderForValue.build(); onChanged(); } else { booleanConstraintBuilder_.setMessage(builderForValue.build()); } constraintTypeCase_ = 6; return this; } /** * * * <pre> * Defines this constraint as being a BooleanConstraint. 
* </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint boolean_constraint = 6;</code> */ public Builder mergeBooleanConstraint( com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint value) { if (booleanConstraintBuilder_ == null) { if (constraintTypeCase_ == 6 && constraintType_ != com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint .getDefaultInstance()) { constraintType_ = com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.newBuilder( (com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) constraintType_) .mergeFrom(value) .buildPartial(); } else { constraintType_ = value; } onChanged(); } else { if (constraintTypeCase_ == 6) { booleanConstraintBuilder_.mergeFrom(value); } booleanConstraintBuilder_.setMessage(value); } constraintTypeCase_ = 6; return this; } /** * * * <pre> * Defines this constraint as being a BooleanConstraint. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint boolean_constraint = 6;</code> */ public Builder clearBooleanConstraint() { if (booleanConstraintBuilder_ == null) { if (constraintTypeCase_ == 6) { constraintTypeCase_ = 0; constraintType_ = null; onChanged(); } } else { if (constraintTypeCase_ == 6) { constraintTypeCase_ = 0; constraintType_ = null; } booleanConstraintBuilder_.clear(); } return this; } /** * * * <pre> * Defines this constraint as being a BooleanConstraint. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint boolean_constraint = 6;</code> */ public com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.Builder getBooleanConstraintBuilder() { return getBooleanConstraintFieldBuilder().getBuilder(); } /** * * * <pre> * Defines this constraint as being a BooleanConstraint. 
* </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint boolean_constraint = 6;</code> */ @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraintOrBuilder getBooleanConstraintOrBuilder() { if ((constraintTypeCase_ == 6) && (booleanConstraintBuilder_ != null)) { return booleanConstraintBuilder_.getMessageOrBuilder(); } else { if (constraintTypeCase_ == 6) { return (com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) constraintType_; } return com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.getDefaultInstance(); } } /** * * * <pre> * Defines this constraint as being a BooleanConstraint. * </pre> * * <code>.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint boolean_constraint = 6;</code> */ private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint, com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.Builder, com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraintOrBuilder> getBooleanConstraintFieldBuilder() { if (booleanConstraintBuilder_ == null) { if (!(constraintTypeCase_ == 6)) { constraintType_ = com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.getDefaultInstance(); } booleanConstraintBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint, com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint.Builder, com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraintOrBuilder>( (com.google.cloud.orgpolicy.v2.Constraint.BooleanConstraint) constraintType_, getParentForChildren(), isClean()); constraintType_ = null; } constraintTypeCase_ = 6; onChanged(); ; return booleanConstraintBuilder_; } @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) 
{ return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:google.cloud.orgpolicy.v2.Constraint) } // @@protoc_insertion_point(class_scope:google.cloud.orgpolicy.v2.Constraint) private static final com.google.cloud.orgpolicy.v2.Constraint DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new com.google.cloud.orgpolicy.v2.Constraint(); } public static com.google.cloud.orgpolicy.v2.Constraint getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser<Constraint> PARSER = new com.google.protobuf.AbstractParser<Constraint>() { @java.lang.Override public Constraint parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new Constraint(input, extensionRegistry); } }; public static com.google.protobuf.Parser<Constraint> parser() { return PARSER; } @java.lang.Override public com.google.protobuf.Parser<Constraint> getParserForType() { return PARSER; } @java.lang.Override public com.google.cloud.orgpolicy.v2.Constraint getDefaultInstanceForType() { return DEFAULT_INSTANCE; } }
googleapis/java-orgpolicy
proto-google-cloud-orgpolicy-v2/src/main/java/com/google/cloud/orgpolicy/v2/Constraint.java
Java
apache-2.0
108,172
/* * Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.ec2.model.transform; import javax.xml.stream.events.XMLEvent; import javax.annotation.Generated; import com.amazonaws.services.ec2.model.*; import com.amazonaws.transform.Unmarshaller; import com.amazonaws.transform.StaxUnmarshallerContext; import com.amazonaws.transform.SimpleTypeStaxUnmarshallers.*; /** * IdFormat StAX Unmarshaller */ @Generated("com.amazonaws:aws-java-sdk-code-generator") public class IdFormatStaxUnmarshaller implements Unmarshaller<IdFormat, StaxUnmarshallerContext> { public IdFormat unmarshall(StaxUnmarshallerContext context) throws Exception { IdFormat idFormat = new IdFormat(); int originalDepth = context.getCurrentDepth(); int targetDepth = originalDepth + 1; if (context.isStartOfDocument()) targetDepth += 1; while (true) { XMLEvent xmlEvent = context.nextEvent(); if (xmlEvent.isEndDocument()) return idFormat; if (xmlEvent.isAttribute() || xmlEvent.isStartElement()) { if (context.testExpression("resource", targetDepth)) { idFormat.setResource(StringStaxUnmarshaller.getInstance().unmarshall(context)); continue; } if (context.testExpression("useLongIds", targetDepth)) { idFormat.setUseLongIds(BooleanStaxUnmarshaller.getInstance().unmarshall(context)); continue; } if (context.testExpression("deadline", targetDepth)) { idFormat.setDeadline(DateStaxUnmarshaller.getInstance().unmarshall(context)); continue; } } else if (xmlEvent.isEndElement()) { if 
(context.getCurrentDepth() < originalDepth) { return idFormat; } } } } private static IdFormatStaxUnmarshaller instance; public static IdFormatStaxUnmarshaller getInstance() { if (instance == null) instance = new IdFormatStaxUnmarshaller(); return instance; } }
dagnir/aws-sdk-java
aws-java-sdk-ec2/src/main/java/com/amazonaws/services/ec2/model/transform/IdFormatStaxUnmarshaller.java
Java
apache-2.0
2,733
// Copyright 2013 Michel Kraemer // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package $pkg; import java.util.Arrays; import java.util.Map; <% if (!noJsonObject) { %> import java.util.Collection; import de.undercouch.citeproc.helper.json.JsonBuilder; import de.undercouch.citeproc.helper.json.JsonObject; <% } %> /** * $description * @author Michel Kraemer */ public class $name <% if (!noJsonObject) { %>implements JsonObject<% } %> { <% for (p in requiredProps) { %>private final ${p.type} ${p.normalizedName}; <% } %> <% for (p in props) { %>private final ${p.type} ${p.normalizedName}; <% } %> public $name(<% if (requiredProps.size > 1) { for (p in requiredProps[0..-2]) { %>${p.type} ${p.normalizedName},<% } } %><% if (!requiredProps.empty) { %> ${toEllipse.call(requiredProps[-1].type)} ${requiredProps[-1].normalizedName} <% } %>) { <% for (p in requiredProps) { %>this.${p.normalizedName} = ${p.normalizedName}; <% } %> <% for (p in props) { %>this.${p.normalizedName} = <% if (p.defval) { %>${p.defval}<% } else { %>null<% } %>; <% } %> } <% if (!props.empty) { %> public $name(<% for (p in requiredProps) { %>${p.type} ${p.normalizedName},<% } %> ${props.collect({ p -> p.type + ' ' + p.normalizedName }).join(',')}) { <% for (p in requiredProps) { %>this.${p.normalizedName} = ${p.normalizedName}; <% } %> <% for (p in props) { %>this.${p.normalizedName} = ${p.normalizedName}; <% } %> } <% } %> <% for (p in requiredProps) { %>/** * @return the <% if (shortname) { 
%>${shortname}'s <% } %>${p.name} */ public ${p.type} ${toGetter.call(p.normalizedName)}() { return ${p.normalizedName}; } <% } %> <% for (p in props) { %>/** * @return the <% if (shortname) { %>${shortname}'s <% } %>${p.name} */ public ${p.type} ${toGetter.call(p.normalizedName)}() { return ${p.normalizedName}; } <% } %> <% if (!noJsonObject) { %> @Override public Object toJson(JsonBuilder builder) { <% for (p in requiredProps) { %>builder.add("${p.name}", ${p.normalizedName});<% } %> <% for (p in props) { %>if (${p.normalizedName} != null) { builder.add("${p.name}", ${p.normalizedName}); } <% } %> return builder.build(); } /** * Converts a JSON object to a $name object. <% if (!requiredProps.empty) { %>The JSON object must at least contain the following required properties: ${requiredProps.collect({ p -> '<code>' + p.name + '</code>' }).join(',')}<% } %> * @param obj the JSON object to convert * @return the converted $name object */ @SuppressWarnings("unchecked") public static $name fromJson(Map<String, Object> obj) { <% for (p in requiredProps) { %>${p.type} ${p.normalizedName};<% } %> <% def castTemplate = { type, v -> def castBefore if (type.equals('String')) { castBefore = '' } else if (type.equals('int') || type.equals('Integer')) { castBefore = 'toInt(' } else if (type.equals('boolean') || type.equals('Boolean')) { castBefore = 'toBool(' } else { castBefore = '(' + type + ')' } def castAfter if (type.equals('String')) { castAfter = '.toString()' } else if (type.equals('int') || type.equals('Integer') || type.equals('boolean') || type.equals('Boolean')) { castAfter = ')' } else { castAfter = '' } return castBefore + v + castAfter } def fromJsonTemplate = { type, v -> return "${type}.fromJson((Map<String, Object>)$v)" } def constructArrayTemplate = { p -> def r = '' r += """\ if (v instanceof Map) { v = ((Map<?, ?>)v).values(); } else if (!(v instanceof Collection)) { throw new IllegalArgumentException("`${p.name}' must be an array"); } Collection<?> cv = 
(Collection<?>)v; ${p.normalizedName} = """ if (p.arrayArrayType) { r += "new ${p.typeNoArrayNoArray}[cv.size()][];" } else { r += "new ${p.typeNoArray}[cv.size()];" } r += """\ int i = 0; for (Object vo : cv) {""" if (p.arrayArrayType) { r += """\ if (vo instanceof Map) { vo = ((Map<?, ?>)vo).values(); } else if (!(vo instanceof Collection)) { throw new IllegalArgumentException("`${p.name}' must be an array of arrays"); } Collection<?> icv = (Collection<?>)vo; ${p.normalizedName}[i] = new ${p.typeNoArrayNoArray}[icv.size()]; int j = 0; for (Object ivo : icv) {""" if (p.cslType) { r += "${p.normalizedName}[i][j] = " + fromJsonTemplate(p.typeNoArrayNoArray, "ivo") + ";" } else { r += "${p.normalizedName}[i][j] = " + castTemplate(p.typeNoArrayNoArray, "ivo") + ";" } r += "++j;" r += "}" } else { if (p.cslType) { r += """\ if (!(vo instanceof Map)) { throw new IllegalArgumentException("`${p.name}' must be an array of objects"); } ${p.normalizedName}[i] = """ + fromJsonTemplate(p.typeNoArray, "vo") + ";" } else { r += "${p.normalizedName}[i] = " + castTemplate(p.typeNoArray, "vo") + ";" } } r += "++i;" r += "}" return r } def propertyTemplate = { p, v -> def r = '' if (!p.required) { r += "${p.type} ${p.normalizedName};" } if (p.enumType) { r += "${p.normalizedName} = ${p.type}.fromString(${v}.toString());" } else if (p.cslType) { if (p.arrayType) { r += constructArrayTemplate(p) } else { r += "if (!($v instanceof Map)) {" r += "\tthrow new IllegalArgumentException(\"`${p.name}' must be an object\");" r += '}' r += "${p.normalizedName} = " + fromJsonTemplate(p.type, v) + ';' } } else { if (p.arrayType) { r += constructArrayTemplate(p) } else { r += "${p.normalizedName} = " + castTemplate(p.type, v) + ';' } } if (!p.required) { r += "builder.${p.normalizedName}(${p.normalizedName});" } return r } %> <% for (p in requiredProps) { %>{ Object v = obj.get("${p.name}"); if (v == null) { throw new IllegalArgumentException("Missing property `${p.name}'"); } <% out << 
propertyTemplate(p, 'v') %> }<% } %> ${name}Builder builder = new ${name}Builder(${requiredProps.collect({ p -> p.normalizedName }).join(',')}); <% for (p in props) { %>{ Object v = obj.get("${p.name}"); if (v != null) { <% out << propertyTemplate(p, 'v') %> }<% if (p.defval) { %> else { builder.${p.normalizedName}(${p.defval}); } <% } %> }<% } %> ${additionalFromJsonCode.join('\n')} return builder.build(); } private static int toInt(Object o) { if (o instanceof CharSequence) { return Integer.parseInt(o.toString()); } return ((Number)o).intValue(); } private static boolean toBool(Object o) { if (o instanceof String) { return Boolean.parseBoolean((String)o); } else if (o instanceof Number) { return ((Number)o).intValue() != 0; } return (Boolean)o; } <% } %> ${additionalMethods.join('\n')} @Override public int hashCode() { int result = 1; <% for (p in props) { %>result = 31 * result + <% if (p.arrayArrayType) { %>Arrays.deepHashCode(${p.normalizedName});<% } else if (p.arrayType) { %>Arrays.hashCode(${p.normalizedName});<% } else { %>((${p.normalizedName} == null) ? 0 : ${p.normalizedName}.hashCode());<% } %> <% } %> return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (!(obj instanceof $name)) return false; $name other = ($name) obj; <% for (p in props) { %> <% if (p.arrayArrayType) { %> if (!Arrays.deepEquals(${p.normalizedName}, other.${p.normalizedName})) return false; <% } else if (p.arrayType) { %> if (!Arrays.equals(${p.normalizedName}, other.${p.normalizedName})) return false; <% } else { %> if (${p.normalizedName} == null) { if (other.${p.normalizedName} != null) return false; } else if (!${p.normalizedName}.equals(other.${p.normalizedName})) return false; <% } %> <% } %> return true; } }
Klortho/citeproc-java
citeproc-java/templates/Object.java
Java
apache-2.0
8,535
/* * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.docdb.model; import java.io.Serializable; import javax.annotation.Generated; /** * <p> * Represents the output of <a>DescribeOrderableDBInstanceOptions</a>. * </p> * * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/docdb-2014-10-31/DescribeOrderableDBInstanceOptions" * target="_top">AWS API Documentation</a> */ @Generated("com.amazonaws:aws-java-sdk-code-generator") public class DescribeOrderableDBInstanceOptionsResult extends com.amazonaws.AmazonWebServiceResult<com.amazonaws.ResponseMetadata> implements Serializable, Cloneable { /** * <p> * The options that are available for a particular orderable instance. * </p> */ private java.util.List<OrderableDBInstanceOption> orderableDBInstanceOptions; /** * <p> * An optional pagination token provided by a previous request. If this parameter is specified, the response * includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. * </p> */ private String marker; /** * <p> * The options that are available for a particular orderable instance. * </p> * * @return The options that are available for a particular orderable instance. */ public java.util.List<OrderableDBInstanceOption> getOrderableDBInstanceOptions() { return orderableDBInstanceOptions; } /** * <p> * The options that are available for a particular orderable instance. 
* </p> * * @param orderableDBInstanceOptions * The options that are available for a particular orderable instance. */ public void setOrderableDBInstanceOptions(java.util.Collection<OrderableDBInstanceOption> orderableDBInstanceOptions) { if (orderableDBInstanceOptions == null) { this.orderableDBInstanceOptions = null; return; } this.orderableDBInstanceOptions = new java.util.ArrayList<OrderableDBInstanceOption>(orderableDBInstanceOptions); } /** * <p> * The options that are available for a particular orderable instance. * </p> * <p> * <b>NOTE:</b> This method appends the values to the existing list (if any). Use * {@link #setOrderableDBInstanceOptions(java.util.Collection)} or * {@link #withOrderableDBInstanceOptions(java.util.Collection)} if you want to override the existing values. * </p> * * @param orderableDBInstanceOptions * The options that are available for a particular orderable instance. * @return Returns a reference to this object so that method calls can be chained together. */ public DescribeOrderableDBInstanceOptionsResult withOrderableDBInstanceOptions(OrderableDBInstanceOption... orderableDBInstanceOptions) { if (this.orderableDBInstanceOptions == null) { setOrderableDBInstanceOptions(new java.util.ArrayList<OrderableDBInstanceOption>(orderableDBInstanceOptions.length)); } for (OrderableDBInstanceOption ele : orderableDBInstanceOptions) { this.orderableDBInstanceOptions.add(ele); } return this; } /** * <p> * The options that are available for a particular orderable instance. * </p> * * @param orderableDBInstanceOptions * The options that are available for a particular orderable instance. * @return Returns a reference to this object so that method calls can be chained together. 
*/ public DescribeOrderableDBInstanceOptionsResult withOrderableDBInstanceOptions(java.util.Collection<OrderableDBInstanceOption> orderableDBInstanceOptions) { setOrderableDBInstanceOptions(orderableDBInstanceOptions); return this; } /** * <p> * An optional pagination token provided by a previous request. If this parameter is specified, the response * includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. * </p> * * @param marker * An optional pagination token provided by a previous request. If this parameter is specified, the response * includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. */ public void setMarker(String marker) { this.marker = marker; } /** * <p> * An optional pagination token provided by a previous request. If this parameter is specified, the response * includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. * </p> * * @return An optional pagination token provided by a previous request. If this parameter is specified, the response * includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. */ public String getMarker() { return this.marker; } /** * <p> * An optional pagination token provided by a previous request. If this parameter is specified, the response * includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. * </p> * * @param marker * An optional pagination token provided by a previous request. If this parameter is specified, the response * includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. * @return Returns a reference to this object so that method calls can be chained together. */ public DescribeOrderableDBInstanceOptionsResult withMarker(String marker) { setMarker(marker); return this; } /** * Returns a string representation of this object. This is useful for testing and debugging. 
Sensitive data will be * redacted from this string using a placeholder value. * * @return A string representation of this object. * * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); if (getOrderableDBInstanceOptions() != null) sb.append("OrderableDBInstanceOptions: ").append(getOrderableDBInstanceOptions()).append(","); if (getMarker() != null) sb.append("Marker: ").append(getMarker()); sb.append("}"); return sb.toString(); } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (obj instanceof DescribeOrderableDBInstanceOptionsResult == false) return false; DescribeOrderableDBInstanceOptionsResult other = (DescribeOrderableDBInstanceOptionsResult) obj; if (other.getOrderableDBInstanceOptions() == null ^ this.getOrderableDBInstanceOptions() == null) return false; if (other.getOrderableDBInstanceOptions() != null && other.getOrderableDBInstanceOptions().equals(this.getOrderableDBInstanceOptions()) == false) return false; if (other.getMarker() == null ^ this.getMarker() == null) return false; if (other.getMarker() != null && other.getMarker().equals(this.getMarker()) == false) return false; return true; } @Override public int hashCode() { final int prime = 31; int hashCode = 1; hashCode = prime * hashCode + ((getOrderableDBInstanceOptions() == null) ? 0 : getOrderableDBInstanceOptions().hashCode()); hashCode = prime * hashCode + ((getMarker() == null) ? 0 : getMarker().hashCode()); return hashCode; } @Override public DescribeOrderableDBInstanceOptionsResult clone() { try { return (DescribeOrderableDBInstanceOptionsResult) super.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e); } } }
aws/aws-sdk-java
aws-java-sdk-docdb/src/main/java/com/amazonaws/services/docdb/model/DescribeOrderableDBInstanceOptionsResult.java
Java
apache-2.0
8,644
/*
 * Copyright 2015 Red Hat, Inc. and/or its affiliates
 * and other contributors as indicated by the @author tags.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.hawkular.inventory.api.test;

import org.hawkular.inventory.api.Environments;
import org.hawkular.inventory.api.Feeds;
import org.hawkular.inventory.api.Inventory;
import org.hawkular.inventory.api.MetricTypes;
import org.hawkular.inventory.api.Metrics;
import org.hawkular.inventory.api.Relationships;
import org.hawkular.inventory.api.ResourceTypes;
import org.hawkular.inventory.api.Resources;
import org.hawkular.inventory.api.Tenants;
import org.mockito.Mockito;

import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.anyVararg;
import static org.mockito.Mockito.when;

/**
 * A static holder of Mockito mocks for the whole inventory API surface.
 *
 * <p>Each {@code Read}/{@code ReadWrite}/{@code Single}/{@code Multiple} access
 * interface gets one shared mock, exposed as a public static field so tests can
 * stub or verify it directly. {@link #rewire()} recreates every mock and wires
 * the fluent navigation between them (e.g. {@code inventory.tenants()} returns
 * the tenants mock, whose {@code get(...)} returns the single-tenant mock, and
 * so on), so a test can traverse the API without a real backend.
 *
 * <p>NOTE(review): all state is static and mocks are not thread-safe, so tests
 * using this class must not run in parallel — call {@link #rewire()} before
 * each test to get a fresh set of mocks.
 *
 * @author Lukas Krejci
 * @since 0.0.1
 */
public class InventoryMock {

    // Entry point mock — the root of the fluent API.
    public static Inventory inventory;

    // One mock per access interface, grouped by entity type.
    public static Environments.Multiple environmentsMultiple;
    public static Environments.Read environmentsRead;
    public static Environments.ReadWrite environmentsReadWrite;
    public static Environments.Single environmentsSingle;

    public static Feeds.Multiple feedsMultiple;
    public static Feeds.Read feedsRead;
    public static Feeds.ReadWrite feedsReadWrite;
    public static Feeds.Single feedsSingle;

    public static Metrics.Multiple metricsMultiple;
    public static Metrics.Read metricsRead;
    public static Metrics.ReadAssociate metricsReadAssociate;
    public static Metrics.ReadWrite metricsReadWrite;
    public static Metrics.Single metricsSingle;

    public static MetricTypes.Multiple metricTypesMultiple;
    public static MetricTypes.Read metricTypesRead;
    public static MetricTypes.ReadAssociate metricTypesReadAssociate;
    public static MetricTypes.ReadWrite metricTypesReadWrite;
    public static MetricTypes.Single metricTypesSingle;

    public static Relationships.Multiple relationshipsMultiple;
    public static Relationships.Read relationshipsRead;
    public static Relationships.ReadWrite relationshipsReadWrite;
    public static Relationships.Single relationshipsSingle;

    public static Resources.Multiple resourcesMultiple;
    public static Resources.Read resourcesRead;
    public static Resources.ReadWrite resourcesReadWrite;
    public static Resources.Single resourcesSingle;

    public static ResourceTypes.Multiple resourceTypesMultiple;
    public static ResourceTypes.Read resourceTypesRead;
    public static ResourceTypes.ReadWrite resourceTypesReadWrite;
    public static ResourceTypes.Single resourceTypesSingle;

    public static Tenants.Multiple tenantsMultiple;
    public static Tenants.Read tenantsRead;
    public static Tenants.ReadWrite tenantsReadWrite;
    public static Tenants.Single tenantsSingle;

    /**
     * Recreates every mock and re-establishes the navigation stubbing between
     * them. The general wiring pattern, visible below, is:
     * <ul>
     * <li>{@code *Read.get(..)}/{@code *ReadWrite.get(..)} return the
     *     corresponding {@code *Single} mock;</li>
     * <li>{@code *Read.getAll(..)}/{@code *ReadWrite.getAll(..)} return the
     *     corresponding {@code *Multiple} mock;</li>
     * <li>navigation from a {@code *Single} yields {@code ReadWrite}
     *     (or {@code ReadAssociate}) views, while navigation from a
     *     {@code *Multiple} yields read-only views.</li>
     * </ul>
     */
    public static void rewire() {
        // Fresh mocks for every interface.
        inventory = Mockito.mock(Inventory.class);

        environmentsMultiple = Mockito.mock(Environments.Multiple.class);
        environmentsRead = Mockito.mock(Environments.Read.class);
        environmentsReadWrite = Mockito.mock(Environments.ReadWrite.class);
        environmentsSingle = Mockito.mock(Environments.Single.class);

        feedsMultiple = Mockito.mock(Feeds.Multiple.class);
        feedsRead = Mockito.mock(Feeds.Read.class);
        feedsReadWrite = Mockito.mock(Feeds.ReadWrite.class);
        feedsSingle = Mockito.mock(Feeds.Single.class);

        metricsMultiple = Mockito.mock(Metrics.Multiple.class);
        metricsRead = Mockito.mock(Metrics.Read.class);
        metricsReadAssociate = Mockito.mock(Metrics.ReadAssociate.class);
        metricsReadWrite = Mockito.mock(Metrics.ReadWrite.class);
        metricsSingle = Mockito.mock(Metrics.Single.class);

        metricTypesMultiple = Mockito.mock(MetricTypes.Multiple.class);
        metricTypesRead = Mockito.mock(MetricTypes.Read.class);
        metricTypesReadAssociate = Mockito.mock(MetricTypes.ReadAssociate.class);
        metricTypesReadWrite = Mockito.mock(MetricTypes.ReadWrite.class);
        metricTypesSingle = Mockito.mock(MetricTypes.Single.class);

        relationshipsMultiple = Mockito.mock(Relationships.Multiple.class);
        relationshipsRead = Mockito.mock(Relationships.Read.class);
        relationshipsReadWrite = Mockito.mock(Relationships.ReadWrite.class);
        relationshipsSingle = Mockito.mock(Relationships.Single.class);

        resourcesMultiple = Mockito.mock(Resources.Multiple.class);
        resourcesRead = Mockito.mock(Resources.Read.class);
        resourcesReadWrite = Mockito.mock(Resources.ReadWrite.class);
        resourcesSingle = Mockito.mock(Resources.Single.class);

        resourceTypesMultiple = Mockito.mock(ResourceTypes.Multiple.class);
        resourceTypesRead = Mockito.mock(ResourceTypes.Read.class);
        resourceTypesReadWrite = Mockito.mock(ResourceTypes.ReadWrite.class);
        resourceTypesSingle = Mockito.mock(ResourceTypes.Single.class);

        tenantsMultiple = Mockito.mock(Tenants.Multiple.class);
        tenantsRead = Mockito.mock(Tenants.Read.class);
        tenantsReadWrite = Mockito.mock(Tenants.ReadWrite.class);
        tenantsSingle = Mockito.mock(Tenants.Single.class);

        // Root: the inventory hands out the tenants accessor.
        when(inventory.tenants()).thenReturn(tenantsReadWrite);

        // Environments wiring.
        when(environmentsMultiple.feeds()).thenReturn(feedsRead);
        when(environmentsMultiple.feedlessMetrics()).thenReturn(metricsRead);
        when(environmentsMultiple.relationships()).thenReturn(relationshipsRead);
        when(environmentsMultiple.relationships(any())).thenReturn(relationshipsRead);
        when(environmentsMultiple.feedlessResources()).thenReturn(resourcesRead);

        when(environmentsRead.get(any())).thenReturn(environmentsSingle);
        when(environmentsRead.getAll(anyVararg())).thenReturn(environmentsMultiple);
        when(environmentsReadWrite.get(any())).thenReturn(environmentsSingle);
        when(environmentsReadWrite.getAll(anyVararg())).thenReturn(environmentsMultiple);

        when(environmentsSingle.feeds()).thenReturn(feedsReadWrite);
        when(environmentsSingle.feedlessMetrics()).thenReturn(metricsReadWrite);
        when(environmentsSingle.relationships()).thenReturn(relationshipsReadWrite);
        when(environmentsSingle.relationships(any())).thenReturn(relationshipsReadWrite);
        when(environmentsSingle.feedlessResources()).thenReturn(resourcesReadWrite);

        // Feeds wiring.
        when(feedsMultiple.relationships()).thenReturn(relationshipsRead);
        when(feedsMultiple.relationships(anyVararg())).thenReturn(relationshipsRead);
        when(feedsMultiple.resources()).thenReturn(resourcesRead);
        when(feedsMultiple.metrics()).thenReturn(metricsRead);

        when(feedsRead.get(any())).thenReturn(feedsSingle);
        when(feedsRead.getAll(anyVararg())).thenReturn(feedsMultiple);
        when(feedsReadWrite.get(any())).thenReturn(feedsSingle);
        when(feedsReadWrite.getAll(anyVararg())).thenReturn(feedsMultiple);

        when(feedsSingle.relationships()).thenReturn(relationshipsReadWrite);
        when(feedsSingle.relationships(any())).thenReturn(relationshipsReadWrite);
        when(feedsSingle.resources()).thenReturn(resourcesReadWrite);
        when(feedsSingle.metrics()).thenReturn(metricsReadWrite);

        // Metrics wiring.
        when(metricsMultiple.relationships()).thenReturn(relationshipsRead);
        when(metricsMultiple.relationships(any())).thenReturn(relationshipsRead);

        when(metricsRead.get(any())).thenReturn(metricsSingle);
        when(metricsRead.getAll(anyVararg())).thenReturn(metricsMultiple);
        when(metricsReadAssociate.get(any())).thenReturn(metricsSingle);
        when(metricsReadAssociate.getAll(anyVararg())).thenReturn(metricsMultiple);
        when(metricsReadWrite.get(any())).thenReturn(metricsSingle);
        when(metricsReadWrite.getAll(anyVararg())).thenReturn(metricsMultiple);

        when(metricsSingle.relationships()).thenReturn(relationshipsReadWrite);
        when(metricsSingle.relationships(any())).thenReturn(relationshipsReadWrite);

        // Metric types wiring.
        when(metricTypesMultiple.metrics()).thenReturn(metricsRead);
        when(metricTypesMultiple.relationships()).thenReturn(relationshipsRead);
        when(metricTypesMultiple.relationships(any())).thenReturn(relationshipsRead);

        when(metricTypesRead.get(any())).thenReturn(metricTypesSingle);
        when(metricTypesRead.getAll(anyVararg())).thenReturn(metricTypesMultiple);
        when(metricTypesReadAssociate.get(any())).thenReturn(metricTypesSingle);
        when(metricTypesReadAssociate.getAll(anyVararg())).thenReturn(metricTypesMultiple);
        when(metricTypesReadWrite.get(any())).thenReturn(metricTypesSingle);
        when(metricTypesReadWrite.getAll(anyVararg())).thenReturn(metricTypesMultiple);

        when(metricTypesSingle.metrics()).thenReturn(metricsRead);
        when(metricTypesSingle.relationships()).thenReturn(relationshipsReadWrite);
        when(metricTypesSingle.relationships(any())).thenReturn(relationshipsReadWrite);

        // Relationships wiring — a relationship set can be navigated to every
        // entity type, always read-only.
        when(relationshipsMultiple.environments()).thenReturn(environmentsRead);
        when(relationshipsMultiple.feeds()).thenReturn(feedsRead);
        when(relationshipsMultiple.metrics()).thenReturn(metricsRead);
        when(relationshipsMultiple.metricTypes()).thenReturn(metricTypesRead);
        when(relationshipsMultiple.resources()).thenReturn(resourcesRead);
        when(relationshipsMultiple.resourceTypes()).thenReturn(resourceTypesRead);
        when(relationshipsMultiple.tenants()).thenReturn(tenantsRead);

        when(relationshipsRead.get(any())).thenReturn(relationshipsSingle);
        when(relationshipsRead.getAll(anyVararg())).thenReturn(relationshipsMultiple);
        // named(..) is stubbed for both the String and the WellKnown overload.
        when(relationshipsRead.named(anyString())).thenReturn(relationshipsMultiple);
        when(relationshipsRead.named(Mockito.<Relationships.WellKnown>any())).thenReturn(relationshipsMultiple);
        when(relationshipsReadWrite.named(anyString())).thenReturn(relationshipsMultiple);
        when(relationshipsReadWrite.named(Mockito.<Relationships.WellKnown>any())).thenReturn(relationshipsMultiple);
        when(relationshipsReadWrite.get(any())).thenReturn(relationshipsSingle);
        when(relationshipsReadWrite.getAll(anyVararg())).thenReturn(relationshipsMultiple);

        // Resources wiring.
        when(resourcesMultiple.metrics()).thenReturn(metricsRead);
        when(resourcesMultiple.relationships()).thenReturn(relationshipsRead);
        when(resourcesMultiple.relationships(any())).thenReturn(relationshipsRead);

        when(resourcesRead.get(any())).thenReturn(resourcesSingle);
        when(resourcesRead.getAll(anyVararg())).thenReturn(resourcesMultiple);
        when(resourcesReadWrite.get(any())).thenReturn(resourcesSingle);
        when(resourcesReadWrite.getAll(anyVararg())).thenReturn(resourcesMultiple);

        // A single resource associates metrics rather than owning them.
        when(resourcesSingle.metrics()).thenReturn(metricsReadAssociate);
        when(resourcesSingle.relationships()).thenReturn(relationshipsReadWrite);
        when(resourcesSingle.relationships(any())).thenReturn(relationshipsReadWrite);

        // Resource types wiring.
        when(resourceTypesMultiple.metricTypes()).thenReturn(metricTypesRead);
        when(resourceTypesMultiple.relationships()).thenReturn(relationshipsRead);
        when(resourceTypesMultiple.relationships(any())).thenReturn(relationshipsRead);

        when(resourceTypesRead.get(any())).thenReturn(resourceTypesSingle);
        when(resourceTypesRead.getAll(anyVararg())).thenReturn(resourceTypesMultiple);
        when(resourceTypesReadWrite.get(any())).thenReturn(resourceTypesSingle);
        when(resourceTypesReadWrite.getAll(anyVararg())).thenReturn(resourceTypesMultiple);

        // A single resource type associates metric types.
        when(resourceTypesSingle.metricTypes()).thenReturn(metricTypesReadAssociate);
        when(resourceTypesSingle.relationships()).thenReturn(relationshipsReadWrite);
        when(resourceTypesSingle.relationships(any())).thenReturn(relationshipsReadWrite);

        // Tenants wiring.
        when(tenantsReadWrite.get(anyString())).thenReturn(tenantsSingle);
        when(tenantsReadWrite.getAll(anyVararg())).thenReturn(tenantsMultiple);
        when(tenantsRead.get(anyString())).thenReturn(tenantsSingle);
        when(tenantsRead.getAll(anyVararg())).thenReturn(tenantsMultiple);

        when(tenantsSingle.environments()).thenReturn(environmentsReadWrite);
        when(tenantsSingle.metricTypes()).thenReturn(metricTypesReadWrite);
        when(tenantsSingle.relationships()).thenReturn(relationshipsReadWrite);
        when(tenantsSingle.relationships(any())).thenReturn(relationshipsReadWrite);
        when(tenantsSingle.resourceTypes()).thenReturn(resourceTypesReadWrite);

        when(tenantsMultiple.environments()).thenReturn(environmentsRead);
        when(tenantsMultiple.metricTypes()).thenReturn(metricTypesRead);
        when(tenantsMultiple.relationships()).thenReturn(relationshipsRead);
        when(tenantsMultiple.relationships(any())).thenReturn(relationshipsRead);
        when(tenantsMultiple.resourceTypes()).thenReturn(resourceTypesRead);
    }
}
Jiri-Kremser/hawkular-inventory
api/src/test/java/org/hawkular/inventory/api/test/InventoryMock.java
Java
apache-2.0
13,519
package com.blooms.kingsite.common.persistence.dialect.db; import com.blooms.kingsite.common.persistence.dialect.Dialect; /** * DB2的分页数据库方言实现 * * @author poplar.yfyang * @version 1.0 2010-10-10 下午12:31 * @since JDK 1.5 */ public class DB2Dialect implements Dialect { @Override public boolean supportsLimit() { return true; } private static String getRowNumber(String sql) { StringBuilder rownumber = new StringBuilder(50) .append("rownumber() over("); int orderByIndex = sql.toLowerCase().indexOf("order by"); if (orderByIndex > 0 && !hasDistinct(sql)) { rownumber.append(sql.substring(orderByIndex)); } rownumber.append(") as rownumber_,"); return rownumber.toString(); } private static boolean hasDistinct(String sql) { return sql.toLowerCase().contains("select distinct"); } @Override public String getLimitString(String sql, int offset, int limit) { return getLimitString(sql, offset, Integer.toString(offset), Integer.toString(limit)); } /** * 将sql变成分页sql语句,提供将offset及limit使用占位符号(placeholder)替换. * <pre> * 如mysql * dialect.getLimitString("select * from user", 12, ":offset",0,":limit") 将返回 * select * from user limit :offset,:limit * </pre> * * @param sql 实际SQL语句 * @param offset 分页开始纪录条数 * @param offsetPlaceholder 分页开始纪录条数-占位符号 * @param limitPlaceholder 分页纪录条数占位符号 * @return 包含占位符的分页sql */ public String getLimitString(String sql, int offset, String offsetPlaceholder, String limitPlaceholder) { int startOfSelect = sql.toLowerCase().indexOf("select"); StringBuilder pagingSelect = new StringBuilder(sql.length() + 100) .append(sql.substring(0, startOfSelect)) //add the comment .append("select * from ( select ") //nest the main query in an outer select .append(getRowNumber(sql)); //add the rownnumber bit into the outer query select list if (hasDistinct(sql)) { pagingSelect.append(" row_.* from ( ") //add another (inner) nested select .append(sql.substring(startOfSelect)) //add the main query .append(" ) as row_"); //close off the inner nested select } else { 
pagingSelect.append(sql.substring(startOfSelect + 6)); //add the main query } pagingSelect.append(" ) as temp_ where rownumber_ "); //add the restriction to the outer select if (offset > 0) { // int end = offset + limit; String endString = offsetPlaceholder + "+" + limitPlaceholder; pagingSelect.append("between ").append(offsetPlaceholder) .append("+1 and ").append(endString); } else { pagingSelect.append("<= ").append(limitPlaceholder); } return pagingSelect.toString(); } }
kailee2014/kingsite
src/main/java/com/blooms/kingsite/common/persistence/dialect/db/DB2Dialect.java
Java
apache-2.0
3,132
package com.cowthan.algrithm.algs4;

/***********************************************************************************
 *  Compilation:  javac MSD.java
 *  Execution:    java MSD < input.txt
 *
 *  Reads extended ASCII string from standard input and MSD radix sorts them.
 *
 *  % java MSD < shells.txt
 *  are
 *  by
 *  sea
 *  seashells
 *  seashells
 *  sells
 *  sells
 *  she
 *  she
 *  shells
 *  shore
 *  surely
 *  the
 *  the
 *
 ***********************************************************************************/

import com.cowthan.algrithm.std.BinaryStdIn;
import com.cowthan.algrithm.std.In;
import com.cowthan.algrithm.std.Out;
import com.cowthan.algrithm.std.Picture;
import com.cowthan.algrithm.std.StdIn;
import com.cowthan.algrithm.std.StdOut;
import com.cowthan.algrithm.std.StdRandom;

/**
 * MSD (most-significant-digit-first) radix sort for arrays of extended-ASCII
 * strings. Uses key-indexed counting on the d-th character per recursion level
 * and falls back to insertion sort for subarrays of at most CUTOFF elements.
 */
public class MSD {
    private static final int R      = 256;   // extended ASCII alphabet size
    private static final int CUTOFF =  15;   // cutoff to insertion sort

    // sort array of strings
    public static void sort(String[] a) {
        int N = a.length;
        String[] aux = new String[N];   // one shared scratch array for all recursion levels
        sort(a, 0, N-1, 0, aux);
    }

    // return dth character of s, -1 if d = length of string
    private static int charAt(String s, int d) {
        assert d >= 0 && d <= s.length();
        if (d == s.length()) return -1;
        return s.charAt(d);
    }

    // sort from a[lo] to a[hi], starting at the dth character
    private static void sort(String[] a, int lo, int hi, int d, String[] aux) {

        // cutoff to insertion sort for small subarrays
        if (hi <= lo + CUTOFF) {
            insertion(a, lo, hi, d);
            return;
        }

        // compute frequency counts
        // charAt yields -1..R-1, so the offset of 2 maps key c to count[c+2];
        // this leaves count[c+1] free to serve as the running start index of
        // each bucket during the distribution pass below.
        int[] count = new int[R+2];
        for (int i = lo; i <= hi; i++) {
            int c = charAt(a[i], d);
            count[c+2]++;
        }

        // transform counts to indices (prefix sums)
        for (int r = 0; r < R+1; r++)
            count[r+1] += count[r];

        // distribute keys into aux according to their d-th character
        for (int i = lo; i <= hi; i++) {
            int c = charAt(a[i], d);
            aux[count[c+1]++] = a[i];
        }

        // copy back
        for (int i = lo; i <= hi; i++)
            a[i] = aux[i - lo];

        // recursively sort for each character (bucket for "string ended",
        // i.e. charAt == -1, is already in place and is not recursed into)
        for (int r = 0; r < R; r++)
            sort(a, lo + count[r], lo + count[r+1] - 1, d+1, aux);
    }

    // insertion sort a[lo..hi], comparing strings starting at their dth character
    private static void insertion(String[] a, int lo, int hi, int d) {
        for (int i = lo; i <= hi; i++)
            for (int j = i; j > lo && less(a[j], a[j-1], d); j--)
                exch(a, j, j-1);
    }

    // exchange a[i] and a[j]
    private static void exch(String[] a, int i, int j) {
        String temp = a[i];
        a[i] = a[j];
        a[j] = temp;
    }

    // is v less than w, starting at character d
    private static boolean less(String v, String w, int d) {
        assert v.substring(0, d).equals(w.substring(0, d));
        return v.substring(d).compareTo(w.substring(d)) < 0;
    }

    // read strings from standard input, sort, and print them
    public static void main(String[] args) {
        String[] a = StdIn.readStrings();
        int N = a.length;
        sort(a);
        for (int i = 0; i < N; i++)
            StdOut.println(a[i]);
    }
}
cowthan/JavaAyo
src/com/cowthan/algrithm/algs4/MSD.java
Java
apache-2.0
3,276
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.facebook.infrastructure.io; import java.io.DataInputStream; import java.io.IOException; import java.io.InputStream; import java.util.Random; /** * An implementation of the DataInputStream interface. This instance is * completely thread unsafe. * * Author : Avinash Lakshman ( alakshman@facebook.com) & Prashant Malik ( * pmalik@facebook.com ) */ public final class DataInputBuffer extends DataInputStream { /* * This is a clone of the ByteArrayInputStream class w/o any method being * synchronized. */ public static class FastByteArrayInputStream extends InputStream { /** * An array of bytes that was provided by the creator of the stream. * Elements <code>buf[0]</code> through <code>buf[count-1]</code> are the * only bytes that can ever be read from the stream; element * <code>buf[pos]</code> is the next byte to be read. */ protected byte buf[]; /** * The index of the next character to read from the input stream buffer. * This value should always be nonnegative and not larger than the value of * <code>count</code>. The next byte to be read from the input stream buffer * will be <code>buf[pos]</code>. */ protected int pos; /** * The currently marked position in the stream. 
ByteArrayInputStream objects * are marked at position zero by default when constructed. They may be * marked at another position within the buffer by the <code>mark()</code> * method. The current buffer position is set to this point by the * <code>reset()</code> method. * <p> * If no mark has been set, then the value of mark is the offset passed to * the constructor (or 0 if the offset was not supplied). * * @since JDK1.1 */ protected int mark = 0; /** * The index one greater than the last valid character in the input stream * buffer. This value should always be nonnegative and not larger than the * length of <code>buf</code>. It is one greater than the position of the * last byte within <code>buf</code> that can ever be read from the input * stream buffer. */ protected int count; public FastByteArrayInputStream() { buf = new byte[0]; } /** * Creates a <code>ByteArrayInputStream</code> so that it uses * <code>buf</code> as its buffer array. The buffer array is not copied. The * initial value of <code>pos</code> is <code>0</code> and the initial value * of <code>count</code> is the length of <code>buf</code>. * * @param buf * the input buffer. */ public FastByteArrayInputStream(byte buf[]) { this.buf = buf; this.pos = 0; this.count = buf.length; } /** * Creates <code>ByteArrayInputStream</code> that uses <code>buf</code> as * its buffer array. The initial value of <code>pos</code> is * <code>offset</code> and the initial value of <code>count</code> is the * minimum of <code>offset+length</code> and <code>buf.length</code>. The * buffer array is not copied. The buffer's mark is set to the specified * offset. * * @param buf * the input buffer. * @param offset * the offset in the buffer of the first byte to read. * @param length * the maximum number of bytes to read from the buffer. 
*/ public FastByteArrayInputStream(byte buf[], int offset, int length) { this.buf = buf; this.pos = offset; this.count = Math.min(offset + length, buf.length); this.mark = offset; } public final void setBytes(byte[] bytes) { buf = bytes; pos = 0; count = bytes.length; } /** * Reads the next byte of data from this input stream. The value byte is * returned as an <code>int</code> in the range <code>0</code> to * <code>255</code>. If no byte is available because the end of the stream * has been reached, the value <code>-1</code> is returned. * <p> * This <code>read</code> method cannot block. * * @return the next byte of data, or <code>-1</code> if the end of the * stream has been reached. */ public final int read() { return (pos < count) ? (buf[pos++] & 0xFF) : -1; } /** * Reads up to <code>len</code> bytes of data into an array of bytes from * this input stream. If <code>pos</code> equals <code>count</code>, then * <code>-1</code> is returned to indicate end of file. Otherwise, the * number <code>k</code> of bytes read is equal to the smaller of * <code>len</code> and <code>count-pos</code>. If <code>k</code> is * positive, then bytes <code>buf[pos]</code> through * <code>buf[pos+k-1]</code> are copied into <code>b[off]</code> through * <code>b[off+k-1]</code> in the manner performed by * <code>System.arraycopy</code>. The value <code>k</code> is added into * <code>pos</code> and <code>k</code> is returned. * <p> * This <code>read</code> method cannot block. * * @param b * the buffer into which the data is read. * @param off * the start offset in the destination array <code>b</code> * @param len * the maximum number of bytes read. * @return the total number of bytes read into the buffer, or * <code>-1</code> if there is no more data because the end of the * stream has been reached. * @exception NullPointerException * If <code>b</code> is <code>null</code>. 
* @exception IndexOutOfBoundsException * If <code>off</code> is negative, <code>len</code> is * negative, or <code>len</code> is greater than * <code>b.length - off</code> */ public final int read(byte b[], int off, int len) { if (b == null) { throw new NullPointerException(); } else if (off < 0 || len < 0 || len > b.length - off) { throw new IndexOutOfBoundsException(); } if (pos >= count) { return -1; } if (pos + len > count) { len = count - pos; } if (len <= 0) { return 0; } System.arraycopy(buf, pos, b, off, len); pos += len; return len; } /** * Skips <code>n</code> bytes of input from this input stream. Fewer bytes * might be skipped if the end of the input stream is reached. The actual * number <code>k</code> of bytes to be skipped is equal to the smaller of * <code>n</code> and <code>count-pos</code>. The value <code>k</code> is * added into <code>pos</code> and <code>k</code> is returned. * * @param n * the number of bytes to be skipped. * @return the actual number of bytes skipped. */ public final long skip(long n) { if (pos + n > count) { n = count - pos; } if (n < 0) { return 0; } pos += n; return n; } /** * Returns the number of remaining bytes that can be read (or skipped over) * from this input stream. * <p> * The value returned is <code>count&nbsp;- pos</code>, which is the number * of bytes remaining to be read from the input buffer. * * @return the number of remaining bytes that can be read (or skipped over) * from this input stream without blocking. */ public final int available() { return count - pos; } /** * Tests if this <code>InputStream</code> supports mark/reset. The * <code>markSupported</code> method of <code>ByteArrayInputStream</code> * always returns <code>true</code>. * * @since JDK1.1 */ public final boolean markSupported() { return true; } /** * Set the current marked position in the stream. ByteArrayInputStream * objects are marked at position zero by default when constructed. 
They may * be marked at another position within the buffer by this method. * <p> * If no mark has been set, then the value of the mark is the offset passed * to the constructor (or 0 if the offset was not supplied). * * <p> * Note: The <code>readAheadLimit</code> for this class has no meaning. * * @since JDK1.1 */ public final void mark(int readAheadLimit) { mark = pos; } /** * Resets the buffer to the marked position. The marked position is 0 unless * another position was marked or an offset was specified in the * constructor. */ public final void reset() { pos = mark; } /** * Closing a <tt>ByteArrayInputStream</tt> has no effect. The methods in * this class can be called after the stream has been closed without * generating an <tt>IOException</tt>. * <p> */ public final void close() throws IOException { } } private static class Buffer extends FastByteArrayInputStream { public Buffer() { super(new byte[] {}); } public void reset(byte[] input, int start, int length) { this.buf = input; this.count = start + length; this.mark = start; this.pos = start; } public int getPosition() { return pos; } public int getLength() { return count; } } private Buffer buffer; /** Constructs a new empty buffer. */ public DataInputBuffer() { this(new Buffer()); } private DataInputBuffer(Buffer buffer) { super(buffer); this.buffer = buffer; } /** Resets the data that the buffer reads. */ public void reset(byte[] input, int length) { buffer.reset(input, 0, length); } /** Resets the data that the buffer reads. */ public void reset(byte[] input, int start, int length) { buffer.reset(input, start, length); } /** Returns the current position in the input. */ public int getPosition() { return buffer.getPosition(); } /** Returns the length of the input. 
*/ public int getLength() { return buffer.getLength(); } public static void main(String[] args) throws Throwable { Random random = new Random(); byte[] bytes = new byte[64 * 1024 * 1024]; random.nextBytes(bytes); for (int i = 0; i < 16; ++i) { int pos = 0; int count = bytes.length; long start2 = System.currentTimeMillis(); while (true) { int value = (pos < count) ? (bytes[pos] & 0xff) : -1; ++pos; if (value == -1) break; } System.out.println("TIME TAKEN : " + (System.currentTimeMillis() - start2)); FastByteArrayInputStream bis = new FastByteArrayInputStream(bytes); int read = 0; long start = System.currentTimeMillis(); while (true) { read = bis.read(); if (read == -1) break; } System.out .println("TIME TAKEN : " + (System.currentTimeMillis() - start)); } /* * DataOutputBuffer bufOut = new DataOutputBuffer(); * bufOut.writeUTF("Avinash"); bufOut.writeInt(4110241024); DataInputBuffer * bufIn = new DataInputBuffer(); bufIn.reset(bufOut.getData(), * bufOut.getLength()); System.out.println(bufIn.readUTF()); * System.out.println(bufIn.readInt()); */ } }
toddlipcon/helenus
src/java/com/facebook/infrastructure/io/DataInputBuffer.java
Java
apache-2.0
12,501
<?php
/**
 * CakePHP(tm) : Rapid Development Framework (http://cakephp.org)
 * Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
 *
 * Licensed under The MIT License
 * For full copyright and license information, please see the LICENSE.txt
 * Redistributions of files must retain the above copyright notice.
 *
 * @copyright Copyright (c) Cake Software Foundation, Inc. (http://cakefoundation.org)
 * @link http://cakephp.org CakePHP(tm) Project
 * @since 2.0.0
 * @license http://www.opensource.org/licenses/mit-license.php MIT License
 */
namespace Cake\Console;

/**
 * Object wrapper for outputting information from a shell application.
 * Can be connected to any stream resource that can be used with fopen()
 *
 * Can generate colorized output on consoles that support it. There are a few
 * built in styles
 *
 * - `error` Error messages.
 * - `warning` Warning messages.
 * - `info` Informational messages.
 * - `comment` Additional text.
 * - `question` Magenta text used for user prompts
 *
 * By defining styles with addStyle() you can create custom console styles.
 *
 * ### Using styles in output
 *
 * You can format console output using tags with the name of the style to apply. From inside a shell object
 *
 * ```
 * $this->out('<warning>Overwrite:</warning> foo.php was overwritten.');
 * ```
 *
 * This would create orange 'Overwrite:' text, while the rest of the text would remain the normal color.
 * See ConsoleOutput::styles() to learn more about defining your own styles. Nested styles are not supported
 * at this time.
 *
 */
class ConsoleOutput
{

    /**
     * Raw output constant - no modification of output text.
     *
     * @var int
     */
    const RAW = 0;

    /**
     * Plain output - tags will be stripped.
     *
     * @var int
     */
    const PLAIN = 1;

    /**
     * Color output - Convert known tags in to ANSI color escape codes.
     *
     * @var int
     */
    const COLOR = 2;

    /**
     * Constant for a newline.
     *
     * @var string
     */
    const LF = PHP_EOL;

    /**
     * File handle for output.
     *
     * @var resource
     */
    protected $_output;

    /**
     * The current output type. Manipulated with ConsoleOutput::outputAs();
     *
     * @var int
     */
    protected $_outputAs = self::COLOR;

    /**
     * text colors used in colored output.
     *
     * @var array
     */
    protected static $_foregroundColors = [
        'black' => 30,
        'red' => 31,
        'green' => 32,
        'yellow' => 33,
        'blue' => 34,
        'magenta' => 35,
        'cyan' => 36,
        'white' => 37
    ];

    /**
     * background colors used in colored output.
     *
     * @var array
     */
    protected static $_backgroundColors = [
        'black' => 40,
        'red' => 41,
        'green' => 42,
        'yellow' => 43,
        'blue' => 44,
        'magenta' => 45,
        'cyan' => 46,
        'white' => 47
    ];

    /**
     * formatting options for colored output
     *
     * @var string
     */
    protected static $_options = [
        'bold' => 1,
        'underline' => 4,
        'blink' => 5,
        'reverse' => 7,
    ];

    /**
     * Styles that are available as tags in console output.
     * You can modify these styles with ConsoleOutput::styles()
     *
     * @var array
     */
    protected static $_styles = [
        'emergency' => ['text' => 'red', 'underline' => true],
        'alert' => ['text' => 'red', 'underline' => true],
        'critical' => ['text' => 'red', 'underline' => true],
        'error' => ['text' => 'red', 'underline' => true],
        'warning' => ['text' => 'yellow'],
        'info' => ['text' => 'cyan'],
        'debug' => ['text' => 'yellow'],
        'success' => ['text' => 'green'],
        'comment' => ['text' => 'blue'],
        'question' => ['text' => 'magenta'],
        'notice' => ['text' => 'cyan']
    ];

    /**
     * Construct the output object.
     *
     * Checks for a pretty console environment. Ansicon allows pretty consoles
     * on windows, and is supported.
     *
     * @param string $stream The identifier of the stream to write output to.
     */
    public function __construct($stream = 'php://stdout')
    {
        $this->_output = fopen($stream, 'w');

        // Fall back to plain output on Windows without ANSICON, or when the
        // stream is not an interactive terminal (e.g. output is piped).
        if ((DS === '\\' && !(bool)env('ANSICON')) ||
            (function_exists('posix_isatty') && !posix_isatty($this->_output))
        ) {
            $this->_outputAs = self::PLAIN;
        }
    }

    /**
     * Outputs a single or multiple messages to stdout. If no parameters
     * are passed, outputs just a newline.
     *
     * @param string|array $message A string or an array of strings to output
     * @param int $newlines Number of newlines to append
     * @return int Returns the number of bytes returned from writing to stdout.
     */
    public function write($message, $newlines = 1)
    {
        if (is_array($message)) {
            $message = implode(static::LF, $message);
        }
        return $this->_write($this->styleText($message . str_repeat(static::LF, $newlines)));
    }

    /**
     * Apply styling to text.
     *
     * @param string $text Text with styling tags.
     * @return string String with color codes added.
     */
    public function styleText($text)
    {
        if ($this->_outputAs == static::RAW) {
            return $text;
        }
        if ($this->_outputAs == static::PLAIN) {
            $tags = implode('|', array_keys(static::$_styles));
            return preg_replace('#</?(?:' . $tags . ')>#', '', $text);
        }
        return preg_replace_callback(
            '/<(?P<tag>[a-z0-9-_]+)>(?P<text>.*?)<\/(\1)>/ims',
            [$this, '_replaceTags'],
            $text
        );
    }

    /**
     * Replace tags with color codes.
     *
     * @param array $matches An array of matches to replace.
     * @return string
     */
    protected function _replaceTags($matches)
    {
        $style = $this->styles($matches['tag']);
        if (empty($style)) {
            // Unknown tag: emit it back verbatim.
            return '<' . $matches['tag'] . '>' . $matches['text'] . '</' . $matches['tag'] . '>';
        }

        $styleInfo = [];
        if (!empty($style['text']) && isset(static::$_foregroundColors[$style['text']])) {
            $styleInfo[] = static::$_foregroundColors[$style['text']];
        }
        if (!empty($style['background']) && isset(static::$_backgroundColors[$style['background']])) {
            $styleInfo[] = static::$_backgroundColors[$style['background']];
        }
        unset($style['text'], $style['background']);
        foreach ($style as $option => $value) {
            if ($value) {
                $styleInfo[] = static::$_options[$option];
            }
        }
        // Fixed: implode() was called with the legacy swapped argument order
        // (array first); that order was deprecated in PHP 7.4 and throws a
        // TypeError on PHP 8. Glue must come first.
        return "\033[" . implode(';', $styleInfo) . 'm' . $matches['text'] . "\033[0m";
    }

    /**
     * Writes a message to the output stream.
     *
     * @param string $message Message to write.
     * @return bool success
     */
    protected function _write($message)
    {
        return fwrite($this->_output, $message);
    }

    /**
     * Get the current styles offered, or append new ones in.
     *
     * ### Get a style definition
     *
     * ```
     * $output->styles('error');
     * ```
     *
     * ### Get all the style definitions
     *
     * ```
     * $output->styles();
     * ```
     *
     * ### Create or modify an existing style
     *
     * ```
     * $output->styles('annoy', ['text' => 'purple', 'background' => 'yellow', 'blink' => true]);
     * ```
     *
     * ### Remove a style
     *
     * ```
     * $this->output->styles('annoy', false);
     * ```
     *
     * @param string|null $style The style to get or create.
     * @param array|bool|null $definition The array definition of the style to change or create a style
     *   or false to remove a style.
     * @return mixed If you are getting styles, the style or null will be returned. If you are creating/modifying
     *   styles true will be returned.
     */
    public function styles($style = null, $definition = null)
    {
        if ($style === null && $definition === null) {
            return static::$_styles;
        }
        if (is_string($style) && $definition === null) {
            return isset(static::$_styles[$style]) ? static::$_styles[$style] : null;
        }
        if ($definition === false) {
            unset(static::$_styles[$style]);
            return true;
        }
        static::$_styles[$style] = $definition;
        return true;
    }

    /**
     * Get/Set the output type to use. The output type how formatting tags are treated.
     *
     * @param int|null $type The output type to use. Should be one of the class constants.
     * @return mixed Either null or the value if getting.
     */
    public function outputAs($type = null)
    {
        if ($type === null) {
            return $this->_outputAs;
        }
        $this->_outputAs = $type;
    }

    /**
     * Clean up and close handles
     */
    public function __destruct()
    {
        if (is_resource($this->_output)) {
            fclose($this->_output);
        }
    }
}
thaysmelo/mobile_maravilhas-pe
sistema/vendor/cakephp/cakephp/src/Console/ConsoleOutput.php
PHP
apache-2.0
9,166
package com.privatecloud.constants; public class AppConstants { public static final String ROLE_USER = "ROLE_USER"; public static final String ROLE_ADMIN = "ROLE_ADMIN"; }
rashmithajajur/PrivateCloud-master
src/main/java/com/privatecloud/constants/AppConstants.java
Java
apache-2.0
184
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

#nullable disable

using Microsoft.CodeAnalysis.EmbeddedLanguages.VirtualChars;
using Microsoft.CodeAnalysis.Text;

namespace Microsoft.CodeAnalysis.EmbeddedLanguages.Common
{
    internal static class EmbeddedSyntaxHelpers
    {
        /// <summary>
        /// Computes the span stretching from the first character of
        /// <paramref name="token1"/> to the last character of <paramref name="token2"/>.
        /// </summary>
        public static TextSpan GetSpan<TSyntaxKind>(EmbeddedSyntaxToken<TSyntaxKind> token1, EmbeddedSyntaxToken<TSyntaxKind> token2) where TSyntaxKind : struct
        {
            return GetSpan(token1.VirtualChars[0], token2.VirtualChars.Last());
        }

        /// <summary>
        /// Computes the span covering an entire sequence of virtual characters.
        /// </summary>
        public static TextSpan GetSpan(VirtualCharSequence virtualChars)
        {
            return GetSpan(virtualChars[0], virtualChars.Last());
        }

        /// <summary>
        /// Computes the span from the start of <paramref name="firstChar"/> to the
        /// end of <paramref name="lastChar"/>.
        /// </summary>
        public static TextSpan GetSpan(VirtualChar firstChar, VirtualChar lastChar)
        {
            return TextSpan.FromBounds(firstChar.Span.Start, lastChar.Span.End);
        }
    }
}
brettfo/roslyn
src/Workspaces/SharedUtilitiesAndExtensions/Compiler/Core/EmbeddedLanguages/Common/EmbeddedSyntaxHelpers.cs
C#
apache-2.0
980
package com.dmelnyk.workinukraine.utils.di;

import android.content.Context;

import com.dmelnyk.workinukraine.utils.CityUtils;

import dagger.Module;
import dagger.Provides;

/**
 * Dagger module that supplies {@link CityUtils} instances.
 *
 * Created by dmitry on 30.03.17.
 */
@Module
public class CityModule {

    /**
     * Builds a {@link CityUtils} backed by the application-level
     * {@link Context}, so the utility does not hold on to any shorter-lived
     * component context.
     */
    @Provides
    CityUtils provideCityUtils(Context context) {
        final Context appContext = context.getApplicationContext();
        return new CityUtils(appContext);
    }
}
DmitryMelnyk/WorkInUkraine
app/src/main/java/com/dmelnyk/workinukraine/utils/di/CityModule.java
Java
apache-2.0
390
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.ode.bpel.engine; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Random; import java.util.Timer; import java.util.TimerTask; import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; import java.util.logging.Logger; import javax.wsdl.Definition; import javax.wsdl.Operation; import javax.wsdl.PortType; import javax.xml.namespace.QName; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.ode.bpel.dao.MessageExchangeDAO; import org.apache.ode.bpel.dao.ProcessDAO; import org.apache.ode.bpel.dao.ProcessInstanceDAO; import org.apache.ode.bpel.engine.fc.DeploymentUnitNameGenerator; import org.apache.ode.bpel.evt.BpelEvent; import org.apache.ode.bpel.extensions.sync.Constants; import org.apache.ode.bpel.iapi.BpelEngine; import org.apache.ode.bpel.iapi.BpelEngineException; import org.apache.ode.bpel.iapi.ContextException; import org.apache.ode.bpel.iapi.Endpoint; import org.apache.ode.bpel.iapi.Message; import org.apache.ode.bpel.iapi.MessageExchange; import org.apache.ode.bpel.iapi.MessageExchange.FailureType; import 
org.apache.ode.bpel.iapi.MessageExchange.MessageExchangePattern; import org.apache.ode.bpel.iapi.MessageExchange.Status; import org.apache.ode.bpel.iapi.MyRoleMessageExchange; import org.apache.ode.bpel.iapi.MyRoleMessageExchange.CorrelationStatus; import org.apache.ode.bpel.iapi.PartnerRoleMessageExchange; import org.apache.ode.bpel.iapi.ProcessState; import org.apache.ode.bpel.iapi.Scheduler; import org.apache.ode.bpel.iapi.Scheduler.JobDetails; import org.apache.ode.bpel.iapi.Scheduler.JobType; import org.apache.ode.bpel.iapi.fc.ProcessConfLoader; import org.apache.ode.bpel.intercept.InterceptorInvoker; import org.apache.ode.bpel.intercept.MessageExchangeInterceptor; import org.apache.ode.bpel.intercept.ProcessCountThrottler; import org.apache.ode.bpel.intercept.ProcessSizeThrottler; import org.apache.ode.bpel.o.OConstants; import org.apache.ode.bpel.o.OPartnerLink; import org.apache.ode.bpel.o.OProcess; import org.apache.ode.bpel.runtime.InvalidProcessException; import org.apache.ode.bpel.util.fc.ProcessRegistry; import org.apache.ode.fc.dao.FCManagementDAO; import org.apache.ode.scheduler.simple.ZJobList; import org.apache.ode.utils.DOMUtils; import org.apache.ode.utils.Namespaces; import org.apache.ode.utils.ZZBool; import org.apache.ode.utils.fc.FCConstants; import org.apache.ode.utils.msg.MessageBundle; import org.w3c.dom.Document; import org.w3c.dom.Element; /** * Implementation of the {@link BpelEngine} interface: provides the server * methods that should be invoked in the context of a transaction. 
* * @author mszefler * @author Matthieu Riou <mriou at apache dot org> */ public class BpelEngineImpl implements BpelEngine { private static final Log __log = LogFactory.getLog(BpelEngineImpl.class); /** RNG, for delays */ private Random _random = new Random(System.currentTimeMillis()); private static double _delayMean = 0; // @stmz Timer timer; ZZBool zzbool; ZJobList jobList; public static Logger logger = Logger.getLogger("Log-XML"); static { try { String delay = System.getenv("ODE_DEBUG_TX_DELAY"); if (delay != null && delay.length() > 0) { _delayMean = Double.valueOf(delay); __log.info("Stochastic debugging delay activated. Delay (Mean)=" + _delayMean + "ms."); } } catch (Throwable t) { if (__log.isDebugEnabled()) { __log.debug( "Could not read ODE_DEBUG_TX_DELAY environment variable; assuming 0 (mean) delay", t); } else { __log.info("Could not read ODE_DEBUG_TX_DELAY environment variable; assuming 0 (mean) delay"); } } } private static final Messages __msgs = MessageBundle .getMessages(Messages.class); private static final double PROCESS_OVERHEAD_MEMORY_FACTOR = 1.2; /** Active processes, keyed by process id. */ public final HashMap<QName, BpelProcess> _activeProcesses = new HashMap<QName, BpelProcess>(); /** Mapping from myrole service name to active process. */ private final HashMap<QName, List<BpelProcess>> _serviceMap = new HashMap<QName, List<BpelProcess>>(); /** Mapping from a potentially shared endpoint to its EPR */ private SharedEndpoints _sharedEps; /** Manage instance-level locks. 
*/ private final InstanceLockManager _instanceLockManager = new InstanceLockManager(); public final Contexts _contexts; private final Map<QName, Long> _hydratedSizes = new HashMap<QName, Long>(); private final Map<QName, Long> _unhydratedSizes = new HashMap<QName, Long>(); // AH: private Definition fcServiceDefinition; private FragmentCompositionEventBroker fcEventBroker; private BpelDatabase db; private DeploymentUnitNameGenerator deploymentUnitNameGenerator; // AH: end public BpelEngineImpl(Contexts contexts) { _contexts = contexts; _sharedEps = new SharedEndpoints(); _sharedEps.init(); // @stmz jobList = ZJobList.getInstance(); zzbool = ZZBool.getInstance(); timer = new Timer(); timer.schedule(new Task(), 1000, 100); // AH: deploymentUnitNameGenerator = new DeploymentUnitNameGenerator(); // AH: end } // @stmz: if we can execute a job, do so // by putting a job of the list in ZJobList // and forward it to the SimpleScheduler for // immediate execution public class Task extends TimerTask { public Task() { } public void run() { if (zzbool.getCanRun()) { zzbool.setCanRun(false); //trying to test if this gets triggered after the deadlock /*Thread thread = new Thread(){ public void run(){ Object o = new Object(); int i = 0; while (!zzbool.getCanRun() && i++ < 2000) { synchronized(o) { try { o.wait(10); } catch (InterruptedException e) { // TODO Auto-generated catch block e.printStackTrace(); } } } if (i >= 2000) { zzbool.setCanRun(true); System.out.println("Forced Release of the lock"); } } }; thread.start();*/ final Scheduler.JobInfo info = jobList.getJobInfo(); final Scheduler scheduler = _contexts.scheduler; if (info != null) { zzbool.setRunning(true); try { scheduler.execTransaction(new Callable<Void>() { public Void call() throws Exception { scheduler.scheduleVolatileJob(true, info.jobDetail); //TODO did break zzbool.setCanRun(true); return null; } }); } catch (ContextException e) { // no impact zzbool.setCanRun(true); System.out.println(e); } catch (Exception e) 
{ // no impact zzbool.setCanRun(true); System.out.println(e); } } else { zzbool.setCanRun(true); } } } } // AH: public void addRoutingsToFragment(QName serviceName, BpelProcess process) { List<BpelProcess> processes = _serviceMap.get(serviceName); if (!processes.contains(process)) { processes.add(process); } } // AH: end public SharedEndpoints getSharedEndpoints() { return _sharedEps; } public MyRoleMessageExchange createMessageExchange(String clientKey, QName targetService, String operation, String pipedMexId) throws BpelEngineException { List<BpelProcess> targets = route(targetService, null); List<BpelProcess> activeTargets = new ArrayList<BpelProcess>(); for (BpelProcess target : targets) { if (target.getConf().getState() == ProcessState.ACTIVE) { activeTargets.add(target); } } if (targets == null || targets.size() == 0) throw new BpelEngineException("NoSuchService: " + targetService); if (targets.size() == 1 || activeTargets.size() == 1) { // If the number of targets is one, create and return a simple MEX BpelProcess target; if (activeTargets.size() == 1) { target = activeTargets.get(0); } else { target = targets.get(0); } return createNewMyRoleMex(target, clientKey, targetService, operation, pipedMexId); } else { // If the number of targets is greater than one, create and return // a brokered MEX that embeds the simple MEXs for each of the // targets BpelProcess template = activeTargets.get(0); ArrayList<MyRoleMessageExchange> meps = new ArrayList<MyRoleMessageExchange>(); for (BpelProcess target : activeTargets) { meps.add(createNewMyRoleMex(target, clientKey, targetService, operation, pipedMexId)); } return createNewMyRoleMex(template, meps); } } public MyRoleMessageExchange createFragmentMessageExchange( String clientKey, QName targetService, String operation, Long instanceId) throws BpelEngineException { String pipedMexId = null; BpelProcess target = null; ProcessInstanceDAO instance = null; if (instanceId != null) { instance = 
_contexts.dao.getConnection().getInstance(instanceId); } if (instance != null) { BpelProcess process = null; ProcessDAO processDao = instance.getProcess(); process = _activeProcesses.get(processDao.getProcessId()); target = process; } if (target == null) throw new BpelEngineException("NoSuchInstance: " + instanceId); return createNewMyRoleMex(target, clientKey, targetService, operation, pipedMexId); } private MyRoleMessageExchange createNewMyRoleMex(BpelProcess target, String clientKey, QName targetService, String operation, String pipedMexId) { MessageExchangeDAO dao; if (target == null || target.isInMemory()) { dao = _contexts.inMemDao.getConnection().createMessageExchange( MessageExchangeDAO.DIR_PARTNER_INVOKES_MYROLE); } else { dao = _contexts.dao.getConnection().createMessageExchange( MessageExchangeDAO.DIR_PARTNER_INVOKES_MYROLE); } dao.setCorrelationId(clientKey); dao.setCorrelationStatus(CorrelationStatus.UKNOWN_ENDPOINT.toString()); dao.setPattern(MessageExchangePattern.UNKNOWN.toString()); dao.setCallee(targetService); dao.setStatus(Status.NEW.toString()); dao.setOperation(operation); dao.setPipedMessageExchangeId(pipedMexId); MyRoleMessageExchangeImpl mex = new MyRoleMessageExchangeImpl(target, this, dao); // AH: ignore fragment composition if (target != null && !targetService.equals(FCConstants.FC_SERVICE_NAME)) { // AH: end target.initMyRoleMex(mex); } return mex; } /** * Return a brokered MEX that delegates invocations to each of the embedded * MEXs contained in the <code>meps</code> list, using the appropriate * style. 
* * @param target * @param meps * @return * @throws BpelEngineException */ private MyRoleMessageExchange createNewMyRoleMex(BpelProcess target, List<MyRoleMessageExchange> meps) throws BpelEngineException { MyRoleMessageExchangeImpl templateMex = (MyRoleMessageExchangeImpl) meps .get(0); MessageExchangeDAO templateMexDao = templateMex.getDAO(); return new BrokeredMyRoleMessageExchangeImpl(target, this, meps, templateMexDao, templateMex); } public MyRoleMessageExchange createMessageExchange(String clientKey, QName targetService, String operation) { return createMessageExchange(clientKey, targetService, operation, null); } // AH: public void setFcServiceDefinition(Definition def) { fcServiceDefinition = def; } // AH: end private void setMessageExchangeProcess(String mexId, ProcessDAO processDao) { MessageExchangeDAO mexdao = _contexts.inMemDao.getConnection() .getMessageExchange(mexId); if (mexdao == null) mexdao = _contexts.dao.getConnection().getMessageExchange(mexId); if (mexdao != null) mexdao.setProcess(processDao); } public MessageExchange getMessageExchange(String mexId) { MessageExchangeDAO mexdao = _contexts.inMemDao.getConnection() .getMessageExchange(mexId); if (mexdao == null) mexdao = _contexts.dao.getConnection().getMessageExchange(mexId); if (mexdao == null) return null; ProcessDAO pdao = mexdao.getProcess(); BpelProcess process = pdao == null ? null : _activeProcesses.get(pdao .getProcessId()); MessageExchangeImpl mex; switch (mexdao.getDirection()) { case MessageExchangeDAO.DIR_BPEL_INVOKES_PARTNERROLE: if (process == null) { String errmsg = __msgs.msgProcessNotActive(pdao.getProcessId()); __log.error(errmsg); // TODO: Perhaps we should define a checked exception for this // condition. 
throw new BpelEngineException(errmsg); } { OPartnerLink plink = (OPartnerLink) process.getOProcess() .getChild(mexdao.getPartnerLinkModelId()); PortType ptype = plink.partnerRolePortType; Operation op = plink.getPartnerRoleOperation(mexdao .getOperation()); // TODO: recover Partner's EPR mex = createPartnerRoleMessageExchangeImpl(mexdao, ptype, op, plink, process); } break; case MessageExchangeDAO.DIR_PARTNER_INVOKES_MYROLE: mex = new MyRoleMessageExchangeImpl(process, this, mexdao); // AH: if (process == null) { if (mexdao.getCallee().equals(FCConstants.FC_SERVICE_NAME)) { PortType port = fcServiceDefinition .getPortType(FCConstants.FC_PORT_TYPE_NAME); Operation wsdlOperation = port.getOperation( mex.getOperationName(), null, null); mex.setPortOp(port, wsdlOperation); } } else { // AH: end OPartnerLink plink = (OPartnerLink) process.getOProcess() .getChild(mexdao.getPartnerLinkModelId()); // the partner link might not be hydrated if (plink != null) { PortType ptype = plink.myRolePortType; Operation op = plink.getMyRoleOperation(mexdao .getOperation()); mex.setPortOp(ptype, op); } } break; default: String errmsg = "BpelEngineImpl: internal error, invalid MexDAO direction: " + mexId; __log.fatal(errmsg); throw new BpelEngineException(errmsg); } return mex; } // enable extensibility protected PartnerRoleMessageExchangeImpl createPartnerRoleMessageExchangeImpl( MessageExchangeDAO mexdao, PortType ptype, Operation op, OPartnerLink plink, BpelProcess process) { return new PartnerRoleMessageExchangeImpl(this, mexdao, ptype, op, null, plink.hasMyRole() ? 
process.getInitialMyRoleEPR(plink) : null, process.getPartnerRoleChannel(plink)); } BpelProcess unregisterProcess(QName process) { BpelProcess p = _activeProcesses.remove(process); __log.debug("Unregister process: serviceId=" + process + ", process=" + p); if (p != null) { if (__log.isDebugEnabled()) __log.debug("Deactivating process " + p.getPID()); Iterator<List<BpelProcess>> serviceIter = _serviceMap.values() .iterator(); while (serviceIter.hasNext()) { Iterator<BpelProcess> entryProcesses = serviceIter.next() .iterator(); while (entryProcesses.hasNext()) { BpelProcess entryProcess = entryProcesses.next(); if (entryProcess.getPID().equals(process)) { entryProcesses.remove(); } } } // unregister the services provided by the process p.deactivate(); // release the resources held by this process p.dehydrate(); // update the process footprints list _hydratedSizes.remove(p.getPID()); } return p; } boolean isProcessRegistered(QName pid) { return _activeProcesses.containsKey(pid); } public BpelProcess getProcess(QName pid) { return _activeProcesses.get(pid); } /** * Register a process with the engine. 
* * @param process * the process to register */ void registerProcess(BpelProcess process) { _activeProcesses.put(process.getPID(), process); // AH: deploymentUnitNameGenerator.registerProcess(process); // AH: end for (Endpoint e : process.getServiceNames()) { __log.debug("Register process: serviceId=" + e + ", process=" + process); List<BpelProcess> processes = _serviceMap.get(e.serviceName); if (processes == null) { processes = new ArrayList<BpelProcess>(); _serviceMap.put(e.serviceName, processes); } // Remove any older version of the process from the list Iterator<BpelProcess> processesIter = processes.iterator(); while (processesIter.hasNext()) { BpelProcess cachedVersion = processesIter.next(); __log.debug("cached version " + cachedVersion.getPID() + " vs registering version " + process.getPID()); if (cachedVersion.getProcessType().equals( process.getProcessType())) { // // Check for versions to retain newer one // if (cachedVersion.getVersion() > process.getVersion()) { // __log.debug("removing current version"); // process.activate(this); // process.deactivate(); // return; // } else { // __log.debug("removing cached older version"); // processesIter.remove(); // cachedVersion.deactivate(); // } //@hahnml: Remove the cached process __log.debug("removing cached older version"); //@sonntamo: don't do anything due to concurrent workflow evolution // processesIter.remove(); // cachedVersion.deactivate(); } } processes.add(process); } process.activate(this); } /** * Route to a process using the service id. Note, that we do not need the * endpoint name here, we are assuming that two processes would not be * registered under the same service qname but different endpoint. * * @param service * target service id * @param request * request message * @return process corresponding to the targetted service, or * <code>null</code> if service identifier is not recognized. 
*/ List<BpelProcess> route(QName service, Message request) { // TODO: use the message to route to the correct service if more than // one service is listening on the same endpoint. List<BpelProcess> routed = _serviceMap.get(service); if (__log.isDebugEnabled()) __log.debug("Routed: svcQname " + service + " --> " + routed); return routed; } OProcess getOProcess(QName processId) { BpelProcess process = _activeProcesses.get(processId); if (process == null) return null; return process.getOProcess(); } public void acquireInstanceLock(final Long iid) { //TODO could break //@krawczls: Testing if this avoids the deadlock problem //return; // We lock the instance to prevent concurrent transactions and prevent // unnecessary rollbacks, // Note that we don't want to wait too long here to get our lock, since // we // are likely holding // on to scheduler's locks of various sorts. if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - AquireInstanceLock" + iid); } try { System.out.println("BpelEngineImpl - AquireInstanceLock1"); _instanceLockManager.lock(iid, 1, TimeUnit.MICROSECONDS); _contexts.scheduler .registerSynchronizer(new Scheduler.Synchronizer() { public void afterCompletion(boolean success) { if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - unlock1" + iid); } _instanceLockManager.unlock(iid); if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - unlock2" + iid); } } public void beforeCompletion() { } }); } catch (InterruptedException e) { if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - AquireInstanceLock2:Catched Exception1"); } e.printStackTrace(); // Retry later. 
__log.debug("Thread interrupted, job will be rescheduled"); zzbool.setRunning(false); throw new Scheduler.JobProcessorException(true); } catch (org.apache.ode.bpel.engine.InstanceLockManager.TimeoutException e) { if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - AquireInstanceLock2:Catched Exception2"); } e.printStackTrace(); __log.debug("Instance " + iid + " is busy, rescheduling job."); zzbool.setRunning(false); throw new Scheduler.JobProcessorException(true); } } public void onScheduledJob(Scheduler.JobInfo jobInfo) throws Scheduler.JobProcessorException { if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - " + jobInfo.jobDetail.instanceId); } final JobDetails we = jobInfo.jobDetail; if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 1"); } /* * if (!we.getBool()) { we.setBool(true); addJobInfo(jobInfo); return; } */ // @stmz: mark this job as executed, so in case of rescheduling, its put // to the list in ZJobList // again, to prevent concurrent execution of jobs we.setBool(false); if (__log.isTraceEnabled()) __log.trace("[JOB] onScheduledJob " + jobInfo + "" + we.getInstanceId()); if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 2"); } acquireInstanceLock(we.getInstanceId()); if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 2.5"); } // DONT PUT CODE HERE-need this method real tight in a try/catch block, // we // need to handle // all types of failure here, the scheduler is not going to know how to // handle our errors, // ALSO we have to release the lock obtained above (IMPORTANT), lest the // whole system come // to a grinding halt. 
BpelProcess process = null; try { if (we.getProcessId() != null) { if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 3"); } process = _activeProcesses.get(we.getProcessId()); } else { if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 4"); } ProcessInstanceDAO instance; if (we.getInMem()) instance = _contexts.inMemDao.getConnection().getInstance( we.getInstanceId()); else instance = _contexts.dao.getConnection().getInstance( we.getInstanceId()); if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 5"); } if (instance == null) { if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 6"); } __log.debug(__msgs .msgScheduledJobReferencesUnknownInstance(we .getInstanceId())); // nothing we can do, this instance is not in the database, // it will // always fail, not // exactly an error since can occur in normal course of // events. zzbool.setRunning(false); return; } if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 7"); } ProcessDAO processDao = instance.getProcess(); process = _activeProcesses.get(processDao.getProcessId()); if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 8"); } } if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 8.5"); } if (process == null) { // The process is not active, there's nothing we can do with // this job if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 9"); } __log.debug("Process " + we.getProcessId() + " can't be found, job abandoned."); if (Constants.DEBUG_LEVEL > 1) { System.out.println("Process " + we.getProcessId() + " can't be found, job abandoned."); } zzbool.setRunning(false); return; } if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 10"); } ClassLoader cl = Thread.currentThread().getContextClassLoader(); try { if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 11"); } Thread.currentThread().setContextClassLoader( process._classLoader); if 
(we.getType().equals(JobType.INVOKE_CHECK)) { if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 12"); } if (__log.isDebugEnabled()) __log.debug("handleJobDetails: InvokeCheck event for mexid " + we.getMexId()); sendPartnerRoleFailure(we, MessageExchange.FailureType.COMMUNICATION_ERROR); // @hahnml: CHECK if this is the right position for this // statement zzbool.setRunning(false); return; } else if (we.getType().equals(JobType.INVOKE_INTERNAL)) { if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 13"); } if (__log.isDebugEnabled()) __log.debug("handleJobDetails: InvokeInternal event for mexid " + we.getMexId()); setMessageExchangeProcess(we.getMexId(), process.getProcessDAO()); MyRoleMessageExchangeImpl mex = (MyRoleMessageExchangeImpl) getMessageExchange(we .getMexId()); if (!process.processInterceptors(mex, InterceptorInvoker.__onJobScheduled)) { boolean isTwoWay = Boolean.valueOf(mex .getProperty("isTwoWay")); if (isTwoWay) { String causeCodeValue = mex .getProperty("causeCode"); mex.getDAO().setProcess(process.getProcessDAO()); sendMyRoleFault( process, we, causeCodeValue != null ? 
Integer .valueOf(causeCodeValue) : InvalidProcessException.DEFAULT_CAUSE_CODE); // @hahnml: CHECK if this is the right position for // this statement zzbool.setRunning(false); return; } else { throw new Scheduler.JobProcessorException( checkRetry(we)); } } } if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 14"); } process.handleJobDetails(jobInfo.jobDetail); if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 15"); } debuggingDelay(); if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 16"); } } finally { if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 17"); } // @hahnml: CHECK if this is the right position for this // statement zzbool.setRunning(false); Thread.currentThread().setContextClassLoader(cl); } } catch (Scheduler.JobProcessorException e) { if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 16"); } zzbool.setRunning(false); throw e; } catch (BpelEngineException bee) { if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 17"); } zzbool.setRunning(false); __log.error(__msgs.msgScheduledJobFailed(we), bee); throw new Scheduler.JobProcessorException(bee, checkRetry(we)); } catch (ContextException ce) { if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 18"); } zzbool.setRunning(false); __log.error(__msgs.msgScheduledJobFailed(we), ce); throw new Scheduler.JobProcessorException(ce, checkRetry(we)); } catch (InvalidProcessException ipe) { if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 19"); } zzbool.setRunning(false); __log.error(__msgs.msgScheduledJobFailed(we), ipe); sendMyRoleFault(process, we, ipe.getCauseCode()); } catch (RuntimeException rte) { if (Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 20"); } zzbool.setRunning(false); __log.error(__msgs.msgScheduledJobFailed(we), rte); throw new Scheduler.JobProcessorException(rte, checkRetry(we)); } catch (Throwable t) { if 
(Constants.DEBUG_LEVEL > 1) { System.out.println("BpelEngineImpl - 21"); } zzbool.setRunning(false); __log.error(__msgs.msgScheduledJobFailed(we), t); throw new Scheduler.JobProcessorException(t, checkRetry(we)); } } private boolean checkRetry(JobDetails we) { // Only retry if the job is NOT in memory. Not that this does not // guaranty // that a retry will be scheduled. // Actually events are not retried if not persisted and the scheduler // might // choose to discard the event if it has been retried too many times. return !we.getInMem(); } /** * Block the thread for random amount of time. Used for testing for races * and the like. The delay generated is exponentially distributed with the * mean obtained from the <code>ODE_DEBUG_TX_DELAY</code> environment * variable. */ private void debuggingDelay() { // Do a delay for debugging purposes. if (_delayMean != 0) try { long delay = randomExp(_delayMean); // distribution // with mean // _delayMean __log.warn("Debugging delay has been activated; delaying transaction for " + delay + "ms."); Thread.sleep(delay); } catch (InterruptedException e) { ; // ignore } } private long randomExp(double mean) { double u = _random.nextDouble(); // Uniform long delay = (long) (-Math.log(u) * mean); // Exponential return delay; } public void fireEvent(BpelEvent event) { // Note that the eventListeners list is a copy-on-write array, so need // to mess with synchronization. for (org.apache.ode.bpel.iapi.BpelEventListener l : _contexts.eventListeners) { l.onEvent(event); } } /** * Get the list of globally-registered message-exchange interceptors. 
* * @return list */ List<MessageExchangeInterceptor> getGlobalInterceptors() { return _contexts.globalInterceptors; } public void registerMessageExchangeInterceptor( MessageExchangeInterceptor interceptor) { _contexts.globalInterceptors.add(interceptor); } public void unregisterMessageExchangeInterceptor( MessageExchangeInterceptor interceptor) { _contexts.globalInterceptors.remove(interceptor); } public void unregisterMessageExchangeInterceptor(Class interceptorClass) { MessageExchangeInterceptor candidate = null; for (MessageExchangeInterceptor interceptor : _contexts.globalInterceptors) { if (interceptor.getClass().isAssignableFrom(interceptorClass)) { candidate = interceptor; break; } } if (candidate != null) { _contexts.globalInterceptors.remove(candidate); } } public long getTotalBpelFootprint() { long bpelFootprint = 0; for (BpelProcess process : _activeProcesses.values()) { Long size = _hydratedSizes.get(process.getPID()); if (size == null) { size = _unhydratedSizes.get(process.getPID()); } if (size != null && size.longValue() > 0) { bpelFootprint += size; } } return bpelFootprint; } public long getHydratedFootprint() { long hydratedFootprint = 0; for (BpelProcess process : _activeProcesses.values()) { if (!process.hintIsHydrated()) { continue; } Long size = _hydratedSizes.get(process.getPID()); if (size == null) { size = _unhydratedSizes.get(process.getPID()); } if (size != null && size.longValue() > 0) { hydratedFootprint += size; } } return hydratedFootprint; } public long getHydratedProcessSize(QName processName) { return getHydratedProcessSize(_activeProcesses.get(processName)); } private long getHydratedProcessSize(BpelProcess process) { long potentialGrowth = 0; if (!process.hintIsHydrated()) { Long mySize = _hydratedSizes.get(process.getPID()); if (mySize == null) { mySize = _unhydratedSizes.get(process.getPID()); } if (mySize != null && mySize.longValue() > 0) { potentialGrowth = mySize.longValue(); } } return 
getHydratedProcessSize(potentialGrowth); } private long getHydratedProcessSize(long potentialGrowth) { long processMemory = (long) ((getHydratedFootprint() + potentialGrowth) * PROCESS_OVERHEAD_MEMORY_FACTOR); return processMemory; } public int getHydratedProcessCount(QName processName) { int processCount = 0; for (BpelProcess process : _activeProcesses.values()) { if (process.hintIsHydrated() || process.getPID().equals(processName)) { processCount++; } } return processCount; } private long _processThrottledMaximumSize = Long.MAX_VALUE; private int _processThrottledMaximumCount = Integer.MAX_VALUE; private int _instanceThrottledMaximumCount = Integer.MAX_VALUE; private boolean _hydrationThrottled = false; // AH: private ProcessConfLoader processConfLoader; private ProcessRegistry processRegistry; // AH: end public void setInstanceThrottledMaximumCount( int instanceThrottledMaximumCount) { this._instanceThrottledMaximumCount = instanceThrottledMaximumCount; } public int getInstanceThrottledMaximumCount() { return _instanceThrottledMaximumCount; } public void setProcessThrottledMaximumCount( int hydrationThrottledMaximumCount) { this._processThrottledMaximumCount = hydrationThrottledMaximumCount; if (hydrationThrottledMaximumCount < Integer.MAX_VALUE) { registerMessageExchangeInterceptor(new ProcessCountThrottler()); } else { unregisterMessageExchangeInterceptor(ProcessCountThrottler.class); } } public int getProcessThrottledMaximumCount() { return _processThrottledMaximumCount; } public void setProcessThrottledMaximumSize( long hydrationThrottledMaximumSize) { this._processThrottledMaximumSize = hydrationThrottledMaximumSize; if (hydrationThrottledMaximumSize < Long.MAX_VALUE) { registerMessageExchangeInterceptor(new ProcessSizeThrottler()); } else { unregisterMessageExchangeInterceptor(ProcessSizeThrottler.class); } } public long getProcessThrottledMaximumSize() { return _processThrottledMaximumSize; } public void setProcessSize(QName processId, boolean 
hydratedOnce) { BpelProcess process = _activeProcesses.get(processId); long processSize = process.sizeOf(); if (hydratedOnce) { _hydratedSizes.put(process.getPID(), new Long(processSize)); _unhydratedSizes.remove(process.getPID()); } else { _hydratedSizes.remove(process.getPID()); _unhydratedSizes.put(process.getPID(), new Long(processSize)); } } /** * Returns true if the last used process was dehydrated because it was not * in-use. */ public boolean dehydrateLastUnusedProcess() { BpelProcess lastUnusedProcess = null; long lastUsedMinimum = Long.MAX_VALUE; for (BpelProcess process : _activeProcesses.values()) { if (process.hintIsHydrated() && process.getLastUsed() < lastUsedMinimum && process.getInstanceInUseCount() == 0) { lastUsedMinimum = process.getLastUsed(); lastUnusedProcess = process; } } if (lastUnusedProcess != null) { lastUnusedProcess.dehydrate(); return true; } return false; } public void sendMyRoleFault(BpelProcess process, JobDetails we, int causeCode) { MessageExchange mex = (MessageExchange) getMessageExchange(we .getMexId()); if (!(mex instanceof MyRoleMessageExchange)) { return; } QName faultQName = null; OConstants constants = process.getOProcess().constants; if (constants != null) { Document document = DOMUtils.newDocument(); Element faultElement = document.createElementNS( Namespaces.SOAP_ENV_NS, "Fault"); Element faultDetail = document.createElementNS( Namespaces.ODE_EXTENSION_NS, "fault"); faultElement.appendChild(faultDetail); switch (causeCode) { case InvalidProcessException.TOO_MANY_PROCESSES_CAUSE_CODE: faultQName = constants.qnTooManyProcesses; faultDetail .setTextContent("The total number of processes in use is over the limit."); break; case InvalidProcessException.TOO_HUGE_PROCESSES_CAUSE_CODE: faultQName = constants.qnTooHugeProcesses; faultDetail .setTextContent("The total size of processes in use is over the limit"); break; case InvalidProcessException.TOO_MANY_INSTANCES_CAUSE_CODE: faultQName = constants.qnTooManyInstances; 
faultDetail .setTextContent("No more instances of the process allowed at start at this time."); break; case InvalidProcessException.RETIRED_CAUSE_CODE: // we're invoking a target process, trying to see if we can // retarget the // message // to the current version (only applies when it's a new process // creation) for (BpelProcess activeProcess : _activeProcesses.values()) { if (activeProcess .getConf() .getState() .equals(org.apache.ode.bpel.iapi.ProcessState.ACTIVE) && activeProcess.getConf().getType() .equals(process.getConf().getType())) { we.setProcessId(activeProcess._pid); ((MyRoleMessageExchangeImpl) mex)._process = activeProcess; process.handleJobDetails(we); return; } } faultQName = constants.qnRetiredProcess; faultDetail .setTextContent("The process you're trying to instantiate has been retired."); break; case InvalidProcessException.DEFAULT_CAUSE_CODE: default: faultQName = constants.qnUnknownFault; break; } MexDaoUtil.setFaulted((MessageExchangeImpl) mex, faultQName, faultElement); } } private void sendPartnerRoleFailure(JobDetails we, FailureType failureType) { MessageExchange mex = (MessageExchange) getMessageExchange(we .getMexId()); if (mex instanceof PartnerRoleMessageExchange) { if (mex.getStatus() == MessageExchange.Status.ASYNC || mex.getStatus() == MessageExchange.Status.REQUEST) { String msg = "No response received for invoke (mexId=" + we.getMexId() + "), forcing it into a failed state."; if (__log.isDebugEnabled()) __log.debug(msg); MexDaoUtil.setFailure((PartnerRoleMessageExchangeImpl) mex, failureType, msg, null); } } } public BpelProcess getNewestProcessByType(QName processType) { int v = -1; BpelProcess q = null; for (BpelProcess p : _activeProcesses.values()) { if (p.getProcessType().equals(processType) && v < p.getVersion()) { v = p.getVersion(); q = p; } } return q; } // AH: public void setProcessConfLoader(ProcessConfLoader loader) { processConfLoader = loader; } public ProcessConfLoader getProcessConfLoader() { return 
processConfLoader; } public void setProcessRegistry(ProcessRegistry registry) { this.processRegistry = registry; } public ProcessRegistry getProcessRegistry() { return processRegistry; } public void setFragmentCompositionEventBroker( FragmentCompositionEventBroker fcEventBroker) { this.fcEventBroker = fcEventBroker; } public FragmentCompositionEventBroker getFragmentCompositionEventBroker() { return fcEventBroker; } public void setBpelDatabase(BpelDatabase db) { this.db = db; } public FCManagementDAO getFCManagementDAO() { return db.getConnection().getFCManagement(); } public DeploymentUnitNameGenerator getDeploymentUnitNameGenerator() { return deploymentUnitNameGenerator; } // AH: end }
TheRingbearer/HAWKS
ode/bpel-runtime/src/main/java/org/apache/ode/bpel/engine/BpelEngineImpl.java
Java
apache-2.0
39,004
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from struct import pack, unpack def db(v): return pack("<B", v) def dw(v): return pack("<H", v) def dd(v): return pack("<I", v) def dq(v): return pack("<Q", v) def rb(v): return unpack("<B", v[0])[0] def rw(v): return unpack("<H", v[:2])[0] def rd(v): return unpack("<I", v[:4])[0] def rq(v): return unpack("<Q", v[:8])[0]
google/google-ctf
2018/quals/re-basics/src/byteops.py
Python
apache-2.0
922
package storage import ( "bytes" "encoding/gob" "encoding/json" "fmt" "github.com/APTrust/exchange/models" "github.com/boltdb/bolt" "io" "strings" "time" ) const FILE_BUCKET = "files" const OBJ_BUCKET = "objects" // BoltDB represents a bolt database, which is a single-file key-value // store. Our validator uses this to track information about the files // inside a bag that we're validating. At a minimum, the validator // typically needs to track these pieces of information for each file: // the absolute path, the manifests' md5 digest, the manifest's sha256 // digest, the validator's calculated md5 digest, and the validator's // calculated sha256 digest. That can be a few hundred bytes of data // per file. APTrust ingest services will track more than that: about // 8-9 kilobytes of data per file. Multiply that by 100k or even // 1 million files in a bag, and that's too much to keep in memory. type BoltDB struct { db *bolt.DB filePath string } // NewBoltDB opens a bolt database, creating the DB file if it doesn't // already exist. The DB file is a key-value store that resides in a // single file on disk. func NewBoltDB(filePath string) (boltDB *BoltDB, err error) { db, err := bolt.Open(filePath, 0644, &bolt.Options{Timeout: 2 * time.Second}) if err == nil { boltDB = &BoltDB{ db: db, filePath: filePath, } err = boltDB.initBuckets() } return boltDB, err } // Initialize a default bucket for the bolt DB. Since we're creating // the DB for just one bag, and we know GenericFile identifiers within // the bag will be unique, we can put everything in one bucket. 
func (boltDB *BoltDB) initBuckets() error { err := boltDB.db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucketIfNotExists([]byte(FILE_BUCKET)) if err != nil { return fmt.Errorf("Error creating file bucket: %s", err) } _, err = tx.CreateBucketIfNotExists([]byte(OBJ_BUCKET)) if err != nil { return fmt.Errorf("Error creating object bucket: %s", err) } return nil }) return err } // FilePath returns the path to the bolt DB file. func (boltDB *BoltDB) FilePath() string { return boltDB.filePath } // Close closes the bolt database. func (boltDB *BoltDB) Close() { boltDB.db.Close() } // ObjectIdentifier returns the IntellectualObject.Identifier // for the object stored in this DB file. func (boltDB *BoltDB) ObjectIdentifier() string { key := make([]byte, 0) boltDB.db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(OBJ_BUCKET)) c := b.Cursor() key, _ = c.First() return nil }) return string(key) } // Save saves a value to the bolt database. func (boltDB *BoltDB) Save(key string, value interface{}) error { _, isIntelObj := value.(*models.IntellectualObject) bucketName := FILE_BUCKET if isIntelObj { bucketName = OBJ_BUCKET } var byteSlice []byte buf := bytes.NewBuffer(byteSlice) encoder := gob.NewEncoder(buf) err := encoder.Encode(value) if err == nil { err = boltDB.db.Update(func(tx *bolt.Tx) error { bucket := tx.Bucket([]byte(bucketName)) err := bucket.Put([]byte(key), buf.Bytes()) return err }) } return err } // GetIntellectualObject returns the IntellectualObject that matches // the specified key. This object will NOT include GenericFiles. // There may be tens of thousands of those, so you have to fetch // them individually. Param key is the IntellectualObject.Identifier. // If key is not found, this returns nil and no error. 
func (boltDB *BoltDB) GetIntellectualObject(key string) (*models.IntellectualObject, error) { var err error obj := &models.IntellectualObject{} err = boltDB.db.View(func(tx *bolt.Tx) error { bucket := tx.Bucket([]byte(OBJ_BUCKET)) value := bucket.Get([]byte(key)) if len(value) > 0 { buf := bytes.NewBuffer(value) decoder := gob.NewDecoder(buf) err = decoder.Decode(obj) } else { obj = nil } return err }) return obj, err } // GetGenericFile returns the GenericFile with the specified identifier. // The GenericFile will include checksums and events, if they are available. // Param key is the GenericFile.Identifier. If key is not found this returns // nil and no error. func (boltDB *BoltDB) GetGenericFile(key string) (*models.GenericFile, error) { var err error gf := &models.GenericFile{} err = boltDB.db.View(func(tx *bolt.Tx) error { bucket := tx.Bucket([]byte(FILE_BUCKET)) value := bucket.Get([]byte(key)) if len(value) > 0 { buf := bytes.NewBuffer(value) decoder := gob.NewDecoder(buf) err = decoder.Decode(gf) } else { gf = nil } return err }) return gf, err } // ForEach calls the specified function for each key in the database's // file bucket. func (boltDB *BoltDB) ForEach(fn func(k, v []byte) error) error { var err error return boltDB.db.View(func(tx *bolt.Tx) error { bucket := tx.Bucket([]byte(FILE_BUCKET)) err = bucket.ForEach(fn) if err != nil { return err } return nil }) } // FileIdentifiers returns a list of all keys in the database. func (boltDB *BoltDB) FileIdentifiers() []string { keys := make([]string, 0) boltDB.db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(FILE_BUCKET)) c := b.Cursor() for k, _ := c.First(); k != nil; k, _ = c.Next() { keys = append(keys, string(k)) } return nil }) return keys } // FileCount returns the number of GenericFiles stored in the database. 
func (boltDB *BoltDB) FileCount() int { count := 0 boltDB.db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(FILE_BUCKET)) c := b.Cursor() for k, _ := c.First(); k != nil; k, _ = c.Next() { count += 1 } return nil }) return count } // FileIdentifierBatch returns a list of GenericFile // identifiers from offset (zero-based) up to limit, // or end of list. func (boltDB *BoltDB) FileIdentifierBatch(offset, limit int) []string { if offset < 0 { offset = 0 } if limit < 0 { limit = 0 } index := 0 end := offset + limit keys := make([]string, 0) boltDB.db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(FILE_BUCKET)) c := b.Cursor() for k, _ := c.First(); k != nil; k, _ = c.Next() { if index >= offset && index < end { keys = append(keys, string(k)) } index++ } return nil }) return keys } // DumpJson writes all the records from the db into a single // JSON string. The output is the JSON representation of an // IntellectualObject with all of its GenericFiles (and Checksums // and PremisEvents, if there are any). func (boltDB *BoltDB) DumpJson(writer io.Writer) error { objIdentifier := boltDB.ObjectIdentifier() obj, err := boltDB.GetIntellectualObject(objIdentifier) if err != nil { return fmt.Errorf("Can't get object from db: %v", err) } objBytes, err := json.MarshalIndent(obj, "", " ") if err != nil { return fmt.Errorf("Can't convert object to JSON: %v", err) } objJson := strings.TrimSpace(string(objBytes)) // Catch case of null object. This happens if the bag was not // parsable. if objJson == "null" { objJson = `{ "identifier": "The bag could not be parsed" ` } // Normally, we'd just add the generic files to the object // and serialize the whole thing, but when we have 200k files, // that causes an out-of-memory exception. So this hack... // Cut off the closing curly bracket, dump in the GenericFiles // one by one, and then re-add the curly bracket. 
objJson = objJson[:len(objJson)-2] + ",\n" objJson += ` "generic_files": [` _, err = writer.Write([]byte(objJson)) if err != nil { return fmt.Errorf("Error writing output: %v", err) } // Write out the GenericFiles one by one, without reading them // all into memory. count := 0 err = boltDB.ForEach(func(k, v []byte) error { if string(k) != objIdentifier { gf := &models.GenericFile{} buf := bytes.NewBuffer(v) decoder := gob.NewDecoder(buf) err = decoder.Decode(gf) if err != nil { return fmt.Errorf("Error reading GenericFile from DB: %v", err) } gfBytes, err := json.MarshalIndent(gf, " ", " ") if err != nil { return fmt.Errorf("Can't convert generic file to JSON: %v", err) } if count > 0 { writer.Write([]byte(",\n ")) } writer.Write(gfBytes) count++ } return nil }) // Close up the JSON writer.Write([]byte("\n ]\n}\n")) return err }
APTrust/exchange
util/storage/boltdb.go
GO
apache-2.0
8,253
/* * JBoss, Home of Professional Open Source * Copyright 2012, Red Hat, Inc., and individual contributors * by the @authors tag. See the copyright.txt in the distribution for a * full listing of individual contributors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jboss.weld.tests.instance.destroy.normal; import jakarta.enterprise.context.spi.AlterableContext; import jakarta.enterprise.context.spi.Contextual; public class CustomAlterableContext extends CustomNonAlterableContext implements AlterableContext { private static boolean destroyCalled; @Override public void destroy(Contextual<?> contextual) { destroyCalled = true; super.destroy(contextual); } public static void reset() { destroyCalled = false; } public static boolean isDestroyCalled() { return destroyCalled; } }
weld/core
tests-arquillian/src/test/java/org/jboss/weld/tests/instance/destroy/normal/CustomAlterableContext.java
Java
apache-2.0
1,375
package net.savantly.sprout.starter.security.permissions; import java.util.ArrayList; import java.util.List; import lombok.Getter; import lombok.Setter; @Getter @Setter public class PermissionsHolder { private List<BootstrapPermission> permissions = new ArrayList<>(); }
savantly-net/sprout-platform
backend/starters/sprout-spring-boot-starter/src/main/java/net/savantly/sprout/starter/security/permissions/PermissionsHolder.java
Java
apache-2.0
276
#include <string> #include <iostream> #include <sstream> #include <memory> #include <iomanip> #include <fstream> #include <cstring> using namespace std; #define k 7919 #define Hsize 1009 #define a 321 #define b 43112 #define BLANK " " #define MIN_TABLE_SIZE 100 /* * Node Declaration */ struct HashNode { int element; enum EntryType info; }; /* * Table Declaration */ struct HashTable { int size; HashNode *table; }; /* * Function to Genereate First Hash */ int HashFunc1(int key, int size) { // to be implemented return 0; } /* * Function to Genereate Second Hash */ int HashFunc2(int key, int size) { // to be implemented return 0; } /* * Function to Initialize Table */ void Retrieve(HashTable *htable) { for (int i = 0; i < htable->size; i++) { int value = htable->table[i].element; if (!value) cout<<"Position: "<<i + 1<<" Element: Null"<<endl; else cout<<"Position: "<<i + 1<<" Element: "<<value<<endl; } } long UniversalFunction(string text) { int i; long res = 0; long M = (Hsize * k); int s=text.size(); for(int i = s-1; i >= 0; i--) { res = a * (res * 256 + (int)text[i]); res=res % M; } long res1 = (res + b) / k; return res1; } int main() { cout << "ello" << endl; cout << UniversalFunction("bob the builder is cool") << endl; return 0; }
Stivens73/2016_hash_project
jimmy.cpp
C++
apache-2.0
1,385
/** * @license Apache-2.0 * * Copyright (c) 2018 The Stdlib Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ 'use strict'; /** * Logistic distribution cumulative distribution function (CDF). * * @module @stdlib/stats/base/dists/logistic/cdf * * @example * var cdf = require( '@stdlib/stats/base/dists/logistic/cdf' ); * * var y = cdf( 2.0, 0.0, 1.0 ); * // returns ~0.881 * * var mycdf = cdf.factory( 3.0, 1.5 ); * * y = mycdf( 1.0 ); * // returns ~0.209 */ // MODULES // var setReadOnly = require( '@stdlib/utils/define-nonenumerable-read-only-property' ); var cdf = require( './cdf.js' ); var factory = require( './factory.js' ); // MAIN // setReadOnly( cdf, 'factory', factory ); // EXPORTS // module.exports = cdf;
stdlib-js/stdlib
lib/node_modules/@stdlib/stats/base/dists/logistic/cdf/lib/index.js
JavaScript
apache-2.0
1,237
package com.inari.glue.impl; public class GlueServiceImplTest { }
Inari-Soft/inari-glue
src/test/java/com/inari/glue/impl/GlueServiceImplTest.java
Java
apache-2.0
68