code stringlengths 3 1.01M | repo_name stringlengths 5 116 | path stringlengths 3 311 | language stringclasses 30
values | license stringclasses 15
values | size int64 3 1.01M |
|---|---|---|---|---|---|
<!--
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- [START picker_hello_world] -->
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta charset="utf-8" />
<title>Google Picker Example</title>
<script type="text/javascript">
// The Browser API key obtained from the Google API Console.
// Replace this placeholder with your own Browser API key.
var developerKey = 'xxxxxxxYYYYYYYY-12345678';

// The OAuth 2.0 Client ID obtained from the Google API Console.
// Replace with your own Client ID.
var clientId = "1234567890-abcdefghijklmnopqrstuvwxyz.apps.googleusercontent.com"

// Replace with your own project number from console.developers.google.com.
// See "Project number" under "IAM & Admin" > "Settings"
var appId = "1234567890";

// OAuth scope used to access the user's Drive items.
var scope = ['https://www.googleapis.com/auth/drive.file'];

// Set to true once the google.picker module has finished loading.
var pickerApiLoaded = false;

// OAuth 2.0 access token; populated after a successful authorization.
var oauthToken;
// Entry point invoked by the Google API Loader script (?onload=loadPicker
// on the api.js script tag at the bottom of this page). Loads the auth and
// picker modules; each fires its callback when ready.
function loadPicker() {
  gapi.load('auth', {'callback': onAuthApiLoad});
  gapi.load('picker', {'callback': onPickerApiLoad});
}
// Starts a (non-immediate, i.e. with user prompt) OAuth 2.0 authorization
// flow for the Drive scope; handleAuthResult receives the outcome.
function onAuthApiLoad() {
  window.gapi.auth.authorize(
      {
        'client_id': clientId,
        'scope': scope,
        'immediate': false
      },
      handleAuthResult);
}
// Marks the picker module as ready and tries to show the picker
// (createPicker is a no-op until an OAuth token is also available).
function onPickerApiLoad() {
  pickerApiLoaded = true;
  createPicker();
}
// Stores the OAuth access token after a successful authorization and tries
// to show the picker (it only appears once the picker API has loaded too).
// Failed or cancelled authorizations are silently ignored.
function handleAuthResult(authResult) {
  var authFailed = !authResult || authResult.error;
  if (authFailed) {
    return;
  }
  oauthToken = authResult.access_token;
  createPicker();
}
// Create and render a Picker object for searching images.
// Requires both the picker API to be loaded and an OAuth token to be
// present; otherwise this is a no-op (it is retried from both callbacks).
function createPicker() {
  if (pickerApiLoaded && oauthToken) {
    // Restrict the Drive docs view to image MIME types.
    var view = new google.picker.View(google.picker.ViewId.DOCS);
    view.setMimeTypes("image/png,image/jpeg,image/jpg");
    var picker = new google.picker.PickerBuilder()
        .enableFeature(google.picker.Feature.NAV_HIDDEN)
        .enableFeature(google.picker.Feature.MULTISELECT_ENABLED)
        .setAppId(appId)
        .setOAuthToken(oauthToken)
        .addView(view)
        .addView(new google.picker.DocsUploadView()) // also allow uploads
        .setDeveloperKey(developerKey)
        .setCallback(pickerCallback)
        .build();
    picker.setVisible(true);
  }
}
// Simple picker callback: once the user confirms a selection, show the ID
// of the first picked document. All other picker actions are ignored.
function pickerCallback(data) {
  if (data.action != google.picker.Action.PICKED) {
    return;
  }
  var pickedDoc = data.docs[0];
  alert('The user selected: ' + pickedDoc.id);
}
</script>
</head>
<body>
<div id="result"></div>
<!-- The Google API Loader script. -->
<script type="text/javascript" src="https://apis.google.com/js/api.js?onload=loadPicker"></script>
</body>
</html>
<!-- [END picker_hello_world] -->
| googleworkspace/browser-samples | drive/picker/helloworld.html | HTML | apache-2.0 | 3,433 |
package examples.Bricklet.Moisture;
import com.tinkerforge.BrickletMoisture;
import com.tinkerforge.IPConnection;
/**
 * Minimal example: connects to brickd, reads the current moisture value from
 * a Moisture Bricklet once, prints it, and waits for user input before
 * disconnecting.
 */
public class ExampleSimple {
    private static final String host = "localhost";
    private static final int port = 4223;
    private static final String UID = "XYZ"; // Change to your UID

    // Note: To make the examples code cleaner we do not handle exceptions. Exceptions you
    // might normally want to catch are described in the documentation
    public static void main(String args[]) throws Exception {
        IPConnection ipcon = new IPConnection(); // Create IP connection
        // Renamed from "al" (copy-paste from the AmbientLight example) for clarity.
        BrickletMoisture moistureBricklet = new BrickletMoisture(UID, ipcon); // Create device object

        ipcon.connect(host, port); // Connect to brickd
        // Don't use device before ipcon is connected

        // Get current moisture value
        int moisture = moistureBricklet.getMoistureValue(); // Can throw com.tinkerforge.TimeoutException
        System.out.println("Moisture Value: " + moisture);

        // System.console() returns null when no interactive console is attached
        // (e.g. when launched from an IDE or with redirected stdin); guard to
        // avoid a NullPointerException and fall back to reading stdin.
        if (System.console() != null) {
            System.console().readLine("Press key to exit\n");
        } else {
            System.out.println("Press enter to exit");
            System.in.read();
        }
        ipcon.disconnect();
    }
}
| jaggr2/ch.bfh.fbi.mobiComp.17herz | com.tinkerforge/src/examples/Bricklet/Moisture/ExampleSimple.java | Java | apache-2.0 | 1,020 |
/**
* Copyright 2015 LinkedIn Corp. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package wherehows.ingestion.converters;
import com.linkedin.events.metadata.DatasetIdentifier;
import com.linkedin.events.metadata.DeploymentDetail;
import com.linkedin.events.metadata.MetadataChangeEvent;
import java.util.Collections;
import org.testng.annotations.Test;
import static org.testng.Assert.*;
/**
 * Unit tests for {@code KafkaLogCompactionConverter}, which (per these
 * assertions) rewrites the platform urn of kafka datasets whose deployment
 * is marked "compact" to "urn:li:dataPlatform:kafka-lc".
 */
public class KafkaLogCompactionConverterTest {

    /** A kafka dataset marked "compact" is converted to the "kafka-lc" urn. */
    @Test
    public void testConvert() {
        MetadataChangeEvent event = new MetadataChangeEvent();
        event.datasetIdentifier = new DatasetIdentifier();
        event.datasetIdentifier.dataPlatformUrn = "urn:li:dataPlatform:kafka";

        DeploymentDetail deployment = new DeploymentDetail();
        deployment.additionalDeploymentInfo = Collections.singletonMap("EI", "compact");
        event.deploymentInfo = Collections.singletonList(deployment);

        MetadataChangeEvent newEvent = new KafkaLogCompactionConverter().convert(event);
        assertEquals(newEvent.datasetIdentifier.dataPlatformUrn, "urn:li:dataPlatform:kafka-lc");
    }

    /**
     * The converter leaves the urn untouched when the platform is not kafka,
     * when there is no deployment info, or when the deployment is not marked
     * "compact".
     */
    @Test
    public void testNotConvert() {
        KafkaLogCompactionConverter converter = new KafkaLogCompactionConverter();

        // Non-kafka platform: urn unchanged.
        MetadataChangeEvent event = new MetadataChangeEvent();
        event.datasetIdentifier = new DatasetIdentifier();
        event.datasetIdentifier.dataPlatformUrn = "foo";
        DeploymentDetail deployment = new DeploymentDetail();
        deployment.additionalDeploymentInfo = Collections.singletonMap("EI", "compact");
        event.deploymentInfo = Collections.singletonList(deployment);
        MetadataChangeEvent newEvent = converter.convert(event);
        assertEquals(newEvent.datasetIdentifier.dataPlatformUrn, "foo");

        // Kafka platform but no deployment info: urn unchanged.
        event.datasetIdentifier.dataPlatformUrn = "urn:li:dataPlatform:kafka";
        event.deploymentInfo = null;
        newEvent = converter.convert(event);
        assertEquals(newEvent.datasetIdentifier.dataPlatformUrn, "urn:li:dataPlatform:kafka");

        // Kafka platform but deployment marked "delete" instead of "compact": urn unchanged.
        event.datasetIdentifier.dataPlatformUrn = "urn:li:dataPlatform:kafka";
        deployment.additionalDeploymentInfo = Collections.singletonMap("EI", "delete");
        event.deploymentInfo = Collections.singletonList(deployment);
        newEvent = converter.convert(event);
        assertEquals(newEvent.datasetIdentifier.dataPlatformUrn, "urn:li:dataPlatform:kafka");
    }
}
| alyiwang/WhereHows | wherehows-ingestion/src/test/java/wherehows/ingestion/converters/KafkaLogCompactionConverterTest.java | Java | apache-2.0 | 2,728 |
/* Copyright (C) 2013 Interactive Brokers LLC. All rights reserved. This code is subject to the terms
* and conditions of the IB API Non-Commercial License or the IB API Commercial License, as applicable. */
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace IBApi
{
/// <summary>
/// Catalog of client-side error code/message pairs (codes 501-549) raised by
/// the socket client itself, plus FAIL_GENERIC (-1) for requests that must
/// supply their own specific message.
/// </summary>
public class EClientErrors
{
    public static readonly CodeMsgPair AlreadyConnected = new CodeMsgPair(501, "Already Connected.");
    public static readonly CodeMsgPair CONNECT_FAIL = new CodeMsgPair(502, "Couldn't connect to TWS. Confirm that \"Enable ActiveX and Socket Clients\" is enabled on the TWS \"Configure->API\" menu.");
    public static readonly CodeMsgPair UPDATE_TWS = new CodeMsgPair(503, "The TWS is out of date and must be upgraded.");
    public static readonly CodeMsgPair NOT_CONNECTED = new CodeMsgPair(504, "Not connected");
    public static readonly CodeMsgPair UNKNOWN_ID = new CodeMsgPair(505, "Fatal Error: Unknown message id.");
    public static readonly CodeMsgPair FAIL_SEND_REQMKT = new CodeMsgPair(510, "Request Market Data Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_CANMKT = new CodeMsgPair(511, "Cancel Market Data Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_ORDER = new CodeMsgPair(512, "Order Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_ACCT = new CodeMsgPair(513, "Account Update Request Sending Error -");
    public static readonly CodeMsgPair FAIL_SEND_EXEC = new CodeMsgPair(514, "Request For Executions Sending Error -");
    public static readonly CodeMsgPair FAIL_SEND_CORDER = new CodeMsgPair(515, "Cancel Order Sending Error -");
    public static readonly CodeMsgPair FAIL_SEND_OORDER = new CodeMsgPair(516, "Request Open Order Sending Error -");
    public static readonly CodeMsgPair UNKNOWN_CONTRACT = new CodeMsgPair(517, "Unknown contract. Verify the contract details supplied.");
    public static readonly CodeMsgPair FAIL_SEND_REQCONTRACT = new CodeMsgPair(518, "Request Contract Data Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_REQMKTDEPTH = new CodeMsgPair(519, "Request Market Depth Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_CANMKTDEPTH = new CodeMsgPair(520, "Cancel Market Depth Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_SERVER_LOG_LEVEL = new CodeMsgPair(521, "Set Server Log Level Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_FA_REQUEST = new CodeMsgPair(522, "FA Information Request Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_FA_REPLACE = new CodeMsgPair(523, "FA Information Replace Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_REQSCANNER = new CodeMsgPair(524, "Request Scanner Subscription Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_CANSCANNER = new CodeMsgPair(525, "Cancel Scanner Subscription Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_REQSCANNERPARAMETERS = new CodeMsgPair(526, "Request Scanner Parameter Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_REQHISTDATA = new CodeMsgPair(527, "Request Historical Data Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_CANHISTDATA = new CodeMsgPair(528, "Request Historical Data Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_REQRTBARS = new CodeMsgPair(529, "Request Real-time Bar Data Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_CANRTBARS = new CodeMsgPair(530, "Cancel Real-time Bar Data Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_REQCURRTIME = new CodeMsgPair(531, "Request Current Time Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_REQFUNDDATA = new CodeMsgPair(532, "Request Fundamental Data Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_CANFUNDDATA = new CodeMsgPair(533, "Cancel Fundamental Data Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_REQCALCIMPLIEDVOLAT = new CodeMsgPair(534, "Request Calculate Implied Volatility Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_REQCALCOPTIONPRICE = new CodeMsgPair(535, "Request Calculate Option Price Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_CANCALCIMPLIEDVOLAT = new CodeMsgPair(536, "Cancel Calculate Implied Volatility Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_CANCALCOPTIONPRICE = new CodeMsgPair(537, "Cancel Calculate Option Price Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_REQGLOBALCANCEL = new CodeMsgPair(538, "Request Global Cancel Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_REQMARKETDATATYPE = new CodeMsgPair(539, "Request Market Data Type Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_REQPOSITIONS = new CodeMsgPair(540, "Request Positions Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_CANPOSITIONS = new CodeMsgPair(541, "Cancel Positions Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_REQACCOUNTDATA = new CodeMsgPair(542, "Request Account Data Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_CANACCOUNTDATA = new CodeMsgPair(543, "Cancel Account Data Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_VERIFYREQUEST = new CodeMsgPair(544, "Verify Request Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_VERIFYMESSAGE = new CodeMsgPair(545, "Verify Message Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_QUERYDISPLAYGROUPS = new CodeMsgPair(546, "Query Display Groups Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_SUBSCRIBETOGROUPEVENTS = new CodeMsgPair(547, "Subscribe To Group Events Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_UPDATEDISPLAYGROUP = new CodeMsgPair(548, "Update Display Group Sending Error - ");
    public static readonly CodeMsgPair FAIL_SEND_UNSUBSCRIBEFROMGROUPEVENTS = new CodeMsgPair(549, "Unsubscribe From Group Events Sending Error - ");
    public static readonly CodeMsgPair FAIL_GENERIC = new CodeMsgPair(-1, "Specific error message needs to be given for these requests! ");
}
/// <summary>
/// Immutable pairing of a numeric client error code with its descriptive
/// message text.
/// </summary>
public class CodeMsgPair
{
    private readonly int code;
    private readonly string message;

    /// <summary>Numeric error code (e.g. 501-549, or -1 for generic).</summary>
    public int Code
    {
        get { return this.code; }
    }

    /// <summary>Human-readable error message.</summary>
    public string Message
    {
        get { return this.message; }
    }

    public CodeMsgPair(int code, string message)
    {
        this.code = code;
        this.message = message;
    }
}
}
| AvengersPy/MyPairs | testIBPython/csharpclient/EClientErrors.cs | C# | apache-2.0 | 6,913 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.util;
import org.apache.camel.ContextTestSupport;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.model.ModelHelper;
/**
*
*/
/**
 * Tests that a route containing a choice with a nested filter can be dumped
 * to XML and that the header and simple expressions survive the round trip.
 */
public class DumpModelAsXmlChoiceFilterRouteTest extends ContextTestSupport {

    public void testDumpModelAsXml() throws Exception {
        String xml = ModelHelper.dumpModelAsXml(context.getRouteDefinition("myRoute"));
        assertNotNull(xml);
        log.info(xml);

        // Expressions configured in the route below must appear in the dumped XML.
        assertTrue(xml.contains("<header>gold</header>"));
        assertTrue(xml.contains("<header>extra-gold</header>"));
        assertTrue(xml.contains("<simple>${body} contains Camel</simple>"));
    }

    @Override
    protected RouteBuilder createRouteBuilder() throws Exception {
        return new RouteBuilder() {
            @Override
            public void configure() throws Exception {
                // Route under test: a choice whose first branch contains a
                // nested filter (the combination being round-tripped to XML).
                from("direct:start").routeId("myRoute")
                    .to("log:input")
                    .choice()
                        .when().header("gold")
                            .to("mock:gold")
                            .filter().header("extra-gold")
                                .to("mock:extra-gold")
                            .endChoice()
                        .when().simple("${body} contains Camel")
                            .to("mock:camel")
                        .otherwise()
                            .to("mock:other")
                    .end()
                    .to("mock:result");
            }
        };
    }
}
| engagepoint/camel | camel-core/src/test/java/org/apache/camel/util/DumpModelAsXmlChoiceFilterRouteTest.java | Java | apache-2.0 | 2,322 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.fineract.template.api;
import java.io.IOException;
import java.net.MalformedURLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.MultivaluedMap;
import javax.ws.rs.core.UriInfo;
import org.apache.fineract.commands.domain.CommandWrapper;
import org.apache.fineract.commands.service.CommandWrapperBuilder;
import org.apache.fineract.commands.service.PortfolioCommandSourceWritePlatformService;
import org.apache.fineract.infrastructure.core.api.ApiRequestParameterHelper;
import org.apache.fineract.infrastructure.core.data.CommandProcessingResult;
import org.apache.fineract.infrastructure.core.serialization.ApiRequestJsonSerializationSettings;
import org.apache.fineract.infrastructure.core.serialization.DefaultToApiJsonSerializer;
import org.apache.fineract.infrastructure.security.service.PlatformSecurityContext;
import org.apache.fineract.template.data.TemplateData;
import org.apache.fineract.template.domain.Template;
import org.apache.fineract.template.domain.TemplateEntity;
import org.apache.fineract.template.domain.TemplateType;
import org.apache.fineract.template.service.TemplateDomainService;
import org.apache.fineract.template.service.TemplateMergeService;
import org.codehaus.jackson.map.ObjectMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Component;
@Path("/templates")
@Consumes({ MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_JSON })
@Component
@Scope("singleton")
public class TemplatesApiResource {
private final Set<String> RESPONSE_TEMPLATES_DATA_PARAMETERS = new HashSet<>(Arrays.asList("id"));
private final Set<String> RESPONSE_TEMPLATE_DATA_PARAMETERS = new HashSet<>(Arrays.asList("id", "entities", "types", "template"));
private final String RESOURCE_NAME_FOR_PERMISSION = "template";
private final PlatformSecurityContext context;
private final DefaultToApiJsonSerializer<Template> toApiJsonSerializer;
private final DefaultToApiJsonSerializer<TemplateData> templateDataApiJsonSerializer;
private final ApiRequestParameterHelper apiRequestParameterHelper;
private final TemplateDomainService templateService;
private final TemplateMergeService templateMergeService;
private final PortfolioCommandSourceWritePlatformService commandsSourceWritePlatformService;
@Autowired
public TemplatesApiResource(final PlatformSecurityContext context, final DefaultToApiJsonSerializer<Template> toApiJsonSerializer,
final DefaultToApiJsonSerializer<TemplateData> templateDataApiJsonSerializer,
final ApiRequestParameterHelper apiRequestParameterHelper, final TemplateDomainService templateService,
final TemplateMergeService templateMergeService,
final PortfolioCommandSourceWritePlatformService commandsSourceWritePlatformService) {
this.context = context;
this.toApiJsonSerializer = toApiJsonSerializer;
this.templateDataApiJsonSerializer = templateDataApiJsonSerializer;
this.apiRequestParameterHelper = apiRequestParameterHelper;
this.templateService = templateService;
this.templateMergeService = templateMergeService;
this.commandsSourceWritePlatformService = commandsSourceWritePlatformService;
}
@GET
public String retrieveAll(@DefaultValue("-1") @QueryParam("typeId") final int typeId,
@DefaultValue("-1") @QueryParam("entityId") final int entityId, @Context final UriInfo uriInfo) {
this.context.authenticatedUser().validateHasReadPermission(this.RESOURCE_NAME_FOR_PERMISSION);
// FIXME - we dont use the ORM when doing fetches - we write SQL and
// fetch through JDBC returning data to be serialized to JSON
List<Template> templates = new ArrayList<>();
if (typeId != -1 && entityId != -1) {
templates = this.templateService.getAllByEntityAndType(TemplateEntity.values()[entityId], TemplateType.values()[typeId]);
} else {
templates = this.templateService.getAll();
}
final ApiRequestJsonSerializationSettings settings = this.apiRequestParameterHelper.process(uriInfo.getQueryParameters());
return this.toApiJsonSerializer.serialize(settings, templates, this.RESPONSE_TEMPLATES_DATA_PARAMETERS);
}
@GET
@Path("template")
public String template(@Context final UriInfo uriInfo) {
this.context.authenticatedUser().validateHasReadPermission(this.RESOURCE_NAME_FOR_PERMISSION);
final TemplateData templateData = TemplateData.template();
final ApiRequestJsonSerializationSettings settings = this.apiRequestParameterHelper.process(uriInfo.getQueryParameters());
return this.templateDataApiJsonSerializer.serialize(settings, templateData, this.RESPONSE_TEMPLATES_DATA_PARAMETERS);
}
@POST
public String createTemplate(final String apiRequestBodyAsJson) {
final CommandWrapper commandRequest = new CommandWrapperBuilder().createTemplate().withJson(apiRequestBodyAsJson).build();
final CommandProcessingResult result = this.commandsSourceWritePlatformService.logCommandSource(commandRequest);
return this.toApiJsonSerializer.serialize(result);
}
@GET
@Path("{templateId}")
public String retrieveOne(@PathParam("templateId") final Long templateId, @Context final UriInfo uriInfo) {
this.context.authenticatedUser().validateHasReadPermission(this.RESOURCE_NAME_FOR_PERMISSION);
final Template template = this.templateService.findOneById(templateId);
final ApiRequestJsonSerializationSettings settings = this.apiRequestParameterHelper.process(uriInfo.getQueryParameters());
return this.toApiJsonSerializer.serialize(settings, template, this.RESPONSE_TEMPLATES_DATA_PARAMETERS);
}
@GET
@Path("{templateId}/template")
public String getTemplateByTemplate(@PathParam("templateId") final Long templateId, @Context final UriInfo uriInfo) {
this.context.authenticatedUser().validateHasReadPermission(this.RESOURCE_NAME_FOR_PERMISSION);
final TemplateData template = TemplateData.template(this.templateService.findOneById(templateId));
final ApiRequestJsonSerializationSettings settings = this.apiRequestParameterHelper.process(uriInfo.getQueryParameters());
return this.templateDataApiJsonSerializer.serialize(settings, template, this.RESPONSE_TEMPLATE_DATA_PARAMETERS);
}
@PUT
@Path("{templateId}")
public String saveTemplate(@PathParam("templateId") final Long templateId, final String apiRequestBodyAsJson) {
final CommandWrapper commandRequest = new CommandWrapperBuilder().updateTemplate(templateId).withJson(apiRequestBodyAsJson).build();
final CommandProcessingResult result = this.commandsSourceWritePlatformService.logCommandSource(commandRequest);
return this.toApiJsonSerializer.serialize(result);
}
@DELETE
@Path("{templateId}")
public String deleteTemplate(@PathParam("templateId") final Long templateId) {
final CommandWrapper commandRequest = new CommandWrapperBuilder().deleteTemplate(templateId).build();
final CommandProcessingResult result = this.commandsSourceWritePlatformService.logCommandSource(commandRequest);
return this.toApiJsonSerializer.serialize(result);
}
@POST
@Path("{templateId}")
@Produces({ MediaType.TEXT_HTML })
public String mergeTemplate(@PathParam("templateId") final Long templateId, @Context final UriInfo uriInfo,
final String apiRequestBodyAsJson) throws MalformedURLException, IOException {
final Template template = this.templateService.findOneById(templateId);
@SuppressWarnings("unchecked")
final HashMap<String, Object> result = new ObjectMapper().readValue(apiRequestBodyAsJson, HashMap.class);
final MultivaluedMap<String, String> parameters = uriInfo.getQueryParameters();
final Map<String, Object> parametersMap = new HashMap<>();
for (final Map.Entry<String, List<String>> entry : parameters.entrySet()) {
if (entry.getValue().size() == 1) {
parametersMap.put(entry.getKey(), entry.getValue().get(0));
} else {
parametersMap.put(entry.getKey(), entry.getValue());
}
}
parametersMap.put("BASE_URI", uriInfo.getBaseUri());
parametersMap.putAll(result);
return this.templateMergeService.compile(template, parametersMap);
}
} | RanjithKumar5550/RanMifos | fineract-provider/src/main/java/org/apache/fineract/template/api/TemplatesApiResource.java | Java | apache-2.0 | 9,902 |
#import <Foundation/Foundation.h>
#import "WXApmProtocol.h"
// Concrete adopter of the WXApmProtocol APM (application performance
// monitoring) interface; declares no API beyond the protocol itself —
// see the corresponding implementation file for behavior.
@interface WXApmImpl : NSObject <WXApmProtocol>
@end
| Hanks10100/incubator-weex | ios/playground/WeexDemo/extend/handler/WXApmImpl.h | C | apache-2.0 | 115 |
/*
* Copyright (C) 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#if ENABLE(DFG_JIT)
#include "ObjectPropertyCondition.h"
#include "Watchpoint.h"
namespace JSC { namespace DFG {
// Watchpoint keyed on an ObjectPropertyCondition and associated with a
// CodeBlock. fireInternal() (defined in the .cpp) runs when the watchpoint
// fires — presumably to invalidate code depending on the condition; confirm
// in the implementation.
class AdaptiveStructureWatchpoint : public Watchpoint {
public:
    AdaptiveStructureWatchpoint(const ObjectPropertyCondition&, CodeBlock*);

    // The property condition this watchpoint tracks.
    const ObjectPropertyCondition& key() const { return m_key; }

    // Registers this watchpoint so that fireInternal() can be triggered.
    void install();

protected:
    void fireInternal(const FireDetail&) override;

private:
    ObjectPropertyCondition m_key;
    CodeBlock* m_codeBlock;
};
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
| alibaba/weex | weex_core/Source/include/JavaScriptCore/dfg/DFGAdaptiveStructureWatchpoint.h | C | apache-2.0 | 1,892 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.jsmpp.bean;
/**
* This is simple DataCoding. Only contains Alphabet (DEFAULT and 8-bit) and
* Message Class.
*
* @author uudashr
*
*/
public class SimpleDataCoding implements DataCoding {
    // Both fields are guaranteed non-null by the constructors.
    private final Alphabet alphabet;
    private final MessageClass messageClass;

    /**
     * Construct Data Coding using default Alphabet and
     * {@link MessageClass#CLASS1} Message Class.
     */
    public SimpleDataCoding() {
        this(Alphabet.ALPHA_DEFAULT, MessageClass.CLASS1);
    }

    /**
     * Construct Data Coding using specified Alphabet and Message Class.
     *
     * @param alphabet is the alphabet. Only support
     *        {@link Alphabet#ALPHA_DEFAULT} and {@link Alphabet#ALPHA_8_BIT}.
     * @param messageClass is the message class.
     * @throws IllegalArgumentException if alphabet is <tt>null</tt>, is
     *         {@link Alphabet#ALPHA_UCS2} or a reserved alphabet code, or if
     *         <code>messageClass</code> is null.
     */
    public SimpleDataCoding(Alphabet alphabet, MessageClass messageClass) throws IllegalArgumentException {
        if (alphabet == null) {
            throw new IllegalArgumentException(
                    "Alphabet is mandatory, can't be null");
        }
        if (alphabet.equals(Alphabet.ALPHA_UCS2)
                || alphabet.isReserved()) {
            throw new IllegalArgumentException(
                    "Supported alphabet for SimpleDataCoding does not include "
                            + Alphabet.ALPHA_UCS2 + " or "
                            + "reserved alphabet codes. Current alphabet is " + alphabet);
        }
        if (messageClass == null) {
            throw new IllegalArgumentException(
                    "MessageClass is mandatory, can't be null");
        }
        this.alphabet = alphabet;
        this.messageClass = messageClass;
    }

    public Alphabet getAlphabet() {
        return alphabet;
    }

    public MessageClass getMessageClass() {
        return messageClass;
    }

    /**
     * Encodes this data coding as a single octet: base bits 11110xxx (0xf0)
     * with the alphabet and message-class bits merged in.
     */
    public byte toByte() {
        // base byte is 11110xxx or 0xf0, others injected
        byte value = (byte) 0xf0;
        value |= alphabet.value();
        value |= messageClass.value();
        return value;
    }

    @Override
    public int hashCode() {
        // Fields are never null (enforced by the constructor), so the null
        // guards of the original generated hashCode were dead code; this
        // produces identical values.
        final int prime = 31;
        int result = 1;
        result = prime * result + alphabet.hashCode();
        result = prime * result + messageClass.hashCode();
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (obj == null || getClass() != obj.getClass()) {
            return false;
        }
        // Fields are never null, so the generated null branches were dead
        // code and are omitted; comparison semantics are unchanged.
        SimpleDataCoding other = (SimpleDataCoding) obj;
        return alphabet.equals(other.alphabet)
                && messageClass.equals(other.messageClass);
    }

    @Override
    public String toString() {
        // Mask to 0xff so the octet prints as an unsigned value (0-255).
        return "DataCoding:" + (0xff & toByte());
    }
}
| amdtelecom/jsmpp | jsmpp/src/main/java/org/jsmpp/bean/SimpleDataCoding.java | Java | apache-2.0 | 3,967 |
/*
File: CASharedLibrary.h
Abstract: Part of CoreAudio Utility Classes
Version: 1.0.3
Disclaimer: IMPORTANT: This Apple software is supplied to you by Apple
Inc. ("Apple") in consideration of your agreement to the following
terms, and your use, installation, modification or redistribution of
this Apple software constitutes acceptance of these terms. If you do
not agree with these terms, please do not use, install, modify or
redistribute this Apple software.
In consideration of your agreement to abide by the following terms, and
subject to these terms, Apple grants you a personal, non-exclusive
license, under Apple's copyrights in this original Apple software (the
"Apple Software"), to use, reproduce, modify and redistribute the Apple
Software, with or without modifications, in source and/or binary forms;
provided that if you redistribute the Apple Software in its entirety and
without modifications, you must retain this notice and the following
text and disclaimers in all such redistributions of the Apple Software.
Neither the name, trademarks, service marks or logos of Apple Inc. may
be used to endorse or promote products derived from the Apple Software
without specific prior written permission from Apple. Except as
expressly stated in this notice, no other rights or licenses, express or
implied, are granted by Apple herein, including but not limited to any
patent rights that may be infringed by your derivative works or by other
works in which the Apple Software may be incorporated.
The Apple Software is provided by Apple on an "AS IS" basis. APPLE
MAKES NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND
OPERATION ALONE OR IN COMBINATION WITH YOUR PRODUCTS.
IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION,
MODIFICATION AND/OR DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED
AND WHETHER UNDER THEORY OF CONTRACT, TORT (INCLUDING NEGLIGENCE),
STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Copyright (C) 2013 Apple Inc. All Rights Reserved.
*/
#if !defined(__CASharedLibrary_h__)
#define __CASharedLibrary_h__
//=============================================================================
// CASharedLibrary
//
// Static helpers for resolving routine addresses in dynamic (shared)
// libraries by name. Only the declarations are visible here; the behavior
// notes below are inferred from the names — confirm against the
// corresponding CASharedLibrary implementation file.
//=============================================================================
class CASharedLibrary
{
// Symbol Operations
public:
    // Presumably loads the library named inLibraryName (searched for at
    // inLibraryPath) if it is not already loaded, then returns the address
    // of the routine inRoutineName — TODO confirm in the implementation,
    // including what is returned on failure (likely NULL).
    static void* LoadLibraryAndGetRoutineAddress(const char* inRoutineName, const char* inLibraryName, const char* inLibraryPath);

    // NOTE(review): name suggests this resolves inRoutineName only when the
    // library is already loaded and never triggers a load itself — verify
    // against the implementation.
    static void* GetRoutineAddressIfLibraryLoaded(const char* inRoutineName, const char* inLibraryName, const char* inLibraryPath);
};
#endif
| samplecount/methcla | external_libraries/CoreAudioUtilityClasses/CoreAudio/PublicUtility/CASharedLibrary.h | C | apache-2.0 | 3,085 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""System tests for Google Cloud Memorystore operators"""
import os
from urllib.parse import urlparse
import pytest
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_MEMORYSTORE
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
# Target GCP project; falls back to a placeholder for local/dry runs.
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
# gs:// URL of the RDB archive used by the Memorystore export/import steps.
GCP_ARCHIVE_URL = os.environ.get("GCP_MEMORYSTORE_EXPORT_GCS_URL", "gs://test-memorystore/my-export.rdb")
GCP_ARCHIVE_URL_PARTS = urlparse(GCP_ARCHIVE_URL)
# Bucket name is the netloc component of the gs:// URL.
GCP_BUCKET_NAME = GCP_ARCHIVE_URL_PARTS.netloc
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_MEMORYSTORE)
class CloudMemorystoreSystemTest(GoogleSystemTest):
    """
    System tests for Google Cloud Memorystore operators.

    These tests run the example DAGs against a real Cloud Memorystore
    service (nothing is mocked), so valid GCP credentials for the
    GCP_MEMORYSTORE key and a reachable project are required.
    """

    @provide_gcp_context(GCP_MEMORYSTORE)
    def setUp(self):
        # Create the GCS bucket used by the export/import steps of the
        # example DAGs before each test runs.
        super().setUp()
        self.create_gcs_bucket(GCP_BUCKET_NAME, location="europe-north1")

    @provide_gcp_context(GCP_MEMORYSTORE)
    def test_run_example_dag_memorystore_redis(self):
        # Execute the Redis example DAG end to end.
        self.run_dag('gcp_cloud_memorystore_redis', CLOUD_DAG_FOLDER)

    @provide_gcp_context(GCP_MEMORYSTORE)
    def test_run_example_dag_memorystore_memcached(self):
        # Execute the Memcached example DAG end to end.
        self.run_dag('gcp_cloud_memorystore_memcached', CLOUD_DAG_FOLDER)

    @provide_gcp_context(GCP_MEMORYSTORE)
    def tearDown(self):
        # Remove the bucket created in setUp after each test.
        self.delete_gcs_bucket(GCP_BUCKET_NAME)
        super().tearDown()
| airbnb/airflow | tests/providers/google/cloud/operators/test_cloud_memorystore_system.py | Python | apache-2.0 | 2,251 |
/*
* Copyright 2013-2014 Richard M. Hightower
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* __________ _____ __ .__
* \______ \ ____ ____ ____ /\ / \ _____ | | _|__| ____ ____
* | | _// _ \ / _ \ / \ \/ / \ / \\__ \ | |/ / |/ \ / ___\
* | | ( <_> | <_> ) | \ /\ / Y \/ __ \| <| | | \/ /_/ >
* |______ /\____/ \____/|___| / \/ \____|__ (____ /__|_ \__|___| /\___ /
* \/ \/ \/ \/ \/ \//_____/
* ____. ___________ _____ ______________.___.
* | |____ ___ _______ \_ _____/ / _ \ / _____/\__ | |
* | \__ \\ \/ /\__ \ | __)_ / /_\ \ \_____ \ / | |
* /\__| |/ __ \\ / / __ \_ | \/ | \/ \ \____ |
* \________(____ /\_/ (____ / /_______ /\____|__ /_______ / / ______|
* \/ \/ \/ \/ \/ \/
*/
package org.boon.validation.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marks a method, type, or field whose String value is expected to be a
 * well-formed e-mail address, to be checked by the Boon validation
 * framework at runtime.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.METHOD, ElementType.TYPE, ElementType.FIELD})
public @interface Email {

    /** Detailed validation-failure message; empty string selects the framework default. */
    String detailMessage() default "";

    /** Short summary validation-failure message; empty string selects the framework default. */
    String summaryMessage() default "";
}
| wprice/boon | boon/src/main/java/org/boon/validation/annotations/Email.java | Java | apache-2.0 | 2,011 |
/*
* Copyright 2000-2015 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.usages.impl.rules;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.roots.GeneratedSourcesFilter;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.usageView.UsageInfo;
import com.intellij.usageView.UsageViewBundle;
import com.intellij.usages.*;
import com.intellij.usages.rules.PsiElementUsage;
import com.intellij.usages.rules.SingleParentUsageGroupingRule;
import com.intellij.usages.rules.UsageInFile;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
/**
* @author max
*/
public class NonCodeUsageGroupingRule extends SingleParentUsageGroupingRule {
private final Project myProject;
public NonCodeUsageGroupingRule(Project project) {
myProject = project;
}
private static class CodeUsageGroup extends UsageGroupBase {
private static final UsageGroup INSTANCE = new CodeUsageGroup();
private CodeUsageGroup() {
super(0);
}
@Override
@NotNull
public String getText(UsageView view) {
return view == null ? UsageViewBundle.message("node.group.code.usages") : view.getPresentation().getCodeUsagesString();
}
public String toString() {
//noinspection HardCodedStringLiteral
return "CodeUsages";
}
}
private static class UsageInGeneratedCodeGroup extends UsageGroupBase {
public static final UsageGroup INSTANCE = new UsageInGeneratedCodeGroup();
private UsageInGeneratedCodeGroup() {
super(3);
}
@Override
@NotNull
public String getText(UsageView view) {
return view == null ? UsageViewBundle.message("node.usages.in.generated.code") : view.getPresentation().getUsagesInGeneratedCodeString();
}
public String toString() {
return "UsagesInGeneratedCode";
}
}
private static class NonCodeUsageGroup extends UsageGroupBase {
public static final UsageGroup INSTANCE = new NonCodeUsageGroup();
private NonCodeUsageGroup() {
super(2);
}
@Override
@NotNull
public String getText(UsageView view) {
return view == null ? UsageViewBundle.message("node.non.code.usages") : view.getPresentation().getNonCodeUsagesString();
}
@Override
public void update() {
}
public String toString() {
//noinspection HardCodedStringLiteral
return "NonCodeUsages";
}
}
private static class DynamicUsageGroup extends UsageGroupBase {
public static final UsageGroup INSTANCE = new DynamicUsageGroup();
@NonNls private static final String DYNAMIC_CAPTION = "Dynamic usages";
public DynamicUsageGroup() {
super(1);
}
@Override
@NotNull
public String getText(UsageView view) {
if (view == null) {
return DYNAMIC_CAPTION;
}
else {
final String dynamicCodeUsagesString = view.getPresentation().getDynamicCodeUsagesString();
return dynamicCodeUsagesString == null ? DYNAMIC_CAPTION : dynamicCodeUsagesString;
}
}
public String toString() {
//noinspection HardCodedStringLiteral
return "DynamicUsages";
}
}
@Nullable
@Override
protected UsageGroup getParentGroupFor(@NotNull Usage usage, @NotNull UsageTarget[] targets) {
if (usage instanceof UsageInFile) {
VirtualFile file = ((UsageInFile)usage).getFile();
if (file != null && GeneratedSourcesFilter.isGeneratedSourceByAnyFilter(file, myProject)) {
return UsageInGeneratedCodeGroup.INSTANCE;
}
}
if (usage instanceof PsiElementUsage) {
if (usage instanceof UsageInfo2UsageAdapter) {
final UsageInfo usageInfo = ((UsageInfo2UsageAdapter)usage).getUsageInfo();
if (usageInfo.isDynamicUsage()) {
return DynamicUsageGroup.INSTANCE;
}
}
if (((PsiElementUsage)usage).isNonCodeUsage()) {
return NonCodeUsageGroup.INSTANCE;
}
else {
return CodeUsageGroup.INSTANCE;
}
}
return null;
}
}
| signed/intellij-community | platform/usageView/src/com/intellij/usages/impl/rules/NonCodeUsageGroupingRule.java | Java | apache-2.0 | 4,612 |
(function () {
    // Linearly re-map `value` from the interval [srcMin, srcMax] onto
    // [dstMin, dstMax]. A zero-width source interval collapses to the
    // midpoint of the destination; a zero-width destination yields dstMin.
    function remap(value, srcMin, srcMax, dstMin, dstMax) {
        var srcSpan = srcMax - srcMin,
            dstSpan = dstMax - dstMin;
        if (srcSpan === 0) {
            return dstMin + dstSpan / 2;
        }
        if (dstSpan === 0) {
            return dstMin;
        }
        // Normalize into [0, 1], then scale and shift into the destination.
        return dstSpan * ((value - srcMin) / srcSpan) + dstMin;
    }

    /**
     * Enhance Filter. Stretches (or, for negative amounts, compresses) each
     * color channel's histogram so the colors span the widest possible range
     * (ie 0-255). Performs w*h pixel reads and w*h pixel writes.
     * @function
     * @name Enhance
     * @memberof Kinetic.Filters
     * @param {Object} imageData
     * @author ippo615
     * @example
     * node.cache();
     * node.filters([Kinetic.Filters.Enhance]);
     * node.enhance(0.4);
     */
    Kinetic.Filters.Enhance = function (imageData) {
        var data = imageData.data,
            subPixelCount = data.length,
            minR = data[0], maxR = minR,
            minG = data[1], maxG = minG,
            minB = data[2], maxB = minB,
            i, v;

        // Nothing to do when the enhancement amount is zero.
        var amount = this.enhance();
        if (amount === 0) {
            return;
        }

        // Pass 1: find the per-channel minimum and maximum levels.
        for (i = 0; i < subPixelCount; i += 4) {
            v = data[i];
            if (v < minR) { minR = v; } else if (v > maxR) { maxR = v; }
            v = data[i + 1];
            if (v < minG) { minG = v; } else if (v > maxG) { maxG = v; }
            v = data[i + 2];
            if (v < minB) { minB = v; } else if (v > maxB) { maxB = v; }
            // Alpha (data[i + 3]) is intentionally left untouched.
        }

        // A flat channel (only one level present) is treated as full range
        // instead of being remapped.
        if (maxR === minR) { maxR = 255; minR = 0; }
        if (maxG === minG) { maxG = 255; minG = 0; }
        if (maxB === minB) { maxB = 255; minB = 0; }

        var goalMinR, goalMaxR,
            goalMinG, goalMaxG,
            goalMinB, goalMaxB,
            mid;

        if (amount > 0) {
            // Positive amount: stretch each channel's histogram toward [0, 255].
            goalMaxR = maxR + amount * (255 - maxR);
            goalMinR = minR - amount * (minR - 0);
            goalMaxG = maxG + amount * (255 - maxG);
            goalMinG = minG - amount * (minG - 0);
            goalMaxB = maxB + amount * (255 - maxB);
            goalMinB = minB - amount * (minB - 0);
        } else {
            // Negative amount: compress each channel toward its midpoint.
            mid = (maxR + minR) * 0.5;
            goalMaxR = maxR + amount * (maxR - mid);
            goalMinR = minR + amount * (minR - mid);
            mid = (maxG + minG) * 0.5;
            goalMaxG = maxG + amount * (maxG - mid);
            goalMinG = minG + amount * (minG - mid);
            mid = (maxB + minB) * 0.5;
            goalMaxB = maxB + amount * (maxB - mid);
            goalMinB = minB + amount * (minB - mid);
        }

        // Pass 2: re-map every subpixel except alpha.
        for (i = 0; i < subPixelCount; i += 4) {
            data[i] = remap(data[i], minR, maxR, goalMinR, goalMaxR);
            data[i + 1] = remap(data[i + 1], minG, maxG, goalMinG, goalMaxG);
            data[i + 2] = remap(data[i + 2], minB, maxB, goalMinB, goalMaxB);
        }
    };

    Kinetic.Factory.addGetterSetter(Kinetic.Node, 'enhance', 0, null, Kinetic.Factory.afterSetFilter);

    /**
     * get/set enhance. Use with {@link Kinetic.Filters.Enhance} filter.
     * @name enhance
     * @method
     * @memberof Kinetic.Node.prototype
     * @param {Float} amount
     * @returns {Float}
     */
})();
| puyanLiu/LPYFramework | 前端练习/10canvas/文档/KineticJS-master/src/filters/Enhance.js | JavaScript | apache-2.0 | 4,121 |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>CloudWatch Reference — boto v2.33.0</title>
<link rel="stylesheet" href="../_static/boto.css" type="text/css" />
<link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
<script type="text/javascript">
var DOCUMENTATION_OPTIONS = {
URL_ROOT: '../',
VERSION: 'HEAD',
COLLAPSE_INDEX: false,
FILE_SUFFIX: '.html',
HAS_SOURCE: true
};
</script>
<script type="text/javascript" src="../_static/jquery.js"></script>
<script type="text/javascript" src="../_static/underscore.js"></script>
<script type="text/javascript" src="../_static/doctools.js"></script>
<link rel="top" title="boto v2.33.0" href="../index.html" />
<link rel="next" title="Cognito Identity" href="cognito-identity.html" />
<link rel="prev" title="CloudTrail" href="cloudtrail.html" />
</head>
<body>
<div class="related">
<h3>Navigation</h3>
<ul>
<li class="right" style="margin-right: 10px">
<a href="../genindex.html" title="General Index"
accesskey="I">index</a></li>
<li class="right" >
<a href="../py-modindex.html" title="Python Module Index"
>modules</a> |</li>
<li class="right" >
<a href="cognito-identity.html" title="Cognito Identity"
accesskey="N">next</a> |</li>
<li class="right" >
<a href="cloudtrail.html" title="CloudTrail"
accesskey="P">previous</a> |</li>
<li><a href="../index.html">boto v2.33.0</a> »</li>
</ul>
</div>
<div class="document">
<div class="documentwrapper">
<div class="bodywrapper">
<div class="body">
<div class="section" id="cloudwatch-reference">
<h1>CloudWatch Reference<a class="headerlink" href="#cloudwatch-reference" title="Permalink to this headline">¶</a></h1>
<div class="section" id="module-boto.ec2.cloudwatch">
<span id="boto-ec2-cloudwatch"></span><h2>boto.ec2.cloudwatch<a class="headerlink" href="#module-boto.ec2.cloudwatch" title="Permalink to this headline">¶</a></h2>
<p>This module provides an interface to the Elastic Compute Cloud (EC2)
CloudWatch service from AWS.</p>
<dl class="class">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection">
<em class="property">class </em><tt class="descclassname">boto.ec2.cloudwatch.</tt><tt class="descname">CloudWatchConnection</tt><big>(</big><em>aws_access_key_id=None</em>, <em>aws_secret_access_key=None</em>, <em>is_secure=True</em>, <em>port=None</em>, <em>proxy=None</em>, <em>proxy_port=None</em>, <em>proxy_user=None</em>, <em>proxy_pass=None</em>, <em>debug=0</em>, <em>https_connection_factory=None</em>, <em>region=None</em>, <em>path='/'</em>, <em>security_token=None</em>, <em>validate_certs=True</em>, <em>profile_name=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection" title="Permalink to this definition">¶</a></dt>
<dd><p>Init method to create a new connection to EC2 Monitoring Service.</p>
<p>B{Note:} The host argument is overridden by the host specified in the
boto configuration file.</p>
<dl class="attribute">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.APIVersion">
<tt class="descname">APIVersion</tt><em class="property"> = '2010-08-01'</em><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.APIVersion" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="attribute">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.DefaultRegionEndpoint">
<tt class="descname">DefaultRegionEndpoint</tt><em class="property"> = 'monitoring.us-east-1.amazonaws.com'</em><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.DefaultRegionEndpoint" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="attribute">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.DefaultRegionName">
<tt class="descname">DefaultRegionName</tt><em class="property"> = 'us-east-1'</em><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.DefaultRegionName" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.build_dimension_param">
<tt class="descname">build_dimension_param</tt><big>(</big><em>dimension</em>, <em>params</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.build_dimension_param" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.build_list_params">
<tt class="descname">build_list_params</tt><big>(</big><em>params</em>, <em>items</em>, <em>label</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.build_list_params" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.build_put_params">
<tt class="descname">build_put_params</tt><big>(</big><em>params</em>, <em>name</em>, <em>value=None</em>, <em>timestamp=None</em>, <em>unit=None</em>, <em>dimensions=None</em>, <em>statistics=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.build_put_params" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.create_alarm">
<tt class="descname">create_alarm</tt><big>(</big><em>alarm</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.create_alarm" title="Permalink to this definition">¶</a></dt>
<dd><p>Creates or updates an alarm and associates it with the specified Amazon
CloudWatch metric. Optionally, this operation can associate one or more
Amazon Simple Notification Service resources with the alarm.</p>
<p>When this operation creates an alarm, the alarm state is immediately
set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is
set appropriately. Any actions associated with the StateValue is then
executed.</p>
<p>When updating an existing alarm, its StateValue is left unchanged.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>alarm</strong> (<a class="reference internal" href="#boto.ec2.cloudwatch.alarm.MetricAlarm" title="boto.ec2.cloudwatch.alarm.MetricAlarm"><em>boto.ec2.cloudwatch.alarm.MetricAlarm</em></a>) – MetricAlarm object.</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.delete_alarms">
<tt class="descname">delete_alarms</tt><big>(</big><em>alarms</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.delete_alarms" title="Permalink to this definition">¶</a></dt>
<dd><p>Deletes all specified alarms. In the event of an error, no
alarms are deleted.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>alarms</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#list" title="(in Python v2.7)"><em>list</em></a>) – List of alarm names.</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.describe_alarm_history">
<tt class="descname">describe_alarm_history</tt><big>(</big><em>alarm_name=None</em>, <em>start_date=None</em>, <em>end_date=None</em>, <em>max_records=None</em>, <em>history_item_type=None</em>, <em>next_token=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.describe_alarm_history" title="Permalink to this definition">¶</a></dt>
<dd><p>Retrieves history for the specified alarm. Filter alarms by date range
or item type. If an alarm name is not specified, Amazon CloudWatch
returns histories for all of the owner’s alarms.</p>
<p>Amazon CloudWatch retains the history of deleted alarms for a period of
six weeks. If an alarm has been deleted, its history can still be
queried.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>alarm_name</strong> (<a class="reference external" href="http://docs.python.org/library/string.html#module-string" title="(in Python v2.7)"><em>string</em></a>) – The name of the alarm.</li>
<li><strong>start_date</strong> (<a class="reference external" href="http://docs.python.org/library/datetime.html#module-datetime" title="(in Python v2.7)"><em>datetime</em></a>) – The starting date to retrieve alarm history.</li>
<li><strong>end_date</strong> (<a class="reference external" href="http://docs.python.org/library/datetime.html#module-datetime" title="(in Python v2.7)"><em>datetime</em></a>) – The ending date to retrieve alarm history.</li>
<li><strong>history_item_type</strong> (<a class="reference external" href="http://docs.python.org/library/string.html#module-string" title="(in Python v2.7)"><em>string</em></a>) – The type of alarm histories to retrieve
(ConfigurationUpdate | StateUpdate | Action)</li>
<li><strong>max_records</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#int" title="(in Python v2.7)"><em>int</em></a>) – The maximum number of alarm descriptions
to retrieve.</li>
<li><strong>next_token</strong> (<a class="reference external" href="http://docs.python.org/library/string.html#module-string" title="(in Python v2.7)"><em>string</em></a>) – The token returned by a previous call to indicate
that there is more data.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<p>:rtype list</p>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.describe_alarms">
<tt class="descname">describe_alarms</tt><big>(</big><em>action_prefix=None</em>, <em>alarm_name_prefix=None</em>, <em>alarm_names=None</em>, <em>max_records=None</em>, <em>state_value=None</em>, <em>next_token=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.describe_alarms" title="Permalink to this definition">¶</a></dt>
<dd><p>Retrieves alarms with the specified names. If no name is specified, all
alarms for the user are returned. Alarms can be retrieved by using only
a prefix for the alarm name, the alarm state, or a prefix for any
action.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>action_name</strong> – The action name prefix.</li>
<li><strong>alarm_name_prefix</strong> (<a class="reference external" href="http://docs.python.org/library/string.html#module-string" title="(in Python v2.7)"><em>string</em></a>) – The alarm name prefix. AlarmNames cannot
be specified if this parameter is specified.</li>
<li><strong>alarm_names</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#list" title="(in Python v2.7)"><em>list</em></a>) – A list of alarm names to retrieve information for.</li>
<li><strong>max_records</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#int" title="(in Python v2.7)"><em>int</em></a>) – The maximum number of alarm descriptions
to retrieve.</li>
<li><strong>state_value</strong> (<a class="reference external" href="http://docs.python.org/library/string.html#module-string" title="(in Python v2.7)"><em>string</em></a>) – The state value to be used in matching alarms.</li>
<li><strong>next_token</strong> (<a class="reference external" href="http://docs.python.org/library/string.html#module-string" title="(in Python v2.7)"><em>string</em></a>) – The token returned by a previous call to
indicate that there is more data.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<p>:rtype list</p>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.describe_alarms_for_metric">
<tt class="descname">describe_alarms_for_metric</tt><big>(</big><em>metric_name</em>, <em>namespace</em>, <em>period=None</em>, <em>statistic=None</em>, <em>dimensions=None</em>, <em>unit=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.describe_alarms_for_metric" title="Permalink to this definition">¶</a></dt>
<dd><p>Retrieves all alarms for a single metric. Specify a statistic, period,
or unit to filter the set of alarms further.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>metric_name</strong> (<a class="reference external" href="http://docs.python.org/library/string.html#module-string" title="(in Python v2.7)"><em>string</em></a>) – The name of the metric</li>
<li><strong>namespace</strong> (<a class="reference external" href="http://docs.python.org/library/string.html#module-string" title="(in Python v2.7)"><em>string</em></a>) – The namespace of the metric.</li>
<li><strong>period</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#int" title="(in Python v2.7)"><em>int</em></a>) – The period in seconds over which the statistic
is applied.</li>
<li><strong>statistic</strong> (<a class="reference external" href="http://docs.python.org/library/string.html#module-string" title="(in Python v2.7)"><em>string</em></a>) – The statistic for the metric.</li>
<li><strong>dimension_filters</strong> – A dictionary containing name/value
pairs that will be used to filter the results. The key in
the dictionary is the name of a Dimension. The value in
the dictionary is either a scalar value of that Dimension
name that you want to filter on, a list of values to
filter on or None if you want all metrics with that
Dimension name.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<p>:rtype list</p>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.disable_alarm_actions">
<tt class="descname">disable_alarm_actions</tt><big>(</big><em>alarm_names</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.disable_alarm_actions" title="Permalink to this definition">¶</a></dt>
<dd><p>Disables actions for the specified alarms.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>alarms</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#list" title="(in Python v2.7)"><em>list</em></a>) – List of alarm names.</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.enable_alarm_actions">
<tt class="descname">enable_alarm_actions</tt><big>(</big><em>alarm_names</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.enable_alarm_actions" title="Permalink to this definition">¶</a></dt>
<dd><p>Enables actions for the specified alarms.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>alarms</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#list" title="(in Python v2.7)"><em>list</em></a>) – List of alarm names.</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.get_metric_statistics">
<tt class="descname">get_metric_statistics</tt><big>(</big><em>period</em>, <em>start_time</em>, <em>end_time</em>, <em>metric_name</em>, <em>namespace</em>, <em>statistics</em>, <em>dimensions=None</em>, <em>unit=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.get_metric_statistics" title="Permalink to this definition">¶</a></dt>
<dd><p>Get time-series data for one or more statistics of a given metric.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first simple">
<li><strong>period</strong> (<em>integer</em>) – The granularity, in seconds, of the returned datapoints.
Period must be at least 60 seconds and must be a multiple
of 60. The default value is 60.</li>
<li><strong>start_time</strong> (<a class="reference external" href="http://docs.python.org/library/datetime.html#module-datetime" title="(in Python v2.7)"><em>datetime</em></a>) – The time stamp to use for determining the
first datapoint to return. The value specified is
inclusive; results include datapoints with the time stamp
specified.</li>
<li><strong>end_time</strong> (<a class="reference external" href="http://docs.python.org/library/datetime.html#module-datetime" title="(in Python v2.7)"><em>datetime</em></a>) – The time stamp to use for determining the
last datapoint to return. The value specified is
exclusive; results will include datapoints up to the time
stamp specified.</li>
<li><strong>metric_name</strong> (<a class="reference external" href="http://docs.python.org/library/string.html#module-string" title="(in Python v2.7)"><em>string</em></a>) – The metric name.</li>
<li><strong>namespace</strong> (<a class="reference external" href="http://docs.python.org/library/string.html#module-string" title="(in Python v2.7)"><em>string</em></a>) – The metric’s namespace.</li>
<li><strong>statistics</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#list" title="(in Python v2.7)"><em>list</em></a>) – A list of statistics names. Valid values:
Average | Sum | SampleCount | Maximum | Minimum</li>
<li><strong>dimensions</strong> (<a class="reference external" href="http://docs.python.org/library/stdtypes.html#dict" title="(in Python v2.7)"><em>dict</em></a>) – A dictionary of dimension key/values where
the key is the dimension name and the value
is either a scalar value or an iterator
of values to be associated with that
dimension.</li>
<li><strong>unit</strong> (<a class="reference external" href="http://docs.python.org/library/string.html#module-string" title="(in Python v2.7)"><em>string</em></a>) – The unit for the metric. Valid values are:
Seconds | Microseconds | Milliseconds | Bytes | Kilobytes |
Megabytes | Gigabytes | Terabytes | Bits | Kilobits |
Megabits | Gigabits | Terabits | Percent | Count |
Bytes/Second | Kilobytes/Second | Megabytes/Second |
Gigabytes/Second | Terabytes/Second | Bits/Second |
Kilobits/Second | Megabits/Second | Gigabits/Second |
Terabits/Second | Count/Second | None</li>
</ul>
</td>
</tr>
<tr class="field-even field"><th class="field-name">Return type:</th><td class="field-body"><p class="first last">list</p>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.list_metrics">
<tt class="descname">list_metrics</tt><big>(</big><em>next_token=None</em>, <em>dimensions=None</em>, <em>metric_name=None</em>, <em>namespace=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.list_metrics" title="Permalink to this definition">¶</a></dt>
<dd><p>Returns a list of the valid metrics for which there is recorded
data available.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>next_token</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#str" title="(in Python v2.7)"><em>str</em></a>) – A maximum of 500 metrics will be returned
at one time. If more results are available, the ResultSet
returned will contain a non-Null next_token attribute.
Passing that token as a parameter to list_metrics will
retrieve the next page of metrics.</li>
<li><strong>dimensions</strong> (<a class="reference external" href="http://docs.python.org/library/stdtypes.html#dict" title="(in Python v2.7)"><em>dict</em></a>) – A dictionary containing name/value
pairs that will be used to filter the results. The key in
the dictionary is the name of a Dimension. The value in
the dictionary is either a scalar value of that Dimension
name that you want to filter on or None if you want all
metrics with that Dimension name. To be included in the
result a metric must contain all specified dimensions,
although the metric may contain additional dimensions beyond
the requested metrics. The Dimension names and values must
be strings between 1 and 250 characters long. A maximum of
10 dimensions are allowed.</li>
<li><strong>metric_name</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#str" title="(in Python v2.7)"><em>str</em></a>) – The name of the Metric to filter against. If None,
all Metric names will be returned.</li>
<li><strong>namespace</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#str" title="(in Python v2.7)"><em>str</em></a>) – A Metric namespace to filter against (e.g. AWS/EC2).
If None, Metrics from all namespaces will be returned.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.put_metric_alarm">
<tt class="descname">put_metric_alarm</tt><big>(</big><em>alarm</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.put_metric_alarm" title="Permalink to this definition">¶</a></dt>
<dd><p>Creates or updates an alarm and associates it with the specified Amazon
CloudWatch metric. Optionally, this operation can associate one or more
Amazon Simple Notification Service resources with the alarm.</p>
<p>When this operation creates an alarm, the alarm state is immediately
set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is
set appropriately. Any actions associated with the StateValue is then
executed.</p>
<p>When updating an existing alarm, its StateValue is left unchanged.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>alarm</strong> (<a class="reference internal" href="#boto.ec2.cloudwatch.alarm.MetricAlarm" title="boto.ec2.cloudwatch.alarm.MetricAlarm"><em>boto.ec2.cloudwatch.alarm.MetricAlarm</em></a>) – MetricAlarm object.</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.put_metric_data">
<tt class="descname">put_metric_data</tt><big>(</big><em>namespace</em>, <em>name</em>, <em>value=None</em>, <em>timestamp=None</em>, <em>unit=None</em>, <em>dimensions=None</em>, <em>statistics=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.put_metric_data" title="Permalink to this definition">¶</a></dt>
<dd><p>Publishes metric data points to Amazon CloudWatch. Amazon Cloudwatch
associates the data points with the specified metric. If the specified
metric does not exist, Amazon CloudWatch creates the metric. If a list
is specified for some, but not all, of the arguments, the remaining
arguments are repeated a corresponding number of times.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>namespace</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#str" title="(in Python v2.7)"><em>str</em></a>) – The namespace of the metric.</li>
<li><strong>name</strong> (<em>str or list</em>) – The name of the metric.</li>
<li><strong>value</strong> (<em>float or list</em>) – The value for the metric.</li>
<li><strong>timestamp</strong> (<em>datetime or list</em>) – The time stamp used for the metric. If not specified,
the default value is set to the time the metric data was received.</li>
<li><strong>unit</strong> (<em>string or list</em>) – The unit of the metric. Valid Values: Seconds |
Microseconds | Milliseconds | Bytes | Kilobytes |
Megabytes | Gigabytes | Terabytes | Bits | Kilobits |
Megabits | Gigabits | Terabits | Percent | Count |
Bytes/Second | Kilobytes/Second | Megabytes/Second |
Gigabytes/Second | Terabytes/Second | Bits/Second |
Kilobits/Second | Megabits/Second | Gigabits/Second |
Terabits/Second | Count/Second | None</li>
<li><strong>dimensions</strong> (<a class="reference external" href="http://docs.python.org/library/stdtypes.html#dict" title="(in Python v2.7)"><em>dict</em></a>) – Add extra name value pairs to associate
with the metric, i.e.:
{‘name1’: value1, ‘name2’: (value2, value3)}</li>
<li><strong>statistics</strong> (<em>dict or list</em>) – <p>Use a statistic set instead of a value, for example:</p>
<div class="highlight-python"><div class="highlight"><pre><span class="p">{</span><span class="s">'maximum'</span><span class="p">:</span> <span class="mi">30</span><span class="p">,</span> <span class="s">'minimum'</span><span class="p">:</span> <span class="mi">1</span><span class="p">,</span> <span class="s">'samplecount'</span><span class="p">:</span> <span class="mi">100</span><span class="p">,</span> <span class="s">'sum'</span><span class="p">:</span> <span class="mi">10000</span><span class="p">}</span>
</pre></div>
</div>
</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.set_alarm_state">
<tt class="descname">set_alarm_state</tt><big>(</big><em>alarm_name</em>, <em>state_reason</em>, <em>state_value</em>, <em>state_reason_data=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.set_alarm_state" title="Permalink to this definition">¶</a></dt>
<dd><p>Temporarily sets the state of an alarm. When the updated StateValue
differs from the previous value, the action configured for the
appropriate state is invoked. This is not a permanent change. The next
periodic alarm check (in about a minute) will set the alarm to its
actual state.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>alarm_name</strong> (<a class="reference external" href="http://docs.python.org/library/string.html#module-string" title="(in Python v2.7)"><em>string</em></a>) – Descriptive name for alarm.</li>
<li><strong>state_reason</strong> (<a class="reference external" href="http://docs.python.org/library/string.html#module-string" title="(in Python v2.7)"><em>string</em></a>) – Human readable reason.</li>
<li><strong>state_value</strong> (<a class="reference external" href="http://docs.python.org/library/string.html#module-string" title="(in Python v2.7)"><em>string</em></a>) – OK | ALARM | INSUFFICIENT_DATA</li>
<li><strong>state_reason_data</strong> (<a class="reference external" href="http://docs.python.org/library/string.html#module-string" title="(in Python v2.7)"><em>string</em></a>) – Reason string (will be jsonified).</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.CloudWatchConnection.update_alarm">
<tt class="descname">update_alarm</tt><big>(</big><em>alarm</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.CloudWatchConnection.update_alarm" title="Permalink to this definition">¶</a></dt>
<dd><p>Creates or updates an alarm and associates it with the specified Amazon
CloudWatch metric. Optionally, this operation can associate one or more
Amazon Simple Notification Service resources with the alarm.</p>
<p>When this operation creates an alarm, the alarm state is immediately
set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is
set appropriately. Any actions associated with the StateValue is then
executed.</p>
<p>When updating an existing alarm, its StateValue is left unchanged.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>alarm</strong> (<a class="reference internal" href="#boto.ec2.cloudwatch.alarm.MetricAlarm" title="boto.ec2.cloudwatch.alarm.MetricAlarm"><em>boto.ec2.cloudwatch.alarm.MetricAlarm</em></a>) – MetricAlarm object.</td>
</tr>
</tbody>
</table>
</dd></dl>
</dd></dl>
<dl class="function">
<dt id="boto.ec2.cloudwatch.connect_to_region">
<tt class="descclassname">boto.ec2.cloudwatch.</tt><tt class="descname">connect_to_region</tt><big>(</big><em>region_name</em>, <em>**kw_params</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.connect_to_region" title="Permalink to this definition">¶</a></dt>
<dd><p>Given a valid region name, return a
<a class="reference internal" href="#boto.ec2.cloudwatch.CloudWatchConnection" title="boto.ec2.cloudwatch.CloudWatchConnection"><tt class="xref py py-class docutils literal"><span class="pre">boto.ec2.cloudwatch.CloudWatchConnection</span></tt></a>.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>region_name</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#str" title="(in Python v2.7)"><em>str</em></a>) – The name of the region to connect to.</td>
</tr>
<tr class="field-even field"><th class="field-name">Return type:</th><td class="field-body"><tt class="xref py py-class docutils literal"><span class="pre">boto.ec2.CloudWatchConnection</span></tt> or <tt class="docutils literal"><span class="pre">None</span></tt></td>
</tr>
<tr class="field-odd field"><th class="field-name">Returns:</th><td class="field-body">A connection to the given region, or None if an invalid region
name is given</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="function">
<dt id="boto.ec2.cloudwatch.regions">
<tt class="descclassname">boto.ec2.cloudwatch.</tt><tt class="descname">regions</tt><big>(</big><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.regions" title="Permalink to this definition">¶</a></dt>
<dd><p>Get all available regions for the CloudWatch service.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Return type:</th><td class="field-body">list</td>
</tr>
<tr class="field-even field"><th class="field-name">Returns:</th><td class="field-body">A list of <tt class="xref py py-class docutils literal"><span class="pre">boto.RegionInfo</span></tt> instances</td>
</tr>
</tbody>
</table>
</dd></dl>
</div>
<div class="section" id="module-boto.ec2.cloudwatch.datapoint">
<span id="boto-ec2-cloudwatch-datapoint"></span><h2>boto.ec2.cloudwatch.datapoint<a class="headerlink" href="#module-boto.ec2.cloudwatch.datapoint" title="Permalink to this headline">¶</a></h2>
<dl class="class">
<dt id="boto.ec2.cloudwatch.datapoint.Datapoint">
<em class="property">class </em><tt class="descclassname">boto.ec2.cloudwatch.datapoint.</tt><tt class="descname">Datapoint</tt><big>(</big><em>connection=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.datapoint.Datapoint" title="Permalink to this definition">¶</a></dt>
<dd><dl class="method">
<dt id="boto.ec2.cloudwatch.datapoint.Datapoint.endElement">
<tt class="descname">endElement</tt><big>(</big><em>name</em>, <em>value</em>, <em>connection</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.datapoint.Datapoint.endElement" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.datapoint.Datapoint.startElement">
<tt class="descname">startElement</tt><big>(</big><em>name</em>, <em>attrs</em>, <em>connection</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.datapoint.Datapoint.startElement" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
</dd></dl>
</div>
<div class="section" id="module-boto.ec2.cloudwatch.metric">
<span id="boto-ec2-cloudwatch-metric"></span><h2>boto.ec2.cloudwatch.metric<a class="headerlink" href="#module-boto.ec2.cloudwatch.metric" title="Permalink to this headline">¶</a></h2>
<dl class="class">
<dt id="boto.ec2.cloudwatch.metric.Metric">
<em class="property">class </em><tt class="descclassname">boto.ec2.cloudwatch.metric.</tt><tt class="descname">Metric</tt><big>(</big><em>connection=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.metric.Metric" title="Permalink to this definition">¶</a></dt>
<dd><dl class="attribute">
<dt id="boto.ec2.cloudwatch.metric.Metric.Statistics">
<tt class="descname">Statistics</tt><em class="property"> = ['Minimum', 'Maximum', 'Sum', 'Average', 'SampleCount']</em><a class="headerlink" href="#boto.ec2.cloudwatch.metric.Metric.Statistics" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="attribute">
<dt id="boto.ec2.cloudwatch.metric.Metric.Units">
<tt class="descname">Units</tt><em class="property"> = ['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', None]</em><a class="headerlink" href="#boto.ec2.cloudwatch.metric.Metric.Units" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.metric.Metric.create_alarm">
<tt class="descname">create_alarm</tt><big>(</big><em>name</em>, <em>comparison</em>, <em>threshold</em>, <em>period</em>, <em>evaluation_periods</em>, <em>statistic</em>, <em>enabled=True</em>, <em>description=None</em>, <em>dimensions=None</em>, <em>alarm_actions=None</em>, <em>ok_actions=None</em>, <em>insufficient_data_actions=None</em>, <em>unit=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.metric.Metric.create_alarm" title="Permalink to this definition">¶</a></dt>
<dd><p>Creates or updates an alarm and associates it with this metric.
Optionally, this operation can associate one or more
Amazon Simple Notification Service resources with the alarm.</p>
<p>When this operation creates an alarm, the alarm state is immediately
set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is
set appropriately. Any actions associated with the StateValue is then
executed.</p>
<p>When updating an existing alarm, its StateValue is left unchanged.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>alarm</strong> (<a class="reference internal" href="#boto.ec2.cloudwatch.alarm.MetricAlarm" title="boto.ec2.cloudwatch.alarm.MetricAlarm"><em>boto.ec2.cloudwatch.alarm.MetricAlarm</em></a>) – MetricAlarm object.</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.metric.Metric.describe_alarms">
<tt class="descname">describe_alarms</tt><big>(</big><em>period=None</em>, <em>statistic=None</em>, <em>dimensions=None</em>, <em>unit=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.metric.Metric.describe_alarms" title="Permalink to this definition">¶</a></dt>
<dd><p>Retrieves all alarms for this metric. Specify a statistic, period,
or unit to filter the set of alarms further.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>period</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#int" title="(in Python v2.7)"><em>int</em></a>) – The period in seconds over which the statistic
is applied.</li>
<li><strong>statistic</strong> (<a class="reference external" href="http://docs.python.org/library/string.html#module-string" title="(in Python v2.7)"><em>string</em></a>) – The statistic for the metric.</li>
<li><strong>dimensions</strong> – A dictionary containing name/value
pairs that will be used to filter the results. The key in
the dictionary is the name of a Dimension. The value in
the dictionary is either a scalar value of that Dimension
name that you want to filter on, a list of values to
filter on or None if you want all metrics with that
Dimension name.</li>
</ul>
</td>
</tr>
</tbody>
</table>
<p>Return type: list</p>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.metric.Metric.endElement">
<tt class="descname">endElement</tt><big>(</big><em>name</em>, <em>value</em>, <em>connection</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.metric.Metric.endElement" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.metric.Metric.query">
<tt class="descname">query</tt><big>(</big><em>start_time</em>, <em>end_time</em>, <em>statistics</em>, <em>unit=None</em>, <em>period=60</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.metric.Metric.query" title="Permalink to this definition">¶</a></dt>
<dd><table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>start_time</strong> (<a class="reference external" href="http://docs.python.org/library/datetime.html#module-datetime" title="(in Python v2.7)"><em>datetime</em></a>) – The time stamp to use for determining the
first datapoint to return. The value specified is
inclusive; results include datapoints with the time stamp
specified.</li>
<li><strong>end_time</strong> (<a class="reference external" href="http://docs.python.org/library/datetime.html#module-datetime" title="(in Python v2.7)"><em>datetime</em></a>) – The time stamp to use for determining the
last datapoint to return. The value specified is
exclusive; results will include datapoints up to the time
stamp specified.</li>
<li><strong>statistics</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#list" title="(in Python v2.7)"><em>list</em></a>) – A list of statistics names. Valid values:
Average | Sum | SampleCount | Maximum | Minimum</li>
<li><strong>unit</strong> (<a class="reference external" href="http://docs.python.org/library/string.html#module-string" title="(in Python v2.7)"><em>string</em></a>) – The unit for the metric. Valid values are:
Seconds | Microseconds | Milliseconds | Bytes | Kilobytes |
Megabytes | Gigabytes | Terabytes | Bits | Kilobits |
Megabits | Gigabits | Terabits | Percent | Count |
Bytes/Second | Kilobytes/Second | Megabytes/Second |
Gigabytes/Second | Terabytes/Second | Bits/Second |
Kilobits/Second | Megabits/Second | Gigabits/Second |
Terabits/Second | Count/Second | None</li>
<li><strong>period</strong> (<em>integer</em>) – The granularity, in seconds, of the returned datapoints.
Period must be at least 60 seconds and must be a multiple
of 60. The default value is 60.</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.metric.Metric.startElement">
<tt class="descname">startElement</tt><big>(</big><em>name</em>, <em>attrs</em>, <em>connection</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.metric.Metric.startElement" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
</dd></dl>
</div>
<div class="section" id="module-boto.ec2.cloudwatch.alarm">
<span id="boto-ec2-cloudwatch-alarm"></span><h2>boto.ec2.cloudwatch.alarm<a class="headerlink" href="#module-boto.ec2.cloudwatch.alarm" title="Permalink to this headline">¶</a></h2>
<dl class="class">
<dt id="boto.ec2.cloudwatch.alarm.AlarmHistoryItem">
<em class="property">class </em><tt class="descclassname">boto.ec2.cloudwatch.alarm.</tt><tt class="descname">AlarmHistoryItem</tt><big>(</big><em>connection=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.AlarmHistoryItem" title="Permalink to this definition">¶</a></dt>
<dd><dl class="method">
<dt id="boto.ec2.cloudwatch.alarm.AlarmHistoryItem.endElement">
<tt class="descname">endElement</tt><big>(</big><em>name</em>, <em>value</em>, <em>connection</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.AlarmHistoryItem.endElement" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.alarm.AlarmHistoryItem.startElement">
<tt class="descname">startElement</tt><big>(</big><em>name</em>, <em>attrs</em>, <em>connection</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.AlarmHistoryItem.startElement" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
</dd></dl>
<dl class="class">
<dt id="boto.ec2.cloudwatch.alarm.MetricAlarm">
<em class="property">class </em><tt class="descclassname">boto.ec2.cloudwatch.alarm.</tt><tt class="descname">MetricAlarm</tt><big>(</big><em>connection=None</em>, <em>name=None</em>, <em>metric=None</em>, <em>namespace=None</em>, <em>statistic=None</em>, <em>comparison=None</em>, <em>threshold=None</em>, <em>period=None</em>, <em>evaluation_periods=None</em>, <em>unit=None</em>, <em>description=''</em>, <em>dimensions=None</em>, <em>alarm_actions=None</em>, <em>insufficient_data_actions=None</em>, <em>ok_actions=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.MetricAlarm" title="Permalink to this definition">¶</a></dt>
<dd><p>Creates a new Alarm.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>name</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#str" title="(in Python v2.7)"><em>str</em></a>) – Name of alarm.</li>
<li><strong>metric</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#str" title="(in Python v2.7)"><em>str</em></a>) – Name of alarm’s associated metric.</li>
<li><strong>namespace</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#str" title="(in Python v2.7)"><em>str</em></a>) – The namespace for the alarm’s metric.</li>
<li><strong>statistic</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#str" title="(in Python v2.7)"><em>str</em></a>) – The statistic to apply to the alarm’s associated
metric.
Valid values: SampleCount|Average|Sum|Minimum|Maximum</li>
<li><strong>comparison</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#str" title="(in Python v2.7)"><em>str</em></a>) – Comparison used to compare statistic with threshold.
Valid values: >= | > | < | <=</li>
<li><strong>threshold</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#float" title="(in Python v2.7)"><em>float</em></a>) – The value against which the specified statistic
is compared.</li>
<li><strong>period</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#int" title="(in Python v2.7)"><em>int</em></a>) – The period in seconds over which the specified
statistic is applied.</li>
<li><strong>evaluation_periods</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#int" title="(in Python v2.7)"><em>int</em></a>) – The number of periods over which data is
compared to the specified threshold.</li>
<li><strong>unit</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#str" title="(in Python v2.7)"><em>str</em></a>) – Allowed Values are:
Seconds|Microseconds|Milliseconds,
Bytes|Kilobytes|Megabytes|Gigabytes|Terabytes,
Bits|Kilobits|Megabits|Gigabits|Terabits,
Percent|Count|
Bytes/Second|Kilobytes/Second|Megabytes/Second|
Gigabytes/Second|Terabytes/Second,
Bits/Second|Kilobits/Second|Megabits/Second,
Gigabits/Second|Terabits/Second|Count/Second|None</li>
<li><strong>description</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#str" title="(in Python v2.7)"><em>str</em></a>) – Description of MetricAlarm</li>
<li><strong>dimensions</strong> (<a class="reference external" href="http://docs.python.org/library/stdtypes.html#dict" title="(in Python v2.7)"><em>dict</em></a>) – <p>A dictionary of dimension key/values where
the key is the dimension name and the value
is either a scalar value or an iterator
of values to be associated with that
dimension.
Example: {</p>
<blockquote>
<div>‘InstanceId’: [‘i-0123456’, ‘i-0123457’],
‘LoadBalancerName’: ‘test-lb’</div></blockquote>
<p>}</p>
</li>
<li><strong>alarm_actions</strong> (<em>list of strs</em>) – A list of the ARNs of the actions to take in
ALARM state</li>
<li><strong>insufficient_data_actions</strong> (<em>list of strs</em>) – A list of the ARNs of the actions to
take in INSUFFICIENT_DATA state</li>
<li><strong>ok_actions</strong> (<em>list of strs</em>) – A list of the ARNs of the actions to take in OK state</li>
</ul>
</td>
</tr>
</tbody>
</table>
<dl class="attribute">
<dt id="boto.ec2.cloudwatch.alarm.MetricAlarm.ALARM">
<tt class="descname">ALARM</tt><em class="property"> = 'ALARM'</em><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.MetricAlarm.ALARM" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="attribute">
<dt id="boto.ec2.cloudwatch.alarm.MetricAlarm.INSUFFICIENT_DATA">
<tt class="descname">INSUFFICIENT_DATA</tt><em class="property"> = 'INSUFFICIENT_DATA'</em><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.MetricAlarm.INSUFFICIENT_DATA" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="attribute">
<dt id="boto.ec2.cloudwatch.alarm.MetricAlarm.OK">
<tt class="descname">OK</tt><em class="property"> = 'OK'</em><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.MetricAlarm.OK" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.alarm.MetricAlarm.add_alarm_action">
<tt class="descname">add_alarm_action</tt><big>(</big><em>action_arn=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.MetricAlarm.add_alarm_action" title="Permalink to this definition">¶</a></dt>
<dd><p>Adds an alarm action, represented as an SNS topic, to this alarm.
What to do when the alarm is triggered.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>action_arn</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#str" title="(in Python v2.7)"><em>str</em></a>) – SNS topics to which notification should be
sent if the alarm goes to state ALARM.</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.alarm.MetricAlarm.add_insufficient_data_action">
<tt class="descname">add_insufficient_data_action</tt><big>(</big><em>action_arn=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.MetricAlarm.add_insufficient_data_action" title="Permalink to this definition">¶</a></dt>
<dd><p>Adds an insufficient_data action, represented as an SNS topic, to
this alarm. What to do when the insufficient_data state is reached.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>action_arn</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#str" title="(in Python v2.7)"><em>str</em></a>) – SNS topics to which notification should be
sent if the alarm goes to state INSUFFICIENT_DATA.</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.alarm.MetricAlarm.add_ok_action">
<tt class="descname">add_ok_action</tt><big>(</big><em>action_arn=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.MetricAlarm.add_ok_action" title="Permalink to this definition">¶</a></dt>
<dd><p>Adds an ok action, represented as an SNS topic, to this alarm. What
to do when the ok state is reached.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><strong>action_arn</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#str" title="(in Python v2.7)"><em>str</em></a>) – SNS topics to which notification should be
sent if the alarm goes to state OK.</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.alarm.MetricAlarm.delete">
<tt class="descname">delete</tt><big>(</big><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.MetricAlarm.delete" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.alarm.MetricAlarm.describe_history">
<tt class="descname">describe_history</tt><big>(</big><em>start_date=None</em>, <em>end_date=None</em>, <em>max_records=None</em>, <em>history_item_type=None</em>, <em>next_token=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.MetricAlarm.describe_history" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.alarm.MetricAlarm.disable_actions">
<tt class="descname">disable_actions</tt><big>(</big><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.MetricAlarm.disable_actions" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.alarm.MetricAlarm.enable_actions">
<tt class="descname">enable_actions</tt><big>(</big><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.MetricAlarm.enable_actions" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.alarm.MetricAlarm.endElement">
<tt class="descname">endElement</tt><big>(</big><em>name</em>, <em>value</em>, <em>connection</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.MetricAlarm.endElement" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.alarm.MetricAlarm.set_state">
<tt class="descname">set_state</tt><big>(</big><em>value</em>, <em>reason</em>, <em>data=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.MetricAlarm.set_state" title="Permalink to this definition">¶</a></dt>
<dd><p>Temporarily sets the state of an alarm.</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">Parameters:</th><td class="field-body"><ul class="first last simple">
<li><strong>value</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#str" title="(in Python v2.7)"><em>str</em></a>) – OK | ALARM | INSUFFICIENT_DATA</li>
<li><strong>reason</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#str" title="(in Python v2.7)"><em>str</em></a>) – Reason alarm set (human readable).</li>
<li><strong>data</strong> (<a class="reference external" href="http://docs.python.org/library/functions.html#str" title="(in Python v2.7)"><em>str</em></a>) – Reason data (will be jsonified).</li>
</ul>
</td>
</tr>
</tbody>
</table>
</dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.alarm.MetricAlarm.startElement">
<tt class="descname">startElement</tt><big>(</big><em>name</em>, <em>attrs</em>, <em>connection</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.MetricAlarm.startElement" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.alarm.MetricAlarm.update">
<tt class="descname">update</tt><big>(</big><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.MetricAlarm.update" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
</dd></dl>
<dl class="class">
<dt id="boto.ec2.cloudwatch.alarm.MetricAlarms">
<em class="property">class </em><tt class="descclassname">boto.ec2.cloudwatch.alarm.</tt><tt class="descname">MetricAlarms</tt><big>(</big><em>connection=None</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.MetricAlarms" title="Permalink to this definition">¶</a></dt>
<dd><p>Parses a list of MetricAlarms.</p>
<dl class="method">
<dt id="boto.ec2.cloudwatch.alarm.MetricAlarms.endElement">
<tt class="descname">endElement</tt><big>(</big><em>name</em>, <em>value</em>, <em>connection</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.MetricAlarms.endElement" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
<dl class="method">
<dt id="boto.ec2.cloudwatch.alarm.MetricAlarms.startElement">
<tt class="descname">startElement</tt><big>(</big><em>name</em>, <em>attrs</em>, <em>connection</em><big>)</big><a class="headerlink" href="#boto.ec2.cloudwatch.alarm.MetricAlarms.startElement" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>
</dd></dl>
</div>
</div>
</div>
</div>
</div>
<div class="sphinxsidebar">
<div class="sphinxsidebarwrapper">
<h3><a href="../index.html">Table Of Contents</a></h3>
<ul>
<li><a class="reference internal" href="#">CloudWatch Reference</a><ul>
<li><a class="reference internal" href="#module-boto.ec2.cloudwatch">boto.ec2.cloudwatch</a></li>
<li><a class="reference internal" href="#module-boto.ec2.cloudwatch.datapoint">boto.ec2.cloudwatch.datapoint</a></li>
<li><a class="reference internal" href="#module-boto.ec2.cloudwatch.metric">boto.ec2.cloudwatch.metric</a></li>
<li><a class="reference internal" href="#module-boto.ec2.cloudwatch.alarm">boto.ec2.cloudwatch.alarm</a></li>
</ul>
</li>
</ul>
<h4>Previous topic</h4>
<p class="topless"><a href="cloudtrail.html"
title="previous chapter">CloudTrail</a></p>
<h4>Next topic</h4>
<p class="topless"><a href="cognito-identity.html"
title="next chapter">Cognito Identity</a></p>
<h3>This Page</h3>
<ul class="this-page-menu">
<li><a href="../_sources/ref/cloudwatch.txt"
rel="nofollow">Show Source</a></li>
</ul>
<div id="searchbox" style="display: none">
<h3>Quick search</h3>
<form class="search" action="../search.html" method="get">
<input type="text" name="q" />
<input type="submit" value="Go" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
<p class="searchtip" style="font-size: 90%">
Enter search terms or a module, class or function name.
</p>
</div>
<script type="text/javascript">$('#searchbox').show(0);</script><div><a href="boto.pdf">PDF Version</a></div>
</div>
</div>
<div class="clearer"></div>
</div>
<div class="related">
<h3>Navigation</h3>
<ul>
<li class="right" style="margin-right: 10px">
<a href="../genindex.html" title="General Index"
>index</a></li>
<li class="right" >
<a href="../py-modindex.html" title="Python Module Index"
>modules</a> |</li>
<li class="right" >
<a href="cognito-identity.html" title="Cognito Identity"
>next</a> |</li>
<li class="right" >
<a href="cloudtrail.html" title="CloudTrail"
>previous</a> |</li>
<li><a href="../index.html">boto v2.33.0</a> »</li>
</ul>
</div>
<div class="footer">
© Copyright 2009,2010, Mitch Garnaat.
Created using <a href="http://sphinx-doc.org/">Sphinx</a> 1.2.3.
</div>
</body>
</html> | yg257/Pangea | templates/root/ec2/lib/boto-2.34.0/docs/build/html/ref/cloudwatch.html | HTML | apache-2.0 | 58,809 |
import pytest
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf.models import AbstractNode, NodeLog
from osf.utils import permissions
from osf.utils.sanitize import strip_html
from osf_tests.factories import (
NodeFactory,
ProjectFactory,
OSFGroupFactory,
RegistrationFactory,
AuthUserFactory,
PrivateLinkFactory,
)
from tests.base import fake
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.mark.django_db
class TestNodeChildrenList:
    """Tests for GET /nodes/{node_id}/children/.

    Covers visibility rules (public vs. private, contributors, OSF group
    members, view-only links), exclusion of pointers/node links and deleted
    children, and the ``related_counts=children`` meta count.
    """

    @pytest.fixture()
    def private_project(self, user):
        # Private project on which `user` is a WRITE (non-admin) contributor.
        private_project = ProjectFactory()
        private_project.add_contributor(
            user,
            permissions=permissions.WRITE
        )
        private_project.save()
        return private_project

    @pytest.fixture()
    def component(self, user, private_project):
        # Child component of the private project, created by `user`.
        return NodeFactory(parent=private_project, creator=user)

    @pytest.fixture()
    def pointer(self):
        # Standalone project used as a node-link (pointer) target.
        return ProjectFactory()

    @pytest.fixture()
    def private_project_url(self, private_project):
        return '/{}nodes/{}/children/'.format(API_BASE, private_project._id)

    @pytest.fixture()
    def public_project(self, user):
        return ProjectFactory(is_public=True, creator=user)

    @pytest.fixture()
    def public_component(self, user, public_project):
        return NodeFactory(parent=public_project, creator=user, is_public=True)

    @pytest.fixture()
    def public_project_url(self, user, public_project):
        return '/{}nodes/{}/children/'.format(API_BASE, public_project._id)

    @pytest.fixture()
    def view_only_link(self, private_project):
        # View-only link attached (initially) to the private project only,
        # not to its components.
        view_only_link = PrivateLinkFactory(name='node_view_only_link')
        view_only_link.nodes.add(private_project)
        view_only_link.save()
        return view_only_link

    def test_return_public_node_children_list(
            self, app, public_component,
            public_project_url):
        """Public children are listed for both anonymous and logged-in users."""
        # test_return_public_node_children_list_logged_out
        res = app.get(public_project_url)
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'
        assert len(res.json['data']) == 1
        assert res.json['data'][0]['id'] == public_component._id

        # test_return_public_node_children_list_logged_in
        non_contrib = AuthUserFactory()
        res = app.get(public_project_url, auth=non_contrib.auth)
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'
        assert len(res.json['data']) == 1
        assert res.json['data'][0]['id'] == public_component._id

    def test_return_private_node_children_list(
            self, app, user, component, private_project, private_project_url):
        """Private children require contributor or group-admin access."""
        # test_return_private_node_children_list_logged_out
        res = app.get(private_project_url, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]

        # test_return_private_node_children_list_logged_in_non_contributor
        non_contrib = AuthUserFactory()
        res = app.get(
            private_project_url,
            auth=non_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]

        # test_return_private_node_children_list_logged_in_contributor
        res = app.get(private_project_url, auth=user.auth)
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'
        assert len(res.json['data']) == 1
        assert res.json['data'][0]['id'] == component._id

        # test_return_private_node_children_osf_group_member_admin
        group_mem = AuthUserFactory()
        group = OSFGroupFactory(creator=group_mem)
        private_project.add_osf_group(group, permissions.ADMIN)
        res = app.get(private_project_url, auth=group_mem.auth)
        assert res.status_code == 200
        # Children are visible because the group member has implicit admin
        # permissions on the parent.
        assert len(res.json['data']) == 1
        assert res.json['data'][0]['id'] == component._id

    def test_node_children_list_does_not_include_pointers(
            self, app, user, component, private_project_url):
        res = app.get(private_project_url, auth=user.auth)
        assert len(res.json['data']) == 1

    def test_node_children_list_does_not_include_unauthorized_projects(
            self, app, user, component, private_project, private_project_url):
        # A child the user cannot access must not appear in the listing.
        NodeFactory(parent=private_project)
        res = app.get(private_project_url, auth=user.auth)
        assert len(res.json['data']) == 1

    def test_node_children_list_does_not_include_deleted(
            self, app, user, public_project, public_component,
            component, public_project_url):
        child_project = NodeFactory(parent=public_project, creator=user)
        child_project.save()

        res = app.get(public_project_url, auth=user.auth)
        assert res.status_code == 200
        ids = [node['id'] for node in res.json['data']]
        assert child_project._id in ids
        assert 2 == len(ids)

        # Soft-delete the child and confirm it disappears from the listing.
        child_project.is_deleted = True
        child_project.save()

        res = app.get(public_project_url, auth=user.auth)
        assert res.status_code == 200
        ids = [node['id'] for node in res.json['data']]
        assert child_project._id not in ids
        assert 1 == len(ids)

    def test_node_children_list_does_not_include_node_links(
            self, app, user, public_project, public_component,
            public_project_url):
        pointed_to = ProjectFactory(is_public=True)
        public_project.add_pointer(
            pointed_to,
            auth=Auth(public_project.creator)
        )
        res = app.get(public_project_url, auth=user.auth)
        ids = [node['id'] for node in res.json['data']]
        assert public_component._id in ids  # sanity check
        assert pointed_to._id not in ids

    # Regression test for https://openscience.atlassian.net/browse/EMB-593
    # Duplicates returned in child count
    def test_node_children_related_counts_duplicate_query_results(self, app, user, public_project,
                                                                  private_project, public_project_url):
        user_2 = AuthUserFactory()
        # Adding a child component
        child = NodeFactory(parent=public_project, creator=user, is_public=True, category='software')
        child.add_contributor(user_2, permissions.WRITE, save=True)
        # Adding a grandchild
        NodeFactory(parent=child, creator=user, is_public=True)
        # Adding a node link
        public_project.add_pointer(
            private_project,
            auth=Auth(public_project.creator)
        )

        # Assert NodeChildrenList returns one result
        res = app.get(public_project_url, auth=user.auth)
        assert len(res.json['data']) == 1
        assert res.json['data'][0]['id'] == child._id

        project_url = '/{}nodes/{}/?related_counts=children'.format(API_BASE, public_project._id)
        res = app.get(project_url, auth=user.auth)
        assert res.status_code == 200
        # Verifying related_counts match direct children count (grandchildren
        # not included, pointers not included)
        assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1

    def test_node_children_related_counts(self, app, user, public_project):
        parent = ProjectFactory(creator=user, is_public=False)
        user_2 = AuthUserFactory()
        parent.add_contributor(user_2, permissions.ADMIN)
        child = NodeFactory(parent=parent, creator=user_2, is_public=False, category='software')
        NodeFactory(parent=child, creator=user_2, is_public=False)

        # child has one component; `user` can view it due to implicit admin
        # permissions inherited from the parent.
        # NOTE: the original passed a stray `auth=user.auth` kwarg to
        # str.format(), which was silently ignored; it has been removed.
        component_url = '/{}nodes/{}/children/'.format(API_BASE, child._id)
        res = app.get(component_url, auth=user.auth)
        assert len(res.json['data']) == 1

        project_url = '/{}nodes/{}/?related_counts=children'.format(API_BASE, child._id)
        res = app.get(project_url, auth=user.auth)
        assert res.status_code == 200
        # Nodes with implicit admin perms are also included in the count
        assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1

    def test_child_counts_permissions(self, app, user, public_project):
        NodeFactory(parent=public_project, creator=user)

        url = '/{}nodes/{}/?related_counts=children'.format(API_BASE, public_project._id)
        user_two = AuthUserFactory()

        # Unauthorized
        res = app.get(url)
        assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0

        # Logged in noncontrib
        res = app.get(url, auth=user_two.auth)
        assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0

        # Logged in contrib
        res = app.get(url, auth=user.auth)
        assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1

    def test_private_node_children_with_view_only_link(self, user, app, private_project,
                                                       component, view_only_link, private_project_url):
        # get node related_counts with vol before vol is attached to components
        node_url = '/{}nodes/{}/?related_counts=children&view_only={}'.format(
            API_BASE, private_project._id, view_only_link.key)
        res = app.get(node_url)
        assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 0

        # view only link is not attached to components
        view_only_link_url = '{}?view_only={}'.format(private_project_url, view_only_link.key)
        res = app.get(view_only_link_url)
        ids = [node['id'] for node in res.json['data']]
        assert res.status_code == 200
        assert len(ids) == 0
        assert component._id not in ids

        # view only link is attached to components
        view_only_link.nodes.add(component)
        res = app.get(view_only_link_url)
        ids = [node['id'] for node in res.json['data']]
        assert res.status_code == 200
        assert component._id in ids
        assert 'contributors' in res.json['data'][0]['relationships']
        assert 'implicit_contributors' in res.json['data'][0]['relationships']
        assert 'bibliographic_contributors' in res.json['data'][0]['relationships']

        # get node related_counts with vol once vol is attached to components
        res = app.get(node_url)
        assert res.json['data']['relationships']['children']['links']['related']['meta']['count'] == 1

        # make private vol anonymous: contributor relationships must be hidden
        view_only_link.anonymous = True
        view_only_link.save()
        res = app.get(view_only_link_url)
        assert 'contributors' not in res.json['data'][0]['relationships']
        assert 'implicit_contributors' not in res.json['data'][0]['relationships']
        assert 'bibliographic_contributors' not in res.json['data'][0]['relationships']

        # delete vol: access via the link is revoked
        view_only_link.is_deleted = True
        view_only_link.save()
        res = app.get(view_only_link_url, expect_errors=True)
        assert res.status_code == 401
@pytest.mark.django_db
class TestNodeChildrenListFiltering:
    """Tests for filtering the children list via ``?filter[title]=``."""

    def test_node_child_filtering(self, app, user):
        """Only the child whose title matches the filter is returned."""
        from urllib.parse import quote

        project = ProjectFactory(creator=user)
        title_one, title_two = fake.bs(), fake.bs()
        while title_two == title_one:
            # Guard against the (unlikely) case of faker producing the same
            # phrase twice, which would make the exclusion assertion vacuous.
            title_two = fake.bs()
        component = NodeFactory(title=title_one, parent=project)
        component_two = NodeFactory(title=title_two, parent=project)

        # Percent-encode the title: faker phrases may contain characters
        # (spaces, '&', '#') that are unsafe in a raw query string.
        url = '/{}nodes/{}/children/?filter[title]={}'.format(
            API_BASE,
            project._id,
            quote(title_one)
        )

        res = app.get(url, auth=user.auth)
        ids = [node['id'] for node in res.json['data']]
        assert component._id in ids
        assert component_two._id not in ids
@pytest.mark.django_db
class TestNodeChildCreate:
    """Tests for POST /nodes/{node_id}/children/ (single child creation).

    Covers permission tiers (anonymous, read/write contributors,
    non-contributors, OSF group members), payload validation errors,
    HTML sanitization of title/description, and registrations rejecting
    child creation.
    """
    @pytest.fixture()
    def project(self, user):
        return ProjectFactory(creator=user, is_public=True)
    @pytest.fixture()
    def url(self, project):
        return '/{}nodes/{}/children/'.format(API_BASE, project._id)
    @pytest.fixture()
    def child(self):
        # Minimal valid JSON-API payload for creating a child node.
        return {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': 'child',
                    'description': 'this is a child project',
                    'category': 'project'
                }
            }
        }
    def test_creates_child(self, app, user, project, child, url):
        """Permission and payload-validation failure modes for child creation."""
        # test_creates_child_logged_out_user
        res = app.post_json_api(url, child, expect_errors=True)
        assert res.status_code == 401
        project.reload()
        assert len(project.nodes) == 0
        # test_creates_child_logged_in_read_contributor
        read_contrib = AuthUserFactory()
        project.add_contributor(
            read_contrib,
            permissions=permissions.READ,
            auth=Auth(user), save=True
        )
        res = app.post_json_api(
            url, child, auth=read_contrib.auth,
            expect_errors=True
        )
        assert res.status_code == 403
        project.reload()
        assert len(project.nodes) == 0
        # test_creates_child_logged_in_non_contributor
        non_contrib = AuthUserFactory()
        res = app.post_json_api(
            url, child, auth=non_contrib.auth,
            expect_errors=True
        )
        assert res.status_code == 403
        project.reload()
        assert len(project.nodes) == 0
        # test_creates_child_group_member_read
        # READ group membership is rejected; upgrading the group to WRITE
        # allows creation (201).
        group_mem = AuthUserFactory()
        group = OSFGroupFactory(creator=group_mem)
        project.add_osf_group(group, permissions.READ)
        res = app.post_json_api(
            url, child, auth=group_mem.auth,
            expect_errors=True
        )
        assert res.status_code == 403
        project.update_osf_group(group, permissions.WRITE)
        res = app.post_json_api(
            url, child, auth=group_mem.auth,
            expect_errors=True
        )
        assert res.status_code == 201
        # test_creates_child_no_type
        child = {
            'data': {
                'attributes': {
                    'title': 'child',
                    'description': 'this is a child project',
                    'category': 'project',
                }
            }
        }
        res = app.post_json_api(url, child, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/type'
        # test_creates_child_incorrect_type
        child = {
            'data': {
                'type': 'Wrong type.',
                'attributes': {
                    'title': 'child',
                    'description': 'this is a child project',
                    'category': 'project',
                }
            }
        }
        res = app.post_json_api(url, child, auth=user.auth, expect_errors=True)
        assert res.status_code == 409
        assert res.json['errors'][0]['detail'] == 'This resource has a type of "nodes", but you set the json body\'s type field to "Wrong type.". You probably need to change the type field to match the resource\'s type.'
        # test_creates_child_properties_not_nested
        # `category` placed outside `attributes` must be treated as missing.
        child = {
            'data': {
                'attributes': {
                    'title': 'child',
                    'description': 'this is a child project'
                },
                'category': 'project'
            }
        }
        res = app.post_json_api(url, child, auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field is required.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/attributes/category'
    def test_creates_child_logged_in_write_contributor(
            self, app, user, project, child, url):
        """A write contributor can create a child; a PROJECT_CREATED log is added."""
        write_contrib = AuthUserFactory()
        project.add_contributor(
            write_contrib,
            permissions=permissions.WRITE,
            auth=Auth(user),
            save=True)
        res = app.post_json_api(url, child, auth=write_contrib.auth)
        assert res.status_code == 201
        assert res.json['data']['attributes']['title'] == child['data']['attributes']['title']
        assert res.json['data']['attributes']['description'] == child['data']['attributes']['description']
        assert res.json['data']['attributes']['category'] == child['data']['attributes']['category']
        project.reload()
        child_id = res.json['data']['id']
        assert child_id == project.nodes[0]._id
        assert AbstractNode.load(child_id).logs.latest(
        ).action == NodeLog.PROJECT_CREATED
    def test_creates_child_logged_in_owner(
            self, app, user, project, child, url):
        """The project creator can create a child node."""
        res = app.post_json_api(url, child, auth=user.auth)
        assert res.status_code == 201
        assert res.json['data']['attributes']['title'] == child['data']['attributes']['title']
        assert res.json['data']['attributes']['description'] == child['data']['attributes']['description']
        assert res.json['data']['attributes']['category'] == child['data']['attributes']['category']
        project.reload()
        assert res.json['data']['id'] == project.nodes[0]._id
        assert project.nodes[0].logs.latest().action == NodeLog.PROJECT_CREATED
    def test_creates_child_creates_child_and_sanitizes_html_logged_in_owner(
            self, app, user, project, url):
        """HTML in title/description is stripped before storage."""
        title = '<em>Reasonable</em> <strong>Project</strong>'
        description = 'An <script>alert("even reasonabler")</script> child'
        res = app.post_json_api(url, {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': title,
                    'description': description,
                    'category': 'project',
                    'public': True
                }
            }
        }, auth=user.auth)
        child_id = res.json['data']['id']
        assert res.status_code == 201
        url = '/{}nodes/{}/'.format(API_BASE, child_id)
        res = app.get(url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == strip_html(title)
        assert res.json['data']['attributes']['description'] == strip_html(
            description)
        assert res.json['data']['attributes']['category'] == 'project'
        project.reload()
        child_id = res.json['data']['id']
        assert child_id == project.nodes[0]._id
        assert AbstractNode.load(child_id).logs.latest(
        ).action == NodeLog.PROJECT_CREATED
    def test_cannot_create_child_on_a_registration(self, app, user, project):
        """Registrations are immutable snapshots: child creation returns 404."""
        registration = RegistrationFactory(project=project, creator=user)
        url = '/{}nodes/{}/children/'.format(API_BASE, registration._id)
        res = app.post_json_api(url, {
            'data': {
                'type': 'nodes',
                'attributes': {
                    'title': fake.catch_phrase(),
                    'description': fake.bs(),
                    'category': 'project',
                    'public': True,
                }
            }
        }, auth=user.auth, expect_errors=True)
        assert res.status_code == 404
@pytest.mark.django_db
class TestNodeChildrenBulkCreate:
    """Tests for bulk POST /nodes/{node_id}/children/.

    Covers blank requests, the 100-item bulk limit, permission tiers,
    HTML sanitization, registrations rejecting creation, and per-item
    payload validation (note error pointers use the item index, e.g.
    ``/data/1/type``).
    """
    @pytest.fixture()
    def project(self, user):
        return ProjectFactory(creator=user, is_public=True)
    @pytest.fixture()
    def url(self, project):
        return '/{}nodes/{}/children/'.format(API_BASE, project._id)
    @pytest.fixture()
    def child_one(self):
        # Bulk payload items are bare resource objects (no 'data' wrapper).
        return {
            'type': 'nodes',
            'attributes': {
                'title': 'child',
                'description': 'this is a child project',
                'category': 'project'
            }
        }
    @pytest.fixture()
    def child_two(self):
        return {
            'type': 'nodes',
            'attributes': {
                'title': 'second child',
                'description': 'this is my hypothesis',
                'category': 'hypothesis'
            }
        }
    def test_bulk_children_create_blank_request(self, app, user, url):
        """A bulk POST with no body is a 400."""
        res = app.post_json_api(
            url, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
    def test_bulk_creates_children_limits(self, app, user, child_one, url):
        """Exceeding the 100-item bulk limit is rejected."""
        res = app.post_json_api(
            url, {'data': [child_one] * 101},
            auth=user.auth, expect_errors=True, bulk=True
        )
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Bulk operation limit is 100, got 101.'
        assert res.json['errors'][0]['source']['pointer'] == '/data'
    def test_bulk_creates_children_auth_errors(
            self, app, user, project, child_one, child_two, url):
        """Anonymous, read-only, and non-contributor requests are rejected."""
        # test_bulk_creates_children_logged_out_user
        res = app.post_json_api(
            url,
            {'data': [child_one, child_two]},
            expect_errors=True, bulk=True
        )
        assert res.status_code == 401
        project.reload()
        assert len(project.nodes) == 0
        # test_bulk_creates_children_logged_in_read_contributor
        read_contrib = AuthUserFactory()
        project.add_contributor(
            read_contrib,
            permissions=permissions.READ,
            auth=Auth(user),
            save=True)
        res = app.post_json_api(
            url,
            {'data': [child_one, child_two]},
            auth=read_contrib.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        project.reload()
        assert len(project.nodes) == 0
        # test_bulk_creates_children_logged_in_non_contributor
        non_contrib = AuthUserFactory()
        res = app.post_json_api(
            url,
            {'data': [child_one, child_two]},
            auth=non_contrib.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        project.reload()
        assert len(project.nodes) == 0
    def test_bulk_creates_children_logged_in_owner(
            self, app, user, project, child_one, child_two, url):
        """The creator can bulk-create; results preserve payload order."""
        res = app.post_json_api(
            url,
            {'data': [child_one, child_two]},
            auth=user.auth, bulk=True)
        assert res.status_code == 201
        assert res.json['data'][0]['attributes']['title'] == child_one['attributes']['title']
        assert res.json['data'][0]['attributes']['description'] == child_one['attributes']['description']
        assert res.json['data'][0]['attributes']['category'] == child_one['attributes']['category']
        assert res.json['data'][1]['attributes']['title'] == child_two['attributes']['title']
        assert res.json['data'][1]['attributes']['description'] == child_two['attributes']['description']
        assert res.json['data'][1]['attributes']['category'] == child_two['attributes']['category']
        project.reload()
        nodes = project.nodes
        assert res.json['data'][0]['id'] == nodes[0]._id
        assert res.json['data'][1]['id'] == nodes[1]._id
        assert nodes[0].logs.latest().action == NodeLog.PROJECT_CREATED
        assert nodes[1].logs.latest().action == NodeLog.PROJECT_CREATED
    def test_bulk_creates_children_child_logged_in_write_contributor(
            self, app, user, project, child_one, child_two, url):
        """A write contributor can bulk-create children."""
        write_contrib = AuthUserFactory()
        project.add_contributor(
            write_contrib,
            permissions=permissions.WRITE,
            auth=Auth(user),
            save=True)
        res = app.post_json_api(
            url,
            {'data': [child_one, child_two]},
            auth=write_contrib.auth, bulk=True)
        assert res.status_code == 201
        assert res.json['data'][0]['attributes']['title'] == child_one['attributes']['title']
        assert res.json['data'][0]['attributes']['description'] == child_one['attributes']['description']
        assert res.json['data'][0]['attributes']['category'] == child_one['attributes']['category']
        assert res.json['data'][1]['attributes']['title'] == child_two['attributes']['title']
        assert res.json['data'][1]['attributes']['description'] == child_two['attributes']['description']
        assert res.json['data'][1]['attributes']['category'] == child_two['attributes']['category']
        project.reload()
        child_id = res.json['data'][0]['id']
        child_two_id = res.json['data'][1]['id']
        nodes = project.nodes
        assert child_id == nodes[0]._id
        assert child_two_id == nodes[1]._id
        assert AbstractNode.load(child_id).logs.latest(
        ).action == NodeLog.PROJECT_CREATED
        assert nodes[1].logs.latest().action == NodeLog.PROJECT_CREATED
    def test_bulk_creates_children_and_sanitizes_html_logged_in_owner(
            self, app, user, project, url):
        """HTML in bulk-created title/description is stripped before storage."""
        title = '<em>Reasoning</em> <strong>Aboot Projects</strong>'
        description = 'A <script>alert("super reasonable")</script> child'
        res = app.post_json_api(url, {
            'data': [{
                'type': 'nodes',
                'attributes': {
                    'title': title,
                    'description': description,
                    'category': 'project',
                    'public': True
                }
            }]
        }, auth=user.auth, bulk=True)
        child_id = res.json['data'][0]['id']
        assert res.status_code == 201
        url = '/{}nodes/{}/'.format(API_BASE, child_id)
        res = app.get(url, auth=user.auth)
        assert res.json['data']['attributes']['title'] == strip_html(title)
        assert res.json['data']['attributes']['description'] == strip_html(
            description)
        assert res.json['data']['attributes']['category'] == 'project'
        project.reload()
        child_id = res.json['data']['id']
        assert child_id == project.nodes[0]._id
        assert AbstractNode.load(child_id).logs.latest(
        ).action == NodeLog.PROJECT_CREATED
    def test_cannot_bulk_create_children_on_a_registration(
            self, app, user, project, child_two):
        """Registrations reject bulk child creation with 404; nothing is created."""
        registration = RegistrationFactory(project=project, creator=user)
        url = '/{}nodes/{}/children/'.format(API_BASE, registration._id)
        res = app.post_json_api(url, {
            'data': [child_two, {
                'type': 'nodes',
                'attributes': {
                    'title': fake.catch_phrase(),
                    'description': fake.bs(),
                    'category': 'project',
                    'public': True,
                }
            }]
        }, auth=user.auth, expect_errors=True, bulk=True)
        assert res.status_code == 404
        project.reload()
        assert len(project.nodes) == 0
    def test_bulk_creates_children_payload_errors(
            self, app, user, project, child_two, url):
        """Per-item validation errors abort the whole bulk request."""
        # def test_bulk_creates_children_no_type(self, app, user, project,
        # child_two, url):
        child = {
            'data': [child_two, {
                'attributes': {
                    'title': 'child',
                    'description': 'this is a child project',
                    'category': 'project',
                }
            }]
        }
        res = app.post_json_api(
            url, child, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/1/type'
        project.reload()
        assert len(project.nodes) == 0
        # def test_bulk_creates_children_incorrect_type(self, app, user, project,
        # child_two, url):
        child = {
            'data': [child_two, {
                'type': 'Wrong type.',
                'attributes': {
                    'title': 'child',
                    'description': 'this is a child project',
                    'category': 'project',
                }
            }]
        }
        res = app.post_json_api(
            url, child, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 409
        assert res.json['errors'][0]['detail'] == 'This resource has a type of "nodes", but you set the json body\'s type field to "Wrong type.". You probably need to change the type field to match the resource\'s type.'
        project.reload()
        assert len(project.nodes) == 0
        # def test_bulk_creates_children_properties_not_nested(self, app, user,
        # project, child_two, url):
        child = {
            'data': [child_two, {
                'title': 'child',
                'description': 'this is a child project',
                'category': 'project',
            }]
        }
        res = app.post_json_api(
            url, child, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field is required.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/1/attributes/category'
        project.reload()
        assert len(project.nodes) == 0
| saradbowman/osf.io | api_tests/nodes/views/test_node_children_list.py | Python | apache-2.0 | 29,554 |
/*
* Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights
* Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.gamelift.model.transform;
import java.util.Map;
import java.util.Map.Entry;
import java.math.*;
import java.nio.ByteBuffer;
import com.amazonaws.services.gamelift.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
 * CreateAliasResult JSON Unmarshaller.
 *
 * Populates a {@link CreateAliasResult} from a streaming JSON token
 * source, extracting the single top-level "Alias" member. Follows the
 * generated AWS SDK unmarshaller pattern.
 */
public class CreateAliasResultJsonUnmarshaller implements
        Unmarshaller<CreateAliasResult, JsonUnmarshallerContext> {
    public CreateAliasResult unmarshall(JsonUnmarshallerContext context)
            throws Exception {
        CreateAliasResult createAliasResult = new CreateAliasResult();
        // Remember where parsing started so we know when we have walked
        // back out of this result's JSON object.
        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        int targetDepth = originalDepth + 1;
        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        if (token == VALUE_NULL)
            return null;
        // Scan tokens one level below the root for the "Alias" member;
        // all other members are skipped.
        while (true) {
            if (token == null)
                break;
            if (token == FIELD_NAME || token == START_OBJECT) {
                if (context.testExpression("Alias", targetDepth)) {
                    context.nextToken();
                    createAliasResult.setAlias(AliasJsonUnmarshaller
                            .getInstance().unmarshall(context));
                }
            } else if (token == END_ARRAY || token == END_OBJECT) {
                // Stop once the parser has climbed back to (or above) the
                // depth at which this unmarshaller started.
                if (context.getLastParsedParentElement() == null
                        || context.getLastParsedParentElement().equals(
                                currentParentElement)) {
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }
        return createAliasResult;
    }
    private static CreateAliasResultJsonUnmarshaller instance;
    // NOTE(review): lazy init is not synchronized; this matches the
    // generated SDK pattern and is benign because instances hold no state.
    public static CreateAliasResultJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new CreateAliasResultJsonUnmarshaller();
        return instance;
    }
}
| flofreud/aws-sdk-java | aws-java-sdk-gamelift/src/main/java/com/amazonaws/services/gamelift/model/transform/CreateAliasResultJsonUnmarshaller.java | Java | apache-2.0 | 2,842 |
// Copyright 2017 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.android.desugar.io;
import static com.google.common.base.Preconditions.checkArgument;
import com.google.common.io.ByteStreams;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.zip.CRC32;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
/** Output provider that materializes every written file into a single zip archive. */
class ZipOutputFileProvider implements OutputFileProvider {
  private final ZipOutputStream out;

  public ZipOutputFileProvider(Path root) throws IOException {
    this.out = new ZipOutputStream(new BufferedOutputStream(Files.newOutputStream(root)));
  }

  @Override
  public void copyFrom(String filename, InputFileProvider inputFileProvider) throws IOException {
    // TODO(bazel-team): Avoid de- and re-compressing resource files
    out.putNextEntry(inputFileProvider.getZipEntry(filename));
    try (InputStream source = inputFileProvider.getInputStream(filename)) {
      ByteStreams.copy(source, out);
    }
    out.closeEntry();
  }

  @Override
  public void write(String filename, byte[] content) throws IOException {
    checkArgument(
        filename.equals(DESUGAR_DEPS_FILENAME) || filename.endsWith(".class"),
        "Expect file to be copied: %s",
        filename);
    writeStoredEntry(out, filename, content);
  }

  @Override
  public void close() throws IOException {
    out.close();
  }

  /** Writes {@code content} as an uncompressed (STORED) zip entry named {@code filename}. */
  private static void writeStoredEntry(ZipOutputStream out, String filename, byte[] content)
      throws IOException {
    // STORED entries require size and CRC to be supplied up front.
    CRC32 crc = new CRC32();
    crc.update(content);

    ZipEntry entry = new ZipEntry(filename);
    entry.setTime(0L); // stable timestamp (Jan 1 1980) for deterministic output
    entry.setCrc(crc.getValue());
    entry.setSize(content.length);
    entry.setCompressedSize(content.length);
    // Store uncompressed: this zip is an intermediate artifact that we convert to .dex.
    entry.setMethod(ZipEntry.STORED);

    out.putNextEntry(entry);
    out.write(content);
    out.closeEntry();
  }
}
| dropbox/bazel | src/tools/android/java/com/google/devtools/build/android/desugar/io/ZipOutputFileProvider.java | Java | apache-2.0 | 2,761 |
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
.PHONY: build push
# Registry namespace and image coordinates for the Elasticsearch image.
PREFIX = quay.io/fluentd_elasticsearch
IMAGE = elasticsearch
TAG = v7.16.2
# Build the image tagged with both the pinned version and "latest".
build:
	docker build --tag ${PREFIX}/${IMAGE}:${TAG} .
	docker build --tag ${PREFIX}/${IMAGE}:latest .
# Push both tags to the registry.
push:
	docker push ${PREFIX}/${IMAGE}:${TAG}
	docker push ${PREFIX}/${IMAGE}:latest
| jingxu97/kubernetes | cluster/addons/fluentd-elasticsearch/es-image/Makefile | Makefile | apache-2.0 | 872 |
// Copyright 2009 the Sputnik authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
info: The String.prototype.charAt.length property has the attribute DontEnum
es5id: 15.5.4.4_A8
description: >
Checking if enumerating the String.prototype.charAt.length
property fails
---*/
//////////////////////////////////////////////////////////////////////////////
//CHECK#0
// Sanity check: charAt must own a 'length' property before we test its attributes.
if (!(String.prototype.charAt.hasOwnProperty('length'))) {
  $ERROR('#0: String.prototype.charAt.hasOwnProperty(\'length\') return true. Actual: '+String.prototype.charAt.hasOwnProperty('length'));
}
//
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// CHECK#1
// 'length' must not be enumerable when queried directly.
if (String.prototype.charAt.propertyIsEnumerable('length')) {
  $ERROR('#1: String.prototype.charAt.propertyIsEnumerable(\'length\') return false. Actual: '+String.prototype.charAt.propertyIsEnumerable('length'));
}
//
//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// CHECK#2
// 'length' must also not surface during for-in enumeration.
var count=0;
for (var p in String.prototype.charAt){
  if (p==="length") count++;
}
if (count !== 0) {
  $ERROR('#2: count=0; for (p in String.prototype.charAt){if (p==="length") count++;}; count === 0. Actual: count ==='+count );
}
//
//////////////////////////////////////////////////////////////////////////////
| m0ppers/arangodb | 3rdParty/V8/V8-5.0.71.39/test/test262/data/test/built-ins/String/prototype/charAt/S15.5.4.4_A8.js | JavaScript | apache-2.0 | 1,510 |
package imagestreamimport
import (
"fmt"
"net/http"
"time"
"github.com/golang/glog"
gocontext "golang.org/x/net/context"
kapierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/diff"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
apirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/rest"
kapi "k8s.io/kubernetes/pkg/api"
authorizationapi "github.com/openshift/origin/pkg/authorization/api"
"github.com/openshift/origin/pkg/client"
serverapi "github.com/openshift/origin/pkg/cmd/server/api"
"github.com/openshift/origin/pkg/dockerregistry"
"github.com/openshift/origin/pkg/image/api"
imageapiv1 "github.com/openshift/origin/pkg/image/api/v1"
"github.com/openshift/origin/pkg/image/importer"
"github.com/openshift/origin/pkg/image/registry/imagestream"
quotautil "github.com/openshift/origin/pkg/quota/util"
)
// ImporterFunc returns an instance of the importer that should be used per invocation.
type ImporterFunc func(r importer.RepositoryRetriever) importer.Interface
// ImporterDockerRegistryFunc returns an instance of a docker client that should be used per invocation of import,
// may be nil if no legacy import capability is required.
type ImporterDockerRegistryFunc func() dockerregistry.Client
// REST implements the RESTStorage interface for ImageStreamImport
type REST struct {
	importFn          ImporterFunc                        // builds the importer used for each Create call
	streams           imagestream.Registry                // read access to existing image streams
	internalStreams   rest.CreaterUpdater                 // storage used to persist created/updated streams
	images            rest.Creater                        // storage used to persist imported images
	secrets           client.ImageStreamSecretsNamespacer // lazily supplies pull secrets for the stream
	transport         http.RoundTripper                   // handed to importer.NewContext
	insecureTransport http.RoundTripper                   // handed to importer.NewContext (presumably for insecure registries — confirm)
	clientFn          ImporterDockerRegistryFunc          // optional legacy v1 docker registry client factory
	strategy          *strategy                           // create strategy; also validates the registry whitelist
	sarClient         client.SubjectAccessReviewInterface // used to check image-creation permissions
}
// NewREST returns a REST storage implementation that handles importing images. The clientFn argument is optional
// if v1 Docker Registry importing is not required. Insecure transport is optional, and both transports should not
// include client certs unless you wish to allow the entire cluster to import using those certs.
// All collaborators are captured as-is; no validation is performed here.
func NewREST(importFn ImporterFunc, streams imagestream.Registry, internalStreams rest.CreaterUpdater,
	images rest.Creater, secrets client.ImageStreamSecretsNamespacer,
	transport, insecureTransport http.RoundTripper,
	clientFn ImporterDockerRegistryFunc,
	allowedImportRegistries *serverapi.AllowedRegistries,
	registryFn api.DefaultRegistryFunc,
	sarClient client.SubjectAccessReviewInterface,
) *REST {
	return &REST{
		importFn:          importFn,
		streams:           streams,
		internalStreams:   internalStreams,
		images:            images,
		secrets:           secrets,
		transport:         transport,
		insecureTransport: insecureTransport,
		clientFn:          clientFn,
		strategy:          NewStrategy(allowedImportRegistries, registryFn),
		sarClient:         sarClient,
	}
}
// New is only implemented to make REST implement RESTStorage; it returns an
// empty ImageStreamImport object.
func (r *REST) New() runtime.Object {
	return &api.ImageStreamImport{}
}
// Create runs an image import described by an ImageStreamImport: it authorizes the
// caller, executes the import against the remote registry, records the results on
// the (possibly newly created) target image stream, persists the stream, and returns
// the ImageStreamImport with its Status populated.
func (r *REST) Create(ctx apirequest.Context, obj runtime.Object) (runtime.Object, error) {
	isi, ok := obj.(*api.ImageStreamImport)
	if !ok {
		return nil, kapierrors.NewBadRequest(fmt.Sprintf("obj is not an ImageStreamImport: %#v", obj))
	}
	// Remember the caller-supplied metadata for the optimistic-concurrency checks below.
	inputMeta := isi.ObjectMeta
	if err := rest.BeforeCreate(r.strategy, ctx, obj); err != nil {
		return nil, err
	}
	// Check if the user is allowed to create Images or ImageStreamMappings.
	// In case the user is allowed to create them, do not validate the ImageStreamImport
	// registry location against the registry whitelist, but instead allow to create any
	// image from any registry.
	user, ok := apirequest.UserFrom(ctx)
	if !ok {
		return nil, kapierrors.NewBadRequest("unable to get user from context")
	}
	isCreateImage, err := r.sarClient.Create(authorizationapi.AddUserToSAR(user,
		&authorizationapi.SubjectAccessReview{
			Action: authorizationapi.Action{
				Verb:     "create",
				Group:    api.GroupName,
				Resource: "images",
			},
		},
	))
	if err != nil {
		return nil, err
	}
	isCreateImageStreamMapping, err := r.sarClient.Create(authorizationapi.AddUserToSAR(user,
		&authorizationapi.SubjectAccessReview{
			Action: authorizationapi.Action{
				Verb:     "create",
				Group:    api.GroupName,
				Resource: "imagestreammapping",
			},
		},
	))
	if err != nil {
		return nil, err
	}
	if !isCreateImage.Allowed && !isCreateImageStreamMapping.Allowed {
		if errs := r.strategy.ValidateAllowedRegistries(isi); len(errs) != 0 {
			return nil, kapierrors.NewInvalid(api.Kind("ImageStreamImport"), isi.Name, errs)
		}
	}
	namespace, ok := apirequest.NamespaceFrom(ctx)
	if !ok {
		return nil, kapierrors.NewBadRequest("a namespace must be specified to import images")
	}
	// Optionally stash a legacy v1 registry client on the context for the importer.
	if r.clientFn != nil {
		if client := r.clientFn(); client != nil {
			ctx = apirequest.WithValue(ctx, importer.ContextKeyV1RegistryClient, client)
		}
	}
	// only load secrets if we need them
	credentials := importer.NewLazyCredentialsForSecrets(func() ([]kapi.Secret, error) {
		secrets, err := r.secrets.ImageStreamSecrets(namespace).Secrets(isi.Name, metav1.ListOptions{})
		if err != nil {
			return nil, err
		}
		return secrets.Items, nil
	})
	// Execute the actual import against the remote registry; results land on isi.Status.
	importCtx := importer.NewContext(r.transport, r.insecureTransport).WithCredentials(credentials)
	imports := r.importFn(importCtx)
	if err := imports.Import(ctx.(gocontext.Context), isi); err != nil {
		return nil, kapierrors.NewInternalError(err)
	}
	// if we encountered an error loading credentials and any images could not be retrieved with an access
	// related error, modify the message.
	// TODO: set a status cause
	if err := credentials.Err(); err != nil {
		for i, image := range isi.Status.Images {
			switch image.Status.Reason {
			case metav1.StatusReasonUnauthorized, metav1.StatusReasonForbidden:
				isi.Status.Images[i].Status.Message = fmt.Sprintf("Unable to load secrets for this image: %v; (%s)", err, image.Status.Message)
			}
		}
		if r := isi.Status.Repository; r != nil {
			switch r.Status.Reason {
			case metav1.StatusReasonUnauthorized, metav1.StatusReasonForbidden:
				r.Status.Message = fmt.Sprintf("Unable to load secrets for this repository: %v; (%s)", err, r.Status.Message)
			}
		}
	}
	// TODO: perform the transformation of the image stream and return it with the ISI if import is false
	// so that clients can see what the resulting object would look like.
	if !isi.Spec.Import {
		clearManifests(isi)
		return isi, nil
	}
	// Locate or lazily create the target image stream.
	create := false
	stream, err := r.streams.GetImageStream(ctx, isi.Name, &metav1.GetOptions{})
	if err != nil {
		if !kapierrors.IsNotFound(err) {
			return nil, err
		}
		// consistency check, stream must exist
		if len(inputMeta.ResourceVersion) > 0 || len(inputMeta.UID) > 0 {
			return nil, err
		}
		create = true
		stream = &api.ImageStream{
			ObjectMeta: metav1.ObjectMeta{
				Name:       isi.Name,
				Namespace:  namespace,
				Generation: 0,
			},
		}
	} else {
		// Optimistic concurrency: honor the caller's ResourceVersion/UID preconditions.
		if len(inputMeta.ResourceVersion) > 0 && inputMeta.ResourceVersion != stream.ResourceVersion {
			glog.V(4).Infof("DEBUG: mismatch between requested ResourceVersion %s and located ResourceVersion %s", inputMeta.ResourceVersion, stream.ResourceVersion)
			return nil, kapierrors.NewConflict(api.Resource("imagestream"), inputMeta.Name, fmt.Errorf("the image stream was updated from %q to %q", inputMeta.ResourceVersion, stream.ResourceVersion))
		}
		if len(inputMeta.UID) > 0 && inputMeta.UID != stream.UID {
			glog.V(4).Infof("DEBUG: mismatch between requested UID %s and located UID %s", inputMeta.UID, stream.UID)
			return nil, kapierrors.NewNotFound(api.Resource("imagestream"), inputMeta.Name)
		}
	}
	if stream.Annotations == nil {
		stream.Annotations = make(map[string]string)
	}
	now := metav1.Now()
	_, hasAnnotation := stream.Annotations[api.DockerImageRepositoryCheckAnnotation]
	nextGeneration := stream.Generation + 1
	// Keep a deep copy so we can detect changes and roll back status on limit errors.
	original, err := kapi.Scheme.DeepCopy(stream)
	if err != nil {
		return nil, err
	}
	// walk the retrieved images, ensuring each one exists in etcd
	importedImages := make(map[string]error)
	updatedImages := make(map[string]*api.Image)
	if spec := isi.Spec.Repository; spec != nil {
		for i, status := range isi.Status.Repository.Images {
			if checkImportFailure(status, stream, status.Tag, nextGeneration, now) {
				continue
			}
			image := status.Image
			ref, err := api.ParseDockerImageReference(image.DockerImageReference)
			if err != nil {
				utilruntime.HandleError(fmt.Errorf("unable to parse image reference during import: %v", err))
				continue
			}
			from, err := api.ParseDockerImageReference(spec.From.Name)
			if err != nil {
				utilruntime.HandleError(fmt.Errorf("unable to parse from reference during import: %v", err))
				continue
			}
			tag := ref.Tag
			if len(status.Tag) > 0 {
				tag = status.Tag
			}
			// we've imported a set of tags, ensure spec tag will point to this for later imports
			from.ID, from.Tag = "", tag
			if updated, ok := r.importSuccessful(ctx, image, stream, tag, from.Exact(), nextGeneration,
				now, spec.ImportPolicy, spec.ReferencePolicy, importedImages, updatedImages); ok {
				isi.Status.Repository.Images[i].Image = updated
			}
		}
	}
	for i, spec := range isi.Spec.Images {
		if spec.To == nil {
			continue
		}
		tag := spec.To.Name
		// record a failure condition
		status := isi.Status.Images[i]
		if checkImportFailure(status, stream, tag, nextGeneration, now) {
			// ensure that we have a spec tag set
			ensureSpecTag(stream, tag, spec.From.Name, spec.ImportPolicy, spec.ReferencePolicy, false)
			continue
		}
		// record success
		image := status.Image
		if updated, ok := r.importSuccessful(ctx, image, stream, tag, spec.From.Name, nextGeneration,
			now, spec.ImportPolicy, spec.ReferencePolicy, importedImages, updatedImages); ok {
			isi.Status.Images[i].Image = updated
		}
	}
	// TODO: should we allow partial failure?
	for _, err := range importedImages {
		if err != nil {
			return nil, err
		}
	}
	clearManifests(isi)
	// ensure defaulting is applied by round trip converting
	// TODO: convert to using versioned types.
	external, err := kapi.Scheme.ConvertToVersion(stream, imageapiv1.SchemeGroupVersion)
	if err != nil {
		return nil, err
	}
	kapi.Scheme.Default(external)
	internal, err := kapi.Scheme.ConvertToVersion(external, api.SchemeGroupVersion)
	if err != nil {
		return nil, err
	}
	stream = internal.(*api.ImageStream)
	// if and only if we have changes between the original and the imported stream, trigger
	// an import
	hasChanges := !kapi.Semantic.DeepEqual(original, stream)
	if create {
		stream.Annotations[api.DockerImageRepositoryCheckAnnotation] = now.UTC().Format(time.RFC3339)
		glog.V(4).Infof("create new stream: %#v", stream)
		obj, err = r.internalStreams.Create(ctx, stream)
	} else {
		if hasAnnotation && !hasChanges {
			glog.V(4).Infof("stream did not change: %#v", stream)
			obj, err = original.(*api.ImageStream), nil
		} else {
			if glog.V(4) {
				glog.V(4).Infof("updating stream %s", diff.ObjectDiff(original, stream))
			}
			stream.Annotations[api.DockerImageRepositoryCheckAnnotation] = now.UTC().Format(time.RFC3339)
			obj, _, err = r.internalStreams.Update(ctx, stream.Name, rest.DefaultUpdatedObjectInfo(stream, kapi.Scheme))
		}
	}
	if err != nil {
		// if we have an admission limit error then record the conditions on the original stream. Quota errors
		// will be recorded by the importer.
		if quotautil.IsErrorLimitExceeded(err) {
			originalStream := original.(*api.ImageStream)
			recordLimitExceededStatus(originalStream, stream, err, now, nextGeneration)
			var limitErr error
			obj, _, limitErr = r.internalStreams.Update(ctx, stream.Name, rest.DefaultUpdatedObjectInfo(originalStream, kapi.Scheme))
			if limitErr != nil {
				utilruntime.HandleError(fmt.Errorf("failed to record limit exceeded status in image stream %s/%s: %v", stream.Namespace, stream.Name, limitErr))
			}
		}
		return nil, err
	}
	isi.Status.Import = obj.(*api.ImageStream)
	return isi, nil
}
// recordLimitExceededStatus marks every tag that appeared in newStream but not in
// originalStream with an import-failed condition carrying the limit error.
func recordLimitExceededStatus(originalStream *api.ImageStream, newStream *api.ImageStream, err error, now metav1.Time, nextGeneration int64) {
	for tag := range newStream.Status.Tags {
		_, existedBefore := originalStream.Status.Tags[tag]
		if existedBefore {
			continue
		}
		api.SetTagConditions(originalStream, tag, newImportFailedCondition(err, nextGeneration, now))
	}
}
// checkImportFailure inspects a single image import status. When the import did not
// succeed, it records an ImportSuccess=false condition on the stream — resolving the
// tag from the status or the image reference when not supplied — and zeroes the
// matching spec tag's generation so a later import will retry. It returns true when
// a failure was recorded and false when the import succeeded.
func checkImportFailure(status api.ImageImportStatus, stream *api.ImageStream, tag string, nextGeneration int64, now metav1.Time) bool {
	if status.Image != nil && status.Status.Status == metav1.StatusSuccess {
		return false
	}
	message := status.Status.Message
	if len(message) == 0 {
		message = "unknown error prevented import"
	}
	condition := api.TagEventCondition{
		Type:               api.ImportSuccess,
		Status:             kapi.ConditionFalse,
		Message:            message,
		Reason:             string(status.Status.Reason),
		Generation:         nextGeneration,
		LastTransitionTime: now,
	}
	// Fall back to the status tag, then to the tag embedded in the image reference.
	if tag == "" {
		if len(status.Tag) > 0 {
			tag = status.Tag
		} else if status.Image != nil {
			if ref, err := api.ParseDockerImageReference(status.Image.DockerImageReference); err == nil {
				tag = ref.Tag
			}
		}
	}
	if !api.HasTagCondition(stream, tag, condition) {
		api.SetTagConditions(stream, tag, condition)
		if tagRef, ok := stream.Spec.Tags[tag]; ok {
			// Zero the generation so the tag is considered stale and retried later.
			zero := int64(0)
			tagRef.Generation = &zero
			stream.Spec.Tags[tag] = tagRef
		}
	}
	return true
}
// ensureSpecTag guarantees that the spec tag is set with the provided from,
// importPolicy and referencePolicy. An existing tag is returned untouched unless
// reset is true, in which case it is overwritten in place.
func ensureSpecTag(stream *api.ImageStream, tag, from string, importPolicy api.TagImportPolicy,
	referencePolicy api.TagReferencePolicy, reset bool) api.TagReference {
	if stream.Spec.Tags == nil {
		stream.Spec.Tags = make(map[string]api.TagReference)
	}
	existing, found := stream.Spec.Tags[tag]
	if found && !reset {
		return existing
	}
	zero := int64(0)
	existing.From = &kapi.ObjectReference{
		Kind: "DockerImage",
		Name: from,
	}
	existing.Generation = &zero
	existing.ImportPolicy = importPolicy
	existing.ReferencePolicy = referencePolicy
	stream.Spec.Tags[tag] = existing
	return existing
}
// importSuccessful records a successful import into an image stream, setting the spec tag, status tag or conditions, and ensuring
// the image is created in etcd. Images are cached so they are not created multiple times in a row (when multiple tags point to the
// same image), and a failure to persist the image will be summarized before we update the stream. If an image was imported by this
// operation, it *replaces* the imported image (from the remote repository) with the updated image.
//
// It returns the persisted image and true when the caller should substitute it into
// the import status, or (nil, false) when the image could not be persisted (or a
// previous attempt for the same image already failed).
func (r *REST) importSuccessful(
	ctx apirequest.Context,
	image *api.Image, stream *api.ImageStream, tag string, from string, nextGeneration int64, now metav1.Time,
	importPolicy api.TagImportPolicy, referencePolicy api.TagReferencePolicy,
	importedImages map[string]error, updatedImages map[string]*api.Image,
) (*api.Image, bool) {
	r.strategy.PrepareImageForCreate(image)
	pullSpec, _ := api.MostAccuratePullSpec(image.DockerImageReference, image.Name, "")
	tagEvent := api.TagEvent{
		Created:              now,
		DockerImageReference: pullSpec,
		Image:                image.Name,
		Generation:           nextGeneration,
	}
	if stream.Spec.Tags == nil {
		stream.Spec.Tags = make(map[string]api.TagReference)
	}
	// ensure the spec and status tag match the imported image
	changed := api.DifferentTagEvent(stream, tag, tagEvent) || api.DifferentTagGeneration(stream, tag)
	specTag, ok := stream.Spec.Tags[tag]
	if changed || !ok {
		specTag = ensureSpecTag(stream, tag, from, importPolicy, referencePolicy, true)
		api.AddTagEventToImageStream(stream, tag, tagEvent)
	}
	// always reset the import policy
	specTag.ImportPolicy = importPolicy
	stream.Spec.Tags[tag] = specTag
	// import or reuse the image, and ensure tag conditions are set
	importErr, alreadyImported := importedImages[image.Name]
	if importErr != nil {
		api.SetTagConditions(stream, tag, newImportFailedCondition(importErr, nextGeneration, now))
	} else {
		api.SetTagConditions(stream, tag)
	}
	// create the image if it does not exist, otherwise cache the updated status from the store for use by other tags
	if alreadyImported {
		if updatedImage, ok := updatedImages[image.Name]; ok {
			return updatedImage, true
		}
		return nil, false
	}
	updated, err := r.images.Create(ctx, image)
	switch {
	case kapierrors.IsAlreadyExists(err):
		if err := api.ImageWithMetadata(image); err != nil {
			// Fixed: the format string previously ended with a literal "err" instead
			// of a %v verb, so the error value was never rendered in the log line.
			glog.V(4).Infof("Unable to update image metadata during image import when image already exists %q: %v", image.Name, err)
		}
		updated = image
		fallthrough
	case err == nil:
		updatedImage := updated.(*api.Image)
		updatedImages[image.Name] = updatedImage
		importedImages[image.Name] = nil
		return updatedImage, true
	default:
		importedImages[image.Name] = err
	}
	return nil, false
}
// clearManifests strips the raw manifest and config payloads from every imported
// image whose corresponding spec entry did not explicitly request them.
func clearManifests(isi *api.ImageStreamImport) {
	for i := range isi.Status.Images {
		if isi.Spec.Images[i].IncludeManifest {
			continue
		}
		if img := isi.Status.Images[i].Image; img != nil {
			img.DockerImageManifest = ""
			img.DockerImageConfig = ""
		}
	}
	if isi.Spec.Repository == nil || isi.Spec.Repository.IncludeManifest {
		return
	}
	for i := range isi.Status.Repository.Images {
		if img := isi.Status.Repository.Images[i].Image; img != nil {
			img.DockerImageManifest = ""
			img.DockerImageConfig = ""
		}
	}
}
// newImportFailedCondition builds a failed ImportSuccess condition for the given
// generation and time. When the error carries an APIStatus, its structured reason
// and message replace the plain error text.
func newImportFailedCondition(err error, gen int64, now metav1.Time) api.TagEventCondition {
	cond := api.TagEventCondition{
		Type:               api.ImportSuccess,
		Status:             kapi.ConditionFalse,
		Message:            err.Error(),
		Generation:         gen,
		LastTransitionTime: now,
	}
	statusErr, ok := err.(kapierrors.APIStatus)
	if ok {
		details := statusErr.Status()
		cond.Reason = string(details.Reason)
		cond.Message = details.Message
	}
	return cond
}
// invalidStatus builds the Status payload for a field-validation failure of the
// named kind at the given position.
func invalidStatus(kind, position string, errs ...*field.Error) metav1.Status {
	return kapierrors.NewInvalid(api.Kind(kind), position, errs).ErrStatus
}
| chlunde/origin | pkg/image/registry/imagestreamimport/rest.go | GO | apache-2.0 | 18,453 |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
FROM gcr.io/oss-fuzz-base/base-builder
# Build tooling needed for file's autotools-based build.
RUN apt-get update && apt-get install -y make autoconf automake libtool shtool
# Shallow-clone the upstream "file" sources.
RUN git clone --depth 1 https://github.com/file/file.git
WORKDIR file
# build.sh drives the fuzzer build; magic_fuzzer.cc is the fuzz target.
COPY build.sh magic_fuzzer.cc $SRC/
| googlefonts/oss-fuzz | projects/file/Dockerfile | Dockerfile | apache-2.0 | 884 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.scalajs.js.annotation
import scala.annotation.meta._
/** Marks the annotated class or object as being a member of the JavaScript
* global scope.
*
* The annotated class/object must also be annotated with `@js.native`, and
* therefore extend [[scala.scalajs.js.Any js.Any]].
*
* Given:
* {{{
* @js.native
* @JSGlobal
* class Foo extends js.Object
*
* @js.native
* @JSGlobal("Foobar")
* object Bar extends js.Object
*
* @js.native
* @JSGlobal("Lib.Babar")
* class Babar extends js.Object
* }}}
*
* The following mappings apply (`global` denotes the global scope):
*
* {{{
* Scala.js | JavaScript
* ------------------------+------------------
* new Foo() | new global.Foo()
* Bar | global.Foobar
* js.constructorOf[Babar] | global.Lib.Babar
* }}}
*
* @see [[http://www.scala-js.org/doc/calling-javascript.html Calling JavaScript from Scala.js]]
*/
@field @getter @setter
class JSGlobal extends scala.annotation.StaticAnnotation {
  /** Uses the given (possibly dotted, e.g. `"Lib.Babar"`) name in the global
   *  scope instead of the annotated entity's own name.
   */
  def this(name: String) = this()
}
| scala-js/scala-js | library/src/main/scala/scala/scalajs/js/annotation/JSGlobal.scala | Scala | apache-2.0 | 1,358 |
/**
* JBoss, Home of Professional Open Source.
* Copyright 2014-2022 Red Hat, Inc., and individual contributors
* as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jboss.pnc.coordinator.test;
import org.jboss.pnc.common.json.ConfigurationParseException;
import org.jboss.pnc.mock.repository.BuildConfigurationRepositoryMock;
import org.jboss.pnc.model.BuildConfiguration;
import org.jboss.pnc.model.BuildConfigurationSet;
import org.jboss.pnc.enums.RebuildMode;
import org.jboss.pnc.spi.datastore.DatastoreException;
import org.jboss.pnc.spi.exception.CoreException;
import org.junit.Before;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeoutException;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;
/**
 * Group consists of configA, configB and configC. <br/>
 * configC is independent, configB depends on configA. <br/>
 *
 *
 * config1 is an "outside" dependency of configA (it is not part of the group).
 *
 * <p>
 * Author: Michal Szynkiewicz, michal.l.szynkiewicz@gmail.com Date: 9/14/16 Time: 12:09 PM
 * </p>
 */
public class OutsideGroupDependentConfigsTest extends AbstractDependentBuildTest {
    // Out-of-group dependency of configA.
    private BuildConfiguration config1;
    private BuildConfiguration configA;
    private BuildConfiguration configB;
    private BuildConfigurationSet configSet;
    @Before
    public void initialize() throws DatastoreException, ConfigurationParseException {
        // Build the dependency graph: configB -> configA -> config1; configC standalone.
        config1 = buildConfig("1");
        configA = buildConfig("A", config1);
        configB = buildConfig("B", configA);
        BuildConfiguration configC = buildConfig("C");
        configSet = configSet(configA, configB, configC);
        // Stub the repository so predicate queries return the group's configs.
        buildConfigurationRepository = spy(new BuildConfigurationRepositoryMock());
        when(buildConfigurationRepository.queryWithPredicates(any()))
                .thenReturn(new ArrayList<>(configSet.getBuildConfigurations()));
        super.initialize();
        saveConfig(config1);
        configSet.getBuildConfigurations().forEach(this::saveConfig);
        // Every config already has a successful build record, so nothing is stale yet.
        insertNewBuildRecords(config1, configA, configB, configC);
        makeResult(configA).dependOn(config1);
    }
    @Test
    public void shouldNotRebuildIfDependencyIsNotRebuilt()
            throws CoreException, TimeoutException, InterruptedException {
        build(configSet, RebuildMode.IMPLICIT_DEPENDENCY_CHECK);
        waitForEmptyBuildQueue();
        // No dependency changed, so the implicit check should schedule nothing.
        List<BuildConfiguration> configsWithTasks = getBuiltConfigs();
        assertThat(configsWithTasks).isEmpty();
    }
    @Test
    public void shouldRebuildOnlyDependent() throws CoreException, TimeoutException, InterruptedException {
        // A newer build of config1 makes configA stale, which transitively makes configB stale.
        insertNewBuildRecords(config1);
        build(configSet, RebuildMode.IMPLICIT_DEPENDENCY_CHECK);
        waitForEmptyBuildQueue();
        List<BuildConfiguration> configsWithTasks = getBuiltConfigs();
        assertThat(configsWithTasks).hasSameElementsAs(Arrays.asList(configA, configB));
    }
} | project-ncl/pnc | build-coordinator/src/test/java/org/jboss/pnc/coordinator/test/OutsideGroupDependentConfigsTest.java | Java | apache-2.0 | 3,646 |
/** @prettier */
import { expect } from 'chai';
import { EMPTY, of, EmptyError, defer, throwError, Observable } from 'rxjs';
import { throwIfEmpty, mergeMap, retry, take } from 'rxjs/operators';
import { TestScheduler } from 'rxjs/testing';
import { observableMatcher } from '../helpers/observableMatcher';
/** @test {throwIfEmpty} */
describe('throwIfEmpty', () => {
  let rxTestScheduler: TestScheduler;
  beforeEach(() => {
    // Fresh scheduler per spec so virtual time does not leak between tests.
    rxTestScheduler = new TestScheduler(observableMatcher);
  });
  describe('with errorFactory', () => {
    it('should error when empty', () => {
      rxTestScheduler.run(({ cold, expectObservable }) => {
        const source = cold('----|');
        const expected = ' ----#';
        const result = source.pipe(throwIfEmpty(() => new Error('test')));
        expectObservable(result).toBe(expected, undefined, new Error('test'));
      });
    });
    it('should throw if empty', () => {
      const error = new Error('So empty inside');
      let thrown: any;
      EMPTY.pipe(throwIfEmpty(() => error)).subscribe({
        error(err) {
          thrown = err;
        },
      });
      // The exact error instance produced by the factory must be delivered.
      expect(thrown).to.equal(error);
    });
    it('should NOT throw if NOT empty', () => {
      const error = new Error('So empty inside');
      let thrown: any;
      of('test')
        .pipe(throwIfEmpty(() => error))
        .subscribe({
          error(err) {
            thrown = err;
          },
        });
      expect(thrown).to.be.undefined;
    });
    it('should pass values through', () => {
      rxTestScheduler.run(({ cold, expectObservable, expectSubscriptions }) => {
        const source = cold('----a---b---c---|');
        const sub1 = ' ^---------------!';
        const expected = ' ----a---b---c---|';
        const result = source.pipe(throwIfEmpty(() => new Error('test')));
        expectObservable(result).toBe(expected);
        expectSubscriptions(source.subscriptions).toBe([sub1]);
      });
    });
    it('should never when never', () => {
      rxTestScheduler.run(({ cold, expectObservable, expectSubscriptions }) => {
        const source = cold('-');
        const sub1 = ' ^';
        const expected = ' -';
        const result = source.pipe(throwIfEmpty(() => new Error('test')));
        expectObservable(result).toBe(expected);
        expectSubscriptions(source.subscriptions).toBe([sub1]);
      });
    });
    // NOTE(review): title duplicates the first spec in this describe; this variant
    // additionally asserts the subscription window.
    it('should error when empty', () => {
      rxTestScheduler.run(({ cold, expectObservable, expectSubscriptions }) => {
        const source = cold('----|');
        const sub1 = ' ^---!';
        const expected = ' ----#';
        const result = source.pipe(throwIfEmpty(() => new Error('test')));
        expectObservable(result).toBe(expected, undefined, new Error('test'));
        expectSubscriptions(source.subscriptions).toBe([sub1]);
      });
    });
    it('should throw if empty after retry', () => {
      const error = new Error('So empty inside');
      let thrown: any;
      let sourceIsEmpty = false;
      // First subscription emits values; the retry resubscribes to an empty source.
      const source = defer(() => {
        if (sourceIsEmpty) {
          return EMPTY;
        }
        sourceIsEmpty = true;
        return of(1, 2);
      });
      source
        .pipe(
          throwIfEmpty(() => error),
          mergeMap((value) => {
            if (value > 1) {
              return throwError(() => new Error());
            }
            return of(value);
          }),
          retry(1)
        )
        .subscribe({
          error(err) {
            thrown = err;
          },
        });
      expect(thrown).to.equal(error);
    });
  });
  describe('without errorFactory', () => {
    it('should throw EmptyError if empty', () => {
      let thrown: any;
      EMPTY.pipe(throwIfEmpty()).subscribe({
        error(err) {
          thrown = err;
        },
      });
      // Default factory produces rxjs' EmptyError.
      expect(thrown).to.be.instanceof(EmptyError);
    });
    it('should NOT throw if NOT empty', () => {
      let thrown: any;
      of('test')
        .pipe(throwIfEmpty())
        .subscribe({
          error(err) {
            thrown = err;
          },
        });
      expect(thrown).to.be.undefined;
    });
    it('should pass values through', () => {
      rxTestScheduler.run(({ cold, expectObservable, expectSubscriptions }) => {
        const source = cold('----a---b---c---|');
        const sub1 = ' ^---------------!';
        const expected = ' ----a---b---c---|';
        const result = source.pipe(throwIfEmpty());
        expectObservable(result).toBe(expected);
        expectSubscriptions(source.subscriptions).toBe([sub1]);
      });
    });
    it('should never when never', () => {
      rxTestScheduler.run(({ cold, expectObservable, expectSubscriptions }) => {
        const source = cold('-');
        const sub1 = ' ^';
        const expected = ' -';
        const result = source.pipe(throwIfEmpty());
        expectObservable(result).toBe(expected);
        expectSubscriptions(source.subscriptions).toBe([sub1]);
      });
    });
    it('should error when empty', () => {
      rxTestScheduler.run(({ cold, expectObservable, expectSubscriptions }) => {
        const source = cold('----|');
        const sub1 = ' ^---!';
        const expected = ' ----#';
        const result = source.pipe(throwIfEmpty());
        expectObservable(result).toBe(expected, undefined, new EmptyError());
        expectSubscriptions(source.subscriptions).toBe([sub1]);
      });
    });
    it('should throw if empty after retry', () => {
      let thrown: any;
      let sourceIsEmpty = false;
      const source = defer(() => {
        if (sourceIsEmpty) {
          return EMPTY;
        }
        sourceIsEmpty = true;
        return of(1, 2);
      });
      source
        .pipe(
          throwIfEmpty(),
          mergeMap((value) => {
            if (value > 1) {
              return throwError(() => new Error());
            }
            return of(value);
          }),
          retry(1)
        )
        .subscribe({
          error(err) {
            thrown = err;
          },
        });
      expect(thrown).to.be.instanceof(EmptyError);
    });
  });
  it('should stop listening to a synchronous observable when unsubscribed', () => {
    const sideEffects: number[] = [];
    const synchronousObservable = new Observable<number>((subscriber) => {
      // This will check to see if the subscriber was closed on each loop
      // when the unsubscribe hits (from the `take`), it should be closed
      for (let i = 0; !subscriber.closed && i < 10; i++) {
        sideEffects.push(i);
        subscriber.next(i);
      }
    });
    synchronousObservable.pipe(throwIfEmpty(), take(3)).subscribe(() => {
      /* noop */
    });
    expect(sideEffects).to.deep.equal([0, 1, 2]);
  });
});
| ReactiveX/rxjs | spec/operators/throwIfEmpty-spec.ts | TypeScript | apache-2.0 | 6,821 |
--------------------------------------------------------------------------------
## Treebank Statistics (UD_Hungarian)
This relation is universal.
1 node (0%) is attached to its parent as `discourse`.
1 instance of `discourse` (100%) is right-to-left (the child precedes its parent).
Average distance between parent and child is 3.
The following 1 pair of parts of speech is connected with `discourse`: [hu-pos/VERB]()-[hu-pos/NOUN]() (1; 100% instances).
~~~ conllu
# visual-style 23 bgColor:blue
# visual-style 23 fgColor:white
# visual-style 26 bgColor:blue
# visual-style 26 fgColor:white
# visual-style 26 23 discourse color:blue
1 A a DET _ Definite=Def|PronType=Art 3 det _ _
2 nagy nagy ADJ _ Case=Nom|Degree=Pos|Number=Sing|Number[psed]=None|Number[psor]=None|Person[psor]=None 3 amod:att _ _
3 elődök előd NOUN _ Case=Nom|Number=Plur|Number[psed]=None|Number[psor]=None|Person[psor]=None 11 nmod:att _ _
4 ( ( PUNCT _ _ 6 punct _ _
5 Sean Sean PROPN _ Case=Nom|Number=Sing|Number[psed]=None|Number[psor]=None|Person[psor]=None 6 name _ _
6 Connery Connery PROPN _ Case=Nom|Number=Sing|Number[psed]=None|Number[psor]=None|Person[psor]=None 3 appos _ _
7 , , PUNCT _ _ 6 punct _ _
8 Roger Roger PROPN _ Case=Nom|Number=Sing|Number[psed]=None|Number[psor]=None|Person[psor]=None 9 name _ _
9 Moore Moore PROPN _ Case=Nom|Number=Sing|Number[psed]=None|Number[psor]=None|Person[psor]=None 6 conj _ _
10 ) ) PUNCT _ _ 6 punct _ _
11 játékában játék NOUN _ Case=Ine|Number=Sing|Number[psed]=None|Number[psor]=Sing|Person[psor]=3 12 nmod:obl _ _
12 fellelhető fellelhető ADJ _ Case=Nom|Number=Sing|Number[psed]=None|Number[psor]=None|Person[psor]=None|VerbForm=PartPres 13 amod:att _ _
13 irónia irónia NOUN _ Case=Nom|Number=Sing|Number[psed]=None|Number[psor]=None|Person[psor]=None 18 nsubj _ _
14 Brosnannek Brosnan PROPN _ Case=Gen|Number=Sing|Number[psed]=None|Number[psor]=None|Person[psor]=None 18 nmod:att _ _
15 ugyan ugyan ADV _ _ 18 advmod:mode _ _
16 nem nem ADV _ PronType=Neg 18 neg _ _
17 az az DET _ Definite=Def|PronType=Art 18 det _ _
18 erőssége erősség NOUN _ Case=Nom|Number=Sing|Number[psed]=None|Number[psor]=Sing|Person[psor]=3 0 root _ _
19 , , PUNCT _ _ 18 punct _ _
20 de de CONJ _ _ 18 cc _ _
21 hál hál NOUN _ Case=Nom|Number=Sing|Number[psed]=None|Number[psor]=None|Person[psor]=None 23 nmod:att _ _
22 ' ' PUNCT _ _ 23 punct _ _
23 isten isten NOUN _ Case=Nom|Number=Sing|Number[psed]=None|Number[psor]=None|Person[psor]=None 26 discourse _ _
24 nem nem ADV _ PronType=Neg 26 neg _ _
25 is is ADV _ _ 26 advmod:mode _ _
26 erőlteti erőltet VERB _ Definite=Def|Mood=Ind|Number=Sing|Person=3|Tense=Pres|VerbForm=Fin|Voice=Act 18 conj _ _
27 sem sem CONJ _ _ 28 cc _ _
28 ő ő PRON _ Case=Nom|Number=Sing|Number[psed]=None|Number[psor]=None|Person=3|Person[psor]=None|PronType=Prs 26 nsubj _ _
29 , , PUNCT _ _ 28 punct _ _
30 sem sem CONJ _ _ 28 cc _ _
31 a a DET _ Definite=Def|PronType=Art 32 det _ _
32 történet történet NOUN _ Case=Nom|Number=Sing|Number[psed]=None|Number[psor]=None|Person[psor]=None 33 nmod:att _ _
33 kiagyalói kiagyalói ADJ _ Case=Nom|Degree=Pos|Number=Sing|Number[psed]=None|Number[psor]=None|Person[psor]=None 28 conj _ _
34 . . PUNCT _ _ 18 punct _ _
~~~
| fginter/docs-fginterfork | _includes/stats/hu/dep/discourse.md | Markdown | apache-2.0 | 3,255 |
/**
* Autogenerated by Thrift Compiler (0.9.0)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
#include "hive_metastore_types.h"
#include <algorithm>
namespace Apache { namespace Hadoop { namespace Hive {
// Thrift-generated lookup tables: for each generated enum, a parallel pair of
// value and name arrays is folded into a value->name map via TEnumIterator
// (the integer passed to TEnumIterator is the entry count; -1 marks the end
// sentinel). Used for printing/debugging enum values. Generated code — do not
// hand-edit the table contents; regenerate from the .thrift IDL instead.
int _kHiveObjectTypeValues[] = {
  HiveObjectType::GLOBAL,
  HiveObjectType::DATABASE,
  HiveObjectType::TABLE,
  HiveObjectType::PARTITION,
  HiveObjectType::COLUMN
};
const char* _kHiveObjectTypeNames[] = {
  "GLOBAL",
  "DATABASE",
  "TABLE",
  "PARTITION",
  "COLUMN"
};
const std::map<int, const char*> _HiveObjectType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(5, _kHiveObjectTypeValues, _kHiveObjectTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
int _kPrincipalTypeValues[] = {
  PrincipalType::USER,
  PrincipalType::ROLE,
  PrincipalType::GROUP
};
const char* _kPrincipalTypeNames[] = {
  "USER",
  "ROLE",
  "GROUP"
};
const std::map<int, const char*> _PrincipalType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kPrincipalTypeValues, _kPrincipalTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
int _kPartitionEventTypeValues[] = {
  PartitionEventType::LOAD_DONE
};
const char* _kPartitionEventTypeNames[] = {
  "LOAD_DONE"
};
const std::map<int, const char*> _PartitionEventType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(1, _kPartitionEventTypeValues, _kPartitionEventTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
int _kTxnStateValues[] = {
  TxnState::COMMITTED,
  TxnState::ABORTED,
  TxnState::OPEN
};
const char* _kTxnStateNames[] = {
  "COMMITTED",
  "ABORTED",
  "OPEN"
};
const std::map<int, const char*> _TxnState_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kTxnStateValues, _kTxnStateNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
int _kLockLevelValues[] = {
  LockLevel::DB,
  LockLevel::TABLE,
  LockLevel::PARTITION
};
const char* _kLockLevelNames[] = {
  "DB",
  "TABLE",
  "PARTITION"
};
const std::map<int, const char*> _LockLevel_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kLockLevelValues, _kLockLevelNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
int _kLockStateValues[] = {
  LockState::ACQUIRED,
  LockState::WAITING,
  LockState::ABORT,
  LockState::NOT_ACQUIRED
};
const char* _kLockStateNames[] = {
  "ACQUIRED",
  "WAITING",
  "ABORT",
  "NOT_ACQUIRED"
};
const std::map<int, const char*> _LockState_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(4, _kLockStateValues, _kLockStateNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
int _kLockTypeValues[] = {
  LockType::SHARED_READ,
  LockType::SHARED_WRITE,
  LockType::EXCLUSIVE
};
const char* _kLockTypeNames[] = {
  "SHARED_READ",
  "SHARED_WRITE",
  "EXCLUSIVE"
};
const std::map<int, const char*> _LockType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kLockTypeValues, _kLockTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
int _kCompactionTypeValues[] = {
  CompactionType::MINOR,
  CompactionType::MAJOR
};
const char* _kCompactionTypeNames[] = {
  "MINOR",
  "MAJOR"
};
const std::map<int, const char*> _CompactionType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(2, _kCompactionTypeValues, _kCompactionTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
int _kGrantRevokeTypeValues[] = {
  GrantRevokeType::GRANT,
  GrantRevokeType::REVOKE
};
const char* _kGrantRevokeTypeNames[] = {
  "GRANT",
  "REVOKE"
};
const std::map<int, const char*> _GrantRevokeType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(2, _kGrantRevokeTypeValues, _kGrantRevokeTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
int _kEventRequestTypeValues[] = {
  EventRequestType::INSERT,
  EventRequestType::UPDATE,
  EventRequestType::DELETE
};
const char* _kEventRequestTypeNames[] = {
  "INSERT",
  "UPDATE",
  "DELETE"
};
const std::map<int, const char*> _EventRequestType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kEventRequestTypeValues, _kEventRequestTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
int _kFunctionTypeValues[] = {
  FunctionType::JAVA
};
const char* _kFunctionTypeNames[] = {
  "JAVA"
};
const std::map<int, const char*> _FunctionType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(1, _kFunctionTypeValues, _kFunctionTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
int _kResourceTypeValues[] = {
  ResourceType::JAR,
  ResourceType::FILE,
  ResourceType::ARCHIVE
};
const char* _kResourceTypeNames[] = {
  "JAR",
  "FILE",
  "ARCHIVE"
};
const std::map<int, const char*> _ResourceType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kResourceTypeValues, _kResourceTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
// Compiler-emitted schema fingerprints for Version, used by Thrift to detect
// wire-format compatibility between generated versions of this struct.
const char* Version::ascii_fingerprint = "07A9615F837F7D0A952B595DD3020972";
const uint8_t Version::binary_fingerprint[16] = {0x07,0xA9,0x61,0x5F,0x83,0x7F,0x7D,0x0A,0x95,0x2B,0x59,0x5D,0xD3,0x02,0x09,0x72};
// Deserializes a Version from iprot. Unknown or mistyped fields are skipped,
// which is what makes the protocol forward/backward compatible. Returns the
// running total accumulated from the protocol's read calls.
uint32_t Version::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    // Field ids correspond to the field numbers in the .thrift definition.
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->version);
          this->__isset.version = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->comments);
          this->__isset.comments = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Field id not known to this generated version: skip its payload.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes a Version to oprot in field-id order (both fields are always
// written, regardless of __isset). Returns the accumulated protocol count.
uint32_t Version::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("Version");
  xfer += oprot->writeFieldBegin("version", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->version);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("comments", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->comments);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// ADL swap for Version: exchanges every field plus the __isset bookkeeping.
void swap(Version &a, Version &b) {
  using ::std::swap;
  swap(a.version, b.version);
  swap(a.comments, b.comments);
  swap(a.__isset, b.__isset);
}
// Compiler-emitted schema fingerprints for FieldSchema.
const char* FieldSchema::ascii_fingerprint = "AB879940BD15B6B25691265F7384B271";
const uint8_t FieldSchema::binary_fingerprint[16] = {0xAB,0x87,0x99,0x40,0xBD,0x15,0xB6,0xB2,0x56,0x91,0x26,0x5F,0x73,0x84,0xB2,0x71};
// Deserializes a FieldSchema (column name / type string / comment) from iprot,
// skipping unknown or mistyped fields; returns the accumulated protocol count.
uint32_t FieldSchema::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->name);
          this->__isset.name = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->type);
          this->__isset.type = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->comment);
          this->__isset.comment = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes a FieldSchema to oprot; all three fields are written
// unconditionally. Returns the accumulated protocol count.
uint32_t FieldSchema::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("FieldSchema");
  xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->name);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->type);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("comment", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeString(this->comment);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// ADL swap for FieldSchema: exchanges every field plus the __isset flags.
void swap(FieldSchema &a, FieldSchema &b) {
  using ::std::swap;
  swap(a.name, b.name);
  swap(a.type, b.type);
  swap(a.comment, b.comment);
  swap(a.__isset, b.__isset);
}
// Compiler-emitted schema fingerprints for Type.
const char* Type::ascii_fingerprint = "20DF02DE523C27F7066C7BD4D9120842";
const uint8_t Type::binary_fingerprint[16] = {0x20,0xDF,0x02,0xDE,0x52,0x3C,0x27,0xF7,0x06,0x6C,0x7B,0xD4,0xD9,0x12,0x08,0x42};
// Deserializes a Type from iprot: a name, two optional component type strings,
// and an optional list of FieldSchema elements. Unknown/mistyped fields are
// skipped; returns the accumulated protocol count.
uint32_t Type::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->name);
          this->__isset.name = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->type1);
          this->__isset.type1 = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->type2);
          this->__isset.type2 = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            // Read the list size, presize the vector, then read each element.
            this->fields.clear();
            uint32_t _size0;
            ::apache::thrift::protocol::TType _etype3;
            xfer += iprot->readListBegin(_etype3, _size0);
            this->fields.resize(_size0);
            uint32_t _i4;
            for (_i4 = 0; _i4 < _size0; ++_i4)
            {
              xfer += this->fields[_i4].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.fields = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes a Type to oprot. `name` is always written; the optional fields
// (type1, type2, fields) are written only when their __isset flag is set.
uint32_t Type::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("Type");
  xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->name);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.type1) {
    xfer += oprot->writeFieldBegin("type1", ::apache::thrift::protocol::T_STRING, 2);
    xfer += oprot->writeString(this->type1);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.type2) {
    xfer += oprot->writeFieldBegin("type2", ::apache::thrift::protocol::T_STRING, 3);
    xfer += oprot->writeString(this->type2);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.fields) {
    xfer += oprot->writeFieldBegin("fields", ::apache::thrift::protocol::T_LIST, 4);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->fields.size()));
      std::vector<FieldSchema> ::const_iterator _iter5;
      for (_iter5 = this->fields.begin(); _iter5 != this->fields.end(); ++_iter5)
      {
        xfer += (*_iter5).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// ADL swap for Type: exchanges every field plus the __isset flags.
void swap(Type &a, Type &b) {
  using ::std::swap;
  swap(a.name, b.name);
  swap(a.type1, b.type1);
  swap(a.type2, b.type2);
  swap(a.fields, b.fields);
  swap(a.__isset, b.__isset);
}
// Compiler-emitted schema fingerprints for HiveObjectRef.
const char* HiveObjectRef::ascii_fingerprint = "205CD8311CF3AA9EC161BAEF8D7C933C";
const uint8_t HiveObjectRef::binary_fingerprint[16] = {0x20,0x5C,0xD8,0x31,0x1C,0xF3,0xAA,0x9E,0xC1,0x61,0xBA,0xEF,0x8D,0x7C,0x93,0x3C};
// Deserializes a HiveObjectRef (object type enum, db/object/column names, and
// partition value list) from iprot, skipping unknown/mistyped fields.
uint32_t HiveObjectRef::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          // Enums travel on the wire as i32 and are cast back on arrival.
          int32_t ecast6;
          xfer += iprot->readI32(ecast6);
          this->objectType = (HiveObjectType::type)ecast6;
          this->__isset.objectType = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->dbName);
          this->__isset.dbName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->objectName);
          this->__isset.objectName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->partValues.clear();
            uint32_t _size7;
            ::apache::thrift::protocol::TType _etype10;
            xfer += iprot->readListBegin(_etype10, _size7);
            this->partValues.resize(_size7);
            uint32_t _i11;
            for (_i11 = 0; _i11 < _size7; ++_i11)
            {
              xfer += iprot->readString(this->partValues[_i11]);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.partValues = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->columnName);
          this->__isset.columnName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes a HiveObjectRef to oprot; all fields are written unconditionally.
uint32_t HiveObjectRef::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("HiveObjectRef");
  xfer += oprot->writeFieldBegin("objectType", ::apache::thrift::protocol::T_I32, 1);
  xfer += oprot->writeI32((int32_t)this->objectType);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->dbName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("objectName", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeString(this->objectName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("partValues", ::apache::thrift::protocol::T_LIST, 4);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partValues.size()));
    std::vector<std::string> ::const_iterator _iter12;
    for (_iter12 = this->partValues.begin(); _iter12 != this->partValues.end(); ++_iter12)
    {
      xfer += oprot->writeString((*_iter12));
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("columnName", ::apache::thrift::protocol::T_STRING, 5);
  xfer += oprot->writeString(this->columnName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// ADL swap for HiveObjectRef: exchanges every field plus the __isset flags.
void swap(HiveObjectRef &a, HiveObjectRef &b) {
  using ::std::swap;
  swap(a.objectType, b.objectType);
  swap(a.dbName, b.dbName);
  swap(a.objectName, b.objectName);
  swap(a.partValues, b.partValues);
  swap(a.columnName, b.columnName);
  swap(a.__isset, b.__isset);
}
// Compiler-emitted schema fingerprints for PrivilegeGrantInfo.
const char* PrivilegeGrantInfo::ascii_fingerprint = "A58923AF7294BE492D6F90E07E8CEE1F";
const uint8_t PrivilegeGrantInfo::binary_fingerprint[16] = {0xA5,0x89,0x23,0xAF,0x72,0x94,0xBE,0x49,0x2D,0x6F,0x90,0xE0,0x7E,0x8C,0xEE,0x1F};
// Deserializes a PrivilegeGrantInfo (privilege name, create time, grantor,
// grantor type enum, grant-option flag) from iprot, skipping unknown fields.
uint32_t PrivilegeGrantInfo::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->privilege);
          this->__isset.privilege = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->createTime);
          this->__isset.createTime = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->grantor);
          this->__isset.grantor = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          // Enum arrives as i32 and is cast back to PrincipalType.
          int32_t ecast13;
          xfer += iprot->readI32(ecast13);
          this->grantorType = (PrincipalType::type)ecast13;
          this->__isset.grantorType = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->grantOption);
          this->__isset.grantOption = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes a PrivilegeGrantInfo to oprot; all fields written unconditionally.
uint32_t PrivilegeGrantInfo::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("PrivilegeGrantInfo");
  xfer += oprot->writeFieldBegin("privilege", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->privilege);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("createTime", ::apache::thrift::protocol::T_I32, 2);
  xfer += oprot->writeI32(this->createTime);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("grantor", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeString(this->grantor);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("grantorType", ::apache::thrift::protocol::T_I32, 4);
  xfer += oprot->writeI32((int32_t)this->grantorType);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("grantOption", ::apache::thrift::protocol::T_BOOL, 5);
  xfer += oprot->writeBool(this->grantOption);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// ADL swap for PrivilegeGrantInfo: exchanges every field plus __isset flags.
void swap(PrivilegeGrantInfo &a, PrivilegeGrantInfo &b) {
  using ::std::swap;
  swap(a.privilege, b.privilege);
  swap(a.createTime, b.createTime);
  swap(a.grantor, b.grantor);
  swap(a.grantorType, b.grantorType);
  swap(a.grantOption, b.grantOption);
  swap(a.__isset, b.__isset);
}
// Compiler-emitted schema fingerprints for HiveObjectPrivilege.
const char* HiveObjectPrivilege::ascii_fingerprint = "83D71969B23BD853E29DBA9D43B29AF8";
const uint8_t HiveObjectPrivilege::binary_fingerprint[16] = {0x83,0xD7,0x19,0x69,0xB2,0x3B,0xD8,0x53,0xE2,0x9D,0xBA,0x9D,0x43,0xB2,0x9A,0xF8};
// Deserializes a HiveObjectPrivilege from iprot. Nested structs (hiveObject,
// grantInfo) delegate to their own generated read(); unknown fields skipped.
uint32_t HiveObjectPrivilege::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->hiveObject.read(iprot);
          this->__isset.hiveObject = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->principalName);
          this->__isset.principalName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          // Enum arrives as i32 and is cast back to PrincipalType.
          int32_t ecast14;
          xfer += iprot->readI32(ecast14);
          this->principalType = (PrincipalType::type)ecast14;
          this->__isset.principalType = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->grantInfo.read(iprot);
          this->__isset.grantInfo = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes a HiveObjectPrivilege to oprot; all fields written unconditionally.
uint32_t HiveObjectPrivilege::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("HiveObjectPrivilege");
  xfer += oprot->writeFieldBegin("hiveObject", ::apache::thrift::protocol::T_STRUCT, 1);
  xfer += this->hiveObject.write(oprot);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("principalName", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->principalName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("principalType", ::apache::thrift::protocol::T_I32, 3);
  xfer += oprot->writeI32((int32_t)this->principalType);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("grantInfo", ::apache::thrift::protocol::T_STRUCT, 4);
  xfer += this->grantInfo.write(oprot);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// ADL swap for HiveObjectPrivilege: exchanges every field plus __isset flags.
void swap(HiveObjectPrivilege &a, HiveObjectPrivilege &b) {
  using ::std::swap;
  swap(a.hiveObject, b.hiveObject);
  swap(a.principalName, b.principalName);
  swap(a.principalType, b.principalType);
  swap(a.grantInfo, b.grantInfo);
  swap(a.__isset, b.__isset);
}
// Compiler-emitted schema fingerprints for PrivilegeBag.
const char* PrivilegeBag::ascii_fingerprint = "BB89E4701B7B709B046A74C90B1147F2";
const uint8_t PrivilegeBag::binary_fingerprint[16] = {0xBB,0x89,0xE4,0x70,0x1B,0x7B,0x70,0x9B,0x04,0x6A,0x74,0xC9,0x0B,0x11,0x47,0xF2};
// Deserializes a PrivilegeBag (a single list of HiveObjectPrivilege) from
// iprot, skipping unknown/mistyped fields.
uint32_t PrivilegeBag::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->privileges.clear();
            uint32_t _size15;
            ::apache::thrift::protocol::TType _etype18;
            xfer += iprot->readListBegin(_etype18, _size15);
            this->privileges.resize(_size15);
            uint32_t _i19;
            for (_i19 = 0; _i19 < _size15; ++_i19)
            {
              xfer += this->privileges[_i19].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.privileges = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes a PrivilegeBag to oprot.
uint32_t PrivilegeBag::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("PrivilegeBag");
  xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->privileges.size()));
    std::vector<HiveObjectPrivilege> ::const_iterator _iter20;
    for (_iter20 = this->privileges.begin(); _iter20 != this->privileges.end(); ++_iter20)
    {
      xfer += (*_iter20).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// ADL swap for PrivilegeBag: exchanges the list plus the __isset flags.
void swap(PrivilegeBag &a, PrivilegeBag &b) {
  using ::std::swap;
  swap(a.privileges, b.privileges);
  swap(a.__isset, b.__isset);
}
const char* PrincipalPrivilegeSet::ascii_fingerprint = "08F75D2533906EA87BE34EA640856683";
const uint8_t PrincipalPrivilegeSet::binary_fingerprint[16] = {0x08,0xF7,0x5D,0x25,0x33,0x90,0x6E,0xA8,0x7B,0xE3,0x4E,0xA6,0x40,0x85,0x66,0x83};
uint32_t PrincipalPrivilegeSet::read(::apache::thrift::protocol::TProtocol* iprot) {
uint32_t xfer = 0;
std::string fname;
::apache::thrift::protocol::TType ftype;
int16_t fid;
xfer += iprot->readStructBegin(fname);
using ::apache::thrift::protocol::TProtocolException;
while (true)
{
xfer += iprot->readFieldBegin(fname, ftype, fid);
if (ftype == ::apache::thrift::protocol::T_STOP) {
break;
}
switch (fid)
{
case 1:
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->userPrivileges.clear();
uint32_t _size21;
::apache::thrift::protocol::TType _ktype22;
::apache::thrift::protocol::TType _vtype23;
xfer += iprot->readMapBegin(_ktype22, _vtype23, _size21);
uint32_t _i25;
for (_i25 = 0; _i25 < _size21; ++_i25)
{
std::string _key26;
xfer += iprot->readString(_key26);
std::vector<PrivilegeGrantInfo> & _val27 = this->userPrivileges[_key26];
{
_val27.clear();
uint32_t _size28;
::apache::thrift::protocol::TType _etype31;
xfer += iprot->readListBegin(_etype31, _size28);
_val27.resize(_size28);
uint32_t _i32;
for (_i32 = 0; _i32 < _size28; ++_i32)
{
xfer += _val27[_i32].read(iprot);
}
xfer += iprot->readListEnd();
}
}
xfer += iprot->readMapEnd();
}
this->__isset.userPrivileges = true;
} else {
xfer += iprot->skip(ftype);
}
break;
case 2:
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->groupPrivileges.clear();
uint32_t _size33;
::apache::thrift::protocol::TType _ktype34;
::apache::thrift::protocol::TType _vtype35;
xfer += iprot->readMapBegin(_ktype34, _vtype35, _size33);
uint32_t _i37;
for (_i37 = 0; _i37 < _size33; ++_i37)
{
std::string _key38;
xfer += iprot->readString(_key38);
std::vector<PrivilegeGrantInfo> & _val39 = this->groupPrivileges[_key38];
{
_val39.clear();
uint32_t _size40;
::apache::thrift::protocol::TType _etype43;
xfer += iprot->readListBegin(_etype43, _size40);
_val39.resize(_size40);
uint32_t _i44;
for (_i44 = 0; _i44 < _size40; ++_i44)
{
xfer += _val39[_i44].read(iprot);
}
xfer += iprot->readListEnd();
}
}
xfer += iprot->readMapEnd();
}
this->__isset.groupPrivileges = true;
} else {
xfer += iprot->skip(ftype);
}
break;
case 3:
if (ftype == ::apache::thrift::protocol::T_MAP) {
{
this->rolePrivileges.clear();
uint32_t _size45;
::apache::thrift::protocol::TType _ktype46;
::apache::thrift::protocol::TType _vtype47;
xfer += iprot->readMapBegin(_ktype46, _vtype47, _size45);
uint32_t _i49;
for (_i49 = 0; _i49 < _size45; ++_i49)
{
std::string _key50;
xfer += iprot->readString(_key50);
std::vector<PrivilegeGrantInfo> & _val51 = this->rolePrivileges[_key50];
{
_val51.clear();
uint32_t _size52;
::apache::thrift::protocol::TType _etype55;
xfer += iprot->readListBegin(_etype55, _size52);
_val51.resize(_size52);
uint32_t _i56;
for (_i56 = 0; _i56 < _size52; ++_i56)
{
xfer += _val51[_i56].read(iprot);
}
xfer += iprot->readListEnd();
}
}
xfer += iprot->readMapEnd();
}
this->__isset.rolePrivileges = true;
} else {
xfer += iprot->skip(ftype);
}
break;
default:
xfer += iprot->skip(ftype);
break;
}
xfer += iprot->readFieldEnd();
}
xfer += iprot->readStructEnd();
return xfer;
}
uint32_t PrincipalPrivilegeSet::write(::apache::thrift::protocol::TProtocol* oprot) const {
uint32_t xfer = 0;
xfer += oprot->writeStructBegin("PrincipalPrivilegeSet");
xfer += oprot->writeFieldBegin("userPrivileges", ::apache::thrift::protocol::T_MAP, 1);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->userPrivileges.size()));
std::map<std::string, std::vector<PrivilegeGrantInfo> > ::const_iterator _iter57;
for (_iter57 = this->userPrivileges.begin(); _iter57 != this->userPrivileges.end(); ++_iter57)
{
xfer += oprot->writeString(_iter57->first);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(_iter57->second.size()));
std::vector<PrivilegeGrantInfo> ::const_iterator _iter58;
for (_iter58 = _iter57->second.begin(); _iter58 != _iter57->second.end(); ++_iter58)
{
xfer += (*_iter58).write(oprot);
}
xfer += oprot->writeListEnd();
}
}
xfer += oprot->writeMapEnd();
}
xfer += oprot->writeFieldEnd();
xfer += oprot->writeFieldBegin("groupPrivileges", ::apache::thrift::protocol::T_MAP, 2);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->groupPrivileges.size()));
std::map<std::string, std::vector<PrivilegeGrantInfo> > ::const_iterator _iter59;
for (_iter59 = this->groupPrivileges.begin(); _iter59 != this->groupPrivileges.end(); ++_iter59)
{
xfer += oprot->writeString(_iter59->first);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(_iter59->second.size()));
std::vector<PrivilegeGrantInfo> ::const_iterator _iter60;
for (_iter60 = _iter59->second.begin(); _iter60 != _iter59->second.end(); ++_iter60)
{
xfer += (*_iter60).write(oprot);
}
xfer += oprot->writeListEnd();
}
}
xfer += oprot->writeMapEnd();
}
xfer += oprot->writeFieldEnd();
xfer += oprot->writeFieldBegin("rolePrivileges", ::apache::thrift::protocol::T_MAP, 3);
{
xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->rolePrivileges.size()));
std::map<std::string, std::vector<PrivilegeGrantInfo> > ::const_iterator _iter61;
for (_iter61 = this->rolePrivileges.begin(); _iter61 != this->rolePrivileges.end(); ++_iter61)
{
xfer += oprot->writeString(_iter61->first);
{
xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(_iter61->second.size()));
std::vector<PrivilegeGrantInfo> ::const_iterator _iter62;
for (_iter62 = _iter61->second.begin(); _iter62 != _iter61->second.end(); ++_iter62)
{
xfer += (*_iter62).write(oprot);
}
xfer += oprot->writeListEnd();
}
}
xfer += oprot->writeMapEnd();
}
xfer += oprot->writeFieldEnd();
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
}
// Swaps every member (and the field-set flags) of two PrincipalPrivilegeSet
// values. The unqualified swap() call after `using ::std::swap` allows ADL to
// pick up member-specific swap overloads where they exist.
void swap(PrincipalPrivilegeSet &a, PrincipalPrivilegeSet &b) {
  using ::std::swap;
  swap(a.userPrivileges, b.userPrivileges);
  swap(a.groupPrivileges, b.groupPrivileges);
  swap(a.rolePrivileges, b.rolePrivileges);
  swap(a.__isset, b.__isset);
}
// Schema fingerprint for GrantRevokePrivilegeRequest: the same 16-byte value in
// hex-string and raw-byte form. NOTE(review): this file appears to be generated
// by the Apache Thrift compiler — hand edits will be lost on regeneration.
const char* GrantRevokePrivilegeRequest::ascii_fingerprint = "DF474A3CB526AD40DC0F2C3702F7AA2C";
const uint8_t GrantRevokePrivilegeRequest::binary_fingerprint[16] = {0xDF,0x47,0x4A,0x3C,0xB5,0x26,0xAD,0x40,0xDC,0x0F,0x2C,0x37,0x02,0xF7,0xAA,0x2C};
// Deserializes this struct from iprot. Fields are dispatched by numeric field
// id until T_STOP; a field whose wire type does not match, or whose id is
// unknown, is skipped (forward compatibility with newer writers).
// Returns the number of bytes consumed from the protocol.
uint32_t GrantRevokePrivilegeRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          // Enums travel on the wire as i32; cast back to the enum type.
          int32_t ecast63;
          xfer += iprot->readI32(ecast63);
          this->requestType = (GrantRevokeType::type)ecast63;
          this->__isset.requestType = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->privileges.read(iprot);
          this->__isset.privileges = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->revokeGrantOption);
          this->__isset.revokeGrantOption = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes this struct to oprot in field-id order. revokeGrantOption is only
// written when its __isset flag is set (optional field); the other fields are
// written unconditionally. Returns the number of bytes written.
uint32_t GrantRevokePrivilegeRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("GrantRevokePrivilegeRequest");
  xfer += oprot->writeFieldBegin("requestType", ::apache::thrift::protocol::T_I32, 1);
  xfer += oprot->writeI32((int32_t)this->requestType);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 2);
  xfer += this->privileges.write(oprot);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.revokeGrantOption) {
    xfer += oprot->writeFieldBegin("revokeGrantOption", ::apache::thrift::protocol::T_BOOL, 3);
    xfer += oprot->writeBool(this->revokeGrantOption);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise swap, including the __isset flags.
void swap(GrantRevokePrivilegeRequest &a, GrantRevokePrivilegeRequest &b) {
  using ::std::swap;
  swap(a.requestType, b.requestType);
  swap(a.privileges, b.privileges);
  swap(a.revokeGrantOption, b.revokeGrantOption);
  swap(a.__isset, b.__isset);
}
// Schema fingerprint for GrantRevokePrivilegeResponse (hex string and raw
// bytes of the same 16-byte value).
const char* GrantRevokePrivilegeResponse::ascii_fingerprint = "BF054652DEF86253C2BEE7D947F167DD";
const uint8_t GrantRevokePrivilegeResponse::binary_fingerprint[16] = {0xBF,0x05,0x46,0x52,0xDE,0xF8,0x62,0x53,0xC2,0xBE,0xE7,0xD9,0x47,0xF1,0x67,0xDD};
// Deserializes this struct from iprot; unknown/mismatched fields are skipped.
// Returns the number of bytes consumed.
uint32_t GrantRevokePrivilegeResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->success);
          this->__isset.success = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes this struct to oprot; the optional `success` field is written
// only when its __isset flag is set. Returns the number of bytes written.
uint32_t GrantRevokePrivilegeResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("GrantRevokePrivilegeResponse");
  if (this->__isset.success) {
    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 1);
    xfer += oprot->writeBool(this->success);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise swap, including the __isset flags.
void swap(GrantRevokePrivilegeResponse &a, GrantRevokePrivilegeResponse &b) {
  using ::std::swap;
  swap(a.success, b.success);
  swap(a.__isset, b.__isset);
}
// Schema fingerprint for Role (hex string and raw bytes of the same value).
const char* Role::ascii_fingerprint = "70563A0628F75DF9555F4D24690B1E26";
const uint8_t Role::binary_fingerprint[16] = {0x70,0x56,0x3A,0x06,0x28,0xF7,0x5D,0xF9,0x55,0x5F,0x4D,0x24,0x69,0x0B,0x1E,0x26};
// Deserializes a Role (roleName, createTime, ownerName) from iprot.
// Unknown/mismatched fields are skipped. Returns bytes consumed.
uint32_t Role::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->roleName);
          this->__isset.roleName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->createTime);
          this->__isset.createTime = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->ownerName);
          this->__isset.ownerName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes a Role to oprot; all three fields are written unconditionally,
// in field-id order. Returns bytes written.
uint32_t Role::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("Role");
  xfer += oprot->writeFieldBegin("roleName", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->roleName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("createTime", ::apache::thrift::protocol::T_I32, 2);
  xfer += oprot->writeI32(this->createTime);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("ownerName", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeString(this->ownerName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise swap, including the __isset flags.
void swap(Role &a, Role &b) {
  using ::std::swap;
  swap(a.roleName, b.roleName);
  swap(a.createTime, b.createTime);
  swap(a.ownerName, b.ownerName);
  swap(a.__isset, b.__isset);
}
// Schema fingerprint for RolePrincipalGrant (hex string and raw bytes).
const char* RolePrincipalGrant::ascii_fingerprint = "899BA3F6214DD1B79D27206BA857C772";
const uint8_t RolePrincipalGrant::binary_fingerprint[16] = {0x89,0x9B,0xA3,0xF6,0x21,0x4D,0xD1,0xB7,0x9D,0x27,0x20,0x6B,0xA8,0x57,0xC7,0x72};
// Deserializes a RolePrincipalGrant (role/principal identity plus grant
// metadata) from iprot. Unknown/mismatched fields are skipped.
// Returns bytes consumed.
uint32_t RolePrincipalGrant::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->roleName);
          this->__isset.roleName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->principalName);
          this->__isset.principalName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          // Enum arrives as i32 on the wire; cast back to PrincipalType.
          int32_t ecast64;
          xfer += iprot->readI32(ecast64);
          this->principalType = (PrincipalType::type)ecast64;
          this->__isset.principalType = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->grantOption);
          this->__isset.grantOption = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->grantTime);
          this->__isset.grantTime = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 6:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->grantorName);
          this->__isset.grantorName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 7:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          int32_t ecast65;
          xfer += iprot->readI32(ecast65);
          this->grantorPrincipalType = (PrincipalType::type)ecast65;
          this->__isset.grantorPrincipalType = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes a RolePrincipalGrant; all seven fields are written
// unconditionally, in field-id order. Returns bytes written.
uint32_t RolePrincipalGrant::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("RolePrincipalGrant");
  xfer += oprot->writeFieldBegin("roleName", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->roleName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("principalName", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->principalName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("principalType", ::apache::thrift::protocol::T_I32, 3);
  xfer += oprot->writeI32((int32_t)this->principalType);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("grantOption", ::apache::thrift::protocol::T_BOOL, 4);
  xfer += oprot->writeBool(this->grantOption);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("grantTime", ::apache::thrift::protocol::T_I32, 5);
  xfer += oprot->writeI32(this->grantTime);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("grantorName", ::apache::thrift::protocol::T_STRING, 6);
  xfer += oprot->writeString(this->grantorName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("grantorPrincipalType", ::apache::thrift::protocol::T_I32, 7);
  xfer += oprot->writeI32((int32_t)this->grantorPrincipalType);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise swap, including the __isset flags.
void swap(RolePrincipalGrant &a, RolePrincipalGrant &b) {
  using ::std::swap;
  swap(a.roleName, b.roleName);
  swap(a.principalName, b.principalName);
  swap(a.principalType, b.principalType);
  swap(a.grantOption, b.grantOption);
  swap(a.grantTime, b.grantTime);
  swap(a.grantorName, b.grantorName);
  swap(a.grantorPrincipalType, b.grantorPrincipalType);
  swap(a.__isset, b.__isset);
}
// Schema fingerprint for GetRoleGrantsForPrincipalRequest.
const char* GetRoleGrantsForPrincipalRequest::ascii_fingerprint = "D6FD826D949221396F4FFC3ECCD3D192";
const uint8_t GetRoleGrantsForPrincipalRequest::binary_fingerprint[16] = {0xD6,0xFD,0x82,0x6D,0x94,0x92,0x21,0x39,0x6F,0x4F,0xFC,0x3E,0xCC,0xD3,0xD1,0x92};
// Deserializes this request from iprot. Both fields are required: local
// isset_* flags track their presence and a TProtocolException(INVALID_DATA)
// is thrown if either is missing once the struct ends. Returns bytes consumed.
uint32_t GetRoleGrantsForPrincipalRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_principal_name = false;
  bool isset_principal_type = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->principal_name);
          isset_principal_name = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          int32_t ecast66;
          xfer += iprot->readI32(ecast66);
          this->principal_type = (PrincipalType::type)ecast66;
          isset_principal_type = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_principal_name)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_principal_type)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this request; both required fields are written unconditionally.
// Returns bytes written.
uint32_t GetRoleGrantsForPrincipalRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("GetRoleGrantsForPrincipalRequest");
  xfer += oprot->writeFieldBegin("principal_name", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->principal_name);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("principal_type", ::apache::thrift::protocol::T_I32, 2);
  xfer += oprot->writeI32((int32_t)this->principal_type);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise swap. No __isset swap here, unlike the structs above —
// presumably because both fields are required; verify against the IDL.
void swap(GetRoleGrantsForPrincipalRequest &a, GetRoleGrantsForPrincipalRequest &b) {
  using ::std::swap;
  swap(a.principal_name, b.principal_name);
  swap(a.principal_type, b.principal_type);
}
// Schema fingerprint for GetRoleGrantsForPrincipalResponse.
const char* GetRoleGrantsForPrincipalResponse::ascii_fingerprint = "5926B4B3541A62E17663820C7E3BE690";
const uint8_t GetRoleGrantsForPrincipalResponse::binary_fingerprint[16] = {0x59,0x26,0xB4,0xB3,0x54,0x1A,0x62,0xE1,0x76,0x63,0x82,0x0C,0x7E,0x3B,0xE6,0x90};
// Deserializes this response from iprot. The principalGrants list is required:
// throws TProtocolException(INVALID_DATA) if it never appears.
// Returns bytes consumed.
uint32_t GetRoleGrantsForPrincipalResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_principalGrants = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            // Replace any existing contents, then read each element in place.
            this->principalGrants.clear();
            uint32_t _size67;
            ::apache::thrift::protocol::TType _etype70;
            xfer += iprot->readListBegin(_etype70, _size67);
            this->principalGrants.resize(_size67);
            uint32_t _i71;
            for (_i71 = 0; _i71 < _size67; ++_i71)
            {
              xfer += this->principalGrants[_i71].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          isset_principalGrants = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_principalGrants)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this response: the principalGrants list is written element by
// element as T_STRUCT entries. Returns bytes written.
uint32_t GetRoleGrantsForPrincipalResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("GetRoleGrantsForPrincipalResponse");
  xfer += oprot->writeFieldBegin("principalGrants", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->principalGrants.size()));
    std::vector<RolePrincipalGrant> ::const_iterator _iter72;
    for (_iter72 = this->principalGrants.begin(); _iter72 != this->principalGrants.end(); ++_iter72)
    {
      xfer += (*_iter72).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise swap (single required field, no __isset swap).
void swap(GetRoleGrantsForPrincipalResponse &a, GetRoleGrantsForPrincipalResponse &b) {
  using ::std::swap;
  swap(a.principalGrants, b.principalGrants);
}
// Schema fingerprint for GetPrincipalsInRoleRequest.
const char* GetPrincipalsInRoleRequest::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
const uint8_t GetPrincipalsInRoleRequest::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
// Deserializes this request. roleName is required: throws
// TProtocolException(INVALID_DATA) if absent. Returns bytes consumed.
uint32_t GetPrincipalsInRoleRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_roleName = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->roleName);
          isset_roleName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_roleName)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this request (single required string field). Returns bytes written.
uint32_t GetPrincipalsInRoleRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("GetPrincipalsInRoleRequest");
  xfer += oprot->writeFieldBegin("roleName", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->roleName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise swap (single required field, no __isset swap).
void swap(GetPrincipalsInRoleRequest &a, GetPrincipalsInRoleRequest &b) {
  using ::std::swap;
  swap(a.roleName, b.roleName);
}
// Schema fingerprint for GetPrincipalsInRoleResponse. Identical to
// GetRoleGrantsForPrincipalResponse's fingerprint — both structs carry the
// same single list-of-RolePrincipalGrant shape.
const char* GetPrincipalsInRoleResponse::ascii_fingerprint = "5926B4B3541A62E17663820C7E3BE690";
const uint8_t GetPrincipalsInRoleResponse::binary_fingerprint[16] = {0x59,0x26,0xB4,0xB3,0x54,0x1A,0x62,0xE1,0x76,0x63,0x82,0x0C,0x7E,0x3B,0xE6,0x90};
// Deserializes this response. The principalGrants list is required: throws
// TProtocolException(INVALID_DATA) if absent. Returns bytes consumed.
uint32_t GetPrincipalsInRoleResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_principalGrants = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->principalGrants.clear();
            uint32_t _size73;
            ::apache::thrift::protocol::TType _etype76;
            xfer += iprot->readListBegin(_etype76, _size73);
            this->principalGrants.resize(_size73);
            uint32_t _i77;
            for (_i77 = 0; _i77 < _size73; ++_i77)
            {
              xfer += this->principalGrants[_i77].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          isset_principalGrants = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_principalGrants)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this response's principalGrants list as T_STRUCT elements.
// Returns bytes written.
uint32_t GetPrincipalsInRoleResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("GetPrincipalsInRoleResponse");
  xfer += oprot->writeFieldBegin("principalGrants", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->principalGrants.size()));
    std::vector<RolePrincipalGrant> ::const_iterator _iter78;
    for (_iter78 = this->principalGrants.begin(); _iter78 != this->principalGrants.end(); ++_iter78)
    {
      xfer += (*_iter78).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise swap (single required field, no __isset swap).
void swap(GetPrincipalsInRoleResponse &a, GetPrincipalsInRoleResponse &b) {
  using ::std::swap;
  swap(a.principalGrants, b.principalGrants);
}
// Schema fingerprint for GrantRevokeRoleRequest.
const char* GrantRevokeRoleRequest::ascii_fingerprint = "907DEA796F2BA7AF76DC2566E75FAEE7";
const uint8_t GrantRevokeRoleRequest::binary_fingerprint[16] = {0x90,0x7D,0xEA,0x79,0x6F,0x2B,0xA7,0xAF,0x76,0xDC,0x25,0x66,0xE7,0x5F,0xAE,0xE7};
// Deserializes a GrantRevokeRoleRequest from iprot. Each recognized field
// sets its __isset flag; unknown/mismatched fields are skipped.
// Returns bytes consumed.
uint32_t GrantRevokeRoleRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          // Enums travel as i32 on the wire; cast back to the enum type.
          int32_t ecast79;
          xfer += iprot->readI32(ecast79);
          this->requestType = (GrantRevokeType::type)ecast79;
          this->__isset.requestType = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->roleName);
          this->__isset.roleName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->principalName);
          this->__isset.principalName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          int32_t ecast80;
          xfer += iprot->readI32(ecast80);
          this->principalType = (PrincipalType::type)ecast80;
          this->__isset.principalType = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->grantor);
          this->__isset.grantor = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 6:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          int32_t ecast81;
          xfer += iprot->readI32(ecast81);
          this->grantorType = (PrincipalType::type)ecast81;
          this->__isset.grantorType = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 7:
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->grantOption);
          this->__isset.grantOption = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes a GrantRevokeRoleRequest. Fields 1-4 are written
// unconditionally; grantor, grantorType and grantOption are optional and only
// written when their __isset flags are set. Returns bytes written.
uint32_t GrantRevokeRoleRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("GrantRevokeRoleRequest");
  xfer += oprot->writeFieldBegin("requestType", ::apache::thrift::protocol::T_I32, 1);
  xfer += oprot->writeI32((int32_t)this->requestType);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("roleName", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->roleName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("principalName", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeString(this->principalName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("principalType", ::apache::thrift::protocol::T_I32, 4);
  xfer += oprot->writeI32((int32_t)this->principalType);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.grantor) {
    xfer += oprot->writeFieldBegin("grantor", ::apache::thrift::protocol::T_STRING, 5);
    xfer += oprot->writeString(this->grantor);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.grantorType) {
    xfer += oprot->writeFieldBegin("grantorType", ::apache::thrift::protocol::T_I32, 6);
    xfer += oprot->writeI32((int32_t)this->grantorType);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.grantOption) {
    xfer += oprot->writeFieldBegin("grantOption", ::apache::thrift::protocol::T_BOOL, 7);
    xfer += oprot->writeBool(this->grantOption);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise swap, including the __isset flags.
void swap(GrantRevokeRoleRequest &a, GrantRevokeRoleRequest &b) {
  using ::std::swap;
  swap(a.requestType, b.requestType);
  swap(a.roleName, b.roleName);
  swap(a.principalName, b.principalName);
  swap(a.principalType, b.principalType);
  swap(a.grantor, b.grantor);
  swap(a.grantorType, b.grantorType);
  swap(a.grantOption, b.grantOption);
  swap(a.__isset, b.__isset);
}
// Schema fingerprint for GrantRevokeRoleResponse. Identical to
// GrantRevokePrivilegeResponse's fingerprint — both carry a single optional
// bool `success`.
const char* GrantRevokeRoleResponse::ascii_fingerprint = "BF054652DEF86253C2BEE7D947F167DD";
const uint8_t GrantRevokeRoleResponse::binary_fingerprint[16] = {0xBF,0x05,0x46,0x52,0xDE,0xF8,0x62,0x53,0xC2,0xBE,0xE7,0xD9,0x47,0xF1,0x67,0xDD};
// Deserializes this response; unknown/mismatched fields are skipped.
// Returns bytes consumed.
uint32_t GrantRevokeRoleResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->success);
          this->__isset.success = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes this response; `success` is optional and written only when its
// __isset flag is set. Returns bytes written.
uint32_t GrantRevokeRoleResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("GrantRevokeRoleResponse");
  if (this->__isset.success) {
    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 1);
    xfer += oprot->writeBool(this->success);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise swap, including the __isset flags.
void swap(GrantRevokeRoleResponse &a, GrantRevokeRoleResponse &b) {
  using ::std::swap;
  swap(a.success, b.success);
  swap(a.__isset, b.__isset);
}
// Schema fingerprint for Database.
const char* Database::ascii_fingerprint = "553495CAE243A1C583D5C3DD990AED53";
const uint8_t Database::binary_fingerprint[16] = {0x55,0x34,0x95,0xCA,0xE2,0x43,0xA1,0xC5,0x83,0xD5,0xC3,0xDD,0x99,0x0A,0xED,0x53};
// Deserializes a Database (name, description, locationUri, parameters map,
// privileges, owner info) from iprot. Unknown/mismatched fields are skipped.
// Returns bytes consumed.
uint32_t Database::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->name);
          this->__isset.name = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->description);
          this->__isset.description = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->locationUri);
          this->__isset.locationUri = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            // string->string map: clear existing contents, then insert each
            // key and read its value directly into the map slot. The declared
            // key/value wire types (_ktype83/_vtype84) are read but not
            // validated against T_STRING here.
            this->parameters.clear();
            uint32_t _size82;
            ::apache::thrift::protocol::TType _ktype83;
            ::apache::thrift::protocol::TType _vtype84;
            xfer += iprot->readMapBegin(_ktype83, _vtype84, _size82);
            uint32_t _i86;
            for (_i86 = 0; _i86 < _size82; ++_i86)
            {
              std::string _key87;
              xfer += iprot->readString(_key87);
              std::string& _val88 = this->parameters[_key87];
              xfer += iprot->readString(_val88);
            }
            xfer += iprot->readMapEnd();
          }
          this->__isset.parameters = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->privileges.read(iprot);
          this->__isset.privileges = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 6:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->ownerName);
          this->__isset.ownerName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 7:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          int32_t ecast89;
          xfer += iprot->readI32(ecast89);
          this->ownerType = (PrincipalType::type)ecast89;
          this->__isset.ownerType = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes a Database. Fields 1-4 are written unconditionally; privileges,
// ownerName and ownerType are optional and only written when their __isset
// flags are set. Returns bytes written.
uint32_t Database::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("Database");
  xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->name);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("description", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->description);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("locationUri", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeString(this->locationUri);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 4);
  {
    xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
    std::map<std::string, std::string> ::const_iterator _iter90;
    for (_iter90 = this->parameters.begin(); _iter90 != this->parameters.end(); ++_iter90)
    {
      xfer += oprot->writeString(_iter90->first);
      xfer += oprot->writeString(_iter90->second);
    }
    xfer += oprot->writeMapEnd();
  }
  xfer += oprot->writeFieldEnd();
  if (this->__isset.privileges) {
    xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 5);
    xfer += this->privileges.write(oprot);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.ownerName) {
    xfer += oprot->writeFieldBegin("ownerName", ::apache::thrift::protocol::T_STRING, 6);
    xfer += oprot->writeString(this->ownerName);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.ownerType) {
    xfer += oprot->writeFieldBegin("ownerType", ::apache::thrift::protocol::T_I32, 7);
    xfer += oprot->writeI32((int32_t)this->ownerType);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise swap, including the __isset flags.
void swap(Database &a, Database &b) {
  using ::std::swap;
  swap(a.name, b.name);
  swap(a.description, b.description);
  swap(a.locationUri, b.locationUri);
  swap(a.parameters, b.parameters);
  swap(a.privileges, b.privileges);
  swap(a.ownerName, b.ownerName);
  swap(a.ownerType, b.ownerType);
  swap(a.__isset, b.__isset);
}
// Schema fingerprint for SerDeInfo.
const char* SerDeInfo::ascii_fingerprint = "B1021C32A35A2AEFCD2F57A5424159A7";
const uint8_t SerDeInfo::binary_fingerprint[16] = {0xB1,0x02,0x1C,0x32,0xA3,0x5A,0x2A,0xEF,0xCD,0x2F,0x57,0xA5,0x42,0x41,0x59,0xA7};
// Deserializes a SerDeInfo (name, serializationLib, parameters map) from
// iprot. Unknown/mismatched fields are skipped. Returns bytes consumed.
uint32_t SerDeInfo::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->name);
          this->__isset.name = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->serializationLib);
          this->__isset.serializationLib = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            // string->string map read; values are read directly into the map.
            this->parameters.clear();
            uint32_t _size91;
            ::apache::thrift::protocol::TType _ktype92;
            ::apache::thrift::protocol::TType _vtype93;
            xfer += iprot->readMapBegin(_ktype92, _vtype93, _size91);
            uint32_t _i95;
            for (_i95 = 0; _i95 < _size91; ++_i95)
            {
              std::string _key96;
              xfer += iprot->readString(_key96);
              std::string& _val97 = this->parameters[_key96];
              xfer += iprot->readString(_val97);
            }
            xfer += iprot->readMapEnd();
          }
          this->__isset.parameters = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes a SerDeInfo; all three fields are written unconditionally.
// Returns bytes written.
uint32_t SerDeInfo::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("SerDeInfo");
  xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->name);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("serializationLib", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->serializationLib);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 3);
  {
    xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
    std::map<std::string, std::string> ::const_iterator _iter98;
    for (_iter98 = this->parameters.begin(); _iter98 != this->parameters.end(); ++_iter98)
    {
      xfer += oprot->writeString(_iter98->first);
      xfer += oprot->writeString(_iter98->second);
    }
    xfer += oprot->writeMapEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise swap, including the __isset flags.
void swap(SerDeInfo &a, SerDeInfo &b) {
  using ::std::swap;
  swap(a.name, b.name);
  swap(a.serializationLib, b.serializationLib);
  swap(a.parameters, b.parameters);
  swap(a.__isset, b.__isset);
}
// Generated schema fingerprints for Order (ASCII hex and raw bytes).
const char* Order::ascii_fingerprint = "EEBC915CE44901401D881E6091423036";
const uint8_t Order::binary_fingerprint[16] = {0xEE,0xBC,0x91,0x5C,0xE4,0x49,0x01,0x40,0x1D,0x88,0x1E,0x60,0x91,0x42,0x30,0x36};
// Deserializes an Order (col: 1 string, order: 2 i32) from iprot.
// Fields with an unexpected wire type or unknown id are skipped for
// forward/backward compatibility; each field read sets its __isset
// flag. Returns the number of bytes consumed.
uint32_t Order::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->col);
          this->__isset.col = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->order);
          this->__isset.order = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip its payload entirely.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes this Order to oprot: col (1: string) then order (2: i32),
// both unconditionally. Returns the number of bytes written.
uint32_t Order::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("Order");
  xfer += oprot->writeFieldBegin("col", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->col);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("order", ::apache::thrift::protocol::T_I32, 2);
  xfer += oprot->writeI32(this->order);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise swap of two Order instances, including the __isset flags.
// `using ::std::swap;` + unqualified calls enable ADL-based dispatch.
void swap(Order &a, Order &b) {
  using ::std::swap;
  swap(a.__isset, b.__isset);
  swap(a.order, b.order);
  swap(a.col, b.col);
}
// Generated schema fingerprints for SkewedInfo (ASCII hex and raw bytes).
const char* SkewedInfo::ascii_fingerprint = "4BF2ED84BC3C3EB297A2AE2FA8427EB1";
const uint8_t SkewedInfo::binary_fingerprint[16] = {0x4B,0xF2,0xED,0x84,0xBC,0x3C,0x3E,0xB2,0x97,0xA2,0xAE,0x2F,0xA8,0x42,0x7E,0xB1};
// Deserializes a SkewedInfo from iprot:
//   1: skewedColNames            list<string>
//   2: skewedColValues           list<list<string>>
//   3: skewedColValueLocationMaps map<list<string>, string>
// Containers are cleared and re-filled in place; mismatched wire types
// and unknown ids are skipped; each field read sets its __isset flag.
// Returns the number of bytes consumed.
uint32_t SkewedInfo::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->skewedColNames.clear();
            uint32_t _size99;
            ::apache::thrift::protocol::TType _etype102;
            xfer += iprot->readListBegin(_etype102, _size99);
            this->skewedColNames.resize(_size99);
            uint32_t _i103;
            for (_i103 = 0; _i103 < _size99; ++_i103)
            {
              xfer += iprot->readString(this->skewedColNames[_i103]);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.skewedColNames = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            // Outer list: one inner list<string> per skewed-value tuple.
            this->skewedColValues.clear();
            uint32_t _size104;
            ::apache::thrift::protocol::TType _etype107;
            xfer += iprot->readListBegin(_etype107, _size104);
            this->skewedColValues.resize(_size104);
            uint32_t _i108;
            for (_i108 = 0; _i108 < _size104; ++_i108)
            {
              {
                this->skewedColValues[_i108].clear();
                uint32_t _size109;
                ::apache::thrift::protocol::TType _etype112;
                xfer += iprot->readListBegin(_etype112, _size109);
                this->skewedColValues[_i108].resize(_size109);
                uint32_t _i113;
                for (_i113 = 0; _i113 < _size109; ++_i113)
                {
                  xfer += iprot->readString(this->skewedColValues[_i108][_i113]);
                }
                xfer += iprot->readListEnd();
              }
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.skewedColValues = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            this->skewedColValueLocationMaps.clear();
            uint32_t _size114;
            ::apache::thrift::protocol::TType _ktype115;
            ::apache::thrift::protocol::TType _vtype116;
            xfer += iprot->readMapBegin(_ktype115, _vtype116, _size114);
            uint32_t _i118;
            for (_i118 = 0; _i118 < _size114; ++_i118)
            {
              // Key is itself a list<string>; read it fully before the value.
              std::vector<std::string> _key119;
              {
                _key119.clear();
                uint32_t _size121;
                ::apache::thrift::protocol::TType _etype124;
                xfer += iprot->readListBegin(_etype124, _size121);
                _key119.resize(_size121);
                uint32_t _i125;
                for (_i125 = 0; _i125 < _size121; ++_i125)
                {
                  xfer += iprot->readString(_key119[_i125]);
                }
                xfer += iprot->readListEnd();
              }
              std::string& _val120 = this->skewedColValueLocationMaps[_key119];
              xfer += iprot->readString(_val120);
            }
            xfer += iprot->readMapEnd();
          }
          this->__isset.skewedColValueLocationMaps = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip its payload entirely.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes this SkewedInfo to oprot: skewedColNames (1: list<string>),
// skewedColValues (2: list<list<string>>), skewedColValueLocationMaps
// (3: map<list<string>, string>), all unconditionally.
// Returns the number of bytes written.
uint32_t SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("SkewedInfo");
  xfer += oprot->writeFieldBegin("skewedColNames", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->skewedColNames.size()));
    std::vector<std::string> ::const_iterator _iter126;
    for (_iter126 = this->skewedColNames.begin(); _iter126 != this->skewedColNames.end(); ++_iter126)
    {
      xfer += oprot->writeString((*_iter126));
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("skewedColValues", ::apache::thrift::protocol::T_LIST, 2);
  {
    // Nested lists: each element is itself emitted as a list of strings.
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->skewedColValues.size()));
    std::vector<std::vector<std::string> > ::const_iterator _iter127;
    for (_iter127 = this->skewedColValues.begin(); _iter127 != this->skewedColValues.end(); ++_iter127)
    {
      {
        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*_iter127).size()));
        std::vector<std::string> ::const_iterator _iter128;
        for (_iter128 = (*_iter127).begin(); _iter128 != (*_iter127).end(); ++_iter128)
        {
          xfer += oprot->writeString((*_iter128));
        }
        xfer += oprot->writeListEnd();
      }
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("skewedColValueLocationMaps", ::apache::thrift::protocol::T_MAP, 3);
  {
    // Map keys are list<string>; each key list is written before its value.
    xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_LIST, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->skewedColValueLocationMaps.size()));
    std::map<std::vector<std::string> , std::string> ::const_iterator _iter129;
    for (_iter129 = this->skewedColValueLocationMaps.begin(); _iter129 != this->skewedColValueLocationMaps.end(); ++_iter129)
    {
      {
        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(_iter129->first.size()));
        std::vector<std::string> ::const_iterator _iter130;
        for (_iter130 = _iter129->first.begin(); _iter130 != _iter129->first.end(); ++_iter130)
        {
          xfer += oprot->writeString((*_iter130));
        }
        xfer += oprot->writeListEnd();
      }
      xfer += oprot->writeString(_iter129->second);
    }
    xfer += oprot->writeMapEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise swap of two SkewedInfo instances, including the __isset
// flags. The unqualified swap calls use ADL after `using ::std::swap;`.
void swap(SkewedInfo &a, SkewedInfo &b) {
  using ::std::swap;
  swap(a.__isset, b.__isset);
  swap(a.skewedColValueLocationMaps, b.skewedColValueLocationMaps);
  swap(a.skewedColValues, b.skewedColValues);
  swap(a.skewedColNames, b.skewedColNames);
}
// Generated schema fingerprints for StorageDescriptor (ASCII hex and raw bytes).
const char* StorageDescriptor::ascii_fingerprint = "CA8C9AA5FE4C32643757D8639CEF0CD7";
const uint8_t StorageDescriptor::binary_fingerprint[16] = {0xCA,0x8C,0x9A,0xA5,0xFE,0x4C,0x32,0x64,0x37,0x57,0xD8,0x63,0x9C,0xEF,0x0C,0xD7};
// Deserializes a StorageDescriptor from iprot. Fields:
//   1 cols: list<FieldSchema>   2 location: string   3 inputFormat: string
//   4 outputFormat: string      5 compressed: bool   6 numBuckets: i32
//   7 serdeInfo: SerDeInfo      8 bucketCols: list<string>
//   9 sortCols: list<Order>    10 parameters: map<string,string>
//  11 skewedInfo: SkewedInfo   12 storedAsSubDirectories: bool
// Mismatched wire types and unknown ids are skipped; each field read
// sets its __isset flag. Returns the number of bytes consumed.
uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->cols.clear();
            uint32_t _size131;
            ::apache::thrift::protocol::TType _etype134;
            xfer += iprot->readListBegin(_etype134, _size131);
            this->cols.resize(_size131);
            uint32_t _i135;
            for (_i135 = 0; _i135 < _size131; ++_i135)
            {
              // Each element is a nested FieldSchema struct.
              xfer += this->cols[_i135].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.cols = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->location);
          this->__isset.location = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->inputFormat);
          this->__isset.inputFormat = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->outputFormat);
          this->__isset.outputFormat = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->compressed);
          this->__isset.compressed = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 6:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->numBuckets);
          this->__isset.numBuckets = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 7:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->serdeInfo.read(iprot);
          this->__isset.serdeInfo = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 8:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->bucketCols.clear();
            uint32_t _size136;
            ::apache::thrift::protocol::TType _etype139;
            xfer += iprot->readListBegin(_etype139, _size136);
            this->bucketCols.resize(_size136);
            uint32_t _i140;
            for (_i140 = 0; _i140 < _size136; ++_i140)
            {
              xfer += iprot->readString(this->bucketCols[_i140]);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.bucketCols = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 9:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->sortCols.clear();
            uint32_t _size141;
            ::apache::thrift::protocol::TType _etype144;
            xfer += iprot->readListBegin(_etype144, _size141);
            this->sortCols.resize(_size141);
            uint32_t _i145;
            for (_i145 = 0; _i145 < _size141; ++_i145)
            {
              // Each element is a nested Order struct.
              xfer += this->sortCols[_i145].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.sortCols = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 10:
        if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            this->parameters.clear();
            uint32_t _size146;
            ::apache::thrift::protocol::TType _ktype147;
            ::apache::thrift::protocol::TType _vtype148;
            xfer += iprot->readMapBegin(_ktype147, _vtype148, _size146);
            uint32_t _i150;
            for (_i150 = 0; _i150 < _size146; ++_i150)
            {
              std::string _key151;
              xfer += iprot->readString(_key151);
              std::string& _val152 = this->parameters[_key151];
              xfer += iprot->readString(_val152);
            }
            xfer += iprot->readMapEnd();
          }
          this->__isset.parameters = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 11:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->skewedInfo.read(iprot);
          this->__isset.skewedInfo = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 12:
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->storedAsSubDirectories);
          this->__isset.storedAsSubDirectories = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip its payload entirely.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes this StorageDescriptor to oprot in field-id order.
// Fields 1-10 are emitted unconditionally; skewedInfo (11) and
// storedAsSubDirectories (12) only when their __isset flag is set.
// Returns the number of bytes written.
uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("StorageDescriptor");
  xfer += oprot->writeFieldBegin("cols", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->cols.size()));
    std::vector<FieldSchema> ::const_iterator _iter153;
    for (_iter153 = this->cols.begin(); _iter153 != this->cols.end(); ++_iter153)
    {
      xfer += (*_iter153).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("location", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->location);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("inputFormat", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeString(this->inputFormat);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("outputFormat", ::apache::thrift::protocol::T_STRING, 4);
  xfer += oprot->writeString(this->outputFormat);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("compressed", ::apache::thrift::protocol::T_BOOL, 5);
  xfer += oprot->writeBool(this->compressed);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("numBuckets", ::apache::thrift::protocol::T_I32, 6);
  xfer += oprot->writeI32(this->numBuckets);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("serdeInfo", ::apache::thrift::protocol::T_STRUCT, 7);
  xfer += this->serdeInfo.write(oprot);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("bucketCols", ::apache::thrift::protocol::T_LIST, 8);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->bucketCols.size()));
    std::vector<std::string> ::const_iterator _iter154;
    for (_iter154 = this->bucketCols.begin(); _iter154 != this->bucketCols.end(); ++_iter154)
    {
      xfer += oprot->writeString((*_iter154));
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("sortCols", ::apache::thrift::protocol::T_LIST, 9);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->sortCols.size()));
    std::vector<Order> ::const_iterator _iter155;
    for (_iter155 = this->sortCols.begin(); _iter155 != this->sortCols.end(); ++_iter155)
    {
      xfer += (*_iter155).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 10);
  {
    xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
    std::map<std::string, std::string> ::const_iterator _iter156;
    for (_iter156 = this->parameters.begin(); _iter156 != this->parameters.end(); ++_iter156)
    {
      xfer += oprot->writeString(_iter156->first);
      xfer += oprot->writeString(_iter156->second);
    }
    xfer += oprot->writeMapEnd();
  }
  xfer += oprot->writeFieldEnd();
  // Optional fields below are omitted from the wire when never set.
  if (this->__isset.skewedInfo) {
    xfer += oprot->writeFieldBegin("skewedInfo", ::apache::thrift::protocol::T_STRUCT, 11);
    xfer += this->skewedInfo.write(oprot);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.storedAsSubDirectories) {
    xfer += oprot->writeFieldBegin("storedAsSubDirectories", ::apache::thrift::protocol::T_BOOL, 12);
    xfer += oprot->writeBool(this->storedAsSubDirectories);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise swap of two StorageDescriptor instances, including the
// __isset flags. Unqualified swap calls after `using ::std::swap;`
// dispatch via ADL (so nested structs use their own free swap).
void swap(StorageDescriptor &a, StorageDescriptor &b) {
  using ::std::swap;
  swap(a.__isset, b.__isset);
  swap(a.storedAsSubDirectories, b.storedAsSubDirectories);
  swap(a.skewedInfo, b.skewedInfo);
  swap(a.parameters, b.parameters);
  swap(a.sortCols, b.sortCols);
  swap(a.bucketCols, b.bucketCols);
  swap(a.serdeInfo, b.serdeInfo);
  swap(a.numBuckets, b.numBuckets);
  swap(a.compressed, b.compressed);
  swap(a.outputFormat, b.outputFormat);
  swap(a.inputFormat, b.inputFormat);
  swap(a.location, b.location);
  swap(a.cols, b.cols);
}
// Generated schema fingerprints for Table (ASCII hex and raw bytes).
const char* Table::ascii_fingerprint = "29EFB2A5970EF572039E5D94CC78AA85";
const uint8_t Table::binary_fingerprint[16] = {0x29,0xEF,0xB2,0xA5,0x97,0x0E,0xF5,0x72,0x03,0x9E,0x5D,0x94,0xCC,0x78,0xAA,0x85};
// Deserializes a Table from iprot. Fields:
//   1 tableName: string     2 dbName: string       3 owner: string
//   4 createTime: i32       5 lastAccessTime: i32  6 retention: i32
//   7 sd: StorageDescriptor 8 partitionKeys: list<FieldSchema>
//   9 parameters: map<string,string>  10 viewOriginalText: string
//  11 viewExpandedText: string        12 tableType: string
//  13 privileges: struct             14 temporary: bool
// Mismatched wire types and unknown ids are skipped; each field read
// sets its __isset flag. Returns the number of bytes consumed.
uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->tableName);
          this->__isset.tableName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->dbName);
          this->__isset.dbName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->owner);
          this->__isset.owner = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->createTime);
          this->__isset.createTime = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->lastAccessTime);
          this->__isset.lastAccessTime = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 6:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->retention);
          this->__isset.retention = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 7:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->sd.read(iprot);
          this->__isset.sd = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 8:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->partitionKeys.clear();
            uint32_t _size157;
            ::apache::thrift::protocol::TType _etype160;
            xfer += iprot->readListBegin(_etype160, _size157);
            this->partitionKeys.resize(_size157);
            uint32_t _i161;
            for (_i161 = 0; _i161 < _size157; ++_i161)
            {
              // Each element is a nested FieldSchema struct.
              xfer += this->partitionKeys[_i161].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.partitionKeys = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 9:
        if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            this->parameters.clear();
            uint32_t _size162;
            ::apache::thrift::protocol::TType _ktype163;
            ::apache::thrift::protocol::TType _vtype164;
            xfer += iprot->readMapBegin(_ktype163, _vtype164, _size162);
            uint32_t _i166;
            for (_i166 = 0; _i166 < _size162; ++_i166)
            {
              std::string _key167;
              xfer += iprot->readString(_key167);
              std::string& _val168 = this->parameters[_key167];
              xfer += iprot->readString(_val168);
            }
            xfer += iprot->readMapEnd();
          }
          this->__isset.parameters = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 10:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->viewOriginalText);
          this->__isset.viewOriginalText = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 11:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->viewExpandedText);
          this->__isset.viewExpandedText = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 12:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->tableType);
          this->__isset.tableType = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 13:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->privileges.read(iprot);
          this->__isset.privileges = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 14:
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->temporary);
          this->__isset.temporary = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip its payload entirely.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes this Table to oprot in field-id order. Fields 1-12 are
// emitted unconditionally; privileges (13) and temporary (14) only
// when their __isset flag is set. Returns the number of bytes written.
uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("Table");
  xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->tableName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->dbName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("owner", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeString(this->owner);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("createTime", ::apache::thrift::protocol::T_I32, 4);
  xfer += oprot->writeI32(this->createTime);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("lastAccessTime", ::apache::thrift::protocol::T_I32, 5);
  xfer += oprot->writeI32(this->lastAccessTime);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("retention", ::apache::thrift::protocol::T_I32, 6);
  xfer += oprot->writeI32(this->retention);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("sd", ::apache::thrift::protocol::T_STRUCT, 7);
  xfer += this->sd.write(oprot);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("partitionKeys", ::apache::thrift::protocol::T_LIST, 8);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitionKeys.size()));
    std::vector<FieldSchema> ::const_iterator _iter169;
    for (_iter169 = this->partitionKeys.begin(); _iter169 != this->partitionKeys.end(); ++_iter169)
    {
      xfer += (*_iter169).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9);
  {
    xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
    std::map<std::string, std::string> ::const_iterator _iter170;
    for (_iter170 = this->parameters.begin(); _iter170 != this->parameters.end(); ++_iter170)
    {
      xfer += oprot->writeString(_iter170->first);
      xfer += oprot->writeString(_iter170->second);
    }
    xfer += oprot->writeMapEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("viewOriginalText", ::apache::thrift::protocol::T_STRING, 10);
  xfer += oprot->writeString(this->viewOriginalText);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("viewExpandedText", ::apache::thrift::protocol::T_STRING, 11);
  xfer += oprot->writeString(this->viewExpandedText);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("tableType", ::apache::thrift::protocol::T_STRING, 12);
  xfer += oprot->writeString(this->tableType);
  xfer += oprot->writeFieldEnd();
  // Optional fields below are omitted from the wire when never set.
  if (this->__isset.privileges) {
    xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 13);
    xfer += this->privileges.write(oprot);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.temporary) {
    xfer += oprot->writeFieldBegin("temporary", ::apache::thrift::protocol::T_BOOL, 14);
    xfer += oprot->writeBool(this->temporary);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise swap of two Table instances, including the __isset flags.
// Unqualified swap calls after `using ::std::swap;` dispatch via ADL.
void swap(Table &a, Table &b) {
  using ::std::swap;
  swap(a.__isset, b.__isset);
  swap(a.temporary, b.temporary);
  swap(a.privileges, b.privileges);
  swap(a.tableType, b.tableType);
  swap(a.viewExpandedText, b.viewExpandedText);
  swap(a.viewOriginalText, b.viewOriginalText);
  swap(a.parameters, b.parameters);
  swap(a.partitionKeys, b.partitionKeys);
  swap(a.sd, b.sd);
  swap(a.retention, b.retention);
  swap(a.lastAccessTime, b.lastAccessTime);
  swap(a.createTime, b.createTime);
  swap(a.owner, b.owner);
  swap(a.dbName, b.dbName);
  swap(a.tableName, b.tableName);
}
// Generated schema fingerprints for Partition (ASCII hex and raw bytes).
const char* Partition::ascii_fingerprint = "31A52241B88A426C34087FE38343FF51";
const uint8_t Partition::binary_fingerprint[16] = {0x31,0xA5,0x22,0x41,0xB8,0x8A,0x42,0x6C,0x34,0x08,0x7F,0xE3,0x83,0x43,0xFF,0x51};
// Deserializes a Partition from iprot. Fields:
//   1 values: list<string>  2 dbName: string  3 tableName: string
//   4 createTime: i32       5 lastAccessTime: i32
//   6 sd: StorageDescriptor 7 parameters: map<string,string>
//   8 privileges: struct
// Mismatched wire types and unknown ids are skipped; each field read
// sets its __isset flag. Returns the number of bytes consumed.
uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->values.clear();
            uint32_t _size171;
            ::apache::thrift::protocol::TType _etype174;
            xfer += iprot->readListBegin(_etype174, _size171);
            this->values.resize(_size171);
            uint32_t _i175;
            for (_i175 = 0; _i175 < _size171; ++_i175)
            {
              xfer += iprot->readString(this->values[_i175]);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.values = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->dbName);
          this->__isset.dbName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->tableName);
          this->__isset.tableName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->createTime);
          this->__isset.createTime = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->lastAccessTime);
          this->__isset.lastAccessTime = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 6:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->sd.read(iprot);
          this->__isset.sd = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 7:
        if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            this->parameters.clear();
            uint32_t _size176;
            ::apache::thrift::protocol::TType _ktype177;
            ::apache::thrift::protocol::TType _vtype178;
            xfer += iprot->readMapBegin(_ktype177, _vtype178, _size176);
            uint32_t _i180;
            for (_i180 = 0; _i180 < _size176; ++_i180)
            {
              std::string _key181;
              xfer += iprot->readString(_key181);
              std::string& _val182 = this->parameters[_key181];
              xfer += iprot->readString(_val182);
            }
            xfer += iprot->readMapEnd();
          }
          this->__isset.parameters = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 8:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->privileges.read(iprot);
          this->__isset.privileges = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip its payload entirely.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes this Partition to oprot in field-id order. Fields 1-7 are
// emitted unconditionally; privileges (8) only when its __isset flag is
// set. Returns the number of bytes written.
uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("Partition");
  xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->values.size()));
    std::vector<std::string> ::const_iterator _iter183;
    for (_iter183 = this->values.begin(); _iter183 != this->values.end(); ++_iter183)
    {
      xfer += oprot->writeString((*_iter183));
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->dbName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeString(this->tableName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("createTime", ::apache::thrift::protocol::T_I32, 4);
  xfer += oprot->writeI32(this->createTime);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("lastAccessTime", ::apache::thrift::protocol::T_I32, 5);
  xfer += oprot->writeI32(this->lastAccessTime);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("sd", ::apache::thrift::protocol::T_STRUCT, 6);
  xfer += this->sd.write(oprot);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 7);
  {
    xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
    std::map<std::string, std::string> ::const_iterator _iter184;
    for (_iter184 = this->parameters.begin(); _iter184 != this->parameters.end(); ++_iter184)
    {
      xfer += oprot->writeString(_iter184->first);
      xfer += oprot->writeString(_iter184->second);
    }
    xfer += oprot->writeMapEnd();
  }
  xfer += oprot->writeFieldEnd();
  // Optional field: omitted from the wire when never set.
  if (this->__isset.privileges) {
    xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 8);
    xfer += this->privileges.write(oprot);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise swap of two Partition instances, including the __isset
// flags. Unqualified swap calls after `using ::std::swap;` use ADL.
void swap(Partition &a, Partition &b) {
  using ::std::swap;
  swap(a.__isset, b.__isset);
  swap(a.privileges, b.privileges);
  swap(a.parameters, b.parameters);
  swap(a.sd, b.sd);
  swap(a.lastAccessTime, b.lastAccessTime);
  swap(a.createTime, b.createTime);
  swap(a.tableName, b.tableName);
  swap(a.dbName, b.dbName);
  swap(a.values, b.values);
}
// Generated schema fingerprints for PartitionWithoutSD (ASCII hex and raw bytes).
const char* PartitionWithoutSD::ascii_fingerprint = "D79FA44499888D0E50B5625E0C536DEA";
const uint8_t PartitionWithoutSD::binary_fingerprint[16] = {0xD7,0x9F,0xA4,0x44,0x99,0x88,0x8D,0x0E,0x50,0xB5,0x62,0x5E,0x0C,0x53,0x6D,0xEA};
// Deserializes a PartitionWithoutSD from iprot. Fields:
//   1 values: list<string>  2 createTime: i32  3 lastAccessTime: i32
//   4 relativePath: string  5 parameters: map<string,string>
//   6 privileges: struct
// Mismatched wire types and unknown ids are skipped; each field read
// sets its __isset flag. Returns the number of bytes consumed.
uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->values.clear();
            uint32_t _size185;
            ::apache::thrift::protocol::TType _etype188;
            xfer += iprot->readListBegin(_etype188, _size185);
            this->values.resize(_size185);
            uint32_t _i189;
            for (_i189 = 0; _i189 < _size185; ++_i189)
            {
              xfer += iprot->readString(this->values[_i189]);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.values = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->createTime);
          this->__isset.createTime = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->lastAccessTime);
          this->__isset.lastAccessTime = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->relativePath);
          this->__isset.relativePath = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            this->parameters.clear();
            uint32_t _size190;
            ::apache::thrift::protocol::TType _ktype191;
            ::apache::thrift::protocol::TType _vtype192;
            xfer += iprot->readMapBegin(_ktype191, _vtype192, _size190);
            uint32_t _i194;
            for (_i194 = 0; _i194 < _size190; ++_i194)
            {
              std::string _key195;
              xfer += iprot->readString(_key195);
              std::string& _val196 = this->parameters[_key195];
              xfer += iprot->readString(_val196);
            }
            xfer += iprot->readMapEnd();
          }
          this->__isset.parameters = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 6:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->privileges.read(iprot);
          this->__isset.privileges = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip its payload entirely.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes this PartitionWithoutSD to oprot in field-id order.
// Fields 1-5 are emitted unconditionally; privileges (6) only when its
// __isset flag is set. Returns the number of bytes written.
uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("PartitionWithoutSD");
  xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->values.size()));
    std::vector<std::string> ::const_iterator _iter197;
    for (_iter197 = this->values.begin(); _iter197 != this->values.end(); ++_iter197)
    {
      xfer += oprot->writeString((*_iter197));
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("createTime", ::apache::thrift::protocol::T_I32, 2);
  xfer += oprot->writeI32(this->createTime);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("lastAccessTime", ::apache::thrift::protocol::T_I32, 3);
  xfer += oprot->writeI32(this->lastAccessTime);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("relativePath", ::apache::thrift::protocol::T_STRING, 4);
  xfer += oprot->writeString(this->relativePath);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 5);
  {
    xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
    std::map<std::string, std::string> ::const_iterator _iter198;
    for (_iter198 = this->parameters.begin(); _iter198 != this->parameters.end(); ++_iter198)
    {
      xfer += oprot->writeString(_iter198->first);
      xfer += oprot->writeString(_iter198->second);
    }
    xfer += oprot->writeMapEnd();
  }
  xfer += oprot->writeFieldEnd();
  // Optional field: omitted from the wire when never set.
  if (this->__isset.privileges) {
    xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_STRUCT, 6);
    xfer += this->privileges.write(oprot);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(PartitionWithoutSD &a, PartitionWithoutSD &b) {
  // Member-wise exchange of two PartitionWithoutSD values.  The
  // using-declaration lets ADL pick struct-specific swap overloads,
  // falling back to ::std::swap for plain members.
  using ::std::swap;
  swap(a.__isset, b.__isset);
  swap(a.privileges, b.privileges);
  swap(a.parameters, b.parameters);
  swap(a.relativePath, b.relativePath);
  swap(a.lastAccessTime, b.lastAccessTime);
  swap(a.createTime, b.createTime);
  swap(a.values, b.values);
}
// Schema fingerprints used by the Thrift runtime to detect version skew.
const char* PartitionSpecWithSharedSD::ascii_fingerprint = "7BEE9305B42DCD083FF06BEE6DDC61CF";
const uint8_t PartitionSpecWithSharedSD::binary_fingerprint[16] = {0x7B,0xEE,0x93,0x05,0xB4,0x2D,0xCD,0x08,0x3F,0xF0,0x6B,0xEE,0x6D,0xDC,0x61,0xCF};
uint32_t PartitionSpecWithSharedSD::read(::apache::thrift::protocol::TProtocol* iprot) {
  // Deserializes a PartitionSpecWithSharedSD from iprot.  Loops over
  // incoming fields until T_STOP, dispatching on field id; fields with an
  // unknown id or unexpected wire type are skipped.  Returns bytes read.
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        // partitions: list<PartitionWithoutSD>
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->partitions.clear();
            uint32_t _size199;
            ::apache::thrift::protocol::TType _etype202;
            xfer += iprot->readListBegin(_etype202, _size199);
            // Pre-size the vector, then read each element in place.
            this->partitions.resize(_size199);
            uint32_t _i203;
            for (_i203 = 0; _i203 < _size199; ++_i203)
            {
              xfer += this->partitions[_i203].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.partitions = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        // sd: the StorageDescriptor shared by all partitions above.
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->sd.read(iprot);
          this->__isset.sd = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t PartitionSpecWithSharedSD::write(::apache::thrift::protocol::TProtocol* oprot) const {
  // Serializes this PartitionSpecWithSharedSD (partitions list, then shared
  // sd struct) to oprot.  Returns the number of bytes written.
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("PartitionSpecWithSharedSD");
  xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size()));
    std::vector<PartitionWithoutSD> ::const_iterator _iter204;
    for (_iter204 = this->partitions.begin(); _iter204 != this->partitions.end(); ++_iter204)
    {
      xfer += (*_iter204).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("sd", ::apache::thrift::protocol::T_STRUCT, 2);
  xfer += this->sd.write(oprot);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(PartitionSpecWithSharedSD &a, PartitionSpecWithSharedSD &b) {
  // Member-wise exchange; ADL via the using-declaration picks up any
  // struct-specific swap overloads.
  using ::std::swap;
  swap(a.__isset, b.__isset);
  swap(a.sd, b.sd);
  swap(a.partitions, b.partitions);
}
// Schema fingerprints used by the Thrift runtime to detect version skew.
const char* PartitionListComposingSpec::ascii_fingerprint = "A048235CB9A257C8A74E3691BEFE0674";
const uint8_t PartitionListComposingSpec::binary_fingerprint[16] = {0xA0,0x48,0x23,0x5C,0xB9,0xA2,0x57,0xC8,0xA7,0x4E,0x36,0x91,0xBE,0xFE,0x06,0x74};
uint32_t PartitionListComposingSpec::read(::apache::thrift::protocol::TProtocol* iprot) {
  // Deserializes a PartitionListComposingSpec from iprot.  Loops over
  // incoming fields until T_STOP; unknown ids or mismatched wire types are
  // skipped.  Returns the number of bytes consumed.
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        // partitions: list<Partition> (full partitions, each with its own SD).
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->partitions.clear();
            uint32_t _size205;
            ::apache::thrift::protocol::TType _etype208;
            xfer += iprot->readListBegin(_etype208, _size205);
            // Pre-size, then read each element in place.
            this->partitions.resize(_size205);
            uint32_t _i209;
            for (_i209 = 0; _i209 < _size205; ++_i209)
            {
              xfer += this->partitions[_i209].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.partitions = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t PartitionListComposingSpec::write(::apache::thrift::protocol::TProtocol* oprot) const {
  // Serializes this PartitionListComposingSpec (a single list of Partition
  // structs) to oprot.  Returns the number of bytes written.
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("PartitionListComposingSpec");
  xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size()));
    std::vector<Partition> ::const_iterator _iter210;
    for (_iter210 = this->partitions.begin(); _iter210 != this->partitions.end(); ++_iter210)
    {
      xfer += (*_iter210).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(PartitionListComposingSpec &a, PartitionListComposingSpec &b) {
  // Member-wise exchange; the using-declaration enables ADL for member swaps.
  using ::std::swap;
  swap(a.__isset, b.__isset);
  swap(a.partitions, b.partitions);
}
// Schema fingerprints used by the Thrift runtime to detect version skew.
const char* PartitionSpec::ascii_fingerprint = "C3F548C24D072CF6422F25096143E3E8";
const uint8_t PartitionSpec::binary_fingerprint[16] = {0xC3,0xF5,0x48,0xC2,0x4D,0x07,0x2C,0xF6,0x42,0x2F,0x25,0x09,0x61,0x43,0xE3,0xE8};
uint32_t PartitionSpec::read(::apache::thrift::protocol::TProtocol* iprot) {
  // Deserializes a PartitionSpec from iprot.  Fields: dbName, tableName,
  // rootPath (strings), plus two alternative partition representations
  // (sharedSDPartitionSpec / partitionList).  Unknown ids or mismatched
  // wire types are skipped.  Returns the number of bytes consumed.
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->dbName);
          this->__isset.dbName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->tableName);
          this->__isset.tableName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->rootPath);
          this->__isset.rootPath = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->sharedSDPartitionSpec.read(iprot);
          this->__isset.sharedSDPartitionSpec = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->partitionList.read(iprot);
          this->__isset.partitionList = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t PartitionSpec::write(::apache::thrift::protocol::TProtocol* oprot) const {
  // Serializes this PartitionSpec to oprot in field-id order.  The two
  // struct-valued representations are optional and only written when set.
  // Returns the number of bytes written.
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("PartitionSpec");
  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->dbName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->tableName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("rootPath", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeString(this->rootPath);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.sharedSDPartitionSpec) {
    xfer += oprot->writeFieldBegin("sharedSDPartitionSpec", ::apache::thrift::protocol::T_STRUCT, 4);
    xfer += this->sharedSDPartitionSpec.write(oprot);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.partitionList) {
    xfer += oprot->writeFieldBegin("partitionList", ::apache::thrift::protocol::T_STRUCT, 5);
    xfer += this->partitionList.write(oprot);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(PartitionSpec &a, PartitionSpec &b) {
  // Member-wise exchange of two PartitionSpec values, isset flags included.
  using ::std::swap;
  swap(a.__isset, b.__isset);
  swap(a.partitionList, b.partitionList);
  swap(a.sharedSDPartitionSpec, b.sharedSDPartitionSpec);
  swap(a.rootPath, b.rootPath);
  swap(a.tableName, b.tableName);
  swap(a.dbName, b.dbName);
}
// Schema fingerprints used by the Thrift runtime to detect version skew.
const char* Index::ascii_fingerprint = "09EEF655216AC81802850988D6C470A6";
const uint8_t Index::binary_fingerprint[16] = {0x09,0xEE,0xF6,0x55,0x21,0x6A,0xC8,0x18,0x02,0x85,0x09,0x88,0xD6,0xC4,0x70,0xA6};
uint32_t Index::read(::apache::thrift::protocol::TProtocol* iprot) {
  // Deserializes an Index from iprot.  Loops over incoming fields until
  // T_STOP, dispatching on field id (1..10); unknown ids or mismatched wire
  // types are skipped.  Returns the number of bytes consumed.
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->indexName);
          this->__isset.indexName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->indexHandlerClass);
          this->__isset.indexHandlerClass = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->dbName);
          this->__isset.dbName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->origTableName);
          this->__isset.origTableName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->createTime);
          this->__isset.createTime = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 6:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->lastAccessTime);
          this->__isset.lastAccessTime = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 7:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->indexTableName);
          this->__isset.indexTableName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 8:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->sd.read(iprot);
          this->__isset.sd = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 9:
        // parameters: map<string,string>; read entries into the std::map,
        // inserting via operator[] and filling the value in place.
        if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            this->parameters.clear();
            uint32_t _size211;
            ::apache::thrift::protocol::TType _ktype212;
            ::apache::thrift::protocol::TType _vtype213;
            xfer += iprot->readMapBegin(_ktype212, _vtype213, _size211);
            uint32_t _i215;
            for (_i215 = 0; _i215 < _size211; ++_i215)
            {
              std::string _key216;
              xfer += iprot->readString(_key216);
              std::string& _val217 = this->parameters[_key216];
              xfer += iprot->readString(_val217);
            }
            xfer += iprot->readMapEnd();
          }
          this->__isset.parameters = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 10:
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->deferredRebuild);
          this->__isset.deferredRebuild = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t Index::write(::apache::thrift::protocol::TProtocol* oprot) const {
  // Serializes this Index to oprot, all ten fields unconditionally, in
  // field-id order.  Returns the number of bytes written.
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("Index");
  xfer += oprot->writeFieldBegin("indexName", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->indexName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("indexHandlerClass", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->indexHandlerClass);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeString(this->dbName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("origTableName", ::apache::thrift::protocol::T_STRING, 4);
  xfer += oprot->writeString(this->origTableName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("createTime", ::apache::thrift::protocol::T_I32, 5);
  xfer += oprot->writeI32(this->createTime);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("lastAccessTime", ::apache::thrift::protocol::T_I32, 6);
  xfer += oprot->writeI32(this->lastAccessTime);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("indexTableName", ::apache::thrift::protocol::T_STRING, 7);
  xfer += oprot->writeString(this->indexTableName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("sd", ::apache::thrift::protocol::T_STRUCT, 8);
  xfer += this->sd.write(oprot);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9);
  {
    // Map header carries key/value types and entry count, then key,value pairs.
    xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
    std::map<std::string, std::string> ::const_iterator _iter218;
    for (_iter218 = this->parameters.begin(); _iter218 != this->parameters.end(); ++_iter218)
    {
      xfer += oprot->writeString(_iter218->first);
      xfer += oprot->writeString(_iter218->second);
    }
    xfer += oprot->writeMapEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("deferredRebuild", ::apache::thrift::protocol::T_BOOL, 10);
  xfer += oprot->writeBool(this->deferredRebuild);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(Index &a, Index &b) {
  // Member-wise exchange of two Index values, isset flags included.  The
  // using-declaration enables ADL for struct members such as sd.
  using ::std::swap;
  swap(a.__isset, b.__isset);
  swap(a.deferredRebuild, b.deferredRebuild);
  swap(a.parameters, b.parameters);
  swap(a.sd, b.sd);
  swap(a.indexTableName, b.indexTableName);
  swap(a.lastAccessTime, b.lastAccessTime);
  swap(a.createTime, b.createTime);
  swap(a.origTableName, b.origTableName);
  swap(a.dbName, b.dbName);
  swap(a.indexHandlerClass, b.indexHandlerClass);
  swap(a.indexName, b.indexName);
}
// Schema fingerprints used by the Thrift runtime to detect version skew.
const char* BooleanColumnStatsData::ascii_fingerprint = "EA2D65F1E0BB78760205682082304B41";
const uint8_t BooleanColumnStatsData::binary_fingerprint[16] = {0xEA,0x2D,0x65,0xF1,0xE0,0xBB,0x78,0x76,0x02,0x05,0x68,0x20,0x82,0x30,0x4B,0x41};
uint32_t BooleanColumnStatsData::read(::apache::thrift::protocol::TProtocol* iprot) {
  // Deserializes a BooleanColumnStatsData from iprot.  All three fields
  // are required; their arrival is tracked locally and validated after the
  // field loop.  Returns the number of bytes consumed.
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_numTrues = false;
  bool isset_numFalses = false;
  bool isset_numNulls = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->numTrues);
          isset_numTrues = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->numFalses);
          isset_numFalses = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->numNulls);
          isset_numNulls = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation: reject the message if any field was missing.
  if (!isset_numTrues)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_numFalses)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_numNulls)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
uint32_t BooleanColumnStatsData::write(::apache::thrift::protocol::TProtocol* oprot) const {
  // Serializes this BooleanColumnStatsData (three required i64 counters)
  // to oprot.  Returns the number of bytes written.
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("BooleanColumnStatsData");
  xfer += oprot->writeFieldBegin("numTrues", ::apache::thrift::protocol::T_I64, 1);
  xfer += oprot->writeI64(this->numTrues);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("numFalses", ::apache::thrift::protocol::T_I64, 2);
  xfer += oprot->writeI64(this->numFalses);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("numNulls", ::apache::thrift::protocol::T_I64, 3);
  xfer += oprot->writeI64(this->numNulls);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(BooleanColumnStatsData &a, BooleanColumnStatsData &b) {
  // Member-wise exchange; no isset state to swap (all fields required).
  using ::std::swap;
  swap(a.numNulls, b.numNulls);
  swap(a.numFalses, b.numFalses);
  swap(a.numTrues, b.numTrues);
}
// Schema fingerprints used by the Thrift runtime to detect version skew.
const char* DoubleColumnStatsData::ascii_fingerprint = "DA7C011321D74C48396AA002E61A0CBB";
const uint8_t DoubleColumnStatsData::binary_fingerprint[16] = {0xDA,0x7C,0x01,0x13,0x21,0xD7,0x4C,0x48,0x39,0x6A,0xA0,0x02,0xE6,0x1A,0x0C,0xBB};
uint32_t DoubleColumnStatsData::read(::apache::thrift::protocol::TProtocol* iprot) {
  // Deserializes a DoubleColumnStatsData from iprot.  lowValue/highValue
  // are optional (tracked in __isset); numNulls/numDVs are required and
  // validated after the field loop.  Returns the number of bytes consumed.
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_numNulls = false;
  bool isset_numDVs = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_DOUBLE) {
          xfer += iprot->readDouble(this->lowValue);
          this->__isset.lowValue = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_DOUBLE) {
          xfer += iprot->readDouble(this->highValue);
          this->__isset.highValue = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->numNulls);
          isset_numNulls = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->numDVs);
          isset_numDVs = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_numNulls)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_numDVs)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
uint32_t DoubleColumnStatsData::write(::apache::thrift::protocol::TProtocol* oprot) const {
  // Serializes this DoubleColumnStatsData: optional lowValue/highValue
  // only when set, then required numNulls/numDVs.  Returns bytes written.
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("DoubleColumnStatsData");
  if (this->__isset.lowValue) {
    xfer += oprot->writeFieldBegin("lowValue", ::apache::thrift::protocol::T_DOUBLE, 1);
    xfer += oprot->writeDouble(this->lowValue);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.highValue) {
    xfer += oprot->writeFieldBegin("highValue", ::apache::thrift::protocol::T_DOUBLE, 2);
    xfer += oprot->writeDouble(this->highValue);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldBegin("numNulls", ::apache::thrift::protocol::T_I64, 3);
  xfer += oprot->writeI64(this->numNulls);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("numDVs", ::apache::thrift::protocol::T_I64, 4);
  xfer += oprot->writeI64(this->numDVs);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(DoubleColumnStatsData &a, DoubleColumnStatsData &b) {
  // Member-wise exchange, including the isset flags for the optional bounds.
  using ::std::swap;
  swap(a.__isset, b.__isset);
  swap(a.numDVs, b.numDVs);
  swap(a.numNulls, b.numNulls);
  swap(a.highValue, b.highValue);
  swap(a.lowValue, b.lowValue);
}
// Schema fingerprints used by the Thrift runtime to detect version skew.
const char* LongColumnStatsData::ascii_fingerprint = "E685FC220B24E3B8B93604790DCB9AEA";
const uint8_t LongColumnStatsData::binary_fingerprint[16] = {0xE6,0x85,0xFC,0x22,0x0B,0x24,0xE3,0xB8,0xB9,0x36,0x04,0x79,0x0D,0xCB,0x9A,0xEA};
uint32_t LongColumnStatsData::read(::apache::thrift::protocol::TProtocol* iprot) {
  // Deserializes a LongColumnStatsData from iprot.  lowValue/highValue are
  // optional (tracked in __isset); numNulls/numDVs are required and
  // validated after the field loop.  Returns the number of bytes consumed.
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_numNulls = false;
  bool isset_numDVs = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->lowValue);
          this->__isset.lowValue = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->highValue);
          this->__isset.highValue = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->numNulls);
          isset_numNulls = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->numDVs);
          isset_numDVs = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_numNulls)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_numDVs)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
uint32_t LongColumnStatsData::write(::apache::thrift::protocol::TProtocol* oprot) const {
  // Serializes this LongColumnStatsData: optional lowValue/highValue only
  // when set, then required numNulls/numDVs.  Returns bytes written.
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("LongColumnStatsData");
  if (this->__isset.lowValue) {
    xfer += oprot->writeFieldBegin("lowValue", ::apache::thrift::protocol::T_I64, 1);
    xfer += oprot->writeI64(this->lowValue);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.highValue) {
    xfer += oprot->writeFieldBegin("highValue", ::apache::thrift::protocol::T_I64, 2);
    xfer += oprot->writeI64(this->highValue);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldBegin("numNulls", ::apache::thrift::protocol::T_I64, 3);
  xfer += oprot->writeI64(this->numNulls);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("numDVs", ::apache::thrift::protocol::T_I64, 4);
  xfer += oprot->writeI64(this->numDVs);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(LongColumnStatsData &a, LongColumnStatsData &b) {
  // Member-wise exchange, including the isset flags for the optional bounds.
  using ::std::swap;
  swap(a.__isset, b.__isset);
  swap(a.numDVs, b.numDVs);
  swap(a.numNulls, b.numNulls);
  swap(a.highValue, b.highValue);
  swap(a.lowValue, b.lowValue);
}
// Schema fingerprints used by the Thrift runtime to detect version skew.
const char* StringColumnStatsData::ascii_fingerprint = "D017B08C3DF12C3AB98788B2E67DAAB3";
const uint8_t StringColumnStatsData::binary_fingerprint[16] = {0xD0,0x17,0xB0,0x8C,0x3D,0xF1,0x2C,0x3A,0xB9,0x87,0x88,0xB2,0xE6,0x7D,0xAA,0xB3};
uint32_t StringColumnStatsData::read(::apache::thrift::protocol::TProtocol* iprot) {
  // Deserializes a StringColumnStatsData from iprot.  All four fields
  // (maxColLen, avgColLen, numNulls, numDVs) are required; arrival is
  // tracked locally and validated after the field loop.  Returns bytes read.
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_maxColLen = false;
  bool isset_avgColLen = false;
  bool isset_numNulls = false;
  bool isset_numDVs = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->maxColLen);
          isset_maxColLen = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_DOUBLE) {
          xfer += iprot->readDouble(this->avgColLen);
          isset_avgColLen = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->numNulls);
          isset_numNulls = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->numDVs);
          isset_numDVs = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation: reject the message if any field was missing.
  if (!isset_maxColLen)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_avgColLen)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_numNulls)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_numDVs)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
uint32_t StringColumnStatsData::write(::apache::thrift::protocol::TProtocol* oprot) const {
  // Serializes this StringColumnStatsData (four required fields) to oprot.
  // Returns the number of bytes written.
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("StringColumnStatsData");
  xfer += oprot->writeFieldBegin("maxColLen", ::apache::thrift::protocol::T_I64, 1);
  xfer += oprot->writeI64(this->maxColLen);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("avgColLen", ::apache::thrift::protocol::T_DOUBLE, 2);
  xfer += oprot->writeDouble(this->avgColLen);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("numNulls", ::apache::thrift::protocol::T_I64, 3);
  xfer += oprot->writeI64(this->numNulls);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("numDVs", ::apache::thrift::protocol::T_I64, 4);
  xfer += oprot->writeI64(this->numDVs);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(StringColumnStatsData &a, StringColumnStatsData &b) {
  // Member-wise exchange; no isset state to swap (all fields required).
  using ::std::swap;
  swap(a.numDVs, b.numDVs);
  swap(a.numNulls, b.numNulls);
  swap(a.avgColLen, b.avgColLen);
  swap(a.maxColLen, b.maxColLen);
}
// Schema fingerprints used by the Thrift runtime to detect version skew.
const char* BinaryColumnStatsData::ascii_fingerprint = "22B0CB67183FCDB945892B9974518D06";
const uint8_t BinaryColumnStatsData::binary_fingerprint[16] = {0x22,0xB0,0xCB,0x67,0x18,0x3F,0xCD,0xB9,0x45,0x89,0x2B,0x99,0x74,0x51,0x8D,0x06};
uint32_t BinaryColumnStatsData::read(::apache::thrift::protocol::TProtocol* iprot) {
  // Deserializes a BinaryColumnStatsData from iprot.  All three fields
  // (maxColLen, avgColLen, numNulls) are required; arrival is tracked
  // locally and validated after the field loop.  Returns bytes read.
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_maxColLen = false;
  bool isset_avgColLen = false;
  bool isset_numNulls = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->maxColLen);
          isset_maxColLen = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_DOUBLE) {
          xfer += iprot->readDouble(this->avgColLen);
          isset_avgColLen = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->numNulls);
          isset_numNulls = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation: reject the message if any field was missing.
  if (!isset_maxColLen)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_avgColLen)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_numNulls)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
uint32_t BinaryColumnStatsData::write(::apache::thrift::protocol::TProtocol* oprot) const {
  // Serializes this BinaryColumnStatsData (three required fields) to oprot.
  // Returns the number of bytes written.
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("BinaryColumnStatsData");
  xfer += oprot->writeFieldBegin("maxColLen", ::apache::thrift::protocol::T_I64, 1);
  xfer += oprot->writeI64(this->maxColLen);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("avgColLen", ::apache::thrift::protocol::T_DOUBLE, 2);
  xfer += oprot->writeDouble(this->avgColLen);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("numNulls", ::apache::thrift::protocol::T_I64, 3);
  xfer += oprot->writeI64(this->numNulls);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(BinaryColumnStatsData &a, BinaryColumnStatsData &b) {
  // Member-wise exchange; no isset state to swap (all fields required).
  using ::std::swap;
  swap(a.numNulls, b.numNulls);
  swap(a.avgColLen, b.avgColLen);
  swap(a.maxColLen, b.maxColLen);
}
// Schema fingerprints used by the Thrift runtime to detect version skew.
const char* Decimal::ascii_fingerprint = "C4DDF6759F9B17C5C380806CE743DE8E";
const uint8_t Decimal::binary_fingerprint[16] = {0xC4,0xDD,0xF6,0x75,0x9F,0x9B,0x17,0xC5,0xC3,0x80,0x80,0x6C,0xE7,0x43,0xDE,0x8E};
uint32_t Decimal::read(::apache::thrift::protocol::TProtocol* iprot) {
  // Deserializes a Decimal (arbitrary-precision value as unscaled bytes +
  // i16 scale) from iprot.  Both fields are required and validated after
  // the field loop.  NOTE(review): field ids jump from 1 to 3 — id 2
  // presumably is unused in the IDL; matches the write() side below.
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_unscaled = false;
  bool isset_scale = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        // unscaled: binary payload (Thrift binary travels as T_STRING).
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readBinary(this->unscaled);
          isset_unscaled = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_I16) {
          xfer += iprot->readI16(this->scale);
          isset_scale = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_unscaled)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_scale)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
uint32_t Decimal::write(::apache::thrift::protocol::TProtocol* oprot) const {
  // Serializes this Decimal (unscaled binary at id 1, scale at id 3) to
  // oprot.  Returns the number of bytes written.
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("Decimal");
  xfer += oprot->writeFieldBegin("unscaled", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeBinary(this->unscaled);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("scale", ::apache::thrift::protocol::T_I16, 3);
  xfer += oprot->writeI16(this->scale);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(Decimal &a, Decimal &b) {
  // Member-wise exchange; no isset state to swap (both fields required).
  using ::std::swap;
  swap(a.scale, b.scale);
  swap(a.unscaled, b.unscaled);
}
// Schema fingerprints used by the Thrift runtime to detect version skew.
const char* DecimalColumnStatsData::ascii_fingerprint = "B6D47E7A28922BFA93FE05E9F1B04748";
const uint8_t DecimalColumnStatsData::binary_fingerprint[16] = {0xB6,0xD4,0x7E,0x7A,0x28,0x92,0x2B,0xFA,0x93,0xFE,0x05,0xE9,0xF1,0xB0,0x47,0x48};
uint32_t DecimalColumnStatsData::read(::apache::thrift::protocol::TProtocol* iprot) {
  // Deserializes a DecimalColumnStatsData from iprot.  lowValue/highValue
  // are optional Decimal structs (tracked in __isset); numNulls/numDVs are
  // required and validated after the field loop.  Returns bytes read.
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_numNulls = false;
  bool isset_numDVs = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->lowValue.read(iprot);
          this->__isset.lowValue = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->highValue.read(iprot);
          this->__isset.highValue = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->numNulls);
          isset_numNulls = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->numDVs);
          isset_numDVs = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_numNulls)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_numDVs)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
uint32_t DecimalColumnStatsData::write(::apache::thrift::protocol::TProtocol* oprot) const {
  // Serializes this DecimalColumnStatsData: optional lowValue/highValue
  // Decimal structs only when set, then required numNulls/numDVs.
  // Returns the number of bytes written.
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("DecimalColumnStatsData");
  if (this->__isset.lowValue) {
    xfer += oprot->writeFieldBegin("lowValue", ::apache::thrift::protocol::T_STRUCT, 1);
    xfer += this->lowValue.write(oprot);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.highValue) {
    xfer += oprot->writeFieldBegin("highValue", ::apache::thrift::protocol::T_STRUCT, 2);
    xfer += this->highValue.write(oprot);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldBegin("numNulls", ::apache::thrift::protocol::T_I64, 3);
  xfer += oprot->writeI64(this->numNulls);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("numDVs", ::apache::thrift::protocol::T_I64, 4);
  xfer += oprot->writeI64(this->numDVs);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(DecimalColumnStatsData &a, DecimalColumnStatsData &b) {
  // Member-wise exchange, including the isset flags for the optional bounds.
  using ::std::swap;
  swap(a.__isset, b.__isset);
  swap(a.numDVs, b.numDVs);
  swap(a.numNulls, b.numNulls);
  swap(a.highValue, b.highValue);
  swap(a.lowValue, b.lowValue);
}
// Thrift-compiler schema fingerprints for ColumnStatisticsData (ASCII and binary forms).
const char* ColumnStatisticsData::ascii_fingerprint = "D079ACEA6EE0998D0A45CB65FF1EAADD";
const uint8_t ColumnStatisticsData::binary_fingerprint[16] = {0xD0,0x79,0xAC,0xEA,0x6E,0xE0,0x99,0x8D,0x0A,0x45,0xCB,0x65,0xFF,0x1E,0xAA,0xDD};
// Reads a ColumnStatisticsData (one of six stats variants) from iprot.
// Each recognized field sets its __isset flag; unknown field ids or
// mismatched types are skipped for wire compatibility. No field is
// required. Returns the byte count reported by the protocol calls.
uint32_t ColumnStatisticsData::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->booleanStats.read(iprot);
          this->__isset.booleanStats = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->longStats.read(iprot);
          this->__isset.longStats = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->doubleStats.read(iprot);
          this->__isset.doubleStats = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->stringStats.read(iprot);
          this->__isset.stringStats = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->binaryStats.read(iprot);
          this->__isset.binaryStats = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 6:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->decimalStats.read(iprot);
          this->__isset.decimalStats = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes this ColumnStatisticsData to the Thrift protocol.
// NOTE(review): all six variant fields are written unconditionally,
// regardless of their __isset flags — this mirrors the generator's
// union-as-struct output. Returns the byte count reported by the protocol.
uint32_t ColumnStatisticsData::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("ColumnStatisticsData");
  xfer += oprot->writeFieldBegin("booleanStats", ::apache::thrift::protocol::T_STRUCT, 1);
  xfer += this->booleanStats.write(oprot);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("longStats", ::apache::thrift::protocol::T_STRUCT, 2);
  xfer += this->longStats.write(oprot);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("doubleStats", ::apache::thrift::protocol::T_STRUCT, 3);
  xfer += this->doubleStats.write(oprot);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("stringStats", ::apache::thrift::protocol::T_STRUCT, 4);
  xfer += this->stringStats.write(oprot);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("binaryStats", ::apache::thrift::protocol::T_STRUCT, 5);
  xfer += this->binaryStats.write(oprot);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("decimalStats", ::apache::thrift::protocol::T_STRUCT, 6);
  xfer += this->decimalStats.write(oprot);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two ColumnStatisticsData values member-wise.
void swap(ColumnStatisticsData &a, ColumnStatisticsData &b) {
  using ::std::swap;
  // Distinct members — swap order has no observable effect.
  swap(a.__isset, b.__isset);
  swap(a.decimalStats, b.decimalStats);
  swap(a.binaryStats, b.binaryStats);
  swap(a.stringStats, b.stringStats);
  swap(a.doubleStats, b.doubleStats);
  swap(a.longStats, b.longStats);
  swap(a.booleanStats, b.booleanStats);
}
// Thrift-compiler schema fingerprints for ColumnStatisticsObj (ASCII and binary forms).
const char* ColumnStatisticsObj::ascii_fingerprint = "E49E62CFC71682004614EFEDAC3CD3F4";
const uint8_t ColumnStatisticsObj::binary_fingerprint[16] = {0xE4,0x9E,0x62,0xCF,0xC7,0x16,0x82,0x00,0x46,0x14,0xEF,0xED,0xAC,0x3C,0xD3,0xF4};
// Reads a ColumnStatisticsObj from iprot. All three fields (colName,
// colType, statsData) are required; a TProtocolException(INVALID_DATA)
// is thrown after the struct end if any is missing. Unknown fields are
// skipped. Returns the byte count reported by the protocol calls.
uint32_t ColumnStatisticsObj::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  // Track required fields so missing ones can be rejected after the read.
  bool isset_colName = false;
  bool isset_colType = false;
  bool isset_statsData = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->colName);
          isset_colName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->colType);
          isset_colType = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->statsData.read(iprot);
          isset_statsData = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_colName)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_colType)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_statsData)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this ColumnStatisticsObj (colName, colType, statsData — all
// required, always written). Returns the byte count reported by the protocol.
uint32_t ColumnStatisticsObj::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("ColumnStatisticsObj");
  xfer += oprot->writeFieldBegin("colName", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->colName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("colType", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->colType);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("statsData", ::apache::thrift::protocol::T_STRUCT, 3);
  xfer += this->statsData.write(oprot);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two ColumnStatisticsObj values member-wise.
// (This struct carries no optional fields, hence no __isset to swap.)
void swap(ColumnStatisticsObj &a, ColumnStatisticsObj &b) {
  using ::std::swap;
  swap(a.statsData, b.statsData);
  swap(a.colType, b.colType);
  swap(a.colName, b.colName);
}
// Thrift-compiler schema fingerprints for ColumnStatisticsDesc (ASCII and binary forms).
const char* ColumnStatisticsDesc::ascii_fingerprint = "261759FF6F8FAB53F941453007FE18CB";
const uint8_t ColumnStatisticsDesc::binary_fingerprint[16] = {0x26,0x17,0x59,0xFF,0x6F,0x8F,0xAB,0x53,0xF9,0x41,0x45,0x30,0x07,0xFE,0x18,0xCB};
// Reads a ColumnStatisticsDesc from iprot. isTblLevel, dbName and
// tableName are required (INVALID_DATA thrown if absent); partName and
// lastAnalyzed are optional and only flagged via __isset. Unknown fields
// are skipped. Returns the byte count reported by the protocol calls.
uint32_t ColumnStatisticsDesc::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  // Track required fields so missing ones can be rejected after the read.
  bool isset_isTblLevel = false;
  bool isset_dbName = false;
  bool isset_tableName = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->isTblLevel);
          isset_isTblLevel = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->dbName);
          isset_dbName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->tableName);
          isset_tableName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->partName);
          this->__isset.partName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->lastAnalyzed);
          this->__isset.lastAnalyzed = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_isTblLevel)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_dbName)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_tableName)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this ColumnStatisticsDesc. Required fields are always
// written; partName and lastAnalyzed are emitted only when their __isset
// flag is set. Returns the byte count reported by the protocol calls.
uint32_t ColumnStatisticsDesc::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("ColumnStatisticsDesc");
  xfer += oprot->writeFieldBegin("isTblLevel", ::apache::thrift::protocol::T_BOOL, 1);
  xfer += oprot->writeBool(this->isTblLevel);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->dbName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeString(this->tableName);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.partName) {
    xfer += oprot->writeFieldBegin("partName", ::apache::thrift::protocol::T_STRING, 4);
    xfer += oprot->writeString(this->partName);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.lastAnalyzed) {
    xfer += oprot->writeFieldBegin("lastAnalyzed", ::apache::thrift::protocol::T_I64, 5);
    xfer += oprot->writeI64(this->lastAnalyzed);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two ColumnStatisticsDesc values member-wise.
void swap(ColumnStatisticsDesc &a, ColumnStatisticsDesc &b) {
  using ::std::swap;
  // Distinct members — swap order has no observable effect.
  swap(a.__isset, b.__isset);
  swap(a.lastAnalyzed, b.lastAnalyzed);
  swap(a.partName, b.partName);
  swap(a.tableName, b.tableName);
  swap(a.dbName, b.dbName);
  swap(a.isTblLevel, b.isTblLevel);
}
// Thrift-compiler schema fingerprints for ColumnStatistics (ASCII and binary forms).
const char* ColumnStatistics::ascii_fingerprint = "6682E234199B2CD3807B1ED420C6A7F8";
const uint8_t ColumnStatistics::binary_fingerprint[16] = {0x66,0x82,0xE2,0x34,0x19,0x9B,0x2C,0xD3,0x80,0x7B,0x1E,0xD4,0x20,0xC6,0xA7,0xF8};
// Reads a ColumnStatistics from iprot. Both statsDesc and the statsObj
// list are required; INVALID_DATA is thrown if either is missing.
// The list is cleared, resized from the declared size, then element-read
// in place. Returns the byte count reported by the protocol calls.
uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  // Track required fields so missing ones can be rejected after the read.
  bool isset_statsDesc = false;
  bool isset_statsObj = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->statsDesc.read(iprot);
          isset_statsDesc = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->statsObj.clear();
            uint32_t _size219;
            ::apache::thrift::protocol::TType _etype222;
            xfer += iprot->readListBegin(_etype222, _size219);
            this->statsObj.resize(_size219);
            uint32_t _i223;
            for (_i223 = 0; _i223 < _size219; ++_i223)
            {
              xfer += this->statsObj[_i223].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          isset_statsObj = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_statsDesc)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_statsObj)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this ColumnStatistics (statsDesc struct plus statsObj list,
// both required). Returns the byte count reported by the protocol calls.
uint32_t ColumnStatistics::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("ColumnStatistics");
  xfer += oprot->writeFieldBegin("statsDesc", ::apache::thrift::protocol::T_STRUCT, 1);
  xfer += this->statsDesc.write(oprot);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("statsObj", ::apache::thrift::protocol::T_LIST, 2);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->statsObj.size()));
    std::vector<ColumnStatisticsObj> ::const_iterator _iter224;
    for (_iter224 = this->statsObj.begin(); _iter224 != this->statsObj.end(); ++_iter224)
    {
      xfer += (*_iter224).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two ColumnStatistics values member-wise.
// (No optional fields on this struct, so there is no __isset to swap.)
void swap(ColumnStatistics &a, ColumnStatistics &b) {
  using ::std::swap;
  swap(a.statsObj, b.statsObj);
  swap(a.statsDesc, b.statsDesc);
}
// Thrift-compiler schema fingerprints for AggrStats (ASCII and binary forms).
const char* AggrStats::ascii_fingerprint = "399BDBAF7503E0BFB5E1D99C83D790CD";
const uint8_t AggrStats::binary_fingerprint[16] = {0x39,0x9B,0xDB,0xAF,0x75,0x03,0xE0,0xBF,0xB5,0xE1,0xD9,0x9C,0x83,0xD7,0x90,0xCD};
// Reads an AggrStats from iprot. Both the colStats list and partsFound
// are required; INVALID_DATA is thrown if either is missing. Unknown
// fields are skipped. Returns the byte count reported by the protocol.
uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  // Track required fields so missing ones can be rejected after the read.
  bool isset_colStats = false;
  bool isset_partsFound = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->colStats.clear();
            uint32_t _size225;
            ::apache::thrift::protocol::TType _etype228;
            xfer += iprot->readListBegin(_etype228, _size225);
            this->colStats.resize(_size225);
            uint32_t _i229;
            for (_i229 = 0; _i229 < _size225; ++_i229)
            {
              xfer += this->colStats[_i229].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          isset_colStats = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->partsFound);
          isset_partsFound = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_colStats)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_partsFound)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this AggrStats (colStats list plus partsFound, both
// required). Returns the byte count reported by the protocol calls.
uint32_t AggrStats::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("AggrStats");
  xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->colStats.size()));
    std::vector<ColumnStatisticsObj> ::const_iterator _iter230;
    for (_iter230 = this->colStats.begin(); _iter230 != this->colStats.end(); ++_iter230)
    {
      xfer += (*_iter230).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("partsFound", ::apache::thrift::protocol::T_I64, 2);
  xfer += oprot->writeI64(this->partsFound);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two AggrStats values member-wise.
void swap(AggrStats &a, AggrStats &b) {
  using ::std::swap;
  swap(a.partsFound, b.partsFound);
  swap(a.colStats, b.colStats);
}
// Thrift-compiler schema fingerprints for SetPartitionsStatsRequest (ASCII and binary forms).
const char* SetPartitionsStatsRequest::ascii_fingerprint = "635C0DA9A947DA57AAE693A5DFB86569";
const uint8_t SetPartitionsStatsRequest::binary_fingerprint[16] = {0x63,0x5C,0x0D,0xA9,0xA9,0x47,0xDA,0x57,0xAA,0xE6,0x93,0xA5,0xDF,0xB8,0x65,0x69};
// Reads a SetPartitionsStatsRequest from iprot. The colStats list is the
// sole field and is required (INVALID_DATA if absent). Unknown fields are
// skipped. Returns the byte count reported by the protocol calls.
uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  // Track the required field so its absence can be rejected after the read.
  bool isset_colStats = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->colStats.clear();
            uint32_t _size231;
            ::apache::thrift::protocol::TType _etype234;
            xfer += iprot->readListBegin(_etype234, _size231);
            this->colStats.resize(_size231);
            uint32_t _i235;
            for (_i235 = 0; _i235 < _size231; ++_i235)
            {
              xfer += this->colStats[_i235].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          isset_colStats = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_colStats)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this SetPartitionsStatsRequest (a single required list of
// ColumnStatistics). Returns the byte count reported by the protocol.
uint32_t SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("SetPartitionsStatsRequest");
  xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->colStats.size()));
    std::vector<ColumnStatistics> ::const_iterator _iter236;
    for (_iter236 = this->colStats.begin(); _iter236 != this->colStats.end(); ++_iter236)
    {
      xfer += (*_iter236).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two SetPartitionsStatsRequest values.
// Delegates to std::vector's own swap for the single list member.
void swap(SetPartitionsStatsRequest &a, SetPartitionsStatsRequest &b) {
  a.colStats.swap(b.colStats);
}
// Thrift-compiler schema fingerprints for Schema (ASCII and binary forms).
const char* Schema::ascii_fingerprint = "5CFEE46C975F4E2368D905109B8E3B5B";
const uint8_t Schema::binary_fingerprint[16] = {0x5C,0xFE,0xE4,0x6C,0x97,0x5F,0x4E,0x23,0x68,0xD9,0x05,0x10,0x9B,0x8E,0x3B,0x5B};
// Reads a Schema from iprot. Both fields are optional: fieldSchemas
// (list) and properties (string->string map) just set their __isset flag
// when seen. The declared map key/value types are read but not checked.
// Returns the byte count reported by the protocol calls.
uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->fieldSchemas.clear();
            uint32_t _size237;
            ::apache::thrift::protocol::TType _etype240;
            xfer += iprot->readListBegin(_etype240, _size237);
            this->fieldSchemas.resize(_size237);
            uint32_t _i241;
            for (_i241 = 0; _i241 < _size237; ++_i241)
            {
              xfer += this->fieldSchemas[_i241].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.fieldSchemas = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            this->properties.clear();
            uint32_t _size242;
            ::apache::thrift::protocol::TType _ktype243;
            ::apache::thrift::protocol::TType _vtype244;
            xfer += iprot->readMapBegin(_ktype243, _vtype244, _size242);
            uint32_t _i246;
            for (_i246 = 0; _i246 < _size242; ++_i246)
            {
              std::string _key247;
              xfer += iprot->readString(_key247);
              // Inserts (or overwrites) the entry, reading the value in place.
              std::string& _val248 = this->properties[_key247];
              xfer += iprot->readString(_val248);
            }
            xfer += iprot->readMapEnd();
          }
          this->__isset.properties = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes this Schema. NOTE(review): both fieldSchemas and properties
// are written unconditionally, ignoring their __isset flags (generator
// output for non-required optionals). Returns the byte count reported by
// the protocol calls.
uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("Schema");
  xfer += oprot->writeFieldBegin("fieldSchemas", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->fieldSchemas.size()));
    std::vector<FieldSchema> ::const_iterator _iter249;
    for (_iter249 = this->fieldSchemas.begin(); _iter249 != this->fieldSchemas.end(); ++_iter249)
    {
      xfer += (*_iter249).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 2);
  {
    xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->properties.size()));
    std::map<std::string, std::string> ::const_iterator _iter250;
    for (_iter250 = this->properties.begin(); _iter250 != this->properties.end(); ++_iter250)
    {
      xfer += oprot->writeString(_iter250->first);
      xfer += oprot->writeString(_iter250->second);
    }
    xfer += oprot->writeMapEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two Schema values member-wise.
void swap(Schema &a, Schema &b) {
  using ::std::swap;
  swap(a.__isset, b.__isset);
  swap(a.properties, b.properties);
  swap(a.fieldSchemas, b.fieldSchemas);
}
// Thrift-compiler schema fingerprints for EnvironmentContext (ASCII and binary forms).
const char* EnvironmentContext::ascii_fingerprint = "5EA2D527ECA3BA20C77AFC023EE8C05F";
const uint8_t EnvironmentContext::binary_fingerprint[16] = {0x5E,0xA2,0xD5,0x27,0xEC,0xA3,0xBA,0x20,0xC7,0x7A,0xFC,0x02,0x3E,0xE8,0xC0,0x5F};
// Reads an EnvironmentContext (a single optional string->string map)
// from iprot. Declared map element types are read but not checked.
// Returns the byte count reported by the protocol calls.
uint32_t EnvironmentContext::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            this->properties.clear();
            uint32_t _size251;
            ::apache::thrift::protocol::TType _ktype252;
            ::apache::thrift::protocol::TType _vtype253;
            xfer += iprot->readMapBegin(_ktype252, _vtype253, _size251);
            uint32_t _i255;
            for (_i255 = 0; _i255 < _size251; ++_i255)
            {
              std::string _key256;
              xfer += iprot->readString(_key256);
              // Inserts (or overwrites) the entry, reading the value in place.
              std::string& _val257 = this->properties[_key256];
              xfer += iprot->readString(_val257);
            }
            xfer += iprot->readMapEnd();
          }
          this->__isset.properties = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes this EnvironmentContext. The properties map is written
// unconditionally, ignoring __isset (generator output for non-required
// optionals). Returns the byte count reported by the protocol calls.
uint32_t EnvironmentContext::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("EnvironmentContext");
  xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 1);
  {
    xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->properties.size()));
    std::map<std::string, std::string> ::const_iterator _iter258;
    for (_iter258 = this->properties.begin(); _iter258 != this->properties.end(); ++_iter258)
    {
      xfer += oprot->writeString(_iter258->first);
      xfer += oprot->writeString(_iter258->second);
    }
    xfer += oprot->writeMapEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two EnvironmentContext values member-wise.
void swap(EnvironmentContext &a, EnvironmentContext &b) {
  using ::std::swap;
  swap(a.__isset, b.__isset);
  swap(a.properties, b.properties);
}
// Thrift-compiler schema fingerprints for PartitionsByExprResult (ASCII and binary forms).
const char* PartitionsByExprResult::ascii_fingerprint = "40B789CC91B508FE36600A14E3F80425";
const uint8_t PartitionsByExprResult::binary_fingerprint[16] = {0x40,0xB7,0x89,0xCC,0x91,0xB5,0x08,0xFE,0x36,0x60,0x0A,0x14,0xE3,0xF8,0x04,0x25};
// Reads a PartitionsByExprResult from iprot. Both the partitions list and
// hasUnknownPartitions are required; INVALID_DATA is thrown if either is
// missing. Returns the byte count reported by the protocol calls.
uint32_t PartitionsByExprResult::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  // Track required fields so missing ones can be rejected after the read.
  bool isset_partitions = false;
  bool isset_hasUnknownPartitions = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->partitions.clear();
            uint32_t _size259;
            ::apache::thrift::protocol::TType _etype262;
            xfer += iprot->readListBegin(_etype262, _size259);
            this->partitions.resize(_size259);
            uint32_t _i263;
            for (_i263 = 0; _i263 < _size259; ++_i263)
            {
              xfer += this->partitions[_i263].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          isset_partitions = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->hasUnknownPartitions);
          isset_hasUnknownPartitions = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_partitions)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_hasUnknownPartitions)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this PartitionsByExprResult (partitions list plus
// hasUnknownPartitions flag, both required). Returns the byte count
// reported by the protocol calls.
uint32_t PartitionsByExprResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("PartitionsByExprResult");
  xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size()));
    std::vector<Partition> ::const_iterator _iter264;
    for (_iter264 = this->partitions.begin(); _iter264 != this->partitions.end(); ++_iter264)
    {
      xfer += (*_iter264).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("hasUnknownPartitions", ::apache::thrift::protocol::T_BOOL, 2);
  xfer += oprot->writeBool(this->hasUnknownPartitions);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two PartitionsByExprResult values member-wise.
void swap(PartitionsByExprResult &a, PartitionsByExprResult &b) {
  using ::std::swap;
  swap(a.hasUnknownPartitions, b.hasUnknownPartitions);
  swap(a.partitions, b.partitions);
}
// Thrift-compiler schema fingerprints for PartitionsByExprRequest (ASCII and binary forms).
const char* PartitionsByExprRequest::ascii_fingerprint = "835944417A026FE6ABD0DF5A35BF52C5";
const uint8_t PartitionsByExprRequest::binary_fingerprint[16] = {0x83,0x59,0x44,0x41,0x7A,0x02,0x6F,0xE6,0xAB,0xD0,0xDF,0x5A,0x35,0xBF,0x52,0xC5};
// Reads a PartitionsByExprRequest from iprot. dbName, tblName and the
// binary expr are required (INVALID_DATA if absent); defaultPartitionName
// and maxParts are optional and only flagged via __isset. Note expr is
// read with readBinary (wire type T_STRING carries binary payloads).
// Returns the byte count reported by the protocol calls.
uint32_t PartitionsByExprRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  // Track required fields so missing ones can be rejected after the read.
  bool isset_dbName = false;
  bool isset_tblName = false;
  bool isset_expr = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->dbName);
          isset_dbName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->tblName);
          isset_tblName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readBinary(this->expr);
          isset_expr = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->defaultPartitionName);
          this->__isset.defaultPartitionName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_I16) {
          xfer += iprot->readI16(this->maxParts);
          this->__isset.maxParts = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_dbName)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_tblName)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_expr)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this PartitionsByExprRequest. Required fields are always
// written (expr via writeBinary); defaultPartitionName and maxParts are
// emitted only when their __isset flag is set. Returns the byte count
// reported by the protocol calls.
uint32_t PartitionsByExprRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("PartitionsByExprRequest");
  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->dbName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->tblName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("expr", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeBinary(this->expr);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.defaultPartitionName) {
    xfer += oprot->writeFieldBegin("defaultPartitionName", ::apache::thrift::protocol::T_STRING, 4);
    xfer += oprot->writeString(this->defaultPartitionName);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.maxParts) {
    xfer += oprot->writeFieldBegin("maxParts", ::apache::thrift::protocol::T_I16, 5);
    xfer += oprot->writeI16(this->maxParts);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two PartitionsByExprRequest values member-wise.
void swap(PartitionsByExprRequest &a, PartitionsByExprRequest &b) {
  using ::std::swap;
  // Distinct members — swap order has no observable effect.
  swap(a.__isset, b.__isset);
  swap(a.maxParts, b.maxParts);
  swap(a.defaultPartitionName, b.defaultPartitionName);
  swap(a.expr, b.expr);
  swap(a.tblName, b.tblName);
  swap(a.dbName, b.dbName);
}
// Thrift-compiler schema fingerprints for TableStatsResult (ASCII and binary forms).
const char* TableStatsResult::ascii_fingerprint = "CE3E8F0D9B310B8D33CB7A89A75F3E05";
const uint8_t TableStatsResult::binary_fingerprint[16] = {0xCE,0x3E,0x8F,0x0D,0x9B,0x31,0x0B,0x8D,0x33,0xCB,0x7A,0x89,0xA7,0x5F,0x3E,0x05};
// Reads a TableStatsResult from iprot. The tableStats list is the sole
// field and is required (INVALID_DATA if absent). Unknown fields are
// skipped. Returns the byte count reported by the protocol calls.
uint32_t TableStatsResult::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  // Track the required field so its absence can be rejected after the read.
  bool isset_tableStats = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->tableStats.clear();
            uint32_t _size265;
            ::apache::thrift::protocol::TType _etype268;
            xfer += iprot->readListBegin(_etype268, _size265);
            this->tableStats.resize(_size265);
            uint32_t _i269;
            for (_i269 = 0; _i269 < _size265; ++_i269)
            {
              xfer += this->tableStats[_i269].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          isset_tableStats = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_tableStats)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this TableStatsResult (a single required list of
// ColumnStatisticsObj). Returns the byte count reported by the protocol.
uint32_t TableStatsResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("TableStatsResult");
  xfer += oprot->writeFieldBegin("tableStats", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->tableStats.size()));
    std::vector<ColumnStatisticsObj> ::const_iterator _iter270;
    for (_iter270 = this->tableStats.begin(); _iter270 != this->tableStats.end(); ++_iter270)
    {
      xfer += (*_iter270).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two TableStatsResult values.
// Delegates to std::vector's own swap for the single list member.
void swap(TableStatsResult &a, TableStatsResult &b) {
  a.tableStats.swap(b.tableStats);
}
// Thrift-compiler schema fingerprints for PartitionsStatsResult (ASCII and binary forms).
const char* PartitionsStatsResult::ascii_fingerprint = "FF175B50C5EF6F442D3AF25B06435A39";
const uint8_t PartitionsStatsResult::binary_fingerprint[16] = {0xFF,0x17,0x5B,0x50,0xC5,0xEF,0x6F,0x44,0x2D,0x3A,0xF2,0x5B,0x06,0x43,0x5A,0x39};
// Reads a PartitionsStatsResult from iprot: a required map from partition
// name to a list of ColumnStatisticsObj (INVALID_DATA if the map is
// absent). Each map value's list is deserialized in place via a reference
// into the map. Returns the byte count reported by the protocol calls.
uint32_t PartitionsStatsResult::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  // Track the required field so its absence can be rejected after the read.
  bool isset_partStats = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_MAP) {
          {
            this->partStats.clear();
            uint32_t _size271;
            ::apache::thrift::protocol::TType _ktype272;
            ::apache::thrift::protocol::TType _vtype273;
            xfer += iprot->readMapBegin(_ktype272, _vtype273, _size271);
            uint32_t _i275;
            for (_i275 = 0; _i275 < _size271; ++_i275)
            {
              std::string _key276;
              xfer += iprot->readString(_key276);
              // Inserts (or overwrites) the entry, filling its list in place.
              std::vector<ColumnStatisticsObj> & _val277 = this->partStats[_key276];
              {
                _val277.clear();
                uint32_t _size278;
                ::apache::thrift::protocol::TType _etype281;
                xfer += iprot->readListBegin(_etype281, _size278);
                _val277.resize(_size278);
                uint32_t _i282;
                for (_i282 = 0; _i282 < _size278; ++_i282)
                {
                  xfer += _val277[_i282].read(iprot);
                }
                xfer += iprot->readListEnd();
              }
            }
            xfer += iprot->readMapEnd();
          }
          isset_partStats = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_partStats)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this PartitionsStatsResult: a map of partition name to a
// list of ColumnStatisticsObj, with a nested list per map value.
// Returns the byte count reported by the protocol calls.
uint32_t PartitionsStatsResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("PartitionsStatsResult");
  xfer += oprot->writeFieldBegin("partStats", ::apache::thrift::protocol::T_MAP, 1);
  {
    xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->partStats.size()));
    std::map<std::string, std::vector<ColumnStatisticsObj> > ::const_iterator _iter283;
    for (_iter283 = this->partStats.begin(); _iter283 != this->partStats.end(); ++_iter283)
    {
      xfer += oprot->writeString(_iter283->first);
      {
        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(_iter283->second.size()));
        std::vector<ColumnStatisticsObj> ::const_iterator _iter284;
        for (_iter284 = _iter283->second.begin(); _iter284 != _iter283->second.end(); ++_iter284)
        {
          xfer += (*_iter284).write(oprot);
        }
        xfer += oprot->writeListEnd();
      }
    }
    xfer += oprot->writeMapEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two PartitionsStatsResult values.
// Delegates to std::map::swap, which trades internal buffers without copying.
void swap(PartitionsStatsResult &a, PartitionsStatsResult &b) {
  a.partStats.swap(b.partStats);
}
// Thrift type fingerprints for TableStatsRequest (generated; must match the IDL).
const char* TableStatsRequest::ascii_fingerprint = "8E2AD6401E83558ECFD6A13D74DD0A3F";
const uint8_t TableStatsRequest::binary_fingerprint[16] = {0x8E,0x2A,0xD6,0x40,0x1E,0x83,0x55,0x8E,0xCF,0xD6,0xA1,0x3D,0x74,0xDD,0x0A,0x3F};
// Deserializes TableStatsRequest from iprot.
// Fields: 1=dbName (string, required), 2=tblName (string, required),
// 3=colNames (list<string>, required). Unknown/mistyped fields are skipped.
// Throws TProtocolException(INVALID_DATA) if any required field is absent.
uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  // Presence flags for required-field validation after the read loop.
  bool isset_dbName = false;
  bool isset_tblName = false;
  bool isset_colNames = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->dbName);
          isset_dbName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->tblName);
          isset_tblName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->colNames.clear();
            uint32_t _size285;
            ::apache::thrift::protocol::TType _etype288;
            xfer += iprot->readListBegin(_etype288, _size285);
            this->colNames.resize(_size285);
            uint32_t _i289;
            for (_i289 = 0; _i289 < _size285; ++_i289)
            {
              xfer += iprot->readString(this->colNames[_i289]);
            }
            xfer += iprot->readListEnd();
          }
          isset_colNames = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_dbName)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_tblName)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_colNames)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes TableStatsRequest onto oprot.
// Writes required fields 1=dbName, 2=tblName, 3=colNames (list<string>)
// and returns the accumulated transfer count.
uint32_t TableStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("TableStatsRequest");
  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->dbName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->tblName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->colNames.size()));
    std::vector<std::string> ::const_iterator _iter290;
    for (_iter290 = this->colNames.begin(); _iter290 != this->colNames.end(); ++_iter290)
    {
      xfer += oprot->writeString((*_iter290));
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two TableStatsRequest values field-by-field.
// Uses the members' own swap(), which trades buffers without copying.
void swap(TableStatsRequest &a, TableStatsRequest &b) {
  a.dbName.swap(b.dbName);
  a.tblName.swap(b.tblName);
  a.colNames.swap(b.colNames);
}
// Thrift type fingerprints for PartitionsStatsRequest (generated; must match the IDL).
const char* PartitionsStatsRequest::ascii_fingerprint = "5F51D90BC323BCE4B704B7D98EDA0BD4";
const uint8_t PartitionsStatsRequest::binary_fingerprint[16] = {0x5F,0x51,0xD9,0x0B,0xC3,0x23,0xBC,0xE4,0xB7,0x04,0xB7,0xD9,0x8E,0xDA,0x0B,0xD4};
// Deserializes PartitionsStatsRequest from iprot.
// Fields (all required): 1=dbName (string), 2=tblName (string),
// 3=colNames (list<string>), 4=partNames (list<string>).
// Throws TProtocolException(INVALID_DATA) if any required field is absent.
uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  // Presence flags for required-field validation after the read loop.
  bool isset_dbName = false;
  bool isset_tblName = false;
  bool isset_colNames = false;
  bool isset_partNames = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->dbName);
          isset_dbName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->tblName);
          isset_tblName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->colNames.clear();
            uint32_t _size291;
            ::apache::thrift::protocol::TType _etype294;
            xfer += iprot->readListBegin(_etype294, _size291);
            this->colNames.resize(_size291);
            uint32_t _i295;
            for (_i295 = 0; _i295 < _size291; ++_i295)
            {
              xfer += iprot->readString(this->colNames[_i295]);
            }
            xfer += iprot->readListEnd();
          }
          isset_colNames = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->partNames.clear();
            uint32_t _size296;
            ::apache::thrift::protocol::TType _etype299;
            xfer += iprot->readListBegin(_etype299, _size296);
            this->partNames.resize(_size296);
            uint32_t _i300;
            for (_i300 = 0; _i300 < _size296; ++_i300)
            {
              xfer += iprot->readString(this->partNames[_i300]);
            }
            xfer += iprot->readListEnd();
          }
          isset_partNames = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_dbName)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_tblName)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_colNames)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_partNames)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes PartitionsStatsRequest onto oprot.
// Writes required fields 1=dbName, 2=tblName, 3=colNames, 4=partNames
// and returns the accumulated transfer count.
uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("PartitionsStatsRequest");
  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->dbName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->tblName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->colNames.size()));
    std::vector<std::string> ::const_iterator _iter301;
    for (_iter301 = this->colNames.begin(); _iter301 != this->colNames.end(); ++_iter301)
    {
      xfer += oprot->writeString((*_iter301));
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 4);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partNames.size()));
    std::vector<std::string> ::const_iterator _iter302;
    for (_iter302 = this->partNames.begin(); _iter302 != this->partNames.end(); ++_iter302)
    {
      xfer += oprot->writeString((*_iter302));
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two PartitionsStatsRequest values field-by-field.
// All members are std::string / std::vector, so member swap() suffices.
void swap(PartitionsStatsRequest &a, PartitionsStatsRequest &b) {
  a.dbName.swap(b.dbName);
  a.tblName.swap(b.tblName);
  a.colNames.swap(b.colNames);
  a.partNames.swap(b.partNames);
}
// Thrift type fingerprints for AddPartitionsResult (generated; must match the IDL).
const char* AddPartitionsResult::ascii_fingerprint = "5A689D0823E7BFBB60C799BA60065C31";
const uint8_t AddPartitionsResult::binary_fingerprint[16] = {0x5A,0x68,0x9D,0x08,0x23,0xE7,0xBF,0xBB,0x60,0xC7,0x99,0xBA,0x60,0x06,0x5C,0x31};
// Deserializes AddPartitionsResult from iprot.
// Field 1 (optional): partitions — list<Partition>; presence is recorded in
// this->__isset.partitions. No required fields, so no post-loop validation.
uint32_t AddPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->partitions.clear();
            uint32_t _size303;
            ::apache::thrift::protocol::TType _etype306;
            xfer += iprot->readListBegin(_etype306, _size303);
            this->partitions.resize(_size303);
            uint32_t _i307;
            for (_i307 = 0; _i307 < _size303; ++_i307)
            {
              xfer += this->partitions[_i307].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.partitions = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes AddPartitionsResult onto oprot.
// Optional field 1 (partitions) is emitted only when __isset.partitions is set.
uint32_t AddPartitionsResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("AddPartitionsResult");
  if (this->__isset.partitions) {
    xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size()));
      std::vector<Partition> ::const_iterator _iter308;
      for (_iter308 = this->partitions.begin(); _iter308 != this->partitions.end(); ++_iter308)
      {
        xfer += (*_iter308).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two AddPartitionsResult values, including the
// optional-field presence flags.
void swap(AddPartitionsResult &a, AddPartitionsResult &b) {
  a.partitions.swap(b.partitions);
  ::std::swap(a.__isset, b.__isset);
}
// Thrift type fingerprints for AddPartitionsRequest (generated; must match the IDL).
const char* AddPartitionsRequest::ascii_fingerprint = "94F938D035892CF6873DEDB99358F069";
const uint8_t AddPartitionsRequest::binary_fingerprint[16] = {0x94,0xF9,0x38,0xD0,0x35,0x89,0x2C,0xF6,0x87,0x3D,0xED,0xB9,0x93,0x58,0xF0,0x69};
// Deserializes AddPartitionsRequest from iprot.
// Required fields: 1=dbName, 2=tblName, 3=parts (list<Partition>),
// 4=ifNotExists (bool). Optional: 5=needResult (bool, tracked via __isset).
// Throws TProtocolException(INVALID_DATA) if any required field is absent.
uint32_t AddPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  // Presence flags for required-field validation after the read loop.
  bool isset_dbName = false;
  bool isset_tblName = false;
  bool isset_parts = false;
  bool isset_ifNotExists = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->dbName);
          isset_dbName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->tblName);
          isset_tblName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->parts.clear();
            uint32_t _size309;
            ::apache::thrift::protocol::TType _etype312;
            xfer += iprot->readListBegin(_etype312, _size309);
            this->parts.resize(_size309);
            uint32_t _i313;
            for (_i313 = 0; _i313 < _size309; ++_i313)
            {
              xfer += this->parts[_i313].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          isset_parts = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->ifNotExists);
          isset_ifNotExists = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        // Optional field: presence is recorded in __isset rather than enforced.
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->needResult);
          this->__isset.needResult = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_dbName)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_tblName)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_parts)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_ifNotExists)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes AddPartitionsRequest onto oprot.
// Required fields 1-4 are always written; optional field 5 (needResult)
// only when __isset.needResult is set.
uint32_t AddPartitionsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("AddPartitionsRequest");
  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->dbName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->tblName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("parts", ::apache::thrift::protocol::T_LIST, 3);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->parts.size()));
    std::vector<Partition> ::const_iterator _iter314;
    for (_iter314 = this->parts.begin(); _iter314 != this->parts.end(); ++_iter314)
    {
      xfer += (*_iter314).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("ifNotExists", ::apache::thrift::protocol::T_BOOL, 4);
  xfer += oprot->writeBool(this->ifNotExists);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.needResult) {
    xfer += oprot->writeFieldBegin("needResult", ::apache::thrift::protocol::T_BOOL, 5);
    xfer += oprot->writeBool(this->needResult);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two AddPartitionsRequest values field-by-field.
// Containers use their member swap(); scalars and the __isset flag block go
// through ::std::swap.
void swap(AddPartitionsRequest &a, AddPartitionsRequest &b) {
  a.dbName.swap(b.dbName);
  a.tblName.swap(b.tblName);
  a.parts.swap(b.parts);
  ::std::swap(a.ifNotExists, b.ifNotExists);
  ::std::swap(a.needResult, b.needResult);
  ::std::swap(a.__isset, b.__isset);
}
// Thrift type fingerprints for DropPartitionsResult (generated; must match the IDL).
const char* DropPartitionsResult::ascii_fingerprint = "5A689D0823E7BFBB60C799BA60065C31";
const uint8_t DropPartitionsResult::binary_fingerprint[16] = {0x5A,0x68,0x9D,0x08,0x23,0xE7,0xBF,0xBB,0x60,0xC7,0x99,0xBA,0x60,0x06,0x5C,0x31};
// Deserializes DropPartitionsResult from iprot.
// Field 1 (optional): partitions — list<Partition>; presence is recorded in
// this->__isset.partitions. No required fields, so no post-loop validation.
uint32_t DropPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->partitions.clear();
            uint32_t _size315;
            ::apache::thrift::protocol::TType _etype318;
            xfer += iprot->readListBegin(_etype318, _size315);
            this->partitions.resize(_size315);
            uint32_t _i319;
            for (_i319 = 0; _i319 < _size315; ++_i319)
            {
              xfer += this->partitions[_i319].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.partitions = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes DropPartitionsResult onto oprot.
// Optional field 1 (partitions) is emitted only when __isset.partitions is set.
uint32_t DropPartitionsResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("DropPartitionsResult");
  if (this->__isset.partitions) {
    xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->partitions.size()));
      std::vector<Partition> ::const_iterator _iter320;
      for (_iter320 = this->partitions.begin(); _iter320 != this->partitions.end(); ++_iter320)
      {
        xfer += (*_iter320).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two DropPartitionsResult values, including the
// optional-field presence flags.
void swap(DropPartitionsResult &a, DropPartitionsResult &b) {
  a.partitions.swap(b.partitions);
  ::std::swap(a.__isset, b.__isset);
}
// Thrift type fingerprints for DropPartitionsExpr (generated; must match the IDL).
const char* DropPartitionsExpr::ascii_fingerprint = "18B162B1D15D8D46509D3911A9F1C2AA";
const uint8_t DropPartitionsExpr::binary_fingerprint[16] = {0x18,0xB1,0x62,0xB1,0xD1,0x5D,0x8D,0x46,0x50,0x9D,0x39,0x11,0xA9,0xF1,0xC2,0xAA};
// Deserializes DropPartitionsExpr from iprot.
// Field 1 (required): expr — binary blob (T_STRING on the wire, readBinary).
// Field 2 (optional): partArchiveLevel — i32, tracked via __isset.
// Throws TProtocolException(INVALID_DATA) if expr is absent.
uint32_t DropPartitionsExpr::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_expr = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readBinary(this->expr);
          isset_expr = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->partArchiveLevel);
          this->__isset.partArchiveLevel = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_expr)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes DropPartitionsExpr onto oprot.
// Required field 1 (expr, binary) is always written; optional field 2
// (partArchiveLevel) only when __isset.partArchiveLevel is set.
uint32_t DropPartitionsExpr::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("DropPartitionsExpr");
  xfer += oprot->writeFieldBegin("expr", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeBinary(this->expr);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.partArchiveLevel) {
    xfer += oprot->writeFieldBegin("partArchiveLevel", ::apache::thrift::protocol::T_I32, 2);
    xfer += oprot->writeI32(this->partArchiveLevel);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two DropPartitionsExpr values field-by-field,
// including the optional-field presence flags.
void swap(DropPartitionsExpr &a, DropPartitionsExpr &b) {
  a.expr.swap(b.expr);
  ::std::swap(a.partArchiveLevel, b.partArchiveLevel);
  ::std::swap(a.__isset, b.__isset);
}
// Thrift type fingerprints for RequestPartsSpec (generated; must match the IDL).
const char* RequestPartsSpec::ascii_fingerprint = "864492ECAB27996CD222AACDA10C292E";
const uint8_t RequestPartsSpec::binary_fingerprint[16] = {0x86,0x44,0x92,0xEC,0xAB,0x27,0x99,0x6C,0xD2,0x22,0xAA,0xCD,0xA1,0x0C,0x29,0x2E};
// Deserializes RequestPartsSpec from iprot.
// Both fields are optional (presence tracked via __isset):
// 1=names (list<string>), 2=exprs (list<DropPartitionsExpr>).
uint32_t RequestPartsSpec::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->names.clear();
            uint32_t _size321;
            ::apache::thrift::protocol::TType _etype324;
            xfer += iprot->readListBegin(_etype324, _size321);
            this->names.resize(_size321);
            uint32_t _i325;
            for (_i325 = 0; _i325 < _size321; ++_i325)
            {
              xfer += iprot->readString(this->names[_i325]);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.names = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->exprs.clear();
            uint32_t _size326;
            ::apache::thrift::protocol::TType _etype329;
            xfer += iprot->readListBegin(_etype329, _size326);
            this->exprs.resize(_size326);
            uint32_t _i330;
            for (_i330 = 0; _i330 < _size326; ++_i330)
            {
              xfer += this->exprs[_i330].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.exprs = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes RequestPartsSpec onto oprot.
// NOTE(review): unlike the optional-field writers above, both fields are
// written unconditionally here (no __isset guard) — both lists are emitted
// even when empty. That matches what the generator produced; do not "fix"
// without regenerating, as it would change the wire output.
uint32_t RequestPartsSpec::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("RequestPartsSpec");
  xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->names.size()));
    std::vector<std::string> ::const_iterator _iter331;
    for (_iter331 = this->names.begin(); _iter331 != this->names.end(); ++_iter331)
    {
      xfer += oprot->writeString((*_iter331));
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("exprs", ::apache::thrift::protocol::T_LIST, 2);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->exprs.size()));
    std::vector<DropPartitionsExpr> ::const_iterator _iter332;
    for (_iter332 = this->exprs.begin(); _iter332 != this->exprs.end(); ++_iter332)
    {
      xfer += (*_iter332).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two RequestPartsSpec values, including the
// field presence flags.
void swap(RequestPartsSpec &a, RequestPartsSpec &b) {
  a.names.swap(b.names);
  a.exprs.swap(b.exprs);
  ::std::swap(a.__isset, b.__isset);
}
// Thrift type fingerprints for DropPartitionsRequest (generated; must match the IDL).
const char* DropPartitionsRequest::ascii_fingerprint = "EB263FBA01215C480A9A24C11D69E672";
const uint8_t DropPartitionsRequest::binary_fingerprint[16] = {0xEB,0x26,0x3F,0xBA,0x01,0x21,0x5C,0x48,0x0A,0x9A,0x24,0xC1,0x1D,0x69,0xE6,0x72};
// Deserializes DropPartitionsRequest from iprot.
// Required fields: 1=dbName (string), 2=tblName (string),
// 3=parts (RequestPartsSpec struct).
// Optional fields (tracked via __isset): 4=deleteData, 5=ifExists,
// 6=ignoreProtection (bool), 7=environmentContext (struct), 8=needResult (bool).
// Throws TProtocolException(INVALID_DATA) if any required field is absent.
uint32_t DropPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  // Presence flags for required-field validation after the read loop.
  bool isset_dbName = false;
  bool isset_tblName = false;
  bool isset_parts = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->dbName);
          isset_dbName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->tblName);
          isset_tblName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->parts.read(iprot);
          isset_parts = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->deleteData);
          this->__isset.deleteData = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->ifExists);
          this->__isset.ifExists = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 6:
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->ignoreProtection);
          this->__isset.ignoreProtection = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 7:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->environmentContext.read(iprot);
          this->__isset.environmentContext = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 8:
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->needResult);
          this->__isset.needResult = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_dbName)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_tblName)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_parts)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes DropPartitionsRequest onto oprot.
// Required fields 1-3 are always written; optional fields 4-8 only when the
// corresponding __isset flag is set.
uint32_t DropPartitionsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("DropPartitionsRequest");
  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->dbName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->tblName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("parts", ::apache::thrift::protocol::T_STRUCT, 3);
  xfer += this->parts.write(oprot);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.deleteData) {
    xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 4);
    xfer += oprot->writeBool(this->deleteData);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.ifExists) {
    xfer += oprot->writeFieldBegin("ifExists", ::apache::thrift::protocol::T_BOOL, 5);
    xfer += oprot->writeBool(this->ifExists);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.ignoreProtection) {
    xfer += oprot->writeFieldBegin("ignoreProtection", ::apache::thrift::protocol::T_BOOL, 6);
    xfer += oprot->writeBool(this->ignoreProtection);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.environmentContext) {
    xfer += oprot->writeFieldBegin("environmentContext", ::apache::thrift::protocol::T_STRUCT, 7);
    xfer += this->environmentContext.write(oprot);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.needResult) {
    xfer += oprot->writeFieldBegin("needResult", ::apache::thrift::protocol::T_BOOL, 8);
    xfer += oprot->writeBool(this->needResult);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two DropPartitionsRequest values field-by-field.
// The unqualified swap calls (with the using-declaration fallback) let ADL
// pick up the custom swap overloads for struct members such as parts
// (RequestPartsSpec) and environmentContext, while scalars fall back to
// ::std::swap.
void swap(DropPartitionsRequest &a, DropPartitionsRequest &b) {
  using ::std::swap;
  swap(a.dbName, b.dbName);
  swap(a.tblName, b.tblName);
  swap(a.parts, b.parts);
  swap(a.deleteData, b.deleteData);
  swap(a.ifExists, b.ifExists);
  swap(a.ignoreProtection, b.ignoreProtection);
  swap(a.environmentContext, b.environmentContext);
  swap(a.needResult, b.needResult);
  swap(a.__isset, b.__isset);
}
// Thrift type fingerprints for ResourceUri (generated; must match the IDL).
const char* ResourceUri::ascii_fingerprint = "19B5240589E680301A7E32DF3971EFBE";
const uint8_t ResourceUri::binary_fingerprint[16] = {0x19,0xB5,0x24,0x05,0x89,0xE6,0x80,0x30,0x1A,0x7E,0x32,0xDF,0x39,0x71,0xEF,0xBE};
// Deserializes ResourceUri from iprot.
// Fields (both optional, tracked via __isset):
// 1=resourceType (enum ResourceType, carried as i32 on the wire), 2=uri (string).
uint32_t ResourceUri::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          // Enum is read as a raw i32 and cast; out-of-range values are NOT rejected.
          int32_t ecast333;
          xfer += iprot->readI32(ecast333);
          this->resourceType = (ResourceType::type)ecast333;
          this->__isset.resourceType = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->uri);
          this->__isset.uri = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes ResourceUri onto oprot.
// Both fields are written unconditionally; the enum goes out as an i32.
uint32_t ResourceUri::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("ResourceUri");
  xfer += oprot->writeFieldBegin("resourceType", ::apache::thrift::protocol::T_I32, 1);
  xfer += oprot->writeI32((int32_t)this->resourceType);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("uri", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->uri);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two ResourceUri values field-by-field,
// including the field presence flags.
void swap(ResourceUri &a, ResourceUri &b) {
  ::std::swap(a.resourceType, b.resourceType);
  a.uri.swap(b.uri);
  ::std::swap(a.__isset, b.__isset);
}
// Thrift type fingerprints for Function (generated; must match the IDL).
const char* Function::ascii_fingerprint = "72279C515E70F888568542F97616ADB8";
const uint8_t Function::binary_fingerprint[16] = {0x72,0x27,0x9C,0x51,0x5E,0x70,0xF8,0x88,0x56,0x85,0x42,0xF9,0x76,0x16,0xAD,0xB8};
// Deserializes Function from iprot.
// Fields (all optional, tracked via __isset):
// 1=functionName, 2=dbName, 3=className, 4=ownerName (strings),
// 5=ownerType (PrincipalType enum as i32), 6=createTime (i32),
// 7=functionType (FunctionType enum as i32), 8=resourceUris (list<ResourceUri>).
uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->functionName);
          this->__isset.functionName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->dbName);
          this->__isset.dbName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->className);
          this->__isset.className = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->ownerName);
          this->__isset.ownerName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          // Enum is read as a raw i32 and cast; out-of-range values are NOT rejected.
          int32_t ecast334;
          xfer += iprot->readI32(ecast334);
          this->ownerType = (PrincipalType::type)ecast334;
          this->__isset.ownerType = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 6:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->createTime);
          this->__isset.createTime = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 7:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          int32_t ecast335;
          xfer += iprot->readI32(ecast335);
          this->functionType = (FunctionType::type)ecast335;
          this->__isset.functionType = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 8:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->resourceUris.clear();
            uint32_t _size336;
            ::apache::thrift::protocol::TType _etype339;
            xfer += iprot->readListBegin(_etype339, _size336);
            this->resourceUris.resize(_size336);
            uint32_t _i340;
            for (_i340 = 0; _i340 < _size336; ++_i340)
            {
              xfer += this->resourceUris[_i340].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.resourceUris = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes Function onto oprot.
// All eight fields are written unconditionally (no __isset guards);
// enum fields go out as i32.
uint32_t Function::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("Function");
  xfer += oprot->writeFieldBegin("functionName", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->functionName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->dbName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("className", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeString(this->className);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("ownerName", ::apache::thrift::protocol::T_STRING, 4);
  xfer += oprot->writeString(this->ownerName);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("ownerType", ::apache::thrift::protocol::T_I32, 5);
  xfer += oprot->writeI32((int32_t)this->ownerType);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("createTime", ::apache::thrift::protocol::T_I32, 6);
  xfer += oprot->writeI32(this->createTime);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("functionType", ::apache::thrift::protocol::T_I32, 7);
  xfer += oprot->writeI32((int32_t)this->functionType);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("resourceUris", ::apache::thrift::protocol::T_LIST, 8);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->resourceUris.size()));
    std::vector<ResourceUri> ::const_iterator _iter341;
    for (_iter341 = this->resourceUris.begin(); _iter341 != this->resourceUris.end(); ++_iter341)
    {
      xfer += (*_iter341).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two Function values field-by-field.
// String/vector members use their own swap(); enums, integers, and the
// __isset flag block go through ::std::swap.
void swap(Function &a, Function &b) {
  a.functionName.swap(b.functionName);
  a.dbName.swap(b.dbName);
  a.className.swap(b.className);
  a.ownerName.swap(b.ownerName);
  ::std::swap(a.ownerType, b.ownerType);
  ::std::swap(a.createTime, b.createTime);
  ::std::swap(a.functionType, b.functionType);
  a.resourceUris.swap(b.resourceUris);
  ::std::swap(a.__isset, b.__isset);
}
// Thrift type fingerprints for TxnInfo (generated; must match the IDL).
const char* TxnInfo::ascii_fingerprint = "6C5C0773A901CCA3BE9D085B3B47A767";
const uint8_t TxnInfo::binary_fingerprint[16] = {0x6C,0x5C,0x07,0x73,0xA9,0x01,0xCC,0xA3,0xBE,0x9D,0x08,0x5B,0x3B,0x47,0xA7,0x67};
// Deserializes TxnInfo from iprot.
// Fields (all required): 1=id (i64), 2=state (TxnState enum as i32),
// 3=user (string), 4=hostname (string).
// Throws TProtocolException(INVALID_DATA) if any required field is absent.
uint32_t TxnInfo::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  // Presence flags for required-field validation after the read loop.
  bool isset_id = false;
  bool isset_state = false;
  bool isset_user = false;
  bool isset_hostname = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->id);
          isset_id = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          // Enum is read as a raw i32 and cast; out-of-range values are NOT rejected.
          int32_t ecast342;
          xfer += iprot->readI32(ecast342);
          this->state = (TxnState::type)ecast342;
          isset_state = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->user);
          isset_user = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->hostname);
          isset_hostname = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_id)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_state)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_user)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_hostname)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this TxnInfo to oprot in ascending field-id order
// (1:id i64, 2:state i32, 3:user string, 4:hostname string).
// All fields are required, so each is written unconditionally.
// Returns the cumulative transfer count.
uint32_t TxnInfo::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("TxnInfo");
  xfer += oprot->writeFieldBegin("id", ::apache::thrift::protocol::T_I64, 1);
  xfer += oprot->writeI64(this->id);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("state", ::apache::thrift::protocol::T_I32, 2);
  xfer += oprot->writeI32((int32_t)this->state);  // enum encoded as i32
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("user", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeString(this->user);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("hostname", ::apache::thrift::protocol::T_STRING, 4);
  xfer += oprot->writeString(this->hostname);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two TxnInfo values member-by-member.
// Member swaps are independent, so ordering does not matter;
// "using ::std::swap" enables ADL to select specialized swaps.
void swap(TxnInfo &a, TxnInfo &b) {
  using ::std::swap;
  swap(a.hostname, b.hostname);
  swap(a.user, b.user);
  swap(a.state, b.state);
  swap(a.id, b.id);
}
// Thrift-generated schema fingerprints for GetOpenTxnsInfoResponse
// (binary_fingerprint is the byte-wise form of the hex ascii_fingerprint).
const char* GetOpenTxnsInfoResponse::ascii_fingerprint = "CCF769BBD33005B61F2079A6665E3B9C";
const uint8_t GetOpenTxnsInfoResponse::binary_fingerprint[16] = {0xCC,0xF7,0x69,0xBB,0xD3,0x30,0x05,0xB6,0x1F,0x20,0x79,0xA6,0x66,0x5E,0x3B,0x9C};
// Deserializes a GetOpenTxnsInfoResponse from iprot.
// Both fields (1:txn_high_water_mark i64, 2:open_txns list<TxnInfo>) are
// required; throws TProtocolException::INVALID_DATA if either is missing.
// Unknown fields are skipped. Returns the cumulative transfer count.
uint32_t GetOpenTxnsInfoResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_txn_high_water_mark = false;
  bool isset_open_txns = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->txn_high_water_mark);
          isset_txn_high_water_mark = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            // Replace any existing contents, then size the vector up front
            // and read each TxnInfo element in place.
            this->open_txns.clear();
            uint32_t _size343;
            ::apache::thrift::protocol::TType _etype346;
            xfer += iprot->readListBegin(_etype346, _size343);
            this->open_txns.resize(_size343);
            uint32_t _i347;
            for (_i347 = 0; _i347 < _size343; ++_i347)
            {
              xfer += this->open_txns[_i347].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          isset_open_txns = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_txn_high_water_mark)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_open_txns)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this GetOpenTxnsInfoResponse to oprot in field-id order
// (1:txn_high_water_mark i64, 2:open_txns list<TxnInfo>). Both fields are
// required and always written. Returns the cumulative transfer count.
uint32_t GetOpenTxnsInfoResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("GetOpenTxnsInfoResponse");
  xfer += oprot->writeFieldBegin("txn_high_water_mark", ::apache::thrift::protocol::T_I64, 1);
  xfer += oprot->writeI64(this->txn_high_water_mark);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_LIST, 2);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->open_txns.size()));
    std::vector<TxnInfo> ::const_iterator _iter348;
    for (_iter348 = this->open_txns.begin(); _iter348 != this->open_txns.end(); ++_iter348)
    {
      xfer += (*_iter348).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two GetOpenTxnsInfoResponse values.
// Member swaps are independent; "using ::std::swap" enables ADL.
void swap(GetOpenTxnsInfoResponse &a, GetOpenTxnsInfoResponse &b) {
  using ::std::swap;
  swap(a.open_txns, b.open_txns);
  swap(a.txn_high_water_mark, b.txn_high_water_mark);
}
// Thrift-generated schema fingerprints for GetOpenTxnsResponse
// (binary_fingerprint is the byte-wise form of the hex ascii_fingerprint).
const char* GetOpenTxnsResponse::ascii_fingerprint = "590531FF1BE8611678B255374F6109EE";
const uint8_t GetOpenTxnsResponse::binary_fingerprint[16] = {0x59,0x05,0x31,0xFF,0x1B,0xE8,0x61,0x16,0x78,0xB2,0x55,0x37,0x4F,0x61,0x09,0xEE};
// Deserializes a GetOpenTxnsResponse from iprot.
// Both fields (1:txn_high_water_mark i64, 2:open_txns set<i64>) are
// required; throws TProtocolException::INVALID_DATA if either is missing.
// Unknown fields are skipped. Returns the cumulative transfer count.
uint32_t GetOpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_txn_high_water_mark = false;
  bool isset_open_txns = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->txn_high_water_mark);
          isset_txn_high_water_mark = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_SET) {
          {
            // Replace any existing contents; elements are inserted one by
            // one since std::set cannot be pre-sized.
            this->open_txns.clear();
            uint32_t _size349;
            ::apache::thrift::protocol::TType _etype352;
            xfer += iprot->readSetBegin(_etype352, _size349);
            uint32_t _i353;
            for (_i353 = 0; _i353 < _size349; ++_i353)
            {
              int64_t _elem354;
              xfer += iprot->readI64(_elem354);
              this->open_txns.insert(_elem354);
            }
            xfer += iprot->readSetEnd();
          }
          isset_open_txns = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_txn_high_water_mark)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_open_txns)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this GetOpenTxnsResponse to oprot in field-id order
// (1:txn_high_water_mark i64, 2:open_txns set<i64>). Both fields are
// required and always written. Returns the cumulative transfer count.
uint32_t GetOpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("GetOpenTxnsResponse");
  xfer += oprot->writeFieldBegin("txn_high_water_mark", ::apache::thrift::protocol::T_I64, 1);
  xfer += oprot->writeI64(this->txn_high_water_mark);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_SET, 2);
  {
    xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->open_txns.size()));
    std::set<int64_t> ::const_iterator _iter355;
    for (_iter355 = this->open_txns.begin(); _iter355 != this->open_txns.end(); ++_iter355)
    {
      xfer += oprot->writeI64((*_iter355));
    }
    xfer += oprot->writeSetEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two GetOpenTxnsResponse values.
// Member swaps are independent; "using ::std::swap" enables ADL.
void swap(GetOpenTxnsResponse &a, GetOpenTxnsResponse &b) {
  using ::std::swap;
  swap(a.open_txns, b.open_txns);
  swap(a.txn_high_water_mark, b.txn_high_water_mark);
}
// Thrift-generated schema fingerprints for OpenTxnRequest
// (binary_fingerprint is the byte-wise form of the hex ascii_fingerprint).
const char* OpenTxnRequest::ascii_fingerprint = "3368C2F81F2FEF71F11EDACDB2A3ECEF";
const uint8_t OpenTxnRequest::binary_fingerprint[16] = {0x33,0x68,0xC2,0xF8,0x1F,0x2F,0xEF,0x71,0xF1,0x1E,0xDA,0xCD,0xB2,0xA3,0xEC,0xEF};
// Deserializes an OpenTxnRequest from iprot.
// All three fields (1:num_txns i32, 2:user string, 3:hostname string) are
// required; throws TProtocolException::INVALID_DATA if any is missing.
// Unknown fields are skipped. Returns the cumulative transfer count.
uint32_t OpenTxnRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_num_txns = false;
  bool isset_user = false;
  bool isset_hostname = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->num_txns);
          isset_num_txns = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->user);
          isset_user = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->hostname);
          isset_hostname = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_num_txns)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_user)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_hostname)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this OpenTxnRequest to oprot in field-id order
// (1:num_txns i32, 2:user string, 3:hostname string). All fields are
// required and always written. Returns the cumulative transfer count.
uint32_t OpenTxnRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("OpenTxnRequest");
  xfer += oprot->writeFieldBegin("num_txns", ::apache::thrift::protocol::T_I32, 1);
  xfer += oprot->writeI32(this->num_txns);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("user", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->user);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("hostname", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeString(this->hostname);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two OpenTxnRequest values.
// Member swaps are independent; "using ::std::swap" enables ADL.
void swap(OpenTxnRequest &a, OpenTxnRequest &b) {
  using ::std::swap;
  swap(a.hostname, b.hostname);
  swap(a.user, b.user);
  swap(a.num_txns, b.num_txns);
}
// Thrift-generated schema fingerprints for OpenTxnsResponse
// (binary_fingerprint is the byte-wise form of the hex ascii_fingerprint).
const char* OpenTxnsResponse::ascii_fingerprint = "E49D7D1A9013CC81CD0F69D631EF82E4";
const uint8_t OpenTxnsResponse::binary_fingerprint[16] = {0xE4,0x9D,0x7D,0x1A,0x90,0x13,0xCC,0x81,0xCD,0x0F,0x69,0xD6,0x31,0xEF,0x82,0xE4};
// Deserializes an OpenTxnsResponse from iprot.
// Its single field (1:txn_ids list<i64>) is required; throws
// TProtocolException::INVALID_DATA if absent. Unknown fields are skipped.
// Returns the cumulative transfer count.
uint32_t OpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_txn_ids = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            // Replace any existing contents, then size the vector up front
            // and read each i64 element in place.
            this->txn_ids.clear();
            uint32_t _size356;
            ::apache::thrift::protocol::TType _etype359;
            xfer += iprot->readListBegin(_etype359, _size356);
            this->txn_ids.resize(_size356);
            uint32_t _i360;
            for (_i360 = 0; _i360 < _size356; ++_i360)
            {
              xfer += iprot->readI64(this->txn_ids[_i360]);
            }
            xfer += iprot->readListEnd();
          }
          isset_txn_ids = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_txn_ids)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this OpenTxnsResponse to oprot. The single required field
// (1:txn_ids list<i64>) is always written.
// Returns the cumulative transfer count.
uint32_t OpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("OpenTxnsResponse");
  xfer += oprot->writeFieldBegin("txn_ids", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->txn_ids.size()));
    std::vector<int64_t> ::const_iterator _iter361;
    for (_iter361 = this->txn_ids.begin(); _iter361 != this->txn_ids.end(); ++_iter361)
    {
      xfer += oprot->writeI64((*_iter361));
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two OpenTxnsResponse values by swapping
// their single txn_ids vector ("using ::std::swap" enables ADL).
void swap(OpenTxnsResponse &a, OpenTxnsResponse &b) {
  using ::std::swap;
  swap(b.txn_ids, a.txn_ids);
}
// Thrift-generated schema fingerprints for AbortTxnRequest
// (binary_fingerprint is the byte-wise form of the hex ascii_fingerprint).
const char* AbortTxnRequest::ascii_fingerprint = "56A59CE7FFAF82BCA8A19FAACDE4FB75";
const uint8_t AbortTxnRequest::binary_fingerprint[16] = {0x56,0xA5,0x9C,0xE7,0xFF,0xAF,0x82,0xBC,0xA8,0xA1,0x9F,0xAA,0xCD,0xE4,0xFB,0x75};
// Deserializes an AbortTxnRequest from iprot.
// Its single field (1:txnid i64) is required; throws
// TProtocolException::INVALID_DATA if absent. Unknown fields are skipped.
// Returns the cumulative transfer count.
uint32_t AbortTxnRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_txnid = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->txnid);
          isset_txnid = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_txnid)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this AbortTxnRequest to oprot. The single required field
// (1:txnid i64) is always written. Returns the cumulative transfer count.
uint32_t AbortTxnRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("AbortTxnRequest");
  xfer += oprot->writeFieldBegin("txnid", ::apache::thrift::protocol::T_I64, 1);
  xfer += oprot->writeI64(this->txnid);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two AbortTxnRequest values by swapping
// their single txnid member ("using ::std::swap" enables ADL).
void swap(AbortTxnRequest &a, AbortTxnRequest &b) {
  using ::std::swap;
  swap(b.txnid, a.txnid);
}
// Thrift-generated schema fingerprints for CommitTxnRequest
// (binary_fingerprint is the byte-wise form of the hex ascii_fingerprint).
const char* CommitTxnRequest::ascii_fingerprint = "56A59CE7FFAF82BCA8A19FAACDE4FB75";
const uint8_t CommitTxnRequest::binary_fingerprint[16] = {0x56,0xA5,0x9C,0xE7,0xFF,0xAF,0x82,0xBC,0xA8,0xA1,0x9F,0xAA,0xCD,0xE4,0xFB,0x75};
// Deserializes a CommitTxnRequest from iprot.
// Its single field (1:txnid i64) is required; throws
// TProtocolException::INVALID_DATA if absent. Unknown fields are skipped.
// Returns the cumulative transfer count.
uint32_t CommitTxnRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_txnid = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->txnid);
          isset_txnid = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_txnid)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this CommitTxnRequest to oprot. The single required field
// (1:txnid i64) is always written. Returns the cumulative transfer count.
uint32_t CommitTxnRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("CommitTxnRequest");
  xfer += oprot->writeFieldBegin("txnid", ::apache::thrift::protocol::T_I64, 1);
  xfer += oprot->writeI64(this->txnid);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two CommitTxnRequest values by swapping
// their single txnid member ("using ::std::swap" enables ADL).
void swap(CommitTxnRequest &a, CommitTxnRequest &b) {
  using ::std::swap;
  swap(b.txnid, a.txnid);
}
// Thrift-generated schema fingerprints for LockComponent
// (binary_fingerprint is the byte-wise form of the hex ascii_fingerprint).
const char* LockComponent::ascii_fingerprint = "38B02531B0840AC9C72904A4649FD15F";
const uint8_t LockComponent::binary_fingerprint[16] = {0x38,0xB0,0x25,0x31,0xB0,0x84,0x0A,0xC9,0xC7,0x29,0x04,0xA4,0x64,0x9F,0xD1,0x5F};
// Deserializes a LockComponent from iprot.
// Required fields: 1:type (LockType), 2:level (LockLevel), 3:dbname string
// -- TProtocolException::INVALID_DATA is thrown if any is missing.
// Optional fields: 4:tablename, 5:partitionname (tracked via __isset
// instead of local isset_ flags). Unknown fields are skipped.
// Returns the cumulative transfer count.
uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_type = false;
  bool isset_level = false;
  bool isset_dbname = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          // Enums travel as raw i32 and are cast back to the enum type.
          int32_t ecast362;
          xfer += iprot->readI32(ecast362);
          this->type = (LockType::type)ecast362;
          isset_type = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          int32_t ecast363;
          xfer += iprot->readI32(ecast363);
          this->level = (LockLevel::type)ecast363;
          isset_level = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->dbname);
          isset_dbname = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->tablename);
          this->__isset.tablename = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->partitionname);
          this->__isset.partitionname = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_type)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_level)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_dbname)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this LockComponent to oprot in field-id order.
// Required fields (1:type, 2:level, 3:dbname) are always written; optional
// fields (4:tablename, 5:partitionname) are emitted only when their
// __isset flag is set. Returns the cumulative transfer count.
uint32_t LockComponent::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("LockComponent");
  xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_I32, 1);
  xfer += oprot->writeI32((int32_t)this->type);  // enum encoded as i32
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("level", ::apache::thrift::protocol::T_I32, 2);
  xfer += oprot->writeI32((int32_t)this->level);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeString(this->dbname);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.tablename) {
    xfer += oprot->writeFieldBegin("tablename", ::apache::thrift::protocol::T_STRING, 4);
    xfer += oprot->writeString(this->tablename);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.partitionname) {
    xfer += oprot->writeFieldBegin("partitionname", ::apache::thrift::protocol::T_STRING, 5);
    xfer += oprot->writeString(this->partitionname);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges all members of two LockComponent values, including the
// __isset flags that mark which optional fields are populated.
// Member swaps are independent; "using ::std::swap" enables ADL.
void swap(LockComponent &a, LockComponent &b) {
  using ::std::swap;
  swap(a.__isset, b.__isset);
  swap(a.partitionname, b.partitionname);
  swap(a.tablename, b.tablename);
  swap(a.dbname, b.dbname);
  swap(a.level, b.level);
  swap(a.type, b.type);
}
// Thrift-generated schema fingerprints for LockRequest
// (binary_fingerprint is the byte-wise form of the hex ascii_fingerprint).
const char* LockRequest::ascii_fingerprint = "46BC5ED7196BC16CB216AD5CC67C6930";
const uint8_t LockRequest::binary_fingerprint[16] = {0x46,0xBC,0x5E,0xD7,0x19,0x6B,0xC1,0x6C,0xB2,0x16,0xAD,0x5C,0xC6,0x7C,0x69,0x30};
// Deserializes a LockRequest from iprot.
// Required fields: 1:component list<LockComponent>, 3:user, 4:hostname --
// TProtocolException::INVALID_DATA is thrown if any is missing.
// Optional field: 2:txnid i64 (tracked via __isset). Unknown fields are
// skipped. Returns the cumulative transfer count.
uint32_t LockRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_component = false;
  bool isset_user = false;
  bool isset_hostname = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            // Replace any existing contents, then size the vector up front
            // and read each LockComponent element in place.
            this->component.clear();
            uint32_t _size364;
            ::apache::thrift::protocol::TType _etype367;
            xfer += iprot->readListBegin(_etype367, _size364);
            this->component.resize(_size364);
            uint32_t _i368;
            for (_i368 = 0; _i368 < _size364; ++_i368)
            {
              xfer += this->component[_i368].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          isset_component = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->txnid);
          this->__isset.txnid = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->user);
          isset_user = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->hostname);
          isset_hostname = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_component)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_user)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_hostname)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this LockRequest to oprot in field-id order.
// Required fields (1:component, 3:user, 4:hostname) are always written;
// optional 2:txnid is emitted only when __isset.txnid is set.
// Returns the cumulative transfer count.
uint32_t LockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("LockRequest");
  xfer += oprot->writeFieldBegin("component", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->component.size()));
    std::vector<LockComponent> ::const_iterator _iter369;
    for (_iter369 = this->component.begin(); _iter369 != this->component.end(); ++_iter369)
    {
      xfer += (*_iter369).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  if (this->__isset.txnid) {
    xfer += oprot->writeFieldBegin("txnid", ::apache::thrift::protocol::T_I64, 2);
    xfer += oprot->writeI64(this->txnid);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldBegin("user", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeString(this->user);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("hostname", ::apache::thrift::protocol::T_STRING, 4);
  xfer += oprot->writeString(this->hostname);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges all members of two LockRequest values, including the
// __isset flags that mark whether the optional txnid is populated.
// Member swaps are independent; "using ::std::swap" enables ADL.
void swap(LockRequest &a, LockRequest &b) {
  using ::std::swap;
  swap(a.__isset, b.__isset);
  swap(a.hostname, b.hostname);
  swap(a.user, b.user);
  swap(a.txnid, b.txnid);
  swap(a.component, b.component);
}
// Thrift-generated schema fingerprints for LockResponse
// (binary_fingerprint is the byte-wise form of the hex ascii_fingerprint).
const char* LockResponse::ascii_fingerprint = "DFA40D9D2884599F3D1E7A57578F1384";
const uint8_t LockResponse::binary_fingerprint[16] = {0xDF,0xA4,0x0D,0x9D,0x28,0x84,0x59,0x9F,0x3D,0x1E,0x7A,0x57,0x57,0x8F,0x13,0x84};
// Deserializes a LockResponse from iprot.
// Both fields (1:lockid i64, 2:state LockState) are required; throws
// TProtocolException::INVALID_DATA if either is missing. Unknown fields
// are skipped. Returns the cumulative transfer count.
uint32_t LockResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_lockid = false;
  bool isset_state = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->lockid);
          isset_lockid = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          // Enums travel as raw i32 and are cast back to the enum type.
          int32_t ecast370;
          xfer += iprot->readI32(ecast370);
          this->state = (LockState::type)ecast370;
          isset_state = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_lockid)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_state)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this LockResponse to oprot in field-id order
// (1:lockid i64, 2:state i32). Both fields are required and always
// written. Returns the cumulative transfer count.
uint32_t LockResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("LockResponse");
  xfer += oprot->writeFieldBegin("lockid", ::apache::thrift::protocol::T_I64, 1);
  xfer += oprot->writeI64(this->lockid);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("state", ::apache::thrift::protocol::T_I32, 2);
  xfer += oprot->writeI32((int32_t)this->state);  // enum encoded as i32
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two LockResponse values.
// Member swaps are independent; "using ::std::swap" enables ADL.
void swap(LockResponse &a, LockResponse &b) {
  using ::std::swap;
  swap(a.state, b.state);
  swap(a.lockid, b.lockid);
}
// Thrift-generated schema fingerprints for CheckLockRequest
// (binary_fingerprint is the byte-wise form of the hex ascii_fingerprint).
const char* CheckLockRequest::ascii_fingerprint = "56A59CE7FFAF82BCA8A19FAACDE4FB75";
const uint8_t CheckLockRequest::binary_fingerprint[16] = {0x56,0xA5,0x9C,0xE7,0xFF,0xAF,0x82,0xBC,0xA8,0xA1,0x9F,0xAA,0xCD,0xE4,0xFB,0x75};
// Deserializes a CheckLockRequest from iprot.
// Its single field (1:lockid i64) is required; throws
// TProtocolException::INVALID_DATA if absent. Unknown fields are skipped.
// Returns the cumulative transfer count.
uint32_t CheckLockRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_lockid = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->lockid);
          isset_lockid = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_lockid)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this CheckLockRequest to oprot. The single required field
// (1:lockid i64) is always written. Returns the cumulative transfer count.
uint32_t CheckLockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("CheckLockRequest");
  xfer += oprot->writeFieldBegin("lockid", ::apache::thrift::protocol::T_I64, 1);
  xfer += oprot->writeI64(this->lockid);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two CheckLockRequest values by swapping
// their single lockid member ("using ::std::swap" enables ADL).
void swap(CheckLockRequest &a, CheckLockRequest &b) {
  using ::std::swap;
  swap(b.lockid, a.lockid);
}
// Thrift-generated schema fingerprints for UnlockRequest
// (binary_fingerprint is the byte-wise form of the hex ascii_fingerprint).
const char* UnlockRequest::ascii_fingerprint = "56A59CE7FFAF82BCA8A19FAACDE4FB75";
const uint8_t UnlockRequest::binary_fingerprint[16] = {0x56,0xA5,0x9C,0xE7,0xFF,0xAF,0x82,0xBC,0xA8,0xA1,0x9F,0xAA,0xCD,0xE4,0xFB,0x75};
// Deserializes an UnlockRequest from iprot.
// Its single field (1:lockid i64) is required; throws
// TProtocolException::INVALID_DATA if absent. Unknown fields are skipped.
// Returns the cumulative transfer count.
uint32_t UnlockRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_lockid = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->lockid);
          isset_lockid = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_lockid)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
}
uint32_t UnlockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
uint32_t xfer = 0;
xfer += oprot->writeStructBegin("UnlockRequest");
xfer += oprot->writeFieldBegin("lockid", ::apache::thrift::protocol::T_I64, 1);
xfer += oprot->writeI64(this->lockid);
xfer += oprot->writeFieldEnd();
xfer += oprot->writeFieldStop();
xfer += oprot->writeStructEnd();
return xfer;
}
// Exchanges the contents of two UnlockRequest values by swapping
// their single lockid member ("using ::std::swap" enables ADL).
void swap(UnlockRequest &a, UnlockRequest &b) {
  using ::std::swap;
  swap(b.lockid, a.lockid);
}
// Thrift-generated schema fingerprints for ShowLocksRequest
// (binary_fingerprint is the byte-wise form of the hex ascii_fingerprint).
const char* ShowLocksRequest::ascii_fingerprint = "99914B932BD37A50B983C5E7C90AE93B";
const uint8_t ShowLocksRequest::binary_fingerprint[16] = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B};
// Deserializes a ShowLocksRequest from iprot. The struct declares no
// fields, so every encountered field is skipped until T_STOP.
// Returns the cumulative transfer count.
uint32_t ShowLocksRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    xfer += iprot->skip(ftype);
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes this ShowLocksRequest to oprot. The struct has no fields,
// so only the struct framing and field stop are written.
// Returns the cumulative transfer count.
uint32_t ShowLocksRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("ShowLocksRequest");
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// No-op swap: ShowLocksRequest has no members to exchange. The casts
// suppress unused-parameter warnings while keeping the uniform
// swap interface generated for every struct.
void swap(ShowLocksRequest &a, ShowLocksRequest &b) {
  using ::std::swap;
  static_cast<void>(a);
  static_cast<void>(b);
}
// Thrift-generated schema fingerprints for ShowLocksResponseElement
// (binary_fingerprint is the byte-wise form of the hex ascii_fingerprint).
const char* ShowLocksResponseElement::ascii_fingerprint = "5AD11F0E0EF1EE0A7C08B00FEFCFF24F";
const uint8_t ShowLocksResponseElement::binary_fingerprint[16] = {0x5A,0xD1,0x1F,0x0E,0x0E,0xF1,0xEE,0x0A,0x7C,0x08,0xB0,0x0F,0xEF,0xCF,0xF2,0x4F};
// Deserializes a ShowLocksResponseElement from iprot.
// Required fields (checked after the read loop, INVALID_DATA on absence):
//   1:lockid i64, 2:dbname, 5:state (LockState), 6:type (LockType),
//   8:lastheartbeat i64, 10:user, 11:hostname.
// Optional fields (tracked via __isset):
//   3:tablename, 4:partname, 7:txnid i64, 9:acquiredat i64.
// Unknown fields are skipped. Returns the cumulative transfer count.
uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_lockid = false;
  bool isset_dbname = false;
  bool isset_state = false;
  bool isset_type = false;
  bool isset_lastheartbeat = false;
  bool isset_user = false;
  bool isset_hostname = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->lockid);
          isset_lockid = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->dbname);
          isset_dbname = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->tablename);
          this->__isset.tablename = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->partname);
          this->__isset.partname = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          // Enums travel as raw i32 and are cast back to the enum type.
          int32_t ecast371;
          xfer += iprot->readI32(ecast371);
          this->state = (LockState::type)ecast371;
          isset_state = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 6:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          int32_t ecast372;
          xfer += iprot->readI32(ecast372);
          this->type = (LockType::type)ecast372;
          isset_type = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 7:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->txnid);
          this->__isset.txnid = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 8:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->lastheartbeat);
          isset_lastheartbeat = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 9:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->acquiredat);
          this->__isset.acquiredat = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 10:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->user);
          isset_user = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 11:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->hostname);
          isset_hostname = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  if (!isset_lockid)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_dbname)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_state)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_type)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_lastheartbeat)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_user)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_hostname)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this ShowLocksResponseElement to oprot in field-id order.
// Required fields (1:lockid, 2:dbname, 5:state, 6:type, 8:lastheartbeat,
// 10:user, 11:hostname) are always written; optional fields
// (3:tablename, 4:partname, 7:txnid, 9:acquiredat) are emitted only when
// their __isset flag is set. Returns the cumulative transfer count.
uint32_t ShowLocksResponseElement::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("ShowLocksResponseElement");
  xfer += oprot->writeFieldBegin("lockid", ::apache::thrift::protocol::T_I64, 1);
  xfer += oprot->writeI64(this->lockid);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->dbname);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.tablename) {
    xfer += oprot->writeFieldBegin("tablename", ::apache::thrift::protocol::T_STRING, 3);
    xfer += oprot->writeString(this->tablename);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.partname) {
    xfer += oprot->writeFieldBegin("partname", ::apache::thrift::protocol::T_STRING, 4);
    xfer += oprot->writeString(this->partname);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldBegin("state", ::apache::thrift::protocol::T_I32, 5);
  xfer += oprot->writeI32((int32_t)this->state);  // enum encoded as i32
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_I32, 6);
  xfer += oprot->writeI32((int32_t)this->type);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.txnid) {
    xfer += oprot->writeFieldBegin("txnid", ::apache::thrift::protocol::T_I64, 7);
    xfer += oprot->writeI64(this->txnid);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldBegin("lastheartbeat", ::apache::thrift::protocol::T_I64, 8);
  xfer += oprot->writeI64(this->lastheartbeat);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.acquiredat) {
    xfer += oprot->writeFieldBegin("acquiredat", ::apache::thrift::protocol::T_I64, 9);
    xfer += oprot->writeI64(this->acquiredat);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldBegin("user", ::apache::thrift::protocol::T_STRING, 10);
  xfer += oprot->writeString(this->user);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("hostname", ::apache::thrift::protocol::T_STRING, 11);
  xfer += oprot->writeString(this->hostname);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise exchange of two ShowLocksResponseElement values, including the
// __isset flags so optional-field presence travels with the field values.
void swap(ShowLocksResponseElement &a, ShowLocksResponseElement &b) {
  ::std::swap(a.lockid, b.lockid);
  ::std::swap(a.dbname, b.dbname);
  ::std::swap(a.tablename, b.tablename);
  ::std::swap(a.partname, b.partname);
  ::std::swap(a.state, b.state);
  ::std::swap(a.type, b.type);
  ::std::swap(a.txnid, b.txnid);
  ::std::swap(a.lastheartbeat, b.lastheartbeat);
  ::std::swap(a.acquiredat, b.acquiredat);
  ::std::swap(a.user, b.user);
  ::std::swap(a.hostname, b.hostname);
  ::std::swap(a.__isset, b.__isset);
}
// Generated Thrift schema fingerprints identifying ShowLocksResponse's wire layout.
const char* ShowLocksResponse::ascii_fingerprint = "BD598AA60FE941361FB54C43973C011F";
const uint8_t ShowLocksResponse::binary_fingerprint[16] = {0xBD,0x59,0x8A,0xA6,0x0F,0xE9,0x41,0x36,0x1F,0xB5,0x4C,0x43,0x97,0x3C,0x01,0x1F};
// Deserializes a ShowLocksResponse from iprot. Field 1 ("locks") is decoded as a
// list of ShowLocksResponseElement; unknown field ids or mismatched wire types
// are skipped for forward compatibility. Returns the number of bytes consumed.
uint32_t ShowLocksResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->locks.clear();
            uint32_t _size373;
            // NOTE(review): the declared element type (_etype376) is read but not
            // validated against T_STRUCT — standard Thrift-generated behavior.
            ::apache::thrift::protocol::TType _etype376;
            xfer += iprot->readListBegin(_etype376, _size373);
            this->locks.resize(_size373);
            uint32_t _i377;
            for (_i377 = 0; _i377 < _size373; ++_i377)
            {
              xfer += this->locks[_i377].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.locks = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes this ShowLocksResponse to oprot ("locks" is always written).
// Returns the number of bytes written.
uint32_t ShowLocksResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("ShowLocksResponse");
  xfer += oprot->writeFieldBegin("locks", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->locks.size()));
    std::vector<ShowLocksResponseElement> ::const_iterator _iter378;
    for (_iter378 = this->locks.begin(); _iter378 != this->locks.end(); ++_iter378)
    {
      xfer += (*_iter378).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(ShowLocksResponse &a, ShowLocksResponse &b) {
using ::std::swap;
swap(a.locks, b.locks);
swap(a.__isset, b.__isset);
}
// Generated Thrift schema fingerprints identifying HeartbeatRequest's wire layout.
const char* HeartbeatRequest::ascii_fingerprint = "0354D07C94CB8542872CA1277008860A";
const uint8_t HeartbeatRequest::binary_fingerprint[16] = {0x03,0x54,0xD0,0x7C,0x94,0xCB,0x85,0x42,0x87,0x2C,0xA1,0x27,0x70,0x08,0x86,0x0A};
// Deserializes a HeartbeatRequest from iprot. Both fields ("lockid", "txnid")
// are optional; presence is recorded in __isset. Unknown fields are skipped.
// Returns the number of bytes consumed.
uint32_t HeartbeatRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->lockid);
          this->__isset.lockid = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->txnid);
          this->__isset.txnid = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes this HeartbeatRequest to oprot. Each optional field is emitted
// only when its __isset flag is set. Returns the number of bytes written.
uint32_t HeartbeatRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("HeartbeatRequest");
  if (this->__isset.lockid) {
    xfer += oprot->writeFieldBegin("lockid", ::apache::thrift::protocol::T_I64, 1);
    xfer += oprot->writeI64(this->lockid);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.txnid) {
    xfer += oprot->writeFieldBegin("txnid", ::apache::thrift::protocol::T_I64, 2);
    xfer += oprot->writeI64(this->txnid);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise exchange of two HeartbeatRequest values, including __isset flags.
void swap(HeartbeatRequest &a, HeartbeatRequest &b) {
  ::std::swap(a.lockid, b.lockid);
  ::std::swap(a.txnid, b.txnid);
  ::std::swap(a.__isset, b.__isset);
}
// Generated Thrift schema fingerprints identifying HeartbeatTxnRangeRequest's wire layout.
const char* HeartbeatTxnRangeRequest::ascii_fingerprint = "F33135321253DAEB67B0E79E416CA831";
const uint8_t HeartbeatTxnRangeRequest::binary_fingerprint[16] = {0xF3,0x31,0x35,0x32,0x12,0x53,0xDA,0xEB,0x67,0xB0,0xE7,0x9E,0x41,0x6C,0xA8,0x31};
// Deserializes a HeartbeatTxnRangeRequest from iprot. Fields "min" and "max"
// are required: INVALID_DATA is thrown if either is absent after the field
// loop. Unknown fields are skipped. Returns the number of bytes consumed.
uint32_t HeartbeatTxnRangeRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_min = false;
  bool isset_max = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->min);
          isset_min = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->max);
          isset_max = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation: both bounds must have appeared on the wire.
  if (!isset_min)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_max)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this HeartbeatTxnRangeRequest to oprot (both fields always written).
// Returns the number of bytes written.
uint32_t HeartbeatTxnRangeRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("HeartbeatTxnRangeRequest");
  xfer += oprot->writeFieldBegin("min", ::apache::thrift::protocol::T_I64, 1);
  xfer += oprot->writeI64(this->min);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("max", ::apache::thrift::protocol::T_I64, 2);
  xfer += oprot->writeI64(this->max);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise exchange of two HeartbeatTxnRangeRequest values. Both fields are
// required, so there is no __isset state to move.
void swap(HeartbeatTxnRangeRequest &a, HeartbeatTxnRangeRequest &b) {
  ::std::swap(a.min, b.min);
  ::std::swap(a.max, b.max);
}
// Generated Thrift schema fingerprints identifying HeartbeatTxnRangeResponse's wire layout.
const char* HeartbeatTxnRangeResponse::ascii_fingerprint = "33E49A70BD5C04262A0F407E3656E3CF";
const uint8_t HeartbeatTxnRangeResponse::binary_fingerprint[16] = {0x33,0xE4,0x9A,0x70,0xBD,0x5C,0x04,0x26,0x2A,0x0F,0x40,0x7E,0x36,0x56,0xE3,0xCF};
// Deserializes a HeartbeatTxnRangeResponse from iprot. Fields "aborted" and
// "nosuch" are required set<i64> values; INVALID_DATA is thrown if either is
// absent. Unknown fields are skipped. Returns the number of bytes consumed.
uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_aborted = false;
  bool isset_nosuch = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_SET) {
          {
            this->aborted.clear();
            uint32_t _size379;
            ::apache::thrift::protocol::TType _etype382;
            xfer += iprot->readSetBegin(_etype382, _size379);
            uint32_t _i383;
            for (_i383 = 0; _i383 < _size379; ++_i383)
            {
              int64_t _elem384;
              xfer += iprot->readI64(_elem384);
              this->aborted.insert(_elem384);
            }
            xfer += iprot->readSetEnd();
          }
          isset_aborted = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_SET) {
          {
            this->nosuch.clear();
            uint32_t _size385;
            ::apache::thrift::protocol::TType _etype388;
            xfer += iprot->readSetBegin(_etype388, _size385);
            uint32_t _i389;
            for (_i389 = 0; _i389 < _size385; ++_i389)
            {
              int64_t _elem390;
              xfer += iprot->readI64(_elem390);
              this->nosuch.insert(_elem390);
            }
            xfer += iprot->readSetEnd();
          }
          isset_nosuch = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_aborted)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_nosuch)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this HeartbeatTxnRangeResponse to oprot (both sets always written).
// Returns the number of bytes written.
uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("HeartbeatTxnRangeResponse");
  xfer += oprot->writeFieldBegin("aborted", ::apache::thrift::protocol::T_SET, 1);
  {
    xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->aborted.size()));
    std::set<int64_t> ::const_iterator _iter391;
    for (_iter391 = this->aborted.begin(); _iter391 != this->aborted.end(); ++_iter391)
    {
      xfer += oprot->writeI64((*_iter391));
    }
    xfer += oprot->writeSetEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("nosuch", ::apache::thrift::protocol::T_SET, 2);
  {
    xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->nosuch.size()));
    std::set<int64_t> ::const_iterator _iter392;
    for (_iter392 = this->nosuch.begin(); _iter392 != this->nosuch.end(); ++_iter392)
    {
      xfer += oprot->writeI64((*_iter392));
    }
    xfer += oprot->writeSetEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise exchange of two HeartbeatTxnRangeResponse values (required
// fields only, so no __isset state is exchanged).
void swap(HeartbeatTxnRangeResponse &a, HeartbeatTxnRangeResponse &b) {
  ::std::swap(a.aborted, b.aborted);
  ::std::swap(a.nosuch, b.nosuch);
}
// Generated Thrift schema fingerprints identifying CompactionRequest's wire layout.
const char* CompactionRequest::ascii_fingerprint = "899FD1F339D8318D628687CC2CE2864B";
const uint8_t CompactionRequest::binary_fingerprint[16] = {0x89,0x9F,0xD1,0xF3,0x39,0xD8,0x31,0x8D,0x62,0x86,0x87,0xCC,0x2C,0xE2,0x86,0x4B};
// Deserializes a CompactionRequest from iprot. "dbname", "tablename" and
// "type" are required (INVALID_DATA if absent); "partitionname" and "runas"
// are optional and tracked via __isset. Unknown fields are skipped.
// Returns the number of bytes consumed.
uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_dbname = false;
  bool isset_tablename = false;
  bool isset_type = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->dbname);
          isset_dbname = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->tablename);
          isset_tablename = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->partitionname);
          this->__isset.partitionname = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          // Enums travel on the wire as i32; cast back to CompactionType.
          // NOTE(review): the value is not range-checked against the enum.
          int32_t ecast393;
          xfer += iprot->readI32(ecast393);
          this->type = (CompactionType::type)ecast393;
          isset_type = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->runas);
          this->__isset.runas = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_dbname)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_tablename)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_type)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this CompactionRequest to oprot. Required fields are always
// written; optional fields only when flagged in __isset.
// Returns the number of bytes written.
uint32_t CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("CompactionRequest");
  xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->dbname);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("tablename", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->tablename);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.partitionname) {
    xfer += oprot->writeFieldBegin("partitionname", ::apache::thrift::protocol::T_STRING, 3);
    xfer += oprot->writeString(this->partitionname);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_I32, 4);
  xfer += oprot->writeI32((int32_t)this->type);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.runas) {
    xfer += oprot->writeFieldBegin("runas", ::apache::thrift::protocol::T_STRING, 5);
    xfer += oprot->writeString(this->runas);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise exchange of two CompactionRequest values, including __isset flags.
void swap(CompactionRequest &a, CompactionRequest &b) {
  ::std::swap(a.dbname, b.dbname);
  ::std::swap(a.tablename, b.tablename);
  ::std::swap(a.partitionname, b.partitionname);
  ::std::swap(a.type, b.type);
  ::std::swap(a.runas, b.runas);
  ::std::swap(a.__isset, b.__isset);
}
// Generated Thrift schema fingerprints identifying ShowCompactRequest's wire layout.
// (This struct carries no fields; the fingerprint is that of an empty struct.)
const char* ShowCompactRequest::ascii_fingerprint = "99914B932BD37A50B983C5E7C90AE93B";
const uint8_t ShowCompactRequest::binary_fingerprint[16] = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B};
// Deserializes a ShowCompactRequest: the struct has no fields, so every field
// encountered is skipped until T_STOP. Returns the number of bytes consumed.
uint32_t ShowCompactRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    xfer += iprot->skip(ftype);
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes this (field-less) ShowCompactRequest: just the struct envelope.
// Returns the number of bytes written.
uint32_t ShowCompactRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("ShowCompactRequest");
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// ShowCompactRequest has no data members, so swapping is a no-op; the casts
// only silence unused-parameter warnings.
void swap(ShowCompactRequest &a, ShowCompactRequest &b) {
  static_cast<void>(a);
  static_cast<void>(b);
}
// Generated Thrift schema fingerprints identifying ShowCompactResponseElement's wire layout.
const char* ShowCompactResponseElement::ascii_fingerprint = "2F338C265DC4FD82DD13F4966FE43F13";
const uint8_t ShowCompactResponseElement::binary_fingerprint[16] = {0x2F,0x33,0x8C,0x26,0x5D,0xC4,0xFD,0x82,0xDD,0x13,0xF4,0x96,0x6F,0xE4,0x3F,0x13};
// Deserializes a ShowCompactResponseElement from iprot. "dbname", "tablename",
// "type" and "state" are required (INVALID_DATA if absent); "partitionname",
// "workerid", "start" and "runAs" are optional and tracked via __isset.
// Unknown fields are skipped. Returns the number of bytes consumed.
uint32_t ShowCompactResponseElement::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_dbname = false;
  bool isset_tablename = false;
  bool isset_type = false;
  bool isset_state = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->dbname);
          isset_dbname = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->tablename);
          isset_tablename = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->partitionname);
          this->__isset.partitionname = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          // Enum arrives as i32; cast back to CompactionType (not range-checked).
          int32_t ecast394;
          xfer += iprot->readI32(ecast394);
          this->type = (CompactionType::type)ecast394;
          isset_type = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->state);
          isset_state = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 6:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->workerid);
          this->__isset.workerid = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 7:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->start);
          this->__isset.start = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 8:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->runAs);
          this->__isset.runAs = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_dbname)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_tablename)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_type)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_state)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this ShowCompactResponseElement to oprot. Required fields are
// always written; optional fields only when flagged in __isset.
// Returns the number of bytes written.
uint32_t ShowCompactResponseElement::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("ShowCompactResponseElement");
  xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->dbname);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("tablename", ::apache::thrift::protocol::T_STRING, 2);
  xfer += oprot->writeString(this->tablename);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.partitionname) {
    xfer += oprot->writeFieldBegin("partitionname", ::apache::thrift::protocol::T_STRING, 3);
    xfer += oprot->writeString(this->partitionname);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_I32, 4);
  xfer += oprot->writeI32((int32_t)this->type);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("state", ::apache::thrift::protocol::T_STRING, 5);
  xfer += oprot->writeString(this->state);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.workerid) {
    xfer += oprot->writeFieldBegin("workerid", ::apache::thrift::protocol::T_STRING, 6);
    xfer += oprot->writeString(this->workerid);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.start) {
    xfer += oprot->writeFieldBegin("start", ::apache::thrift::protocol::T_I64, 7);
    xfer += oprot->writeI64(this->start);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.runAs) {
    xfer += oprot->writeFieldBegin("runAs", ::apache::thrift::protocol::T_STRING, 8);
    xfer += oprot->writeString(this->runAs);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise exchange of two ShowCompactResponseElement values, including the
// __isset flags so optional-field presence travels with the values.
void swap(ShowCompactResponseElement &a, ShowCompactResponseElement &b) {
  ::std::swap(a.dbname, b.dbname);
  ::std::swap(a.tablename, b.tablename);
  ::std::swap(a.partitionname, b.partitionname);
  ::std::swap(a.type, b.type);
  ::std::swap(a.state, b.state);
  ::std::swap(a.workerid, b.workerid);
  ::std::swap(a.start, b.start);
  ::std::swap(a.runAs, b.runAs);
  ::std::swap(a.__isset, b.__isset);
}
// Generated Thrift schema fingerprints identifying ShowCompactResponse's wire layout.
const char* ShowCompactResponse::ascii_fingerprint = "915B7B8DB8966D65769C0F98707BBAE3";
const uint8_t ShowCompactResponse::binary_fingerprint[16] = {0x91,0x5B,0x7B,0x8D,0xB8,0x96,0x6D,0x65,0x76,0x9C,0x0F,0x98,0x70,0x7B,0xBA,0xE3};
// Deserializes a ShowCompactResponse from iprot. "compacts" (field 1) is a
// required list of ShowCompactResponseElement; INVALID_DATA is thrown if it is
// absent. Unknown fields are skipped. Returns the number of bytes consumed.
uint32_t ShowCompactResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_compacts = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->compacts.clear();
            uint32_t _size395;
            ::apache::thrift::protocol::TType _etype398;
            xfer += iprot->readListBegin(_etype398, _size395);
            this->compacts.resize(_size395);
            uint32_t _i399;
            for (_i399 = 0; _i399 < _size395; ++_i399)
            {
              xfer += this->compacts[_i399].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          isset_compacts = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_compacts)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this ShowCompactResponse to oprot ("compacts" is always written).
// Returns the number of bytes written.
uint32_t ShowCompactResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("ShowCompactResponse");
  xfer += oprot->writeFieldBegin("compacts", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->compacts.size()));
    std::vector<ShowCompactResponseElement> ::const_iterator _iter400;
    for (_iter400 = this->compacts.begin(); _iter400 != this->compacts.end(); ++_iter400)
    {
      xfer += (*_iter400).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchange the single (required) member of two ShowCompactResponse values.
void swap(ShowCompactResponse &a, ShowCompactResponse &b) {
  ::std::swap(a.compacts, b.compacts);
}
// Generated Thrift schema fingerprints identifying NotificationEventRequest's wire layout.
const char* NotificationEventRequest::ascii_fingerprint = "6E578DA8AB10EED824A75534350EBAEF";
const uint8_t NotificationEventRequest::binary_fingerprint[16] = {0x6E,0x57,0x8D,0xA8,0xAB,0x10,0xEE,0xD8,0x24,0xA7,0x55,0x34,0x35,0x0E,0xBA,0xEF};
// Deserializes a NotificationEventRequest from iprot. "lastEvent" is required
// (INVALID_DATA if absent); "maxEvents" is optional and tracked via __isset.
// Unknown fields are skipped. Returns the number of bytes consumed.
uint32_t NotificationEventRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_lastEvent = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->lastEvent);
          isset_lastEvent = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->maxEvents);
          this->__isset.maxEvents = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_lastEvent)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this NotificationEventRequest to oprot. "lastEvent" is always
// written; "maxEvents" only when flagged in __isset.
// Returns the number of bytes written.
uint32_t NotificationEventRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("NotificationEventRequest");
  xfer += oprot->writeFieldBegin("lastEvent", ::apache::thrift::protocol::T_I64, 1);
  xfer += oprot->writeI64(this->lastEvent);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.maxEvents) {
    xfer += oprot->writeFieldBegin("maxEvents", ::apache::thrift::protocol::T_I32, 2);
    xfer += oprot->writeI32(this->maxEvents);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise exchange of two NotificationEventRequest values, including __isset flags.
void swap(NotificationEventRequest &a, NotificationEventRequest &b) {
  ::std::swap(a.lastEvent, b.lastEvent);
  ::std::swap(a.maxEvents, b.maxEvents);
  ::std::swap(a.__isset, b.__isset);
}
// Generated Thrift schema fingerprints identifying NotificationEvent's wire layout.
const char* NotificationEvent::ascii_fingerprint = "ACAF0036D9999F3A389F490F5E22D369";
const uint8_t NotificationEvent::binary_fingerprint[16] = {0xAC,0xAF,0x00,0x36,0xD9,0x99,0x9F,0x3A,0x38,0x9F,0x49,0x0F,0x5E,0x22,0xD3,0x69};
// Deserializes a NotificationEvent from iprot. "eventId", "eventTime",
// "eventType" and "message" are required (INVALID_DATA if absent); "dbName"
// and "tableName" are optional and tracked via __isset. Unknown fields are
// skipped. Returns the number of bytes consumed.
uint32_t NotificationEvent::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_eventId = false;
  bool isset_eventTime = false;
  bool isset_eventType = false;
  bool isset_message = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->eventId);
          isset_eventId = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_I32) {
          xfer += iprot->readI32(this->eventTime);
          isset_eventTime = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->eventType);
          isset_eventType = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->dbName);
          this->__isset.dbName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->tableName);
          this->__isset.tableName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 6:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->message);
          isset_message = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_eventId)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_eventTime)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_eventType)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_message)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this NotificationEvent to oprot. Required fields are always
// written; optional fields only when flagged in __isset.
// Returns the number of bytes written.
uint32_t NotificationEvent::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("NotificationEvent");
  xfer += oprot->writeFieldBegin("eventId", ::apache::thrift::protocol::T_I64, 1);
  xfer += oprot->writeI64(this->eventId);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("eventTime", ::apache::thrift::protocol::T_I32, 2);
  xfer += oprot->writeI32(this->eventTime);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldBegin("eventType", ::apache::thrift::protocol::T_STRING, 3);
  xfer += oprot->writeString(this->eventType);
  xfer += oprot->writeFieldEnd();
  if (this->__isset.dbName) {
    xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 4);
    xfer += oprot->writeString(this->dbName);
    xfer += oprot->writeFieldEnd();
  }
  if (this->__isset.tableName) {
    xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 5);
    xfer += oprot->writeString(this->tableName);
    xfer += oprot->writeFieldEnd();
  }
  xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 6);
  xfer += oprot->writeString(this->message);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Member-wise exchange of two NotificationEvent values, including __isset flags.
void swap(NotificationEvent &a, NotificationEvent &b) {
  ::std::swap(a.eventId, b.eventId);
  ::std::swap(a.eventTime, b.eventTime);
  ::std::swap(a.eventType, b.eventType);
  ::std::swap(a.dbName, b.dbName);
  ::std::swap(a.tableName, b.tableName);
  ::std::swap(a.message, b.message);
  ::std::swap(a.__isset, b.__isset);
}
// Generated Thrift schema fingerprints identifying NotificationEventResponse's wire layout.
const char* NotificationEventResponse::ascii_fingerprint = "EE3DB23399639114BCD1782A0FB01818";
const uint8_t NotificationEventResponse::binary_fingerprint[16] = {0xEE,0x3D,0xB2,0x33,0x99,0x63,0x91,0x14,0xBC,0xD1,0x78,0x2A,0x0F,0xB0,0x18,0x18};
// Deserializes a NotificationEventResponse from iprot. "events" (field 1) is a
// required list of NotificationEvent; INVALID_DATA is thrown if it is absent.
// Unknown fields are skipped. Returns the number of bytes consumed.
uint32_t NotificationEventResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_events = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->events.clear();
            uint32_t _size401;
            ::apache::thrift::protocol::TType _etype404;
            xfer += iprot->readListBegin(_etype404, _size401);
            this->events.resize(_size401);
            uint32_t _i405;
            for (_i405 = 0; _i405 < _size401; ++_i405)
            {
              xfer += this->events[_i405].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
          isset_events = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_events)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this NotificationEventResponse to oprot ("events" always written).
// Returns the number of bytes written.
uint32_t NotificationEventResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("NotificationEventResponse");
  xfer += oprot->writeFieldBegin("events", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->events.size()));
    std::vector<NotificationEvent> ::const_iterator _iter406;
    for (_iter406 = this->events.begin(); _iter406 != this->events.end(); ++_iter406)
    {
      xfer += (*_iter406).write(oprot);
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchange the single (required) member of two NotificationEventResponse values.
void swap(NotificationEventResponse &a, NotificationEventResponse &b) {
  ::std::swap(a.events, b.events);
}
// Generated Thrift schema fingerprints identifying CurrentNotificationEventId's wire layout.
const char* CurrentNotificationEventId::ascii_fingerprint = "56A59CE7FFAF82BCA8A19FAACDE4FB75";
const uint8_t CurrentNotificationEventId::binary_fingerprint[16] = {0x56,0xA5,0x9C,0xE7,0xFF,0xAF,0x82,0xBC,0xA8,0xA1,0x9F,0xAA,0xCD,0xE4,0xFB,0x75};
// Deserializes a CurrentNotificationEventId from iprot. "eventId" (field 1) is
// required; INVALID_DATA is thrown if it is absent. Unknown fields are
// skipped. Returns the number of bytes consumed.
uint32_t CurrentNotificationEventId::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_eventId = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_I64) {
          xfer += iprot->readI64(this->eventId);
          isset_eventId = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_eventId)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this CurrentNotificationEventId to oprot ("eventId" always written).
// Returns the number of bytes written.
uint32_t CurrentNotificationEventId::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("CurrentNotificationEventId");
  xfer += oprot->writeFieldBegin("eventId", ::apache::thrift::protocol::T_I64, 1);
  xfer += oprot->writeI64(this->eventId);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchange the single (required) member of two CurrentNotificationEventId values.
void swap(CurrentNotificationEventId &a, CurrentNotificationEventId &b) {
  ::std::swap(a.eventId, b.eventId);
}
// Generated Thrift schema fingerprints identifying InsertEventRequestData's wire layout.
const char* InsertEventRequestData::ascii_fingerprint = "ACE4F644F0FDD289DDC4EE5B83BC13C0";
const uint8_t InsertEventRequestData::binary_fingerprint[16] = {0xAC,0xE4,0xF6,0x44,0xF0,0xFD,0xD2,0x89,0xDD,0xC4,0xEE,0x5B,0x83,0xBC,0x13,0xC0};
// Deserializes an InsertEventRequestData from iprot. "filesAdded" (field 1) is
// a required list of strings; INVALID_DATA is thrown if it is absent. Unknown
// fields are skipped. Returns the number of bytes consumed.
uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_filesAdded = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            this->filesAdded.clear();
            uint32_t _size407;
            ::apache::thrift::protocol::TType _etype410;
            xfer += iprot->readListBegin(_etype410, _size407);
            this->filesAdded.resize(_size407);
            uint32_t _i411;
            for (_i411 = 0; _i411 < _size407; ++_i411)
            {
              xfer += iprot->readString(this->filesAdded[_i411]);
            }
            xfer += iprot->readListEnd();
          }
          isset_filesAdded = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Required-field validation.
  if (!isset_filesAdded)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this InsertEventRequestData to oprot ("filesAdded" always written).
// Returns the number of bytes written.
uint32_t InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("InsertEventRequestData");
  xfer += oprot->writeFieldBegin("filesAdded", ::apache::thrift::protocol::T_LIST, 1);
  {
    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->filesAdded.size()));
    std::vector<std::string> ::const_iterator _iter412;
    for (_iter412 = this->filesAdded.begin(); _iter412 != this->filesAdded.end(); ++_iter412)
    {
      xfer += oprot->writeString((*_iter412));
    }
    xfer += oprot->writeListEnd();
  }
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two InsertEventRequestData instances.
void swap(InsertEventRequestData &a, InsertEventRequestData &b) {
  // The only member is a vector; member swap exchanges buffers in O(1).
  a.filesAdded.swap(b.filesAdded);
}
// Thrift-generated schema fingerprints used for runtime type-identity checks.
const char* FireEventRequestData::ascii_fingerprint = "187E754B26707EE32451E6A27FB672CE";
const uint8_t FireEventRequestData::binary_fingerprint[16] = {0x18,0x7E,0x75,0x4B,0x26,0x70,0x7E,0xE3,0x24,0x51,0xE6,0xA2,0x7F,0xB6,0x72,0xCE};
// Deserializes this struct from iprot and returns the bytes consumed.
// The only member, insertData (field id 1, struct), is tracked through
// __isset.insertData; no field is required on read.
uint32_t FireEventRequestData::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->insertData.read(iprot);
          this->__isset.insertData = true;
        } else {
          // Wire type does not match the declared type: skip the value.
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip for forward compatibility.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes this struct to oprot and returns the number of bytes written.
uint32_t FireEventRequestData::write(::apache::thrift::protocol::TProtocol* oprot) const {
  // Emit the single 'insertData' member inside the struct envelope.
  uint32_t bytes = oprot->writeStructBegin("FireEventRequestData");
  bytes += oprot->writeFieldBegin("insertData", ::apache::thrift::protocol::T_STRUCT, 1);
  bytes += this->insertData.write(oprot);
  bytes += oprot->writeFieldEnd();
  bytes += oprot->writeFieldStop();
  bytes += oprot->writeStructEnd();
  return bytes;
}
// Exchanges the contents of two FireEventRequestData instances.
void swap(FireEventRequestData &a, FireEventRequestData &b) {
  using ::std::swap;
  // Members are independent, so the swap order is irrelevant.
  swap(a.__isset, b.__isset);
  swap(a.insertData, b.insertData);
}
// Thrift-generated schema fingerprints used for runtime type-identity checks.
const char* FireEventRequest::ascii_fingerprint = "1BA3A7F00159254072C3979B1429B50B";
const uint8_t FireEventRequest::binary_fingerprint[16] = {0x1B,0xA3,0xA7,0xF0,0x01,0x59,0x25,0x40,0x72,0xC3,0x97,0x9B,0x14,0x29,0xB5,0x0B};
// Deserializes this struct from iprot and returns the bytes consumed.
// successful (id 1, bool) and data (id 2, struct) are required — a missing
// one raises TProtocolException::INVALID_DATA. dbName (3), tableName (4)
// and partitionVals (5) are optional and tracked via __isset.
uint32_t FireEventRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  bool isset_successful = false;
  bool isset_data = false;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_BOOL) {
          xfer += iprot->readBool(this->successful);
          isset_successful = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 2:
        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
          xfer += this->data.read(iprot);
          isset_data = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 3:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->dbName);
          this->__isset.dbName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 4:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->tableName);
          this->__isset.tableName = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      case 5:
        if (ftype == ::apache::thrift::protocol::T_LIST) {
          {
            // Replace the current contents with the list from the wire.
            this->partitionVals.clear();
            uint32_t _size413;
            ::apache::thrift::protocol::TType _etype416;
            xfer += iprot->readListBegin(_etype416, _size413);
            this->partitionVals.resize(_size413);
            uint32_t _i417;
            for (_i417 = 0; _i417 < _size413; ++_i417)
            {
              xfer += iprot->readString(this->partitionVals[_i417]);
            }
            xfer += iprot->readListEnd();
          }
          this->__isset.partitionVals = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip for forward compatibility.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  // Enforce required fields only after the whole struct has been drained.
  if (!isset_successful)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  if (!isset_data)
    throw TProtocolException(TProtocolException::INVALID_DATA);
  return xfer;
}
// Serializes this request to oprot and returns the number of bytes written.
// Required fields 'successful' (1) and 'data' (2) are always emitted;
// dbName (3), tableName (4) and partitionVals (5) only when set in __isset.
uint32_t FireEventRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t bytes = 0;
  bytes += oprot->writeStructBegin("FireEventRequest");
  bytes += oprot->writeFieldBegin("successful", ::apache::thrift::protocol::T_BOOL, 1);
  bytes += oprot->writeBool(this->successful);
  bytes += oprot->writeFieldEnd();
  bytes += oprot->writeFieldBegin("data", ::apache::thrift::protocol::T_STRUCT, 2);
  bytes += this->data.write(oprot);
  bytes += oprot->writeFieldEnd();
  if (this->__isset.dbName) {
    bytes += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 3);
    bytes += oprot->writeString(this->dbName);
    bytes += oprot->writeFieldEnd();
  }
  if (this->__isset.tableName) {
    bytes += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 4);
    bytes += oprot->writeString(this->tableName);
    bytes += oprot->writeFieldEnd();
  }
  if (this->__isset.partitionVals) {
    bytes += oprot->writeFieldBegin("partitionVals", ::apache::thrift::protocol::T_LIST, 5);
    bytes += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionVals.size()));
    for (std::vector<std::string>::const_iterator it = this->partitionVals.begin(); it != this->partitionVals.end(); ++it) {
      bytes += oprot->writeString(*it);
    }
    bytes += oprot->writeListEnd();
    bytes += oprot->writeFieldEnd();
  }
  bytes += oprot->writeFieldStop();
  bytes += oprot->writeStructEnd();
  return bytes;
}
// Exchanges the contents of two FireEventRequest instances.
void swap(FireEventRequest &a, FireEventRequest &b) {
  using ::std::swap;
  // Members are independent, so the swap order is irrelevant.
  swap(a.__isset, b.__isset);
  swap(a.partitionVals, b.partitionVals);
  swap(a.tableName, b.tableName);
  swap(a.dbName, b.dbName);
  swap(a.data, b.data);
  swap(a.successful, b.successful);
}
// Thrift-generated schema fingerprints used for runtime type-identity checks.
const char* FireEventResponse::ascii_fingerprint = "99914B932BD37A50B983C5E7C90AE93B";
const uint8_t FireEventResponse::binary_fingerprint[16] = {0x99,0x91,0x4B,0x93,0x2B,0xD3,0x7A,0x50,0xB9,0x83,0xC5,0xE7,0xC9,0x0A,0xE9,0x3B};
// Deserializes this empty struct: the struct declares no fields, so any
// field a newer peer sends is skipped. Returns the bytes consumed.
uint32_t FireEventResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    // No declared fields: drain whatever is on the wire.
    xfer += iprot->skip(ftype);
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes this empty struct: just the envelope and a stop marker.
uint32_t FireEventResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t bytes = oprot->writeStructBegin("FireEventResponse");
  bytes += oprot->writeFieldStop();
  bytes += oprot->writeStructEnd();
  return bytes;
}
// FireEventResponse has no members; nothing to exchange.
void swap(FireEventResponse &a, FireEventResponse &b) {
  // Silence unused-parameter warnings.
  static_cast<void>(a);
  static_cast<void>(b);
}
// MetaException: a single string field 'message' (field id 1). The same
// fingerprint/read/write/swap pattern is repeated for every one-message
// exception type generated from the IDL.
const char* MetaException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
const uint8_t MetaException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
// Deserializes MetaException from iprot; returns the bytes consumed.
// 'message' presence is recorded in __isset.message but not enforced.
uint32_t MetaException::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->message);
          this->__isset.message = true;
        } else {
          // Wire type mismatch: skip the value.
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip for forward compatibility.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
// Serializes MetaException to oprot; 'message' is always written.
uint32_t MetaException::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("MetaException");
  xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->message);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
// Exchanges the contents of two MetaException instances.
void swap(MetaException &a, MetaException &b) {
  using ::std::swap;
  swap(a.message, b.message);
  swap(a.__isset, b.__isset);
}
// UnknownTableException mirrors the MetaException wire layout: one string
// field 'message' (id 1); presence tracked in __isset on read, always written.
const char* UnknownTableException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
const uint8_t UnknownTableException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
uint32_t UnknownTableException::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->message);
          this->__isset.message = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip for forward compatibility.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t UnknownTableException::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("UnknownTableException");
  xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->message);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(UnknownTableException &a, UnknownTableException &b) {
  using ::std::swap;
  swap(a.message, b.message);
  swap(a.__isset, b.__isset);
}
// UnknownDBException mirrors the MetaException wire layout: one string
// field 'message' (id 1); presence tracked in __isset on read, always written.
const char* UnknownDBException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
const uint8_t UnknownDBException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
uint32_t UnknownDBException::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->message);
          this->__isset.message = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip for forward compatibility.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t UnknownDBException::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("UnknownDBException");
  xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->message);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(UnknownDBException &a, UnknownDBException &b) {
  using ::std::swap;
  swap(a.message, b.message);
  swap(a.__isset, b.__isset);
}
// AlreadyExistsException mirrors the MetaException wire layout: one string
// field 'message' (id 1); presence tracked in __isset on read, always written.
const char* AlreadyExistsException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
const uint8_t AlreadyExistsException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
uint32_t AlreadyExistsException::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->message);
          this->__isset.message = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip for forward compatibility.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t AlreadyExistsException::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("AlreadyExistsException");
  xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->message);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(AlreadyExistsException &a, AlreadyExistsException &b) {
  using ::std::swap;
  swap(a.message, b.message);
  swap(a.__isset, b.__isset);
}
// InvalidPartitionException mirrors the MetaException wire layout: one string
// field 'message' (id 1); presence tracked in __isset on read, always written.
const char* InvalidPartitionException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
const uint8_t InvalidPartitionException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
uint32_t InvalidPartitionException::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->message);
          this->__isset.message = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip for forward compatibility.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t InvalidPartitionException::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("InvalidPartitionException");
  xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->message);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(InvalidPartitionException &a, InvalidPartitionException &b) {
  using ::std::swap;
  swap(a.message, b.message);
  swap(a.__isset, b.__isset);
}
// UnknownPartitionException mirrors the MetaException wire layout: one string
// field 'message' (id 1); presence tracked in __isset on read, always written.
const char* UnknownPartitionException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
const uint8_t UnknownPartitionException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
uint32_t UnknownPartitionException::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->message);
          this->__isset.message = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip for forward compatibility.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t UnknownPartitionException::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("UnknownPartitionException");
  xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->message);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(UnknownPartitionException &a, UnknownPartitionException &b) {
  using ::std::swap;
  swap(a.message, b.message);
  swap(a.__isset, b.__isset);
}
// InvalidObjectException mirrors the MetaException wire layout: one string
// field 'message' (id 1); presence tracked in __isset on read, always written.
const char* InvalidObjectException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
const uint8_t InvalidObjectException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
uint32_t InvalidObjectException::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->message);
          this->__isset.message = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip for forward compatibility.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t InvalidObjectException::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("InvalidObjectException");
  xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->message);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(InvalidObjectException &a, InvalidObjectException &b) {
  using ::std::swap;
  swap(a.message, b.message);
  swap(a.__isset, b.__isset);
}
// NoSuchObjectException mirrors the MetaException wire layout: one string
// field 'message' (id 1); presence tracked in __isset on read, always written.
const char* NoSuchObjectException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
const uint8_t NoSuchObjectException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
uint32_t NoSuchObjectException::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->message);
          this->__isset.message = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip for forward compatibility.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t NoSuchObjectException::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("NoSuchObjectException");
  xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->message);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(NoSuchObjectException &a, NoSuchObjectException &b) {
  using ::std::swap;
  swap(a.message, b.message);
  swap(a.__isset, b.__isset);
}
// IndexAlreadyExistsException mirrors the MetaException wire layout: one string
// field 'message' (id 1); presence tracked in __isset on read, always written.
const char* IndexAlreadyExistsException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
const uint8_t IndexAlreadyExistsException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
uint32_t IndexAlreadyExistsException::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->message);
          this->__isset.message = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip for forward compatibility.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t IndexAlreadyExistsException::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("IndexAlreadyExistsException");
  xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->message);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(IndexAlreadyExistsException &a, IndexAlreadyExistsException &b) {
  using ::std::swap;
  swap(a.message, b.message);
  swap(a.__isset, b.__isset);
}
// InvalidOperationException mirrors the MetaException wire layout: one string
// field 'message' (id 1); presence tracked in __isset on read, always written.
const char* InvalidOperationException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
const uint8_t InvalidOperationException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
uint32_t InvalidOperationException::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->message);
          this->__isset.message = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip for forward compatibility.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t InvalidOperationException::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("InvalidOperationException");
  xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->message);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(InvalidOperationException &a, InvalidOperationException &b) {
  using ::std::swap;
  swap(a.message, b.message);
  swap(a.__isset, b.__isset);
}
// ConfigValSecurityException mirrors the MetaException wire layout: one string
// field 'message' (id 1); presence tracked in __isset on read, always written.
const char* ConfigValSecurityException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
const uint8_t ConfigValSecurityException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
uint32_t ConfigValSecurityException::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->message);
          this->__isset.message = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip for forward compatibility.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t ConfigValSecurityException::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("ConfigValSecurityException");
  xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->message);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) {
  using ::std::swap;
  swap(a.message, b.message);
  swap(a.__isset, b.__isset);
}
// InvalidInputException mirrors the MetaException wire layout: one string
// field 'message' (id 1); presence tracked in __isset on read, always written.
const char* InvalidInputException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
const uint8_t InvalidInputException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
uint32_t InvalidInputException::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->message);
          this->__isset.message = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip for forward compatibility.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t InvalidInputException::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("InvalidInputException");
  xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->message);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(InvalidInputException &a, InvalidInputException &b) {
  using ::std::swap;
  swap(a.message, b.message);
  swap(a.__isset, b.__isset);
}
// NoSuchTxnException mirrors the MetaException wire layout: one string
// field 'message' (id 1); presence tracked in __isset on read, always written.
const char* NoSuchTxnException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
const uint8_t NoSuchTxnException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
uint32_t NoSuchTxnException::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->message);
          this->__isset.message = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip for forward compatibility.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t NoSuchTxnException::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("NoSuchTxnException");
  xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->message);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(NoSuchTxnException &a, NoSuchTxnException &b) {
  using ::std::swap;
  swap(a.message, b.message);
  swap(a.__isset, b.__isset);
}
// TxnAbortedException mirrors the MetaException wire layout: one string
// field 'message' (id 1); presence tracked in __isset on read, always written.
const char* TxnAbortedException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
const uint8_t TxnAbortedException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
uint32_t TxnAbortedException::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->message);
          this->__isset.message = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip for forward compatibility.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t TxnAbortedException::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("TxnAbortedException");
  xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->message);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(TxnAbortedException &a, TxnAbortedException &b) {
  using ::std::swap;
  swap(a.message, b.message);
  swap(a.__isset, b.__isset);
}
// TxnOpenException mirrors the MetaException wire layout: one string
// field 'message' (id 1); presence tracked in __isset on read, always written.
const char* TxnOpenException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
const uint8_t TxnOpenException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
uint32_t TxnOpenException::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->message);
          this->__isset.message = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip for forward compatibility.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t TxnOpenException::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("TxnOpenException");
  xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->message);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(TxnOpenException &a, TxnOpenException &b) {
  using ::std::swap;
  swap(a.message, b.message);
  swap(a.__isset, b.__isset);
}
// NoSuchLockException mirrors the MetaException wire layout: one string
// field 'message' (id 1); presence tracked in __isset on read, always written.
const char* NoSuchLockException::ascii_fingerprint = "EFB929595D312AC8F305D5A794CFEDA1";
const uint8_t NoSuchLockException::binary_fingerprint[16] = {0xEF,0xB9,0x29,0x59,0x5D,0x31,0x2A,0xC8,0xF3,0x05,0xD5,0xA7,0x94,0xCF,0xED,0xA1};
uint32_t NoSuchLockException::read(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  using ::apache::thrift::protocol::TProtocolException;
  while (true)
  {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;
    }
    switch (fid)
    {
      case 1:
        if (ftype == ::apache::thrift::protocol::T_STRING) {
          xfer += iprot->readString(this->message);
          this->__isset.message = true;
        } else {
          xfer += iprot->skip(ftype);
        }
        break;
      default:
        // Unknown field id: skip for forward compatibility.
        xfer += iprot->skip(ftype);
        break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
uint32_t NoSuchLockException::write(::apache::thrift::protocol::TProtocol* oprot) const {
  uint32_t xfer = 0;
  xfer += oprot->writeStructBegin("NoSuchLockException");
  xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1);
  xfer += oprot->writeString(this->message);
  xfer += oprot->writeFieldEnd();
  xfer += oprot->writeFieldStop();
  xfer += oprot->writeStructEnd();
  return xfer;
}
void swap(NoSuchLockException &a, NoSuchLockException &b) {
  using ::std::swap;
  swap(a.message, b.message);
  swap(a.__isset, b.__isset);
}
}}} // namespace
| WANdisco/amplab-hive | metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp | C++ | apache-2.0 | 336,106 |
# $NetBSD: Makefile,v 1.2 2011/09/08 18:44:38 jmmv Exp $
# No manual pages are generated for this directory.
NOMAN= # defined
.include <bsd.own.mk>
# This directory installs data files only; it is not an ATF test program,
# so no Atffile is generated.
ATFFILE= no
TESTSDIR= ${TESTSBASE}/lib/libcurses
FILESDIR= ${TESTSDIR}/check_files
# Expected-output check files consumed by the libcurses test driver.
FILES= curses_start.chk
FILES+= addch.chk
FILES+= addchstr.chk
FILES+= addstr.chk
FILES+= attributes.chk
FILES+= bell.chk
# Background-attribute tests.
FILES+= background1.chk
FILES+= background2.chk
FILES+= background3.chk
FILES+= background4.chk
FILES+= background5.chk
FILES+= chgat1.chk
FILES+= chgat2.chk
FILES+= chgat3.chk
# Screen/window clearing tests.
FILES+= clear1.chk
FILES+= clear2.chk
FILES+= clear3.chk
FILES+= clear4.chk
FILES+= clear5.chk
FILES+= clear6.chk
FILES+= clear7.chk
FILES+= clear8.chk
FILES+= clear9.chk
FILES+= clear10.chk
# Color-handling tests.
FILES+= color_blank_draw.chk
FILES+= color_start.chk
FILES+= color_default.chk
FILES+= color_blue_back.chk
FILES+= color_red_fore.chk
FILES+= color_set.chk
# copywin() tests.
FILES+= copywin1.chk
FILES+= copywin2.chk
FILES+= copywin3.chk
FILES+= copywin4.chk
FILES+= copywin5.chk
FILES+= copywin6.chk
FILES+= copywin7.chk
FILES+= copywin8.chk
FILES+= copywin9.chk
FILES+= copywin10.chk
FILES+= copywin11.chk
FILES+= copywin12.chk
FILES+= copywin13.chk
FILES+= copywin14.chk
FILES+= curs_set1.chk
FILES+= curs_set2.chk
FILES+= curs_set3.chk
FILES+= fill.chk
FILES+= home.chk
FILES+= timeout.chk
# Border/window drawing and scrolling tests.
FILES+= box_standout.chk
FILES+= wborder.chk
FILES+= wborder_refresh.chk
FILES+= window.chk
FILES+= wscrl1.chk
FILES+= wscrl2.chk
FILES+= wgetstr.chk
FILES+= wgetstr_refresh.chk
FILES+= wprintw_refresh.chk
CLEANFILES=
.include <bsd.test.mk>
.include <bsd.files.mk>
.include <bsd.prog.mk>
| veritas-shine/minix3-rpi | tests/lib/libcurses/check_files/Makefile | Makefile | apache-2.0 | 1,599 |
#
# Copyright 2017 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package storage::hp::lefthand::snmp::mode::components::device;
use strict;
use warnings;
# $map_status maps the LeftHand numeric status codes to readable labels.
use storage::hp::lefthand::snmp::mode::components::resources qw($map_status);
# Column OIDs of the storageDeviceEntry SNMP table; entries with a "map"
# key are translated through $map_status when instances are decoded.
my $mapping = {
    storageDeviceSerialNumber => { oid => '.1.3.6.1.4.1.9804.3.1.1.2.4.2.1.7' },
    storageDeviceTemperature => { oid => '.1.3.6.1.4.1.9804.3.1.1.2.4.2.1.9' },
    storageDeviceTemperatureCritical => { oid => '.1.3.6.1.4.1.9804.3.1.1.2.4.2.1.10' },
    storageDeviceTemperatureLimit => { oid => '.1.3.6.1.4.1.9804.3.1.1.2.4.2.1.11' },
    storageDeviceTemperatureStatus => { oid => '.1.3.6.1.4.1.9804.3.1.1.2.4.2.1.12', map => $map_status },
    storageDeviceName => { oid => '.1.3.6.1.4.1.9804.3.1.1.2.4.2.1.14' },
    storageDeviceSmartHealth => { oid => '.1.3.6.1.4.1.9804.3.1.1.2.4.2.1.17' }, # normal, marginal, faulty
    storageDeviceState => { oid => '.1.3.6.1.4.1.9804.3.1.1.2.4.2.1.90' },
    storageDeviceStatus => { oid => '.1.3.6.1.4.1.9804.3.1.1.2.4.2.1.91', map => $map_status },
};
# Root of the table above; walked in one request by load().
my $oid_storageDeviceEntry = '.1.3.6.1.4.1.9804.3.1.1.2.4.2.1';
# Register the storageDeviceEntry subtree so the caller fetches it in the
# shared SNMP request batch before check() runs.
sub load {
    my $self = shift;

    push @{$self->{request}}, { oid => $oid_storageDeviceEntry };
}
# Walk every storage device instance, report status / SMART health /
# temperature, and emit one perfdata point per device temperature.
sub check {
    my ($self) = @_;

    $self->{output}->output_add(long_msg => "Checking devices");
    $self->{components}->{device} = {name => 'devices', total => 0, skip => 0};
    return if ($self->check_filter(section => 'device'));

    foreach my $oid ($self->{snmp}->oid_lex_sort(keys %{$self->{results}->{$oid_storageDeviceEntry}})) {
        # Iterate on the status column only; $1 is the table instance index.
        next if ($oid !~ /^$mapping->{storageDeviceStatus}->{oid}\.(.*)$/);
        my $instance = $1;
        my $result = $self->{snmp}->map_instance(mapping => $mapping, results => $self->{results}->{$oid_storageDeviceEntry}, instance => $instance);

        # Powered-off / removed devices are flagged absent, not faulty.
        if ($result->{storageDeviceState} =~ /off_and_secured|off_or_removed/i) {
            $self->absent_problem(section => 'device', instance => $instance);
            next;
        }
        next if ($self->check_filter(section => 'device', instance => $instance));
        $self->{components}->{device}->{total}++;

        $self->{output}->output_add(long_msg => sprintf("storage device '%s' status is '%s' [instance = %s, state = %s, serial = %s, smart health = %s]",
                                    $result->{storageDeviceName}, $result->{storageDeviceStatus}, $instance, $result->{storageDeviceState},
                                    $result->{storageDeviceSerialNumber}, $result->{storageDeviceSmartHealth}));

        # Overall device status.
        my $exit = $self->get_severity(label => 'default', section => 'device', value => $result->{storageDeviceStatus});
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit,
                                        short_msg => sprintf("storage device '%s' state is '%s'", $result->{storageDeviceName}, $result->{storageDeviceState}));
        }

        # SMART health status.
        $exit = $self->get_severity(label => 'smart', section => 'device.smart', value => $result->{storageDeviceSmartHealth});
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit,
                                        short_msg => sprintf("storage device '%s' smart health state is '%s'", $result->{storageDeviceName}, $result->{storageDeviceSmartHealth}));
        }

        # Temperature: user-configured thresholds win ($checked != 0);
        # otherwise fall back to the critical limit reported by the device.
        my ($exit2, $warn, $crit, $checked) = $self->get_severity_numeric(section => 'device.temperature', instance => $instance, value => $result->{storageDeviceTemperature});
        if ($checked == 0) {
            my $warn_th = '';
            my $crit_th = defined($result->{storageDeviceTemperatureCritical}) ? $result->{storageDeviceTemperatureCritical} : '';
            $self->{perfdata}->threshold_validate(label => 'warning-device.temperature-instance-' . $instance, value => $warn_th);
            $self->{perfdata}->threshold_validate(label => 'critical-device.temperature-instance-' . $instance, value => $crit_th);
            # BUGFIX: the fallback check must be stored in $exit2 (tested
            # below); the previous code assigned it to $exit, so the
            # device-provided temperature limit could never raise an alarm.
            $exit2 = $self->{perfdata}->threshold_check(
                value => $result->{storageDeviceTemperature},
                threshold => [ { label => 'critical-device.temperature-instance-' . $instance, exit_litteral => 'critical' },
                               { label => 'warning-device.temperature-instance-' . $instance, exit_litteral => 'warning' } ]);
            $warn = $self->{perfdata}->get_perfdata_for_output(label => 'warning-device.temperature-instance-' . $instance);
            $crit = $self->{perfdata}->get_perfdata_for_output(label => 'critical-device.temperature-instance-' . $instance);
        }
        if (!$self->{output}->is_status(value => $exit2, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit2,
                                        short_msg => sprintf("storage device '%s' temperature is %s C", $result->{storageDeviceName}, $result->{storageDeviceTemperature}));
        }
        $self->{output}->perfdata_add(label => 'temp_' . $result->{storageDeviceName}, unit => 'C',
                                      value => $result->{storageDeviceTemperature},
                                      warning => $warn,
                                      critical => $crit,
                                      max => $result->{storageDeviceTemperatureLimit},
                                      );
    }
}
1; | maksimatveev/centreon-plugins | storage/hp/lefthand/snmp/mode/components/device.pm | Perl | apache-2.0 | 6,350 |
// This file is part of libfringe, a low-level green threading library.
// Copyright (c) whitequark <whitequark@whitequark.org>
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
extern crate fringe;
use fringe::OsStack;
use fringe::generator::Generator;
#[test]
fn producer() {
    // Generator that yields the natural numbers 0, 1, 2, ... forever.
    let stack = OsStack::new(0).unwrap();
    let mut naturals = Generator::new(stack, move |yielder, ()| {
        let mut n = 0;
        loop {
            yielder.suspend(n);
            n += 1;
        }
    });
    // Pull the first three values and check them in order.
    for expected in 0..3 {
        assert_eq!(naturals.next(), Some(expected));
    }
    // The generator never terminates on its own; unwind it explicitly so
    // its stack can be reclaimed.
    unsafe { naturals.unsafe_unwrap(); }
}
| nathan7/libfringe | tests/iterator.rs | Rust | apache-2.0 | 810 |
#!/usr/bin/env bash
# Keystone bootstrap for the CSR1kv L3 demo: ensure the L3 admin tenant and
# a demo "viewer" user exist, and grant the service user (default: neutron)
# the admin role in both the L3 admin tenant and the service tenant.
# Usage: $0 [service-user]   -- assumes admin OS_* credentials are exported.
## Users, roles, tenants ##
adminUser=${1:-neutron}
adminRole=admin
l3AdminTenant=L3AdminTenant
serviceTenant=service
# Below user is just for demos so that we don't see all logical instances.
regularUser=viewer
password=viewer
echo -n "Checking if $l3AdminTenant tenant exists ..."
# "project show" prints an id row when the project exists, or a "No ..."
# message when it does not; the awk reduces that to the id or literal "No".
tenantId=`openstack project show $l3AdminTenant 2>&1 | awk '/No|id/ { if ($1 == "No") print "No"; else if ($2 == "id") print $4; }'`
if [ "$tenantId" == "No" ]; then
   echo " No, it does not. Creating it."
   tenantId=$(openstack project create $l3AdminTenant --domain="default" --or-show -f value -c id)
   echo $tenantId
else
   echo " Yes, it does."
fi
echo -n "Checking if $regularUser user exists ..."
# Same show/awk trick as above, this time for the demo user.
userId=`openstack user show $regularUser 2>&1 | awk '/No user|id/ { if ($1 == "No") print "No"; else print $4; }'`
if [ "$userId" == "No" ]; then
   echo " No, it does not. Creating it."
   userId=$(openstack user create $regularUser --password $password --domain="default" --or-show -f value -c id)
   echo $userId
else
   echo " Yes, it does."
fi
echo -n "Checking if $adminUser user has admin privileges in $l3AdminTenant tenant ..."
# Scan the role list for an "admin" entry; default answer is "No".
isAdmin=`openstack --os-username $adminUser --os-project-name $l3AdminTenant user role list 2>&1 | awk 'BEGIN { res="No" } { if ($4 == "admin") res="Yes"; } END { print res; }'`
if [ "$isAdmin" == "No" ]; then
    echo " No, it does not. Giving it admin rights."
    admUserId=`openstack user show $adminUser | awk '{ if ($2 == "id") print $4 }'`
    admRoleId=`openstack role show $adminRole | awk '{ if ($2 == "id") print $4 }'`
    openstack role add $admRoleId --user $admUserId --project $tenantId
else
    echo " Yes, it has."
fi
# What follows can be removed once L3AdminTenant is used to lookup UUID of L3AdminTenant
echo -n "Determining UUID of $serviceTenant tenant ..."
tenantId=`openstack project show $serviceTenant 2>&1 | awk '/No tenant|id/ { if ($1 == "No") print "No"; else if ($2 == "id") print $4; }'`
if [ "$tenantId" == "No" ]; then
    echo "Error: $serviceTenant tenant does not seem to exist. Aborting!"
    exit 1
else
    echo " Done."
fi
echo -n "Checking if $adminUser user has admin privileges in $serviceTenant tenant ..."
isAdmin=`openstack --os-username $adminUser --os-project-name $serviceTenant user role list 2>&1 | awk 'BEGIN { res="No" } { if ($4 == "admin") res="Yes"; } END { print res; }'`
if [ "$isAdmin" == "No" ]; then
    echo " No, it does not. Giving it admin rights."
    admUserId=`openstack user show $adminUser | awk '{ if ($2 == "id") print $4 }'`
    admRoleId=`openstack role show $adminRole | awk '{ if ($2 == "id") print $4 }'`
    # Reuses $tenantId, which now holds the service tenant's UUID.
    openstack role add $admRoleId --user $admUserId --project $tenantId
else
    echo " Yes, it has."
fi
| Gitweijie/first_project | devstack/csr1kv/setup_keystone_for_csr1kv_l3.sh | Shell | apache-2.0 | 2,739 |
/* This file is part of SableCC ( http://sablecc.org ).
*
* See the NOTICE file distributed with this work for copyright information.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sablecc.sablecc.semantics;
/** Semantic-analysis context; currently only holds the grammar under analysis. */
public class Context {
    // NOTE(review): never read or written in this file — presumably reserved
    // for future semantic passes; confirm before removing.
    private Grammar grammar;
}
| Herve-M/sablecc | src/org/sablecc/sablecc/semantics/Context.java | Java | apache-2.0 | 794 |
package network
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"net/http"
)
// InterfacesClient is the network Client for Azure network interface (NIC)
// operations. It embeds ManagementClient, which carries the base URI,
// subscription ID, and shared request pipeline.
type InterfacesClient struct {
	ManagementClient
}
// NewInterfacesClient creates an instance of the InterfacesClient client
// targeting the default (public cloud) endpoint.
func NewInterfacesClient(subscriptionID string) InterfacesClient {
	client := NewInterfacesClientWithBaseURI(DefaultBaseURI, subscriptionID)
	return client
}
// NewInterfacesClientWithBaseURI creates an instance of the InterfacesClient
// client against an explicit endpoint (e.g. a sovereign cloud).
func NewInterfacesClientWithBaseURI(baseURI string, subscriptionID string) InterfacesClient {
	client := InterfacesClient{NewWithBaseURI(baseURI, subscriptionID)}
	return client
}
// CreateOrUpdate creates or updates a network interface. This method may poll for completion. Polling can be canceled
// by passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP
// requests.
//
// resourceGroupName is the name of the resource group. networkInterfaceName is the name of the network interface.
// parameters is parameters supplied to the create or update network interface operation.
//
// Both returned channels are buffered (capacity 1) and closed by the worker
// goroutine. Note that a (possibly zero-valued) result is sent even on
// failure, so callers should drain errChan as well as the result channel.
func (client InterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters Interface, cancel <-chan struct{}) (<-chan Interface, <-chan error) {
	resultChan := make(chan Interface, 1)
	errChan := make(chan error, 1)
	go func() {
		var err error
		var result Interface
		// Runs last: publish error (if any), then the result, then close
		// both channels exactly once regardless of which step failed.
		defer func() {
			if err != nil {
				errChan <- err
			}
			resultChan <- result
			close(resultChan)
			close(errChan)
		}()
		req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkInterfaceName, parameters, cancel)
		if err != nil {
			err = autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", nil, "Failure preparing request")
			return
		}
		resp, err := client.CreateOrUpdateSender(req)
		if err != nil {
			result.Response = autorest.Response{Response: resp}
			err = autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", resp, "Failure sending request")
			return
		}
		result, err = client.CreateOrUpdateResponder(resp)
		if err != nil {
			err = autorest.NewErrorWithError(err, "network.InterfacesClient", "CreateOrUpdate", resp, "Failure responding to request")
		}
	}()
	return resultChan, errChan
}
// CreateOrUpdatePreparer builds the PUT request for CreateOrUpdate; the
// cancel channel is attached to the request so in-flight I/O can be aborted.
func (client InterfacesClient) CreateOrUpdatePreparer(resourceGroupName string, networkInterfaceName string, parameters Interface, cancel <-chan struct{}) (*http.Request, error) {
	const APIVersion = "2017-03-01"
	pathParameters := map[string]interface{}{
		"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
		"resourceGroupName":    autorest.Encode("path", resourceGroupName),
		"subscriptionId":       autorest.Encode("path", client.SubscriptionID),
	}
	queryParameters := map[string]interface{}{"api-version": APIVersion}
	decorators := []autorest.PrepareDecorator{
		autorest.AsJSON(),
		autorest.AsPut(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters),
		autorest.WithJSON(parameters),
		autorest.WithQueryParameters(queryParameters),
	}
	return autorest.CreatePreparer(decorators...).Prepare(&http.Request{Cancel: cancel})
}
// CreateOrUpdateSender submits the prepared CreateOrUpdate request, retrying
// on registration errors and polling the long-running operation. The method
// will close the http.Response Body if it receives an error.
func (client InterfacesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
	senders := []autorest.SendDecorator{
		azure.DoRetryWithRegistration(client.Client),
		azure.DoPollForAsynchronous(client.PollingDelay),
	}
	return autorest.SendWithSender(client, req, senders...)
}
// CreateOrUpdateResponder decodes the CreateOrUpdate response (200 or 201
// expected) into an Interface. The method always closes the response body.
func (client InterfacesClient) CreateOrUpdateResponder(resp *http.Response) (result Interface, err error) {
	responders := []autorest.RespondDecorator{
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing(),
	}
	err = autorest.Respond(resp, responders...)
	result.Response = autorest.Response{Response: resp}
	return
}
// Delete deletes the specified network interface. This method may poll for completion. Polling can be canceled by
// passing the cancel channel argument. The channel will be used to cancel polling and any outstanding HTTP requests.
//
// resourceGroupName is the name of the resource group. networkInterfaceName is the name of the network interface.
//
// Both returned channels are buffered (capacity 1) and closed by the worker
// goroutine; a response value is sent even on failure, so drain errChan too.
func (client InterfacesClient) Delete(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (<-chan autorest.Response, <-chan error) {
	resultChan := make(chan autorest.Response, 1)
	errChan := make(chan error, 1)
	go func() {
		var err error
		var result autorest.Response
		// Publish outcome and close both channels exactly once.
		defer func() {
			if err != nil {
				errChan <- err
			}
			resultChan <- result
			close(resultChan)
			close(errChan)
		}()
		req, err := client.DeletePreparer(resourceGroupName, networkInterfaceName, cancel)
		if err != nil {
			err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", nil, "Failure preparing request")
			return
		}
		resp, err := client.DeleteSender(req)
		if err != nil {
			result.Response = resp
			err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", resp, "Failure sending request")
			return
		}
		result, err = client.DeleteResponder(resp)
		if err != nil {
			err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Delete", resp, "Failure responding to request")
		}
	}()
	return resultChan, errChan
}
// DeletePreparer builds the DELETE request for Delete; the cancel channel is
// attached so in-flight I/O can be aborted.
func (client InterfacesClient) DeletePreparer(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (*http.Request, error) {
	const APIVersion = "2017-03-01"
	pathParameters := map[string]interface{}{
		"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
		"resourceGroupName":    autorest.Encode("path", resourceGroupName),
		"subscriptionId":       autorest.Encode("path", client.SubscriptionID),
	}
	queryParameters := map[string]interface{}{"api-version": APIVersion}
	decorators := []autorest.PrepareDecorator{
		autorest.AsDelete(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters),
		autorest.WithQueryParameters(queryParameters),
	}
	return autorest.CreatePreparer(decorators...).Prepare(&http.Request{Cancel: cancel})
}
// DeleteSender submits the prepared Delete request, retrying on registration
// errors and polling the long-running operation. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) DeleteSender(req *http.Request) (*http.Response, error) {
	senders := []autorest.SendDecorator{
		azure.DoRetryWithRegistration(client.Client),
		azure.DoPollForAsynchronous(client.PollingDelay),
	}
	return autorest.SendWithSender(client, req, senders...)
}
// DeleteResponder validates the Delete response (204, 202 or 200 expected).
// The method always closes the response body.
func (client InterfacesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
	responders := []autorest.RespondDecorator{
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK),
		autorest.ByClosing(),
	}
	err = autorest.Respond(resp, responders...)
	result.Response = resp
	return
}
// Get gets information about the specified network interface.
//
// resourceGroupName is the name of the resource group. networkInterfaceName is the name of the network interface.
// expand is expands referenced resources.
func (client InterfacesClient) Get(resourceGroupName string, networkInterfaceName string, expand string) (result Interface, err error) {
	req, err := client.GetPreparer(resourceGroupName, networkInterfaceName, expand)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", nil, "Failure preparing request")
	}
	resp, err := client.GetSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure sending request")
	}
	if result, err = client.GetResponder(resp); err != nil {
		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "Get", resp, "Failure responding to request")
	}
	return
}
// GetPreparer builds the GET request for Get; $expand is only added to the
// query string when a non-empty value was supplied.
func (client InterfacesClient) GetPreparer(resourceGroupName string, networkInterfaceName string, expand string) (*http.Request, error) {
	const APIVersion = "2017-03-01"
	pathParameters := map[string]interface{}{
		"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
		"resourceGroupName":    autorest.Encode("path", resourceGroupName),
		"subscriptionId":       autorest.Encode("path", client.SubscriptionID),
	}
	queryParameters := map[string]interface{}{"api-version": APIVersion}
	if len(expand) > 0 {
		queryParameters["$expand"] = autorest.Encode("query", expand)
	}
	decorators := []autorest.PrepareDecorator{
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}", pathParameters),
		autorest.WithQueryParameters(queryParameters),
	}
	return autorest.CreatePreparer(decorators...).Prepare(&http.Request{})
}
// GetSender submits the prepared Get request, retrying on registration
// errors. The method will close the http.Response Body if it receives an
// error.
func (client InterfacesClient) GetSender(req *http.Request) (*http.Response, error) {
	senders := []autorest.SendDecorator{azure.DoRetryWithRegistration(client.Client)}
	return autorest.SendWithSender(client, req, senders...)
}
// GetResponder decodes the Get response (200 expected) into an Interface.
// The method always closes the response body.
func (client InterfacesClient) GetResponder(resp *http.Response) (result Interface, err error) {
	responders := []autorest.RespondDecorator{
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing(),
	}
	err = autorest.Respond(resp, responders...)
	result.Response = autorest.Response{Response: resp}
	return
}
// GetEffectiveRouteTable gets all route tables applied to a network interface. This method may poll for completion.
// Polling can be canceled by passing the cancel channel argument. The channel will be used to cancel polling and any
// outstanding HTTP requests.
//
// resourceGroupName is the name of the resource group. networkInterfaceName is the name of the network interface.
//
// Both returned channels are buffered (capacity 1) and closed by the worker
// goroutine; a result value is sent even on failure, so drain errChan too.
func (client InterfacesClient) GetEffectiveRouteTable(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (<-chan EffectiveRouteListResult, <-chan error) {
	resultChan := make(chan EffectiveRouteListResult, 1)
	errChan := make(chan error, 1)
	go func() {
		var err error
		var result EffectiveRouteListResult
		// Publish outcome and close both channels exactly once.
		defer func() {
			if err != nil {
				errChan <- err
			}
			resultChan <- result
			close(resultChan)
			close(errChan)
		}()
		req, err := client.GetEffectiveRouteTablePreparer(resourceGroupName, networkInterfaceName, cancel)
		if err != nil {
			err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", nil, "Failure preparing request")
			return
		}
		resp, err := client.GetEffectiveRouteTableSender(req)
		if err != nil {
			result.Response = autorest.Response{Response: resp}
			err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", resp, "Failure sending request")
			return
		}
		result, err = client.GetEffectiveRouteTableResponder(resp)
		if err != nil {
			err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", resp, "Failure responding to request")
		}
	}()
	return resultChan, errChan
}
// GetEffectiveRouteTablePreparer builds the POST request for
// GetEffectiveRouteTable; the cancel channel is attached so in-flight I/O
// can be aborted.
func (client InterfacesClient) GetEffectiveRouteTablePreparer(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (*http.Request, error) {
	const APIVersion = "2017-03-01"
	pathParameters := map[string]interface{}{
		"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
		"resourceGroupName":    autorest.Encode("path", resourceGroupName),
		"subscriptionId":       autorest.Encode("path", client.SubscriptionID),
	}
	queryParameters := map[string]interface{}{"api-version": APIVersion}
	decorators := []autorest.PrepareDecorator{
		autorest.AsPost(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable", pathParameters),
		autorest.WithQueryParameters(queryParameters),
	}
	return autorest.CreatePreparer(decorators...).Prepare(&http.Request{Cancel: cancel})
}
// GetEffectiveRouteTableSender submits the prepared GetEffectiveRouteTable
// request, retrying on registration errors and polling the long-running
// operation. The method will close the http.Response Body if it receives an
// error.
func (client InterfacesClient) GetEffectiveRouteTableSender(req *http.Request) (*http.Response, error) {
	senders := []autorest.SendDecorator{
		azure.DoRetryWithRegistration(client.Client),
		azure.DoPollForAsynchronous(client.PollingDelay),
	}
	return autorest.SendWithSender(client, req, senders...)
}
// GetEffectiveRouteTableResponder decodes the GetEffectiveRouteTable
// response (200 or 202 expected). The method always closes the response
// body.
func (client InterfacesClient) GetEffectiveRouteTableResponder(resp *http.Response) (result EffectiveRouteListResult, err error) {
	responders := []autorest.RespondDecorator{
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing(),
	}
	err = autorest.Respond(resp, responders...)
	result.Response = autorest.Response{Response: resp}
	return
}
// GetVirtualMachineScaleSetNetworkInterface get the specified network interface in a virtual machine scale set.
//
// resourceGroupName is the name of the resource group. virtualMachineScaleSetName is the name of the virtual machine
// scale set. virtualmachineIndex is the virtual machine index. networkInterfaceName is the name of the network
// interface. expand is expands referenced resources.
func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterface(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (result Interface, err error) {
	req, err := client.GetVirtualMachineScaleSetNetworkInterfacePreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", nil, "Failure preparing request")
	}
	resp, err := client.GetVirtualMachineScaleSetNetworkInterfaceSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", resp, "Failure sending request")
	}
	if result, err = client.GetVirtualMachineScaleSetNetworkInterfaceResponder(resp); err != nil {
		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetVirtualMachineScaleSetNetworkInterface", resp, "Failure responding to request")
	}
	return
}
// GetVirtualMachineScaleSetNetworkInterfacePreparer builds the GET request
// for GetVirtualMachineScaleSetNetworkInterface. Note the scale-set route
// uses its own (compute) API version, distinct from the other NIC calls.
func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfacePreparer(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (*http.Request, error) {
	const APIVersion = "2017-03-30"
	pathParameters := map[string]interface{}{
		"networkInterfaceName":       autorest.Encode("path", networkInterfaceName),
		"resourceGroupName":          autorest.Encode("path", resourceGroupName),
		"subscriptionId":             autorest.Encode("path", client.SubscriptionID),
		"virtualmachineIndex":        autorest.Encode("path", virtualmachineIndex),
		"virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName),
	}
	queryParameters := map[string]interface{}{"api-version": APIVersion}
	if len(expand) > 0 {
		queryParameters["$expand"] = autorest.Encode("query", expand)
	}
	decorators := []autorest.PrepareDecorator{
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}", pathParameters),
		autorest.WithQueryParameters(queryParameters),
	}
	return autorest.CreatePreparer(decorators...).Prepare(&http.Request{})
}
// GetVirtualMachineScaleSetNetworkInterfaceSender submits the prepared
// request, retrying on registration errors. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceSender(req *http.Request) (*http.Response, error) {
	senders := []autorest.SendDecorator{azure.DoRetryWithRegistration(client.Client)}
	return autorest.SendWithSender(client, req, senders...)
}
// GetVirtualMachineScaleSetNetworkInterfaceResponder decodes the response
// (200 expected) into an Interface. The method always closes the response
// body.
func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceResponder(resp *http.Response) (result Interface, err error) {
	responders := []autorest.RespondDecorator{
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing(),
	}
	err = autorest.Respond(resp, responders...)
	result.Response = autorest.Response{Response: resp}
	return
}
// List gets all network interfaces in a resource group (first page only;
// see ListNextResults / ListComplete for paging).
//
// resourceGroupName is the name of the resource group.
func (client InterfacesClient) List(resourceGroupName string) (result InterfaceListResult, err error) {
	req, err := client.ListPreparer(resourceGroupName)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", nil, "Failure preparing request")
	}
	resp, err := client.ListSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure sending request")
	}
	if result, err = client.ListResponder(resp); err != nil {
		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure responding to request")
	}
	return
}
// ListPreparer builds the GET request for List.
func (client InterfacesClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
	const APIVersion = "2017-03-01"
	pathParameters := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", resourceGroupName),
		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
	}
	queryParameters := map[string]interface{}{"api-version": APIVersion}
	decorators := []autorest.PrepareDecorator{
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces", pathParameters),
		autorest.WithQueryParameters(queryParameters),
	}
	return autorest.CreatePreparer(decorators...).Prepare(&http.Request{})
}
// ListSender submits the prepared List request, retrying on registration
// errors. The method will close the http.Response Body if it receives an
// error.
func (client InterfacesClient) ListSender(req *http.Request) (*http.Response, error) {
	senders := []autorest.SendDecorator{azure.DoRetryWithRegistration(client.Client)}
	return autorest.SendWithSender(client, req, senders...)
}
// ListResponder decodes the List response (200 expected) into an
// InterfaceListResult. The method always closes the response body.
func (client InterfacesClient) ListResponder(resp *http.Response) (result InterfaceListResult, err error) {
	responders := []autorest.RespondDecorator{
		client.ByInspecting(),
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing(),
	}
	err = autorest.Respond(resp, responders...)
	result.Response = autorest.Response{Response: resp}
	return
}
// ListNextResults retrieves the next page of List results; it returns a
// zero-valued result (no error) when the previous page was the last one.
func (client InterfacesClient) ListNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) {
	req, err := lastResults.InterfaceListResultPreparer()
	if err != nil {
		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", nil, "Failure preparing next results request")
	}
	if req == nil {
		// No NextLink: nothing more to fetch.
		return
	}
	resp, err := client.ListSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure sending next results request")
	}
	if result, err = client.ListResponder(resp); err != nil {
		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "List", resp, "Failure responding to next results request")
	}
	return
}
// ListComplete gets all elements from the list without paging.
//
// Items are streamed on an unbuffered channel, so the producer goroutine
// blocks on each send until the consumer receives (or signals cancel). Both
// channels are closed when iteration ends; at most one error is delivered.
func (client InterfacesClient) ListComplete(resourceGroupName string, cancel <-chan struct{}) (<-chan Interface, <-chan error) {
	resultChan := make(chan Interface)
	errChan := make(chan error, 1)
	go func() {
		defer func() {
			close(resultChan)
			close(errChan)
		}()
		list, err := client.List(resourceGroupName)
		if err != nil {
			errChan <- err
			return
		}
		if list.Value != nil {
			for _, item := range *list.Value {
				// Either hand the item to the consumer or bail out on cancel.
				select {
				case <-cancel:
					return
				case resultChan <- item:
					// Intentionally left blank
				}
			}
		}
		// Follow NextLink until the service stops returning one.
		for list.NextLink != nil {
			list, err = client.ListNextResults(list)
			if err != nil {
				errChan <- err
				return
			}
			if list.Value != nil {
				for _, item := range *list.Value {
					select {
					case <-cancel:
						return
					case resultChan <- item:
						// Intentionally left blank
					}
				}
			}
		}
	}()
	return resultChan, errChan
}
// ListAll gets all network interfaces in a subscription (first page only;
// see ListAllNextResults for paging).
func (client InterfacesClient) ListAll() (result InterfaceListResult, err error) {
	req, err := client.ListAllPreparer()
	if err != nil {
		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", nil, "Failure preparing request")
	}
	resp, err := client.ListAllSender(req)
	if err != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure sending request")
	}
	if result, err = client.ListAllResponder(resp); err != nil {
		err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure responding to request")
	}
	return
}
// ListAllPreparer builds the GET request for ListAll (subscription-wide,
// no resource-group path segment).
func (client InterfacesClient) ListAllPreparer() (*http.Request, error) {
	const APIVersion = "2017-03-01"
	pathParameters := map[string]interface{}{
		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
	}
	queryParameters := map[string]interface{}{"api-version": APIVersion}
	decorators := []autorest.PrepareDecorator{
		autorest.AsGet(),
		autorest.WithBaseURL(client.BaseURI),
		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces", pathParameters),
		autorest.WithQueryParameters(queryParameters),
	}
	return autorest.CreatePreparer(decorators...).Prepare(&http.Request{})
}
// ListAllSender sends the ListAll request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) ListAllSender(req *http.Request) (*http.Response, error) {
	// Retry transient failures, re-registering the resource provider if needed.
	retryDecorator := azure.DoRetryWithRegistration(client.Client)
	return autorest.SendWithSender(client, req, retryDecorator)
}
// ListAllResponder handles the response to the ListAll request. The method always
// closes the http.Response Body.
// Only HTTP 200 is accepted; the body is JSON-decoded into result.
func (client InterfacesClient) ListAllResponder(resp *http.Response) (result InterfaceListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
// Attach the raw response even when decoding failed, so callers can inspect it.
result.Response = autorest.Response{Response: resp}
return
}
// ListAllNextResults retrieves the next set of results, if any.
// A zero-value result with nil error means lastResults had no NextLink.
func (client InterfacesClient) ListAllNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) {
req, err := lastResults.InterfaceListResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", nil, "Failure preparing next results request")
}
// nil request signals that there is no next page.
if req == nil {
return
}
resp, err := client.ListAllSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure sending next results request")
}
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListAll", resp, "Failure responding to next results request")
}
return
}
// ListAllComplete gets all elements from the list without paging.
// It streams every Interface in the subscription over the result channel,
// transparently following NextLink. Both channels are closed on exit; errChan
// is buffered (capacity 1) so a single error can be delivered without a
// reader. Signalling cancel aborts the stream early.
func (client InterfacesClient) ListAllComplete(cancel <-chan struct{}) (<-chan Interface, <-chan error) {
resultChan := make(chan Interface)
errChan := make(chan error, 1)
go func() {
defer func() {
close(resultChan)
close(errChan)
}()
// First page.
list, err := client.ListAll()
if err != nil {
errChan <- err
return
}
if list.Value != nil {
for _, item := range *list.Value {
select {
case <-cancel:
return
case resultChan <- item:
// Intentionally left blank
}
}
}
// Remaining pages.
for list.NextLink != nil {
list, err = client.ListAllNextResults(list)
if err != nil {
errChan <- err
return
}
if list.Value != nil {
for _, item := range *list.Value {
select {
case <-cancel:
return
case resultChan <- item:
// Intentionally left blank
}
}
}
}
}()
return resultChan, errChan
}
// ListEffectiveNetworkSecurityGroups gets all network security groups applied to a network interface. This method may
// poll for completion. Polling can be canceled by passing the cancel channel argument. The channel will be used to
// cancel polling and any outstanding HTTP requests.
//
// resourceGroupName is the name of the resource group. networkInterfaceName is the name of the network interface.
func (client InterfacesClient) ListEffectiveNetworkSecurityGroups(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (<-chan EffectiveNetworkSecurityGroupListResult, <-chan error) {
// Both channels are buffered (capacity 1): the goroutine always sends one
// result (possibly zero-valued) and at most one error, then closes both,
// so it never blocks on an absent reader.
resultChan := make(chan EffectiveNetworkSecurityGroupListResult, 1)
errChan := make(chan error, 1)
go func() {
var err error
var result EffectiveNetworkSecurityGroupListResult
defer func() {
// The error (if any) is delivered before the result.
if err != nil {
errChan <- err
}
resultChan <- result
close(resultChan)
close(errChan)
}()
req, err := client.ListEffectiveNetworkSecurityGroupsPreparer(resourceGroupName, networkInterfaceName, cancel)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", nil, "Failure preparing request")
return
}
resp, err := client.ListEffectiveNetworkSecurityGroupsSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", resp, "Failure sending request")
return
}
result, err = client.ListEffectiveNetworkSecurityGroupsResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", resp, "Failure responding to request")
}
}()
return resultChan, errChan
}
// ListEffectiveNetworkSecurityGroupsPreparer prepares the ListEffectiveNetworkSecurityGroups request.
// The cancel channel is wired into the http.Request so in-flight calls can be aborted.
func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsPreparer(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (*http.Request, error) {
pathParameters := map[string]interface{}{
"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2017-03-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
// POST: this is an action endpoint, not a resource GET.
preparer := autorest.CreatePreparer(
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{Cancel: cancel})
}
// ListEffectiveNetworkSecurityGroupsSender sends the ListEffectiveNetworkSecurityGroups request. The method will close the
// http.Response Body if it receives an error.
// Unlike the plain list senders, this one also polls the long-running
// operation to completion using the client's polling delay.
func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client,
req,
azure.DoRetryWithRegistration(client.Client),
azure.DoPollForAsynchronous(client.PollingDelay))
}
// ListEffectiveNetworkSecurityGroupsResponder handles the response to the ListEffectiveNetworkSecurityGroups request. The method always
// closes the http.Response Body.
// Accepts 200 and 202 (the operation may still be in progress when polling returns).
func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsResponder(resp *http.Response) (result EffectiveNetworkSecurityGroupListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListVirtualMachineScaleSetNetworkInterfaces gets all network interfaces in a virtual machine scale set.
//
// resourceGroupName is the name of the resource group. virtualMachineScaleSetName is the name of the virtual machine
// scale set.
// Only the first page is returned; use the NextResults/Complete variants for the rest.
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfaces(resourceGroupName string, virtualMachineScaleSetName string) (result InterfaceListResult, err error) {
req, err := client.ListVirtualMachineScaleSetNetworkInterfacesPreparer(resourceGroupName, virtualMachineScaleSetName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", nil, "Failure preparing request")
return
}
resp, err := client.ListVirtualMachineScaleSetNetworkInterfacesSender(req)
if err != nil {
// Preserve the raw response for callers even though sending failed.
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure sending request")
return
}
result, err = client.ListVirtualMachineScaleSetNetworkInterfacesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure responding to request")
}
return
}
// ListVirtualMachineScaleSetNetworkInterfacesPreparer prepares the ListVirtualMachineScaleSetNetworkInterfaces request.
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesPreparer(resourceGroupName string, virtualMachineScaleSetName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName),
}
// Note: this endpoint lives under microsoft.Compute and uses a different
// API version (2017-03-30) from the rest of this network client.
const APIVersion = "2017-03-30"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{})
}
// ListVirtualMachineScaleSetNetworkInterfacesSender sends the ListVirtualMachineScaleSetNetworkInterfaces request. The method will close the
// http.Response Body if it receives an error.
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesSender(req *http.Request) (*http.Response, error) {
	// Retry transient failures, re-registering the resource provider if needed.
	retryDecorator := azure.DoRetryWithRegistration(client.Client)
	return autorest.SendWithSender(client, req, retryDecorator)
}
// ListVirtualMachineScaleSetNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetNetworkInterfaces request. The method always
// closes the http.Response Body.
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesResponder(resp *http.Response) (result InterfaceListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
// Attach the raw response even when decoding failed, so callers can inspect it.
result.Response = autorest.Response{Response: resp}
return
}
// ListVirtualMachineScaleSetNetworkInterfacesNextResults retrieves the next set of results, if any.
// A zero-value result with nil error means lastResults had no NextLink.
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) {
req, err := lastResults.InterfaceListResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", nil, "Failure preparing next results request")
}
// nil request signals that there is no next page.
if req == nil {
return
}
resp, err := client.ListVirtualMachineScaleSetNetworkInterfacesSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure sending next results request")
}
result, err = client.ListVirtualMachineScaleSetNetworkInterfacesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetNetworkInterfaces", resp, "Failure responding to next results request")
}
return
}
// ListVirtualMachineScaleSetNetworkInterfacesComplete gets all elements from the list without paging.
// Streams every Interface in the scale set over the result channel, following
// NextLink transparently. Both channels are closed on exit; errChan is
// buffered (capacity 1) so a single error can be delivered without a reader.
// Signalling cancel aborts the stream early.
func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesComplete(resourceGroupName string, virtualMachineScaleSetName string, cancel <-chan struct{}) (<-chan Interface, <-chan error) {
resultChan := make(chan Interface)
errChan := make(chan error, 1)
go func() {
defer func() {
close(resultChan)
close(errChan)
}()
// First page.
list, err := client.ListVirtualMachineScaleSetNetworkInterfaces(resourceGroupName, virtualMachineScaleSetName)
if err != nil {
errChan <- err
return
}
if list.Value != nil {
for _, item := range *list.Value {
select {
case <-cancel:
return
case resultChan <- item:
// Intentionally left blank
}
}
}
// Remaining pages.
for list.NextLink != nil {
list, err = client.ListVirtualMachineScaleSetNetworkInterfacesNextResults(list)
if err != nil {
errChan <- err
return
}
if list.Value != nil {
for _, item := range *list.Value {
select {
case <-cancel:
return
case resultChan <- item:
// Intentionally left blank
}
}
}
}
}()
return resultChan, errChan
}
// ListVirtualMachineScaleSetVMNetworkInterfaces gets information about all network interfaces in a virtual machine in
// a virtual machine scale set.
//
// resourceGroupName is the name of the resource group. virtualMachineScaleSetName is the name of the virtual machine
// scale set. virtualmachineIndex is the virtual machine index.
// Only the first page is returned; use the NextResults/Complete variants for the rest.
func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfaces(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (result InterfaceListResult, err error) {
req, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesPreparer(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", nil, "Failure preparing request")
return
}
resp, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesSender(req)
if err != nil {
// Preserve the raw response for callers even though sending failed.
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure sending request")
return
}
result, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure responding to request")
}
return
}
// ListVirtualMachineScaleSetVMNetworkInterfacesPreparer prepares the ListVirtualMachineScaleSetVMNetworkInterfaces request.
func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesPreparer(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"virtualmachineIndex": autorest.Encode("path", virtualmachineIndex),
"virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName),
}
// Note: this endpoint lives under microsoft.Compute and uses a different
// API version (2017-03-30) from the rest of this network client.
const APIVersion = "2017-03-30"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{})
}
// ListVirtualMachineScaleSetVMNetworkInterfacesSender sends the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method will close the
// http.Response Body if it receives an error.
// Retries transient failures, re-registering the resource provider if needed.
func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client,
req,
azure.DoRetryWithRegistration(client.Client))
}
// ListVirtualMachineScaleSetVMNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method always
// closes the http.Response Body.
func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp *http.Response) (result InterfaceListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
// Attach the raw response even when decoding failed, so callers can inspect it.
result.Response = autorest.Response{Response: resp}
return
}
// ListVirtualMachineScaleSetVMNetworkInterfacesNextResults retrieves the next set of results, if any.
// A zero-value result with nil error means lastResults had no NextLink.
func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesNextResults(lastResults InterfaceListResult) (result InterfaceListResult, err error) {
req, err := lastResults.InterfaceListResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", nil, "Failure preparing next results request")
}
// nil request signals that there is no next page.
if req == nil {
return
}
resp, err := client.ListVirtualMachineScaleSetVMNetworkInterfacesSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure sending next results request")
}
result, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListVirtualMachineScaleSetVMNetworkInterfaces", resp, "Failure responding to next results request")
}
return
}
// ListVirtualMachineScaleSetVMNetworkInterfacesComplete gets all elements from the list without paging.
// Streams every Interface of the given scale-set VM over the result channel,
// following NextLink transparently. Both channels are closed on exit; errChan
// is buffered (capacity 1) so a single error can be delivered without a
// reader. Signalling cancel aborts the stream early.
func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesComplete(resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, cancel <-chan struct{}) (<-chan Interface, <-chan error) {
resultChan := make(chan Interface)
errChan := make(chan error, 1)
go func() {
defer func() {
close(resultChan)
close(errChan)
}()
// First page.
list, err := client.ListVirtualMachineScaleSetVMNetworkInterfaces(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex)
if err != nil {
errChan <- err
return
}
if list.Value != nil {
for _, item := range *list.Value {
select {
case <-cancel:
return
case resultChan <- item:
// Intentionally left blank
}
}
}
// Remaining pages.
for list.NextLink != nil {
list, err = client.ListVirtualMachineScaleSetVMNetworkInterfacesNextResults(list)
if err != nil {
errChan <- err
return
}
if list.Value != nil {
for _, item := range *list.Value {
select {
case <-cancel:
return
case resultChan <- item:
// Intentionally left blank
}
}
}
}
}()
return resultChan, errChan
}
| wojtekzw/imageproxy | vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-03-01/network/interfaces.go | GO | apache-2.0 | 42,507 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.operator;
import com.facebook.presto.sql.planner.plan.PlanNodeId;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.collect.ImmutableList;
import io.airlift.units.DataSize;
import io.airlift.units.Duration;
import javax.annotation.Nullable;
import javax.annotation.concurrent.Immutable;
import java.util.Optional;
import static com.google.common.base.Preconditions.checkArgument;
import static io.airlift.units.DataSize.succinctBytes;
import static java.util.Objects.requireNonNull;
import static java.util.concurrent.TimeUnit.NANOSECONDS;
/**
 * Immutable snapshot of the runtime statistics collected for one operator
 * (identified by operatorId / planNodeId / operatorType) across the drivers
 * of a task: call counts, wall/CPU/user time per lifecycle phase
 * (addInput / getOutput / finish), data sizes, positions, blocking time, and
 * memory reservations.
 *
 * <p>Instances are value objects: {@link #add} and {@link #summarize} return
 * new objects rather than mutating this one. The class is serialized to and
 * from JSON via Jackson.
 */
@Immutable
public class OperatorStats
{
private final int operatorId;
private final PlanNodeId planNodeId;
private final String operatorType;
private final long totalDrivers;
private final long addInputCalls;
private final Duration addInputWall;
private final Duration addInputCpu;
private final Duration addInputUser;
private final DataSize inputDataSize;
private final long inputPositions;
private final double sumSquaredInputPositions;
private final long getOutputCalls;
private final Duration getOutputWall;
private final Duration getOutputCpu;
private final Duration getOutputUser;
private final DataSize outputDataSize;
private final long outputPositions;
private final Duration blockedWall;
private final long finishCalls;
private final Duration finishWall;
private final Duration finishCpu;
private final Duration finishUser;
private final DataSize memoryReservation;
private final DataSize systemMemoryReservation;
private final Optional<BlockedReason> blockedReason;
// May be null; only merged across operators when the concrete type is Mergeable.
private final OperatorInfo info;
@JsonCreator
public OperatorStats(
@JsonProperty("operatorId") int operatorId,
@JsonProperty("planNodeId") PlanNodeId planNodeId,
@JsonProperty("operatorType") String operatorType,
@JsonProperty("totalDrivers") long totalDrivers,
@JsonProperty("addInputCalls") long addInputCalls,
@JsonProperty("addInputWall") Duration addInputWall,
@JsonProperty("addInputCpu") Duration addInputCpu,
@JsonProperty("addInputUser") Duration addInputUser,
@JsonProperty("inputDataSize") DataSize inputDataSize,
@JsonProperty("inputPositions") long inputPositions,
@JsonProperty("sumSquaredInputPositions") double sumSquaredInputPositions,
@JsonProperty("getOutputCalls") long getOutputCalls,
@JsonProperty("getOutputWall") Duration getOutputWall,
@JsonProperty("getOutputCpu") Duration getOutputCpu,
@JsonProperty("getOutputUser") Duration getOutputUser,
@JsonProperty("outputDataSize") DataSize outputDataSize,
@JsonProperty("outputPositions") long outputPositions,
@JsonProperty("blockedWall") Duration blockedWall,
@JsonProperty("finishCalls") long finishCalls,
@JsonProperty("finishWall") Duration finishWall,
@JsonProperty("finishCpu") Duration finishCpu,
@JsonProperty("finishUser") Duration finishUser,
@JsonProperty("memoryReservation") DataSize memoryReservation,
@JsonProperty("systemMemoryReservation") DataSize systemMemoryReservation,
@JsonProperty("blockedReason") Optional<BlockedReason> blockedReason,
@JsonProperty("info") OperatorInfo info)
{
checkArgument(operatorId >= 0, "operatorId is negative");
this.operatorId = operatorId;
this.planNodeId = requireNonNull(planNodeId, "planNodeId is null");
this.operatorType = requireNonNull(operatorType, "operatorType is null");
this.totalDrivers = totalDrivers;
this.addInputCalls = addInputCalls;
this.addInputWall = requireNonNull(addInputWall, "addInputWall is null");
this.addInputCpu = requireNonNull(addInputCpu, "addInputCpu is null");
this.addInputUser = requireNonNull(addInputUser, "addInputUser is null");
this.inputDataSize = requireNonNull(inputDataSize, "inputDataSize is null");
checkArgument(inputPositions >= 0, "inputPositions is negative");
this.inputPositions = inputPositions;
this.sumSquaredInputPositions = sumSquaredInputPositions;
this.getOutputCalls = getOutputCalls;
this.getOutputWall = requireNonNull(getOutputWall, "getOutputWall is null");
this.getOutputCpu = requireNonNull(getOutputCpu, "getOutputCpu is null");
this.getOutputUser = requireNonNull(getOutputUser, "getOutputUser is null");
this.outputDataSize = requireNonNull(outputDataSize, "outputDataSize is null");
checkArgument(outputPositions >= 0, "outputPositions is negative");
this.outputPositions = outputPositions;
this.blockedWall = requireNonNull(blockedWall, "blockedWall is null");
this.finishCalls = finishCalls;
this.finishWall = requireNonNull(finishWall, "finishWall is null");
this.finishCpu = requireNonNull(finishCpu, "finishCpu is null");
this.finishUser = requireNonNull(finishUser, "finishUser is null");
this.memoryReservation = requireNonNull(memoryReservation, "memoryReservation is null");
this.systemMemoryReservation = requireNonNull(systemMemoryReservation, "systemMemoryReservation is null");
// NOTE(review): blockedReason and info are intentionally not null-checked here;
// info may legitimately be null (see getInfo()).
this.blockedReason = blockedReason;
this.info = info;
}
@JsonProperty
public int getOperatorId()
{
return operatorId;
}
@JsonProperty
public PlanNodeId getPlanNodeId()
{
return planNodeId;
}
@JsonProperty
public String getOperatorType()
{
return operatorType;
}
@JsonProperty
public long getTotalDrivers()
{
return totalDrivers;
}
@JsonProperty
public long getAddInputCalls()
{
return addInputCalls;
}
@JsonProperty
public Duration getAddInputWall()
{
return addInputWall;
}
@JsonProperty
public Duration getAddInputCpu()
{
return addInputCpu;
}
@JsonProperty
public Duration getAddInputUser()
{
return addInputUser;
}
@JsonProperty
public DataSize getInputDataSize()
{
return inputDataSize;
}
@JsonProperty
public long getInputPositions()
{
return inputPositions;
}
@JsonProperty
public double getSumSquaredInputPositions()
{
return sumSquaredInputPositions;
}
@JsonProperty
public long getGetOutputCalls()
{
return getOutputCalls;
}
@JsonProperty
public Duration getGetOutputWall()
{
return getOutputWall;
}
@JsonProperty
public Duration getGetOutputCpu()
{
return getOutputCpu;
}
@JsonProperty
public Duration getGetOutputUser()
{
return getOutputUser;
}
@JsonProperty
public DataSize getOutputDataSize()
{
return outputDataSize;
}
@JsonProperty
public long getOutputPositions()
{
return outputPositions;
}
@JsonProperty
public Duration getBlockedWall()
{
return blockedWall;
}
@JsonProperty
public long getFinishCalls()
{
return finishCalls;
}
@JsonProperty
public Duration getFinishWall()
{
return finishWall;
}
@JsonProperty
public Duration getFinishCpu()
{
return finishCpu;
}
@JsonProperty
public Duration getFinishUser()
{
return finishUser;
}
@JsonProperty
public DataSize getMemoryReservation()
{
return memoryReservation;
}
@JsonProperty
public DataSize getSystemMemoryReservation()
{
return systemMemoryReservation;
}
@JsonProperty
public Optional<BlockedReason> getBlockedReason()
{
return blockedReason;
}
@Nullable
@JsonProperty
public OperatorInfo getInfo()
{
return info;
}
// Convenience varargs overload; delegates to add(Iterable).
public OperatorStats add(OperatorStats... operators)
{
return add(ImmutableList.copyOf(operators));
}
/**
 * Returns a new OperatorStats that is the element-wise sum of this instance
 * and all given operators. All operators must have the same operatorId.
 * Durations are accumulated as nanosecond longs to avoid intermediate
 * Duration allocations, then converted back once at the end.
 */
public OperatorStats add(Iterable<OperatorStats> operators)
{
long totalDrivers = this.totalDrivers;
long addInputCalls = this.addInputCalls;
long addInputWall = this.addInputWall.roundTo(NANOSECONDS);
long addInputCpu = this.addInputCpu.roundTo(NANOSECONDS);
long addInputUser = this.addInputUser.roundTo(NANOSECONDS);
long inputDataSize = this.inputDataSize.toBytes();
long inputPositions = this.inputPositions;
double sumSquaredInputPositions = this.sumSquaredInputPositions;
long getOutputCalls = this.getOutputCalls;
long getOutputWall = this.getOutputWall.roundTo(NANOSECONDS);
long getOutputCpu = this.getOutputCpu.roundTo(NANOSECONDS);
long getOutputUser = this.getOutputUser.roundTo(NANOSECONDS);
long outputDataSize = this.outputDataSize.toBytes();
long outputPositions = this.outputPositions;
long blockedWall = this.blockedWall.roundTo(NANOSECONDS);
long finishCalls = this.finishCalls;
long finishWall = this.finishWall.roundTo(NANOSECONDS);
long finishCpu = this.finishCpu.roundTo(NANOSECONDS);
long finishUser = this.finishUser.roundTo(NANOSECONDS);
long memoryReservation = this.memoryReservation.toBytes();
long systemMemoryReservation = this.systemMemoryReservation.toBytes();
Optional<BlockedReason> blockedReason = this.blockedReason;
Mergeable<OperatorInfo> base = getMergeableInfoOrNull(info);
for (OperatorStats operator : operators) {
checkArgument(operator.getOperatorId() == operatorId, "Expected operatorId to be %s but was %s", operatorId, operator.getOperatorId());
totalDrivers += operator.totalDrivers;
addInputCalls += operator.getAddInputCalls();
addInputWall += operator.getAddInputWall().roundTo(NANOSECONDS);
addInputCpu += operator.getAddInputCpu().roundTo(NANOSECONDS);
addInputUser += operator.getAddInputUser().roundTo(NANOSECONDS);
inputDataSize += operator.getInputDataSize().toBytes();
inputPositions += operator.getInputPositions();
sumSquaredInputPositions += operator.getSumSquaredInputPositions();
getOutputCalls += operator.getGetOutputCalls();
getOutputWall += operator.getGetOutputWall().roundTo(NANOSECONDS);
getOutputCpu += operator.getGetOutputCpu().roundTo(NANOSECONDS);
getOutputUser += operator.getGetOutputUser().roundTo(NANOSECONDS);
outputDataSize += operator.getOutputDataSize().toBytes();
outputPositions += operator.getOutputPositions();
finishCalls += operator.getFinishCalls();
finishWall += operator.getFinishWall().roundTo(NANOSECONDS);
finishCpu += operator.getFinishCpu().roundTo(NANOSECONDS);
finishUser += operator.getFinishUser().roundTo(NANOSECONDS);
blockedWall += operator.getBlockedWall().roundTo(NANOSECONDS);
memoryReservation += operator.getMemoryReservation().toBytes();
systemMemoryReservation += operator.getSystemMemoryReservation().toBytes();
// Last operator with a present blocked reason wins.
if (operator.getBlockedReason().isPresent()) {
blockedReason = operator.getBlockedReason();
}
// Merge OperatorInfo only when both sides share the same concrete class.
OperatorInfo info = operator.getInfo();
if (base != null && info != null && base.getClass() == info.getClass()) {
base = mergeInfo(base, info);
}
}
return new OperatorStats(
operatorId,
planNodeId,
operatorType,
totalDrivers,
addInputCalls,
new Duration(addInputWall, NANOSECONDS).convertToMostSuccinctTimeUnit(),
new Duration(addInputCpu, NANOSECONDS).convertToMostSuccinctTimeUnit(),
new Duration(addInputUser, NANOSECONDS).convertToMostSuccinctTimeUnit(),
succinctBytes(inputDataSize),
inputPositions,
sumSquaredInputPositions,
getOutputCalls,
new Duration(getOutputWall, NANOSECONDS).convertToMostSuccinctTimeUnit(),
new Duration(getOutputCpu, NANOSECONDS).convertToMostSuccinctTimeUnit(),
new Duration(getOutputUser, NANOSECONDS).convertToMostSuccinctTimeUnit(),
succinctBytes(outputDataSize),
outputPositions,
new Duration(blockedWall, NANOSECONDS).convertToMostSuccinctTimeUnit(),
finishCalls,
new Duration(finishWall, NANOSECONDS).convertToMostSuccinctTimeUnit(),
new Duration(finishCpu, NANOSECONDS).convertToMostSuccinctTimeUnit(),
new Duration(finishUser, NANOSECONDS).convertToMostSuccinctTimeUnit(),
succinctBytes(memoryReservation),
succinctBytes(systemMemoryReservation),
blockedReason,
(OperatorInfo) base);
}
// Returns info viewed as Mergeable when the concrete type supports merging,
// otherwise null. The unchecked cast is safe: Mergeable OperatorInfos merge
// with their own type by contract.
@SuppressWarnings("unchecked")
private static Mergeable<OperatorInfo> getMergeableInfoOrNull(OperatorInfo info)
{
Mergeable<OperatorInfo> base = null;
if (info instanceof Mergeable) {
base = (Mergeable<OperatorInfo>) info;
}
return base;
}
// Helper to keep the unchecked cast of mergeWith's result in one place.
@SuppressWarnings("unchecked")
private static <T> Mergeable<T> mergeInfo(Mergeable<T> base, T other)
{
return (Mergeable<T>) base.mergeWith(other);
}
/**
 * Returns a copy suitable for reporting upward: identical stats, but any
 * non-final OperatorInfo is dropped so transient details are not exposed.
 */
public OperatorStats summarize()
{
return new OperatorStats(
operatorId,
planNodeId,
operatorType,
totalDrivers,
addInputCalls,
addInputWall,
addInputCpu,
addInputUser,
inputDataSize,
inputPositions,
sumSquaredInputPositions,
getOutputCalls,
getOutputWall,
getOutputCpu,
getOutputUser,
outputDataSize,
outputPositions,
blockedWall,
finishCalls,
finishWall,
finishCpu,
finishUser,
memoryReservation,
systemMemoryReservation,
blockedReason,
(info != null && info.isFinal()) ? info : null);
}
}
| marsorp/blog | presto166/presto-main/src/main/java/com/facebook/presto/operator/OperatorStats.java | Java | apache-2.0 | 15,302 |
// See www.openfst.org for extensive documentation on this weighted
// finite-state transducer library.
//
// Class to compute the difference between two FSAs.
#ifndef FST_DIFFERENCE_H_
#define FST_DIFFERENCE_H_
#include <memory>
#include <fst/cache.h>
#include <fst/complement.h>
#include <fst/compose.h>
namespace fst {
// Options for DifferenceFst. This is a thin alias over ComposeFstOptions:
// it forwards the matcher, filter, and state-table customization points
// (plus cache options) to the underlying composition that implements the
// difference.
template <class Arc, class M = Matcher<Fst<Arc>>,
class Filter = SequenceComposeFilter<M>,
class StateTable =
GenericComposeStateTable<Arc, typename Filter::FilterState>>
struct DifferenceFstOptions
: public ComposeFstOptions<Arc, M, Filter, StateTable> {
explicit DifferenceFstOptions(const CacheOptions &opts = CacheOptions(),
M *matcher1 = nullptr, M *matcher2 = nullptr,
Filter *filter = nullptr,
StateTable *state_table = nullptr)
: ComposeFstOptions<Arc, M, Filter, StateTable>(opts, matcher1, matcher2,
filter, state_table) {}
};
// Computes the difference between two FSAs. This version is a delayed FST.
// Only strings that are in the first automaton but not in second are retained
// in the result.
//
// The first argument must be an acceptor; the second argument must be an
// unweighted, epsilon-free, deterministic acceptor. One of the arguments must
// be label-sorted.
//
// Complexity: same as ComposeFst.
//
// Caveats: same as ComposeFst.
template <class A>
class DifferenceFst : public ComposeFst<A> {
 public:
  using Arc = A;
  using Weight = typename Arc::Weight;
  using StateId = typename Arc::StateId;
  using ComposeFst<Arc>::CreateBase1;
  // A - B = A ^ B', i.e., difference is composition with the complement.
  DifferenceFst(const Fst<Arc> &fst1, const Fst<Arc> &fst2,
                const CacheOptions &opts = CacheOptions())
      : ComposeFst<Arc>(CreateDifferenceImplWithCacheOpts(fst1, fst2, opts)) {
    // The construction is only defined for acceptors; flag as an error FST
    // rather than crashing.
    if (!fst1.Properties(kAcceptor, true)) {
      FSTERROR() << "DifferenceFst: 1st argument not an acceptor";
      GetImpl()->SetProperties(kError, kError);
    }
  }
  // Constructor taking full DifferenceFstOptions (custom matchers/filter).
  template <class Matcher, class Filter, class StateTable>
  DifferenceFst(
      const Fst<Arc> &fst1, const Fst<Arc> &fst2,
      const DifferenceFstOptions<Arc, Matcher, Filter, StateTable> &opts)
      : ComposeFst<Arc>(
            CreateDifferenceImplWithDifferenceOpts(fst1, fst2, opts)) {
    if (!fst1.Properties(kAcceptor, true)) {
      FSTERROR() << "DifferenceFst: 1st argument not an acceptor";
      GetImpl()->SetProperties(kError, kError);
    }
  }
  // See Fst<>::Copy() for doc.
  DifferenceFst(const DifferenceFst<Arc> &fst, bool safe = false)
      : ComposeFst<Arc>(fst, safe) {}
  // Get a copy of this DifferenceFst. See Fst<>::Copy() for further doc.
  DifferenceFst<Arc> *Copy(bool safe = false) const override {
    return new DifferenceFst<Arc>(*this, safe);
  }
 private:
  using Impl = internal::ComposeFstImplBase<Arc>;
  using ImplToFst<Impl>::GetImpl;
  // Builds the compose implementation for the plain CacheOptions constructor.
  // The complement FST introduces a rho label; the RhoMatcher expands it
  // on the fly during composition.
  // NOTE(review): the caller-supplied `opts` is not forwarded into the
  // ComposeFstOptions below (a default CacheOptions() is used instead); this
  // matches upstream OpenFst, but confirm it is intentional.
  static std::shared_ptr<Impl> CreateDifferenceImplWithCacheOpts(
      const Fst<Arc> &fst1, const Fst<Arc> &fst2, const CacheOptions &opts) {
    using RM = RhoMatcher<Matcher<Fst<A>>>;
    ComplementFst<Arc> cfst(fst2);
    ComposeFstOptions<A, RM> copts(
        CacheOptions(), new RM(fst1, MATCH_NONE),
        new RM(cfst, MATCH_INPUT, ComplementFst<Arc>::kRhoLabel));
    return CreateBase1(fst1, cfst, copts);
  }
  // Builds the compose implementation when full DifferenceFstOptions are
  // given; the caller's matchers are wrapped in rho matchers.
  template <class Matcher, class Filter, class StateTable>
  static std::shared_ptr<Impl> CreateDifferenceImplWithDifferenceOpts(
      const Fst<Arc> &fst1, const Fst<Arc> &fst2,
      const DifferenceFstOptions<Arc, Matcher, Filter, StateTable> &opts) {
    using RM = RhoMatcher<Matcher>;
    ComplementFst<Arc> cfst(fst2);
    ComposeFstOptions<Arc, RM> copts(opts);
    copts.matcher1 = new RM(fst1, MATCH_NONE, kNoLabel, MATCHER_REWRITE_ALWAYS,
                            opts.matcher1);
    copts.matcher2 = new RM(cfst, MATCH_INPUT, ComplementFst<Arc>::kRhoLabel,
                            MATCHER_REWRITE_ALWAYS, opts.matcher2);
    return CreateBase1(fst1, cfst, copts);
  }
};
// Specialization for DifferenceFst; state iteration simply delegates to the
// underlying ComposeFst state iterator.
template <class Arc>
class StateIterator<DifferenceFst<Arc>>
    : public StateIterator<ComposeFst<Arc>> {
 public:
  explicit StateIterator(const DifferenceFst<Arc> &fst)
      : StateIterator<ComposeFst<Arc>>(fst) {}
};
// Specialization for DifferenceFst; arc iteration simply delegates to the
// underlying ComposeFst arc iterator.
template <class Arc>
class ArcIterator<DifferenceFst<Arc>> : public ArcIterator<ComposeFst<Arc>> {
 public:
  using StateId = typename Arc::StateId;
  ArcIterator(const DifferenceFst<Arc> &fst, StateId s)
      : ArcIterator<ComposeFst<Arc>>(fst, s) {}
};
// Options for the eager Difference() function below.
using DifferenceOptions = ComposeOptions;
// Useful alias when using StdArc.
using StdDifferenceFst = DifferenceFst<StdArc>;
// Computes the difference between two FSAs. This version writes the difference
// to an output MutableFst. Only strings that are in the first automaton but not
// in the second are retained in the result.
//
// The first argument must be an acceptor; the second argument must be an
// unweighted, epsilon-free, deterministic acceptor. One of the arguments must
// be label-sorted.
//
// Complexity: same as Compose.
//
// Caveats: same as Compose.
template <class Arc>
void Difference(const Fst<Arc> &ifst1, const Fst<Arc> &ifst2,
                MutableFst<Arc> *ofst,
                const DifferenceOptions &opts = DifferenceOptions()) {
  using M = Matcher<Fst<Arc>>;
  // In each case, we cache only the last state for fastest copy.
  // (gc_limit = 0 keeps the delayed FST's cache minimal since the result is
  // materialized into ofst immediately.)
  // Every FilterType value is handled below; adding a new filter type
  // requires a new case here.
  switch (opts.filter_type) {
    case AUTO_FILTER: {
      CacheOptions nopts;
      nopts.gc_limit = 0;
      *ofst = DifferenceFst<Arc>(ifst1, ifst2, nopts);
      break;
    }
    case SEQUENCE_FILTER: {
      DifferenceFstOptions<Arc> dopts;
      dopts.gc_limit = 0;
      *ofst = DifferenceFst<Arc>(ifst1, ifst2, dopts);
      break;
    }
    case ALT_SEQUENCE_FILTER: {
      DifferenceFstOptions<Arc, M, AltSequenceComposeFilter<M>> dopts;
      dopts.gc_limit = 0;
      *ofst = DifferenceFst<Arc>(ifst1, ifst2, dopts);
      break;
    }
    case MATCH_FILTER: {
      DifferenceFstOptions<Arc, M, MatchComposeFilter<M>> dopts;
      dopts.gc_limit = 0;
      *ofst = DifferenceFst<Arc>(ifst1, ifst2, dopts);
      break;
    }
    case NO_MATCH_FILTER: {
      DifferenceFstOptions<Arc, M, NoMatchComposeFilter<M>> dopts;
      dopts.gc_limit = 0;
      *ofst = DifferenceFst<Arc>(ifst1, ifst2, dopts);
      break;
    }
    case NULL_FILTER: {
      DifferenceFstOptions<Arc, M, NullComposeFilter<M>> dopts;
      dopts.gc_limit = 0;
      *ofst = DifferenceFst<Arc>(ifst1, ifst2, dopts);
      break;
    }
    case TRIVIAL_FILTER: {
      DifferenceFstOptions<Arc, M, TrivialComposeFilter<M>> dopts;
      dopts.gc_limit = 0;
      *ofst = DifferenceFst<Arc>(ifst1, ifst2, dopts);
      break;
    }
  }
  // Optionally trim states not on a successful path.
  if (opts.connect) Connect(ofst);
}
} // namespace fst
#endif // FST_DIFFERENCE_H_
| wiltonlazary/arangodb | 3rdParty/iresearch/external/openfst/fst/difference.h | C | apache-2.0 | 6,964 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.GenericAction;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteAction;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction;
import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction;
import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheAction;
import org.elasticsearch.action.admin.indices.create.CreateIndexAction;
import org.elasticsearch.action.admin.indices.flush.FlushAction;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
import org.elasticsearch.action.delete.DeleteAction;
import org.elasticsearch.action.get.GetAction;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.HashMap;
import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
/**
 * Base test verifying that headers configured through {@code ThreadContext} settings (and headers
 * placed directly on the thread context) are propagated with every request issued through a
 * {@link Client}. Subclasses supply the concrete client via
 * {@link #buildClient(Settings, GenericAction[])}. Each tested action is expected to fail with an
 * {@link InternalException}; the {@link AssertingActionListener} then checks the headers that were
 * present on the thread context when the failure is delivered.
 */
public abstract class AbstractClientHeadersTestCase extends ESTestCase {

    protected static final Settings HEADER_SETTINGS = Settings.builder()
            .put(ThreadContext.PREFIX + ".key1", "val1")
            .put(ThreadContext.PREFIX + ".key2", "val 2")
            .build();

    /** Arbitrary sample of client, cluster-admin and indices-admin actions to exercise. */
    private static final GenericAction[] ACTIONS = new GenericAction[] {
                // client actions
                GetAction.INSTANCE, SearchAction.INSTANCE, DeleteAction.INSTANCE, DeleteStoredScriptAction.INSTANCE,
                IndexAction.INSTANCE,

                // cluster admin actions
                ClusterStatsAction.INSTANCE, CreateSnapshotAction.INSTANCE, ClusterRerouteAction.INSTANCE,

                // indices admin actions
                CreateIndexAction.INSTANCE, IndicesStatsAction.INSTANCE, ClearIndicesCacheAction.INSTANCE, FlushAction.INSTANCE
    };

    protected ThreadPool threadPool;
    private Client client;

    @Override
    public void setUp() throws Exception {
        super.setUp();
        Settings settings = Settings.builder()
                .put(HEADER_SETTINGS)
                // NOTE(review): "path.home" and Environment.PATH_HOME_SETTING.getKey() appear to
                // resolve to the same key, so the later put wins — confirm both are still needed.
                .put("path.home", createTempDir().toString())
                .put("node.name", "test-" + getTestName())
                .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
                .build();
        threadPool = new ThreadPool(settings);
        client = buildClient(settings, ACTIONS);
    }

    @Override
    public void tearDown() throws Exception {
        super.tearDown();
        client.close();
        terminate(threadPool);
    }

    /** Builds the client under test, configured to fail each tested action with InternalException. */
    protected abstract Client buildClient(Settings headersSettings, GenericAction[] testedActions);

    public void testActions() {

        // TODO this is a really shitty way to test it, we need to figure out a way to test all the client methods
        // without specifying each one (reflection doesn't as each action needs its own special settings, without
        // them, request validation will fail before the test is executed. (one option is to enable disabling the
        // validation in the settings??? - ugly and conceptually wrong)

        // choosing arbitrary top level actions to test
        client.prepareGet("idx", "type", "id").execute(new AssertingActionListener<>(GetAction.NAME, client.threadPool()));
        client.prepareSearch().execute(new AssertingActionListener<>(SearchAction.NAME, client.threadPool()));
        client.prepareDelete("idx", "type", "id").execute(new AssertingActionListener<>(DeleteAction.NAME, client.threadPool()));
        client.admin().cluster().prepareDeleteStoredScript("lang", "id").execute(new AssertingActionListener<>(DeleteStoredScriptAction.NAME, client.threadPool()));
        client.prepareIndex("idx", "type", "id").setSource("source", XContentType.JSON).execute(new AssertingActionListener<>(IndexAction.NAME, client.threadPool()));

        // choosing arbitrary cluster admin actions to test
        client.admin().cluster().prepareClusterStats().execute(new AssertingActionListener<>(ClusterStatsAction.NAME, client.threadPool()));
        client.admin().cluster().prepareCreateSnapshot("repo", "bck").execute(new AssertingActionListener<>(CreateSnapshotAction.NAME, client.threadPool()));
        client.admin().cluster().prepareReroute().execute(new AssertingActionListener<>(ClusterRerouteAction.NAME, client.threadPool()));

        // choosing arbitrary indices admin actions to test
        client.admin().indices().prepareCreate("idx").execute(new AssertingActionListener<>(CreateIndexAction.NAME, client.threadPool()));
        client.admin().indices().prepareStats().execute(new AssertingActionListener<>(IndicesStatsAction.NAME, client.threadPool()));
        client.admin().indices().prepareClearCache("idx1", "idx2").execute(new AssertingActionListener<>(ClearIndicesCacheAction.NAME, client.threadPool()));
        client.admin().indices().prepareFlush().execute(new AssertingActionListener<>(FlushAction.NAME, client.threadPool()));
    }

    /** Headers put directly on the thread context must override the settings-derived defaults. */
    public void testOverrideHeader() throws Exception {
        String key1Val = randomAlphaOfLength(5);
        Map<String, String> expected = new HashMap<>();
        expected.put("key1", key1Val);
        expected.put("key2", "val 2");
        client.threadPool().getThreadContext().putHeader("key1", key1Val);
        client.prepareGet("idx", "type", "id")
                .execute(new AssertingActionListener<>(GetAction.NAME, expected, client.threadPool()));

        client.admin().cluster().prepareClusterStats()
                .execute(new AssertingActionListener<>(ClusterStatsAction.NAME, expected, client.threadPool()));

        client.admin().indices().prepareCreate("idx")
                .execute(new AssertingActionListener<>(CreateIndexAction.NAME, expected, client.threadPool()));
    }

    /** Asserts that {@code headers} contains exactly the entries of {@code expected}. */
    protected static void assertHeaders(Map<String, String> headers, Map<String, String> expected) {
        assertNotNull(headers);
        assertEquals(expected.size(), headers.size());
        for (Map.Entry<String, String> expectedEntry : expected.entrySet()) {
            // Expected value first, actual second (JUnit convention). The previous code had the
            // arguments reversed, which produced misleading "expected X but was Y" messages.
            assertEquals(expectedEntry.getValue(), headers.get(expectedEntry.getKey()));
        }
    }

    protected static void assertHeaders(ThreadPool pool) {
        assertHeaders(pool.getThreadContext().getHeaders(), (Map)HEADER_SETTINGS.getAsSettings(ThreadContext.PREFIX).getAsStructuredMap());
    }

    /** Marker exception thrown by the stubbed actions; carries the action name for verification. */
    public static class InternalException extends Exception {

        private final String action;

        public InternalException(String action) {
            this.action = action;
        }
    }

    /**
     * Listener that expects the action to fail with an {@link InternalException} and, on failure,
     * asserts the thread-context headers match {@code expectedHeaders}.
     */
    protected static class AssertingActionListener<T> implements ActionListener<T> {

        private final String action;
        private final Map<String, String> expectedHeaders;
        private final ThreadPool pool;

        public AssertingActionListener(String action, ThreadPool pool) {
            this(action, (Map)HEADER_SETTINGS.getAsSettings(ThreadContext.PREFIX).getAsStructuredMap(), pool);
        }

        public AssertingActionListener(String action, Map<String, String> expectedHeaders, ThreadPool pool) {
            this.action = action;
            this.expectedHeaders = expectedHeaders;
            this.pool = pool;
        }

        @Override
        public void onResponse(T t) {
            fail("an internal exception was expected for action [" + action + "]");
        }

        @Override
        public void onFailure(Exception t) {
            Throwable e = unwrap(t, InternalException.class);
            assertThat("expected action [" + action + "] to throw an internal exception", e, notNullValue());
            assertThat(action, equalTo(((InternalException) e).action));
            Map<String, String> headers = pool.getThreadContext().getHeaders();
            assertHeaders(headers, expectedHeaders);
        }

        /**
         * Walks the cause chain of {@code t} looking for an instance of {@code exceptionType};
         * returns null if none is found, bailing out after 10 levels or on a self-referential cause.
         */
        public Throwable unwrap(Throwable t, Class<? extends Throwable> exceptionType) {
            int counter = 0;
            Throwable result = t;
            while (!exceptionType.isInstance(result)) {
                if (result.getCause() == null) {
                    return null;
                }
                if (result.getCause() == result) {
                    return null;
                }
                if (counter++ > 10) {
                    // dear god, if we got more than 10 levels down, WTF? just bail
                    fail("Exception cause unwrapping ran for 10 levels: " + ExceptionsHelper.stackTrace(t));
                    return null;
                }
                result = result.getCause();
            }
            return result;
        }
    }
}
| nezirus/elasticsearch | core/src/test/java/org/elasticsearch/client/AbstractClientHeadersTestCase.java | Java | apache-2.0 | 9,899 |
// Copyright 2014 The Oppia Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview Minor general functional components for end-to-end testing
* with protractor.
*/
var editor = require('./editor.js');
// Default pause (in ms) used when the system needs time for computations.
var WAIT_TIME = 4000;

// Pauses the browser. Optionally accepts a single waitTime argument in
// milliseconds; called with any other arity, it falls back to WAIT_TIME.
var waitForSystem = function() {
  var millis = arguments.length === 1 ? arguments[0] : WAIT_TIME;
  browser.sleep(millis);
};
// Scrolls the browser window back to the top-left corner of the page.
var scrollToTop = function() {
  browser.executeScript('window.scrollTo(0,0);');
};
// We will report all console logs of level greater than this.
var CONSOLE_LOG_THRESHOLD = 900;
var CONSOLE_ERRORS_TO_IGNORE = [];

// Expects the browser console to contain no log entry whose level exceeds
// CONSOLE_LOG_THRESHOLD, except entries whose message matches a pattern in
// errorsToIgnore or in the global CONSOLE_ERRORS_TO_IGNORE list.
var checkForConsoleErrors = function(errorsToIgnore) {
  var irrelevantErrors = errorsToIgnore.concat(CONSOLE_ERRORS_TO_IGNORE);
  browser.manage().logs().get('browser').then(function(browserLogs) {
    var fatalErrors = browserLogs.filter(function(logEntry) {
      if (logEntry.level.value <= CONSOLE_LOG_THRESHOLD) {
        return false;
      }
      var isIgnorable = irrelevantErrors.some(function(pattern) {
        return logEntry.message.match(pattern);
      });
      return !isIgnorable;
    });
    expect(fatalErrors).toEqual([]);
  });
};
// URL fragments used to navigate between Oppia pages under test.
var SERVER_URL_PREFIX = 'http://localhost:9001';
var LIBRARY_URL_SUFFIX = '/library';
var EDITOR_URL_SLICE = '/create/';
var PLAYER_URL_SLICE = '/explore/';
var LOGIN_URL_SUFFIX = '/_ah/login';
var ADMIN_URL_SUFFIX = '/admin';
var MODERATOR_URL_SUFFIX = '/moderator';
var DONATION_THANK_URL_SUFFIX = '/thanks';
// Note that this only works in dev, due to the use of cache slugs in prod.
var SCRIPTS_URL_SLICE = '/assets/scripts/';
// Number of characters in an exploration id, used to slice it out of a URL.
var EXPLORATION_ID_LENGTH = 12;
var FIRST_STATE_DEFAULT_NAME = 'Introduction';
// Returns a promise-like ("thenable") object whose callback receives the
// exploration id sliced out of the current URL. Also asserts that the URL
// actually starts with currentUrlPrefix.
var _getExplorationId = function(currentUrlPrefix) {
  return {
    then: function(callbackFunction) {
      browser.getCurrentUrl().then(function(url) {
        expect(url.slice(0, currentUrlPrefix.length)).toBe(currentUrlPrefix);
        // The exploration id is the fixed-length segment that immediately
        // follows the prefix.
        var explorationId = url.slice(
          currentUrlPrefix.length,
          currentUrlPrefix.length + EXPLORATION_ID_LENGTH);
        return callbackFunction(explorationId);
      });
    }
  };
};
// If we are currently in the editor, this will return a promise-like object
// whose callback receives the exploration ID (see _getExplorationId).
var getExplorationIdFromEditor = function() {
  return _getExplorationId(SERVER_URL_PREFIX + EDITOR_URL_SLICE);
};
// Likewise for the player: requires the current page to be an exploration
// player, and yields its exploration ID.
var getExplorationIdFromPlayer = function() {
  return _getExplorationId(SERVER_URL_PREFIX + PLAYER_URL_SLICE);
};
// Navigates to the editor page of the given exploration and dismisses the
// editor tutorial if it appears. The explorationId here should be a string,
// not a promise.
var openEditor = function(explorationId) {
  browser.get(EDITOR_URL_SLICE + explorationId);
  browser.waitForAngular();
  editor.exitTutorialIfNecessary();
};
// Navigates to the player page of the given exploration (string id).
var openPlayer = function(explorationId) {
  browser.get(PLAYER_URL_SLICE + explorationId);
  browser.waitForAngular();
};
// Takes the user from an exploration editor to its player.
// NOTE: we do not use the preview button because that will open a new window.
var moveToPlayer = function() {
  getExplorationIdFromEditor().then(openPlayer);
};
// Takes the user from the exploration player to its editor.
var moveToEditor = function() {
  getExplorationIdFromPlayer().then(openEditor);
};
// Asserts that the current page shows the "Error 404" error container.
var expect404Error = function() {
  expect(element(by.css('.protractor-test-error-container')).getText()).
    toMatch('Error 404');
};
// Checks that no untranslated I18N ids are shown anywhere on the page.
var ensurePageHasNoTranslationIds = function() {
  // Inspecting the innerHTML is hacky, but much faster than checking each
  // individual component that contains text.
  element(by.css('.oppia-base-container')).getInnerHtml().then(
    function(pageHtml) {
      // Strip translate="I18N_..." attributes and <['I18N_...']> variables
      // first, since those are legitimately present but never displayed.
      var translateAttrPattern = new RegExp('translate="I18N_', 'g');
      var ngVariablePattern = new RegExp('<\\[\'I18N_', 'g');
      var visibleHtml = pageHtml
        .replace(translateAttrPattern, '')
        .replace(ngVariablePattern, '');
      expect(visibleHtml).not.toContain('I18N');
    });
};
// Accepts the currently displayed browser alert, retrying (via browser.wait)
// until the accept succeeds — the alert may not be present yet when first
// polled, in which case the rejection handler returns false and we try again.
var acceptAlert = function() {
  browser.wait(function() {
    return browser.switchTo().alert().accept().then(
      function() {
        return true;
      },
      function() {
        return false;
      }
    );
  });
};
// Public API of this utility module.
// NOTE(review): PLAYER_URL_SLICE and EXPLORATION_ID_LENGTH are intentionally
// (?) not exported — confirm no spec needs them directly.
exports.acceptAlert = acceptAlert;
exports.waitForSystem = waitForSystem;
exports.scrollToTop = scrollToTop;
exports.checkForConsoleErrors = checkForConsoleErrors;
exports.SERVER_URL_PREFIX = SERVER_URL_PREFIX;
exports.LIBRARY_URL_SUFFIX = LIBRARY_URL_SUFFIX;
exports.EDITOR_URL_SLICE = EDITOR_URL_SLICE;
exports.LOGIN_URL_SUFFIX = LOGIN_URL_SUFFIX;
exports.MODERATOR_URL_SUFFIX = MODERATOR_URL_SUFFIX;
exports.ADMIN_URL_SUFFIX = ADMIN_URL_SUFFIX;
exports.DONATION_THANK_URL_SUFFIX = DONATION_THANK_URL_SUFFIX;
exports.SCRIPTS_URL_SLICE = SCRIPTS_URL_SLICE;
exports.FIRST_STATE_DEFAULT_NAME = FIRST_STATE_DEFAULT_NAME;
exports.getExplorationIdFromEditor = getExplorationIdFromEditor;
exports.getExplorationIdFromPlayer = getExplorationIdFromPlayer;
exports.openEditor = openEditor;
exports.openPlayer = openPlayer;
exports.moveToPlayer = moveToPlayer;
exports.moveToEditor = moveToEditor;
exports.expect404Error = expect404Error;
exports.ensurePageHasNoTranslationIds = ensurePageHasNoTranslationIds;
| amgowano/oppia | core/tests/protractor_utils/general.js | JavaScript | apache-2.0 | 6,076 |
package org.jetbrains.plugins.scala.lang.formatter;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.command.CommandProcessor;
import com.intellij.openapi.editor.Document;
import com.intellij.openapi.editor.EditorFactory;
import com.intellij.openapi.editor.impl.DocumentImpl;
import com.intellij.openapi.util.TextRange;
import com.intellij.openapi.util.io.FileUtil;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.psi.PsiDocumentManager;
import com.intellij.psi.PsiFile;
import com.intellij.psi.codeStyle.CodeStyleManager;
import com.intellij.psi.codeStyle.CodeStyleSettings;
import com.intellij.psi.codeStyle.CodeStyleSettingsManager;
import com.intellij.psi.codeStyle.CommonCodeStyleSettings;
import com.intellij.testFramework.LightIdeaTestCase;
import com.intellij.util.IncorrectOperationException;
import org.jetbrains.plugins.scala.ScalaLanguage;
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings;
import org.jetbrains.plugins.scala.util.TestUtils;
import java.io.File;
import java.util.EnumMap;
import java.util.Map;
/**
* Base class for java formatter tests that holds utility methods.
*
* @author Denis Zhdanov
* @since Apr 27, 2010 6:26:29 PM
*/
//todo: almost duplicate from Java
public abstract class AbstractScalaFormatterTestBase extends LightIdeaTestCase {
protected enum Action {REFORMAT, INDENT}
private interface TestFormatAction {
void run(PsiFile psiFile, int startOffset, int endOffset);
}
private static final Map<Action, TestFormatAction> ACTIONS = new EnumMap<Action, TestFormatAction>(Action.class);
static {
ACTIONS.put(Action.REFORMAT, new TestFormatAction() {
public void run(PsiFile psiFile, int startOffset, int endOffset) {
CodeStyleManager.getInstance(getProject()).reformatText(psiFile, startOffset, endOffset);
}
});
ACTIONS.put(Action.INDENT, new TestFormatAction() {
public void run(PsiFile psiFile, int startOffset, int endOffset) {
CodeStyleManager.getInstance(getProject()).adjustLineIndent(psiFile, startOffset);
}
});
}
private static final String BASE_PATH = TestUtils.getTestDataPath() + "/psi/formatter";
public TextRange myTextRange;
public TextRange myLineRange;
public CommonCodeStyleSettings getCommonSettings() {
return getSettings().getCommonSettings(ScalaLanguage.INSTANCE);
}
public ScalaCodeStyleSettings getScalaSettings() {
return getSettings().getCustomSettings(ScalaCodeStyleSettings.class);
}
public CodeStyleSettings getSettings() {
return CodeStyleSettingsManager.getSettings(getProject());
}
public CommonCodeStyleSettings.IndentOptions getIndentOptions() {
return getCommonSettings().getIndentOptions();
}
public void doTest() throws Exception {
doTest(getTestName(false) + ".scala", getTestName(false) + "_after.scala");
}
public void doTest(String fileNameBefore, String fileNameAfter) throws Exception {
doTextTest(Action.REFORMAT, loadFile(fileNameBefore), loadFile(fileNameAfter));
}
public void doTextTest(final String text, String textAfter) throws IncorrectOperationException {
doTextTest(Action.REFORMAT, StringUtil.convertLineSeparators(text), StringUtil.convertLineSeparators(textAfter));
}
public void doTextTest(final Action action, final String text, String textAfter) throws IncorrectOperationException {
final PsiFile file = createFile("A.scala", text);
if (myLineRange != null) {
final DocumentImpl document = new DocumentImpl(text);
myTextRange =
new TextRange(document.getLineStartOffset(myLineRange.getStartOffset()), document.getLineEndOffset(myLineRange.getEndOffset()));
}
/*
CommandProcessor.getInstance().executeCommand(getProject(), new Runnable() {
public void run() {
ApplicationManager.getApplication().runWriteAction(new Runnable() {
public void run() {
performFormatting(file);
}
});
}
}, null, null);
assertEquals(prepareText(textAfter), prepareText(file.getText()));
*/
final PsiDocumentManager manager = PsiDocumentManager.getInstance(getProject());
final Document document = manager.getDocument(file);
CommandProcessor.getInstance().executeCommand(getProject(), new Runnable() {
public void run() {
ApplicationManager.getApplication().runWriteAction(new Runnable() {
public void run() {
document.replaceString(0, document.getTextLength(), text);
manager.commitDocument(document);
try {
TextRange rangeToUse = myTextRange;
if (rangeToUse == null) {
rangeToUse = file.getTextRange();
}
ACTIONS.get(action).run(file, rangeToUse.getStartOffset(), rangeToUse.getEndOffset());
}
catch (IncorrectOperationException e) {
assertTrue(e.getLocalizedMessage(), false);
}
}
});
}
}, "", "");
if (document == null) {
fail("Don't expect the document to be null");
return;
}
assertEquals(prepareText(textAfter), prepareText(document.getText()));
manager.commitDocument(document);
assertEquals(prepareText(textAfter), prepareText(file.getText()));
}
//todo: was unused, should be deleted (??)
/* public void doMethodTest(final String before, final String after) throws Exception {
doTextTest(
Action.REFORMAT,
"class Foo{\n" + " void foo() {\n" + before + '\n' + " }\n" + "}",
"class Foo {\n" + " void foo() {\n" + shiftIndentInside(after, 8, false) + '\n' + " }\n" + "}"
);
}
public void doClassTest(final String before, final String after) throws Exception {
doTextTest(
Action.REFORMAT,
"class Foo{\n" + before + '\n' + "}",
"class Foo {\n" + shiftIndentInside(after, 4, false) + '\n' + "}"
);
}*/
private static String prepareText(String actual) {
if (actual.startsWith("\n")) {
actual = actual.substring(1);
}
if (actual.startsWith("\n")) {
actual = actual.substring(1);
}
// Strip trailing spaces
final Document doc = EditorFactory.getInstance().createDocument(actual);
CommandProcessor.getInstance().executeCommand(getProject(), new Runnable() {
public void run() {
ApplicationManager.getApplication().runWriteAction(new Runnable() {
public void run() {
((DocumentImpl)doc).stripTrailingSpaces(getProject());
}
});
}
}, "formatting", null);
return doc.getText().trim();
}
private static String loadFile(String name) throws Exception {
String fullName = BASE_PATH + File.separatorChar + name;
String text = new String(FileUtil.loadFileText(new File(fullName)));
text = StringUtil.convertLineSeparators(text);
return text;
}
@Override
protected void setUp() throws Exception {
super.setUp();
TestUtils.disableTimerThread();
}
}
| ilinum/intellij-scala | test/org/jetbrains/plugins/scala/lang/formatter/AbstractScalaFormatterTestBase.java | Java | apache-2.0 | 7,072 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<title>HashGroupify (ARX Developer Documentation)</title>
<link rel="stylesheet" type="text/css" href="../../../../../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="HashGroupify (ARX Developer Documentation)";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="class-use/HashGroupify.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev Class</li>
<li><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupify.GroupStatistics.html" title="class in org.deidentifier.arx.framework.check.groupify"><span class="strong">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?org/deidentifier/arx/framework/check/groupify/HashGroupify.html" target="_top">Frames</a></li>
<li><a href="HashGroupify.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li><a href="#nested_class_summary">Nested</a> | </li>
<li>Field | </li>
<li><a href="#constructor_summary">Constr</a> | </li>
<li><a href="#method_summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li><a href="#constructor_detail">Constr</a> | </li>
<li><a href="#method_detail">Method</a></li>
</ul>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<!-- ======== START OF CLASS DATA ======== -->
<div class="header">
<div class="subTitle">org.deidentifier.arx.framework.check.groupify</div>
<h2 title="Class HashGroupify" class="title">Class HashGroupify</h2>
</div>
<div class="contentContainer">
<ul class="inheritance">
<li>java.lang.Object</li>
<li>
<ul class="inheritance">
<li>org.deidentifier.arx.framework.check.groupify.HashGroupify</li>
</ul>
</li>
</ul>
<div class="description">
<ul class="blockList">
<li class="blockList">
<dl>
<dt>All Implemented Interfaces:</dt>
<dd><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html" title="interface in org.deidentifier.arx.framework.check.groupify">IHashGroupify</a></dd>
</dl>
<hr>
<br>
<pre>public class <span class="strong">HashGroupify</span>
extends java.lang.Object
implements <a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html" title="interface in org.deidentifier.arx.framework.check.groupify">IHashGroupify</a></pre>
<div class="block">A hash groupify operator. It implements a hash table with chaining and keeps
track of additional properties per equivalence class</div>
</li>
</ul>
</div>
<div class="summary">
<ul class="blockList">
<li class="blockList">
<!-- ======== NESTED CLASS SUMMARY ======== -->
<ul class="blockList">
<li class="blockList"><a name="nested_class_summary">
<!-- -->
</a>
<h3>Nested Class Summary</h3>
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Nested Class Summary table, listing nested classes, and an explanation">
<caption><span>Nested Classes</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Class and Description</th>
</tr>
<tr class="altColor">
<td class="colFirst"><code>static class </code></td>
<td class="colLast"><code><strong><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupify.GroupStatistics.html" title="class in org.deidentifier.arx.framework.check.groupify">HashGroupify.GroupStatistics</a></strong></code>
<div class="block">Statistics about the groups, excluding outliers.</div>
</td>
</tr>
</table>
</li>
</ul>
<!-- ======== CONSTRUCTOR SUMMARY ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor_summary">
<!-- -->
</a>
<h3>Constructor Summary</h3>
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Constructor Summary table, listing constructors, and an explanation">
<caption><span>Constructors</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colOne" scope="col">Constructor and Description</th>
</tr>
<tr class="altColor">
<td class="colOne"><code><strong><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupify.html#HashGroupify(int,%20org.deidentifier.arx.ARXConfiguration.ARXConfigurationInternal)">HashGroupify</a></strong>(int capacity,
<a href="../../../../../../org/deidentifier/arx/ARXConfiguration.ARXConfigurationInternal.html" title="class in org.deidentifier.arx">ARXConfiguration.ARXConfigurationInternal</a> config)</code>
<div class="block">Constructs a new hash groupify operator.</div>
</td>
</tr>
</table>
</li>
</ul>
<!-- ========== METHOD SUMMARY =========== -->
<ul class="blockList">
<li class="blockList"><a name="method_summary">
<!-- -->
</a>
<h3>Method Summary</h3>
<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation">
<caption><span>Methods</span><span class="tabEnd"> </span></caption>
<tr>
<th class="colFirst" scope="col">Modifier and Type</th>
<th class="colLast" scope="col">Method and Description</th>
</tr>
<tr class="altColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><strong><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupify.html#addAll(int[],%20int,%20int,%20int[],%20int)">addAll</a></strong>(int[] key,
int representant,
int count,
int[] sensitive,
int pcount)</code>
<div class="block">Generic adder for all combinations of criteria in mode transform ALL.</div>
</td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><strong><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupify.html#addGroupify(int[],%20int,%20int,%20org.deidentifier.arx.framework.check.distribution.Distribution[],%20int)">addGroupify</a></strong>(int[] key,
int representant,
int count,
<a href="../../../../../../org/deidentifier/arx/framework/check/distribution/Distribution.html" title="class in org.deidentifier.arx.framework.check.distribution">Distribution</a>[] distributions,
int pcount)</code>
<div class="block">Generic adder for all combinations of criteria in mode transform GROUPIFY.</div>
</td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><strong><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupify.html#addSnapshot(int[],%20int,%20int,%20int[][],%20int[][],%20int)">addSnapshot</a></strong>(int[] key,
int representant,
int count,
int[][] elements,
int[][] frequencies,
int pcount)</code>
<div class="block">Generic adder for all combinations of criteria in mode transform SNAPSHOT.</div>
</td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><strong><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupify.html#analyze(org.deidentifier.arx.framework.lattice.Node,%20boolean)">analyze</a></strong>(<a href="../../../../../../org/deidentifier/arx/framework/lattice/Node.html" title="class in org.deidentifier.arx.framework.lattice">Node</a> transformation,
boolean force)</code>
<div class="block">Computes the anonymity properties and suppressed tuples etc.</div>
</td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><strong><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupify.html#clear()">clear</a></strong>()</code>
<div class="block">Clear.</div>
</td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupifyEntry.html" title="class in org.deidentifier.arx.framework.check.groupify">HashGroupifyEntry</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupify.html#getFirstEntry()">getFirstEntry</a></strong>()</code>
<div class="block">Gets the first entry.</div>
</td>
</tr>
<tr class="altColor">
<td class="colFirst"><code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupify.GroupStatistics.html" title="class in org.deidentifier.arx.framework.check.groupify">HashGroupify.GroupStatistics</a></code></td>
<td class="colLast"><code><strong><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupify.html#getGroupStatistics()">getGroupStatistics</a></strong>()</code>
<div class="block">Returns statistics about the groups.</div>
</td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>boolean</code></td>
<td class="colLast"><code><strong><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupify.html#isAnonymous()">isAnonymous</a></strong>()</code>
<div class="block">Are all defined privacy criteria fulfilled by this transformation, given the specified limit on suppressed tuples.</div>
</td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>boolean</code></td>
<td class="colLast"><code><strong><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupify.html#isKAnonymous()">isKAnonymous</a></strong>()</code>
<div class="block">Is the current transformation k-anonymous.</div>
</td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><strong><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupify.html#markOutliers(int[][])">markOutliers</a></strong>(int[][] data)</code>
<div class="block">Marks all outliers.</div>
</td>
</tr>
<tr class="altColor">
<td class="colFirst"><code>void</code></td>
<td class="colLast"><code><strong><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupify.html#resetSuppression()">resetSuppression</a></strong>()</code>
<div class="block">This method will reset all flags that indicate that equivalence classes are suppressed.</div>
</td>
</tr>
<tr class="rowColor">
<td class="colFirst"><code>int</code></td>
<td class="colLast"><code><strong><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupify.html#size()">size</a></strong>()</code>
<div class="block">Size.</div>
</td>
</tr>
</table>
<ul class="blockList">
<li class="blockList"><a name="methods_inherited_from_class_java.lang.Object">
<!-- -->
</a>
<h3>Methods inherited from class java.lang.Object</h3>
<code>clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait</code></li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
<div class="details">
<ul class="blockList">
<li class="blockList">
<!-- ========= CONSTRUCTOR DETAIL ======== -->
<ul class="blockList">
<li class="blockList"><a name="constructor_detail">
<!-- -->
</a>
<h3>Constructor Detail</h3>
<a name="HashGroupify(int, org.deidentifier.arx.ARXConfiguration.ARXConfigurationInternal)">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>HashGroupify</h4>
<pre>public HashGroupify(int capacity,
<a href="../../../../../../org/deidentifier/arx/ARXConfiguration.ARXConfigurationInternal.html" title="class in org.deidentifier.arx">ARXConfiguration.ARXConfigurationInternal</a> config)</pre>
<div class="block">Constructs a new hash groupify operator.</div>
<dl><dt><span class="strong">Parameters:</span></dt><dd><code>capacity</code> - The capacity</dd><dd><code>config</code> - The config</dd></dl>
</li>
</ul>
</li>
</ul>
<!-- ============ METHOD DETAIL ========== -->
<ul class="blockList">
<li class="blockList"><a name="method_detail">
<!-- -->
</a>
<h3>Method Detail</h3>
<a name="addAll(int[], int, int, int[], int)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>addAll</h4>
<pre>public void addAll(int[] key,
int representant,
int count,
int[] sensitive,
int pcount)</pre>
<div class="block"><strong>Description copied from interface: <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#addAll(int[],%20int,%20int,%20int[],%20int)">IHashGroupify</a></code></strong></div>
<div class="block">Generic adder for all combinations of criteria in mode transform ALL.</div>
<dl>
<dt><strong>Specified by:</strong></dt>
<dd><code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#addAll(int[],%20int,%20int,%20int[],%20int)">addAll</a></code> in interface <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html" title="interface in org.deidentifier.arx.framework.check.groupify">IHashGroupify</a></code></dd>
</dl>
</li>
</ul>
<a name="addGroupify(int[], int, int, org.deidentifier.arx.framework.check.distribution.Distribution[], int)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>addGroupify</h4>
<pre>public void addGroupify(int[] key,
int representant,
int count,
<a href="../../../../../../org/deidentifier/arx/framework/check/distribution/Distribution.html" title="class in org.deidentifier.arx.framework.check.distribution">Distribution</a>[] distributions,
int pcount)</pre>
<div class="block"><strong>Description copied from interface: <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#addGroupify(int[],%20int,%20int,%20org.deidentifier.arx.framework.check.distribution.Distribution[],%20int)">IHashGroupify</a></code></strong></div>
<div class="block">Generic adder for all combinations of criteria in mode transform GROUPIFY.</div>
<dl>
<dt><strong>Specified by:</strong></dt>
<dd><code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#addGroupify(int[],%20int,%20int,%20org.deidentifier.arx.framework.check.distribution.Distribution[],%20int)">addGroupify</a></code> in interface <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html" title="interface in org.deidentifier.arx.framework.check.groupify">IHashGroupify</a></code></dd>
</dl>
</li>
</ul>
<a name="addSnapshot(int[], int, int, int[][], int[][], int)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>addSnapshot</h4>
<pre>public void addSnapshot(int[] key,
int representant,
int count,
int[][] elements,
int[][] frequencies,
int pcount)</pre>
<div class="block"><strong>Description copied from interface: <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#addSnapshot(int[],%20int,%20int,%20int[][],%20int[][],%20int)">IHashGroupify</a></code></strong></div>
<div class="block">Generic adder for all combinations of criteria in mode transform SNAPSHOT.</div>
<dl>
<dt><strong>Specified by:</strong></dt>
<dd><code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#addSnapshot(int[],%20int,%20int,%20int[][],%20int[][],%20int)">addSnapshot</a></code> in interface <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html" title="interface in org.deidentifier.arx.framework.check.groupify">IHashGroupify</a></code></dd>
</dl>
</li>
</ul>
<a name="analyze(org.deidentifier.arx.framework.lattice.Node, boolean)">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>analyze</h4>
<pre>public void analyze(<a href="../../../../../../org/deidentifier/arx/framework/lattice/Node.html" title="class in org.deidentifier.arx.framework.lattice">Node</a> transformation,
boolean force)</pre>
<div class="block"><strong>Description copied from interface: <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#analyze(org.deidentifier.arx.framework.lattice.Node,%20boolean)">IHashGroupify</a></code></strong></div>
<div class="block">Computes the anonymity properties and suppressed tuples etc. Must be called
when all tuples have been passed to the operator. When the flag is set to true
the method will make sure that all equivalence classes that do not fulfill all
privacy criteria are marked for suppression. If the flag is set to false,
the operator may perform an early abort, which may lead to an inconsistent classification
of equivalence classes.</div>
<dl>
<dt><strong>Specified by:</strong></dt>
<dd><code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#analyze(org.deidentifier.arx.framework.lattice.Node,%20boolean)">analyze</a></code> in interface <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html" title="interface in org.deidentifier.arx.framework.check.groupify">IHashGroupify</a></code></dd>
</dl>
</li>
</ul>
<a name="clear()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>clear</h4>
<pre>public void clear()</pre>
<div class="block"><strong>Description copied from interface: <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#clear()">IHashGroupify</a></code></strong></div>
<div class="block">Clear.</div>
<dl>
<dt><strong>Specified by:</strong></dt>
<dd><code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#clear()">clear</a></code> in interface <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html" title="interface in org.deidentifier.arx.framework.check.groupify">IHashGroupify</a></code></dd>
</dl>
</li>
</ul>
<a name="getFirstEntry()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getFirstEntry</h4>
<pre>public <a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupifyEntry.html" title="class in org.deidentifier.arx.framework.check.groupify">HashGroupifyEntry</a> getFirstEntry()</pre>
<div class="block"><strong>Description copied from interface: <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#getFirstEntry()">IHashGroupify</a></code></strong></div>
<div class="block">Gets the first entry.</div>
<dl>
<dt><strong>Specified by:</strong></dt>
<dd><code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#getFirstEntry()">getFirstEntry</a></code> in interface <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html" title="interface in org.deidentifier.arx.framework.check.groupify">IHashGroupify</a></code></dd>
<dt><span class="strong">Returns:</span></dt><dd>the first entry</dd></dl>
</li>
</ul>
<a name="getGroupStatistics()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>getGroupStatistics</h4>
<pre>public <a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupify.GroupStatistics.html" title="class in org.deidentifier.arx.framework.check.groupify">HashGroupify.GroupStatistics</a> getGroupStatistics()</pre>
<div class="block"><strong>Description copied from interface: <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#getGroupStatistics()">IHashGroupify</a></code></strong></div>
<div class="block">Returns statistics about the groups.</div>
<dl>
<dt><strong>Specified by:</strong></dt>
<dd><code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#getGroupStatistics()">getGroupStatistics</a></code> in interface <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html" title="interface in org.deidentifier.arx.framework.check.groupify">IHashGroupify</a></code></dd>
<dt><span class="strong">Returns:</span></dt><dd>statistics about the groups</dd></dl>
</li>
</ul>
<a name="isAnonymous()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>isAnonymous</h4>
<pre>public boolean isAnonymous()</pre>
<div class="block"><strong>Description copied from interface: <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#isAnonymous()">IHashGroupify</a></code></strong></div>
<div class="block">Are all defined privacy criteria fulfilled by this transformation, given the specified limit on suppressed tuples.</div>
<dl>
<dt><strong>Specified by:</strong></dt>
<dd><code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#isAnonymous()">isAnonymous</a></code> in interface <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html" title="interface in org.deidentifier.arx.framework.check.groupify">IHashGroupify</a></code></dd>
<dt><span class="strong">Returns:</span></dt><dd>true, if successful</dd></dl>
</li>
</ul>
<a name="isKAnonymous()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>isKAnonymous</h4>
<pre>public boolean isKAnonymous()</pre>
<div class="block"><strong>Description copied from interface: <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#isKAnonymous()">IHashGroupify</a></code></strong></div>
<div class="block">Is the current transformation k-anonymous. Always returns true, if no k-anonymity (sub-)criterion was specified</div>
<dl>
<dt><strong>Specified by:</strong></dt>
<dd><code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#isKAnonymous()">isKAnonymous</a></code> in interface <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html" title="interface in org.deidentifier.arx.framework.check.groupify">IHashGroupify</a></code></dd>
<dt><span class="strong">Returns:</span></dt><dd>true, if the transformation is k-anonymous</dd></dl>
</li>
</ul>
<a name="markOutliers(int[][])">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>markOutliers</h4>
<pre>public void markOutliers(int[][] data)</pre>
<div class="block"><strong>Description copied from interface: <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#markOutliers(int[][])">IHashGroupify</a></code></strong></div>
<div class="block">Marks all outliers.</div>
<dl>
<dt><strong>Specified by:</strong></dt>
<dd><code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#markOutliers(int[][])">markOutliers</a></code> in interface <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html" title="interface in org.deidentifier.arx.framework.check.groupify">IHashGroupify</a></code></dd>
</dl>
</li>
</ul>
<a name="resetSuppression()">
<!-- -->
</a>
<ul class="blockList">
<li class="blockList">
<h4>resetSuppression</h4>
<pre>public void resetSuppression()</pre>
<div class="block">This method will reset all flags that indicate that equivalence classes are suppressed.</div>
<dl>
<dt><strong>Specified by:</strong></dt>
<dd><code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#resetSuppression()">resetSuppression</a></code> in interface <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html" title="interface in org.deidentifier.arx.framework.check.groupify">IHashGroupify</a></code></dd>
</dl>
</li>
</ul>
<a name="size()">
<!-- -->
</a>
<ul class="blockListLast">
<li class="blockList">
<h4>size</h4>
<pre>public int size()</pre>
<div class="block"><strong>Description copied from interface: <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#size()">IHashGroupify</a></code></strong></div>
<div class="block">Size.</div>
<dl>
<dt><strong>Specified by:</strong></dt>
<dd><code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html#size()">size</a></code> in interface <code><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/IHashGroupify.html" title="interface in org.deidentifier.arx.framework.check.groupify">IHashGroupify</a></code></dd>
<dt><span class="strong">Returns:</span></dt><dd>the int</dd></dl>
</li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
</div>
<!-- ========= END OF CLASS DATA ========= -->
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../overview-summary.html">Overview</a></li>
<li><a href="package-summary.html">Package</a></li>
<li class="navBarCell1Rev">Class</li>
<li><a href="class-use/HashGroupify.html">Use</a></li>
<li><a href="package-tree.html">Tree</a></li>
<li><a href="../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev Class</li>
<li><a href="../../../../../../org/deidentifier/arx/framework/check/groupify/HashGroupify.GroupStatistics.html" title="class in org.deidentifier.arx.framework.check.groupify"><span class="strong">Next Class</span></a></li>
</ul>
<ul class="navList">
<li><a href="../../../../../../index.html?org/deidentifier/arx/framework/check/groupify/HashGroupify.html" target="_top">Frames</a></li>
<li><a href="HashGroupify.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<div>
<ul class="subNavList">
<li>Summary: </li>
<li><a href="#nested_class_summary">Nested</a> | </li>
<li>Field | </li>
<li><a href="#constructor_summary">Constr</a> | </li>
<li><a href="#method_summary">Method</a></li>
</ul>
<ul class="subNavList">
<li>Detail: </li>
<li>Field | </li>
<li><a href="#constructor_detail">Constr</a> | </li>
<li><a href="#method_detail">Method</a></li>
</ul>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
| TheRealRasu/arx | doc/dev/org/deidentifier/arx/framework/check/groupify/HashGroupify.html | HTML | apache-2.0 | 28,537 |
<?php
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* see
* http://www.opensocial.org/Technical-Resources/opensocial-spec-v081/opensocial-reference#opensocial.Activity
*/
/**
 * Plain data model for an OpenSocial Activity.
 *
 * Mirrors the fields defined by opensocial.Activity in the OpenSocial
 * v0.8.1 specification (see the URL in the header comment above). All
 * properties are public and the accessors are trivial pass-throughs,
 * with the single exception of {@see setTitle()}, which sanitizes its
 * input.
 */
class Shindig_Activity {
  /** @var string Id of the application that generated this activity */
  public $appId;
  /** @var string Body text of the activity (may contain markup) */
  public $body;
  /** @var string Message-bundle id used to resolve a localized body */
  public $bodyId;
  /** @var string Opaque id supplied by the external system, if any */
  public $externalId;
  /** @var string Unique id of this activity */
  public $id;
  /** @var array Media items (images, movies, audio) attached to the activity */
  public $mediaItems;
  /** @var int Time the activity was posted (epoch milliseconds per the spec) */
  public $postedTime;
  /** @var float Priority hint in the range [0,1] per the spec */
  public $priority;
  /** @var string Favicon URL of the stream this activity belongs to */
  public $streamFaviconUrl;
  /** @var string Source URL of the stream */
  public $streamSourceUrl;
  /** @var string Title of the stream */
  public $streamTitle;
  /** @var string URL of the stream */
  public $streamUrl;
  /** @var array Key/value parameters substituted into title/body templates */
  public $templateParams;
  /** @var string Title of the activity (sanitized, see setTitle()) */
  public $title;
  /** @var string Message-bundle id used to resolve a localized title */
  public $titleId;
  /** @var string URL associated with the activity */
  public $url;
  /** @var string Id of the user who owns this activity */
  public $userId;

  /**
   * @param string $id     unique activity id
   * @param string $userId id of the owning user
   */
  public function __construct($id, $userId) {
    $this->id = $id;
    $this->userId = $userId;
  }

  /** @return string the application id */
  public function getAppId() {
    return $this->appId;
  }

  /** @param string $appId the application id */
  public function setAppId($appId) {
    $this->appId = $appId;
  }

  /** @return string the activity body */
  public function getBody() {
    return $this->body;
  }

  /** @param string $body the activity body */
  public function setBody($body) {
    $this->body = $body;
  }

  /** @return string the body message-bundle id */
  public function getBodyId() {
    return $this->bodyId;
  }

  /** @param string $bodyId the body message-bundle id */
  public function setBodyId($bodyId) {
    $this->bodyId = $bodyId;
  }

  /** @return string the external system's id */
  public function getExternalId() {
    return $this->externalId;
  }

  /** @param string $externalId the external system's id */
  public function setExternalId($externalId) {
    $this->externalId = $externalId;
  }

  /** @return string the activity id */
  public function getId() {
    return $this->id;
  }

  /** @param string $id the activity id */
  public function setId($id) {
    $this->id = $id;
  }

  /** @return array the attached media items */
  public function getMediaItems() {
    return $this->mediaItems;
  }

  /** @param array $mediaItems the attached media items */
  public function setMediaItems($mediaItems) {
    $this->mediaItems = $mediaItems;
  }

  /** @return int the posted time */
  public function getPostedTime() {
    return $this->postedTime;
  }

  /** @param int $postedTime the posted time */
  public function setPostedTime($postedTime) {
    $this->postedTime = $postedTime;
  }

  /** @return float the priority hint */
  public function getPriority() {
    return $this->priority;
  }

  /** @param float $priority the priority hint */
  public function setPriority($priority) {
    $this->priority = $priority;
  }

  /** @return string the stream favicon URL */
  public function getStreamFaviconUrl() {
    return $this->streamFaviconUrl;
  }

  /** @param string $streamFaviconUrl the stream favicon URL */
  public function setStreamFaviconUrl($streamFaviconUrl) {
    $this->streamFaviconUrl = $streamFaviconUrl;
  }

  /** @return string the stream source URL */
  public function getStreamSourceUrl() {
    return $this->streamSourceUrl;
  }

  /** @param string $streamSourceUrl the stream source URL */
  public function setStreamSourceUrl($streamSourceUrl) {
    $this->streamSourceUrl = $streamSourceUrl;
  }

  /** @return string the stream title */
  public function getStreamTitle() {
    return $this->streamTitle;
  }

  /** @param string $streamTitle the stream title */
  public function setStreamTitle($streamTitle) {
    $this->streamTitle = $streamTitle;
  }

  /** @return string the stream URL */
  public function getStreamUrl() {
    return $this->streamUrl;
  }

  /** @param string $streamUrl the stream URL */
  public function setStreamUrl($streamUrl) {
    $this->streamUrl = $streamUrl;
  }

  /** @return array the template parameters */
  public function getTemplateParams() {
    return $this->templateParams;
  }

  /** @param array $templateParams the template parameters */
  public function setTemplateParams($templateParams) {
    $this->templateParams = $templateParams;
  }

  /** @return string the (sanitized) activity title */
  public function getTitle() {
    return $this->title;
  }

  /**
   * Sets the title, stripping all HTML except a small whitelist of
   * inline tags — the only sanitizing accessor in this class.
   *
   * @param string $title raw title; tags other than b/i/a/span/img are removed
   */
  public function setTitle($title) {
    $this->title = strip_tags($title, '<b><i><a><span><img>');
  }

  /** @return string the title message-bundle id */
  public function getTitleId() {
    return $this->titleId;
  }

  /** @param string $titleId the title message-bundle id */
  public function setTitleId($titleId) {
    $this->titleId = $titleId;
  }

  /** @return string the activity URL */
  public function getUrl() {
    return $this->url;
  }

  /** @param string $url the activity URL */
  public function setUrl($url) {
    $this->url = $url;
  }

  /** @return string the owning user's id */
  public function getUserId() {
    return $this->userId;
  }

  /** @param string $userId the owning user's id */
  public function setUserId($userId) {
    $this->userId = $userId;
  }
}
| cripure/openpne3 | plugins/opOpenSocialPlugin/lib/vendor/Shindig/src/social/model/Activity.php | PHP | apache-2.0 | 4,000 |
/*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.openapi.module.impl.scopes;
import com.intellij.openapi.module.Module;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.projectRoots.Sdk;
import com.intellij.openapi.roots.*;
import com.intellij.openapi.roots.libraries.Library;
import com.intellij.openapi.util.Comparing;
import com.intellij.openapi.util.Condition;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.psi.search.GlobalSearchScope;
import com.intellij.util.containers.ContainerUtil;
import gnu.trove.THashSet;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.jetbrains.annotations.TestOnly;
import java.util.*;
/**
* @author max
*/
/**
 * A {@link GlobalSearchScope} covering the runtime classpath of a set of modules:
 * the source roots of the modules themselves plus the class roots of every
 * library and SDK reachable through their (recursive) order entries.
 *
 * <p>Fix applied: {@code @Override} was missing on all overriding methods except
 * one of the anonymous {@code RootPolicy} visitors; it is now applied uniformly
 * so the compiler verifies each override. No behavioral change.
 *
 * @author max
 */
public class LibraryRuntimeClasspathScope extends GlobalSearchScope {
  private final ProjectFileIndex myIndex;
  // Insertion order matters: compare() ranks files by the position of their root.
  private final LinkedHashSet<VirtualFile> myEntries = new LinkedHashSet<VirtualFile>();
  // Lazily computed; 0 means "not yet computed" (recomputed if the real hash is 0,
  // which is harmless).
  private int myCachedHashCode = 0;

  /**
   * Builds the scope for the given modules, walking their order entries
   * recursively while visiting each module, library and SDK at most once.
   *
   * @param project the containing project
   * @param modules modules whose runtime classpath the scope should cover
   */
  public LibraryRuntimeClasspathScope(final Project project, final List<Module> modules) {
    super(project);
    myIndex = ProjectRootManager.getInstance(project).getFileIndex();
    final Set<Sdk> processedSdk = new THashSet<Sdk>();
    final Set<Library> processedLibraries = new THashSet<Library>();
    final Set<Module> processedModules = new THashSet<Module>();
    // Prune recursion into modules we have already handled; all other entry
    // kinds are always accepted.
    final Condition<OrderEntry> condition = new Condition<OrderEntry>() {
      @Override
      public boolean value(OrderEntry orderEntry) {
        if (orderEntry instanceof ModuleOrderEntry) {
          final Module module = ((ModuleOrderEntry)orderEntry).getModule();
          return module != null && !processedModules.contains(module);
        }
        return true;
      }
    };
    for (Module module : modules) {
      buildEntries(module, processedModules, processedLibraries, processedSdk, condition);
    }
  }

  /**
   * Builds a scope containing only the class roots of a single library order entry.
   *
   * @param project the containing project
   * @param entry   the library entry whose class roots form the scope
   */
  public LibraryRuntimeClasspathScope(Project project, LibraryOrderEntry entry) {
    super(project);
    myIndex = ProjectRootManager.getInstance(project).getFileIndex();
    Collections.addAll(myEntries, entry.getRootFiles(OrderRootType.CLASSES));
  }

  @Override
  public int hashCode() {
    if (myCachedHashCode == 0) {
      myCachedHashCode = myEntries.hashCode();
    }
    return myCachedHashCode;
  }

  @Override
  public boolean equals(Object object) {
    if (object == this) return true;
    // Exact-class comparison (not instanceof): subclasses are never equal.
    if (object == null || object.getClass() != LibraryRuntimeClasspathScope.class) return false;
    final LibraryRuntimeClasspathScope that = (LibraryRuntimeClasspathScope)object;
    return that.myEntries.equals(myEntries);
  }

  /**
   * Collects roots for {@code module} into {@link #myEntries}: its source roots,
   * the class roots of its libraries and SDKs, and (recursively, subject to
   * {@code condition}) the source roots of dependent modules. The processed-sets
   * guarantee each library/SDK/module contributes only once.
   */
  private void buildEntries(@NotNull final Module module,
                            @NotNull final Set<Module> processedModules,
                            @NotNull final Set<Library> processedLibraries,
                            @NotNull final Set<Sdk> processedSdk,
                            Condition<OrderEntry> condition) {
    if (!processedModules.add(module)) return;

    ModuleRootManager.getInstance(module).orderEntries().recursively().satisfying(condition).process(new RootPolicy<LinkedHashSet<VirtualFile>>() {
      @Override
      public LinkedHashSet<VirtualFile> visitLibraryOrderEntry(final LibraryOrderEntry libraryOrderEntry,
                                                               final LinkedHashSet<VirtualFile> value) {
        final Library library = libraryOrderEntry.getLibrary();
        if (library != null && processedLibraries.add(library)) {
          ContainerUtil.addAll(value, libraryOrderEntry.getRootFiles(OrderRootType.CLASSES));
        }
        return value;
      }

      @Override
      public LinkedHashSet<VirtualFile> visitModuleSourceOrderEntry(final ModuleSourceOrderEntry moduleSourceOrderEntry,
                                                                    final LinkedHashSet<VirtualFile> value) {
        processedModules.add(moduleSourceOrderEntry.getOwnerModule());
        ContainerUtil.addAll(value, moduleSourceOrderEntry.getRootModel().getSourceRoots());
        return value;
      }

      @Override
      public LinkedHashSet<VirtualFile> visitModuleOrderEntry(ModuleOrderEntry moduleOrderEntry, LinkedHashSet<VirtualFile> value) {
        final Module depModule = moduleOrderEntry.getModule();
        if (depModule != null) {
          ContainerUtil.addAll(value, ModuleRootManager.getInstance(depModule).getSourceRoots());
        }
        return value;
      }

      @Override
      public LinkedHashSet<VirtualFile> visitJdkOrderEntry(final JdkOrderEntry jdkOrderEntry, final LinkedHashSet<VirtualFile> value) {
        final Sdk jdk = jdkOrderEntry.getJdk();
        if (jdk != null && processedSdk.add(jdk)) {
          ContainerUtil.addAll(value, jdkOrderEntry.getRootFiles(OrderRootType.CLASSES));
        }
        return value;
      }
    }, myEntries);
  }

  @Override
  public boolean contains(VirtualFile file) {
    return myEntries.contains(getFileRoot(file));
  }

  /**
   * Maps a file to the root under which the scope would have registered it:
   * the class root for library class files, the source root for content files,
   * and the class root for other files under library classes. The content check
   * deliberately precedes the generic library-classes check so that files in
   * both resolve to their source root. Returns null if no root applies.
   */
  @Nullable
  private VirtualFile getFileRoot(VirtualFile file) {
    if (myIndex.isLibraryClassFile(file)) {
      return myIndex.getClassRootForFile(file);
    }
    if (myIndex.isInContent(file)) {
      return myIndex.getSourceRootForFile(file);
    }
    if (myIndex.isInLibraryClasses(file)) {
      return myIndex.getClassRootForFile(file);
    }
    return null;
  }

  /**
   * Ranks files by the insertion order of their roots: the file whose root
   * appears earlier in {@link #myEntries} wins (returns 1), mirroring classpath
   * ordering. Returns 0 when neither root is present or both map to the same root.
   */
  @Override
  public int compare(VirtualFile file1, VirtualFile file2) {
    final VirtualFile r1 = getFileRoot(file1);
    final VirtualFile r2 = getFileRoot(file2);
    for (VirtualFile root : myEntries) {
      if (Comparing.equal(r1, root)) return 1;
      if (Comparing.equal(r2, root)) return -1;
    }
    return 0;
  }

  @TestOnly
  public List<VirtualFile> getRoots() {
    return new ArrayList<VirtualFile>(myEntries);
  }

  @Override
  public boolean isSearchInModuleContent(@NotNull Module aModule) {
    return false;
  }

  @Override
  public boolean isSearchInLibraries() {
    return true;
  }
}
| liveqmock/platform-tools-idea | platform/indexing-impl/src/com/intellij/openapi/module/impl/scopes/LibraryRuntimeClasspathScope.java | Java | apache-2.0 | 6,421 |
/**
* Copyright (C) 2013-2014 EaseMob Technologies. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*******************************************************************************
* Copyright 2011, 2012 Chris Banes.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package com.easemob.chatuidemo.widget.photoview;
import android.annotation.TargetApi;
import android.content.Context;
import android.os.Build.VERSION;
import android.os.Build.VERSION_CODES;
import android.widget.OverScroller;
import android.widget.Scroller;
/**
 * Version-independent wrapper around the framework scroller classes, letting
 * fling logic be written once against a single API regardless of platform level.
 */
public abstract class ScrollerProxy {

	/**
	 * Creates the scroller implementation matching the device's API level:
	 * {@link OverScroller} from Gingerbread (API 9) onwards, plain
	 * {@link Scroller} before that.
	 */
	public static ScrollerProxy getScroller(Context context) {
		return VERSION.SDK_INT < VERSION_CODES.GINGERBREAD
				? new PreGingerScroller(context)
				: new GingerScroller(context);
	}

	public abstract boolean computeScrollOffset();

	public abstract void fling(int startX, int startY, int velocityX, int velocityY, int minX, int maxX, int minY,
			int maxY, int overX, int overY);

	public abstract void forceFinished(boolean finished);

	public abstract int getCurrX();

	public abstract int getCurrY();

	/** Gingerbread-and-later implementation delegating to {@link OverScroller}. */
	@TargetApi(9)
	private static class GingerScroller extends ScrollerProxy {
		private final OverScroller scroller;

		public GingerScroller(Context context) {
			scroller = new OverScroller(context);
		}

		@Override
		public boolean computeScrollOffset() {
			return scroller.computeScrollOffset();
		}

		@Override
		public void fling(int startX, int startY, int velocityX, int velocityY, int minX, int maxX, int minY, int maxY,
				int overX, int overY) {
			scroller.fling(startX, startY, velocityX, velocityY, minX, maxX, minY, maxY, overX, overY);
		}

		@Override
		public void forceFinished(boolean finished) {
			scroller.forceFinished(finished);
		}

		@Override
		public int getCurrX() {
			return scroller.getCurrX();
		}

		@Override
		public int getCurrY() {
			return scroller.getCurrY();
		}
	}

	/** Pre-Gingerbread implementation backed by the plain {@link Scroller}. */
	private static class PreGingerScroller extends ScrollerProxy {
		private final Scroller scroller;

		public PreGingerScroller(Context context) {
			scroller = new Scroller(context);
		}

		@Override
		public boolean computeScrollOffset() {
			return scroller.computeScrollOffset();
		}

		@Override
		public void fling(int startX, int startY, int velocityX, int velocityY, int minX, int maxX, int minY, int maxY,
				int overX, int overY) {
			// Scroller has no overfling support, so overX/overY are dropped here,
			// matching the signature of the Gingerbread variant.
			scroller.fling(startX, startY, velocityX, velocityY, minX, maxX, minY, maxY);
		}

		@Override
		public void forceFinished(boolean finished) {
			scroller.forceFinished(finished);
		}

		@Override
		public int getCurrX() {
			return scroller.getCurrX();
		}

		@Override
		public int getCurrY() {
			return scroller.getCurrY();
		}
	}
}
| cf0566/CarMarket | src/com/easemob/chatuidemo/widget/photoview/ScrollerProxy.java | Java | apache-2.0 | 3,945 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.druid.sql.calcite.expression.builtin;
import org.apache.calcite.rex.RexCall;
import org.apache.calcite.rex.RexLiteral;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.druid.query.filter.DimFilter;
import org.apache.druid.query.filter.LikeDimFilter;
import org.apache.druid.segment.VirtualColumn;
import org.apache.druid.segment.column.RowSignature;
import org.apache.druid.sql.calcite.expression.DirectOperatorConversion;
import org.apache.druid.sql.calcite.expression.DruidExpression;
import org.apache.druid.sql.calcite.expression.Expressions;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.rel.VirtualColumnRegistry;
import javax.annotation.Nullable;
import java.util.List;
/**
 * Converts SQL {@code LIKE} calls into Druid's native {@link LikeDimFilter}.
 */
public class LikeOperatorConversion extends DirectOperatorConversion
{
  private static final SqlOperator SQL_FUNCTION = SqlStdOperatorTable.LIKE;

  public LikeOperatorConversion()
  {
    super(SQL_FUNCTION, "like");
  }

  @Override
  public SqlOperator calciteOperator()
  {
    return SQL_FUNCTION;
  }

  /**
   * Builds a {@link LikeDimFilter} for {@code expr LIKE pattern [ESCAPE esc]}.
   * Returns null when the matched expression has no Druid translation, or when
   * it is a complex expression and no virtual-column registry is available.
   */
  @Nullable
  @Override
  public DimFilter toDruidFilter(
      PlannerContext plannerContext,
      RowSignature rowSignature,
      @Nullable VirtualColumnRegistry virtualColumnRegistry,
      RexNode rexNode
  )
  {
    final List<RexNode> operands = ((RexCall) rexNode).getOperands();

    // Operand 0 is the expression being matched; bail out if it cannot be
    // translated into a Druid expression.
    final DruidExpression druidExpression = Expressions.toDruidExpression(
        plannerContext,
        rowSignature,
        operands.get(0)
    );
    if (druidExpression == null) {
      return null;
    }

    if (druidExpression.isSimpleExtraction()) {
      // Plain column reference (possibly with an extraction fn): filter it directly.
      return new LikeDimFilter(
          druidExpression.getSimpleExtraction().getColumn(),
          RexLiteral.stringValue(operands.get(1)),
          operands.size() > 2 ? RexLiteral.stringValue(operands.get(2)) : null,
          druidExpression.getSimpleExtraction().getExtractionFn()
      );
    }

    if (virtualColumnRegistry == null) {
      // Complex expression, but no registry to materialize it as a virtual column.
      return null;
    }

    // Register the expression as a virtual column and filter on its output name.
    final VirtualColumn virtualColumn = virtualColumnRegistry.getOrCreateVirtualColumnForExpression(
        plannerContext,
        druidExpression,
        operands.get(0).getType().getSqlTypeName()
    );
    return new LikeDimFilter(
        virtualColumn.getOutputName(),
        RexLiteral.stringValue(operands.get(1)),
        operands.size() > 2 ? RexLiteral.stringValue(operands.get(2)) : null,
        null
    );
  }
}
| implydata/druid | sql/src/main/java/org/apache/druid/sql/calcite/expression/builtin/LikeOperatorConversion.java | Java | apache-2.0 | 3,331 |
/// Copyright (c) 2009 Microsoft Corporation
///
/// Redistribution and use in source and binary forms, with or without modification, are permitted provided
/// that the following conditions are met:
/// * Redistributions of source code must retain the above copyright notice, this list of conditions and
/// the following disclaimer.
/// * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
/// the following disclaimer in the documentation and/or other materials provided with the distribution.
/// * Neither the name of Microsoft nor the names of its contributors may be used to
/// endorse or promote products derived from this software without specific prior written permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
/// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
/// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
/// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
/// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
/// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
/// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Test 15.2.3.6-3-37: Object.defineProperty must accept any object as the
// 'Attributes' argument — here a Number wrapper object — and read its
// 'enumerable' property through the ordinary [[Get]] path (ES5 8.10.5 step 3.a).
ES5Harness.registerTest({
  id: "15.2.3.6-3-37",
  path: "TestCases/chapter15/15.2/15.2.3/15.2.3.6/15.2.3.6-3-37.js",

  description: "Object.defineProperty - 'Attributes' is a Number object that uses Object's [[Get]] method to access the 'enumerable' property (8.10.5 step 3.a)",

  test: function testcase() {
    var obj = {};
    var accessed = false;
    // Descriptor is a Number object with an own 'enumerable' property;
    // defineProperty should honor it just like a plain object descriptor.
    var numObj = new Number(-2);
    numObj.enumerable = true;
    Object.defineProperty(obj, "property", numObj);
    // The property should show up in for-in iff 'enumerable' was honored.
    for (var prop in obj) {
      if (prop === "property") {
        accessed = true;
      }
    }
    return accessed;
  },

  precondition: function prereq() {
    return fnExists(Object.defineProperty);
  }
});
| hnafar/IronJS | Src/Tests/ietestcenter/chapter15/15.2/15.2.3/15.2.3.6/15.2.3.6-3-37.js | JavaScript | apache-2.0 | 2,322 |
# Apache Aurora development environment
This directory contains [Packer](https://packer.io) scripts
to build and distribute the base development environment for Aurora.
The goal of this environment is to pre-fetch dependencies and artifacts
needed for the integration test environment so that `vagrant up` is
cheap after the box has been fetched for the first time.
As dependencies (libraries, or external packages) of Aurora change, it
will be helpful to update this box and keep this cost low.
## Updating the box
1. Download [packer](https://www.packer.io/downloads.html)
2. Modify build scripts to make the changes you want
(e.g. install packages via `apt`)
3. Fetch the latest version of our base box
$ vagrant box update --box bento/ubuntu-16.04
The box will be stored in version-specific directory under
`~/.vagrant.d/boxes/bento-VAGRANTSLASH-ubuntu-16.04/`. Find the path to the `.ovf` file for the
latest version of the box. For the remainder of this document, this path will be referred to as
`$UBUNTU_OVF`.
4. Build the new box
Using the path from the previous step, run the following command to start the build.
$ packer build -var "base_box_ovf=$UBUNTU_OVF" aurora.json
This takes a while, approximately 20 minutes. When finished, your working directory will
contain a file named `packer_virtualbox-ovf_virtualbox.box`.
5. Verify your box locally
$ vagrant box add --name aurora-dev-env-testing \
packer_virtualbox-ovf_virtualbox.box
This will make a vagrant box named `aurora-dev-env-testing` locally available to vagrant
(i.e. not on Vagrant Cloud). We use a different name here to avoid confusion that could
arise from using an unreleased base box.
Edit the [`Vagrantfile`](../../Vagrantfile), changing the line
config.vm.box = "apache-aurora/dev-environment"
to
config.vm.box = "aurora-dev-env-testing"
    and comment out the box version line
# config.vm.box_version = "0.0.X"
At this point, you can use the box as normal to run integration tests.
6. Upload the box to Vagrant Cloud
Our boxes are stored in [Vagrant Cloud](https://vagrantcloud.com/apache-aurora/dev-environment).
    In order to upload a new version of our box, you must have committer access. If you
    would like to contribute a dev image box, please ask on the dev@aurora.apache.org
    mailing list or in our Slack channel. More info can be found by visiting our
[community page](http://aurora.apache.org/community/).
Once you have access to our Vagrant Cloud organization, a token can be generated by
going to your [security settings](https://app.vagrantup.com/settings/security).
Store the token in a safe place as it will be needed for future submissions.
Next, three environmental variables must be set: `$UBUNTU_OVF`, `$VAGRANT_CLOUD_TOKEN`,
and `$BOX_VERSION`.
$ export UBUNTU_OVF=<Location of base image (.ovf) on local machine>
$ export VAGRANT_CLOUD_TOKEN=<API Token from Hashicorp>
$ export BOX_VERSION=<SemVer to be given to this box>
**Make sure the variables are set correctly before proceeding as a mistake can cause
the very time consuming process to fail or clobber a previous box.**
$ env | grep -E "UBUNTU_OVF|VAGRANT_CLOUD_TOKEN|BOX_VERSION"
Then finally run the release packer configuration which will upload the vagrant box.
$ packer build aurora-release.json
Note: This process will rebuild the box and upload it to the Vagrant Cloud. Unfortunately,
there currently is no way to skip the build step as the output from the first post-processor
(Vagrant) is required for the second (Vagrant-Cloud).
You may now change the version in [`Vagrantfile`](../../Vagrantfile) to the one specified in
`$BOX_VERSION` and commit the change.
| apache/aurora | build-support/packer/README.md | Markdown | apache-2.0 | 3,869 |
/*
* Copyright (C) 2015 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect.testing;
import com.google.common.annotations.GwtIncompatible;
import java.util.List;
/**
* Creates, based on your criteria, a JUnit test suite that exhaustively tests
* a ConcurrentNavigableMap implementation.
*
* @author Louis Wasserman
*/
@GwtIncompatible
/**
 * Creates, based on your criteria, a JUnit test suite that exhaustively tests
 * a ConcurrentNavigableMap implementation.
 *
 * @author Louis Wasserman
 */
@GwtIncompatible
public class ConcurrentNavigableMapTestSuiteBuilder<K, V>
    extends NavigableMapTestSuiteBuilder<K, V> {
  /** Creates a builder driven by the given sorted-map generator. */
  public static <K, V> ConcurrentNavigableMapTestSuiteBuilder<K, V> using(
      TestSortedMapGenerator<K, V> generator) {
    ConcurrentNavigableMapTestSuiteBuilder<K, V> builder =
        new ConcurrentNavigableMapTestSuiteBuilder<K, V>();
    builder.usingGenerator(generator);
    return builder;
  }

  /** Runs all navigable-map testers plus the concurrent-map-specific ones. */
  @Override
  protected List<Class<? extends AbstractTester>> getTesters() {
    List<Class<? extends AbstractTester>> combined = Helpers.copyToList(super.getTesters());
    combined.addAll(ConcurrentMapTestSuiteBuilder.TESTERS);
    return combined;
  }

  @Override
  NavigableMapTestSuiteBuilder<K, V> subSuiteUsing(TestSortedMapGenerator<K, V> generator) {
    return using(generator);
  }
}
| DavesMan/guava | guava-testlib/src/com/google/common/collect/testing/ConcurrentNavigableMapTestSuiteBuilder.java | Java | apache-2.0 | 1,706 |
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test case for issue #2843.
//
// Pipe-protocol definition (pre-1.0 `proto!` macro): a single `open` state
// that sends one value of type T and transitions back to `open`, i.e. an
// infinite stream of T values.
proto! streamp (
    open:send<T:Send> {
        data(T) -> open<T>
    }
)
// Regression exercise for issue #2843 (filed as #2834): storing a protocol
// client endpoint inside a vector and then logging it. The server endpoint
// `s` is intentionally unused.
fn rendezvous() {
    let (s, c) = streamp::init();
    let streams: ~[streamp::client::open<int>] = ~[c];
    error!("%?", streams[0]);
}
// Test entry point: the test passes if rendezvous() compiles and runs.
pub fn main() {
    //os::getenv("FOO");
    rendezvous();
}
| j16r/rust | src/test/run-pass/issue-2834.rs | Rust | apache-2.0 | 780 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.pherf.rules;
import org.apache.phoenix.thirdparty.com.google.common.base.Preconditions;
import org.apache.phoenix.pherf.configuration.Column;
import org.apache.phoenix.pherf.configuration.DataSequence;
import org.apache.phoenix.pherf.configuration.DataTypeMapping;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Generates monotonically increasing integer values for a SEQUENTIAL column
 * rule, wrapping back to the rule's minimum once the maximum is passed.
 */
public class SequentialIntegerDataGenerator implements RuleBasedDataGenerator {
    private final Column columnRule;
    private final AtomicLong counter;
    private final long minValue;
    private final long maxValue;

    public SequentialIntegerDataGenerator(Column columnRule) {
        // Only valid for SEQUENTIAL rules over integer-typed columns.
        Preconditions.checkArgument(columnRule.getDataSequence() == DataSequence.SEQUENTIAL);
        Preconditions.checkArgument(isIntegerType(columnRule.getType()));
        this.columnRule = columnRule;
        this.minValue = columnRule.getMinValue();
        this.maxValue = columnRule.getMaxValue();
        this.counter = new AtomicLong(0);
    }

    /**
     * Note that this method rolls over for attempts to get larger than maxValue
     * @return new DataValue
     */
    @Override
    public DataValue getDataValue() {
        final long span = maxValue - minValue + 1;
        final long next = minValue + (counter.getAndIncrement() % span);
        return new DataValue(columnRule.getType(), String.valueOf(next));
    }

    // Probably could go into a util class in the future
    boolean isIntegerType(DataTypeMapping mapping) {
        return mapping == DataTypeMapping.BIGINT
                || mapping == DataTypeMapping.INTEGER
                || mapping == DataTypeMapping.TINYINT
                || mapping == DataTypeMapping.UNSIGNED_LONG;
    }
}
| ankitsinghal/phoenix | phoenix-pherf/src/main/java/org/apache/phoenix/pherf/rules/SequentialIntegerDataGenerator.java | Java | apache-2.0 | 2,431 |
package org.ovirt.engine.core.bll;
import org.ovirt.engine.core.common.action.VdcActionParametersBase;
/**
 * Common base class for configuration-related commands. Currently adds no
 * behavior beyond {@link CommandBase}; it serves as a shared parent type.
 *
 * @param <T> the parameters type for the concrete command
 */
public abstract class ConfigCommandBase<T extends VdcActionParametersBase> extends CommandBase<T> {
    protected ConfigCommandBase(T parameters) {
        super(parameters);
    }
}
| Dhandapani/gluster-ovirt | backend/manager/modules/bll/src/main/java/org/ovirt/engine/core/bll/ConfigCommandBase.java | Java | apache-2.0 | 288 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.infinispan.remote;
import org.apache.camel.test.infra.infinispan.services.InfinispanService;
import org.apache.camel.test.infra.infinispan.services.InfinispanServiceFactory;
import org.infinispan.client.hotrod.RemoteCache;
import org.infinispan.commons.api.BasicCache;
import org.infinispan.configuration.cache.CacheMode;
import org.jgroups.util.UUID;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.testcontainers.shaded.org.apache.commons.lang.SystemUtils;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Integration tests for {@link InfinispanRemoteConfiguration}: verifies that a
 * remote cache can be created and used when the client is configured purely
 * programmatically, and when extra settings come from a properties file.
 */
public class InfinispanRemoteConfigurationIT {
    @RegisterExtension
    static InfinispanService service = InfinispanServiceFactory.createService();

    @Test
    public void remoteCacheWithoutProperties() throws Exception {
        // Fully programmatic configuration: connection, credentials, and SASL
        // settings all set directly on the configuration object.
        InfinispanRemoteConfiguration configuration = new InfinispanRemoteConfiguration();
        configuration.setHosts(service.host() + ":" + service.port());
        configuration.setSecure(true);
        configuration.setUsername(service.username());
        configuration.setPassword(service.password());
        configuration.setSecurityServerName("infinispan");
        configuration.setSaslMechanism("DIGEST-MD5");
        configuration.setSecurityRealm("default");
        if (SystemUtils.IS_OS_MAC) {
            // Docker networking on macOS requires BASIC client intelligence.
            configuration.addConfigurationProperty(
                    "infinispan.client.hotrod.client_intelligence", "BASIC");
        }
        try (InfinispanRemoteManager manager = new InfinispanRemoteManager(configuration)) {
            manager.start();
            manager.getCacheContainer().administration()
                    .getOrCreateCache(
                            "misc_cache",
                            new org.infinispan.configuration.cache.ConfigurationBuilder()
                                    .clustering()
                                    .cacheMode(CacheMode.DIST_SYNC).build());
            BasicCache<Object, Object> cache = manager.getCache("misc_cache");
            assertNotNull(cache);
            assertTrue(cache instanceof RemoteCache);
            String key = UUID.randomUUID().toString();
            assertNull(cache.put(key, "val1"));
            // NOTE(review): the second put also asserts null here, while the
            // properties-based test below asserts non-null. Presumably the
            // properties file enables force_return_values so put returns the
            // previous value there but not here — confirm against the
            // infinispan/client*.properties files.
            assertNull(cache.put(key, "val2"));
        }
    }

    @Test
    public void remoteCacheWithProperties() throws Exception {
        // Same connection settings, but additional client behavior is loaded
        // from a properties file (platform-specific variant for macOS).
        InfinispanRemoteConfiguration configuration = new InfinispanRemoteConfiguration();
        configuration.setHosts(service.host() + ":" + service.port());
        configuration.setSecure(true);
        configuration.setUsername(service.username());
        configuration.setPassword(service.password());
        configuration.setSecurityServerName("infinispan");
        configuration.setSaslMechanism("DIGEST-MD5");
        configuration.setSecurityRealm("default");
        if (SystemUtils.IS_OS_MAC) {
            configuration.setConfigurationUri("infinispan/client-mac.properties");
        } else {
            configuration.setConfigurationUri("infinispan/client.properties");
        }
        try (InfinispanRemoteManager manager = new InfinispanRemoteManager(configuration)) {
            manager.start();
            manager.getCacheContainer().administration()
                    .getOrCreateCache(
                            "misc_cache",
                            new org.infinispan.configuration.cache.ConfigurationBuilder()
                                    .clustering()
                                    .cacheMode(CacheMode.DIST_SYNC).build());
            BasicCache<Object, Object> cache = manager.getCache("misc_cache");
            assertNotNull(cache);
            assertTrue(cache instanceof RemoteCache);
            String key = UUID.randomUUID().toString();
            assertNull(cache.put(key, "val1"));
            // Second put returns the previous value ("val1"), hence non-null.
            assertNotNull(cache.put(key, "val2"));
        }
    }
}
| christophd/camel | components/camel-infinispan/camel-infinispan/src/test/java/org/apache/camel/component/infinispan/remote/InfinispanRemoteConfigurationIT.java | Java | apache-2.0 | 4,826 |
package animate
import (
"testing"
)
// TestGeocode runs the Animate command against each sample query and checks
// that it returns a non-nil response without error.
//
// NOTE(review): the name looks copy-pasted from a geocode test; consider
// renaming it to TestAnimate. It presumably also hits an external service
// via command.Exec — confirm whether it needs network access to pass.
func TestGeocode(t *testing.T) {
	testData := []struct {
		text string
	}{
		{"funny cat"},
	}

	command := Animate()

	for _, d := range testData {
		rsp, err := command.Exec("animate", d.text)
		if err != nil {
			t.Fatal(err)
		}

		if rsp == nil {
			t.Fatal("expected result, got nil")
		}
	}
}
| yzprofile/go-plugins | micro/bot/command/animate/animate_test.go | GO | apache-2.0 | 340 |
// Copyright 2015 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.profiler;
import static com.google.common.truth.Truth.assertThat;
import static java.nio.charset.StandardCharsets.ISO_8859_1;
import static org.junit.Assert.fail;
import com.google.devtools.build.lib.clock.BlazeClock;
import com.google.devtools.build.lib.clock.Clock;
import com.google.devtools.build.lib.profiler.Profiler.ProfiledTaskKinds;
import com.google.devtools.build.lib.profiler.analysis.ProfileInfo;
import com.google.devtools.build.lib.testutil.FoundationTestCase;
import com.google.devtools.build.lib.testutil.ManualClock;
import com.google.devtools.build.lib.testutil.Suite;
import com.google.devtools.build.lib.testutil.TestSpec;
import com.google.devtools.build.lib.vfs.FileSystemUtils;
import com.google.devtools.build.lib.vfs.Path;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.Inflater;
import java.util.zip.InflaterInputStream;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/**
* Unit tests for the profiler.
*/
@TestSpec(size = Suite.MEDIUM_TESTS) // testConcurrentProfiling takes ~700ms, testProfiler 100ms.
@RunWith(JUnit4.class)
public class ProfilerTest extends FoundationTestCase {
  // Scratch directory where each test writes its profile file.
  private Path cacheDir;
  // The Profiler is a process-wide singleton; tests start/stop it explicitly.
  private Profiler profiler = Profiler.instance();
  // Manually advanced clock installed into BlazeClock so durations are deterministic.
  private ManualClock clock;
  /** Creates the scratch directory used for profile output files. */
  @Before
  public final void createCacheDirectory() throws Exception {
    cacheDir = scratch.dir("/tmp");
  }
  /** Installs a manual clock so each test can advance time deterministically. */
  @Before
  public final void setManualClock() {
    clock = new ManualClock();
    BlazeClock.setClock(clock);
  }
  /** The profiler reports itself active only between start() and stop(). */
  @Test
  public void testProfilerActivation() throws Exception {
    Path cacheFile = cacheDir.getRelative("profile1.dat");
    assertThat(profiler.isActive()).isFalse();
    profiler.start(ProfiledTaskKinds.ALL, cacheFile.getOutputStream(), "basic test", false,
        BlazeClock.instance(), BlazeClock.instance().nanoTime());
    assertThat(profiler.isActive()).isTrue();
    profiler.stop();
    assertThat(profiler.isActive()).isFalse();
  }
  /**
   * Recorded tasks round-trip through the profile file with their ids
   * (assigned sequentially starting at 1), types, and descriptions intact.
   */
  @Test
  public void testTaskDetails() throws Exception {
    Path cacheFile = cacheDir.getRelative("profile1.dat");
    profiler.start(ProfiledTaskKinds.ALL, cacheFile.getOutputStream(), "basic test", false,
        BlazeClock.instance(), BlazeClock.instance().nanoTime());
    profiler.startTask(ProfilerTask.ACTION, "action task");
    profiler.logEvent(ProfilerTask.TEST, "event");
    profiler.completeTask(ProfilerTask.ACTION);
    profiler.stop();
    // Read the profile back and verify both tasks were captured faithfully.
    ProfileInfo info = ProfileInfo.loadProfile(cacheFile);
    info.calculateStats();
    ProfileInfo.Task task = info.allTasksById.get(0);
    assertThat(task.id).isEqualTo(1);
    assertThat(task.type).isEqualTo(ProfilerTask.ACTION);
    assertThat(task.getDescription()).isEqualTo("action task");
    task = info.allTasksById.get(1);
    assertThat(task.id).isEqualTo(2);
    assertThat(task.type).isEqualTo(ProfilerTask.TEST);
    assertThat(task.getDescription()).isEqualTo("event");
  }
  /**
   * End-to-end check of task nesting and aggregation: short tasks are folded
   * into their parent's aggregate stats while long ones are kept as subtasks,
   * logging after stop() is ignored, and start times grow with task ids.
   */
  @Test
  public void testProfiler() throws Exception {
    Path cacheFile = cacheDir.getRelative("profile1.dat");
    profiler.start(ProfiledTaskKinds.ALL, cacheFile.getOutputStream(), "basic test", false,
        BlazeClock.instance(), BlazeClock.instance().nanoTime());
    profiler.logSimpleTask(BlazeClock.instance().nanoTime(),
                           ProfilerTask.PHASE, "profiler start");
    profiler.startTask(ProfilerTask.ACTION, "complex task");
    profiler.logEvent(ProfilerTask.PHASE, "event1");
    profiler.startTask(ProfilerTask.ACTION_CHECK, "complex subtask");
    // next task takes less than 10 ms and should be only aggregated
    profiler.logSimpleTask(BlazeClock.instance().nanoTime(),
                           ProfilerTask.VFS_STAT, "stat1");
    long startTime = BlazeClock.instance().nanoTime();
    clock.advanceMillis(20);
    // this one will take at least 20 ms and should be present
    profiler.logSimpleTask(startTime, ProfilerTask.VFS_STAT, "stat2");
    profiler.completeTask(ProfilerTask.ACTION_CHECK);
    profiler.completeTask(ProfilerTask.ACTION);
    profiler.stop();
    // all other calls to profiler should be ignored
    profiler.logEvent(ProfilerTask.PHASE, "should be ignored");
    // normally this would cause an exception but it is ignored since profiler
    // is disabled
    profiler.completeTask(ProfilerTask.ACTION_EXECUTE);

    ProfileInfo info = ProfileInfo.loadProfile(cacheFile);
    info.calculateStats();
    assertThat(info.allTasksById).hasSize(6); // only 5 tasks + finalization should be recorded

    ProfileInfo.Task task = info.allTasksById.get(0);
    assertThat(task.stats.isEmpty()).isTrue();

    task = info.allTasksById.get(1);
    // Count how many distinct child task types the "complex task" aggregated.
    int count = 0;
    for (ProfileInfo.AggregateAttr attr : task.getStatAttrArray()) {
      if (attr != null) {
        count++;
      }
    }
    assertThat(count).isEqualTo(2); // only children are GENERIC and ACTION_CHECK
    assertThat(ProfilerTask.TASK_COUNT).isEqualTo(task.aggregatedStats.toArray().length);
    assertThat(task.aggregatedStats.getAttr(ProfilerTask.VFS_STAT).count).isEqualTo(2);

    task = info.allTasksById.get(2);
    assertThat(task.durationNanos).isEqualTo(0);

    task = info.allTasksById.get(3);
    // Both stats are counted, but only the slow one ("stat2") survives as a subtask.
    assertThat(task.stats.getAttr(ProfilerTask.VFS_STAT).count).isEqualTo(2);
    assertThat(task.subtasks).hasLength(1);
    assertThat(task.subtasks[0].getDescription()).isEqualTo("stat2");
    // assert that startTime grows with id
    long time = -1;
    for (ProfileInfo.Task t : info.allTasksById) {
      assertThat(t.startTime).isAtLeast(time);
      time = t.startTime;
    }
  }
  /**
   * When started with the record-all flag (the {@code true} argument to
   * start()), even tasks shorter than their type's minimum duration are kept.
   */
  @Test
  public void testProfilerRecordingAllEvents() throws Exception {
    Path cacheFile = cacheDir.getRelative("profile1.dat");
    profiler.start(ProfiledTaskKinds.ALL, cacheFile.getOutputStream(), "basic test", true,
        BlazeClock.instance(), BlazeClock.instance().nanoTime());
    profiler.startTask(ProfilerTask.ACTION, "action task");
    // Next task takes less than 10 ms but should be recorded anyway.
    clock.advanceMillis(1);
    profiler.logSimpleTask(BlazeClock.instance().nanoTime(), ProfilerTask.VFS_STAT, "stat1");
    profiler.completeTask(ProfilerTask.ACTION);
    profiler.stop();
    ProfileInfo info = ProfileInfo.loadProfile(cacheFile);
    info.calculateStats();
    assertThat(info.allTasksById).hasSize(3); // 2 tasks + finalization should be recorded
    ProfileInfo.Task task = info.allTasksById.get(1);
    assertThat(task.type).isEqualTo(ProfilerTask.VFS_STAT);
    // Check that task would have been dropped if profiler was not configured to record everything.
    assertThat(task.durationNanos).isLessThan(ProfilerTask.VFS_STAT.minDuration);
  }
  /**
   * With ProfiledTaskKinds.SLOWEST, only task types that collect slowest
   * instances (e.g. VFS_STAT) are profiled; other types are dropped entirely.
   */
  @Test
  public void testProfilerRecordingOnlySlowestEvents() throws Exception {
    Path profileData = cacheDir.getRelative("foo");
    profiler.start(ProfiledTaskKinds.SLOWEST, profileData.getOutputStream(), "test", true,
        BlazeClock.instance(), BlazeClock.instance().nanoTime());
    profiler.logSimpleTask(10000, 20000, ProfilerTask.VFS_STAT, "stat");
    profiler.logSimpleTask(20000, 30000, ProfilerTask.REMOTE_EXECUTION, "remote execution");
    assertThat(profiler.isProfiling(ProfilerTask.VFS_STAT)).isTrue();
    assertThat(profiler.isProfiling(ProfilerTask.REMOTE_EXECUTION)).isFalse();
    profiler.stop();
    ProfileInfo info = ProfileInfo.loadProfile(profileData);
    info.calculateStats();
    assertThat(info.allTasksById).hasSize(1); // only VFS_STAT task should be recorded
    ProfileInfo.Task task = info.allTasksById.get(0);
    assertThat(task.type).isEqualTo(ProfilerTask.VFS_STAT);
  }
  /** With ProfiledTaskKinds.NONE, nothing is profiled or written at all. */
  @Test
  public void testProfilerRecordsNothing() throws Exception {
    Path profileData = cacheDir.getRelative("foo");
    profiler.start(ProfiledTaskKinds.NONE, profileData.getOutputStream(), "test", true,
        BlazeClock.instance(), BlazeClock.instance().nanoTime());
    profiler.logSimpleTask(10000, 20000, ProfilerTask.VFS_STAT, "stat");
    // VFS_STAT would normally be kept even in SLOWEST mode, so this shows
    // that NONE disables profiling for every task kind.
    assertThat(ProfilerTask.VFS_STAT.collectsSlowestInstances()).isTrue();
    assertThat(profiler.isProfiling(ProfilerTask.VFS_STAT)).isFalse();
    profiler.stop();
    ProfileInfo info = ProfileInfo.loadProfile(profileData);
    info.calculateStats();
    assertThat(info.allTasksById).isEmpty();
  }
  /**
   * Completing a task of a different type than the one on top of the task
   * stack must throw IllegalStateException while the profiler is active.
   */
  @Test
  public void testInconsistentCompleteTask() throws Exception {
    Path cacheFile = cacheDir.getRelative("profile2.dat");
    profiler.start(ProfiledTaskKinds.ALL, cacheFile.getOutputStream(),
        "task stack inconsistency test", false,
        BlazeClock.instance(), BlazeClock.instance().nanoTime());
    profiler.startTask(ProfilerTask.PHASE, "some task");
    try {
      profiler.completeTask(ProfilerTask.ACTION);
      fail();
    } catch (IllegalStateException e) {
      // this is expected
    }
    profiler.stop();
  }
  /**
   * Two worker threads log 10000 events each while the main thread records a
   * surrounding task; verifies per-thread task attribution and that the main
   * thread's bracketing events order correctly around all worker events.
   */
  @Test
  public void testConcurrentProfiling() throws Exception {
    Path cacheFile = cacheDir.getRelative("profile3.dat");
    profiler.start(ProfiledTaskKinds.ALL, cacheFile.getOutputStream(), "concurrent test", false,
        BlazeClock.instance(), BlazeClock.instance().nanoTime());

    long id = Thread.currentThread().getId();
    Thread thread1 = new Thread() {
      @Override public void run() {
        for (int i = 0; i < 10000; i++) {
          Profiler.instance().logEvent(ProfilerTask.TEST, "thread1");
        }
      }
    };
    long id1 = thread1.getId();
    Thread thread2 = new Thread() {
      @Override public void run() {
        for (int i = 0; i < 10000; i++) {
          Profiler.instance().logEvent(ProfilerTask.TEST, "thread2");
        }
      }
    };
    long id2 = thread2.getId();

    profiler.startTask(ProfilerTask.PHASE, "main task");
    profiler.logEvent(ProfilerTask.TEST, "starting threads");
    thread1.start();
    thread2.start();
    thread2.join();
    thread1.join();
    profiler.logEvent(ProfilerTask.TEST, "joined");
    profiler.completeTask(ProfilerTask.PHASE);
    profiler.stop();

    ProfileInfo info = ProfileInfo.loadProfile(cacheFile);
    info.calculateStats();
    info.analyzeRelationships();
    assertThat(info.allTasksById).hasSize(4 + 10000 + 10000); // total number of tasks
    assertThat(info.tasksByThread).hasSize(3); // total number of threads
    // while main thread had 3 tasks, 2 of them were nested, so tasksByThread
    // would contain only one "main task" task
    assertThat(info.tasksByThread.get(id)).hasLength(2);
    ProfileInfo.Task mainTask = info.tasksByThread.get(id)[0];
    assertThat(mainTask.getDescription()).isEqualTo("main task");
    assertThat(mainTask.subtasks).hasLength(2);
    // other threads had 10000 independent recorded tasks each
    assertThat(info.tasksByThread.get(id1)).hasLength(10000);
    assertThat(info.tasksByThread.get(id2)).hasLength(10000);
    // "starting threads" precedes every worker event; "joined" follows them all.
    int startId = mainTask.subtasks[0].id; // id of "starting threads"
    int endId = mainTask.subtasks[1].id; // id of "joining"
    assertThat(startId).isLessThan(info.tasksByThread.get(id1)[0].id);
    assertThat(startId).isLessThan(info.tasksByThread.get(id2)[0].id);
    assertThat(endId).isGreaterThan(info.tasksByThread.get(id1)[9999].id);
    assertThat(endId).isGreaterThan(info.tasksByThread.get(id2)[9999].id);
  }
  @Test
  public void testPhaseTasks() throws Exception {
    // Verifies that phase markers partition recorded tasks correctly, including tasks
    // logged by other threads and a phase marker emitted from a non-main thread.
    Path cacheFile = cacheDir.getRelative("profile4.dat");
    profiler.start(ProfiledTaskKinds.ALL, cacheFile.getOutputStream(), "phase test", false,
        BlazeClock.instance(), BlazeClock.instance().nanoTime());
    Thread thread1 = new Thread() {
      @Override public void run() {
        for (int i = 0; i < 100; i++) {
          Profiler.instance().logEvent(ProfilerTask.TEST, "thread1");
        }
      }
    };
    profiler.markPhase(ProfilePhase.INIT); // Empty phase.
    profiler.markPhase(ProfilePhase.LOAD);
    thread1.start();
    thread1.join();
    clock.advanceMillis(1);
    profiler.markPhase(ProfilePhase.ANALYZE);
    // thread2 both records tasks and switches the profiler into the EXECUTE phase,
    // demonstrating that markPhase works from a non-main thread.
    Thread thread2 = new Thread() {
      @Override public void run() {
        profiler.startTask(ProfilerTask.TEST, "complex task");
        for (int i = 0; i < 100; i++) {
          Profiler.instance().logEvent(ProfilerTask.TEST, "thread2a");
        }
        profiler.completeTask(ProfilerTask.TEST);
        profiler.markPhase(ProfilePhase.EXECUTE);
        for (int i = 0; i < 100; i++) {
          Profiler.instance().logEvent(ProfilerTask.TEST, "thread2b");
        }
      }
    };
    thread2.start();
    thread2.join();
    profiler.logEvent(ProfilerTask.TEST, "last task");
    clock.advanceMillis(1);
    profiler.stop();
    ProfileInfo info = ProfileInfo.loadProfile(cacheFile);
    info.calculateStats();
    info.analyzeRelationships();
    // number of tasks: INIT(1) + LOAD(1) + Thread1.TEST(100) + ANALYZE(1)
    // + Thread2a.TEST(100) + TEST(1) + EXECUTE(1) + Thread2b.TEST(100) + TEST(1) + INFO(1)
    assertThat(info.allTasksById).hasSize(1 + 1 + 100 + 1 + 100 + 1 + 1 + 100 + 1 + 1);
    assertThat(info.tasksByThread).hasSize(3); // total number of threads
    // Phase0 contains only itself
    ProfileInfo.Task p0 = info.getPhaseTask(ProfilePhase.INIT);
    assertThat(info.getTasksForPhase(p0)).hasSize(1);
    // Phase1 contains itself and 100 TEST "thread1" tasks
    ProfileInfo.Task p1 = info.getPhaseTask(ProfilePhase.LOAD);
    assertThat(info.getTasksForPhase(p1)).hasSize(101);
    // Phase2 contains itself and 1 "complex task"
    ProfileInfo.Task p2 = info.getPhaseTask(ProfilePhase.ANALYZE);
    assertThat(info.getTasksForPhase(p2)).hasSize(2);
    // Phase3 contains itself, 100 TEST "thread2b" tasks and "last task"
    ProfileInfo.Task p3 = info.getPhaseTask(ProfilePhase.EXECUTE);
    assertThat(info.getTasksForPhase(p3)).hasSize(103);
  }
  @Test
  public void testCorruptedFile() throws Exception {
    // Verifies that a truncated profile file is detected as corrupted/incomplete while
    // the fully-parseable prefix of tasks is still loaded.
    Path cacheFile = cacheDir.getRelative("profile5.dat");
    profiler.start(ProfiledTaskKinds.ALL, cacheFile.getOutputStream(), "phase test", false,
        BlazeClock.instance(), BlazeClock.instance().nanoTime());
    for (int i = 0; i < 100; i++) {
      profiler.startTask(ProfilerTask.TEST, "outer task " + i);
      clock.advanceMillis(1);
      profiler.logEvent(ProfilerTask.TEST, "inner task " + i);
      profiler.completeTask(ProfilerTask.TEST);
    }
    profiler.stop();
    ProfileInfo info = ProfileInfo.loadProfile(cacheFile);
    info.calculateStats();
    assertThat(info.isCorruptedOrIncomplete()).isFalse();
    // Truncate the valid profile to its first 2000 bytes to simulate corruption.
    Path corruptedFile = cacheDir.getRelative("profile5bad.dat");
    FileSystemUtils.writeContent(
        corruptedFile, Arrays.copyOf(FileSystemUtils.readContent(cacheFile), 2000));
    info = ProfileInfo.loadProfile(corruptedFile);
    info.calculateStats();
    assertThat(info.isCorruptedOrIncomplete()).isTrue();
    // Since root tasks will appear after nested tasks in the profile file and
    // we have exactly one nested task for each root task, the following will always
    // be true for our corrupted file:
    // 0 <= number_of_all_tasks - 2*number_of_root_tasks <= 1
    assertThat(info.allTasksById.size() / 2).isEqualTo(info.rootTasksById.size());
  }
  @Test
  public void testUnsupportedProfilerRecord() throws Exception {
    // Verifies that records with an unrecognized task-type name are classified as
    // UNKNOWN rather than making the whole profile unreadable. The profile is
    // deflate-compressed, so it is inflated, patched as text, and re-deflated.
    Path dataFile = cacheDir.getRelative("profile5.dat");
    profiler.start(ProfiledTaskKinds.ALL, dataFile.getOutputStream(), "phase test", false,
        BlazeClock.instance(), BlazeClock.instance().nanoTime());
    profiler.startTask(ProfilerTask.TEST, "outer task");
    profiler.logEvent(ProfilerTask.EXCEPTION, "inner task");
    profiler.completeTask(ProfilerTask.TEST);
    profiler.startTask(ProfilerTask.SCANNER, "outer task 2");
    profiler.logSimpleTask(Profiler.nanoTimeMaybe(), ProfilerTask.TEST, "inner task 2");
    profiler.completeTask(ProfilerTask.SCANNER);
    profiler.stop();
    // Validate our test profile.
    ProfileInfo info = ProfileInfo.loadProfile(dataFile);
    info.calculateStats();
    assertThat(info.isCorruptedOrIncomplete()).isFalse();
    assertThat(info.getStatsForType(ProfilerTask.TEST, info.rootTasksById).count).isEqualTo(2);
    assertThat(info.getStatsForType(ProfilerTask.UNKNOWN, info.rootTasksById).count).isEqualTo(0);
    // Now replace "TEST" type with something unsupported - e.g. "XXXX".
    InputStream in = new InflaterInputStream(dataFile.getInputStream(), new Inflater(false), 65536);
    byte[] buffer = new byte[60000];
    int len = in.read(buffer);
    in.close();
    assertThat(len).isLessThan(buffer.length); // Validate that file was completely decoded.
    // ISO-8859-1 maps bytes 1:1 to chars, so the round-trip below is lossless.
    String content = new String(buffer, ISO_8859_1);
    int infoIndex = content.indexOf("TEST");
    assertThat(infoIndex).isGreaterThan(0);
    content = content.substring(0, infoIndex) + "XXXX" + content.substring(infoIndex + 4);
    OutputStream out = new DeflaterOutputStream(dataFile.getOutputStream(),
        new Deflater(Deflater.BEST_SPEED, false), 65536);
    out.write(content.getBytes(ISO_8859_1));
    out.close();
    // Validate that XXXX records were classified as UNKNOWN.
    info = ProfileInfo.loadProfile(dataFile);
    info.calculateStats();
    assertThat(info.isCorruptedOrIncomplete()).isFalse();
    assertThat(info.getStatsForType(ProfilerTask.TEST, info.rootTasksById).count).isEqualTo(0);
    assertThat(info.getStatsForType(ProfilerTask.SCANNER, info.rootTasksById).count).isEqualTo(1);
    assertThat(info.getStatsForType(ProfilerTask.EXCEPTION, info.rootTasksById).count).isEqualTo(1);
    assertThat(info.getStatsForType(ProfilerTask.UNKNOWN, info.rootTasksById).count).isEqualTo(2);
  }
  @Test
  public void testResilenceToNonDecreasingNanoTimes() throws Exception {
    // Verifies that the profiler does not crash when the clock's nanoTime() goes
    // backwards (each call returns a strictly smaller value than the previous one).
    final long initialNanoTime = BlazeClock.instance().nanoTime();
    final AtomicInteger numNanoTimeCalls = new AtomicInteger(0);
    Clock badClock = new Clock() {
      @Override
      public long currentTimeMillis() {
        return BlazeClock.instance().currentTimeMillis();
      }
      @Override
      public long nanoTime() {
        // Strictly decreasing: subtracts one more on every call.
        return initialNanoTime - numNanoTimeCalls.addAndGet(1);
      }
    };
    Path cacheFile = cacheDir.getRelative("profile1.dat");
    profiler.start(ProfiledTaskKinds.ALL, cacheFile.getOutputStream(),
        "testResilenceToNonDecreasingNanoTimes", false, badClock, initialNanoTime);
    // The mere absence of an exception here is the assertion of this test.
    profiler.logSimpleTask(badClock.nanoTime(), ProfilerTask.TEST, "some task");
    profiler.stop();
  }
}
| damienmg/bazel | src/test/java/com/google/devtools/build/lib/profiler/ProfilerTest.java | Java | apache-2.0 | 19,092 |
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.runtimefields.query;
import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.automaton.Automata;
import org.apache.lucene.util.automaton.ByteRunAutomaton;
import org.elasticsearch.script.Script;
import org.elasticsearch.xpack.runtimefields.mapper.StringFieldScript;
import java.util.List;
import java.util.Objects;
/**
 * A string range query backed by a runtime-field script: a document matches when the
 * script emits at least one value that falls between {@code lowerValue} and
 * {@code upperValue}, with bound inclusivity controlled by {@code includeLower} and
 * {@code includeUpper}.
 */
public class StringScriptFieldRangeQuery extends AbstractStringScriptFieldQuery {
    private final String lowerValue;
    private final String upperValue;
    private final boolean includeLower;
    private final boolean includeUpper;

    public StringScriptFieldRangeQuery(
        Script script,
        StringFieldScript.LeafFactory leafFactory,
        String fieldName,
        String lowerValue,
        String upperValue,
        boolean includeLower,
        boolean includeUpper
    ) {
        super(script, leafFactory, fieldName);
        // Both bounds are mandatory; unbounded ranges are expressed by the caller
        // with sentinel values rather than null.
        this.lowerValue = Objects.requireNonNull(lowerValue);
        this.upperValue = Objects.requireNonNull(upperValue);
        this.includeLower = includeLower;
        this.includeUpper = includeUpper;
        assert lowerValue.compareTo(upperValue) <= 0;
    }

    @Override
    protected boolean matches(List<String> values) {
        // A single in-range value is enough for the document to match.
        for (String candidate : values) {
            if (aboveLowerBound(candidate) && belowUpperBound(candidate)) {
                return true;
            }
        }
        return false;
    }

    /** True when {@code candidate} satisfies the lower bound of the range. */
    private boolean aboveLowerBound(String candidate) {
        int cmp = lowerValue.compareTo(candidate);
        return includeLower ? cmp <= 0 : cmp < 0;
    }

    /** True when {@code candidate} satisfies the upper bound of the range. */
    private boolean belowUpperBound(String candidate) {
        int cmp = upperValue.compareTo(candidate);
        return includeUpper ? cmp >= 0 : cmp > 0;
    }

    @Override
    public void visit(QueryVisitor visitor) {
        if (visitor.acceptField(fieldName())) {
            // Expose the range as a binary-interval automaton so term-collecting
            // visitors (e.g. highlighting) can see which terms would match.
            visitor.consumeTermsMatching(
                this,
                fieldName(),
                () -> new ByteRunAutomaton(
                    Automata.makeBinaryInterval(new BytesRef(lowerValue), includeLower, new BytesRef(upperValue), includeUpper)
                )
            );
        }
    }

    @Override
    public final String toString(String field) {
        // Render in Lucene's classic range syntax: [a TO b] / {a TO b}.
        StringBuilder builder = new StringBuilder();
        if (false == fieldName().contentEquals(field)) {
            builder.append(fieldName()).append(':');
        }
        return builder.append(includeLower ? '[' : '{')
            .append(lowerValue)
            .append(" TO ")
            .append(upperValue)
            .append(includeUpper ? ']' : '}')
            .toString();
    }

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), lowerValue, upperValue, includeLower, includeUpper);
    }

    @Override
    public boolean equals(Object obj) {
        if (false == super.equals(obj)) {
            return false;
        }
        StringScriptFieldRangeQuery other = (StringScriptFieldRangeQuery) obj;
        return includeLower == other.includeLower
            && includeUpper == other.includeUpper
            && lowerValue.equals(other.lowerValue)
            && upperValue.equals(other.upperValue);
    }

    String lowerValue() {
        return lowerValue;
    }

    String upperValue() {
        return upperValue;
    }

    boolean includeLower() {
        return includeLower;
    }

    boolean includeUpper() {
        return includeUpper;
    }
}
| nknize/elasticsearch | x-pack/plugin/runtime-fields/src/main/java/org/elasticsearch/xpack/runtimefields/query/StringScriptFieldRangeQuery.java | Java | apache-2.0 | 3,629 |
/*
* Copyright 2014 Dominick Baier, Brock Allen
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using IdentityManager.Api.Models;
using System;
using System.Linq;
namespace IdentityManager
{
static class IdentityManagerResultExtensions
{
public static ErrorModel ToError(this IdentityManagerResult result)
{
if (result == null) throw new ArgumentNullException("result");
return new ErrorModel
{
Errors = result.Errors.ToArray()
};
}
}
}
| Ernesto99/IdentityManager | source/Core/Extensions/IdentityManagerResultExtensions.cs | C# | apache-2.0 | 1,056 |
/*
* Copyright © 2013-2018 camunda services GmbH and various authors (info@camunda.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.container.impl.jboss.deployment.marker;
import java.util.List;
import org.camunda.bpm.application.AbstractProcessApplication;
import org.camunda.bpm.application.impl.metadata.spi.ProcessesXml;
import org.camunda.bpm.container.impl.jboss.util.ProcessesXmlWrapper;
import org.jboss.as.ee.component.ComponentDescription;
import org.jboss.as.server.deployment.AttachmentKey;
import org.jboss.as.server.deployment.AttachmentList;
import org.jboss.as.server.deployment.DeploymentUnit;
import org.jboss.jandex.AnnotationInstance;
/**
*
* @author Daniel Meyer
*
*/
/**
 * Static utility for storing and retrieving process-application related state on a
 * JBoss {@link DeploymentUnit} via typed attachment keys. All state lives on the
 * deployment unit itself; this class holds no mutable state of its own.
 */
public class ProcessApplicationAttachments {

  // Marks a deployment unit that is itself a process application.
  private static final AttachmentKey<Boolean> MARKER = AttachmentKey.create(Boolean.class);
  // Marks a parent deployment unit that contains a process application.
  private static final AttachmentKey<Boolean> PART_OF_MARKER = AttachmentKey.create(Boolean.class);
  // Parsed processes.xml descriptors found in the deployment.
  private static final AttachmentKey<AttachmentList<ProcessesXmlWrapper>> PROCESSES_XML_LIST = AttachmentKey.createList(ProcessesXmlWrapper.class);
  // EE component description of the process application class.
  private static final AttachmentKey<ComponentDescription> PA_COMPONENT = AttachmentKey.create(ComponentDescription.class);
  // Jandex annotation instances for the lifecycle callback methods.
  private static final AttachmentKey<AnnotationInstance> POST_DEPLOY_METHOD = AttachmentKey.create(AnnotationInstance.class);
  private static final AttachmentKey<AnnotationInstance> PRE_UNDEPLOY_METHOD = AttachmentKey.create(AnnotationInstance.class);

  /**
   * Attach the parsed ProcessesXml file to a deployment unit.
   *
   */
  public static void addProcessesXml(DeploymentUnit unit, ProcessesXmlWrapper processesXmlWrapper) {
    unit.addToAttachmentList(PROCESSES_XML_LIST, processesXmlWrapper);
  }

  /**
   * Returns the attached {@link ProcessesXml} wrappers, or an empty list if none
   * were attached.
   *
   */
  public static List<ProcessesXmlWrapper> getProcessesXmls(DeploymentUnit deploymentUnit) {
    return deploymentUnit.getAttachmentList(PROCESSES_XML_LIST);
  }

  /**
   * Marks a {@link DeploymentUnit} as a process application.
   */
  public static void mark(DeploymentUnit unit) {
    unit.putAttachment(MARKER, Boolean.TRUE);
  }

  /**
   * Marks the parent of a {@link DeploymentUnit} as containing a process application.
   * A no-op for top-level deployment units (no parent, or parent is the unit itself).
   */
  public static void markPartOfProcessApplication(DeploymentUnit unit) {
    if(unit.getParent() != null && unit.getParent() != unit) {
      unit.getParent().putAttachment(PART_OF_MARKER, Boolean.TRUE);
    }
  }

  /**
   * return true if the deployment unit is either itself a process
   * application or part of a process application.
   */
  public static boolean isPartOfProcessApplication(DeploymentUnit unit) {
    if(isProcessApplication(unit)) {
      return true;
    }
    if(unit.getParent() != null && unit.getParent() != unit) {
      return unit.getParent().hasAttachment(PART_OF_MARKER);
    }
    return false;
  }

  /**
   * Returns true if the {@link DeploymentUnit} itself is a process application (carries a processes.xml)
   *
   */
  public static boolean isProcessApplication(DeploymentUnit deploymentUnit) {
    return deploymentUnit.hasAttachment(MARKER);
  }

  /**
   * Returns the {@link ComponentDescription} for the {@link AbstractProcessApplication} component
   */
  public static ComponentDescription getProcessApplicationComponent(DeploymentUnit deploymentUnit) {
    return deploymentUnit.getAttachment(PA_COMPONENT);
  }

  /**
   * Attach the {@link ComponentDescription} for the {@link AbstractProcessApplication} component
   */
  public static void attachProcessApplicationComponent(DeploymentUnit deploymentUnit, ComponentDescription componentDescription){
    deploymentUnit.putAttachment(PA_COMPONENT, componentDescription);
  }

  /**
   * Attach the {@link AnnotationInstance} for the PostDeploy method
   */
  public static void attachPostDeployDescription(DeploymentUnit deploymentUnit, AnnotationInstance annotation){
    deploymentUnit.putAttachment(POST_DEPLOY_METHOD, annotation);
  }

  /**
   * Attach the {@link AnnotationInstance} for the PreUndeploy method
   */
  public static void attachPreUndeployDescription(DeploymentUnit deploymentUnit, AnnotationInstance annotation){
    deploymentUnit.putAttachment(PRE_UNDEPLOY_METHOD, annotation);
  }

  /**
   * @return the description of the PostDeploy method, or null if none was attached
   */
  public static AnnotationInstance getPostDeployDescription(DeploymentUnit deploymentUnit) {
    return deploymentUnit.getAttachment(POST_DEPLOY_METHOD);
  }

  /**
   * @return the description of the PreUndeploy method, or null if none was attached
   */
  public static AnnotationInstance getPreUndeployDescription(DeploymentUnit deploymentUnit) {
    return deploymentUnit.getAttachment(PRE_UNDEPLOY_METHOD);
  }

  // Utility class: prevent instantiation.
  private ProcessApplicationAttachments() {
  }

}
| xasx/camunda-bpm-platform | distro/wildfly8/subsystem/src/main/java/org/camunda/bpm/container/impl/jboss/deployment/marker/ProcessApplicationAttachments.java | Java | apache-2.0 | 5,340 |
/* Copyright 2010, Object Management Group, Inc.
* Copyright 2010, PrismTech, Inc.
* Copyright 2010, Real-Time Innovations, Inc.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.omg.dds.type.typeobject;
import java.util.List;
import org.omg.dds.type.Extensibility;
import org.omg.dds.type.ID;
import org.omg.dds.type.Nested;
@Extensibility(Extensibility.Kind.MUTABLE_EXTENSIBILITY)
@Nested
/**
 * Type-object representation of an IDL union type: a {@link Type} whose state is
 * the list of its {@link UnionMember}s.
 */
public interface UnionType extends Type
{
    // -----------------------------------------------------------------------
    // Properties
    // -----------------------------------------------------------------------

    /** The members of this union, identified by member ID 100. */
    @ID(MemberId.MEMBER_UNIONTYPE_MEMBER_ID)
    public List<UnionMember> getMember();


    // -----------------------------------------------------------------------
    // Types
    // -----------------------------------------------------------------------

    /** Namespace for the member-ID constants used by this interface. */
    public static final class MemberId
    {
        // --- Constants: ----------------------------------------------------
        public static final int MEMBER_UNIONTYPE_MEMBER_ID = 100;

        // --- Constructor: --------------------------------------------------
        // Constant holder: not instantiable.
        private MemberId() {
            // empty
        }
    }
}
| steveturner/datadistrib4j | srcJava/org/omg/dds/type/typeobject/UnionType.java | Java | apache-2.0 | 1,765 |
/*-------------------------------------------------------------------------
*
* ipc.h
* POSTGRES inter-process communication definitions.
*
* This file is misnamed, as it no longer has much of anything directly
* to do with IPC. The functionality here is concerned with managing
* exit-time cleanup for either a postmaster or a backend.
*
*
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/include/storage/ipc.h,v 1.81 2010/01/20 18:54:27 heikki Exp $
*
*-------------------------------------------------------------------------
*/
#ifndef IPC_H
#define IPC_H

/* Exit-time cleanup callback: "code" is the process exit code, "arg" is the
 * caller-supplied context passed at registration time. */
typedef void (*pg_on_exit_callback) (int code, Datum arg);
/* Hook called after shared memory has been created/attached. */
typedef void (*shmem_startup_hook_type) (void);

/*----------
 * API for handling cleanup that must occur during either ereport(ERROR)
 * or ereport(FATAL) exits from a block of code.  (Typical examples are
 * undoing transient changes to shared-memory state.)
 *
 *		PG_ENSURE_ERROR_CLEANUP(cleanup_function, arg);
 *		{
 *			... code that might throw ereport(ERROR) or ereport(FATAL) ...
 *		}
 *		PG_END_ENSURE_ERROR_CLEANUP(cleanup_function, arg);
 *
 * where the cleanup code is in a function declared per pg_on_exit_callback.
 * The Datum value "arg" can carry any information the cleanup function
 * needs.
 *
 * This construct ensures that cleanup_function() will be called during
 * either ERROR or FATAL exits.  It will not be called on successful
 * exit from the controlled code.  (If you want it to happen then too,
 * call the function yourself from just after the construct.)
 *
 * Note: the macro arguments are multiply evaluated, so avoid side-effects.
 *----------
 */
#define PG_ENSURE_ERROR_CLEANUP(cleanup_function, arg)	\
	do { \
		on_shmem_exit(cleanup_function, arg); \
		PG_TRY()

#define PG_END_ENSURE_ERROR_CLEANUP(cleanup_function, arg)	\
		cancel_shmem_exit(cleanup_function, arg); \
		PG_CATCH(); \
		{ \
			cancel_shmem_exit(cleanup_function, arg); \
			cleanup_function (0, arg); \
			PG_RE_THROW(); \
		} \
		PG_END_TRY(); \
	} while (0)


/* ipc.c */
/* True while proc_exit is running; lets code detect re-entrant exits. */
extern bool proc_exit_inprogress;

extern void proc_exit(int code);
extern void shmem_exit(int code);
/* Register callbacks fired at process exit / at shared-memory detach. */
extern void on_proc_exit(pg_on_exit_callback function, Datum arg);
extern void on_shmem_exit(pg_on_exit_callback function, Datum arg);
/* Unregister a previously registered shmem-exit callback. */
extern void cancel_shmem_exit(pg_on_exit_callback function, Datum arg);
extern void on_exit_reset(void);

/* ipci.c */
extern PGDLLIMPORT shmem_startup_hook_type shmem_startup_hook;

extern void CreateSharedMemoryAndSemaphores(bool makePrivate, int port);

#endif   /* IPC_H */
| bubichain/blockchain | src/3rd/win32/include/postgresql/server/storage/ipc.h | C | apache-2.0 | 2,660 |
#!/bin/bash
# Runs schema_temp.sql against a local GemFireXD instance using the gfxd tool.
# Assumes ./setenv defines S_HOME (GemFireXD install root) -- TODO confirm.
. ./setenv

# Quote all expansions so hostnames/paths containing spaces or glob
# characters do not get word-split; $(...) replaces legacy backticks.
C_BIND="$(uname -n)"
C_PORT="1528"
S_PATH="$S_HOME/product-gfxd/bin"

# Create the temporary schema on the locally running server.
"$S_PATH/gfxd" run -file=schema_temp.sql -client-port="$C_PORT" -client-bind-address="$C_BIND"
| SnappyDataInc/snappy-store | gemfirexd/tools/src/test/resources/lib/diskCompatibilityScripts/exec-create-temp-schema.sh | Shell | apache-2.0 | 204 |
package alien4cloud.tosca.parser.postprocess;
import static alien4cloud.utils.AlienUtils.safe;
import javax.annotation.Resource;
import org.alien4cloud.tosca.model.types.NodeType;
import org.springframework.stereotype.Component;
/**
* Post process a node type.
*/
@Component
public class NodeTypePostProcessor implements IPostProcessor<NodeType> {
    // Post processor applied to each capability definition of the node type.
    @Resource
    private CapabilityDefinitionPostProcessor capabilityDefinitionPostProcessor;
    // Post processor applied to each requirement definition of the node type.
    @Resource
    private RequirementDefinitionPostProcessor requirementDefinitionPostProcessor;

    /**
     * Runs the capability and requirement post processors over every capability and
     * requirement of the given node type. safe(...) guards against null collections,
     * so a node type without capabilities or requirements is handled gracefully.
     * NOTE(review): the processors are passed as forEach consumers, so they are
     * presumably Consumer implementations -- confirm against IPostProcessor.
     */
    @Override
    public void process(NodeType instance) {
        safe(instance.getCapabilities()).forEach(capabilityDefinitionPostProcessor);
        safe(instance.getRequirements()).forEach(requirementDefinitionPostProcessor);
    }
}
/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package protoutil
import (
"bytes"
"crypto/sha256"
"encoding/asn1"
"math/big"
"github.com/golang/protobuf/proto"
cb "github.com/hyperledger/fabric-protos-go/common"
"github.com/pkg/errors"
)
// NewBlock constructs a block with no data and no metadata.
// The metadata list gets one empty entry per defined BlockMetadataIndex value.
func NewBlock(seqNum uint64, previousHash []byte) *cb.Block {
	metadata := make([][]byte, 0, len(cb.BlockMetadataIndex_name))
	for range cb.BlockMetadataIndex_name {
		metadata = append(metadata, []byte{})
	}

	return &cb.Block{
		Header: &cb.BlockHeader{
			Number:       seqNum,
			PreviousHash: previousHash,
			DataHash:     []byte{},
		},
		Data:     &cb.BlockData{},
		Metadata: &cb.BlockMetadata{Metadata: metadata},
	}
}
// asn1Header is the ASN.1-encodable projection of a block header; Number is a
// *big.Int because ASN.1 INTEGER has no fixed-width unsigned form.
type asn1Header struct {
	Number       *big.Int
	PreviousHash []byte
	DataHash     []byte
}
// BlockHeaderBytes returns the canonical ASN.1 DER encoding of the block header,
// which is the input to the header hash.
func BlockHeaderBytes(b *cb.BlockHeader) []byte {
	encoded, err := asn1.Marshal(asn1Header{
		Number:       new(big.Int).SetUint64(b.Number),
		PreviousHash: b.PreviousHash,
		DataHash:     b.DataHash,
	})
	if err != nil {
		// Errors should only arise for types which cannot be encoded, since the
		// BlockHeader type is known a-priori to contain only encodable types, an
		// error here is fatal and should not be propagated
		panic(err)
	}
	return encoded
}
// BlockHeaderHash returns the SHA-256 digest of the ASN.1-encoded block header.
func BlockHeaderHash(b *cb.BlockHeader) []byte {
	sum := sha256.Sum256(BlockHeaderBytes(b))
	return sum[:]
}
// BlockDataHash returns the SHA-256 digest of the block data: all data entries
// concatenated with no separator.
func BlockDataHash(b *cb.BlockData) []byte {
	sum := sha256.Sum256(bytes.Join(b.Data, nil))
	return sum[:]
}
// GetChannelIDFromBlockBytes returns channel ID given byte array which represents
// the block
func GetChannelIDFromBlockBytes(bytes []byte) (string, error) {
	// Unmarshal first, then delegate the header walk to GetChannelIDFromBlock.
	block, err := UnmarshalBlock(bytes)
	if err != nil {
		return "", err
	}
	return GetChannelIDFromBlock(block)
}
// GetChannelIDFromBlock returns the channel ID recorded in the channel header of
// the block's first transaction envelope.
func GetChannelIDFromBlock(block *cb.Block) (string, error) {
	if block == nil || block.Data == nil || block.Data.Data == nil || len(block.Data.Data) == 0 {
		return "", errors.New("failed to retrieve channel id - block is empty")
	}

	// The channel ID lives in the channel header of the first envelope.
	env, err := GetEnvelopeFromBlock(block.Data.Data[0])
	if err != nil {
		return "", err
	}
	envPayload, err := UnmarshalPayload(env.Payload)
	if err != nil {
		return "", err
	}
	if envPayload.Header == nil {
		return "", errors.New("failed to retrieve channel id - payload header is empty")
	}
	channelHeader, err := UnmarshalChannelHeader(envPayload.Header.ChannelHeader)
	if err != nil {
		return "", err
	}
	return channelHeader.ChannelId, nil
}
// GetMetadataFromBlock retrieves and unmarshals the block metadata stored at the
// specified index, erroring when the metadata slot is missing or malformed.
func GetMetadataFromBlock(block *cb.Block, index cb.BlockMetadataIndex) (*cb.Metadata, error) {
	if block.Metadata == nil {
		return nil, errors.New("no metadata in block")
	}
	if len(block.Metadata.Metadata) <= int(index) {
		return nil, errors.Errorf("no metadata at index [%s]", index)
	}

	metadata := &cb.Metadata{}
	if err := proto.Unmarshal(block.Metadata.Metadata[index], metadata); err != nil {
		return nil, errors.Wrapf(err, "error unmarshalling metadata at index [%s]", index)
	}
	return metadata, nil
}
// GetMetadataFromBlockOrPanic retrieves metadata at the specified index, or
// panics when the lookup or unmarshalling fails.
func GetMetadataFromBlockOrPanic(block *cb.Block, index cb.BlockMetadataIndex) *cb.Metadata {
	metadata, err := GetMetadataFromBlock(block, index)
	if err != nil {
		panic(err)
	}
	return metadata
}
// GetConsenterMetadataFromBlock attempts to retrieve consenter metadata from the value
// stored in block metadata at index SIGNATURES (first field). If no consenter metadata
// is found there, it falls back to index ORDERER (third field).
func GetConsenterMetadataFromBlock(block *cb.Block) (*cb.Metadata, error) {
	signatureMetadata, err := GetMetadataFromBlock(block, cb.BlockMetadataIndex_SIGNATURES)
	if err != nil {
		return nil, errors.WithMessage(err, "failed to retrieve metadata")
	}

	// TODO FAB-15864 Remove this fallback when we can stop supporting upgrade from pre-1.4.1 orderer
	if len(signatureMetadata.Value) == 0 {
		return GetMetadataFromBlock(block, cb.BlockMetadataIndex_ORDERER)
	}

	ordererMetadata := &cb.OrdererBlockMetadata{}
	if err := proto.Unmarshal(signatureMetadata.Value, ordererMetadata); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal orderer block metadata")
	}

	consenterMetadata := &cb.Metadata{}
	if err := proto.Unmarshal(ordererMetadata.ConsenterMetadata, consenterMetadata); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal consenter metadata")
	}
	return consenterMetadata, nil
}
// GetLastConfigIndexFromBlock retrieves the index of the last config block as
// encoded in the block metadata
func GetLastConfigIndexFromBlock(block *cb.Block) (uint64, error) {
	signatureMetadata, err := GetMetadataFromBlock(block, cb.BlockMetadataIndex_SIGNATURES)
	if err != nil {
		return 0, errors.WithMessage(err, "failed to retrieve metadata")
	}

	// TODO FAB-15864 Remove this fallback when we can stop supporting upgrade from pre-1.4.1 orderer
	if len(signatureMetadata.Value) == 0 {
		// Legacy layout: the index lives in a LastConfig message at LAST_CONFIG.
		lastConfigMetadata, err := GetMetadataFromBlock(block, cb.BlockMetadataIndex_LAST_CONFIG)
		if err != nil {
			return 0, errors.WithMessage(err, "failed to retrieve metadata")
		}
		lastConfig := &cb.LastConfig{}
		if err := proto.Unmarshal(lastConfigMetadata.Value, lastConfig); err != nil {
			return 0, errors.Wrap(err, "error unmarshalling LastConfig")
		}
		return lastConfig.Index, nil
	}

	ordererMetadata := &cb.OrdererBlockMetadata{}
	if err := proto.Unmarshal(signatureMetadata.Value, ordererMetadata); err != nil {
		return 0, errors.Wrap(err, "failed to unmarshal orderer block metadata")
	}
	return ordererMetadata.LastConfig.Index, nil
}
// GetLastConfigIndexFromBlockOrPanic retrieves the index of the last config
// block as encoded in the block metadata, or panics on error
func GetLastConfigIndexFromBlockOrPanic(block *cb.Block) uint64 {
	lastConfigIndex, err := GetLastConfigIndexFromBlock(block)
	if err != nil {
		panic(err)
	}
	return lastConfigIndex
}
// CopyBlockMetadata copies metadata from one block into another
// (shares the same Metadata pointer rather than deep-copying), then ensures
// the destination has all required metadata positions initialized.
func CopyBlockMetadata(src *cb.Block, dst *cb.Block) {
	dst.Metadata = src.Metadata
	// Once copied initialize with rest of the
	// required metadata positions.
	InitBlockMetadata(dst)
}
// InitBlockMetadata ensures the block carries a metadata structure with at least
// COMMIT_HASH+1 entries, creating or padding it with empty entries as needed.
func InitBlockMetadata(block *cb.Block) {
	if block.Metadata == nil {
		block.Metadata = &cb.BlockMetadata{Metadata: [][]byte{{}, {}, {}, {}, {}}}
		return
	}
	// Pad an existing-but-short metadata list up to the required length.
	for len(block.Metadata.Metadata) < int(cb.BlockMetadataIndex_COMMIT_HASH)+1 {
		block.Metadata.Metadata = append(block.Metadata.Metadata, []byte{})
	}
}
| hyperledger/fabric | protoutil/blockutils.go | GO | apache-2.0 | 6,557 |
/*
* Copyright 2021 NAVER Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.navercorp.pinpoint.profiler.plugin.config;
import com.navercorp.pinpoint.bootstrap.config.DefaultProfilerConfig;
import com.navercorp.pinpoint.bootstrap.config.Value;
import com.navercorp.pinpoint.common.util.StringUtils;
import java.util.Collections;
import java.util.List;
/**
 * Default {@link PluginLoadingConfig} populated from profiler properties via the
 * {@code @Value} binding mechanism. Each setter tokenizes its raw property string
 * into a list; all lists default to empty.
 */
public class DefaultPluginLoadingConfig implements PluginLoadingConfig {
    // ArtifactIdUtils.ARTIFACT_SEPARATOR
    private static final String ARTIFACT_SEPARATOR = ";";

    private List<String> pluginLoadOrder = Collections.emptyList();
    private List<String> disabledPlugins = Collections.emptyList();
    private List<String> importPluginIds = Collections.emptyList();

    public DefaultPluginLoadingConfig() {
    }

    @Override
    public List<String> getPluginLoadOrder() {
        return pluginLoadOrder;
    }

    /** Binds the comma-separated plugin load-order property. */
    @Value("${profiler.plugin.load.order}")
    public void setPluginLoadOrder(String pluginLoadOrder) {
        this.pluginLoadOrder = StringUtils.tokenizeToStringList(pluginLoadOrder, ",");
    }

    @Override
    public List<String> getDisabledPlugins() {
        return disabledPlugins;
    }

    /** Binds the comma-separated disabled-plugins property. */
    @Value("${profiler.plugin.disable}")
    public void setDisabledPlugins(String disabledPlugins) {
        this.disabledPlugins = StringUtils.tokenizeToStringList(disabledPlugins, ",");
    }

    @Override
    public List<String> getImportPluginIds() {
        return importPluginIds;
    }

    /** Binds the import-plugin property, separated by the artifact separator (';'). */
    @Value("${" + DefaultProfilerConfig.IMPORT_PLUGIN + "}")
    public void setImportPluginIds(String importPluginIds) {
        this.importPluginIds = StringUtils.tokenizeToStringList(importPluginIds, ARTIFACT_SEPARATOR);
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("DefaultPluginLoadingConfig{");
        sb.append("pluginLoadOrder=").append(pluginLoadOrder);
        sb.append(", disabledPlugins=").append(disabledPlugins);
        sb.append(", importPluginIds=").append(importPluginIds);
        sb.append('}');
        return sb.toString();
    }
}
| emeroad/pinpoint | profiler/src/main/java/com/navercorp/pinpoint/profiler/plugin/config/DefaultPluginLoadingConfig.java | Java | apache-2.0 | 2,527 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @author Pavel N. Vyssotski
*/
// OptionParser.cpp
#include <cstring>
#include "AgentBase.h"
#include "MemoryManager.h"
#include "AgentException.h"
#include "Log.h"
#include "OptionParser.h"
using namespace jdwp;
using namespace std;
/**
 * Constructs an empty parser: no option string parsed yet, and all agent
 * settings at their defaults (suspend=y, server=n, no transport/address,
 * no timeout, no filters, no launch/onthrow handlers).
 */
OptionParser::OptionParser() throw()
{
    m_optionCount = 0;
    m_optionString = 0;
    m_options = 0;
    m_help = false;
    m_suspend = true;
    m_server = false;
    m_timeout = 0;
    m_transport = 0;
    m_address = 0;
    m_log = 0;
    m_kindFilter = 0;
    m_srcFilter = 0;
    m_onuncaught = false;
    m_onthrow = 0;
    m_launch = 0;
}
/**
 * Converts a "y"/"n" option value into a bool.
 * Any other value is rejected with IllegalArgumentException, exactly as
 * the JDWP option grammar requires.
 */
bool OptionParser::AsciiToBool(const char *str) throw(IllegalArgumentException)
{
    if (strcmp("y", str) == 0) {
        return true;
    }
    if (strcmp("n", str) == 0) {
        return false;
    }
    throw IllegalArgumentException();
}
/**
 * Parses the agent option string (e.g. "transport=dt_socket,address=8000,
 * suspend=y") into the internal name/value table and then populates the
 * typed option fields.
 *
 * Works in two passes:
 *   1) count options by scanning for commas, validating that any quoted
 *      value directly follows '=' and is directly followed by ',' or the
 *      end of the string;
 *   2) split a private copy of the string in place, replacing '=', ','
 *      and quote characters with '\0' so name/value pointers can point
 *      straight into the copy (no per-option allocation).
 *
 * Throws IllegalArgumentException on malformed input, and requires
 * launch=<command line> whenever onthrow or onuncaught is set.
 */
void OptionParser::Parse(const char* str) throw(AgentException)
{
    size_t i;
    int k;
    if (str == 0)
        return;
    const size_t len = strlen(str);
    if (len == 0)
        return;
    // Pass 1: count options and validate quote placement. Quoted regions
    // are skipped wholesale so commas inside quotes are not counted.
    for (i = 0; i < len; i++) {
        if (str[i] == ',') {
            m_optionCount++;
        } else if (str[i] == '"' || str[i] == '\'') {
            char quote = str[i];
            // A quote is only legal immediately after '='.
            if (i > 0 && str[i-1] != '=') {
                throw IllegalArgumentException();
            }
            i++;
            while (i < len && str[i] != quote) {
                i++;
            }
            // After the closing quote only ',' or end-of-string may follow.
            if (i+1 < len && str[i+1] != ',') {
                throw IllegalArgumentException();
            }
        }
    }
    m_optionCount++;
    // Private, mutable copy of the option string; name/value pointers below
    // point into this buffer. Freed in the destructor.
    m_optionString = reinterpret_cast<char*>(AgentBase::GetMemoryManager().
        Allocate(len + 1 JDWP_FILE_LINE));
    strcpy(m_optionString, str);
    m_options = reinterpret_cast<Option*>(AgentBase::GetMemoryManager().
        Allocate(m_optionCount * sizeof(Option) JDWP_FILE_LINE));
    m_options[0].name = m_optionString;
    m_options[0].value = "";
    k = 0;
    // Pass 2: split the copy in place. waitEndOfOption suppresses '='
    // handling inside a value so values may themselves contain '='.
    bool waitEndOfOption = false;
    for (i = 0; i < len && k < m_optionCount; i++) {
        if ((m_optionString[i] == '=') && (!waitEndOfOption)) {
            waitEndOfOption = true;
            m_optionString[i] = '\0';
            m_options[k].value = &m_optionString[i+1];
        } else if (m_optionString[i] == ',') {
            waitEndOfOption = false;
            m_optionString[i] = '\0';
            k++;
            m_options[k].name = &m_optionString[i+1];
            m_options[k].value = "";
        } else if (m_optionString[i] == '"' || m_optionString[i] == '\'') {
            char quote = m_optionString[i];
            m_optionString[i] = '\0';
            m_options[k].value = &m_optionString[i+1];
            i++;
            while (i < len && m_optionString[i] != quote) {
                i++;
            }
            if (i < len) {
                m_optionString[i] = '\0';
            }
        }
    }
    // Recognize known option names and fill the typed fields. Unknown
    // options are kept in the table (reachable via FindOptionValue) but
    // otherwise ignored here.
    for (k = 0; k < m_optionCount; k++) {
        if (strcmp("transport", m_options[k].name) == 0) {
            m_transport = m_options[k].value;
        } else if (strcmp("address", m_options[k].name) == 0) {
            m_address = m_options[k].value;
        } else if (strcmp("timeout", m_options[k].name) == 0) {
            m_timeout = atol(m_options[k].value);
        } else if (strcmp("suspend", m_options[k].name) == 0) {
            m_suspend = AsciiToBool(m_options[k].value);
        } else if (strcmp("server", m_options[k].name) == 0) {
            m_server = AsciiToBool(m_options[k].value);
        } else if (strcmp("launch", m_options[k].name) == 0) {
            m_launch = m_options[k].value;
        } else if (strcmp("onuncaught", m_options[k].name) == 0) {
            m_onuncaught = AsciiToBool(m_options[k].value);
        } else if (strcmp("onthrow", m_options[k].name) == 0) {
            m_onthrow = m_options[k].value;
        } else if (strcmp("help", m_options[k].name) == 0) {
            m_help = true;
#ifndef NDEBUG
        // Logging/trace options are only honored in debug builds.
        } else if (strcmp("log", m_options[k].name) == 0) {
            m_log = m_options[k].value;
        } else if (strcmp("trace", m_options[k].name) == 0) {
            m_kindFilter = m_options[k].value;
        } else if (strcmp("src", m_options[k].name) == 0) {
            m_srcFilter = m_options[k].value;
#endif // NDEBUG
        }
    }
    // onthrow/onuncaught only make sense with a launch command to run when
    // the trigger fires.
    if ((m_onthrow != 0) || (m_onuncaught != 0)) {
        if (m_launch == 0) {
            JDWP_ERROR("Specify launch=<command line> when using onthrow or onuncaught option");
            throw IllegalArgumentException();
        }
    }
}
/**
 * Releases the option-string copy and the option table, if they were
 * allocated by Parse(). The two frees are independent of each other.
 */
OptionParser::~OptionParser() throw()
{
    if (m_options != 0) {
        AgentBase::GetMemoryManager().Free(m_options JDWP_FILE_LINE);
    }
    if (m_optionString != 0) {
        AgentBase::GetMemoryManager().Free(m_optionString JDWP_FILE_LINE);
    }
}
const char *OptionParser::FindOptionValue(const char *name) const throw()
{
for (int i = 0; i < m_optionCount; i++) {
if (strcmp(name, m_options[i].name) == 0) {
return m_options[i].value;
}
}
return 0;
}
| freeVM/freeVM | enhanced/java/jdktools/modules/jpda/src/main/native/jdwp/common/agent/core/OptionParser.cpp | C++ | apache-2.0 | 5,843 |
package org.jaudiotagger.audio.mp4;
import org.jaudiotagger.audio.generic.GenericAudioHeader;
import org.jaudiotagger.audio.mp4.atom.Mp4EsdsBox;
/**
 * Audio header for MP4 files. Carries MP4-specific attributes (esds kind,
 * audio profile and ftyp brand) on top of the generic audio header fields,
 * storing each one in the inherited {@code content} map under a well-known
 * key.
 */
public class Mp4AudioHeader extends GenericAudioHeader {

    /** Key under which the esds "kind" attribute is stored in {@code content}. */
    public final static String FIELD_KIND = "KIND";

    /** Key under which the audio profile is stored in {@code content}. */
    public final static String FIELD_PROFILE = "PROFILE";

    /** Key under which the ftyp brand is stored in {@code content}. */
    public final static String FIELD_BRAND = "BRAND";

    /** Records the esds kind attribute. */
    public void setKind(Mp4EsdsBox.Kind kind) {
        this.content.put(FIELD_KIND, kind);
    }

    /** @return the stored esds kind, or null when never set */
    public Mp4EsdsBox.Kind getKind() {
        final Object kind = this.content.get(FIELD_KIND);
        return (Mp4EsdsBox.Kind) kind;
    }

    /** Records the audio profile attribute. */
    public void setProfile(Mp4EsdsBox.AudioProfile profile) {
        this.content.put(FIELD_PROFILE, profile);
    }

    /** @return the stored audio profile, or null when never set */
    public Mp4EsdsBox.AudioProfile getProfile() {
        final Object profile = this.content.get(FIELD_PROFILE);
        return (Mp4EsdsBox.AudioProfile) profile;
    }

    /** Records the ftyp brand attribute. */
    public void setBrand(String brand) {
        this.content.put(FIELD_BRAND, brand);
    }

    /** @return the stored ftyp brand, or null when never set */
    public String getBrand() {
        return (String) this.content.get(FIELD_BRAND);
    }
}
| dubenju/javay | src/java/org/jaudiotagger/audio/mp4/Mp4AudioHeader.java | Java | apache-2.0 | 1,542 |
########################################################################
#
# Linux on Hyper-V and Azure Test Code, ver. 1.0.0
# Copyright (c) Microsoft Corporation
#
# All rights reserved.
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
# ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR
# PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
#
########################################################################
<#
.Synopsis
Run continous Ping while disabling and enabling the SR-IOV feature
.Description
Continuously ping a server, from a Linux client, over a SR-IOV connection.
Disable SR-IOV on the Linux client and observe RTT increase.
Re-enable SR-IOV and observe that RTT lowers.
.Parameter vmName
Name of the test VM.
.Parameter hvServer
Name of the Hyper-V server hosting the VM.
.Parameter testParams
Semicolon separated list of test parameters.
This setup script does not use any setup scripts.
.Example
<test>
<testName>Ping_DisableVF</testName>
<testScript>setupscripts\SR-IOV_Ping_DisableVF.ps1</testScript>
<files>remote-scripts/ica/utils.sh,remote-scripts/ica/SR-IOV_Utils.sh</files>
<setupScript>
<file>setupscripts\RevertSnapshot.ps1</file>
<file>setupscripts\SR-IOV_enable.ps1</file>
</setupScript>
<noReboot>False</noReboot>
<testParams>
<param>NIC=NetworkAdapter,External,SRIOV,001600112200</param>
<param>TC_COVERED=SRIOV-5A</param>
<param>VF_IP1=10.11.12.31</param>
<param>VF_IP2=10.11.12.32</param>
<param>NETMASK=255.255.255.0</param>
<param>REMOTE_SERVER=remoteHostname</param>
</testParams>
<timeout>1800</timeout>
</test>
#>
param ([String] $vmName, [String] $hvServer, [string] $testParams)
#############################################################
#
# Main script body
#
# Flow: validate args -> source helper scripts -> parse test params ->
# configure the VF NIC on the guest -> start a long-running ping ->
# sample RTT with SR-IOV on, off, and on again, asserting that RTT rises
# when the VF is disabled and recovers when it is re-enabled.
#
#############################################################
$retVal = $False
# NOTE(review): $leaveTrail is assigned but never read anywhere below -
# presumably kept for parity with sibling setup scripts; confirm.
$leaveTrail = "no"
#
# Check the required input args are present
#
# Write out test Params
$testParams
# NOTE(review): $vmName is not null-checked even though it is used below
# (summary log name, Set-VMNetworkAdapter) - looks like an omission; confirm.
if ($hvServer -eq $null)
{
    "ERROR: hvServer is null"
    return $False
}
if ($testParams -eq $null)
{
    "ERROR: testParams is null"
    return $False
}
#change working directory to root dir
$testParams -match "RootDir=([^;]+)"
if (-not $?)
{
    "Mandatory param RootDir=Path; not found!"
    return $false
}
$rootDir = $Matches[1]
if (Test-Path $rootDir)
{
    Set-Location -Path $rootDir
    if (-not $?)
    {
        "ERROR: Could not change directory to $rootDir !"
        return $false
    }
    "Changed working directory to $rootDir"
}
else
{
    "ERROR: RootDir = $rootDir is not a valid path"
    return $false
}
# Source TCUitls.ps1 for getipv4 and other functions
if (Test-Path ".\setupScripts\TCUtils.ps1")
{
    . .\setupScripts\TCUtils.ps1
}
else
{
    "ERROR: Could not find setupScripts\TCUtils.ps1"
    return $false
}
# Source NET_UTILS.ps1 for network functions
if (Test-Path ".\setupScripts\NET_UTILS.ps1")
{
    . .\setupScripts\NET_UTILS.ps1
}
else
{
    "ERROR: Could not find setupScripts\NET_Utils.ps1"
    return $false
}
# Process the test params (semicolon-separated key=value pairs)
$params = $testParams.Split(';')
foreach ($p in $params)
{
    $fields = $p.Split("=")
    switch ($fields[0].Trim())
    {
        "SshKey" { $sshKey = $fields[1].Trim() }
        "ipv4" { $ipv4 = $fields[1].Trim() }
        "VF_IP1" { $vmVF_IP1 = $fields[1].Trim() }
        "VF_IP2" { $vmVF_IP2 = $fields[1].Trim() }
        "NETMASK" { $netmask = $fields[1].Trim() }
        "VM2NAME" { $vm2Name = $fields[1].Trim() }
        "REMOTE_SERVER" { $remoteServer = $fields[1].Trim()}
        "TC_COVERED" { $TC_COVERED = $fields[1].Trim() }
    }
}
$summaryLog = "${vmName}_summary.log"
del $summaryLog -ErrorAction SilentlyContinue
Write-Output "This script covers test case: ${TC_COVERED}" | Tee-Object -Append -file $summaryLog
# Get IPs
$ipv4 = GetIPv4 $vmName $hvServer
"${vmName} IPADDRESS: ${ipv4}"
#
# Configure VF on test VM
#
Start-Sleep -s 5
$retVal = ConfigureVF $ipv4 $sshKey $netmask
if (-not $retVal)
{
    "ERROR: Failed to configure eth1 on vm $vmName (IP: ${ipv4}), by setting a static IP of $vmVF_IP1 , netmask $netmask"
    return $false
}
Start-Sleep -s 10
#
# Run Ping with SR-IOV enabled
#
# The ping (600 packets over eth1, the VF interface) runs in the background
# on the guest; this script samples its log at intervals via plink.
.\bin\plink.exe -i ssh\$sshKey root@${ipv4} "echo 'source constants.sh && ping -c 600 -I eth1 `$VF_IP2 > PingResults.log &' > runPing.sh"
Start-Sleep -s 5
.\bin\plink.exe -i ssh\$sshKey root@${ipv4} "bash ~/runPing.sh > ~/Ping.log 2>&1"
# Wait 60 seconds and read the RTT
"Get Logs"
Start-Sleep -s 30
[decimal]$vfEnabledRTT = .\bin\plink.exe -i ssh\$sshKey root@${ipv4} "tail -2 PingResults.log | head -1 | awk '{print `$7}' | sed 's/=/ /' | awk '{print `$2}'"
if (-not $vfEnabledRTT){
    "ERROR: No result was logged! Check if Ping was executed!" | Tee-Object -Append -file $summaryLog
    return $false
}
"The RTT before disabling SR-IOV is $vfEnabledRTT ms" | Tee-Object -Append -file $summaryLog
#
# Disable SR-IOV on test VM
#
Start-Sleep -s 5
"Disabling VF on vm1"
Set-VMNetworkAdapter -VMName $vmName -ComputerName $hvServer -IovWeight 0
if (-not $?) {
    "ERROR: Failed to disable SR-IOV on $vmName!" | Tee-Object -Append -file $summaryLog
    return $false
}
# Read the RTT with SR-IOV disabled; it should be higher
Start-Sleep -s 30
[decimal]$vfDisabledRTT = .\bin\plink.exe -i ssh\$sshKey root@${ipv4} "tail -2 PingResults.log | head -1 | awk '{print `$7}' | sed 's/=/ /' | awk '{print `$2}'"
if (-not $vfDisabledRTT){
    "ERROR: No result was logged after SR-IOV was disabled!" | Tee-Object -Append -file $summaryLog
    return $false
}
"The RTT with SR-IOV disabled is $vfDisabledRTT ms" | Tee-Object -Append -file $summaryLog
if ($vfDisabledRTT -le $vfEnabledRTT) {
    "ERROR: The RTT was lower with SR-IOV disabled, it should be higher" | Tee-Object -Append -file $summaryLog
    return $false
}
#
# Enable SR-IOV on test VM
"Enable VF on vm1"
Set-VMNetworkAdapter -VMName $vmName -ComputerName $hvServer -IovWeight 1
if (-not $?) {
    "ERROR: Failed to enable SR-IOV on $vmName!" | Tee-Object -Append -file $summaryLog
    return $false
}
Start-Sleep -s 30
# Read the RTT again, it should be lower than before
# We should see values to close to the initial RTT measured
# The baseline is inflated by 30% to tolerate normal RTT jitter when
# judging whether the VF recovered.
[decimal]$vfEnabledRTT = $vfEnabledRTT * 1.3
[decimal]$vfFinalRTT = .\bin\plink.exe -i ssh\$sshKey root@${ipv4} "tail -2 PingResults.log | head -1 | awk '{print `$7}' | sed 's/=/ /' | awk '{print `$2}'"
"The RTT after re-enabling SR-IOV is $vfFinalRTT ms" | Tee-Object -Append -file $summaryLog
if ($vfFinalRTT -gt $vfEnabledRTT) {
    "ERROR: After re-enabling SR-IOV, the RTT value has not lowered enough
    Please check if the VF was successfully restarted" | Tee-Object -Append -file $summaryLog
    return $false
}
return $true
/**
* @license
* Copyright 2020 The FOAM Authors. All Rights Reserved.
* http://www.apache.org/licenses/LICENSE-2.0
*/
foam.CLASS({
  package: 'foam.nanos.crunch.lite',
  name: 'MinMaxCapabilityRefinement',
  refines: 'foam.nanos.crunch.MinMaxCapability',

  // Makes MinMaxCapability usable on Capable objects by contributing the
  // Capable-specific chained-status computation below.
  implements: [
    'foam.nanos.crunch.lite.CapableCompatibleCapability'
  ],

  javaImports: [
    'foam.nanos.crunch.CapabilityJunctionPayload',
    'foam.nanos.crunch.CrunchService',
    'static foam.nanos.crunch.CapabilityJunctionStatus.*'
  ],

  methods: [
    {
      name: 'getCapableChainedStatus',
      documentation: `
        numberGrantedOrPending are the available CapablePayloads which are GRANTED or can eventually be turned into
        GRANTED from PENDING state. If MinMaxCapability.min is greater than the number of available payloads which are GRANTED or
        can eventually be turned into GRANTED, then it is impossible for the total amount of GRANTED payloads to be greater than the MIN,
        thereby fulfilling the minimum requirement.

        For example, let there be a min max capablity which has 10 prerequisites and a min of 2.

        If the user selected only 3 of those prereqs in the wizard, then the CapablePayload.status for those 3 will each be in PENDING
        state with approvals generated for each one. Note, there will only be these 3 CapablePayloads out of the 10 Prereqs avaliable on the
        Capable object since the user only selected 3.

        If 1 of those 3 CapablePayloads get rejected. Then there will be 2 numberGrantedOrPending which could still potentially satisfy
        the min requirement of 2 if those 2 CapablePayloads get set to GRANTED.

        If 2 of those 3 CapablePayloads get rejected. Then there will be 1 numberGrantedOrPending which would be impossible to satisfy the
        MinMaxCapability.min requirement of 2 even if that 1 CapablePayload is GRANTED.
      `,
      // Java body: tallies prerequisite payload statuses and folds them into
      // a single chained status per the rules described above.
      javaCode: `
        CrunchService crunchService = (CrunchService) x.get("crunchService");

        List<String> prereqCapIds = crunchService.getPrereqs(getId());

        int numberGranted = 0;
        int numberPending = 0;
        int numberRejected = 0;

        for ( String capId : prereqCapIds ) {
          CapabilityJunctionPayload prereqPayload = (CapabilityJunctionPayload)
            capablePayloadDAO.find(capId);

          if ( prereqPayload == null ) {
            continue;
          }

          switch ( prereqPayload.getStatus() ) {
            case GRANTED:
              numberGranted++;
              continue;
            case PENDING:
            case APPROVED:
              numberPending++;
              continue;
            case REJECTED:
              numberRejected++;
              continue;
          }
        }

        int numberTotal = numberGranted + numberPending + numberRejected;
        int numberGrantedOrPending = numberGranted + numberPending;

        if ( numberTotal == 0 ){
          return CapabilityJunctionStatus.ACTION_REQUIRED;
        }

        if ( getMin() > numberGrantedOrPending ){
          return CapabilityJunctionStatus.REJECTED;
        }

        if ( numberGranted >= getMin() ) {
          return CapabilityJunctionStatus.GRANTED;
        }

        if ( numberTotal >= getMin() ) {
          return CapabilityJunctionStatus.PENDING;
        }

        return CapabilityJunctionStatus.ACTION_REQUIRED;
      `
    }
  ]
});
/*
* Copyright 2016 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thoughtworks.go.serverhealth;
import com.thoughtworks.go.config.CaseInsensitiveString;
import com.thoughtworks.go.config.CruiseConfig;
import com.thoughtworks.go.config.PipelineConfig;
import com.thoughtworks.go.config.remote.ConfigRepoConfig;
import com.thoughtworks.go.domain.materials.Material;
import com.thoughtworks.go.domain.materials.MaterialConfig;
import org.apache.commons.lang.StringUtils;
import java.util.HashSet;
import java.util.Set;
/**
 * Identifies what part of the system a server-health message applies to:
 * the whole server (GLOBAL), a pipeline group, a pipeline, a stage, a job,
 * a material, a config repository, a plugin, etc.
 *
 * Instances are immutable value objects created through the static factory
 * methods; equality and ordering are defined over (type, scope identifier).
 */
public class HealthStateScope implements Comparable<HealthStateScope> {
    public static final HealthStateScope GLOBAL = new HealthStateScope(ScopeType.GLOBAL, "GLOBAL");
    private final ScopeType type;
    private final String scope;

    private HealthStateScope(ScopeType type, String scope) {
        this.type = type;
        this.scope = scope;
    }

    public static HealthStateScope forGroup(String groupName) {
        return new HealthStateScope(ScopeType.GROUP, groupName);
    }

    public static HealthStateScope forPipeline(String pipelineName) {
        return new HealthStateScope(ScopeType.PIPELINE, pipelineName);
    }

    public static HealthStateScope forFanin(String pipelineName) {
        return new HealthStateScope(ScopeType.FANIN, pipelineName);
    }

    public static HealthStateScope forStage(String pipelineName, String stageName) {
        return new HealthStateScope(ScopeType.STAGE, pipelineName + "/" + stageName);
    }

    public static HealthStateScope forJob(String pipelineName, String stageName, String jobName) {
        return new HealthStateScope(ScopeType.JOB, pipelineName + "/" + stageName + "/" + jobName);
    }

    public static HealthStateScope forMaterial(Material material) {
        return new HealthStateScope(ScopeType.MATERIAL, material.getSqlCriteria().toString());
    }

    public static HealthStateScope forMaterialUpdate(Material material) {
        return new HealthStateScope(ScopeType.MATERIAL_UPDATE, material.getFingerprint());
    }

    public static HealthStateScope forMaterialConfig(MaterialConfig materialConfig) {
        return new HealthStateScope(ScopeType.MATERIAL, materialConfig.getSqlCriteria().toString());
    }

    public static HealthStateScope forMaterialConfigUpdate(MaterialConfig materialConfig) {
        return new HealthStateScope(ScopeType.MATERIAL_UPDATE, materialConfig.getFingerprint());
    }

    public static HealthStateScope forConfigRepo(String operation) {
        return new HealthStateScope(ScopeType.CONFIG_REPO, operation);
    }

    public static HealthStateScope forPartialConfigRepo(ConfigRepoConfig repoConfig) {
        return new HealthStateScope(ScopeType.CONFIG_PARTIAL, repoConfig.getMaterialConfig().getFingerprint());
    }

    public static HealthStateScope forPartialConfigRepo(String fingerprint) {
        return new HealthStateScope(ScopeType.CONFIG_PARTIAL, fingerprint);
    }

    // Agent-related health messages are tracked under the GLOBAL scope type,
    // keyed by the agent cookie.
    public static HealthStateScope forAgent(String cookie) {
        return new HealthStateScope(ScopeType.GLOBAL, cookie);
    }

    public static HealthStateScope forInvalidConfig() {
        return new HealthStateScope(ScopeType.GLOBAL, "global");
    }

    public static HealthStateScope forPlugin(String symbolicName) {
        return new HealthStateScope(ScopeType.PLUGIN, symbolicName);
    }

    /**
     * Case-insensitive <em>suffix</em> match against this scope's identifier.
     * NOTE(review): this is endsWith, not equals; callers appear to rely on
     * matching trailing segments of a scope id - confirm before tightening.
     */
    public boolean isSame(String scope) {
        return StringUtils.endsWithIgnoreCase(this.scope, scope);
    }

    public boolean isForPipeline() {
        return type == ScopeType.PIPELINE;
    }

    public boolean isForGroup() {
        return type == ScopeType.GROUP;
    }

    public boolean isForMaterial() {
        return type == ScopeType.MATERIAL;
    }

    public boolean isForConfigPartial() {
        return type == ScopeType.CONFIG_PARTIAL;
    }

    ScopeType getType() {
        return type;
    }

    public String getScope() {
        return scope;
    }

    /** @return true if the entity this scope refers to no longer exists in the given config. */
    public boolean isRemovedFromConfig(CruiseConfig cruiseConfig) {
        return type.isRemovedFromConfig(cruiseConfig, scope);
    }

    /**
     * Names of all pipelines affected by this scope: the pipeline itself for
     * pipeline/fanin/stage/job scopes, or every pipeline that uses the
     * material for material scopes. Empty for other scope types.
     */
    public Set<String> getPipelineNames(CruiseConfig config) {
        HashSet<String> pipelineNames = new HashSet<>();
        switch (type) {
            case PIPELINE:
            case FANIN:
                pipelineNames.add(scope);
                break;
            case STAGE:
            case JOB:
                // Scope identifier is "pipeline/stage[/job]"; the pipeline is
                // the first path segment.
                pipelineNames.add(scope.split("/")[0]);
                break;
            case MATERIAL:
                for (PipelineConfig pc : config.getAllPipelineConfigs()) {
                    for (MaterialConfig mc : pc.materialConfigs()) {
                        String scope = HealthStateScope.forMaterialConfig(mc).getScope();
                        if (scope.equals(this.scope)) {
                            pipelineNames.add(pc.name().toString());
                        }
                    }
                }
                break;
            case MATERIAL_UPDATE:
                for (PipelineConfig pc : config.getAllPipelineConfigs()) {
                    for (MaterialConfig mc : pc.materialConfigs()) {
                        String scope = HealthStateScope.forMaterialConfigUpdate(mc).getScope();
                        if (scope.equals(this.scope)) {
                            pipelineNames.add(pc.name().toString());
                        }
                    }
                }
                break;
        }
        return pipelineNames;
    }

    @Override
    public String toString() {
        return String.format("LogScope[%s, scope=%s]", type, scope);
    }

    @Override
    public boolean equals(Object that) {
        if (this == that) { return true; }
        if (that == null) { return false; }
        if (getClass() != that.getClass()) { return false; }
        return equals((HealthStateScope) that);
    }

    private boolean equals(HealthStateScope that) {
        if (type != that.type) { return false; }
        if (!scope.equals(that.scope)) { return false; }
        return true;
    }

    @Override
    public int hashCode() {
        int result = type.hashCode();
        result = 31 * result + (scope != null ? scope.hashCode() : 0);
        return result;
    }

    @Override
    public int compareTo(HealthStateScope o) {
        // Order by scope type first, then by the scope identifier.
        int comparison = type.compareTo(o.type);
        if (comparison != 0) {
            return comparison;
        }
        return scope.compareTo(o.scope);
    }

    /**
     * The kind of entity a scope refers to. Each constant that can become
     * stale overrides isRemovedFromConfig; the default is "never removed".
     */
    enum ScopeType {
        GLOBAL,
        CONFIG_REPO,
        GROUP {
            @Override
            public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String group) {
                return !cruiseConfig.hasPipelineGroup(group);
            }
        },
        MATERIAL {
            @Override
            public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String materialScope) {
                for (MaterialConfig materialConfig : cruiseConfig.getAllUniqueMaterials()) {
                    if (HealthStateScope.forMaterialConfig(materialConfig).getScope().equals(materialScope)) {
                        return false;
                    }
                }
                return true;
            }
        },
        MATERIAL_UPDATE {
            @Override
            public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String materialScope) {
                for (MaterialConfig materialConfig : cruiseConfig.getAllUniqueMaterials()) {
                    if (HealthStateScope.forMaterialConfigUpdate(materialConfig).getScope().equals(materialScope)) {
                        return false;
                    }
                }
                return true;
            }
        },
        CONFIG_PARTIAL {
            @Override
            public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String materialScope) {
                for (ConfigRepoConfig configRepoConfig : cruiseConfig.getConfigRepos()) {
                    if (HealthStateScope.forPartialConfigRepo(configRepoConfig).getScope().equals(materialScope)) {
                        return false;
                    }
                }
                return true;
            }
        },
        PIPELINE {
            @Override
            public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String pipeline) {
                return !cruiseConfig.hasPipelineNamed(new CaseInsensitiveString(pipeline));
            }
        },
        FANIN {
            @Override
            public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String pipeline) {
                return !cruiseConfig.hasPipelineNamed(new CaseInsensitiveString(pipeline));
            }
        },
        STAGE {
            @Override
            public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String pipelineStage) {
                String[] parts = pipelineStage.split("/");
                return !cruiseConfig.hasStageConfigNamed(new CaseInsensitiveString(parts[0]), new CaseInsensitiveString(parts[1]), true);
            }
        },
        JOB {
            @Override
            public boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String pipelineStageJob) {
                String[] parts = pipelineStageJob.split("/");
                return !cruiseConfig.hasBuildPlan(new CaseInsensitiveString(parts[0]), new CaseInsensitiveString(parts[1]), parts[2], true);
            }
        },
        PLUGIN;

        // Default: this scope type can never become stale.
        protected boolean isRemovedFromConfig(CruiseConfig cruiseConfig, String scope) {
            return false;
        }
    }
}
| VibyJocke/gocd | common/src/com/thoughtworks/go/serverhealth/HealthStateScope.java | Java | apache-2.0 | 9,950 |
/*
* Copyright 2019 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.stunner.bpmn.client.marshall.converters.customproperties;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class AssociationListTest {
private AssociationList tested;
public static final String VALUE = "[din]var1->input1,[din]var2->input2,[dout]var3->output1," +
"[dout]var5->output2";
public static final String VALUE_WITH_COMMA = "[din]var1->input1,[din]var2->input2,input22,input33," +
"[dout]var3->output1,[dout]var5->output2,output22,ouput23";
@Before
public void setUp() {
tested = new AssociationList();
}
@Test
public void fromString() {
AssociationList list = tested.fromString(VALUE);
assertEquals(2, list.getInputs().size());
assertEquals(2, list.getOutputs().size());
}
@Test
public void fromStringWithComma() {
AssociationList list = tested.fromString(VALUE_WITH_COMMA);
assertEquals(2, list.getInputs().size());
assertEquals(2, list.getOutputs().size());
}
} | porcelli-forks/kie-wb-common | kie-wb-common-stunner/kie-wb-common-stunner-sets/kie-wb-common-stunner-bpmn/kie-wb-common-stunner-bpmn-marshalling/src/test/java/org/kie/workbench/common/stunner/bpmn/client/marshall/converters/customproperties/AssociationListTest.java | Java | apache-2.0 | 1,713 |
/*
* Copyright 2008-2013 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package voldemort.cluster;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import voldemort.VoldemortException;
import voldemort.annotations.concurrency.Threadsafe;
import voldemort.annotations.jmx.JmxGetter;
import voldemort.annotations.jmx.JmxManaged;
import voldemort.utils.Utils;
import com.google.common.collect.Sets;
/**
* A representation of the voldemort cluster
*
*
*/
@Threadsafe
@JmxManaged(description = "Metadata about the physical servers on which the Voldemort cluster runs")
public class Cluster implements Serializable {
private static final long serialVersionUID = 1;
private final String name;
private final int numberOfPartitionIds;
private final Map<Integer, Node> nodesById;
private final Map<Integer, Zone> zonesById;
private final Map<Zone, List<Integer>> nodesPerZone;
private final Map<Zone, List<Integer>> partitionsPerZone;
// Since partitionId space must be dense, arrays could be used instead of
// maps. To do so, the partition ID range would have to be determined. This
// could be done by summing up the lengths of each node's .getPartitionIds()
// returned list. This could be faster to construct and lookup by some
// constant and memory footprint could be better.
private final Map<Integer, Zone> partitionIdToZone;
private final Node[] partitionIdToNodeArray;
private final Map<Integer, Node> partitionIdToNode;
private final Map<Integer, Integer> partitionIdToNodeId;
    /** Builds a cluster with no explicit zones; a single default zone is created. */
    public Cluster(String name, List<Node> nodes) {
        this(name, nodes, new ArrayList<Zone>());
    }

    /**
     * Builds a cluster from its nodes and zones, deriving every lookup
     * structure (node/zone indexes, partition-to-node/zone maps) eagerly.
     * Rejects duplicate node ids, duplicate zone ids, nodes referencing
     * unknown zones, and partitions appearing on more than one node.
     */
    public Cluster(String name, List<Node> nodes, List<Zone> zones) {
        this.name = Utils.notNull(name);
        this.partitionsPerZone = new LinkedHashMap<Zone, List<Integer>>();
        this.nodesPerZone = new LinkedHashMap<Zone, List<Integer>>();
        this.partitionIdToZone = new HashMap<Integer, Zone>();
        Map<Integer, Node> partitionIdToNodeMap = new HashMap<Integer, Node>();
        this.partitionIdToNode = new HashMap<Integer, Node>();
        this.partitionIdToNodeId = new HashMap<Integer, Integer>();
        // Index the zones, or synthesize a single default zone when none given.
        if(zones.size() != 0) {
            zonesById = new LinkedHashMap<Integer, Zone>(zones.size());
            for(Zone zone: zones) {
                if(zonesById.containsKey(zone.getId()))
                    throw new IllegalArgumentException("Zone id " + zone.getId()
                                                       + " appears twice in the zone list.");
                zonesById.put(zone.getId(), zone);
                nodesPerZone.put(zone, new ArrayList<Integer>());
                partitionsPerZone.put(zone, new ArrayList<Integer>());
            }
        } else {
            // Add default zone
            zonesById = new LinkedHashMap<Integer, Zone>(1);
            Zone defaultZone = new Zone();
            zonesById.put(defaultZone.getId(), defaultZone);
            nodesPerZone.put(defaultZone, new ArrayList<Integer>());
            partitionsPerZone.put(defaultZone, new ArrayList<Integer>());
        }
        // Index the nodes and their partitions, validating uniqueness and
        // zone membership as we go.
        this.nodesById = new LinkedHashMap<Integer, Node>(nodes.size());
        for(Node node: nodes) {
            if(nodesById.containsKey(node.getId()))
                throw new IllegalArgumentException("Node id " + node.getId()
                                                   + " appears twice in the node list.");
            nodesById.put(node.getId(), node);
            Zone nodesZone = zonesById.get(node.getZoneId());
            if(nodesZone == null) {
                throw new IllegalArgumentException("No zone associated with this node exists.");
            }
            nodesPerZone.get(nodesZone).add(node.getId());
            partitionsPerZone.get(nodesZone).addAll(node.getPartitionIds());
            for(Integer partitionId: node.getPartitionIds()) {
                // A partition may live on exactly one node.
                if(this.partitionIdToNodeId.containsKey(partitionId)) {
                    throw new IllegalArgumentException("Partition id " + partitionId
                                                       + " found on two nodes : " + node.getId()
                                                       + " and "
                                                       + this.partitionIdToNodeId.get(partitionId));
                }
                this.partitionIdToZone.put(partitionId, nodesZone);
                partitionIdToNodeMap.put(partitionId, node);
                this.partitionIdToNode.put(partitionId, node);
                this.partitionIdToNodeId.put(partitionId, node.getId());
            }
        }
        // Partition ids are dense (0..n-1), so a flat array gives O(1) lookup.
        this.numberOfPartitionIds = getNumberOfTags(nodes);
        this.partitionIdToNodeArray = new Node[this.numberOfPartitionIds];
        for(int partitionId = 0; partitionId < this.numberOfPartitionIds; partitionId++) {
            this.partitionIdToNodeArray[partitionId] = partitionIdToNodeMap.get(partitionId);
        }
    }
private int getNumberOfTags(List<Node> nodes) {
List<Integer> tags = new ArrayList<Integer>();
for(Node node: nodes) {
tags.addAll(node.getPartitionIds());
}
Collections.sort(tags);
for(int i = 0; i < numberOfPartitionIds; i++) {
if(tags.get(i).intValue() != i)
throw new IllegalArgumentException("Invalid tag assignment.");
}
return tags.size();
}
    @JmxGetter(name = "name", description = "The name of the cluster")
    public String getName() {
        return name;
    }
    public Collection<Node> getNodes() {
        return nodesById.values();
    }
    /**
     * @return Sorted set of node Ids
     */
    public Set<Integer> getNodeIds() {
        Set<Integer> nodeIds = nodesById.keySet();
        return new TreeSet<Integer>(nodeIds);
    }
    /**
     *
     * @return Sorted set of Zone Ids
     */
    public Set<Integer> getZoneIds() {
        Set<Integer> zoneIds = zonesById.keySet();
        return new TreeSet<Integer>(zoneIds);
    }
    public Collection<Zone> getZones() {
        return zonesById.values();
    }
    /** Looks up a zone by id, failing loudly with the list of known zones. */
    public Zone getZoneById(int id) {
        Zone zone = zonesById.get(id);
        if(zone == null) {
            throw new VoldemortException("No such zone in cluster: " + id
                                         + " Available zones : " + displayZones());
        }
        return zone;
    }
    // Renders the known zone ids as "{0,1,2}" for error messages.
    private String displayZones() {
        String zoneIDS = "{";
        for(Zone z: this.getZones()) {
            if(zoneIDS.length() != 1)
                zoneIDS += ",";
            zoneIDS += z.getId();
        }
        zoneIDS += "}";
        return zoneIDS;
    }
    public int getNumberOfZones() {
        return zonesById.size();
    }
    public int getNumberOfPartitionsInZone(Integer zoneId) {
        return partitionsPerZone.get(getZoneById(zoneId)).size();
    }
    public int getNumberOfNodesInZone(Integer zoneId) {
        return nodesPerZone.get(getZoneById(zoneId)).size();
    }
    /**
     * @return Sorted set of node Ids for given zone
     */
    public Set<Integer> getNodeIdsInZone(Integer zoneId) {
        return new TreeSet<Integer>(nodesPerZone.get(getZoneById(zoneId)));
    }
    /**
     * @return Sorted set of partition Ids for given zone
     */
    public Set<Integer> getPartitionIdsInZone(Integer zoneId) {
        return new TreeSet<Integer>(partitionsPerZone.get(getZoneById(zoneId)));
    }
    public Zone getZoneForPartitionId(int partitionId) {
        return partitionIdToZone.get(partitionId);
    }
    public Node getNodeForPartitionId(int partitionId) {
        return this.partitionIdToNodeArray[partitionId];
    }
    // NOTE(review): exposes the internal array without a copy - callers must
    // treat it as read-only; confirm no caller mutates it.
    public Node[] getPartitionIdToNodeArray() {
        return this.partitionIdToNodeArray;
    }
    /**
     *
     * @return Map of partition id to node id.
     */
    public Map<Integer, Integer> getPartitionIdToNodeIdMap() {
        return new HashMap<Integer, Integer>(partitionIdToNodeId);
    }
    public Node getNodeById(int id) {
        Node node = nodesById.get(id);
        if(node == null)
            throw new VoldemortException("No such node in cluster: " + id);
        return node;
    }
/**
* Given a cluster and a node id checks if the node exists
*
* @param nodeId The node id to search for
* @return True if cluster contains the node id, else false
*/
public boolean hasNodeWithId(int nodeId) {
Node node = nodesById.get(nodeId);
if(node == null) {
return false;
}
return true;
}
    /** @return the number of nodes (also exposed over JMX). */
    @JmxGetter(name = "numberOfNodes", description = "The number of nodes in the cluster.")
    public int getNumberOfNodes() {
        return nodesById.size();
    }

    /** @return the total number of partition ids in the cluster. */
    public int getNumberOfPartitions() {
        return numberOfPartitionIds;
    }
@Override
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("Cluster('");
builder.append(getName());
builder.append("', [");
for(Node n: getNodes()) {
builder.append(n.toString());
builder.append('\n');
}
builder.append("])");
return builder.toString();
}
/**
* Return a detailed string representation of the current cluster
*
* @param isDetailed
* @return descripton of cluster
*/
public String toString(boolean isDetailed) {
if(!isDetailed) {
return toString();
}
StringBuilder builder = new StringBuilder("Cluster [" + getName() + "] Nodes ["
+ getNumberOfNodes() + "] Zones ["
+ getNumberOfZones() + "] Partitions ["
+ getNumberOfPartitions() + "]");
builder.append(" Zone Info [" + getZones() + "]");
builder.append(" Node Info [" + getNodes() + "]");
return builder.toString();
}
    /**
     * Clones the cluster by constructing a new one with same name, partition
     * layout, and nodes. The node and zone collections are shallow-copied
     * into fresh lists; Node and Zone instances themselves are shared.
     *
     * @param cluster the cluster to clone
     * @return clone of Cluster cluster.
     */
    public static Cluster cloneCluster(Cluster cluster) {
        // Could add a better .clone() implementation that clones the derived
        // data structures. The constructor invoked by this clone implementation
        // can be slow for large numbers of partitions. Probably faster to copy
        // all the maps and stuff.
        return new Cluster(cluster.getName(),
                           new ArrayList<Node>(cluster.getNodes()),
                           new ArrayList<Zone>(cluster.getZones()));
        /*-
         * Historic "clone" code being kept in case this, for some reason, was the "right" way to be doing this.
        ClusterMapper mapper = new ClusterMapper();
        return mapper.readCluster(new StringReader(mapper.writeCluster(cluster)));
         */
    }
@Override
public boolean equals(Object second) {
if(this == second)
return true;
if(second == null || second.getClass() != getClass())
return false;
Cluster secondCluster = (Cluster) second;
if(this.getZones().size() != secondCluster.getZones().size()) {
return false;
}
if(this.getNodes().size() != secondCluster.getNodes().size()) {
return false;
}
for(Zone zoneA: this.getZones()) {
Zone zoneB;
try {
zoneB = secondCluster.getZoneById(zoneA.getId());
} catch(VoldemortException e) {
return false;
}
if(zoneB == null || zoneB.getProximityList().size() != zoneA.getProximityList().size()) {
return false;
}
for(int index = 0; index < zoneA.getProximityList().size(); index++) {
if(zoneA.getProximityList().get(index) != zoneB.getProximityList().get(index)) {
return false;
}
}
}
for(Node nodeA: this.getNodes()) {
Node nodeB;
try {
nodeB = secondCluster.getNodeById(nodeA.getId());
} catch(VoldemortException e) {
return false;
}
if(nodeA.getNumberOfPartitions() != nodeB.getNumberOfPartitions()) {
return false;
}
if(nodeA.getZoneId() != nodeB.getZoneId()) {
return false;
}
if(!Sets.newHashSet(nodeA.getPartitionIds())
.equals(Sets.newHashSet(nodeB.getPartitionIds())))
return false;
}
return true;
}
    /**
     * Hash over the node count and each node's host name (order-independent
     * XOR).
     *
     * NOTE(review): equals() compares zones, partitions and node ids but not
     * hosts, so two equals() clusters could in principle hash differently if
     * their hosts differ -- confirm whether that case can actually occur.
     */
    @Override
    public int hashCode() {
        int hc = getNodes().size();
        for(Node node: getNodes()) {
            hc ^= node.getHost().hashCode();
        }
        return hc;
    }
}
| HB-SI/voldemort | src/java/voldemort/cluster/Cluster.java | Java | apache-2.0 | 13,745 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<title>Uses of Class org.deidentifier.arx.gui.view.impl.define.ViewAttributeDefinition (ARX GUI Documentation)</title>
<link rel="stylesheet" type="text/css" href="../../../../../../../../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="Uses of Class org.deidentifier.arx.gui.view.impl.define.ViewAttributeDefinition (ARX GUI Documentation)";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../../../org/deidentifier/arx/gui/view/impl/define/ViewAttributeDefinition.html" title="class in org.deidentifier.arx.gui.view.impl.define">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../../../index.html?org/deidentifier/arx/gui/view/impl/define/class-use/ViewAttributeDefinition.html" target="_top">Frames</a></li>
<li><a href="ViewAttributeDefinition.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../../../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h2 title="Uses of Class org.deidentifier.arx.gui.view.impl.define.ViewAttributeDefinition" class="title">Uses of Class<br>org.deidentifier.arx.gui.view.impl.define.ViewAttributeDefinition</h2>
</div>
<div class="classUseContainer">No usage of org.deidentifier.arx.gui.view.impl.define.ViewAttributeDefinition</div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../../../../../../../../overview-summary.html">Overview</a></li>
<li><a href="../package-summary.html">Package</a></li>
<li><a href="../../../../../../../../org/deidentifier/arx/gui/view/impl/define/ViewAttributeDefinition.html" title="class in org.deidentifier.arx.gui.view.impl.define">Class</a></li>
<li class="navBarCell1Rev">Use</li>
<li><a href="../package-tree.html">Tree</a></li>
<li><a href="../../../../../../../../deprecated-list.html">Deprecated</a></li>
<li><a href="../../../../../../../../index-all.html">Index</a></li>
<li><a href="../../../../../../../../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="../../../../../../../../index.html?org/deidentifier/arx/gui/view/impl/define/class-use/ViewAttributeDefinition.html" target="_top">Frames</a></li>
<li><a href="ViewAttributeDefinition.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../../../../../../../../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
| TheRealRasu/arx | doc/gui/org/deidentifier/arx/gui/view/impl/define/class-use/ViewAttributeDefinition.html | HTML | apache-2.0 | 4,502 |
<?php
final class PhabricatorSetupCheckRepositories extends PhabricatorSetupCheck {

  /**
   * Verify that 'repository.default-local-path' is configured and that the
   * configured path exists on disk; raise a setup issue otherwise.
   */
  protected function executeChecks() {
    $repo_path = PhabricatorEnv::getEnvConfig('repository.default-local-path');

    // Case 1: the option is unset entirely.
    if (!$repo_path) {
      $summary = pht(
        "The configuration option '%s' is not set.",
        'repository.default-local-path');

      $this->newIssue('repository.default-local-path.empty')
        ->setName(pht('Missing Repository Local Path'))
        ->setSummary($summary)
        ->addPhabricatorConfig('repository.default-local-path');
      return;
    }

    // Case 2: the option is set but the directory is absent or unreadable.
    if (!Filesystem::pathExists($repo_path)) {
      $summary = pht(
        'The path for local repositories does not exist, or is not '.
        'readable by the webserver.');

      $message = pht(
        "The directory for local repositories (%s) does not exist, or is not ".
        "readable by the webserver. Phabricator uses this directory to store ".
        "information about repositories. If this directory does not exist, ".
        "create it:\n\n".
        "%s\n".
        "If this directory exists, make it readable to the webserver. You ".
        "can also edit the configuration below to use some other directory.",
        phutil_tag('tt', array(), $repo_path),
        phutil_tag('pre', array(), csprintf('$ mkdir -p %s', $repo_path)));

      // NOTE(review): this issue reuses the same key
      // 'repository.default-local-path.empty' as the unset-config case above;
      // confirm whether a distinct key (e.g. '...missing') was intended.
      $this->newIssue('repository.default-local-path.empty')
        ->setName(pht('Missing Repository Local Path'))
        ->setSummary($summary)
        ->setMessage($message)
        ->addPhabricatorConfig('repository.default-local-path');
    }
  }
}
| WuJiahu/phabricator | src/applications/config/check/PhabricatorSetupCheckRepositories.php | PHP | apache-2.0 | 1,597 |
/**
* Copyright 2014-2016 CyberVision, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kaaproject.kaa.server.common.nosql.cassandra.dao.model;
import nl.jqno.equalsverifier.EqualsVerifier;
import nl.jqno.equalsverifier.Warning;
import org.junit.Test;
public class CassandraEndpointUserTest {

    /**
     * Verifies the equals()/hashCode() contract of CassandraEndpointUser via
     * EqualsVerifier. NONFINAL_FIELDS is suppressed because the model class
     * has mutable (non-final) fields.
     */
    @Test
    public void hashCodeEqualsTest(){
        EqualsVerifier.forClass(CassandraEndpointUser.class).suppress(Warning.NONFINAL_FIELDS).verify();
    }
}
# Copyright 2015 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fastfood Chef Cookbook manager."""
from __future__ import print_function
import os
from fastfood import utils
class CookBook(object):
    """Chef Cookbook object.

    Understands metadata.rb, Berksfile and how to parse them.
    """

    def __init__(self, path):
        """Initialize CookBook wrapper at 'path'.

        :param path: path to the cookbook directory; normalized before use.
        :raises ValueError: if the normalized path is not an existing dir.
        """
        self.path = utils.normalize_path(path)
        self._metadata = None
        self._berksfile = None
        # Validate the *normalized* path. The previous code checked the raw
        # 'path' argument but reported self.path in the error message, so the
        # check and the message could disagree (e.g. for '~'-style paths,
        # depending on what utils.normalize_path does).
        if not os.path.isdir(self.path):
            raise ValueError("Cookbook dir %s does not exist."
                             % self.path)

    @property
    def name(self):
        """Cookbook name as declared in metadata.rb.

        :raises LookupError: if metadata.rb has no 'name' attribute.
        """
        try:
            return self.metadata.to_dict()['name']
        except KeyError:
            raise LookupError("%s is missing 'name' attribute'."
                              % self.metadata)

    @property
    def metadata(self):
        """Return a (cached) MetadataRb wrapper for this cookbook.

        :raises ValueError: if metadata.rb is missing.
        """
        self.metadata_path = os.path.join(self.path, 'metadata.rb')
        if not os.path.isfile(self.metadata_path):
            raise ValueError("Cookbook needs metadata.rb, %s"
                             % self.metadata_path)
        if not self._metadata:
            self._metadata = MetadataRb(open(self.metadata_path, 'r+'))
        return self._metadata

    @property
    def berksfile(self):
        """Return this cookbook's (cached) Berksfile instance.

        :raises ValueError: if no Berksfile exists in the cookbook dir.
        """
        self.berks_path = os.path.join(self.path, 'Berksfile')
        if not self._berksfile:
            if not os.path.isfile(self.berks_path):
                raise ValueError("No Berksfile found at %s"
                                 % self.berks_path)
            self._berksfile = Berksfile(open(self.berks_path, 'r+'))
        return self._berksfile
class MetadataRb(utils.FileWrapper):
    """Wrapper for a metadata.rb file."""

    @classmethod
    def from_dict(cls, dictionary):
        """Create a MetadataRb instance from a dict.

        Only the 'depends' key is honored; every dependency becomes one
        Ruby 'depends' statement.
        """
        cookbooks = set()
        # put these in order
        groups = [cookbooks]
        for key, val in dictionary.items():
            if key == 'depends':
                cookbooks.update({cls.depends_statement(cbn, meta)
                                  for cbn, meta in val.items()})
        body = ''
        for group in groups:
            if group:
                body += '\n'
            body += '\n'.join(group)
        # from_string() is inherited from utils.FileWrapper -- presumably it
        # wraps the text in an in-memory file; TODO confirm.
        return cls.from_string(body)

    @staticmethod
    def depends_statement(cookbook_name, metadata=None):
        """Return a valid Ruby 'depends' statement for the metadata.rb file.

        :param metadata: optional dict whose keys (e.g. version constraints)
            are appended as quoted arguments.
        :raises TypeError: if metadata is provided but is not a dict.
        """
        line = "depends '%s'" % cookbook_name
        if metadata:
            if not isinstance(metadata, dict):
                raise TypeError("Stencil dependency options for %s "
                                "should be a dict of options, not %s."
                                % (cookbook_name, metadata))
            # NOTE(review): this inner truthiness check duplicates the outer
            # 'if metadata:' guard and can never be False here.
            if metadata:
                line = "%s '%s'" % (line, "', '".join(metadata))
        return line

    def to_dict(self):
        """Return a dictionary representation of this metadata.rb file."""
        return self.parse()

    def parse(self):
        """Parse the metadata.rb into a dict.

        'depends' lines are collected into a {name: [details]} sub-dict;
        every other line becomes a simple key/value entry.
        """
        # utils.ruby_lines() presumably filters comments/blank lines --
        # TODO confirm against fastfood.utils.
        data = utils.ruby_lines(self.readlines())
        data = [tuple(j.strip() for j in line.split(None, 1))
                for line in data]
        depends = {}
        for line in data:
            if not len(line) == 2:
                continue
            key, value = line
            if key == 'depends':
                value = value.split(',')
                lib = utils.ruby_strip(value[0])
                detail = [utils.ruby_strip(j) for j in value[1:]]
                depends[lib] = detail
        # NOTE(review): this comprehension unpacks 2-tuples; a bare one-token
        # line (e.g. a lone keyword) would raise ValueError here -- confirm
        # metadata.rb files never contain such lines.
        datamap = {key: utils.ruby_strip(val) for key, val in data}
        if depends:
            datamap['depends'] = depends
        self.seek(0)
        return datamap

    def merge(self, other):
        """Add requirements from 'other' metadata.rb into this one.

        :raises TypeError: if other is not a MetadataRb.
        """
        if not isinstance(other, MetadataRb):
            # Bug fix: the message was previously passed logging-style
            # (TypeError("... %s.", type(other))), so the %s was never
            # interpolated; use % formatting instead.
            raise TypeError("MetadataRb to merge should be a 'MetadataRb' "
                            "instance, not %s." % type(other))
        current = self.to_dict()
        new = other.to_dict()

        # compare and gather cookbook dependencies
        meta_writelines = ['%s\n' % self.depends_statement(cbn, meta)
                           for cbn, meta in new.get('depends', {}).items()
                           if cbn not in current.get('depends', {})]
        self.write_statements(meta_writelines)
        return self.to_dict()
class Berksfile(utils.FileWrapper):
    """Wrapper for a Berksfile."""

    # Options recognized in the ruby-hash detail of a 'cookbook' line.
    berks_options = [
        'branch',
        'git',
        'path',
        'ref',
        'revision',
        'tag',
    ]

    def to_dict(self):
        """Return a dictionary representation of this Berksfile."""
        return self.parse()

    def parse(self):
        """Parse this Berksfile into a dict.

        Produces entries like:
          {'source': [...], 'metadata': True,
           'cookbook': {name: {'constraint': ..., 'git': ..., ...}}}
        """
        self.flush()
        self.seek(0)
        # utils.ruby_lines() presumably filters comments/blank lines --
        # TODO confirm against fastfood.utils.
        data = utils.ruby_lines(self.readlines())
        data = [tuple(j.strip() for j in line.split(None, 1))
                for line in data]
        datamap = {}
        for line in data:
            if len(line) == 1:
                # Bare keyword such as 'metadata'.
                datamap[line[0]] = True
            elif len(line) == 2:
                key, value = line
                if key == 'cookbook':
                    datamap.setdefault('cookbook', {})
                    value = [utils.ruby_strip(v) for v in value.split(',')]
                    lib, detail = value[0], value[1:]
                    datamap['cookbook'].setdefault(lib, {})
                    # if there is additional dependency data but its
                    # not the ruby hash, its the version constraint
                    if detail and not any("".join(detail).startswith(o)
                                          for o in self.berks_options):
                        constraint, detail = detail[0], detail[1:]
                        datamap['cookbook'][lib]['constraint'] = constraint
                    if detail:
                        for deet in detail:
                            opt, val = [
                                utils.ruby_strip(i)
                                for i in deet.split(':', 1)
                            ]
                            if not any(opt == o for o in self.berks_options):
                                raise ValueError(
                                    "Cookbook detail '%s' does not specify "
                                    "one of '%s'" % (opt, self.berks_options))
                            else:
                                datamap['cookbook'][lib][opt.strip(':')] = (
                                    utils.ruby_strip(val))
                elif key == 'source':
                    datamap.setdefault(key, [])
                    datamap[key].append(utils.ruby_strip(value))
                elif key:
                    datamap[key] = utils.ruby_strip(value)
        self.seek(0)
        return datamap

    @classmethod
    def from_dict(cls, dictionary):
        """Create a Berksfile instance from a dict."""
        cookbooks = set()
        sources = set()
        other = set()
        # put these in order: sources first, then cookbooks, then keywords
        groups = [sources, cookbooks, other]
        for key, val in dictionary.items():
            if key == 'cookbook':
                cookbooks.update({cls.cookbook_statement(cbn, meta)
                                  for cbn, meta in val.items()})
            elif key == 'source':
                sources.update({"source '%s'" % src for src in val})
            elif key == 'metadata':
                other.add('metadata')
        body = ''
        for group in groups:
            if group:
                body += '\n'
            body += '\n'.join(group)
        return cls.from_string(body)

    @staticmethod
    def cookbook_statement(cookbook_name, metadata=None):
        """Return a valid Ruby 'cookbook' statement for the Berksfile.

        :param metadata: optional dict of options; 'constraint' is emitted
            positionally, everything else as 'key: value'.
        :raises TypeError: if metadata is provided but is not a dict.
        """
        line = "cookbook '%s'" % cookbook_name
        if metadata:
            if not isinstance(metadata, dict):
                raise TypeError("Berksfile dependency hash for %s "
                                "should be a dict of options, not %s."
                                % (cookbook_name, metadata))
            # not like the others...
            if 'constraint' in metadata:
                line += ", '%s'" % metadata.pop('constraint')
            for opt, spec in metadata.items():
                line += ", %s: '%s'" % (opt, spec)
        return line

    def merge(self, other):
        """Add requirements from 'other' Berksfile into this one.

        :raises TypeError: if other is not a Berksfile.
        """
        if not isinstance(other, Berksfile):
            # Bug fix: the message was previously passed logging-style
            # (TypeError("... %s.", type(other))), so the %s was never
            # interpolated; use % formatting instead.
            raise TypeError("Berksfile to merge should be a 'Berksfile' "
                            "instance, not %s." % type(other))
        current = self.to_dict()
        new = other.to_dict()

        # compare and gather cookbook dependencies
        berks_writelines = ['%s\n' % self.cookbook_statement(cbn, meta)
                            for cbn, meta in new.get('cookbook', {}).items()
                            if cbn not in current.get('cookbook', {})]
        # compare and gather 'source' requirements
        berks_writelines.extend(["source '%s'\n" % src for src
                                 in new.get('source', [])
                                 if src not in current.get('source', [])])

        self.write_statements(berks_writelines)
        return self.to_dict()
| samstav/fastfood | fastfood/book.py | Python | apache-2.0 | 10,125 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.composite;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import java.io.IOException;
import static org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
import static org.elasticsearch.search.aggregations.support.ValuesSource.Bytes;
import static org.elasticsearch.search.aggregations.support.ValuesSource.Bytes.WithOrdinals;
final class CompositeValuesComparator {
    private final int size;
    private final CompositeValuesSource<?, ?>[] arrays;

    // True once setTop() has supplied an "after" key to resume from.
    private boolean topValueSet = false;

    /**
     *
     * @param sources The list of {@link CompositeValuesSourceConfig} to build the composite buckets.
     * @param size The number of composite buckets to keep.
     */
    CompositeValuesComparator(IndexReader reader, CompositeValuesSourceConfig[] sources, int size) {
        this.size = size;
        this.arrays = new CompositeValuesSource<?, ?>[sources.length];
        for (int i = 0; i < sources.length; i++) {
            final int reverseMul = sources[i].reverseMul();
            // Global ordinals are only usable on a top-level DirectoryReader.
            if (sources[i].valuesSource() instanceof WithOrdinals && reader instanceof DirectoryReader) {
                WithOrdinals vs = (WithOrdinals) sources[i].valuesSource();
                arrays[i] = CompositeValuesSource.wrapGlobalOrdinals(vs, size, reverseMul);
            } else if (sources[i].valuesSource() instanceof Bytes) {
                Bytes vs = (Bytes) sources[i].valuesSource();
                arrays[i] = CompositeValuesSource.wrapBinary(vs, size, reverseMul);
            } else if (sources[i].valuesSource() instanceof Numeric) {
                final Numeric vs = (Numeric) sources[i].valuesSource();
                if (vs.isFloatingPoint()) {
                    arrays[i] = CompositeValuesSource.wrapDouble(vs, size, reverseMul);
                } else {
                    arrays[i] = CompositeValuesSource.wrapLong(vs, sources[i].format(), size, reverseMul);
                }
            }
            // NOTE(review): a source that matches none of the branches leaves
            // arrays[i] null -- presumably the configs are pre-validated
            // upstream; confirm.
        }
    }

    /**
     * Moves the values in <code>slot1</code> to <code>slot2</code>.
     */
    void move(int slot1, int slot2) {
        assert slot1 < size && slot2 < size;
        for (int i = 0; i < arrays.length; i++) {
            arrays[i].move(slot1, slot2);
        }
    }

    /**
     * Compares the values in <code>slot1</code> with <code>slot2</code>.
     * Lexicographic over the sources: the first non-equal source decides.
     */
    int compare(int slot1, int slot2) {
        assert slot1 < size && slot2 < size;
        for (int i = 0; i < arrays.length; i++) {
            int cmp = arrays[i].compare(slot1, slot2);
            if (cmp != 0) {
                return cmp;
            }
        }
        return 0;
    }

    /**
     * Returns true if a top value has been set for this comparator.
     */
    boolean hasTop() {
        return topValueSet;
    }

    /**
     * Sets the top values for this comparator.
     */
    void setTop(Comparable<?>[] values) {
        assert values.length == arrays.length;
        topValueSet = true;
        for (int i = 0; i < arrays.length; i++) {
            arrays[i].setTop(values[i]);
        }
    }

    /**
     * Compares the top values with the values in <code>slot</code>.
     * Lexicographic over the sources, like {@link #compare(int, int)}.
     */
    int compareTop(int slot) {
        assert slot < size;
        for (int i = 0; i < arrays.length; i++) {
            int cmp = arrays[i].compareTop(slot);
            if (cmp != 0) {
                return cmp;
            }
        }
        return 0;
    }

    /**
     * Builds the {@link CompositeKey} for <code>slot</code>.
     */
    CompositeKey toCompositeKey(int slot) throws IOException {
        assert slot < size;
        Comparable<?>[] values = new Comparable<?>[arrays.length];
        for (int i = 0; i < values.length; i++) {
            values[i] = arrays[i].toComparable(slot);
        }
        return new CompositeKey(values);
    }

    /**
     * Gets the {@link LeafBucketCollector} that will record the composite buckets of the visited documents.
     * The collectors are chained from the last source to the first, so that
     * the first source's collector is invoked first per document.
     */
    CompositeValuesSource.Collector getLeafCollector(LeafReaderContext context, CompositeValuesSource.Collector in) throws IOException {
        int last = arrays.length - 1;
        CompositeValuesSource.Collector next = arrays[last].getLeafCollector(context, in);
        for (int i = last - 1; i >= 0; i--) {
            next = arrays[i].getLeafCollector(context, next);
        }
        return next;
    }
}
| qwerty4030/elasticsearch | server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesComparator.java | Java | apache-2.0 | 5,333 |
<?php
App::uses('AppModel', 'Model');
App::uses('SimplePasswordHasher', 'Controller/Component/Auth');
/**
 * Product model (CakePHP).
 *
 * Intentionally empty: all behavior is inherited from AppModel, and CakePHP
 * conventions map this class to the 'products' table.
 *
 * NOTE(review): SimplePasswordHasher is loaded at the top of this file but is
 * unused by this model -- confirm whether that App::uses() call can go.
 */
class Product extends AppModel {
}
class UsersController < ApplicationController
  include BatchProcessable

  # special find method before load_resource
  before_filter :build_user_with_proper_mission, :only => [:new, :create]

  # authorization via CanCan
  load_and_authorize_resource

  # Lists users sorted by name; supports an optional text search.
  def index
    # sort and eager load
    @users = @users.by_name

    # if there is a search with the '.' character in it, we can't eager load due to a bug in Rails
    # this should be fixed in Rails 4
    unless params[:search].present? && params[:search].match(/\./)
      @users = @users.with_assoc
    end

    # do search if applicable
    if params[:search].present?
      begin
        @users = User.do_search(@users, params[:search])
      rescue Search::ParseError
        # $! is the exception just rescued; surface its message to the user
        flash.now[:error] = $!.to_s
        @search_error = true
      end
    end
  end

  def new
    # set the default pref_lang based on the mission settings
    prepare_and_render_form
  end

  def show
    prepare_and_render_form
  end

  def edit
    prepare_and_render_form
  end

  def create
    if @user.save
      @user.reset_password_if_requested
      set_success(@user)

      # render printable instructions if requested
      handle_printable_instructions

    # if create failed, render the form again
    else
      flash.now[:error] = I18n.t('activerecord.errors.models.user.general')
      prepare_and_render_form
    end
  end

  def update
    # make sure changing assignment role is permitted if attempting
    authorize!(:change_assignments, @user) if params[:user]['assignments_attributes']

    @user.assign_attributes(params[:user])
    # remember before save, since save resets the dirty flag
    pref_lang_changed = @user.pref_lang_changed?

    if @user.save
      # editing oneself switches the locale immediately and returns to the form
      if @user == current_user
        I18n.locale = @user.pref_lang.to_sym if pref_lang_changed
        flash[:success] = t("user.profile_updated")
        redirect_to(:action => :edit)
      else
        set_success(@user)

        # if the user's password was reset, do it, and show instructions if requested
        @user.reset_password_if_requested
        handle_printable_instructions
      end

    # if save failed, render the form again
    else
      flash.now[:error] = I18n.t('activerecord.errors.models.user.general')
      prepare_and_render_form
    end
  end

  def destroy
    destroy_and_handle_errors(@user)
    redirect_to(index_url_with_page_num)
  end

  # shows printable login instructions for the user
  def login_instructions
  end

  # exports the selected users to VCF format
  def export
    respond_to do |format|
      format.vcf do
        @users = params[:selected] ? load_selected_objects(User) : []
        render(:text => @users.collect{|u| u.to_vcf}.join("\n"))
      end
    end
  end

  # issues a fresh API key for the user, then returns to the edit form
  def regenerate_key
    @user = User.find(params[:id])
    @user.regenerate_api_key
    redirect_to(:action => :edit)
  end

  private

  # if we need to print instructions, redirects to the instructions action. otherwise redirects to index.
  def handle_printable_instructions
    if @user.reset_password_method == "print"
      # save the password in the flash since we won't be able to get it once it's crypted
      flash[:password] = @user.password
      redirect_to(:action => :login_instructions, :id => @user.id)
    else
      redirect_to(index_url_with_page_num)
    end
  end

  # prepares objects and renders the form template
  def prepare_and_render_form
    if admin_mode?
      # get assignable missons and roles for this user
      @assignments = @user.assignments.as_json(:include => :mission, :methods => :new_record?)
      @assignment_permissions = @user.assignments.map{|a| can?(:update, a)}
      @assignable_missions = Mission.accessible_by(current_ability, :assign_to).sorted_by_name.as_json(:only => [:id, :name])
      @assignable_roles = Ability.assignable_roles(current_user)
    else
      @current_assignment = @user.assignments_by_mission[current_mission] || @user.assignments.build(:mission => current_mission)
    end
    render(:form)
  end

  # builds a user with an appropriate mission assignment if the current_user doesn't have permission to edit a blank user
  def build_user_with_proper_mission
    @user = User.new(params[:user])
    if cannot?(:create, @user) && @user.assignments.empty?
      @user.assignments.build(:mission => current_mission)
    end
  end
end
| nmckahd/AHDBurundi | app/controllers/users_controller.rb | Ruby | apache-2.0 | 4,362 |
<?php
final class PhabricatorProjectIcon extends Phobject {

  /**
   * Map of selectable FontAwesome icon keys to translated display labels.
   */
  public static function getIconMap() {
    return
      array(
        'fa-briefcase' => pht('Briefcase'),
        'fa-tags' => pht('Tag'),
        'fa-folder' => pht('Folder'),
        'fa-users' => pht('Team'),

        'fa-bug' => pht('Bug'),
        'fa-trash-o' => pht('Garbage'),
        'fa-calendar' => pht('Deadline'),
        'fa-flag-checkered' => pht('Goal'),

        'fa-envelope' => pht('Communication'),
        'fa-truck' => pht('Release'),
        'fa-lock' => pht('Policy'),
        'fa-umbrella' => pht('An Umbrella'),

        'fa-cloud' => pht('The Cloud'),
        'fa-building' => pht('Company'),
        'fa-credit-card' => pht('Accounting'),
        'fa-flask' => pht('Experimental'),
      );
  }

  /**
   * Map of selectable tag shades, with the project default color moved to
   * the front and the non-selectable "disabled" shade removed.
   */
  public static function getColorMap() {
    $shades = PHUITagView::getShadeMap();
    $shades = array_select_keys(
      $shades,
      array(PhabricatorProject::DEFAULT_COLOR)) + $shades;
    unset($shades[PHUITagView::COLOR_DISABLED]);

    return $shades;
  }

  /**
   * Translated label for an icon key.
   *
   * NOTE(review): assumes $key exists in the icon map; an unknown key raises
   * an undefined-index notice -- confirm callers validate first.
   */
  public static function getLabel($key) {
    $map = self::getIconMap();
    return $map[$key];
  }

  /**
   * API name for an icon key: the key with its leading 'fa-' stripped.
   */
  public static function getAPIName($key) {
    return substr($key, 3);
  }

  /**
   * Render an icon together with its label, for an icon chooser control.
   * Unknown keys render with an "Unknown Icon" label.
   */
  public static function renderIconForChooser($icon) {
    $project_icons = PhabricatorProjectIcon::getIconMap();

    return phutil_tag(
      'span',
      array(),
      array(
        id(new PHUIIconView())->setIconFont($icon),
        ' ',
        idx($project_icons, $icon, pht('Unknown Icon')),
      ));
  }
}
| hach-que/unearth-phabricator | src/applications/project/icon/PhabricatorProjectIcon.php | PHP | apache-2.0 | 1,547 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.tamaya.events;
import org.apache.tamaya.ConfigException;
import org.apache.tamaya.ConfigOperator;
import org.apache.tamaya.ConfigQuery;
import org.apache.tamaya.Configuration;
import org.apache.tamaya.ConfigurationProvider;
import org.apache.tamaya.TypeLiteral;
import org.apache.tamaya.spi.PropertyConverter;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
/**
 * Test-only {@link ConfigOperator} that restricts a configuration to keys
 * starting with "test".
 *
 * Created by Anatole on 24.03.2015.
 */
public class TestConfigView implements ConfigOperator{

    // Shared singleton; the operator is stateless.
    private static final TestConfigView INSTANCE = new TestConfigView();

    private TestConfigView(){}

    /** @return the shared TestConfigView instance. */
    public static ConfigOperator of(){
        return INSTANCE;
    }

    /**
     * Returns a view of {@code config} containing only properties whose key
     * starts with "test". Only {@code getProperties()} and the
     * {@code get(...)} methods are functional; {@code with} and {@code query}
     * are stubs that return null.
     */
    @Override
    public Configuration operate(final Configuration config) {
        return new Configuration() {
            @Override
            public Map<String, String> getProperties() {
                // Filter to keys with the "test" prefix.
                Map<String, String> result = new HashMap<>();
                for (Map.Entry<String, String> en : config.getProperties().entrySet()) {
                    if (en.getKey().startsWith("test")) {
                        result.put(en.getKey(), en.getValue());
                    }
                }
                return result;
//                return config.getProperties().entrySet().stream().filter(e -> e.getKey().startsWith("test")).collect(
//                        Collectors.toMap(en -> en.getKey(), en -> en.getValue()));
            }

            // Stub: operator composition is not supported by this test view.
            @Override
            public Configuration with(ConfigOperator operator) {
                return null;
            }

            // Stub: queries are not supported by this test view.
            @Override
            public <T> T query(ConfigQuery<T> query) {
                return null;
            }

            @Override
            public String get(String key) {
                return getProperties().get(key);
            }

            @Override
            public <T> T get(String key, Class<T> type) {
                return (T) get(key, TypeLiteral.of(type));
            }

            /**
             * Accesses the current String value for the given key and tries to convert it
             * using the {@link org.apache.tamaya.spi.PropertyConverter} instances provided by the current
             * {@link org.apache.tamaya.spi.ConfigurationContext}.
             *
             * @param key the property's absolute, or relative path, e.g. @code
             *            a/b/c/d.myProperty}.
             * @param type The target type required, not null.
             * @param <T>  the value type
             * @return the converted value, never null.
             */
            @Override
            public <T> T get(String key, TypeLiteral<T> type) {
                String value = get(key);
                if (value != null) {
                    List<PropertyConverter<T>> converters = ConfigurationProvider.getConfigurationContext()
                            .getPropertyConverters(type);
                    // Try each registered converter; first non-null result wins.
                    for (PropertyConverter<T> converter : converters) {
                        try {
                            T t = converter.convert(value);
                            if (t != null) {
                                return t;
                            }
                        } catch (Exception e) {
                            Logger.getLogger(getClass().getName())
                                    .log(Level.FINEST, "PropertyConverter: " + converter + " failed to convert value: "
                                            + value, e);
                        }
                    }
                    // Value exists but no converter could parse it.
                    throw new ConfigException("Unparseable config value for type: " + type.getRawType().getName() + ": " + key);
                }
                // Missing keys yield null (contrary to the inherited javadoc's
                // "never null" -- kept as-is since this is a test helper).
                return null;
            }
        };
    }
}
| syzer/incubator-tamaya | modules/events/src/test/java/org/apache/tamaya/events/TestConfigView.java | Java | apache-2.0 | 4,643 |
<?php
/*+***********************************************************************************
* The contents of this file are subject to the vtiger CRM Public License Version 1.0
* ("License"); You may not use this file except in compliance with the License
* The Original Code is: vtiger CRM Open Source
* The Initial Developer of the Original Code is vtiger.
* Portions created by vtiger are Copyright (C) vtiger.
* All Rights Reserved.
*************************************************************************************/
class Products_Module_Model extends Vtiger_Module_Model {
/**
 * Function to get list view query for popup window
 * @param <String> $sourceModule Parent module
 * @param <String> $field parent fieldname
 * @param <Integer> $record parent id
 * @param <String> $listQuery base list view query to decorate
 * @return <String>|null decorated list view query, or null when the
 *         source module/field combination is not handled here
 */
public function getQueryByModuleField($sourceModule, $field, $record, $listQuery) {
	$supportedModulesList = array($this->getName(), 'Vendors', 'Leads', 'Accounts', 'Contacts', 'Potentials');
	if (($sourceModule == 'PriceBooks' && $field == 'priceBookRelatedList')
			|| in_array($sourceModule, $supportedModulesList)
			|| in_array($sourceModule, getInventoryModules())) {
		// Only active (non-discontinued) products may be picked in the popup.
		// NOTE(review): $record is interpolated into SQL; it is a CRM id and
		// presumably numeric — confirm call sites or bind it to rule out injection.
		$condition = " vtiger_products.discontinued = 1 ";
		if ($sourceModule === $this->getName()) {
			$condition .= " AND vtiger_products.productid NOT IN (SELECT productid FROM vtiger_seproductsrel WHERE crmid = '$record' UNION SELECT crmid FROM vtiger_seproductsrel WHERE productid = '$record') AND vtiger_products.productid <> '$record' ";
		} elseif ($sourceModule === 'PriceBooks') {
			$condition .= " AND vtiger_products.productid NOT IN (SELECT productid FROM vtiger_pricebookproductrel WHERE pricebookid = '$record') ";
		} elseif ($sourceModule === 'Vendors') {
			$condition .= " AND vtiger_products.vendor_id != '$record' ";
		} elseif (in_array($sourceModule, $supportedModulesList)) {
			$condition .= " AND vtiger_products.productid NOT IN (SELECT productid FROM vtiger_seproductsrel WHERE crmid = '$record')";
		}
		$pos = stripos($listQuery, 'where');
		if ($pos) {
			// Splice the condition in after the first WHERE. The original code
			// used spliti(), which was removed in PHP 7 and also discarded any
			// text following a second "where" occurrence; substr() keeps the
			// remainder of the query intact ('where' itself is 5 characters).
			$overRideQuery = substr($listQuery, 0, $pos) . ' WHERE ' . substr($listQuery, $pos + 5) . ' AND ' . $condition;
		} else {
			$overRideQuery = $listQuery . ' WHERE ' . $condition;
		}
		return $overRideQuery;
	}
}
/**
 * Function to get Specific Relation Query for this Module
 *
 * Related Leads are limited to unconverted ones; every other related
 * module falls back to the parent implementation.
 *
 * @param <String> $relatedModule related module name
 * @return <String> extra SQL condition for the relation query
 */
public function getSpecificRelationQuery($relatedModule) {
	if ($relatedModule !== 'Leads') {
		return parent::getSpecificRelationQuery($relatedModule);
	}
	return 'AND vtiger_leaddetails.converted = 0';
}
/**
 * Function to get prices for specified products with specific currency
 * @param <Integer> $currencyId currency to express the prices in
 * @param <Array> $productIdsList product ids to fetch prices for
 * @return <Array> product id => price, as produced by the global helper
 */
public function getPricesForProducts($currencyId, $productIdsList) {
	// Delegates to the global pricing helper, scoped to this module's name.
	return getPricesForProducts($currencyId, $productIdsList, $this->getName());
}
/**
 * Function to check whether the module is summary view supported
 * @return <Boolean> - true/false (Products has no summary view)
 */
public function isSummaryViewSupported() {
	return false;
}
/**
 * Function searches the records in the module, if parentId & parentModule
 * is given then searches only those records related to them.
 *
 * When a bare search value is issued in the context of an inventory
 * module, a product-specific search is used; every other combination is
 * handled by the parent implementation.
 *
 * @param <String> $searchValue - Search value
 * @param <Integer> $parentId - parent recordId
 * @param <String> $parentModule - parent module name
 * @param <String> $relatedModule - related module name
 * @return <Array of Vtiger_Record_Model>
 */
public function searchRecord($searchValue, $parentId = false, $parentModule = false, $relatedModule = false) {
	$isInventorySearch = !empty($searchValue) && empty($parentId) && empty($parentModule)
		&& in_array($relatedModule, getInventoryModules());
	if (!$isInventorySearch) {
		return parent::searchRecord($searchValue);
	}
	return Products_Record_Model::getSearchResult($searchValue, $this->getName());
}
/**
 * Function returns query for Product-PriceBooks relation
 *
 * Joins price books to the product through vtiger_pricebookproductrel and
 * appends the caller's non-admin access-control restriction.
 *
 * @param <Vtiger_Record_Model> $recordModel product record
 * @param <Vtiger_Record_Model> $relatedModuleModel price book module model
 * @return <String> SQL selecting price books (and list prices) for the product
 */
function get_product_pricebooks($recordModel, $relatedModuleModel) {
	// NOTE(review): getId() is a CRM id and presumably numeric; it is
	// concatenated into the SQL below — confirm, or bind, at call sites.
	$query = 'SELECT vtiger_pricebook.pricebookid, vtiger_pricebook.bookname, vtiger_pricebook.active, vtiger_crmentity.crmid,
			vtiger_crmentity.smownerid, vtiger_pricebookproductrel.listprice, vtiger_products.unit_price
			FROM vtiger_pricebook
			INNER JOIN vtiger_pricebookproductrel ON vtiger_pricebook.pricebookid = vtiger_pricebookproductrel.pricebookid
			INNER JOIN vtiger_crmentity on vtiger_crmentity.crmid = vtiger_pricebook.pricebookid
			INNER JOIN vtiger_products on vtiger_products.productid = vtiger_pricebookproductrel.productid
			INNER JOIN vtiger_pricebookcf on vtiger_pricebookcf.pricebookid = vtiger_pricebook.pricebookid
			LEFT JOIN vtiger_users ON vtiger_users.id=vtiger_crmentity.smownerid
			LEFT JOIN vtiger_groups ON vtiger_groups.groupid = vtiger_crmentity.smownerid '
			. Users_Privileges_Model::getNonAdminAccessControlQuery($relatedModuleModel->getName()) .'
			WHERE vtiger_products.productid = '.$recordModel->getId().' and vtiger_crmentity.deleted = 0';
	return $query;
}
} | basiljose1/byjcrm | modules/Products/models/Module.php | PHP | apache-2.0 | 5,293 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.functions.aggfunctions;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.expressions.Expression;
import org.apache.flink.table.expressions.UnresolvedCallExpression;
import org.apache.flink.table.expressions.UnresolvedReferenceExpression;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.DecimalType;
import org.apache.flink.table.types.logical.utils.LogicalTypeMerging;
import java.math.BigDecimal;
import static org.apache.flink.table.expressions.ApiExpressionUtils.unresolvedRef;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.cast;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.div;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.equalTo;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.ifThenElse;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.isNull;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.literal;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.minus;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.nullOf;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.plus;
import static org.apache.flink.table.planner.expressions.ExpressionBuilder.typeLiteral;
/** built-in avg aggregate function. */
public abstract class AvgAggFunction extends DeclarativeAggregateFunction {
private UnresolvedReferenceExpression sum = unresolvedRef("sum");
private UnresolvedReferenceExpression count = unresolvedRef("count");
public abstract DataType getSumType();
@Override
public int operandCount() {
return 1;
}
@Override
public UnresolvedReferenceExpression[] aggBufferAttributes() {
return new UnresolvedReferenceExpression[] {sum, count};
}
@Override
public DataType[] getAggBufferTypes() {
return new DataType[] {getSumType(), DataTypes.BIGINT()};
}
@Override
public Expression[] initialValuesExpressions() {
return new Expression[] {
/* sum = */ literal(0L, getSumType().notNull()), /* count = */ literal(0L)
};
}
@Override
public Expression[] accumulateExpressions() {
return new Expression[] {
/* sum = */ adjustSumType(ifThenElse(isNull(operand(0)), sum, plus(sum, operand(0)))),
/* count = */ ifThenElse(isNull(operand(0)), count, plus(count, literal(1L))),
};
}
@Override
public Expression[] retractExpressions() {
return new Expression[] {
/* sum = */ adjustSumType(ifThenElse(isNull(operand(0)), sum, minus(sum, operand(0)))),
/* count = */ ifThenElse(isNull(operand(0)), count, minus(count, literal(1L))),
};
}
@Override
public Expression[] mergeExpressions() {
return new Expression[] {
/* sum = */ adjustSumType(plus(sum, mergeOperand(sum))),
/* count = */ plus(count, mergeOperand(count))
};
}
private UnresolvedCallExpression adjustSumType(UnresolvedCallExpression sumExpr) {
return cast(sumExpr, typeLiteral(getSumType()));
}
/** If all input are nulls, count will be 0 and we will get null after the division. */
@Override
public Expression getValueExpression() {
Expression ifTrue = nullOf(getResultType());
Expression ifFalse = cast(div(sum, count), typeLiteral(getResultType()));
return ifThenElse(equalTo(count, literal(0L)), ifTrue, ifFalse);
}
/** Built-in Byte Avg aggregate function. */
public static class ByteAvgAggFunction extends AvgAggFunction {
@Override
public DataType getResultType() {
return DataTypes.TINYINT();
}
@Override
public DataType getSumType() {
return DataTypes.BIGINT();
}
}
/** Built-in Short Avg aggregate function. */
public static class ShortAvgAggFunction extends AvgAggFunction {
@Override
public DataType getResultType() {
return DataTypes.SMALLINT();
}
@Override
public DataType getSumType() {
return DataTypes.BIGINT();
}
}
/** Built-in Integer Avg aggregate function. */
public static class IntAvgAggFunction extends AvgAggFunction {
@Override
public DataType getResultType() {
return DataTypes.INT();
}
@Override
public DataType getSumType() {
return DataTypes.BIGINT();
}
}
/** Built-in Long Avg aggregate function. */
public static class LongAvgAggFunction extends AvgAggFunction {
@Override
public DataType getResultType() {
return DataTypes.BIGINT();
}
@Override
public DataType getSumType() {
return DataTypes.BIGINT();
}
}
/** Built-in Float Avg aggregate function. */
public static class FloatAvgAggFunction extends AvgAggFunction {
@Override
public DataType getResultType() {
return DataTypes.FLOAT();
}
@Override
public DataType getSumType() {
return DataTypes.DOUBLE();
}
@Override
public Expression[] initialValuesExpressions() {
return new Expression[] {literal(0D), literal(0L)};
}
}
/** Built-in Double Avg aggregate function. */
public static class DoubleAvgAggFunction extends AvgAggFunction {
@Override
public DataType getResultType() {
return DataTypes.DOUBLE();
}
@Override
public DataType getSumType() {
return DataTypes.DOUBLE();
}
@Override
public Expression[] initialValuesExpressions() {
return new Expression[] {literal(0D), literal(0L)};
}
}
/** Built-in Decimal Avg aggregate function. */
public static class DecimalAvgAggFunction extends AvgAggFunction {
private final DecimalType type;
public DecimalAvgAggFunction(DecimalType type) {
this.type = type;
}
@Override
public DataType getResultType() {
DecimalType t = (DecimalType) LogicalTypeMerging.findAvgAggType(type);
return DataTypes.DECIMAL(t.getPrecision(), t.getScale());
}
@Override
public DataType getSumType() {
DecimalType t = (DecimalType) LogicalTypeMerging.findSumAggType(type);
return DataTypes.DECIMAL(t.getPrecision(), t.getScale());
}
@Override
public Expression[] initialValuesExpressions() {
return new Expression[] {literal(BigDecimal.ZERO, getSumType().notNull()), literal(0L)};
}
}
}
| StephanEwen/incubator-flink | flink-table/flink-table-planner/src/main/java/org/apache/flink/table/planner/functions/aggfunctions/AvgAggFunction.java | Java | apache-2.0 | 7,723 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Defines relational expressions and rules for converting between calling
* conventions.
*/
package org.apache.calcite.rel.convert;
// End package-info.java
| mehant/incubator-calcite | core/src/main/java/org/apache/calcite/rel/convert/package-info.java | Java | apache-2.0 | 964 |
/*Copyright (C) 2012 Longerian (http://www.longerian.me)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.*/
package org.rubychina.android;
import java.util.ArrayList;
import java.util.List;
import org.rubychina.android.type.Node;
import org.rubychina.android.type.SiteGroup;
import org.rubychina.android.type.Topic;
import org.rubychina.android.type.User;
public enum GlobalResource {
    // Single-element enum: the standard Java idiom for a lazily initialized,
    // serialization-safe application-wide singleton.
    INSTANCE;

    // Cached lists shared across the app. NOTE(review): the synchronized
    // accessors only guarantee safe publication of the reference swap — the
    // getters hand out the internal mutable lists themselves, so callers are
    // presumably expected to treat them as read-only; confirm call sites.
    private List<Topic> curTopics = new ArrayList<Topic>();
    private List<Node> nodes = new ArrayList<Node>();
    private List<User> users = new ArrayList<User>();
    private List<SiteGroup> sites = new ArrayList<SiteGroup>();

    /** Returns the cached topic list for the current view. */
    public synchronized List<Topic> getCurTopics() {
        return curTopics;
    }

    /** Replaces the cached topic list (reference swap, no copy). */
    public synchronized void setCurTopics(List<Topic> curTopics) {
        this.curTopics = curTopics;
    }

    /** Returns the cached node list. */
    public synchronized List<Node> getNodes() {
        return nodes;
    }

    /** Replaces the cached node list (reference swap, no copy). */
    public synchronized void setNodes(List<Node> nodes) {
        this.nodes = nodes;
    }

    /** Returns the cached user list. */
    public synchronized List<User> getUsers() {
        return users;
    }

    /** Replaces the cached user list (reference swap, no copy). */
    public synchronized void setUsers(List<User> users) {
        this.users = users;
    }

    /** Returns the cached site-group list. */
    public synchronized List<SiteGroup> getSites() {
        return sites;
    }

    /** Replaces the cached site-group list (reference swap, no copy). */
    public synchronized void setSites(List<SiteGroup> sites) {
        this.sites = sites;
    }
}
| longerian/RC4A | src/org/rubychina/android/GlobalResource.java | Java | apache-2.0 | 1,797 |
package set
import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path"
"strings"
"github.com/golang/glog"
"github.com/spf13/cobra"
kapi "k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/meta"
kresource "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
kclient "k8s.io/kubernetes/pkg/client/unversioned"
kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/resource"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"github.com/openshift/origin/pkg/cmd/templates"
cmdutil "github.com/openshift/origin/pkg/cmd/util"
"github.com/openshift/origin/pkg/cmd/util/clientcmd"
)
const (
	// volumePrefix is the prefix used when auto-generating volume names.
	volumePrefix = "volume-"
	// storageAnnClass is the (beta) annotation key used to request a
	// StorageClass on generated persistent volume claims.
	storageAnnClass = "volume.beta.kubernetes.io/storage-class"
)
// Help text rendered by cobra for the volume command. templates.LongDesc and
// templates.Examples normalize the leading whitespace of these raw literals.
var (
	volumeLong = templates.LongDesc(`
		Update volumes on a pod template
		This command can add, update or remove volumes from containers for any object
		that has a pod template (deployment configs, replication controllers, or pods).
		You can list volumes in pod or any object that has a pod template. You can
		specify a single object or multiple, and alter volumes on all containers or
		just those that match a given name.
		If you alter a volume setting on a deployment config, a deployment will be
		triggered. Changing a replication controller will not affect running pods, and
		you cannot change a pod's volumes once it has been created.
		Volume types include:
		* emptydir (empty directory) *default*: A directory allocated when the pod is
		created on a local host, is removed when the pod is deleted and is not copied
		across servers
		* hostdir (host directory): A directory with specific path on any host
		(requires elevated privileges)
		* persistentvolumeclaim or pvc (persistent volume claim): Link the volume
		directory in the container to a persistent volume claim you have allocated by
		name - a persistent volume claim is a request to allocate storage. Note that
		if your claim hasn't been bound, your pods will not start.
		* secret (mounted secret): Secret volumes mount a named secret to the provided
		directory.
		For descriptions on other volume types, see https://docs.openshift.com`)

	// %[1]s is substituted with the full command name by NewCmdVolume.
	volumeExample = templates.Examples(`
		# List volumes defined on all deployment configs in the current project
		%[1]s volume dc --all
		# Add a new empty dir volume to deployment config (dc) 'registry' mounted under
		# /var/lib/registry
		%[1]s volume dc/registry --add --mount-path=/var/lib/registry
		# Use an existing persistent volume claim (pvc) to overwrite an existing volume 'v1'
		%[1]s volume dc/registry --add --name=v1 -t pvc --claim-name=pvc1 --overwrite
		# Remove volume 'v1' from deployment config 'registry'
		%[1]s volume dc/registry --remove --name=v1
		# Create a new persistent volume claim that overwrites an existing volume 'v1'
		%[1]s volume dc/registry --add --name=v1 -t pvc --claim-size=1G --overwrite
		# Change the mount point for volume 'v1' to /data
		%[1]s volume dc/registry --add --name=v1 -m /data --overwrite
		# Modify the deployment config by removing volume mount "v1" from container "c1"
		# (and by removing the volume "v1" if no other containers have volume mounts that reference it)
		%[1]s volume dc/registry --remove --name=v1 --containers=c1
		# Add new volume based on a more complex volume source (Git repo, AWS EBS, GCE PD,
		# Ceph, Gluster, NFS, ISCSI, ...)
		%[1]s volume dc/registry --add -m /repo --source=<json-string>`)
)
// VolumeOptions holds everything the volume command needs: client plumbing
// filled in by Complete, the resource-selection inputs, the requested
// operation (exactly one of Add/Remove/List), and the add-specific options.
type VolumeOptions struct {
	DefaultNamespace  string
	ExplicitNamespace bool
	Out               io.Writer
	Err               io.Writer
	Mapper            meta.RESTMapper
	Typer             runtime.ObjectTyper
	RESTClientFactory func(mapping *meta.RESTMapping) (resource.RESTClient, error)
	// UpdatePodSpecForObject mutates the pod spec of any object that has one;
	// the bool reports whether the object actually had a pod template.
	UpdatePodSpecForObject func(obj runtime.Object, fn func(*kapi.PodSpec) error) (bool, error)
	Client                 kclient.PersistentVolumeClaimsNamespacer
	Encoder                runtime.Encoder

	// Resource selection
	Selector  string
	All       bool
	Filenames []string

	// Operations
	Add    bool
	Remove bool
	List   bool

	// Common optional params
	Name        string
	Containers  string
	Confirm     bool
	Output      string
	PrintObject func([]*resource.Info) error
	OutputVersion unversioned.GroupVersion

	// Add op params
	AddOpts *AddVolumeOptions
}
// AddVolumeOptions captures the flags that only apply to the --add operation.
type AddVolumeOptions struct {
	Type      string // volume source type (emptyDir, hostPath, secret, configmap, pvc)
	MountPath string
	Overwrite bool
	Path          string // host path, required for --type=hostPath
	ConfigMapName string // required for --type=configmap
	SecretName    string // required for --type=secret
	Source        string // raw JSON volume source; alternative to Type

	CreateClaim bool   // set by Complete when --claim-size requests a new PVC
	ClaimName   string // existing or to-be-created claim name
	ClaimSize   string // normalized by Complete into canonical quantity notation
	ClaimMode   string // canonicalized by Complete to the API access-mode constants
	ClaimClass  string // optional StorageClass for a newly created claim

	TypeChanged bool // whether --type was set explicitly (or inferred by Validate)
}
// NewCmdVolume wires the `volume` cobra command: it registers all flags on a
// shared VolumeOptions/AddVolumeOptions pair and, on Run, validates, completes
// and executes the requested volume operation.
func NewCmdVolume(fullName string, f *clientcmd.Factory, out, errOut io.Writer) *cobra.Command {
	addOpts := &AddVolumeOptions{}
	opts := &VolumeOptions{AddOpts: addOpts}
	cmd := &cobra.Command{
		Use:     "volumes RESOURCE/NAME --add|--remove|--list",
		Short:   "Update volumes on a pod template",
		Long:    volumeLong,
		Example: fmt.Sprintf(volumeExample, fullName),
		Aliases: []string{"volume"},
		Run: func(cmd *cobra.Command, args []string) {
			// Record whether --type was given explicitly before validation may infer it.
			addOpts.TypeChanged = cmd.Flag("type").Changed

			err := opts.Validate(cmd, args)
			if err != nil {
				kcmdutil.CheckErr(kcmdutil.UsageError(cmd, err.Error()))
			}
			err = opts.Complete(f, cmd, out, errOut)
			kcmdutil.CheckErr(err)

			err = opts.RunVolume(args)
			if err == cmdutil.ErrExit {
				os.Exit(1)
			}
			kcmdutil.CheckErr(err)
		},
	}
	cmd.Flags().StringVarP(&opts.Selector, "selector", "l", "", "Selector (label query) to filter on")
	cmd.Flags().BoolVar(&opts.All, "all", false, "select all resources in the namespace of the specified resource types")
	cmd.Flags().StringSliceVarP(&opts.Filenames, "filename", "f", opts.Filenames, "Filename, directory, or URL to file to use to edit the resource.")
	cmd.Flags().BoolVar(&opts.Add, "add", false, "Add volume and/or volume mounts for containers")
	cmd.Flags().BoolVar(&opts.Remove, "remove", false, "Remove volume and/or volume mounts for containers")
	cmd.Flags().BoolVar(&opts.List, "list", false, "List volumes and volume mounts for containers")

	cmd.Flags().StringVar(&opts.Name, "name", "", "Name of the volume. If empty, auto generated for add operation")
	cmd.Flags().StringVarP(&opts.Containers, "containers", "c", "*", "The names of containers in the selected pod templates to change - may use wildcards")
	cmd.Flags().BoolVar(&opts.Confirm, "confirm", false, "Confirm that you really want to remove multiple volumes")

	cmd.Flags().StringVarP(&addOpts.Type, "type", "t", "", "Type of the volume source for add operation. Supported options: emptyDir, hostPath, secret, configmap, persistentVolumeClaim")
	cmd.Flags().StringVarP(&addOpts.MountPath, "mount-path", "m", "", "Mount path inside the container. Optional param for --add or --remove")
	cmd.Flags().BoolVar(&addOpts.Overwrite, "overwrite", false, "If true, replace existing volume source and/or volume mount for the given resource")
	cmd.Flags().StringVar(&addOpts.Path, "path", "", "Host path. Must be provided for hostPath volume type")
	cmd.Flags().StringVar(&addOpts.ConfigMapName, "configmap-name", "", "Name of the persisted config map. Must be provided for configmap volume type")
	cmd.Flags().StringVar(&addOpts.SecretName, "secret-name", "", "Name of the persisted secret. Must be provided for secret volume type")
	cmd.Flags().StringVar(&addOpts.ClaimName, "claim-name", "", "Persistent volume claim name. Must be provided for persistentVolumeClaim volume type")
	cmd.Flags().StringVar(&addOpts.ClaimClass, "claim-class", "", "StorageClass to use for the persistent volume claim")
	cmd.Flags().StringVar(&addOpts.ClaimSize, "claim-size", "", "If specified along with a persistent volume type, create a new claim with the given size in bytes. Accepts SI notation: 10, 10G, 10Gi")
	cmd.Flags().StringVar(&addOpts.ClaimMode, "claim-mode", "ReadWriteOnce", "Set the access mode of the claim to be created. Valid values are ReadWriteOnce (rwo), ReadWriteMany (rwm), or ReadOnlyMany (rom)")
	cmd.Flags().StringVar(&addOpts.Source, "source", "", "Details of volume source as json string. This can be used if the required volume type is not supported by --type option. (e.g.: '{\"gitRepo\": {\"repository\": <git-url>, \"revision\": <commit-hash>}}')")

	kcmdutil.AddPrinterFlags(cmd)
	cmd.MarkFlagFilename("filename", "yaml", "yml", "json")

	// deprecate --list option
	cmd.Flags().MarkDeprecated("list", "Volumes and volume mounts can be listed by providing a resource with no additional options.")

	return cmd
}
// Validate checks the mutual consistency of the command-line inputs before
// Complete/RunVolume run: a parseable selector, at least one target resource,
// exactly one operation (defaulting to --list), and --confirm for bulk removes.
func (v *VolumeOptions) Validate(cmd *cobra.Command, args []string) error {
	if len(v.Selector) > 0 {
		if _, err := labels.Parse(v.Selector); err != nil {
			return errors.New("--selector=<selector> must be a valid label selector")
		}
		if v.All {
			return errors.New("you may specify either --selector or --all but not both")
		}
	}
	if len(v.Filenames) == 0 && len(args) < 1 {
		return errors.New("provide one or more resources to add, list, or delete volumes on as TYPE/NAME")
	}

	// Count the requested operations; zero means an implicit --list.
	numOps := 0
	if v.Add {
		numOps++
	}
	if v.Remove {
		numOps++
	}
	if v.List {
		numOps++
	}

	switch {
	case numOps == 0:
		v.List = true
	case numOps > 1:
		return errors.New("you may only specify one operation at a time")
	}

	output := kcmdutil.GetFlagString(cmd, "output")
	if v.List && len(output) > 0 {
		return errors.New("--list and --output may not be specified together")
	}

	err := v.AddOpts.Validate(v.Add)
	if err != nil {
		return err
	}
	// Removing all volumes for the resource type needs confirmation
	if v.Remove && len(v.Name) == 0 && !v.Confirm {
		return errors.New("must provide --confirm for removing more than one volume")
	}

	return nil
}
// Validate checks the add-specific flags. When isAddOp is true it infers the
// volume type from the other flags (claim-* => pvc, --secret-name => secret,
// --configmap-name => configmap, --path => hostpath, otherwise emptyDir) and
// then verifies each type's required parameters; when false, any add-only
// flag is rejected.
func (a *AddVolumeOptions) Validate(isAddOp bool) error {
	if isAddOp {
		// Infer the volume type from whichever source-specific flag was given.
		if len(a.Type) == 0 && (len(a.ClaimName) > 0 || len(a.ClaimSize) > 0) {
			a.Type = "persistentvolumeclaim"
			a.TypeChanged = true
		}
		if len(a.Type) == 0 && (len(a.SecretName) > 0) {
			a.Type = "secret"
			a.TypeChanged = true
		}
		if len(a.Type) == 0 && (len(a.ConfigMapName) > 0) {
			a.Type = "configmap"
			a.TypeChanged = true
		}
		if len(a.Type) == 0 && (len(a.Path) > 0) {
			a.Type = "hostpath"
			a.TypeChanged = true
		}
		if len(a.Type) == 0 {
			a.Type = "emptydir"
		}

		// NOTE(review): a.Type was just defaulted to "emptydir" when empty, so
		// this first error branch looks unreachable — confirm before relying on it.
		if len(a.Type) == 0 && len(a.Source) == 0 {
			return errors.New("must provide --type or --source for --add operation")
		} else if a.TypeChanged && len(a.Source) > 0 {
			return errors.New("either specify --type or --source but not both for --add operation")
		}

		if len(a.Type) > 0 {
			switch strings.ToLower(a.Type) {
			case "emptydir":
			case "hostpath":
				if len(a.Path) == 0 {
					return errors.New("must provide --path for --type=hostPath")
				}
			case "secret":
				if len(a.SecretName) == 0 {
					return errors.New("must provide --secret-name for --type=secret")
				}
			case "configmap":
				if len(a.ConfigMapName) == 0 {
					return errors.New("must provide --configmap-name for --type=configmap")
				}
			case "persistentvolumeclaim", "pvc":
				if len(a.ClaimName) == 0 && len(a.ClaimSize) == 0 {
					return errors.New("must provide --claim-name or --claim-size (to create a new claim) for --type=pvc")
				}
			default:
				return errors.New("invalid volume type. Supported types: emptyDir, hostPath, secret, persistentVolumeClaim")
			}
		} else if len(a.Path) > 0 || len(a.SecretName) > 0 || len(a.ClaimName) > 0 {
			return errors.New("--path|--secret-name|--claim-name are only valid for --type option")
		}

		if len(a.Source) > 0 {
			// --source must be a JSON object describing exactly one volume source
			// and must also deserialize into the API VolumeSource shape.
			var source map[string]interface{}
			err := json.Unmarshal([]byte(a.Source), &source)
			if err != nil {
				return err
			}
			if len(source) > 1 {
				return errors.New("must provide only one volume for --source")
			}
			var vs kapi.VolumeSource
			err = json.Unmarshal([]byte(a.Source), &vs)
			if err != nil {
				return err
			}
		}
		if len(a.ClaimClass) > 0 {
			// --claim-class is only meaningful when creating a new pvc.
			selectedLowerType := strings.ToLower(a.Type)
			if selectedLowerType != "persistentvolumeclaim" && selectedLowerType != "pvc" {
				return errors.New("must provide --type as persistentVolumeClaim")
			}
			if len(a.ClaimSize) == 0 {
				return errors.New("must provide --claim-size to create new pvc with claim-class")
			}
		}
	} else if len(a.Source) > 0 || len(a.Path) > 0 || len(a.SecretName) > 0 || len(a.ConfigMapName) > 0 || len(a.ClaimName) > 0 || a.Overwrite {
		return errors.New("--type|--path|--configmap-name|--secret-name|--claim-name|--source|--overwrite are only valid for --add operation")
	}
	return nil
}
// Complete wires up clients, namespaces, encoders and printers from the
// factory, prepares PrintObject when -o/--output was requested, and normalizes
// the claim size/mode inputs into canonical API forms.
func (v *VolumeOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, out, errOut io.Writer) error {
	clientConfig, err := f.ClientConfig()
	if err != nil {
		return err
	}
	v.OutputVersion, err = kcmdutil.OutputVersion(cmd, clientConfig.GroupVersion)
	if err != nil {
		return err
	}
	_, kc, err := f.Clients()
	if err != nil {
		return err
	}
	v.Client = kc

	cmdNamespace, explicit, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	mapper, typer := f.Object(false)

	v.Output = kcmdutil.GetFlagString(cmd, "output")
	if len(v.Output) > 0 {
		v.PrintObject = func(infos []*resource.Info) error {
			return f.PrintResourceInfos(cmd, infos, v.Out)
		}
	}

	v.DefaultNamespace = cmdNamespace
	v.ExplicitNamespace = explicit
	v.Out = out
	v.Err = errOut
	v.Mapper = mapper
	v.Typer = typer
	v.RESTClientFactory = f.Factory.ClientForMapping
	v.UpdatePodSpecForObject = f.UpdatePodSpecForObject
	v.Encoder = f.JSONEncoder()

	// In case of volume source ignore the default volume type
	if len(v.AddOpts.Source) > 0 {
		v.AddOpts.Type = ""
	}

	if len(v.AddOpts.ClaimSize) > 0 {
		// --claim-size implies creating a new claim: auto-name it if needed and
		// validate/normalize the quantity notation (e.g. "10Gi").
		v.AddOpts.CreateClaim = true
		if len(v.AddOpts.ClaimName) == 0 {
			v.AddOpts.ClaimName = kapi.SimpleNameGenerator.GenerateName("pvc-")
		}
		q, err := kresource.ParseQuantity(v.AddOpts.ClaimSize)
		if err != nil {
			return fmt.Errorf("--claim-size is not valid: %v", err)
		}
		v.AddOpts.ClaimSize = q.String()
	}
	// Canonicalize the access-mode shorthand (rwo/rwm/rom) to API constants.
	switch strings.ToLower(v.AddOpts.ClaimMode) {
	case strings.ToLower(string(kapi.ReadOnlyMany)), "rom":
		v.AddOpts.ClaimMode = string(kapi.ReadOnlyMany)
	case strings.ToLower(string(kapi.ReadWriteOnce)), "rwo":
		v.AddOpts.ClaimMode = string(kapi.ReadWriteOnce)
	case strings.ToLower(string(kapi.ReadWriteMany)), "rwm":
		v.AddOpts.ClaimMode = string(kapi.ReadWriteMany)
	case "":
	default:
		return errors.New("--claim-mode must be one of ReadWriteOnce (rwo), ReadWriteMany (rwm), or ReadOnlyMany (rom)")
	}
	return nil
}
// RunVolume applies the requested operation (--list, --add, --remove) to every
// resource matched by args, filenames and the selector. For --add with a new
// claim it also creates the generated PersistentVolumeClaim. Returns
// cmdutil.ErrExit when any individual resource failed.
func (v *VolumeOptions) RunVolume(args []string) error {
	mapper := resource.ClientMapperFunc(v.RESTClientFactory)
	b := resource.NewBuilder(v.Mapper, v.Typer, mapper, kapi.Codecs.UniversalDecoder()).
		ContinueOnError().
		NamespaceParam(v.DefaultNamespace).DefaultNamespace().
		FilenameParam(v.ExplicitNamespace, false, v.Filenames...).
		SelectorParam(v.Selector).
		ResourceTypeOrNameArgs(v.All, args...).
		Flatten()

	singular := false
	infos, err := b.Do().IntoSingular(&singular).Infos()
	if err != nil {
		return err
	}

	// Pure listing short-circuits before any mutation happens.
	if v.List {
		listingErrors := v.printVolumes(infos)
		if len(listingErrors) > 0 {
			return cmdutil.ErrExit
		}
		return nil
	}

	updateInfos := []*resource.Info{}
	// if a claim should be created, generate the info we'll add to the flow
	if v.Add && v.AddOpts.CreateClaim {
		claim := v.AddOpts.createClaim()
		m, err := v.Mapper.RESTMapping(kapi.Kind("PersistentVolumeClaim"))
		if err != nil {
			return err
		}
		client, err := mapper.ClientForMapping(m)
		if err != nil {
			return err
		}
		info := &resource.Info{
			Mapping:   m,
			Client:    client,
			Namespace: v.DefaultNamespace,
			Object:    claim,
		}
		infos = append(infos, info)
		updateInfos = append(updateInfos, info)
	}

	patches, patchError := v.getVolumeUpdatePatches(infos, singular)
	if patchError != nil {
		return patchError
	}
	// With -o/--output, print what would change instead of persisting it.
	if v.PrintObject != nil {
		return v.PrintObject(infos)
	}

	failed := false
	// First persist newly generated objects (the created claim) ...
	for _, info := range updateInfos {
		var obj runtime.Object
		if len(info.ResourceVersion) == 0 {
			obj, err = resource.NewHelper(info.Client, info.Mapping).Create(info.Namespace, false, info.Object)
		} else {
			obj, err = resource.NewHelper(info.Client, info.Mapping).Replace(info.Namespace, info.Name, true, info.Object)
		}
		if err != nil {
			handlePodUpdateError(v.Err, err, "volume")
			failed = true
			continue
		}
		info.Refresh(obj, true)
		fmt.Fprintf(v.Out, "%s/%s\n", info.Mapping.Resource, info.Name)
	}

	// ... then patch each existing resource with the computed volume changes.
	for _, patch := range patches {
		info := patch.Info
		if patch.Err != nil {
			failed = true
			fmt.Fprintf(v.Err, "error: %s/%s %v\n", info.Mapping.Resource, info.Name, patch.Err)
			continue
		}

		// An empty patch means the mutation was a no-op for this resource.
		if string(patch.Patch) == "{}" || len(patch.Patch) == 0 {
			fmt.Fprintf(v.Err, "info: %s %q was not changed\n", info.Mapping.Resource, info.Name)
			continue
		}

		glog.V(4).Infof("Calculated patch %s", patch.Patch)

		obj, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, kapi.StrategicMergePatchType, patch.Patch)
		if err != nil {
			handlePodUpdateError(v.Err, err, "volume")
			failed = true
			continue
		}

		info.Refresh(obj, true)
		kcmdutil.PrintSuccess(v.Mapper, false, v.Out, info.Mapping.Resource, info.Name, false, "updated")
	}
	if failed {
		return cmdutil.ErrExit
	}
	return nil
}
// getVolumeUpdatePatches computes a strategic-merge patch per resource by
// applying the add/remove mutation to each object's pod spec. When a single
// resource was requested and it has no pod template, that is surfaced as an
// error instead of being silently skipped.
func (v *VolumeOptions) getVolumeUpdatePatches(infos []*resource.Info, singular bool) ([]*Patch, error) {
	skipped := 0
	patches := CalculatePatches(infos, v.Encoder, func(info *resource.Info) (bool, error) {
		transformed := false
		ok, err := v.UpdatePodSpecForObject(info.Object, func(spec *kapi.PodSpec) error {
			var e error
			switch {
			case v.Add:
				e = v.addVolumeToSpec(spec, info, singular)
				transformed = true
			case v.Remove:
				e = v.removeVolumeFromSpec(spec, info)
				transformed = true
			}
			return e
		})
		if !ok {
			// Object has no pod template; counted so the singular case can error.
			skipped++
		}
		return transformed, err
	})
	if singular && skipped == len(infos) {
		patchError := fmt.Errorf("the %s %s is not a pod or does not have a pod template", infos[0].Mapping.Resource, infos[0].Name)
		return patches, patchError
	}
	return patches, nil
}
// setVolumeSourceByType fills in exactly one concrete volume source on kv
// based on the (case-insensitive) type selected in opts. An unrecognized
// type yields an error.
func setVolumeSourceByType(kv *kapi.Volume, opts *AddVolumeOptions) error {
	normalized := strings.ToLower(opts.Type)
	switch normalized {
	case "emptydir":
		kv.EmptyDir = &kapi.EmptyDirVolumeSource{}
		return nil
	case "hostpath":
		kv.HostPath = &kapi.HostPathVolumeSource{Path: opts.Path}
		return nil
	case "secret":
		kv.Secret = &kapi.SecretVolumeSource{SecretName: opts.SecretName}
		return nil
	case "configmap":
		kv.ConfigMap = &kapi.ConfigMapVolumeSource{
			LocalObjectReference: kapi.LocalObjectReference{Name: opts.ConfigMapName},
		}
		return nil
	case "persistentvolumeclaim", "pvc":
		kv.PersistentVolumeClaim = &kapi.PersistentVolumeClaimVolumeSource{ClaimName: opts.ClaimName}
		return nil
	}
	return fmt.Errorf("invalid volume type: %s", opts.Type)
}
// printVolumes lists the volumes of every resource in infos, collecting and
// reporting each per-resource listing failure without stopping the loop.
func (v *VolumeOptions) printVolumes(infos []*resource.Info) []error {
	listingErrors := []error{}
	for _, resourceInfo := range infos {
		info := resourceInfo
		_, listErr := v.UpdatePodSpecForObject(info.Object, func(spec *kapi.PodSpec) error {
			return v.listVolumeForSpec(spec, info)
		})
		if listErr == nil {
			continue
		}
		listingErrors = append(listingErrors, listErr)
		fmt.Fprintf(v.Err, "error: %s/%s %v\n", info.Mapping.Resource, info.Name, listErr)
	}
	return listingErrors
}
// createClaim builds the PersistentVolumeClaim described by the add options:
// name, access mode, and requested storage size, plus the storage-class
// annotation when one was supplied.
func (v *AddVolumeOptions) createClaim() *kapi.PersistentVolumeClaim {
	requests := kapi.ResourceList{
		kapi.ResourceName(kapi.ResourceStorage): kresource.MustParse(v.ClaimSize),
	}
	claim := &kapi.PersistentVolumeClaim{
		ObjectMeta: kapi.ObjectMeta{Name: v.ClaimName},
		Spec: kapi.PersistentVolumeClaimSpec{
			AccessModes: []kapi.PersistentVolumeAccessMode{kapi.PersistentVolumeAccessMode(v.ClaimMode)},
			Resources:   kapi.ResourceRequirements{Requests: requests},
		},
	}
	if v.ClaimClass != "" {
		claim.Annotations = map[string]string{storageAnnClass: v.ClaimClass}
	}
	return claim
}
// setVolumeSource populates kv's source either from the explicit --type flag
// or, failing that, from the raw JSON provided via --source.
func (v *VolumeOptions) setVolumeSource(kv *kapi.Volume) error {
	opts := v.AddOpts
	switch {
	case len(opts.Type) > 0:
		return setVolumeSourceByType(kv, opts)
	case len(opts.Source) > 0:
		return json.Unmarshal([]byte(opts.Source), &kv.VolumeSource)
	}
	return nil
}
// setVolumeMount mounts the volume named v.Name at opts.MountPath in every
// selected container, replacing any previous mount of the same volume.
// It fails if a *different* volume is already mounted at the same (cleaned)
// path in any selected container.
func (v *VolumeOptions) setVolumeMount(spec *kapi.PodSpec, info *resource.Info) error {
	opts := v.AddOpts
	containers, _ := selectContainers(spec.Containers, v.Containers)
	if len(containers) == 0 && v.Containers != "*" {
		fmt.Fprintf(v.Err, "warning: %s/%s does not have any containers matching %q\n", info.Mapping.Resource, info.Name, v.Containers)
		return nil
	}
	for _, c := range containers {
		// Reject a mount-path collision with a different volume.
		for _, m := range c.VolumeMounts {
			if path.Clean(m.MountPath) == path.Clean(opts.MountPath) && m.Name != v.Name {
				return fmt.Errorf("volume mount '%s' already exists for container '%s'", opts.MountPath, c.Name)
			}
		}
		// Drop any existing mount of this volume so it can be re-added below.
		for i, m := range c.VolumeMounts {
			if m.Name == v.Name {
				c.VolumeMounts = append(c.VolumeMounts[:i], c.VolumeMounts[i+1:]...)
				break
			}
		}
		volumeMount := &kapi.VolumeMount{
			Name:      v.Name,
			MountPath: path.Clean(opts.MountPath),
		}
		c.VolumeMounts = append(c.VolumeMounts, *volumeMount)
	}
	return nil
}
// getVolumeName resolves the volume name to operate on when --name was not
// given. With --overwrite the name is looked up from an existing mount at
// --mount-path (single resource only, and the path must match exactly one
// volume); without --overwrite a fresh name is generated.
func (v *VolumeOptions) getVolumeName(spec *kapi.PodSpec, singleResource bool) (string, error) {
	opts := v.AddOpts
	if opts.Overwrite {
		// Multiple resources can have same mount-path for different volumes,
		// so restrict it for single resource to uniquely find the volume
		if !singleResource {
			return "", fmt.Errorf("you must specify --name for the volume name when dealing with multiple resources")
		}
		if len(opts.MountPath) > 0 {
			containers, _ := selectContainers(spec.Containers, v.Containers)
			var name string
			// Count at most one match per container (inner loop breaks on the
			// first mount at the requested path).
			matchCount := 0
			for _, c := range containers {
				for _, m := range c.VolumeMounts {
					if path.Clean(m.MountPath) == path.Clean(opts.MountPath) {
						name = m.Name
						matchCount += 1
						break
					}
				}
			}
			switch matchCount {
			case 0:
				return "", fmt.Errorf("unable to find the volume for mount-path: %s", opts.MountPath)
			case 1:
				return name, nil
			default:
				return "", fmt.Errorf("found multiple volumes with same mount-path: %s", opts.MountPath)
			}
		} else {
			return "", fmt.Errorf("ambiguous --overwrite, specify --name or --mount-path")
		}
	} else { // Generate volume name
		name := kapi.SimpleNameGenerator.GenerateName(volumePrefix)
		if len(v.Output) == 0 {
			fmt.Fprintf(v.Err, "info: Generated volume name: %s\n", name)
		}
		return name, nil
	}
}
// addVolumeToSpec adds (or, with --overwrite, replaces) the volume v.Name in
// the pod spec, resolving a name first if none was given, and mounts the
// volume when a mount path was requested.
func (v *VolumeOptions) addVolumeToSpec(spec *kapi.PodSpec, info *resource.Info, singleResource bool) error {
	opts := v.AddOpts
	if len(v.Name) == 0 {
		var err error
		v.Name, err = v.getVolumeName(spec, singleResource)
		if err != nil {
			return err
		}
	}
	newVolume := &kapi.Volume{
		Name: v.Name,
	}
	setSource := true
	for i, vol := range spec.Volumes {
		if v.Name == vol.Name {
			if !opts.Overwrite {
				return fmt.Errorf("volume '%s' already exists. Use --overwrite to replace", v.Name)
			}
			// Keep the old source when neither a new --type nor a new
			// --source was requested.
			if !opts.TypeChanged && len(opts.Source) == 0 {
				newVolume.VolumeSource = vol.VolumeSource
				setSource = false
			}
			// Remove the old entry; the replacement is appended below.
			spec.Volumes = append(spec.Volumes[:i], spec.Volumes[i+1:]...)
			break
		}
	}
	if setSource {
		err := v.setVolumeSource(newVolume)
		if err != nil {
			return err
		}
	}
	spec.Volumes = append(spec.Volumes, *newVolume)
	if len(opts.MountPath) > 0 {
		err := v.setVolumeMount(spec, info)
		if err != nil {
			return err
		}
	}
	return nil
}
// removeSpecificVolume unmounts the volume v.Name from every selected
// container, then removes the volume itself from the spec unless one of the
// non-selected (skipped) containers still mounts it. An error is returned if
// the named volume does not exist in the spec.
func (v *VolumeOptions) removeSpecificVolume(spec *kapi.PodSpec, containers, skippedContainers []*kapi.Container) error {
	for _, c := range containers {
		for i, m := range c.VolumeMounts {
			if v.Name == m.Name {
				c.VolumeMounts = append(c.VolumeMounts[:i], c.VolumeMounts[i+1:]...)
				break
			}
		}
	}
	// Remove volume if no container is using it
	found := false
	for _, c := range skippedContainers {
		for _, m := range c.VolumeMounts {
			if v.Name == m.Name {
				found = true
				break
			}
		}
		if found {
			break
		}
	}
	if !found {
		foundVolume := false
		for i, vol := range spec.Volumes {
			if v.Name == vol.Name {
				spec.Volumes = append(spec.Volumes[:i], spec.Volumes[i+1:]...)
				foundVolume = true
				break
			}
		}
		if !foundVolume {
			return fmt.Errorf("volume '%s' not found", v.Name)
		}
	}
	return nil
}
// removeVolumeFromSpec removes either the single volume named by v.Name or,
// when no name was given, every volume and mount from the selected
// containers. Resources with no matching containers only produce a warning.
func (v *VolumeOptions) removeVolumeFromSpec(spec *kapi.PodSpec, info *resource.Info) error {
	containers, skippedContainers := selectContainers(spec.Containers, v.Containers)
	if len(containers) == 0 && v.Containers != "*" {
		fmt.Fprintf(v.Err, "warning: %s/%s does not have any containers matching %q\n", info.Mapping.Resource, info.Name, v.Containers)
		return nil
	}
	// No name given: wipe every mount and every volume.
	if len(v.Name) == 0 {
		for _, c := range containers {
			c.VolumeMounts = []kapi.VolumeMount{}
		}
		spec.Volumes = []kapi.Volume{}
		return nil
	}
	return v.removeSpecificVolume(spec, containers, skippedContainers)
}
// sourceAccessMode renders the access-mode suffix used when describing a
// volume source: " read-only" for read-only sources, otherwise an empty string.
func sourceAccessMode(readOnly bool) string {
	suffix := ""
	if readOnly {
		suffix = " read-only"
	}
	return suffix
}
// describePersistentVolumeClaim summarizes a claim's allocation status: the
// requested size while the claim is still unbound (no volume name assigned),
// or the allocated capacity once a volume has been bound.
func describePersistentVolumeClaim(claim *kapi.PersistentVolumeClaim) string {
	if len(claim.Spec.VolumeName) == 0 {
		// TODO: check for other dimensions of request - IOPs, etc
		if val, ok := claim.Spec.Resources.Requests[kapi.ResourceStorage]; ok {
			return fmt.Sprintf("waiting for %sB allocation", val.String())
		}
		return "waiting to allocate"
	}
	// TODO: check for other dimensions of capacity?
	if val, ok := claim.Status.Capacity[kapi.ResourceStorage]; ok {
		return fmt.Sprintf("allocated %sB", val.String())
	}
	return "allocated unknown size"
}
// describeVolumeSource renders a short human-readable summary for whichever
// concrete source is set on the volume; sources not covered below yield
// "unknown". Only the first matching case is reported.
func describeVolumeSource(source *kapi.VolumeSource) string {
	switch {
	case source.AWSElasticBlockStore != nil:
		return fmt.Sprintf("AWS EBS %s type=%s partition=%d%s", source.AWSElasticBlockStore.VolumeID, source.AWSElasticBlockStore.FSType, source.AWSElasticBlockStore.Partition, sourceAccessMode(source.AWSElasticBlockStore.ReadOnly))
	case source.EmptyDir != nil:
		return "empty directory"
	case source.GCEPersistentDisk != nil:
		return fmt.Sprintf("GCE PD %s type=%s partition=%d%s", source.GCEPersistentDisk.PDName, source.GCEPersistentDisk.FSType, source.GCEPersistentDisk.Partition, sourceAccessMode(source.GCEPersistentDisk.ReadOnly))
	case source.GitRepo != nil:
		if len(source.GitRepo.Revision) == 0 {
			return fmt.Sprintf("Git repository %s", source.GitRepo.Repository)
		}
		return fmt.Sprintf("Git repository %s @ %s", source.GitRepo.Repository, source.GitRepo.Revision)
	case source.Glusterfs != nil:
		return fmt.Sprintf("GlusterFS %s:%s%s", source.Glusterfs.EndpointsName, source.Glusterfs.Path, sourceAccessMode(source.Glusterfs.ReadOnly))
	case source.HostPath != nil:
		return fmt.Sprintf("host path %s", source.HostPath.Path)
	case source.ISCSI != nil:
		return fmt.Sprintf("ISCSI %s target-portal=%s type=%s lun=%d%s", source.ISCSI.IQN, source.ISCSI.TargetPortal, source.ISCSI.FSType, source.ISCSI.Lun, sourceAccessMode(source.ISCSI.ReadOnly))
	case source.NFS != nil:
		return fmt.Sprintf("NFS %s:%s%s", source.NFS.Server, source.NFS.Path, sourceAccessMode(source.NFS.ReadOnly))
	case source.PersistentVolumeClaim != nil:
		return fmt.Sprintf("pvc/%s%s", source.PersistentVolumeClaim.ClaimName, sourceAccessMode(source.PersistentVolumeClaim.ReadOnly))
	case source.RBD != nil:
		return fmt.Sprintf("Ceph RBD %v type=%s image=%s pool=%s%s", source.RBD.CephMonitors, source.RBD.FSType, source.RBD.RBDImage, source.RBD.RBDPool, sourceAccessMode(source.RBD.ReadOnly))
	case source.Secret != nil:
		return fmt.Sprintf("secret/%s", source.Secret.SecretName)
	default:
		return "unknown"
	}
}
// listVolumeForSpec prints every volume of the pod spec (or only v.Name when
// set) with its source description, PVC status when the source is a claim,
// and the mount points inside the selected containers. Returns an error when
// a requested volume name is not present.
func (v *VolumeOptions) listVolumeForSpec(spec *kapi.PodSpec, info *resource.Info) error {
	containers, _ := selectContainers(spec.Containers, v.Containers)
	if len(containers) == 0 && v.Containers != "*" {
		fmt.Fprintf(v.Err, "warning: %s/%s does not have any containers matching %q\n", info.Mapping.Resource, info.Name, v.Containers)
		return nil
	}
	fmt.Fprintf(v.Out, "%s/%s\n", info.Mapping.Resource, info.Name)
	checkName := (len(v.Name) > 0)
	found := false
	for _, vol := range spec.Volumes {
		if checkName && v.Name != vol.Name {
			continue
		}
		found = true
		refInfo := ""
		// For PVC-backed volumes, also look up and report the claim status.
		if vol.VolumeSource.PersistentVolumeClaim != nil {
			claimName := vol.VolumeSource.PersistentVolumeClaim.ClaimName
			claim, err := v.Client.PersistentVolumeClaims(info.Namespace).Get(claimName)
			switch {
			case err == nil:
				refInfo = fmt.Sprintf("(%s)", describePersistentVolumeClaim(claim))
			case apierrs.IsNotFound(err):
				refInfo = "(does not exist)"
			default:
				fmt.Fprintf(v.Err, "error: unable to retrieve persistent volume claim %s referenced in %s/%s: %v", claimName, info.Mapping.Resource, info.Name, err)
			}
		}
		if len(refInfo) > 0 {
			refInfo = " " + refInfo
		}
		fmt.Fprintf(v.Out, "  %s%s as %s\n", describeVolumeSource(&vol.VolumeSource), refInfo, vol.Name)
		for _, c := range containers {
			for _, m := range c.VolumeMounts {
				if vol.Name != m.Name {
					continue
				}
				// Only name the container when the pod has more than one.
				if len(spec.Containers) == 1 {
					fmt.Fprintf(v.Out, "    mounted at %s\n", m.MountPath)
				} else {
					fmt.Fprintf(v.Out, "    mounted at %s in container %s\n", m.MountPath, c.Name)
				}
			}
		}
	}
	if checkName && !found {
		return fmt.Errorf("volume %q not found", v.Name)
	}
	return nil
}
| jeffvance/origin | pkg/cmd/cli/cmd/set/volume.go | GO | apache-2.0 | 29,411 |
/*
* Copyright 2011 JBoss Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.workbench.common.widgets.client.widget;
public class PercentageCalculator {

    /**
     * Computes the portion of {@code denominator} remaining after subtracting
     * {@code numerator}, expressed as a whole-number percentage (truncated
     * toward zero). For example, {@code calculatePercent(25, 100)} is 75.
     *
     * @param numerator   the amount already consumed/completed
     * @param denominator the total amount; a value of 0 yields 0
     * @return the remaining percentage, or 0 when {@code denominator} is 0
     */
    public static int calculatePercent(int numerator,
                                       int denominator) {
        if (denominator == 0) {
            return 0;
        }
        float remaining = (float) denominator - (float) numerator;
        return (int) ((remaining / (float) denominator) * 100);
    }
}
| psiroky/kie-wb-common | kie-wb-common-widgets/kie-wb-common-ui/src/main/java/org/kie/workbench/common/widgets/client/widget/PercentageCalculator.java | Java | apache-2.0 | 979 |
/*
* Copyright 2015 DuraSpace, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fcrepo.http.api;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
/**
* @author cabeer
* @since 10/17/14
*/
@Component
public class FedoraHttpConfiguration {

    /**
     * Whether HTTP PUT requests must carry an If-Match header; populated by
     * Spring from the {@code fcrepo.http.ldp.putRequiresIfMatch} property,
     * defaulting to {@code false}.
     */
    @Value("${fcrepo.http.ldp.putRequiresIfMatch:false}")
    private boolean putRequiresIfMatch;

    /**
     * Should PUT requests require an If-Match header?
     * @return put request if match
     */
    public boolean putRequiresIfMatch() {
        return putRequiresIfMatch;
    }
}
| ruebot/fcrepo4 | fcrepo-http-api/src/main/java/org/fcrepo/http/api/FedoraHttpConfiguration.java | Java | apache-2.0 | 1,125 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute import flavors as flavors_api
from nova.api.openstack.compute.views import flavors as flavors_view
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.compute import flavors
from nova import exception
from nova.openstack.common.gettextutils import _
authorize = extensions.extension_authorizer('compute', 'flavormanage')
class FlavorManageController(wsgi.Controller):
    """
    The Flavor Lifecycle API controller for the OpenStack API.

    Provides the ``create`` and ``delete`` actions exposed by the
    os-flavor-manage extension.
    """
    _view_builder_class = flavors_view.ViewBuilder

    def __init__(self):
        super(FlavorManageController, self).__init__()

    @wsgi.action("delete")
    def _delete(self, req, id):
        """Delete the flavor identified by ``id``.

        Returns HTTP 202 on success and raises HTTPNotFound when no such
        flavor exists (deleted flavors are excluded via read_deleted="no").
        """
        context = req.environ['nova.context']
        authorize(context)

        try:
            flavor = flavors.get_flavor_by_flavor_id(
                    id, ctxt=context, read_deleted="no")
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())

        flavors.destroy(flavor['name'])
        return webob.Response(status_int=202)

    @wsgi.action("create")
    @wsgi.serializers(xml=flavors_api.FlavorTemplate)
    def _create(self, req, body):
        """Create a flavor from the ``flavor`` dict in the request body.

        A non-public flavor is additionally granted access for the caller's
        project. Raises HTTPBadRequest for a malformed body or invalid
        values, and HTTPConflict when the name or id already exists.
        """
        context = req.environ['nova.context']
        authorize(context)

        if not self.is_valid_body(body, 'flavor'):
            msg = _("Invalid request body")
            raise webob.exc.HTTPBadRequest(explanation=msg)

        vals = body['flavor']
        name = vals.get('name')
        flavorid = vals.get('id')
        memory = vals.get('ram')
        vcpus = vals.get('vcpus')
        root_gb = vals.get('disk')
        ephemeral_gb = vals.get('OS-FLV-EXT-DATA:ephemeral', 0)
        swap = vals.get('swap', 0)
        rxtx_factor = vals.get('rxtx_factor', 1.0)
        is_public = vals.get('os-flavor-access:is_public', True)

        try:
            flavor = flavors.create(name, memory, vcpus, root_gb,
                                    ephemeral_gb=ephemeral_gb,
                                    flavorid=flavorid, swap=swap,
                                    rxtx_factor=rxtx_factor,
                                    is_public=is_public)
            if not flavor['is_public']:
                flavors.add_flavor_access(flavor['flavorid'],
                                          context.project_id, context)
            # Cache the new flavor on the request for later view building.
            req.cache_db_flavor(flavor)
        except (exception.InstanceTypeExists,
                exception.InstanceTypeIdExists) as err:
            raise webob.exc.HTTPConflict(explanation=err.format_message())
        except exception.InvalidInput as exc:
            raise webob.exc.HTTPBadRequest(explanation=exc.format_message())

        return self._view_builder.show(req, flavor)
class Flavormanage(extensions.ExtensionDescriptor):
    """
    Flavor create/delete API support
    """

    name = "FlavorManage"
    alias = "os-flavor-manage"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "flavor_manage/api/v1.1")
    updated = "2012-01-19T00:00:00+00:00"

    def get_controller_extensions(self):
        """Attach the flavor-manage controller to the ``flavors`` resource."""
        controller = FlavorManageController()
        extension = extensions.ControllerExtension(self, 'flavors', controller)
        return [extension]
| ntt-sic/nova | nova/api/openstack/compute/contrib/flavormanage.py | Python | apache-2.0 | 3,894 |
<?php
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/pubsub/v1/pubsub.proto
namespace Google\Cloud\PubSub\V1;
use Google\Protobuf\Internal\GPBType;
use Google\Protobuf\Internal\RepeatedField;
use Google\Protobuf\Internal\GPBUtil;
/**
 * Request for the ModifyAckDeadline method.
 *
 * NOTE: this class is generated from the protobuf definition; change the
 * .proto file and regenerate rather than editing this file by hand.
 *
 * Generated from protobuf message <code>google.pubsub.v1.ModifyAckDeadlineRequest</code>
 */
class ModifyAckDeadlineRequest extends \Google\Protobuf\Internal\Message
{
    /**
     * Required. The name of the subscription.
     * Format is `projects/{project}/subscriptions/{sub}`.
     *
     * Generated from protobuf field <code>string subscription = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {</code>
     */
    private $subscription = '';
    /**
     * Required. List of acknowledgment IDs.
     *
     * Generated from protobuf field <code>repeated string ack_ids = 4 [(.google.api.field_behavior) = REQUIRED];</code>
     */
    private $ack_ids;
    /**
     * Required. The new ack deadline with respect to the time this request was
     * sent to the Pub/Sub system. For example, if the value is 10, the new ack
     * deadline will expire 10 seconds after the `ModifyAckDeadline` call was
     * made. Specifying zero might immediately make the message available for
     * delivery to another subscriber client. This typically results in an
     * increase in the rate of message redeliveries (that is, duplicates).
     * The minimum deadline you can specify is 0 seconds.
     * The maximum deadline you can specify is 600 seconds (10 minutes).
     *
     * Generated from protobuf field <code>int32 ack_deadline_seconds = 3 [(.google.api.field_behavior) = REQUIRED];</code>
     */
    private $ack_deadline_seconds = 0;

    /**
     * Constructor.
     *
     * @param array $data {
     *     Optional. Data for populating the Message object.
     *
     *     @type string $subscription
     *           Required. The name of the subscription.
     *           Format is `projects/{project}/subscriptions/{sub}`.
     *     @type string[]|\Google\Protobuf\Internal\RepeatedField $ack_ids
     *           Required. List of acknowledgment IDs.
     *     @type int $ack_deadline_seconds
     *           Required. The new ack deadline with respect to the time this request was
     *           sent to the Pub/Sub system. For example, if the value is 10, the new ack
     *           deadline will expire 10 seconds after the `ModifyAckDeadline` call was
     *           made. Specifying zero might immediately make the message available for
     *           delivery to another subscriber client. This typically results in an
     *           increase in the rate of message redeliveries (that is, duplicates).
     *           The minimum deadline you can specify is 0 seconds.
     *           The maximum deadline you can specify is 600 seconds (10 minutes).
     * }
     */
    public function __construct($data = NULL) {
        \GPBMetadata\Google\Pubsub\V1\Pubsub::initOnce();
        parent::__construct($data);
    }

    /**
     * Required. The name of the subscription.
     * Format is `projects/{project}/subscriptions/{sub}`.
     *
     * Generated from protobuf field <code>string subscription = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {</code>
     * @return string
     */
    public function getSubscription()
    {
        return $this->subscription;
    }

    /**
     * Required. The name of the subscription.
     * Format is `projects/{project}/subscriptions/{sub}`.
     *
     * Generated from protobuf field <code>string subscription = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = {</code>
     * @param string $var
     * @return $this
     */
    public function setSubscription($var)
    {
        GPBUtil::checkString($var, True);
        $this->subscription = $var;

        return $this;
    }

    /**
     * Required. List of acknowledgment IDs.
     *
     * Generated from protobuf field <code>repeated string ack_ids = 4 [(.google.api.field_behavior) = REQUIRED];</code>
     * @return \Google\Protobuf\Internal\RepeatedField
     */
    public function getAckIds()
    {
        return $this->ack_ids;
    }

    /**
     * Required. List of acknowledgment IDs.
     *
     * Generated from protobuf field <code>repeated string ack_ids = 4 [(.google.api.field_behavior) = REQUIRED];</code>
     * @param string[]|\Google\Protobuf\Internal\RepeatedField $var
     * @return $this
     */
    public function setAckIds($var)
    {
        $arr = GPBUtil::checkRepeatedField($var, \Google\Protobuf\Internal\GPBType::STRING);
        $this->ack_ids = $arr;

        return $this;
    }

    /**
     * Required. The new ack deadline with respect to the time this request was
     * sent to the Pub/Sub system. For example, if the value is 10, the new ack
     * deadline will expire 10 seconds after the `ModifyAckDeadline` call was
     * made. Specifying zero might immediately make the message available for
     * delivery to another subscriber client. This typically results in an
     * increase in the rate of message redeliveries (that is, duplicates).
     * The minimum deadline you can specify is 0 seconds.
     * The maximum deadline you can specify is 600 seconds (10 minutes).
     *
     * Generated from protobuf field <code>int32 ack_deadline_seconds = 3 [(.google.api.field_behavior) = REQUIRED];</code>
     * @return int
     */
    public function getAckDeadlineSeconds()
    {
        return $this->ack_deadline_seconds;
    }

    /**
     * Required. The new ack deadline with respect to the time this request was
     * sent to the Pub/Sub system. For example, if the value is 10, the new ack
     * deadline will expire 10 seconds after the `ModifyAckDeadline` call was
     * made. Specifying zero might immediately make the message available for
     * delivery to another subscriber client. This typically results in an
     * increase in the rate of message redeliveries (that is, duplicates).
     * The minimum deadline you can specify is 0 seconds.
     * The maximum deadline you can specify is 600 seconds (10 minutes).
     *
     * Generated from protobuf field <code>int32 ack_deadline_seconds = 3 [(.google.api.field_behavior) = REQUIRED];</code>
     * @param int $var
     * @return $this
     */
    public function setAckDeadlineSeconds($var)
    {
        GPBUtil::checkInt32($var);
        $this->ack_deadline_seconds = $var;

        return $this;
    }

}
| googleapis/google-cloud-php-pubsub | src/V1/ModifyAckDeadlineRequest.php | PHP | apache-2.0 | 6,591 |
/*
* Copyright 2000-2009 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.util.xml.highlighting;
import com.intellij.codeInspection.LocalQuickFix;
import com.intellij.codeInspection.ProblemHighlightType;
import com.intellij.lang.annotation.Annotation;
import com.intellij.lang.annotation.HighlightSeverity;
import com.intellij.openapi.util.TextRange;
import com.intellij.psi.PsiReference;
import com.intellij.util.xml.DomElement;
import com.intellij.util.xml.DomFileElement;
import com.intellij.util.xml.GenericDomValue;
import com.intellij.util.xml.reflect.DomCollectionChildDescription;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
public interface DomElementAnnotationHolder extends Iterable<DomElementProblemDescriptor>{

  /**
   * @return whether highlighting is being computed on the fly (e.g. in the
   * editor) rather than in a batch run
   */
  boolean isOnTheFly();

  /** @return the DOM file element whose problems are collected in this holder */
  @NotNull
  DomFileElement<?> getFileElement();

  /**
   * Registers a problem for the given DOM element with default severity.
   *
   * @param domElement element the problem is attached to
   * @param message problem description, may be null
   * @param fixes optional quick fixes offered to the user
   * @return the created problem descriptor
   */
  @NotNull
  DomElementProblemDescriptor createProblem(@NotNull DomElement domElement, @Nullable String message, LocalQuickFix... fixes);

  /**
   * Registers a problem for a collection child of the given DOM element,
   * described by {@code childDescription}.
   */
  @NotNull
  DomElementProblemDescriptor createProblem(@NotNull DomElement domElement, DomCollectionChildDescription childDescription, @Nullable String message);

  /** Registers a problem with an explicit highlight severity. */
  @NotNull
  DomElementProblemDescriptor createProblem(@NotNull DomElement domElement, HighlightSeverity highlightType, String message);

  /** Registers a problem with an explicit severity and optional quick fixes. */
  DomElementProblemDescriptor createProblem(@NotNull DomElement domElement, HighlightSeverity highlightType, String message, LocalQuickFix... fixes);

  /**
   * Registers a problem restricted to {@code textRange} within the element,
   * with an explicit highlight severity.
   */
  DomElementProblemDescriptor createProblem(@NotNull DomElement domElement, HighlightSeverity highlightType, String message, TextRange textRange, LocalQuickFix... fixes);

  /**
   * Registers a problem restricted to {@code textRange} within the element,
   * with an explicit {@link ProblemHighlightType}.
   */
  DomElementProblemDescriptor createProblem(@NotNull DomElement domElement, ProblemHighlightType highlightType, String message, @Nullable TextRange textRange, LocalQuickFix... fixes);

  /**
   * Registers an unresolved-reference problem for the given DOM value,
   * based on the (presumably unresolvable) {@code reference}.
   */
  @NotNull
  DomElementResolveProblemDescriptor createResolveProblem(@NotNull GenericDomValue element, @NotNull PsiReference reference);

  /**
   * Is useful only if called from {@link com.intellij.util.xml.highlighting.DomElementsAnnotator} instance
   * @param element element
   * @param severity highlight severity
   * @param message description
   * @return annotation
   */
  @NotNull
  Annotation createAnnotation(@NotNull DomElement element, HighlightSeverity severity, @Nullable String message);

  /** @return the number of problem descriptors collected so far */
  int getSize();
}
| paplorinc/intellij-community | xml/dom-openapi/src/com/intellij/util/xml/highlighting/DomElementAnnotationHolder.java | Java | apache-2.0 | 2,866 |
# megam_rustyprint
Displays data in a table format on the console.
| megamsys/megam_rustyprint | README.md | Markdown | apache-2.0 | 59 |
package com.mapswithme.maps.purchase;
import android.app.Activity;
import android.content.Intent;
import android.os.Bundle;
import android.text.TextUtils;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import com.android.billingclient.api.SkuDetails;
import com.bumptech.glide.Glide;
import com.mapswithme.maps.Framework;
import com.mapswithme.maps.PrivateVariables;
import com.mapswithme.maps.PurchaseOperationObservable;
import com.mapswithme.maps.R;
import com.mapswithme.maps.base.BaseMwmFragment;
import com.mapswithme.maps.bookmarks.data.PaymentData;
import com.mapswithme.maps.dialog.AlertDialogCallback;
import com.mapswithme.util.Utils;
import com.mapswithme.util.log.Logger;
import com.mapswithme.util.log.LoggerFactory;
import com.mapswithme.util.statistics.Statistics;
import java.util.Collections;
import java.util.List;
public class BookmarkPaymentFragment extends BaseMwmFragment
implements AlertDialogCallback, PurchaseStateActivator<BookmarkPaymentState>
{
static final String ARG_PAYMENT_DATA = "arg_payment_data";
private static final Logger LOGGER = LoggerFactory.INSTANCE.getLogger(LoggerFactory.Type.BILLING);
private static final String TAG = BookmarkPaymentFragment.class.getSimpleName();
private static final String EXTRA_CURRENT_STATE = "extra_current_state";
private static final String EXTRA_PRODUCT_DETAILS = "extra_product_details";
private static final String EXTRA_SUBS_PRODUCT_DETAILS = "extra_subs_product_details";
private static final String EXTRA_VALIDATION_RESULT = "extra_validation_result";
@SuppressWarnings("NullableProblems")
@NonNull
private PurchaseController<PurchaseCallback> mPurchaseController;
@SuppressWarnings("NullableProblems")
@NonNull
private BookmarkPurchaseCallback mPurchaseCallback;
@SuppressWarnings("NullableProblems")
@NonNull
private PaymentData mPaymentData;
@Nullable
private ProductDetails mProductDetails;
@Nullable
private ProductDetails mSubsProductDetails;
private boolean mValidationResult;
@NonNull
private BookmarkPaymentState mState = BookmarkPaymentState.NONE;
@SuppressWarnings("NullableProblems")
@NonNull
private BillingManager<PlayStoreBillingCallback> mSubsProductDetailsLoadingManager;
@NonNull
private final SubsProductDetailsCallback mSubsProductDetailsCallback
= new SubsProductDetailsCallback();
  /**
   * Reads the mandatory {@link PaymentData} from the fragment arguments and
   * prepares the purchase callback for its server id.
   *
   * @throws IllegalStateException if the arguments or payment data are absent
   */
  @Override
  public void onCreate(@Nullable Bundle savedInstanceState)
  {
    super.onCreate(savedInstanceState);
    Bundle args = getArguments();
    if (args == null)
      throw new IllegalStateException("Args must be provided for payment fragment!");
    PaymentData paymentData = args.getParcelable(ARG_PAYMENT_DATA);
    if (paymentData == null)
      throw new IllegalStateException("Payment data must be provided for payment fragment!");
    mPaymentData = paymentData;
    mPurchaseCallback = new BookmarkPurchaseCallback(mPaymentData.getServerId());
  }
  /**
   * Inflates the payment screen, wires the one-off purchase and subscription
   * buttons, and initializes both billing managers: the bookmark purchase
   * controller and the subscription product-details loader.
   */
  @Nullable
  @Override
  public View onCreateView(LayoutInflater inflater, @Nullable ViewGroup container, @Nullable
      Bundle savedInstanceState)
  {
    mPurchaseController = PurchaseFactory.createBookmarkPurchaseController(requireContext(),
                                                                           mPaymentData.getProductId(),
                                                                           mPaymentData.getServerId());
    // Restore the controller's state across configuration changes.
    if (savedInstanceState != null)
      mPurchaseController.onRestore(savedInstanceState);
    mPurchaseController.initialize(requireActivity());
    mSubsProductDetailsLoadingManager = PurchaseFactory.createSubscriptionBillingManager();
    mSubsProductDetailsLoadingManager.initialize(requireActivity());
    mSubsProductDetailsLoadingManager.addCallback(mSubsProductDetailsCallback);
    mSubsProductDetailsCallback.attach(this);
    View root = inflater.inflate(R.layout.fragment_bookmark_payment, container, false);
    View subscriptionButton = root.findViewById(R.id.buy_subs_btn);
    subscriptionButton.setOnClickListener(v -> onBuySubscriptionClicked());
    TextView buyInappBtn = root.findViewById(R.id.buy_inapp_btn);
    buyInappBtn.setOnClickListener(v -> onBuyInappClicked());
    return root;
  }
  /**
   * Routes the user to the subscription screen matching the bookmark group
   * (sights-only vs. all bookmarks) and awaits the result via
   * {@code PurchaseUtils.REQ_CODE_PAY_SUBSCRIPTION}.
   */
  private void onBuySubscriptionClicked()
  {
    SubscriptionType type = SubscriptionType.getTypeByBookmarksGroup(mPaymentData.getGroup());

    if (type.equals(SubscriptionType.BOOKMARKS_SIGHTS))
    {
      BookmarksSightsSubscriptionActivity.startForResult
          (this, PurchaseUtils.REQ_CODE_PAY_SUBSCRIPTION, Statistics.ParamValue.CARD);
      return;
    }

    BookmarksAllSubscriptionActivity.startForResult
        (this, PurchaseUtils.REQ_CODE_PAY_SUBSCRIPTION, Statistics.ParamValue.CARD);
  }
@Override
public void onActivityResult(int requestCode, int resultCode, Intent data)
{
super.onActivityResult(requestCode, resultCode, data);
if (resultCode != Activity.RESULT_OK)
return;
if (requestCode == PurchaseUtils.REQ_CODE_PAY_SUBSCRIPTION)
{
Intent intent = new Intent();
intent.putExtra(PurchaseUtils.EXTRA_IS_SUBSCRIPTION, true);
requireActivity().setResult(Activity.RESULT_OK, intent);
requireActivity().finish();
}
}
  /**
   * Reports the in-app purchase selection to statistics and starts the
   * purchase transaction for the single bookmark product.
   */
  private void onBuyInappClicked()
  {
    Statistics.INSTANCE.trackPurchasePreviewSelect(mPaymentData.getServerId(),
                                                   mPaymentData.getProductId());
    Statistics.INSTANCE.trackPurchaseEvent(Statistics.EventName.INAPP_PURCHASE_PREVIEW_PAY,
                                           mPaymentData.getServerId(),
                                           Statistics.STATISTICS_CHANNEL_REALTIME);
    startPurchaseTransaction();
  }
  /**
   * Blocks back navigation while purchase validation is in flight (shows a
   * "please wait" toast instead); otherwise reports the cancellation and
   * defers to the default back handling.
   */
  @Override
  public boolean onBackPressed()
  {
    if (mState == BookmarkPaymentState.VALIDATION)
    {
      Toast.makeText(requireContext(), R.string.purchase_please_wait_toast, Toast.LENGTH_SHORT)
           .show();
      return true;
    }

    Statistics.INSTANCE.trackPurchaseEvent(Statistics.EventName.INAPP_PURCHASE_PREVIEW_CANCEL,
                                           mPaymentData.getServerId());
    return super.onBackPressed();
  }
  /**
   * Populates the view with payment data and either restores the previous
   * purchase-flow state from {@code savedInstanceState} or kicks off the
   * product-details queries for the in-app product and its subscription
   * alternative.
   */
  @Override
  public void onViewCreated(View view, @Nullable Bundle savedInstanceState)
  {
    super.onViewCreated(view, savedInstanceState);
    if (savedInstanceState == null)
      Statistics.INSTANCE.trackPurchasePreviewShow(mPaymentData.getServerId(),
                                                   PrivateVariables.bookmarksVendor(),
                                                   mPaymentData.getProductId());
    LOGGER.d(TAG, "onViewCreated savedInstanceState = " + savedInstanceState);
    setInitialPaymentData();
    loadImage();
    if (savedInstanceState != null)
    {
      // Restore previously loaded details and resume the saved flow state.
      mProductDetails = savedInstanceState.getParcelable(EXTRA_PRODUCT_DETAILS);
      if (mProductDetails != null)
        updateProductDetails();
      mSubsProductDetails = savedInstanceState.getParcelable(EXTRA_SUBS_PRODUCT_DETAILS);
      if (mSubsProductDetails != null)
        updateSubsProductDetails();
      mValidationResult = savedInstanceState.getBoolean(EXTRA_VALIDATION_RESULT);
      BookmarkPaymentState savedState
          = BookmarkPaymentState.values()[savedInstanceState.getInt(EXTRA_CURRENT_STATE)];
      activateState(savedState);
      return;
    }

    // Fresh start: query details for both purchase options.
    activateState(BookmarkPaymentState.PRODUCT_DETAILS_LOADING);
    mPurchaseController.queryProductDetails();
    SubscriptionType type = SubscriptionType.getTypeByBookmarksGroup(mPaymentData.getGroup());
    List<String> subsProductIds =
        Collections.singletonList(type.getMonthlyProductId());
    mSubsProductDetailsLoadingManager.queryProductDetails(subsProductIds);
  }
  /** Tears down both billing managers and detaches their callbacks. */
  @Override
  public void onDestroyView()
  {
    super.onDestroyView();
    mPurchaseController.destroy();
    mSubsProductDetailsLoadingManager.removeCallback(mSubsProductDetailsCallback);
    mSubsProductDetailsCallback.detach();
    mSubsProductDetailsLoadingManager.destroy();
  }
  /**
   * Moves the flow into the transaction-starting state and asks the core to
   * begin a purchase transaction for this bookmark's server id.
   */
  private void startPurchaseTransaction()
  {
    activateState(BookmarkPaymentState.TRANSACTION_STARTING);
    Framework.nativeStartPurchaseTransaction(mPaymentData.getServerId(),
                                             PrivateVariables.bookmarksVendor());
  }
  /** Launches the store billing flow for the product and marks payment in progress. */
  void launchBillingFlow()
  {
    mPurchaseController.launchPurchaseFlow(mPaymentData.getProductId());
    activateState(BookmarkPaymentState.PAYMENT_IN_PROGRESS);
  }
  /** Subscribes the purchase callback to transaction and controller events. */
  @Override
  public void onStart()
  {
    super.onStart();
    PurchaseOperationObservable observable = PurchaseOperationObservable.from(requireContext());
    observable.addTransactionObserver(mPurchaseCallback);
    mPurchaseController.addCallback(mPurchaseCallback);
    mPurchaseCallback.attach(this);
  }
@Override
public void onStop()
{
super.onStop();
PurchaseOperationObservable observable = PurchaseOperationObservable.from(requireContext());
observable.removeTransactionObserver(mPurchaseCallback);
mPurchaseController.removeCallback();
mPurchaseCallback.detach();
}
/**
 * Persists the payment flow state so it can be restored in {@code onViewCreated}
 * after a configuration change or process recreation.
 */
@Override
public void onSaveInstanceState(Bundle outState)
{
  super.onSaveInstanceState(outState);
  LOGGER.d(TAG, "onSaveInstanceState");
  outState.putInt(EXTRA_CURRENT_STATE, mState.ordinal());
  outState.putParcelable(EXTRA_PRODUCT_DETAILS, mProductDetails);
  outState.putParcelable(EXTRA_SUBS_PRODUCT_DETAILS, mSubsProductDetails);
  // Fix: onViewCreated restores EXTRA_VALIDATION_RESULT, but it was never written here,
  // so a successful validation was silently lost across recreation.
  outState.putBoolean(EXTRA_VALIDATION_RESULT, mValidationResult);
  mPurchaseController.onSave(outState);
}
@Override
public void activateState(@NonNull BookmarkPaymentState state)
{
  // No-op on re-activation of the current state; activate() drives the UI transition.
  if (state == mState)
    return;
  LOGGER.i(TAG, "Activate state: " + state);
  mState = state;
  mState.activate(this);
}
// Loads the product preview image with Glide; silently skips when no URL is provided.
private void loadImage()
{
  if (TextUtils.isEmpty(mPaymentData.getImgUrl()))
    return;
  ImageView imageView = getViewOrThrow().findViewById(R.id.image);
  Glide.with(imageView.getContext())
      .load(mPaymentData.getImgUrl())
      .centerCrop()
      .into(imageView);
}

// Fills the static (non-billing) fields of the preview: product name and author.
private void setInitialPaymentData()
{
  TextView name = getViewOrThrow().findViewById(R.id.product_catalog_name);
  name.setText(mPaymentData.getName());
  TextView author = getViewOrThrow().findViewById(R.id.author_name);
  author.setText(mPaymentData.getAuthorName());
}
// Stores the first returned in-app SKU; keeps the previous value when the store
// returns an empty list.
void handleProductDetails(@NonNull List<SkuDetails> details)
{
  if (details.isEmpty())
    return;
  SkuDetails skuDetails = details.get(0);
  mProductDetails = PurchaseUtils.toProductDetails(skuDetails);
}

// Same as handleProductDetails, but for the subscription SKU.
void handleSubsProductDetails(@NonNull List<SkuDetails> details)
{
  if (details.isEmpty())
    return;
  SkuDetails skuDetails = details.get(0);
  mSubsProductDetails = PurchaseUtils.toProductDetails(skuDetails);
}

// Remembers the purchase validation verdict; consumed later by finishValidation().
void handleValidationResult(boolean validationResult)
{
  mValidationResult = validationResult;
}
@Override
public void onAlertDialogPositiveClick(int requestCode, int which)
{
  handleErrorDialogEvent(requestCode);
}

@Override
public void onAlertDialogNegativeClick(int requestCode, int which)
{
  // Do nothing by default.
}

@Override
public void onAlertDialogCancel(int requestCode)
{
  handleErrorDialogEvent(requestCode);
}

// Reacts to a dismissed error dialog: a details-loading failure aborts the whole
// screen, while transaction/payment failures fall back to the loaded-details state
// so the user can retry.
private void handleErrorDialogEvent(int requestCode)
{
  switch (requestCode)
  {
    case PurchaseUtils.REQ_CODE_PRODUCT_DETAILS_FAILURE:
      requireActivity().finish();
      break;
    case PurchaseUtils.REQ_CODE_START_TRANSACTION_FAILURE:
    case PurchaseUtils.REQ_CODE_PAYMENT_FAILURE:
      activateState(BookmarkPaymentState.PRODUCT_DETAILS_LOADED);
      break;
  }
}
// Renders the in-app purchase button and store title from the loaded details.
// Must only be called after handleProductDetails has populated mProductDetails.
void updateProductDetails()
{
  if (mProductDetails == null)
    throw new AssertionError("Product details must be obtained at this moment!");
  TextView buyButton = getViewOrThrow().findViewById(R.id.buy_inapp_btn);
  String price = Utils.formatCurrencyString(mProductDetails.getPrice(),
                                            mProductDetails.getCurrencyCode());
  buyButton.setText(getString(R.string.buy_btn, price));
  TextView storeName = getViewOrThrow().findViewById(R.id.product_store_name);
  storeName.setText(mProductDetails.getTitle());
}

// Renders the subscription button price; requires mSubsProductDetails to be loaded.
void updateSubsProductDetails()
{
  if (mSubsProductDetails == null)
    throw new AssertionError("Subs product details must be obtained at this moment!");
  String formattedPrice = Utils.formatCurrencyString(mSubsProductDetails.getPrice(),
                                                     mSubsProductDetails.getCurrencyCode());
  TextView subsButton = getViewOrThrow().findViewById(R.id.buy_subs_btn);
  subsButton.setText(getString(R.string.buy_btn_for_subscription_version_2, formattedPrice));
}

// Closes the screen, reporting RESULT_OK to the caller only when validation succeeded.
void finishValidation()
{
  if (mValidationResult)
    requireActivity().setResult(Activity.RESULT_OK);
  requireActivity().finish();
}
}
| matsprea/omim | android/src/com/mapswithme/maps/purchase/BookmarkPaymentFragment.java | Java | apache-2.0 | 13,042 |
/*
* Copyright 2010-2013 Coda Hale and Yammer, Inc., 2014-2017 Dropwizard Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.dropwizard.metrics;
import org.junit.Test;
import io.dropwizard.metrics.SlidingWindowReservoir;
import static org.assertj.core.api.Assertions.assertThat;
/**
 * Tests for {@code SlidingWindowReservoir}: a window of capacity 3 must report every
 * recorded value while under capacity, and only the newest three once it overflows.
 */
public class SlidingWindowReservoirTest {
    private final SlidingWindowReservoir reservoir = new SlidingWindowReservoir(3);

    /** Records each value into the shared reservoir, in order. */
    private void record(long... values) {
        for (long value : values) {
            reservoir.update(value);
        }
    }

    @Test
    public void handlesSmallDataStreams() throws Exception {
        record(1, 2);

        assertThat(reservoir.getSnapshot().getValues())
                .containsOnly(1, 2);
    }

    @Test
    public void onlyKeepsTheMostRecentFromBigDataStreams() throws Exception {
        record(1, 2, 3, 4);

        assertThat(reservoir.getSnapshot().getValues())
                .containsOnly(2, 3, 4);
    }
}
| networknt/light-4j | metrics/src/test/java/io/dropwizard/metrics/SlidingWindowReservoirTest.java | Java | apache-2.0 | 1,479 |
/*
* Copyright (c) 2008-2017, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.concurrent.atomicreference;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IAtomicReference;
import com.hazelcast.test.HazelcastParallelClassRunner;
import com.hazelcast.test.HazelcastTestSupport;
import com.hazelcast.test.annotation.ParallelTest;
import com.hazelcast.test.annotation.QuickTest;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import java.io.Serializable;
import java.util.concurrent.ExecutionException;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
@RunWith(HazelcastParallelClassRunner.class)
@Category({QuickTest.class, ParallelTest.class})
public class AtomicReferenceInstanceSharingTest extends HazelcastTestSupport {

    private HazelcastInstance[] instances;
    private HazelcastInstance local;
    private HazelcastInstance remote;

    @Before
    public void setUp() {
        instances = createHazelcastInstanceFactory(2).newInstances();
        warmUpPartitions(instances);
        local = instances[0];
        remote = instances[1];
    }

    @Test
    public void invocationToLocalMember() throws ExecutionException, InterruptedException {
        assertGetReturnsFreshCopies(local);
    }

    @Test
    public void invocationToRemoteMember() throws ExecutionException, InterruptedException {
        assertGetReturnsFreshCopies(remote);
    }

    /**
     * Stores a value in an IAtomicReference whose partition is owned by {@code owner}
     * and verifies that every {@code get()} yields a distinct deserialized copy —
     * neither the inserted object nor a previously returned one is shared.
     * (Extracted: both test methods were byte-identical apart from the key owner.)
     */
    private void assertGetReturnsFreshCopies(HazelcastInstance owner) {
        String key = generateKeyOwnedBy(owner);
        IAtomicReference<DummyObject> ref = local.getAtomicReference(key);
        DummyObject inserted = new DummyObject();
        ref.set(inserted);

        DummyObject get1 = ref.get();
        DummyObject get2 = ref.get();

        assertNotNull(get1);
        assertNotNull(get2);
        assertNotSame(get1, get2);
        assertNotSame(get1, inserted);
        assertNotSame(get2, inserted);
    }

    // Must be Serializable so Hazelcast can serialize it into the reference.
    public static class DummyObject implements Serializable {
    }
}
| juanavelez/hazelcast | hazelcast/src/test/java/com/hazelcast/concurrent/atomicreference/AtomicReferenceInstanceSharingTest.java | Java | apache-2.0 | 2,990 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/codedeploy/model/TrafficRoutingType.h>
#include <aws/core/utils/HashingUtils.h>
#include <aws/core/Globals.h>
#include <aws/core/utils/EnumParseOverflowContainer.h>
using namespace Aws::Utils;
namespace Aws
{
namespace CodeDeploy
{
namespace Model
{
namespace TrafficRoutingTypeMapper
{
// Precomputed hashes of the service's enum member names, used for fast lookup.
static const int TimeBasedCanary_HASH = HashingUtils::HashString("TimeBasedCanary");
static const int TimeBasedLinear_HASH = HashingUtils::HashString("TimeBasedLinear");
static const int AllAtOnce_HASH = HashingUtils::HashString("AllAtOnce");

// Maps a traffic-routing type name to its enum value by hash comparison.
// Unknown names are recorded in the global enum-overflow container (when present)
// so values newly added by the service still round-trip instead of being dropped;
// without the container, NOT_SET is returned.
TrafficRoutingType GetTrafficRoutingTypeForName(const Aws::String& name)
{
  int hashCode = HashingUtils::HashString(name.c_str());
  if (hashCode == TimeBasedCanary_HASH)
  {
    return TrafficRoutingType::TimeBasedCanary;
  }
  else if (hashCode == TimeBasedLinear_HASH)
  {
    return TrafficRoutingType::TimeBasedLinear;
  }
  else if (hashCode == AllAtOnce_HASH)
  {
    return TrafficRoutingType::AllAtOnce;
  }
  EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
  if(overflowContainer)
  {
    overflowContainer->StoreOverflow(hashCode, name);
    return static_cast<TrafficRoutingType>(hashCode);
  }

  return TrafficRoutingType::NOT_SET;
}
// Converts an enum value back to its service-defined name. Values outside the
// known set are looked up in the overflow container (populated by
// GetTrafficRoutingTypeForName); otherwise an empty string is returned.
Aws::String GetNameForTrafficRoutingType(TrafficRoutingType enumValue)
{
  switch(enumValue)
  {
  case TrafficRoutingType::TimeBasedCanary:
    return "TimeBasedCanary";
  case TrafficRoutingType::TimeBasedLinear:
    return "TimeBasedLinear";
  case TrafficRoutingType::AllAtOnce:
    return "AllAtOnce";
  default:
    EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
    if(overflowContainer)
    {
      return overflowContainer->RetrieveOverflow(static_cast<int>(enumValue));
    }
    return {};
  }
}
} // namespace TrafficRoutingTypeMapper
} // namespace Model
} // namespace CodeDeploy
} // namespace Aws
| awslabs/aws-sdk-cpp | aws-cpp-sdk-codedeploy/source/model/TrafficRoutingType.cpp | C++ | apache-2.0 | 2,400 |
/*
*
* Copyright 2019 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"fmt"
"testing"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/resolver/manual"
)
// Compile-time check that funcBalancer implements the v2 balancer API.
var _ balancer.V2Balancer = &funcBalancer{}

// funcBalancer is a test balancer whose UpdateClientConnState behavior is
// injected by the test through the updateClientConnState field. All other
// methods are either no-ops or panic because the tests never exercise them.
type funcBalancer struct {
	updateClientConnState func(s balancer.ClientConnState) error
}

func (*funcBalancer) HandleSubConnStateChange(balancer.SubConn, connectivity.State) {
	panic("unimplemented") // v1 API
}
func (*funcBalancer) HandleResolvedAddrs([]resolver.Address, error) {
	panic("unimplemented") // v1 API
}
// UpdateClientConnState delegates to the test-supplied function.
func (b *funcBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
	return b.updateClientConnState(s)
}
func (*funcBalancer) ResolverError(error) {}
func (*funcBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {
	panic("unimplemented") // we never have sub-conns
}
func (*funcBalancer) Close() {}
// funcBalancerBuilder registers a fixed, pre-built funcBalancer under a
// test-chosen name so a test can select it via service config.
type funcBalancerBuilder struct {
	name     string
	instance *funcBalancer
}

// Build always returns the pre-built instance; the ClientConn and options are ignored.
func (b *funcBalancerBuilder) Build(balancer.ClientConn, balancer.BuildOptions) balancer.Balancer {
	return b.instance
}
func (b *funcBalancerBuilder) Name() string { return b.name }
// TestBalancerErrorResolverPolling injects balancer errors and verifies
// ResolveNow is called on the resolver with the appropriate backoff strategy
// being consulted between ResolveNow calls.
func (s) TestBalancerErrorResolverPolling(t *testing.T) {
	// The test balancer will return ErrBadResolverState iff the
	// ClientConnState contains no addresses.
	fb := &funcBalancer{
		updateClientConnState: func(s balancer.ClientConnState) error {
			if len(s.ResolverState.Addresses) == 0 {
				return balancer.ErrBadResolverState
			}
			return nil
		},
	}
	const balName = "BalancerErrorResolverPolling"
	balancer.Register(&funcBalancerBuilder{name: balName, instance: fb})
	// First callback triggers the failure (empty address list); second one
	// recovers. The service config below selects our registered balancer.
	testResolverErrorPolling(t,
		func(r *manual.Resolver) {
			// No addresses so the balancer will fail.
			r.CC.UpdateState(resolver.State{})
		}, func(r *manual.Resolver) {
			// UpdateState will block if ResolveNow is being called (which blocks on
			// rn), so call it in a goroutine. Include some address so the balancer
			// will be happy.
			go r.CC.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "x"}}})
		},
		WithDefaultServiceConfig(fmt.Sprintf(`{ "loadBalancingConfig": [{"%v": {}}] }`, balName)))
}
| Miciah/origin | vendor/google.golang.org/grpc/balancer_conn_wrappers_test.go | GO | apache-2.0 | 2,966 |
/*
* Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights
* Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.amazonaws.services.directory.model.transform;
import java.util.Map;
import java.util.Map.Entry;
import java.math.*;
import java.nio.ByteBuffer;
import com.amazonaws.services.directory.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;
import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;
/**
* CreateConditionalForwarderResult JSON Unmarshaller
*/
public class CreateConditionalForwarderResultJsonUnmarshaller implements
        Unmarshaller<CreateConditionalForwarderResult, JsonUnmarshallerContext> {

    // The result shape has no members, so the JSON context is intentionally unread.
    public CreateConditionalForwarderResult unmarshall(
            JsonUnmarshallerContext context) throws Exception {
        CreateConditionalForwarderResult createConditionalForwarderResult = new CreateConditionalForwarderResult();
        return createConditionalForwarderResult;
    }

    private static CreateConditionalForwarderResultJsonUnmarshaller instance;

    // NOTE(review): lazy init is not synchronized; presumably benign because the
    // unmarshaller is stateless (a racing thread at worst creates a second
    // instance) — confirm against the SDK's codegen conventions.
    public static CreateConditionalForwarderResultJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new CreateConditionalForwarderResultJsonUnmarshaller();
        return instance;
    }
}
| flofreud/aws-sdk-java | aws-java-sdk-directory/src/main/java/com/amazonaws/services/directory/model/transform/CreateConditionalForwarderResultJsonUnmarshaller.java | Java | apache-2.0 | 1,808 |
---
id: version-2.8.2-io-rabbitmq-source
title: RabbitMQ source connector
sidebar_label: RabbitMQ source connector
original_id: io-rabbitmq-source
---
The RabbitMQ source connector receives messages from RabbitMQ clusters
and writes messages to Pulsar topics.
## Configuration
The configuration of the RabbitMQ source connector has the following properties.
### Property
| Name | Type|Required | Default | Description
|------|----------|----------|---------|-------------|
| `connectionName` |String| true | " " (empty string) | The connection name. |
| `host` | String| true | " " (empty string) | The RabbitMQ host. |
| `port` | int |true | 5672 | The RabbitMQ port. |
| `virtualHost` |String|true | / | The virtual host used to connect to RabbitMQ. |
| `username` | String|false | guest | The username used to authenticate to RabbitMQ. |
| `password` | String|false | guest | The password used to authenticate to RabbitMQ. |
| `queueName` | String|true | " " (empty string) | The RabbitMQ queue name that messages should be read from or written to. |
| `requestedChannelMax` | int|false | 0 | The initially requested maximum channel number. <br><br>0 means unlimited. |
| `requestedFrameMax` | int|false |0 | The initially requested maximum frame size in octets. <br><br>0 means unlimited. |
| `connectionTimeout` | int|false | 60000 | The timeout of TCP connection establishment in milliseconds. <br><br>0 means infinite. |
| `handshakeTimeout` | int|false | 10000 | The timeout of AMQP0-9-1 protocol handshake in milliseconds. |
| `requestedHeartbeat` | int|false | 60 | The requested heartbeat timeout in seconds. |
| `prefetchCount` | int|false | 0 | The maximum number of messages that the server delivers.<br><br> 0 means unlimited. |
| `prefetchGlobal` | boolean|false | false |Whether the setting should be applied to the entire channel rather than each consumer. |
| `passive` | boolean|false | false | Whether the RabbitMQ consumer should create its own queue or bind to an existing one. |
### Example
Before using the RabbitMQ source connector, you need to create a configuration file through one of the following methods.
* JSON
```json
{
"host": "localhost",
"port": "5672",
"virtualHost": "/",
"username": "guest",
"password": "guest",
"queueName": "test-queue",
"connectionName": "test-connection",
"requestedChannelMax": "0",
"requestedFrameMax": "0",
"connectionTimeout": "60000",
"handshakeTimeout": "10000",
"requestedHeartbeat": "60",
"prefetchCount": "0",
"prefetchGlobal": "false",
"passive": "false"
}
```
* YAML
```yaml
configs:
host: "localhost"
port: 5672
virtualHost: "/"
username: "guest"
password: "guest"
queueName: "test-queue"
connectionName: "test-connection"
requestedChannelMax: 0
requestedFrameMax: 0
connectionTimeout: 60000
handshakeTimeout: 10000
requestedHeartbeat: 60
prefetchCount: 0
prefetchGlobal: "false"
passive: "false"
```
| massakam/pulsar | site2/website/versioned_docs/version-2.8.2/io-rabbitmq-source.md | Markdown | apache-2.0 | 3,158 |
/*
* Copyright 2020 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thoughtworks.go.server.controller.actions;
import com.thoughtworks.go.config.validation.GoConfigValidity;
import com.thoughtworks.go.server.web.JsonView;
import com.thoughtworks.go.server.web.SimpleJsonView;
import com.thoughtworks.go.serverhealth.ServerHealthState;
import com.thoughtworks.go.util.GoConstants;
import org.springframework.web.servlet.ModelAndView;
import javax.servlet.http.HttpServletResponse;
import java.util.LinkedHashMap;
import java.util.Map;
import static com.thoughtworks.go.util.GoConstants.ERROR_FOR_JSON;
import static com.thoughtworks.go.util.GoConstants.RESPONSE_CHARSET_JSON;
import static javax.servlet.http.HttpServletResponse.*;
/**
 * A {@link RestfulAction} that renders a JSON payload with an associated HTTP status.
 * Static factories cover the common status codes; {@link #from(ServerHealthState)}
 * maps a server-health result onto the appropriate status and error body.
 */
public class JsonAction implements RestfulAction {
    private final int status;
    private final Object json;

    public static JsonAction from(ServerHealthState serverHealthState) {
        if (serverHealthState.isSuccess()) {
            // Empty body: success carries no payload.
            return jsonCreated(new LinkedHashMap<>());
        }

        Map<String, Object> jsonLog = new LinkedHashMap<>();
        jsonLog.put(ERROR_FOR_JSON, serverHealthState.getDescription());
        return new JsonAction(serverHealthState.getType().getHttpCode(), jsonLog);
    }

    public static JsonAction jsonCreated(Object json) {
        return new JsonAction(SC_CREATED, json);
    }

    public static JsonAction jsonFound(Object json) {
        return new JsonAction(SC_OK, json);
    }

    public static JsonAction jsonOK() {
        return jsonOK(new LinkedHashMap<>());
    }

    public static JsonAction jsonNotAcceptable(Object json) {
        return new JsonAction(SC_NOT_ACCEPTABLE, json);
    }

    public static JsonAction jsonForbidden() {
        return new JsonAction(SC_FORBIDDEN, new LinkedHashMap<>());
    }

    public static JsonAction jsonForbidden(String message) {
        Map<String, Object> map = new LinkedHashMap<>();
        map.put(ERROR_FOR_JSON, message);
        return new JsonAction(SC_FORBIDDEN, map);
    }

    public static JsonAction jsonForbidden(Exception e) {
        return jsonForbidden(e.getMessage());
    }

    public static JsonAction jsonBadRequest(Object json) {
        return new JsonAction(SC_BAD_REQUEST, json);
    }

    public static JsonAction jsonNotFound(Object json) {
        return new JsonAction(SC_NOT_FOUND, json);
    }

    public static JsonAction jsonConflict(Object json) {
        return new JsonAction(SC_CONFLICT, json);
    }

    /** Merge-related config validity errors render as 409 Conflict; anything else as 404. */
    public static JsonAction jsonByValidity(Object json, GoConfigValidity.InvalidGoConfig configValidity) {
        return (configValidity.isType(GoConfigValidity.VT_CONFLICT) ||
                configValidity.isType(GoConfigValidity.VT_MERGE_OPERATION_ERROR) ||
                configValidity.isType(GoConfigValidity.VT_MERGE_POST_VALIDATION_ERROR) ||
                configValidity.isType(GoConfigValidity.VT_MERGE_PRE_VALIDATION_ERROR)) ? jsonConflict(json) : jsonNotFound(json);
    }

    /**
     * @deprecated replace with createView
     */
    @Override
    public ModelAndView respond(HttpServletResponse response) {
        return new JsonModelAndView(response, json, status);
    }

    private JsonAction(int status, Object json) {
        this.status = status;
        this.json = json;
    }

    public ModelAndView createView() {
        SimpleJsonView view = new SimpleJsonView(status, json);
        return new ModelAndView(view, JsonView.asMap(json));
    }

    public static JsonAction jsonOK(Map jsonMap) {
        return new JsonAction(SC_OK, jsonMap);
    }

    // Static: this class never touches JsonAction instance state, so holding an
    // implicit outer-instance reference was unnecessary.
    private static class JsonModelAndView extends ModelAndView {

        @Override
        public String getViewName() {
            return "jsonView";
        }

        public JsonModelAndView(HttpServletResponse response, Object json, int status) {
            super(new JsonView(), JsonView.asMap(json));

            // In IE, there's a problem with caching. We want to cache if we can.
            // This will force the browser to clear the cache only for this page.
            // If any other pages need to clear the cache, we might want to move this
            // logic to an intercepter.
            response.addHeader("Cache-Control", GoConstants.CACHE_CONTROL);
            response.setStatus(status);
            response.setContentType(RESPONSE_CHARSET_JSON);
        }
    }
}
| ketan/gocd | server/src/main/java/com/thoughtworks/go/server/controller/actions/JsonAction.java | Java | apache-2.0 | 4,901 |
/*
* Copyright 2000-2017 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.util.xml.impl;
import com.intellij.ide.highlighter.DomSupportEnabled;
import com.intellij.openapi.Disposable;
import com.intellij.openapi.components.ServiceManager;
import com.intellij.openapi.fileTypes.StdFileTypes;
import com.intellij.openapi.module.Module;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.roots.ProjectFileIndex;
import com.intellij.openapi.util.Condition;
import com.intellij.openapi.util.Disposer;
import com.intellij.openapi.util.Factory;
import com.intellij.openapi.util.Key;
import com.intellij.openapi.vfs.*;
import com.intellij.openapi.vfs.newvfs.NewVirtualFile;
import com.intellij.pom.PomManager;
import com.intellij.pom.PomModel;
import com.intellij.pom.PomModelAspect;
import com.intellij.pom.event.PomModelEvent;
import com.intellij.pom.event.PomModelListener;
import com.intellij.pom.xml.XmlAspect;
import com.intellij.pom.xml.XmlChangeSet;
import com.intellij.psi.PsiElement;
import com.intellij.psi.PsiFile;
import com.intellij.psi.PsiFileFactory;
import com.intellij.psi.PsiManager;
import com.intellij.psi.impl.PsiManagerEx;
import com.intellij.psi.xml.XmlAttribute;
import com.intellij.psi.xml.XmlElement;
import com.intellij.psi.xml.XmlFile;
import com.intellij.psi.xml.XmlTag;
import com.intellij.reference.SoftReference;
import com.intellij.semantic.SemKey;
import com.intellij.semantic.SemService;
import com.intellij.util.EventDispatcher;
import com.intellij.util.SmartList;
import com.intellij.util.containers.ContainerUtil;
import com.intellij.util.xml.*;
import com.intellij.util.xml.events.DomEvent;
import com.intellij.util.xml.reflect.AbstractDomChildrenDescription;
import com.intellij.util.xml.reflect.DomGenericInfo;
import net.sf.cglib.proxy.AdvancedProxy;
import net.sf.cglib.proxy.InvocationHandler;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.lang.ref.WeakReference;
import java.lang.reflect.Type;
import java.util.*;
/**
 * Project-level {@link DomManager} implementation. Maps XML PSI files to DOM file
 * elements, caches DOM invocation handlers through {@link SemService}, and fires
 * {@link DomEvent}s in response to POM-model and virtual-file-system changes.
 *
 * @author peter
 */
public final class DomManagerImpl extends DomManager {
  // Marker key for mock DOM elements (see MOCK_DESCRIPTION below).
  private static final Key<Object> MOCK = Key.create("MockElement");
  // Caches the last DomFileElement computed for an XmlFile (weakly referenced).
  static final Key<WeakReference<DomFileElementImpl>> CACHED_FILE_ELEMENT = Key.create("CACHED_FILE_ELEMENT");
  static final Key<DomFileDescription> MOCK_DESCRIPTION = Key.create("MockDescription");
  // Sem keys under which the various DOM handler kinds are cached per XML element.
  static final SemKey<FileDescriptionCachedValueProvider> FILE_DESCRIPTION_KEY = SemKey.createKey("FILE_DESCRIPTION_KEY");
  static final SemKey<DomInvocationHandler> DOM_HANDLER_KEY = SemKey.createKey("DOM_HANDLER_KEY");
  static final SemKey<IndexedElementInvocationHandler> DOM_INDEXED_HANDLER_KEY = DOM_HANDLER_KEY.subKey("DOM_INDEXED_HANDLER_KEY");
  static final SemKey<CollectionElementInvocationHandler> DOM_COLLECTION_HANDLER_KEY = DOM_HANDLER_KEY.subKey("DOM_COLLECTION_HANDLER_KEY");
  static final SemKey<CollectionElementInvocationHandler> DOM_CUSTOM_HANDLER_KEY = DOM_HANDLER_KEY.subKey("DOM_CUSTOM_HANDLER_KEY");
  static final SemKey<AttributeChildInvocationHandler> DOM_ATTRIBUTE_HANDLER_KEY = DOM_HANDLER_KEY.subKey("DOM_ATTRIBUTE_HANDLER_KEY");

  private final EventDispatcher<DomEventListener> myListeners = EventDispatcher.create(DomEventListener.class);

  private final Project myProject;
  private final SemService mySemService;
  private final DomApplicationComponent myApplicationComponent;

  // True while a DOM-initiated PSI change is in progress; suppresses event feedback.
  private boolean myChanging;
public DomManagerImpl(Project project) {
  super(project);
  myProject = project;
  mySemService = SemService.getSemService(project);
  myApplicationComponent = DomApplicationComponent.getInstance();

  // Fire a DOM change event for every changed XML file reported by the POM model,
  // unless the change originated from DOM itself (myChanging guard).
  final PomModel pomModel = PomManager.getModel(project);
  pomModel.addModelListener(new PomModelListener() {
    @Override
    public void modelChanged(PomModelEvent event) {
      if (myChanging) return;

      final XmlChangeSet changeSet = (XmlChangeSet)event.getChangeSet(pomModel.getModelAspect(XmlAspect.class));
      if (changeSet != null) {
        for (XmlFile file : changeSet.getChangedFiles()) {
          DomFileElementImpl<DomElement> element = getCachedFileElement(file);
          if (element != null) {
            // changed-ness doesn't matter here, so pass false
            fireEvent(new DomEvent(element, false));
          }
        }
      }
    }

    @Override
    public boolean isAspectChangeInteresting(PomModelAspect aspect) {
      return aspect instanceof XmlAspect;
    }
  }, project);

  // Mirror VFS-level changes (external edits, moves, deletions, renames) into DOM events.
  VirtualFileManager.getInstance().addVirtualFileListener(new VirtualFileListener() {
    // Events computed before a file is deleted, fired after deletion completes.
    private final List<DomEvent> myDeletionEvents = new SmartList<>();

    @Override
    public void contentsChanged(@NotNull VirtualFileEvent event) {
      // Saves don't change content as far as DOM is concerned (PSI already reflected it).
      if (!event.isFromSave()) {
        fireEvents(calcDomChangeEvents(event.getFile()));
      }
    }

    @Override
    public void fileMoved(@NotNull VirtualFileMoveEvent event) {
      fireEvents(calcDomChangeEvents(event.getFile()));
    }

    @Override
    public void beforeFileDeletion(@NotNull final VirtualFileEvent event) {
      // Compute now: after deletion the PSI/cached elements are gone.
      myDeletionEvents.addAll(calcDomChangeEvents(event.getFile()));
    }

    @Override
    public void fileDeleted(@NotNull VirtualFileEvent event) {
      if (!myDeletionEvents.isEmpty()) {
        fireEvents(myDeletionEvents);
        myDeletionEvents.clear();
      }
    }

    @Override
    public void propertyChanged(@NotNull VirtualFilePropertyEvent event) {
      // A rename can change which DomFileDescription applies to the file.
      final VirtualFile file = event.getFile();
      if (!file.isDirectory() && VirtualFile.PROP_NAME.equals(event.getPropertyName())) {
        fireEvents(calcDomChangeEvents(file));
      }
    }
  }, myProject);
}
// Returns the global PSI modification counter, used to invalidate DOM caches.
public long getPsiModificationCount() {
  return PsiManager.getInstance(getProject()).getModificationTracker().getModificationCount();
}

// Caches a DOM invocation handler for the given XML element under the given sem key.
public <T extends DomInvocationHandler> void cacheHandler(SemKey<T> key, XmlElement element, T handler) {
  mySemService.setCachedSemElement(key, element, handler);
}

// Returns the already-built PSI file for the virtual file, or null if none is cached
// (deliberately avoids forcing PSI creation).
private PsiFile getCachedPsiFile(VirtualFile file) {
  return PsiManagerEx.getInstanceEx(myProject).getFileManager().getCachedPsiFile(file);
}
// Collects a DOM change event for every XML file under 'file' (recursively) that
// is in project content and already has a cached DOM file element. Only cached
// PSI/VFS children are visited, so this never forces file loading.
private List<DomEvent> calcDomChangeEvents(final VirtualFile file) {
  if (!(file instanceof NewVirtualFile) || myProject.isDisposed()) {
    return Collections.emptyList();
  }

  final List<DomEvent> events = ContainerUtil.newArrayList();
  VfsUtilCore.visitChildrenRecursively(file, new VirtualFileVisitor() {
    @Override
    public boolean visitFile(@NotNull VirtualFile file) {
      if (myProject.isDisposed() || !ProjectFileIndex.SERVICE.getInstance(myProject).isInContent(file)) {
        return false;
      }
      if (!file.isDirectory() && StdFileTypes.XML == file.getFileType()) {
        final PsiFile psiFile = getCachedPsiFile(file);
        if (psiFile != null && StdFileTypes.XML.equals(psiFile.getFileType()) && psiFile instanceof XmlFile) {
          final DomFileElementImpl domElement = getCachedFileElement((XmlFile)psiFile);
          if (domElement != null) {
            events.add(new DomEvent(domElement, false));
          }
        }
      }
      return true;
    }

    @Nullable
    @Override
    public Iterable<VirtualFile> getChildrenIterable(@NotNull VirtualFile file) {
      // Restrict traversal to children already known to the VFS.
      return ((NewVirtualFile)file).getCachedChildren();
    }
  });
  return events;
}
// Convenience accessor returning the project's DomManager downcast to this impl.
@SuppressWarnings({"MethodOverridesStaticMethodOfSuperclass"})
public static DomManagerImpl getDomManager(Project project) {
  return (DomManagerImpl)DomManager.getDomManager(project);
}

@Override
public void addDomEventListener(DomEventListener listener, Disposable parentDisposable) {
  myListeners.addListener(listener, parentDisposable);
}

@Override
public final ConverterManager getConverterManager() {
  return ServiceManager.getService(ConverterManager.class);
}

@Override
public final ModelMerger createModelMerger() {
  return new ModelMergerImpl();
}
// Broadcasts a single DOM event to all listeners and bumps the modification count.
// Suppressed inside an atomic Sem change to avoid event storms.
final void fireEvent(DomEvent event) {
  if (mySemService.isInsideAtomicChange()) return;
  incModificationCount();
  myListeners.getMulticaster().eventOccured(event);
}

private void fireEvents(Collection<DomEvent> events) {
  for (DomEvent event : events) {
    fireEvent(event);
  }
}
@Override
public final DomGenericInfo getGenericInfo(final Type type) {
  return myApplicationComponent.getStaticGenericInfo(type);
}

// Unwraps the DomInvocationHandler backing a DOM proxy. Returns null for file
// elements and for stable proxies whose wrapped element has gone away.
@Nullable
public static DomInvocationHandler getDomInvocationHandler(DomElement proxy) {
  if (proxy instanceof DomFileElement) {
    return null;
  }
  if (proxy instanceof DomInvocationHandler) {
    return (DomInvocationHandler)proxy;
  }
  final InvocationHandler handler = AdvancedProxy.getInvocationHandler(proxy);
  if (handler instanceof StableInvocationHandler) {
    //noinspection unchecked
    final DomElement element = ((StableInvocationHandler<DomElement>)handler).getWrappedElement();
    // Recurse: the wrapped element may itself be a proxy.
    return element == null ? null : getDomInvocationHandler(element);
  }
  if (handler instanceof DomInvocationHandler) {
    return (DomInvocationHandler)handler;
  }
  return null;
}

// Like getDomInvocationHandler, but fails fast when no handler can be extracted.
@NotNull
public static DomInvocationHandler getNotNullHandler(DomElement proxy) {
  DomInvocationHandler handler = getDomInvocationHandler(proxy);
  if (handler == null) {
    throw new AssertionError("null handler for " + proxy);
  }
  return handler;
}

// Assumes the proxy is backed by a StableInvocationHandler; ClassCastException otherwise.
public static StableInvocationHandler getStableInvocationHandler(Object proxy) {
  return (StableInvocationHandler)AdvancedProxy.getInvocationHandler(proxy);
}
public DomApplicationComponent getApplicationComponent() {
return myApplicationComponent;
}
@Override
public final Project getProject() {
return myProject;
}
  /**
   * Returns the file element for {@code file}, registering a mock DOM file description
   * (with the given root element class and root tag name) on first use.
   */
  @Override
  @NotNull
  public final <T extends DomElement> DomFileElementImpl<T> getFileElement(final XmlFile file, final Class<T> aClass, String rootTagName) {
    //noinspection unchecked
    if (file.getUserData(MOCK_DESCRIPTION) == null) {
      file.putUserData(MOCK_DESCRIPTION, new MockDomFileDescription<>(aClass, rootTagName, file.getViewProvider().getVirtualFile()));
      // Cached sem elements may reflect the state before the mock description existed.
      mySemService.clearCache();
    }
    final DomFileElementImpl<T> fileElement = getFileElement(file);
    assert fileElement != null;
    return fileElement;
  }
  /** Returns the sem-cached provider that computes the file element for {@code xmlFile}. */
  @SuppressWarnings({"unchecked"})
  @NotNull
  final <T extends DomElement> FileDescriptionCachedValueProvider<T> getOrCreateCachedValueProvider(final XmlFile xmlFile) {
    //noinspection ConstantConditions
    return mySemService.getSemElement(FILE_DESCRIPTION_KEY, xmlFile);
  }
  /** Returns the registered file descriptions for the given root tag name (delegates to the application component). */
  public final Set<DomFileDescription> getFileDescriptions(String rootTagName) {
    return myApplicationComponent.getFileDescriptions(rootTagName);
  }
  /** Returns descriptions that accept files whose root tag differs from their declared one. */
  public final Set<DomFileDescription> getAcceptingOtherRootTagNameDescriptions() {
    return myApplicationComponent.getAcceptingOtherRootTagNameDescriptions();
  }
  /** Component name used for registration/diagnostics; the fully-qualified class name. */
  @NotNull
  @NonNls
  public final String getComponentName() {
    return getClass().getName();
  }
  /** Runs {@code change} with the "changing" flag raised, restoring the previous flag value afterwards. */
  final void runChange(Runnable change) {
    final boolean b = setChanging(true);
    try {
      change.run();
    }
    finally {
      setChanging(b);
    }
  }
  /** Sets the guard flag consulted while DOM mutations are applied; returns the previous value. */
  final boolean setChanging(final boolean changing) {
    boolean oldChanging = myChanging;
    if (changing) {
      // Nested "changing" scopes are not expected.
      assert !oldChanging;
    }
    myChanging = changing;
    return oldChanging;
  }
  /**
   * Returns the DOM file element for {@code file}, or null when the file is null,
   * its file type has no DOM support, or it is backed by a directory.
   */
  @Override
  @Nullable
  public final <T extends DomElement> DomFileElementImpl<T> getFileElement(XmlFile file) {
    if (file == null) return null;
    if (!(file.getFileType() instanceof DomSupportEnabled)) return null;
    final VirtualFile virtualFile = file.getVirtualFile();
    if (virtualFile != null && virtualFile.isDirectory()) return null;
    return this.<T>getOrCreateCachedValueProvider(file).getFileElement();
  }
  /** Returns the file element previously cached on the file via a soft reference, if it is still alive. */
  @Nullable
  static <T extends DomElement> DomFileElementImpl<T> getCachedFileElement(@NotNull XmlFile file) {
    //noinspection unchecked
    return SoftReference.dereference(file.getUserData(CACHED_FILE_ELEMENT));
  }
  /**
   * Returns the file element only if the file's DOM description has a root element class
   * assignable to {@code domClass}; otherwise null.
   */
  @Override
  @Nullable
  public final <T extends DomElement> DomFileElementImpl<T> getFileElement(XmlFile file, Class<T> domClass) {
    final DomFileDescription description = getDomFileDescription(file);
    if (description != null && myApplicationComponent.assignabilityCache.isAssignable(domClass, description.getRootElementClass())) {
      return getFileElement(file);
    }
    return null;
  }
  /** Returns the DOM element proxy for the tag, or null during a DOM-driven change or when the tag has no handler. */
  @Override
  @Nullable
  public final DomElement getDomElement(final XmlTag element) {
    if (myChanging) return null;
    final DomInvocationHandler handler = getDomHandler(element);
    return handler != null ? handler.getProxy() : null;
  }
@Override
@Nullable
public GenericAttributeValue getDomElement(final XmlAttribute attribute) {
if (myChanging) return null;
final AttributeChildInvocationHandler handler = mySemService.getSemElement(DOM_ATTRIBUTE_HANDLER_KEY, attribute);
return handler == null ? null : (GenericAttributeValue)handler.getProxy();
}
  /** Returns the invocation handler for the element, preferring already-cached sem elements before computing one. */
  @Nullable
  public DomInvocationHandler getDomHandler(final XmlElement tag) {
    if (tag == null) return null;
    List<DomInvocationHandler> cached = mySemService.getCachedSemElements(DOM_HANDLER_KEY, tag);
    if (cached != null && !cached.isEmpty()) {
      return cached.get(0);
    }
    return mySemService.getSemElement(DOM_HANDLER_KEY, tag);
  }
  /** Finds the children description of {@code parent} that matches the given tag. */
  @Override
  @Nullable
  public AbstractDomChildrenDescription findChildrenDescription(@NotNull final XmlTag tag, @NotNull final DomElement parent) {
    return findChildrenDescription(tag, getDomInvocationHandler(parent));
  }
  /** Looks up the children description by the tag's local name and namespace in the parent's generic info. */
  static AbstractDomChildrenDescription findChildrenDescription(final XmlTag tag, final DomInvocationHandler parent) {
    final DomGenericInfoEx info = parent.getGenericInfo();
    return info.findChildrenDescription(parent, tag.getLocalName(), tag.getNamespace(), false, tag.getName());
  }
public final boolean isDomFile(@Nullable PsiFile file) {
return file instanceof XmlFile && getFileElement((XmlFile)file) != null;
}
@Nullable
public final DomFileDescription<?> getDomFileDescription(PsiElement element) {
if (element instanceof XmlElement) {
final PsiFile psiFile = element.getContainingFile();
if (psiFile instanceof XmlFile) {
return getDomFileDescription((XmlFile)psiFile);
}
}
return null;
}
  /**
   * Creates a DOM element of {@code aClass} backed by an in-memory XML file,
   * tagging the file so {@link #isMockElement} can recognize it and so the module is retrievable.
   */
  @Override
  public final <T extends DomElement> T createMockElement(final Class<T> aClass, final Module module, final boolean physical) {
    final XmlFile file = (XmlFile)PsiFileFactory.getInstance(myProject).createFileFromText("a.xml", StdFileTypes.XML, "", (long)0, physical);
    file.putUserData(MOCK_ELEMENT_MODULE, module);
    file.putUserData(MOCK, new Object());
    return getFileElement(file, aClass, "I_sincerely_hope_that_nobody_will_have_such_a_root_tag_name").getRootElement();
  }
  /** True when the element lives in a file created by {@link #createMockElement}. */
  @Override
  public final boolean isMockElement(DomElement element) {
    return DomUtil.getFile(element).getUserData(MOCK) != null;
  }
@Override
public final <T extends DomElement> T createStableValue(final Factory<T> provider) {
return createStableValue(provider, t -> t.isValid());
}
@Override
public final <T> T createStableValue(final Factory<T> provider, final Condition<T> validator) {
final T initial = provider.create();
assert initial != null;
final StableInvocationHandler handler = new StableInvocationHandler<>(initial, provider, validator);
final Set<Class> intf = new HashSet<>();
ContainerUtil.addAll(intf, initial.getClass().getInterfaces());
intf.add(StableElement.class);
//noinspection unchecked
return (T)AdvancedProxy.createProxy(initial.getClass().getSuperclass(), intf.toArray(new Class[intf.size()]),
handler);
}
public final <T extends DomElement> void registerFileDescription(final DomFileDescription<T> description, Disposable parentDisposable) {
registerFileDescription(description);
Disposer.register(parentDisposable, new Disposable() {
@Override
public void dispose() {
getFileDescriptions(description.getRootTagName()).remove(description);
getAcceptingOtherRootTagNameDescriptions().remove(description);
}
});
}
  /** Registers {@code description} application-wide, dropping cached sem elements that may predate it. */
  @Override
  public final void registerFileDescription(final DomFileDescription description) {
    mySemService.clearCache();
    myApplicationComponent.registerFileDescription(description);
  }
  /** Returns the resolve scope for {@code element} as defined by its file's DOM description. */
  @Override
  @NotNull
  public final DomElement getResolvingScope(GenericDomValue element) {
    final DomFileDescription<?> description = DomUtil.getFileElement(element).getFileDescription();
    return description.getResolveScope(element);
  }
  /** Returns the identity scope for {@code element} as defined by its file's DOM description. */
  @Override
  @Nullable
  public final DomElement getIdentityScope(DomElement element) {
    final DomFileDescription description = DomUtil.getFileElement(element).getFileDescription();
    return description.getIdentityScope(element);
  }
  /** Returns the application-level {@link TypeChooserManager}. */
  @Override
  public TypeChooserManager getTypeChooserManager() {
    return myApplicationComponent.getTypeChooserManager();
  }
  /** Runs {@code change} as one atomic sem change; bumps the modification count once the outermost change finishes. */
  public void performAtomicChange(@NotNull Runnable change) {
    mySemService.performAtomicChange(change);
    if (!mySemService.isInsideAtomicChange()) {
      incModificationCount();
    }
  }
  /** Returns the {@link SemService} backing this manager's caches. */
  public SemService getSemService() {
    return mySemService;
  }
}
| ThiagoGarciaAlves/intellij-community | xml/dom-impl/src/com/intellij/util/xml/impl/DomManagerImpl.java | Java | apache-2.0 | 17,979 |
# Copyright (c) 2013-2016 Cinchapi Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nose.tools import *
import os
import time
from subprocess import *
import signal
from . import test_data
from concourse import Concourse, Tag, Link, Diff, Operator, constants
from concourse.thriftapi.shared.ttypes import Type
from concourse.utils import python_to_thrift
import ujson
from tests import ignore
import socket
class IntegrationBaseTest(object):
    """
    Base class for unit tests that use Mockcourse.
    """

    # Port Mockcourse listens on, the subprocess handle, and the shared client connection.
    port = None
    process = None
    client = None
    # Seconds to sleep so that a client-side time anchor lands after writes that
    # have already reached the server.
    expected_network_latency = 0.05

    @classmethod
    def setup_class(cls):
        """ Fixture method to start Mockcourse and connect before the tests start to run.
        """
        port = IntegrationBaseTest.get_open_port()
        dir = os.path.dirname(os.path.realpath(__file__)) + '/../../mockcourse'
        script = dir + '/mockcourse '+str(port)
        cls.process = Popen(script, shell=True, preexec_fn=os.setsid)
        cls.client = None
        # Retry while Mockcourse boots; only re-raise once retries are exhausted.
        tries = 5
        while tries > 0 and cls.client is None:
            tries -= 1
            time.sleep(1)  # Wait for Mockcourse to start
            try:
                cls.client = Concourse.connect(port=port)
            except RuntimeError as e:
                if tries == 0:
                    raise e
                else:
                    continue

    @classmethod
    def teardown_class(cls):
        """ Fixture method to kill Mockcourse after all the tests have run.
        """
        # Kill the entire process group (Mockcourse was started with os.setsid).
        os.killpg(cls.process.pid, signal.SIGTERM)

    def tearDown(self):
        """Logout and clear all the data that the client stored in Mockcourse after each test. This ensures that the
        environment for each test is clean and predictable.
        """
        self.client.logout()  # Mockcourse logout simply clears the content of the datastore

    def get_time_anchor(self):
        """ Return a time anchor and sleep for long enough to account for network latency
        """
        anchor = test_data.current_time_millis()
        time.sleep(self.expected_network_latency)
        return anchor

    @staticmethod
    def get_open_port():
        """Return an open port that is chosen by the OS
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(("localhost", 0))
        port = sock.getsockname()[1]
        sock.close()
        return port
class TestPythonClientDriver(IntegrationBaseTest):
"""
Implementations for standard unit tests that verify the Python client driver
conforms to the Concourse standard
"""
    def __do_test_value_round_trip(self, value, ttype):
        """
        Do the round_trip test logic for the specified value of the specified type
        :param value: the value to store and read back
        :param ttype: the thrift Type the stored value is expected to serialize as
        """
        key = test_data.random_string()
        record = self.client.add(key=key, value=value)
        stored = self.client.get(key=key, record=record)
        assert_equal(value, stored)
        assert_equal(python_to_thrift(stored).type, ttype)

    def test_string_round_trip(self):
        """Strings round-trip and serialize as STRING."""
        self.__do_test_value_round_trip(test_data.random_string(), Type.STRING)

    def test_bool_round_trip(self):
        """Booleans round-trip and serialize as BOOLEAN."""
        self.__do_test_value_round_trip(test_data.random_bool(), Type.BOOLEAN)

    def test_tag_round_trip(self):
        """Tags round-trip and serialize as TAG."""
        self.__do_test_value_round_trip(Tag.create(test_data.random_string()), Type.TAG)

    def test_link_round_trip(self):
        """Links round-trip and serialize as LINK."""
        self.__do_test_value_round_trip(Link.to(test_data.random_int()), Type.LINK)

    def test_int_round_trip(self):
        """Values inside the 32-bit range (including both boundaries) serialize as INTEGER."""
        self.__do_test_value_round_trip(test_data.random_int(), Type.INTEGER)
        self.__do_test_value_round_trip(2147483647, Type.INTEGER)
        self.__do_test_value_round_trip(-2147483648, Type.INTEGER)

    def test_long_round_trip(self):
        """Values just outside the 32-bit range serialize as LONG."""
        self.__do_test_value_round_trip(2147483648, Type.LONG)
        self.__do_test_value_round_trip(-2147483649, Type.LONG)
        self.__do_test_value_round_trip(test_data.random_long(), Type.LONG)

    def test_float_round_trip(self):
        """Floating point values serialize as DOUBLE."""
        self.__do_test_value_round_trip(3.4028235E38, Type.DOUBLE)
        self.__do_test_value_round_trip(-1.4E-45, Type.DOUBLE)
    def test_abort(self):
        """A staged write that is aborted must not be visible afterwards."""
        self.client.stage()
        key = test_data.random_string()
        value = "some value"
        record = 1
        self.client.add(key=key, value=value, record=record)
        self.client.abort()
        assert_is_none(self.client.get(key=key, record=record))

    def test_add_key_value(self):
        """add(key, value) creates a new record and stores the value there."""
        key = test_data.random_string()
        value = "static value"
        record = self.client.add(key=key, value=value)
        assert_is_not_none(record)
        stored = self.client.get(key=key, record=record)
        assert_equal(stored, value)

    def test_add_key_value_record(self):
        """add(key, value, record) returns True and stores the value in that record."""
        key = test_data.random_string()
        value = "static value"
        record = 17
        assert_true(self.client.add(key=key, value=value, record=record))
        stored = self.client.get(key=key, record=record)
        assert_equal(stored, value)

    def test_add_key_value_records(self):
        """add(key, value, records) returns a per-record success dict."""
        key = test_data.random_string()
        value = "static value"
        records = [1, 2, 3]
        result = self.client.add(key=key, value=value, records=records)
        assert_true(isinstance(result, dict))
        assert_true(result.get(1))
        assert_true(result.get(2))
        assert_true(result.get(3))
    def test_audit_key_record(self):
        """Auditing a key/record yields one entry per revision, alternating ADD/REMOVE."""
        key = test_data.random_string()
        values = ["one", "two", "three"]
        record = 1000
        for value in values:
            self.client.set(key, value, record)
        audit = self.client.audit(key, record)
        # 3 set() calls produce 5 revisions: a bare ADD first, then each later set
        # is a REMOVE of the old value followed by an ADD of the new one.
        assert_equal(5, len(audit))
        expected = 'ADD'
        for k, v in audit.items():
            assert_true(v.startswith(expected))
            expected = 'REMOVE' if expected == 'ADD' else 'ADD'

    def test_audit_key_record_start(self):
        """Auditing with a start timestamp only returns revisions at/after that time."""
        key = test_data.random_string()
        values = ["one", "two", "three"]
        record = 1001
        for value in values:
            self.client.set(key, value, record)
        start = self.client.time()
        values = [4, 5, 6]
        for value in values:
            self.client.set(key, value, record)
        # 3 sets after start = REMOVE+ADD x 3 = 6 revisions.
        audit = self.client.audit(key, record, start=start)
        assert_equal(6, len(audit))

    def test_audit_key_record_start_end(self):
        """Auditing with start and end bounds excludes revisions outside the window."""
        key = test_data.random_string()
        values = ["one", "two", "three"]
        record = 1002
        for value in values:
            self.client.set(key, value, record)
        start = self.client.time()
        values = [4, 5, 6]
        for value in values:
            self.client.set(key, value, record)
        end = self.client.time()
        values = [True, False]
        for value in values:
            self.client.set(key, value, record)
        audit = self.client.audit(key, record, start=start, end=end)
        assert_equal(6, len(audit))

    def test_audit_key_record_startstr(self):
        """Same as test_audit_key_record_start but with a natural-language start string."""
        key = test_data.random_string()
        values = ["one", "two", "three"]
        record = 1001
        for value in values:
            self.client.set(key, value, record)
        anchor = self.get_time_anchor()
        values = [4, 5, 6]
        for value in values:
            self.client.set(key, value, record)
        start = test_data.get_elapsed_millis_string(anchor)
        audit = self.client.audit(key, record, start=start)
        assert_equal(6, len(audit))

    def test_audit_key_record_startstr_endstr(self):
        """Same as test_audit_key_record_start_end but with natural-language timestamps."""
        key = test_data.random_string()
        values = ["one", "two", "three"]
        record = 1002
        for value in values:
            self.client.set(key, value, record)
        start_anchor = self.get_time_anchor()
        values = [4, 5, 6]
        for value in values:
            self.client.set(key, value, record)
        end_anchor = self.get_time_anchor()
        values = [True, False]
        for value in values:
            self.client.set(key, value, record)
        start = test_data.get_elapsed_millis_string(start_anchor)
        end = test_data.get_elapsed_millis_string(end_anchor)
        audit = self.client.audit(key, record, start=start, end=end)
        assert_equal(6, len(audit))
    def test_audit_record(self):
        """Auditing a whole record returns one entry per write across all keys."""
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        key3 = test_data.random_string()
        value = "foo"
        record = 1002
        self.client.add(key1, value, record)
        self.client.add(key2, value, record)
        self.client.add(key3, value, record)
        audit = self.client.audit(record)
        assert_equal(3, len(audit))

    def test_audit_record_start(self):
        """Record audit with a start timestamp only covers later revisions."""
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        key3 = test_data.random_string()
        value = "bar"
        record = 344
        self.client.add(key1, value, record)
        self.client.add(key2, value, record)
        self.client.add(key3, value, record)
        start = self.client.time()
        self.client.remove(key1, value, record)
        self.client.remove(key2, value, record)
        self.client.remove(key3, value, record)
        audit = self.client.audit(record, start=start)
        assert_equal(3, len(audit))

    def test_audit_record_start_end(self):
        """Record audit with start/end bounds excludes revisions outside the window."""
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        key3 = test_data.random_string()
        value = "bar"
        record = 344
        self.client.add(key1, value, record)
        self.client.add(key2, value, record)
        self.client.add(key3, value, record)
        start = self.client.time()
        self.client.remove(key1, value, record)
        self.client.remove(key2, value, record)
        self.client.remove(key3, value, record)
        end = self.client.time()
        self.client.add(key1, value, record)
        self.client.add(key2, value, record)
        self.client.add(key3, value, record)
        audit = self.client.audit(record, start=start, end=end)
        assert_equal(3, len(audit))

    def test_audit_record_startstr(self):
        """Same as test_audit_record_start but with a natural-language start string."""
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        key3 = test_data.random_string()
        value = "bar"
        record = 344
        self.client.add(key1, value, record)
        self.client.add(key2, value, record)
        self.client.add(key3, value, record)
        anchor = self.get_time_anchor()
        self.client.remove(key1, value, record)
        self.client.remove(key2, value, record)
        self.client.remove(key3, value, record)
        start = test_data.get_elapsed_millis_string(anchor)
        audit = self.client.audit(record, start=start)
        assert_equal(3, len(audit))

    def test_audit_record_startstr_endstr(self):
        """Same as test_audit_record_start_end but with natural-language timestamps."""
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        key3 = test_data.random_string()
        value = "bar"
        record = 344
        self.client.add(key1, value, record)
        self.client.add(key2, value, record)
        self.client.add(key3, value, record)
        start_anchor = self.get_time_anchor()
        self.client.remove(key1, value, record)
        self.client.remove(key2, value, record)
        self.client.remove(key3, value, record)
        end_anchor = self.get_time_anchor()
        self.client.add(key1, value, record)
        self.client.add(key2, value, record)
        self.client.add(key3, value, record)
        start = test_data.get_elapsed_millis_string(start_anchor)
        end = test_data.get_elapsed_millis_string(end_anchor)
        audit = self.client.audit(record, start=start, end=end)
        assert_equal(3, len(audit))
    def test_browse_key(self):
        """Browsing a key maps every stored value to the records containing it."""
        key = test_data.random_string()
        value = 10
        self.client.add(key, value, [1, 2, 3])
        value = test_data.random_string()
        self.client.add(key, value, [10, 20, 30])
        data = self.client.browse(key)
        assert_equal([1, 2, 3], data.get(10))
        # NOTE(review): the expected record order [20, 10, 30] differs from insertion
        # order -- presumably it mirrors Mockcourse's return order; confirm intentional.
        assert_equal([20, 10, 30], data.get(value))

    def test_browse_keys(self):
        """Browsing multiple keys returns a nested key -> value -> records mapping."""
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        key3 = test_data.random_string()
        value1 = "A"
        value2 = "B"
        value3 = "C"
        record1 = 1
        record2 = 2
        record3 = 3
        self.client.add(key1, value1, record1)
        self.client.add(key2, value2, record2)
        self.client.add(key3, value3, record3)
        data = self.client.browse([key1, key2, key3])
        assert_equal({value1: [record1]}, data.get(key1))
        assert_equal({value2: [record2]}, data.get(key2))
        assert_equal({value3: [record3]}, data.get(key3))

    def test_browse_keys_time(self):
        """Browsing at a historical timestamp ignores writes made after it."""
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        key3 = test_data.random_string()
        value1 = "A"
        value2 = "B"
        value3 = "C"
        record1 = 1
        record2 = 2
        record3 = 3
        self.client.add(key1, value1, record1)
        self.client.add(key2, value2, record2)
        self.client.add(key3, value3, record3)
        time = self.client.time()
        self.client.add(key1, "Foo")
        self.client.add(key2, "Foo")
        self.client.add(key3, "Foo")
        data = self.client.browse([key1, key2, key3], time=time)
        assert_equal({value1: [record1]}, data.get(key1))
        assert_equal({value2: [record2]}, data.get(key2))
        assert_equal({value3: [record3]}, data.get(key3))

    def test_browse_key_timestr(self):
        """Browse with a natural-language timestamp string.

        NOTE(review): despite the name this browses a list of keys and makes no
        writes after the anchor, so the timestamp filter is not actually
        exercised -- verify intent.
        """
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        key3 = test_data.random_string()
        value1 = "A"
        value2 = "B"
        value3 = "C"
        record1 = 1
        record2 = 2
        record3 = 3
        self.client.add(key1, value1, record1)
        self.client.add(key2, value2, record2)
        self.client.add(key3, value3, record3)
        ts = test_data.get_elapsed_millis_string(self.get_time_anchor())
        data = self.client.browse([key1, key2, key3], time=ts)
        assert_equal({value1: [record1]}, data.get(key1))
        assert_equal({value2: [record2]}, data.get(key2))
        assert_equal({value3: [record3]}, data.get(key3))

    @ignore
    def test_browse_keys_timestr(self):
        """Browsing several keys at a natural-language timestamp ignores later writes."""
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        key3 = test_data.random_string()
        value1 = "A"
        value2 = "B"
        value3 = "C"
        record1 = 1
        record2 = 2
        record3 = 3
        self.client.add(key1, value1, record1)
        self.client.add(key2, value2, record2)
        self.client.add(key3, value3, record3)
        anchor = self.get_time_anchor()
        self.client.add(key1, "D", record1)
        ts = test_data.get_elapsed_millis_string(anchor)
        data = self.client.browse([key1, key2, key3], time=ts)
        assert_equal({value1: [record1]}, data.get(key1))
        assert_equal({value2: [record2]}, data.get(key2))
        assert_equal({value3: [record3]}, data.get(key3))

    def test_browse_key_time(self):
        """Browsing a single key at a historical timestamp ignores later writes."""
        key = test_data.random_string()
        value = 10
        self.client.add(key, value, [1, 2, 3])
        value = test_data.random_string()
        self.client.add(key, value, [10, 20, 30])
        timestamp = self.client.time()
        self.client.add(key=key, value=True)
        data = self.client.browse(key, timestamp)
        assert_equal([1, 2, 3], data.get(10))
        # NOTE(review): same non-insertion ordering as test_browse_key -- confirm.
        assert_equal([20, 10, 30], data.get(value))
    def test_chronologize_key_record(self):
        """chronologize returns a timeline of value snapshots for the key/record."""
        key = test_data.random_string()
        record = test_data.random_long()
        self.client.add(key, 1, record)
        self.client.add(key, 2, record)
        self.client.add(key, 3, record)
        self.client.remove(key, 1, record)
        self.client.remove(key, 2, record)
        self.client.remove(key, 3, record)
        data = self.client.chronologize(key, record)
        assert_equal([[1], [1, 2], [1, 2, 3], [2, 3], [3]], list(data.values()))

    def test_chronologize_key_record_start(self):
        """A start timestamp limits the timeline to snapshots at/after that time."""
        key = test_data.random_string()
        record = test_data.random_long()
        self.client.add(key, 1, record)
        self.client.add(key, 2, record)
        self.client.add(key, 3, record)
        start = self.client.time()
        self.client.remove(key, 1, record)
        self.client.remove(key, 2, record)
        self.client.remove(key, 3, record)
        data = self.client.chronologize(key, record, time=start)
        assert_equal([[2, 3], [3]], list(data.values()))

    def test_chronologize_key_record_start_end(self):
        """Start and end bounds limit the timeline to the enclosed snapshots."""
        key = test_data.random_string()
        record = test_data.random_long()
        self.client.add(key, 1, record)
        self.client.add(key, 2, record)
        self.client.add(key, 3, record)
        start = self.client.time()
        self.client.remove(key, 1, record)
        end = self.client.time()
        self.client.remove(key, 2, record)
        self.client.remove(key, 3, record)
        # NOTE(review): this uses timestamp= while the *_start variant uses time= --
        # presumably both are accepted aliases on the client; confirm.
        data = self.client.chronologize(key, record, timestamp=start, end=end)
        assert_equal([[2, 3]], list(data.values()))

    def test_chronologize_key_record_startstr(self):
        """Same as the start variant but with a natural-language start string."""
        key = test_data.random_string()
        record = test_data.random_long()
        self.client.add(key, 1, record)
        self.client.add(key, 2, record)
        self.client.add(key, 3, record)
        anchor = self.get_time_anchor()
        self.client.remove(key, 1, record)
        self.client.remove(key, 2, record)
        self.client.remove(key, 3, record)
        start = test_data.get_elapsed_millis_string(anchor)
        data = self.client.chronologize(key, record, time=start)
        assert_equal([[2, 3], [3]], list(data.values()))

    def test_chronologize_key_record_startstr_endstr(self):
        """Same as the start/end variant but with natural-language timestamps."""
        key = test_data.random_string()
        record = test_data.random_long()
        self.client.add(key, 1, record)
        self.client.add(key, 2, record)
        self.client.add(key, 3, record)
        start_anchor = self.get_time_anchor()
        self.client.remove(key, 1, record)
        end_anchor = self.get_time_anchor()
        self.client.remove(key, 2, record)
        self.client.remove(key, 3, record)
        start = test_data.get_elapsed_millis_string(start_anchor)
        end = test_data.get_elapsed_millis_string(end_anchor)
        data = self.client.chronologize(key, record, timestamp=start, end=end)
        assert_equal([[2, 3]], list(data.values()))
    def test_clear_key_record(self):
        """clear(key, record) removes every value stored under that key/record."""
        key = test_data.random_string()
        record = test_data.random_long()
        self.client.add(key, 1, record)
        self.client.add(key, 2, record)
        self.client.add(key, 3, record)
        self.client.clear(key=key, record=record)
        data = self.client.select(key=key, record=record)
        assert_equal([], data)

    def test_clear_key_records(self):
        """clear(key, records) empties the key across all the given records."""
        key = test_data.random_string()
        records = [1, 2, 3]
        self.client.add(key, 1, records)
        self.client.add(key, 2, records)
        self.client.add(key, 3, records)
        self.client.clear(key=key, records=records)
        data = self.client.select(key=key, records=records)
        assert_equal({}, data)

    def test_clear_keys_record(self):
        """clear(keys, record) empties every given key in the record."""
        key1 = test_data.random_string(6)
        key2 = test_data.random_string(7)
        key3 = test_data.random_string(8)
        record = test_data.random_long()
        self.client.add(key1, 1, record)
        self.client.add(key2, 2, record)
        self.client.add(key3, 3, record)
        self.client.clear(keys=[key1, key2, key3], record=record)
        data = self.client.select(keys=[key1, key2, key3], record=record)
        assert_equal({}, data)

    def test_clear_keys_records(self):
        """clear(keys, records) empties only the given keys, leaving others intact."""
        data = {
            'a': 'A',
            'b': 'B',
            'c': ['C', True],
            'd': 'D'
        }
        records = [1, 2, 3]
        self.client.insert(data=data, records=records)
        self.client.clear(keys=['a', 'b', 'c'], records=records)
        # Key 'd' was not cleared and must survive in every record.
        data = self.client.get(key='d', records=records)
        assert_equal({
            1: 'D',
            2: 'D',
            3: 'D'
        }, data)

    def test_clear_record(self):
        """clear(record) empties every key in the record."""
        data = {
            'a': 'A',
            'b': 'B',
            'c': ['C', True]
        }
        record = next(iter(self.client.insert(data)))
        self.client.clear(record=record)
        data = self.client.select(record=record)
        assert_equal({}, data)

    def test_clear_records(self):
        """clear(records) empties every key in each of the given records."""
        data = {
            'a': 'A',
            'b': 'B',
            'c': ['C', True],
            'd': 'D'
        }
        records = [1, 2, 3]
        self.client.insert(data=data, records=records)
        self.client.clear(records=records)
        data = self.client.select(records=records)
        assert_equal({1: {}, 2: {}, 3: {}}, data)
    def test_commit(self):
        """A staged write becomes visible once the transaction is committed."""
        self.client.stage()
        record = self.client.add("name", "jeff nelson")
        self.client.commit()
        assert_equal(['name'], list(self.client.describe(record)))

    def test_describe_record(self):
        """describe(record) lists the keys with data in the record."""
        self.client.set('name', 'tom brady', 1)
        self.client.set('age', 100, 1)
        self.client.set('team', 'new england patriots', 1)
        keys = self.client.describe(1)
        assert_equals(['age', 'name', 'team'], keys)

    def test_describe_record_time(self):
        """describe at a historical timestamp includes keys cleared afterwards."""
        self.client.set('name', 'tom brady', 1)
        self.client.set('age', 100, 1)
        self.client.set('team', 'new england patriots', 1)
        timestamp = self.client.time()
        self.client.clear('name', 1)
        keys = self.client.describe(1, time=timestamp)
        assert_equals(['age', 'name', 'team'], keys)

    def test_describe_record_timestr(self):
        """Same as test_describe_record_time but with a natural-language timestamp."""
        self.client.set('name', 'tom brady', 1)
        self.client.set('age', 100, 1)
        self.client.set('team', 'new england patriots', 1)
        anchor = self.get_time_anchor()
        self.client.clear('name', 1)
        timestamp = test_data.get_elapsed_millis_string(anchor)
        keys = self.client.describe(1, time=timestamp)
        assert_equals(['age', 'name', 'team'], keys)

    def test_describe_records(self):
        """describe(records) returns the key set of each record."""
        records = [1, 2, 3]
        self.client.set('name', 'tom brady', records)
        self.client.set('age', 100, records)
        self.client.set('team', 'new england patriots', records)
        keys = self.client.describe(records)
        assert_equals(['age', 'name', 'team'], keys[1])
        assert_equals(['age', 'name', 'team'], keys[2])
        assert_equals(['age', 'name', 'team'], keys[3])

    def test_describe_records_time(self):
        """describe(records) at a historical timestamp includes keys cleared afterwards."""
        records = [1, 2, 3]
        self.client.set('name', 'tom brady', records)
        self.client.set('age', 100, records)
        self.client.set('team', 'new england patriots', records)
        timestamp = self.client.time()
        self.client.clear(records=records)
        keys = self.client.describe(records, timestamp=timestamp)
        assert_equals(['age', 'name', 'team'], keys[1])
        assert_equals(['age', 'name', 'team'], keys[2])
        assert_equals(['age', 'name', 'team'], keys[3])

    def test_describe_records_timestr(self):
        """Same as test_describe_records_time but with a natural-language timestamp."""
        records = [1, 2, 3]
        self.client.set('name', 'tom brady', records)
        self.client.set('age', 100, records)
        self.client.set('team', 'new england patriots', records)
        anchor = self.get_time_anchor()
        self.client.clear(records=records)
        timestamp = test_data.get_elapsed_millis_string(anchor)
        keys = self.client.describe(records, timestamp=timestamp)
        assert_equals(['age', 'name', 'team'], keys[1])
        assert_equals(['age', 'name', 'team'], keys[2])
        assert_equals(['age', 'name', 'team'], keys[3])
    def test_diff_key_record_start(self):
        """diff(key, record, start) reports values added and removed since start."""
        key = test_data.random_string()
        record = test_data.random_long()
        self.client.add(key, 1, record)
        start = self.client.time()
        self.client.add(key, 2, record)
        self.client.remove(key, 1, record)
        diff = self.client.diff(key, record, start)
        assert_equal([2], diff.get(Diff.ADDED))
        assert_equal([1], diff.get(Diff.REMOVED))

    def test_diff_key_record_startstr(self):
        """Same as test_diff_key_record_start but with a natural-language start string."""
        key = test_data.random_string()
        record = test_data.random_long()
        self.client.add(key, 1, record)
        anchor = self.get_time_anchor()
        self.client.add(key, 2, record)
        self.client.remove(key, 1, record)
        start = test_data.get_elapsed_millis_string(anchor)
        diff = self.client.diff(key, record, start)
        assert_equal([2], diff.get(Diff.ADDED))
        assert_equal([1], diff.get(Diff.REMOVED))

    def test_diff_key_record_start_end(self):
        """diff with start and end bounds ignores changes made after end."""
        key = test_data.random_string()
        record = test_data.random_long()
        self.client.add(key, 1, record)
        start = self.client.time()
        self.client.add(key, 2, record)
        self.client.remove(key, 1, record)
        end = self.client.time()
        self.client.set(key, 3, record)
        diff = self.client.diff(key, record, start, end)
        assert_equal([2], diff.get(Diff.ADDED))
        assert_equal([1], diff.get(Diff.REMOVED))

    def test_diff_key_record_startstr_endstr(self):
        """Same as test_diff_key_record_start_end but with natural-language timestamps."""
        key = test_data.random_string()
        record = test_data.random_long()
        self.client.add(key, 1, record)
        start_anchor = self.get_time_anchor()
        self.client.add(key, 2, record)
        self.client.remove(key, 1, record)
        end_anchor = self.get_time_anchor()
        self.client.set(key, 3, record)
        start = test_data.get_elapsed_millis_string(start_anchor)
        end = test_data.get_elapsed_millis_string(end_anchor)
        diff = self.client.diff(key, record, start, end)
        assert_equal([2], diff.get(Diff.ADDED))
        assert_equal([1], diff.get(Diff.REMOVED))
    def test_diff_key_start(self):
        """diff(key, start) maps each changed record to its added/removed values."""
        key = test_data.random_string()
        self.client.add(key=key, value=1, record=1)
        start = self.client.time()
        self.client.add(key=key, value=2, record=1)
        self.client.add(key=key, value=1, record=2)
        self.client.add(key=key, value=3, record=3)
        self.client.remove(key=key, value=1, record=2)
        diff = self.client.diff(key=key, start=start)
        # Record 2's add+remove cancel out, so only records 2 and 3 appear with net
        # additions (record 1's state change is keyed separately below).
        assert_equal(2, len(diff.keys()))
        diff2 = diff.get(2)
        diff3 = diff.get(3)
        assert_equal([1], diff2.get(Diff.ADDED))
        assert_equal([3], diff3.get(Diff.ADDED))
        assert_is_none(diff2.get(Diff.REMOVED))
        assert_is_none(diff3.get(Diff.REMOVED))

    def test_diff_key_startstr(self):
        """Same as test_diff_key_start but with a natural-language start string."""
        key = test_data.random_string()
        self.client.add(key=key, value=1, record=1)
        anchor = self.get_time_anchor()
        self.client.add(key=key, value=2, record=1)
        self.client.add(key=key, value=1, record=2)
        self.client.add(key=key, value=3, record=3)
        self.client.remove(key=key, value=1, record=2)
        start = test_data.get_elapsed_millis_string(anchor)
        diff = self.client.diff(key=key, start=start)
        assert_equal(2, len(diff.keys()))
        diff2 = diff.get(2)
        diff3 = diff.get(3)
        assert_equal([1], diff2.get(Diff.ADDED))
        assert_equal([3], diff3.get(Diff.ADDED))
        assert_is_none(diff2.get(Diff.REMOVED))
        assert_is_none(diff3.get(Diff.REMOVED))

    def test_diff_key_start_end(self):
        """diff(key, start, end) ignores changes made after end."""
        key = test_data.random_string()
        self.client.add(key=key, value=1, record=1)
        start = self.client.time()
        self.client.add(key=key, value=2, record=1)
        self.client.add(key=key, value=1, record=2)
        self.client.add(key=key, value=3, record=3)
        self.client.remove(key=key, value=1, record=2)
        end = self.client.time()
        self.client.add(key=key, value=4, record=1)
        diff = self.client.diff(key=key, start=start, end=end)
        assert_equal(2, len(diff.keys()))
        diff2 = diff.get(2)
        diff3 = diff.get(3)
        assert_equal([1], diff2.get(Diff.ADDED))
        assert_equal([3], diff3.get(Diff.ADDED))
        assert_is_none(diff2.get(Diff.REMOVED))
        assert_is_none(diff3.get(Diff.REMOVED))

    def test_diff_key_startstr_endstr(self):
        """Same as test_diff_key_start_end but with natural-language timestamps."""
        key = test_data.random_string()
        self.client.add(key=key, value=1, record=1)
        start_anchor = self.get_time_anchor()
        self.client.add(key=key, value=2, record=1)
        self.client.add(key=key, value=1, record=2)
        self.client.add(key=key, value=3, record=3)
        self.client.remove(key=key, value=1, record=2)
        end_anchor = self.get_time_anchor()
        self.client.add(key=key, value=4, record=1)
        start = test_data.get_elapsed_millis_string(start_anchor)
        end = test_data.get_elapsed_millis_string(end_anchor)
        diff = self.client.diff(key=key, start=start, end=end)
        assert_equal(2, len(diff.keys()))
        diff2 = diff.get(2)
        diff3 = diff.get(3)
        assert_equal([1], diff2.get(Diff.ADDED))
        assert_equal([3], diff3.get(Diff.ADDED))
        assert_is_none(diff2.get(Diff.REMOVED))
        assert_is_none(diff3.get(Diff.REMOVED))
def test_diff_record_start(self):
self.client.add(key="foo", value=1, record=1)
start = self.client.time()
self.client.set(key="foo", value=2, record=1)
self.client.add(key="bar", value=True, record=1)
diff = self.client.diff(record=1, time=start)
assert_equal([1], diff.get('foo').get(Diff.REMOVED))
assert_equal([2], diff.get('foo').get(Diff.ADDED))
assert_equal([True], diff.get('bar').get(Diff.ADDED))
    def test_diff_record_startstr(self):
        """Same as test_diff_record_start but with a string timestamp."""
        self.client.add(key="foo", value=1, record=1)
        anchor = self.get_time_anchor()
        self.client.set(key="foo", value=2, record=1)
        self.client.add(key="bar", value=True, record=1)
        start = test_data.get_elapsed_millis_string(anchor)
        diff = self.client.diff(record=1, time=start)
        assert_equal([1], diff.get('foo').get(Diff.REMOVED))
        assert_equal([2], diff.get('foo').get(Diff.ADDED))
        assert_equal([True], diff.get('bar').get(Diff.ADDED))
    def test_diff_record_start_end(self):
        """Diff over a record within [start, end] excludes the later write to
        'car' made after `end`."""
        self.client.add(key="foo", value=1, record=1)
        start = self.client.time()
        self.client.set(key="foo", value=2, record=1)
        self.client.add(key="bar", value=True, record=1)
        end = self.client.time()
        self.client.set(key="car", value=100, record=1)
        diff = self.client.diff(record=1, time=start, end=end)
        assert_equal([1], diff.get('foo').get(Diff.REMOVED))
        assert_equal([2], diff.get('foo').get(Diff.ADDED))
        assert_equal([True], diff.get('bar').get(Diff.ADDED))
    def test_diff_record_startstr_endstr(self):
        """Same as test_diff_record_start_end but with string timestamps."""
        self.client.add(key="foo", value=1, record=1)
        start_anchor = self.get_time_anchor()
        self.client.set(key="foo", value=2, record=1)
        self.client.add(key="bar", value=True, record=1)
        end_anchor = self.get_time_anchor()
        self.client.set(key="car", value=100, record=1)
        start = test_data.get_elapsed_millis_string(start_anchor)
        end = test_data.get_elapsed_millis_string(end_anchor)
        diff = self.client.diff(record=1, time=start, end=end)
        assert_equal([1], diff.get('foo').get(Diff.REMOVED))
        assert_equal([2], diff.get('foo').get(Diff.ADDED))
        assert_equal([True], diff.get('bar').get(Diff.ADDED))
    def test_find_ccl(self):
        """find() with a raw CCL string returns the records matching the
        criteria (value n stored in record n, so `> 3` yields records 4-9)."""
        key = test_data.random_string()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n)
        records = list(self.client.find(key+' > 3'))
        assert_equal(list(range(4, 10)), records)
    @raises(Exception)
    def test_find_ccl_handle_parse_exception(self):
        """An unparsable CCL string raises an exception client-side."""
        self.client.find(ccl="throw parse exception")
    def test_find_key_operator_value(self):
        """find() with an Operator enum and a single value."""
        key = test_data.random_string()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n)
        records = list(self.client.find(key=key, operator=Operator.GREATER_THAN, value=3))
        assert_equal(list(range(4, 10)), records)
    def test_find_key_operator_values(self):
        """find() with BETWEEN takes two values; the range is [low, high)."""
        key = test_data.random_string()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n)
        records = list(self.client.find(key=key, operator=Operator.BETWEEN, values=[3, 6]))
        assert_equal([3, 4, 5], records)
    def test_find_key_operator_value_time(self):
        """Historical find() evaluates the criteria as of `time`, ignoring the
        second round of writes."""
        key = test_data.random_string()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n)
        ts = self.client.time()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n+1)
        records = list(self.client.find(key=key, operator=Operator.GREATER_THAN, value=3, time=ts))
        assert_equal(list(range(4, 10)), records)
    def test_find_key_operator_value_timestr(self):
        """Same as test_find_key_operator_value_time but with a string timestamp."""
        key = test_data.random_string()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n)
        anchor = self.get_time_anchor()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n+1)
        ts = test_data.get_elapsed_millis_string(anchor)
        records = list(self.client.find(key=key, operator=Operator.GREATER_THAN, value=3, time=ts))
        assert_equal(list(range(4, 10)), records)
    def test_find_key_operator_values_time(self):
        """Historical find() with BETWEEN and an Operator enum."""
        key = test_data.random_string()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n)
        ts = self.client.time()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n+1)
        records = list(self.client.find(key=key, operator=Operator.BETWEEN, values=[3, 6], time=ts))
        assert_equal([3, 4, 5], records)
    def test_find_key_operator_values_timestr(self):
        """Historical find() with BETWEEN and a string timestamp."""
        key = test_data.random_string()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n)
        anchor = self.get_time_anchor()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n+1)
        ts = test_data.get_elapsed_millis_string(anchor)
        records = list(self.client.find(key=key, operator=Operator.BETWEEN, values=[3, 6], time=ts))
        assert_equal([3, 4, 5], records)
    def test_find_key_operatorstr_values_time(self):
        """Operators may also be given as short strings ('bw' == BETWEEN)."""
        key = test_data.random_string()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n)
        ts = self.client.time()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n+1)
        records = list(self.client.find(key=key, operator="bw", values=[3, 6], time=ts))
        assert_equal([3, 4, 5], records)
    def test_find_key_operatorstr_values(self):
        """find() with a string operator ('bw' == BETWEEN) and two values."""
        key = test_data.random_string()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n)
        records = list(self.client.find(key=key, operator="bw", values=[3, 6]))
        assert_equal([3, 4, 5], records)
    def test_find_key_operatorstr_values_timestr(self):
        """String operator plus string timestamp variant of the BETWEEN find."""
        key = test_data.random_string()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n)
        anchor = self.get_time_anchor()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n+1)
        ts = test_data.get_elapsed_millis_string(anchor)
        records = list(self.client.find(key=key, operator="bw", values=[3, 6], time=ts))
        assert_equal([3, 4, 5], records)
    def test_find_key_operatorstr_value(self):
        """find() with a string operator ('gt' == GREATER_THAN)."""
        key = test_data.random_string()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n)
        records = list(self.client.find(key=key, operator="gt", value=3))
        assert_equal(list(range(4, 10)), records)
    def test_find_key_operatorstr_value_time(self):
        """Historical find() with a string operator."""
        key = test_data.random_string()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n)
        ts = self.client.time()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n+1)
        records = list(self.client.find(key=key, operator="gt", value=3, time=ts))
        assert_equal(list(range(4, 10)), records)
    def test_find_key_operatorstr_value_timestr(self):
        """Historical find() with a string operator and string timestamp."""
        key = test_data.random_string()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n)
        anchor = self.get_time_anchor()
        for n in range(0, 10):
            self.client.add(key=key, value=n, record=n+1)
        ts = test_data.get_elapsed_millis_string(anchor)
        records = list(self.client.find(key=key, operator="gt", value=3, time=ts))
        assert_equal(list(range(4, 10)), records)
    def test_get_ccl(self):
        """get() with only CCL returns, per matching record, the single most
        recently added value for every key in that record."""
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        record1 = test_data.random_long()
        record2 = test_data.random_long()
        self.client.add(key=key1, value=1, records=[record1, record2])
        self.client.add(key=key1, value=2, records=[record1, record2])
        self.client.add(key=key1, value=3, records=[record1, record2])
        self.client.add(key=key2, value=10, records=[record1, record2])
        ccl = key2 + ' = 10'
        data = self.client.get(ccl=ccl)
        expected = {
            key1: 3,
            key2: 10
        }
        assert_equal(data.get(record1), expected)
        assert_equal(data.get(record2), expected)
    def test_get_ccl_time(self):
        """Historical get() evaluates both the CCL and the returned values as
        of `ts`, before key2 was overwritten to 11."""
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        record1 = test_data.random_long()
        record2 = test_data.random_long()
        self.client.add(key=key1, value=1, records=[record1, record2])
        self.client.add(key=key1, value=2, records=[record1, record2])
        self.client.add(key=key1, value=3, records=[record1, record2])
        self.client.add(key=key2, value=10, records=[record1, record2])
        ts = self.client.time()
        self.client.set(key=key2, value=11, records=[record1, record2])
        ccl = key2 + ' > 10'
        data = self.client.get(ccl=ccl, time=ts)
        expected = {
            key1: 3,
            key2: 10
        }
        assert_equal(data.get(record1), expected)
        assert_equal(data.get(record2), expected)
    def test_get_ccl_timestr(self):
        """Same as test_get_ccl_time but with a string timestamp."""
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        record1 = test_data.random_long()
        record2 = test_data.random_long()
        self.client.add(key=key1, value=1, records=[record1, record2])
        self.client.add(key=key1, value=2, records=[record1, record2])
        self.client.add(key=key1, value=3, records=[record1, record2])
        self.client.add(key=key2, value=10, records=[record1, record2])
        anchor = self.get_time_anchor()
        self.client.set(key=key2, value=11, records=[record1, record2])
        ccl = key2 + ' > 10'
        ts = test_data.get_elapsed_millis_string(anchor)
        data = self.client.get(ccl=ccl, time=ts)
        expected = {
            key1: 3,
            key2: 10
        }
        assert_equal(data.get(record1), expected)
        assert_equal(data.get(record2), expected)
    def test_get_key_ccl(self):
        """get() with key + CCL maps each matching record to the latest value
        of that single key."""
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        record1 = test_data.random_long()
        record2 = test_data.random_long()
        self.client.add(key=key1, value=1, records=[record1, record2])
        self.client.add(key=key1, value=2, records=[record1, record2])
        self.client.add(key=key1, value=3, records=[record1, record2])
        self.client.add(key=key2, value=10, records=[record1, record2])
        self.client.add(key=key1, value=4, record=record2)
        ccl = key2 + ' = 10'
        data = self.client.get(key=key1, ccl=ccl)
        expected = {
            record1: 3,
            record2: 4
        }
        assert_equal(expected, data)
    def test_get_keys_ccl(self):
        """get() with multiple keys + CCL maps each matching record to a dict
        of key -> latest value."""
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        record1 = test_data.random_long()
        record2 = test_data.random_long()
        self.client.add(key=key1, value=1, records=[record1, record2])
        self.client.add(key=key1, value=2, records=[record1, record2])
        self.client.add(key=key1, value=3, records=[record1, record2])
        self.client.add(key=key2, value=10, records=[record1, record2])
        self.client.add(key=key1, value=4, record=record2)
        ccl = key2 + ' = 10'
        data = self.client.get(keys=[key1, key2], ccl=ccl)
        expected = {
            record1: {key1: 3, key2: 10},
            record2: {key1: 4, key2: 10},
        }
        assert_equal(expected, data)
def test_get_key_ccl_time(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ts = self.client.time()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
data = self.client.get(key=key1, ccl=ccl, time=ts)
expected = {
record1: 3,
record2: 4
}
assert_equal(expected, data)
def test_get_keys_ccl_time(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ts = self.client.time()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
data = self.client.get(key=[key1, key2], ccl=ccl, time=ts)
expected = {
record1: {key1: 3, key2: 10},
record2: {key1: 4, key2: 10},
}
assert_equal(expected, data)
def test_get_key_ccl_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
anchor = self.get_time_anchor()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.get(key=key1, ccl=ccl, time=ts)
expected = {
record1: 3,
record2: 4
}
assert_equal(expected, data)
def test_get_keys_ccl_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
anchor = self.get_time_anchor()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.get(key=[key1, key2], ccl=ccl, time=ts)
expected = {
record1: {key1: 3, key2: 10},
record2: {key1: 4, key2: 10},
}
assert_equal(expected, data)
    def test_get_key_record(self):
        """get(key, record) returns the single most recently added value."""
        self.client.add('foo', 1, 1)
        self.client.add('foo', 2, 1)
        self.client.add('foo', 3, 1)
        assert_equal(3, self.client.get(key='foo', record=1))
    def test_get_key_record_time(self):
        """Historical get(key, record) ignores the write made after `ts`."""
        self.client.add('foo', 1, 1)
        self.client.add('foo', 2, 1)
        self.client.add('foo', 3, 1)
        ts = self.client.time()
        self.client.add('foo', 4, 1)
        assert_equal(3, self.client.get(key='foo', record=1, time=ts))
    def test_get_key_record_timestr(self):
        """Same as test_get_key_record_time but with a string timestamp."""
        self.client.add('foo', 1, 1)
        self.client.add('foo', 2, 1)
        self.client.add('foo', 3, 1)
        anchor = self.get_time_anchor()
        self.client.add('foo', 4, 1)
        ts = test_data.get_elapsed_millis_string(anchor)
        assert_equal(3, self.client.get(key='foo', record=1, time=ts))
def test_get_key_records(self):
self.client.add('foo', 1, [1, 2, 3])
self.client.add('foo', 2, [1, 2, 3])
self.client.add('foo', 3, [1, 2, 3])
assert_equal({
1: 3,
2: 3,
3: 3
}, self.client.get(key='foo', record=[1, 2, 3]))
def test_get_key_records_time(self):
self.client.add('foo', 1, [1, 2, 3])
self.client.add('foo', 2, [1, 2, 3])
self.client.add('foo', 3, [1, 2, 3])
ts = self.client.time()
self.client.add('foo', 4, [1, 2, 3])
assert_equal({
1: 3,
2: 3,
3: 3
}, self.client.get(key='foo', record=[1, 2, 3], time=ts))
def test_get_key_records_timestr(self):
self.client.add('foo', 1, [1, 2, 3])
self.client.add('foo', 2, [1, 2, 3])
self.client.add('foo', 3, [1, 2, 3])
anchor = self.get_time_anchor()
self.client.add('foo', 4, [1, 2, 3])
ts = test_data.get_elapsed_millis_string(anchor)
assert_equal({
1: 3,
2: 3,
3: 3
}, self.client.get(key='foo', record=[1, 2, 3], time=ts))
    def test_get_keys_record(self):
        """get(keys, record) maps each key to its latest value in the record."""
        self.client.add('foo', 1, 1)
        self.client.add('foo', 2, 1)
        self.client.add('bar', 1, 1)
        self.client.add('bar', 2, 1)
        data = self.client.get(keys=['foo', 'bar'], record=1)
        expected = {
            'foo': 2,
            'bar': 2
        }
        assert_equal(expected, data)
    def test_get_keys_record_time(self):
        """Historical get(keys, record) ignores writes made after `ts`."""
        self.client.add('foo', 1, 1)
        self.client.add('foo', 2, 1)
        self.client.add('bar', 1, 1)
        self.client.add('bar', 2, 1)
        ts = self.client.time()
        self.client.add('foo', 3, 1)
        self.client.add('bar', 3, 1)
        data = self.client.get(keys=['foo', 'bar'], record=1, time=ts)
        expected = {
            'foo': 2,
            'bar': 2
        }
        assert_equal(expected, data)
    def test_get_keys_record_timestr(self):
        """Same as test_get_keys_record_time but with a string timestamp."""
        self.client.add('foo', 1, 1)
        self.client.add('foo', 2, 1)
        self.client.add('bar', 1, 1)
        self.client.add('bar', 2, 1)
        anchor = self.get_time_anchor()
        self.client.add('foo', 3, 1)
        self.client.add('bar', 3, 1)
        ts = test_data.get_elapsed_millis_string(anchor)
        data = self.client.get(keys=['foo', 'bar'], record=1, time=ts)
        expected = {
            'foo': 2,
            'bar': 2
        }
        assert_equal(expected, data)
    def test_get_keys_records_time(self):
        """Historical get(keys, records) returns record -> {key -> value},
        ignoring writes made after `ts`."""
        self.client.add('foo', 1, [1, 2])
        self.client.add('foo', 2, [1, 2])
        self.client.add('bar', 1, [1, 2])
        self.client.add('bar', 2, [1, 2])
        ts = self.client.time()
        self.client.add('foo', 3, [1, 2])
        self.client.add('bar', 3, [1, 2])
        data = self.client.get(keys=['foo', 'bar'], records=[1, 2], time=ts)
        expected = {
            'foo': 2,
            'bar': 2
        }
        assert_equal({
            1: expected,
            2: expected
        }, data)
    def test_get_keys_records_timestr(self):
        """Same as test_get_keys_records_time but with a string timestamp."""
        self.client.add('foo', 1, [1, 2])
        self.client.add('foo', 2, [1, 2])
        self.client.add('bar', 1, [1, 2])
        self.client.add('bar', 2, [1, 2])
        anchor = self.get_time_anchor()
        self.client.add('foo', 3, [1, 2])
        self.client.add('bar', 3, [1, 2])
        ts = test_data.get_elapsed_millis_string(anchor)
        data = self.client.get(keys=['foo', 'bar'], records=[1, 2], time=ts)
        expected = {
            'foo': 2,
            'bar': 2
        }
        assert_equal({
            1: expected,
            2: expected
        }, data)
    def test_get_keys_records(self):
        """get(keys, records) returns record -> {key -> latest value}."""
        self.client.add('foo', 1, [1, 2])
        self.client.add('foo', 2, [1, 2])
        self.client.add('bar', 1, [1, 2])
        self.client.add('bar', 2, [1, 2])
        data = self.client.get(keys=['foo', 'bar'], records=[1, 2])
        expected = {
            'foo': 2,
            'bar': 2
        }
        assert_equal({
            1: expected,
            2: expected
        }, data)
    def test_insert_dict(self):
        """insert(dict) creates a new record; list values become multi-valued
        keys. insert() returns a list of created record ids."""
        data = {
            'string': 'a',
            'int': 1,
            'double': 3.14,
            'bool': True,
            'multi': ['a', 1, 3.14, True]
        }
        record = self.client.insert(data=data)[0]
        assert_equal('a', self.client.get(key='string', record=record))
        assert_equal(1, self.client.get(key='int', record=record))
        assert_equal(3.14, self.client.get(key='double', record=record))
        assert_equal(True, self.client.get(key='bool', record=record))
        assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record))
    def test_insert_dicts(self):
        """insert(list-of-dicts) creates one record per dict."""
        data = [
            {
                'foo': 1
            },
            {
                'foo': 2
            },
            {
                'foo': 3
            }
        ]
        records = self.client.insert(data=data)
        assert_equal(len(data), len(records))
    def test_insert_json(self):
        """insert() also accepts a JSON string with the same semantics as a
        dict payload."""
        data = {
            'string': 'a',
            'int': 1,
            'double': 3.14,
            'bool': True,
            'multi': ['a', 1, 3.14, True]
        }
        data = ujson.dumps(data)
        record = self.client.insert(data=data)[0]
        assert_equal('a', self.client.get(key='string', record=record))
        assert_equal(1, self.client.get(key='int', record=record))
        assert_equal(3.14, self.client.get(key='double', record=record))
        assert_equal(True, self.client.get(key='bool', record=record))
        assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record))
    def test_insert_json_list(self):
        """insert() with a JSON array creates one record per element."""
        data = [
            {
                'foo': 1
            },
            {
                'foo': 2
            },
            {
                'foo': 3
            }
        ]
        count = len(data)
        data = ujson.dumps(data)
        records = self.client.insert(data=data)
        assert_equal(count, len(records))
    def test_insert_dict_record(self):
        """insert(dict, record) writes into an existing record id and returns
        a truthy success flag."""
        record = test_data.random_long()
        data = {
            'string': 'a',
            'int': 1,
            'double': 3.14,
            'bool': True,
            'multi': ['a', 1, 3.14, True]
        }
        result = self.client.insert(data=data, record=record)
        assert_true(result)
        assert_equal('a', self.client.get(key='string', record=record))
        assert_equal(1, self.client.get(key='int', record=record))
        assert_equal(3.14, self.client.get(key='double', record=record))
        assert_equal(True, self.client.get(key='bool', record=record))
        assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record))
    def test_insert_json_record(self):
        """insert(json, record) writes into an existing record id."""
        record = test_data.random_long()
        data = {
            'string': 'a',
            'int': 1,
            'double': 3.14,
            'bool': True,
            'multi': ['a', 1, 3.14, True]
        }
        data = ujson.dumps(data)
        result = self.client.insert(data=data, record=record)
        assert_true(result)
        assert_equal('a', self.client.get(key='string', record=record))
        assert_equal(1, self.client.get(key='int', record=record))
        assert_equal(3.14, self.client.get(key='double', record=record))
        assert_equal(True, self.client.get(key='bool', record=record))
        assert_equal(['a', 1, 3.14, True], self.client.select(key='multi', record=record))
def test_insert_dict_records(self):
record1 = test_data.random_long()
record2 = test_data.random_long()
record3 = test_data.random_long()
data = {
'string': 'a',
'int': 1,
'double': 3.14,
'bool': True,
'multi': ['a', 1, 3.14, True]
}
result = self.client.insert(data=data, records=[record1, record2, record3])
assert_true({
record1: True,
record2: True,
record3: True
}, result)
def test_insert_json_records(self):
record1 = test_data.random_long()
record2 = test_data.random_long()
record3 = test_data.random_long()
data = {
'string': 'a',
'int': 1,
'double': 3.14,
'bool': True,
'multi': ['a', 1, 3.14, True]
}
data = ujson.dumps(data)
result = self.client.insert(data=data, records=[record1, record2, record3])
assert_true({
record1: True,
record2: True,
record3: True
}, result)
    def test_inventory(self):
        """inventory() lists every record that has ever held data."""
        records = [1, 2, 3, 4, 5, 6, 7]
        self.client.add(key='foo', value=17, records=records)
        assert_equal(records, self.client.inventory())
    def test_jsonify_records(self):
        """jsonify(records) dumps each record as JSON; scalar keys come back
        as single-element lists."""
        record1 = 1
        record2 = 2
        data = {
            'int': 1,
            'multi': [1, 2, 3, 4]
        }
        self.client.insert(data=data, records=[record1, record2])
        dump = self.client.jsonify(records=[record1, record2])
        data = {
            'int': [1],
            'multi': [1, 2, 3, 4]
        }
        assert_equal([data, data], ujson.loads(dump))
    def test_jsonify_records_identifier(self):
        """jsonify(..., id=True) embeds each record's id under the reserved
        identifier key."""
        record1 = 1
        record2 = 2
        data = {
            'int': 1,
            'multi': [1, 2, 3, 4]
        }
        self.client.insert(data=data, records=[record1, record2])
        dump = self.client.jsonify(records=[record1, record2], id=True)
        data1 = {
            'int': [1],
            'multi': [1, 2, 3, 4],
            constants.JSON_RESERVED_IDENTIFIER_NAME: 1
        }
        data2 = {
            'int': [1],
            'multi': [1, 2, 3, 4],
            constants.JSON_RESERVED_IDENTIFIER_NAME: 2
        }
        assert_equal([data1, data2], ujson.loads(dump))
    def test_jsonify_records_time(self):
        """Historical jsonify() excludes the 'foo' key added after `ts`."""
        record1 = 1
        record2 = 2
        data = {
            'int': 1,
            'multi': [1, 2, 3, 4]
        }
        self.client.insert(data=data, records=[record1, record2])
        ts = self.client.time()
        self.client.add('foo', 10, [record1, record2])
        dump = self.client.jsonify(records=[record1, record2], time=ts)
        data = {
            'int': [1],
            'multi': [1, 2, 3, 4]
        }
        assert_equal([data, data], ujson.loads(dump))
    @ignore
    def test_jsonify_records_timestr(self):
        """Same as test_jsonify_records_time with a string timestamp.
        Currently skipped via @ignore — reason not stated here; confirm
        before re-enabling."""
        record1 = 1
        record2 = 2
        data = {
            'int': 1,
            'multi': [1, 2, 3, 4]
        }
        self.client.insert(data=data, records=[record1, record2])
        anchor = self.get_time_anchor()
        self.client.add('foo', 10, [record1, record2])
        ts = test_data.get_elapsed_millis_string(anchor)
        dump = self.client.jsonify(records=[record1, record2], time=ts)
        data = {
            'int': [1],
            'multi': [1, 2, 3, 4]
        }
        assert_equal([data, data], ujson.loads(dump))
    def test_jsonify_records_identifier_time(self):
        """Historical jsonify() with ids: embeds record ids and excludes the
        post-timestamp write of 'foo'."""
        record1 = 1
        record2 = 2
        data = {
            'int': 1,
            'multi': [1, 2, 3, 4]
        }
        self.client.insert(data=data, records=[record1, record2])
        ts = self.client.time()
        self.client.add(key='foo', value=True, records=[record1, record2])
        dump = self.client.jsonify(records=[record1, record2], id=True, time=ts)
        data1 = {
            'int': [1],
            'multi': [1, 2, 3, 4],
            constants.JSON_RESERVED_IDENTIFIER_NAME: 1
        }
        data2 = {
            'int': [1],
            'multi': [1, 2, 3, 4],
            constants.JSON_RESERVED_IDENTIFIER_NAME: 2
        }
        assert_equal([data1, data2], ujson.loads(dump))
    def test_jsonify_records_identifier_timestr(self):
        """Same as test_jsonify_records_identifier_time but with a string
        timestamp."""
        record1 = 1
        record2 = 2
        data = {
            'int': 1,
            'multi': [1, 2, 3, 4]
        }
        self.client.insert(data=data, records=[record1, record2])
        anchor = self.get_time_anchor()
        self.client.add(key='foo', value=True, records=[record1, record2])
        ts = test_data.get_elapsed_millis_string(anchor)
        dump = self.client.jsonify(records=[record1, record2], id=True, time=ts)
        data1 = {
            'int': [1],
            'multi': [1, 2, 3, 4],
            constants.JSON_RESERVED_IDENTIFIER_NAME: 1
        }
        data2 = {
            'int': [1],
            'multi': [1, 2, 3, 4],
            constants.JSON_RESERVED_IDENTIFIER_NAME: 2
        }
        assert_equal([data1, data2], ujson.loads(dump))
    def test_ping_record(self):
        """ping(record) is True only while the record holds live data; a
        clear() makes it pingable-false again."""
        record = 1
        assert_false(self.client.ping(record))
        self.client.add(key='foo', value=1, record=record)
        assert_true(self.client.ping(record))
        self.client.clear(key='foo', record=record)
        assert_false(self.client.ping(record))
    def test_ping_records(self):
        """ping(records) maps each record to whether it holds data."""
        self.client.add(key='foo', value=1, records=[1, 2])
        data = self.client.ping([1, 2, 3])
        assert_equal({
            1: True,
            2: True,
            3: False
        }, data)
    def test_remove_key_value_record(self):
        """remove() is False for an absent value and True once it exists."""
        key = 'foo'
        value = 1
        record = 1
        assert_false(self.client.remove(key, value, record))
        self.client.add(key, value, record)
        assert_true(self.client.remove(key=key, record=record, value=value))
    def test_remove_key_value_records(self):
        """remove(records) maps each record to whether the value was removed."""
        key = 'foo'
        value = 1
        self.client.add(key, value, records=[1, 2])
        data = self.client.remove(key, value, records=[1, 2, 3])
        assert_equal({
            1: True,
            2: True,
            3: False
        }, data)
def test_revert_key_records_time(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
ts = self.client.time()
self.client.insert(data=data2, records=[1, 2, 3])
self.client.revert(key='one', records=[1, 2, 3], time=ts)
data = self.client.select(key='one', record=[1, 2, 3])
assert_equal({
1: [1],
2: [1],
3: [1]
}, data)
def test_revert_key_records_timestr(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
anchor = self.get_time_anchor()
self.client.insert(data=data2, records=[1, 2, 3])
ts = test_data.get_elapsed_millis_string(anchor)
self.client.revert(key='one', records=[1, 2, 3], time=ts)
data = self.client.select(key='one', record=[1, 2, 3])
assert_equal({
1: [1],
2: [1],
3: [1]
}, data)
def test_revert_keys_records_time(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
ts = self.client.time()
self.client.insert(data=data2, records=[1, 2, 3])
self.client.revert(keys=['one', 'two', 'three'], records=[1, 2, 3], time=ts)
data = self.client.select(key=['one', 'two', 'three'], record=[1, 2, 3])
data3 = {
'one': [1],
'two': [2],
'three': [3]
}
assert_equal({
1: data3,
2: data3,
3: data3
}, data)
def test_revert_keys_records_timestr(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
anchor = self.get_time_anchor()
self.client.insert(data=data2, records=[1, 2, 3])
ts = test_data.get_elapsed_millis_string(anchor)
self.client.revert(keys=['one', 'two', 'three'], records=[1, 2, 3], time=ts)
data = self.client.select(key=['one', 'two', 'three'], record=[1, 2, 3])
data3 = {
'one': [1],
'two': [2],
'three': [3]
}
assert_equal({
1: data3,
2: data3,
3: data3
}, data)
def test_revert_keys_record_time(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
ts = self.client.time()
self.client.insert(data=data2, records=[1, 2, 3])
self.client.revert(key=['one', 'two', 'three'], records=1, time=ts)
data = self.client.select(key=['one', 'two', 'three'], record=1)
assert_equal({
'one': [1],
'two': [2],
'three': [3]
}, data)
def test_revert_keys_record_timestr(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
anchor = self.get_time_anchor()
self.client.insert(data=data2, records=[1, 2, 3])
ts = test_data.get_elapsed_millis_string(anchor)
self.client.revert(key=['one', 'two', 'three'], records=1, time=ts)
data = self.client.select(key=['one', 'two', 'three'], record=1)
assert_equal({
'one': [1],
'two': [2],
'three': [3]
}, data)
def test_revert_key_record_time(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
ts = self.client.time()
self.client.insert(data=data2, records=[1, 2, 3])
self.client.revert(key='one', records=1, time=ts)
data = self.client.select(key='one', record=1)
assert_equal([1], data)
def test_revert_key_record_timestr(self):
data1 = {
'one': 1,
'two': 2,
'three': 3
}
data2 = {
'one': True,
'two': True,
'three': True
}
self.client.insert(data=data1, records=[1, 2, 3])
anchor = self.get_time_anchor()
self.client.insert(data=data2, records=[1, 2, 3])
ts = test_data.get_elapsed_millis_string(anchor)
self.client.revert(key='one', records=1, time=ts)
data = self.client.select(key='one', record=1)
assert_equal([1], data)
    def test_search(self):
        """search() does a fulltext substring search over the key's values;
        'jef' matches 'jeff', 'jeffery' and 'ben jefferson' but not 'jeremy'."""
        self.client.add(key="name", value="jeff", record=1)
        self.client.add(key="name", value="jeffery", record=2)
        self.client.add(key="name", value="jeremy", record=3)
        self.client.add(key="name", value="ben jefferson", record=4)
        records = self.client.search(key="name", query="jef")
        assert_equal([1, 2, 4], records)
    def test_select_ccl(self):
        """select() with CCL returns, per matching record, ALL stored values
        per key (unlike get(), which returns only the latest)."""
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        record1 = test_data.random_long()
        record2 = test_data.random_long()
        self.client.add(key=key1, value=1, records=[record1, record2])
        self.client.add(key=key1, value=2, records=[record1, record2])
        self.client.add(key=key1, value=3, records=[record1, record2])
        self.client.add(key=key2, value=10, records=[record1, record2])
        ccl = key2 + ' = 10'
        data = self.client.select(ccl=ccl)
        expected = {
            key1: [1, 2, 3],
            key2: [10]
        }
        assert_equal(data.get(record1), expected)
        assert_equal(data.get(record2), expected)
    def test_select_ccl_time(self):
        """Historical select() evaluates CCL and values as of `ts`, before
        key2 was overwritten to 11."""
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        record1 = test_data.random_long()
        record2 = test_data.random_long()
        self.client.add(key=key1, value=1, records=[record1, record2])
        self.client.add(key=key1, value=2, records=[record1, record2])
        self.client.add(key=key1, value=3, records=[record1, record2])
        self.client.add(key=key2, value=10, records=[record1, record2])
        ts = self.client.time()
        self.client.set(key=key2, value=11, records=[record1, record2])
        ccl = key2 + ' > 10'
        data = self.client.select(ccl=ccl, time=ts)
        expected = {
            key1: [1, 2, 3],
            key2: [10]
        }
        assert_equal(data.get(record1), expected)
        assert_equal(data.get(record2), expected)
    def test_select_ccl_timestr(self):
        """Same as test_select_ccl_time but with a string timestamp."""
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        record1 = test_data.random_long()
        record2 = test_data.random_long()
        self.client.add(key=key1, value=1, records=[record1, record2])
        self.client.add(key=key1, value=2, records=[record1, record2])
        self.client.add(key=key1, value=3, records=[record1, record2])
        self.client.add(key=key2, value=10, records=[record1, record2])
        anchor = self.get_time_anchor()
        self.client.set(key=key2, value=11, records=[record1, record2])
        ccl = key2 + ' > 10'
        ts = test_data.get_elapsed_millis_string(anchor)
        data = self.client.select(ccl=ccl, time=ts)
        expected = {
            key1: [1, 2, 3],
            key2: [10]
        }
        assert_equal(data.get(record1), expected)
        assert_equal(data.get(record2), expected)
    def test_select_key_ccl(self):
        """select() with key + CCL maps each matching record to all values of
        that key."""
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        record1 = test_data.random_long()
        record2 = test_data.random_long()
        self.client.add(key=key1, value=1, records=[record1, record2])
        self.client.add(key=key1, value=2, records=[record1, record2])
        self.client.add(key=key1, value=3, records=[record1, record2])
        self.client.add(key=key2, value=10, records=[record1, record2])
        self.client.add(key=key1, value=4, record=record2)
        ccl = key2 + ' = 10'
        data = self.client.select(key=key1, ccl=ccl)
        expected = {
            record1: [1, 2, 3],
            record2: [1, 2, 3, 4]
        }
        assert_equal(expected, data)
    def test_select_keys_ccl(self):
        """select() with multiple keys + CCL maps record -> {key -> values}."""
        key1 = test_data.random_string()
        key2 = test_data.random_string()
        record1 = test_data.random_long()
        record2 = test_data.random_long()
        self.client.add(key=key1, value=1, records=[record1, record2])
        self.client.add(key=key1, value=2, records=[record1, record2])
        self.client.add(key=key1, value=3, records=[record1, record2])
        self.client.add(key=key2, value=10, records=[record1, record2])
        self.client.add(key=key1, value=4, record=record2)
        ccl = key2 + ' = 10'
        data = self.client.select(keys=[key1, key2], ccl=ccl)
        expected = {
            record1: {key1: [1, 2, 3], key2: [10]},
            record2: {key1: [1, 2, 3, 4], key2: [10]},
        }
        assert_equal(expected, data)
def test_select_key_ccl_time(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ts = self.client.time()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
data = self.client.select(key=key1, ccl=ccl, time=ts)
expected = {
record1: [1, 2, 3],
record2: [1, 2, 3, 4]
}
assert_equal(expected, data)
def test_select_keys_ccl_time(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
ts = self.client.time()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
data = self.client.select(key=[key1, key2], ccl=ccl, time=ts)
expected = {
record1: {key1: [1, 2, 3], key2: [10]},
record2: {key1: [1, 2, 3, 4], key2: [10]},
}
assert_equal(expected, data)
def test_select_key_ccl_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
anchor = self.get_time_anchor()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select(key=key1, ccl=ccl, time=ts)
expected = {
record1: [1, 2, 3],
record2: [1, 2, 3, 4]
}
assert_equal(expected, data)
def test_select_keys_ccl_timestr(self):
key1 = test_data.random_string()
key2 = test_data.random_string()
record1 = test_data.random_long()
record2 = test_data.random_long()
self.client.add(key=key1, value=1, records=[record1, record2])
self.client.add(key=key1, value=2, records=[record1, record2])
self.client.add(key=key1, value=3, records=[record1, record2])
self.client.add(key=key2, value=10, records=[record1, record2])
self.client.add(key=key1, value=4, record=record2)
anchor = self.get_time_anchor()
ccl = key2 + ' = 10'
self.client.set(key=key1, value=100, record=[record2, record1])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select(key=[key1, key2], ccl=ccl, time=ts)
expected = {
record1: {key1: [1, 2, 3], key2: [10]},
record2: {key1: [1, 2, 3, 4], key2: [10]},
}
assert_equal(expected, data)
def test_select_key_record(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('foo', 3, 1)
assert_equal([1, 2, 3], self.client.select(key='foo', record=1))
def test_select_key_record_time(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('foo', 3, 1)
ts = self.client.time()
self.client.add('foo', 4, 1)
assert_equal([1, 2, 3], self.client.select(key='foo', record=1, time=ts))
def test_select_key_record_timestr(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('foo', 3, 1)
anchor = self.get_time_anchor()
self.client.add('foo', 4, 1)
ts = test_data.get_elapsed_millis_string(anchor)
assert_equal([1, 2, 3], self.client.select(key='foo', record=1, time=ts))
def test_select_key_records(self):
self.client.add('foo', 1, [1, 2, 3])
self.client.add('foo', 2, [1, 2, 3])
self.client.add('foo', 3, [1, 2, 3])
assert_equal({
1: [1, 2, 3],
2: [1, 2, 3],
3: [1, 2, 3]
}, self.client.select(key='foo', record=[1, 2, 3]))
def test_select_key_records_time(self):
self.client.add('foo', 1, [1, 2, 3])
self.client.add('foo', 2, [1, 2, 3])
self.client.add('foo', 3, [1, 2, 3])
ts = self.client.time()
self.client.add('foo', 4, [1, 2, 3])
assert_equal({
1: [1, 2, 3],
2: [1, 2, 3],
3: [1, 2, 3]
}, self.client.select(key='foo', record=[1, 2, 3], time=ts))
def test_select_key_records_timestr(self):
self.client.add('foo', 1, [1, 2, 3])
self.client.add('foo', 2, [1, 2, 3])
self.client.add('foo', 3, [1, 2, 3])
anchor = self.get_time_anchor()
self.client.add('foo', 4, [1, 2, 3])
ts = test_data.get_elapsed_millis_string(anchor)
assert_equal({
1: [1, 2, 3],
2: [1, 2, 3],
3: [1, 2, 3]
}, self.client.select(key='foo', record=[1, 2, 3], time=ts))
def test_select_keys_record(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('bar', 1, 1)
self.client.add('bar', 2, 1)
data = self.client.select(keys=['foo', 'bar'], record=1)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal(expected, data)
def test_select_keys_record_time(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('bar', 1, 1)
self.client.add('bar', 2, 1)
ts = self.client.time()
self.client.add('foo', 3, 1)
self.client.add('bar', 3, 1)
data = self.client.select(keys=['foo', 'bar'], record=1, time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal(expected, data)
def test_select_keys_record_timestr(self):
self.client.add('foo', 1, 1)
self.client.add('foo', 2, 1)
self.client.add('bar', 1, 1)
self.client.add('bar', 2, 1)
anchor = self.get_time_anchor()
self.client.add('foo', 3, 1)
self.client.add('bar', 3, 1)
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select(keys=['foo', 'bar'], record=1, time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal(expected, data)
def test_select_keys_records_time(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
ts = self.client.time()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
data = self.client.select(keys=['foo', 'bar'], records=[1, 2], time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal({
1: expected,
2: expected
}, data)
def test_select_keys_records_timestr(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
anchor = self.get_time_anchor()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select(keys=['foo', 'bar'], records=[1, 2], time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal({
1: expected,
2: expected
}, data)
def test_select_keys_records(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
data = self.client.select(keys=['foo', 'bar'], records=[1, 2])
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal({
1: expected,
2: expected
}, data)
def test_select_record(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
data = self.client.select(record=1)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal(expected, data)
def test_select_record_time(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
ts = self.client.time()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
data = self.client.select(record=2, time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal(expected, data)
def test_select_record_timestr(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
anchor = self.get_time_anchor()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select(record=2, time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal(expected, data)
def test_select_records(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
data = self.client.select(records=[1, 2])
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal({
1: expected,
2: expected
}, data)
def test_select_records_time(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
ts = self.client.time()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
data = self.client.select( records=[1, 2], time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal({
1: expected,
2: expected
}, data)
def test_select_records_timestr(self):
self.client.add('foo', 1, [1, 2])
self.client.add('foo', 2, [1, 2])
self.client.add('bar', 1, [1, 2])
self.client.add('bar', 2, [1, 2])
anchor = self.get_time_anchor()
self.client.add('foo', 3, [1, 2])
self.client.add('bar', 3, [1, 2])
ts = test_data.get_elapsed_millis_string(anchor)
data = self.client.select( records=[1, 2], time=ts)
expected = {
'foo': [1, 2],
'bar': [1, 2]
}
assert_equal({
1: expected,
2: expected
}, data)
def test_set_key_value(self):
key = "foo"
value = 1
record = self.client.set(key=key, value=value)
data = self.client.select(record=record)
assert_equal({
'foo': [1]
}, data)
def test_set_key_value_record(self):
key = "foo"
value = 1
record = 1
self.client.add(key=key, value=2, record=record)
self.client.add(key=key, value=2, record=record)
self.client.set(key=key, value=value, record=record)
data = self.client.select(record=record)
assert_equal({
'foo': [1]
}, data)
def test_set_key_value_records(self):
key = "foo"
value = 1
records = [1, 2, 3]
self.client.add(key=key, value=2, record=records)
self.client.add(key=key, value=2, record=records)
self.client.set(key=key, value=value, record=records)
data = self.client.select(record=records)
expected = {
'foo': [1]
}
assert_equal({
1: expected,
2: expected,
3: expected
}, data)
    def test_stage(self):
        """stage() starts a transaction and abort() discards it."""
        # No transaction token is present before stage() is called.
        assert_is_none(self.client.transaction)
        self.client.stage()
        # stage() populated the client's transaction token.
        assert_is_not_none(self.client.transaction)
        self.client.abort()
    def test_time(self):
        """time() returns the server's current timestamp as an int."""
        assert_true(isinstance(self.client.time(), int))
    def test_time_phrase(self):
        """time() also resolves a natural-language phrase to an int timestamp."""
        assert_true(isinstance(self.client.time("3 seconds ago"), int))
    def test_verify_and_swap(self):
        """verify_and_swap() replaces the value only when the expectation holds."""
        self.client.add("foo", 2, 2)
        # Expected value 1 is not stored, so the swap is rejected.
        assert_false(self.client.verify_and_swap(key='foo', expected=1, record=2, replacement=3))
        # Expected value 2 is stored, so the swap succeeds ...
        assert_true(self.client.verify_and_swap(key='foo', expected=2, record=2, replacement=3))
        # ... and the record now holds the replacement.
        assert_equal(3, self.client.get(key='foo', record=2))
    def test_verify_or_set(self):
        """After verify_or_set(), get() returns the supplied value."""
        self.client.add("foo", 2, 2)
        # The record held 2; after verify_or_set with 3, get() sees 3.
        self.client.verify_or_set(key='foo', value=3, record=2)
        assert_equal(3, self.client.get(key='foo', record=2))
    def test_verify_key_value_record(self):
        """verify() reflects whether the key/value pair is currently stored."""
        self.client.add('name', 'jeff', 1)
        self.client.add('name', 'jeffery', 1)
        self.client.add('name', 'bob', 1)
        assert_true(self.client.verify('name', 'jeff', 1))
        self.client.remove('name', 'jeff', 1)
        # Once removed, verification of the same triple fails.
        assert_false(self.client.verify('name', 'jeff', 1))
    def test_verify_key_value_record_time(self):
        """Historical verify() sees the value that existed at the timestamp."""
        self.client.add('name', 'jeff', 1)
        self.client.add('name', 'jeffery', 1)
        self.client.add('name', 'bob', 1)
        ts = self.client.time()
        # The removal happens after ``ts`` and is invisible to the check below.
        self.client.remove('name', 'jeff', 1)
        assert_true(self.client.verify('name', 'jeff', 1, time=ts))
    def test_verify_key_value_record_timestr(self):
        """Historical verify() also accepts a string (elapsed-millis) timestamp."""
        self.client.add('name', 'jeff', 1)
        self.client.add('name', 'jeffery', 1)
        self.client.add('name', 'bob', 1)
        anchor = self.get_time_anchor()
        # The removal happens after the anchor and is invisible below.
        self.client.remove('name', 'jeff', 1)
        ts = test_data.get_elapsed_millis_string(anchor)
        assert_true(self.client.verify('name', 'jeff', 1, time=ts))
    def test_link_key_source_destination(self):
        """link() stores a Link to the destination under the key in the source."""
        assert_true(self.client.link(key='friends', source=1, destination=2))
        assert_equal(Link.to(2), self.client.get('friends', record=1))
def test_link_key_source_destinations(self):
assert_equal({
2: True,
3: True,
4: True
}, self.client.link(key='friends', source=1, destination=[2, 3, 4]))
    def test_unlink_key_source_destination(self):
        """unlink() removes a previously created link and reports success."""
        assert_true(self.client.link(key='friends', source=1, destination=2))
        assert_true(self.client.unlink(key='friends', source=1, destination=2))
def test_unlink_key_source_destinations(self):
assert_true(self.client.link(key='friends', source=1, destination=2))
assert_equal({
2: True,
3: False
}, self.client.unlink(key='friends', source=1, destination=[2, 3]))
    def test_find_or_add_key_value(self):
        """find_or_add() returns a record that holds the key/value pair."""
        record = self.client.find_or_add("age", 23)
        assert_equal(23, self.client.get("age", record))
def test_find_or_insert_ccl_json(self):
data = {
'name': 'jeff nelson'
}
data = ujson.dumps(data)
record = self.client.find_or_insert(criteria="age > 10", data=data)
assert_equal('jeff nelson', self.client.get("name", record))
def test_find_or_insert_ccl_dict(self):
data = {
'name': 'jeff nelson'
}
record = self.client.find_or_insert(criteria="age > 10", data=data)
assert_equal('jeff nelson', self.client.get("name", record))
def test_insert_dict_with_link(self):
data = {
'foo': Link.to(1)
}
record = self.client.insert(data=data)[0]
assert_equal(Link.to(1), self.client.get(key='foo', record=record))
def test_insert_dict_with_resolvable_link(self):
record1 = self.client.add('foo', 1)
record2 = self.client.insert(data={
'foo': Link.to_where('foo = 1')
})[0]
assert_equal(Link.to(record1), self.client.get(key='foo', record=record2)) | remiemalik/concourse | concourse-driver-python/tests/integration_tests.py | Python | apache-2.0 | 87,056 |
// QUnit module grouping the tests for the 'badAriaRole' audit rule.
module('BadAriaRole');
test('No elements === no problems.', function(assert) {
    // An empty fixture gives the audit nothing to inspect, so it is N/A.
    assert.runRule({
        ruleName: 'badAriaRole',
        expected: axs.constants.AuditResult.NA
    });
});
test('No roles === no problems.', function(assert) {
    // Fill the fixture with divs that carry no role attribute at all.
    var fixture = document.getElementById('qunit-fixture');
    for (var remaining = 10; remaining > 0; remaining--) {
        fixture.appendChild(document.createElement('div'));
    }
    // With no role attributes present, the audit is not applicable.
    assert.runRule({
        ruleName: 'badAriaRole',
        expected: axs.constants.AuditResult.NA
    });
});
test('Good role === no problems.', function(assert) {
    // Add one element per concrete (non-abstract) ARIA role.
    var fixture = document.getElementById('qunit-fixture');
    Object.keys(axs.constants.ARIA_ROLES).forEach(function(role) {
        if (axs.constants.ARIA_ROLES[role]['abstract'])
            return;
        var element = document.createElement('div');
        element.setAttribute('role', role);
        fixture.appendChild(element);
    });
    // Every role used is valid, so the audit passes with no offenders.
    assert.runRule({
        ruleName: 'badAriaRole',
        expected: axs.constants.AuditResult.PASS,
        elements: []
    });
});
test('Bad role == problem', function(assert) {
    // An element whose role is not defined by ARIA must be flagged.
    var offender = document.createElement('div');
    offender.setAttribute('role', 'not-an-aria-role');
    document.getElementById('qunit-fixture').appendChild(offender);
    assert.runRule({
        ruleName: 'badAriaRole',
        expected: axs.constants.AuditResult.FAIL,
        elements: [offender]
    });
});
test('Abstract role == problem', function(assert) {
    // 'input' is an abstract ARIA role, so using it in content must fail.
    var offender = document.createElement('div');
    offender.setAttribute('role', 'input');
    document.getElementById('qunit-fixture').appendChild(offender);
    assert.runRule({
        ruleName: 'badAriaRole',
        expected: axs.constants.AuditResult.FAIL,
        elements: [offender]
    });
});
| alice/accessibility-developer-tools | test/audits/bad-aria-role-test.js | JavaScript | apache-2.0 | 1,950 |
/**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/securityhub/model/StandardsSubscriptionRequest.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
namespace Aws
{
namespace SecurityHub
{
namespace Model
{
// Default constructor: both fields start out unset so Jsonize() emits nothing.
StandardsSubscriptionRequest::StandardsSubscriptionRequest() : 
    m_standardsArnHasBeenSet(false),
    m_standardsInputHasBeenSet(false)
{
}
// Construct from a JSON view by delegating to the assignment operator.
StandardsSubscriptionRequest::StandardsSubscriptionRequest(JsonView jsonValue) : 
    m_standardsArnHasBeenSet(false),
    m_standardsInputHasBeenSet(false)
{
  *this = jsonValue;
}
// Populate this object from JSON. Only fields present in the document are
// read, and each one read raises its corresponding *HasBeenSet flag.
StandardsSubscriptionRequest& StandardsSubscriptionRequest::operator =(JsonView jsonValue)
{
  if(jsonValue.ValueExists("StandardsArn"))
  {
    m_standardsArn = jsonValue.GetString("StandardsArn");

    m_standardsArnHasBeenSet = true;
  }
  if(jsonValue.ValueExists("StandardsInput"))
  {
    // Flatten the nested JSON object into the string -> string map member.
    Aws::Map<Aws::String, JsonView> standardsInputJsonMap = jsonValue.GetObject("StandardsInput").GetAllObjects();
    for(auto& standardsInputItem : standardsInputJsonMap)
    {
      m_standardsInput[standardsInputItem.first] = standardsInputItem.second.AsString();
    }
    m_standardsInputHasBeenSet = true;
  }
  return *this;
}
// Serialize to JSON, emitting only the fields whose HasBeenSet flag is true.
JsonValue StandardsSubscriptionRequest::Jsonize() const
{
  JsonValue payload;

  if(m_standardsArnHasBeenSet)
  {
   payload.WithString("StandardsArn", m_standardsArn);

  }
  if(m_standardsInputHasBeenSet)
  {
   // Rebuild the map member as a nested JSON object.
   JsonValue standardsInputJsonMap;
   for(auto& standardsInputItem : m_standardsInput)
   {
     standardsInputJsonMap.WithString(standardsInputItem.first, standardsInputItem.second);
   }
   payload.WithObject("StandardsInput", std::move(standardsInputJsonMap));

  }
  return payload;
}
} // namespace Model
} // namespace SecurityHub
} // namespace Aws
| awslabs/aws-sdk-cpp | aws-cpp-sdk-securityhub/source/model/StandardsSubscriptionRequest.cpp | C++ | apache-2.0 | 1,905 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.