code
stringlengths
3
1.05M
repo_name
stringlengths
4
116
path
stringlengths
4
991
language
stringclasses
9 values
license
stringclasses
15 values
size
int32
3
1.05M
/* * Copyright (c) 2005-2011, WSO2 Inc. (http://www.wso2.org) All Rights Reserved. * * WSO2 Inc. licenses this file to you under the Apache License, * Version 2.0 (the "License"); you may not use this file except * in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.wso2.carbon.appmgt.impl.workflow; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.wso2.carbon.user.mgt.stub.UserAdminStub; import org.wso2.carbon.user.mgt.stub.types.carbon.FlaggedName; import org.wso2.carbon.utils.CarbonUtils; import java.util.ArrayList; import java.util.List; public abstract class UserSignUpWorkflowExecutor extends WorkflowExecutor { private static final Log log = LogFactory.getLog(UserSignUpWSWorkflowExecutor.class); /** * Method updates Roles users with subscriber role * @param serverURL * @param adminUsername * @param adminPassword * @param userName * @param role * @throws Exception */ protected static void updateRolesOfUser(String serverURL, String adminUsername, String adminPassword, String userName, String role) throws Exception { log.info("Adding Subscriber role to user"); String url = serverURL + "UserAdmin"; UserAdminStub userAdminStub = new UserAdminStub(url); CarbonUtils.setBasicAccessSecurityHeaders(adminUsername, adminPassword, true, userAdminStub._getServiceClient()); FlaggedName[] flaggedNames = userAdminStub.getRolesOfUser(userName, "*", -1); List<String> roles = new ArrayList<String>(); if (flaggedNames != null) { for (int i = 0; i < flaggedNames.length; i++) { if (flaggedNames[i].getSelected()) { 
roles.add(flaggedNames[i].getItemName()); } } } roles.add(role); userAdminStub.updateRolesOfUser(userName, roles.toArray(new String[roles.size()])); } }
maheshika/carbon-appmgt
components/appmgt/org.wso2.carbon.appmgt.impl/src/main/java/org/wso2/carbon/appmgt/impl/workflow/UserSignUpWorkflowExecutor.java
Java
apache-2.0
2,414
package net.dev123.mblog.entity; public enum UnreadType { COMMENT(1), METION(2), DIRECT_MESSAGE(3), FOLLOWER(4); private int type; private UnreadType(int type) { this.type = type; } public int getType() { return type; } public void setType(int type) { this.type = type; } }
yibome/yibo-library
src/main/java/net/dev123/mblog/entity/UnreadType.java
Java
apache-2.0
296
// Copyright 2016 Yahoo Inc. // Licensed under the terms of the Apache license. Please see LICENSE.md file distributed with this work for terms. package com.yahoo.bard.webservice.web.apirequest; /** * Jobs API Request. Such an API Request binds, validates, and models the parts of a request to the Jobs endpoint. */ public interface JobsApiRequest extends ApiRequest { String REQUEST_MAPPER_NAMESPACE = "jobsApiRequestMapper"; String EXCEPTION_HANDLER_NAMESPACE = "jobsApiRequestExceptionHandler"; }
yahoo/fili
fili-core/src/main/java/com/yahoo/bard/webservice/web/apirequest/JobsApiRequest.java
Java
apache-2.0
511
/******************************************************************************* * Copyright 2012 Apigee Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ package org.usergrid.mongo.commands; import static org.usergrid.utils.MapUtils.entry; import static org.usergrid.utils.MapUtils.map; import org.jboss.netty.channel.ChannelHandlerContext; import org.jboss.netty.channel.MessageEvent; import org.usergrid.mongo.MongoChannelHandler; import org.usergrid.mongo.protocol.OpQuery; import org.usergrid.mongo.protocol.OpReply; public class Getlasterror extends MongoCommand { @Override public OpReply execute(MongoChannelHandler handler, ChannelHandlerContext ctx, MessageEvent e, OpQuery opQuery) { OpReply reply = new OpReply(opQuery); // there's an error in the attachment if (ctx.getAttachment() instanceof Exception) { reply.addDocument(map( entry("n", 0), entry("connectionId", 20), entry("wtime", 0), entry("err", ((Exception) ctx.getAttachment()).getMessage()), entry("ok", 0.0))); } else { reply.addDocument(map(entry("n", 0), entry("connectionId", 20), entry("wtime", 0), entry("err", null), entry("ok", 1.0))); } return reply; } }
pgorla/usergrid
mongo-emulator/src/main/java/org/usergrid/mongo/commands/Getlasterror.java
Java
apache-2.0
1,962
/** * */ /** * Copyright (C) 2013 - present by OpenGamma Inc. and the OpenGamma group of companies * * Please see distribution for license. */ package com.opengamma.bbg.loader; import java.net.URI; import java.util.Map; import org.testng.annotations.Test; import com.google.common.collect.Sets; import com.opengamma.bbg.referencedata.ReferenceDataProvider; import com.opengamma.bbg.referencedata.impl.RemoteReferenceDataProvider; import com.opengamma.master.security.ManageableSecurity; /** * */ /** * @author arroub * */ @Test public class BondLoaderTest { public void test() { ReferenceDataProvider referenceDataProvider = new RemoteReferenceDataProvider(URI.create("http://marketdataserver-lx-1:8090/jax/components/ReferenceDataProvider/bloomberg")); BondLoader bondLoader = new BondLoader(referenceDataProvider); Map<String, ManageableSecurity> loadSecurities = bondLoader.loadSecurities(Sets.newHashSet("/ticker/NGGLN 2.983 07/08/18 Corp")); System.err.println(loadSecurities.get("/ticker/NGGLN 2.983 07/08/18 Corp").getAttributes()); } }
nssales/OG-Platform
projects/OG-Bloomberg/src/test/java/com/opengamma/bbg/loader/BondLoaderTest.java
Java
apache-2.0
1,082
<?php namespace backend\models; use Yii; /** * This is the model class for table "{{%language}}". * * @property integer $id * @property string $language * @property string $icon */ class Language extends \yii\db\ActiveRecord { public $filename; public $file; public $path_s; public $img_id; /** * @inheritdoc */ public static function tableName() { return '{{%language}}'; } /** * @inheritdoc */ public function rules() { return [ [['language'], 'required'], [['language'], 'unique'], [['language'], 'string', 'max' => 20], [['code'], 'required'], [['code'], 'unique'], [['code'], 'string', 'max' => 4], [['icon'], 'file', 'extensions' => ['png', 'jpg', 'gif']], [['order'], 'integer'], ]; } /** * @inheritdoc */ public function attributeLabels() { return [ ]; } public function getImages() { /** * 第一个参数为要关联的字表模型类名称, * 第二个参数指定 通过子表的 id 去关联主表的 id 字段 */ return $this->hasOne(Images::className(), ['related_id' => 'id'])->onCondition(['images.model' => 'language']); } }
n34n/nxshop
backend/models/Language.php
PHP
apache-2.0
1,341
import NetRegexes from '../../../../../resources/netregexes'; import ZoneId from '../../../../../resources/zone_id'; import { OopsyData } from '../../../../../types/data'; import { OopsyTriggerSet } from '../../../../../types/oopsy'; export type Data = OopsyData; // O12N - Alphascape 4.0 Normal const triggerSet: OopsyTriggerSet<Data> = { zoneId: ZoneId.AlphascapeV40, damageWarn: { 'O12N Floodlight': '3309', // targeted circular aoes after Program Alpha 'O12N Efficient Bladework': '32FF', // telegraphed centered circle 'O12N Efficient Bladework Untelegraphed': '32F3', // centered circle after transformation 'O12N Optimized Blizzard III': '3303', // cross aoe 'O12N Superliminal Steel 1': '3306', // sides of the room 'O12N Superliminal Steel 2': '3307', // sides of the room 'O12N Beyond Strength': '3300', // donut 'O12N Optical Laser': '3320', // line aoe from eye 'O12N Optimized Sagittarius Arrow': '3323', // line aoe from Omega-M }, shareWarn: { 'O12N Solar Ray': '330F', // circular tankbuster }, soloWarn: { 'O12N Spotlight': '330A', // stack marker }, triggers: [ { id: 'O12N Discharger Knocked Off', type: 'Ability', netRegex: NetRegexes.ability({ id: '32F6' }), deathReason: (_data, matches) => { return { id: matches.targetId, name: matches.target, text: { en: 'Knocked off', de: 'Runtergefallen', fr: 'Renversé(e)', ja: 'ノックバック', cn: '击退坠落', ko: '넉백', }, }; }, }, ], }; export default triggerSet;
quisquous/cactbot
ui/oopsyraidsy/data/04-sb/raid/o12n.ts
TypeScript
apache-2.0
1,673
/* * Copyright DataStax, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.datastax.oss.driver.internal.core.type.codec.extras; import com.datastax.oss.driver.api.core.type.codec.MappingCodec; import com.datastax.oss.driver.api.core.type.codec.TypeCodec; import com.datastax.oss.driver.api.core.type.reflect.GenericType; import edu.umd.cs.findbugs.annotations.NonNull; import edu.umd.cs.findbugs.annotations.Nullable; import java.util.Collection; import java.util.Map; import java.util.Objects; import java.util.Optional; import net.jcip.annotations.Immutable; /** * A codec that wraps other codecs around {@link Optional} instances. * * @param <T> The wrapped Java type. */ @Immutable public class OptionalCodec<T> extends MappingCodec<T, Optional<T>> { public OptionalCodec(@NonNull TypeCodec<T> innerCodec) { super( Objects.requireNonNull(innerCodec, "innerCodec must not be null"), GenericType.optionalOf(innerCodec.getJavaType())); } @Override public boolean accepts(@NonNull Object value) { Objects.requireNonNull(value); if (value instanceof Optional) { Optional<?> optional = (Optional<?>) value; return optional.map(innerCodec::accepts).orElse(true); } return false; } @Nullable @Override protected Optional<T> innerToOuter(@Nullable T value) { return Optional.ofNullable(isAbsent(value) ? null : value); } @Nullable @Override protected T outerToInner(@Nullable Optional<T> value) { return value != null && value.isPresent() ? 
value.get() : null; } protected boolean isAbsent(@Nullable T value) { return value == null || (value instanceof Collection && ((Collection<?>) value).isEmpty()) || (value instanceof Map && ((Map<?, ?>) value).isEmpty()); } }
datastax/java-driver
core/src/main/java/com/datastax/oss/driver/internal/core/type/codec/extras/OptionalCodec.java
Java
apache-2.0
2,304
/* Copyright 2005-2006 The Apache Software Foundation or its licensors, as applicable Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /** * @author Gregory Shimansky, Petr Ivanov * @version $Revision: 1.3 $ */ /* * Created on 22.11.2004 */ package org.apache.harmony.vts.test.vm.jni.string_operations; import org.apache.harmony.vts.test.vm.jni.share.JNITest; /** * @author Gregory Shimansky * * Test for NewString function. */ public class NewStringTest extends JNITest { private native String nativeExecute(char []str); /** * Native code creates a string from UTF chars and returns it. * Test checks that the string is correct. * @see org.apache.harmony.vts.test.vm.jni.share.JNITest#execute() */ public boolean execute() throws Exception { char []chars = {'a', '1', '\u1212', 'W'}; String str1 = nativeExecute(chars); String str2 = new String(chars); return str2.equals(str1); } public static void main(String[] args){ System.exit(new NewStringTest().test()); } }
freeVM/freeVM
enhanced/buildtest/tests/vts/vm/src/test/vm/jni/string_operations/NewStringTest/NewStringTest.java
Java
apache-2.0
1,584
/*
 * Copyright 2016 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.springlets.web.autoconfigure;

import static org.assertj.core.api.Assertions.assertThat;

import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.i18n.LocaleContextHolder;
// NOTE(review): LocalValidatorFactoryBean appears unused — confirm before removing.
import org.springframework.validation.beanvalidation.LocalValidatorFactoryBean;
import org.springframework.web.context.support.AnnotationConfigWebApplicationContext;

import io.springlets.web.mvc.advice.JsonpAdvice;
import io.springlets.web.mvc.advice.StringTrimmerAdvice;
import io.springlets.web.mvc.advice.ValidatorAdvice;
import io.springlets.web.mvc.config.SpringletsWebMvcConfiguration;
import io.springlets.web.mvc.config.SpringletsWebMvcProperties;

/**
 * Tests for {@link SpringletsWebMvcConfiguration}
 *
 * @author Enrique Ruiz at http://www.disid.com[DISID Corporation S.L.]
 * @author Juan Carlos García at http://www.disid.com[DISID Corporation S.L.]
 */
public class SpringletsWebMvcConfigurationTest {

  // Fresh application context per test; registered/refreshed inside each test method.
  private AnnotationConfigWebApplicationContext context = new AnnotationConfigWebApplicationContext();

  @Before
  public void setupContext() {}

  @After
  public void close() {
    // Reset any locale state a test may have left behind, then tear down the context.
    LocaleContextHolder.resetLocaleContext();
    if (this.context != null) {
      this.context.close();
    }
  }

  /**
   * Check if "springlets.mvc.advices.enabled" is not defined the
   * StringTrimmerAdvice is registered and empty-as-null is true.
   * JsonpAdvice is registered with 'callback' as jsonp query parameter
   *
   * @throws Exception
   */
  @Test
  public void defaultConfiguration() throws Exception {
    // Setup: default (empty) properties plus the configuration under test.
    this.context.register(SpringletsWebMvcProperties.class);
    this.context.register(SpringletsWebMvcConfiguration.class);
    this.context.refresh();

    // Exercise
    StringTrimmerAdvice advice = this.context.getBean(StringTrimmerAdvice.class);
    JsonpAdvice jsonpAdvice = this.context.getBean(JsonpAdvice.class);
    ValidatorAdvice validatorAdvice = this.context.getBean(ValidatorAdvice.class);

    // Verify: defaults are empty-as-null=true and no chars-to-delete.
    assertThat(this.context.getBean(SpringletsWebMvcProperties.class)).isNotNull();
    assertThat(advice).isNotNull();
    assertThat(advice.isEmptyAsNull()).isEqualTo(true);
    assertThat(advice.getCharsToDelete()).isNull();
    assertThat(jsonpAdvice).isNotNull();
    assertThat(validatorAdvice).isNotNull();
  }

  /**
   * Configure the {@link StringTrimmerAdvice}, the {@link JsonpAdvice} and the
   * {@link ValidatorAdvice} and check if they have the right settings.
   */
  @Test
  public void configureAdvice() {
    // Setup: custom properties (see DummyPropertiesConfiguration below).
    this.context.register(DummyPropertiesConfiguration.class);
    this.context.register(SpringletsWebMvcConfiguration.class);
    this.context.refresh();

    // Exercise
    StringTrimmerAdvice advice = this.context.getBean(StringTrimmerAdvice.class);
    JsonpAdvice jsonpAdvice = this.context.getBean(JsonpAdvice.class);
    ValidatorAdvice validatorAdvice = this.context.getBean(ValidatorAdvice.class);

    // Verify: the custom values from DummyPropertiesConfiguration took effect.
    assertThat(this.context.getBean(SpringletsWebMvcProperties.class)).isNotNull();
    assertThat(advice).isNotNull();
    assertThat(advice.isEmptyAsNull()).isEqualTo(false);
    assertThat(advice.getCharsToDelete()).isEqualTo("abc");
    assertThat(jsonpAdvice).isNotNull();
    assertThat(validatorAdvice).isNotNull();
  }

  /**
   * Test Configuration.
   */
  @Configuration
  protected static class DummyPropertiesConfiguration {

    // Values asserted by configureAdvice() above.
    public boolean emptyAsNull = false;
    public String charsToDelete = "abc";

    @Bean
    public SpringletsWebMvcProperties springletsWebMvcProperties() {
      SpringletsWebMvcProperties properties = new SpringletsWebMvcProperties();
      properties.getAdvices().getTrimeditor().setCharsToDelete(this.charsToDelete);
      properties.getAdvices().getTrimeditor().setEmptyAsNull(this.emptyAsNull);
      return properties;
    }
  }
}
DISID/springlets
springlets-framework/springlets-web/src/test/java/io/springlets/web/autoconfigure/SpringletsWebMvcConfigurationTest.java
Java
apache-2.0
4,572
import { async, ComponentFixture, TestBed } from '@angular/core/testing'; import { ListTrainingComponent } from './list-training.component'; describe('ListTrainingComponent', () => { let component: ListTrainingComponent; let fixture: ComponentFixture<ListTrainingComponent>; beforeEach(async(() => { TestBed.configureTestingModule({ declarations: [ ListTrainingComponent ] }) .compileComponents(); })); beforeEach(() => { fixture = TestBed.createComponent(ListTrainingComponent); component = fixture.componentInstance; fixture.detectChanges(); }); it('should be created', () => { expect(component).toBeTruthy(); }); });
Vi-dot/grobid-smecta
src/main/webapp/main/src/app/training/list-training/list-training.component.spec.ts
TypeScript
apache-2.0
675
/* * Copyright 1999-2012 Alibaba Group. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ using System.Collections.Generic; namespace Tup.Cobar4Net.Parser.Ast.Expression.Primary.Function.String { /// <author> /// <a href="mailto:shuo.qius@alibaba-inc.com">QIU Shuo</a> /// </author> public class Bin : FunctionExpression { public Bin(IList<IExpression> arguments) : base("BIN", arguments) { } public override FunctionExpression ConstructFunction(IList<IExpression> arguments) { return new Bin(arguments); } } }
tupunco/Tup.Cobar4Net
Tup.Cobar4Net/Parser/Ast/Expression/Primary/Function/String/Bin.cs
C#
apache-2.0
1,113
package cn.lonecloud.util; public class StringUtil { }
lonecloudStudy/QQMail
QQMail/src/main/java/cn/lonecloud/util/StringUtil.java
Java
apache-2.0
57
package com.borqs.server.market.utils.mybatis; import org.apache.ibatis.session.SqlSession; public interface SqlSessionHandler<T> { T handleSession(SqlSession session) throws Exception; }
wutongservice/virtualgoogdsman
Server/src/main/java/com/borqs/server/market/utils/mybatis/SqlSessionHandler.java
Java
apache-2.0
195
package com.ilad.pages; import org.openqa.selenium.By; import org.openqa.selenium.WebDriver; import org.openqa.selenium.WebElement; import org.openqa.selenium.support.ui.ExpectedConditions; import org.openqa.selenium.support.ui.WebDriverWait; public class ElementOperations { WebDriver driver; WebDriverWait wait; public ElementOperations(WebDriver driver_, WebDriverWait wait_) { driver = driver_; wait = wait_; } public void clickOnElement(By locator) { wait.until(ExpectedConditions.elementToBeClickable(locator)).click(); } public void sendKeys(By locator, String keyToSend) { wait.until(ExpectedConditions.presenceOfElementLocated(locator)).sendKeys(keyToSend); } public WebElement findWebElement(By locator) { return wait.until(ExpectedConditions.presenceOfElementLocated(locator)); } /** * Find element in parent element by locators * * @param parentLocator * element that the child locator works on. * @param childLocator * child locator, with implicit wait * @return the child element * @author chelgo */ public WebElement findWebElementInParentElement(By parentLocator, By childLocator) { return findWebElement(parentLocator).findElement(childLocator); } }
chelgo/Infinity-automization
topq/src/test/java/com/ilad/pages/ElementOperations.java
Java
apache-2.0
1,241
from tornado.httpserver import HTTPRequest

from ..functions import parse_request


def _assert_parses(request):
    """Build an HTTPRequest from request['args'] and assert that
    parse_request returns request['parsed_request'].
    """
    tornado_http_request = HTTPRequest(**request['args'])
    assert parse_request(tornado_http_request) == request['parsed_request']


def test_get_stats():
    """'Get stats' request is parsed into a cluster-level _stats call."""
    request = {
        'args': {
            'method': 'GET',
            'uri': '/_stats',
        },
        'parsed_request': {
            'call': '_stats',
            'cluster': True,
            'indices': [],
            'scripted': False
        },
    }
    _assert_parses(request)


def test_get_search():
    """'Search by GET' request is parsed into a single-index _search call."""
    request = {
        # Search by GET
        'args': {
            'method': 'GET',
            'uri': '/twitter/tweet/_search?q=user:kimchy',
        },
        'parsed_request': {
            'call': '_search',
            'cluster': False,
            'indices': ['twitter'],
            'scripted': False
        }
    }
    _assert_parses(request)


def test_create_by_put():
    """'Create by PUT' request is parsed into a _document call."""
    request = {
        'args': {
            'method': 'PUT',
            'uri': '/twitter/tweet/1',
            'body': '''{ "user" : "kimchy", "post_date" : "2009-11-15T14:12:12", "message" : "trying out Elasticsearch" }'''
        },
        'parsed_request': {
            'call': '_document',
            'cluster': False,
            'indices': ['twitter'],
            'scripted': False
        }
    }
    _assert_parses(request)


def test_search_by_multi_index_get():
    """'Search by GET, MULTI INDEX' request yields every index in the URI."""
    request = {
        'args': {
            'method': 'GET',
            'uri': '/twitter,index1,index2/tweet/_search?q=user:kimchy'
        },
        'parsed_request': {
            'call': '_search',
            'cluster': False,
            'indices': ['twitter', 'index1', 'index2'],
            'scripted': False
        }
    }
    _assert_parses(request)


def test_delete_index():
    """'Delete the articles index' request is parsed into a _document call."""
    request = {
        'args': {
            'method': 'DELETE',
            'uri': '/articles'
        },
        'parsed_request': {
            'call': '_document',
            'cluster': False,
            'indices': ['articles'],
            'scripted': False
        }
    }
    _assert_parses(request)


def test_create_document_with_post():
    """'Create a new article document with POST' is parsed as _document."""
    request = {
        'args': {
            'method': 'POST',
            'uri': '/articles/article',
            'body': '{"title" : "Two", "tags" : ["foo", "bar"]}'
        },
        'parsed_request': {
            'call': '_document',
            'cluster': False,
            'indices': ['articles'],
            'scripted': False
        }
    }
    _assert_parses(request)


def test_update_document_with_script():
    """'Update via POST with script' is parsed as a scripted _update."""
    request = {
        'args': {
            'method': 'POST',
            'uri': '/test/type1/1/_update',
            # Note that in the python heredoc syntax
            # the backslashes have to be escaped
            'body': '''{ "script" : "ctx._source.text = \\"some text\\"" }'''
        },
        'parsed_request': {
            'call': '_update',
            'cluster': False,
            'indices': ['test'],
            'scripted': True
        }
    }
    _assert_parses(request)


def test_update_document_without_script():
    """'Update via POST without script' is parsed as a non-scripted _update."""
    request = {
        'args': {
            'method': 'POST',
            'uri': '/test/type1/1/_update',
            'body': '''{ "doc" : { "name" : "new_name" } }'''
        },
        'parsed_request': {
            'call': '_update',
            'cluster': False,
            'indices': ['test'],
            'scripted': False
        }
    }
    _assert_parses(request)


def test_query_by_post():
    """'Query via POST without script fields' is parsed as a plain _search."""
    request = {
        'args': {
            'method': 'POST',
            'uri': '/articles/_search?pretty=true',
            'body': '''
            {
                "query" : { "query_string" : {"query" : "T*"} },
                "facets" : { "tags" : { "terms" : {"field" : "tags"} } }
            }
            '''
        },
        'parsed_request': {
            'call': '_search',
            'cluster': False,
            'indices': ['articles'],
            'scripted': False
        }
    }
    _assert_parses(request)


def test_query_by_post_with_script_fields():
    """Query via POST with script fields is parsed as a scripted _search."""
    request = {
        'args': {
            'method': 'GET',
            'uri': '/articles/_search?pretty=true',
            'body': '''
            {
                "query" : { "query_string" : {"query" : "T*"} },
                "script_fields" : {
                    "test1" : { "script" : "doc['my_field_name'].value * 2" },
                    "test2" : {
                        "script" : "doc['my_field_name'].value * factor",
                        "params" : { "factor" : 2.0 }
                    }
                }
            }
            '''
        },
        'parsed_request': {
            'call': '_search',
            'cluster': False,
            'indices': ['articles'],
            'scripted': True
        }
    }
    _assert_parses(request)


def test_all_settings():
    """'_all _settings GET' request is parsed into a _settings call."""
    request = {
        'args': {
            'method': 'GET',
            'uri': '/_all/_settings',
        },
        'parsed_request': {
            'indices': ['_all'],
            'cluster': False,
            'call': '_settings',
            'scripted': False
        }
    }
    _assert_parses(request)


def test_home():
    """Root-path GET request is parsed into a cluster-level _home call."""
    request = {
        'args': {
            'method': 'GET',
            'uri': '/',
        },
        'parsed_request': {
            'indices': [],
            'cluster': True,
            'call': '_home',
            'scripted': False
        }
    }
    _assert_parses(request)
HatPull/tornado-elasticsearch-proxy
es_proxy/tests/test_parse_request.py
Python
apache-2.0
8,311
/**
 * OLAT - Online Learning and Training<br>
 * http://www.olat.org
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License"); <br>
 * you may not use this file except in compliance with the License.<br>
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing,<br>
 * software distributed under the License is distributed on an "AS IS" BASIS, <br>
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. <br>
 * See the License for the specific language governing permissions and <br>
 * limitations under the License.
 * <p>
 * Copyright (c) since 2004 at Multimedia- & E-Learning Services (MELS),<br>
 * University of Zurich, Switzerland.
 * <hr>
 * <a href="http://www.openolat.org">
 * OpenOLAT - Online Learning and Training</a><br>
 * This file has been modified by the OpenOLAT community. Changes are licensed
 * under the Apache 2.0 license as the original file.
 * <p>
 */
package org.olat.restapi;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.UriBuilder;

import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.util.EntityUtils;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.type.TypeReference;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.olat.basesecurity.BaseSecurity;
import org.olat.basesecurity.BaseSecurityManager;
import org.olat.basesecurity.Constants;
import org.olat.basesecurity.SecurityGroup;
import org.olat.collaboration.CollaborationTools;
import org.olat.collaboration.CollaborationToolsFactory;
import org.olat.core.commons.persistence.DBFactory;
import org.olat.core.id.Identity;
import org.olat.core.id.OLATResourceable;
import org.olat.core.logging.OLog;
import org.olat.core.logging.Tracing;
import org.olat.core.util.resource.OresHelper;
import org.olat.group.BusinessGroup;
import org.olat.group.BusinessGroupService;
import org.olat.modules.fo.Forum;
import org.olat.modules.fo.ForumManager;
import org.olat.modules.fo.Message;
import org.olat.modules.fo.restapi.MessageVO;
import org.olat.properties.NarrowedPropertyManager;
import org.olat.properties.Property;
import org.olat.repository.RepositoryEntry;
import org.olat.repository.RepositoryManager;
import org.olat.resource.OLATResource;
import org.olat.resource.OLATResourceManager;
import org.olat.restapi.support.vo.GroupConfigurationVO;
import org.olat.restapi.support.vo.GroupInfoVO;
import org.olat.restapi.support.vo.GroupVO;
import org.olat.test.JunitTestHelper;
import org.olat.test.OlatJerseyTestCase;
import org.olat.user.restapi.UserVO;
import org.springframework.beans.factory.annotation.Autowired;

/**
 *
 * Description:<br>
 * Test the learning group web service
 *
 * <P>
 * Initial Date:  7 mai 2010 <br>
 * @author srosse, stephane.rosse@frentix.com
 */
public class GroupMgmtTest extends OlatJerseyTestCase {

	private static final OLog log = Tracing.createLoggerFor(GroupMgmtTest.class);

	// Fixture identities: three group owners/tutors and three participants.
	private Identity owner1, owner2, owner3, part1, part2, part3;
	// g1/g2: learning groups attached to repository entry c1; g3/g4: right groups on c2.
	private BusinessGroup g1, g2;
	private BusinessGroup g3, g4;
	private OLATResource course;
	// Forum messages seeded into g1's forum: two threads plus replies.
	private Message m1, m2, m3, m4, m5;
	private RestConnection conn;

	@Autowired
	private BusinessGroupService businessGroupService;

	/**
	 * Set up a course with learn group and group area
	 * @see org.olat.test.OlatJerseyTestCase#setUp()
	 */
	@Before
	@Override
	public void setUp() throws Exception {
		super.setUp();
		conn = new RestConnection();
		//create a course with learn group

		owner1 = JunitTestHelper.createAndPersistIdentityAsUser("rest-one");
		owner2 = JunitTestHelper.createAndPersistIdentityAsUser("rest-two");
		owner3 = JunitTestHelper.createAndPersistIdentityAsUser("rest-three");
		part1 = JunitTestHelper.createAndPersistIdentityAsUser("rest-four");
		part2 = JunitTestHelper.createAndPersistIdentityAsUser("rest-five");
		part3 = JunitTestHelper.createAndPersistIdentityAsUser("rest-six");

		OLATResourceManager rm = OLATResourceManager.getInstance();
		// create course and persist as OLATResourceImpl
		OLATResourceable resourceable = OresHelper.createOLATResourceableInstance("junitcourse",System.currentTimeMillis());
		RepositoryEntry re = RepositoryManager.getInstance().createRepositoryEntryInstance("administrator");
		re.setCanDownload(false);
		re.setCanLaunch(true);
		re.setDisplayname("rest-re");
		re.setResourcename("-");
		re.setAccess(0);// Access for nobody
		re.setOwnerGroup(null);

		// create security group
		BaseSecurity securityManager = BaseSecurityManager.getInstance();
		SecurityGroup newGroup = securityManager.createAndPersistSecurityGroup();
		// member of this group may modify member's membership
		securityManager.createAndPersistPolicy(newGroup, Constants.PERMISSION_ACCESS, newGroup);
		// members of this group are always authors also
		securityManager.createAndPersistPolicy(newGroup, Constants.PERMISSION_HASROLE, Constants.ORESOURCE_AUTHOR);
		securityManager.addIdentityToSecurityGroup(owner1, newGroup);
		re.setOwnerGroup(newGroup);

		course = rm.createOLATResourceInstance(resourceable);
		DBFactory.getInstance().saveObject(course);
		DBFactory.getInstance().intermediateCommit();

		OLATResource ores = OLATResourceManager.getInstance().findOrPersistResourceable(resourceable);
		re.setOlatResource(ores);
		RepositoryManager.getInstance().saveRepositoryEntry(re);
		DBFactory.getInstance().intermediateCommit();

		//create learn group
		BaseSecurity secm = BaseSecurityManager.getInstance();

		// 1) context one: learning groups
		RepositoryEntry c1 = JunitTestHelper.createAndPersistRepositoryEntry();
		// create groups without waiting list
		g1 = businessGroupService.createBusinessGroup(null, "rest-g1", null, 0, 10, false, false, c1);
		g2 = businessGroupService.createBusinessGroup(null, "rest-g2", null, 0, 10, false, false, c1);
		//permission to see owners and participants
		// g1 hides members; g2 shows owners and participants (exercised by testGetParticipants/testGetOwners)
		businessGroupService.updateDisplayMembers(g1, false, false, false, false, false, false, false);
		businessGroupService.updateDisplayMembers(g2, true, true, false, false, false, false, false);

		// members g1
		secm.addIdentityToSecurityGroup(owner1, g1.getOwnerGroup());
		secm.addIdentityToSecurityGroup(owner2, g1.getOwnerGroup());
		secm.addIdentityToSecurityGroup(part1, g1.getPartipiciantGroup());
		secm.addIdentityToSecurityGroup(part2, g1.getPartipiciantGroup());

		// members g2
		secm.addIdentityToSecurityGroup(owner1, g2.getOwnerGroup());
		secm.addIdentityToSecurityGroup(part1, g2.getPartipiciantGroup());

		// 2) context two: right groups
		RepositoryEntry c2 = JunitTestHelper.createAndPersistRepositoryEntry();
		// groups
		g3 = businessGroupService.createBusinessGroup(null, "rest-g3", null, -1, -1, false, false, c2);
		g4 = businessGroupService.createBusinessGroup(null, "rest-g4", null, -1, -1, false, false, c2);
		// members
		secm.addIdentityToSecurityGroup(owner1, g3.getPartipiciantGroup());
		secm.addIdentityToSecurityGroup(owner2, g4.getPartipiciantGroup());

		DBFactory.getInstance().closeSession(); // simulate user clicks

		//3) collaboration tools
		CollaborationTools collabTools1 = CollaborationToolsFactory.getInstance().getOrCreateCollaborationTools(g1);
		collabTools1.setToolEnabled(CollaborationTools.TOOL_FORUM, true);
		collabTools1.setToolEnabled(CollaborationTools.TOOL_WIKI, true);
		collabTools1.saveNews("<p>Hello world</p>");

		try {
			collabTools1.createForumController(null, null, true, false, null);
		} catch (Exception e) {
			//will fail but generate the forum key
		}

		CollaborationTools collabTools2 = CollaborationToolsFactory.getInstance().getOrCreateCollaborationTools(g2);
		collabTools2.setToolEnabled(CollaborationTools.TOOL_FORUM, true);

		DBFactory.getInstance().closeSession(); // simulate user clicks

		//4) fill forum for g1
		NarrowedPropertyManager npm = NarrowedPropertyManager.getInstance(g1);
		Property forumKeyProperty = npm.findProperty(null, null, CollaborationTools.PROP_CAT_BG_COLLABTOOLS, CollaborationTools.KEY_FORUM);
		ForumManager fm = ForumManager.getInstance();
		Forum forum = fm.loadForum(forumKeyProperty.getLongValue());

		m1 = fm.createMessage();
		m1.setTitle("Thread-1");
		m1.setBody("Body of Thread-1");
		fm.addTopMessage(owner1, forum, m1);

		m2 = fm.createMessage();
		m2.setTitle("Thread-2");
		m2.setBody("Body of Thread-2");
		fm.addTopMessage(owner2, forum, m2);

		DBFactory.getInstance().intermediateCommit();

		m3 = fm.createMessage();
		m3.setTitle("Message-1.1");
		m3.setBody("Body of Message-1.1");
		fm.replyToMessage(m3, owner3, m1);

		m4 = fm.createMessage();
		m4.setTitle("Message-1.1.1");
		m4.setBody("Body of Message-1.1.1");
		fm.replyToMessage(m4, part1, m3);

		m5 = fm.createMessage();
		m5.setTitle("Message-1.2");
		m5.setBody("Body of Message-1.2");
		fm.replyToMessage(m5, part2, m1);

		DBFactory.getInstance().intermediateCommit();
	}

	@After
	public void tearDown() throws Exception {
		try {
			if(conn != null) {
				conn.shutdown();
			}
		} catch (Exception e) {
			log.error("Exception in tearDown(): " + e);
			e.printStackTrace();
			throw e;
		}
	}

	/** Admin sees all groups, including g1..g4. */
	@Test
	public void testGetGroupsAdmin() throws IOException, URISyntaxException {
		assertTrue(conn.login("administrator", "openolat"));

		URI request = UriBuilder.fromUri(getContextURI()).path("groups").build();
		HttpGet method = conn.createGet(request, MediaType.APPLICATION_JSON, true);
		HttpResponse response = conn.execute(method);
		assertEquals(200, response.getStatusLine().getStatusCode());
		InputStream body = response.getEntity().getContent();
		List<GroupVO> groups = parseGroupArray(body);
		assertNotNull(groups);
		assertTrue(groups.size() >= 4);//g1, g2, g3 and g4 + from olat

		Set<Long> keys = new HashSet<Long>();
		for(GroupVO vo:groups) {
			keys.add(vo.getKey());
		}

		assertTrue(keys.contains(g1.getKey()));
		assertTrue(keys.contains(g2.getKey()));
		assertTrue(keys.contains(g3.getKey()));
		assertTrue(keys.contains(g4.getKey()));
	}

	/** A participant only sees the groups he belongs to (g1, g2), not g3/g4. */
	@Test
	public void testGetGroups() throws IOException, URISyntaxException {
		assertTrue(conn.login("rest-four", "A6B7C8"));

		URI request = UriBuilder.fromUri(getContextURI()).path("groups").build();
		HttpGet method = conn.createGet(request, MediaType.APPLICATION_JSON, true);
		HttpResponse response = conn.execute(method);
		assertEquals(200, response.getStatusLine().getStatusCode());
		InputStream body = response.getEntity().getContent();
		List<GroupVO> groups = parseGroupArray(body);
		assertNotNull(groups);
		assertTrue(groups.size() >= 2);//g1 and g2 only; g3/g4 are not visible to this user

		Set<Long> keys = new HashSet<Long>();
		for(GroupVO vo:groups) {
			keys.add(vo.getKey());
		}

		assertTrue(keys.contains(g1.getKey()));
		assertTrue(keys.contains(g2.getKey()));
		assertFalse(keys.contains(g3.getKey()));
		assertFalse(keys.contains(g4.getKey()));
	}

	/** GET a single group by key. */
	@Test
	public void testGetGroupAdmin() throws IOException, URISyntaxException {
		assertTrue(conn.login("administrator", "openolat"));

		URI request = UriBuilder.fromUri(getContextURI()).path("groups").path(g1.getKey().toString()).build();
		HttpGet method = conn.createGet(request, MediaType.APPLICATION_JSON, true);
		HttpResponse response = conn.execute(method);
		assertEquals(200, response.getStatusLine().getStatusCode());
		GroupVO vo = conn.parse(response, GroupVO.class);
		assertNotNull(vo);
		assertEquals(vo.getKey(), g1.getKey());
	}

	/** g1 has wiki + news + a forum key (forum was created in setUp). */
	@Test
	public void testGetGroupInfos() throws IOException, URISyntaxException {
		assertTrue(conn.login("administrator", "openolat"));

		URI request = UriBuilder.fromUri(getContextURI()).path("/groups/" + g1.getKey() + "/infos").build();
		HttpGet method = conn.createGet(request, MediaType.APPLICATION_JSON, true);
		HttpResponse response = conn.execute(method);
		assertEquals(200, response.getStatusLine().getStatusCode());
		GroupInfoVO vo = conn.parse(response, GroupInfoVO.class);
		assertNotNull(vo);
		assertEquals(Boolean.TRUE, vo.getHasWiki());
		assertEquals("<p>Hello world</p>", vo.getNews());
		assertNotNull(vo.getForumKey());
	}

	//the web service generate the forum key
	/** g2 has no wiki/news, but the infos call itself generates the forum key. */
	@Test
	public void testGetGroupInfos2() throws IOException, URISyntaxException {
		assertTrue(conn.login("administrator", "openolat"));

		URI request = UriBuilder.fromUri(getContextURI()).path("/groups/" + g2.getKey() + "/infos").build();
		HttpGet method = conn.createGet(request, MediaType.APPLICATION_JSON, true);
		HttpResponse response = conn.execute(method);
		assertEquals(200, response.getStatusLine().getStatusCode());
		GroupInfoVO vo = conn.parse(response, GroupInfoVO.class);
		assertNotNull(vo);
		assertEquals(Boolean.FALSE, vo.getHasWiki());
		assertNull(vo.getNews());
		assertNotNull(vo.getForumKey());
	}

	/** g1's forum has exactly the two threads seeded in setUp. */
	@Test
	public void testGetThreads() throws IOException, URISyntaxException {
		assertTrue(conn.login("rest-one", "A6B7C8"));

		URI request = UriBuilder.fromUri(getContextURI()).path("/groups/" + g1.getKey() + "/forum/threads").build();
		HttpGet method = conn.createGet(request, MediaType.APPLICATION_JSON, true);
		HttpResponse response = conn.execute(method);
		assertEquals(200, response.getStatusLine().getStatusCode());
		InputStream body = response.getEntity().getContent();
		List<MessageVO> messages = parseMessageArray(body);

		assertNotNull(messages);
		assertEquals(2, messages.size());
	}

	/** Thread m1 has 4 posts: m1 itself plus replies m3, m4, m5. */
	@Test
	public void testGetMessages() throws IOException, URISyntaxException {
		assertTrue(conn.login("rest-one", "A6B7C8"));

		URI request = UriBuilder.fromUri(getContextURI()).path("/groups/" + g1.getKey() + "/forum/posts/" + m1.getKey()).build();
		HttpGet method = conn.createGet(request, MediaType.APPLICATION_JSON, true);
		HttpResponse response = conn.execute(method);
		assertEquals(200, response.getStatusLine().getStatusCode());
		InputStream body = response.getEntity().getContent();
		List<MessageVO> messages = parseMessageArray(body);

		assertNotNull(messages);
		assertEquals(4, messages.size());
	}

	/** POST on /groups/{key} updates name and description. */
	@Test
	public void testUpdateCourseGroup() throws IOException, URISyntaxException {
		assertTrue(conn.login("administrator", "openolat"));

		GroupVO vo = new GroupVO();
		vo.setKey(g1.getKey());
		vo.setName("rest-g1-mod");
		vo.setDescription("rest-g1 description");
		vo.setMinParticipants(g1.getMinParticipants());
		vo.setMaxParticipants(g1.getMaxParticipants());
		vo.setType(g1.getType());

		URI request = UriBuilder.fromUri(getContextURI()).path("/groups/" + g1.getKey()).build();
		HttpPost method = conn.createPost(request, MediaType.APPLICATION_JSON);
		conn.addJsonEntity(method, vo);
		HttpResponse response = conn.execute(method);
		assertTrue(response.getStatusLine().getStatusCode() == 200 || response.getStatusLine().getStatusCode() == 201);
		EntityUtils.consume(response.getEntity());

		BusinessGroup bg = businessGroupService.loadBusinessGroup(g1.getKey());
		assertNotNull(bg);
		assertEquals(bg.getKey(), vo.getKey());
		assertEquals(bg.getName(), "rest-g1-mod");
		assertEquals(bg.getDescription(), "rest-g1 description");
	}

	/** PUT on /groups creates a new group and returns its VO. */
	@Test
	public void testCreateCourseGroup() throws IOException, URISyntaxException {
		assertTrue(conn.login("administrator", "openolat"));

		GroupVO vo = new GroupVO();
		vo.setName("rest-g5-new");
		vo.setDescription("rest-g5 description");
		vo.setType("BuddyGroup");

		URI request = UriBuilder.fromUri(getContextURI()).path("groups").build();
		HttpPut method = conn.createPut(request, MediaType.APPLICATION_JSON, true);
		conn.addJsonEntity(method, vo);
		HttpResponse response = conn.execute(method);
		assertTrue(response.getStatusLine().getStatusCode() == 200 || response.getStatusLine().getStatusCode() == 201);
		GroupVO newGroupVo = conn.parse(response, GroupVO.class);
		assertNotNull(newGroupVo);

		BusinessGroup bg = businessGroupService.loadBusinessGroup(newGroupVo.getKey());
		assertNotNull(bg);
		assertEquals(bg.getKey(), newGroupVo.getKey());
		assertEquals(bg.getName(), "rest-g5-new");
		assertEquals(bg.getDescription(), "rest-g5 description");
	}

	/** Create a group, then POST its collaboration-tools configuration and verify it stuck. */
	@Test
	public void testCreateCourseGroupWithConfiguration() throws IOException, URISyntaxException {
		assertTrue(conn.login("administrator", "openolat"));

		//create the group
		GroupVO vo = new GroupVO();
		vo.setName("rest-g6-new");
		vo.setDescription("rest-g6 description");
		vo.setType("BuddyGroup");

		URI request = UriBuilder.fromUri(getContextURI()).path("groups").build();
		HttpPut method = conn.createPut(request, MediaType.APPLICATION_JSON, true);
		conn.addJsonEntity(method, vo);
		HttpResponse response = conn.execute(method);
		assertTrue(response.getStatusLine().getStatusCode() == 200 || response.getStatusLine().getStatusCode() == 201);
		GroupVO newGroupVo = conn.parse(response, GroupVO.class);
		assertNotNull(newGroupVo);

		//update the configuration
		GroupConfigurationVO configVo = new GroupConfigurationVO();
		configVo.setTools(new String[]{ "hasFolder", "hasNews" });
		configVo.setOwnersVisible(Boolean.TRUE);
		configVo.setParticipantsVisible(Boolean.FALSE);
		URI configRequest = UriBuilder.fromUri(getContextURI()).path("groups").path(newGroupVo.getKey().toString()).path("configuration").build();
		HttpPost configMethod = conn.createPost(configRequest, MediaType.APPLICATION_JSON);
		conn.addJsonEntity(configMethod, configVo);
		HttpResponse configResponse = conn.execute(configMethod);
		assertTrue(configResponse.getStatusLine().getStatusCode() == 200 || configResponse.getStatusLine().getStatusCode() == 201);
		EntityUtils.consume(configResponse.getEntity());

		//check group
		BusinessGroup bg = businessGroupService.loadBusinessGroup(newGroupVo.getKey());
		assertNotNull(bg);
		assertEquals(bg.getKey(), newGroupVo.getKey());
		assertEquals(bg.getName(), "rest-g6-new");
		assertEquals(bg.getDescription(), "rest-g6 description");
		//check collaboration tools configuration
		CollaborationTools tools = CollaborationToolsFactory.getInstance().getCollaborationToolsIfExists(bg);
		assertNotNull(tools);
		assertTrue(tools.isToolEnabled(CollaborationTools.TOOL_FOLDER));
		assertTrue(tools.isToolEnabled(CollaborationTools.TOOL_NEWS));
		assertFalse(tools.isToolEnabled(CollaborationTools.TOOL_CALENDAR));
		assertFalse(tools.isToolEnabled(CollaborationTools.TOOL_CHAT));
		assertFalse(tools.isToolEnabled(CollaborationTools.TOOL_CONTACT));
		assertFalse(tools.isToolEnabled(CollaborationTools.TOOL_FORUM));
		assertFalse(tools.isToolEnabled(CollaborationTools.TOOL_PORTFOLIO));
		assertFalse(tools.isToolEnabled(CollaborationTools.TOOL_WIKI));
		//check display members
		assertTrue(bg.isOwnersVisibleIntern());
		assertFalse(bg.isParticipantsVisibleIntern());
		assertFalse(bg.isWaitingListVisibleIntern());
	}

	/** DELETE removes the group from the database. */
	@Test
	public void testDeleteCourseGroup() throws IOException, URISyntaxException {
		assertTrue(conn.login("administrator", "openolat"));

		URI request = UriBuilder.fromUri(getContextURI()).path("/groups/" + g1.getKey()).build();
		HttpDelete method = conn.createDelete(request, MediaType.APPLICATION_JSON);
		HttpResponse response = conn.execute(method);
		assertEquals(200, response.getStatusLine().getStatusCode());

		BusinessGroup bg = businessGroupService.loadBusinessGroup(g1.getKey());
		assertNull(bg);
	}

	/** Admin may list participants even though g1 hides its members. */
	@Test
	public void testGetParticipantsAdmin() throws IOException, URISyntaxException {
		assertTrue(conn.login("administrator", "openolat"));

		URI request = UriBuilder.fromUri(getContextURI()).path("/groups/" + g1.getKey() + "/participants").build();
		HttpGet method = conn.createGet(request, MediaType.APPLICATION_JSON, true);
		HttpResponse response = conn.execute(method);
		assertEquals(200, response.getStatusLine().getStatusCode());
		InputStream body = response.getEntity().getContent();
		List<UserVO> participants = parseUserArray(body);
		assertNotNull(participants);
		assertEquals(participants.size(), 2);

		Long idKey1 = null;
		Long idKey2 = null;
		for(UserVO participant:participants) {
			if(participant.getKey().equals(part1.getKey())) {
				idKey1 = part1.getKey();
			} else if(participant.getKey().equals(part2.getKey())) {
				idKey2 = part2.getKey();
			}
		}
		assertNotNull(idKey1);
		assertNotNull(idKey2);
	}

	/** A plain member may not list g1's participants (members hidden). */
	@Test
	public void testGetParticipants() throws IOException, URISyntaxException {
		assertTrue(conn.login("rest-four", "A6B7C8"));

		URI request = UriBuilder.fromUri(getContextURI()).path("/groups/" + g1.getKey() + "/participants").build();
		HttpGet method = conn.createGet(request, MediaType.APPLICATION_JSON, true);
		HttpResponse response = conn.execute(method);

		//g1 not authorized
		assertEquals(401, response.getStatusLine().getStatusCode());
	}

	/** Admin may list owners of g1. */
	@Test
	public void testGetOwnersAdmin() throws IOException, URISyntaxException {
		assertTrue(conn.login("administrator", "openolat"));

		URI request = UriBuilder.fromUri(getContextURI()).path("/groups/" + g1.getKey() + "/owners").build();
		HttpGet method = conn.createGet(request, MediaType.APPLICATION_JSON, true);
		HttpResponse response = conn.execute(method);
		assertEquals(200, response.getStatusLine().getStatusCode());
		InputStream body = response.getEntity().getContent();
		List<UserVO> owners = parseUserArray(body);
		assertNotNull(owners);
		assertEquals(owners.size(), 2);

		Long idKey1 = null;
		Long idKey2 = null;
		for(UserVO participant:owners) {
			if(participant.getKey().equals(owner1.getKey())) {
				idKey1 = owner1.getKey();
			} else if(participant.getKey().equals(owner2.getKey())) {
				idKey2 = owner2.getKey();
			}
		}
		assertNotNull(idKey1);
		assertNotNull(idKey2);
	}

	/** A plain member may not list g1's owners (members hidden). */
	@Test
	public void testGetOwners() throws IOException, URISyntaxException {
		assertTrue(conn.login("rest-four", "A6B7C8"));

		URI request = UriBuilder.fromUri(getContextURI()).path("/groups/" + g1.getKey() + "/owners").build();
		HttpGet method = conn.createGet(request, MediaType.APPLICATION_JSON, true);
		HttpResponse response = conn.execute(method);

		//not authorized
		assertEquals(401, response.getStatusLine().getStatusCode());
	}

	/** PUT adds part3 as participant of g1. */
	@Test
	public void testAddParticipant() throws IOException, URISyntaxException {
		assertTrue(conn.login("administrator", "openolat"));

		URI request = UriBuilder.fromUri(getContextURI()).path("/groups/" + g1.getKey() + "/participants/" + part3.getKey()).build();
		HttpPut method = conn.createPut(request, MediaType.APPLICATION_JSON, true);
		HttpResponse response = conn.execute(method);
		assertTrue(response.getStatusLine().getStatusCode() == 200 || response.getStatusLine().getStatusCode() == 201);

		BaseSecurity secm = BaseSecurityManager.getInstance();
		List<Identity> participants = secm.getIdentitiesOfSecurityGroup(g1.getPartipiciantGroup());
		boolean found = false;
		for(Identity participant:participants) {
			if(participant.getKey().equals(part3.getKey())) {
				found = true;
			}
		}

		assertTrue(found);
	}

	/** DELETE removes part2 from g1's participants. */
	@Test
	public void testRemoveParticipant() throws IOException, URISyntaxException {
		assertTrue(conn.login("administrator", "openolat"));

		URI request = UriBuilder.fromUri(getContextURI()).path("/groups/" + g1.getKey() + "/participants/" + part2.getKey()).build();
		HttpDelete method = conn.createDelete(request, MediaType.APPLICATION_JSON);
		HttpResponse response = conn.execute(method);
		assertEquals(200, response.getStatusLine().getStatusCode());

		BaseSecurity secm = BaseSecurityManager.getInstance();
		List<Identity> participants = secm.getIdentitiesOfSecurityGroup(g1.getPartipiciantGroup());
		boolean found = false;
		for(Identity participant:participants) {
			if(participant.getKey().equals(part2.getKey())) {
				found = true;
			}
		}

		assertFalse(found);
	}

	/** PUT adds owner3 as tutor (owner) of g1. */
	@Test
	public void testAddTutor() throws IOException, URISyntaxException {
		assertTrue(conn.login("administrator", "openolat"));

		URI request = UriBuilder.fromUri(getContextURI()).path("/groups/" + g1.getKey() + "/owners/" + owner3.getKey()).build();
		HttpPut method = conn.createPut(request, MediaType.APPLICATION_JSON, true);
		HttpResponse response = conn.execute(method);
		assertTrue(response.getStatusLine().getStatusCode() == 200 || response.getStatusLine().getStatusCode() == 201);

		BaseSecurity secm = BaseSecurityManager.getInstance();
		List<Identity> owners = secm.getIdentitiesOfSecurityGroup(g1.getOwnerGroup());
		boolean found = false;
		for(Identity owner:owners) {
			if(owner.getKey().equals(owner3.getKey())) {
				found = true;
			}
		}

		assertTrue(found);
	}

	/** DELETE removes owner2 from g1's owners. */
	@Test
	public void testRemoveTutor() throws IOException, URISyntaxException {
		assertTrue(conn.login("administrator", "openolat"));

		URI request = UriBuilder.fromUri(getContextURI()).path("/groups/" + g1.getKey() + "/owners/" + owner2.getKey()).build();
		HttpDelete method = conn.createDelete(request, MediaType.APPLICATION_JSON);
		HttpResponse response = conn.execute(method);
		assertEquals(200, response.getStatusLine().getStatusCode());

		BaseSecurity secm = BaseSecurityManager.getInstance();
		List<Identity> owners = secm.getIdentitiesOfSecurityGroup(g1.getOwnerGroup());
		boolean found = false;
		for(Identity owner:owners) {
			if(owner.getKey().equals(owner2.getKey())) {
				found = true;
			}
		}

		assertFalse(found);
	}

	// Deserializes a JSON array of users; returns null on parse failure.
	protected List<UserVO> parseUserArray(InputStream body) {
		try {
			ObjectMapper mapper = new ObjectMapper(jsonFactory);
			return mapper.readValue(body, new TypeReference<List<UserVO>>(){/* */});
		} catch (Exception e) {
			e.printStackTrace();
			return null;
		}
	}

	// Deserializes a JSON array of groups; returns null on parse failure.
	protected List<GroupVO> parseGroupArray(InputStream body) {
		try {
			ObjectMapper mapper = new ObjectMapper(jsonFactory);
			return mapper.readValue(body, new TypeReference<List<GroupVO>>(){/* */});
		} catch (Exception e) {
			e.printStackTrace();
			return null;
		}
	}

	// Deserializes a JSON array of forum messages; returns null on parse failure.
	protected List<MessageVO> parseMessageArray(InputStream body) {
		try {
			ObjectMapper mapper = new ObjectMapper(jsonFactory);
			return mapper.readValue(body, new TypeReference<List<MessageVO>>(){/* */});
		} catch (Exception e) {
			e.printStackTrace();
			return null;
		}
	}
}
stevenhva/InfoLearn_OpenOLAT
src/test/java/org/olat/restapi/GroupMgmtTest.java
Java
apache-2.0
26,816
package com.example.tests;

import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;

import org.testng.annotations.AfterTest;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.DataProvider;

import com.example.fw.ApplicationManager;

import static com.example.tests.GroupDataGenerator.generateRandomGroups;
import static com.example.tests.GroupDataGenerator.loadGroupsFromXMLFile;
import static com.example.tests.ContactDataGenerator.generateRandomContacts;

/**
 * Common TestNG base class: owns the shared ApplicationManager and supplies
 * data providers for group and contact test data.
 */
public class BaseForTests {

	protected static ApplicationManager app;
	/* we need static app in order to run several test classes in one TestNG suite:
	   otherwise the reference would only live in the test-class instance created for
	   the first test; for the second test class a new object (of another class) is
	   created — and in it the reference to ApplicationManager would never be
	   initialized. (Translated from the original Russian comment.) */

	// Database-vs-UI consistency checks are performed only every
	// (checkFrequency + 1)-th call to wantToCheck(); 0 means check every time.
	private int checkFrequency;
	private int checkCounter;

	@BeforeTest
	public void setUp() throws Exception {
		String configFile = System.getProperty("configFile","application.properties"); // required by user, default
		Properties properties = new Properties();
		// NOTE(review): FileReader uses the platform default charset here — confirm the
		// properties file is encoded accordingly.
		properties.load(new FileReader(new File(configFile)));
		app = new ApplicationManager(properties);
		checkCounter = 0;
		checkFrequency = Integer.parseInt(properties.getProperty("check.frequency","0"));
	}

	// Returns true (and resets the counter) once every checkFrequency + 1 calls.
	protected boolean wantToCheck(){
		checkCounter++;
		if (checkCounter > checkFrequency) {
			checkCounter = 0;
			return true;
		}else{
			return false;
		}
	}

	@AfterTest
	public void tearDown() throws Exception {
		app.stop();
	}

	// Data provider: group fixtures loaded from groups.xml.
	@DataProvider
	public Iterator<Object[]> groupsFromFile() throws IOException {
		return wrapGroupsForDataProvider(loadGroupsFromXMLFile(new File("groups.xml"))).iterator();
	}

	// Data provider: one randomly generated valid group per invocation.
	@DataProvider
	public Iterator<Object[]> randomValidGroupGenerator(){
		return wrapGroupsForDataProvider(generateRandomGroups(1)).iterator();
	}

	// Data provider: one randomly generated valid contact per invocation.
	@DataProvider
	public Iterator<Object[]> randomValidContactGenerator(){
		return wrapContactsForDataProvider(generateRandomContacts(1)).iterator();
	}

	// Wraps each group into a one-element Object[] parameter set, the shape TestNG
	// data providers expect: a list of parameter sets of arbitrary types — here,
	// single-element sets. (Translated from the original Russian comments.)
	public static List<Object[]> wrapGroupsForDataProvider(List<GroupData> groups) {
		List<Object[]> list = new ArrayList<Object[]>();
		for (GroupData group: groups) {
			list.add(new Object[]{group});
		}
		return list;
	}

	// Same wrapping for contact fixtures.
	public static List<Object[]> wrapContactsForDataProvider(List<ContactData> contacts) {
		List<Object[]> list = new ArrayList<Object[]>();
		for (ContactData contact: contacts) {
			list.add(new Object[]{contact});
		}
		return list;
	}
}
Jineta/dev_java_test
addressbook/src/com/example/tests/BaseForTests.java
Java
apache-2.0
2,971
package com.medievallords.carbyne.duels.duel;

import com.medievallords.carbyne.Carbyne;
import com.medievallords.carbyne.duels.arena.Arena;
import com.medievallords.carbyne.duels.duel.request.DuelRequest;
import com.medievallords.carbyne.duels.duel.types.RegularDuel;
import com.medievallords.carbyne.duels.duel.types.SquadDuel;
import com.medievallords.carbyne.utils.LocationSerialization;
import org.bukkit.Location;
import org.bukkit.configuration.ConfigurationSection;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;
import java.util.logging.Level;

/**
 * Created by xwiena22 on 2017-03-14.
 *
 * Keeps the registry of duel arenas (loaded from the plugin's arenas config)
 * and the list of currently running duels.
 */
public class DuelManager {

    private Carbyne main = Carbyne.getInstance();

    private List<Duel> duels = new ArrayList<>();
    private List<Arena> arenas = new ArrayList<>();

    public DuelManager() {
        loadArenas();
    }

    /**
     * (Re)loads all arenas from the "Arenas" section of the arenas configuration.
     * An arena may legitimately have no lobby location; spawn point and pedastool
     * locations are deserialized from their string lists.
     */
    public void loadArenas() {
        if (!arenas.isEmpty())
            arenas.clear();

        ConfigurationSection section = main.getArenasFileConfiguration().getConfigurationSection("Arenas");

        // Fix: getConfigurationSection returns null when the "Arenas" node is
        // missing entirely — the original code NPE'd on section.getKeys(false).
        if (section == null || section.getKeys(false).isEmpty()) {
            return;
        }

        main.getLogger().log(Level.INFO, "Preparing to load " + section.getKeys(false).size() + " arenas.");

        for (String id : section.getKeys(false)) {
            Location lobbyLocation = null;
            ArrayList<Location> spawnPointLocations = new ArrayList<>();
            ArrayList<Location> pedastoolLocations = new ArrayList<>();

            // Lobby is optional: only deserialize when a non-empty string is present.
            if (section.getString(id + ".LobbyLocation") != null && !section.getString(id + ".LobbyLocation").isEmpty())
                lobbyLocation = LocationSerialization.deserializeLocation(section.getString(id + ".LobbyLocation"));

            for (String s : section.getStringList(id + ".SpawnPointLocations"))
                spawnPointLocations.add(LocationSerialization.deserializeLocation(s));

            for (String s : section.getStringList(id + ".PedastoolLocations"))
                pedastoolLocations.add(LocationSerialization.deserializeLocation(s));

            Arena arena = new Arena(id);
            arena.setLobbyLocation(lobbyLocation);
            arena.setSpawnPointLocations(spawnPointLocations.toArray(new Location[2]));
            arena.setPedastoolLocations(pedastoolLocations.toArray(new Location[2]));
            arenas.add(arena);
        }

        main.getLogger().log(Level.INFO, "Successfully loaded " + arenas.size() + " arenas.");
    }

    /**
     * Returns the running duel played in the arena with the given id
     * (case-insensitive), or null if no such arena or duel exists.
     */
    public Duel getDuel(String arenaName) {
        // Fix: renamed the loop variable — the original shadowed the `arenas`
        // field with `for (Arena arenas : arenas)`.
        Arena match = null;
        for (Arena candidate : arenas)
            if (candidate.getArenaId().equalsIgnoreCase(arenaName))
                match = candidate;

        // Fix: bail out early instead of comparing against a null arena, which
        // could NPE when a duel's arena is null.
        if (match == null)
            return null;

        for (Duel duel : duels)
            if (match.equals(duel.getArena()))
                return duel;

        return null;
    }

    /**
     * Returns the duel the given player (by UUID) is participating in, covering
     * both regular (1v1) and squad duels, or null if the player is not dueling.
     */
    public Duel getDuelFromUUID(UUID uuid) {
        for (Duel duel : duels) {
            if (duel instanceof RegularDuel) {
                RegularDuel regularDuel = (RegularDuel) duel;

                if (Arrays.asList(regularDuel.getParticipants()).contains(uuid))
                    return duel;
            } else if (duel instanceof SquadDuel) {
                SquadDuel squadDuel = (SquadDuel) duel;

                if (squadDuel.getSquadOne().getAllPlayers().contains(uuid) || squadDuel.getSquadTwo().getAllPlayers().contains(uuid))
                    return duel;
            }
        }

        return null;
    }

    /** Looks up an arena by id (case-insensitive); null if unknown. */
    public Arena getArena(String arenaId) {
        for (Arena arena : arenas)
            if (arena.getArenaId().equalsIgnoreCase(arenaId))
                return arena;

        return null;
    }

    /**
     * Looks up the arena owning the given location (lobby, spawn point or
     * pedastool); null if no arena matches.
     */
    public Arena getArena(Location location) {
        for (Arena arena : arenas) {
            // Fix: lobbyLocation may be null (loadArenas allows it) — the
            // original unconditionally called equals() on it and NPE'd.
            Location lobby = arena.getLobbyLocation();
            if ((lobby != null && lobby.equals(location))
                    || Arrays.asList(arena.getSpawnPointLocations()).contains(location)
                    || Arrays.asList(arena.getPedastoolLocations()).contains(location))
                return arena;
        }

        return null;
    }

    public List<Arena> getArenas() {
        return arenas;
    }

    public List<Duel> getDuels() {
        return duels;
    }

    /** Ends every running duel and cancels every pending duel request. */
    public void cancelAll() {
        cancelDuels();
        cancelRequests();
    }

    /**
     * Ends all running duels. Iterates over a snapshot because Duel.end() may
     * remove the duel from the live list, which would otherwise throw a
     * ConcurrentModificationException mid-iteration.
     */
    public void cancelDuels() {
        for (Duel duel : new ArrayList<>(duels)) {
            if (duel == null)
                continue;

            duel.end(null);
        }
    }

    /**
     * Cancels all pending duel requests. Snapshot iteration for the same
     * reason as cancelDuels(): cancel() may mutate the shared request list.
     */
    public void cancelRequests() {
        for (DuelRequest request : new ArrayList<>(DuelRequest.requests)) {
            if (request == null)
                continue;

            request.cancel();
        }
    }
}
YoungOG/CarbyneCore
src/com/medievallords/carbyne/duels/duel/DuelManager.java
Java
apache-2.0
4,655
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.camel.processor;

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;

import org.apache.camel.ContextTestSupport;
import org.apache.camel.Exchange;
import org.apache.camel.Processor;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.mock.MockEndpoint;
import org.apache.camel.support.ServiceSupport;
import org.apache.camel.throttling.ThrottlingExceptionHalfOpenHandler;
import org.apache.camel.throttling.ThrottlingExceptionRoutePolicy;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.awaitility.Awaitility.await;

/**
 * Tests the ThrottlingExceptionRoutePolicy circuit breaker on a SEDA
 * consumer: after enough failures the circuit opens (consumer suspended),
 * queued messages wait, and the half-open handler closes it again.
 */
public class ThrottlingExceptionRoutePolicyHalfOpenHandlerSedaTest extends ContextTestSupport {

    private static Logger log = LoggerFactory.getLogger(ThrottlingExceptionRoutePolicyHalfOpenHandlerSedaTest.class);

    // Two concurrent consumers so the route keeps draining while messages queue up.
    private String url = "seda:foo?concurrentConsumers=2";
    private MockEndpoint result;

    @Before
    public void setUp() throws Exception {
        super.setUp();
        this.setUseRouteBuilder(true);
        result = getMockEndpoint("mock:result");
        // Short shutdown timeout keeps the test fast when suspending the consumer.
        context.getShutdownStrategy().setTimeout(1);
    }

    @Test
    public void testHalfOpenCircuit() throws Exception {
        result.expectedMessageCount(2);
        List<String> bodies = Arrays.asList("Message One", "Message Two");
        result.expectedBodiesReceivedInAnyOrder(bodies);
        // Force every received exchange to fail with a ThrottlingException so the
        // failure threshold (2) is reached and the circuit opens.
        result.whenAnyExchangeReceived(new Processor() {
            @Override
            public void process(Exchange exchange) throws Exception {
                String msg = exchange.getIn().getBody(String.class);
                exchange.setException(new ThrottlingException(msg));
            }
        });

        // send two messages which will fail
        sendMessage("Message One");
        sendMessage("Message Two");

        final ServiceSupport consumer = (ServiceSupport) context.getRoute("foo").getConsumer();

        // wait long enough to have the consumer suspended
        await().atMost(2, TimeUnit.SECONDS).until(consumer::isSuspended);

        // send more messages
        // but they should NOT get through (yet)
        // due to open circuit
        // SEDA will queue it up
        log.debug("sending message three");
        sendMessage("Message Three");

        assertMockEndpointsSatisfied();

        result.reset();
        result.expectedMessageCount(2);
        bodies = Arrays.asList("Message Three", "Message Four");
        result.expectedBodiesReceivedInAnyOrder(bodies);

        // wait long enough to have the consumer resumed
        // (AlwaysCloseHandler closes the circuit on the first half-open attempt)
        await().atMost(2, TimeUnit.SECONDS).until(consumer::isStarted);

        // send message
        // should get through
        log.debug("sending message four");
        sendMessage("Message Four");

        assertMockEndpointsSatisfied();
    }

    @Override
    protected RouteBuilder createRouteBuilder() throws Exception {
        return new RouteBuilder() {
            @Override
            public void configure() throws Exception {
                // Open the circuit after 2 failures within a 30ms window; try
                // half-open after 250ms.
                int threshold = 2;
                long failureWindow = 30;
                long halfOpenAfter = 250;
                ThrottlingExceptionRoutePolicy policy = new ThrottlingExceptionRoutePolicy(threshold, failureWindow, halfOpenAfter, null);
                policy.setHalfOpenHandler(new AlwaysCloseHandler());

                from(url).routeId("foo")
                    .routePolicy(policy)
                    .log("${body}")
                    .to("log:foo?groupSize=10")
                    .to("mock:result");
            }
        };
    }

    // Half-open handler that always reports the downstream as healthy, so the
    // circuit closes on its first half-open attempt.
    public class AlwaysCloseHandler implements ThrottlingExceptionHalfOpenHandler {
        @Override
        public boolean isReadyToBeClosed() {
            return true;
        }
    }

    // Sends a body to the SEDA endpoint; failures are expected while the
    // circuit is open, so they are only logged at debug level.
    protected void sendMessage(String bodyText) {
        try {
            template.sendBody(url, bodyText);
        } catch (Exception e) {
            log.debug("Error sending:" + e.getCause().getMessage());
        }
    }
}
kevinearls/camel
camel-core/src/test/java/org/apache/camel/processor/ThrottlingExceptionRoutePolicyHalfOpenHandlerSedaTest.java
Java
apache-2.0
4,945
/**********************************************************************************
 * $URL: https://source.sakaiproject.org/svn/osp/tags/sakai-10.1/common/tool-lib/src/java/org/theospi/portfolio/style/tool/AddStyleController.java $
 * $Id: AddStyleController.java 105079 2012-02-24 23:08:11Z ottenhoff@longsight.com $
 ***********************************************************************************
 *
 * Copyright (c) 2005, 2006, 2008 The Sakai Foundation
 *
 * Licensed under the Educational Community License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.opensource.org/licenses/ECL-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 **********************************************************************************/
package org.theospi.portfolio.style.tool;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.sakaiproject.component.cover.ComponentManager;
import org.sakaiproject.content.api.ContentHostingService;
import org.sakaiproject.content.api.FilePickerHelper;
import org.sakaiproject.entity.api.EntityManager;
import org.sakaiproject.entity.api.Reference;
import org.sakaiproject.exception.IdUnusedException;
import org.sakaiproject.exception.PermissionException;
import org.sakaiproject.exception.TypeException;
import org.sakaiproject.metaobj.shared.model.Id;
import org.sakaiproject.metaobj.utils.mvc.intf.CustomCommandController;
import org.sakaiproject.metaobj.utils.mvc.intf.FormController;
import org.sakaiproject.metaobj.utils.mvc.intf.LoadObjectController;
import org.sakaiproject.tool.api.SessionManager;
import org.sakaiproject.tool.api.ToolSession;
import org.sakaiproject.tool.cover.ToolManager;
import org.springframework.validation.Errors;
import org.springframework.web.servlet.ModelAndView;
import org.theospi.portfolio.shared.model.Node;
import org.theospi.portfolio.style.model.Style;

/**
 * Spring MVC controller for creating or editing a portfolio Style.
 * Handles the round trip through the Sakai file picker helper: the
 * in-progress Style is parked in the tool session while the user picks a
 * style file, then restored and updated with the chosen resource.
 */
public class AddStyleController extends AbstractStyleController
      implements CustomCommandController, FormController, LoadObjectController {

   /** File-picker action value meaning "choose the style's CSS file". */
   public static final String STYLE_FILE = "osp.style.styleFile";

   /** Session key under which the in-progress Style is parked during file picking. */
   protected static final String STYLE_SESSION_TAG = "osp.style.AddStyleController.style";

   private SessionManager sessionManager;
   private ContentHostingService contentHosting;
   private EntityManager entityManager;

   /**
    * Builds the form backing object: loads the existing Style when a
    * style_id request parameter is present, otherwise creates a new Style
    * owned by the current agent and bound to the current site.
    */
   public Object formBackingObject(Map request, Map session, Map application) {
      Style style;

      Object styleId = request.get("style_id");
      if (styleId != null && !styleId.equals("")) {
         Id id = getIdManager().getId((String) styleId);
         style = getStyleManager().getStyle(id);
      } else {
         style = new Style();
         style.setOwner(getAuthManager().getAgent());
         style.setSiteId(ToolManager.getCurrentPlacement().getContext());
      }
      return style;
   }

   /**
    * Restores the Style parked in the session (returning from the file
    * picker), or falls through to the freshly built backing object.
    */
   public Object fillBackingObject(Object incomingModel, Map request, Map session, Map application) throws Exception {
      if (session.get(STYLE_SESSION_TAG) != null) {
         return session.remove(STYLE_SESSION_TAG);
      } else {
         return incomingModel;
      }
   }

   /**
    * Dispatches the form submit: either redirects into the file picker helper
    * (pre-populating it with the currently selected style file) or saves the
    * Style when the save button was pressed.
    */
   public ModelAndView handleRequest(Object requestModel, Map request, Map session, Map application, Errors errors) {
      Style style = (Style) requestModel;

      if (STYLE_FILE.equals(style.getFilePickerAction())) {
         // park the in-progress style so fillBackingObject can restore it
         // when the user returns from the file picker helper
         session.put(STYLE_SESSION_TAG, style);

         // the condition above already guarantees the style-file action,
         // so the resource filter is always the style-file filter here
         // (the original code re-checked the same condition redundantly)
         String filter = "org.sakaiproject.content.api.ContentResourceFilter.styleFile";
         List<Reference> files = new ArrayList<Reference>();
         String id = "";
         if (style.getStyleFile() != null) {
            id = getContentHosting().resolveUuid(style.getStyleFile().getValue());
         }

         if (id != null && !id.equals("")) {
            // pre-select the current style file inside the picker
            try {
               Reference ref = getEntityManager().newReference(
                     getContentHosting().getResource(id).getReference());
               files.add(ref);
               session.put(FilePickerHelper.FILE_PICKER_ATTACHMENTS, files);
            } catch (PermissionException e) {
               logger.error("failed to pre-select style file " + id, e);
            } catch (IdUnusedException e) {
               logger.error("failed to pre-select style file " + id, e);
            } catch (TypeException e) {
               logger.error("failed to pre-select style file " + id, e);
            }
         }

         session.put(FilePickerHelper.FILE_PICKER_RESOURCE_FILTER, ComponentManager.get(filter));
         // only a single style file may be chosen
         session.put(FilePickerHelper.FILE_PICKER_MAX_ATTACHMENTS, Integer.valueOf(1));
         return new ModelAndView("pickStyleFiles");
      }

      if (request.get("save") != null) {
         style.setSiteId(ToolManager.getCurrentPlacement().getContext());
         save(style, errors);
      }

      return new ModelAndView("success");
   }

   /** Persists the Style via the style manager. */
   protected void save(Style style, Errors errors) {
      getStyleManager().mergeStyle(style);
   }

   /**
    * Builds the view model. When the file picker helper returned with a
    * selection (and was not cancelled), records the chosen node on the Style,
    * then exposes the current style file name for display and clears the
    * picker's session attributes.
    */
   public Map referenceData(Map request, Object command, Errors errors) {
      Map model = new HashMap();
      model.put("STYLE_FILE", STYLE_FILE);
      Style style = (Style) command;

      ToolSession session = getSessionManager().getCurrentToolSession();
      if (session.getAttribute(FilePickerHelper.FILE_PICKER_CANCEL) == null &&
            session.getAttribute(FilePickerHelper.FILE_PICKER_ATTACHMENTS) != null) {
         // here is where we setup the id from the picker's selection
         List refs = (List) session.getAttribute(FilePickerHelper.FILE_PICKER_ATTACHMENTS);

         Id nodeId = null;
         String nodeName = "";
         if (refs.size() == 1) {
            Reference ref = (Reference) refs.get(0);
            Node node = getStyleManager().getNode(ref);
            nodeId = node.getId();
            nodeName = node.getDisplayName();
         }

         if (STYLE_FILE.equals(style.getFilePickerAction())) {
            style.setStyleFile(nodeId);
            style.setStyleFileName(nodeName);
         }
      }

      if (style.getStyleFile() != null) {
         Node styleFile = getStyleManager().getNode(style.getStyleFile());
         model.put("styleFileName", styleFile.getDisplayName());
      }

      session.removeAttribute(FilePickerHelper.FILE_PICKER_ATTACHMENTS);
      session.removeAttribute(FilePickerHelper.FILE_PICKER_CANCEL);
      return model;
   }

   public SessionManager getSessionManager() {
      return sessionManager;
   }

   public void setSessionManager(SessionManager sessionManager) {
      this.sessionManager = sessionManager;
   }

   public ContentHostingService getContentHosting() {
      return contentHosting;
   }

   public void setContentHosting(ContentHostingService contentHosting) {
      this.contentHosting = contentHosting;
   }

   public EntityManager getEntityManager() {
      return entityManager;
   }

   public void setEntityManager(EntityManager entityManager) {
      this.entityManager = entityManager;
   }
}
harfalm/Sakai-10.1
osp/common/tool-lib/src/java/org/theospi/portfolio/style/tool/AddStyleController.java
Java
apache-2.0
7,587
package com.vxml.tag;

import org.w3c.dom.Node;

import com.vxml.core.browser.VxmlBrowser;

/**
 * VXML &lt;else&gt; tag: executes its body only when the matching &lt;if&gt;
 * condition at the same nesting level evaluated to false.
 */
public class ElseTag extends AbstractTag {

    public ElseTag(Node node) {
        super(node);
    }

    @Override
    public void startTag() {
        // nothing to do when the tag opens; the decision happens in execute()
    }

    @Override
    public void execute() {
        // Ask the script engine whether the <if> branch at this nesting level
        // already ran (ifConditionLevel is presumably maintained by
        // AbstractTag — not visible here, TODO confirm).
        Boolean isIfCondition = (Boolean) VxmlBrowser.getContext().executeScript(
                "_vxmlExecutionContext.ifConditionLevel_" + ifConditionLevel);
        // Skip this <else> body when the <if> condition was true.
        // Boolean.TRUE.equals guards against a null result from the script
        // engine: the original unboxed the Boolean directly in an if(),
        // which would throw a NullPointerException on null.
        setSkipExecute(Boolean.TRUE.equals(isIfCondition));
    }
}
catchme1412/vxml-player
com.vxml.browser/src/main/java/com/vxml/tag/ElseTag.java
Java
apache-2.0
583
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.ranger.rest; import java.io.File; import java.security.cert.X509Certificate; import java.util.ArrayList; import java.util.List; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.ws.rs.DELETE; import javax.ws.rs.Encoded; import javax.ws.rs.GET; import javax.ws.rs.POST; import javax.ws.rs.PUT; import javax.ws.rs.Path; import javax.ws.rs.PathParam; import javax.ws.rs.Produces; import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.Context; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response; import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; import org.apache.ranger.admin.client.datatype.RESTResponse; import org.apache.ranger.biz.AssetMgr; import org.apache.ranger.biz.RangerBizUtil; import org.apache.ranger.common.PropertiesUtil; import org.apache.ranger.common.RESTErrorUtil; import org.apache.ranger.common.RangerSearchUtil; import org.apache.ranger.common.SearchCriteria; import org.apache.ranger.common.ServiceUtil; import org.apache.ranger.common.StringUtil; import org.apache.ranger.common.annotation.RangerAnnotationClassName; import 
org.apache.ranger.common.annotation.RangerAnnotationJSMgrName; import org.apache.ranger.db.RangerDaoManager; import org.apache.ranger.entity.XXServiceDef; import org.apache.ranger.plugin.model.RangerPolicy; import org.apache.ranger.plugin.model.RangerService; import org.apache.ranger.plugin.store.EmbeddedServiceDefsUtil; import org.apache.ranger.plugin.util.GrantRevokeRequest; import org.apache.ranger.plugin.util.SearchFilter; import org.apache.ranger.plugin.util.ServicePolicies; import org.apache.ranger.security.context.RangerAPIList; import org.apache.ranger.service.XAccessAuditService; import org.apache.ranger.service.XAssetService; import org.apache.ranger.service.XCredentialStoreService; import org.apache.ranger.service.XPolicyExportAuditService; import org.apache.ranger.service.XPolicyService; import org.apache.ranger.service.XResourceService; import org.apache.ranger.service.XTrxLogService; import org.apache.ranger.view.*; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.context.annotation.Scope; import org.springframework.security.access.prepost.PreAuthorize; import org.springframework.stereotype.Component; import org.springframework.transaction.annotation.Propagation; import org.springframework.transaction.annotation.Transactional; @Path("assets") @Component @Scope("request") @RangerAnnotationJSMgrName("AssetMgr") @Transactional(propagation = Propagation.REQUIRES_NEW) public class AssetREST { private static final Logger logger = Logger.getLogger(AssetREST.class); @Autowired RangerSearchUtil searchUtil; @Autowired AssetMgr assetMgr; @Autowired XAssetService xAssetService; @Autowired XResourceService xResourceService; @Autowired XPolicyService xPolicyService; @Autowired XCredentialStoreService xCredentialStoreService; @Autowired RESTErrorUtil restErrorUtil; @Autowired XPolicyExportAuditService xPolicyExportAudits; @Autowired XTrxLogService xTrxLogService; @Autowired RangerBizUtil msBizUtil; @Autowired 
XAccessAuditService xAccessAuditService;
@Autowired
ServiceUtil serviceUtil;
@Autowired
ServiceREST serviceREST;
@Autowired
RangerDaoManager daoManager;

/**
 * Returns the legacy asset (repository) view of the service with the given id.
 */
@GET
@Path("/assets/{id}")
@Produces({ "application/xml", "application/json" })
@PreAuthorize("@rangerPreAuthSecurityHandler.isAPIAccessible(\"" + RangerAPIList.GET_X_ASSET + "\")")
public VXAsset getXAsset(@PathParam("id") Long id) {
	if(logger.isDebugEnabled()) {
		logger.debug("==> AssetREST.getXAsset(" + id + ")");
	}

	RangerService service = serviceREST.getService(id);

	VXAsset ret = serviceUtil.toVXAsset(service);

	if(logger.isDebugEnabled()) {
		logger.debug("<== AssetREST.getXAsset(" + id + "): " + ret);
	}

	return ret;
}

/**
 * Creates a service from the given legacy asset and returns its asset view.
 */
@POST
@Path("/assets")
@Produces({ "application/xml", "application/json" })
@PreAuthorize("@rangerPreAuthSecurityHandler.isAPIAccessible(\"" + RangerAPIList.CREATE_X_ASSET + "\")")
public VXAsset createXAsset(VXAsset vXAsset) {
	if(logger.isDebugEnabled()) {
		logger.debug("==> AssetREST.createXAsset(" + vXAsset + ")");
	}

	RangerService service = serviceUtil.toRangerService(vXAsset);

	RangerService createdService = serviceREST.createService(service);

	VXAsset ret = serviceUtil.toVXAsset(createdService);

	if(logger.isDebugEnabled()) {
		logger.debug("<== AssetREST.createXAsset(" + vXAsset + "): " + ret);
	}

	return ret;
}

/**
 * Updates the service described by the given legacy asset.
 * NOTE(review): the path declares {id} but the id is taken from the request body,
 * not from a @PathParam — kept as-is for backward compatibility.
 */
@PUT
@Path("/assets/{id}")
@Produces({ "application/xml", "application/json" })
@PreAuthorize("@rangerPreAuthSecurityHandler.isAPIAccessible(\"" + RangerAPIList.UPDATE_X_ASSET + "\")")
public VXAsset updateXAsset(VXAsset vXAsset) {
	if(logger.isDebugEnabled()) {
		logger.debug("==> AssetREST.updateXAsset(" + vXAsset + ")");
	}

	RangerService service = serviceUtil.toRangerService(vXAsset);

	RangerService updatedService = serviceREST.updateService(service, null);

	VXAsset ret = serviceUtil.toVXAsset(updatedService);

	if(logger.isDebugEnabled()) {
		logger.debug("<== AssetREST.updateXAsset(" + vXAsset + "): " + ret);
	}

	return ret;
}

/**
 * Deletes the service with the given id.
 */
@DELETE
@Path("/assets/{id}")
@RangerAnnotationClassName(class_name = VXAsset.class)
@PreAuthorize("@rangerPreAuthSecurityHandler.isAPIAccessible(\"" + RangerAPIList.DELETE_X_ASSET + "\")")
public void deleteXAsset(@PathParam("id") Long id, @Context HttpServletRequest request) {
	if(logger.isDebugEnabled()) {
		logger.debug("==> AssetREST.deleteXAsset(" + id + ")");
	}

	serviceREST.deleteService(id);

	if(logger.isDebugEnabled()) {
		logger.debug("<== AssetREST.deleteXAsset(" + id + ")");
	}
}

/**
 * Validates the connection configuration of the given legacy asset.
 */
@POST
@Path("/assets/testConfig")
@Produces({ "application/xml", "application/json" })
@PreAuthorize("@rangerPreAuthSecurityHandler.isAPIAccessible(\"" + RangerAPIList.TEST_CONFIG + "\")")
public VXResponse configTest(VXAsset vXAsset) {
	if(logger.isDebugEnabled()) {
		logger.debug("==> AssetREST.configTest(" + vXAsset + ")");
	}

	RangerService service = serviceUtil.toRangerService(vXAsset);

	VXResponse ret = serviceREST.validateConfig(service);

	if(logger.isDebugEnabled()) {
		// fixed: exit log previously said "testConfig" while the method (and entry log) is configTest
		logger.debug("<== AssetREST.configTest(" + vXAsset + "): " + ret);
	}

	return ret;
}

/**
 * Searches services using the legacy repository-search request parameters and
 * returns them as a list of legacy assets.
 */
@GET
@Path("/assets")
@Produces({ "application/xml", "application/json" })
@PreAuthorize("@rangerPreAuthSecurityHandler.isAPIAccessible(\"" + RangerAPIList.SEARCH_X_ASSETS + "\")")
public VXAssetList searchXAssets(@Context HttpServletRequest request) {
	if(logger.isDebugEnabled()) {
		logger.debug("==> AssetREST.searchXAssets()");
	}

	VXAssetList ret = new VXAssetList();

	SearchFilter filter = searchUtil.getSearchFilterFromLegacyRequestForRepositorySearch(request, xAssetService.sortFields);

	List<RangerService> services = serviceREST.getServices(filter);

	if(services != null) {
		List<VXAsset> assets = new ArrayList<VXAsset>();

		for(RangerService service : services) {
			VXAsset asset = serviceUtil.toVXAsset(service);

			if(asset != null) {
				assets.add(asset);
			}
		}

		ret.setVXAssets(assets);
	}

	if(logger.isDebugEnabled()) {
		logger.debug("<== AssetREST.searchXAssets(): count=" + ret.getListSize());
	}

	return ret;
}

/**
 * Returns the number of assets matching the search request.
 */
@GET
@Path("/assets/count")
@Produces({ "application/xml", "application/json" })
@PreAuthorize("@rangerPreAuthSecurityHandler.isAPIAccessible(\"" + RangerAPIList.COUNT_X_ASSETS + "\")")
public VXLong countXAssets(@Context HttpServletRequest request) {
	if(logger.isDebugEnabled()) {
		logger.debug("==> AssetREST.countXAssets()");
	}

	VXLong ret = new VXLong();
	ret.setValue(searchXAssets(request).getListSize());

	if(logger.isDebugEnabled()) {
		logger.debug("<== AssetREST.countXAssets(): " + ret);
	}

	return ret;
}

/**
 * Returns the legacy resource view of the policy with the given id.
 */
@GET
@Path("/resources/{id}")
@Produces({ "application/xml", "application/json" })
public VXResource getXResource(@PathParam("id") Long id) {
	if(logger.isDebugEnabled()) {
		logger.debug("==> AssetREST.getXResource(" + id + ")");
	}

	RangerPolicy policy = null;
	RangerService service = null;

	policy = serviceREST.getPolicy(id);

	if(policy != null) {
		service = serviceREST.getServiceByName(policy.getService());
	}

	VXResource ret = serviceUtil.toVXResource(policy, service);

	if(logger.isDebugEnabled()) {
		logger.debug("<== AssetREST.getXResource(" + id + "): " + ret);
	}

	return ret;
}

/**
 * Creates a policy from the given legacy resource.
 */
@POST
@Path("/resources")
@Produces({ "application/xml", "application/json" })
public VXResource createXResource(VXResource vXResource) {
	if(logger.isDebugEnabled()) {
		logger.debug("==> AssetREST.createXResource(" + vXResource + ")");
	}

	RangerService service = serviceREST.getService(vXResource.getAssetId());

	RangerPolicy policy = serviceUtil.toRangerPolicy(vXResource, service);

	RangerPolicy createdPolicy = serviceREST.createPolicy(policy, null);

	VXResource ret = serviceUtil.toVXResource(createdPolicy, service);

	if(logger.isDebugEnabled()) {
		logger.debug("<== AssetREST.createXResource(" + vXResource + "): " + ret);
	}

	return ret;
}

/**
 * Updates the policy described by the given legacy resource.
 * NOTE(review): like updateXAsset, the {id} in the path is not read — the id
 * comes from the request body.
 */
@PUT
@Path("/resources/{id}")
@Produces({ "application/xml", "application/json" })
public VXResource updateXResource(VXResource vXResource) {
	if(logger.isDebugEnabled()) {
		logger.debug("==> AssetREST.updateXResource(" + vXResource + ")");
	}

	RangerService service = serviceREST.getService(vXResource.getAssetId());

	RangerPolicy policy = serviceUtil.toRangerPolicy(vXResource, service);

	RangerPolicy updatedPolicy = serviceREST.updatePolicy(policy);

	VXResource ret = serviceUtil.toVXResource(updatedPolicy, service);

	if(logger.isDebugEnabled()) {
		logger.debug("<== AssetREST.updateXResource(" + vXResource + "): " + ret);
	}

	return ret;
}

/**
 * Deletes the policy with the given id. Restricted to system administrators.
 */
@DELETE
@Path("/resources/{id}")
@PreAuthorize("hasRole('ROLE_SYS_ADMIN')")
@RangerAnnotationClassName(class_name = VXResource.class)
public void deleteXResource(@PathParam("id") Long id, @Context HttpServletRequest request) {
	if(logger.isDebugEnabled()) {
		logger.debug("==> AssetREST.deleteXResource(" + id + ")");
	}

	serviceREST.deletePolicy(id);

	if(logger.isDebugEnabled()) {
		logger.debug("<== AssetREST.deleteXResource(" + id + ")");
	}
}

/**
 * Searches policies using legacy request parameters and returns them as
 * legacy resources.
 */
@GET
@Path("/resources")
@Produces({ "application/xml", "application/json" })
public VXResourceList searchXResources(@Context HttpServletRequest request) {
	if(logger.isDebugEnabled()) {
		logger.debug("==> AssetREST.searchXResources()");
	}

	VXResourceList ret = new VXResourceList();

	SearchFilter filter = searchUtil.getSearchFilterFromLegacyRequest(request, xResourceService.sortFields);

	List<RangerPolicy> policies = serviceREST.getPolicies(filter);

	if(policies != null) {
		List<VXResource> resources = new ArrayList<VXResource>();

		for(RangerPolicy policy : policies) {
			// each policy needs its owning service for the legacy conversion
			RangerService service = serviceREST.getServiceByName(policy.getService());

			VXResource resource = serviceUtil.toVXResource(policy, service);

			if(resource != null) {
				resources.add(resource);
			}
		}

		ret.setVXResources(resources);
	}

	if(logger.isDebugEnabled()) {
		logger.debug("<== AssetREST.searchXResources(): count=" + ret.getResultSize());
	}

	return ret;
}

/**
 * Returns the number of resources matching the search request.
 */
@GET
@Path("/resources/count")
@Produces({ "application/xml", "application/json" })
public VXLong countXResources(@Context HttpServletRequest request) {
	if(logger.isDebugEnabled()) {
		logger.debug("==> AssetREST.countXResources()");
	}

	VXLong ret = new VXLong();
	ret.setValue(searchXResources(request).getListSize());

	if(logger.isDebugEnabled()) {
		// fixed: exit log previously said "countXAssets" (copy-paste from countXAssets)
		logger.debug("<== AssetREST.countXResources(): " + ret);
	}

	return ret;
}

/** Returns the credential store with the given id. */
@GET
@Path("/credstores/{id}")
@Produces({ "application/xml", "application/json" })
public VXCredentialStore getXCredentialStore(@PathParam("id") Long id) {
	return assetMgr.getXCredentialStore(id);
}

/** Creates a credential store. */
@POST
@Path("/credstores")
@Produces({ "application/xml", "application/json" })
public VXCredentialStore createXCredentialStore(VXCredentialStore vXCredentialStore) {
	return assetMgr.createXCredentialStore(vXCredentialStore);
}

/** Updates a credential store. */
@PUT
@Path("/credstores")
@Produces({ "application/xml", "application/json" })
public VXCredentialStore updateXCredentialStore(VXCredentialStore vXCredentialStore) {
	return assetMgr.updateXCredentialStore(vXCredentialStore);
}

/** Deletes (non-forced) the credential store with the given id. Admin only. */
@DELETE
@Path("/credstores/{id}")
@PreAuthorize("hasRole('ROLE_SYS_ADMIN')")
@RangerAnnotationClassName(class_name = VXCredentialStore.class)
public void deleteXCredentialStore(@PathParam("id") Long id, @Context HttpServletRequest request) {
	boolean force = false;
	assetMgr.deleteXCredentialStore(id, force);
}

/** Searches credential stores using common request criteria. */
@GET
@Path("/credstores")
@Produces({ "application/xml", "application/json" })
public VXCredentialStoreList searchXCredentialStores(@Context HttpServletRequest request) {
	SearchCriteria searchCriteria = searchUtil.extractCommonCriterias(request, xCredentialStoreService.sortFields);
	return assetMgr.searchXCredentialStores(searchCriteria);
}

/** Returns the number of credential stores matching the request. */
@GET
@Path("/credstores/count")
@Produces({ "application/xml", "application/json" })
public VXLong countXCredentialStores(@Context HttpServletRequest request) {
	SearchCriteria searchCriteria = searchUtil.extractCommonCriterias(request, xCredentialStoreService.sortFields);
	return assetMgr.getXCredentialStoreSearchCount(searchCriteria);
}

/**
 * Streams a file rendering of the resource ("fileType" request parameter chooses
 * the format). Returns null (no response) when the resource or file type is missing.
 */
@GET
@Path("/resource/{id}")
public Response getXResourceFile(@Context HttpServletRequest request, @PathParam("id") Long id) {
	String fileType = searchUtil.extractString(request, new SearchCriteria(), "fileType", "File type", StringUtil.VALIDATION_TEXT);

	VXResource resource = getXResource(id);

	Response response = null;

	if(resource != null && StringUtils.isNotEmpty(fileType)) {
		File file = assetMgr.getXResourceFile(resource, fileType);

		if(file != null) {
			response = Response.ok(file, MediaType.APPLICATION_OCTET_STREAM)
					.header("Content-Disposition", "attachment;filename=" + file.getName())
					.build();
		}
	}

	return response;
}

/**
 * Legacy agent endpoint: returns the latest policies of a repository as a JSON
 * string, recording the download in the policy-export audit.
 */
@GET
@Path("/policyList/{repository}")
@Encoded
public String getResourceJSON(@Context HttpServletRequest request, @PathParam("repository") String repository) {
	String epoch = request.getParameter("epoch");
	X509Certificate[] certchain = (X509Certificate[]) request.getAttribute("javax.servlet.request.X509Certificate");
	String ipAddress = request.getHeader("X-FORWARDED-FOR");
	boolean isSecure = request.isSecure();
	String policyCount = request.getParameter("policyCount");
	String agentId = request.getParameter("agentId");
	Long lastKnowPolicyVersion = Long.valueOf(-1);

	if (ipAddress == null) {
		// fall back to the direct peer address when no proxy header is present
		ipAddress = request.getRemoteAddr();
	}

	boolean httpEnabled = PropertiesUtil.getBooleanProperty("ranger.service.http.enabled",true);

	ServicePolicies servicePolicies = null;

	try {
		servicePolicies = serviceREST.getServicePoliciesIfUpdated(repository, lastKnowPolicyVersion, 0L, agentId, "", "", false, request);
	} catch(Exception excp) {
		logger.error("failed to retrieve policies for repository " + repository, excp);
	}

	RangerService service = serviceUtil.getServiceByName(repository);
	List<RangerPolicy> policies = servicePolicies != null ? servicePolicies.getPolicies() : null;
	long policyUpdTime = (servicePolicies != null && servicePolicies.getPolicyUpdateTime() != null) ? servicePolicies.getPolicyUpdateTime().getTime() : 0L;
	VXAsset vAsset = serviceUtil.toVXAsset(service);
	List<VXResource> vResourceList = new ArrayList<VXResource>();

	if(policies != null) {
		for(RangerPolicy policy : policies) {
			vResourceList.add(serviceUtil.toVXResource(policy, service));
		}
	}

	String file = assetMgr.getLatestRepoPolicy(vAsset, vResourceList, policyUpdTime, certchain, httpEnabled, epoch, ipAddress, isSecure, policyCount, agentId);

	return file;
}

/**
 * Searches the audit trail of policy downloads by agents.
 */
@GET
@Path("/exportAudit")
@Produces({ "application/xml", "application/json" })
@PreAuthorize("@rangerPreAuthSecurityHandler.isAPIAccessible(\"" + RangerAPIList.SEARCH_X_POLICY_EXPORT_AUDITS + "\")")
public VXPolicyExportAuditList searchXPolicyExportAudits(@Context HttpServletRequest request) {
	SearchCriteria searchCriteria = searchUtil.extractCommonCriterias(request, xPolicyExportAudits.sortFields);

	searchUtil.extractString(request, searchCriteria, "agentId", "The XA agent id pulling the policies.", StringUtil.VALIDATION_TEXT);
	searchUtil.extractString(request, searchCriteria, "clientIP", "The XA agent ip pulling the policies.", StringUtil.VALIDATION_TEXT);
	searchUtil.extractString(request, searchCriteria, "repositoryName", "Repository name for which export was done.", StringUtil.VALIDATION_TEXT);
	searchUtil.extractInt(request, searchCriteria, "httpRetCode", "HTTP response code for exported policy.");
	searchUtil.extractDate(request, searchCriteria, "startDate", "Start Date", null);
	searchUtil.extractDate(request, searchCriteria, "endDate", "End Date", null);
	searchUtil.extractString(request, searchCriteria, "cluster", "Cluster Name", StringUtil.VALIDATION_TEXT);
	searchUtil.extractString(request, searchCriteria, "zoneName", "Zone Name", StringUtil.VALIDATION_TEXT);

	return assetMgr.searchXPolicyExportAudits(searchCriteria);
}

/**
 * Searches the admin (transaction) audit log.
 */
@GET
@Path("/report")
@Produces({ "application/xml", "application/json" })
@PreAuthorize("@rangerPreAuthSecurityHandler.isAPIAccessible(\"" + RangerAPIList.GET_REPORT_LOGS + "\")")
public VXTrxLogList getReportLogs(@Context HttpServletRequest request){
	SearchCriteria searchCriteria = searchUtil.extractCommonCriterias(request, xTrxLogService.sortFields);

	searchUtil.extractInt(request, searchCriteria, "objectClassType", "audit type.");
	searchUtil.extractString(request, searchCriteria, "attributeName", "Attribute Name", StringUtil.VALIDATION_TEXT);
	searchUtil.extractString(request, searchCriteria, "action", "CRUD Action Type", StringUtil.VALIDATION_TEXT);
	searchUtil.extractString(request, searchCriteria, "sessionId", "Session Id", StringUtil.VALIDATION_TEXT);
	searchUtil.extractString(request, searchCriteria, "owner", "Owner", StringUtil.VALIDATION_TEXT);
	searchUtil.extractDate(request, searchCriteria, "startDate", "Trasaction date since", "MM/dd/yyyy");
	searchUtil.extractDate(request, searchCriteria, "endDate", "Trasaction date till", "MM/dd/yyyy");

	return assetMgr.getReportLogs(searchCriteria);
}

/**
 * Returns all admin-audit entries belonging to one transaction id.
 */
@GET
@Path("/report/{transactionId}")
@Produces({ "application/xml", "application/json" })
@PreAuthorize("@rangerPreAuthSecurityHandler.isAPIAccessible(\"" + RangerAPIList.GET_TRANSACTION_REPORT + "\")")
public VXTrxLogList getTransactionReport(@Context HttpServletRequest request, @PathParam("transactionId") String transactionId){
	return assetMgr.getTransactionReport(transactionId);
}

/**
 * Searches the access audit log. Key admins are restricted to KMS audits and
 * everybody else is excluded from them (the repoType / -repoType filter below).
 */
@GET
@Path("/accessAudit")
@Produces({ "application/xml", "application/json" })
@PreAuthorize("@rangerPreAuthSecurityHandler.isAPIAccessible(\"" + RangerAPIList.GET_ACCESS_LOGS + "\")")
public VXAccessAuditList getAccessLogs(@Context HttpServletRequest request){
	SearchCriteria searchCriteria = searchUtil.extractCommonCriterias(request, xAccessAuditService.sortFields);

	searchUtil.extractString(request, searchCriteria, "accessType", "Access Type", StringUtil.VALIDATION_TEXT);
	searchUtil.extractString(request, searchCriteria, "aclEnforcer", "Access Enforcer", StringUtil.VALIDATION_TEXT);
	searchUtil.extractString(request, searchCriteria, "agentId", "Application", StringUtil.VALIDATION_TEXT);
	searchUtil.extractString(request, searchCriteria, "repoName", "Service Name", StringUtil.VALIDATION_TEXT);
	searchUtil.extractString(request, searchCriteria, "sessionId", "Session ID", StringUtil.VALIDATION_TEXT);
	searchUtil.extractStringList(request, searchCriteria, "requestUser", "Users", "requestUser", null, StringUtil.VALIDATION_TEXT);
	searchUtil.extractStringList(request, searchCriteria, "excludeUser", "Exclude Users", "-requestUser", null, StringUtil.VALIDATION_TEXT);
	searchUtil.extractString(request, searchCriteria, "requestData", "Request Data", StringUtil.VALIDATION_TEXT);
	searchUtil.extractString(request, searchCriteria, "resourcePath", "Resource Name", StringUtil.VALIDATION_TEXT);
	searchUtil.extractString(request, searchCriteria, "clientIP", "Client IP", StringUtil.VALIDATION_TEXT);
	searchUtil.extractString(request, searchCriteria, "resourceType", "Resource Type", StringUtil.VALIDATION_TEXT);
	searchUtil.extractString(request,searchCriteria,"excludeServiceUser", "Exclude Service User",StringUtil.VALIDATION_TEXT);
	searchUtil.extractInt(request, searchCriteria, "auditType", "Audit Type");
	searchUtil.extractInt(request, searchCriteria, "accessResult", "Result");
	searchUtil.extractInt(request, searchCriteria, "assetId", "Asset ID");
	searchUtil.extractLong(request, searchCriteria, "policyId", "Policy ID");
	searchUtil.extractInt(request, searchCriteria, "repoType", "Service Type");
	searchUtil.extractDate(request, searchCriteria, "startDate", "Start Date", "MM/dd/yyyy");
	searchUtil.extractDate(request, searchCriteria, "endDate", "End Date", "MM/dd/yyyy");
	searchUtil.extractString(request, searchCriteria, "tags", "tags", null);
	searchUtil.extractString(request, searchCriteria, "cluster", "Cluster Name", StringUtil.VALIDATION_TEXT);
	searchUtil.extractStringList(request, searchCriteria, "zoneName", "Zone Name List", "zoneName", null, null);
	searchUtil.extractString(request, searchCriteria, "agentHost", "Agent Host Name", StringUtil.VALIDATION_TEXT);

	boolean isKeyAdmin = msBizUtil.isKeyAdmin();
	boolean isAuditKeyAdmin = msBizUtil.isAuditKeyAdmin();
	XXServiceDef xxServiceDef = daoManager.getXXServiceDef().findByName(EmbeddedServiceDefsUtil.EMBEDDED_SERVICEDEF_KMS_NAME);

	if(isKeyAdmin && xxServiceDef != null || isAuditKeyAdmin && xxServiceDef != null){
		searchCriteria.getParamList().put("repoType", xxServiceDef.getId());
	} else if (xxServiceDef != null) {
		searchCriteria.getParamList().put("-repoType", xxServiceDef.getId());
	}

	return assetMgr.getAccessLogs(searchCriteria);
}

/**
 * Grants the permissions described by the given legacy policy.
 * Returns the input policy (the underlying grant REST call does not produce one).
 */
@POST
@Path("/resources/grant")
@Produces({ "application/xml", "application/json" })
public VXPolicy grantPermission(@Context HttpServletRequest request,VXPolicy vXPolicy) {
	RESTResponse ret = null;

	if(logger.isDebugEnabled()) {
		logger.debug("==> AssetREST.grantPermission(" + vXPolicy + ")");
	}

	if ( vXPolicy != null) {
		String serviceName = vXPolicy.getRepositoryName();
		GrantRevokeRequest grantRevokeRequest = serviceUtil.toGrantRevokeRequest(vXPolicy);

		try {
			ret = serviceREST.grantAccess(serviceName, grantRevokeRequest, request);
		} catch(WebApplicationException excp) {
			throw excp;
		} catch (Throwable e) {
			logger.error( HttpServletResponse.SC_BAD_REQUEST + "Grant Access Failed for the request " + vXPolicy, e);
			throw restErrorUtil.createRESTException("Grant Access Failed for the request: " + vXPolicy + ". " + e.getMessage());
		}
	} else {
		logger.error( HttpServletResponse.SC_BAD_REQUEST + "Bad Request parameter");
		throw restErrorUtil.createRESTException("Bad Request parameter");
	}

	if(logger.isDebugEnabled()) {
		logger.debug("<== AssetREST.grantPermission(" + ret + ")");
	}

	// TODO: the current grant REST call doesn't return a policy, so the input policy is echoed back.
	return vXPolicy;
}

/**
 * Revokes the permissions described by the given legacy policy.
 * Returns the input policy (the underlying revoke REST call does not produce one).
 */
@POST
@Path("/resources/revoke")
@Produces({ "application/xml", "application/json" })
public VXPolicy revokePermission(@Context HttpServletRequest request,VXPolicy vXPolicy) {
	RESTResponse ret = null;

	if(logger.isDebugEnabled()) {
		logger.debug("==> AssetREST.revokePermission(" + vXPolicy + ")");
	}

	if ( vXPolicy != null) {
		String serviceName = vXPolicy.getRepositoryName();
		GrantRevokeRequest grantRevokeRequest = serviceUtil.toGrantRevokeRequest(vXPolicy);

		try {
			ret = serviceREST.revokeAccess(serviceName, grantRevokeRequest, request);
		} catch(WebApplicationException excp) {
			throw excp;
		} catch (Throwable e) {
			logger.error( HttpServletResponse.SC_BAD_REQUEST + "Revoke Access Failed for the request " + vXPolicy, e);
			throw restErrorUtil.createRESTException("Revoke Access Failed for the request: " + vXPolicy + ". " + e.getMessage());
		}
	} else {
		logger.error( HttpServletResponse.SC_BAD_REQUEST + "Bad Request parameter");
		throw restErrorUtil.createRESTException("Bad Request parameter");
	}

	if(logger.isDebugEnabled()) {
		logger.debug("<== AssetREST.revokePermission(" + ret + ")");
	}

	return vXPolicy;
}

/**
 * Searches the user/group sync audit log.
 */
@GET
@Path("/ugsyncAudits")
@Produces({ "application/xml", "application/json" })
@PreAuthorize("@rangerPreAuthSecurityHandler.isAPIAccessible(\"" + RangerAPIList.GET_UGSYNC_AUDITS + "\")")
public VXUgsyncAuditInfoList getUgsyncAudits(@Context HttpServletRequest request){
	SearchCriteria searchCriteria = searchUtil.extractCommonCriterias(request, xAccessAuditService.sortFields);

	searchUtil.extractString(request, searchCriteria, "userName", "User Name", StringUtil.VALIDATION_TEXT);
	searchUtil.extractString(request, searchCriteria, "sessionId", "Session Id", StringUtil.VALIDATION_TEXT);
	searchUtil.extractString(request, searchCriteria, "syncSource", "Sync Source", StringUtil.VALIDATION_TEXT);
	searchUtil.extractString(request, searchCriteria, "syncSourceInfo", "Sync Source Info", StringUtil.VALIDATION_TEXT);
	searchUtil.extractLong(request, searchCriteria, "noOfUsers", "No of Users");
	searchUtil.extractLong(request, searchCriteria, "noOfGroups", "No of Groups");
	searchUtil.extractDate(request, searchCriteria, "startDate", "Start Date", "MM/dd/yyyy");
	searchUtil.extractDate(request, searchCriteria, "endDate", "End Date", "MM/dd/yyyy");

	return assetMgr.getUgsyncAudits(searchCriteria);
}

/**
 * Returns user/group sync audit entries for one sync source.
 */
@GET
@Path("/ugsyncAudits/{syncSource}")
@Encoded
@Produces({ "application/xml", "application/json" })
@PreAuthorize("@rangerPreAuthSecurityHandler.isAPIAccessible(\"" + RangerAPIList.GET_UGSYNC_AUDITS_BY_SYNCSOURCE + "\")")
public VXUgsyncAuditInfoList getUgsyncAuditsBySyncSource(@PathParam("syncSource") String syncSource){
	// the previous placeholder list was immediately overwritten; return the result directly
	return assetMgr.getUgsyncAuditsBySyncSource(syncSource);
}
}
gzsombor/ranger
security-admin/src/main/java/org/apache/ranger/rest/AssetREST.java
Java
apache-2.0
27,720
/*
 * Copyright 2021 ThoughtWorks, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.thoughtworks.go.presentation.pipelinehistory;

import java.util.Date;

import com.thoughtworks.go.domain.JobResult;
import com.thoughtworks.go.domain.JobState;
import org.junit.jupiter.api.Test;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.is;

public class JobHistoryItemTest {

    /** Builds a history item with an empty name and the current time. */
    private static JobHistoryItem item(JobState state, JobResult result) {
        return new JobHistoryItem("", state, result, new Date());
    }

    @Test
    public void shouldBeUnsuccessfullyCompletedWhenFailedOrCancelled() {
        // both failure outcomes of a completed job count as unsuccessful
        assertThat(item(JobState.Completed, JobResult.Failed).hasUnsuccessfullyCompleted(), is(true));
        assertThat(item(JobState.Completed, JobResult.Cancelled).hasUnsuccessfullyCompleted(), is(true));
    }

    @Test
    public void shouldPassedWhenJobCompletesSuccessfully() {
        assertThat(item(JobState.Completed, JobResult.Passed).hasPassed(), is(true));
    }

    @Test
    public void shouldBeRunningBasedOnJobState() {
        // every non-terminal state reports the job as running
        JobState[] runningStates = {
                JobState.Assigned, JobState.Building, JobState.Completing,
                JobState.Preparing, JobState.Scheduled
        };
        for (JobState state : runningStates) {
            assertThat(item(state, JobResult.Unknown).isRunning(), is(true));
        }
    }
}
GaneshSPatil/gocd
common/src/test/java/com/thoughtworks/go/presentation/pipelinehistory/JobHistoryItemTest.java
Java
apache-2.0
2,124
/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.android.tools.idea.avdmanager; import org.jetbrains.annotations.NotNull; import java.awt.event.ActionEvent; /** * Create a new {@link com.android.sdklib.devices.Device} */ public class CreateDeviceAction extends DeviceUiAction { public CreateDeviceAction(@NotNull DeviceProvider provider) { super(provider, "Create"); } public CreateDeviceAction(@NotNull DeviceProvider provider, @NotNull String text) { super(provider, text); } @Override public boolean isEnabled() { return true; } @Override public void actionPerformed(ActionEvent e) { DeviceEditWizard wizard = new DeviceEditWizard(null, true); wizard.init(); boolean success = wizard.showAndGet(); if (success) { myProvider.refreshDevices(); myProvider.setDevice(wizard.getEditedDevice()); } } }
consulo/consulo-android
android/android/src/com/android/tools/idea/avdmanager/CreateDeviceAction.java
Java
apache-2.0
1,454
package ru.stqa.pft.mantis.appmanager;

import org.openqa.selenium.By;
import ru.lanwen.verbalregex.VerbalExpression;
import ru.stqa.pft.mantis.model.MailMessage;
import ru.stqa.pft.mantis.model.UserData;

import javax.mail.MessagingException;
import java.io.IOException;
import java.sql.*;
import java.util.List;

/**
 * Helper that resets and changes a Mantis user's password via the admin web UI,
 * looking the user up directly in the MySQL database.
 */
public class ChangePassHelper extends HelperBase {

  // Most recently found user; starts as an empty placeholder.
  // NOTE(review): withId('0') passes the char '0' (numeric value 48), not 0 — confirm intended.
  public UserData user = new UserData().withId('0').withLogin("");

  public ChangePassHelper(ApplicationManager app) {
    super(app);
  }

  /** Logs in as the administrator and opens the user-management page. */
  public void goManagePage() {
    wd.get(app.getProperty("web.baseUrl") + "/login_page.php");
    type(By.name("username"), app.getProperty("web.adminLogin"));
    type(By.name("password"), app.getProperty("web.adminPassword"));
    click(By.cssSelector("input[value='Login']"));
    wd.get(app.getProperty("web.baseUrl") + "/manage_user_page.php");
  }

  /**
   * Fetches one non-administrator user (id and username) straight from the DB
   * and stores it in {@link #user}. On SQL failure the error is printed and the
   * previously stored user is returned (best-effort, as before).
   */
  public UserData findUser() throws SQLException {
    // NOTE(review): DB credentials (root, empty password) are hard-coded in the
    // URL; consider moving them to configuration.
    String url = "jdbc:mysql://localhost:3306/bugtracker?useUnicode=true&useJDBCCompliantTimezoneShift=true&useLegacyDatetimeCode=false&serverTimezone=UTC"
        + "&user=root&password=";
    // try-with-resources closes the connection/statement/result set even on
    // exception (the original leaked all three when the query failed).
    try (Connection conn = DriverManager.getConnection(url);
         Statement st = conn.createStatement();
         ResultSet rs = st.executeQuery("select id, username from mantis_user_table where username!='administrator' LIMIT 1 ")) {
      while (rs.next()) {
        user = new UserData().withId(rs.getInt("id"))
            .withLogin(rs.getString("username"));
      }
    } catch (SQLException ex) {
      System.out.println("SQLException: " + ex.getMessage());
      System.out.println("SQLState: " + ex.getSQLState());
      System.out.println("VendorError: " + ex.getErrorCode());
    }
    return user;
  }

  /** Opens the edit page of the previously found user and clicks "Reset Password". */
  public void resetPassword() throws SQLException, InterruptedException {
    wd.get(app.getProperty("web.baseUrl") + "manage_user_edit_page.php?user_id=" + user.getId());
    click(By.cssSelector("input[value='Reset Password']"));
  }

  /**
   * Waits for the password-reset e-mail, follows its confirmation link and
   * submits the new password.
   */
  public void changePassword(String newPassword) throws IOException, MessagingException, SQLException {
    List<MailMessage> mailMessages = app.mail().waitForMail(1, 10000);
    String confirmationLink = findConfirmationLink(mailMessages);
    wd.get(confirmationLink);
    type(By.name("password"), newPassword);
    type(By.name("password_confirm"), newPassword);
    click(By.cssSelector("input[value='Update User']"));
  }

  /** Extracts the first http:// link from the first received mail message. */
  private String findConfirmationLink(List<MailMessage> mailMessages) throws SQLException {
    MailMessage mailMessage = mailMessages.get(0);
    VerbalExpression regex = VerbalExpression.regex().find("http://").nonSpace().oneOrMore().build();
    return regex.getText(mailMessage.text);
  }
}
AlexVprofit/java_pft_39
mantis-tests/src/test/java/ru/stqa/pft/mantis/appmanager/ChangePassHelper.java
Java
apache-2.0
4,398
using System; using System.Collections.Generic; namespace Rosie.ZWave { public class ZWaveNode { public byte Id { get; set; } public uint HomeId { get; set; } public string Name { get; set; } public string Location { get; set; } public string Label { get; set; } public string Manufacturer { get; set; } public string Product { get; set; } public List<ZWaveCommandTypes> CommandTypes { get; set; } } }
Clancey/Rosie
src/ZWave/Rosie.ZWave/ZWaveNode.cs
C#
apache-2.0
431
import java.util.*; public class Splitting{ public static String knights = "Then, when you have found the shrubbery," + " you must cut down the mightiest tree in the forest... " + "with... a herring!"; public static void splitting(String regex){ System.out.println(Arrays.toString(knights.split(regex))); } public static void main(String[] args){ splitting(" "); splitting("\\W+"); splitting("n\\W+"); } }
Vayne-Lover/Java
thinking-in-java/chapter13/Splitting.java
Java
apache-2.0
530
package com.netflix.priam; import java.util.Arrays; import java.util.List; import com.google.common.collect.Lists; import com.google.inject.Singleton; import com.netflix.priam.IConfiguration; import com.netflix.priam.defaultimpl.PriamConfiguration; @Singleton public class FakeConfiguration implements IConfiguration { public static final String FAKE_REGION = "us-east-1"; public String region; public String appName; public String zone; public String instance_id; public String restorePrefix; public FakeConfiguration() { this(FAKE_REGION, "my_fake_cluster", "my_zone", "i-01234567"); } public FakeConfiguration(String region, String appName, String zone, String ins_id) { this.region = region; this.appName = appName; this.zone = zone; this.instance_id = ins_id; this.restorePrefix = ""; } @Override public void intialize() { // TODO Auto-generated method stub } @Override public String getBackupLocation() { // TODO Auto-generated method stub return "casstestbackup"; } @Override public String getBackupPrefix() { // TODO Auto-generated method stub return "TEST-netflix.platform.S3"; } @Override public boolean isCommitLogBackup() { // TODO Auto-generated method stub return false; } @Override public String getCommitLogLocation() { // TODO Auto-generated method stub return "cass/commitlog"; } @Override public String getDataFileLocation() { // TODO Auto-generated method stub return "target/data"; } @Override public String getCacheLocation() { // TODO Auto-generated method stub return "cass/caches"; } @Override public List<String> getRacs() { return Arrays.asList("az1", "az2", "az3"); } @Override public int getJmxPort() { return 7199; } @Override public int getThriftPort() { return 9160; } @Override public int getNativeTransportPort() { return 9042; } @Override public String getSnitch() { return "org.apache.cassandra.locator.SimpleSnitch"; } @Override public String getRac() { return this.zone; } @Override public String getHostname() { // TODO Auto-generated method stub return 
instance_id; } @Override public String getInstanceName() { return instance_id; } @Override public String getHeapSize() { // TODO Auto-generated method stub return null; } @Override public String getHeapNewSize() { // TODO Auto-generated method stub return null; } @Override public int getBackupHour() { // TODO Auto-generated method stub return 12; } @Override public String getRestoreSnapshot() { // TODO Auto-generated method stub return null; } @Override public String getAppName() { return appName; } @Override public String getACLGroupName() { return this.getAppName(); } @Override public int getMaxBackupUploadThreads() { // TODO Auto-generated method stub return 2; } @Override public String getDC() { // TODO Auto-generated method stub return this.region; } @Override public int getMaxBackupDownloadThreads() { // TODO Auto-generated method stub return 3; } public void setRestorePrefix(String prefix) { // TODO Auto-generated method stub restorePrefix = prefix; } @Override public String getRestorePrefix() { // TODO Auto-generated method stub return restorePrefix; } @Override public String getBackupCommitLogLocation() { return "cass/backup/cl/"; } @Override public boolean isMultiDC() { // TODO Auto-generated method stub return false; } @Override public String getASGName() { // TODO Auto-generated method stub return null; } @Override public boolean isIncrBackup() { return true; } @Override public String getHostIP() { // TODO Auto-generated method stub return null; } @Override public int getUploadThrottle() { // TODO Auto-generated method stub return 0; } @Override public boolean isLocalBootstrapEnabled() { // TODO Auto-generated method stub return false; } @Override public int getInMemoryCompactionLimit() { return 8; } @Override public int getCompactionThroughput() { // TODO Auto-generated method stub return 0; } @Override public String getMaxDirectMemory() { // TODO Auto-generated method stub return null; } @Override public String getBootClusterName() { // TODO 
Auto-generated method stub return "cass_bootstrap"; } @Override public String getCassHome() { return "/tmp/priam"; } @Override public String getCassStartupScript() { // TODO Auto-generated method stub return "true"; } @Override public List<String> getRestoreKeySpaces() { // TODO Auto-generated method stub return Lists.newArrayList(); } @Override public long getBackupChunkSize() { return 5L*1024*1024; } @Override public void setDC(String region) { // TODO Auto-generated method stub } @Override public boolean isRestoreClosestToken() { // TODO Auto-generated method stub return false; } @Override public String getCassStopScript() { return "true"; } @Override public int getStoragePort() { return 7101; } @Override public String getSeedProviderName() { return "org.apache.cassandra.locator.SimpleSeedProvider"; } @Override public int getBackupRetentionDays() { return 5; } @Override public List<String> getBackupRacs() { return Lists.newArrayList(); } public int getMaxHintWindowInMS() { return 36000; } public int getHintedHandoffThrottleKb() { return 1024; } public int getMaxHintThreads() { return 1; } @Override public int getMemtableTotalSpaceMB() { return 0; } @Override public int getStreamingThroughputMB() { return 400; } @Override public boolean getMultithreadedCompaction() { return false; } public String getPartitioner() { return "org.apache.cassandra.dht.RandomPartitioner"; } @Override public int getSSLStoragePort() { // TODO Auto-generated method stub return 7103; } public String getKeyCacheSizeInMB() { return "16"; } public String getKeyCacheKeysToSave() { return "32"; } public String getRowCacheSizeInMB() { return "4"; } public String getRowCacheKeysToSave() { return "4"; } @Override public String getCassProcessName() { return "CassandraDaemon"; } public int getNumTokens() { return 1; } public String getYamlLocation() { return "conf/cassandra.yaml"; } public String getAuthenticator() { return PriamConfiguration.DEFAULT_AUTHENTICATOR; } public String getAuthorizer() { 
return PriamConfiguration.DEFAULT_AUTHORIZER; } @Override public String getTargetKSName() { return null; } @Override public String getTargetCFName() { return null; } @Override public boolean doesCassandraStartManually() { return false; } @Override public boolean isVpcRing() { return false; } public String getInternodeCompression() { return "all"; } @Override public boolean isBackingUpCommitLogs() { return false; } @Override public String getCommitLogBackupArchiveCmd() { return null; } @Override public String getCommitLogBackupRestoreCmd() { return null; } @Override public String getCommitLogBackupRestoreFromDirs() { return null; } @Override public String getCommitLogBackupRestorePointInTime() { return null; } public void setRestoreKeySpaces(List<String> keyspaces) { } @Override public int maxCommitLogsRestore() { return 0; } public boolean isClientSslEnabled() { return true; } public String getInternodeEncryption() { return "all"; } public boolean isDynamicSnitchEnabled() { return true; } public boolean isThriftEnabled() { return true; } public boolean isNativeTransportEnabled() { return false; } @Override public String getS3EndPoint() { return "s3-external-1.amazonaws.com"; } public int getConcurrentReadsCnt() { return 8; } public int getConcurrentWritesCnt() { return 8; } public int getConcurrentCompactorsCnt() { return 1; } @Override public String getRpcServerType() { return "hsha"; } @Override public int getRpcMaxThreads() { return 2096; } @Override public int getIndexInterval() { return 0; } public String getExtraConfigParams() { return null; } public String getCassYamlVal(String priamKey) { return ""; } @Override public boolean getAutoBoostrap() { // TODO Auto-generated method stub return false; } }
eg-eng/Priam
priam/src/test/java/com/netflix/priam/FakeConfiguration.java
Java
apache-2.0
10,037
/** * Created by Pawan on 1/4/2016. */ /** * Created by Pawan on 12/11/2015. */ (function () { var commoncontroller = function ($http,$mdDialog,$mdMedia) { var showConfirm = function(title, label, okbutton, cancelbutton, content, OkCallback, CancelCallBack, okObj) { var confirm = $mdDialog.confirm() .title(title) .content(content) .ok(okbutton) .cancel(cancelbutton); $mdDialog.show(confirm).then(function() { OkCallback(); }, function() { CancelCallBack(); }); }; var showAdvanced = function(controller,template,clickOutSt) { var useFullScreen = ($mdMedia('sm') || $mdMedia('xs')) && $scope.customFullscreen; $mdDialog.show({ controller: controller, templateUrl: template, //parent: angular.element(document.body), clickOutsideToClose:clickOutSt, fullscreen: useFullScreen }) .then(function(answer) { //$scope.status = 'You said the information was "' + answer + '".'; }, function() { //$scope.status = 'You cancelled the dialog.'; }); /*$scope.$watch(function() { return $mdMedia('xs') || $mdMedia('sm'); }, function(wantsFullScreen) { $scope.customFullscreen = (wantsFullScreen === true); });*/ }; var showAlert = function(title,content) { $mdDialog.show( $mdDialog.alert() //.parent(angular.element(document.querySelector('#popupContainer'))) .clickOutsideToClose(false) .title(title) .content(content) .ariaLabel('Alert Dialog Demo') .ok('OK') // .targetEvent(ev) ); } return{ showConfirm:showConfirm, showAdvanced:showAdvanced, showAlert:showAlert }; }; var module = angular.module("trunkApp"); module.factory("commoncontroller",commoncontroller); }());
DuoSoftware/DVP-Apps
DVP-TrunkApp/scripts/services/CommonController.js
JavaScript
apache-2.0
1,940
package com.acme.example.time; import static org.junit.Assert.*; import java.text.SimpleDateFormat; import org.hamcrest.CustomMatcher; import org.hamcrest.Matcher; import org.junit.Test; public class UsingOwnHamcrestMatcher { private RecordConverter recordConverter = new WebInputRecordConverter(); @Test public void webInputConverterCreatesRecordWithCorrectStartTime() { Record record = recordConverter.createRecordForToday("07:30", "11:30"); assertThat(record, startsAt("07:30")); } private Matcher<Record> startsAt(final String startTime) { return new CustomMatcher<Record>("Record should start at "+startTime) { @Override public boolean matches(Object o) { if(o instanceof Record) { Record record = (Record) o; SimpleDateFormat dateFormat = new SimpleDateFormat("HH:mm"); return dateFormat.format(record.getStartTime()).equals(startTime); } return false; } }; } }
christophstrobl/junit-example
src/test/java/com/acme/example/time/UsingOwnHamcrestMatcher.java
Java
apache-2.0
925
/* Generated by camel build tools - do NOT edit this file! */ package org.apache.camel.component.redis; import java.net.URISyntaxException; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import org.apache.camel.spi.EndpointUriFactory; /** * Generated by camel build tools - do NOT edit this file! */ public class RedisEndpointUriFactory extends org.apache.camel.support.component.EndpointUriFactorySupport implements EndpointUriFactory { private static final String BASE = ":host:port"; private static final Set<String> PROPERTY_NAMES; private static final Set<String> SECRET_PROPERTY_NAMES; static { Set<String> props = new HashSet<>(13); props.add("redisTemplate"); props.add("synchronous"); props.add("exchangePattern"); props.add("serializer"); props.add("command"); props.add("lazyStartProducer"); props.add("bridgeErrorHandler"); props.add("channels"); props.add("port"); props.add("connectionFactory"); props.add("host"); props.add("listenerContainer"); props.add("exceptionHandler"); PROPERTY_NAMES = Collections.unmodifiableSet(props); SECRET_PROPERTY_NAMES = Collections.emptySet(); } @Override public boolean isEnabled(String scheme) { return "spring-redis".equals(scheme); } @Override public String buildUri(String scheme, Map<String, Object> properties, boolean encode) throws URISyntaxException { String syntax = scheme + BASE; String uri = syntax; Map<String, Object> copy = new HashMap<>(properties); uri = buildPathParameter(syntax, uri, "host", null, true, copy); uri = buildPathParameter(syntax, uri, "port", null, true, copy); uri = buildQueryParameters(uri, copy, encode); return uri; } @Override public Set<String> propertyNames() { return PROPERTY_NAMES; } @Override public Set<String> secretPropertyNames() { return SECRET_PROPERTY_NAMES; } @Override public boolean isLenientProperties() { return false; } }
nicolaferraro/camel
components/camel-spring-redis/src/generated/java/org/apache/camel/component/redis/RedisEndpointUriFactory.java
Java
apache-2.0
2,222
// stdafx.cpp : source file that includes just the standard includes // StatCrunch.pch will be the pre-compiled header // stdafx.obj will contain the pre-compiled type information #include "stdafx.h" // TODO: reference any additional headers you need in STDAFX.H // and not in this file
SneakyTactician/Code_Base
StatCrunch/stdafx.cpp
C++
apache-2.0
288
package com.test.coolweather.model; /** * 市实体类 * * @author sungang * */ public class City { private int id; private String cityName; private String cityCode; private int provinceId; public int getId() { return id; } public void setId(int id) { this.id = id; } public String getCityName() { return cityName; } public void setCityName(String cityName) { this.cityName = cityName; } public String getCityCode() { return cityCode; } public void setCityCode(String cityCode) { this.cityCode = cityCode; } public int getProvinceId() { return provinceId; } public void setProvinceId(int provinceId) { this.provinceId = provinceId; } }
sgiceleo/coolweather
src/com/test/coolweather/model/City.java
Java
apache-2.0
689
/* * To change this template, choose Tools | Templates * and open the template in the editor. */ package es.tid.emulator.node.transport.rsvp; import java.util.Timer; /** * * @author fmn */ public class RSVPSession extends Thread { private RSVPListener listener; private RSVPSender sender; private boolean no_delay=false; private String ID_dest; private int port; private int deadTimerLocal = 0; private Timer timer; private int keepAliveLocal = 0; private boolean running = true; public RSVPSession(String ip_destination, int port, boolean no_delay){ this.no_delay=no_delay; this.ID_dest=ip_destination; this.port=port; this.keepAliveLocal=30; this.deadTimerLocal=120; this.timer = new Timer(); } public void run(){ running=true; /*System.out.println("Nueva Sesion RSVP"); System.out.println("Opening new RSVP Session with host "+ ID_dest + " on port " + port);*/ } }
telefonicaid/netphony-gmpls-emulator
src/main/java/es/tid/emulator/node/transport/rsvp/RSVPSession.java
Java
apache-2.0
1,000
package de.taimos.dao.mongo; /* * #%L * Spring DAO Mongo * %% * Copyright (C) 2013 - 2015 Taimos GmbH * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ public class TestDAO extends AbstractMongoDAO<TestObject> { @Override protected Class<TestObject> getEntityClass() { return TestObject.class; } public TestObject findByName(String name) { return this.findFirstByQuery("{name:#}", null, name); } }
taimos/spring-dao-mongo
src/test/java/de/taimos/dao/mongo/TestDAO.java
Java
apache-2.0
942
package org.apereo.cas.support.saml.authentication; import org.apereo.cas.authentication.CoreAuthenticationTestUtils; import org.apereo.cas.authentication.Credential; import org.apereo.cas.authentication.DefaultAuthenticationTransaction; import org.apereo.cas.authentication.credential.UsernamePasswordCredential; import lombok.val; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.util.HashMap; import static org.junit.jupiter.api.Assertions.*; /** * @author Scott Battaglia * @since 3.1 */ public class SamlAuthenticationMetaDataPopulatorTests { private SamlAuthenticationMetaDataPopulator populator; @BeforeEach public void initialize() { this.populator = new SamlAuthenticationMetaDataPopulator(); } @Test public void verifyAuthenticationTypeFound() { val credentials = new UsernamePasswordCredential(); val builder = CoreAuthenticationTestUtils.getAuthenticationBuilder(); this.populator.populateAttributes(builder, DefaultAuthenticationTransaction.of(credentials)); val auth = builder.build(); assertEquals(SamlAuthenticationMetaDataPopulator.AUTHN_METHOD_PASSWORD, auth.getAttributes().get(SamlAuthenticationMetaDataPopulator.ATTRIBUTE_AUTHENTICATION_METHOD)); } @Test public void verifyAuthenticationTypeFoundByDefault() { val credentials = new CustomCredential(); val builder = CoreAuthenticationTestUtils.getAuthenticationBuilder(); this.populator.populateAttributes(builder, DefaultAuthenticationTransaction.of(credentials)); val auth = builder.build(); assertNotNull(auth.getAttributes().get(SamlAuthenticationMetaDataPopulator.ATTRIBUTE_AUTHENTICATION_METHOD)); } @Test public void verifyAuthenticationTypeFoundCustom() { val credentials = new CustomCredential(); val added = new HashMap<String, String>(); added.put(CustomCredential.class.getName(), "FF"); this.populator.setUserDefinedMappings(added); val builder = CoreAuthenticationTestUtils.getAuthenticationBuilder(); this.populator.populateAttributes(builder, 
DefaultAuthenticationTransaction.of(credentials)); val auth = builder.build(); assertEquals( "FF", auth.getAttributes().get(SamlAuthenticationMetaDataPopulator.ATTRIBUTE_AUTHENTICATION_METHOD)); } private static class CustomCredential implements Credential { private static final long serialVersionUID = 8040541789035593268L; @Override public String getId() { return "nobody"; } } }
GIP-RECIA/cas
support/cas-server-support-saml/src/test/java/org/apereo/cas/support/saml/authentication/SamlAuthenticationMetaDataPopulatorTests.java
Java
apache-2.0
2,666
// Copyright 2020 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package restore import ( "bytes" "context" "fmt" "io" "path/filepath" "reflect" "sort" "strconv" "strings" "github.com/docker/go-units" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/br/pkg/lightning/backend" "github.com/pingcap/tidb/br/pkg/lightning/backend/kv" "github.com/pingcap/tidb/br/pkg/lightning/checkpoints" "github.com/pingcap/tidb/br/pkg/lightning/common" "github.com/pingcap/tidb/br/pkg/lightning/config" "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/lightning/mydump" "github.com/pingcap/tidb/br/pkg/lightning/verification" "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/br/pkg/version" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" "github.com/pingcap/tidb/types" "github.com/tikv/pd/server/api" pdconfig "github.com/tikv/pd/server/config" "go.uber.org/zap" "modernc.org/mathutil" ) const ( pdStores = "/pd/api/v1/stores" pdReplicate = "/pd/api/v1/config/replicate" pdEmptyRegions = "/pd/api/v1/regions/check/empty-region" defaultCSVSize = 10 * units.GiB maxSampleDataSize = 10 * 1024 * 1024 maxSampleRowCount = 10 * 1024 warnEmptyRegionCntPerStore = 500 errorEmptyRegionCntPerStore = 1000 warnRegionCntMinMaxRatio = 0.75 errorRegionCntMinMaxRatio = 0.5 // We only 
check RegionCntMaxMinRatio when the maximum region count of all stores is larger than this threshold. checkRegionCntRatioThreshold = 1000 ) func (rc *Controller) isSourceInLocal() bool { return strings.HasPrefix(rc.store.URI(), storage.LocalURIPrefix) } func (rc *Controller) getReplicaCount(ctx context.Context) (uint64, error) { result := &pdconfig.ReplicationConfig{} err := rc.tls.WithHost(rc.cfg.TiDB.PdAddr).GetJSON(ctx, pdReplicate, &result) if err != nil { return 0, errors.Trace(err) } return result.MaxReplicas, nil } func (rc *Controller) getClusterAvail(ctx context.Context) (uint64, error) { result := &api.StoresInfo{} if err := rc.tls.WithHost(rc.cfg.TiDB.PdAddr).GetJSON(ctx, pdStores, result); err != nil { return 0, errors.Trace(err) } clusterAvail := uint64(0) for _, store := range result.Stores { clusterAvail += uint64(store.Status.Available) } return clusterAvail, nil } // clusterResource check cluster has enough resource to import data. this test can by skipped. func (rc *Controller) clusterResource(ctx context.Context, localSource int64) error { passed := true message := "Cluster resources are rich for this import task" defer func() { rc.checkTemplate.Collect(Critical, passed, message) }() var ( clusterAvail uint64 clusterSource uint64 ) if rc.taskMgr == nil { var err error clusterAvail, err = rc.getClusterAvail(ctx) if err != nil { return errors.Trace(err) } clusterSource = uint64(localSource) } else { if err := rc.taskMgr.CheckTasksExclusively(ctx, func(tasks []taskMeta) ([]taskMeta, error) { clusterAvail = 0 clusterSource = 0 restoreStarted := false for _, task := range tasks { if task.status > taskMetaStatusInitial { restoreStarted = true } clusterSource += task.sourceBytes if task.clusterAvail > 0 { clusterAvail = task.clusterAvail } } if restoreStarted || clusterAvail > 0 { return nil, nil } var err error clusterAvail, err = rc.getClusterAvail(ctx) if err != nil { return nil, errors.Trace(err) } newTasks := append([]taskMeta(nil), tasks...) 
for i := 0; i < len(newTasks); i++ { newTasks[i].clusterAvail = clusterAvail } return newTasks, nil }); err != nil { return errors.Trace(err) } } replicaCount, err := rc.getReplicaCount(ctx) if err != nil { return errors.Trace(err) } estimateSize := clusterSource * replicaCount if estimateSize > clusterAvail { passed = false message = fmt.Sprintf("Cluster doesn't have enough space, available is %s, but we need %s", units.BytesSize(float64(clusterAvail)), units.BytesSize(float64(estimateSize))) } else { message = fmt.Sprintf("Cluster available is rich, available is %s, we need %s", units.BytesSize(float64(clusterAvail)), units.BytesSize(float64(estimateSize))) } return nil } // ClusterIsAvailable check cluster is available to import data. this test can be skipped. func (rc *Controller) ClusterIsAvailable(ctx context.Context) error { passed := true message := "Cluster is available" defer func() { rc.checkTemplate.Collect(Critical, passed, message) }() checkCtx := &backend.CheckCtx{ DBMetas: rc.dbMetas, } if err := rc.backend.CheckRequirements(ctx, checkCtx); err != nil { passed = false message = fmt.Sprintf("cluster available check failed: %s", err.Error()) } return nil } func (rc *Controller) checkEmptyRegion(ctx context.Context) error { passed := true message := "Cluster doesn't have too many empty regions" defer func() { rc.checkTemplate.Collect(Critical, passed, message) }() storeInfo := &api.StoresInfo{} err := rc.tls.WithHost(rc.cfg.TiDB.PdAddr).GetJSON(ctx, pdStores, storeInfo) if err != nil { return errors.Trace(err) } if len(storeInfo.Stores) <= 1 { return nil } var result api.RegionsInfo if err := rc.tls.WithHost(rc.cfg.TiDB.PdAddr).GetJSON(ctx, pdEmptyRegions, &result); err != nil { return errors.Trace(err) } regions := make(map[uint64]int) stores := make(map[uint64]*api.StoreInfo) for _, region := range result.Regions { for _, peer := range region.Peers { regions[peer.StoreId]++ } } for _, store := range storeInfo.Stores { stores[store.Store.Id] = store } 
tableCount := 0 for _, db := range rc.dbMetas { info, ok := rc.dbInfos[db.Name] if !ok { continue } tableCount += len(info.Tables) } errorThrehold := mathutil.Max(errorEmptyRegionCntPerStore, tableCount*3) warnThrehold := mathutil.Max(warnEmptyRegionCntPerStore, tableCount) var ( errStores []string warnStores []string ) for storeID, regionCnt := range regions { if store, ok := stores[storeID]; ok { if store.Store.State != metapb.StoreState_Up { continue } if version.IsTiFlash(store.Store.Store) { continue } if regionCnt > errorThrehold { errStores = append(errStores, strconv.Itoa(int(storeID))) } else if regionCnt > warnThrehold { warnStores = append(warnStores, strconv.Itoa(int(storeID))) } } } var messages []string if len(errStores) > 0 { passed = false messages = append(messages, fmt.Sprintf("TiKV stores (%s) contains more than %v empty regions respectively, "+ "which will greatly affect the import speed and success rate", strings.Join(errStores, ", "), errorEmptyRegionCntPerStore)) } if len(warnStores) > 0 { messages = append(messages, fmt.Sprintf("TiKV stores (%s) contains more than %v empty regions respectively, "+ "which will affect the import speed and success rate", strings.Join(warnStores, ", "), warnEmptyRegionCntPerStore)) } if len(messages) > 0 { message = strings.Join(messages, "\n") } return nil } // checkRegionDistribution checks if regions distribution is unbalanced. 
func (rc *Controller) checkRegionDistribution(ctx context.Context) error { passed := true message := "Cluster region distribution is balanced" defer func() { rc.checkTemplate.Collect(Critical, passed, message) }() result := &api.StoresInfo{} err := rc.tls.WithHost(rc.cfg.TiDB.PdAddr).GetJSON(ctx, pdStores, result) if err != nil { return errors.Trace(err) } stores := make([]*api.StoreInfo, 0, len(result.Stores)) for _, store := range result.Stores { if store.Store.State != metapb.StoreState_Up { continue } if version.IsTiFlash(store.Store.Store) { continue } stores = append(stores, store) } if len(stores) <= 1 { return nil } sort.Slice(stores, func(i, j int) bool { return stores[i].Status.RegionCount < stores[j].Status.RegionCount }) minStore := stores[0] maxStore := stores[len(stores)-1] tableCount := 0 for _, db := range rc.dbMetas { info, ok := rc.dbInfos[db.Name] if !ok { continue } tableCount += len(info.Tables) } threhold := mathutil.Max(checkRegionCntRatioThreshold, tableCount) if maxStore.Status.RegionCount <= threhold { return nil } ratio := float64(minStore.Status.RegionCount) / float64(maxStore.Status.RegionCount) if ratio < errorRegionCntMinMaxRatio { passed = false message = fmt.Sprintf("Region distribution is unbalanced, the ratio of the regions count of the store(%v) "+ "with least regions(%v) to the store(%v) with most regions(%v) is %v, but we expect it must not be less than %v", minStore.Store.Id, minStore.Status.RegionCount, maxStore.Store.Id, maxStore.Status.RegionCount, ratio, errorRegionCntMinMaxRatio) } else if ratio < warnRegionCntMinMaxRatio { message = fmt.Sprintf("Region distribution is unbalanced, the ratio of the regions count of the store(%v) "+ "with least regions(%v) to the store(%v) with most regions(%v) is %v, but we expect it should not be less than %v", minStore.Store.Id, minStore.Status.RegionCount, maxStore.Store.Id, maxStore.Status.RegionCount, ratio, warnRegionCntMinMaxRatio) } return nil } // checkClusterRegion checks cluster 
if there are too many empty regions or region distribution is unbalanced. func (rc *Controller) checkClusterRegion(ctx context.Context) error { err := rc.taskMgr.CheckTasksExclusively(ctx, func(tasks []taskMeta) ([]taskMeta, error) { restoreStarted := false for _, task := range tasks { if task.status > taskMetaStatusInitial { restoreStarted = true break } } if restoreStarted { return nil, nil } if err := rc.checkEmptyRegion(ctx); err != nil { return nil, errors.Trace(err) } if err := rc.checkRegionDistribution(ctx); err != nil { return nil, errors.Trace(err) } return nil, nil }) return errors.Trace(err) } // StoragePermission checks whether Lightning has enough permission to storage. // this test cannot be skipped. func (rc *Controller) StoragePermission(ctx context.Context) error { passed := true message := "Lightning has the correct storage permission" defer func() { rc.checkTemplate.Collect(Critical, passed, message) }() u, err := storage.ParseBackend(rc.cfg.Mydumper.SourceDir, nil) if err != nil { return errors.Annotate(err, "parse backend failed") } _, err = storage.New(ctx, u, &storage.ExternalStorageOptions{ CheckPermissions: []storage.Permission{ storage.ListObjects, storage.GetObject, }, }) if err != nil { passed = false message = err.Error() } return nil } // HasLargeCSV checks whether input csvs is fit for Lightning import. // If strictFormat is false, and csv file is large. Lightning will have performance issue. // this test cannot be skipped. 
func (rc *Controller) HasLargeCSV(dbMetas []*mydump.MDDatabaseMeta) error { passed := true message := "Source csv files size is proper" defer func() { rc.checkTemplate.Collect(Warn, passed, message) }() if !rc.cfg.Mydumper.StrictFormat { for _, db := range dbMetas { for _, t := range db.Tables { for _, f := range t.DataFiles { if f.FileMeta.FileSize > defaultCSVSize { message = fmt.Sprintf("large csv: %s file exists and it will slow down import performance", f.FileMeta.Path) passed = false } } } } } else { message = "Skip the csv size check, because config.StrictFormat is true" } return nil } func (rc *Controller) estimateSourceData(ctx context.Context) (int64, error) { sourceSize := int64(0) originSource := int64(0) bigTableCount := 0 tableCount := 0 unSortedTableCount := 0 for _, db := range rc.dbMetas { info, ok := rc.dbInfos[db.Name] if !ok { continue } for _, tbl := range db.Tables { originSource += tbl.TotalSize tableInfo, ok := info.Tables[tbl.Name] if ok { // Do not sample small table because there may a large number of small table and it will take a long // time to sample data for all of them. if rc.cfg.TikvImporter.Backend == config.BackendTiDB || tbl.TotalSize < int64(config.SplitRegionSize) { sourceSize += tbl.TotalSize tbl.IndexRatio = 1.0 tbl.IsRowOrdered = false } else { if err := rc.sampleDataFromTable(ctx, db.Name, tbl, tableInfo.Core); err != nil { return sourceSize, errors.Trace(err) } sourceSize += int64(float64(tbl.TotalSize) * tbl.IndexRatio) if tbl.TotalSize > int64(config.DefaultBatchSize)*2 { bigTableCount += 1 if !tbl.IsRowOrdered { unSortedTableCount += 1 } } } tableCount += 1 } } } if rc.status != nil { rc.status.TotalFileSize.Store(originSource) } // Do not import with too large concurrency because these data may be all unsorted. 
if bigTableCount > 0 && unSortedTableCount > 0 { if rc.cfg.App.TableConcurrency > rc.cfg.App.IndexConcurrency { rc.cfg.App.TableConcurrency = rc.cfg.App.IndexConcurrency } } return sourceSize, nil } // localResource checks the local node has enough resources for this import when local backend enabled; func (rc *Controller) localResource(sourceSize int64) error { if rc.isSourceInLocal() { sourceDir := strings.TrimPrefix(rc.cfg.Mydumper.SourceDir, storage.LocalURIPrefix) same, err := common.SameDisk(sourceDir, rc.cfg.TikvImporter.SortedKVDir) if err != nil { return errors.Trace(err) } if same { rc.checkTemplate.Collect(Warn, false, fmt.Sprintf("sorted-kv-dir:%s and data-source-dir:%s are in the same disk, may slow down performance", rc.cfg.TikvImporter.SortedKVDir, sourceDir)) } } storageSize, err := common.GetStorageSize(rc.cfg.TikvImporter.SortedKVDir) if err != nil { return errors.Trace(err) } localAvailable := storageSize.Available var message string var passed bool switch { case localAvailable > uint64(sourceSize): message = fmt.Sprintf("local disk resources are rich, estimate sorted data size %s, local available is %s", units.BytesSize(float64(sourceSize)), units.BytesSize(float64(localAvailable))) passed = true default: if int64(rc.cfg.TikvImporter.DiskQuota) > int64(localAvailable) { message = fmt.Sprintf("local disk space may not enough to finish import"+ "estimate sorted data size is %s, but local available is %s,"+ "you need a smaller number for tikv-importer.disk-quota (%s) to finish imports", units.BytesSize(float64(sourceSize)), units.BytesSize(float64(localAvailable)), units.BytesSize(float64(rc.cfg.TikvImporter.DiskQuota))) passed = false log.L().Error(message) } else { message = fmt.Sprintf("local disk space may not enough to finish import, "+ "estimate sorted data size is %s, but local available is %s,"+ "we will use disk-quota (size: %s) to finish imports, which may slow down import", units.BytesSize(float64(sourceSize)), 
units.BytesSize(float64(localAvailable)), units.BytesSize(float64(rc.cfg.TikvImporter.DiskQuota))) passed = true log.L().Warn(message) } } rc.checkTemplate.Collect(Critical, passed, message) return nil } // CheckpointIsValid checks whether we can start this import with this checkpoint. func (rc *Controller) CheckpointIsValid(ctx context.Context, tableInfo *mydump.MDTableMeta) ([]string, bool, error) { msgs := make([]string, 0) uniqueName := common.UniqueTable(tableInfo.DB, tableInfo.Name) tableCheckPoint, err := rc.checkpointsDB.Get(ctx, uniqueName) if err != nil { // there is no checkpoint log.L().Debug("no checkpoint detected", zap.String("table", uniqueName)) return nil, true, nil } // if checkpoint enable and not missing, we skip the check table empty progress. if tableCheckPoint.Status <= checkpoints.CheckpointStatusMissing { return nil, false, nil } if tableCheckPoint.Status <= checkpoints.CheckpointStatusMaxInvalid { failedStep := tableCheckPoint.Status * 10 var action strings.Builder action.WriteString("./tidb-lightning-ctl --checkpoint-error-") switch failedStep { case checkpoints.CheckpointStatusAlteredAutoInc, checkpoints.CheckpointStatusAnalyzed: action.WriteString("ignore") default: action.WriteString("destroy") } action.WriteString("='") action.WriteString(uniqueName) action.WriteString("' --config=...") msgs = append(msgs, fmt.Sprintf("TiDB Lightning has failed last time. 
To prevent data loss, this run will stop now, "+ "%s failed in step(%s), please run command %s,"+ "You may also run `./tidb-lightning-ctl --checkpoint-error-destroy=all --config=...` to start from scratch,"+ "For details of this failure, read the log file from the PREVIOUS run", uniqueName, failedStep.MetricName(), action.String())) return msgs, false, nil } dbInfo, ok := rc.dbInfos[tableInfo.DB] if ok { t, ok := dbInfo.Tables[tableInfo.Name] if ok { if tableCheckPoint.TableID > 0 && tableCheckPoint.TableID != t.ID { msgs = append(msgs, fmt.Sprintf("TiDB Lightning has detected tables with illegal checkpoints. To prevent data loss, this run will stop now,"+ "please run command \"./tidb-lightning-ctl --checkpoint-remove='%s' --config=...\""+ "You may also run `./tidb-lightning-ctl --checkpoint-error-destroy=all --config=...` to start from scratch,"+ "For details of this failure, read the log file from the PREVIOUS run", uniqueName)) return msgs, false, nil } } } var permFromCheckpoint []int var columns []string for _, eng := range tableCheckPoint.Engines { if len(eng.Chunks) > 0 { chunk := eng.Chunks[0] permFromCheckpoint = chunk.ColumnPermutation columns = chunk.Chunk.Columns if filepath.Dir(chunk.FileMeta.Path) != rc.cfg.Mydumper.SourceDir { message := fmt.Sprintf("chunk checkpoints path is not equal to config"+ "checkpoint is %s, config source dir is %s", chunk.FileMeta.Path, rc.cfg.Mydumper.SourceDir) msgs = append(msgs, message) } } } if len(columns) == 0 { log.L().Debug("no valid checkpoint detected", zap.String("table", uniqueName)) return nil, false, nil } info := rc.dbInfos[tableInfo.DB].Tables[tableInfo.Name] if info != nil { permFromTiDB, err := parseColumnPermutations(info.Core, columns, nil) if err != nil { msgs = append(msgs, fmt.Sprintf("failed to calculate columns %s, table %s's info has changed,"+ "consider remove this checkpoint, and start import again.", err.Error(), uniqueName)) } if !reflect.DeepEqual(permFromCheckpoint, permFromTiDB) { msgs = 
append(msgs, fmt.Sprintf("compare columns perm failed. table %s's info has changed,"+ "consider remove this checkpoint, and start import again.", uniqueName)) } } return msgs, false, nil } // hasDefault represents col has default value. func hasDefault(col *model.ColumnInfo) bool { return col.DefaultIsExpr || col.DefaultValue != nil || !mysql.HasNotNullFlag(col.Flag) || col.IsGenerated() || mysql.HasAutoIncrementFlag(col.Flag) } func (rc *Controller) readFirstRow(ctx context.Context, dataFileMeta mydump.SourceFileMeta) (cols []string, row []types.Datum, err error) { var reader storage.ReadSeekCloser if dataFileMeta.Type == mydump.SourceTypeParquet { reader, err = mydump.OpenParquetReader(ctx, rc.store, dataFileMeta.Path, dataFileMeta.FileSize) } else { reader, err = rc.store.Open(ctx, dataFileMeta.Path) } if err != nil { return nil, nil, errors.Trace(err) } var parser mydump.Parser blockBufSize := int64(rc.cfg.Mydumper.ReadBlockSize) switch dataFileMeta.Type { case mydump.SourceTypeCSV: hasHeader := rc.cfg.Mydumper.CSV.Header // Create a utf8mb4 convertor to encode and decode data with the charset of CSV files. 
charsetConvertor, err := mydump.NewCharsetConvertor(rc.cfg.Mydumper.DataCharacterSet, rc.cfg.Mydumper.DataInvalidCharReplace) if err != nil { return nil, nil, errors.Trace(err) } parser, err = mydump.NewCSVParser(&rc.cfg.Mydumper.CSV, reader, blockBufSize, rc.ioWorkers, hasHeader, charsetConvertor) if err != nil { return nil, nil, errors.Trace(err) } case mydump.SourceTypeSQL: parser = mydump.NewChunkParser(rc.cfg.TiDB.SQLMode, reader, blockBufSize, rc.ioWorkers) case mydump.SourceTypeParquet: parser, err = mydump.NewParquetParser(ctx, rc.store, reader, dataFileMeta.Path) if err != nil { return nil, nil, errors.Trace(err) } default: panic(fmt.Sprintf("unknown file type '%s'", dataFileMeta.Type)) } defer parser.Close() err = parser.ReadRow() if err != nil && errors.Cause(err) != io.EOF { return nil, nil, errors.Trace(err) } return parser.Columns(), parser.LastRow().Row, nil } // SchemaIsValid checks the import file and cluster schema is match. func (rc *Controller) SchemaIsValid(ctx context.Context, tableInfo *mydump.MDTableMeta) ([]string, error) { if len(tableInfo.DataFiles) == 0 { log.L().Info("no data files detected", zap.String("db", tableInfo.DB), zap.String("table", tableInfo.Name)) return nil, nil } msgs := make([]string, 0) info, ok := rc.dbInfos[tableInfo.DB].Tables[tableInfo.Name] if !ok { msgs = append(msgs, fmt.Sprintf("TiDB schema `%s`.`%s` doesn't exists,"+ "please give a schema file in source dir or create table manually", tableInfo.DB, tableInfo.Name)) return msgs, nil } igCols := make(map[string]struct{}) igCol, err := rc.cfg.Mydumper.IgnoreColumns.GetIgnoreColumns(tableInfo.DB, tableInfo.Name, rc.cfg.Mydumper.CaseSensitive) if err != nil { return nil, errors.Trace(err) } for _, col := range igCol.Columns { igCols[col] = struct{}{} } colCountFromTiDB := len(info.Core.Columns) core := info.Core defaultCols := make(map[string]struct{}) for _, col := range core.Columns { if hasDefault(col) || (info.Core.ContainsAutoRandomBits() && 
mysql.HasPriKeyFlag(col.Flag)) { // this column has default value or it's auto random id, so we can ignore it defaultCols[col.Name.L] = struct{}{} } } // tidb_rowid have a default value. defaultCols[model.ExtraHandleName.String()] = struct{}{} // only check the first file of this table. dataFile := tableInfo.DataFiles[0] log.L().Info("datafile to check", zap.String("db", tableInfo.DB), zap.String("table", tableInfo.Name), zap.String("path", dataFile.FileMeta.Path)) // get columns name from data file. dataFileMeta := dataFile.FileMeta if tp := dataFileMeta.Type; tp != mydump.SourceTypeCSV && tp != mydump.SourceTypeSQL && tp != mydump.SourceTypeParquet { msgs = append(msgs, fmt.Sprintf("file '%s' with unknown source type '%s'", dataFileMeta.Path, dataFileMeta.Type.String())) return msgs, nil } colsFromDataFile, row, err := rc.readFirstRow(ctx, dataFileMeta) if err != nil { return nil, errors.Trace(err) } if colsFromDataFile == nil && len(row) == 0 { log.L().Info("file contains no data, skip checking against schema validity", zap.String("path", dataFileMeta.Path)) return msgs, nil } if colsFromDataFile == nil { // when there is no columns name in data file. we must insert data in order. // so the last several columns either can be ignored or has a default value. for i := len(row); i < colCountFromTiDB; i++ { if _, ok := defaultCols[core.Columns[i].Name.L]; !ok { msgs = append(msgs, fmt.Sprintf("TiDB schema `%s`.`%s` has %d columns,"+ "and data file has %d columns, but column %s are missing the default value,"+ "please give column a default value to skip this check", tableInfo.DB, tableInfo.Name, colCountFromTiDB, len(row), core.Columns[i].Name.L)) } } return msgs, nil } // compare column names and make sure // 1. TiDB table info has data file's all columns(besides ignore columns) // 2. Those columns not introduced in data file always have a default value. 
colMap := make(map[string]struct{}) for col := range igCols { colMap[col] = struct{}{} } for _, col := range core.Columns { if _, ok := colMap[col.Name.L]; ok { // tidb's column is ignored // we need ensure this column has the default value. if _, hasDefault := defaultCols[col.Name.L]; !hasDefault { msgs = append(msgs, fmt.Sprintf("TiDB schema `%s`.`%s`'s column %s cannot be ignored,"+ "because it doesn't have a default value, please set tables.ignoreColumns properly", tableInfo.DB, tableInfo.Name, col.Name.L)) } } else { colMap[col.Name.L] = struct{}{} } } // tidb_rowid can be ignored in check colMap[model.ExtraHandleName.String()] = struct{}{} for _, col := range colsFromDataFile { if _, ok := colMap[col]; !ok { checkMsg := "please check table schema" if dataFileMeta.Type == mydump.SourceTypeCSV && rc.cfg.Mydumper.CSV.Header { checkMsg += " and csv file header" } msgs = append(msgs, fmt.Sprintf("TiDB schema `%s`.`%s` doesn't have column %s, "+ "%s or use tables.ignoreColumns to ignore %s", tableInfo.DB, tableInfo.Name, col, checkMsg, col)) } else { // remove column for next iteration delete(colMap, col) } } // if theses rest columns don't have a default value. for col := range colMap { if _, ok := defaultCols[col]; ok { continue } msgs = append(msgs, fmt.Sprintf("TiDB schema `%s`.`%s` doesn't have the default value for %s"+ "please give a default value for %s or choose another column to ignore or add this column in data file", tableInfo.DB, tableInfo.Name, col, col)) } return msgs, nil } // checkCSVHeader try to check whether the csv header config is consistent with the source csv files by: // 1. pick one table with two CSV files and a unique/primary key // 2. read the first row of those two CSV files // 3. checks if the content of those first rows are compatible with the table schema, and whether the // two rows are identical, to determine if the first rows are a header rows. 
func (rc *Controller) checkCSVHeader(ctx context.Context, dbMetas []*mydump.MDDatabaseMeta) error { // if cfg set header = ture but source files actually contain not header, former SchemaCheck should // return error in this situation, so we need do it again. if rc.cfg.Mydumper.CSV.Header { return nil } var ( tableMeta *mydump.MDTableMeta csvCount int hasUniqueIdx bool ) // only check one table source files for better performance. The checked table is chosen based on following two factor: // 1. contains at least 1 csv source file, 2 is preferable // 2. table schema contains primary key or unique key // if the two factors can't be both satisfied, the first one has a higher priority outer: for _, dbMeta := range dbMetas { for _, tblMeta := range dbMeta.Tables { if len(tblMeta.DataFiles) == 0 { continue } tableHasUniqueIdx := false tableCSVCount := 0 for _, f := range tblMeta.DataFiles { if f.FileMeta.Type == mydump.SourceTypeCSV { tableCSVCount++ if tableCSVCount >= 2 { break } } } if tableCSVCount == 0 { continue } info := rc.dbInfos[tblMeta.DB].Tables[tblMeta.Name] for _, idx := range info.Core.Indices { if idx.Primary || idx.Unique { tableHasUniqueIdx = true } } if tableCSVCount >= 2 && hasUniqueIdx { tableMeta = tblMeta csvCount = tableCSVCount hasUniqueIdx = tableHasUniqueIdx // if a perfect table source is found, we can stop check more tables break outer } if tableCSVCount > csvCount || (tableCSVCount == csvCount && !hasUniqueIdx && tableHasUniqueIdx) { tableMeta = tblMeta csvCount = tableCSVCount hasUniqueIdx = tableHasUniqueIdx } } } if tableMeta == nil { return nil } var rows [][]types.Datum for _, f := range tableMeta.DataFiles { if f.FileMeta.Type != mydump.SourceTypeCSV { continue } _, row, err := rc.readFirstRow(ctx, f.FileMeta) if err != nil { return errors.Trace(err) } if len(row) > 0 { rows = append(rows, row) } // only check at most two of all the files if len(rows) >= 2 { break } } if len(rows) == 0 { return nil } else if len(rows) >= 2 { // if the 
first row in two source files are not the same, they should not be the header line // NOTE: though lightning's logic allows different source files contains different columns or the // order is difference, here we only check if they are exactly the same because this is the common case. if len(rows[0]) != len(rows[1]) { return nil } for i := 0; i < len(rows[0]); i++ { if rows[0][i].GetString() != rows[1][i].GetString() { return nil } } } // check if some fields are unique and not ignored // if at least one field appears in a unique key, we can sure there is something wrong, // they should be either the header line or the data is duplicated. tableInfo := rc.dbInfos[tableMeta.DB].Tables[tableMeta.Name] tableFields := make(map[string]struct{}) uniqueIdxFields := make(map[string]struct{}) ignoreColumns, err := rc.cfg.Mydumper.IgnoreColumns.GetIgnoreColumns(tableMeta.DB, tableMeta.Name, rc.cfg.Mydumper.CaseSensitive) if err != nil { return errors.Trace(err) } ignoreColsSet := make(map[string]struct{}) for _, col := range ignoreColumns.Columns { ignoreColsSet[col] = struct{}{} } for _, idx := range tableInfo.Core.Indices { if !idx.Unique && !idx.Primary { continue } for _, col := range idx.Columns { if _, ok := ignoreColsSet[col.Name.L]; !ok { uniqueIdxFields[col.Name.L] = struct{}{} } } } for _, f := range tableInfo.Core.Columns { tableFields[f.Name.L] = struct{}{} } if common.TableHasAutoRowID(tableInfo.Core) { tableFields[model.ExtraHandleName.L] = struct{}{} } hasUniqueField := false for _, d := range rows[0] { val := strings.ToLower(d.GetString()) if _, ok := tableFields[val]; !ok { return nil } if _, ok := uniqueIdxFields[val]; ok { hasUniqueField = true break } } msg := fmt.Sprintf("source csv files contains header row but `mydumper.csv.header` is false, checked table is `%s`.`%s`", tableMeta.DB, tableMeta.Name) level := Warn if hasUniqueField && len(rows) > 1 { level = Critical } else if !checkFieldCompatibility(tableInfo.Core, ignoreColsSet, rows[0]) { // if there 
are only 1 csv file or there is not unique key, try to check if all columns are compatible with string value level = Critical } rc.checkTemplate.Collect(level, false, msg) return nil } func checkFieldCompatibility(tbl *model.TableInfo, ignoreCols map[string]struct{}, values []types.Datum) bool { se := kv.NewSession(&kv.SessionOptions{ SQLMode: mysql.ModeStrictTransTables, }) for i, col := range tbl.Columns { // do not check ignored columns if _, ok := ignoreCols[col.Name.L]; ok { continue } if i >= len(values) { break } _, err := table.CastValue(se, values[i], col, true, false) if err != nil { log.L().Error("field value is not consistent with column type", zap.String("value", values[i].GetString()), zap.Any("column_info", col), zap.Error(err)) return false } } return true } func (rc *Controller) sampleDataFromTable(ctx context.Context, dbName string, tableMeta *mydump.MDTableMeta, tableInfo *model.TableInfo) error { if len(tableMeta.DataFiles) == 0 { return nil } sampleFile := tableMeta.DataFiles[0].FileMeta var reader storage.ReadSeekCloser var err error if sampleFile.Type == mydump.SourceTypeParquet { reader, err = mydump.OpenParquetReader(ctx, rc.store, sampleFile.Path, sampleFile.FileSize) } else { reader, err = rc.store.Open(ctx, sampleFile.Path) } if err != nil { return errors.Trace(err) } idAlloc := kv.NewPanickingAllocators(0) tbl, err := tables.TableFromMeta(idAlloc, tableInfo) kvEncoder, err := rc.backend.NewEncoder(tbl, &kv.SessionOptions{ SQLMode: rc.cfg.TiDB.SQLMode, Timestamp: 0, SysVars: rc.sysVars, AutoRandomSeed: 0, }) blockBufSize := int64(rc.cfg.Mydumper.ReadBlockSize) var parser mydump.Parser switch tableMeta.DataFiles[0].FileMeta.Type { case mydump.SourceTypeCSV: hasHeader := rc.cfg.Mydumper.CSV.Header // Create a utf8mb4 convertor to encode and decode data with the charset of CSV files. 
charsetConvertor, err := mydump.NewCharsetConvertor(rc.cfg.Mydumper.DataCharacterSet, rc.cfg.Mydumper.DataInvalidCharReplace) if err != nil { return errors.Trace(err) } parser, err = mydump.NewCSVParser(&rc.cfg.Mydumper.CSV, reader, blockBufSize, rc.ioWorkers, hasHeader, charsetConvertor) if err != nil { return errors.Trace(err) } case mydump.SourceTypeSQL: parser = mydump.NewChunkParser(rc.cfg.TiDB.SQLMode, reader, blockBufSize, rc.ioWorkers) case mydump.SourceTypeParquet: parser, err = mydump.NewParquetParser(ctx, rc.store, reader, sampleFile.Path) if err != nil { return errors.Trace(err) } default: panic(fmt.Sprintf("file '%s' with unknown source type '%s'", sampleFile.Path, sampleFile.Type.String())) } defer parser.Close() logTask := log.With(zap.String("table", tableMeta.Name)).Begin(zap.InfoLevel, "sample file") igCols, err := rc.cfg.Mydumper.IgnoreColumns.GetIgnoreColumns(dbName, tableMeta.Name, rc.cfg.Mydumper.CaseSensitive) if err != nil { return errors.Trace(err) } initializedColumns, reachEOF := false, false var columnPermutation []int var kvSize uint64 = 0 var rowSize uint64 = 0 rowCount := 0 dataKVs := rc.backend.MakeEmptyRows() indexKVs := rc.backend.MakeEmptyRows() lastKey := make([]byte, 0) tableMeta.IsRowOrdered = true tableMeta.IndexRatio = 1.0 outloop: for !reachEOF { offset, _ := parser.Pos() err = parser.ReadRow() columnNames := parser.Columns() switch errors.Cause(err) { case nil: if !initializedColumns { if len(columnPermutation) == 0 { columnPermutation, err = createColumnPermutation(columnNames, igCols.Columns, tableInfo) if err != nil { return errors.Trace(err) } } initializedColumns = true } case io.EOF: reachEOF = true break outloop default: err = errors.Annotatef(err, "in file offset %d", offset) return errors.Trace(err) } lastRow := parser.LastRow() rowSize += uint64(lastRow.Length) rowCount += 1 var dataChecksum, indexChecksum verification.KVChecksum kvs, encodeErr := kvEncoder.Encode(logTask.Logger, lastRow.Row, lastRow.RowID, 
columnPermutation, sampleFile.Path, offset) parser.RecycleRow(lastRow) if encodeErr != nil { err = errors.Annotatef(encodeErr, "in file at offset %d", offset) return errors.Trace(err) } if tableMeta.IsRowOrdered { kvs.ClassifyAndAppend(&dataKVs, &dataChecksum, &indexKVs, &indexChecksum) for _, kv := range kv.KvPairsFromRows(dataKVs) { if len(lastKey) == 0 { lastKey = kv.Key } else if bytes.Compare(lastKey, kv.Key) > 0 { tableMeta.IsRowOrdered = false break } } dataKVs = dataKVs.Clear() indexKVs = indexKVs.Clear() } kvSize += kvs.Size() failpoint.Inject("mock-kv-size", func(val failpoint.Value) { kvSize += uint64(val.(int)) }) if rowSize > maxSampleDataSize && rowCount > maxSampleRowCount { break } } if rowSize > 0 && kvSize > rowSize { tableMeta.IndexRatio = float64(kvSize) / float64(rowSize) } log.L().Info("Sample source data", zap.String("table", tableMeta.Name), zap.Float64("IndexRatio", tableMeta.IndexRatio), zap.Bool("IsSourceOrder", tableMeta.IsRowOrdered)) return nil }
c4pt0r/tidb
br/pkg/lightning/restore/check_info.go
GO
apache-2.0
35,743
#!/usr/bin/env python # Copyright 2016 Criteo # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific lanbg_guage governing permissions and # limitations under the License. from __future__ import print_function import unittest from biggraphite import graphite_utils as bg_graphite_utils class TestGraphiteUtils(unittest.TestCase): def test_accessor_from_settings(self): import types settings = types.ModuleType("settings") settings.BG_DRIVER = "memory" accessor = bg_graphite_utils.accessor_from_settings(settings) self.assertNotEquals(accessor, None) def test_cache_from_settings(self): import types settings = types.ModuleType("settings") settings.BG_CACHE = "memory" settings.BG_CACHE_SIZE = 10 settings.BG_CACHE_TTL = 60 settings.BG_CACHE_SYNC = False cache = bg_graphite_utils.cache_from_settings('fake', settings) self.assertNotEquals(cache, None) if __name__ == "__main__": unittest.main()
dpanth3r/biggraphite
tests/test_graphite_utils.py
Python
apache-2.0
1,444
/*
 * Copyright (C) 2015 University of Oregon
 *
 * You may distribute under the terms of either the GNU General Public
 * License or the Apache License, as specified in the LICENSE file.
 *
 * For more information, see the LICENSE file.
 */

package vnmr.templates;

import java.awt.*;
import java.awt.event.*;
import java.util.*;
import java.io.*;
import javax.swing.tree.*;

import com.sun.xml.tree.*;
import org.w3c.dom.*;

/**
 * The base class for all template ElementNode objects.
 * <p>
 * Bridges a DOM element ({@code ElementNode}) and a Swing tree model node
 * ({@code MutableTreeNode}). Child-access methods skip invisible
 * {@code VProtocolElement} children so hidden protocol nodes do not appear
 * in the tree view.
 *
 * @author Dean Sindorf
 */
public class VElement extends ElementNode implements MutableTreeNode
{
    /** Owning template; set by {@link #init(Template)}. */
    protected Template template = null;
    /** True once {@link #init(Template)} has run. */
    protected boolean init_flag = false;
    /** Visibility flag used by the tree view. */
    protected boolean m_bShow = true;
    /** Indent distance (pixels) for rendering this node. */
    private int m_indent = 0;

    // ElementNode utilities

    /** @return the element's local (tag) name. */
    public String type()                    { return getLocalName(); }

    /** @return true if attribute {@code s} is present with a non-empty value. */
    public boolean hasAttribute(String s)   { return getAttribute(s).length() != 0; }

    public Template getTemplate()           { return template; }
    public void setTemplate(Template t)     { template = t; }
    public boolean isVisible()              { return m_bShow; }
    public void setVisible(boolean bShow)   { m_bShow = bShow; }

    // member i/o routines; subclasses override as needed
    public String text()                    { return null; }
    public String toString()                { return type(); }
    public void setXMLAttributes()          { }
    public void getXMLAttributes()          { }

    // status methods
    public boolean isInitialized()          { return init_flag; }
    public boolean notInitialized()         { return !init_flag; }
    public boolean isActive()               { return false; }
    public boolean isGroup()                { return !isLeaf(); }

    //----------------------------------------------------------------
    /** Initialization method (calls getXMLAttributes). */
    //----------------------------------------------------------------
    public void init(Template m) {
        template = m;
        getXMLAttributes();
        if (template.debug_init)
            System.out.println("init :" + type());
        init_flag = true;
    }

    //----------------------------------------------------------------
    /** Overrides Element.writeXml (calls setXMLAttributes first). */
    //----------------------------------------------------------------
    public void writeXml(XmlWriteContext context) throws IOException {
        setXMLAttributes();
        super.writeXml(context);
    }

    //----------------------------------------------------------------
    /** Print element tag and attributes to System.out (debug aid). */
    //----------------------------------------------------------------
    public void dump() {
        String s;
        System.out.print(type());
        NamedNodeMap attrs = getAttributes();
        for (int i = 0; i < attrs.getLength(); i++) {
            Node a = attrs.item(i);
            System.out.print(" " + a.getNodeName() + '=' + '"' + a.getNodeValue() + '"');
        }
        System.out.print("\n");
    }

    //************* TreeNode interface implementation **************

    /**
     * Returns the n-th visible child; invisible VProtocolElement children
     * are skipped so tree indices match what the user sees.
     */
    public TreeNode getChildAt(int n) {
        int j = -1;
        TreeNode treenode = null;
        NodeList nodes = getChildNodes();
        int nSize = nodes.getLength();
        for (int i = 0; i < nSize; i++) {
            Node node = nodes.item(i);
            if (!(node instanceof VProtocolElement)
                    || ((VProtocolElement) node).isVisible())
                j = j + 1;
            if (j == n) {
                treenode = (TreeNode) node;
                break;
            }
        }
        return treenode;
    }

    // NOTE(review): counts ALL children, including invisible ones, while
    // getChildAt/getIndex skip invisible nodes — confirm whether this
    // asymmetry is intentional before changing it.
    public int getChildCount() { return getChildNodes().getLength(); }

    public TreeNode getParent() {
        Node parent = getParentNode();
        if ((parent != null) && (parent instanceof TreeNode))
            return (TreeNode) parent;
        return null;
    }

    /**
     * Returns the index of {@code node} among visible children; falls back
     * to the raw child index if the node is not found in the visible scan.
     */
    public int getIndex(TreeNode node) {
        int j = -1;
        Vector nodes = childElements();
        int pos = nodes.indexOf(node);
        int nLength = nodes.size();
        for (int i = 0; i < nLength; i++) {
            Node node2 = (Node) nodes.get(i);
            if (!(node2 instanceof VProtocolElement)
                    || ((VProtocolElement) node2).isVisible())
                j = j + 1;
            if (node2.equals(node))
                return j;
        }
        return pos;
    }

    public boolean getAllowsChildren() { return true; }
    public boolean isLeaf()            { return getChildCount() == 0; }
    public Enumeration children()      { return childElements().elements(); }

    //************* MutableTreeNode interface implementation ***********

    /** Inserts {@code child} before the current child at {@code index}. */
    public void insert(MutableTreeNode child, int index) {
        Node ref = (Node) getChildAt(index);
        insertBefore((Node) child, ref);
        ((VElement) child).init(template);
    }

    /** Removes the n-th child, if present. */
    public void remove(int n) {
        MutableTreeNode child = (MutableTreeNode) getChildAt(n);
        if (child != null)
            removeChild((Node) child);
    }

    /**
     * Removes {@code node} from this element's children.
     * <p>
     * BUG FIX: the previous body was {@code remove(node)}, which resolved to
     * this same overload and recursed forever (StackOverflowError on any
     * call). Delegate to the DOM removeChild instead.
     */
    public void remove(MutableTreeNode node) { removeChild((Node) node); }

    public void removeFromParent() { remove(); }

    public void setParent(MutableTreeNode newParent) {
        System.out.println("setParent not supported");
    }

    public void setUserObject(Object object) { }

    //*******************************************************************

    /** Inserts {@code child} before {@code ref} and initializes it. */
    public void insertBefore(MutableTreeNode child, MutableTreeNode ref) {
        insertBefore((Node) child, (Node) ref);
        ((VElement) child).init(template);
    }

    /** Moves an existing child so it precedes {@code ref}. */
    public void moveChild(MutableTreeNode child, MutableTreeNode ref) {
        if (child != ref) {
            removeChild((Node) child);
            insertBefore((Node) child, (Node) ref);
        }
    }

    //----------------------------------------------------------------
    /** Return a vector containing all child nodes. */
    //----------------------------------------------------------------
    protected Vector childElements() {
        Vector v = new Vector();
        NodeList nodes = getChildNodes();
        for (int i = 0; i < nodes.getLength(); i++) {
            Node node = nodes.item(i);
            v.add(node);
        }
        return v;
    }

    //----------------------------------------------------------------
    /** Remove all child nodes. */
    //----------------------------------------------------------------
    protected void removeChildren() {
        // Iterate backwards: removing while walking forward skips every
        // other child if the NodeList is live (DOM NodeLists usually are).
        // Backward removal is correct for both live and snapshot lists.
        NodeList nodes = getChildNodes();
        for (int i = nodes.getLength() - 1; i >= 0; i--) {
            removeChild(nodes.item(i));
        }
    }

    //----------------------------------------------------------------
    /** Return the first Element child, or null if there is none. */
    //----------------------------------------------------------------
    public VElement getFirstElement() {
        Enumeration elems = children();
        if (elems.hasMoreElements())
            return (VElement) elems.nextElement();
        return null;
    }

    //----------------------------------------------------------------
    /** Add a child node (overrides DOM method; drops non-VElement nodes). */
    //----------------------------------------------------------------
    public Node appendChild(Node child) {
        if (child instanceof VElement)
            return super.appendChild(child);  // removes text nodes
        return null;
    }

    //----------------------------------------------------------------
    /** Add a child node and initialize it with this node's template. */
    //----------------------------------------------------------------
    public VElement add(VElement child) {
        VElement elem = (VElement) appendChild(child);
        child.init(template);
        return elem;
    }

    //----------------------------------------------------------------
    /** Remove a child node; returns this for chaining. */
    //----------------------------------------------------------------
    public VElement remove(VElement child) {
        removeChild(child);
        return this;
    }

    //----------------------------------------------------------------
    /** Remove self from parent; returns this for chaining. */
    //----------------------------------------------------------------
    public VElement remove() {
        Node parent = getParentNode();
        if (parent != null)
            parent.removeChild(this);
        return this;
    }

    /**
     * Set the indentation for this node.
     * @param indent The indent distance (pixels).
     */
    public void setIndent(int indent) { m_indent = indent; }

    /**
     * Get the indentation for this node.
     * @return The indent distance (pixels).
     */
    public int getIndent() { return m_indent; }

    /**
     * Parse an attribute as an int.
     * @param attr attribute name
     * @param defaultValue value returned when the attribute is missing or
     *                     not a valid integer
     * @return parsed value or {@code defaultValue}
     */
    public int getIntAttribute(String attr, int defaultValue) {
        int rtn = defaultValue;
        try {
            rtn = Integer.parseInt(getAttribute(attr));
        } catch (NumberFormatException nfe) {
            // fall through to default: attribute absent or malformed
        }
        return rtn;
    }
}
OpenVnmrJ/OpenVnmrJ
src/vnmrj/src/vnmr/templates/VElement.java
Java
apache-2.0
8,486
/*
 * @license
 * Copyright Hôpitaux Universitaires de Genève. All Rights Reserved.
 *
 * Use of this source code is governed by an Apache-2.0 license that can be
 * found in the LICENSE file at https://github.com/DSI-HUG/dejajs-components/blob/master/LICENSE
 */

import { CommonModule } from '@angular/common';
import { NgModule } from '@angular/core';
import { DejaMonacoEditorComponent } from './monaco-editor.component';
import { MonacoEditorService } from './monaco-editor.service';

// Angular feature module bundling the Monaco editor component together with
// its supporting service, so consumers only need to import this one module.
@NgModule({
    declarations: [DejaMonacoEditorComponent],
    exports: [DejaMonacoEditorComponent],
    imports: [
        CommonModule,
    ],
    providers: [MonacoEditorService],
})
export class DejaMonacoEditorModule {}

// Barrel re-exports: expose the editor option models and the component type
// from this package entry point.
export * from './options/editor-language.model';
export * from './options/editor-theme.component';
export * from './monaco-editor.component';
rtrompier/deja-test
src/component/monaco-editor/index.ts
TypeScript
apache-2.0
865
<?php
// Contact page: pulls in the shared page <head> markup, then renders the
// event organisers' contact details.
require_once('header.php');
?>
<body style="background: #34495e;">
    <div class="container">
        <header>
            <!-- FIX: removed a stray duplicate </h1> closing tag -->
            <h1>Contact Us<span>For Any Details Regarding the Event, Contact any one of us: <br> Mayank Gupta 9582645012 <br> Shubham Jindal 9899269537<br>Abhay Mitra 8130243421</span></h1>
        </header>
    </div>
</body>
skjindal93/Ultimate-Tic-Tac-Toe
contact.php
PHP
apache-2.0
327
/*
 * Copyright 2018 Red Hat, Inc. and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.kie.workbench.common.stunner.bpmn.backend.converters.customproperties;

import java.util.Optional;

import org.eclipse.bpmn2.BaseElement;
import org.jboss.drools.DroolsPackage;
import org.kie.workbench.common.stunner.bpmn.definition.property.diagram.Package;
import org.kie.workbench.common.stunner.core.graph.content.view.Point2D;

/**
 * Typed accessor for a Drools-namespace custom attribute on a BPMN
 * {@link BaseElement}. Pairs an {@link AttributeDefinition} (which knows the
 * attribute's name, namespace, default and string conversion) with a concrete
 * element, so callers can {@link #get()} and {@link #set(Object)} values.
 * The static fields below declare every custom attribute the converters use;
 * several override get/set to stay compatible with legacy marshallers.
 *
 * @param <T> the Java type the attribute value converts to/from
 */
public class CustomAttribute<T> {

    // Namespace URI all of these attributes live under (the Drools extension ns).
    private static final String droolsns = DroolsPackage.eNS_URI;

    public static final AttributeDefinition<Boolean> independent = new BooleanAttribute(droolsns, "independent", false);
    public static final AttributeDefinition<Boolean> adHoc = new BooleanAttribute(droolsns, "adHoc", false);
    public static final AttributeDefinition<Boolean> waitForCompletion = new BooleanAttribute(droolsns, "waitForCompletion", false);
    public static final AttributeDefinition<String> serviceTaskName = new StringAttribute(droolsns, "taskName", "");
    public static final AttributeDefinition<String> ruleFlowGroup = new StringAttribute(droolsns, "ruleFlowGroup", "");

    // Writes Package.DEFAULT_PACKAGE instead of an empty string, and refuses
    // to write null at all (the XML serializer would NPE on a null value).
    public static final AttributeDefinition<String> packageName = new StringAttribute(droolsns, "packageName", null) {
        @Override
        public void setValue(BaseElement element, String value) {
            // do not set if null or the XML serializer will NPE
            if (value != null) {
                super.setValue(element, value.isEmpty() ? Package.DEFAULT_PACKAGE : value);
            }
        }
    };

    public static final AttributeDefinition<String> version = new StringAttribute(droolsns, "version", "1.0");
    public static final AttributeDefinition<String> errorName = new StringAttribute(droolsns, "erefname", "");
    public static final AttributeDefinition<String> msgref = new StringAttribute(droolsns, "msgref", "");
    public static final AttributeDefinition<String> esccode = new StringAttribute(droolsns, "esccode", "");

    // "boundaryca" read on a boundary event: empty/undefined maps to false.
    public static final AttributeDefinition<Boolean> boundarycaForBoundaryEvent = new BooleanAttribute(droolsns, "boundaryca", false) {
        @Override
        public Boolean getValue(BaseElement element) {
            // this is for compatibility with legacy marshallers
            // always return the default regardless the string was empty in the file
            // or it was actually undefined
            String value = super.getStringValue(element).orElse("");
            return value.isEmpty() ? defaultValue : Boolean.parseBoolean(value);
        }
    };

    // Same attribute read on a non-boundary event: empty/undefined maps to true.
    public static final AttributeDefinition<Boolean> boundarycaForEvent = new BooleanAttribute(droolsns, "boundaryca", true) {
        @Override
        public Boolean getValue(BaseElement element) {
            // this is for compatibility with legacy marshallers
            // always return the default regardless the string was empty in the file
            // or it was actually undefined
            String value = super.getStringValue(element).orElse("");
            return value.isEmpty() ? defaultValue : Boolean.parseBoolean(value);
        }
    };

    public static final AttributeDefinition<String> priority = new StringAttribute(droolsns, "priority", null);
    public static final AttributeDefinition<String> dtype = new StringAttribute(droolsns, "dtype", "");

    // "dg": an empty stored string is reported as null for legacy compatibility.
    public static final AttributeDefinition<String> dg = new StringAttribute(droolsns, "dg", "") {
        @Override
        public String getValue(BaseElement element) {
            // this is for compatibility with legacy marshallers
            // always return null regardless the string was empty in the file
            // or it was actually undefined
            String value = super.getValue(element);
            return value.isEmpty() ? null : value;
        }
    };

    // Docker position serialized as "x^y|" (trailing '|' stripped on read;
    // only the first '|'-separated segment is used).
    public static final AttributeDefinition<Point2D> dockerInfo = new AttributeDefinition<Point2D>(droolsns, "dockerinfo", Point2D.create(0, 0)) {
        @Override
        public Point2D getValue(BaseElement element) {
            Optional<String> attribute = getStringValue(element);
            if (attribute.isPresent()) {
                String dockerInfoStr = attribute.get();
                // drop the trailing '|' delimiter before splitting
                dockerInfoStr = dockerInfoStr.substring(0, dockerInfoStr.length() - 1);
                String[] dockerInfoParts = dockerInfoStr.split("\\|");
                String infoPartsToUse = dockerInfoParts[0];
                String[] infoPartsToUseParts = infoPartsToUse.split("\\^");
                double x = Double.valueOf(infoPartsToUseParts[0]);
                double y = Double.valueOf(infoPartsToUseParts[1]);
                return Point2D.create(x, y);
            } else {
                return Point2D.create(0, 0);
            }
        }

        @Override
        public void setValue(BaseElement element, Point2D value) {
            setStringValue(element, String.format("%.1f^%.1f|", value.getX(), value.getY()));
        }
    };

    // The definition this accessor applies and the element it applies it to.
    private final AttributeDefinition<T> attributeDefinition;
    private final BaseElement element;

    public CustomAttribute(AttributeDefinition<T> attributeDefinition, BaseElement element) {
        this.attributeDefinition = attributeDefinition;
        this.element = element;
    }

    /** Read the attribute value from the element (definition default applies). */
    public T get() {
        return attributeDefinition.getValue(element);
    }

    /** Write the attribute value; null values are ignored rather than written. */
    public void set(T value) {
        if (value != null) {
            attributeDefinition.setValue(element, value);
        }
    }
}
jhrcek/kie-wb-common
kie-wb-common-stunner/kie-wb-common-stunner-sets/kie-wb-common-stunner-bpmn/kie-wb-common-stunner-bpmn-backend/src/main/java/org/kie/workbench/common/stunner/bpmn/backend/converters/customproperties/CustomAttribute.java
Java
apache-2.0
6,012
// Copyright 2007-2015 Chris Patterson, Dru Sellers, Travis Smith, et. al.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
namespace MassTransit.Tests.Courier
{
    using System;
    using System.Threading.Tasks;
    using MassTransit.Courier;
    using MassTransit.Courier.Contracts;
    using NUnit.Framework;
    using TestFramework;
    using TestFramework.Courier;

    /// <summary>
    /// Executes a routing slip whose third activity (Faulty) throws, and
    /// verifies that the earlier completed activities are compensated and
    /// that the routing-slip/activity events carry the tracking number,
    /// results and variables that were set up below.
    /// </summary>
    [TestFixture]
    public class Executing_a_faulting_routing_slip_with_compensating_activities :
        InMemoryActivityTestFixture
    {
        // Futures completed by the event handlers subscribed in Setup().
        Task<ConsumeContext<RoutingSlipFaulted>> _faulted;
        Task<ConsumeContext<RoutingSlipActivityCompleted>> _activityCompleted;
        Task<ConsumeContext<RoutingSlipActivityFaulted>> _activityFaulted;
        Guid _trackingNumber;
        Task<ConsumeContext<RoutingSlipActivityCompleted>> _secondActivityCompleted;
        Task<ConsumeContext<RoutingSlipActivityCompensated>> _activityCompensated;

        [OneTimeSetUp]
        public async Task Setup()
        {
            // Subscribe before executing so no event is missed; filters match
            // the activity names registered in SetupActivities().
            _faulted = SubscribeHandler<RoutingSlipFaulted>();
            _activityCompleted = SubscribeHandler<RoutingSlipActivityCompleted>(x => x.Message.ActivityName.Equals("Test"));
            _activityCompensated = SubscribeHandler<RoutingSlipActivityCompensated>(x => x.Message.ActivityName.Equals("Test"));
            _secondActivityCompleted = SubscribeHandler<RoutingSlipActivityCompleted>(x => x.Message.ActivityName.Equals("SecondTest"));
            _activityFaulted = SubscribeHandler<RoutingSlipActivityFaulted>(x => x.Message.ActivityName.Equals("Faulty"));

            _trackingNumber = NewId.NextGuid();
            var builder = new RoutingSlipBuilder(_trackingNumber);
            // Route all routing-slip events back to this test bus.
            builder.AddSubscription(Bus.Address, RoutingSlipEvents.All);

            // Activity 1: completes and logs a result ("OriginalValue").
            ActivityTestContext testActivity = GetActivityContext<TestActivity>();
            builder.AddActivity(testActivity.Name, testActivity.ExecuteUri, new
            {
                Value = "Hello",
            });

            // Activity 2: completes.
            testActivity = GetActivityContext<SecondTestActivity>();
            builder.AddActivity(testActivity.Name, testActivity.ExecuteUri);

            // Activity 3: faults, triggering compensation of activity 1.
            testActivity = GetActivityContext<FaultyActivity>();
            builder.AddActivity(testActivity.Name, testActivity.ExecuteUri);

            builder.AddVariable("Variable", "Knife");

            await Bus.Execute(builder.Build());
        }

        protected override void SetupActivities()
        {
            AddActivityContext<TestActivity, TestArguments, TestLog>(() => new TestActivity());
            AddActivityContext<SecondTestActivity, TestArguments, TestLog>(() => new SecondTestActivity());
            AddActivityContext<FaultyActivity, FaultyArguments, FaultyLog>(() => new FaultyActivity());
        }

        [Test]
        public async Task Should_compensate_completed_activity()
        {
            // The compensated event must refer to the same slip that completed.
            ConsumeContext<RoutingSlipActivityCompensated> compensated = await _activityCompensated;
            ConsumeContext<RoutingSlipActivityCompleted> completed = await _activityCompleted;

            Assert.AreEqual(completed.Message.TrackingNumber, compensated.Message.TrackingNumber);
        }

        [Test]
        public async Task Should_compensate_first_activity()
        {
            ConsumeContext<RoutingSlipActivityCompensated> context = await _activityCompensated;

            Assert.AreEqual(_trackingNumber, context.Message.TrackingNumber);
        }

        [Test]
        public async Task Should_complete_activity_with_log()
        {
            // The execute log of the first activity carries the original value.
            ConsumeContext<RoutingSlipActivityCompleted> context = await _activityCompleted;

            Assert.AreEqual("Hello", context.Message.GetResult<string>("OriginalValue"));
        }

        [Test]
        public async Task Should_complete_first_activity()
        {
            ConsumeContext<RoutingSlipActivityCompleted> context = await _activityCompleted;

            Assert.AreEqual(_trackingNumber, context.Message.TrackingNumber);
        }

        [Test]
        public async Task Should_complete_second_activity()
        {
            ConsumeContext<RoutingSlipActivityCompleted> context = await _secondActivityCompleted;

            Assert.AreEqual(_trackingNumber, context.Message.TrackingNumber);
        }

        [Test]
        public async Task Should_complete_with_variable()
        {
            // Variables added via AddVariable flow through to activity events.
            ConsumeContext<RoutingSlipActivityCompleted> context = await _activityCompleted;

            Assert.AreEqual("Knife", context.Message.GetVariable<string>("Variable"));
        }

        [Test]
        public async Task Should_fault_activity_with_variable()
        {
            ConsumeContext<RoutingSlipActivityFaulted> context = await _activityFaulted;

            Assert.AreEqual("Knife", context.Message.GetVariable<string>("Variable"));
        }

        [Test]
        public async Task Should_fault_third_activity()
        {
            ConsumeContext<RoutingSlipActivityFaulted> context = await _activityFaulted;

            Assert.AreEqual(_trackingNumber, context.Message.TrackingNumber);
        }

        [Test]
        public async Task Should_fault_with_variable()
        {
            ConsumeContext<RoutingSlipFaulted> context = await _faulted;

            Assert.AreEqual("Knife", context.Message.GetVariable<string>("Variable"));
        }
    }
}
drusellers/MassTransit
src/MassTransit.Tests/Courier/FaultActivityEvent_Specs.cs
C#
apache-2.0
5,994
/*
 * Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.mturk.model.transform;

import java.math.*;

import javax.annotation.Generated;

import com.amazonaws.services.mturk.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;

import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;

/**
 * NotificationSpecification JSON Unmarshaller.
 *
 * Code-generated (see the @Generated annotation) depth-tracking JSON parser
 * that maps the fields of a NotificationSpecification object. Do not edit by
 * hand; regenerate from the service model instead.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class NotificationSpecificationJsonUnmarshaller implements Unmarshaller<NotificationSpecification, JsonUnmarshallerContext> {

    /**
     * Unmarshall one NotificationSpecification from the current position of
     * the context's token stream. Returns null when the JSON value is null.
     */
    public NotificationSpecification unmarshall(JsonUnmarshallerContext context) throws Exception {
        NotificationSpecification notificationSpecification = new NotificationSpecification();

        // Remember where we started so nested objects/arrays can be detected
        // and the loop can stop once this object's closing brace is reached.
        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        int targetDepth = originalDepth + 1;

        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        if (token == VALUE_NULL) {
            return null;
        }

        while (true) {
            if (token == null)
                break;

            if (token == FIELD_NAME || token == START_OBJECT) {
                // Each testExpression matches a field name at this object's depth.
                if (context.testExpression("Destination", targetDepth)) {
                    context.nextToken();
                    notificationSpecification.setDestination(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("Transport", targetDepth)) {
                    context.nextToken();
                    notificationSpecification.setTransport(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("Version", targetDepth)) {
                    context.nextToken();
                    notificationSpecification.setVersion(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("EventTypes", targetDepth)) {
                    context.nextToken();
                    notificationSpecification.setEventTypes(new ListUnmarshaller<String>(context.getUnmarshaller(String.class)).unmarshall(context));
                }
            } else if (token == END_ARRAY || token == END_OBJECT) {
                // Stop when we pop back out to (or above) the depth we started at.
                if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }

        return notificationSpecification;
    }

    private static NotificationSpecificationJsonUnmarshaller instance;

    // Lazily-created shared instance. NOTE(review): initialization is not
    // synchronized; presumably benign since the unmarshaller is stateless,
    // but two instances may briefly exist under contention.
    public static NotificationSpecificationJsonUnmarshaller getInstance() {
        if (instance == null)
            instance = new NotificationSpecificationJsonUnmarshaller();
        return instance;
    }
}
jentfoo/aws-sdk-java
aws-java-sdk-mechanicalturkrequester/src/main/java/com/amazonaws/services/mturk/model/transform/NotificationSpecificationJsonUnmarshaller.java
Java
apache-2.0
3,636
/**
 * Copyright (c) 2017, Stupid Bird and/or its affiliates. All rights reserved.
 * STUPID BIRD PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
 *
 * @Project : demo
 * @Package : net.lizhaoweb.demo.hadoop.hdfs2mysql
 * @author <a href="http://www.lizhaoweb.net">李召(John.Lee)</a>
 * @EMAIL 404644381@qq.com
 * @Time : 18:57
 */
package net.lizhaoweb.demo.hadoop.hdfs2mysql;

import net.lizhaoweb.common.util.argument.ArgumentFactory;
import net.lizhaoweb.spring.hadoop.commons.argument.MapReduceConstant;
import net.lizhaoweb.spring.hadoop.commons.argument.model.Argument;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.db.DBConfiguration;
import org.apache.hadoop.mapreduce.lib.db.DBOutputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.util.Tool;

/**
 * Hadoop {@link Tool} that configures and runs a MapReduce job copying data
 * from HDFS input paths into the MySQL table {@code test_media} via
 * {@link DBOutputFormat}.
 *
 * @author <a href="http://www.lizhaoweb.cn">李召(John.Lee)</a>
 * @version 1.0.0.0.1
 * @EMAIL 404644381@qq.com
 * @notes Created on 2017年08月01日<br>
 * Revision of last commit:$Revision$<br>
 * Author of last commit:$Author$<br>
 * Date of last commit:$Date$<br>
 */
public class HDFS2MysqlScheduler extends Configured implements Tool {

    /**
     * Build and run the job.
     *
     * @param args command-line arguments forwarded to the argument parser
     * @return 0 on successful job completion, 1 on any failure
     */
    @Override
    public int run(String[] args) throws Exception {
        try {
            MapReduceConstant.CommonVariables.initMapReduce(this.getConf(), args);

            // Job name (from the parsed arguments; may be blank)
            String jobName = ArgumentFactory.getParameterValue(Argument.JobName);
            ArgumentFactory.printInputArgument(Argument.JobName, jobName, false);

            // HDFS input paths to read from
            String[] hdfsInputPathArray = ArgumentFactory.getParameterValues(Argument.InputPath);
            ArgumentFactory.printInputArgument(Argument.InputPath, hdfsInputPathArray);

            // Preparation: input paths are mandatory; fall back to the class
            // name when no job name was supplied.
            ArgumentFactory.checkNullValuesForArgument(Argument.InputPath, hdfsInputPathArray);
            if (StringUtils.isBlank(jobName)) {
                jobName = this.getClass().getName();
            }

            // This line is critical: points the job at the (local) job tracker.
            // NOTE(review): "localhost:9001" is hard-coded — demo-only setting.
            this.getConf().set("mapred.job.tracker", "localhost:9001");
            // NOTE(review): JDBC URL, user and password are hard-coded here;
            // for anything beyond a demo these should come from configuration,
            // not source code.
            DBConfiguration.configureDB(this.getConf(), "com.mysql.jdbc.Driver", "jdbc:mysql://localhost:3307/cloud?useSSL=false&useUnicode=true&characterEncoding=utf8&characterSetResults=utf8&zeroDateTimeBehavior=convertToNull", "root", "123456");

            Job job = Job.getInstance(this.getConf(), jobName);
            job.setJarByClass(this.getClass());
            for (String hdfsInputPath : hdfsInputPathArray) {
                Path inputPath = new Path(hdfsInputPath);
                FileInputFormat.addInputPath(job, inputPath);
            }

            job.setMapperClass(HDFSInputMapper.class);
            job.setMapOutputKeyClass(LongWritable.class);
            job.setMapOutputValueClass(Text.class);
            // job.setOutputKeyClass(LongWritable.class);
            // job.setOutputValueClass(Text.class);
            job.setReducerClass(DBOutputReducer.class);
            job.setOutputFormatClass(DBOutputFormat.class);

            // Target table and its column list, in insert order.
            DBOutputFormat.setOutput(job, "test_media", "id", "name", "description", "duration", "surfix", "type", "md5", "oss_addr", "oss_etag", "file_path", "creator_id", "creator", "checker_id", "create_time", "flag", "state");

            boolean status = job.waitForCompletion(true);
            if (!status) {
                throw new Exception("MapReduce task execute failed.........");
            }
            return 0;
        } catch (Exception e) {
            // NOTE(review): broad catch + printStackTrace; a logger would be
            // preferable, but the non-zero exit code is preserved either way.
            e.printStackTrace();
            return 1;
        }
    }
}
JohnLee-Organization/demo
hadoop/hdfs/mysql/src/main/java/net/lizhaoweb/demo/hadoop/hdfs2mysql/HDFS2MysqlScheduler.java
Java
apache-2.0
3,806
package org.coderswithoutborders.deglancer.func_debug.stage2;

import org.coderswithoutborders.deglancer.model.Stage;

/**
 * MVP view contract for the stage-2 debug screen.
 *
 * Created by Renier on 2016/05/06.
 */
public interface IDebugStage2View {

    /** Close the hosting activity. */
    void finishActivity();

    /** Navigate forward to the stage-3 debug view. */
    void moveToStage3View();

    /** Navigate back to the stage-1 debug view. */
    void moveToStage1View();

    /** Supply the current {@link Stage} model for the view to render. */
    void setStage(Stage stage);

    /** Show the given stage label in the screen title. */
    void setTitleStage(String stage);
}
coderswithoutborders/deglancer
Deglancer/app/src/main/java/org/coderswithoutborders/deglancer/func_debug/stage2/IDebugStage2View.java
Java
apache-2.0
356
/* * Copyright (C) 2020 AlexMofer * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package am.util.mvp.ui; import androidx.annotation.ContentView; import androidx.annotation.NonNull; import androidx.lifecycle.Lifecycle; import androidx.lifecycle.LifecycleEventObserver; import androidx.lifecycle.LifecycleOwner; import am.appcompat.app.BaseActivity; import am.util.mvp.core.MVPView; import am.util.mvp.core.MVPViewHolder; /** * MVP Activity * Created by Alex on 2020/3/11. */ public abstract class MVPActivity extends BaseActivity implements MVPView { private final MVPViewHolder<MVPView> mViewHolder = new MVPViewHolder<>(); private final LifecycleEventObserver mLifecycleEventObserver = this::onLifecycleOwnerStateChanged; public MVPActivity() { getLifecycle().addObserver(mLifecycleEventObserver); } @ContentView public MVPActivity(int contentLayoutId) { super(contentLayoutId); getLifecycle().addObserver(mLifecycleEventObserver); } private void onLifecycleOwnerStateChanged(@NonNull LifecycleOwner source, @NonNull Lifecycle.Event event) { switch (event) { case ON_CREATE: mViewHolder.setView(this); break; case ON_DESTROY: mViewHolder.setView(null); source.getLifecycle().removeObserver(mLifecycleEventObserver); break; } } /** * 获取ViewHolder * * @return ViewHolder */ @NonNull protected MVPViewHolder<? extends MVPView> getViewHolder() { return mViewHolder; } /** * 设置View * 便于在View已创建后手动调用 * * @param view View */ protected void setView(MVPView view) { mViewHolder.setView(view); } }
AlexMofer/AMWidget
mvp-ui/src/main/java/am/util/mvp/ui/MVPActivity.java
Java
apache-2.0
2,375
package au.com.dius.pact.consumer;

import au.com.dius.pact.consumer.model.MockProviderConfig;
import au.com.dius.pact.core.model.PactSpecVersion;
import au.com.dius.pact.core.model.RequestResponsePact;
import org.junit.Test;

import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

import static au.com.dius.pact.consumer.ConsumerPactRunnerKt.runConsumerTest;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;

/**
 * Regression tests that POST a body to a Pact mock server and verify the
 * stubbed response round-trips unchanged for several content types, with
 * and without an explicit charset parameter.
 */
public class PactDefectTest {
    private static final String method = "POST";
    private static final String path = "/ping";

    @Test
    public void json() {
        test("{\"msg\" : \"ping\"}", "{\"msg\":\"pong\"}", "application/json");
    }

    @Test
    public void jsonWithCharset() {
        test("{\"msg\" : \"ping\"}", "{\"msg\":\"pong\"}", "application/json; charset=utf8");
    }

    @Test
    public void xml() {
        test("<ping />", "<pong />", "application/xml");
    }

    @Test
    public void xmlWithCharset() {
        test("<ping />", "<pong />", "application/xml; charset=utf-8");
    }

    @Test
    public void plain() {
        test("ping", "pong", "text/plain");
    }

    @Test
    public void plainWithCharset() {
        test("ping", "pong", "text/plain; charset=utf-8");
    }

    /**
     * Build a single-interaction pact, run it against an in-process mock
     * provider, POST the request body and assert the response matches.
     * Any error reported by the runner is rethrown so the test fails loudly.
     */
    private void test(final String requestBody, final String expectedResponseBody, final String contentType) {
        RequestResponsePact pact = ConsumerPactBuilder
            .consumer("ping_consumer")
            .hasPactWith("ping_provider")
            .uponReceiving("Ping with " + contentType)
            .path(path)
            .method(method)
            .body(requestBody, contentType)
            .willRespondWith()
            .status(200)
            .body(expectedResponseBody, contentType)
            .toPact();

        // Port 0 lets the mock server pick a free port.
        PactVerificationResult result = runConsumerTest(pact, new MockProviderConfig("localhost", 0, PactSpecVersion.V3), (mockServer, context) -> {
            try {
                URL url = new URL(mockServer.getUrl() + path);
                String response = post(url, contentType, requestBody);
                assertEquals(expectedResponseBody, response);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
            return true;
        });

        if (result instanceof PactVerificationResult.Error) {
            throw new RuntimeException(((PactVerificationResult.Error)result).getError());
        }

        assertThat(result, is(instanceOf(PactVerificationResult.Ok.class)));
    }

    /**
     * POST requestBody to url with the given Content-Type and return the
     * response body as a single concatenated string (line breaks dropped).
     * NOTE(review): streams are only closed on the success path — an
     * exception mid-request leaks the connection's streams; acceptable for
     * a test helper, but worth knowing.
     */
    private String post(URL url, String contentType, String requestBody) {
        try {
            HttpURLConnection http = (HttpURLConnection) url.openConnection();
            http.setRequestMethod("POST");
            http.setRequestProperty("Content-Type", contentType);
            http.setDoOutput(true);
            http.setDoInput(true);

            DataOutputStream wr = new DataOutputStream(http.getOutputStream());
            wr.writeBytes(requestBody);
            wr.flush();
            wr.close();

            BufferedReader in = new BufferedReader(
                new InputStreamReader(http.getInputStream()));
            String inputLine;
            StringBuilder httpResponse = new StringBuilder();
            while ((inputLine = in.readLine()) != null) {
                httpResponse.append(inputLine);
            }
            in.close();
            return httpResponse.toString();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
DiUS/pact-jvm
consumer/src/test/java/au/com/dius/pact/consumer/PactDefectTest.java
Java
apache-2.0
3,733
/**
 * Opensec OVAL - https://nakamura5akihito.github.io/
 * Copyright (C) 2015 Akihito Nakamura
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.opensec.oval.model.windows;

import io.opensec.oval.model.ComponentType;
import io.opensec.oval.model.ElementRef;
import io.opensec.oval.model.Family;
import io.opensec.oval.model.definitions.EntityStateAnySimpleType;
import io.opensec.oval.model.definitions.EntityStateStringType;
import io.opensec.oval.model.definitions.StateType;

import java.util.ArrayList;
import java.util.Collection;



/**
 * The activedirectory state defines the different information
 * that can be used to evaluate the specified entries in active directory.
 * All entity fields have cardinality {0..1} and may be null when unset.
 *
 * @author  Akihito Nakamura, AIST
 * @see <a href="http://oval.mitre.org/language/">OVAL Language</a>
 */
public class ActiveDirectoryState
    extends StateType
{

    // Each field mirrors one optional OVAL state entity ({0..1}).
    private EntityStateNamingContextType  naming_context;     //{0..1}
    private EntityStateStringType         relative_dn;        //{0..1}
    private EntityStateStringType         attribute;          //{0..1}
    private EntityStateStringType         object_class;       //{0..1}
    private EntityStateAdstypeType        adstype;            //{0..1}
    private EntityStateAnySimpleType      value;              //{0..1}



    /**
     * Constructor.
     */
    public ActiveDirectoryState()
    {
        this( null, 0 );
    }


    public ActiveDirectoryState(
                    final String id,
                    final int version
                    )
    {
        this( id, version, null );
    }


    public ActiveDirectoryState(
                    final String id,
                    final int version,
                    final String comment
                    )
    {
        super( id, version, comment );

        // Identify this state as the Windows activedirectory component.
//        _oval_platform_type = OvalPlatformType.windows;
//        _oval_component_type = OvalComponentType.activedirectory;
        _oval_family = Family.WINDOWS;
        _oval_component = ComponentType.ACTIVEDIRECTORY;
    }



    /**
     */
    public void setNamingContext(
                    final EntityStateNamingContextType hive
                    )
    {
        naming_context = hive;
    }


    public EntityStateNamingContextType getNamingContext()
    {
        return naming_context;
    }



    /**
     */
    public void setRelativeDn(
                    final EntityStateStringType relative_dn
                    )
    {
        this.relative_dn = relative_dn;
    }


    public EntityStateStringType getRelativeDn()
    {
        return relative_dn;
    }



    /**
     */
    public void setAttribute(
                    final EntityStateStringType attribute
                    )
    {
        this.attribute = attribute;
    }


    public EntityStateStringType getAttribute()
    {
        return attribute;
    }



    /**
     */
    public void setObjectClass(
                    final EntityStateStringType object_class
                    )
    {
        this.object_class = object_class;
    }


    public EntityStateStringType getObjectClass()
    {
        return object_class;
    }



    /**
     */
    public void setAdstype(
                    final EntityStateAdstypeType adstype
                    )
    {
        this.adstype = adstype;
    }


    public EntityStateAdstypeType getAdstype()
    {
        return adstype;
    }



    /**
     */
    public void setValue(
                    final EntityStateAnySimpleType value
                    )
    {
        this.value = value;
    }


    public EntityStateAnySimpleType getValue()
    {
        return value;
    }



    //*********************************************************************
    //  DefinitionsElement
    //*********************************************************************

    // Collects every entity reference, including unset (null) ones —
    // callers appear to tolerate nulls in the returned list.
    @Override
    public Collection<ElementRef> ovalGetElementRef()
    {
        Collection<ElementRef>  ref_list = new ArrayList<ElementRef>();

        ref_list.add( getNamingContext() );
        ref_list.add( getRelativeDn() );
        ref_list.add( getAttribute() );
        ref_list.add( getObjectClass() );
        ref_list.add( getAdstype() );
        ref_list.add( getValue() );

        return ref_list;
    }



    //**************************************************************
    //  java.lang.Object
    //**************************************************************

    // equals/hashCode delegate to StateType (identity is id/version-based
    // there); the instanceof guard keeps the symmetry contract with
    // this subclass.
    @Override
    public int hashCode()
    {
        return super.hashCode();
    }



    @Override
    public boolean equals(
                    final Object obj
                    )
    {
        if (!(obj instanceof ActiveDirectoryState)) {
            return false;
        }

        return super.equals( obj );
    }



    @Override
    public String toString()
    {
        return "activedirectory_state[" + super.toString()
             + ", naming_context="  + getNamingContext()
             + ", relative_dn="     + getRelativeDn()
             + ", attribute="       + getAttribute()
             + ", object_class="    + getObjectClass()
             + ", adstype="         + getAdstype()
             + ", value="           + getValue()
             + "]";
    }

}
//ActiveDirectoryState
nakamura5akihito/opensec-oval
src/main/java/io/opensec/oval/model/windows/ActiveDirectoryState.java
Java
apache-2.0
5,755
package com.mumux.androidtesting.actions.argument;

import org.junit.Assert;
import org.junit.Test;

/**
 * Unit tests for {@code ArgumentType.parseValue(String)} covering the
 * ON_OFF, INTEGER and STRING argument types, including the error paths.
 */
public class ArgumentTypeTest {

    @Test
    public void testParseValue() {
        Object value;

        // ON_OFF: an invalid token must raise IllegalArgumentException.
        try {
            ArgumentType.ON_OFF.parseValue("xx");
            // FIX: the original try/catch had no fail() call, so if the
            // exception stopped being thrown the test still passed silently.
            Assert.fail("Expected IllegalArgumentException for value 'xx'");
        } catch (IllegalArgumentException e) {
            Assert.assertEquals("Invalid value xx - should be 'on' or 'off'", e.getMessage());
        }

        // ON_OFF: "on" parses to Boolean.TRUE.
        value = ArgumentType.ON_OFF.parseValue("on");
        Assert.assertTrue(value instanceof Boolean);
        Assert.assertTrue((Boolean) value);

        // INTEGER: a non-numeric token must raise NumberFormatException.
        try {
            ArgumentType.INTEGER.parseValue("xx");
            // FIX: same missing-fail() issue as above.
            Assert.fail("Expected NumberFormatException for value 'xx'");
        } catch (java.lang.NumberFormatException e) {
            Assert.assertEquals("For input string: \"xx\"", e.getMessage());
        }

        // INTEGER: a numeric string parses to an Integer.
        value = ArgumentType.INTEGER.parseValue("1000");
        Assert.assertTrue(value instanceof Integer);
        Assert.assertEquals(1000, ((Integer) value).intValue());

        // STRING: returned verbatim as a String.
        value = ArgumentType.STRING.parseValue("Click");
        Assert.assertTrue(value instanceof String);
        Assert.assertEquals("Click", value.toString());
    }
}
fabricereix/AndroidFunctionalTester
src/test/java/com/mumux/androidtesting/actions/argument/ArgumentTypeTest.java
Java
apache-2.0
1,131
/* * Copyright 2014 MIR@MU. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package cz.muni.fi.mir.db.service.impl; import cz.muni.fi.mir.db.dao.AnnotationValueDAO; import cz.muni.fi.mir.db.dao.CanonicOutputDAO; import cz.muni.fi.mir.db.dao.FormulaDAO; import cz.muni.fi.mir.db.domain.Annotation; import cz.muni.fi.mir.db.domain.AnnotationValue; import cz.muni.fi.mir.db.domain.CanonicOutput; import cz.muni.fi.mir.db.domain.Configuration; import cz.muni.fi.mir.db.domain.Revision; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.TreeMap; import javax.persistence.EntityManager; import javax.persistence.PersistenceContext; import org.apache.log4j.Logger; import org.joda.time.DateTime; import org.springframework.scheduling.annotation.Scheduled; import org.springframework.transaction.annotation.Transactional; import cz.muni.fi.mir.db.domain.Statistics; import cz.muni.fi.mir.db.domain.StatisticsHolder; import cz.muni.fi.mir.db.service.StatisticsService; import java.util.ArrayList; import java.util.HashMap; import java.util.Objects; import javax.persistence.NoResultException; import org.hibernate.Hibernate; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; /** * * @author Dominik Szalai - emptulik at gmail.com */ @Service(value = "statisticsService") public class StatisticsServiceImpl implements StatisticsService { @Autowired private AnnotationValueDAO annotationValueDAO; @Autowired 
private CanonicOutputDAO canonicOutputDAO; @Autowired private FormulaDAO formulaDAO; @PersistenceContext private EntityManager entityManager; private static final Logger logger = Logger.getLogger(StatisticsServiceImpl.class); @Override @Transactional(readOnly = false) public void calculate() { Statistics statistics = new Statistics(); Map<StatPair,Map<String,Integer>> temp = new HashMap<>(); List<AnnotationValue> aValues = annotationValueDAO.getAll(); List<StatisticsHolder> holders = new ArrayList<>(); for(AnnotationValue av : aValues) { List<Annotation> annotations = entityManager.createQuery("SELECT a FROM annotation a WHERE a.annotationContent LIKE :annotationTag", Annotation.class) .setParameter("annotationTag", "%"+av.getValue()+"%").getResultList(); logger.debug(av.getValue()+"$"+annotations.size()); if(annotations.isEmpty()) { continue; } else { for(Annotation a : annotations) { logger.trace("Procesing annotation:"+a.getAnnotationContent()); if(av.getType().equals(AnnotationValue.Type.CANONICOUTPUT)) { logger.trace("Matched type is canonic output"); CanonicOutput co = canonicOutputDAO.getCanonicOutputByAnnotation(a); Hibernate.initialize(co.getApplicationRun()); StatPair sp = new StatPair(co.getApplicationRun().getConfiguration(), co.getApplicationRun().getRevision()); logger.trace("Pair: "+sp); if(temp.containsKey(sp)) { logger.trace("Pair is there"); Map<String,Integer> subResult = temp.get(sp); if(subResult.containsKey(av.getValue())) { logger.trace("SubResult contains key "+av.getValue()); logger.trace("Old subresult "+subResult.get(av.getValue())); int i = subResult.get(av.getValue())+1; logger.trace("Putting "+av.getValue()+" with value "+i); subResult.put(av.getValue(), i); } else { logger.trace("SubResult does not contain key "+av.getValue()); logger.trace("Putting "+av.getValue()+" with value "+1); subResult.put(av.getValue(),1); } temp.put(sp, subResult); } else { logger.trace("Pair is missing"); Map<String,Integer> subResult = new HashMap(); 
subResult.put(av.getValue(),1); logger.trace("Putting pair "+ sp +" value "+subResult); temp.put(sp, subResult); } } } } } for(StatPair sp : temp.keySet()) { Map<String,Integer> subResults = temp.get(sp); for(String s : subResults.keySet()) { StatisticsHolder sh = new StatisticsHolder(); sh.setAnnotation(s); sh.setConfiguration(sp.getConfiguration()); sh.setRevision(sp.getRevision()); sh.setCount(subResults.get(s)); holders.add(sh); } } statistics.setStatisticsHolders(holders); statistics.setCalculationDate(DateTime.now()); statistics.setTotalFormulas(formulaDAO.getNumberOfRecords()); int totalCanon = 0; try { totalCanon = entityManager.createQuery("SELECT COUNT(co) FROM canonicOutput co", Long.class) .getSingleResult().intValue(); } catch(NoResultException nre) { logger.info(nre); } statistics.setTotalCanonicOutputs(totalCanon); entityManager.persist(statistics); } @Scheduled(cron = "${statistics.generate.cron}" ) @Transactional(readOnly = false) public void scheduledCalculation() { calculate(); } @Override @Transactional(readOnly = true) public Statistics getLatestStatistics() { Statistics statistics = null; try { statistics = entityManager.createQuery("SELECT s FROM statistics s ORDER BY s.id DESC", Statistics.class).setFirstResult(0).setMaxResults(1).getSingleResult(); } catch(Exception e) { logger.error(e); } return statistics; } @Override @Transactional(readOnly = true) public Map<Long, DateTime> getStatisticsMap() { List<Object[]> results = entityManager.createQuery("SELECT s.id,s.calculationDate FROM statistics s") .getResultList(); Map<Long,DateTime> resultMap = new TreeMap<>(Collections.reverseOrder()); for(Object[] result : results) { resultMap.put((Long) result[0], (DateTime) result[1]); } return resultMap; } @Override @Transactional(readOnly = true) public Statistics getStatisticsByID(Long id) { return entityManager.find(Statistics.class, id); } private class StatPair { private Configuration configuration; private Revision revision; public StatPair() { 
} public StatPair(Configuration configuration, Revision revision) { this.configuration = configuration; this.revision = revision; } public Configuration getConfiguration() { return configuration; } public void setConfiguration(Configuration configuration) { this.configuration = configuration; } public Revision getRevision() { return revision; } public void setRevision(Revision revision) { this.revision = revision; } @Override public int hashCode() { int hash = 7; hash = 83 * hash + Objects.hashCode(this.configuration); hash = 83 * hash + Objects.hashCode(this.revision); return hash; } @Override public boolean equals(Object obj) { if (obj == null) { return false; } if (getClass() != obj.getClass()) { return false; } final StatPair other = (StatPair) obj; if (!Objects.equals(this.configuration, other.configuration)) { return false; } return Objects.equals(this.revision, other.revision); } @Override public String toString() { return "StatPair{" + "c=" + configuration + ", r=" + revision + '}'; } } }
michal-ruzicka/MathMLCanEval
mathmlcaneval-backend/src/main/java/cz/muni/fi/mir/db/service/impl/StatisticsServiceImpl.java
Java
apache-2.0
9,760
// Copyright (c) 2014 Couchbase, Inc. // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file // except in compliance with the License. You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software distributed under the // License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing permissions // and limitations under the License. // TODO: // 1. functions in this package directly access SystemConfig, instead it // is suggested to pass config via function argument. package indexer import ( "errors" "fmt" c "github.com/couchbase/indexing/secondary/common" "github.com/couchbase/indexing/secondary/logging" projClient "github.com/couchbase/indexing/secondary/projector/client" protobuf "github.com/couchbase/indexing/secondary/protobuf/projector" "github.com/golang/protobuf/proto" "time" ) const ( HTTP_PREFIX string = "http://" MAX_KV_REQUEST_RETRY int = 0 BACKOFF_FACTOR int = 2 MAX_CLUSTER_FETCH_RETRY int = 600 ) //KVSender provides the mechanism to talk to KV(projector, router etc) type KVSender interface { } type kvSender struct { supvCmdch MsgChannel //supervisor sends commands on this channel supvRespch MsgChannel //channel to send any message to supervisor cInfoCache *c.ClusterInfoCache config c.Config } func NewKVSender(supvCmdch MsgChannel, supvRespch MsgChannel, config c.Config) (KVSender, Message) { var cinfo *c.ClusterInfoCache url, err := c.ClusterAuthUrl(config["clusterAddr"].String()) if err == nil { cinfo, err = c.NewClusterInfoCache(url, DEFAULT_POOL) } if err != nil { panic("Unable to initialize cluster_info - " + err.Error()) } //Init the kvSender struct k := &kvSender{ supvCmdch: supvCmdch, supvRespch: supvRespch, cInfoCache: cinfo, config: config, } k.cInfoCache.SetMaxRetries(MAX_CLUSTER_FETCH_RETRY) 
k.cInfoCache.SetLogPrefix("KVSender: ") //start kvsender loop which listens to commands from its supervisor go k.run() return k, &MsgSuccess{} } //run starts the kvsender loop which listens to messages //from it supervisor(indexer) func (k *kvSender) run() { //main KVSender loop loop: for { select { case cmd, ok := <-k.supvCmdch: if ok { if cmd.GetMsgType() == KV_SENDER_SHUTDOWN { logging.Infof("KVSender::run Shutting Down") k.supvCmdch <- &MsgSuccess{} break loop } k.handleSupvervisorCommands(cmd) } else { //supervisor channel closed. exit break loop } } } } func (k *kvSender) handleSupvervisorCommands(cmd Message) { switch cmd.GetMsgType() { case OPEN_STREAM: k.handleOpenStream(cmd) case ADD_INDEX_LIST_TO_STREAM: k.handleAddIndexListToStream(cmd) case REMOVE_INDEX_LIST_FROM_STREAM: k.handleRemoveIndexListFromStream(cmd) case REMOVE_BUCKET_FROM_STREAM: k.handleRemoveBucketFromStream(cmd) case CLOSE_STREAM: k.handleCloseStream(cmd) case KV_SENDER_RESTART_VBUCKETS: k.handleRestartVbuckets(cmd) case CONFIG_SETTINGS_UPDATE: k.handleConfigUpdate(cmd) default: logging.Errorf("KVSender::handleSupvervisorCommands "+ "Received Unknown Command %v", cmd) } } func (k *kvSender) handleOpenStream(cmd Message) { streamId := cmd.(*MsgStreamUpdate).GetStreamId() indexInstList := cmd.(*MsgStreamUpdate).GetIndexList() restartTs := cmd.(*MsgStreamUpdate).GetRestartTs() respCh := cmd.(*MsgStreamUpdate).GetResponseChannel() stopCh := cmd.(*MsgStreamUpdate).GetStopChannel() bucket := cmd.(*MsgStreamUpdate).GetBucket() logging.LazyDebug(func() string { return fmt.Sprintf("KVSender::handleOpenStream %v %v %v", streamId, bucket, cmd) }) go k.openMutationStream(streamId, indexInstList, restartTs, respCh, stopCh) k.supvCmdch <- &MsgSuccess{} } func (k *kvSender) handleAddIndexListToStream(cmd Message) { streamId := cmd.(*MsgStreamUpdate).GetStreamId() bucket := cmd.(*MsgStreamUpdate).GetBucket() addIndexList := cmd.(*MsgStreamUpdate).GetIndexList() respCh := 
cmd.(*MsgStreamUpdate).GetResponseChannel() stopCh := cmd.(*MsgStreamUpdate).GetStopChannel() logging.LazyDebug(func() string { return fmt.Sprintf("KVSender::handleAddIndexListToStream %v %v %v", streamId, bucket, cmd) }) go k.addIndexForExistingBucket(streamId, bucket, addIndexList, respCh, stopCh) k.supvCmdch <- &MsgSuccess{} } func (k *kvSender) handleRemoveIndexListFromStream(cmd Message) { logging.LazyDebug(func() string { return fmt.Sprintf("KVSender::handleRemoveIndexListFromStream %v", cmd) }) streamId := cmd.(*MsgStreamUpdate).GetStreamId() delIndexList := cmd.(*MsgStreamUpdate).GetIndexList() respCh := cmd.(*MsgStreamUpdate).GetResponseChannel() stopCh := cmd.(*MsgStreamUpdate).GetStopChannel() go k.deleteIndexesFromStream(streamId, delIndexList, respCh, stopCh) k.supvCmdch <- &MsgSuccess{} } func (k *kvSender) handleRemoveBucketFromStream(cmd Message) { streamId := cmd.(*MsgStreamUpdate).GetStreamId() bucket := cmd.(*MsgStreamUpdate).GetBucket() respCh := cmd.(*MsgStreamUpdate).GetResponseChannel() stopCh := cmd.(*MsgStreamUpdate).GetStopChannel() logging.LazyDebug(func() string { return fmt.Sprintf("KVSender::handleRemoveBucketFromStream %v %v %v", streamId, bucket, cmd) }) go k.deleteBucketsFromStream(streamId, []string{bucket}, respCh, stopCh) k.supvCmdch <- &MsgSuccess{} } func (k *kvSender) handleCloseStream(cmd Message) { streamId := cmd.(*MsgStreamUpdate).GetStreamId() bucket := cmd.(*MsgStreamUpdate).GetBucket() respCh := cmd.(*MsgStreamUpdate).GetResponseChannel() stopCh := cmd.(*MsgStreamUpdate).GetStopChannel() logging.LazyDebug(func() string { return fmt.Sprintf("KVSender::handleCloseStream %v %v %v", streamId, bucket, cmd) }) go k.closeMutationStream(streamId, bucket, respCh, stopCh) k.supvCmdch <- &MsgSuccess{} } func (k *kvSender) handleRestartVbuckets(cmd Message) { streamId := cmd.(*MsgRestartVbuckets).GetStreamId() bucket := cmd.(*MsgRestartVbuckets).GetBucket() restartTs := cmd.(*MsgRestartVbuckets).GetRestartTs() respCh := 
cmd.(*MsgRestartVbuckets).GetResponseCh() stopCh := cmd.(*MsgRestartVbuckets).GetStopChannel() connErrVbs := cmd.(*MsgRestartVbuckets).ConnErrVbs() logging.LazyDebug(func() string { return fmt.Sprintf("KVSender::handleRestartVbuckets %v %v %v", streamId, bucket, cmd) }) go k.restartVbuckets(streamId, restartTs, connErrVbs, respCh, stopCh) k.supvCmdch <- &MsgSuccess{} } func (k *kvSender) openMutationStream(streamId c.StreamId, indexInstList []c.IndexInst, restartTs *c.TsVbuuid, respCh MsgChannel, stopCh StopChannel) { if len(indexInstList) == 0 { logging.Warnf("KVSender::openMutationStream Empty IndexList. Nothing to do.") respCh <- &MsgSuccess{} return } protoInstList := convertIndexListToProto(k.config, k.cInfoCache, indexInstList, streamId) bucket := indexInstList[0].Defn.Bucket //use any bucket as list of vbs remain the same for all buckets vbnos, err := k.getAllVbucketsInCluster(bucket) if err != nil { logging.Errorf("KVSender::openMutationStream %v %v Error in fetching vbuckets info %v", streamId, restartTs.Bucket, err) respCh <- &MsgError{ err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR, severity: FATAL, cause: err}} return } restartTsList, err := k.makeRestartTsForVbs(bucket, restartTs, vbnos) if err != nil { logging.Errorf("KVSender::openMutationStream %v %v Error making restart ts %v", streamId, bucket, err) respCh <- &MsgError{ err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR, severity: FATAL, cause: err}} return } addrs, err := k.getAllProjectorAddrs() if err != nil { logging.Errorf("KVSender::openMutationStream %v %v Error Fetching Projector Addrs %v", streamId, bucket, err) respCh <- &MsgError{ err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR, severity: FATAL, cause: err}} return } var rollbackTs *protobuf.TsVbuuid var activeTs *protobuf.TsVbuuid topic := getTopicForStreamId(streamId) fn := func(r int, err error) error { //clear the error before every retry err = nil for _, addr := range addrs { execWithStopCh(func() { ap := 
newProjClient(addr) if res, ret := k.sendMutationTopicRequest(ap, topic, restartTsList, protoInstList); ret != nil { //for all errors, retry logging.Errorf("KVSender::openMutationStream %v %v Error Received %v from %v", streamId, bucket, ret, addr) err = ret } else { activeTs = updateActiveTsFromResponse(bucket, activeTs, res) rollbackTs = updateRollbackTsFromResponse(bucket, rollbackTs, res) } }, stopCh) } if rollbackTs != nil { //no retry required for rollback return nil } else if err != nil { //retry for any error return err } else { //check if we have received activeTs for all vbuckets retry := false if activeTs == nil || activeTs.Len() != len(vbnos) { retry = true } if retry { return errors.New("ErrPartialVbStart") } else { return nil } } } rh := c.NewRetryHelper(MAX_KV_REQUEST_RETRY, time.Second, BACKOFF_FACTOR, fn) err = rh.Run() if rollbackTs != nil { logging.Infof("KVSender::openMutationStream %v %v Rollback Received %v", streamId, bucket, rollbackTs) //convert from protobuf to native format numVbuckets := k.config["numVbuckets"].Int() nativeTs := rollbackTs.ToTsVbuuid(numVbuckets) respCh <- &MsgRollback{streamId: streamId, bucket: bucket, rollbackTs: nativeTs} } else if err != nil { logging.Errorf("KVSender::openMutationStream %v %v Error Received %v", streamId, bucket, err) respCh <- &MsgError{ err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR, severity: FATAL, cause: err}} } else { numVbuckets := k.config["numVbuckets"].Int() respCh <- &MsgSuccessOpenStream{activeTs: activeTs.ToTsVbuuid(numVbuckets)} } } func (k *kvSender) restartVbuckets(streamId c.StreamId, restartTs *c.TsVbuuid, connErrVbs []Vbucket, respCh MsgChannel, stopCh StopChannel) { addrs, err := k.getProjAddrsForVbuckets(restartTs.Bucket, restartTs.GetVbnos()) if err != nil { logging.Errorf("KVSender::restartVbuckets %v %v Error in fetching cluster info %v", streamId, restartTs.Bucket, err) respCh <- &MsgError{ err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR, severity: FATAL, cause: 
err}} return } //convert TS to protobuf format var protoRestartTs *protobuf.TsVbuuid numVbuckets := k.config["numVbuckets"].Int() protoTs := protobuf.NewTsVbuuid(DEFAULT_POOL, restartTs.Bucket, numVbuckets) protoRestartTs = protoTs.FromTsVbuuid(restartTs) var rollbackTs *protobuf.TsVbuuid topic := getTopicForStreamId(streamId) rollback := false fn := func(r int, err error) error { for _, addr := range addrs { ap := newProjClient(addr) if res, ret := k.sendRestartVbuckets(ap, topic, connErrVbs, protoRestartTs); ret != nil { //retry for all errors logging.Errorf("KVSender::restartVbuckets %v %v Error Received %v from %v", streamId, restartTs.Bucket, ret, addr) err = ret } else { rollbackTs = updateRollbackTsFromResponse(restartTs.Bucket, rollbackTs, res) } } if rollbackTs != nil && checkVbListInTS(protoRestartTs.GetVbnos(), rollbackTs) { //if rollback, no need to retry rollback = true return nil } else { return err } } rh := c.NewRetryHelper(MAX_KV_REQUEST_RETRY, time.Second, BACKOFF_FACTOR, fn) err = rh.Run() //if any of the requested vb is in rollback ts, send rollback //msg to caller if rollback { //convert from protobuf to native format nativeTs := rollbackTs.ToTsVbuuid(numVbuckets) respCh <- &MsgRollback{streamId: streamId, rollbackTs: nativeTs} } else if err != nil { //if there is a topicMissing/genServer.Closed error, a fresh //MutationTopicRequest is required. 
if err.Error() == projClient.ErrorTopicMissing.Error() || err.Error() == c.ErrorClosed.Error() || err.Error() == projClient.ErrorInvalidBucket.Error() { respCh <- &MsgKVStreamRepair{ streamId: streamId, bucket: restartTs.Bucket, } } else { respCh <- &MsgError{ err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR, severity: FATAL, cause: err}} } } else { respCh <- &MsgSuccess{} } } func (k *kvSender) addIndexForExistingBucket(streamId c.StreamId, bucket string, indexInstList []c.IndexInst, respCh MsgChannel, stopCh StopChannel) { addrs, err := k.getAllProjectorAddrs() if err != nil { logging.Errorf("KVSender::addIndexForExistingBucket %v %v Error in fetching cluster info %v", streamId, bucket, err) respCh <- &MsgError{ err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR, severity: FATAL, cause: err}} return } var currentTs *protobuf.TsVbuuid protoInstList := convertIndexListToProto(k.config, k.cInfoCache, indexInstList, streamId) topic := getTopicForStreamId(streamId) fn := func(r int, err error) error { //clear the error before every retry err = nil for _, addr := range addrs { execWithStopCh(func() { ap := newProjClient(addr) if res, ret := sendAddInstancesRequest(ap, topic, protoInstList); ret != nil { logging.Errorf("KVSender::addIndexForExistingBucket %v %v Error Received %v from %v", streamId, bucket, ret, addr) err = ret } else { currentTs = updateCurrentTsFromResponse(bucket, currentTs, res) } }, stopCh) } //check if we have received currentTs for all vbuckets numVbuckets := k.config["numVbuckets"].Int() if currentTs == nil || currentTs.Len() != numVbuckets { return errors.New("ErrPartialVbStart") } else { return err } } rh := c.NewRetryHelper(MAX_KV_REQUEST_RETRY, time.Second, BACKOFF_FACTOR, fn) err = rh.Run() if err != nil { logging.Errorf("KVSender::addIndexForExistingBucket %v %v Error Received %v", streamId, bucket, err) respCh <- &MsgError{ err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR, severity: FATAL, cause: err}} return } numVbuckets := 
k.config["numVbuckets"].Int() nativeTs := currentTs.ToTsVbuuid(numVbuckets) respCh <- &MsgStreamUpdate{mType: MSG_SUCCESS, streamId: streamId, bucket: bucket, restartTs: nativeTs} } func (k *kvSender) deleteIndexesFromStream(streamId c.StreamId, indexInstList []c.IndexInst, respCh MsgChannel, stopCh StopChannel) { addrs, err := k.getAllProjectorAddrs() if err != nil { logging.Errorf("KVSender::deleteIndexesFromStream %v %v Error in fetching cluster info %v", streamId, indexInstList[0].Defn.Bucket, err) respCh <- &MsgError{ err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR, severity: FATAL, cause: err}} return } var uuids []uint64 for _, indexInst := range indexInstList { uuids = append(uuids, uint64(indexInst.InstId)) } topic := getTopicForStreamId(streamId) fn := func(r int, err error) error { //clear the error before every retry err = nil for _, addr := range addrs { execWithStopCh(func() { ap := newProjClient(addr) if ret := sendDelInstancesRequest(ap, topic, uuids); ret != nil { logging.Errorf("KVSender::deleteIndexesFromStream %v %v Error Received %v from %v", streamId, indexInstList[0].Defn.Bucket, ret, addr) //Treat TopicMissing/GenServer.Closed/InvalidBucket as success if ret.Error() == projClient.ErrorTopicMissing.Error() || ret.Error() == c.ErrorClosed.Error() || ret.Error() == projClient.ErrorInvalidBucket.Error() { logging.Infof("KVSender::deleteIndexesFromStream %v %v Treating %v As Success", streamId, indexInstList[0].Defn.Bucket, ret) } else { err = ret } } }, stopCh) } return err } rh := c.NewRetryHelper(MAX_KV_REQUEST_RETRY, time.Second, BACKOFF_FACTOR, fn) err = rh.Run() if err != nil { logging.Errorf("KVSender::deleteIndexesFromStream %v %v Error Received %v", streamId, indexInstList[0].Defn.Bucket, err) respCh <- &MsgError{ err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR, severity: FATAL, cause: err}} return } respCh <- &MsgSuccess{} } func (k *kvSender) deleteBucketsFromStream(streamId c.StreamId, buckets []string, respCh MsgChannel, 
stopCh StopChannel) { addrs, err := k.getAllProjectorAddrs() if err != nil { logging.Errorf("KVSender::deleteBucketsFromStream %v %v Error in fetching cluster info %v", streamId, buckets[0], err) respCh <- &MsgError{ err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR, severity: FATAL, cause: err}} return } topic := getTopicForStreamId(streamId) fn := func(r int, err error) error { //clear the error before every retry err = nil for _, addr := range addrs { execWithStopCh(func() { ap := newProjClient(addr) if ret := sendDelBucketsRequest(ap, topic, buckets); ret != nil { logging.Errorf("KVSender::deleteBucketsFromStream %v %v Error Received %v from %v", streamId, buckets[0], ret, addr) //Treat TopicMissing/GenServer.Closed as success if ret.Error() == projClient.ErrorTopicMissing.Error() || ret.Error() == c.ErrorClosed.Error() { logging.Infof("KVSender::deleteBucketsFromStream %v %v Treating %v As Success", streamId, buckets[0], ret) } else { err = ret } } }, stopCh) } return err } rh := c.NewRetryHelper(MAX_KV_REQUEST_RETRY, time.Second, BACKOFF_FACTOR, fn) err = rh.Run() if err != nil { logging.Errorf("KVSender::deleteBucketsFromStream %v %v Error Received %v", streamId, buckets[0], err) respCh <- &MsgError{ err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR, severity: FATAL, cause: err}} return } respCh <- &MsgSuccess{} } func (k *kvSender) closeMutationStream(streamId c.StreamId, bucket string, respCh MsgChannel, stopCh StopChannel) { addrs, err := k.getAllProjectorAddrs() if err != nil { logging.Errorf("KVSender::closeMutationStream %v %v Error in fetching cluster info %v", streamId, bucket, err) respCh <- &MsgError{ err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR, severity: FATAL, cause: err}} return } topic := getTopicForStreamId(streamId) fn := func(r int, err error) error { //clear the error before every retry err = nil for _, addr := range addrs { execWithStopCh(func() { ap := newProjClient(addr) if ret := sendShutdownTopic(ap, topic); ret != nil { 
logging.Errorf("KVSender::closeMutationStream %v %v Error Received %v from %v", streamId, bucket, ret, addr) //Treat TopicMissing/GenServer.Closed as success if ret.Error() == projClient.ErrorTopicMissing.Error() || ret.Error() == c.ErrorClosed.Error() { logging.Infof("KVSender::closeMutationStream %v %v Treating %v As Success", streamId, bucket, ret) } else { err = ret } } }, stopCh) } return err } rh := c.NewRetryHelper(MAX_KV_REQUEST_RETRY, time.Second, BACKOFF_FACTOR, fn) err = rh.Run() if err != nil { logging.Errorf("KVSender::closeMutationStream %v %v Error Received %v", streamId, bucket, err) respCh <- &MsgError{ err: Error{code: ERROR_KVSENDER_STREAM_REQUEST_ERROR, severity: FATAL, cause: err}} return } respCh <- &MsgSuccess{} } //send the actual MutationStreamRequest on adminport func (k *kvSender) sendMutationTopicRequest(ap *projClient.Client, topic string, reqTimestamps *protobuf.TsVbuuid, instances []*protobuf.Instance) (*protobuf.TopicResponse, error) { logging.Infof("KVSender::sendMutationTopicRequest Projector %v Topic %v %v \n\tInstances %v", ap, topic, reqTimestamps.GetBucket(), instances) logging.LazyVerbosef("KVSender::sendMutationTopicRequest RequestTS %v", reqTimestamps.Repr) endpointType := "dataport" if res, err := ap.MutationTopicRequest(topic, endpointType, []*protobuf.TsVbuuid{reqTimestamps}, instances); err != nil { logging.Fatalf("KVSender::sendMutationTopicRequest Projector %v Topic %v %v \n\tUnexpected Error %v", ap, topic, reqTimestamps.GetBucket(), err) return res, err } else { logging.Infof("KVSender::sendMutationTopicRequest Success Projector %v Topic %v %v InstanceIds %v", ap, topic, reqTimestamps.GetBucket(), res.GetInstanceIds()) if logging.IsEnabled(logging.Verbose) { logging.Verbosef("KVSender::sendMutationTopicRequest ActiveTs %v \n\tRollbackTs %v", debugPrintTs(res.GetActiveTimestamps(), reqTimestamps.GetBucket()), debugPrintTs(res.GetRollbackTimestamps(), reqTimestamps.GetBucket())) } return res, nil } } func (k *kvSender) 
sendRestartVbuckets(ap *projClient.Client, topic string, connErrVbs []Vbucket, restartTs *protobuf.TsVbuuid) (*protobuf.TopicResponse, error) { logging.Infof("KVSender::sendRestartVbuckets Projector %v Topic %v %v", ap, topic, restartTs.GetBucket()) logging.LazyVerbosef("KVSender::sendRestartVbuckets RestartTs %v", restartTs.Repr) //Shutdown the vbucket before restart if there was a ConnErr. If the vbucket is already //running, projector will ignore the request otherwise if len(connErrVbs) != 0 { logging.Infof("KVSender::sendRestartVbuckets ShutdownVbuckets %v Topic %v %v ConnErrVbs %v", ap, topic, restartTs.GetBucket(), connErrVbs) // Only shutting down the Vb that receieve connection error. It is probably not harmful // to shutdown every VB in the repairTS, including those that only receive StreamEnd. // But due to network / projecctor latency, a VB StreamBegin may be coming on the way // for those VB (especially when RepairStream has already retried a couple of times). // So shutting all VB in restartTs may unnecessarily causing race condition and // make the protocol longer to converge. ShutdownVbuckets should have no effect on // projector that does not own the Vb. shutdownTs := k.computeShutdownTs(restartTs, connErrVbs) logging.Infof("KVSender::sendRestartVbuckets ShutdownVbuckets Projector %v Topic %v %v \n\tShutdownTs %v", ap, topic, restartTs.GetBucket(), shutdownTs.Repr()) if err := ap.ShutdownVbuckets(topic, []*protobuf.TsVbuuid{shutdownTs}); err != nil { logging.Errorf("KVSender::sendRestartVbuckets Unexpected Error During "+ "ShutdownVbuckets Request for Projector %v Topic %v. Err %v.", ap, topic, err) //all shutdownVbuckets errors are treated as success as it is a best-effort call. //RestartVbuckets errors will be acted upon. } } if res, err := ap.RestartVbuckets(topic, []*protobuf.TsVbuuid{restartTs}); err != nil { logging.Fatalf("KVSender::sendRestartVbuckets Unexpected Error During "+ "Restart Vbuckets Request for Projector %v Topic %v %v . 
Err %v.", ap, topic, restartTs.GetBucket(), err) return res, err } else { logging.Infof("KVSender::sendRestartVbuckets Success Projector %v Topic %v %v", ap, topic, restartTs.GetBucket()) if logging.IsEnabled(logging.Verbose) { logging.Verbosef("KVSender::sendRestartVbuckets \nActiveTs %v \nRollbackTs %v", debugPrintTs(res.GetActiveTimestamps(), restartTs.GetBucket()), debugPrintTs(res.GetRollbackTimestamps(), restartTs.GetBucket())) } return res, nil } } //send the actual AddInstances request on adminport func sendAddInstancesRequest(ap *projClient.Client, topic string, instances []*protobuf.Instance) (*protobuf.TimestampResponse, error) { logging.Infof("KVSender::sendAddInstancesRequest Projector %v Topic %v \nInstances %v", ap, topic, instances) if res, err := ap.AddInstances(topic, instances); err != nil { logging.Fatalf("KVSender::sendAddInstancesRequest Unexpected Error During "+ "Add Instances Request Projector %v Topic %v IndexInst %v. Err %v", ap, topic, instances, err) return res, err } else { logging.Infof("KVSender::sendAddInstancesRequest Success Projector %v Topic %v", ap, topic) logging.LazyDebug(func() string { return fmt.Sprintf( "KVSender::sendAddInstancesRequest \n\tActiveTs %v ", debugPrintTs(res.GetCurrentTimestamps(), "")) }) return res, nil } } //send the actual DelInstances request on adminport func sendDelInstancesRequest(ap *projClient.Client, topic string, uuids []uint64) error { logging.Infof("KVSender::sendDelInstancesRequest Projector %v Topic %v Instances %v", ap, topic, uuids) if err := ap.DelInstances(topic, uuids); err != nil { logging.Fatalf("KVSender::sendDelInstancesRequest Unexpected Error During "+ "Del Instances Request Projector %v Topic %v Instances %v. 
Err %v", ap, topic, uuids, err) return err } else { logging.Infof("KVSender::sendDelInstancesRequest Success Projector %v Topic %v", ap, topic) return nil } } //send the actual DelBuckets request on adminport func sendDelBucketsRequest(ap *projClient.Client, topic string, buckets []string) error { logging.Infof("KVSender::sendDelBucketsRequest Projector %v Topic %v Buckets %v", ap, topic, buckets) if err := ap.DelBuckets(topic, buckets); err != nil { logging.Fatalf("KVSender::sendDelBucketsRequest Unexpected Error During "+ "Del Buckets Request Projector %v Topic %v Buckets %v. Err %v", ap, topic, buckets, err) return err } else { logging.Infof("KVSender::sendDelBucketsRequest Success Projector %v Topic %v Buckets %v", ap, topic, buckets) return nil } } //send the actual ShutdownStreamRequest on adminport func sendShutdownTopic(ap *projClient.Client, topic string) error { logging.Infof("KVSender::sendShutdownTopic Projector %v Topic %v", ap, topic) if err := ap.ShutdownTopic(topic); err != nil { logging.Fatalf("KVSender::sendShutdownTopic Unexpected Error During "+ "Shutdown Projector %v Topic %v. Err %v", ap, topic, err) return err } else { logging.Infof("KVSender::sendShutdownTopic Success Projector %v Topic %v", ap, topic) return nil } } func getTopicForStreamId(streamId c.StreamId) string { return StreamTopicName[streamId] } func (k *kvSender) computeShutdownTs(restartTs *protobuf.TsVbuuid, connErrVbs []Vbucket) *protobuf.TsVbuuid { numVbuckets := k.config["numVbuckets"].Int() shutdownTs := protobuf.NewTsVbuuid(*restartTs.Pool, *restartTs.Bucket, numVbuckets) for _, vbno1 := range connErrVbs { for i, vbno2 := range restartTs.Vbnos { // connErrVbs is a subset of Vb in restartTs. 
if uint32(vbno1) == vbno2 { shutdownTs.Append(uint16(vbno1), restartTs.Seqnos[i], restartTs.Vbuuids[i], *restartTs.Snapshots[i].Start, *restartTs.Snapshots[i].End) } } } return shutdownTs } func (k *kvSender) makeRestartTsForVbs(bucket string, tsVbuuid *c.TsVbuuid, vbnos []uint32) (*protobuf.TsVbuuid, error) { var err error var ts *protobuf.TsVbuuid if tsVbuuid == nil { ts, err = k.makeInitialTs(bucket, vbnos) } else { ts, err = makeRestartTsFromTsVbuuid(bucket, tsVbuuid, vbnos) } if err != nil { return nil, err } return ts, nil } func updateActiveTsFromResponse(bucket string, activeTs *protobuf.TsVbuuid, res *protobuf.TopicResponse) *protobuf.TsVbuuid { activeTsList := res.GetActiveTimestamps() for _, ts := range activeTsList { if ts != nil && !ts.IsEmpty() && ts.GetBucket() == bucket { if activeTs == nil { activeTs = ts.Clone() } else { activeTs = activeTs.Union(ts) } } } return activeTs } func updateRollbackTsFromResponse(bucket string, rollbackTs *protobuf.TsVbuuid, res *protobuf.TopicResponse) *protobuf.TsVbuuid { rollbackTsList := res.GetRollbackTimestamps() for _, ts := range rollbackTsList { if ts != nil && !ts.IsEmpty() && ts.GetBucket() == bucket { if rollbackTs == nil { rollbackTs = ts.Clone() } else { rollbackTs = rollbackTs.Union(ts) } } } return rollbackTs } func updateCurrentTsFromResponse(bucket string, currentTs *protobuf.TsVbuuid, res *protobuf.TimestampResponse) *protobuf.TsVbuuid { currentTsList := res.GetCurrentTimestamps() for _, ts := range currentTsList { if ts != nil && !ts.IsEmpty() && ts.GetBucket() == bucket { if currentTs == nil { currentTs = ts.Clone() } else { currentTs = currentTs.Union(ts) } } } return currentTs } func (k *kvSender) makeInitialTs(bucket string, vbnos []uint32) (*protobuf.TsVbuuid, error) { flogs, err := k.getFailoverLogs(bucket, vbnos) if err != nil { logging.Fatalf("KVSender::makeInitialTs Unexpected Error During Failover "+ "Log Request for Bucket %v. 
Err %v", bucket, err) return nil, err } ts := protobuf.NewTsVbuuid(DEFAULT_POOL, bucket, len(vbnos)) ts = ts.InitialRestartTs(flogs.ToFailoverLog(c.Vbno32to16(vbnos))) return ts, nil } func (k *kvSender) makeRestartTsFromKV(bucket string, vbnos []uint32) (*protobuf.TsVbuuid, error) { flogs, err := k.getFailoverLogs(bucket, vbnos) if err != nil { logging.Fatalf("KVSender::makeRestartTS Unexpected Error During Failover "+ "Log Request for Bucket %v. Err %v", bucket, err) return nil, err } ts := protobuf.NewTsVbuuid(DEFAULT_POOL, bucket, len(vbnos)) ts = ts.ComputeRestartTs(flogs.ToFailoverLog(c.Vbno32to16(vbnos))) return ts, nil } func makeRestartTsFromTsVbuuid(bucket string, tsVbuuid *c.TsVbuuid, vbnos []uint32) (*protobuf.TsVbuuid, error) { ts := protobuf.NewTsVbuuid(DEFAULT_POOL, bucket, len(vbnos)) for _, vbno := range vbnos { ts.Append(uint16(vbno), tsVbuuid.Seqnos[vbno], tsVbuuid.Vbuuids[vbno], tsVbuuid.Snapshots[vbno][0], tsVbuuid.Snapshots[vbno][1]) } return ts, nil } func (k *kvSender) getFailoverLogs(bucket string, vbnos []uint32) (*protobuf.FailoverLogResponse, error) { var err error var res *protobuf.FailoverLogResponse addrs, err := k.getAllProjectorAddrs() if err != nil { return nil, err } loop: for _, addr := range addrs { //create client for node's projectors client := newProjClient(addr) if res, err = client.GetFailoverLogs(DEFAULT_POOL, bucket, vbnos); err == nil { break loop } } if logging.IsEnabled(logging.Debug) { s := "" for _, l := range res.GetLogs() { s += fmt.Sprintf("\t%v\n", l) } logging.Debugf("KVSender::getFailoverLogs Failover Log Response Error %v \n%v", err, s) } return res, err } func (k *kvSender) getAllVbucketsInCluster(bucket string) ([]uint32, error) { k.cInfoCache.Lock() defer k.cInfoCache.Unlock() err := k.cInfoCache.Fetch() if err != nil { return nil, err } //get all kv nodes nodes, err := k.cInfoCache.GetNodesByBucket(bucket) if err != nil { return nil, err } var vbs []uint32 for _, nid := range nodes { //get the list of 
vbnos for this kv if vbnos, err := k.cInfoCache.GetVBuckets(nid, bucket); err != nil { return nil, err } else { vbs = append(vbs, vbnos...) } } return vbs, nil } func (k *kvSender) getAllProjectorAddrs() ([]string, error) { k.cInfoCache.Lock() defer k.cInfoCache.Unlock() err := k.cInfoCache.Fetch() if err != nil { return nil, err } nodes := k.cInfoCache.GetNodesByServiceType("projector") var addrList []string for _, nid := range nodes { addr, err := k.cInfoCache.GetServiceAddress(nid, "projector") if err != nil { return nil, err } addrList = append(addrList, addr) } return addrList, nil } func (k *kvSender) getProjAddrsForVbuckets(bucket string, vbnos []uint16) ([]string, error) { k.cInfoCache.Lock() defer k.cInfoCache.Unlock() err := k.cInfoCache.Fetch() if err != nil { return nil, err } var addrList []string nodes := k.cInfoCache.GetNodesByServiceType("projector") for _, n := range nodes { vbs, err := k.cInfoCache.GetVBuckets(n, bucket) if err != nil { return nil, err } found := false outerloop: for _, vb := range vbs { for _, vbc := range vbnos { if vb == uint32(vbc) { found = true break outerloop } } } if found { addr, err := k.cInfoCache.GetServiceAddress(n, "projector") if err != nil { return nil, err } addrList = append(addrList, addr) } } return addrList, nil } func (k *kvSender) handleConfigUpdate(cmd Message) { cfgUpdate := cmd.(*MsgConfigUpdate) k.config = cfgUpdate.GetConfig() k.supvCmdch <- &MsgSuccess{} } // convert IndexInst to protobuf format func convertIndexListToProto(cfg c.Config, cinfo *c.ClusterInfoCache, indexList []c.IndexInst, streamId c.StreamId) []*protobuf.Instance { protoList := make([]*protobuf.Instance, 0) for _, index := range indexList { protoInst := convertIndexInstToProtoInst(cfg, cinfo, index, streamId) protoList = append(protoList, protoInst) } return protoList } // convert IndexInst to protobuf format func convertIndexInstToProtoInst(cfg c.Config, cinfo *c.ClusterInfoCache, indexInst c.IndexInst, streamId c.StreamId) 
*protobuf.Instance { protoDefn := convertIndexDefnToProtobuf(indexInst.Defn) protoInst := convertIndexInstToProtobuf(cfg, indexInst, protoDefn) addPartnInfoToProtoInst(cfg, cinfo, indexInst, streamId, protoInst) return &protobuf.Instance{IndexInstance: protoInst} } func convertIndexDefnToProtobuf(indexDefn c.IndexDefn) *protobuf.IndexDefn { using := protobuf.StorageType( protobuf.StorageType_value[string(indexDefn.Using)]).Enum() exprType := protobuf.ExprType( protobuf.ExprType_value[string(indexDefn.ExprType)]).Enum() partnScheme := protobuf.PartitionScheme( protobuf.PartitionScheme_value[string(indexDefn.PartitionScheme)]).Enum() defn := &protobuf.IndexDefn{ DefnID: proto.Uint64(uint64(indexDefn.DefnId)), Bucket: proto.String(indexDefn.Bucket), IsPrimary: proto.Bool(indexDefn.IsPrimary), Name: proto.String(indexDefn.Name), Using: using, ExprType: exprType, SecExpressions: indexDefn.SecExprs, PartitionScheme: partnScheme, PartnExpression: proto.String(indexDefn.PartitionKey), WhereExpression: proto.String(indexDefn.WhereExpr), } return defn } func convertIndexInstToProtobuf(cfg c.Config, indexInst c.IndexInst, protoDefn *protobuf.IndexDefn) *protobuf.IndexInst { state := protobuf.IndexState(int32(indexInst.State)).Enum() instance := &protobuf.IndexInst{ InstId: proto.Uint64(uint64(indexInst.InstId)), State: state, Definition: protoDefn, } return instance } func addPartnInfoToProtoInst(cfg c.Config, cinfo *c.ClusterInfoCache, indexInst c.IndexInst, streamId c.StreamId, protoInst *protobuf.IndexInst) { switch partn := indexInst.Pc.(type) { case *c.KeyPartitionContainer: //Right now the fill the SinglePartition as that is the only //partition structure supported partnDefn := partn.GetAllPartitions() //TODO move this to indexer init. These addresses cannot change. //Better to get these once and store. 
cinfo.Lock() defer cinfo.Unlock() err := cinfo.Fetch() c.CrashOnError(err) nid := cinfo.GetCurrentNode() streamMaintAddr, err := cinfo.GetServiceAddress(nid, "indexStreamMaint") c.CrashOnError(err) streamInitAddr, err := cinfo.GetServiceAddress(nid, "indexStreamInit") c.CrashOnError(err) streamCatchupAddr, err := cinfo.GetServiceAddress(nid, "indexStreamCatchup") c.CrashOnError(err) var endpoints []string for _, p := range partnDefn { for _, e := range p.Endpoints() { //Set the right endpoint based on streamId switch streamId { case c.MAINT_STREAM: e = c.Endpoint(streamMaintAddr) case c.CATCHUP_STREAM: e = c.Endpoint(streamCatchupAddr) case c.INIT_STREAM: e = c.Endpoint(streamInitAddr) } endpoints = append(endpoints, string(e)) } } protoInst.SinglePartn = &protobuf.SinglePartition{ Endpoints: endpoints, } } } //create client for node's projectors func newProjClient(addr string) *projClient.Client { config := c.SystemConfig.SectionConfig("indexer.projectorclient.", true) config.SetValue("retryInterval", 0) //no retry maxvbs := c.SystemConfig["maxVbuckets"].Int() return projClient.NewClient(addr, maxvbs, config) } func compareIfActiveTsEqual(origTs, compTs *c.TsVbuuid) bool { vbnosOrig := origTs.GetVbnos() vbnosComp := compTs.GetVbnos() for i, vb := range vbnosOrig { if vbnosComp[i] != vb { return false } } return true } //check if any vb in vbList is part of the given ts func checkVbListInTS(vbList []uint32, ts *protobuf.TsVbuuid) bool { for _, vb := range vbList { if ts.Contains(uint16(vb)) == true { return true } } return false } func execWithStopCh(fn func(), stopCh StopChannel) { select { case <-stopCh: return default: fn() } } func debugPrintTs(tsList []*protobuf.TsVbuuid, bucket string) string { if len(tsList) == 0 { return "" } for _, ts := range tsList { if bucket == "" { return ts.Repr() } else if ts.GetBucket() == bucket { return ts.Repr() } } return "" }
jchris/indexing
secondary/indexer/kv_sender.go
GO
apache-2.0
36,703
package gateway import ( "bytes" "crypto/sha1" //nolint:gosec "fmt" "io/ioutil" "net/http" "os" "path/filepath" "regexp" "strings" "time" "github.com/42wim/matterbridge/bridge" "github.com/42wim/matterbridge/bridge/config" "github.com/42wim/matterbridge/gateway/bridgemap" ) // handleEventFailure handles failures and reconnects bridges. func (r *Router) handleEventFailure(msg *config.Message) { if msg.Event != config.EventFailure { return } for _, gw := range r.Gateways { for _, br := range gw.Bridges { if msg.Account == br.Account { go gw.reconnectBridge(br) return } } } } // handleEventGetChannelMembers handles channel members func (r *Router) handleEventGetChannelMembers(msg *config.Message) { if msg.Event != config.EventGetChannelMembers { return } for _, gw := range r.Gateways { for _, br := range gw.Bridges { if msg.Account == br.Account { cMembers := msg.Extra[config.EventGetChannelMembers][0].(config.ChannelMembers) r.logger.Debugf("Syncing channelmembers from %s", msg.Account) br.SetChannelMembers(&cMembers) return } } } } // handleEventRejoinChannels handles rejoining of channels. func (r *Router) handleEventRejoinChannels(msg *config.Message) { if msg.Event != config.EventRejoinChannels { return } for _, gw := range r.Gateways { for _, br := range gw.Bridges { if msg.Account == br.Account { br.Joined = make(map[string]bool) if err := br.JoinChannels(); err != nil { r.logger.Errorf("channel join failed for %s: %s", msg.Account, err) } } } } } // handleFiles uploads or places all files on the given msg to the MediaServer and // adds the new URL of the file on the MediaServer onto the given msg. func (gw *Gateway) handleFiles(msg *config.Message) { reg := regexp.MustCompile("[^a-zA-Z0-9]+") // If we don't have a attachfield or we don't have a mediaserver configured return if msg.Extra == nil || (gw.BridgeValues().General.MediaServerUpload == "" && gw.BridgeValues().General.MediaDownloadPath == "") { return } // If we don't have files, nothing to upload. 
if len(msg.Extra["file"]) == 0 { return } for i, f := range msg.Extra["file"] { fi := f.(config.FileInfo) ext := filepath.Ext(fi.Name) fi.Name = fi.Name[0 : len(fi.Name)-len(ext)] fi.Name = reg.ReplaceAllString(fi.Name, "_") fi.Name += ext sha1sum := fmt.Sprintf("%x", sha1.Sum(*fi.Data))[:8] //nolint:gosec if gw.BridgeValues().General.MediaServerUpload != "" { // Use MediaServerUpload. Upload using a PUT HTTP request and basicauth. if err := gw.handleFilesUpload(&fi); err != nil { gw.logger.Error(err) continue } } else { // Use MediaServerPath. Place the file on the current filesystem. if err := gw.handleFilesLocal(&fi); err != nil { gw.logger.Error(err) continue } } // Download URL. durl := gw.BridgeValues().General.MediaServerDownload + "/" + sha1sum + "/" + fi.Name gw.logger.Debugf("mediaserver download URL = %s", durl) // We uploaded/placed the file successfully. Add the SHA and URL. extra := msg.Extra["file"][i].(config.FileInfo) extra.URL = durl extra.SHA = sha1sum msg.Extra["file"][i] = extra } } // handleFilesUpload uses MediaServerUpload configuration to upload the file. // Returns error on failure. func (gw *Gateway) handleFilesUpload(fi *config.FileInfo) error { client := &http.Client{ Timeout: time.Second * 5, } // Use MediaServerUpload. Upload using a PUT HTTP request and basicauth. sha1sum := fmt.Sprintf("%x", sha1.Sum(*fi.Data))[:8] //nolint:gosec url := gw.BridgeValues().General.MediaServerUpload + "/" + sha1sum + "/" + fi.Name req, err := http.NewRequest("PUT", url, bytes.NewReader(*fi.Data)) if err != nil { return fmt.Errorf("mediaserver upload failed, could not create request: %#v", err) } gw.logger.Debugf("mediaserver upload url: %s", url) req.Header.Set("Content-Type", "binary/octet-stream") _, err = client.Do(req) if err != nil { return fmt.Errorf("mediaserver upload failed, could not Do request: %#v", err) } return nil } // handleFilesLocal use MediaServerPath configuration, places the file on the current filesystem. 
// Returns error on failure. func (gw *Gateway) handleFilesLocal(fi *config.FileInfo) error { sha1sum := fmt.Sprintf("%x", sha1.Sum(*fi.Data))[:8] //nolint:gosec dir := gw.BridgeValues().General.MediaDownloadPath + "/" + sha1sum err := os.Mkdir(dir, os.ModePerm) if err != nil && !os.IsExist(err) { return fmt.Errorf("mediaserver path failed, could not mkdir: %s %#v", err, err) } path := dir + "/" + fi.Name gw.logger.Debugf("mediaserver path placing file: %s", path) err = ioutil.WriteFile(path, *fi.Data, os.ModePerm) if err != nil { return fmt.Errorf("mediaserver path failed, could not writefile: %s %#v", err, err) } return nil } // ignoreEvent returns true if we need to ignore this event for the specified destination bridge. func (gw *Gateway) ignoreEvent(event string, dest *bridge.Bridge) bool { switch event { case config.EventAvatarDownload: // Avatar downloads are only relevant for telegram and mattermost for now if dest.Protocol != "mattermost" && dest.Protocol != "telegram" && dest.Protocol != "xmpp" { return true } case config.EventJoinLeave: // only relay join/part when configured if !dest.GetBool("ShowJoinPart") { return true } case config.EventTopicChange: // only relay topic change when used in some way on other side if !dest.GetBool("ShowTopicChange") && !dest.GetBool("SyncTopic") { return true } } return false } // handleMessage makes sure the message get sent to the correct bridge/channels. // Returns an array of msg ID's func (gw *Gateway) handleMessage(rmsg *config.Message, dest *bridge.Bridge) []*BrMsgID { var brMsgIDs []*BrMsgID // Not all bridges support "user is typing" indications so skip the message // if the targeted bridge does not support it. 
if rmsg.Event == config.EventUserTyping { if _, ok := bridgemap.UserTypingSupport[dest.Protocol]; !ok { return nil } } // if we have an attached file, or other info if rmsg.Extra != nil && len(rmsg.Extra[config.EventFileFailureSize]) != 0 && rmsg.Text == "" { return brMsgIDs } if gw.ignoreEvent(rmsg.Event, dest) { return brMsgIDs } // broadcast to every out channel (irc QUIT) if rmsg.Channel == "" && rmsg.Event != config.EventJoinLeave { gw.logger.Debug("empty channel") return brMsgIDs } // Get the ID of the parent message in thread var canonicalParentMsgID string if rmsg.ParentID != "" && dest.GetBool("PreserveThreading") { canonicalParentMsgID = gw.FindCanonicalMsgID(rmsg.Protocol, rmsg.ParentID) } channels := gw.getDestChannel(rmsg, *dest) for idx := range channels { channel := &channels[idx] msgID, err := gw.SendMessage(rmsg, dest, channel, canonicalParentMsgID) if err != nil { gw.logger.Errorf("SendMessage failed: %s", err) continue } if msgID == "" { continue } brMsgIDs = append(brMsgIDs, &BrMsgID{dest, dest.Protocol + " " + msgID, channel.ID}) } return brMsgIDs } func (gw *Gateway) handleExtractNicks(msg *config.Message) { var err error br := gw.Bridges[msg.Account] for _, outer := range br.GetStringSlice2D("ExtractNicks") { search := outer[0] replace := outer[1] msg.Username, msg.Text, err = extractNick(search, replace, msg.Username, msg.Text) if err != nil { gw.logger.Errorf("regexp in %s failed: %s", msg.Account, err) break } } } // extractNick searches for a username (based on "search" a regular expression). // if this matches it extracts a nick (based on "extract" another regular expression) from text // and replaces username with this result. // returns error if the regexp doesn't compile. 
func extractNick(search, extract, username, text string) (string, string, error) { re, err := regexp.Compile(search) if err != nil { return username, text, err } if re.MatchString(username) { re, err = regexp.Compile(extract) if err != nil { return username, text, err } res := re.FindAllStringSubmatch(text, 1) // only replace if we have exactly 1 match if len(res) > 0 && len(res[0]) == 2 { username = res[0][1] text = strings.Replace(text, res[0][0], "", 1) } } return username, text, nil }
42wim/matterbridge
gateway/handlers.go
GO
apache-2.0
8,288
/* * Copyright 2015 TripleD framework. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package eu.tripledframework.eventstore.domain.annotation; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; /** * Annotation to map a method parameter to an event property. This is used in conjunction with the * {@link ConstructionHandler} to map event properties to method parameters. */ @Target(ElementType.PARAMETER) @Retention(RetentionPolicy.RUNTIME) public @interface EP { /** * The name of the event property to map to. * * @return The name of the event property */ String value(); }
domenique/tripled-framework
eventstore-core/src/main/java/eu/tripledframework/eventstore/domain/annotation/EP.java
Java
apache-2.0
1,228
package com.algorithm.tree;

/**
 * Date 07/04/2014
 *
 * @author tusroy
 *
 * Video link - https://youtu.be/rbg7Qf8GkQ4
 *
 * Insertion into an AVL tree, a self-balancing binary search tree in which the
 * heights of a node's left and right subtrees may differ by at most one.
 *
 * The four imbalance cases and their fixes:
 *   left-left   - one right rotation
 *   left-right  - one left rotation then one right rotation
 *   right-left  - one right rotation then one left rotation
 *   right-right - one left rotation
 *
 * Every node caches its subtree height (so balance factors need not be
 * recomputed) and its subtree size (node count, as in an order-statistic tree).
 *
 * Runtime complexity of insert is O(log n).
 *
 * References http://en.wikipedia.org/wiki/AVL_tree
 * http://www.geeksforgeeks.org/avl-tree-set-1-insertion/
 */
public class AVLTree {

    /**
     * Rotates the subtree rooted at {@code root} to the left and returns the
     * new subtree root (the former right child). The cached height and size of
     * the two nodes whose children changed are refreshed bottom-up: the demoted
     * node first, then the new root above it.
     */
    private Node leftRotate(Node root) {
        Node newRoot = root.right;
        root.right = root.right.left;
        newRoot.left = root;
        root.height = setHeight(root);
        root.size = setSize(root);
        newRoot.height = setHeight(newRoot);
        newRoot.size = setSize(newRoot);
        return newRoot;
    }

    /** Mirror image of {@link #leftRotate(Node)}: rotates right, returns the former left child. */
    private Node rightRotate(Node root) {
        Node newRoot = root.left;
        root.left = root.left.right;
        newRoot.right = root;
        root.height = setHeight(root);
        root.size = setSize(root);
        newRoot.height = setHeight(newRoot);
        newRoot.size = setSize(newRoot);
        return newRoot;
    }

    /**
     * Recomputes the height of {@code root} from its children's cached heights.
     *
     * @return 0 for an empty subtree, otherwise 1 + max(child heights)
     */
    private int setHeight(Node root) {
        if (root == null) {
            return 0;
        }
        return 1 + Math.max((root.left != null ? root.left.height : 0),
                (root.right != null ? root.right.height : 0));
    }

    /** Cached height of the subtree rooted at {@code root}; 0 for an empty subtree. */
    private int height(Node root) {
        if (root == null) {
            return 0;
        }
        return root.height;
    }

    /**
     * Recomputes the number of nodes in the subtree rooted at {@code root} from
     * its children's cached sizes.
     *
     * BUG FIX: the original took {@code Math.max} of the child sizes, which made
     * {@code size} an exact duplicate of {@code height}. A subtree's node count
     * is the SUM of its children's sizes plus one (order-statistic convention).
     *
     * @return 0 for an empty subtree, otherwise 1 + size(left) + size(right)
     */
    private int setSize(Node root) {
        if (root == null) {
            return 0;
        }
        return 1 + (root.left != null ? root.left.size : 0)
                 + (root.right != null ? root.right.size : 0);
    }

    /**
     * Inserts {@code data} into the AVL tree rooted at {@code root} and returns
     * the (possibly new) root of the rebalanced subtree. Duplicates go to the
     * right subtree ({@code root.data <= data}). Runs in O(log n).
     */
    public Node insert(Node root, int data) {
        if (root == null) {
            return Node.newNode(data);
        }
        if (root.data <= data) {
            root.right = insert(root.right, data);
        } else {
            root.left = insert(root.left, data);
        }
        int balance = balance(root.left, root.right);
        if (balance > 1) {
            if (height(root.left.left) >= height(root.left.right)) {
                // left-left case: single right rotation
                root = rightRotate(root);
            } else {
                // left-right case: left rotation on the child, then right rotation
                root.left = leftRotate(root.left);
                root = rightRotate(root);
            }
        } else if (balance < -1) {
            if (height(root.right.right) >= height(root.right.left)) {
                // right-right case: single left rotation
                root = leftRotate(root);
            } else {
                // right-left case: right rotation on the child, then left rotation
                root.right = rightRotate(root.right);
                root = leftRotate(root);
            }
        } else {
            // Already balanced here: just refresh the cached height and size
            // (the rotation helpers refresh them on the rebalancing paths).
            root.height = setHeight(root);
            root.size = setSize(root);
        }
        return root;
    }

    /** Balance factor: height(left) - height(right); outside [-1, 1] triggers rotations. */
    private int balance(Node rootLeft, Node rootRight) {
        return height(rootLeft) - height(rootRight);
    }

    /** Smoke test: inserts a few values and prints in-order and pre-order traversals. */
    public static void main(String[] args) {
        AVLTree avlTree = new AVLTree();
        Node root = null;
        root = avlTree.insert(root, -10);
        root = avlTree.insert(root, 2);
        root = avlTree.insert(root, 13);
        root = avlTree.insert(root, -13);
        root = avlTree.insert(root, -15);
        root = avlTree.insert(root, 15);
        root = avlTree.insert(root, 17);
        root = avlTree.insert(root, 20);
        TreeTraversals tt = new TreeTraversals();
        tt.inOrder(root);
        System.out.println();
        tt.preOrder(root);
    }
}
toanqc/algorithm
src/com/algorithm/tree/AVLTree.java
Java
apache-2.0
3,454
package io.github.ibuildthecloud.dstack.api.pubsub.model;

import java.util.List;

import io.github.ibuildthecloud.gdapi.annotation.Field;
import io.github.ibuildthecloud.gdapi.annotation.Type;

/**
 * API model for a pub/sub subscription request. Declared with
 * {@code pluralName = "subscribe"} so the resource keeps that exact path
 * segment instead of a derived plural.
 */
@Type(pluralName = "subscribe")
public interface Subscribe {

    /**
     * Event names to subscribe to. Required on create with at least one
     * character; values are restricted to the characters "*._0-9a-zA-Z;=" by
     * the {@code @Field} constraint. NOTE(review): "*" presumably acts as a
     * wildcard in event matching — confirm against the event-routing code.
     */
    @Field(create = true, minLength = 1, validChars = "*._0-9a-zA-Z;=")
    List<String> getEventNames();

    // NOTE(review): unannotated, so framework defaults apply; presumably the id
    // of the agent this subscription belongs to — verify against callers.
    Long getAgentId();
}
ibuildthecloud/dstack
code/framework/api-pub-sub/src/main/java/io/github/ibuildthecloud/dstack/api/pubsub/model/Subscribe.java
Java
apache-2.0
391
package com.anteoy.decisiveBattle.created.factories.abstractFactory;

/**
 * Concrete {@code Color} product in the abstract-factory example: "fills" with
 * red by printing a fixed message.
 * <p>
 * Created by zhoudazhuang on 17-12-9.
 */
public class RedColor implements Color {

    /** Prints a fixed message to stdout; the class holds no state. */
    @Override
    public void fill() {
        System.out.println("fill red color");
    }
}
Anteoy/jottings
src/main/java/com/anteoy/decisiveBattle/created/factories/abstractFactory/RedColor.java
Java
apache-2.0
251
using System;
using System.Collections.Generic;
using Elasticsearch.Net;
using Nest;
using Tests.Framework;
using Tests.Framework.Integration;
using Xunit;

namespace Tests.XPack.License.PostLicense
{
	/// <summary>
	/// API tests for the X-Pack post-license endpoint: verifies that both the
	/// fluent and object-initializer request styles produce a PUT to
	/// /_xpack/license?acknowledge=true with the expected license JSON body.
	/// </summary>
	[Collection(TypeOfCluster.ReadOnly)]
	public class PostLicenseApiTests
		: ApiTestBase<IPostLicenseResponse, IPostLicenseRequest, PostLicenseDescriptor, PostLicenseRequest>
	{
		public PostLicenseApiTests(ReadOnlyCluster cluster, EndpointUsage usage) : base(cluster, usage) { }

		// Wires up the four invocation styles (sync/async x fluent/initializer)
		// that the base class exercises against the same endpoint.
		protected override LazyResponses ClientUsage() => Calls(
			fluent: (client, f) => client.PostLicense(f),
			fluentAsync: (client, f) => client.PostLicenseAsync(f),
			request: (client, r) => client.PostLicense(r),
			requestAsync: (client, r) => client.PostLicenseAsync(r)
		);

		protected override HttpMethod HttpMethod => HttpMethod.PUT;

		protected override string UrlPath => $"/_xpack/license?acknowledge=true";

		// NOTE(review): round-trip deserialization is disabled — presumably
		// because FakeLicense is not a validly signed license; confirm.
		protected override bool SupportsDeserialization => false;

		// Serialized request body the framework asserts against; mirrors
		// FakeLicense in the snake_case wire format.
		protected override object ExpectJson { get; } = new
		{
			license = new
			{
				expiry_date_in_millis = 1,
				issue_date_in_millis = 2,
				issued_to = "nest test framework",
				issuer = "martijn",
				max_nodes = 20,
				signature = "<redacted>",
				type = "gold",
				uid = "uuid"
			}
		};

		// Dummy license shared by both request-construction styles below.
		private Nest.License FakeLicense { get; } = new Nest.License
		{
			UID = "uuid",
			ExpiryDateInMilliseconds = 1,
			IssueDateInMilliseconds = 2,
			IssuedTo = "nest test framework",
			Issuer = "martijn",
			Type = LicenseType.Gold,
			MaxNodes = 20,
			Signature = "<redacted>"
		};

		protected override Func<PostLicenseDescriptor, IPostLicenseRequest> Fluent => d => d
			.Acknowledge()
			.License(this.FakeLicense);

		protected override PostLicenseRequest Initializer => new PostLicenseRequest
		{
			Acknowledge = true,
			License = this.FakeLicense
		};
	}
}
TheFireCookie/elasticsearch-net
src/Tests/XPack/License/PostLicense/PostLicenseApiTests.cs
C#
apache-2.0
1,834
/* * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. */ package com.sun.org.apache.bcel.internal.generic; /* ==================================================================== * The Apache Software License, Version 1.1 * * Copyright (c) 2001 The Apache Software Foundation. All rights * reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. The end-user documentation included with the redistribution, * if any, must include the following acknowledgment: * "This product includes software developed by the * Apache Software Foundation (http://www.apache.org/)." * Alternately, this acknowledgment may appear in the software itself, * if and wherever such third-party acknowledgments normally appear. * * 4. The names "Apache" and "Apache Software Foundation" and * "Apache BCEL" must not be used to endorse or promote products * derived from this software without prior written permission. For * written permission, please contact apache@apache.org. * * 5. Products derived from this software may not be called "Apache", * "Apache BCEL", nor may "Apache" appear in their name, without * prior written permission of the Apache Software Foundation. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * ==================================================================== * * This software consists of voluntary contributions made by many * individuals on behalf of the Apache Software Foundation. For more * information on the Apache Software Foundation, please see * <http://www.apache.org/>. */ /** * IFEQ - Branch if int comparison with zero succeeds * * <PRE>Stack: ..., value -&gt; ...</PRE> * * @author <A HREF="mailto:markus.dahm@berlin.de">M. Dahm</A> */ public class IFEQ extends IfInstruction { /** * Empty constructor needed for the Class.newInstance() statement in * Instruction.readInstruction(). Not to be used otherwise. */ IFEQ() {} public IFEQ(InstructionHandle target) { super(com.sun.org.apache.bcel.internal.Constants.IFEQ, target); } /** * @return negation of instruction, e.g. IFEQ.negate() == IFNE */ public IfInstruction negate() { return new IFNE(target); } /** * Call corresponding visitor method(s). The order is: * Call visitor methods of implemented interfaces first, then * call methods according to the class hierarchy in descending order, * i.e., the most specific visitXXX() call comes last. * * @param v Visitor object */ public void accept(Visitor v) { v.visitStackConsumer(this); v.visitBranchInstruction(this); v.visitIfInstruction(this); v.visitIFEQ(this); } }
haikuowuya/android_system_code
src/com/sun/org/apache/bcel/internal/generic/IFEQ.java
Java
apache-2.0
3,961
var fileio = require("../fileio")(); var wildcard = require("../wildcard"); var fs = require("fs"); var items = []; var cells = [ ["Medication","dropdown"], ["Dosage","text"], ["Per Use","text"], ["Frequency","dropdown"], ["Type of Med.","text"], ["Method","text"], ["To Dispense","text"], ["PRN","checkbox"] ]; var settings; function buildTable() { var table = document.getElementById("table"); while ( table.firstChild ) { table.removeChild(table.firstChild); } var row = document.createElement("tr"); var col = document.createElement("td"); col.innerText = "X"; row.appendChild(col); for ( var i = 0; i < cells.length; i++ ) { var col = document.createElement("td"); col.innerText = cells[i][0]; row.appendChild(col); } table.appendChild(row); for ( var i = 0; i < items.length; i++ ) { var row = document.createElement("tr"); var col = document.createElement("td"); var button = document.createElement("button"); button.innerText = "X"; button.className = "del"; button.id = "d:" + i; button.onclick = function() { var index = parseInt(this.id.split(":")[1]); items = items.slice(0,index).concat(items.slice(index + 1)); buildTable(); } col.appendChild(button); row.appendChild(col); for ( var j = 0; j < cells.length; j++ ) { if ( cells[j][1] == "dropdown" ) { var col = document.createElement("td"); var select = document.createElement("select"); for ( var k = 0; k < (cells[j][0] == "Medication" ? settings.medications : settings.frequencies).length; k++ ) { var option = document.createElement("option"); var item = cells[j][0] == "Medication" ? settings.medications[k][0] : settings.frequencies[k]; option.innerText = item; option.value = item; select.appendChild(option); } select.value = items[i][j] || ""; select.id = "i:" + i + ":" + j; select.onchange = function() { var index = this.id.split(":").map(item => parseInt(item)); cells[index[2]][0] == "Medication" ? 
items[index[1]] = settings.medications.filter(item => item[0] == this.value)[0] : items[index[1]][index[2]] = this.value; buildTable(); } col.appendChild(select); row.appendChild(col); } else if ( cells[j][1] == "text" ) { var col = document.createElement("td"); var input = document.createElement("input"); input.size = 10; input.value = items[i][j] || ""; input.id = "i:" + i + ":" + j; input.onkeyup = function() { var index = this.id.split(":").map(item => parseInt(item)); items[index[1]][index[2]] = this.value; } col.appendChild(input); row.appendChild(col); } else if ( cells[j][1] == "checkbox" ) { var col = document.createElement("td"); var box = document.createElement("input"); box.type = "checkbox"; box.checked = items[i][j]; box.id = "i:" + i + ":" + j; box.onchange = function() { var index = this.id.split(":").map(item => parseInt(item)); items[index[1]][index[2]] = this.checked; } col.appendChild(box); row.appendChild(col); } } table.appendChild(row); } } function buildList(data) { var select = document.getElementById("patients"); for ( var i = 0; i < data.length; i++ ) { var option = document.createElement("option"); option.innerText = data[i]["Name"]; option.value = i; select.appendChild(option); } select.value = ""; } function addMedication() { items.push([]); buildTable(); } function toPrint() { localStorage.setItem("items",JSON.stringify([items,parseInt(document.getElementById("patients").value)])); open(__dirname + "/print/index.html","","width=600,height=600"); } function toList() { location.href = __dirname + "/../list/index.html"; } window.onload = function() { wildcard.onLoad(function(char) { if ( char == "p" ) toPrint(); }); fileio.loadData("patients",function(hold) { var patients = JSON.parse(hold.toString()); fileio.loadData("settings",function(hold) { hold = JSON.parse(hold.toString()); settings = { medications: hold.prescript.medications, frequencies: hold.prescript.frequencies }; buildList(patients); buildTable(); }); }); };
Simas2006/Treat
client/prescript/script.js
JavaScript
apache-2.0
4,465
/*
 * Copyright (C) 2016 Singular Studios (a.k.a Atom Tecnologia) - www.opensingular.com
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.opensingular.form.converter;

import org.opensingular.form.SInstance;

import java.io.Serializable;

/**
 * Two-way converter between a plain {@link Serializable} object and a form
 * {@link SInstance}.
 *
 * @param <T> the type of the object to be converted
 * @param <S> the instance type this converter reads from and writes into
 */
public interface SInstanceConverter<T extends Serializable, S extends SInstance> {

    /**
     * Populates the given instance with the values taken from {@code obj}.
     */
    void fillInstance(S ins, T obj);

    /**
     * Builds an object of type {@code T} from the values held by {@code ins}.
     */
    T toObject(S ins);
}
opensingular/singular-core
form/core/src/main/java/org/opensingular/form/converter/SInstanceConverter.java
Java
apache-2.0
965
var searchData= [ ['all',['all',['../unionMQTT__Connect__Header__Flags.html#aac6e7ff76de4ce79fb198a5c043614ce',1,'MQTT_Connect_Header_Flags::all()'],['../unionMQTT__Connack__Header__Flags.html#ac1d68764617031a6d7f0e48c21b11807',1,'MQTT_Connack_Header_Flags::all()']]] ];
chainlinq/boatkeeper
linux_mqtt_mbedtls-2.1.1/docs/html/search/variables_0.js
JavaScript
apache-2.0
273
# -*- coding: utf-8 -*- ''' The crypt module manages all of the cryptography functions for minions and masters, encrypting and decrypting payloads, preparing messages, and authenticating peers ''' # Import python libs import os import sys import time import hmac import shutil import hashlib import logging # Import third party libs try: from M2Crypto import RSA, EVP from Crypto.Cipher import AES except ImportError: # No need for crypt in local mode pass # Import salt libs import salt.utils import salt.payload import salt.utils.verify import salt.version from salt.exceptions import ( AuthenticationError, SaltClientError, SaltReqTimeoutError ) log = logging.getLogger(__name__) def dropfile(cachedir, user=None): ''' Set an aes dropfile to update the publish session key ''' dfnt = os.path.join(cachedir, '.dfnt') dfn = os.path.join(cachedir, '.dfn') def ready(): ''' Because MWorker._update_aes uses second-precision mtime to detect changes to the file, we must avoid writing two versions with the same mtime. Note that this only makes rapid updates in serial safe: concurrent updates could still both pass this check and then write two different keys with the same mtime. ''' try: stats = os.stat(dfn) except os.error: # Not there, go ahead and write it return True else: if stats.st_mtime == time.time(): # The mtime is the current time, we must # wait until time has moved on. 
return False else: return True while not ready(): log.warning('Waiting before writing {0}'.format(dfn)) time.sleep(1) aes = Crypticle.generate_key_string() mask = os.umask(191) with salt.utils.fopen(dfnt, 'w+') as fp_: fp_.write(aes) if user: try: import pwd uid = pwd.getpwnam(user).pw_uid os.chown(dfnt, uid, -1) shutil.move(dfnt, dfn) except (KeyError, ImportError, OSError, IOError): pass os.umask(mask) def gen_keys(keydir, keyname, keysize, user=None): ''' Generate a keypair for use with salt ''' base = os.path.join(keydir, keyname) priv = '{0}.pem'.format(base) pub = '{0}.pub'.format(base) gen = RSA.gen_key(keysize, 65537, callback=lambda x, y, z: None) cumask = os.umask(191) gen.save_key(priv, None) os.umask(cumask) gen.save_pub_key(pub) os.chmod(priv, 256) if user: try: import pwd uid = pwd.getpwnam(user).pw_uid os.chown(priv, uid, -1) os.chown(pub, uid, -1) except (KeyError, ImportError, OSError): # The specified user was not found, allow the backup systems to # report the error pass return priv def sign_message(privkey_path, message): ''' Use M2Crypto's EVP ("Envelope") functions to sign a message. Returns the signature. ''' log.debug('salt.crypt.sign_message: Loading private key') evp_rsa = EVP.load_key(privkey_path) evp_rsa.sign_init() evp_rsa.sign_update(message) log.debug('salt.crypt.sign_message: Signing message.') return evp_rsa.sign_final() def verify_signature(pubkey_path, message, signature): ''' Use M2Crypto's EVP ("Envelope") functions to verify the signature on a message. Returns True for valid signature. 
''' # Verify that the signature is valid log.debug('salt.crypt.verify_signature: Loading public key') pubkey = RSA.load_pub_key(pubkey_path) verify_evp = EVP.PKey() verify_evp.assign_rsa(pubkey) verify_evp.verify_init() verify_evp.verify_update(message) log.debug('salt.crypt.verify_signature: Verifying signature') result = verify_evp.verify_final(signature) return result class MasterKeys(dict): ''' The Master Keys class is used to manage the public key pair used for authentication by the master. ''' def __init__(self, opts): super(MasterKeys, self).__init__() self.opts = opts self.pub_path = os.path.join(self.opts['pki_dir'], 'master.pub') self.rsa_path = os.path.join(self.opts['pki_dir'], 'master.pem') self.key = self.__get_keys() self.token = self.__gen_token() def __get_keys(self): ''' Returns a key objects for the master ''' if os.path.exists(self.rsa_path): key = RSA.load_key(self.rsa_path) log.debug('Loaded master key: {0}'.format(self.rsa_path)) else: log.info('Generating keys: {0}'.format(self.opts['pki_dir'])) gen_keys(self.opts['pki_dir'], 'master', self.opts['keysize'], self.opts.get('user')) key = RSA.load_key(self.rsa_path) return key def __gen_token(self): ''' Generate the authentication token ''' return self.key.private_encrypt('salty bacon', 5) def get_pub_str(self): ''' Return the string representation of the public key ''' if not os.path.isfile(self.pub_path): key = self.__get_keys() key.save_pub_key(self.pub_path) return salt.utils.fopen(self.pub_path, 'r').read() class Auth(object): ''' The Auth class provides the sequence for setting up communication with the master server from a minion. 
''' def __init__(self, opts): self.opts = opts self.token = Crypticle.generate_key_string() self.serial = salt.payload.Serial(self.opts) self.pub_path = os.path.join(self.opts['pki_dir'], 'minion.pub') self.rsa_path = os.path.join(self.opts['pki_dir'], 'minion.pem') if 'syndic_master' in self.opts: self.mpub = 'syndic_master.pub' elif 'alert_master' in self.opts: self.mpub = 'monitor_master.pub' else: self.mpub = 'minion_master.pub' def get_keys(self): ''' Returns a key objects for the minion ''' # Make sure all key parent directories are accessible user = self.opts.get('user', 'root') salt.utils.verify.check_path_traversal(self.opts['pki_dir'], user) if os.path.exists(self.rsa_path): key = RSA.load_key(self.rsa_path) log.debug('Loaded minion key: {0}'.format(self.rsa_path)) else: log.info('Generating keys: {0}'.format(self.opts['pki_dir'])) gen_keys(self.opts['pki_dir'], 'minion', self.opts['keysize'], self.opts.get('user')) key = RSA.load_key(self.rsa_path) return key def gen_token(self, clear_tok): ''' Encrypt a string with the minion private key to verify identity with the master. ''' return self.get_keys().private_encrypt(clear_tok, 5) def minion_sign_in_payload(self): ''' Generates the payload used to authenticate with the master server. This payload consists of the passed in id_ and the ssh public key to encrypt the AES key sent back form the master. 
''' payload = {} key = self.get_keys() tmp_pub = salt.utils.mkstemp() key.save_pub_key(tmp_pub) payload['enc'] = 'clear' payload['load'] = {} payload['load']['cmd'] = '_auth' payload['load']['id'] = self.opts['id'] try: pub = RSA.load_pub_key( os.path.join(self.opts['pki_dir'], self.mpub) ) payload['load']['token'] = pub.public_encrypt(self.token, RSA.pkcs1_oaep_padding) except Exception: pass with salt.utils.fopen(tmp_pub, 'r') as fp_: payload['load']['pub'] = fp_.read() os.remove(tmp_pub) return payload def decrypt_aes(self, payload, master_pub=True): ''' This function is used to decrypt the aes seed phrase returned from the master server, the seed phrase is decrypted with the ssh rsa host key. Pass in the encrypted aes key. Returns the decrypted aes seed key, a string ''' log.debug('Decrypting the current master AES key') key = self.get_keys() key_str = key.private_decrypt(payload['aes'], RSA.pkcs1_oaep_padding) if 'sig' in payload: m_path = os.path.join(self.opts['pki_dir'], self.mpub) if os.path.exists(m_path): try: mkey = RSA.load_pub_key(m_path) except Exception: return '', '' digest = hashlib.sha256(key_str).hexdigest() m_digest = mkey.public_decrypt(payload['sig'], 5) if m_digest != digest: return '', '' else: return '', '' if '_|-' in key_str: return key_str.split('_|-') else: if 'token' in payload: token = key.private_decrypt(payload['token'], RSA.pkcs1_oaep_padding) return key_str, token elif not master_pub: return key_str, '' return '', '' def verify_master(self, payload): ''' Verify that the master is the same one that was previously accepted ''' m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub) if os.path.isfile(m_pub_fn) and not self.opts['open_mode']: local_master_pub = salt.utils.fopen(m_pub_fn).read() if payload['pub_key'] != local_master_pub: # This is not the last master we connected to log.error('The master key has changed, the salt master could ' 'have been subverted, verify salt master\'s public ' 'key') return '' try: aes, token = 
self.decrypt_aes(payload) if token != self.token: log.error( 'The master failed to decrypt the random minion token' ) return '' except Exception: log.error( 'The master failed to decrypt the random minion token' ) return '' return aes else: salt.utils.fopen(m_pub_fn, 'w+').write(payload['pub_key']) aes, token = self.decrypt_aes(payload, False) return aes def sign_in(self, timeout=60, safe=True): ''' Send a sign in request to the master, sets the key information and returns a dict containing the master publish interface to bind to and the decrypted aes key for transport decryption. ''' auth = {} m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub) try: self.opts['master_ip'] = salt.utils.dns_check( self.opts['master'], True, self.opts['ipv6'] ) except SaltClientError as e: if safe: log.warning('SaltClientError: {0}'.format(e)) return 'retry' raise SaltClientError if self.opts['master_ip'] not in self.opts['master_uri']: self.opts['master_uri'] = (self.opts['master_uri'].replace( self.opts['master_uri'].split(':')[1][2:], self.opts['master_ip'])) sreq = salt.payload.SREQ( self.opts['master_uri'], ) try: payload = sreq.send_auto( self.minion_sign_in_payload(), timeout=timeout ) except SaltReqTimeoutError as e: if safe: log.warning('SaltReqTimeoutError: {0}'.format(e)) return 'retry' raise SaltClientError if 'load' in payload: if 'ret' in payload['load']: if not payload['load']['ret']: log.critical( 'The Salt Master has rejected this minion\'s public ' 'key!\nTo repair this issue, delete the public key ' 'for this minion on the Salt Master and restart this ' 'minion.\nOr restart the Salt Master in open mode to ' 'clean out the keys. The Salt Minion will now exit.' 
) sys.exit(0) else: log.error( 'The Salt Master has cached the public key for this ' 'node, this salt minion will wait for {0} seconds ' 'before attempting to re-authenticate'.format( self.opts['acceptance_wait_time'] ) ) return 'retry' auth['aes'] = self.verify_master(payload) if not auth['aes']: log.critical( 'The Salt Master server\'s public key did not authenticate!\n' 'The master may need to be updated if it is a version of Salt ' 'lower than {0}, or\n' 'If you are confident that you are connecting to a valid Salt ' 'Master, then remove the master public key and restart the ' 'Salt Minion.\nThe master public key can be found ' 'at:\n{1}'.format(salt.version.__version__, m_pub_fn) ) sys.exit(42) if self.opts.get('master_finger', False): if salt.utils.pem_finger(m_pub_fn) != self.opts['master_finger']: log.critical( 'The specified fingerprint in the master configuration ' 'file:\n{0}\nDoes not match the authenticating master\'s ' 'key:\n{1}\nVerify that the configured fingerprint ' 'matches the fingerprint of the correct master and that ' 'this minion is not subject to a man in the middle attack' .format( self.opts['master_finger'], salt.utils.pem_finger(m_pub_fn) ) ) sys.exit(42) auth['publish_port'] = payload['publish_port'] return auth class Crypticle(object): ''' Authenticated encryption class Encryption algorithm: AES-CBC Signing algorithm: HMAC-SHA256 ''' PICKLE_PAD = 'pickle::' AES_BLOCK_SIZE = 16 SIG_SIZE = hashlib.sha256().digest_size def __init__(self, opts, key_string, key_size=192): self.keys = self.extract_keys(key_string, key_size) self.key_size = key_size self.serial = salt.payload.Serial(opts) @classmethod def generate_key_string(cls, key_size=192): key = os.urandom(key_size // 8 + cls.SIG_SIZE) return key.encode('base64').replace('\n', '') @classmethod def extract_keys(cls, key_string, key_size): key = key_string.decode('base64') assert len(key) == key_size / 8 + cls.SIG_SIZE, 'invalid key' return key[:-cls.SIG_SIZE], key[-cls.SIG_SIZE:] def 
encrypt(self, data): ''' encrypt data with AES-CBC and sign it with HMAC-SHA256 ''' aes_key, hmac_key = self.keys pad = self.AES_BLOCK_SIZE - len(data) % self.AES_BLOCK_SIZE data = data + pad * chr(pad) iv_bytes = os.urandom(self.AES_BLOCK_SIZE) cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes) data = iv_bytes + cypher.encrypt(data) sig = hmac.new(hmac_key, data, hashlib.sha256).digest() return data + sig def decrypt(self, data): ''' verify HMAC-SHA256 signature and decrypt data with AES-CBC ''' aes_key, hmac_key = self.keys sig = data[-self.SIG_SIZE:] data = data[:-self.SIG_SIZE] mac_bytes = hmac.new(hmac_key, data, hashlib.sha256).digest() if len(mac_bytes) != len(sig): log.debug('Failed to authenticate message') raise AuthenticationError('message authentication failed') result = 0 for zipped_x, zipped_y in zip(mac_bytes, sig): result |= ord(zipped_x) ^ ord(zipped_y) if result != 0: log.debug('Failed to authenticate message') raise AuthenticationError('message authentication failed') iv_bytes = data[:self.AES_BLOCK_SIZE] data = data[self.AES_BLOCK_SIZE:] cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes) data = cypher.decrypt(data) return data[:-ord(data[-1])] def dumps(self, obj): ''' Serialize and encrypt a python object ''' return self.encrypt(self.PICKLE_PAD + self.serial.dumps(obj)) def loads(self, data): ''' Decrypt and un-serialize a python object ''' data = self.decrypt(data) # simple integrity check to verify that we got meaningful data if not data.startswith(self.PICKLE_PAD): return {} return self.serial.loads(data[len(self.PICKLE_PAD):]) class SAuth(Auth): ''' Set up an object to maintain the standalone authentication session with the salt master ''' def __init__(self, opts): super(SAuth, self).__init__(opts) self.crypticle = self.__authenticate() def __authenticate(self): ''' Authenticate with the master, this method breaks the functional paradigm, it will update the master information from a fresh sign in, signing in can occur as often as needed to 
keep up with the revolving master aes key. ''' while True: creds = self.sign_in( self.opts['auth_timeout'], self.opts.get('_safe_auth', True) ) if creds == 'retry': if self.opts.get('caller'): print('Minion failed to authenticate with the master, ' 'has the minion key been accepted?') sys.exit(2) time.sleep(self.opts['acceptance_wait_time']) continue break return Crypticle(self.opts, creds['aes'])
MadeiraCloud/salt
sources/salt/crypt.py
Python
apache-2.0
18,535
/* * Copyright 2022 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.android.libraries.mobiledatadownload.file.openers; import android.net.Uri; import com.google.android.libraries.mobiledatadownload.file.OpenContext; import com.google.android.libraries.mobiledatadownload.file.Opener; import com.google.android.libraries.mobiledatadownload.file.common.FileChannelConvertible; import com.google.android.libraries.mobiledatadownload.file.common.ReleasableResource; import java.io.Closeable; import java.io.IOException; import java.io.RandomAccessFile; import java.nio.channels.FileChannel; import javax.annotation.Nullable; /** * An opener for acquiring lock files. * * <p>Lock files are used to separate lock acquisition from IO on the target file itself. For a * target file "data.txt", an associated lock file "data.txt.lock" is created and used to control * locking instead of acquiring a file lock on "data.txt" itself. This means the lock holder can * perform a wider range of operations on the target file than would have been possible with a * simple file lock on the target; the lock acts as an independent semaphore. * * <p>Note that this opener is incompatible with opaque URIs, e.g. "file:///foo.txt" is compatible * whereas "memory:foo.txt" is not. * * <p>TODO: consider allowing client to specify lock file in order to support opaque URIs. 
*/ public final class LockFileOpener implements Opener<Closeable> { private static final String LOCK_SUFFIX = ".lock"; private final boolean shared; private final boolean readOnly; private boolean isNonBlocking; private LockFileOpener(boolean shared, boolean readOnly) { this.shared = shared; this.readOnly = readOnly; } /** * Creates an instance that will acquire an exclusive lock on the file. {@link #open} will create * the lock file if it doesn't already exist. */ public static LockFileOpener createExclusive() { return new LockFileOpener(/* shared= */ false, /* readOnly= */ false); } /** * Creates an instance that will acquire a shared lock on the file (shared across processes; * multiple threads in the same process exclude one another). {@link #open} won't create the lock * file if it doesn't already exist (instead throwing {@code FileNotFoundException}), meaning this * opener is read-only. */ public static LockFileOpener createReadOnlyShared() { return new LockFileOpener(/* shared= */ true, /* readOnly= */ true); } /** * Creates an instance that will acquire a shared lock on the file (shared across processes; * multiple threads in the same process exclude one another). {@link #open} *will* create the lock * file if it doesn't already exist. */ public static LockFileOpener createShared() { return new LockFileOpener(/* shared= */ true, /* readOnly= */ false); } /** * If enabled and the lock cannot be acquired immediately, {@link #open} will return {@code null} * instead of waiting until the lock can be acquired. */ public LockFileOpener nonBlocking(boolean isNonBlocking) { this.isNonBlocking = isNonBlocking; return this; } // TODO(b/131180722): consider adding option for blocking with timeout @Override @Nullable public Closeable open(OpenContext openContext) throws IOException { // Clearing fragment is necessary to open a FileChannelConvertible stream. 
Uri lockUri = openContext .originalUri() .buildUpon() .path(openContext.encodedUri().getPath() + LOCK_SUFFIX) .fragment("") .build(); try (ReleasableResource<Closeable> threadLockResource = ReleasableResource.create(openThreadLock(openContext, lockUri))) { if (threadLockResource.get() == null) { return null; } try (ReleasableResource<Closeable> streamResource = ReleasableResource.create(openStreamForLocking(openContext, lockUri)); ReleasableResource<Closeable> fileLockResource = ReleasableResource.create(openFileLock(openContext, streamResource.get()))) { if (fileLockResource.get() == null) { return null; } // The thread lock guards access to the stream and file lock so *must* be closed last, and // a file lock must be closed before its underlying file so *must* be closed first. Closeable threadLock = threadLockResource.release(); Closeable stream = streamResource.release(); Closeable fileLock = fileLockResource.release(); return () -> { try (Closeable last = threadLock; Closeable middle = stream; Closeable first = fileLock) {} }; } } } /** * Acquires (or tries to acquire) the cross-thread lock for {@code lockUri}. This is a * sub-operation of {@link #open}. */ @Nullable private Closeable openThreadLock(OpenContext openContext, Uri lockUri) throws IOException { if (isNonBlocking) { return openContext.backend().lockScope().tryThreadLock(lockUri); } else { return openContext.backend().lockScope().threadLock(lockUri); } } /** Opens a stream to {@code lockUri}. This is a sub-operation of {@link #open}. */ private Closeable openStreamForLocking(OpenContext openContext, Uri lockUri) throws IOException { if (shared && readOnly) { return openContext.backend().openForRead(lockUri); } else if (shared && !readOnly) { return openContext.storage().open(lockUri, RandomAccessFileOpener.createForReadWrite()); } else { return openContext.backend().openForWrite(lockUri); } } /** * Acquires (or tries to acquire) the cross-process lock for {@code stream}. 
Fails if the stream * can't be converted to FileChannel. This is a sub-operation of {@link #open}. */ @Nullable private Closeable openFileLock(OpenContext openContext, Closeable closeable) throws IOException { FileChannel channel = getFileChannelFromCloseable(closeable); if (isNonBlocking) { return openContext.backend().lockScope().tryFileLock(channel, shared); } else { return openContext.backend().lockScope().fileLock(channel, shared); } } private static FileChannel getFileChannelFromCloseable(Closeable closeable) throws IOException { // TODO(b/181119642): Update code so we are not casing on instanceof. if (closeable instanceof FileChannelConvertible) { return ((FileChannelConvertible) closeable).toFileChannel(); } else if (closeable instanceof RandomAccessFile) { return ((RandomAccessFile) closeable).getChannel(); } else { throw new IOException("Lock stream not convertible to FileChannel"); } } }
google/mobile-data-download
java/com/google/android/libraries/mobiledatadownload/file/openers/LockFileOpener.java
Java
apache-2.0
7,265
package com.filebreaker.samples; import java.math.BigDecimal; import java.util.Date; public class Sample { private Integer id; private Integer helixAngle; private Integer distanceBetweenTurns; private Integer curvatureAngle; private Integer curvatureRadius; private Integer uses; private Integer sterilizations; private String fileType; private BigDecimal apicalDiameter; private Integer engineAngularSpeed; private BigDecimal engineTorque; private Integer rootCanalSpeed; private Integer movementTypeId; private Integer studyTypeId; private String studyGroup; private Integer experimentId; private Integer metalCompositionId; private Date creationDate; private Date modificationDate; private BigDecimal conicity; private BigDecimal section; private int oscillations; private long durationMillis; public Integer getId() { return id; } public void setId(Integer id) { this.id = id; } public Integer getHelixAngle() { return helixAngle; } public void setHelixAngle(Integer helixAngle) { this.helixAngle = helixAngle; } public Integer getDistanceBetweenTurns() { return distanceBetweenTurns; } public void setDistanceBetweenTurns(Integer distanceBetweenTurns) { this.distanceBetweenTurns = distanceBetweenTurns; } public Integer getCurvatureAngle() { return curvatureAngle; } public void setCurvatureAngle(Integer curvatureAngle) { this.curvatureAngle = curvatureAngle; } public Integer getCurvatureRadius() { return curvatureRadius; } public void setCurvatureRadius(Integer curvatureRadius) { this.curvatureRadius = curvatureRadius; } public Integer getUses() { return uses; } public void setUses(Integer uses) { this.uses = uses; } public Integer getSterilizations() { return sterilizations; } public void setSterilizations(Integer sterilizations) { this.sterilizations = sterilizations; } public String getFileType() { return fileType; } public void setFileType(String fileType) { this.fileType = fileType; } public BigDecimal getApicalDiameter() { return apicalDiameter; } public void 
setApicalDiameter(BigDecimal apicalDiameter) { this.apicalDiameter = apicalDiameter; } public Integer getEngineAngularSpeed() { return engineAngularSpeed; } public void setEngineAngularSpeed(Integer engineAngularSpeed) { this.engineAngularSpeed = engineAngularSpeed; } public BigDecimal getEngineTorque() { return engineTorque; } public void setEngineTorque(BigDecimal engineTorque) { this.engineTorque = engineTorque; } public Integer getMovementTypeId() { return movementTypeId; } public void setMovementTypeId(Integer movementTypeId) { this.movementTypeId = movementTypeId; } public Integer getStudyTypeId() { return studyTypeId; } public void setStudyTypeId(Integer studyTypeId) { this.studyTypeId = studyTypeId; } public String getStudyGroup() { return studyGroup; } public void setStudyGroup(String studyGroup) { this.studyGroup = studyGroup; } public Integer getRootCanalSpeed() { return rootCanalSpeed; } public void setRootCanalSpeed(Integer rootCanalSpeed) { this.rootCanalSpeed = rootCanalSpeed; } public Integer getExperimentId() { return experimentId; } public void setExperimentId(Integer experimentId) { this.experimentId = experimentId; } public Integer getMetalCompositionId() { return metalCompositionId; } public void setMetalCompositionId(Integer metalCompositionId) { this.metalCompositionId = metalCompositionId; } public Date getCreationDate() { return creationDate; } public void setCreationDate(Date creationDate) { this.creationDate = creationDate; } public Date getModificationDate() { return modificationDate; } public void setModificationDate(Date modificationDate) { this.modificationDate = modificationDate; } public BigDecimal getConicity() { return conicity; } public void setConicity(BigDecimal conicity) { this.conicity = conicity; } public int getOscillations() { return oscillations; } public BigDecimal getSection() { return section; } public void setSection(BigDecimal section) { this.section = section; } public void setOscillations(int oscillations) { 
this.oscillations = oscillations; } public long getDurationMillis() { return durationMillis; } public void setDurationMillis(long durationMillis) { this.durationMillis = durationMillis; } }
FileBreaker/Manager
src/main/java/com/filebreaker/samples/Sample.java
Java
apache-2.0
4,470
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package swag

import (
    "reflect"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

// testNameStruct exercises the three tag cases the NameProvider must handle:
// a renamed field ("name"), a field whose JSON name differs entirely from the
// Go name ("plain"), and a field explicitly excluded from JSON ("-").
type testNameStruct struct {
    Name       string `json:"name"`
    NotTheSame int64  `json:"plain"`
    Ignored    string `json:"-"`
}

// TestNameProvider verifies bidirectional JSON<->Go field-name lookups on a
// NameProvider, for struct values, reflect.Type arguments, and pointers alike.
func TestNameProvider(t *testing.T) {
    provider := NewNameProvider()

    var obj = testNameStruct{}

    // JSON name -> Go name, on a struct value.
    nm, ok := provider.GetGoName(obj, "name")
    assert.True(t, ok)
    assert.Equal(t, "Name", nm)

    nm, ok = provider.GetGoName(obj, "plain")
    assert.True(t, ok)
    assert.Equal(t, "NotTheSame", nm)

    // Unknown and `json:"-"` fields must both report "not found".
    nm, ok = provider.GetGoName(obj, "doesNotExist")
    assert.False(t, ok)
    assert.Empty(t, nm)

    nm, ok = provider.GetGoName(obj, "ignored")
    assert.False(t, ok)
    assert.Empty(t, nm)

    // Same lookups, but going through a reflect.Type instead of a value.
    tpe := reflect.TypeOf(obj)
    nm, ok = provider.GetGoNameForType(tpe, "name")
    assert.True(t, ok)
    assert.Equal(t, "Name", nm)

    nm, ok = provider.GetGoNameForType(tpe, "plain")
    assert.True(t, ok)
    assert.Equal(t, "NotTheSame", nm)

    nm, ok = provider.GetGoNameForType(tpe, "doesNotExist")
    assert.False(t, ok)
    assert.Empty(t, nm)

    nm, ok = provider.GetGoNameForType(tpe, "ignored")
    assert.False(t, ok)
    assert.Empty(t, nm)

    // Same lookups again through a pointer to the struct.
    ptr := &obj
    nm, ok = provider.GetGoName(ptr, "name")
    assert.True(t, ok)
    assert.Equal(t, "Name", nm)

    nm, ok = provider.GetGoName(ptr, "plain")
    assert.True(t, ok)
    assert.Equal(t, "NotTheSame", nm)

    nm, ok = provider.GetGoName(ptr, "doesNotExist")
    assert.False(t, ok)
    assert.Empty(t, nm)

    nm, ok = provider.GetGoName(ptr, "ignored")
    assert.False(t, ok)
    assert.Empty(t, nm)

    // Reverse direction: Go name -> JSON name, on a struct value.
    nm, ok = provider.GetJSONName(obj, "Name")
    assert.True(t, ok)
    assert.Equal(t, "name", nm)

    nm, ok = provider.GetJSONName(obj, "NotTheSame")
    assert.True(t, ok)
    assert.Equal(t, "plain", nm)

    nm, ok = provider.GetJSONName(obj, "DoesNotExist")
    assert.False(t, ok)
    assert.Empty(t, nm)

    nm, ok = provider.GetJSONName(obj, "Ignored")
    assert.False(t, ok)
    assert.Empty(t, nm)

    // Reverse direction through a reflect.Type.
    nm, ok = provider.GetJSONNameForType(tpe, "Name")
    assert.True(t, ok)
    assert.Equal(t, "name", nm)

    nm, ok = provider.GetJSONNameForType(tpe, "NotTheSame")
    assert.True(t, ok)
    assert.Equal(t, "plain", nm)

    nm, ok = provider.GetJSONNameForType(tpe, "doesNotExist")
    assert.False(t, ok)
    assert.Empty(t, nm)

    nm, ok = provider.GetJSONNameForType(tpe, "Ignored")
    assert.False(t, ok)
    assert.Empty(t, nm)

    // Reverse direction through a pointer.
    nm, ok = provider.GetJSONName(ptr, "Name")
    assert.True(t, ok)
    assert.Equal(t, "name", nm)

    nm, ok = provider.GetJSONName(ptr, "NotTheSame")
    assert.True(t, ok)
    assert.Equal(t, "plain", nm)

    nm, ok = provider.GetJSONName(ptr, "doesNotExist")
    assert.False(t, ok)
    assert.Empty(t, nm)

    nm, ok = provider.GetJSONName(ptr, "Ignored")
    assert.False(t, ok)
    assert.Empty(t, nm)

    // Only the two exported, non-ignored fields should be listed, and a single
    // type should have been cached. (provider.index is accessible because this
    // test lives in the same package.)
    nms := provider.GetJSONNames(ptr)
    assert.Len(t, nms, 2)

    assert.Len(t, provider.index, 1)
}

// TestJSONConcatenation covers ConcatJSON's merge semantics for every arity:
// empty input, single blob, and 2- or 3-way merges of objects and arrays,
// including empty `{}`/`[]` members and `null` handling.
func TestJSONConcatenation(t *testing.T) {
    assert.Nil(t, ConcatJSON())
    assert.Equal(t, ConcatJSON([]byte(`{"id":1}`)), []byte(`{"id":1}`))
    assert.Equal(t, ConcatJSON([]byte(`{}`), []byte(`{}`)), []byte(`{}`))
    assert.Equal(t, ConcatJSON([]byte(`[]`), []byte(`[]`)), []byte(`[]`))
    assert.Equal(t, ConcatJSON([]byte(`{"id":1}`), []byte(`{"name":"Rachel"}`)), []byte(`{"id":1,"name":"Rachel"}`))
    assert.Equal(t, ConcatJSON([]byte(`[{"id":1}]`), []byte(`[{"name":"Rachel"}]`)), []byte(`[{"id":1},{"name":"Rachel"}]`))
    assert.Equal(t, ConcatJSON([]byte(`{}`), []byte(`{"name":"Rachel"}`)), []byte(`{"name":"Rachel"}`))
    assert.Equal(t, ConcatJSON([]byte(`[]`), []byte(`[{"name":"Rachel"}]`)), []byte(`[{"name":"Rachel"}]`))
    assert.Equal(t, ConcatJSON([]byte(`{"id":1}`), []byte(`{}`)), []byte(`{"id":1}`))
    assert.Equal(t, ConcatJSON([]byte(`[{"id":1}]`), []byte(`[]`)), []byte(`[{"id":1}]`))
    assert.Equal(t, ConcatJSON([]byte(`{}`), []byte(`{}`), []byte(`{}`)), []byte(`{}`))
    assert.Equal(t, ConcatJSON([]byte(`[]`), []byte(`[]`), []byte(`[]`)), []byte(`[]`))
    assert.Equal(t, ConcatJSON([]byte(`{"id":1}`), []byte(`{"name":"Rachel"}`), []byte(`{"age":32}`)), []byte(`{"id":1,"name":"Rachel","age":32}`))
    assert.Equal(t, ConcatJSON([]byte(`[{"id":1}]`), []byte(`[{"name":"Rachel"}]`), []byte(`[{"age":32}]`)), []byte(`[{"id":1},{"name":"Rachel"},{"age":32}]`))
    assert.Equal(t, ConcatJSON([]byte(`{}`), []byte(`{"name":"Rachel"}`), []byte(`{"age":32}`)), []byte(`{"name":"Rachel","age":32}`))
    assert.Equal(t, ConcatJSON([]byte(`[]`), []byte(`[{"name":"Rachel"}]`), []byte(`[{"age":32}]`)), []byte(`[{"name":"Rachel"},{"age":32}]`))
    assert.Equal(t, ConcatJSON([]byte(`{"id":1}`), []byte(`{}`), []byte(`{"age":32}`)), []byte(`{"id":1,"age":32}`))
    assert.Equal(t, ConcatJSON([]byte(`[{"id":1}]`), []byte(`[]`), []byte(`[{"age":32}]`)), []byte(`[{"id":1},{"age":32}]`))
    assert.Equal(t, ConcatJSON([]byte(`{"id":1}`), []byte(`{"name":"Rachel"}`), []byte(`{}`)), []byte(`{"id":1,"name":"Rachel"}`))
    assert.Equal(t, ConcatJSON([]byte(`[{"id":1}]`), []byte(`[{"name":"Rachel"}]`), []byte(`[]`)), []byte(`[{"id":1},{"name":"Rachel"}]`))

    // add test on null: nil and literal `null` blobs are dropped from the merge
    assert.Equal(t, ConcatJSON([]byte(nil)), []byte(nil))
    assert.Equal(t, ConcatJSON([]byte(`null`)), []byte(nil))
    assert.Equal(t, ConcatJSON([]byte(nil), []byte(`null`)), []byte(nil))
    assert.Equal(t, ConcatJSON([]byte(`{"id":null}`), []byte(`null`)), []byte(`{"id":null}`))
    assert.Equal(t, ConcatJSON([]byte(`{"id":null}`), []byte(`null`), []byte(`{"name":"Rachel"}`)), []byte(`{"id":null,"name":"Rachel"}`))
}

// SharedCounters is the embedded ("allOf") part of AggregationObject.
// NOTE: the colon inside `json:"Counter2:,omitempty"` is deliberate — it is
// the exact tag from issue 2350 and must survive round-tripping.
type SharedCounters struct {
    Counter1 int64 `json:"Counter1,omitempty"`
    Counter2 int64 `json:"Counter2:,omitempty"`
}

// AggregationObject combines an embedded struct with its own property, the
// shape produced by go-swagger for allOf compositions.
type AggregationObject struct {
    SharedCounters
    Count int64 `json:"Count,omitempty"`
}

// UnmarshalJSON decodes the embedded SharedCounters part and the object's own
// properties from the same raw payload, mirroring generated allOf code.
func (m *AggregationObject) UnmarshalJSON(raw []byte) error {
    // AO0
    var aO0 SharedCounters
    if err := ReadJSON(raw, &aO0); err != nil {
        return err
    }
    m.SharedCounters = aO0

    // now for regular properties
    var propsAggregationObject struct {
        Count int64 `json:"Count,omitempty"`
    }
    if err := ReadJSON(raw, &propsAggregationObject); err != nil {
        return err
    }
    m.Count = propsAggregationObject.Count

    return nil
}

// MarshalJSON marshals this object to a JSON structure
func (m AggregationObject) MarshalJSON() ([]byte, error) {
    _parts := make([][]byte, 0, 1)

    // Serialize the embedded part and the own properties separately, then let
    // ConcatJSON merge the two objects into one.
    aO0, err := WriteJSON(m.SharedCounters)
    if err != nil {
        return nil, err
    }
    _parts = append(_parts, aO0)

    // now for regular properties
    var propsAggregationObject struct {
        Count int64 `json:"Count,omitempty"`
    }
    propsAggregationObject.Count = m.Count

    jsonDataPropsAggregationObject, errAggregationObject := WriteJSON(propsAggregationObject)
    if errAggregationObject != nil {
        return nil, errAggregationObject
    }
    _parts = append(_parts, jsonDataPropsAggregationObject)
    return ConcatJSON(_parts...), nil
}

// TestIssue2350 round-trips an AggregationObject through both the generic
// WriteJSON/ReadJSON path and the hand-written MarshalJSON/UnmarshalJSON pair,
// asserting both paths agree — including for the tag containing a colon.
func TestIssue2350(t *testing.T) {
    obj := AggregationObject{Count: 290, SharedCounters: SharedCounters{Counter1: 304, Counter2: 948}}

    rtjson, err := WriteJSON(obj)
    require.NoError(t, err)

    otjson, err := obj.MarshalJSON()
    require.NoError(t, err)
    require.JSONEq(t, string(rtjson), string(otjson))

    var obj1 AggregationObject
    require.NoError(t, ReadJSON(rtjson, &obj1))
    require.Equal(t, obj, obj1)

    var obj11 AggregationObject
    require.NoError(t, obj11.UnmarshalJSON(rtjson))
    require.Equal(t, obj, obj11)

    // The "Counter2:" key (with trailing colon) must map back onto Counter2.
    jsons := `{"Counter1":123,"Counter2:":456,"Count":999}`
    var obj2 AggregationObject
    require.NoError(t, ReadJSON([]byte(jsons), &obj2))
    require.Equal(t, AggregationObject{SharedCounters: SharedCounters{Counter1: 123, Counter2: 456}, Count: 999}, obj2)
}
go-openapi/swag
json_test.go
GO
apache-2.0
8,055
/* * Copyright 2000-2009 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.history.core.tree; import com.intellij.history.core.revisions.Difference; import com.intellij.history.core.storage.Content; import com.intellij.history.core.storage.Stream; import java.io.IOException; import java.util.List; public class FileEntry extends Entry { private long myTimestamp; private boolean isReadOnly; private Content myContent; public FileEntry(int id, String name, Content content, long timestamp, boolean isReadOnly) { super(id, name); myTimestamp = timestamp; this.isReadOnly = isReadOnly; myContent = content; } public FileEntry(Stream s) throws IOException { super(s); myTimestamp = s.readLong(); isReadOnly = s.readBoolean(); myContent = s.readContent(); } @Override public void write(Stream s) throws IOException { super.write(s); s.writeLong(myTimestamp); s.writeBoolean(isReadOnly); s.writeContent(myContent); } @Override public long getTimestamp() { return myTimestamp; } @Override public boolean isReadOnly() { return isReadOnly; } @Override public void setReadOnly(boolean isReadOnly) { this.isReadOnly = isReadOnly; } @Override public Content getContent() { return myContent; } @Override public boolean hasUnavailableContent(List<Entry> entriesWithUnavailableContent) { if (myContent.isAvailable()) return false; entriesWithUnavailableContent.add(this); return true; } @Override public FileEntry copy() { return new FileEntry(myId, myName, myContent, 
myTimestamp, isReadOnly); } @Override public void changeContent(Content newContent, long newTimestamp) { myContent = newContent; myTimestamp = newTimestamp; } @Override public void collectDifferencesWith(Entry e, List<Difference> result) { if (getPath().equals(e.getPath()) && myContent.equals(e.getContent()) && isReadOnly == e.isReadOnly()) return; result.add(new Difference(true, this, e)); } @Override protected void collectCreatedDifferences(List<Difference> result) { result.add(new Difference(true, null, this)); } @Override protected void collectDeletedDifferences(List<Difference> result) { result.add(new Difference(true, this, null)); } }
jexp/idea2
platform/lvcs-impl/src/com/intellij/history/core/tree/FileEntry.java
Java
apache-2.0
2,874
/*!
 * ${copyright}
 */

sap.ui.define([
    "jquery.sap.global",
    "./Base",
    "sap/ui/fl/Utils"
], function(
    jQuery,
    Base,
    FlexUtils
) {
    "use strict";

    /**
     * Change handler for moving of an element.
     *
     * @alias sap.ui.fl.changeHandler.MoveControls
     * @author SAP SE
     * @version ${version}
     * @experimental Since 1.46
     */
    var MoveControls = { };

    // Defines object which contains constants used in the handler.
    // These aliases name the dependent-control slots stored on the change.
    MoveControls.SOURCE_ALIAS = "source";
    MoveControls.TARGET_ALIAS = "target";
    MoveControls.MOVED_ELEMENTS_ALIAS = "movedElements";

    // Validates that the change carries a well-formed move description:
    // non-empty movedElements plus resolvable source/target selectors with
    // aggregation names. Throws on the first violation.
    MoveControls._checkConditions = function (oChange, oModifier, oView, oAppComponent) {
        if (!oChange) {
            throw new Error("No change instance");
        }

        var oChangeContent = oChange.getContent();

        if (!oChangeContent || !oChangeContent.movedElements || oChangeContent.movedElements.length === 0) {
            throw new Error("Change format invalid");
        }
        if (!oChangeContent.source || !oChangeContent.source.selector) {
            throw new Error("No source supplied for move");
        }
        if (!oChangeContent.target || !oChangeContent.target.selector) {
            throw new Error("No target supplied for move");
        }
        if (!oModifier.bySelector(oChangeContent.source.selector, oAppComponent, oView)) {
            throw new Error("Move source parent not found");
        }
        if (!oModifier.bySelector(oChangeContent.target.selector, oAppComponent, oView)) {
            throw new Error("Move target parent not found");
        }
        if (!oChangeContent.source.selector.aggregation) {
            throw new Error("No source aggregation supplied for move");
        }
        if (!oChangeContent.target.selector.aggregation) {
            throw new Error("No target aggregation supplied for move");
        }
    };

    // Resolves one movedElements entry to its control instance.
    // Throws when the entry has no selector/id, no numeric targetIndex, or
    // when the control cannot be found — it never returns a falsy value.
    MoveControls._getElementControlOrThrowError = function(mMovedElement, oModifier, oAppComponent, oView) {
        if (!mMovedElement.selector && !mMovedElement.id) {
            throw new Error("Change format invalid - moveElements element has no id attribute");
        }
        if (typeof mMovedElement.targetIndex !== "number") {
            throw new Error("Missing targetIndex for element with id '" + mMovedElement.selector.id
                    + "' in movedElements supplied");
        }

        var oControl = oModifier.bySelector(mMovedElement.selector || mMovedElement.id, oAppComponent, oView);

        if (!oControl) {
            throw new Error("Control to move was not found. Id: '" + mMovedElement.selector.id + "'");
        }
        return oControl;
    };

    // Validates the caller-supplied specific change info used by
    // completeChangeContent: movedElements must exist, be non-empty, and each
    // entry needs an id plus numeric sourceIndex/targetIndex.
    MoveControls._checkCompleteChangeContentConditions = function(mSpecificChangeInfo) {
        if (!mSpecificChangeInfo.movedElements) {
            throw new Error("mSpecificChangeInfo.movedElements attribute required");
        }
        if (mSpecificChangeInfo.movedElements.length === 0) {
            throw new Error("MovedElements array is empty");
        }

        mSpecificChangeInfo.movedElements.forEach(function (mElement) {
            if (!mElement.id) {
                throw new Error("MovedControls element has no id attribute");
            }
            if (typeof (mElement.sourceIndex) !== "number") {
                throw new Error("SourceIndex attribute at MovedElements element is no number");
            }
            if (typeof (mElement.targetIndex) !== "number") {
                throw new Error("TargetIndex attribute at MovedElements element is no number");
            }
        });
    };

    // Normalizes the specific change info: resolves source/target parents,
    // builds their selectors (enriched with aggregation and control type) and
    // returns the condensed structure used to fill the change definition.
    // NOTE: mutates the input by deleting the publicAggregation properties.
    MoveControls._getSpecificChangeInfo = function(oModifier, mSpecificChangeInfo, oAppComponent) {

        delete mSpecificChangeInfo.source.publicAggregation;
        delete mSpecificChangeInfo.target.publicAggregation;

        var oSourceParent = mSpecificChangeInfo.source.parent || oModifier.bySelector(mSpecificChangeInfo.source.id, oAppComponent);
        var oTargetParent = mSpecificChangeInfo.target.parent || oModifier.bySelector(mSpecificChangeInfo.target.id, oAppComponent);

        var sSourceAggregation = mSpecificChangeInfo.source.aggregation;
        var sTargetAggregation = mSpecificChangeInfo.target.aggregation;

        var mAdditionalSourceInfo = {
            aggregation: mSpecificChangeInfo.source.aggregation,
            type: oModifier.getControlType(oSourceParent)
        };

        var mAdditionalTargetInfo = {
            aggregation: mSpecificChangeInfo.target.aggregation,
            type: oModifier.getControlType(oTargetParent)
        };

        var mSpecificInfo = {
            source : {
                id : oSourceParent.getId(),
                aggregation : sSourceAggregation,
                type : mAdditionalSourceInfo.type,
                selector : oModifier.getSelector(mSpecificChangeInfo.source.id, oAppComponent, mAdditionalSourceInfo)
            },
            target : {
                id : oTargetParent.getId(),
                aggregation : sTargetAggregation,
                type : mAdditionalTargetInfo.type,
                selector : oModifier.getSelector(mSpecificChangeInfo.target.id, oAppComponent, mAdditionalTargetInfo)
            },
            movedElements : mSpecificChangeInfo.movedElements
        };

        return mSpecificInfo;
    };

    /**
     * Moves an element from one aggregation to another.
     *
     * @param {sap.ui.fl.Change} oChange change object with instructions to be applied on the control map
     * @param {sap.ui.core.Control} oRelevantContainer control that matches the change selector for applying the change, which is the source of the move
     * @param {object} mPropertyBag - map of properties
     * @param {object} mPropertyBag.view - xml node representing a ui5 view
     * @param {string} [mPropertyBag.sourceAggregation] - name of the source aggregation. Overwrites the aggregation from the change. Can be provided by a custom ChangeHandler, that uses this ChangeHandler
     * @param {string} [mPropertyBag.targetAggregation] - name of the target aggregation. Overwrites the aggregation from the change. Can be provided by a custom ChangeHandler, that uses this ChangeHandler
     * @param {sap.ui.core.util.reflection.BaseTreeModifier} mPropertyBag.modifier - modifier for the controls
     * @param {sap.ui.core.UIComponent} mPropertyBag.appComponent - appComponent
     * @return {boolean} Returns true if change could be applied, otherwise undefined
     * @public
     * @function
     * @name sap.ui.fl.changeHandler.MoveControls#applyChange
     */
    MoveControls.applyChange = function(oChange, oRelevantContainer, mPropertyBag) {
        var oModifier = mPropertyBag.modifier;
        var oView = mPropertyBag.view;
        var oAppComponent = mPropertyBag.appComponent;

        this._checkConditions(oChange, oModifier, oView, oAppComponent);

        var oChangeContent = oChange.getContent();
        var sSourceAggregation = mPropertyBag.sourceAggregation || oChangeContent.source.selector.aggregation;
        var oTargetParent = oModifier.bySelector(oChangeContent.target.selector, oAppComponent, oView);
        var sTargetAggregation = mPropertyBag.targetAggregation || oChangeContent.target.selector.aggregation;

        var aRevertData = [];
        oChangeContent.movedElements.forEach(function(mMovedElement, iElementIndex) {
            var oMovedElement = this._getElementControlOrThrowError(mMovedElement, oModifier, oAppComponent, oView);
            var oSourceParent = oModifier.getParent(oMovedElement);
            var iInsertIndex = mMovedElement.targetIndex;
            // save the current index, sourceParent and sourceAggregation for revert
            var iIndex = oModifier.findIndexInParentAggregation(oMovedElement);
            if (iIndex > -1) {
                // mPropertyBag.sourceAggregation should always be used when available
                sSourceAggregation = mPropertyBag.sourceAggregation || oModifier.getParentAggregationName(oMovedElement, oSourceParent);
                // if iIndex === iInsertIndex the operation was already performed
                // in this case we need the sourceIndex that is saved in the change in order to revert it to the correct index
                if (iIndex === iInsertIndex) {
                    iIndex = mMovedElement.sourceIndex;
                }
                // unshift builds aRevertData in REVERSE element order; revertChange
                // relies on this together with its reverse() of movedElements.
                aRevertData.unshift({
                    index: iIndex,
                    aggregation: sSourceAggregation,
                    sourceParent: oSourceParent
                });
            }
            oModifier.removeAggregation(oSourceParent, sSourceAggregation, oMovedElement);
            oModifier.insertAggregation(oTargetParent, sTargetAggregation, oMovedElement, iInsertIndex, oView);
        }, this);
        oChange.setRevertData(aRevertData);

        return true;
    };

    /**
     * Reverts the Change MoveControls.
     *
     * @param {sap.ui.fl.Change} oChange change object with instructions to be applied on the control map
     * @param {sap.ui.core.Control} oRelevantContainer control that matches the change selector for applying the change, which is the source of the move
     * @param {object} mPropertyBag - map of properties
     * @param {object} mPropertyBag.view - xml node representing a ui5 view
     * @param {sap.ui.core.util.reflection.BaseTreeModifier} mPropertyBag.modifier - modifier for the controls
     * @param {sap.ui.core.UIComponent} mPropertyBag.appComponent - appComponent
     * @return {boolean} true - if change could be applied
     * @public
     * @function
     * @name sap.ui.fl.changeHandler.MoveControls#revertChange
     */
    MoveControls.revertChange = function(oChange, oRelevantContainer, mPropertyBag) {
        var oModifier = mPropertyBag.modifier;
        var oView = mPropertyBag.view;
        var oAppComponent = mPropertyBag.appComponent;

        this._checkConditions(oChange, oModifier, oView, oAppComponent);

        // we still have to set sourceParent and sourceAggregation initially from the change data,
        // because for XML changes this data can't be stored in the revertData yet.
        var oChangeContent = oChange.getContent();
        var oSourceParent = oModifier.bySelector(oChangeContent.source.selector, oAppComponent, oView);
        var sSourceAggregation = oChangeContent.source.selector.aggregation;
        var oTargetParent = oModifier.bySelector(oChangeContent.target.selector, oAppComponent, oView);
        var sTargetAggregation = oChangeContent.target.selector.aggregation;

        var aRevertData = oChange.getRevertData();
        // NOTE(review): reverse() mutates the movedElements array stored inside the
        // change content itself (oChangeContent === oChange.getContent()). The
        // reversed order matches aRevertData, which applyChange built via unshift.
        // Confirm the change object is not re-applied after a revert without the
        // array being restored.
        oChange.getContent().movedElements.reverse();
        oChangeContent.movedElements.forEach(function(mMovedElement, iElementIndex) {
            var oMovedElement = this._getElementControlOrThrowError(mMovedElement, oModifier, oAppComponent, oView);
            // NOTE(review): _getElementControlOrThrowError throws when the control is
            // missing, so this fallback branch is effectively unreachable.
            if (!oMovedElement) {
                FlexUtils.log.warning("Element to move not found");
                return;
            }

            var iInsertIndex = mMovedElement.sourceIndex;

            if (aRevertData) {
                // Prefer the exact position captured at apply time.
                oSourceParent = aRevertData[iElementIndex].sourceParent;
                sSourceAggregation = aRevertData[iElementIndex].aggregation;
                iInsertIndex = aRevertData[iElementIndex].index;
            }

            oModifier.removeAggregation(oTargetParent, sTargetAggregation, oMovedElement);
            oModifier.insertAggregation(oSourceParent, sSourceAggregation, oMovedElement, iInsertIndex, oView);
        }, this);

        oChange.resetRevertData();

        return true;
    };

    /**
     * Completes the change by adding change handler specific content.
     *
     * @param {sap.ui.fl.Change} oChange change object to be completed
     * @param {object} mSpecificChangeInfo as an empty object since no additional attributes are required for this operation
     * @param {object} mPropertyBag - map of properties
     * @param {sap.ui.core.UiComponent} mPropertyBag.appComponent component in which the change should be applied
     * @public
     * @function
     * @name sap.ui.fl.changeHandler.MoveControls#completeChangeContent
     */
    MoveControls.completeChangeContent = function(oChange, mSpecificChangeInfo, mPropertyBag) {
        this._checkCompleteChangeContentConditions(mSpecificChangeInfo);

        var oModifier = mPropertyBag.modifier;
        var oAppComponent = mPropertyBag.appComponent;

        var mChangeData = oChange.getDefinition();

        mSpecificChangeInfo = this._getSpecificChangeInfo(oModifier, mSpecificChangeInfo, oAppComponent);

        mChangeData.content = {
            movedElements : [],
            source : {
                selector : mSpecificChangeInfo.source.selector
            },
            target : {
                selector : mSpecificChangeInfo.target.selector
            }
        };

        mSpecificChangeInfo.movedElements.forEach(function(mElement) {
            var oElement = mElement.element || oModifier.bySelector(mElement.id, oAppComponent);

            mChangeData.content.movedElements.push({
                selector: oModifier.getSelector(oElement, oAppComponent),
                sourceIndex : mElement.sourceIndex,
                targetIndex : mElement.targetIndex
            });
        });

        // Register source, target and all moved controls as dependents so the
        // change is only applied when they are all present.
        oChange.addDependentControl(mSpecificChangeInfo.source.id, MoveControls.SOURCE_ALIAS, mPropertyBag);
        oChange.addDependentControl(mSpecificChangeInfo.target.id, MoveControls.TARGET_ALIAS, mPropertyBag);
        oChange.addDependentControl(mSpecificChangeInfo.movedElements.map(function (element) {
            return element.id;
        }), MoveControls.MOVED_ELEMENTS_ALIAS, mPropertyBag);
    };

    return MoveControls;
},
/* bExport= */true);
cschuff/openui5
src/sap.ui.fl/src/sap/ui/fl/changeHandler/MoveControls.js
JavaScript
apache-2.0
12,171
package com.sequenceiq.cloudbreak.telemetry.orchestrator; public enum TelemetryOrchestratorModule { TELEMETRY("telemetry"), FLUENT("fluent"), FILECOLLECTOR("filecollector"), NODESTATUS("nodestatus"), DATABUS("databus"), MONITORING("monitoring"), METERING("metering"); private final String value; TelemetryOrchestratorModule(String value) { this.value = value; } public String getValue() { return value; } }
hortonworks/cloudbreak
telemetry-common/src/main/java/com/sequenceiq/cloudbreak/telemetry/orchestrator/TelemetryOrchestratorModule.java
Java
apache-2.0
476
/**
 *
 */
package net.sf.jabb.util.stat;

import java.io.Serializable;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.time.temporal.ChronoUnit;
import java.time.temporal.IsoFields;
import java.time.temporal.TemporalUnit;
import java.time.temporal.WeekFields;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * The scheme of year, month, day, hour, minute.
 * Generates numeric and string keys that encode an aggregation period code
 * plus the truncated date/time fields of that period.
 * @author James Hu
 *
 */
public class DefaultAggregationPeriodKeyScheme implements HierarchicalAggregationPeriodKeyScheme, Serializable{
	private static final long serialVersionUID = -3654502940787144075L;

	// The hierarchy of aggregation periods this scheme generates keys for.
	protected AggregationPeriodHierarchy<?> aph;
	// When true, generated key numbers may be shortened — see staticGetKeyNumberLength().
	protected boolean enableCompression;

	protected DefaultAggregationPeriodKeyScheme(AggregationPeriodHierarchy<?> aggregationPeriodHierarchy, boolean enableCompression){
		// Fail fast: reject any period in the hierarchy whose unit/amount
		// combination is not supported (see validateAggregationPeriod below).
		aggregationPeriodHierarchy.codeMapping.values().stream()
			.map(node->node.aggregationPeriodAndAttachment.aggregationPeriod).forEach(ap->{
				validateAggregationPeriod(ap);
			});
		this.aph = aggregationPeriodHierarchy;
		this.enableCompression = enableCompression;
	}

	/**
	 * Validate aggregation period.
	 * It ensures that the following units can only have 1 as amount: YEAR_MONTH_DAY, WEEK_BASED_YEAR_WEEK, YEAR_WEEK_ISO, YEAR_WEEK_SUNDAY_START
	 * @param ap the aggregation period to be validated.
*/
static protected void validateAggregationPeriod(AggregationPeriod ap){
	switch(ap.unit){
		// Day- and week-granularity units do not support multi-unit amounts.
		case YEAR_MONTH_DAY:
		case WEEK_BASED_YEAR_WEEK:
		case YEAR_WEEK_ISO:
		case YEAR_WEEK_SUNDAY_START:
			if (ap.amount != 1){
				throw new IllegalArgumentException("Aggregation periods with " + ap.unit + " as unit can only have 1 as amount: " + ap.amount);
			}
			break;
		default:
			// do nothing
	}
}

// The String-code overloads below look up the AggregationPeriod from the
// hierarchy by its code and delegate to the AggregationPeriod overloads.

@Override
public String generateKey(String apCode, int year, int month, int dayOfMonth, int hour, int minute) {
	return generateKey(aph.get(apCode), year, month, dayOfMonth, hour, minute);
}

@Override
public long generateKeyNumber(String apCode, int year, int month, int dayOfMonth, int hour, int minute) {
	return generateKeyNumber(aph.get(apCode), year, month, dayOfMonth, hour, minute);
}

@Override
public String generateKey(AggregationPeriod ap, int year, int month, int dayOfMonth, int hour, int minute) {
	return staticGenerateKey(ap, year, month, dayOfMonth, hour, minute, enableCompression);
}

@Override
public long generateKeyNumber(AggregationPeriod ap, int year, int month, int dayOfMonth, int hour, int minute) {
	// NOTE(review): the numeric result is shifted right by 5 bits here but not in
	// the String key path — presumably compress() packs extra data into the low
	// 5 bits; confirm against the compress() implementation.
	return staticGenerateKeyNumber(ap, year, month, dayOfMonth, hour, minute, enableCompression) >> 5;
}

@Override
public String generateKey(String apCode, LocalDateTime dateTimeWithoutZone) {
	return generateKey(aph.get(apCode), dateTimeWithoutZone);
}

@Override
public long generateKeyNumber(String apCode, LocalDateTime dateTimeWithoutZone) {
	return generateKeyNumber(aph.get(apCode), dateTimeWithoutZone);
}

@Override
public String generateKey(AggregationPeriod ap, LocalDateTime dateTimeWithoutZone){
	return staticGenerateKey(ap, dateTimeWithoutZone, enableCompression);
}

@Override
public long generateKeyNumber(AggregationPeriod ap, LocalDateTime dateTimeWithoutZone){
	// See the note on the int-based generateKeyNumber overload about the >> 5 shift.
	return staticGenerateKeyNumber(ap, dateTimeWithoutZone, enableCompression) >> 5;
}

@Override
public String generateKey(AggregationPeriod ap, long keyNumber){
	return staticGenerateKey(ap, keyNumber, enableCompression);
}

@Override
public String generateKey(String apCode, long keyNumber){ return staticGenerateKey(aph.get(apCode), keyNumber, enableCompression); } @Override public int getKeyNumberLength(AggregationPeriod ap){ return staticGetKeyNumberLength(ap, enableCompression); } @Override public int getKeyNumberLength(String apCode){ return staticGetKeyNumberLength(aph.get(apCode), enableCompression); } static protected int staticGetKeyNumberLength(AggregationPeriod ap, boolean enableCompression){ switch(ap.unit){ case YEAR: case WEEK_BASED_YEAR: return 4; case YEAR_MONTH: return enableCompression && ap.amount > 1 ? 5 : 6; case WEEK_BASED_YEAR_WEEK: case YEAR_WEEK_ISO: case YEAR_WEEK_SUNDAY_START: return 6; case YEAR_MONTH_DAY: return 8; case YEAR_MONTH_DAY_HOUR: return enableCompression && ap.amount > 2 ? 9 : 10; case YEAR_MONTH_DAY_HOUR_MINUTE: return enableCompression && (ap.amount == 10 || ap.amount == 20 || ap.amount >= 6) ? 11 : 12; default: throw new IllegalArgumentException("Unknown aggregation period: " + ap); } } static protected long staticGenerateKeyNumber(AggregationPeriod ap, int year, int month, int dayOfMonth, int hour, int minute, boolean enableCompression) { switch(ap.unit){ case YEAR: return compress(enableCompression, ap, year - year % ap.amount, 4); case YEAR_MONTH: return compress(enableCompression, ap, year*100 + month - ((month - 1) % ap.amount), 6); case YEAR_MONTH_DAY: return compress(enableCompression, ap, year*10000 + month * 100 + dayOfMonth, 8); // amount must be 1 case YEAR_MONTH_DAY_HOUR: return compress(enableCompression, ap, year*1000000L + month * 10000 + dayOfMonth * 100 + hour - hour % ap.amount, 10); case YEAR_MONTH_DAY_HOUR_MINUTE: return compress(enableCompression, ap, year*100000000L + month * 1000000 + dayOfMonth * 10000 + hour * 100 + minute - minute % ap.amount, 12); default: return staticGenerateKeyNumber(ap, year, month, dayOfMonth, hour, minute, enableCompression); } } static protected long staticGenerateKeyNumber(AggregationPeriod ap, 
LocalDateTime dateTimeWithoutZone, boolean enableCompression){ int year; int week; switch(ap.unit){ case WEEK_BASED_YEAR: year = dateTimeWithoutZone.get(IsoFields.WEEK_BASED_YEAR); return compress(enableCompression, ap, year - year % ap.amount, 4); case WEEK_BASED_YEAR_WEEK: year = dateTimeWithoutZone.get(IsoFields.WEEK_BASED_YEAR); week = dateTimeWithoutZone.get(IsoFields.WEEK_OF_WEEK_BASED_YEAR); return compress(enableCompression, ap, year*100 + week, 6); // amount must be 1 case YEAR_WEEK_ISO: year = dateTimeWithoutZone.getYear(); week = dateTimeWithoutZone.get(WeekFields.ISO.weekOfYear()); return compress(enableCompression, ap, year*100 + week, 6); // amount must be 1 case YEAR_WEEK_SUNDAY_START: year = dateTimeWithoutZone.getYear(); week = dateTimeWithoutZone.get(WeekFields.SUNDAY_START.weekOfYear()); return compress(enableCompression, ap, year*100 + week, 6); // amount must be 1 default: return staticGenerateKeyNumber(ap, dateTimeWithoutZone.getYear(), dateTimeWithoutZone.getMonthValue(), dateTimeWithoutZone.getDayOfMonth(), dateTimeWithoutZone.getHour(), dateTimeWithoutZone.getMinute(), enableCompression); } } static protected String staticGenerateKey(AggregationPeriod ap, int year, int month, int dayOfMonth, int hour, int minute, boolean enableCompression) { return toString(ap.getCodeName(), staticGenerateKeyNumber(ap, year, month, dayOfMonth, hour, minute, enableCompression)); } static protected String staticGenerateKey(AggregationPeriod ap, LocalDateTime dateTimeWithoutZone, boolean enableCompression){ return toString(ap.getCodeName(), staticGenerateKeyNumber(ap, dateTimeWithoutZone, enableCompression)); } static protected String staticGenerateKey(AggregationPeriod ap, long number, boolean enableCompression){ int length = staticGetKeyNumberLength(ap, enableCompression); return toString(ap.getCodeName(), number, length); } /** * Retrieve the aggregation period information from the key * @param key the key starts with aggregation period code name * @return 
the aggregation period, or null if not found */ @Override public AggregationPeriod retrieveAggregationPeriod(String key){ int i = endOfAggregationPeriod(key); return i > 0 ? AggregationPeriod.parse(key.substring(0, i)) : null; } /** * Separate the part representing AggregationPeriod from the key * @param key the key starts with aggregation period code name * @return An array that the first element is the code name of the AggregationPeriod or null if something went wrong, * and the second element is the remaining part of the key */ @Override public String[] separateAggregationPeriod(String key){ return staticSeparateAggregationPeriod(key); } /** * Separate the part representing AggregationPeriod from the key * @param key the key starts with aggregation period code name * @return An array that the first element is the code name of the AggregationPeriod or null if something went wrong, * and the second element is the remaining part of the key */ static public String[] staticSeparateAggregationPeriod(String key){ int i = endOfAggregationPeriod(key); if ( i > 0){ return new String[] {key.substring(0, i), key.substring(i)}; }else{ return new String[] {null, key}; } } /** * Find the end position of the aggregation period code * @param key the key starts with aggregation period code name * @return the next position after the last character of aggregation period code name, or -1 if not found */ static protected int endOfAggregationPeriod(String key){ for (int i = key.length() - 1; i >= 0; i --){ if (!Character.isDigit(key.charAt(i))){ return i + 1; } } return -1; } /** * Get the start time (inclusive) of the time period represented by the key. * It accepts keys generated with any aggregation period. * The key always marks the start time so there is no time zone information needed as argument. * @param key the time period key * @return the start time (inclusive) of the time period. It should be interpreted as in the same time zone in which the key is generated. 
*/ @Override public LocalDateTime getStartTime(String key) { AggregationPeriod ap = retrieveAggregationPeriod(key); return getStartTime(ap, key, enableCompression); } static protected LocalDateTime getStartTime(AggregationPeriod ap, String key, boolean uncompress){ long k = Long.parseLong(key.substring(ap.getCodeName().length())); if (uncompress){ k = uncompress(ap, k); } int i = (int)k; StringBuilder sb; switch(ap.unit){ case YEAR: return LocalDateTime.of(i, 1, 1, 0, 0); case YEAR_MONTH: return LocalDateTime.of(i / 100, i % 100, 1, 0, 0); case YEAR_MONTH_DAY: return LocalDateTime.of(i / 10000, (i % 10000)/100, i % 100, 0, 0); case YEAR_MONTH_DAY_HOUR: return LocalDateTime.of((int)(k/1000000), (int)(k % 1000000)/10000, (int)(k % 10000) / 100, (int)(k % 100), 0); case YEAR_MONTH_DAY_HOUR_MINUTE: return LocalDateTime.of((int)(k/100000000L), (int)((k % 100000000L)/1000000), (int)(k % 1000000)/10000, (int)(k % 10000) / 100, (int)(k % 100)); case WEEK_BASED_YEAR: sb = new StringBuilder(); sb.append(i); // year sb.append("-W01-1"); return LocalDateTime.parse(sb.toString(), DateTimeFormatter.ISO_WEEK_DATE); case WEEK_BASED_YEAR_WEEK: sb = new StringBuilder(); sb.append(i/100); // year sb.append(toString("-W", i % 100, 2)); // week; sb.append("-1"); return LocalDate.parse(sb.toString(), DateTimeFormatter.ISO_WEEK_DATE).atTime(0, 0); case YEAR_WEEK_ISO: return LocalDateTime.of(i/100, 1, 1, 0, 0).with(WeekFields.ISO.weekOfYear(), i % 100); case YEAR_WEEK_SUNDAY_START: return LocalDateTime.of(i/100, 1, 1, 0, 0).with(WeekFields.SUNDAY_START.weekOfYear(), i % 100); default: throw new IllegalArgumentException("Unknown aggregation period unit: " + ap.unit); } } @Override public ZonedDateTime getEndTime(String key) { AggregationPeriod ap = retrieveAggregationPeriod(key); return getEndTime(ap, key, enableCompression); } static protected ZonedDateTime getEndTime(AggregationPeriod ap, String key, boolean enableCompression) { ZonedDateTime thisStart = ZonedDateTime.of(getStartTime(ap, 
key, enableCompression), ap.zone); ZonedDateTime nextStart = thisStart.plus(ap.amount, ap.unit.getTemporalUnit()); return nextStart; } /** * Iterate along time to find next key * @param ap the aggregation period * @param key the key at the start point * @param step step to move forward (if positive) or backward (if negative) * @param unit unit of the step * @param zone the time zone * @param enableCompression whether to apply compression or not * @return the first key found that is different form the key at the start point */ protected static String findNextKey(AggregationPeriod ap, String key, int step, TemporalUnit unit, ZoneId zone, boolean enableCompression){ for (ZonedDateTime time = ZonedDateTime.of(getStartTime(ap, key, enableCompression), zone).plus(step, unit);; time = time.plus(step, unit)){ String nextKey = staticGenerateKey(ap, time.toLocalDateTime(), enableCompression); if (!nextKey.equals(key)){ return nextKey; } } } @Override public String previousKey(String key){ AggregationPeriod ap = retrieveAggregationPeriod(key); return previousKey(ap, key, enableCompression); } static protected String previousKey(AggregationPeriod ap, String key, boolean enableCompression){ String apCode; int year; int week; switch(ap.unit){ case WEEK_BASED_YEAR: case YEAR: // this is a performance optimization apCode = ap.getCodeName(); year = Integer.parseInt(key.substring(apCode.length())); if (enableCompression){ year = (int)uncompress(ap, year); } year -= ap.amount; return toString(enableCompression, ap, year, 4); case YEAR_WEEK_ISO: case YEAR_WEEK_SUNDAY_START: // if the week is the first of the year, we have to iterate through days apCode = ap.getCodeName(); week = Integer.parseInt(key.substring(apCode.length() + 4, key.length())); if (enableCompression){ week = (int)uncompress(ap, week); } if (week <= 1){ return findNextKey(ap, key, -1, ChronoUnit.DAYS, ap.zone, enableCompression); } // else fall down case WEEK_BASED_YEAR_WEEK: // fall down default: // it is safe to 
simply jump to the start of previous period ZonedDateTime thisStart = ZonedDateTime.of(getStartTime(ap, key, enableCompression), ap.zone); ZonedDateTime previousStart = thisStart.plus(-ap.amount, ap.unit.getTemporalUnit()); return staticGenerateKey(ap, previousStart.toLocalDateTime(), enableCompression); } } @Override public String nextKey(String key) { AggregationPeriod ap = retrieveAggregationPeriod(key); return nextKey(ap, key, enableCompression); } static protected String nextKey(AggregationPeriod ap, String key, boolean enableCompression) { String apCode; int year; int week; switch(ap.unit){ case WEEK_BASED_YEAR: case YEAR: // this is a performance optimization apCode = ap.getCodeName(); year = Integer.parseInt(key.substring(apCode.length())); if (enableCompression){ year = (int)uncompress(ap, year); } year += ap.amount; return toString(enableCompression, ap, year, 4); case YEAR_WEEK_ISO: case YEAR_WEEK_SUNDAY_START: // if the week is the first of the year, we have to iterate through days apCode = ap.getCodeName(); week = Integer.parseInt(key.substring(apCode.length() + 4, key.length())); if (enableCompression){ week = (int)uncompress(ap, week); } if (week >= 51){ return findNextKey(ap, key, 1, ChronoUnit.DAYS, ap.zone, enableCompression); } // else fall down case WEEK_BASED_YEAR_WEEK: // fall down default: // it is safe to simply jump to the start of previous period ZonedDateTime thisStart = ZonedDateTime.of(getStartTime(ap, key, enableCompression), ap.zone); ZonedDateTime nextStart = thisStart.plus(ap.amount, ap.unit.getTemporalUnit()); return staticGenerateKey(ap, nextStart.toLocalDateTime(), enableCompression); } } @Override public String upperLevelKey(String key) { AggregationPeriod ap = retrieveAggregationPeriod(key); Set<AggregationPeriod> uaps = aph.getUpperLevelAggregationPeriods(ap); if (uaps.size() > 0){ AggregationPeriod uap = uaps.iterator().next(); return generateKey(uap, getStartTime(ap, key, enableCompression)); }else{ return null; } } 
@Override public List<String> upperLevelKeys(String key) { AggregationPeriod ap = retrieveAggregationPeriod(key); LocalDateTime startTime = getStartTime(ap, key, enableCompression); Set<AggregationPeriod> uaps = aph.getUpperLevelAggregationPeriods(ap); return uaps.stream().map(p->generateKey(p, startTime)).collect(Collectors.toList()); } @Override public String firstLowerLevelKey(String key) { AggregationPeriod ap = retrieveAggregationPeriod(key); AggregationPeriod lap = aph.getLowerLevelAggregationPeriod(ap); return generateKey(lap, getStartTime(ap, key, enableCompression)); } @Override public String toString(){ return super.toString(); } /** * Take a non-negative number, possibly transform it to a smaller number according to the aggregation period, * then convert the result to a fixed-length string format. For internal usage. * @param useCompression use compression or not * @param period the aggregation period * @param originalNumber the number to be converted, must not be negative * @param numberLength required length of the number in the returned string, * if the number is shorter than this length, it will be left padded with '0's * @return the string representing the number with possible leading zeros. * Length of the string may be greater than numberLength+prefix.length() if the number is too large to be fitted within numberLength. * Length of the string may be greater than numberLength+prefix.length() if the transformation of the number makes its length shorter. 
*/ protected static String toString(boolean useCompression, AggregationPeriod period, long originalNumber, int numberLength){ long number; if (!useCompression || period.amount == 1){ number = originalNumber; }else{ number = compress(period, originalNumber); numberLength += (int)number & 0x1F; number >>>= 5; } String str = Long.toString(number); int len = str.length(); StringBuilder sb = new StringBuilder(); sb.append(period.getCodeName()); for(int i = numberLength; i > len; i--){ sb.append('0'); } sb.append(str); return sb.toString(); } /** * Convert a non-negative number to a fixed-length string format. For internal usage. * @param prefix the prefix to be appended * @param number the potentially compressed form of the key shifted 5 bits towards higher end, and the delta of key length in the lower 5 bits * @return the string representing the number with possible leading zeros. * Length of the string may be greater than numberLength+prefix.length() if the number is too large to be fitted within numberLength. */ protected static String toString(String prefix, long number){ int numberLength = (int)number & 0x1F; number >>>= 5; String str = Long.toString(number); int len = str.length(); StringBuilder sb = new StringBuilder(); sb.append(prefix); for(int i = numberLength; i > len; i--){ sb.append('0'); } sb.append(str); return sb.toString(); } /** Convert a non-negative number to a fixed-length string format. For internal usage. * @param prefix the prefix to be appended * @param nonNegativeNumber the number to be converted, must not be negative * @param numberLength required length of the number in the returned string, * if the number is shorter than this length, it will be left padded with '0's * @return the string representing the number with possible leading zeros. * Length of the string may be greater than numberLength+prefix.length() if the number is too large to be fitted within numberLength. 
*/ protected static String toString(String prefix, long nonNegativeNumber, int numberLength){ String str = Long.toString(nonNegativeNumber); int len = str.length(); StringBuilder sb = new StringBuilder(); sb.append(prefix); for(int i = numberLength; i > len; i--){ sb.append('0'); } sb.append(str); return sb.toString(); } protected static long compress(boolean enableCompression, AggregationPeriod ap, long x, int length){ if (enableCompression){ long compressed = compress(ap, x); return (compressed & 0xFFFFFFFFFFFFFFE0L) | ((((int)compressed & 0x1F) + length) & 0x1F); }else{ return (x << 5) | (length & 0x1F); } } /** * Compress the number part of the key * @param ap the aggregation period * @param x the original form of the key * @return the potentially compressed form of the key shifted 5 bits towards higher end, and the delta of key length in the lower 5 bits */ protected static long compress(AggregationPeriod ap, long x){ int lengthDelta = 0; int amount = ap.amount; long compressed = x; if (amount != 1){ switch(ap.unit){ case YEAR: case WEEK_BASED_YEAR: compressed = x / amount; break; case YEAR_MONTH: compressed = (x / 100) * 10 + ((x % 100) - 1) / amount; // converted month to 0 based and reduced 1 digit lengthDelta --; break; case YEAR_MONTH_DAY_HOUR: if (amount == 2){ compressed = x / amount; }else{ compressed = (x / 100) * 10 + (x % 100) / amount; // reduced 1 digit (24/3=8) lengthDelta --; } break; case YEAR_MONTH_DAY_HOUR_MINUTE: if (amount == 2 || amount == 4 || amount ==5){ compressed = x / amount; }else if (amount == 10 || amount == 20){ compressed = x / amount; lengthDelta --; }else if (amount >= 6){ compressed = (x / 100) * 10 + (x % 100) / amount; // reduced 1 digit (60/6=10) lengthDelta --; }else{ // < 6 compressed = (x / 100) * 100 + (x % 100) / amount; } break; case YEAR_MONTH_DAY: case WEEK_BASED_YEAR_WEEK: case YEAR_WEEK_ISO: case YEAR_WEEK_SUNDAY_START: default: // do nothing } } return (compressed << 5) | (lengthDelta & 0x1F); } /** * Uncompress 
the number part of the key * @param ap the aggregation period * @param x potentially compressed form of the number part * @return the original form of the number part */ protected static long uncompress(AggregationPeriod ap, long x){ if (ap.amount == 1){ return x; } int amount = ap.amount; switch(ap.unit){ case YEAR: case WEEK_BASED_YEAR: return x * amount; case YEAR_MONTH: return (x / 10) * 100 + 1 + (x % 10) * amount; case YEAR_MONTH_DAY_HOUR: if (amount == 2){ return x * amount; }else{ return (x / 10) * 100 + (x % 10) * amount; } case YEAR_MONTH_DAY_HOUR_MINUTE: if (amount == 2 || amount == 4 || amount ==5 || amount == 10 || amount == 20){ return x * amount; }else if (amount >= 6){ return (x / 10) * 100 + (x % 10) * amount; }else{ // < 6 return (x / 100) * 100 + (x % 100) * amount; } case YEAR_MONTH_DAY: case WEEK_BASED_YEAR_WEEK: case YEAR_WEEK_ISO: case YEAR_WEEK_SUNDAY_START: default: return x; } } /** * Create a hierarchical instance * Compression in the created instance will be disabled. * @param aph the hierarchy of aggregation periods * @return the HierarchicalAggregationPeriodKeyScheme */ static public HierarchicalAggregationPeriodKeyScheme newInstance(AggregationPeriodHierarchy<?> aph){ return new DefaultAggregationPeriodKeyScheme(aph, false); } /** * Create a hierarchical instance * @param aph the hierarchy of aggregation periods * @param enableCompression Whether the keys should be compressed to make them more compact * @return the HierarchicalAggregationPeriodKeyScheme */ static public HierarchicalAggregationPeriodKeyScheme newInstance(AggregationPeriodHierarchy<?> aph, boolean enableCompression){ return new DefaultAggregationPeriodKeyScheme(aph, enableCompression); } /** * Create an non-hierarchical instance specific to an aggregation period. * Compression in the created instance will be disabled. 
* @param ap the aggregation period * @return a AggregationPeriodKeyScheme specific to the aggregation period */ static public AggregationPeriodKeyScheme newInstance(AggregationPeriod ap){ return newInstance(ap, false); } /** * Create an non-hierarchical instance specific to an aggregation period. * @param ap the aggregation period * @param enableCompression Whether the keys should be compressed to make them more compact * @return a AggregationPeriodKeyScheme specific to the aggregation period */ static public AggregationPeriodKeyScheme newInstance(AggregationPeriod ap, boolean enableCompression){ validateAggregationPeriod(ap); return new AggregationPeriodKeyScheme(){ @Override public LocalDateTime getStartTime(String key) { return DefaultAggregationPeriodKeyScheme.getStartTime(ap, key, enableCompression); } @Override public ZonedDateTime getEndTime(String key) { return DefaultAggregationPeriodKeyScheme.getEndTime(ap, key, enableCompression); } @Override public String previousKey(String key) { return DefaultAggregationPeriodKeyScheme.previousKey(ap, key, enableCompression); } @Override public String generateKey(int year, int month, int dayOfMonth, int hour, int minute) { return DefaultAggregationPeriodKeyScheme.staticGenerateKey(ap, year, month, dayOfMonth, hour, minute, enableCompression); } @Override public long generateKeyNumber(int year, int month, int dayOfMonth, int hour, int minute) { return DefaultAggregationPeriodKeyScheme.staticGenerateKeyNumber(ap, year, month, dayOfMonth, hour, minute, enableCompression) >>> 5; } @Override public String generateKey(LocalDateTime dateTimeWithoutZone) { return DefaultAggregationPeriodKeyScheme.staticGenerateKey(ap, dateTimeWithoutZone, enableCompression); } @Override public long generateKeyNumber(LocalDateTime dateTimeWithoutZone) { return DefaultAggregationPeriodKeyScheme.staticGenerateKeyNumber(ap, dateTimeWithoutZone, enableCompression) >>> 5; } @Override public String[] separateAggregationPeriod(String key) { return 
DefaultAggregationPeriodKeyScheme.staticSeparateAggregationPeriod(key); } @Override public int getKeyNumberLength(){ return DefaultAggregationPeriodKeyScheme.staticGetKeyNumberLength(ap, enableCompression); } @Override public String generateKey(long keyNumber) { return DefaultAggregationPeriodKeyScheme.staticGenerateKey(ap, keyNumber, enableCompression); } }; } }
james-hu/jabb-core-java8
src/main/java/net/sf/jabb/util/stat/DefaultAggregationPeriodKeyScheme.java
Java
apache-2.0
26,905
/*
 * Copyright 2016 TomeOkin
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.tomeokin.lspush.biz.base;

import android.support.v7.app.AppCompatActivity;

import com.tomeokin.lspush.LsPushApplication;
import com.tomeokin.lspush.injection.component.AppComponent;
import com.tomeokin.lspush.injection.module.ActivityModule;

/**
 * Common base class for the app's activities, exposing the dependency-injection
 * entry points to subclasses.
 */
public abstract class BaseActivity extends AppCompatActivity {

    /**
     * Obtain the application-scoped injection component.
     * @return the {@link AppComponent} held by the {@link LsPushApplication}
     */
    protected AppComponent getAppComponent() {
        LsPushApplication application = (LsPushApplication) getApplication();
        return application.appComponent();
    }

    /**
     * Build an injection module bound to this activity instance.
     * @return a new {@link ActivityModule} wrapping this activity
     */
    protected ActivityModule getActivityModule() {
        return new ActivityModule(this);
    }
}
TomeOkin/LsPush
app/src/main/java/com/tomeokin/lspush/biz/base/BaseActivity.java
Java
apache-2.0
1,139
/* * Copyright 2002-2019 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.messaging.rsocket; import java.util.function.Consumer; import org.junit.jupiter.api.Test; import org.springframework.core.ReactiveAdapterRegistry; import org.springframework.core.codec.ByteArrayDecoder; import org.springframework.core.codec.ByteArrayEncoder; import org.springframework.core.codec.ByteBufferDecoder; import org.springframework.core.codec.ByteBufferEncoder; import org.springframework.core.codec.CharSequenceEncoder; import org.springframework.core.codec.DataBufferDecoder; import org.springframework.core.codec.DataBufferEncoder; import org.springframework.core.codec.StringDecoder; import org.springframework.util.AntPathMatcher; import org.springframework.util.SimpleRouteMatcher; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; /** * Unit tests for {@link RSocketStrategies}. 
* @author Rossen Stoyanchev * @since 5.2 */ class DefaultRSocketStrategiesTests { @Test void defaultSettings() { RSocketStrategies strategies = RSocketStrategies.create(); assertThat(strategies.encoders()).hasSize(4).hasOnlyElementsOfTypes( CharSequenceEncoder.class, ByteArrayEncoder.class, ByteBufferEncoder.class, DataBufferEncoder.class); assertThat(strategies.decoders()).hasSize(4).hasOnlyElementsOfTypes( StringDecoder.class, ByteArrayDecoder.class, ByteBufferDecoder.class, DataBufferDecoder.class); assertThat(strategies.routeMatcher()).isNotNull(); assertThat(strategies.metadataExtractor()).isNotNull(); assertThat(strategies.reactiveAdapterRegistry()).isNotNull(); assertThat(((DefaultMetadataExtractor) strategies.metadataExtractor()).getDecoders()).hasSize(4); } @Test void explicitValues() { SimpleRouteMatcher matcher = new SimpleRouteMatcher(new AntPathMatcher()); DefaultMetadataExtractor extractor = new DefaultMetadataExtractor(); ReactiveAdapterRegistry registry = new ReactiveAdapterRegistry(); RSocketStrategies strategies = RSocketStrategies.builder() .encoders(encoders -> { encoders.clear(); encoders.add(new ByteArrayEncoder()); }) .decoders(decoders -> { decoders.clear(); decoders.add(new ByteArrayDecoder()); }) .routeMatcher(matcher) .metadataExtractor(extractor) .reactiveAdapterStrategy(registry) .build(); assertThat(strategies.encoders()).hasSize(1); assertThat(strategies.decoders()).hasSize(1); assertThat(strategies.routeMatcher()).isSameAs(matcher); assertThat(strategies.metadataExtractor()).isSameAs(extractor); assertThat(strategies.reactiveAdapterRegistry()).isSameAs(registry); } @Test void copyConstructor() { RSocketStrategies strategies1 = RSocketStrategies.create(); RSocketStrategies strategies2 = strategies1.mutate().build(); assertThat(strategies1.encoders()).hasSameElementsAs(strategies2.encoders()); assertThat(strategies1.decoders()).hasSameElementsAs(strategies2.decoders()); 
assertThat(strategies1.routeMatcher()).isSameAs(strategies2.routeMatcher()); assertThat(strategies1.metadataExtractor()).isSameAs(strategies2.metadataExtractor()); assertThat(strategies1.reactiveAdapterRegistry()).isSameAs(strategies2.reactiveAdapterRegistry()); } @Test @SuppressWarnings("unchecked") void applyMetadataExtractors() { Consumer<MetadataExtractorRegistry> consumer = mock(Consumer.class); RSocketStrategies.builder().metadataExtractorRegistry(consumer).build(); verify(consumer, times(1)).accept(any()); } }
spring-projects/spring-framework
spring-messaging/src/test/java/org/springframework/messaging/rsocket/DefaultRSocketStrategiesTests.java
Java
apache-2.0
4,253
// Copyright 2014 The Noda Time Authors. All rights reserved.
// Use of this source code is governed by the Apache License 2.0,
// as found in the LICENSE.txt file.

using System;
using System.Collections.Generic;
using System.Globalization;
using NodaTime.Calendars;
using NUnit.Framework;

namespace NodaTime.Test.Calendars
{
    /// <summary>
    /// Tests for UmAlQuraYearMonthDayCalculator, validating it against the BCL's
    /// UmAlQuraCalendar (obtained via reflection). All data-driven tests are skipped
    /// when the calculator is not supported on the current platform: the
    /// TestCaseSource collection is empty in that case, so NUnit generates no cases.
    /// </summary>
    [TestFixture]
    public class UmAlQuraYearMonthDayCalculatorTest
    {
        // BCL reference implementation; null when the UmAlQuraCalendar type is unavailable.
        private static readonly Calendar BclCalendar = GetBclCalendar();

        // Calculator under test; null on unsupported platforms (tests guarded via Supported).
        private static readonly UmAlQuraYearMonthDayCalculator Calculator =
            UmAlQuraYearMonthDayCalculator.IsSupported ? new UmAlQuraYearMonthDayCalculator() : null;

        // Horrible way to conditionalize tests at execution time...
        // Empty on unsupported platforms, so [TestCaseSource(nameof(Supported))] yields no test cases.
        private static readonly IEnumerable<string> Supported =
            UmAlQuraYearMonthDayCalculator.IsSupported ? new[] { "(Supported)" } : new string[0];

        /// <summary>
        /// Loads System.Globalization.UmAlQuraCalendar reflectively; returns null when
        /// the type is missing or cannot be instantiated.
        /// </summary>
        private static Calendar GetBclCalendar()
        {
            // Always get it with reflection in the test, just for simplicity.
            try
            {
                var type = typeof(Calendar).Assembly.GetType("System.Globalization.UmAlQuraCalendar");
                if (type == null)
                {
                    return null;
                }
                return (Calendar) Activator.CreateInstance(type);
            }
            catch
            {
                // Any reflection/instantiation failure is treated as "not available".
                return null;
            }
        }

        [Test]
        public void CannotInstantiateOnUnsupportedPlatform()
        {
            // Only meaningful when unsupported; otherwise the test is a no-op and passes.
            if (!UmAlQuraYearMonthDayCalculator.IsSupported)
            {
                Assert.Throws<NotSupportedException>(() => new UmAlQuraYearMonthDayCalculator());
            }
        }

        /// <summary>Days-in-month must match the BCL for every supported year/month.</summary>
        [Test, TestCaseSource(nameof(Supported))]
        public void GetDaysInMonth(string ignored)
        {
            for (int year = Calculator.MinYear; year <= Calculator.MaxYear; year++)
            {
                for (int month = 1; month <= 12; month++)
                {
                    Assert.AreEqual(BclCalendar.GetDaysInMonth(year, month),
                        Calculator.GetDaysInMonth(year, month),
                        "year={0}; month={1}", year, month);
                }
            }
        }

        /// <summary>Days-in-year must match the BCL for every supported year.</summary>
        [Test, TestCaseSource(nameof(Supported))]
        public void GetDaysInYear(string ignored)
        {
            for (int year = Calculator.MinYear; year <= Calculator.MaxYear; year++)
            {
                Assert.AreEqual(BclCalendar.GetDaysInYear(year),
                    Calculator.GetDaysInYear(year),
                    "year={0}", year);
            }
        }

        /// <summary>Leap-year classification must match the BCL for every supported year.</summary>
        [Test, TestCaseSource(nameof(Supported))]
        public void IsLeapYear(string ignored)
        {
            for (int year = Calculator.MinYear; year <= Calculator.MaxYear; year++)
            {
                Assert.AreEqual(BclCalendar.IsLeapYear(year),
                    Calculator.IsLeapYear(year),
                    "year={0}", year);
            }
        }

        /// <summary>
        /// Start-of-year in days since the 1970-01-01 epoch must match the BCL-derived value.
        /// </summary>
        [Test, TestCaseSource(nameof(Supported))]
        public void GetStartOfYearInDays(string ignored)
        {
            // This exercises CalculateStartOfYearInDays too.
            for (int year = Calculator.MinYear; year <= Calculator.MaxYear; year++)
            {
                var bcl = new DateTime(year, 1, 1, BclCalendar);
                var days = (bcl - new DateTime(1970, 1, 1)).Days;
                Assert.AreEqual(days, Calculator.GetStartOfYearInDays(year), "year={0}", year);
            }
        }

        /// <summary>
        /// Walks every day in the supported range by incrementing days-since-epoch and checks
        /// the decoded (year, month, day) against the expected sequential value.
        /// </summary>
        [Test, TestCaseSource(nameof(Supported))]
        public void GetYearMonthDay_DaysSinceEpoch(string ignored)
        {
            int daysSinceEpoch = Calculator.GetStartOfYearInDays(Calculator.MinYear);
            for (int year = Calculator.MinYear; year <= Calculator.MaxYear; year++)
            {
                for (int month = 1; month <= 12; month++)
                {
                    for (int day = 1; day <= Calculator.GetDaysInMonth(year, month); day++)
                    {
                        var actual = Calculator.GetYearMonthDay(daysSinceEpoch);
                        var expected = new YearMonthDay(year, month, day);
                        Assert.AreEqual(expected, actual, "daysSinceEpoch={0}", daysSinceEpoch);
                        daysSinceEpoch++;
                    }
                }
            }
        }

        /// <summary>
        /// Walks every day of every supported year by day-of-year and checks the decoded
        /// (year, month, day) against the expected sequential value.
        /// </summary>
        [Test, TestCaseSource(nameof(Supported))]
        public void GetYearMonthDay_YearAndDayOfYear(string ignored)
        {
            for (int year = Calculator.MinYear; year <= Calculator.MaxYear; year++)
            {
                int dayOfYear = 1;
                for (int month = 1; month <= 12; month++)
                {
                    for (int day = 1; day <= Calculator.GetDaysInMonth(year, month); day++)
                    {
                        var actual = Calculator.GetYearMonthDay(year, dayOfYear);
                        var expected = new YearMonthDay(year, month, day);
                        Assert.AreEqual(expected, actual, "year={0}; dayOfYear={1}", year, dayOfYear);
                        dayOfYear++;
                    }
                }
            }
        }

        /// <summary>
        /// Checks that the first day of each month has the expected day-of-year, i.e. the
        /// cumulative month lengths are consistent.
        /// </summary>
        [Test, TestCaseSource(nameof(Supported))]
        public void GetDaysFromStartOfYearToStartOfMonth(string ignored)
        {
            for (int year = Calculator.MinYear; year <= Calculator.MaxYear; year++)
            {
                int dayOfYear = 1;
                for (int month = 1; month <= 12; month++)
                {
                    // This delegates to GetDaysFromStartOfYearToStartOfMonth (which is protected).
                    Assert.AreEqual(dayOfYear, Calculator.GetDayOfYear(new YearMonthDay(year, month, 1)),
                        "year={0}; month={1}", year, month);
                    dayOfYear += Calculator.GetDaysInMonth(year, month);
                }
            }
        }
    }
}
BenJenkinson/nodatime
src/NodaTime.Test/Calendars/UmAlQuraYearMonthDayCalculatorTest.cs
C#
apache-2.0
5,786
/** * gprdb - a simple and flexible framework for gene-pair relation database construction * Copyright 2015 gprdb developers * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package jp.ac.tohoku.ecei.sb.gprdb.dataset.relation; import jp.ac.tohoku.ecei.sb.gprdb.bean.Identifiable; import org.openrdf.model.IRI; import org.openrdf.model.impl.SimpleIRI; import javax.validation.constraints.NotNull; /** * @author Shu Tadaka */ class AbstractIdentifiableImpl<T extends GenePairRelationConfig.IdentifiableConfig> implements Identifiable { protected final T config; public AbstractIdentifiableImpl(@NotNull T config) { this.config = config; } @Override public String getId() { return this.config.getId(); } @Override public String getName() { return this.config.getName(); } @Override public String getShortName() { return this.config.getShortName(); } @Override public IRI getUri() { return new SimpleIRI(this.config.getUri()); } @Override public boolean equals(Object other) { // if (this == other) { return true; } if (other == null || getClass() != other.getClass()) { return false; } // AbstractIdentifiableImpl<?> that = (AbstractIdentifiableImpl<?>)other; return this.config.getId().equals(that.config.getId()); } @Override public int hashCode() { return this.config.getId().hashCode(); } }
east301/gprdb-experimental
gprdb-impl/src/main/java/jp/ac/tohoku/ecei/sb/gprdb/dataset/relation/AbstractIdentifiableImpl.java
Java
apache-2.0
2,036
// Color/image skin definition for the app ("blue" theme).
//
// Title bars: for every section you may either set a bar IMAGE
// (e.g. "imgs/wpapp.png", size 320x45) or a bar COLOR (e.g. "#ff840a").
// When an image is used, leave the color empty; when a text title with a
// color is used, leave the image empty.
var skin = {

	// ---- recent news title bar ----
	"RECENT_BAR_IMAGE" : "",
	"RECENT_BAR_COLOR" : "#0e8dd0",

	// ---- category title bar ----
	"CATEG_BAR_IMAGE" : "",
	"CATEG_BAR_COLOR" : "#0e8dd0",
	"CATEG_DASHBOARD_BGCOLOR" : "#333",

	// ---- blog post title bar ----
	"POST_BAR_IMAGE" : "",
	"POST_BAR_COLOR" : "#0e8dd0",

	// ---- about tab bar ----
	"ABOUT_BAR_IMAGE" : "",
	"ABOUT_BAR_COLOR" : "#0e8dd0",

	// ---- web view bar ----
	"WEBVIEW_BAR_IMAGE" : "",
	"WEBVIEW_BAR_COLOR" : "#0e8dd0",

	// ---- twitter bar ----
	"TWITTER_BAR_IMAGE" : "",
	"TWITTER_BAR_COLOR" : "#0e8dd0",

	// ---- pages bar ----
	"PAGES_BAR_IMAGE" : "",
	"PAGES_BAR_COLOR" : "#0e8dd0",

	// ---- table view: recent news ----
	"RECENT_TV_BGCOLOR" : "#eee",
	"RECENT_TV_BGCOLOR_ALT": "#eee",
	"RECENT_TV_SEPARATOR_COLOR": "#eee",
	"RECENT_TV_TITLE_COLOR": "#23589a",
	"RECENT_TV_META_COLOR" : "#6c6c6c",

	// ---- table view: categories ----
	"CATEG_TV_BGCOLOR" : "#eee",
	"CATEG_TV_BGCOLOR_ALT": "#eee",
	"CATEG_TV_SEPARATOR_COLOR": "#eee",
	"CATEG_TV_TITLE_COLOR": "#23589a",
	"CATEG_TV_META_COLOR" : "#6c6c6c",

	// ---- table view: twitter ----
	"TWITTER_TV_BGCOLOR" : "#eee",
	"TWITTER_TV_BGCOLOR_ALT": "#eee",
	"TWITTER_TV_SEPARATOR_COLOR": "#eee",
	"TWITTER_TV_TITLE_COLOR": "#23589a",
	"TWITTER_TV_META_COLOR" : "#6c6c6c",

	// ---- table view: pages ----
	"PAGES_TV_BGCOLOR" : "#eee",
	"PAGES_TV_BGCOLOR_ALT": "#eee",
	"PAGES_TV_TITLE_COLOR": "#23589a",

	// trailing placeholder so every real entry above may end with a comma
	"DUMMY" : ""
};

// Optional gradient overrides (disabled in this theme) — uncomment to use:
// skin.RECENT_TV_GRADIENT = {
//     type:'linear',
//     colors:[{color:'#d4d4d4',position:0.0},
//     {color:'#c4c4c4',position:0.50},{color:'#b4b4b4',position:1.0}
// ]};
// skin.CATEG_TV_GRADIENT = {
//     type:'linear',
//     colors:[{color:'#d4d4d4',position:0.0},
//     {color:'#c4c4c4',position:0.50},{color:'#b4b4b4',position:1.0}
// ]};
// skin.TWITTER_TV_GRADIENT = {
//     type:'linear',
//     colors:[ {color:'#404040',position:0.0},{color:'#2f2f2f',position:0.5},{color:'#272727',position:1.0} ]
// };
TagYourIt/inspirationapp
Resources/skin/blue.js
JavaScript
apache-2.0
3,029
/*
    Android Asynchronous Http Client
    Copyright (c) 2011 James Smith <james@loopj.com>
    http://loopj.com

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

package io.openkit.asynchttp;

import java.io.IOException;
import java.io.InputStream;
import java.lang.ref.WeakReference;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.WeakHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.zip.GZIPInputStream;

import org.apache.http.Header;
import org.apache.http.HeaderElement;
import org.apache.http.HttpEntity;
import org.apache.http.HttpRequest;
import org.apache.http.HttpRequestInterceptor;
import org.apache.http.HttpResponse;
import org.apache.http.HttpResponseInterceptor;
import org.apache.http.HttpVersion;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CookieStore;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.protocol.ClientContext;
import org.apache.http.conn.params.ConnManagerParams;
import org.apache.http.conn.params.ConnPerRouteBean;
import org.apache.http.conn.scheme.PlainSocketFactory;
import org.apache.http.conn.scheme.Scheme;
import org.apache.http.conn.scheme.SchemeRegistry;
import org.apache.http.conn.ssl.SSLSocketFactory;
import org.apache.http.entity.HttpEntityWrapper;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.impl.conn.tsccm.ThreadSafeClientConnManager;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.HttpParams;
import org.apache.http.params.HttpConnectionParams;
import org.apache.http.params.HttpProtocolParams;
import org.apache.http.protocol.BasicHttpContext;
import org.apache.http.protocol.HttpContext;
import org.apache.http.protocol.SyncBasicHttpContext;

import android.content.Context;

/**
 * The AsyncHttpClient can be used to make asynchronous GET, POST, PUT and
 * DELETE HTTP requests in your Android applications. Requests can be made
 * with additional parameters by passing a {@link RequestParams} instance,
 * and responses can be handled by passing an anonymously overridden
 * {@link AsyncHttpResponseHandler} instance.
 * <p>
 * For example:
 * <p>
 * <pre>
 * AsyncHttpClient client = new AsyncHttpClient();
 * client.get("http://www.google.com", new AsyncHttpResponseHandler() {
 *     &#064;Override
 *     public void onSuccess(String response) {
 *         System.out.println(response);
 *     }
 * });
 * </pre>
 */
public class AsyncHttpClient {
    private static final String VERSION = "1.4.1";

    private static final int DEFAULT_MAX_CONNECTIONS = 10;
    private static final int DEFAULT_SOCKET_TIMEOUT = 10 * 1000;
    private static final int DEFAULT_MAX_RETRIES = 5;
    private static final int DEFAULT_SOCKET_BUFFER_SIZE = 8192;
    private static final String HEADER_ACCEPT_ENCODING = "Accept-Encoding";
    private static final String ENCODING_GZIP = "gzip";

    private static int maxConnections = DEFAULT_MAX_CONNECTIONS;
    private static int socketTimeout = DEFAULT_SOCKET_TIMEOUT;

    private final DefaultHttpClient httpClient;
    private final HttpContext httpContext;
    private ThreadPoolExecutor threadPool;
    // Per-Context bookkeeping so in-flight requests can be cancelled in onDestroy().
    private final Map<Context, List<WeakReference<Future<?>>>> requestMap;
    // Headers added to every request this client sends.
    private final Map<String, String> clientHeaderMap;

    /**
     * Creates a new AsyncHttpClient with default timeouts, connection limits,
     * transparent gzip support and a retry handler.
     */
    public AsyncHttpClient() {
        BasicHttpParams httpParams = new BasicHttpParams();

        ConnManagerParams.setTimeout(httpParams, socketTimeout);
        ConnManagerParams.setMaxConnectionsPerRoute(httpParams, new ConnPerRouteBean(maxConnections));
        ConnManagerParams.setMaxTotalConnections(httpParams, DEFAULT_MAX_CONNECTIONS);

        HttpConnectionParams.setSoTimeout(httpParams, socketTimeout);
        HttpConnectionParams.setConnectionTimeout(httpParams, socketTimeout);
        HttpConnectionParams.setTcpNoDelay(httpParams, true);
        HttpConnectionParams.setSocketBufferSize(httpParams, DEFAULT_SOCKET_BUFFER_SIZE);

        HttpProtocolParams.setVersion(httpParams, HttpVersion.HTTP_1_1);
        HttpProtocolParams.setUserAgent(httpParams, String.format("android-async-http/%s (http://loopj.com/android-async-http)", VERSION));

        SchemeRegistry schemeRegistry = new SchemeRegistry();
        schemeRegistry.register(new Scheme("http", PlainSocketFactory.getSocketFactory(), 80));
        schemeRegistry.register(new Scheme("https", SSLSocketFactory.getSocketFactory(), 443));
        ThreadSafeClientConnManager cm = new ThreadSafeClientConnManager(httpParams, schemeRegistry);

        httpContext = new SyncBasicHttpContext(new BasicHttpContext());
        httpClient = new DefaultHttpClient(cm, httpParams);
        httpClient.addRequestInterceptor(new HttpRequestInterceptor() {
            public void process(HttpRequest request, HttpContext context) {
                // Advertise gzip support unless the caller already set Accept-Encoding.
                if (!request.containsHeader(HEADER_ACCEPT_ENCODING)) {
                    request.addHeader(HEADER_ACCEPT_ENCODING, ENCODING_GZIP);
                }
                // Apply client-wide headers registered via addHeader().
                for (Map.Entry<String, String> entry : clientHeaderMap.entrySet()) {
                    request.addHeader(entry.getKey(), entry.getValue());
                }
            }
        });

        httpClient.addResponseInterceptor(new HttpResponseInterceptor() {
            public void process(HttpResponse response, HttpContext context) {
                // Transparently inflate gzip-encoded response bodies.
                final HttpEntity entity = response.getEntity();
                if (entity == null) {
                    return;
                }
                final Header encoding = entity.getContentEncoding();
                if (encoding != null) {
                    for (HeaderElement element : encoding.getElements()) {
                        if (element.getName().equalsIgnoreCase(ENCODING_GZIP)) {
                            response.setEntity(new InflatingEntity(response.getEntity()));
                            break;
                        }
                    }
                }
            }
        });

        httpClient.setHttpRequestRetryHandler(new RetryHandler(DEFAULT_MAX_RETRIES));

        threadPool = (ThreadPoolExecutor)Executors.newCachedThreadPool();

        requestMap = new WeakHashMap<Context, List<WeakReference<Future<?>>>>();
        clientHeaderMap = new HashMap<String, String>();
    }

    /**
     * Get the underlying HttpClient instance. This is useful for setting
     * additional fine-grained settings for requests by accessing the
     * client's ConnectionManager, HttpParams and SchemeRegistry.
     */
    public HttpClient getHttpClient() {
        return this.httpClient;
    }

    /**
     * Get the underlying HttpContext instance. This is useful for getting
     * and setting fine-grained settings for requests by accessing the
     * context's attributes such as the CookieStore.
     */
    public HttpContext getHttpContext() {
        return this.httpContext;
    }

    /**
     * Sets an optional CookieStore to use when making requests
     * @param cookieStore The CookieStore implementation to use, usually an instance of {@link PersistentCookieStore}
     */
    public void setCookieStore(CookieStore cookieStore) {
        httpContext.setAttribute(ClientContext.COOKIE_STORE, cookieStore);
    }

    /**
     * Overrides the threadpool implementation used when queuing/pooling
     * requests. By default, Executors.newCachedThreadPool() is used.
     * @param threadPool an instance of {@link ThreadPoolExecutor} to use for queuing/pooling requests.
     */
    public void setThreadPool(ThreadPoolExecutor threadPool) {
        this.threadPool = threadPool;
    }

    /**
     * Sets the User-Agent header to be sent with each request. By default,
     * "Android Asynchronous Http Client/VERSION (http://loopj.com/android-async-http/)" is used.
     * @param userAgent the string to use in the User-Agent header.
     */
    public void setUserAgent(String userAgent) {
        HttpProtocolParams.setUserAgent(this.httpClient.getParams(), userAgent);
    }

    /**
     * Sets the connection timeout. By default, 10 seconds.
     * @param timeout the connect/socket timeout in milliseconds
     */
    public void setTimeout(int timeout){
        final HttpParams httpParams = this.httpClient.getParams();
        ConnManagerParams.setTimeout(httpParams, timeout);
        HttpConnectionParams.setSoTimeout(httpParams, timeout);
        HttpConnectionParams.setConnectionTimeout(httpParams, timeout);
    }

    /**
     * Sets the SSLSocketFactory to use when making requests. By default,
     * a new, default SSLSocketFactory is used.
     * @param sslSocketFactory the socket factory to use for https requests.
     */
    public void setSSLSocketFactory(SSLSocketFactory sslSocketFactory) {
        this.httpClient.getConnectionManager().getSchemeRegistry().register(new Scheme("https", sslSocketFactory, 443));
    }

    /**
     * Sets headers that will be added to all requests this client makes (before sending).
     * @param header the name of the header
     * @param value the contents of the header
     */
    public void addHeader(String header, String value) {
        clientHeaderMap.put(header, value);
    }

    /**
     * Sets basic authentication for the request. Uses AuthScope.ANY. This is the same as
     * setBasicAuth('username','password',AuthScope.ANY)
     * @param user the user name
     * @param pass the password
     */
    public void setBasicAuth(String user, String pass){
        AuthScope scope = AuthScope.ANY;
        setBasicAuth(user, pass, scope);
    }

    /**
     * Sets basic authentication for the request. You should pass in your AuthScope for security. It should be like this
     * setBasicAuth("username","password", new AuthScope("host",port,AuthScope.ANY_REALM))
     * @param user the user name
     * @param pass the password
     * @param scope an AuthScope object
     */
    public void setBasicAuth( String user, String pass, AuthScope scope){
        UsernamePasswordCredentials credentials = new UsernamePasswordCredentials(user,pass);
        this.httpClient.getCredentialsProvider().setCredentials(scope, credentials);
    }

    /**
     * Cancels any pending (or potentially active) requests associated with the
     * passed Context.
     * <p>
     * <b>Note:</b> This will only affect requests which were created with a non-null
     * android Context. This method is intended to be used in the onDestroy
     * method of your android activities to destroy all requests which are no
     * longer required.
     *
     * @param context the android Context instance associated to the request.
     * @param mayInterruptIfRunning specifies if active requests should be cancelled along with pending requests.
     */
    public void cancelRequests(Context context, boolean mayInterruptIfRunning) {
        List<WeakReference<Future<?>>> requestList = requestMap.get(context);
        if(requestList != null) {
            for(WeakReference<Future<?>> requestRef : requestList) {
                Future<?> request = requestRef.get();
                if(request != null) {
                    request.cancel(mayInterruptIfRunning);
                }
            }
        }
        requestMap.remove(context);
    }


    //
    // HTTP GET Requests
    //

    /**
     * Perform a HTTP GET request, without any parameters.
     * @param url the URL to send the request to.
     * @param responseHandler the response handler instance that should handle the response.
     */
    public void get(String url, AsyncHttpResponseHandler responseHandler) {
        get(null, url, null, responseHandler);
    }

    /**
     * Perform a HTTP GET request with parameters.
     * @param url the URL to send the request to.
     * @param params additional GET parameters to send with the request.
     * @param responseHandler the response handler instance that should handle the response.
     */
    public void get(String url, RequestParams params, AsyncHttpResponseHandler responseHandler) {
        get(null, url, params, responseHandler);
    }

    /**
     * Perform a HTTP GET request without any parameters and track the Android Context which initiated the request.
     * @param context the Android Context which initiated the request.
     * @param url the URL to send the request to.
     * @param responseHandler the response handler instance that should handle the response.
     */
    public void get(Context context, String url, AsyncHttpResponseHandler responseHandler) {
        get(context, url, null, responseHandler);
    }

    /**
     * Perform a HTTP GET request and track the Android Context which initiated the request.
     * @param context the Android Context which initiated the request.
     * @param url the URL to send the request to.
     * @param params additional GET parameters to send with the request.
     * @param responseHandler the response handler instance that should handle the response.
     */
    public void get(Context context, String url, RequestParams params, AsyncHttpResponseHandler responseHandler) {
        sendRequest(httpClient, httpContext, new HttpGet(getUrlWithQueryString(url, params)), null, responseHandler, context);
    }

    /**
     * Perform a HTTP GET request and track the Android Context which initiated
     * the request with customized headers
     *
     * @param context the Android Context which initiated the request.
     * @param url the URL to send the request to.
     * @param headers set headers only for this request
     * @param params additional GET parameters to send with the request.
     * @param responseHandler the response handler instance that should handle
     *        the response.
     */
    public void get(Context context, String url, Header[] headers, RequestParams params, AsyncHttpResponseHandler responseHandler) {
        HttpUriRequest request = new HttpGet(getUrlWithQueryString(url, params));
        if(headers != null) request.setHeaders(headers);
        sendRequest(httpClient, httpContext, request, null, responseHandler, context);
    }


    //
    // HTTP POST Requests
    //

    /**
     * Perform a HTTP POST request, without any parameters.
     * @param url the URL to send the request to.
     * @param responseHandler the response handler instance that should handle the response.
     */
    public void post(String url, AsyncHttpResponseHandler responseHandler) {
        post(null, url, null, responseHandler);
    }

    /**
     * Perform a HTTP POST request with parameters.
     * @param url the URL to send the request to.
     * @param params additional POST parameters or files to send with the request.
     * @param responseHandler the response handler instance that should handle the response.
     */
    public void post(String url, RequestParams params, AsyncHttpResponseHandler responseHandler) {
        post(null, url, params, responseHandler);
    }

    /**
     * Perform a HTTP POST request and track the Android Context which initiated the request.
     * @param context the Android Context which initiated the request.
     * @param url the URL to send the request to.
     * @param params additional POST parameters or files to send with the request.
     * @param responseHandler the response handler instance that should handle the response.
     */
    public void post(Context context, String url, RequestParams params, AsyncHttpResponseHandler responseHandler) {
        post(context, url, paramsToEntity(params), null, responseHandler);
    }

    /**
     * Perform a HTTP POST request and track the Android Context which initiated the request.
     * @param context the Android Context which initiated the request.
     * @param url the URL to send the request to.
     * @param entity a raw {@link HttpEntity} to send with the request, for example, use this to send string/json/xml payloads to a server by passing a {@link org.apache.http.entity.StringEntity}.
     * @param contentType the content type of the payload you are sending, for example application/json if sending a json payload.
     * @param responseHandler the response handler instance that should handle the response.
     */
    public void post(Context context, String url, HttpEntity entity, String contentType, AsyncHttpResponseHandler responseHandler) {
        sendRequest(httpClient, httpContext, addEntityToRequestBase(new HttpPost(url), entity), contentType, responseHandler, context);
    }

    /**
     * Perform a HTTP POST request and track the Android Context which initiated
     * the request. Set headers only for this request
     *
     * @param context the Android Context which initiated the request.
     * @param url the URL to send the request to.
     * @param headers set headers only for this request
     * @param params additional POST parameters to send with the request.
     * @param contentType the content type of the payload you are sending, for
     *        example application/json if sending a json payload.
     * @param responseHandler the response handler instance that should handle
     *        the response.
     */
    public void post(Context context, String url, Header[] headers, RequestParams params, String contentType,
            AsyncHttpResponseHandler responseHandler) {
        HttpEntityEnclosingRequestBase request = new HttpPost(url);
        if(params != null) request.setEntity(paramsToEntity(params));
        if(headers != null) request.setHeaders(headers);
        sendRequest(httpClient, httpContext, request, contentType,
                responseHandler, context);
    }

    /**
     * Perform a HTTP POST request and track the Android Context which initiated
     * the request. Set headers only for this request
     *
     * @param context the Android Context which initiated the request.
     * @param url the URL to send the request to.
     * @param headers set headers only for this request
     * @param entity a raw {@link HttpEntity} to send with the request, for
     *        example, use this to send string/json/xml payloads to a server by
     *        passing a {@link org.apache.http.entity.StringEntity}.
     * @param contentType the content type of the payload you are sending, for
     *        example application/json if sending a json payload.
     * @param responseHandler the response handler instance that should handle
     *        the response.
     */
    public void post(Context context, String url, Header[] headers, HttpEntity entity, String contentType,
            AsyncHttpResponseHandler responseHandler) {
        HttpEntityEnclosingRequestBase request = addEntityToRequestBase(new HttpPost(url), entity);
        if(headers != null) request.setHeaders(headers);
        sendRequest(httpClient, httpContext, request, contentType, responseHandler, context);
    }

    //
    // HTTP PUT Requests
    //

    /**
     * Perform a HTTP PUT request, without any parameters.
     * @param url the URL to send the request to.
     * @param responseHandler the response handler instance that should handle the response.
     */
    public void put(String url, AsyncHttpResponseHandler responseHandler) {
        put(null, url, null, responseHandler);
    }

    /**
     * Perform a HTTP PUT request with parameters.
     * @param url the URL to send the request to.
     * @param params additional PUT parameters or files to send with the request.
     * @param responseHandler the response handler instance that should handle the response.
     */
    public void put(String url, RequestParams params, AsyncHttpResponseHandler responseHandler) {
        put(null, url, params, responseHandler);
    }

    /**
     * Perform a HTTP PUT request and track the Android Context which initiated the request.
     * @param context the Android Context which initiated the request.
     * @param url the URL to send the request to.
     * @param params additional PUT parameters or files to send with the request.
     * @param responseHandler the response handler instance that should handle the response.
     */
    public void put(Context context, String url, RequestParams params, AsyncHttpResponseHandler responseHandler) {
        put(context, url, paramsToEntity(params), null, responseHandler);
    }

    /**
     * Perform a HTTP PUT request and track the Android Context which initiated the request.
     * And set one-time headers for the request
     * @param context the Android Context which initiated the request.
     * @param url the URL to send the request to.
     * @param entity a raw {@link HttpEntity} to send with the request, for example, use this to send string/json/xml payloads to a server by passing a {@link org.apache.http.entity.StringEntity}.
     * @param contentType the content type of the payload you are sending, for example application/json if sending a json payload.
     * @param responseHandler the response handler instance that should handle the response.
     */
    public void put(Context context, String url, HttpEntity entity, String contentType, AsyncHttpResponseHandler responseHandler) {
        sendRequest(httpClient, httpContext, addEntityToRequestBase(new HttpPut(url), entity), contentType, responseHandler, context);
    }

    /**
     * Perform a HTTP PUT request and track the Android Context which initiated the request.
     * And set one-time headers for the request
     * @param context the Android Context which initiated the request.
     * @param url the URL to send the request to.
     * @param headers set one-time headers for this request
     * @param entity a raw {@link HttpEntity} to send with the request, for example, use this to send string/json/xml payloads to a server by passing a {@link org.apache.http.entity.StringEntity}.
     * @param contentType the content type of the payload you are sending, for example application/json if sending a json payload.
     * @param responseHandler the response handler instance that should handle the response.
     */
    public void put(Context context, String url,Header[] headers, HttpEntity entity, String contentType, AsyncHttpResponseHandler responseHandler) {
        HttpEntityEnclosingRequestBase request = addEntityToRequestBase(new HttpPut(url), entity);
        if(headers != null) request.setHeaders(headers);
        sendRequest(httpClient, httpContext, request, contentType, responseHandler, context);
    }

    //
    // HTTP DELETE Requests
    //

    /**
     * Perform a HTTP DELETE request.
     * @param url the URL to send the request to.
     * @param responseHandler the response handler instance that should handle the response.
     */
    public void delete(String url, AsyncHttpResponseHandler responseHandler) {
        delete(null, url, responseHandler);
    }

    /**
     * Perform a HTTP DELETE request.
     * @param context the Android Context which initiated the request.
     * @param url the URL to send the request to.
     * @param responseHandler the response handler instance that should handle the response.
     */
    public void delete(Context context, String url, AsyncHttpResponseHandler responseHandler) {
        final HttpDelete delete = new HttpDelete(url);
        sendRequest(httpClient, httpContext, delete, null, responseHandler, context);
    }

    /**
     * Perform a HTTP DELETE request.
     * @param context the Android Context which initiated the request.
     * @param url the URL to send the request to.
     * @param headers set one-time headers for this request
     * @param responseHandler the response handler instance that should handle the response.
     */
    public void delete(Context context, String url, Header[] headers, AsyncHttpResponseHandler responseHandler) {
        final HttpDelete delete = new HttpDelete(url);
        if(headers != null) delete.setHeaders(headers);
        sendRequest(httpClient, httpContext, delete, null, responseHandler, context);
    }


    // Private stuff

    /**
     * Submits the prepared request to the thread pool and, when a Context is
     * supplied, records the resulting Future so cancelRequests() can cancel it.
     */
    protected void sendRequest(DefaultHttpClient client, HttpContext httpContext, HttpUriRequest uriRequest, String contentType, AsyncHttpResponseHandler responseHandler, Context context) {
        if(contentType != null) {
            uriRequest.addHeader("Content-Type", contentType);
        }

        Future<?> request = threadPool.submit(new AsyncHttpRequest(client, httpContext, uriRequest, responseHandler));

        if(context != null) {
            // Add request to request map
            List<WeakReference<Future<?>>> requestList = requestMap.get(context);
            if(requestList == null) {
                requestList = new LinkedList<WeakReference<Future<?>>>();
                requestMap.put(context, requestList);
            }

            requestList.add(new WeakReference<Future<?>>(request));

            // TODO: Remove dead weakrefs from requestLists?
        }
    }

    /**
     * Appends the encoded parameter string to the URL, using '?' or '&'
     * depending on whether the URL already carries a query string.
     */
    public static String getUrlWithQueryString(String url, RequestParams params) {
        if(params != null) {
            String paramString = params.getParamString();
            if (!url.contains("?")) {
                url += "?" + paramString;
            } else {
                url += "&" + paramString;
            }
        }
        return url;
    }

    /** Extracts the HttpEntity from the params, or null if params is null. */
    private HttpEntity paramsToEntity(RequestParams params) {
        HttpEntity entity = null;

        if(params != null) {
            entity = params.getEntity();
        }

        return entity;
    }

    /** Attaches the entity (if any) to the request and returns the request. */
    private HttpEntityEnclosingRequestBase addEntityToRequestBase(HttpEntityEnclosingRequestBase requestBase, HttpEntity entity) {
        if(entity != null){
            requestBase.setEntity(entity);
        }

        return requestBase;
    }

    /**
     * Entity wrapper that decompresses a gzip-encoded response body on read.
     * Content length is reported as -1 (unknown) since the inflated size
     * differs from the transferred size.
     */
    private static class InflatingEntity extends HttpEntityWrapper {
        public InflatingEntity(HttpEntity wrapped) {
            super(wrapped);
        }

        @Override
        public InputStream getContent() throws IOException {
            return new GZIPInputStream(wrappedEntity.getContent());
        }

        @Override
        public long getContentLength() {
            return -1;
        }
    }
}
oyatsukai/openkit-android-beta
OpenKitSDK/src/io/openkit/asynchttp/AsyncHttpClient.java
Java
apache-2.0
27,033
package de.st_ddt.crazylogin.commands;

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;

import org.bukkit.Bukkit;
import org.bukkit.OfflinePlayer;
import org.bukkit.command.CommandSender;

import de.st_ddt.crazylogin.CrazyLogin;
import de.st_ddt.crazylogin.data.LoginPlayerData;
import de.st_ddt.crazylogin.exceptions.PasswordRejectedException;
import de.st_ddt.crazyplugin.exceptions.CrazyCommandAlreadyExistsException;
import de.st_ddt.crazyplugin.exceptions.CrazyCommandCircumstanceException;
import de.st_ddt.crazyplugin.exceptions.CrazyCommandErrorException;
import de.st_ddt.crazyplugin.exceptions.CrazyCommandUsageException;
import de.st_ddt.crazyplugin.exceptions.CrazyException;
import de.st_ddt.crazyutil.ChatHelper;
import de.st_ddt.crazyutil.ChatHelperExtended;
import de.st_ddt.crazyutil.source.Localized;
import de.st_ddt.crazyutil.source.Permission;

/**
 * Command that lets an administrator create a login account (with password)
 * for another player.
 */
public class CommandPlayerCreate extends CommandExecutor
{

	public CommandPlayerCreate(final CrazyLogin plugin)
	{
		super(plugin);
	}

	/**
	 * Creates an account for args[0] with the password formed by the remaining
	 * arguments. When password confirmation is enabled, the password must be
	 * supplied twice and both halves must match.
	 *
	 * @throws CrazyException on missing database, bad usage, duplicate account
	 *         or a rejected/failed password update.
	 */
	@Override
	@Localized({ "CRAZYLOGIN.COMMAND.PLAYER.CREATE.SUCCESS $Name$", "CRAZYLOGIN.COMMAND.REGISTER.WARNCONFIRMPASSWORDDISABLED" })
	public void command(final CommandSender sender, final String[] args) throws CrazyException
	{
		if (owner.getCrazyDatabase() == null)
			throw new CrazyCommandCircumstanceException("when database is accessible");
		if (args.length < (owner.isConfirmNewPasswordEnabled() ? 3 : 2))
			throw new CrazyCommandUsageException("<Player> <Password>" + (owner.isConfirmNewPasswordEnabled() ? " <Password>" : ""));
		final String name = args[0];
		if (owner.hasPlayerData(name))
			throw new CrazyCommandAlreadyExistsException("Account", name);
		final LoginPlayerData data = new LoginPlayerData(name);
		// Everything after the player name belongs to the password.
		final String[] passwordArgs = ChatHelperExtended.shiftArray(args, 1);
		String password = null;
		if (owner.isConfirmNewPasswordEnabled())
		{
			// Password must be given twice; an odd word count cannot split evenly.
			if (passwordArgs.length % 2 == 1)
				throw new CrazyCommandUsageException("<Player> <Password> <Password>");
			password = ChatHelper.listingString(" ", ChatHelperExtended.cutArray(passwordArgs, passwordArgs.length / 2));
			if (passwordArgs.length > 0)
				if (!password.equals(ChatHelper.listingString(" ", ChatHelperExtended.shiftArray(passwordArgs, passwordArgs.length / 2))))
					throw new CrazyCommandUsageException("<Player> <Password> <Password>");
		}
		else
			password = ChatHelper.listingString(" ", passwordArgs);
		try
		{
			data.setPassword(password);
		}
		catch (final Exception e)
		{
			// Covers PasswordRejectedException as well; both were wrapped identically.
			throw new CrazyCommandErrorException(e);
		}
		owner.sendLocaleMessage("COMMAND.PLAYER.CREATE.SUCCESS", sender, name);
		owner.getCrazyDatabase().save(data);
		owner.getCrazyLogger().log("Account", data.getName() + " registered successfully (via " + sender.getName() + ").");
		// Confirmation disabled but the password was typed twice identically?
		// The sender probably expected confirmation to be on - warn them.
		if (!owner.isConfirmNewPasswordEnabled())
			if (passwordArgs.length % 2 == 0)
				if (ChatHelper.listingString(" ", ChatHelperExtended.cutArray(passwordArgs, passwordArgs.length / 2)).equals(ChatHelper.listingString(" ", ChatHelperExtended.shiftArray(passwordArgs, passwordArgs.length / 2))))
					owner.sendLocaleMessage("COMMAND.REGISTER.WARNCONFIRMPASSWORDDISABLED", sender);
	}

	/**
	 * Tab completion: suggests known offline players without an account whose
	 * name matches the (regex-interpreted) first argument.
	 */
	@Override
	public List<String> tab(final CommandSender sender, final String[] args)
	{
		if (args.length != 1)
			return null;
		final List<String> res = new ArrayList<String>();
		final Pattern pattern;
		try
		{
			pattern = Pattern.compile(args[0], Pattern.CASE_INSENSITIVE);
		}
		catch (final PatternSyntaxException e)
		{
			// User input is not a valid regex - no suggestions instead of an error.
			return res;
		}
		for (final OfflinePlayer player : Bukkit.getOfflinePlayers())
			if (pattern.matcher(player.getName()).find())
				if (!owner.hasPlayerData(player))
					res.add(player.getName());
		return res;
	}

	@Override
	@Permission("crazylogin.player.create")
	public boolean hasAccessPermission(final CommandSender sender)
	{
		return sender.hasPermission("crazylogin.player.create");
	}
}
ST-DDT/CrazyLogin
src/main/java/de/st_ddt/crazylogin/commands/CommandPlayerCreate.java
Java
apache-2.0
4,146
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package io.cloudsoft.win4j.client; import org.apache.cxf.Bus; import org.apache.cxf.BusFactory; import org.apache.cxf.transport.http.asyncclient.AsyncHTTPConduit; import org.apache.cxf.transport.http.asyncclient.AsyncHTTPConduitFactory; import org.apache.cxf.transport.http.asyncclient.AsyncHTTPConduitFactory.UseAsyncPolicy; public class WinRmClientContext { public static WinRmClientContext newInstance() { Bus bus = configureBus(BusFactory.newInstance().createBus()); return new WinRmClientContext(bus); } static Bus configureBus(Bus bus) { // Needed to be async to force the use of Apache HTTP Components client. // Details at http://cxf.apache.org/docs/asynchronous-client-http-transport.html. // Apache HTTP Components needed to support NTLM authentication. bus.getProperties().put(AsyncHTTPConduit.USE_ASYNC, Boolean.TRUE); bus.getProperties().put(AsyncHTTPConduitFactory.USE_POLICY, UseAsyncPolicy.ALWAYS); return bus; } private final Bus bus; private WinRmClientContext(Bus bus) { this.bus = bus; } public void shutdown() { bus.shutdown(true); } Bus getBus() { return bus; } }
TANGKUO/twoMan
src/main/java/io/cloudsoft/win4j/client/WinRmClientContext.java
Java
apache-2.0
2,092
import re
import string
import sys

from pyspark import SparkContext

# Punctuation characters stripped out of candidate hashtag bodies.
exclude = set(string.punctuation)

# Compiled once at module import instead of on every call: get_hash_tag is
# invoked once per word across the entire RDD.
HASHTAG_PATTERN = re.compile(r"^#(.*)")


def get_hash_tag(word, rmPunc):
    """Return the punctuation-stripped hashtag contained in *word*, or None.

    Args:
        word: a single whitespace-delimited token from a tweet.
        rmPunc: a collection of characters to remove from the tag body
            (typically ``string.punctuation``).

    Returns:
        The tag text without the leading '#' and without punctuation, or
        None when *word* is not a hashtag or the stripped tag is empty
        (e.g. a bare "#", which previously leaked an empty string into
        the output).
    """
    m = HASHTAG_PATTERN.match(word)
    if not m:
        return None
    tag = ''.join(letter for letter in m.group(1) if letter not in rmPunc)
    # A tag consisting only of punctuation (or nothing) is not useful.
    return tag if tag else None


sc = SparkContext("local", "Finding Hash Tags")

# Broadcast the punctuation set so every executor shares one read-only copy.
rmPunc = sc.broadcast(exclude)

# Each input line is "<tweet_id>\t<tweet text>" as produced by the first job;
# keep only the text column and split it into words.
mydata = sc.textFile("hdfs://<hostname>:<port>/path/to/parsedata<first job output>")
wordsRDD = mydata.flatMap(lambda line: line.split("\t")[1].split(" "))
tagsRDD = wordsRDD.map(lambda word: get_hash_tag(word, rmPunc.value))
hashtagsRDD = tagsRDD.filter(lambda word: word is not None)
hashtagsRDD.saveAsTextFile("hdfs://<hostname>:<port>/path/to/hashtags")
malli3131/SparkApps
Batch_sentiment/spark_hashtag.py
Python
apache-2.0
828
// notice_start /* * Copyright 2018 Keith Woods * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // notice_end import espReact from '../src/index'; import { RouterProvider, SmartComponent, ViewBinder, viewBinding, ModelSelector, shouldUpdateMixin } from '../src/index'; describe('index exports', () => { it('should export RouterProvider', () => { expect(espReact.RouterProvider).toBeDefined(); expect(RouterProvider).toBeDefined(); }); it('should export SmartComponent', () => { expect(espReact.SmartComponent).toBeDefined(); expect(SmartComponent).toBeDefined(); }); it('should export ViewBinder', () => { expect(espReact.ViewBinder).toBeDefined(); expect(ViewBinder).toBeDefined(); }); it('should export viewBinding', () => { expect(espReact.viewBinding).toBeDefined(); expect(viewBinding).toBeDefined(); }); it('should export ModelSelector', () => { expect(espReact.ModelSelector).toBeDefined(); expect(ModelSelector).toBeDefined(); }); it('should export shouldUpdateMixin', () => { expect(espReact.shouldUpdateMixin).toBeDefined(); expect(shouldUpdateMixin).toBeDefined(); }); });
esp/esp-js-react
tests/index.test.ts
TypeScript
apache-2.0
1,771
'use strict';

var util = require('../../../util/index');
var DimensionError = require('../../../error/DimensionError');

var string = util.string,
    isString = string.isString;

function factory (type, config, load, typed) {

  var equalScalar = load(require('../../../function/relational/equalScalar'));

  var SparseMatrix = type.SparseMatrix;

  /**
   * Iterates over SparseMatrix A and invokes the callback function f(Aij, Bij).
   * Callback function invoked NZA times, number of nonzero elements in A.
   *
   *
   *          ┌ f(Aij, Bij)  ; A(i,j) !== 0
   * C(i,j) = ┤
   *          └ 0            ; otherwise
   *
   *
   * @param {Matrix}   a       The SparseMatrix instance (A)
   * @param {Matrix}   b       The SparseMatrix instance (B)
   * @param {Function} callback The f(Aij,Bij) operation to invoke
   *
   * @return {Matrix}          SparseMatrix (C)
   *
   * see https://github.com/josdejong/mathjs/pull/346#issuecomment-97620294
   */
  var algorithm09 = function (a, b, callback) {
    // sparse matrix arrays
    var avalues = a._values;
    var aindex = a._index;
    var aptr = a._ptr;
    var asize = a._size;
    var adt = a._datatype;
    // sparse matrix arrays
    var bvalues = b._values;
    var bindex = b._index;
    var bptr = b._ptr;
    var bsize = b._size;
    var bdt = b._datatype;

    // validate dimensions
    if (asize.length !== bsize.length)
      throw new DimensionError(asize.length, bsize.length);

    // check rows & columns
    if (asize[0] !== bsize[0] || asize[1] !== bsize[1])
      throw new RangeError('Dimension mismatch. Matrix A (' + asize + ') must match Matrix B (' + bsize + ')');

    // rows & columns
    var rows = asize[0];
    var columns = asize[1];

    // datatype
    var dt;
    // equal signature to use
    var eq = equalScalar;
    // zero value
    var zero = 0;
    // callback signature to use
    var cf = callback;

    // process data types
    // (only specialize when both operands carry the same string datatype;
    // otherwise fall back to the generic equalScalar/callback/0 above)
    if (adt && bdt && adt === bdt && isString(adt)) {
      // datatype
      dt = adt;
      // find signature that matches (dt, dt)
      eq = typed.find(equalScalar, [dt, dt]);
      // convert 0 to the same datatype
      zero = typed.convert(0, dt);
      // callback
      cf = typed.find(callback, [dt, dt]);
    }

    // result arrays
    // (if either input is pattern-only — no _values array — the result is
    // pattern-only too, and only indices are collected below)
    var cvalues = avalues && bvalues ? [] : undefined;
    var cindex = [];
    var cptr = [];
    // matrix
    var c = new SparseMatrix({
      values: cvalues,
      index: cindex,
      ptr: cptr,
      size: [rows, columns],
      datatype: dt
    });

    // workspaces
    var x = cvalues ? [] : undefined;
    // marks indicating we have a value in x for a given column
    var w = [];

    // vars
    var i, j, k, k0, k1;

    // loop columns
    for (j = 0; j < columns; j++) {
      // update cptr
      cptr[j] = cindex.length;
      // column mark
      // (j + 1 is never reused across columns, so stale entries in w from
      // previous columns are ignored without clearing the workspace)
      var mark = j + 1;
      // check we need to process values
      if (x) {
        // loop B(:,j)
        for (k0 = bptr[j], k1 = bptr[j + 1], k = k0; k < k1; k++) {
          // row
          i = bindex[k];
          // update workspace
          w[i] = mark;
          x[i] = bvalues[k];
        }
      }
      // loop A(:,j)
      for (k0 = aptr[j], k1 = aptr[j + 1], k = k0; k < k1; k++) {
        // row
        i = aindex[k];
        // check we need to process values
        if (x) {
          // b value @ i,j
          var vb = w[i] === mark ? x[i] : zero;
          // invoke f
          var vc = cf(avalues[k], vb);
          // check zero value
          if (!eq(vc, zero)) {
            // push index
            cindex.push(i);
            // push value
            cvalues.push(vc);
          }
        }
        else {
          // push index
          cindex.push(i);
        }
      }
    }
    // update cptr
    cptr[columns] = cindex.length;

    // return sparse matrix
    return c;
  };

  return algorithm09;
}

exports.name = 'algorithm09';
exports.factory = factory;
mikberg/mathjs
lib/type/matrix/util/algorithm09.js
JavaScript
apache-2.0
3,994
// Generated by the protocol buffer compiler. DO NOT EDIT! // source: ObserverStatisticsService.proto package coprocessor.generated; public final class ObserverStatisticsProtos { private ObserverStatisticsProtos() {} public static void registerAllExtensions( com.google.protobuf.ExtensionRegistry registry) { } public interface NameInt32PairOrBuilder extends // @@protoc_insertion_point(interface_extends:NameInt32Pair) com.google.protobuf.MessageOrBuilder { /** * <code>optional string name = 1;</code> */ boolean hasName(); /** * <code>optional string name = 1;</code> */ java.lang.String getName(); /** * <code>optional string name = 1;</code> */ com.google.protobuf.ByteString getNameBytes(); /** * <code>optional int32 value = 2;</code> */ boolean hasValue(); /** * <code>optional int32 value = 2;</code> */ int getValue(); } /** * Protobuf type {@code NameInt32Pair} */ public static final class NameInt32Pair extends com.google.protobuf.GeneratedMessage implements // @@protoc_insertion_point(message_implements:NameInt32Pair) NameInt32PairOrBuilder { // Use NameInt32Pair.newBuilder() to construct. 
private NameInt32Pair(com.google.protobuf.GeneratedMessage.Builder<?> builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private NameInt32Pair(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final NameInt32Pair defaultInstance; public static NameInt32Pair getDefaultInstance() { return defaultInstance; } public NameInt32Pair getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private NameInt32Pair( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 10: { com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; name_ = bs; break; } case 16: { bitField0_ |= 0x00000002; value_ = input.readInt32(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return coprocessor.generated.ObserverStatisticsProtos.internal_static_NameInt32Pair_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable 
internalGetFieldAccessorTable() { return coprocessor.generated.ObserverStatisticsProtos.internal_static_NameInt32Pair_fieldAccessorTable .ensureFieldAccessorsInitialized( coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.class, coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.Builder.class); } public static com.google.protobuf.Parser<NameInt32Pair> PARSER = new com.google.protobuf.AbstractParser<NameInt32Pair>() { public NameInt32Pair parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new NameInt32Pair(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser<NameInt32Pair> getParserForType() { return PARSER; } private int bitField0_; public static final int NAME_FIELD_NUMBER = 1; private java.lang.Object name_; /** * <code>optional string name = 1;</code> */ public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>optional string name = 1;</code> */ public java.lang.String getName() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { name_ = s; } return s; } } /** * <code>optional string name = 1;</code> */ public com.google.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } public static final int VALUE_FIELD_NUMBER = 2; private int value_; /** * <code>optional int32 value = 2;</code> */ public boolean hasValue() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <code>optional int32 value = 2;</code> */ 
public int getValue() { return value_; } private void initFields() { name_ = ""; value_ = 0; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, getNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeInt32(2, value_); } getUnknownFields().writeTo(output); } private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, getNameBytes()); } if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(2, value_); } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair)) { return super.equals(obj); } coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair other = (coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair) obj; boolean result = true; result = result && (hasName() == other.hasName()); if (hasName()) { result = result && getName() .equals(other.getName()); } result = result && (hasValue() == other.hasValue()); if (hasValue()) { result = result && (getValue() == other.getValue()); } result = result && 
getUnknownFields().equals(other.getUnknownFields()); return result; } @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasName()) { hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); } if (hasValue()) { hash = (37 * hash) + VALUE_FIELD_NUMBER; hash = (53 * hash) + getValue(); } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } public static coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static 
coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } public static coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } public static coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code NameInt32Pair} */ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder<Builder> implements // @@protoc_insertion_point(builder_implements:NameInt32Pair) coprocessor.generated.ObserverStatisticsProtos.NameInt32PairOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return coprocessor.generated.ObserverStatisticsProtos.internal_static_NameInt32Pair_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return 
coprocessor.generated.ObserverStatisticsProtos.internal_static_NameInt32Pair_fieldAccessorTable .ensureFieldAccessorsInitialized( coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.class, coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.Builder.class); } // Construct using coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } private static Builder create() { return new Builder(); } public Builder clear() { super.clear(); name_ = ""; bitField0_ = (bitField0_ & ~0x00000001); value_ = 0; bitField0_ = (bitField0_ & ~0x00000002); return this; } public Builder clone() { return create().mergeFrom(buildPartial()); } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return coprocessor.generated.ObserverStatisticsProtos.internal_static_NameInt32Pair_descriptor; } public coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair getDefaultInstanceForType() { return coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.getDefaultInstance(); } public coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair build() { coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair buildPartial() { coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair result = new coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.name_ = name_; if 
(((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.value_ = value_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair) { return mergeFrom((coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair)other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair other) { if (other == coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.getDefaultInstance()) return this; if (other.hasName()) { bitField0_ |= 0x00000001; name_ = other.name_; onChanged(); } if (other.hasValue()) { setValue(other.getValue()); } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object name_ = ""; /** * <code>optional string name = 1;</code> */ public boolean hasName() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>optional string name = 1;</code> */ public java.lang.String getName() { java.lang.Object ref = name_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); if (bs.isValidUtf8()) { name_ = s; } return 
s; } else { return (java.lang.String) ref; } } /** * <code>optional string name = 1;</code> */ public com.google.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * <code>optional string name = 1;</code> */ public Builder setName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; name_ = value; onChanged(); return this; } /** * <code>optional string name = 1;</code> */ public Builder clearName() { bitField0_ = (bitField0_ & ~0x00000001); name_ = getDefaultInstance().getName(); onChanged(); return this; } /** * <code>optional string name = 1;</code> */ public Builder setNameBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; name_ = value; onChanged(); return this; } private int value_ ; /** * <code>optional int32 value = 2;</code> */ public boolean hasValue() { return ((bitField0_ & 0x00000002) == 0x00000002); } /** * <code>optional int32 value = 2;</code> */ public int getValue() { return value_; } /** * <code>optional int32 value = 2;</code> */ public Builder setValue(int value) { bitField0_ |= 0x00000002; value_ = value; onChanged(); return this; } /** * <code>optional int32 value = 2;</code> */ public Builder clearValue() { bitField0_ = (bitField0_ & ~0x00000002); value_ = 0; onChanged(); return this; } // @@protoc_insertion_point(builder_scope:NameInt32Pair) } static { defaultInstance = new NameInt32Pair(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:NameInt32Pair) } public interface StatisticsRequestOrBuilder extends // @@protoc_insertion_point(interface_extends:StatisticsRequest) com.google.protobuf.MessageOrBuilder { /** * <code>optional bool clear = 1 [default 
= false];</code> */ boolean hasClear(); /** * <code>optional bool clear = 1 [default = false];</code> */ boolean getClear(); } /** * Protobuf type {@code StatisticsRequest} */ public static final class StatisticsRequest extends com.google.protobuf.GeneratedMessage implements // @@protoc_insertion_point(message_implements:StatisticsRequest) StatisticsRequestOrBuilder { // Use StatisticsRequest.newBuilder() to construct. private StatisticsRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } private StatisticsRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private static final StatisticsRequest defaultInstance; public static StatisticsRequest getDefaultInstance() { return defaultInstance; } public StatisticsRequest getDefaultInstanceForType() { return defaultInstance; } private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private StatisticsRequest( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } case 8: { bitField0_ |= 0x00000001; clear_ = input.readBool(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { 
this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return coprocessor.generated.ObserverStatisticsProtos.internal_static_StatisticsRequest_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return coprocessor.generated.ObserverStatisticsProtos.internal_static_StatisticsRequest_fieldAccessorTable .ensureFieldAccessorsInitialized( coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest.class, coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest.Builder.class); } public static com.google.protobuf.Parser<StatisticsRequest> PARSER = new com.google.protobuf.AbstractParser<StatisticsRequest>() { public StatisticsRequest parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new StatisticsRequest(input, extensionRegistry); } }; @java.lang.Override public com.google.protobuf.Parser<StatisticsRequest> getParserForType() { return PARSER; } private int bitField0_; public static final int CLEAR_FIELD_NUMBER = 1; private boolean clear_; /** * <code>optional bool clear = 1 [default = false];</code> */ public boolean hasClear() { return ((bitField0_ & 0x00000001) == 0x00000001); } /** * <code>optional bool clear = 1 [default = false];</code> */ public boolean getClear() { return clear_; } private void initFields() { clear_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(1, clear_); } 
// NOTE(review): protoc-generated code (protobuf-java 2.x "GeneratedMessage" style;
// see the @@protoc_insertion_point markers). Do not hand-edit — change the .proto
// and regenerate. All comments here are review annotations only.
// Tail of StatisticsRequest.writeTo(): unknown fields are serialized last.
getUnknownFields().writeTo(output);
}

private int memoizedSerializedSize = -1;
// Lazily computes and caches the serialized wire size. Bit 0 of bitField0_
// tracks presence of the optional bool `clear` (field number 1).
public int getSerializedSize() {
  int size = memoizedSerializedSize;
  if (size != -1) return size;

  size = 0;
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    size += com.google.protobuf.CodedOutputStream
      .computeBoolSize(1, clear_);
  }
  size += getUnknownFields().getSerializedSize();
  memoizedSerializedSize = size;
  return size;
}

private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
    throws java.io.ObjectStreamException {
  return super.writeReplace();
}

// Structural equality over `clear` (presence and value) plus unknown fields.
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest)) {
    return super.equals(obj);
  }
  coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest other = (coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest) obj;

  boolean result = true;
  result = result && (hasClear() == other.hasClear());
  if (hasClear()) {
    result = result && (getClear() == other.getClear());
  }
  result = result && getUnknownFields().equals(other.getUnknownFields());
  return result;
}

// hashCode consistent with equals; memoized (0 means "not yet computed").
@java.lang.Override
public int hashCode() {
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptorForType().hashCode();
  if (hasClear()) {
    hash = (37 * hash) + CLEAR_FIELD_NUMBER;
    hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(
        getClear());
  }
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}

// Standard protoc-generated static parse entry points; all delegate to PARSER.
public static coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest parseFrom(
    com.google.protobuf.ByteString data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest parseFrom(
    com.google.protobuf.ByteString data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest parseFrom(byte[] data)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data);
}
public static coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest parseFrom(
    byte[] data,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return PARSER.parseFrom(data, extensionRegistry);
}
public static coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest parseFrom(java.io.InputStream input)
    throws java.io.IOException {
  return PARSER.parseFrom(input);
}
public static coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest parseFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseFrom(input, extensionRegistry);
}
public static coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest parseDelimitedFrom(java.io.InputStream input)
    throws java.io.IOException {
  return PARSER.parseDelimitedFrom(input);
}
public static coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest parseDelimitedFrom(
    java.io.InputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest parseFrom(
    com.google.protobuf.CodedInputStream input)
    throws java.io.IOException {
  return PARSER.parseFrom(input);
}
public static coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest parseFrom(
    com.google.protobuf.CodedInputStream input,
    com.google.protobuf.ExtensionRegistryLite extensionRegistry)
    throws java.io.IOException {
  return PARSER.parseFrom(input, extensionRegistry);
}

public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest prototype) {
  return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }

@java.lang.Override
protected Builder newBuilderForType(
    com.google.protobuf.GeneratedMessage.BuilderParent parent) {
  Builder builder = new Builder(parent);
  return builder;
}
/**
 * Protobuf type {@code StatisticsRequest}
 */
public static final class Builder extends
    com.google.protobuf.GeneratedMessage.Builder<Builder> implements
    // @@protoc_insertion_point(builder_implements:StatisticsRequest)
    coprocessor.generated.ObserverStatisticsProtos.StatisticsRequestOrBuilder {
  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return coprocessor.generated.ObserverStatisticsProtos.internal_static_StatisticsRequest_descriptor;
  }

  protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return coprocessor.generated.ObserverStatisticsProtos.internal_static_StatisticsRequest_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest.class, coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest.Builder.class);
  }

  // Construct using coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest.newBuilder()
  private Builder() {
    maybeForceBuilderInitialization();
  }

  private Builder(
      com.google.protobuf.GeneratedMessage.BuilderParent parent) {
    super(parent);
    maybeForceBuilderInitialization();
  }
  private void maybeForceBuilderInitialization() {
    // No nested-message field builders to eagerly initialize for this message.
    if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
    }
  }
  private static Builder create() {
    return new Builder();
  }

  // Resets `clear` to its default and drops its presence bit.
  public Builder clear() {
    super.clear();
    clear_ = false;
    bitField0_ = (bitField0_ & ~0x00000001);
    return this;
  }

  public Builder clone() {
    return create().mergeFrom(buildPartial());
  }

  public com.google.protobuf.Descriptors.Descriptor
      getDescriptorForType() {
    return coprocessor.generated.ObserverStatisticsProtos.internal_static_StatisticsRequest_descriptor;
  }

  public coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest getDefaultInstanceForType() {
    return coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest.getDefaultInstance();
  }

  public coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest build() {
    coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest result = buildPartial();
    if (!result.isInitialized()) {
      throw newUninitializedMessageException(result);
    }
    return result;
  }

  // Copies builder state into a new message without the initialization check.
  public coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest buildPartial() {
    coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest result = new coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest(this);
    int from_bitField0_ = bitField0_;
    int to_bitField0_ = 0;
    if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
      to_bitField0_ |= 0x00000001;
    }
    result.clear_ = clear_;
    result.bitField0_ = to_bitField0_;
    onBuilt();
    return result;
  }

  public Builder mergeFrom(com.google.protobuf.Message other) {
    if (other instanceof coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest) {
      return mergeFrom((coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest)other);
    } else {
      super.mergeFrom(other);
      return this;
    }
  }

  public Builder mergeFrom(coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest other) {
    if (other == coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest.getDefaultInstance()) return this;
    if (other.hasClear()) {
      setClear(other.getClear());
    }
    this.mergeUnknownFields(other.getUnknownFields());
    return this;
  }

  public final boolean isInitialized() {
    // No required fields in StatisticsRequest.
    return true;
  }

  public Builder mergeFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest parsedMessage = null;
    try {
      parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      // Keep whatever was parsed before the failure, then rethrow.
      parsedMessage = (coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest) e.getUnfinishedMessage();
      throw e;
    } finally {
      if (parsedMessage != null) {
        mergeFrom(parsedMessage);
      }
    }
    return this;
  }
  private int bitField0_;

  private boolean clear_ ;
  /**
   * <code>optional bool clear = 1 [default = false];</code>
   */
  public boolean hasClear() {
    return ((bitField0_ & 0x00000001) == 0x00000001);
  }
  /**
   * <code>optional bool clear = 1 [default = false];</code>
   */
  public boolean getClear() {
    return clear_;
  }
  /**
   * <code>optional bool clear = 1 [default = false];</code>
   */
  public Builder setClear(boolean value) {
    bitField0_ |= 0x00000001;
    clear_ = value;
    onChanged();
    return this;
  }
  /**
   * <code>optional bool clear = 1 [default = false];</code>
   */
  public Builder clearClear() {
    bitField0_ = (bitField0_ & ~0x00000001);
    clear_ = false;
    onChanged();
    return this;
  }

  // @@protoc_insertion_point(builder_scope:StatisticsRequest)
}

static {
  defaultInstance = new StatisticsRequest(true);
  defaultInstance.initFields();
}

// @@protoc_insertion_point(class_scope:StatisticsRequest)
}

public interface StatisticsResponseOrBuilder extends
    // @@protoc_insertion_point(interface_extends:StatisticsResponse)
    com.google.protobuf.MessageOrBuilder {

  /**
   * <code>repeated .NameInt32Pair attribute = 1;</code>
   */
  java.util.List<coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair>
      getAttributeList();
  /**
   * <code>repeated .NameInt32Pair attribute = 1;</code>
   */
  coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair getAttribute(int index);
  /**
   * <code>repeated .NameInt32Pair attribute = 1;</code>
   */
  int getAttributeCount();
  /**
   * <code>repeated .NameInt32Pair attribute = 1;</code>
   */
  java.util.List<?
// NOTE(review): continuation of protoc-generated code — do not hand-edit;
// regenerate from the .proto instead. Comments here are review annotations only.
extends coprocessor.generated.ObserverStatisticsProtos.NameInt32PairOrBuilder>
    getAttributeOrBuilderList();
/**
 * <code>repeated .NameInt32Pair attribute = 1;</code>
 */
coprocessor.generated.ObserverStatisticsProtos.NameInt32PairOrBuilder getAttributeOrBuilder(
    int index);
}
/**
 * Protobuf type {@code StatisticsResponse}
 */
public static final class StatisticsResponse extends
    com.google.protobuf.GeneratedMessage implements
    // @@protoc_insertion_point(message_implements:StatisticsResponse)
    StatisticsResponseOrBuilder {
  // Use StatisticsResponse.newBuilder() to construct.
  private StatisticsResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
    super(builder);
    this.unknownFields = builder.getUnknownFields();
  }
  private StatisticsResponse(boolean noInit) {
    this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance();
  }

  private static final StatisticsResponse defaultInstance;
  public static StatisticsResponse getDefaultInstance() {
    return defaultInstance;
  }

  public StatisticsResponse getDefaultInstanceForType() {
    return defaultInstance;
  }

  private final com.google.protobuf.UnknownFieldSet unknownFields;
  @java.lang.Override
  public final com.google.protobuf.UnknownFieldSet
      getUnknownFields() {
    return this.unknownFields;
  }
  // Wire-format parsing constructor: reads repeated `attribute` entries
  // (tag 10 = field 1, length-delimited) into a mutable list, then freezes
  // the list in the finally block.
  private StatisticsResponse(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    initFields();
    int mutable_bitField0_ = 0;
    com.google.protobuf.UnknownFieldSet.Builder unknownFields =
        com.google.protobuf.UnknownFieldSet.newBuilder();
    try {
      boolean done = false;
      while (!done) {
        int tag = input.readTag();
        switch (tag) {
          case 0:
            done = true;
            break;
          default: {
            if (!parseUnknownField(input, unknownFields,
                                   extensionRegistry, tag)) {
              done = true;
            }
            break;
          }
          case 10: {
            if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
              attribute_ = new java.util.ArrayList<coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair>();
              mutable_bitField0_ |= 0x00000001;
            }
            attribute_.add(input.readMessage(coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.PARSER, extensionRegistry));
            break;
          }
        }
      }
    } catch (com.google.protobuf.InvalidProtocolBufferException e) {
      throw e.setUnfinishedMessage(this);
    } catch (java.io.IOException e) {
      throw new com.google.protobuf.InvalidProtocolBufferException(
          e.getMessage()).setUnfinishedMessage(this);
    } finally {
      if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
        attribute_ = java.util.Collections.unmodifiableList(attribute_);
      }
      this.unknownFields = unknownFields.build();
      makeExtensionsImmutable();
    }
  }
  public static final com.google.protobuf.Descriptors.Descriptor
      getDescriptor() {
    return coprocessor.generated.ObserverStatisticsProtos.internal_static_StatisticsResponse_descriptor;
  }

  protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internalGetFieldAccessorTable() {
    return coprocessor.generated.ObserverStatisticsProtos.internal_static_StatisticsResponse_fieldAccessorTable
        .ensureFieldAccessorsInitialized(
            coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse.class, coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse.Builder.class);
  }

  public static com.google.protobuf.Parser<StatisticsResponse> PARSER =
      new com.google.protobuf.AbstractParser<StatisticsResponse>() {
    public StatisticsResponse parsePartialFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return new StatisticsResponse(input, extensionRegistry);
    }
  };

  @java.lang.Override
  public com.google.protobuf.Parser<StatisticsResponse> getParserForType() {
    return PARSER;
  }

  public static final int ATTRIBUTE_FIELD_NUMBER = 1;
  private java.util.List<coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair> attribute_;
  /**
   * <code>repeated .NameInt32Pair attribute = 1;</code>
   */
  public java.util.List<coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair> getAttributeList() {
    return attribute_;
  }
  /**
   * <code>repeated .NameInt32Pair attribute = 1;</code>
   */
  public java.util.List<? extends coprocessor.generated.ObserverStatisticsProtos.NameInt32PairOrBuilder>
      getAttributeOrBuilderList() {
    return attribute_;
  }
  /**
   * <code>repeated .NameInt32Pair attribute = 1;</code>
   */
  public int getAttributeCount() {
    return attribute_.size();
  }
  /**
   * <code>repeated .NameInt32Pair attribute = 1;</code>
   */
  public coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair getAttribute(int index) {
    return attribute_.get(index);
  }
  /**
   * <code>repeated .NameInt32Pair attribute = 1;</code>
   */
  public coprocessor.generated.ObserverStatisticsProtos.NameInt32PairOrBuilder getAttributeOrBuilder(
      int index) {
    return attribute_.get(index);
  }

  private void initFields() {
    attribute_ = java.util.Collections.emptyList();
  }
  private byte memoizedIsInitialized = -1;
  public final boolean isInitialized() {
    byte isInitialized = memoizedIsInitialized;
    if (isInitialized == 1) return true;
    if (isInitialized == 0) return false;

    memoizedIsInitialized = 1;
    return true;
  }

  public void writeTo(com.google.protobuf.CodedOutputStream output)
                      throws java.io.IOException {
    getSerializedSize();
    for (int i = 0; i < attribute_.size(); i++) {
      output.writeMessage(1, attribute_.get(i));
    }
    getUnknownFields().writeTo(output);
  }

  private int memoizedSerializedSize = -1;
  // Lazily computed and cached serialized wire size.
  public int getSerializedSize() {
    int size = memoizedSerializedSize;
    if (size != -1) return size;

    size = 0;
    for (int i = 0; i < attribute_.size(); i++) {
      size += com.google.protobuf.CodedOutputStream
        .computeMessageSize(1, attribute_.get(i));
    }
    size += getUnknownFields().getSerializedSize();
    memoizedSerializedSize = size;
    return size;
  }

  private static final long serialVersionUID = 0L;
  @java.lang.Override
  protected java.lang.Object writeReplace()
      throws java.io.ObjectStreamException {
    return super.writeReplace();
  }

  // Structural equality over the attribute list plus unknown fields.
  @java.lang.Override
  public boolean equals(final java.lang.Object obj) {
    if (obj == this) {
      return true;
    }
    if (!(obj instanceof coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse)) {
      return super.equals(obj);
    }
    coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse other = (coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse) obj;

    boolean result = true;
    result = result && getAttributeList()
        .equals(other.getAttributeList());
    result = result && getUnknownFields().equals(other.getUnknownFields());
    return result;
  }

  // hashCode consistent with equals; memoized (0 means "not yet computed").
  @java.lang.Override
  public int hashCode() {
    if (memoizedHashCode != 0) {
      return memoizedHashCode;
    }
    int hash = 41;
    hash = (19 * hash) + getDescriptorForType().hashCode();
    if (getAttributeCount() > 0) {
      hash = (37 * hash) + ATTRIBUTE_FIELD_NUMBER;
      hash = (53 * hash) + getAttributeList().hashCode();
    }
    hash = (29 * hash) + getUnknownFields().hashCode();
    memoizedHashCode = hash;
    return hash;
  }

  // Standard protoc-generated static parse entry points; all delegate to PARSER.
  public static coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse parseFrom(
      com.google.protobuf.ByteString data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse parseFrom(
      com.google.protobuf.ByteString data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse parseFrom(byte[] data)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data);
  }
  public static coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse parseFrom(
      byte[] data,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws com.google.protobuf.InvalidProtocolBufferException {
    return PARSER.parseFrom(data, extensionRegistry);
  }
  public static coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse parseFrom(java.io.InputStream input)
      throws java.io.IOException {
    return PARSER.parseFrom(input);
  }
  public static coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse parseFrom(
      java.io.InputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return PARSER.parseFrom(input, extensionRegistry);
  }
  public static coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse parseDelimitedFrom(java.io.InputStream input)
      throws java.io.IOException {
    return PARSER.parseDelimitedFrom(input);
  }
  public static coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse parseDelimitedFrom(
      java.io.InputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return PARSER.parseDelimitedFrom(input, extensionRegistry);
  }
  public static coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse parseFrom(
      com.google.protobuf.CodedInputStream input)
      throws java.io.IOException {
    return PARSER.parseFrom(input);
  }
  public static coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse parseFrom(
      com.google.protobuf.CodedInputStream input,
      com.google.protobuf.ExtensionRegistryLite extensionRegistry)
      throws java.io.IOException {
    return PARSER.parseFrom(input, extensionRegistry);
  }

  public static Builder newBuilder() { return Builder.create(); }
  public Builder newBuilderForType() { return newBuilder(); }
  public static Builder newBuilder(coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse prototype) {
    return newBuilder().mergeFrom(prototype);
  }
  public Builder toBuilder() { return newBuilder(this); }

  @java.lang.Override
  protected Builder newBuilderForType(
      com.google.protobuf.GeneratedMessage.BuilderParent parent) {
    Builder builder = new Builder(parent);
    return builder;
  }
  /**
   * Protobuf type {@code StatisticsResponse}
   */
  public static final class Builder extends
      com.google.protobuf.GeneratedMessage.Builder<Builder> implements
      // @@protoc_insertion_point(builder_implements:StatisticsResponse)
      coprocessor.generated.ObserverStatisticsProtos.StatisticsResponseOrBuilder {
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return coprocessor.generated.ObserverStatisticsProtos.internal_static_StatisticsResponse_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return coprocessor.generated.ObserverStatisticsProtos.internal_static_StatisticsResponse_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse.class, coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse.Builder.class);
    }

    // Construct using coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse.newBuilder()
    private Builder() {
      maybeForceBuilderInitialization();
    }

    private Builder(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      super(parent);
      maybeForceBuilderInitialization();
    }
    private void maybeForceBuilderInitialization() {
      // Eagerly create the repeated-field builder when field builders are forced.
      if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        getAttributeFieldBuilder();
      }
    }
    private static Builder create() {
      return new Builder();
    }

    // Resets the attribute list, via either the plain list or the field builder.
    public Builder clear() {
      super.clear();
      if (attributeBuilder_ == null) {
        attribute_ = java.util.Collections.emptyList();
        bitField0_ = (bitField0_ & ~0x00000001);
      } else {
        attributeBuilder_.clear();
      }
      return this;
    }

    public Builder clone() {
      return create().mergeFrom(buildPartial());
    }

    public com.google.protobuf.Descriptors.Descriptor
        getDescriptorForType() {
      return coprocessor.generated.ObserverStatisticsProtos.internal_static_StatisticsResponse_descriptor;
    }

    public coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse getDefaultInstanceForType() {
      return coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse.getDefaultInstance();
    }

    public coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse build() {
      coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse result = buildPartial();
      if (!result.isInitialized()) {
        throw newUninitializedMessageException(result);
      }
      return result;
    }

    // Copies builder state into a new message; freezes the plain list when no
    // field builder is in use.
    public coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse buildPartial() {
      coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse result = new coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse(this);
      int from_bitField0_ = bitField0_;
      if (attributeBuilder_ == null) {
        if (((bitField0_ & 0x00000001) == 0x00000001)) {
          attribute_ = java.util.Collections.unmodifiableList(attribute_);
          bitField0_ = (bitField0_ & ~0x00000001);
        }
        result.attribute_ = attribute_;
      } else {
        result.attribute_ = attributeBuilder_.build();
      }
      onBuilt();
      return result;
    }

    public Builder mergeFrom(com.google.protobuf.Message other) {
      if (other instanceof coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse) {
        return mergeFrom((coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse)other);
      } else {
        super.mergeFrom(other);
        return this;
      }
    }

    public Builder mergeFrom(coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse other) {
      if (other == coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse.getDefaultInstance()) return this;
      if (attributeBuilder_ == null) {
        if (!other.attribute_.isEmpty()) {
          if (attribute_.isEmpty()) {
            attribute_ = other.attribute_;
            bitField0_ = (bitField0_ & ~0x00000001);
          } else {
            ensureAttributeIsMutable();
            attribute_.addAll(other.attribute_);
          }
          onChanged();
        }
      } else {
        if (!other.attribute_.isEmpty()) {
          if (attributeBuilder_.isEmpty()) {
            attributeBuilder_.dispose();
            attributeBuilder_ = null;
            attribute_ = other.attribute_;
            bitField0_ = (bitField0_ & ~0x00000001);
            attributeBuilder_ =
              com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getAttributeFieldBuilder() : null; } else { attributeBuilder_.addAllMessages(other.attribute_); } } } this.mergeUnknownFields(other.getUnknownFields()); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.util.List<coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair> attribute_ = java.util.Collections.emptyList(); private void ensureAttributeIsMutable() { if (!((bitField0_ & 0x00000001) == 0x00000001)) { attribute_ = new java.util.ArrayList<coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair>(attribute_); bitField0_ |= 0x00000001; } } private com.google.protobuf.RepeatedFieldBuilder< coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair, coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.Builder, coprocessor.generated.ObserverStatisticsProtos.NameInt32PairOrBuilder> attributeBuilder_; /** * <code>repeated .NameInt32Pair attribute = 1;</code> */ public java.util.List<coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair> getAttributeList() { if (attributeBuilder_ == null) { return java.util.Collections.unmodifiableList(attribute_); } else { return attributeBuilder_.getMessageList(); } } /** * <code>repeated .NameInt32Pair attribute = 1;</code> */ public int getAttributeCount() { if (attributeBuilder_ == null) { return attribute_.size(); } else { return attributeBuilder_.getCount(); } } /** * 
<code>repeated .NameInt32Pair attribute = 1;</code> */ public coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair getAttribute(int index) { if (attributeBuilder_ == null) { return attribute_.get(index); } else { return attributeBuilder_.getMessage(index); } } /** * <code>repeated .NameInt32Pair attribute = 1;</code> */ public Builder setAttribute( int index, coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair value) { if (attributeBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureAttributeIsMutable(); attribute_.set(index, value); onChanged(); } else { attributeBuilder_.setMessage(index, value); } return this; } /** * <code>repeated .NameInt32Pair attribute = 1;</code> */ public Builder setAttribute( int index, coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.Builder builderForValue) { if (attributeBuilder_ == null) { ensureAttributeIsMutable(); attribute_.set(index, builderForValue.build()); onChanged(); } else { attributeBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * <code>repeated .NameInt32Pair attribute = 1;</code> */ public Builder addAttribute(coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair value) { if (attributeBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureAttributeIsMutable(); attribute_.add(value); onChanged(); } else { attributeBuilder_.addMessage(value); } return this; } /** * <code>repeated .NameInt32Pair attribute = 1;</code> */ public Builder addAttribute( int index, coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair value) { if (attributeBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureAttributeIsMutable(); attribute_.add(index, value); onChanged(); } else { attributeBuilder_.addMessage(index, value); } return this; } /** * <code>repeated .NameInt32Pair attribute = 1;</code> */ public Builder addAttribute( 
coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.Builder builderForValue) { if (attributeBuilder_ == null) { ensureAttributeIsMutable(); attribute_.add(builderForValue.build()); onChanged(); } else { attributeBuilder_.addMessage(builderForValue.build()); } return this; } /** * <code>repeated .NameInt32Pair attribute = 1;</code> */ public Builder addAttribute( int index, coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.Builder builderForValue) { if (attributeBuilder_ == null) { ensureAttributeIsMutable(); attribute_.add(index, builderForValue.build()); onChanged(); } else { attributeBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * <code>repeated .NameInt32Pair attribute = 1;</code> */ public Builder addAllAttribute( java.lang.Iterable<? extends coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair> values) { if (attributeBuilder_ == null) { ensureAttributeIsMutable(); com.google.protobuf.AbstractMessageLite.Builder.addAll( values, attribute_); onChanged(); } else { attributeBuilder_.addAllMessages(values); } return this; } /** * <code>repeated .NameInt32Pair attribute = 1;</code> */ public Builder clearAttribute() { if (attributeBuilder_ == null) { attribute_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000001); onChanged(); } else { attributeBuilder_.clear(); } return this; } /** * <code>repeated .NameInt32Pair attribute = 1;</code> */ public Builder removeAttribute(int index) { if (attributeBuilder_ == null) { ensureAttributeIsMutable(); attribute_.remove(index); onChanged(); } else { attributeBuilder_.remove(index); } return this; } /** * <code>repeated .NameInt32Pair attribute = 1;</code> */ public coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.Builder getAttributeBuilder( int index) { return getAttributeFieldBuilder().getBuilder(index); } /** * <code>repeated .NameInt32Pair attribute = 1;</code> */ public 
coprocessor.generated.ObserverStatisticsProtos.NameInt32PairOrBuilder getAttributeOrBuilder( int index) { if (attributeBuilder_ == null) { return attribute_.get(index); } else { return attributeBuilder_.getMessageOrBuilder(index); } } /** * <code>repeated .NameInt32Pair attribute = 1;</code> */ public java.util.List<? extends coprocessor.generated.ObserverStatisticsProtos.NameInt32PairOrBuilder> getAttributeOrBuilderList() { if (attributeBuilder_ != null) { return attributeBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(attribute_); } } /** * <code>repeated .NameInt32Pair attribute = 1;</code> */ public coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.Builder addAttributeBuilder() { return getAttributeFieldBuilder().addBuilder( coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.getDefaultInstance()); } /** * <code>repeated .NameInt32Pair attribute = 1;</code> */ public coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.Builder addAttributeBuilder( int index) { return getAttributeFieldBuilder().addBuilder( index, coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.getDefaultInstance()); } /** * <code>repeated .NameInt32Pair attribute = 1;</code> */ public java.util.List<coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.Builder> getAttributeBuilderList() { return getAttributeFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair, coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.Builder, coprocessor.generated.ObserverStatisticsProtos.NameInt32PairOrBuilder> getAttributeFieldBuilder() { if (attributeBuilder_ == null) { attributeBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair, coprocessor.generated.ObserverStatisticsProtos.NameInt32Pair.Builder, 
coprocessor.generated.ObserverStatisticsProtos.NameInt32PairOrBuilder>( attribute_, ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); attribute_ = null; } return attributeBuilder_; } // @@protoc_insertion_point(builder_scope:StatisticsResponse) } static { defaultInstance = new StatisticsResponse(true); defaultInstance.initFields(); } // @@protoc_insertion_point(class_scope:StatisticsResponse) } /** * Protobuf service {@code ObserverStatisticsService} */ public static abstract class ObserverStatisticsService implements com.google.protobuf.Service { protected ObserverStatisticsService() {} public interface Interface { /** * <code>rpc getStatistics(.StatisticsRequest) returns (.StatisticsResponse);</code> */ public abstract void getStatistics( com.google.protobuf.RpcController controller, coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest request, com.google.protobuf.RpcCallback<coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse> done); } public static com.google.protobuf.Service newReflectiveService( final Interface impl) { return new ObserverStatisticsService() { @java.lang.Override public void getStatistics( com.google.protobuf.RpcController controller, coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest request, com.google.protobuf.RpcCallback<coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse> done) { impl.getStatistics(controller, request, done); } }; } public static com.google.protobuf.BlockingService newReflectiveBlockingService(final BlockingInterface impl) { return new com.google.protobuf.BlockingService() { public final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptorForType() { return getDescriptor(); } public final com.google.protobuf.Message callBlockingMethod( com.google.protobuf.Descriptors.MethodDescriptor method, com.google.protobuf.RpcController controller, com.google.protobuf.Message request) throws com.google.protobuf.ServiceException { if 
(method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.callBlockingMethod() given method descriptor for " + "wrong service type."); } switch(method.getIndex()) { case 0: return impl.getStatistics(controller, (coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } } public final com.google.protobuf.Message getRequestPrototype( com.google.protobuf.Descriptors.MethodDescriptor method) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.getRequestPrototype() given method " + "descriptor for wrong service type."); } switch(method.getIndex()) { case 0: return coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } } public final com.google.protobuf.Message getResponsePrototype( com.google.protobuf.Descriptors.MethodDescriptor method) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.getResponsePrototype() given method " + "descriptor for wrong service type."); } switch(method.getIndex()) { case 0: return coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } } }; } /** * <code>rpc getStatistics(.StatisticsRequest) returns (.StatisticsResponse);</code> */ public abstract void getStatistics( com.google.protobuf.RpcController controller, coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest request, com.google.protobuf.RpcCallback<coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse> done); public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { return coprocessor.generated.ObserverStatisticsProtos.getDescriptor().getServices().get(0); } public final 
com.google.protobuf.Descriptors.ServiceDescriptor getDescriptorForType() { return getDescriptor(); } public final void callMethod( com.google.protobuf.Descriptors.MethodDescriptor method, com.google.protobuf.RpcController controller, com.google.protobuf.Message request, com.google.protobuf.RpcCallback< com.google.protobuf.Message> done) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.callMethod() given method descriptor for wrong " + "service type."); } switch(method.getIndex()) { case 0: this.getStatistics(controller, (coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest)request, com.google.protobuf.RpcUtil.<coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse>specializeCallback( done)); return; default: throw new java.lang.AssertionError("Can't get here."); } } public final com.google.protobuf.Message getRequestPrototype( com.google.protobuf.Descriptors.MethodDescriptor method) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.getRequestPrototype() given method " + "descriptor for wrong service type."); } switch(method.getIndex()) { case 0: return coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } } public final com.google.protobuf.Message getResponsePrototype( com.google.protobuf.Descriptors.MethodDescriptor method) { if (method.getService() != getDescriptor()) { throw new java.lang.IllegalArgumentException( "Service.getResponsePrototype() given method " + "descriptor for wrong service type."); } switch(method.getIndex()) { case 0: return coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } } public static Stub newStub( com.google.protobuf.RpcChannel channel) { return new Stub(channel); } public static final class Stub extends 
coprocessor.generated.ObserverStatisticsProtos.ObserverStatisticsService implements Interface { private Stub(com.google.protobuf.RpcChannel channel) { this.channel = channel; } private final com.google.protobuf.RpcChannel channel; public com.google.protobuf.RpcChannel getChannel() { return channel; } public void getStatistics( com.google.protobuf.RpcController controller, coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest request, com.google.protobuf.RpcCallback<coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse> done) { channel.callMethod( getDescriptor().getMethods().get(0), controller, request, coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse.getDefaultInstance(), com.google.protobuf.RpcUtil.generalizeCallback( done, coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse.class, coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse.getDefaultInstance())); } } public static BlockingInterface newBlockingStub( com.google.protobuf.BlockingRpcChannel channel) { return new BlockingStub(channel); } public interface BlockingInterface { public coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse getStatistics( com.google.protobuf.RpcController controller, coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest request) throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { this.channel = channel; } private final com.google.protobuf.BlockingRpcChannel channel; public coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse getStatistics( com.google.protobuf.RpcController controller, coprocessor.generated.ObserverStatisticsProtos.StatisticsRequest request) throws com.google.protobuf.ServiceException { return (coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse) channel.callBlockingMethod( getDescriptor().getMethods().get(0), 
controller, request, coprocessor.generated.ObserverStatisticsProtos.StatisticsResponse.getDefaultInstance()); } } // @@protoc_insertion_point(class_scope:ObserverStatisticsService) } private static final com.google.protobuf.Descriptors.Descriptor internal_static_NameInt32Pair_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_NameInt32Pair_fieldAccessorTable; private static final com.google.protobuf.Descriptors.Descriptor internal_static_StatisticsRequest_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_StatisticsRequest_fieldAccessorTable; private static final com.google.protobuf.Descriptors.Descriptor internal_static_StatisticsResponse_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_StatisticsResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; } private static com.google.protobuf.Descriptors.FileDescriptor descriptor; static { java.lang.String[] descriptorData = { "\n\037ObserverStatisticsService.proto\",\n\rNam" + "eInt32Pair\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002 \001(\005\"" + ")\n\021StatisticsRequest\022\024\n\005clear\030\001 \001(\010:\005fal" + "se\"7\n\022StatisticsResponse\022!\n\tattribute\030\001 " + "\003(\0132\016.NameInt32Pair2U\n\031ObserverStatistic" + "sService\0228\n\rgetStatistics\022\022.StatisticsRe" + "quest\032\023.StatisticsResponseB9\n\025coprocesso" + "r.generatedB\030ObserverStatisticsProtosH\001\210" + "\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.Descriptors.FileDescriptor root) { descriptor = root; return null; } }; com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { }, assigner); internal_static_NameInt32Pair_descriptor = getDescriptor().getMessageTypes().get(0); internal_static_NameInt32Pair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_NameInt32Pair_descriptor, new java.lang.String[] { "Name", "Value", }); internal_static_StatisticsRequest_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_StatisticsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_StatisticsRequest_descriptor, new java.lang.String[] { "Clear", }); internal_static_StatisticsResponse_descriptor = getDescriptor().getMessageTypes().get(2); internal_static_StatisticsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_StatisticsResponse_descriptor, new java.lang.String[] { "Attribute", }); } // @@protoc_insertion_point(outer_class_scope) }
larsgeorge/hbase-book
ch04/src/main/java/coprocessor/generated/ObserverStatisticsProtos.java
Java
apache-2.0
77,188
/* * Copyright (c) 2014-2018 University of Ulm * * See the NOTICE file distributed with this work for additional information * regarding copyright ownership. Licensed under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package de.uniulm.omi.cloudiator.sword.drivers.google.config; import com.google.inject.Injector; import de.uniulm.omi.cloudiator.sword.domain.TemplateOptions; import de.uniulm.omi.cloudiator.sword.drivers.google.GoogleJCloudsViewFactory; import de.uniulm.omi.cloudiator.sword.drivers.google.converters.TemplateOptionsToGoogleTemplateOptions; import de.uniulm.omi.cloudiator.sword.drivers.jclouds.JCloudsViewFactory; import de.uniulm.omi.cloudiator.sword.drivers.jclouds.config.JCloudsComputeModule; import de.uniulm.omi.cloudiator.util.OneWayConverter; /** * Created by daniel on 12.01.16. */ public class GoogleCloudComputeModule extends JCloudsComputeModule { @Override protected JCloudsViewFactory overrideJCloudsViewFactory(Injector injector, JCloudsViewFactory originalFactory) { return injector.getInstance(GoogleJCloudsViewFactory.class); } @Override protected Class<? extends OneWayConverter<TemplateOptions, org.jclouds.compute.options.TemplateOptions>> templateOptionsConverter() { return TemplateOptionsToGoogleTemplateOptions.class; } }
cloudiator/sword
drivers/google/src/main/java/de/uniulm/omi/cloudiator/sword/drivers/google/config/GoogleCloudComputeModule.java
Java
apache-2.0
1,805
var singleQuote = require('..'); describe('singlequote with \\r', function () { var result; before(function () { function x() { return 'hello" I am a string\'s for sure'; } result = singleQuote(x.toString().replace(/\r/g, '').replace(/\n/g, '\r\n')); //make sure we have \r on both mac an win }); it('should return code with single quotes string', function () { function x() { return 'hello" I am a string\'s for sure'; } expect(result, result).to.equal(x.toString().replace(/\r/g, '').replace(/\n/g, '\r\n')); }); }); describe('singlequote without \\r', function () { var result; before(function () { function x() { return 'hello" I am a string\'s for sure'; } result = singleQuote(x.toString().replace(/\r/g, '')); }); it('should return code with single quotes string', function () { function x() { return 'hello" I am a string\'s for sure'; } expect(result, result).to.equal(x.toString().replace(/\r/g, '')); }); }); describe('singlequote already single quoted string', function () { var result; before(function () { function x() { return 'when customer payment term is "dueDate"'; } result = singleQuote(x.toString()); }); it('should return code with single quotes string', function () { function x() { return 'when customer payment term is "dueDate"'; } expect(result, result).to.equal(x.toString()); }); }); describe('singlequote with tab in string', function () { var code, result; before(function () { code = 'var x="\t"'; result = singleQuote(code); }); it('should return code with tab in string', function () { expect(result, result).to.equal('var x=\'\t\''); }); }); describe('singlequote with \\t in string', function () { var code, result; before(function () { code = 'var x="\\t"'; result = singleQuote(code); }); it('should return code with \\t in string', function () { expect(result, result).to.equal('var x=\'\\t\''); }); }); describe('singlequote with #!/usr/bin/env node', function () { var result; before(function () { function x() { return 'hello" I am a string\'s for sure'; } 
result = singleQuote('#!/usr/bin/env node\n' + x.toString().replace(/\r/g, '')); }); it('should return code with single quotes string', function () { function x() { return 'hello" I am a string\'s for sure'; } expect(result, result).to.equal('#!/usr/bin/env node\n' + x.toString().replace(/\r/g, '')); }); }); describe('singlequote code with UTF-8 Byte Order Mark and #!/usr/bin/env node', function () { var result; before(function () { function x() { return 'hello" I am a string\'s for sure'; } result = singleQuote('\ufeff#!/usr/bin/env node\n' + x.toString().replace(/\r/g, '')); }); it('should return code with single quotes string', function () { function x() { return 'hello" I am a string\'s for sure'; } expect(result, result).to.equal('\ufeff#!/usr/bin/env node\n' + x.toString().replace(/\r/g, '')); }); }); describe('singlequote code with return-statment in main cod (allowed in node.js)', function () { var code, result; before(function () { code = 'var x=3;\nreturn;'; result = singleQuote(code); }); it('should return code with \\t in string', function () { expect(result, result).to.equal(code); }); });
ebdrup/singlequote
test/basic.spec.js
JavaScript
apache-2.0
3,283
/* Generated by camel build tools - do NOT edit this file! */
package org.apache.camel.component.openstack.keystone;

import java.util.Map;
import org.apache.camel.CamelContext;
import org.apache.camel.spi.ConfigurerStrategy;
import org.apache.camel.spi.GeneratedPropertyConfigurer;
import org.apache.camel.spi.PropertyConfigurerGetter;
import org.apache.camel.util.CaseInsensitiveMap;
import org.apache.camel.support.component.PropertyConfigurerSupport;

/**
 * Generated by camel build tools - do NOT edit this file!
 */
@SuppressWarnings("unchecked")
public class KeystoneComponentConfigurer extends PropertyConfigurerSupport implements GeneratedPropertyConfigurer, PropertyConfigurerGetter {

    // Option-name -> option-type lookup shared by all instances. Backed by a
    // CaseInsensitiveMap so both "lazystartproducer" and "lazyStartProducer"
    // resolve to the same entry.
    private static final Map<String, Object> ALL_OPTIONS;
    static {
        Map<String, Object> map = new CaseInsensitiveMap();
        map.put("lazyStartProducer", boolean.class);
        map.put("basicPropertyBinding", boolean.class);
        ALL_OPTIONS = map;
        // Register a clearer so the option map can be released when Camel
        // resets its configurers.
        ConfigurerStrategy.addConfigurerClearer(KeystoneComponentConfigurer::clearConfigurers);
    }

    // Sets the named option on the KeystoneComponent. Returns false for
    // unknown option names so the caller can fall back to other strategies.
    @Override
    public boolean configure(CamelContext camelContext, Object obj, String name, Object value, boolean ignoreCase) {
        KeystoneComponent target = (KeystoneComponent) obj;
        switch (ignoreCase ? name.toLowerCase() : name) {
        // Each option is listed twice: once all-lower-case (matched when
        // ignoreCase is true) and once in its canonical camelCase form.
        case "basicpropertybinding":
        case "basicPropertyBinding": target.setBasicPropertyBinding(property(camelContext, boolean.class, value)); return true;
        case "lazystartproducer":
        case "lazyStartProducer": target.setLazyStartProducer(property(camelContext, boolean.class, value)); return true;
        default: return false;
        }
    }

    // Exposes the full option-name -> type table (the target is unused here).
    @Override
    public Map<String, Object> getAllOptions(Object target) {
        return ALL_OPTIONS;
    }

    // No bootstrap-scoped configurers are generated for this component.
    public static void clearBootstrapConfigurers() {
    }

    public static void clearConfigurers() {
        ALL_OPTIONS.clear();
    }

    // Reads the named option from the KeystoneComponent. Returns null for
    // unknown option names.
    @Override
    public Object getOptionValue(Object obj, String name, boolean ignoreCase) {
        KeystoneComponent target = (KeystoneComponent) obj;
        switch (ignoreCase ? name.toLowerCase() : name) {
        case "basicpropertybinding":
        case "basicPropertyBinding": return target.isBasicPropertyBinding();
        case "lazystartproducer":
        case "lazyStartProducer": return target.isLazyStartProducer();
        default: return null;
        }
    }
}
mcollovati/camel
components/camel-openstack/src/generated/java/org/apache/camel/component/openstack/keystone/KeystoneComponentConfigurer.java
Java
apache-2.0
2,417
/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

// Generated AWS SDK model source for the Inspector2 ListMembers request.

#include <aws/inspector2/model/ListMembersRequest.h>
#include <aws/core/utils/json/JsonSerializer.h>

#include <utility>

using namespace Aws::Inspector2::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;

// Default-constructs the request with numeric/boolean members zeroed and
// every "HasBeenSet" flag false, so fields the caller never sets are
// omitted from the serialized payload.
ListMembersRequest::ListMembersRequest() :
    m_maxResults(0),
    m_maxResultsHasBeenSet(false),
    m_nextTokenHasBeenSet(false),
    m_onlyAssociated(false),
    m_onlyAssociatedHasBeenSet(false)
{
}

// Serializes only the explicitly-set fields into the JSON request body.
Aws::String ListMembersRequest::SerializePayload() const
{
  JsonValue payload;

  if(m_maxResultsHasBeenSet)
  {
   payload.WithInteger("maxResults", m_maxResults);
  }

  if(m_nextTokenHasBeenSet)
  {
   payload.WithString("nextToken", m_nextToken);
  }

  if(m_onlyAssociatedHasBeenSet)
  {
   payload.WithBool("onlyAssociated", m_onlyAssociated);
  }

  return payload.View().WriteReadable();
}
aws/aws-sdk-cpp
aws-cpp-sdk-inspector2/source/model/ListMembersRequest.cpp
C++
apache-2.0
959
/****************************************************************************
 * Copyright 2010 kraigs.android@gmail.com
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ****************************************************************************/

package io.github.carlorodriguez.alarmon;

import java.util.TreeMap;

import android.app.AlarmManager;
import android.app.PendingIntent;
import android.content.Context;
import android.content.Intent;
import android.os.Build;

/**
 * This container holds a list of all currently scheduled alarms.
 * Adding/removing alarms to this container schedules/unschedules PendingIntents
 * with the android AlarmManager service.
 *
 * <p>The two maps are kept strictly in sync: every alarm id in
 * {@code pendingAlarms} has exactly one corresponding entry in
 * {@code alarmTimes}, and every public mutation re-validates that invariant.
 * Not thread-safe; callers must provide external synchronization.
 */
public final class PendingAlarmList {
  // Maps alarmId -> alarm.
  private final TreeMap<Long, PendingAlarm> pendingAlarms;
  // Maps alarm time -> alarmId.
  private final TreeMap<AlarmTime, Long> alarmTimes;
  private final AlarmManager alarmManager;
  private final Context context;

  public PendingAlarmList(Context context) {
    pendingAlarms = new TreeMap<>();
    alarmTimes = new TreeMap<>();
    alarmManager = (AlarmManager) context.getSystemService(Context.ALARM_SERVICE);
    this.context = context;
  }

  /**
   * Throws if the two internal maps have drifted out of sync.
   * Previously this check was duplicated in size(), put() and remove().
   */
  private void checkConsistency() {
    if (pendingAlarms.size() != alarmTimes.size()) {
      throw new IllegalStateException("Inconsistent pending alarms: "
          + pendingAlarms.size() + " vs " + alarmTimes.size());
    }
  }

  /** Returns the number of scheduled alarms, validating internal state first. */
  public int size() {
    checkConsistency();
    return pendingAlarms.size();
  }

  /**
   * Schedules (or re-schedules) the alarm with the given id at the given time
   * via the AlarmManager, using the most precise API the platform offers.
   */
  public void put(long alarmId, AlarmTime time) {
    // Remove this alarm if it exists already.
    remove(alarmId);

    // Intents are considered equal if they have the same action, data, type,
    // class, and categories.  In order to schedule multiple alarms, every
    // pending intent must be different.  This means that we must encode
    // the alarm id in the data section of the intent rather than in
    // the extras bundle.
    Intent notifyIntent = new Intent(context, ReceiverAlarm.class);
    notifyIntent.setData(AlarmUtil.alarmIdToUri(alarmId));
    PendingIntent scheduleIntent =
        PendingIntent.getBroadcast(context, 0, notifyIntent, 0);

    // Schedule the alarm with the AlarmManager.
    // Previous instances of this intent will be overwritten in
    // the alarm manager.
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
      // setAlarmClock shows the alarm icon in the status bar and fires
      // exactly, even in Doze; the show intent opens the alarm clock UI.
      Intent intent = new Intent(context, ActivityAlarmClock.class);
      PendingIntent showIntent = PendingIntent.getActivity(context, 0, intent,
          PendingIntent.FLAG_UPDATE_CURRENT);
      AlarmManager.AlarmClockInfo alarmClockInfo = new AlarmManager.
          AlarmClockInfo(time.calendar().getTimeInMillis(), showIntent);
      alarmManager.setAlarmClock(alarmClockInfo, scheduleIntent);
    } else if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) {
      // KitKat made set() inexact; setExact() preserves precise delivery.
      alarmManager.setExact(AlarmManager.RTC_WAKEUP,
          time.calendar().getTimeInMillis(), scheduleIntent);
    } else {
      alarmManager.set(AlarmManager.RTC_WAKEUP,
          time.calendar().getTimeInMillis(), scheduleIntent);
    }

    // Keep track of all scheduled alarms.
    pendingAlarms.put(alarmId, new PendingAlarm(time, scheduleIntent));
    alarmTimes.put(time, alarmId);
    checkConsistency();
  }

  /**
   * Cancels and forgets the alarm with the given id.
   *
   * @return true if an alarm was actually removed, false if the id was unknown.
   */
  public boolean remove(long alarmId) {
    PendingAlarm alarm = pendingAlarms.remove(alarmId);
    if (alarm == null) {
      return false;
    }
    Long expectedAlarmId = alarmTimes.remove(alarm.time());
    alarmManager.cancel(alarm.pendingIntent());
    alarm.pendingIntent().cancel();
    // Guard against a null before unboxing: the original
    // "expectedAlarmId != alarmId" would throw a bare NullPointerException
    // if alarmTimes had no entry for this time, hiding the real problem.
    if (expectedAlarmId == null || expectedAlarmId != alarmId) {
      throw new IllegalStateException("Internal inconsistency in PendingAlarmList");
    }
    checkConsistency();
    return true;
  }

  /** Returns the earliest scheduled time, or null when nothing is pending. */
  public AlarmTime nextAlarmTime() {
    if (alarmTimes.size() == 0) {
      return null;
    }
    return alarmTimes.firstKey();
  }

  /** Returns the id of the earliest alarm, or NO_ALARM_ID when none pending. */
  public long nextAlarmId() {
    if (alarmTimes.size() == 0) {
      return AlarmClockServiceBinder.NO_ALARM_ID;
    }
    return alarmTimes.get(alarmTimes.firstKey());
  }

  /** Returns the scheduled time for an alarm id, or null if not scheduled. */
  public AlarmTime pendingTime(long alarmId) {
    PendingAlarm alarm = pendingAlarms.get(alarmId);
    return alarm == null ? null : alarm.time();
  }

  /** Returns all pending times in ascending order. */
  public AlarmTime[] pendingTimes() {
    AlarmTime[] times = new AlarmTime[alarmTimes.size()];
    alarmTimes.keySet().toArray(times);
    return times;
  }

  /** Returns all pending alarm ids. */
  public Long[] pendingAlarms() {
    Long[] alarmIds = new Long[pendingAlarms.size()];
    pendingAlarms.keySet().toArray(alarmIds);
    return alarmIds;
  }

  /**
   * Immutable pair of an alarm's time and its scheduled PendingIntent.
   * Made static: it never touches the enclosing instance, so the implicit
   * outer-class reference of a non-static inner class was pure overhead.
   */
  private static final class PendingAlarm {
    private final AlarmTime time;
    private final PendingIntent pendingIntent;

    PendingAlarm(AlarmTime time, PendingIntent pendingIntent) {
      this.time = time;
      this.pendingIntent = pendingIntent;
    }
    public AlarmTime time() {
      return time;
    }
    public PendingIntent pendingIntent() {
      return pendingIntent;
    }
  }
}
CarloRodriguez/AlarmOn
app/src/main/java/io/github/carlorodriguez/alarmon/PendingAlarmList.java
Java
apache-2.0
5,784
package org.pac4j.oauth.run;

import org.pac4j.core.client.IndirectClient;
import org.pac4j.core.profile.CommonProfile;
import org.pac4j.core.run.RunClient;
import org.pac4j.oauth.client.HiOrgServerClient;
import org.pac4j.oauth.profile.hiorgserver.HiOrgServerProfile;

import static org.junit.Assert.*;

/**
 * Run manually a test for the {@link HiOrgServerClient}.
 *
 * <p>Not part of the automated suite: it drives a real OAuth flow against the
 * HiOrg-Server service via the {@link RunClient} harness and then checks the
 * retrieved profile attributes. Requires valid client credentials to be
 * filled in below before running.
 *
 * @author Martin Böhmer
 * @since 3.1.1
 */
public class RunHiOrgServerClient extends RunClient {

    public static void main(String[] args) {
        new RunHiOrgServerClient().run();
    }

    /** Login of the shared pac4j test account used for the manual flow. */
    @Override
    protected String getLogin() {
        return "testscribeup@gmail.com";
    }

    @Override
    protected String getPassword() {
        return "testpwdscribeup";
    }

    /** HiOrg-Server's authorization page offers a cancel action. */
    @Override
    protected boolean canCancel() {
        return true;
    }

    /**
     * Builds the client under test. The key/secret placeholders must be
     * replaced with real HiOrg-Server OAuth credentials before running.
     */
    @Override
    protected IndirectClient getClient() {
        final var client = new HiOrgServerClient();
        client.setKey("your client id");
        client.setSecret("your secret");
        client.setCallbackUrl(PAC4J_BASE_URL);
        return client;
    }

    /**
     * Asserts the attributes expected for the test account's profile,
     * including the HiOrg-Server specific organisation and role fields.
     */
    @Override
    protected void verifyProfile(CommonProfile userProfile) {
        final var profile = (HiOrgServerProfile) userProfile;
        assertEquals("1a396c7895f10eac304a81eef63ca0e2", profile.getId());
        // username casing may vary, so compare lower-cased
        assertEquals("doej", profile.getUsername().toLowerCase());
        assertEquals("John", profile.getFirstName());
        assertEquals("Doe", profile.getFamilyName());
        assertEquals("John Doe", profile.getDisplayName());
        assertEquals("erk", profile.getOrganisationId());
        assertEquals("DRK im Ennepe-Ruhr-Kreis Kreisverband e. V.", profile.getOrganisationName());
        assertEquals("Kreisrotkreuzleiter", profile.getPosition());
        assertEquals(Boolean.TRUE, profile.isLeader());
    }
}
pac4j/pac4j
pac4j-oauth/src/test/java/org/pac4j/oauth/run/RunHiOrgServerClient.java
Java
apache-2.0
1,837
/*
 *  Licensed to the Apache Software Foundation (ASF) under one or more
 *  contributor license agreements.  See the NOTICE file distributed with
 *  this work for additional information regarding copyright ownership.
 *  The ASF licenses this file to You under the Apache License, Version 2.0
 *  (the "License"); you may not use this file except in compliance with
 *  the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

/**
 * @author Aleksei V. Ivaschenko
 * @version $Revision: 1.2 $
 */

package org.apache.harmony.x.print;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.ArrayList;

import javax.print.DocFlavor;
import javax.print.DocPrintJob;
import javax.print.PrintService;
import javax.print.ServiceUIFactory;
import javax.print.StreamPrintService;
import javax.print.StreamPrintServiceFactory;
import javax.print.attribute.Attribute;
import javax.print.attribute.AttributeSet;
import javax.print.attribute.AttributeSetUtilities;
import javax.print.attribute.HashAttributeSet;
import javax.print.attribute.PrintServiceAttribute;
import javax.print.attribute.PrintServiceAttributeSet;
import javax.print.event.PrintServiceAttributeListener;

/**
 * Default {@link PrintService} implementation that delegates all real work to
 * a {@link PrintClient}. Flavors the client cannot handle directly are bridged
 * through {@link StreamPrintServiceFactory} lookups, so the service advertises
 * both the client's native flavors and any flavor convertible to them.
 */
public class DefaultPrintService implements PrintService {

    //= Fields ===============================================================//
    // Backend performing the actual printing; never null after construction.
    private PrintClient client = null;
    // Shared notifier used to fan out attribute events to listeners.
    private EventNotifier notifier = null;
    // Human-readable printer name; also the identity for equals/hashCode.
    private String serviceName = null;

    //= Constructors =========================================================//
    /**
     * Creates a service wrapping the given client under the given name.
     *
     * @throws NullPointerException if either argument is null
     */
    public DefaultPrintService(String servicename, PrintClient printclient) {
        if (printclient == null || servicename == null) {
            throw new NullPointerException("Argument is null");
        }
        this.client = printclient;
        this.serviceName = servicename;
        notifier = EventNotifier.getNotifier();
    }

    //= Basic methods ======================================================//
    /** Package-private accessor for the backing client. */
    PrintClient getPrintClient() {
        return client;
    }

    public String getName() {
        return serviceName;
    }

    // Equality is by service name only; the wrapped client is ignored.
    public boolean equals(Object obj) {
        if (obj instanceof DefaultPrintService) {
            DefaultPrintService service = (DefaultPrintService) obj;
            if (service.getName().equals(serviceName)) {
                return true;
            }
        }
        return false;
    }

    public int hashCode() {
        return serviceName.hashCode();
    }

    public String toString() {
        return "Printer : " + serviceName;
    }

    //= Print service attributes ===========================================//
    /**
     * Returns the service attribute of the given category, or null when the
     * client does not report one.
     *
     * @throws IllegalArgumentException if category is not a
     *         PrintServiceAttribute subtype
     */
    public PrintServiceAttribute getAttribute(Class category) {
        if (!PrintServiceAttribute.class.isAssignableFrom(category)) {
            throw new IllegalArgumentException();
        }
        PrintServiceAttributeSet attributes = getAttributes();
        if (attributes.containsKey(category)) {
            PrintServiceAttribute attribute = (PrintServiceAttribute) attributes
                    .get(category);
            return attribute;
        }
        return null;
    }

    /** Returns an unmodifiable view of the client's current attributes. */
    public PrintServiceAttributeSet getAttributes() {
        return AttributeSetUtilities.unmodifiableView(client.getAttributes());
    }

    //= Print request attributes =============================================//
    public Class[] getSupportedAttributeCategories() {
        return client.getSupportedAttributeCategories();
    }

    /**
     * Linear scan over the client's supported categories.
     *
     * @throws NullPointerException     if category is null
     * @throws IllegalArgumentException if category is not an Attribute type
     */
    public boolean isAttributeCategorySupported(Class category) {
        if (category == null) {
            throw new NullPointerException("Argument 'category' is null");
        }
        if (!(Attribute.class.isAssignableFrom(category))) {
            throw new IllegalArgumentException(
                    "Argument 'category' must implement interface Attribute");
        }

        Class[] categories = getSupportedAttributeCategories();
        for (int i = 0; i < categories.length; i++) {
            if (categories[i].equals(category)) {
                return true;
            }
        }
        return false;
    }

    /**
     * Filters the given set down to attributes the service cannot honor for
     * the flavor; returns null when everything is supported (per the
     * PrintService contract).
     */
    public AttributeSet getUnsupportedAttributes(DocFlavor flavor,
            AttributeSet attributes) {
        if (attributes == null) {
            return null;
        }
        if (flavor != null && !isDocFlavorSupported(flavor)) {
            throw new IllegalArgumentException("Flavor " + flavor.getMimeType()
                    + " is not supported by print service");
        }

        Attribute[] attrs = attributes.toArray();
        HashAttributeSet unsupported = new HashAttributeSet();
        for (int i = 0; i < attrs.length; i++) {
            if (!isAttributeValueSupported(attrs[i], flavor, attributes)) {
                unsupported.add(attrs[i]);
            }
        }
        if (unsupported.size() > 0) {
            return unsupported;
        }
        return null;
    }

    public Object getDefaultAttributeValue(Class category) {
        if (category == null) {
            throw new NullPointerException("Argument 'category' is null");
        }
        if (!(Attribute.class.isAssignableFrom(category))) {
            throw new IllegalArgumentException(
                    "Argument 'category' must implement interface Attribute");
        }
        return client.getDefaultAttributeValue(category);
    }

    /**
     * Returns supported values for the category. When the flavor is native to
     * the client, the client answers directly; otherwise the method probes
     * StreamPrintServiceFactory conversions and, if one exists, answers for
     * the client-side flavor instead.
     *
     * NOTE(review): the answer for converted flavors comes from the client's
     * flavor, not the stream service (see the commented-out return) — the
     * stream service's own capabilities are deliberately ignored here.
     */
    public Object getSupportedAttributeValues(Class category, DocFlavor flavor,
            AttributeSet attributes) {
        if (category == null) {
            throw new NullPointerException("Argument is null");
        }
        if (!(Attribute.class.isAssignableFrom(category))) {
            throw new IllegalArgumentException(
                    "Argument must implement interface Attribute");
        }
        if (flavor == null) {
            return client.getSupportedAttributeValues(category, flavor,
                    attributes);
        }
        DocFlavor clientFlavors[] = client.getSupportedDocFlavors();
        if (isDocFlavorSupportedByClient(flavor, clientFlavors)) {
            return client.getSupportedAttributeValues(category, flavor,
                    attributes);
        }
        /*
         * Searching stream print service factories, which
         * able to convert print data to flavor supported by
         * PrintClient (both user and internal). And then,
         * return supported attributes by created stream print
         * service
         */
        for (int i = 0; i < clientFlavors.length; i++) {
            StreamPrintServiceFactory[] factories = StreamPrintServiceFactory
                    .lookupStreamPrintServiceFactories(flavor, clientFlavors[i]
                            .getMimeType());
            for (int j = 0; j < factories.length; j++) {
                StreamPrintService sps = factories[j]
                        .getPrintService(new ByteArrayOutputStream());
                if (sps != null) {
                    try {
                        sps.getOutputStream().close();
                    } catch (IOException e) {
                        // just ignore — the throw-away stream holds no data
                    }
                    sps.dispose();
                    //return sps.getSupportedAttributeValues(category,
                    //        flavor, attributes);
                    return client.getSupportedAttributeValues(category,
                            clientFlavors[i], attributes);
                }
            }
        }
        throw new IllegalArgumentException("DocFlavor '" + flavor
                + "' is not supported by the print service");
    }

    /**
     * Mirror of {@link #getSupportedAttributeValues}: checks a single
     * attribute value, using the same client-first / conversion-fallback
     * strategy and the same substitution of the client's flavor for
     * converted flavors.
     */
    public boolean isAttributeValueSupported(Attribute attrval,
            DocFlavor flavor, AttributeSet attributes) {
        if (attrval == null) {
            throw new NullPointerException("Argument is null");
        }
        if (flavor == null) {
            return client
                    .isAttributeValueSupported(attrval, flavor, attributes);
        }
        DocFlavor clientFlavors[] = client.getSupportedDocFlavors();
        if (isDocFlavorSupportedByClient(flavor, clientFlavors)) {
            return client
                    .isAttributeValueSupported(attrval, flavor, attributes);
        }
        /*
         * Searching stream print service factories, which
         * able to convert print data to flavor supported by
         * PrintClient (both user and internal). And then,
         * return supported attributes by created stream print
         * service
         */
        for (int i = 0; i < clientFlavors.length; i++) {
            StreamPrintServiceFactory[] factories = StreamPrintServiceFactory
                    .lookupStreamPrintServiceFactories(flavor, clientFlavors[i]
                            .getMimeType());
            for (int j = 0; j < factories.length; j++) {
                StreamPrintService sps = factories[j]
                        .getPrintService(new ByteArrayOutputStream());
                if (sps != null) {
                    try {
                        sps.getOutputStream().close();
                    } catch (IOException e) {
                        // just ignore — the throw-away stream holds no data
                    }
                    sps.dispose();
                    //return sps.isAttributeValueSupported(attrval, flavor, attributes);
                    return client.isAttributeValueSupported(attrval,
                            clientFlavors[i], attributes);
                }
            }
        }
        throw new IllegalArgumentException("DocFlavor '" + flavor
                + "' is not supported by the print service");
    }

    //= Listeners ============================================================//
    public void addPrintServiceAttributeListener(
            PrintServiceAttributeListener listener) {
        notifier.addListener(this, listener);
    }

    public void removePrintServiceAttributeListener(
            PrintServiceAttributeListener listener) {
        notifier.removeListener(this, listener);
    }

    //= DocFlavors ===========================================================//
    /*
     * Returns two categories of DocFlavors:
     * 1) DocFlavors supported by PrintClient
     * 2) DocFlavors that can be converted by StreamPrintServices to
     *    PrintClient's DocFlavors
     *
     * If there is a DocFlavor that supported by PrintClient and by
     * StreamPrintService, the method returns PrintClient's one only.
     */
    public DocFlavor[] getSupportedDocFlavors() {
        DocFlavor clientFlavors[] = client.getSupportedDocFlavors();
        ArrayList flavors = new ArrayList();

        /*
         * Putting all PrintClient's supported flavors (except
         * internal flavors) into list of flavors supported by
         * this print service.
         */
        for (int i = 0; i < clientFlavors.length; i++) {
            if (!isInternalDocFlavor(clientFlavors[i])) {
                flavors.add(clientFlavors[i]);
            }
        }

        /*
         * Searching stream print service factories, which
         * able to convert print data to flavor supported by
         * PrintClient (both user and internal). And then,
         * gathering all flavors supported by those factories
         * and putting them into list of flavors supported
         * by this print service.
         */
        for (int i = 0; i < clientFlavors.length; i++) {
            StreamPrintServiceFactory[] factories = StreamPrintServiceFactory
                    .lookupStreamPrintServiceFactories(null, clientFlavors[i]
                            .getMimeType());
            for (int j = 0; j < factories.length; j++) {
                DocFlavor[] factoryFlavors = factories[j]
                        .getSupportedDocFlavors();
                for (int k = 0; k < factoryFlavors.length; k++) {
                    if (!flavors.contains(factoryFlavors[k])) {
                        flavors.add(factoryFlavors[k]);
                    }
                }
            }
        }
        return (DocFlavor[]) flavors.toArray(new DocFlavor[0]);
    }

    /**
     * Membership test against {@link #getSupportedDocFlavors()}; recomputes
     * the full flavor list (including factory lookups) on every call.
     */
    public boolean isDocFlavorSupported(DocFlavor flavor) {
        if (flavor == null) {
            throw new NullPointerException("DocFlavor flavor is null");
        }

        DocFlavor[] flavors = getSupportedDocFlavors();
        for (int i = 0; i < flavors.length; i++) {
            if (flavors[i].equals(flavor)) {
                return true;
            }
        }
        return false;
    }

    /*
     * Checks, whether specified flavor is internal or not.
     * "Internal" is detected purely by the substring "internal"
     * in the MIME type.
     */
    private boolean isInternalDocFlavor(DocFlavor flavor) {
        if (flavor.getMimeType().toLowerCase().indexOf("internal") != -1) {
            return true;
        }
        return false;
    }

    /*
     * Checks, whether specified flavor is supported by
     * PrintClient or not. This overload re-queries the client;
     * the two-argument overload reuses an already-fetched array.
     */
    boolean isDocFlavorSupportedByClient(DocFlavor flavor) {
        DocFlavor clientFlavors[] = client.getSupportedDocFlavors();
        for (int i = 0; i < clientFlavors.length; i++) {
            if (clientFlavors[i].equals(flavor)) {
                return true;
            }
        }
        return false;
    }

    boolean isDocFlavorSupportedByClient(DocFlavor flavor,
            DocFlavor[] clientFlavors) {
        for (int i = 0; i < clientFlavors.length; i++) {
            if (clientFlavors[i].equals(flavor)) {
                return true;
            }
        }
        return false;
    }

    //= Service user interface factory =======================================//
    public ServiceUIFactory getServiceUIFactory() {
        // We have not service user interface factory
        return null;
    }

    //= DocPrintJob ==========================================================//
    public DocPrintJob createPrintJob() {
        return new DefaultPrintJob(this);
    }
}
freeVM/freeVM
enhanced/archive/classlib/java6/modules/print/src/main/java/common/org/apache/harmony/x/print/DefaultPrintService.java
Java
apache-2.0
14,216
/* * Copyright 2016-present Open Networking Laboratory * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Package for Huawei device drivers util. */ package org.onosproject.drivers.huawei.util;
mengmoya/onos
drivers/huawei/src/main/java/org/onosproject/drivers/huawei/util/package-info.java
Java
apache-2.0
714
"""Base Command class, and related routines""" import sys import os import socket import urllib2 import urllib from cStringIO import StringIO import traceback import time from pip.log import logger from pip.baseparser import parser, ConfigOptionParser, UpdatingDefaultsHelpFormatter from pip.exceptions import InstallationError, UninstallationError from pip.venv import restart_in_venv __all__ = ['command_dict', 'Command', 'load_all_commands', 'load_command', 'command_names'] command_dict = {} class Command(object): name = None usage = None hidden = False def __init__(self): assert self.name self.parser = ConfigOptionParser( usage=self.usage, prog='%s %s' % (sys.argv[0], self.name), version=parser.version, formatter=UpdatingDefaultsHelpFormatter(), name=self.name) for option in parser.option_list: if not option.dest or option.dest == 'help': # -h, --version, etc continue self.parser.add_option(option) command_dict[self.name] = self def merge_options(self, initial_options, options): # Make sure we have all global options carried over for attr in ['log', 'venv', 'proxy', 'venv_base', 'require_venv', 'respect_venv', 'log_explicit_levels', 'log_file', 'timeout', 'default_vcs', 'skip_requirements_regex']: setattr(options, attr, getattr(initial_options, attr) or getattr(options, attr)) options.quiet += initial_options.quiet options.verbose += initial_options.verbose def main(self, complete_args, args, initial_options): options, args = self.parser.parse_args(args) self.merge_options(initial_options, options) if options.require_venv and not options.venv: # If a venv is required check if it can really be found if not os.environ.get('VIRTUAL_ENV'): print 'Could not find an activated virtualenv (required).' 
sys.exit(3) # Automatically install in currently activated venv if required options.respect_venv = True if args and args[-1] == '___VENV_RESTART___': ## FIXME: We don't do anything this this value yet: venv_location = args[-2] args = args[:-2] options.venv = None else: # If given the option to respect the activated environment # check if no venv is given as a command line parameter if options.respect_venv and os.environ.get('VIRTUAL_ENV'): if options.venv and os.path.exists(options.venv): # Make sure command line venv and environmental are the same if (os.path.realpath(os.path.expanduser(options.venv)) != os.path.realpath(os.environ.get('VIRTUAL_ENV'))): print ("Given virtualenv (%s) doesn't match " "currently activated virtualenv (%s)." % (options.venv, os.environ.get('VIRTUAL_ENV'))) sys.exit(3) else: options.venv = os.environ.get('VIRTUAL_ENV') print 'Using already activated environment %s' % options.venv level = 1 # Notify level += options.verbose level -= options.quiet level = logger.level_for_integer(4-level) complete_log = [] logger.consumers.extend( [(level, sys.stdout), (logger.DEBUG, complete_log.append)]) if options.log_explicit_levels: logger.explicit_levels = True if options.venv: if options.verbose > 0: # The logger isn't setup yet print 'Running in environment %s' % options.venv site_packages=False if options.site_packages: site_packages=True restart_in_venv(options.venv, options.venv_base, site_packages, complete_args) # restart_in_venv should actually never return, but for clarity... 
return ## FIXME: not sure if this sure come before or after venv restart if options.log: log_fp = open_logfile_append(options.log) logger.consumers.append((logger.DEBUG, log_fp)) else: log_fp = None socket.setdefaulttimeout(options.timeout or None) setup_proxy_handler(options.proxy) exit = 0 try: self.run(options, args) except (InstallationError, UninstallationError), e: logger.fatal(str(e)) logger.info('Exception information:\n%s' % format_exc()) exit = 1 except: logger.fatal('Exception:\n%s' % format_exc()) exit = 2 if log_fp is not None: log_fp.close() if exit: log_fn = options.log_file text = '\n'.join(complete_log) logger.fatal('Storing complete log in %s' % log_fn) log_fp = open_logfile_append(log_fn) log_fp.write(text) log_fp.close() return exit ## FIXME: should get moved somewhere else: def setup_proxy_handler(proxystr=''): """Set the proxy handler given the option passed on the command line. If an empty string is passed it looks at the HTTP_PROXY environment variable. """ proxy = get_proxy(proxystr) if proxy: proxy_support = urllib2.ProxyHandler({"http": proxy, "ftp": proxy}) opener = urllib2.build_opener(proxy_support, urllib2.CacheFTPHandler) urllib2.install_opener(opener) def get_proxy(proxystr=''): """Get the proxy given the option passed on the command line. 
If an empty string is passed it looks at the HTTP_PROXY environment variable.""" if not proxystr: proxystr = os.environ.get('HTTP_PROXY', '') if proxystr: if '@' in proxystr: user_password, server_port = proxystr.split('@', 1) if ':' in user_password: user, password = user_password.split(':', 1) else: user = user_password import getpass prompt = 'Password for %s@%s: ' % (user, server_port) password = urllib.quote(getpass.getpass(prompt)) return '%s:%s@%s' % (user, password, server_port) else: return proxystr else: return None def format_exc(exc_info=None): if exc_info is None: exc_info = sys.exc_info() out = StringIO() traceback.print_exception(*exc_info, **dict(file=out)) return out.getvalue() def open_logfile_append(filename): """Open the named log file in append mode. If the file already exists, a separator will also be printed to the file to separate past activity from current activity. """ exists = os.path.exists(filename) log_fp = open(filename, 'a') if exists: print >> log_fp, '-'*60 print >> log_fp, '%s run on %s' % (sys.argv[0], time.strftime('%c')) return log_fp def load_command(name): full_name = 'pip.commands.%s' % name if full_name in sys.modules: return try: __import__(full_name) except ImportError: pass def load_all_commands(): for name in command_names(): load_command(name) def command_names(): dir = os.path.join(os.path.dirname(__file__), 'commands') names = [] for name in os.listdir(dir): if name.endswith('.py') and os.path.isfile(os.path.join(dir, name)): names.append(os.path.splitext(name)[0]) return names
2013Commons/HUE-SHARK
build/env/lib/python2.7/site-packages/pip-0.6.3-py2.7.egg/pip/basecommand.py
Python
apache-2.0
7,777
/*
 * Copyright 2016 MICRORISC s.r.o.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.microrisc.simply.iqrf.dpa.v22x.examples.services;

import com.microrisc.simply.SimplyException;
import com.microrisc.simply.iqrf.dpa.DPA_Network;
import com.microrisc.simply.iqrf.dpa.DPA_Node;
import com.microrisc.simply.iqrf.dpa.DPA_Simply;
import com.microrisc.simply.iqrf.dpa.v22x.DPA_SimplyFactory;
import com.microrisc.simply.iqrf.dpa.v22x.services.node.load_code.LoadCodeProcessingInfo;
import com.microrisc.simply.iqrf.dpa.v22x.services.node.load_code.LoadCodeResult;
import com.microrisc.simply.iqrf.dpa.v22x.services.node.load_code.LoadCodeService;
import com.microrisc.simply.iqrf.dpa.v22x.services.node.load_code.LoadCodeServiceParameters;
import com.microrisc.simply.iqrf.dpa.v22x.services.node.load_code.errors.LoadCodeError;
import com.microrisc.simply.iqrf.dpa.v22x.types.LoadingCodeProperties;
import com.microrisc.simply.services.ServiceResult;
import java.io.File;
import java.util.Map;

/**
 * Example: loading a Custom DPA Handler hex image into one specified node
 * (here the coordinator, node "0" of network "1") via the Load Code Service.
 * To load the red-LED handler instead, point the parameters at
 * CustomDpaHandler-LED-Red-On-7xD-V228-160912.hex with the same settings.
 *
 * @author Michal Konopa
 * @author Martin Strouhal
 */
public class LoadCodeServiceExample {

    // Simply instance shared so the error-exit helper can tear it down.
    private static DPA_Simply simply = null;

    /** Prints the message, releases Simply (if created) and terminates. */
    private static void printMessageAndExit(String message) {
        System.out.println(message);
        if ( simply != null) {
            simply.destroy();
        }
        System.exit(1);
    }

    public static void main(String[] args) {
        // Bootstrap Simply from its properties file.
        try {
            simply = DPA_SimplyFactory.getSimply(
                    "config" + File.separator + "simply" + File.separator + "Simply.properties");
        } catch ( SimplyException ex ) {
            printMessageAndExit("Error while creating Simply: " + ex.getMessage());
        }

        // Resolve network -> coordinator node -> Load Code Service,
        // bailing out with a message at each missing step.
        DPA_Network network = simply.getNetwork("1", DPA_Network.class);
        if ( network == null ) {
            printMessageAndExit("Network 1 doesn't exist");
        }

        DPA_Node coordinator = network.getNode("0");
        if ( coordinator == null ) {
            printMessageAndExit("Coordinator doesn't exist.");
        }

        LoadCodeService service = coordinator.getService(LoadCodeService.class);
        if ( service == null ) {
            printMessageAndExit("Coordinator doesn't support Load Code Service.");
        }

        // Load the green-LED handler hex at address 0x0800, verifying the
        // checksum as part of the load.
        LoadCodeServiceParameters params = new LoadCodeServiceParameters(
                "config" + File.separator + "custom-dpa-handlers" + File.separator
                + "CustomDpaHandler-LED-Green-On-7xD-V228-160912.hex",
                0x0800,
                LoadingCodeProperties.LoadingAction.ComputeAndMatchChecksumWithCodeLoading,
                LoadingCodeProperties.LoadingContent.Hex
        );
        ServiceResult<LoadCodeResult, LoadCodeProcessingInfo> result
                = service.loadCode(params);

        // Report the outcome.
        if ( result.getStatus() != ServiceResult.Status.SUCCESSFULLY_COMPLETED ) {
            System.out.println("Code load was NOT successful.");

            // Per-node error details when available; otherwise the raw
            // processing info is the only diagnostic we have.
            LoadCodeResult loadResult = result.getResult();
            if ( loadResult == null ) {
                System.out.println(result.getProcessingInfo());
            } else {
                System.out.println("Errors: ");
                printLoadCodeErrors(loadResult);
            }
        } else {
            System.out.println("Code successfully loaded.");
        }

        simply.destroy();
    }

    /** Prints one line per node that failed to accept the code. */
    private static void printLoadCodeErrors(LoadCodeResult loadCodeResult) {
        Map<String, LoadCodeError> errors = loadCodeResult.getErrorsMap();
        for ( Map.Entry<String, LoadCodeError> nodeError : errors.entrySet() ) {
            System.out.println("Node " + nodeError.getKey() + ": " + nodeError.getValue());
        }
    }
}
iqrfsdk/jsimply
simply-modules/simply-iqrf-dpa22x/simply-iqrf-dpa-v22x-examples/src/main/java/com/microrisc/simply/iqrf/dpa/v22x/examples/services/LoadCodeServiceExample.java
Java
apache-2.0
5,315
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.rest.action.cat; import com.google.common.collect.Maps; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import 
// NOTE(review): the leading "org.elasticsearch.rest.RestRequest;" is a truncated
// import statement — its "import" keyword lies before the start of this chunk.
org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.XContentThrowableRestResponse;
import org.elasticsearch.rest.action.support.RestTable;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPoolStats;

import java.io.IOException;
import java.util.*;

import static org.elasticsearch.rest.RestRequest.Method.GET;

/**
 * Cat API handler for {@code GET /_cat/thread_pool}. Renders one table row per
 * cluster node, with per-thread-pool counters (active/queue/rejected shown by
 * default; size/largest/completed hidden unless requested via headers).
 */
public class RestThreadPoolAction extends AbstractCatAction {

    // Every thread pool this endpoint knows how to report on.
    private final static String[] SUPPORTED_NAMES = new String[] {
            ThreadPool.Names.BULK,
            ThreadPool.Names.FLUSH,
            ThreadPool.Names.GENERIC,
            ThreadPool.Names.GET,
            ThreadPool.Names.INDEX,
            ThreadPool.Names.MANAGEMENT,
            ThreadPool.Names.MERGE,
            ThreadPool.Names.OPTIMIZE,
            ThreadPool.Names.PERCOLATE,
            ThreadPool.Names.REFRESH,
            ThreadPool.Names.SEARCH,
            ThreadPool.Names.SNAPSHOT,
            ThreadPool.Names.SUGGEST,
            ThreadPool.Names.WARMER
    };

    // Short column-name aliases, positionally parallel to SUPPORTED_NAMES.
    private final static String[] SUPPORTED_ALIASES = new String[] {
            "b", "f", "ge", "g", "i", "ma", "m", "o", "p", "r", "s", "sn", "su", "w"
    };

    // Pools whose columns are displayed when the request names no pools explicitly.
    private final static String[] DEFAULT_THREAD_POOLS = new String[] {
            ThreadPool.Names.BULK,
            ThreadPool.Names.INDEX,
            ThreadPool.Names.SEARCH,
    };

    private final static Map<String, String> ALIAS_TO_THREAD_POOL;
    private final static Map<String, String> THREAD_POOL_TO_ALIAS;

    static {
        // NOTE(review): keys here are the first three characters of each pool
        // name ("bul", "flu", ...), not the entries of SUPPORTED_ALIASES above —
        // confirm this asymmetry with fetchSortedPools() usage is intended.
        ALIAS_TO_THREAD_POOL = Maps.newHashMapWithExpectedSize(SUPPORTED_NAMES.length);
        for (String supportedThreadPool : SUPPORTED_NAMES) {
            ALIAS_TO_THREAD_POOL.put(supportedThreadPool.substring(0, 3), supportedThreadPool);
        }
        THREAD_POOL_TO_ALIAS = Maps.newHashMapWithExpectedSize(SUPPORTED_NAMES.length);
        for (int i = 0; i < SUPPORTED_NAMES.length; i++) {
            THREAD_POOL_TO_ALIAS.put(SUPPORTED_NAMES[i], SUPPORTED_ALIASES[i]);
        }
    }

    @Inject
    public RestThreadPoolAction(Settings settings, Client client, RestController controller) {
        super(settings, client);
        controller.registerHandler(GET, "/_cat/thread_pool", this);
    }

    @Override
    void documentation(StringBuilder sb) {
        sb.append("/_cat/thread_pool\n");
    }

    /**
     * Chains three asynchronous cluster calls — state, then nodes info, then
     * nodes stats — and renders the combined result as a cat table. Each stage
     * reports failures back to the client as an XContent throwable response.
     */
    @Override
    public void doRequest(final RestRequest request, final RestChannel channel) {
        final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
        clusterStateRequest.clear().nodes(true);
        clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
        clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout()));
        // Resolve the requested pool columns once, before the async chain starts.
        final String[] pools = fetchSortedPools(request, DEFAULT_THREAD_POOLS);
        client.admin().cluster().state(clusterStateRequest, new ActionListener<ClusterStateResponse>() {
            @Override
            public void onResponse(final ClusterStateResponse clusterStateResponse) {
                NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
                // Process info supplies the pid column.
                nodesInfoRequest.clear().process(true);
                client.admin().cluster().nodesInfo(nodesInfoRequest, new ActionListener<NodesInfoResponse>() {
                    @Override
                    public void onResponse(final NodesInfoResponse nodesInfoResponse) {
                        NodesStatsRequest nodesStatsRequest = new NodesStatsRequest();
                        nodesStatsRequest.clear().threadPool(true);
                        client.admin().cluster().nodesStats(nodesStatsRequest, new ActionListener<NodesStatsResponse>() {
                            @Override
                            public void onResponse(NodesStatsResponse nodesStatsResponse) {
                                try {
                                    channel.sendResponse(RestTable.buildResponse(buildTable(request, clusterStateResponse, nodesInfoResponse, nodesStatsResponse, pools), request, channel));
                                } catch (Throwable e) {
                                    // Table building/serialization failed; reuse the failure path.
                                    onFailure(e);
                                }
                            }

                            @Override
                            public void onFailure(Throwable e) {
                                try {
                                    channel.sendResponse(new XContentThrowableRestResponse(request, e));
                                } catch (IOException e1) {
                                    logger.error("Failed to send failure response", e1);
                                }
                            }
                        });
                    }

                    @Override
                    public void onFailure(Throwable e) {
                        try {
                            channel.sendResponse(new XContentThrowableRestResponse(request, e));
                        } catch (IOException e1) {
                            logger.error("Failed to send failure response", e1);
                        }
                    }
                });
            }

            @Override
            public void onFailure(Throwable e) {
                try {
                    channel.sendResponse(new XContentThrowableRestResponse(request, e));
                } catch (IOException e1) {
                    logger.error("Failed to send failure response", e1);
                }
            }
        });
    }

    /**
     * Builds the table header: fixed node-identity columns followed by six
     * cells per supported pool. Pools the caller requested (or the defaults)
     * have their active/queue/rejected columns visible by default.
     */
    @Override
    Table getTableWithHeader(final RestRequest request) {
        Table table = new Table();
        table.startHeaders();
        table.addCell("id", "default:false;alias:id,nodeId;desc:unique node id");
        table.addCell("pid", "default:false;alias:p;desc:process id");
        table.addCell("host", "alias:h;desc:host name");
        table.addCell("ip", "alias:i;desc:ip address");
        table.addCell("port", "default:false;alias:po;desc:bound transport port");
        final String[] requestedPools = fetchSortedPools(request, DEFAULT_THREAD_POOLS);
        for (String pool : SUPPORTED_NAMES) {
            String poolAlias = THREAD_POOL_TO_ALIAS.get(pool);
            // A pool's default columns are shown only if it was requested.
            boolean display = false;
            for (String requestedPool : requestedPools) {
                if (pool.equals(requestedPool)) {
                    display = true;
                    break;
                }
            }
            String defaultDisplayVal = Boolean.toString(display);
            table.addCell(
                    pool + ".active",
                    "alias:" + poolAlias + "a;default:" + defaultDisplayVal + ";text-align:right;desc:number of active " + pool + " threads"
            );
            // NOTE(review): the ".size" description duplicates the ".active" one
            // ("number of active ... threads"); it presumably should describe the
            // pool size instead — looks like a copy/paste slip, confirm upstream.
            table.addCell(
                    pool + ".size",
                    "alias:" + poolAlias + "s;default:false;text-align:right;desc:number of active " + pool + " threads"
            );
            table.addCell(
                    pool + ".queue",
                    "alias:" + poolAlias + "q;default:" + defaultDisplayVal + ";text-align:right;desc:number of " + pool + " threads in queue"
            );
            table.addCell(
                    pool + ".rejected",
                    "alias:" + poolAlias + "r;default:" + defaultDisplayVal + ";text-align:right;desc:number of rejected " + pool + " threads"
            );
            table.addCell(
                    pool + ".largest",
                    "alias:" + poolAlias + "l;default:false;text-align:right;desc:highest number of seen active " + pool + " threads"
            );
            table.addCell(
                    pool + ".completed",
                    "alias:" + poolAlias + "c;default:false;text-align:right;desc:number of completed " + pool + " threads"
            );
        }
        table.endHeaders();
        return table;
    }

    /**
     * Populates one row per discovered node. Missing info/stats for a node
     * (e.g. it left the cluster mid-request) yield null cells rather than
     * failing the whole table.
     */
    private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoResponse nodesInfo, NodesStatsResponse nodesStats, String[] pools) {
        boolean fullId = req.paramAsBoolean("full_id", false);
        DiscoveryNodes nodes = state.getState().nodes();
        Table table = getTableWithHeader(req);
        for (DiscoveryNode node : nodes) {
            NodeInfo info = nodesInfo.getNodesMap().get(node.id());
            NodeStats stats = nodesStats.getNodesMap().get(node.id());
            table.startRow();
            // Node id is truncated to 4 chars unless full_id=true is passed.
            table.addCell(fullId ? node.id() : Strings.substring(node.getId(), 0, 4));
            table.addCell(info == null ? null : info.getProcess().id());
            table.addCell(node.getHostName());
            table.addCell(node.getHostAddress());
            if (node.address() instanceof InetSocketTransportAddress) {
                table.addCell(((InetSocketTransportAddress) node.address()).address().getPort());
            } else {
                table.addCell("-");
            }
            // Index the node's per-pool stats by pool name for O(1) lookup below.
            final Map<String, ThreadPoolStats.Stats> poolThreadStats;
            if (stats == null) {
                poolThreadStats = Collections.emptyMap();
            } else {
                poolThreadStats = new HashMap<String, ThreadPoolStats.Stats>(14);
                ThreadPoolStats threadPoolStats = stats.getThreadPool();
                for (ThreadPoolStats.Stats threadPoolStat : threadPoolStats) {
                    poolThreadStats.put(threadPoolStat.getName(), threadPoolStat);
                }
            }
            // Cell order must match the six header cells added per pool above.
            for (String pool : SUPPORTED_NAMES) {
                ThreadPoolStats.Stats poolStats = poolThreadStats.get(pool);
                table.addCell(poolStats == null ? null : poolStats.getActive());
                table.addCell(poolStats == null ? null : poolStats.getThreads());
                table.addCell(poolStats == null ? null : poolStats.getQueue());
                table.addCell(poolStats == null ? null : poolStats.getRejected());
                table.addCell(poolStats == null ? null : poolStats.getLargest());
                table.addCell(poolStats == null ? null : poolStats.getCompleted());
            }
            table.endRow();
        }
        return table;
    }

    // The thread pool columns should always be in the same order.
    /**
     * Resolves the "h" (headers) request parameter into pool names. Headers of
     * the form "pool.column" select that pool; bare headers are looked up in
     * the alias map. Without an "h" parameter, the supplied defaults are used.
     */
    private String[] fetchSortedPools(RestRequest request, String[] defaults) {
        String[] headers = request.paramAsStringArray("h", null);
        if (headers == null) {
            return defaults;
        } else {
            // LinkedHashSet keeps first-seen order while de-duplicating.
            Set<String> requestedPools = new LinkedHashSet<String>(headers.length);
            for (String header : headers) {
                int dotIndex = header.indexOf('.');
                if (dotIndex != -1) {
                    String headerPrefix = header.substring(0, dotIndex);
                    if (THREAD_POOL_TO_ALIAS.containsKey(headerPrefix)) {
                        requestedPools.add(headerPrefix);
                    }
                } else if (ALIAS_TO_THREAD_POOL.containsKey(header)) {
                    requestedPools.add(ALIAS_TO_THREAD_POOL.get(header));
                }
            }
            return requestedPools.toArray(new String[0]);
        }
    }
}
uboness/elasticsearch
src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java
Java
apache-2.0
13,300
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated proto-plus message wrappers for the Error Reporting v1beta1
# common types. Field numbers mirror the underlying .proto definition and
# must not be changed.
import proto  # type: ignore

from google.protobuf import timestamp_pb2  # type: ignore


# Registers every message/enum below under the v1beta1 proto package.
__protobuf__ = proto.module(
    package="google.devtools.clouderrorreporting.v1beta1",
    manifest={
        "ResolutionStatus",
        "ErrorGroup",
        "TrackingIssue",
        "ErrorEvent",
        "ServiceContext",
        "ErrorContext",
        "HttpRequestContext",
        "SourceLocation",
    },
)


class ResolutionStatus(proto.Enum):
    r"""Resolution status of an error group."""
    RESOLUTION_STATUS_UNSPECIFIED = 0
    OPEN = 1
    ACKNOWLEDGED = 2
    RESOLVED = 3
    MUTED = 4


class ErrorGroup(proto.Message):
    r"""Description of a group of similar error events.

    Attributes:
        name (str):
            The group resource name. Example:
            <code>projects/my-project-123/groups/CNSgkpnppqKCUw</code>
        group_id (str):
            Group IDs are unique for a given project. If
            the same kind of error occurs in different
            service contexts, it will receive the same group
            ID.
        tracking_issues (Sequence[google.cloud.errorreporting_v1beta1.types.TrackingIssue]):
            Associated tracking issues.
        resolution_status (google.cloud.errorreporting_v1beta1.types.ResolutionStatus):
            Error group's resolution status.
            An unspecified resolution status will be
            interpreted as OPEN
    """

    name = proto.Field(proto.STRING, number=1,)
    group_id = proto.Field(proto.STRING, number=2,)
    tracking_issues = proto.RepeatedField(proto.MESSAGE, number=3, message="TrackingIssue",)
    # NOTE: field number 4 is skipped here; presumably reserved in the .proto.
    resolution_status = proto.Field(proto.ENUM, number=5, enum="ResolutionStatus",)


class TrackingIssue(proto.Message):
    r"""Information related to tracking the progress on resolving the
    error.

    Attributes:
        url (str):
            A URL pointing to a related entry in an issue tracking
            system. Example:
            ``https://github.com/user/project/issues/4``
    """

    url = proto.Field(proto.STRING, number=1,)


class ErrorEvent(proto.Message):
    r"""An error event which is returned by the Error Reporting
    system.

    Attributes:
        event_time (google.protobuf.timestamp_pb2.Timestamp):
            Time when the event occurred as provided in
            the error report. If the report did not contain
            a timestamp, the time the error was received by
            the Error Reporting system is used.
        service_context (google.cloud.errorreporting_v1beta1.types.ServiceContext):
            The ``ServiceContext`` for which this error was reported.
        message (str):
            The stack trace that was reported or logged
            by the service.
        context (google.cloud.errorreporting_v1beta1.types.ErrorContext):
            Data about the context in which the error
            occurred.
    """

    event_time = proto.Field(proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,)
    service_context = proto.Field(proto.MESSAGE, number=2, message="ServiceContext",)
    message = proto.Field(proto.STRING, number=3,)
    # NOTE: field number 4 is skipped here; presumably reserved in the .proto.
    context = proto.Field(proto.MESSAGE, number=5, message="ErrorContext",)


class ServiceContext(proto.Message):
    r"""Describes a running service that sends errors. Its version
    changes over time and multiple versions can run in parallel.

    Attributes:
        service (str):
            An identifier of the service, such as the name of the
            executable, job, or Google App Engine service name. This
            field is expected to have a low number of values that are
            relatively stable over time, as opposed to ``version``,
            which can be changed whenever new code is deployed.

            Contains the service name for error reports extracted from
            Google App Engine logs or ``default`` if the App Engine
            default service is used.
        version (str):
            Represents the source code version that the
            developer provided, which could represent a
            version label or a Git SHA-1 hash, for example.
            For App Engine standard environment, the version
            is set to the version of the app.
        resource_type (str):
            Type of the MonitoredResource. List of
            possible values:
            https://cloud.google.com/monitoring/api/resources

            Value is set automatically for incoming errors
            and must not be set when reporting errors.
    """

    # Field numbering starts at 2 in the source .proto for this message.
    service = proto.Field(proto.STRING, number=2,)
    version = proto.Field(proto.STRING, number=3,)
    resource_type = proto.Field(proto.STRING, number=4,)


class ErrorContext(proto.Message):
    r"""A description of the context in which an error occurred.
    This data should be provided by the application when reporting an
    error, unless the error report has been generated automatically
    from Google App Engine logs.

    Attributes:
        http_request (google.cloud.errorreporting_v1beta1.types.HttpRequestContext):
            The HTTP request which was processed when the
            error was triggered.
        user (str):
            The user who caused or was affected by the crash. This can
            be a user ID, an email address, or an arbitrary token that
            uniquely identifies the user. When sending an error report,
            leave this field empty if the user was not logged in. In
            this case the Error Reporting system will use other data,
            such as remote IP address, to distinguish affected users.
            See ``affected_users_count`` in ``ErrorGroupStats``.
        report_location (google.cloud.errorreporting_v1beta1.types.SourceLocation):
            The location in the source code where the
            decision was made to report the error, usually
            the place where it was logged. For a logged
            exception this would be the source line where
            the exception is logged, usually close to the
            place where it was caught.
    """

    http_request = proto.Field(proto.MESSAGE, number=1, message="HttpRequestContext",)
    user = proto.Field(proto.STRING, number=2,)
    report_location = proto.Field(proto.MESSAGE, number=3, message="SourceLocation",)


class HttpRequestContext(proto.Message):
    r"""HTTP request data that is related to a reported error. This
    data should be provided by the application when reporting an
    error, unless the error report has been generated automatically
    from Google App Engine logs.

    Attributes:
        method (str):
            The type of HTTP request, such as ``GET``, ``POST``, etc.
        url (str):
            The URL of the request.
        user_agent (str):
            The user agent information that is provided
            with the request.
        referrer (str):
            The referrer information that is provided
            with the request.
        response_status_code (int):
            The HTTP response status code for the
            request.
        remote_ip (str):
            The IP address from which the request
            originated. This can be IPv4, IPv6, or a token
            which is derived from the IP address, depending
            on the data that has been provided in the error
            report.
    """

    method = proto.Field(proto.STRING, number=1,)
    url = proto.Field(proto.STRING, number=2,)
    user_agent = proto.Field(proto.STRING, number=3,)
    referrer = proto.Field(proto.STRING, number=4,)
    response_status_code = proto.Field(proto.INT32, number=5,)
    remote_ip = proto.Field(proto.STRING, number=6,)


class SourceLocation(proto.Message):
    r"""Indicates a location in the source code of the service for which
    errors are reported. ``functionName`` must be provided by the
    application when reporting an error, unless the error report
    contains a ``message`` with a supported exception stack trace. All
    fields are optional for the later case.

    Attributes:
        file_path (str):
            The source code filename, which can include a
            truncated relative path, or a full path from a
            production machine.
        line_number (int):
            1-based. 0 indicates that the line number is
            unknown.
        function_name (str):
            Human-readable name of a function or method. The value can
            include optional context like the class or package name. For
            example, ``my.package.MyClass.method`` in case of Java.
    """

    file_path = proto.Field(proto.STRING, number=1,)
    line_number = proto.Field(proto.INT32, number=2,)
    # NOTE: field number 3 is skipped here; presumably reserved in the .proto.
    function_name = proto.Field(proto.STRING, number=4,)


__all__ = tuple(sorted(__protobuf__.manifest))
googleapis/python-error-reporting
google/cloud/errorreporting_v1beta1/types/common.py
Python
apache-2.0
9,405
// Copyright 2018 Peter Hallam // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package omakase.codegeneration; import omakase.printtree.ParseTreeWriter; import java.io.PrintStream; public class OptimizedTreeWriter extends ParseTreeWriter { public OptimizedTreeWriter(PrintStream out) { super(out); } protected void writeLine() { write(" "); } protected void outdent() {} protected void indent() {} }
peterhal/omakase
src/omakase/codegeneration/OptimizedTreeWriter.java
Java
apache-2.0
939
# # Copyright:: Copyright (c) 2016-2019 Chef Software Inc. # License:: Apache License, Version 2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # require_relative "dist" ChefDK.commands do |c| c.builtin "exec", :Exec, require_path: "chef-dk/command/exec", desc: "Runs the command in context of the embedded ruby" c.builtin "env", :Env, require_path: "chef-dk/command/env", desc: "Prints environment variables used by #{ChefDK::Dist::PRODUCT}" c.builtin "gem", :GemForwarder, require_path: "chef-dk/command/gem", desc: "Runs the `gem` command in context of the embedded Ruby" c.builtin "generate", :Generate, desc: "Generate a new repository, cookbook, or other component" c.builtin "shell-init", :ShellInit, desc: "Initialize your shell to use #{ChefDK::Dist::PRODUCT} as your primary Ruby" c.builtin "install", :Install, desc: "Install cookbooks from a Policyfile and generate a locked cookbook set" c.builtin "update", :Update, desc: "Updates a Policyfile.lock.json with latest run_list and cookbooks" c.builtin "push", :Push, desc: "Push a local policy lock to a policy group on the #{ChefDK::Dist::SERVER_PRODUCT}" c.builtin "push-archive", :PushArchive, desc: "Push a policy archive to a policy group on the #{ChefDK::Dist::SERVER_PRODUCT}" c.builtin "show-policy", :ShowPolicy, desc: "Show policyfile objects on the #{ChefDK::Dist::SERVER_PRODUCT}" c.builtin "diff", :Diff, desc: "Generate an itemized diff of two Policyfile lock documents" c.builtin "export", :Export, desc: "Export a policy 
lock as a #{ChefDK::Dist::ZERO_PRODUCT} code repo" c.builtin "clean-policy-revisions", :CleanPolicyRevisions, desc: "Delete unused policy revisions on the #{ChefDK::Dist::SERVER_PRODUCT}" c.builtin "clean-policy-cookbooks", :CleanPolicyCookbooks, desc: "Delete unused policyfile cookbooks on the #{ChefDK::Dist::SERVER_PRODUCT}" c.builtin "delete-policy-group", :DeletePolicyGroup, desc: "Delete a policy group on the #{ChefDK::Dist::SERVER_PRODUCT}" c.builtin "delete-policy", :DeletePolicy, desc: "Delete all revisions of a policy on the #{ChefDK::Dist::SERVER_PRODUCT}" c.builtin "undelete", :Undelete, desc: "Undo a delete command" c.builtin "describe-cookbook", :DescribeCookbook, require_path: "chef-dk/command/describe_cookbook", desc: "Prints cookbook checksum information used for cookbook identifier" c.builtin "verify", :Verify, desc: "Test the embedded #{ChefDK::Dist::PRODUCT} applications", hidden: true # deprecated command that throws a failure warning if used. This was removed 4.2019 c.builtin "provision", :Provision, desc: "Provision VMs and clusters via cookbook", hidden: true end
chef/chef-dk
lib/chef-dk/builtin_commands.rb
Ruby
apache-2.0
3,304
/*
 * Copyright © 2019 Cask Data, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */

package io.cdap.plugin.gcp.datastore.source;

import com.google.cloud.Timestamp;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableSet;
import com.google.datastore.v1.Filter;
import com.google.datastore.v1.Key.PathElement;
import com.google.datastore.v1.KindExpression;
import com.google.datastore.v1.PartitionId;
import com.google.datastore.v1.PropertyFilter;
import com.google.datastore.v1.Value;
import com.google.datastore.v1.client.DatastoreHelper;
import io.cdap.cdap.api.annotation.Description;
import io.cdap.cdap.api.annotation.Macro;
import io.cdap.cdap.api.annotation.Name;
import io.cdap.cdap.api.data.schema.Schema;
import io.cdap.cdap.api.dataset.lib.KeyValue;
import io.cdap.cdap.etl.api.FailureCollector;
import io.cdap.plugin.common.KeyValueListParser;
import io.cdap.plugin.gcp.common.GCPReferenceSourceConfig;
import io.cdap.plugin.gcp.datastore.exception.DatastoreInitializationException;
import io.cdap.plugin.gcp.datastore.source.util.DatastoreSourceConstants;
import io.cdap.plugin.gcp.datastore.source.util.SourceKeyType;
import io.cdap.plugin.gcp.datastore.util.DatastorePropertyUtil;
import io.cdap.plugin.gcp.datastore.util.DatastoreUtil;

import java.io.IOException;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import javax.annotation.Nullable;

/**
 * This class {@link DatastoreSourceConfig} provides all the configuration required for
 * configuring the {@link DatastoreSource} plugin.
 */
public class DatastoreSourceConfig extends GCPReferenceSourceConfig {

  // filters are provided as "key|value" pairs separated by ";"
  private static final KeyValueListParser KV_PARSER = new KeyValueListParser(";", "\\|");

  // timestamps in CDAP are represented as LONG with TIMESTAMP_MICROS logical type
  // datetime logical type is represented as a string with ISO-8601 format
  private static final Set<Schema.LogicalType> SUPPORTED_LOGICAL_TYPES =
    new ImmutableSet.Builder<Schema.LogicalType>()
      .add(Schema.LogicalType.DATETIME, Schema.LogicalType.TIMESTAMP_MICROS)
      .build();

  @Name(DatastoreSourceConstants.PROPERTY_NAMESPACE)
  @Macro
  @Nullable
  @Description("Namespace of the entities to read. A namespace partitions entities into a subset of Cloud Datastore. "
    + "If no value is provided, the `default` namespace will be used.")
  private String namespace;

  @Name(DatastoreSourceConstants.PROPERTY_KIND)
  @Macro
  @Description("Kind of entities to read. Kinds are used to categorize entities in Cloud Datastore. "
    + "A kind is equivalent to the relational database table notion.")
  private String kind;

  @Name(DatastoreSourceConstants.PROPERTY_ANCESTOR)
  @Macro
  @Nullable
  @Description("Ancestor of entities to read. An ancestor identifies the common parent entity "
    + "that all the child entities share. The value must be provided in key literal format: "
    + "key(<kind>, <identifier>, <kind>, <identifier>, [...]). "
    + "For example: `key(kind_1, 'stringId', kind_2, 100)`")
  private String ancestor;

  @Name(DatastoreSourceConstants.PROPERTY_FILTERS)
  @Macro
  @Nullable
  @Description("List of filters to apply when reading entities from Cloud Datastore. "
    + "Only entities that satisfy all the filters will be read. "
    + "The filter key corresponds to a field in the schema. "
    + "The field type must be STRING, LONG, DOUBLE, BOOLEAN, or TIMESTAMP. "
    + "The filter value indicates what value that field must have in order to be read. "
    + "If no value is provided, it means the value must be null in order to be read. "
    + "TIMESTAMP string should be in the RFC 3339 format without the timezone offset (always ends in Z). "
    + "Expected pattern: `yyyy-MM-dd'T'HH:mm:ssX`, for example: `2011-10-02T13:12:55Z`.")
  private String filters;

  @Name(DatastoreSourceConstants.PROPERTY_NUM_SPLITS)
  @Macro
  @Description("Desired number of splits to divide the query into when reading from Cloud Datastore. "
    + "Fewer splits may be created if the query cannot be divided into the desired number of splits.")
  private int numSplits;

  @Name(DatastoreSourceConstants.PROPERTY_KEY_TYPE)
  @Macro
  @Description("Type of entity key read from the Cloud Datastore. The type can be one of three values: "
    + "`None` - key will not be included, `Key literal` - key will be included "
    + "in Cloud Datastore key literal format including complete path with ancestors, `URL-safe key` - key "
    + "will be included in the encoded form that can be used as part of a URL. "
    + "Note, if `Key literal` or `URL-safe key` is selected, default key name (`__key__`) or its alias must be present "
    + "in the schema with non-nullable STRING type.")
  private String keyType;

  @Name(DatastoreSourceConstants.PROPERTY_KEY_ALIAS)
  @Macro
  @Nullable
  @Description("Name of the field to set as the key field. This value is ignored if the `Key Type` is set to `None`. "
    + "If no value is provided, `__key__` is used.")
  private String keyAlias;

  @Name(DatastoreSourceConstants.PROPERTY_SCHEMA)
  @Macro
  @Nullable
  @Description("Schema of the data to read. Can be imported or fetched by clicking the `Get Schema` button.")
  private String schema;

  public DatastoreSourceConfig() {
    // needed for initialization
  }

  @VisibleForTesting
  DatastoreSourceConfig(String referenceName, String project, String serviceFilePath,
                        @Nullable String namespace, String kind, @Nullable String ancestor,
                        @Nullable String filters, int numSplits, String keyType,
                        @Nullable String keyAlias, String schema) {
    this.referenceName = referenceName;
    this.project = project;
    this.serviceFilePath = serviceFilePath;
    this.namespace = namespace;
    this.kind = kind;
    this.ancestor = ancestor;
    this.filters = filters;
    this.numSplits = numSplits;
    this.keyType = keyType;
    this.keyAlias = keyAlias;
    this.schema = schema;
  }

  public String getReferenceName() {
    return referenceName;
  }

  /**
   * Parses the configured JSON schema.
   *
   * @param collector failure collector used to report an unparsable schema
   * @return parsed schema, or {@code null} when no schema is configured
   */
  public Schema getSchema(FailureCollector collector) {
    if (Strings.isNullOrEmpty(schema)) {
      return null;
    }
    try {
      return Schema.parseJson(schema);
    } catch (IOException e) {
      collector.addFailure("Invalid schema: " + e.getMessage(), null)
        .withConfigProperty(DatastoreSourceConstants.PROPERTY_SCHEMA);
    }
    // if there was an error that was added, it will throw an exception, otherwise, this statement will not be executed
    throw collector.getOrThrowException();
  }

  public String getNamespace() {
    return DatastorePropertyUtil.getNamespace(namespace);
  }

  public String getKind() {
    return kind;
  }

  /**
   * Parses the configured ancestor key literal into a list of path elements.
   *
   * @param collector failure collector used to report an invalid key literal
   * @return list of ancestor path elements, empty when no ancestor is configured
   */
  public List<PathElement> getAncestor(FailureCollector collector) {
    try {
      return DatastorePropertyUtil.parseKeyLiteral(ancestor);
    } catch (IllegalArgumentException e) {
      collector.addFailure(e.getMessage(), null).withConfigProperty(DatastoreSourceConstants.PROPERTY_ANCESTOR);
    }
    // if there was an error that was added, it will throw an exception, otherwise, this statement will not be executed
    throw collector.getOrThrowException();
  }

  /**
   * Returns the configured filters as an ordered map of field name to expected value.
   *
   * @throws IllegalArgumentException if kv parser can not parse the filter property
   */
  public Map<String, String> getFilters() {
    if (Strings.isNullOrEmpty(filters)) {
      return Collections.emptyMap();
    }
    return StreamSupport.stream(KV_PARSER.parse(filters).spliterator(), false)
      .collect(Collectors.toMap(
        KeyValue::getKey,
        KeyValue::getValue,
        (o, n) -> n,
        LinkedHashMap::new));
  }

  public int getNumSplits() {
    return numSplits;
  }

  /**
   * Resolves the configured key type string to a {@link SourceKeyType}.
   *
   * @param collector failure collector used to report an unsupported key type
   * @return resolved key type
   */
  public SourceKeyType getKeyType(FailureCollector collector) {
    Optional<SourceKeyType> sourceKeyType = SourceKeyType.fromValue(keyType);
    if (sourceKeyType.isPresent()) {
      return sourceKeyType.get();
    }
    collector.addFailure("Unsupported key type value: " + keyType,
                         String.format("Supported key types are: %s", SourceKeyType.getSupportedTypes()))
      .withConfigProperty(DatastoreSourceConstants.PROPERTY_KEY_TYPE);
    throw collector.getOrThrowException();
  }

  public boolean isIncludeKey(FailureCollector collector) {
    return SourceKeyType.NONE != getKeyType(collector);
  }

  public String getKeyAlias() {
    return DatastorePropertyUtil.getKeyAlias(keyAlias);
  }

  @Override
  public void validate(FailureCollector collector) {
    super.validate(collector);
    validateDatastoreConnection(collector);
    validateKind(collector);
    validateAncestor(collector);
    validateNumSplits(collector);

    if (containsMacro(DatastoreSourceConstants.PROPERTY_SCHEMA)) {
      return;
    }

    Schema schema = getSchema(collector);
    if (schema != null) {
      validateSchema(schema, collector);
      validateFilters(schema, collector);
      validateKeyType(schema, collector);
    }
  }

  @VisibleForTesting
  void validateDatastoreConnection(FailureCollector collector) {
    if (!shouldConnect()) {
      return;
    }
    try {
      DatastoreUtil.getDatastoreV1(getServiceAccount(), isServiceAccountFilePath(), getProject());
    } catch (DatastoreInitializationException e) {
      collector.addFailure(e.getMessage(), "Ensure properties like project, service account file path are correct.")
        .withConfigProperty(DatastoreSourceConstants.PROPERTY_SERVICE_FILE_PATH)
        .withConfigProperty(DatastoreSourceConstants.PROPERTY_PROJECT);
    }
  }

  private void validateKind(FailureCollector collector) {
    if (containsMacro(DatastoreSourceConstants.PROPERTY_KIND)) {
      return;
    }
    if (Strings.isNullOrEmpty(kind)) {
      collector.addFailure("Kind must be specified.", null)
        .withConfigProperty(DatastoreSourceConstants.PROPERTY_KIND);
    }
  }

  private void validateAncestor(FailureCollector collector) {
    if (!containsMacro(DatastoreSourceConstants.PROPERTY_ANCESTOR)) {
      getAncestor(collector);
    }
  }

  private void validateNumSplits(FailureCollector collector) {
    if (containsMacro(DatastoreSourceConstants.PROPERTY_NUM_SPLITS)) {
      return;
    }
    if (numSplits < 1) {
      collector.addFailure("Number of splits must be greater than 0", null)
        .withConfigProperty(DatastoreSourceConstants.PROPERTY_NUM_SPLITS);
    }
  }

  private void validateSchema(Schema schema, FailureCollector collector) {
    List<Schema.Field> fields = schema.getFields();
    if (fields == null || fields.isEmpty()) {
      collector.addFailure("Source schema must contain at least one field", null)
        .withConfigProperty(DatastoreSourceConstants.PROPERTY_SCHEMA);
    } else {
      fields.forEach(f -> validateFieldSchema(f.getName(), f.getSchema(), collector));
    }
  }

  /**
   * Validates given field schema to be compliant with Datastore types.
   *
   * @param fieldName field name
   * @param fieldSchema schema for CDAP field
   * @param collector failure collector to collect failures if schema contains unsupported type.
   */
  private void validateFieldSchema(String fieldName, Schema fieldSchema, FailureCollector collector) {
    Schema.LogicalType logicalType = fieldSchema.getLogicalType();
    if (logicalType != null) {
      if (!SUPPORTED_LOGICAL_TYPES.contains(logicalType)) {
        // note: `datetime` is in SUPPORTED_LOGICAL_TYPES, so it must be listed as supported here
        collector.addFailure(String.format("Field '%s' is of unsupported type '%s'", fieldName,
                                           fieldSchema.getDisplayName()),
                             "Supported types are: string, double, boolean, bytes, long, record, "
                               + "array, union, datetime and timestamp.")
          .withOutputSchemaField(fieldName);
        return;
      }
    }

    // supported logical types fall through to their underlying physical type
    // (TIMESTAMP_MICROS -> LONG, DATETIME -> STRING)
    switch (fieldSchema.getType()) {
      case STRING:
      case DOUBLE:
      case BOOLEAN:
      case BYTES:
      case LONG:
      case NULL:
        return;
      case RECORD:
        validateSchema(fieldSchema, collector);
        return;
      case ARRAY:
        if (fieldSchema.getComponentSchema() == null) {
          collector.addFailure(String.format("Field '%s' has no schema for array type", fieldName),
                               "Ensure array component has schema.").withOutputSchemaField(fieldName);
          return;
        }
        Schema componentSchema = fieldSchema.getComponentSchema();
        if (Schema.Type.ARRAY == componentSchema.getType()) {
          collector.addFailure(String.format("Field '%s' is of unsupported type array of array.", fieldName),
                               "Ensure the field has valid type.")
            .withOutputSchemaField(fieldName);
          return;
        }
        validateFieldSchema(fieldName, componentSchema, collector);
        return;
      case UNION:
        fieldSchema.getUnionSchemas().forEach(unionSchema -> validateFieldSchema(fieldName, unionSchema, collector));
        return;
      default:
        collector.addFailure(String.format("Field '%s' is of unsupported type '%s'", fieldName,
                                           fieldSchema.getDisplayName()),
                             "Supported types are: string, double, boolean, bytes, long, record, "
                               + "array, union, datetime and timestamp.")
          .withOutputSchemaField(fieldName);
    }
  }

  private void validateFilters(Schema schema, FailureCollector collector) {
    if (containsMacro(DatastoreSourceConstants.PROPERTY_FILTERS)) {
      return;
    }
    try {
      Map<String, String> filters = getFilters();
      List<String> missingProperties = filters.keySet().stream()
        .filter(k -> schema.getField(k) == null)
        .collect(Collectors.toList());

      for (String missingProperty : missingProperties) {
        collector.addFailure(String.format("Property '%s' does not exist in the schema.", missingProperty),
                             "Change Property to be one of the schema fields.")
          .withConfigElement(DatastoreSourceConstants.PROPERTY_FILTERS,
                             missingProperty + "|" + filters.get(missingProperty));
      }
    } catch (IllegalArgumentException e) {
      // IllegalArgumentException is thrown from getFilters method.
      collector.addFailure(e.getMessage(), null)
        .withConfigProperty(DatastoreSourceConstants.PROPERTY_FILTERS);
    }
  }

  /**
   * Validates if key alias column is present in the schema and its type is {@link Schema.Type#STRING}.
   *
   * @param schema CDAP schema
   */
  private void validateKeyType(Schema schema, FailureCollector collector) {
    if (containsMacro(DatastoreSourceConstants.PROPERTY_KEY_TYPE)
      || containsMacro(DatastoreSourceConstants.PROPERTY_KEY_ALIAS)) {
      return;
    }
    if (isIncludeKey(collector)) {
      String key = getKeyAlias();
      Schema.Field field = schema.getField(key);
      if (field == null) {
        collector.addFailure(String.format("Key field '%s' does not exist in the schema.", key),
                             "Change the Key field to be one of the schema fields.")
          .withConfigProperty(DatastoreSourceConstants.PROPERTY_KEY_ALIAS);
        return;
      }

      Schema fieldSchema = field.getSchema();
      Schema.Type type = fieldSchema.getType();
      if (Schema.Type.STRING != type) {
        fieldSchema = fieldSchema.isNullable() ? fieldSchema.getNonNullable() : fieldSchema;
        collector.addFailure(String.format("Key field '%s' is of unsupported type '%s'", key,
                                           fieldSchema.getDisplayName()),
                             "Ensure the type is non-nullable String.")
          .withConfigProperty(DatastoreSourceConstants.PROPERTY_KEY_ALIAS).withOutputSchemaField(field.getName());
      }
    }
  }

  /**
   * Constructs protobuf query instance which will be used for query splitting.
   * Adds ancestor and property filters if present in the given configuration.
   *
   * @param collector failure collector
   * @return protobuf query instance
   */
  public com.google.datastore.v1.Query constructPbQuery(FailureCollector collector) {
    com.google.datastore.v1.Query.Builder builder = com.google.datastore.v1.Query.newBuilder()
      .addKind(KindExpression.newBuilder()
                 .setName(getKind()));

    List<Filter> filters = getFilters().entrySet().stream()
      .map(e -> DatastoreHelper.makeFilter(e.getKey(), PropertyFilter.Operator.EQUAL,
                                           constructFilterValue(e.getKey(), e.getValue(), getSchema(collector)))
        .build())
      .collect(Collectors.toList());

    List<PathElement> ancestors = getAncestor(collector);
    if (!ancestors.isEmpty()) {
      filters.add(DatastoreHelper.makeAncestorFilter(constructKey(ancestors, getProject(), getNamespace())).build());
    }

    if (!filters.isEmpty()) {
      builder.setFilter(DatastoreHelper.makeAndFilter(filters));
    }
    return builder.build();
  }

  /**
   * Constructs Datastore protobuf key instance based on given list of path elements
   * and Datastore configuration.
   *
   * @param pathElements list of path elements
   * @param project project ID
   * @param namespace namespace name
   * @return Datastore protobuf key instance
   */
  private com.google.datastore.v1.Key constructKey(List<PathElement> pathElements, String project, String namespace) {
    Object[] elements = pathElements.stream()
      .flatMap(pathElement -> Stream.of(pathElement.getKind(),
                                        pathElement.getIdTypeCase() == PathElement.IdTypeCase.ID
                                          ? pathElement.getId() : pathElement.getName()))
      .toArray();

    return DatastoreHelper.makeKey(elements)
      .setPartitionId(PartitionId.newBuilder()
                        .setProjectId(project)
                        .setNamespaceId(namespace)
                        .build())
      .build();
  }

  /**
   * Transforms given value into value holder corresponding to the given field schema type.
   * If given value is empty, creates null value holder (`is null` clause).
   *
   * @param name field name
   * @param value filter value in string representation
   * @param schema field schema
   * @return protobuf value for filter
   */
  private com.google.datastore.v1.Value constructFilterValue(String name, @Nullable String value, Schema schema) {
    Schema.Field field = Objects.requireNonNull(schema.getField(name));
    Schema fieldSchema = field.getSchema();

    if (Strings.isNullOrEmpty(value)) {
      return com.google.datastore.v1.Value.newBuilder()
        .setNullValue(com.google.protobuf.NullValue.NULL_VALUE)
        .build();
    }

    return constructFilterValue(name, fieldSchema, value);
  }

  /**
   * Transforms given value into value holder corresponding to the given field schema type.
   * May call itself recursively of schema is of UNION type.
   *
   * @param name field name
   * @param schema field schema
   * @param value value in string representation
   * @return protobuf value for filter
   */
  private com.google.datastore.v1.Value constructFilterValue(String name, Schema schema, String value) {
    Schema.LogicalType logicalType = schema.getLogicalType();
    if (logicalType != null) {
      if (logicalType == Schema.LogicalType.TIMESTAMP_MICROS) {
        Timestamp timestamp = Timestamp.parseTimestamp(value);
        return Value.newBuilder()
          .setTimestampValue(timestamp.toProto())
          .build();
      }
      throw new IllegalStateException(
        String.format("Filter field '%s' is of unsupported type '%s'", name, logicalType.getToken()));
    }

    switch (schema.getType()) {
      case STRING:
        return DatastoreHelper.makeValue(value).build();
      case DOUBLE:
        return DatastoreHelper.makeValue(Double.valueOf(value)).build();
      case LONG:
        return DatastoreHelper.makeValue(Long.valueOf(value)).build();
      case BOOLEAN:
        return DatastoreHelper.makeValue(Boolean.valueOf(value)).build();
      case UNION:
        // nullable fields in CDAP are represented as UNION of NULL and FIELD_TYPE
        if (schema.isNullable()) {
          return constructFilterValue(name, schema.getNonNullable(), value);
        }
        throw new IllegalStateException(
          String.format("Filter field '%s' is of unsupported type 'complex UNION'", name));
      default:
        throw new IllegalStateException(
          String.format("Filter field '%s' is of unsupported type '%s'", name, schema.getType()));
    }
  }

  @Override
  public String toString() {
    return "DatastoreSourceConfig{"
      + "referenceName='" + referenceName + '\''
      + ", project='" + project + '\''
      + ", serviceFilePath='" + serviceFilePath + '\''
      + ", namespace='" + namespace + '\''
      + ", kind='" + kind + '\''
      + ", ancestor='" + ancestor + '\''
      + ", filters='" + filters + '\''
      + ", numSplits=" + numSplits
      + ", keyType='" + keyType + '\''
      + ", keyAlias='" + keyAlias + '\''
      + ", schema='" + schema + '\''
      + "} ";
  }

  /**
   * Returns true if datastore can be connected to or schema is not a macro.
   */
  boolean shouldConnect() {
    return !containsMacro(DatastoreSourceConstants.PROPERTY_SCHEMA)
      && !containsMacro(NAME_SERVICE_ACCOUNT_TYPE)
      && !containsMacro(DatastoreSourceConfig.NAME_SERVICE_ACCOUNT_FILE_PATH)
      && !containsMacro(DatastoreSourceConfig.NAME_SERVICE_ACCOUNT_JSON)
      && !containsMacro(DatastoreSourceConfig.NAME_PROJECT)
      && !containsMacro(DatastoreSourceConstants.PROPERTY_KIND)
      && !containsMacro(DatastoreSourceConstants.PROPERTY_NAMESPACE)
      && !containsMacro(DatastoreSourceConstants.PROPERTY_ANCESTOR)
      && !containsMacro(DatastoreSourceConstants.PROPERTY_KEY_TYPE)
      && !containsMacro(DatastoreSourceConstants.PROPERTY_KEY_ALIAS)
      && tryGetProject() != null
      && !autoServiceAccountUnavailable();
  }
}
data-integrations/google-cloud
src/main/java/io/cdap/plugin/gcp/datastore/source/DatastoreSourceConfig.java
Java
apache-2.0
22,996
/*
 * Copyright 2010 Helio Chissini de Castro <helio@collabora.co.uk>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "qfacebookgraphlink.h"

// Constructs an empty link object: every property starts out as a
// default (null/empty) value.
QFacebookGraphLink::QFacebookGraphLink(QObject *parent) :
    QObject(parent)
{
    m_id = QString();
    m_from = QStringList();
    m_link = QUrl();
    m_name = QString();
    m_caption = QString();
    m_description = QString();
    m_icon = QUrl();
    // BUG FIX: was `m_picture = 0;`, but m_picture is a QUrl (see
    // picture()/setPicture below) — initialize it like m_icon instead.
    m_picture = QUrl();
    m_message = QString();
    m_createdTime = QDateTime();
}

// Each property below follows the same pattern: a const getter and a
// setter that assigns only when the value actually changes.

QString QFacebookGraphLink::id() const
{
    return m_id;
}

void QFacebookGraphLink::setId(const QString &id)
{
    if (m_id != id)
        m_id = id;
}

QStringList QFacebookGraphLink::from() const
{
    return m_from;
}

void QFacebookGraphLink::setFrom(const QStringList &from)
{
    if (m_from != from)
        m_from = from;
}

QUrl QFacebookGraphLink::link() const
{
    return m_link;
}

void QFacebookGraphLink::setLink(const QUrl &link)
{
    if (m_link != link)
        m_link = link;
}

QString QFacebookGraphLink::name() const
{
    return m_name;
}

void QFacebookGraphLink::setName(const QString &name)
{
    if (m_name != name)
        m_name = name;
}

QString QFacebookGraphLink::caption() const
{
    return m_caption;
}

void QFacebookGraphLink::setCaption(const QString &caption)
{
    if (m_caption != caption)
        m_caption = caption;
}

QString QFacebookGraphLink::description() const
{
    return m_description;
}

void QFacebookGraphLink::setDescription(const QString &description)
{
    if (m_description != description)
        m_description = description;
}

QUrl QFacebookGraphLink::icon() const
{
    return m_icon;
}

void QFacebookGraphLink::setIcon(const QUrl &icon)
{
    if (m_icon != icon)
        m_icon = icon;
}

QUrl QFacebookGraphLink::picture() const
{
    return m_picture;
}

void QFacebookGraphLink::setPicture(const QUrl &picture)
{
    if (m_picture != picture)
        m_picture = picture;
}

QString QFacebookGraphLink::message() const
{
    return m_message;
}

void QFacebookGraphLink::setMessage(const QString &message)
{
    if (m_message != message)
        m_message = message;
}

QDateTime QFacebookGraphLink::createdTime() const
{
    return m_createdTime;
}

void QFacebookGraphLink::setCreatedtime(const QDateTime &createdTime)
{
    if (m_createdTime != createdTime)
        m_createdTime = createdTime;
}
heliocastro/QFacebookGraph
src/graph/qfacebookgraphlink.cpp
C++
apache-2.0
2,870
/**
 * Copyright 2012 Twitter, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package parquet.thrift.struct;

import org.apache.thrift.TBase;
import org.codehaus.jackson.map.ObjectMapper;
import parquet.thrift.ThriftSchemaConverter;

import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.LinkedList;

/**
 * Commandline runner for compatibility checker
 *
 * java CompatibilityRunner generate-json {category_name} {class_name} {dest_dir}
 * The above command will generate json representaion of thrift schema and store it as {dest_dir}/{category_name}.json
 *
 * java CompatibilityRunner compare-json {old_json_path} {new_json_path}
 * The above command will succeed when the new schema is compatible with the old schema.
 * It will fail when they are not compatible. For compatibility rules: {@link CompatibilityChecker}
 *
 * @author Tianshuo Deng
 */
public class CompatibilityRunner {
  public static void main(String[] args) throws Exception {
    LinkedList<String> arguments = new LinkedList<String>(Arrays.asList(args));
    String operator = arguments.pollFirst();
    // BUG FIX: pollFirst() returns null on an empty argument list, which
    // previously caused a NullPointerException; an unrecognized operator
    // previously exited silently with status 0. Both now print usage and
    // exit non-zero.
    if (operator == null) {
      printUsageAndExit();
    } else if (operator.equals("generate-json")) {
      //java CompatibilityRunner generate-json tfe_request com.twitter.logs.TfeRequestLog old_json/
      generateJson(arguments);
    } else if (operator.equals("compare-json")) {
      compareJson(arguments);
    } else {
      printUsageAndExit();
    }
  }

  /** Prints supported invocations to stderr and terminates with a non-zero status. */
  private static void printUsageAndExit() {
    System.err.println("usage:");
    System.err.println("  CompatibilityRunner generate-json {category_name} {class_name} {dest_dir}");
    System.err.println("  CompatibilityRunner compare-json {old_json_path} {new_json_path}");
    System.exit(1);
  }

  /**
   * Reads the two JSON schema files given in {@code arguments} and exits with
   * status 1 (after printing the incompatibility report) when the new schema
   * is not compatible with the old one.
   *
   * @param arguments remaining CLI arguments: old json path, new json path
   * @throws IOException if either file cannot be read or parsed
   */
  private static void compareJson(LinkedList<String> arguments) throws IOException {
    String oldJsonPath = arguments.pollFirst();
    String newJsonPath = arguments.pollFirst();
    if (oldJsonPath == null || newJsonPath == null) {
      printUsageAndExit();
    }

    File oldJsonFile = new File(oldJsonPath);
    checkExist(oldJsonFile);
    File newJsonFile = new File(newJsonPath);
    checkExist(newJsonFile);

    ObjectMapper mapper = new ObjectMapper();
    ThriftType.StructType oldStruct = mapper.readValue(oldJsonFile, ThriftType.StructType.class);
    ThriftType.StructType newStruct = mapper.readValue(newJsonFile, ThriftType.StructType.class);
    CompatibilityReport report = new CompatibilityChecker().checkCompatibility(oldStruct, newStruct);
    if (!report.isCompatible) {
      System.err.println("schema not compatible");
      System.err.println(report.getMessages());
      System.exit(1);
    }
    System.out.println("[success] schema is compatible");
  }

  private static void checkExist(File f) {
    if (!f.exists()) {
      throw new RuntimeException("can not find file " + f);
    }
  }

  /**
   * Converts the given thrift-generated class into its JSON schema
   * representation and stores it as {dest_dir}/{category_name}.json.
   *
   * @param arguments remaining CLI arguments: category name, class name, destination directory
   * @throws ClassNotFoundException if the named thrift class is not on the classpath
   * @throws IOException if the JSON file cannot be written
   */
  private static void generateJson(LinkedList<String> arguments) throws ClassNotFoundException, IOException {
    String catName = arguments.pollFirst();
    String className = arguments.pollFirst();
    String storedPath = arguments.pollFirst();
    if (catName == null || className == null || storedPath == null) {
      printUsageAndExit();
    }
    File storeDir = new File(storedPath);
    // cast is unavoidable with Class.forName; the converter will fail fast if
    // the class is not a thrift-generated TBase
    @SuppressWarnings("unchecked")
    ThriftType.StructType structType =
        new ThriftSchemaConverter().toStructType((Class<? extends TBase<?, ?>>) Class.forName(className));
    ObjectMapper mapper = new ObjectMapper();
    String fileName = catName + ".json";
    mapper.writeValue(new File(storeDir, fileName), structType);
  }
}
cloudera/parquet-mr
parquet-thrift/src/main/java/parquet/thrift/struct/CompatibilityRunner.java
Java
apache-2.0
3,513