repo_name
stringlengths
5
108
path
stringlengths
6
333
size
stringlengths
1
6
content
stringlengths
4
977k
license
stringclasses
15 values
rxin/jvm-unsafe-utils
core/src/test/java/com/databricks/unsafe/util/TestIntArray.java
1470
/*
 * Copyright 2014 Databricks
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.databricks.unsafe.util;

import org.junit.Assert;
import org.junit.Test;

import com.databricks.unsafe.util.memory.MemoryBlock;

/**
 * Tests for {@link IntArray}: element access and conversion to a plain JVM int[].
 */
public class TestIntArray {

  /**
   * Builds a 4-element IntArray backed by a 16-byte block, ending up as
   * {1, 2, 3, 5}. Index 3 is deliberately written twice (4, then 5) so the
   * fixture also exercises overwriting an existing slot.
   */
  private IntArray createTestData() {
    byte[] backing = new byte[16];
    IntArray array = new IntArray(MemoryBlock.fromByteArray(backing));
    int[] initial = {1, 2, 3};
    for (int i = 0; i < initial.length; i++) {
      array.set(i, initial[i]);
    }
    array.set(3, 4);
    array.set(3, 5); // overwrite: the second value must win
    return array;
  }

  @Test
  public void basicTest() {
    IntArray array = createTestData();
    Assert.assertEquals(4, array.size());
    int[] expected = {1, 2, 3, 5};
    for (int i = 0; i < expected.length; i++) {
      Assert.assertEquals(expected[i], array.get(i));
    }
  }

  @Test
  public void toJvmArray() {
    IntArray array = createTestData();
    Assert.assertArrayEquals(new int[] {1, 2, 3, 5}, array.toJvmArray());
  }
}
apache-2.0
Q115/Goalie_Android
app/src/main/java/com/github/q115/goalie_android/models/User.java
2271
package com.github.q115.goalie_android.models; import android.graphics.Bitmap; import com.github.q115.goalie_android.MainDB; import com.raizlabs.android.dbflow.annotation.Column; import com.raizlabs.android.dbflow.annotation.ColumnIgnore; import com.raizlabs.android.dbflow.annotation.PrimaryKey; import com.raizlabs.android.dbflow.annotation.Table; import com.raizlabs.android.dbflow.structure.BaseModel; import java.util.HashMap; /* * Copyright 2017 Qi Li * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ @Table(database = MainDB.class) public class User extends BaseModel { @Column @PrimaryKey public String username; @Column public String bio; @Column public long lastPhotoModifiedTime; @Column public long reputation; @ColumnIgnore public Bitmap profileBitmapImage; @ColumnIgnore public final HashMap<String, Goal> activeGoals; // username -> goal @ColumnIgnore public HashMap<String, Goal> finishedGoals; // username -> goal public User() { username = ""; bio = ""; reputation = 100; lastPhotoModifiedTime = 0; profileBitmapImage = null; activeGoals = new HashMap<>(); finishedGoals = new HashMap<>(); } public User(String username) { this(); this.username = username; } public User(String username, long reputation) { this(); this.username = username; this.reputation = reputation; } public User(String username, String bio, long reputation, long lastPhotoModifiedTime) { this(); this.username = username; this.reputation = reputation; this.bio = bio; this.lastPhotoModifiedTime = 
lastPhotoModifiedTime; } }
apache-2.0
leedehua/DataMS
src/com/action/UserResetPwd.java
1497
package com.action; import java.io.IOException; import javax.servlet.ServletException; import javax.servlet.annotation.WebServlet; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import com.dao.UserDao; /** * @author dehua * 更改密码Servlet */ @WebServlet("/UserResetPwd") public class UserResetPwd extends HttpServlet { private static final long serialVersionUID = 1L; /** * @see HttpServlet#HttpServlet() */ public UserResetPwd() { super(); } /** * @see HttpServlet#doGet(HttpServletRequest request, HttpServletResponse response) */ protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { response.setCharacterEncoding("utf-8"); String username = (String) request.getSession().getAttribute("user"); String pwd = request.getParameter("resetpwd_pwd"); int res=0; UserDao ud=new UserDao(); res=ud.resetPassword(pwd, username); if(res==1){ response.getWriter().print("密码更改成功"); }else{ response.getWriter().print("密码更改失败!原因:未知"); } } /** * @see HttpServlet#doPost(HttpServletRequest request, HttpServletResponse response) */ protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { doGet(request, response); } }
apache-2.0
LotteryOne/tools
src/akka/agent/Counter.java
727
package four;

import akka.actor.UntypedActor;
import akka.dispatch.Mapper;
import scala.concurrent.Future;

/**
 * Actor that, on receiving any {@code Integer} message, schedules 10000
 * asynchronous increments on the shared counter agent and then stops itself.
 * Non-Integer messages are silently ignored (unchanged from the original).
 *
 * Created by BlueSky on 2/16/2017.
 */
public class CounterActor extends UntypedActor {

    // Parameterized instead of the raw Mapper/Future used before: raw types
    // forfeit compile-time checking for no benefit (Effective Java, Item 26).
    Mapper<Integer, Integer> addMapper = new Mapper<Integer, Integer>() {
        @Override
        public Integer apply(Integer parameter) {
            // Pure increment; Agent.alter applies it atomically per dispatch.
            return parameter + 1;
        }
    };

    @Override
    public void onReceive(Object message) throws Exception {
        if (message instanceof Integer) {
            for (int i = 0; i < 10000; i++) {
                // alter() is asynchronous; collect the futures so a
                // coordinator can await all increments elsewhere.
                Future<Integer> f = AgentActor.countAgent.alter(addMapper);
                AgentActor.futures.add(f);
            }
            getContext().stop(getSelf());
        }
    }
}
apache-2.0
milg0/onvif-java-lib
src/org/onvif/ver10/schema/PTZCapabilities.java
3590
//
// This file was generated with the JavaTM Architecture for XML Binding (JAXB) Reference Implementation, v2.2.5-2
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2014.02.04 at 12:22:03 PM CET
//

package org.onvif.ver10.schema;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlAnyAttribute;
import javax.xml.bind.annotation.XmlAnyElement;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlSchemaType;
import javax.xml.bind.annotation.XmlType;
import javax.xml.namespace.QName;
import org.w3c.dom.Element;

/**
 * <p>
 * Java class for the PTZCapabilities complex type.
 *
 * <p>
 * The following schema fragment specifies the expected content contained within this class.
 *
 * <pre>
 * &lt;complexType name="PTZCapabilities">
 *   &lt;complexContent>
 *     &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
 *       &lt;sequence>
 *         &lt;element name="XAddr" type="{http://www.w3.org/2001/XMLSchema}anyURI"/>
 *         &lt;any processContents='lax' maxOccurs="unbounded" minOccurs="0"/>
 *       &lt;/sequence>
 *       &lt;anyAttribute processContents='lax'/>
 *     &lt;/restriction>
 *   &lt;/complexContent>
 * &lt;/complexType>
 * </pre>
 *
 *
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "PTZCapabilities", propOrder = { "xAddr", "any" })
public class PTZCapabilities {

    // Required service address of the PTZ service (the XAddr element).
    @XmlElement(name = "XAddr", required = true)
    @XmlSchemaType(name = "anyURI")
    protected String xAddr;
    // Lax wildcard content: arbitrary extension elements allowed by <any>.
    @XmlAnyElement(lax = true)
    protected List<java.lang.Object> any;
    // Catch-all for attributes not bound to a typed property (anyAttribute).
    @XmlAnyAttribute
    private Map<QName, String> otherAttributes = new HashMap<QName, String>();

    /**
     * Gets the value of the xAddr property.
     *
     * @return possible object is {@link String }
     *
     */
    public String getXAddr() {
        return xAddr;
    }

    /**
     * Sets the value of the xAddr property.
     *
     * @param value
     *            allowed object is {@link String }
     *
     */
    public void setXAddr(String value) {
        this.xAddr = value;
    }

    /**
     * Gets the value of the any property.
     *
     * <p>
     * This accessor method returns a reference to the live list, not a snapshot. Therefore any modification you make to the returned list will be present inside the JAXB object.
     * This is why there is not a <CODE>set</CODE> method for the any property.
     *
     * <p>
     * For example, to add a new item, do as follows:
     *
     * <pre>
     * getAny().add(newItem);
     * </pre>
     *
     *
     * <p>
     * Objects of the following type(s) are allowed in the list {@link Element } {@link java.lang.Object }
     *
     *
     */
    public List<java.lang.Object> getAny() {
        // Lazily created so an unmarshalled object with no wildcard content
        // still returns a usable (empty) live list.
        if (any == null) {
            any = new ArrayList<java.lang.Object>();
        }
        return this.any;
    }

    /**
     * Gets a map that contains attributes that aren't bound to any typed property on this class.
     *
     * <p>
     * the map is keyed by the name of the attribute and the value is the string value of the attribute.
     *
     * the map returned by this method is live, and you can add new attribute by updating the map directly. Because of this design, there's no setter.
     *
     *
     * @return always non-null
     */
    public Map<QName, String> getOtherAttributes() {
        return otherAttributes;
    }
}
apache-2.0
GitHubAFeng/AFengAndroid
app/src/main/java/com/afeng/xf/ui/movie/HotMovieBean.java
1079
package com.afeng.xf.ui.movie;

/**
 * Created by Administrator on 2017/5/3.
 */

import java.io.Serializable;
import java.util.List;

/**
 * Bean for a paged movie-list response: paging fields ({@code count},
 * {@code start}, {@code total}), a page {@code title}, and the list of movie
 * {@code subjects}. Presumably deserialized from a JSON movie API — confirm
 * against the caller.
 *
 * Created by jingbin on 2016/11/25.
 */
public class HotMovieBean implements Serializable {

    private static final long serialVersionUID = 8192290954083952985L;

    private int count;
    private int start;
    private int total;
    private String title;
    private List<SubjectsBean> subjects;

    public int getCount() {
        return count;
    }

    /**
     * Added for consistency: every other field had a setter while
     * {@code count} did not, which breaks setter-based deserializers and
     * prevents callers from populating the bean fully.
     */
    public void setCount(int count) {
        this.count = count;
    }

    public int getStart() {
        return start;
    }

    public void setStart(int start) {
        this.start = start;
    }

    public int getTotal() {
        return total;
    }

    public void setTotal(int total) {
        this.total = total;
    }

    public String getTitle() {
        return title;
    }

    public void setTitle(String title) {
        this.title = title;
    }

    public List<SubjectsBean> getSubjects() {
        return subjects;
    }

    public void setSubjects(List<SubjectsBean> subjects) {
        this.subjects = subjects;
    }
}
apache-2.0
helloyide/aris-connect-config-toolset
src/com/piapox/idea/acct/view/annotator/fix/AddInstanceConfigFile.java
2513
package com.piapox.idea.acct.view.annotator.fix; import com.intellij.codeInsight.intention.impl.BaseIntentionAction; import com.intellij.openapi.editor.Editor; import com.intellij.openapi.project.Project; import com.intellij.openapi.vfs.VirtualFile; import com.intellij.psi.PsiFile; import com.intellij.psi.PsiManager; import com.intellij.util.IncorrectOperationException; import com.intellij.util.PsiNavigateUtil; import com.piapox.idea.acct.util.FileHelper; import org.jetbrains.annotations.Nls; import org.jetbrains.annotations.NotNull; import java.io.IOException; public class AddInstanceConfigFile extends BaseIntentionAction { private Project project; private String configSetName; private String componentType; private String instanceId; private VirtualFile defaultInstanceConfigFile; public AddInstanceConfigFile(Project project, String configSetName, String componentType, String instanceId, VirtualFile defaultInstanceConfigFile) { this.project = project; this.configSetName = configSetName; this.componentType = componentType; this.instanceId = instanceId; this.defaultInstanceConfigFile = defaultInstanceConfigFile; } @Nls @NotNull @Override public String getFamilyName() { return "AddInstanceConfigFile"; } @NotNull @Override public String getText() { return "Create an new instance configuration based on the default configuration."; } @Override public boolean isAvailable(@NotNull Project project, Editor editor, PsiFile file) { return true; } @Override public void invoke(@NotNull Project project, Editor editor, PsiFile file) throws IncorrectOperationException { FileHelper.runWriteAction(()->{ VirtualFile parent = defaultInstanceConfigFile.getParent(); String newInstanceConfigFileName = instanceId + ".xml"; try { defaultInstanceConfigFile.copy(AddInstanceConfigFile.this, parent, newInstanceConfigFileName); VirtualFile newInstanceConfigFile = parent.findChild(newInstanceConfigFileName); if (newInstanceConfigFile != null) { PsiFile psiFile = 
PsiManager.getInstance(project).findFile(newInstanceConfigFile); PsiNavigateUtil.navigate(psiFile); } } catch (IOException e) { e.printStackTrace(); } }, project, file.getVirtualFile()); } }
apache-2.0
googleapis/java-vision
proto-google-cloud-vision-v1p3beta1/src/main/java/com/google/cloud/vision/v1p3beta1/ImportProductSetsResponseOrBuilder.java
4814
/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/cloud/vision/v1p3beta1/product_search_service.proto

package com.google.cloud.vision.v1p3beta1;

/**
 * Accessor interface for the generated {@code ImportProductSetsResponse}
 * message: repeated {@code reference_images} (field 1) and repeated
 * {@code statuses} (field 2). Do not hand-edit; regenerate from the proto.
 */
public interface ImportProductSetsResponseOrBuilder
    extends
    // @@protoc_insertion_point(interface_extends:google.cloud.vision.v1p3beta1.ImportProductSetsResponse)
    com.google.protobuf.MessageOrBuilder {

  /**
   *
   *
   * <pre>
   * The list of reference_images that are imported successfully.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1p3beta1.ReferenceImage reference_images = 1;</code>
   */
  java.util.List<com.google.cloud.vision.v1p3beta1.ReferenceImage> getReferenceImagesList();

  /**
   *
   *
   * <pre>
   * The list of reference_images that are imported successfully.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1p3beta1.ReferenceImage reference_images = 1;</code>
   */
  com.google.cloud.vision.v1p3beta1.ReferenceImage getReferenceImages(int index);

  /**
   *
   *
   * <pre>
   * The list of reference_images that are imported successfully.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1p3beta1.ReferenceImage reference_images = 1;</code>
   */
  int getReferenceImagesCount();

  /**
   *
   *
   * <pre>
   * The list of reference_images that are imported successfully.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1p3beta1.ReferenceImage reference_images = 1;</code>
   */
  java.util.List<? extends com.google.cloud.vision.v1p3beta1.ReferenceImageOrBuilder>
      getReferenceImagesOrBuilderList();

  /**
   *
   *
   * <pre>
   * The list of reference_images that are imported successfully.
   * </pre>
   *
   * <code>repeated .google.cloud.vision.v1p3beta1.ReferenceImage reference_images = 1;</code>
   */
  com.google.cloud.vision.v1p3beta1.ReferenceImageOrBuilder getReferenceImagesOrBuilder(int index);

  /**
   *
   *
   * <pre>
   * The rpc status for each ImportProductSet request, including both successes
   * and errors.
   * The number of statuses here matches the number of lines in the csv file,
   * and statuses[i] stores the success or failure status of processing the i-th
   * line of the csv, starting from line 0.
   * </pre>
   *
   * <code>repeated .google.rpc.Status statuses = 2;</code>
   */
  java.util.List<com.google.rpc.Status> getStatusesList();

  /**
   *
   *
   * <pre>
   * The rpc status for each ImportProductSet request, including both successes
   * and errors.
   * The number of statuses here matches the number of lines in the csv file,
   * and statuses[i] stores the success or failure status of processing the i-th
   * line of the csv, starting from line 0.
   * </pre>
   *
   * <code>repeated .google.rpc.Status statuses = 2;</code>
   */
  com.google.rpc.Status getStatuses(int index);

  /**
   *
   *
   * <pre>
   * The rpc status for each ImportProductSet request, including both successes
   * and errors.
   * The number of statuses here matches the number of lines in the csv file,
   * and statuses[i] stores the success or failure status of processing the i-th
   * line of the csv, starting from line 0.
   * </pre>
   *
   * <code>repeated .google.rpc.Status statuses = 2;</code>
   */
  int getStatusesCount();

  /**
   *
   *
   * <pre>
   * The rpc status for each ImportProductSet request, including both successes
   * and errors.
   * The number of statuses here matches the number of lines in the csv file,
   * and statuses[i] stores the success or failure status of processing the i-th
   * line of the csv, starting from line 0.
   * </pre>
   *
   * <code>repeated .google.rpc.Status statuses = 2;</code>
   */
  java.util.List<? extends com.google.rpc.StatusOrBuilder> getStatusesOrBuilderList();

  /**
   *
   *
   * <pre>
   * The rpc status for each ImportProductSet request, including both successes
   * and errors.
   * The number of statuses here matches the number of lines in the csv file,
   * and statuses[i] stores the success or failure status of processing the i-th
   * line of the csv, starting from line 0.
   * </pre>
   *
   * <code>repeated .google.rpc.Status statuses = 2;</code>
   */
  com.google.rpc.StatusOrBuilder getStatusesOrBuilder(int index);
}
apache-2.0
ywendy/wendy-parent
wendy-structures-algorithm/src/main/java/com/wendy/algorithm/simple/ModuloOperation.java
454
package com.wendy.algorithm.simple; /** * 取模运算 a mod b = a%b a%b 运算步骤: c = a/b(取余数); a%b = a - c*b; * * @author tony * */ public class ModuloOperation { public static long mod(long a, long b) { long c = a / b; return a - c * b; } public static void main(String[] args) { long a = 9, b = 5; System.out.println(" a%b = " + a % b); System.out.println(" a mod b = " + mod(a, b)); } }
apache-2.0
vergilchiu/hive
metastore/src/java/org/apache/hadoop/hive/metastore/messaging/EventUtils.java
9722
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.hadoop.hive.metastore.messaging;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.thrift.TException;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

/**
 * Helpers for consuming metastore notification events: factories for
 * {@link IMetaStoreClient.NotificationFilter}s, a fetcher abstraction over the
 * event source, and a lazily-batching iterator over filtered events.
 */
public class EventUtils {

  /**
   * Utility function that constructs a notification filter to match a given db name and/or table name.
   * If dbName == null, fetches all warehouse events.
   * If dnName != null, but tableName == null, fetches all events for the db
   * If dbName != null &amp;&amp; tableName != null, fetches all events for the specified table
   * @param dbName database name to match (case-insensitive), or null for all
   * @param tableName table name to match (case-insensitive), or null for db-level
   * @return a filter implementing the above matching rules
   */
  public static IMetaStoreClient.NotificationFilter getDbTblNotificationFilter(final String dbName, final String tableName){
    return new IMetaStoreClient.NotificationFilter() {
      @Override
      public boolean accept(NotificationEvent event) {
        if (event == null){
          return false; // get rid of trivial case first, so that we can safely assume non-null
        }
        if (dbName == null){
          return true; // if our dbName is null, we're interested in all wh events
        }
        if (dbName.equalsIgnoreCase(event.getDbName())){
          if ( (tableName == null) // if our dbName is equal, but tableName is blank, we're interested in this db-level event
              || (tableName.equalsIgnoreCase(event.getTableName())) // table level event that matches us
              ){
            return true;
          }
        }
        return false;
      }
    };
  }

  /**
   * Returns a filter that accepts only events whose message format matches
   * {@code messageFormat} case-insensitively; a null format accepts everything.
   */
  public static IMetaStoreClient.NotificationFilter restrictByMessageFormat(final String messageFormat){
    return new IMetaStoreClient.NotificationFilter() {
      @Override
      public boolean accept(NotificationEvent event) {
        if (event == null){
          return false; // get rid of trivial case first, so that we can safely assume non-null
        }
        if (messageFormat == null){
          return true; // let's say that passing null in will not do any filtering.
        }
        if (messageFormat.equalsIgnoreCase(event.getMessageFormat())){
          return true;
        }
        return false;
      }
    };
  }

  /**
   * Returns a filter accepting only events whose id is within the inclusive
   * range [eventFrom, eventTo]. Null events are rejected.
   */
  public static IMetaStoreClient.NotificationFilter getEventBoundaryFilter(final Long eventFrom,
                                                                           final Long eventTo){
    return new IMetaStoreClient.NotificationFilter() {
      @Override
      public boolean accept(NotificationEvent event) {
        if ( (event == null) || (event.getEventId() < eventFrom) || (event.getEventId() > eventTo)) {
          return false;
        }
        return true;
      }
    };
  }

  /**
   * Logical conjunction of filters: the returned filter accepts an event only
   * if every supplied filter accepts it (short-circuits on first rejection).
   */
  public static IMetaStoreClient.NotificationFilter andFilter(
      final IMetaStoreClient.NotificationFilter... filters ) {
    return new IMetaStoreClient.NotificationFilter() {
      @Override
      public boolean accept(NotificationEvent event) {
        for (IMetaStoreClient.NotificationFilter filter : filters){
          if (!filter.accept(event)){
            return false;
          }
        }
        return true;
      }
    };
  }

  /**
   * Abstraction over a source of notification events: batch size, the id of
   * the newest event, and batched filtered reads starting at a position.
   */
  public interface NotificationFetcher {
    public int getBatchSize() throws IOException;
    public long getCurrentNotificationEventId() throws IOException;
    public List<NotificationEvent> getNextNotificationEvents(
        long pos, IMetaStoreClient.NotificationFilter filter) throws IOException;
  }

  // MetaStoreClient-based impl of NotificationFetcher
  public static class MSClientNotificationFetcher implements NotificationFetcher{

    private IMetaStoreClient msc = null;
    private Integer batchSize = null; // lazily read from the metastore's config on first use

    public MSClientNotificationFetcher(IMetaStoreClient msc){
      this.msc = msc;
    }

    /**
     * Returns the fetch batch size, read once from the metastore's
     * METASTORE_BATCH_RETRIEVE_MAX setting (default "50") and cached.
     * Thrift failures are rethrown as IOException.
     */
    @Override
    public int getBatchSize() throws IOException {
      if (batchSize == null){
        try {
          batchSize = Integer.parseInt(
            msc.getConfigValue(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX.varname, "50"));
          // TODO: we're asking the metastore what its configuration for this var is - we may
          // want to revisit to pull from client side instead. The reason I have it this way
          // is because the metastore is more likely to have a reasonable config for this than
          // an arbitrary client.
        } catch (TException e) {
          throw new IOException(e);
        }
      }
      return batchSize;
    }

    /** Returns the id of the newest notification event known to the metastore. */
    @Override
    public long getCurrentNotificationEventId() throws IOException {
      try {
        return msc.getCurrentNotificationEventId().getEventId();
      } catch (TException e) {
        throw new IOException(e);
      }
    }

    /** Fetches the next batch of events after {@code pos}, applying {@code filter} server-side. */
    @Override
    public List<NotificationEvent> getNextNotificationEvents(
        long pos, IMetaStoreClient.NotificationFilter filter) throws IOException {
      try {
        return msc.getNextNotification(pos,getBatchSize(), filter).getEvents();
      } catch (TException e) {
        throw new IOException(e);
      }
    }
  }

  /**
   * Iterator over filtered notification events, fetched lazily in batches from
   * a {@link NotificationFetcher}. Stops after {@code maxEvents} events or when
   * the event id observed at construction time ({@code maxPos}) is exhausted.
   */
  public static class NotificationEventIterator implements Iterator<NotificationEvent> {

    private NotificationFetcher nfetcher;
    private IMetaStoreClient.NotificationFilter filter;
    private int maxEvents;   // cap on delivered events; <1 at construction means unlimited
    private Iterator<NotificationEvent> batchIter = null;
    private List<NotificationEvent> batch = null;
    private long pos;        // position to fetch the next batch from
    private long maxPos;     // newest event id at construction time
    private int eventCount;  // number of events delivered so far

    public NotificationEventIterator(
        NotificationFetcher nfetcher, long eventFrom, int maxEvents,
        String dbName, String tableName) throws IOException {
      init(nfetcher, eventFrom, maxEvents, EventUtils.getDbTblNotificationFilter(dbName, tableName));
      // using init(..) instead of this(..) because the EventUtils.getDbTblNotificationFilter
      // is an operation that needs to run before delegating to the other ctor, and this messes up chaining
      // ctors
    }

    public NotificationEventIterator(
        NotificationFetcher nfetcher, long eventFrom, int maxEvents,
        IMetaStoreClient.NotificationFilter filter) throws IOException {
      init(nfetcher,eventFrom,maxEvents,filter);
    }

    // Shared constructor body; also snapshots the current head event id so the
    // iteration has a fixed upper bound.
    private void init(
        NotificationFetcher nfetcher, long eventFrom, int maxEvents,
        IMetaStoreClient.NotificationFilter filter) throws IOException {
      this.nfetcher = nfetcher;
      this.filter = filter;
      this.pos = eventFrom;
      if (maxEvents < 1){
        // 0 or -1 implies fetch everything
        this.maxEvents = Integer.MAX_VALUE;
      } else {
        this.maxEvents = maxEvents;
      }
      this.eventCount = 0;
      this.maxPos = nfetcher.getCurrentNotificationEventId();
    }

    // Fetches batches starting at pos, skipping forward by batchSize while
    // batches come back null/empty, until events arrive or maxPos is passed.
    private void fetchNextBatch() throws IOException {
      batch = nfetcher.getNextNotificationEvents(pos, filter);
      int batchSize = nfetcher.getBatchSize();
      while ( ((batch == null) || (batch.isEmpty())) && (pos < maxPos) ){
        // no valid events this batch, but we're still not done processing events
        pos += batchSize;
        batch = nfetcher.getNextNotificationEvents(pos,filter);
      }
      if (batch == null){
        batch = new ArrayList<NotificationEvent>();
        // instantiate empty list so that we don't error out on iterator fetching.
        // If we're here, then the next check of pos will show our caller that
        // that we've exhausted our event supply
      }
      batchIter = batch.iterator();
    }

    @Override
    public boolean hasNext() {
      if (eventCount >= maxEvents){
        // If we've already satisfied the number of events we were supposed to deliver, we end it.
        return false;
      }
      if ((batchIter != null) && (batchIter.hasNext())){
        // If we have a valid batchIter and it has more elements, return them.
        return true;
      }
      // If we're here, we want more events, and either batchIter is null, or batchIter
      // has reached the end of the current batch. Let's fetch the next batch.
      try {
        fetchNextBatch();
      } catch (IOException e) {
        // Regrettable that we have to wrap the IOException into a RuntimeException,
        // but throwing the exception is the appropriate result here, and hasNext()
        // signature will only allow RuntimeExceptions. Iterator.hasNext() really
        // should have allowed IOExceptions
        throw new RuntimeException(e);
      }
      // New batch has been fetched. If it's not empty, we have more elements to process.
      return !batch.isEmpty();
    }

    // Advances pos to the id of the event just delivered, so the next batch
    // fetch resumes after it.
    @Override
    public NotificationEvent next() {
      eventCount++;
      NotificationEvent ev = batchIter.next();
      pos = ev.getEventId();
      return ev;
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException("remove() not supported on NotificationEventIterator");
    }
  }
}
apache-2.0
xiaguangme/simon_ide_tools
02.eclipse_enhance/org.eclipse.ui.workbench.texteditor/src/org/eclipse/ui/internal/texteditor/CompoundEditExitStrategy.java
8619
/*******************************************************************************
 * Copyright (c) 2005, 2008 IBM Corporation and others.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *     IBM Corporation - initial API and implementation
 *******************************************************************************/
package org.eclipse.ui.internal.texteditor;

import org.eclipse.swt.SWT;
import org.eclipse.swt.custom.StyledText;
import org.eclipse.swt.custom.VerifyKeyListener;
import org.eclipse.swt.events.FocusEvent;
import org.eclipse.swt.events.FocusListener;
import org.eclipse.swt.events.MouseEvent;
import org.eclipse.swt.events.MouseListener;
import org.eclipse.swt.events.VerifyEvent;

import org.eclipse.core.commands.ExecutionEvent;
import org.eclipse.core.commands.ExecutionException;
import org.eclipse.core.commands.IExecutionListener;
import org.eclipse.core.commands.NotHandledException;

import org.eclipse.core.runtime.IStatus;
import org.eclipse.core.runtime.ListenerList;
import org.eclipse.core.runtime.Status;

import org.eclipse.jface.text.ITextViewer;

import org.eclipse.ui.PlatformUI;
import org.eclipse.ui.commands.ICommandService;

/**
 * Exit strategy for commands that want to fold repeated execution into one compound edit. See
 * {@link org.eclipse.jface.text.IRewriteTarget#endCompoundChange() IRewriteTarget.endCompoundChange}.
 * As long as a strategy is installed on an {@link ITextViewer}, it will detect the end of a
 * compound operation when any of the following conditions becomes true:
 * <ul>
 * <li>the viewer's text widget loses the keyboard focus</li>
 * <li>the mouse is clicked or double clicked inside the viewer's widget</li>
 * <li>a command other than the ones specified is executed</li>
 * <li>the viewer receives any key events that are not modifier combinations</li>
 * </ul>
 * <p>
 * If the end of a compound edit is detected, any registered {@link ICompoundEditListener}s are
 * notified and the strategy is disarmed (spring-loaded).
 * </p>
 *
 * @since 3.1
 */
public final class CompoundEditExitStrategy {
	/**
	 * Listens for events that may trigger the end of a compound edit.
	 */
	private final class EventListener implements MouseListener, FocusListener, VerifyKeyListener, IExecutionListener {

		/*
		 * @see org.eclipse.swt.events.MouseListener#mouseDoubleClick(org.eclipse.swt.events.MouseEvent)
		 */
		public void mouseDoubleClick(MouseEvent e) {
			// mouse actions end the compound change
			fireEndCompoundEdit();
		}

		/*
		 * @see org.eclipse.swt.events.MouseListener#mouseDown(org.eclipse.swt.events.MouseEvent)
		 */
		public void mouseDown(MouseEvent e) {
			// mouse actions end the compound change
			fireEndCompoundEdit();
		}

		// Intentionally ignored: only press/double-click end the compound edit.
		public void mouseUp(MouseEvent e) {}

		// Intentionally ignored: only losing focus ends the compound edit.
		public void focusGained(FocusEvent e) {}

		/*
		 * @see org.eclipse.swt.events.FocusListener#focusLost(org.eclipse.swt.events.FocusEvent)
		 */
		public void focusLost(FocusEvent e) {
			// losing focus ends the change
			fireEndCompoundEdit();
		}

		// Command-lifecycle callbacks that do not affect the compound edit.
		public void notHandled(String commandId, NotHandledException exception) {}

		public void postExecuteFailure(String commandId, ExecutionException exception) {}

		public void postExecuteSuccess(String commandId, Object returnValue) {}

		/*
		 * @see org.eclipse.core.commands.IExecutionListener#preExecute(java.lang.String, org.eclipse.core.commands.ExecutionEvent)
		 */
		public void preExecute(String commandId, ExecutionEvent event) {
			// any command other than the known ones end the compound change
			for (int i= 0; i < fCommandIds.length; i++) {
				if (commandId.equals(fCommandIds[i]))
					return;
			}
			fireEndCompoundEdit();
		}

		/*
		 * @see org.eclipse.swt.custom.VerifyKeyListener#verifyKey(org.eclipse.swt.events.VerifyEvent)
		 */
		public void verifyKey(VerifyEvent event) {
			// any key press that is not a modifier combo ends the compound change
			// (shift alone is excluded from the mask so shifted typing doesn't end it)
			final int maskWithoutShift= SWT.MODIFIER_MASK & ~SWT.SHIFT;
			if ((event.keyCode & SWT.MODIFIER_MASK) == 0 && (event.stateMask & maskWithoutShift) == 0)
				fireEndCompoundEdit();
		}
	}

	// Command ids that do NOT end the compound edit when executed.
	private final String[] fCommandIds;
	// Single listener instance registered for all widget and command events.
	private final EventListener fEventListener= new EventListener();
	// Registered ICompoundEditListeners; identity-compared on removal.
	private final ListenerList fListenerList= new ListenerList(ListenerList.IDENTITY);

	// Viewer currently armed on, or null when disarmed.
	private ITextViewer fViewer;
	// The viewer's text widget while listeners are attached, or null.
	private StyledText fWidgetEventSource;

	/**
	 * Creates a new strategy, equivalent to calling
	 * {@linkplain #CompoundEditExitStrategy(String[]) CompoundEditExitStrategy(new String[] &#x7b; commandId &#x7d;)}.
	 *
	 * @param commandId the command id of the repeatable command
	 */
	public CompoundEditExitStrategy(String commandId) {
		if (commandId == null)
			throw new NullPointerException("commandId"); //$NON-NLS-1$
		fCommandIds= new String[] {commandId};
	}

	/**
	 * Creates a new strategy, ending upon execution of any command other than the ones
	 * specified.
	 *
	 * @param commandIds the ids of the repeatable commands
	 */
	public CompoundEditExitStrategy(String[] commandIds) {
		for (int i= 0; i < commandIds.length; i++) {
			if (commandIds[i] == null)
				throw new NullPointerException("commandIds[" + i + "]"); //$NON-NLS-1$ //$NON-NLS-2$
		}
		// Defensive copy so later mutation of the caller's array has no effect.
		fCommandIds= new String[commandIds.length];
		System.arraycopy(commandIds, 0, fCommandIds, 0, commandIds.length);
	}

	/**
	 * Installs the receiver on <code>viewer</code> and arms it. After this call returns, any
	 * registered listeners will be notified if a compound edit ends.
	 *
	 * @param viewer the viewer to install on
	 */
	public void arm(ITextViewer viewer) {
		disarm();
		if (viewer == null)
			// NOTE(review): the message says "editor" though the parameter is
			// named viewer — looks like a leftover; confirm before changing.
			throw new NullPointerException("editor"); //$NON-NLS-1$
		fViewer= viewer;
		addListeners(fViewer);
	}

	/**
	 * Disarms the receiver. After this call returns, any registered listeners will be not be
	 * notified any more until <code>install</code> is called again. Note that the listeners are
	 * not removed.
	 * <p>
	 * Note that the receiver is automatically disarmed when the end of a compound edit has
	 * been detected and before the listeners are notified.
	 * </p>
	 */
	public void disarm() {
		if (isInstalled()) {
			removeListeners();
			fViewer= null;
		}
	}

	// Attaches the event listener to the viewer's widget (key/mouse/focus) and
	// to the workbench command service (command executions).
	private void addListeners(ITextViewer viewer) {
		fWidgetEventSource= viewer.getTextWidget();
		if (fWidgetEventSource != null) {
			fWidgetEventSource.addVerifyKeyListener(fEventListener);
			fWidgetEventSource.addMouseListener(fEventListener);
			fWidgetEventSource.addFocusListener(fEventListener);
		}

		ICommandService commandService= (ICommandService)PlatformUI.getWorkbench().getAdapter(ICommandService.class);
		if (commandService != null)
			commandService.addExecutionListener(fEventListener);
	}

	// Reverses addListeners: command service first, then widget listeners.
	private void removeListeners() {
		ICommandService commandService= (ICommandService)PlatformUI.getWorkbench().getAdapter(ICommandService.class);
		if (commandService != null)
			commandService.removeExecutionListener(fEventListener);

		if (fWidgetEventSource != null) {
			fWidgetEventSource.removeFocusListener(fEventListener);
			fWidgetEventSource.removeMouseListener(fEventListener);
			fWidgetEventSource.removeVerifyKeyListener(fEventListener);
			fWidgetEventSource= null;
		}
	}

	// Armed state: true between arm(viewer) and disarm().
	private boolean isInstalled() {
		return fViewer != null;
	}

	// Disarms first (spring-loaded), then notifies every registered listener,
	// logging (not propagating) any exception a listener throws.
	private void fireEndCompoundEdit() {
		disarm();
		Object[] listeners= fListenerList.getListeners();
		for (int i= 0; i < listeners.length; i++) {
			ICompoundEditListener listener= (ICompoundEditListener) listeners[i];
			try {
				listener.endCompoundEdit();
			} catch (Exception e) {
				IStatus status= new Status(IStatus.ERROR, TextEditorPlugin.PLUGIN_ID, IStatus.OK, "listener notification failed", e); //$NON-NLS-1$
				TextEditorPlugin.getDefault().getLog().log(status);
			}
		}
	}

	/**
	 * Adds a compound edit listener. Multiple registration is possible. Note that the receiver is
	 * automatically disarmed before the listeners are notified.
	 *
	 * @param listener the new listener
	 */
	public void addCompoundListener(ICompoundEditListener listener) {
		fListenerList.add(listener);
	}

	/**
	 * Removes a compound edit listener. If <code>listener</code> is registered multiple times, an
	 * arbitrary instance is removed. If <code>listener</code> is not currently registered,
	 * nothing happens.
	 *
	 * @param listener the listener to be removed.
	 */
	public void removeCompoundListener(ICompoundEditListener listener) {
		fListenerList.remove(listener);
	}
}
apache-2.0
spring-projects/spring-framework
spring-tx/src/main/java/org/springframework/transaction/reactive/TransactionalOperator.java
4898
/*
 * Copyright 2002-2020 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.transaction.reactive;

import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

import org.springframework.transaction.ReactiveTransactionManager;
import org.springframework.transaction.TransactionDefinition;
import org.springframework.transaction.TransactionException;

/**
 * Operator class that simplifies programmatic transaction demarcation and
 * transaction exception handling.
 *
 * <p>The central method is {@link #transactional}, supporting transactional
 * wrapping of functional sequences. This operator handles the transaction
 * lifecycle and possible exceptions such that neither the ReactiveTransactionCallback
 * implementation nor the calling code needs to explicitly handle transactions.
 *
 * <p>Typical usage: Allows for writing low-level data access objects that use
 * resources such as database connections but are not transaction-aware themselves.
 * Instead, they can implicitly participate in transactions handled by higher-level
 * application services utilizing this class, making calls to the low-level
 * services via an inner-class callback object.
 *
 * <p><strong>Note:</strong> Transactional Publishers should avoid Subscription
 * cancellation. See the
 * <a href="https://docs.spring.io/spring/docs/current/spring-framework-reference/data-access.html#tx-prog-operator-cancel">Cancel Signals</a>
 * section of the Spring Framework reference for more details.
 *
 * @author Mark Paluch
 * @author Juergen Hoeller
 * @since 5.2
 * @see #execute
 * @see ReactiveTransactionManager
 */
public interface TransactionalOperator {

	/**
	 * Wrap the functional sequence specified by the given Flux within a transaction.
	 * @param flux the Flux that should be executed within the transaction
	 * @return a result publisher returned by the callback (potentially empty,
	 * but never {@code null} — reactive return values must not be null)
	 * @throws TransactionException in case of initialization, rollback, or system errors
	 * @throws RuntimeException if thrown by the TransactionCallback
	 */
	default <T> Flux<T> transactional(Flux<T> flux) {
		return execute(it -> flux);
	}

	/**
	 * Wrap the functional sequence specified by the given Mono within a transaction.
	 * @param mono the Mono that should be executed within the transaction
	 * @return a result publisher returned by the callback
	 * @throws TransactionException in case of initialization, rollback, or system errors
	 * @throws RuntimeException if thrown by the TransactionCallback
	 */
	<T> Mono<T> transactional(Mono<T> mono);

	/**
	 * Execute the action specified by the given callback object within a transaction.
	 * <p>Allows for returning a result object created within the transaction, that is,
	 * a domain object or a collection of domain objects. A RuntimeException thrown
	 * by the callback is treated as a fatal exception that enforces a rollback.
	 * Such an exception gets propagated to the caller of the template.
	 * @param action the callback object that specifies the transactional action
	 * @return a result object returned by the callback
	 * @throws TransactionException in case of initialization, rollback, or system errors
	 * @throws RuntimeException if thrown by the TransactionCallback
	 */
	<T> Flux<T> execute(TransactionCallback<T> action) throws TransactionException;


	// Static builder methods

	/**
	 * Create a new {@link TransactionalOperator} using {@link ReactiveTransactionManager},
	 * using a default transaction.
	 * @param transactionManager the transaction management strategy to be used
	 * @return the transactional operator
	 */
	static TransactionalOperator create(ReactiveTransactionManager transactionManager){
		return create(transactionManager, TransactionDefinition.withDefaults());
	}

	/**
	 * Create a new {@link TransactionalOperator} using {@link ReactiveTransactionManager}
	 * and {@link TransactionDefinition}.
	 * @param transactionManager the transaction management strategy to be used
	 * @param transactionDefinition the transaction definition to apply
	 * @return the transactional operator
	 */
	static TransactionalOperator create(
			ReactiveTransactionManager transactionManager, TransactionDefinition transactionDefinition){
		return new TransactionalOperatorImpl(transactionManager, transactionDefinition);
	}

}
apache-2.0
guestful/module.jaxrs-http-patch
src/main/java/com/guestful/jaxrs/patch/PATCH.java
1435
/**
 * Copyright (C) 2013 Guestful (info@guestful.com)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *         http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.guestful.jaxrs.patch;

import javax.ws.rs.HttpMethod;
import javax.ws.rs.NameBinding;
import java.lang.annotation.*;

/**
 * HTTP "PATCH" method annotation.
 * <p>
 * The annotation acts at the same time as JAX-RS filter/interceptor {@link javax.ws.rs.NameBinding "named binder"},
 * i.e. it can be applied to custom filter/interceptor that implements the PATCH support and JAX-RS runtime will
 * take care of automatically associating the filter/interceptor with the {@code &#64;PATCH}-annotated resource method.
 * </p>
 * <p>
 * Apply to a resource method exactly as you would {@code @GET} or {@code @POST}.
 * </p>
 *
 * @author Gerard Davison (gerard.davison at oracle.com)
 * @author Marek Potociar (marek.potociar at oracle.com)
 * @see javax.ws.rs.HttpMethod
 */
@Target({ElementType.METHOD, ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
@HttpMethod("PATCH")
@Documented
@NameBinding
public @interface PATCH {
}
apache-2.0
hortonworks/cloudbreak
template-manager-cmtemplate/src/main/java/com/sequenceiq/cloudbreak/cmtemplate/configproviders/solr/SolrRoles.java
233
package com.sequenceiq.cloudbreak.cmtemplate.configproviders.solr;

/**
 * Solr service/role type name constants used by the CM template config
 * providers in this package.
 */
public class SolrRoles {

    /** Name of the Solr service type. */
    public static final String SOLR = "SOLR";

    /** Name of the Solr Server role type. */
    public static final String SOLR_SERVER = "SOLR_SERVER";

    // Constants-only holder; never instantiated.
    private SolrRoles() {
    }
}
apache-2.0
GaloisInc/JavaFE
Javafe/java/javafe/reader/MethodSignature.java
3045
/* Copyright 2000, 2001, Compaq Computer Corporation */

/* =========================================================================
 * MethodSignature.java
 * ========================================================================= */

package javafe.reader;

import java.util.*;
import javafe.ast.*;

//@ model import javafe.util.Location;

/* -------------------------------------------------------------------------
 * MethodSignature
 * ------------------------------------------------------------------------- */

/**
 * Represents the signature of a method in terms of AST elements: an ordered
 * sequence of parameter {@code Type}s plus a return {@code Type}.
 *
 * <p>Lines beginning with {@code //@} and the {@code /*@...*}{@code /} markers
 * are ESC/Java (JML) specifications; keep them in sync with the code.
 */
class MethodSignature {

    /* -- package instance methods ------------------------------------------- */

    /**
     * Construct a new method signature with an empty sequence of parameter
     * types and a void return type.
     *
     * @param classLocation location used when synthesizing the default
     *                      {@code void} return type node
     */
    //@ requires classLocation != Location.NULL;
    MethodSignature(int classLocation) {
        this.parameters = new Vector();
        this.return_ = JavafePrimitiveType.make(ASTTagConstants.VOIDTYPE, classLocation);
        //@ set parameters.elementType = \type(Type);
        //@ set parameters.containsNull = false;
    }

    /**
     * Count the number of parameter types in this method signature.
     * @return the number of parameter types
     */
    //@ ensures \result>=0;
    //@ ensures \result==parameters.elementCount;
    int countParameters() {
        return parameters.size();
    }

    /**
     * Return a parameter type from this method signature.
     * @param index the index of the parameter type to return
     * @return the parameter type at index {@code index}
     */
    //@ requires 0<=index && index<parameters.elementCount;
    //@ ensures \result.syntax;
    /*@non_null*/Type parameterAt(int index) {
        return (/*+@non_null*/Type)parameters.elementAt(index);
    }
    //@ nowarn Post;  // Unenforceable invariant on parameters

    /**
     * Append a parameter type to this method signature.
     * @param parameterType the parameter type to append
     */
    void appendParameter(/*@non_null*/Type parameterType) {
        parameters.addElement(parameterType);
    }

    /**
     * Return the return type of this method signature.
     * @return the return type
     */
    //@ ensures \result.syntax;
    /*@non_null*/Type getReturn() {
        return return_;
    }

    /**
     * Change the return type of this method signature.
     * @param return_ the new return type
     */
    //@ requires return_.syntax;
    void setReturn(/*@non_null*/Type return_) {
        this.return_ = return_;
    }

    /* -- private instance variables ----------------------------------------- */

    /**
     * The parameter types of this method signature.
     * Initialized by constructor.
     */
    //@ invariant parameters.elementType == \type(Type);
    //@ invariant !parameters.containsNull;
    // Unenforceable invariant: contents are syntax
    /*@spec_public*/ private /*@non_null*/Vector parameters;

    /**
     * The return type of this method signature.
     * Initialized by constructor.
     */
    //@ invariant return_.syntax;
    //@ spec_public
    private /*@non_null*/Type return_;
}
apache-2.0
corneliudascalu/google-glass-share-barcode-bluetooth
device/src/main/java/com/corneliudascalu/glass/device/data/DeviceRepository.java
587
package com.corneliudascalu.glass.device.data;

import com.corneliudascalu.glass.device.model.Device;

import android.content.Context;

import java.io.IOException;
import java.util.List;

/**
 * Abstraction over the storage/transport used to list, register, select and
 * send data to {@link Device}s.
 *
 * @author Corneliu Dascalu <corneliu.dascalu@gmail.com>
 */
public interface DeviceRepository {

    /**
     * Lists the known devices.
     *
     * @return the available devices
     * @throws IOException if the underlying source cannot be read
     */
    List<Device> getDevices() throws IOException;

    /**
     * Sends the given payload to the given device.
     *
     * @return presumably {@code true} on success — TODO confirm the exact
     *         contract against the implementations
     * @throws IOException on communication failure
     */
    boolean sendData(Device device, String data) throws IOException;

    /**
     * Registers the given device with the server.
     *
     * @return presumably {@code true} on success — TODO confirm the exact
     *         contract against the implementations
     * @throws IOException on communication failure
     */
    boolean registerToServer(Device device) throws IOException;

    /**
     * Persists the user's currently selected device.
     *
     * @param context Android context used to reach the backing store
     */
    void saveSelectedDevice(Context context, Device device);

    /**
     * Returns the previously saved selected device, if any.
     *
     * @param context Android context used to reach the backing store
     */
    Device getSelectedDevice(Context context);
}
apache-2.0
ArturVasilov/QuantumLibrary
QuantumLibrary/core/src/main/java/ru/kpfu/arturvasilov/core/computer/QuantumRegister.java
376
package ru.kpfu.arturvasilov.core.computer;

import ru.kpfu.arturvasilov.core.ComplexMatrix;

/**
 * A register of qubits to which quantum operators (complex matrices) can be
 * applied and which can be measured into classical bits.
 *
 * @author Artur Vasilov
 */
public interface QuantumRegister {

    /** @return the identifier of this register */
    long getId();

    /**
     * Applies the given operator to the register.
     *
     * @param operator the operator matrix — presumably unitary and sized to
     *                 the whole register; confirm with implementations
     */
    void apply(ComplexMatrix operator);

    /**
     * Applies the given operator starting at the given qubit position.
     *
     * @param operator   the operator matrix to apply
     * @param startQubit index of the first qubit the operator acts on
     */
    void applyAtPositions(ComplexMatrix operator, int startQubit);

    /**
     * Combines this register with the given one into a new register
     * (presumably a tensor product — confirm with implementations).
     */
    QuantumRegister concatWith(QuantumRegister register);

    /**
     * Measures the register.
     *
     * @return the measured classical bit values (presumably one per qubit)
     */
    boolean[] measure();
}
apache-2.0
hivemq/hivemq-jmx-plugin
src/main/java/com/hivemq/plugins/metrics/jmx/plugin/JmxMetricsPluginModule.java
1433
/*
 * Copyright 2015 dc-square GmbH
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.hivemq.plugins.metrics.jmx.plugin;

import com.hivemq.spi.HiveMQPluginModule;
import com.hivemq.spi.PluginEntryPoint;
import com.hivemq.spi.plugin.meta.Information;

/**
 * This plugin exposes every metric which is registered with the central
 * {@link com.codahale.metrics.MetricRegistry} via JMX
 *
 * @author Christoph Schaebel
 */
@Information(name = "HiveMQ JMX Metrics Reporting Plugin",
        author = "dc-square GmbH",
        version = "3.0.0")
public class JmxMetricsPluginModule extends HiveMQPluginModule {

    /** No additional bindings are needed for this plugin. */
    @Override
    protected void configurePlugin() {

    }

    /**
     * This method needs to return the main class of the plugin.
     *
     * @return the plugin's entry point class
     */
    @Override
    protected Class<? extends PluginEntryPoint> entryPointClass() {
        return JmxMetricsMainClass.class;
    }

}
apache-2.0
Groostav/CMPT880-term-project
randoop/src/main/java/randoop/contract/ToStringReturnsNormally.java
1489
package randoop.contract; import randoop.Globals; /** * Checks that calling toString() on an object does not throw an exception. */ public final class ToStringReturnsNormally implements ObjectContract { @Override public boolean equals(Object o) { if (o == null) return false; if (o == this) return true; if (!(o instanceof ToStringReturnsNormally)) { return false; } return true; // no state to compare. } @Override public int hashCode() { int h = 51; return h; // no state to compare. } @Override public boolean evaluate(Object... objects) { assert objects != null && objects.length == 1; Object o = objects[0]; assert o != null; o.toString(); return true; } @Override public int getArity() { return 1; } @Override public String toCommentString() { return "x0.toString() throws no Exception."; } @Override public String get_observer_str() { return "toString throws no Exception"; } @Override public boolean evalExceptionMeansFailure() { return true; } @Override public String toCodeString() { StringBuilder b = new StringBuilder(); b.append(Globals.lineSep); b.append("// Checks the contract: "); b.append(" " + toCommentString() + Globals.lineSep); b.append("org.junit.Assert.assertTrue("); b.append("\"Contract failed: " + toCommentString() + "\", "); b.append("x0.toString()"); b.append(");"); return b.toString(); } }
apache-2.0
oriontribunal/CoffeeMud
com/planet_ink/coffee_mud/core/database/BackLogLoader.java
5799
package com.planet_ink.coffee_mud.core.database;
import com.planet_ink.coffee_mud.Libraries.interfaces.*;
import com.planet_ink.coffee_mud.core.interfaces.*;
import com.planet_ink.coffee_mud.core.*;
import com.planet_ink.coffee_mud.core.collections.*;
import com.planet_ink.coffee_mud.Abilities.interfaces.*;
import com.planet_ink.coffee_mud.Areas.interfaces.*;
import com.planet_ink.coffee_mud.Behaviors.interfaces.*;
import com.planet_ink.coffee_mud.CharClasses.interfaces.*;
import com.planet_ink.coffee_mud.Commands.interfaces.*;
import com.planet_ink.coffee_mud.Common.interfaces.*;
import com.planet_ink.coffee_mud.Exits.interfaces.*;
import com.planet_ink.coffee_mud.Items.interfaces.*;
import com.planet_ink.coffee_mud.Locales.interfaces.*;
import com.planet_ink.coffee_mud.MOBS.interfaces.*;
import com.planet_ink.coffee_mud.Races.interfaces.*;

import java.sql.*;
import java.util.*;

/*
Copyright 2014-2016 Bo Zimmerman

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
 * Reads and writes per-channel back-log entries in the CMBKLG table.
 *
 * <p>Table layout as used here: for each channel name (CMNAME), the row with
 * CMINDX == 0 holds the channel's running message counter in CMDATE; rows with
 * CMINDX >= 1 are the actual messages (CMDATA = message text, CMDATE =
 * timestamp in millis).
 */
public class BackLogLoader
{
	protected DBConnector DB=null;

	// In-memory cache of each channel's counter; the single-element int[] is
	// both the cached value and the per-channel increment lock.
	protected Map<String,int[]> counters = new Hashtable<String,int[]>();

	public BackLogLoader(DBConnector newDB)
	{
		DB=newDB;
	}

	/**
	 * Returns the current message counter for the given channel, lazily
	 * loading (or creating) the CMINDX == 0 counter row on first access.
	 * When {@code bump} is true, the counter is incremented and persisted
	 * before the new value is returned.
	 *
	 * NOTE(review): synchronizing on an interned String is fragile — any
	 * other code that interns the same string shares this lock. Consider a
	 * dedicated per-channel lock object.
	 */
	protected int getCounter(String channelName, boolean bump)
	{
		synchronized(("BACKLOG_"+channelName).intern())
		{
			int[] counter = counters.get(channelName);
			if(counter == null)
			{
				// Not cached yet: load the counter row, or create it at 0.
				DBConnection D=null;
				try
				{
					D=DB.DBFetch();
					final ResultSet R=D.query("SELECT CMDATE FROM CMBKLG WHERE CMNAME='"+channelName+"' AND CMINDX = 0");
					if(R.next())
					{
						counters.put(channelName, new int[] { (int)DBConnections.getLongRes(R, "CMDATE") });
						R.close();
					}
					else
					{
						R.close();
						D.update("INSERT INTO CMBKLG (CMNAME, CMINDX, CMDATE) VALUES ('"+channelName+"', 0, 0)", 0);
						counters.put(channelName, new int[] {0});
					}
				}
				catch(final Exception sqle)
				{
					Log.errOut("Journal",sqle);
				}
				finally
				{
					DB.DBDone(D);
				}
				counter = counters.get(channelName);
			}
			if(bump)
			{
				// Increment in memory and persist the new value to CMINDX 0.
				DBConnection D=null;
				try
				{
					D=DB.DBFetch();
					synchronized(counter)
					{
						counter[0]++;
						D.update("UPDATE CMBKLG SET CMDATE="+counter[0]+" WHERE CMNAME='"+channelName+"' AND CMINDX = 0", 0);
					}
				}
				catch(final Exception sqle)
				{
					Log.errOut("Journal",sqle);
				}
				finally
				{
					DB.DBDone(D);
				}
			}
			return counter[0];
		}
	}

	/**
	 * Appends one message to the channel's back-log, bumping the channel
	 * counter and inserting the entry at the new index. No-op for null/empty
	 * input. The channel name is normalized to upper case.
	 *
	 * NOTE(review): channelName is concatenated into SQL; the message text
	 * itself goes through a prepared CLOB parameter, but the name does not —
	 * safe only while channel names are trusted internal values.
	 */
	public void addBackLogEntry(String channelName, final String entry)
	{
		if((entry == null) || (channelName == null) || (entry.length()==0))
			return;
		channelName = channelName.toUpperCase().trim();
		final int counter = getCounter(channelName, true);
		DBConnection D=null;
		try
		{
			D=DB.DBFetchPrepared("INSERT INTO CMBKLG (CMNAME, CMINDX, CMDATE, CMDATA) VALUES ('"+channelName+"', "+counter+", "+System.currentTimeMillis()+", ?)");
			D.setPreparedClobs(new String[]{entry});
			D.update("",0);
		}
		catch(final Exception sqle)
		{
			Log.errOut("Journal",sqle);
		}
		finally
		{
			DB.DBDone(D);
		}
	}

	/**
	 * Returns up to {@code numToReturn} back-log messages for the channel,
	 * skipping the {@code newestToSkip} most recent ones, oldest first.
	 * Each pair is (message text, timestamp millis). The computed window is
	 * clamped so CMINDX 0 (the counter row) is never included.
	 */
	public List<Pair<String,Long>> getBackLogEntries(String channelName, final int newestToSkip, final int numToReturn)
	{
		final List<Pair<String,Long>> list=new Vector<Pair<String,Long>>();
		if(channelName == null)
			return list;
		channelName = channelName.toUpperCase().trim();
		final int counter = getCounter(channelName, false);
		DBConnection D=null;
		try
		{
			final int number = numToReturn + newestToSkip;
			// oldest is clamped to 1 so the counter row (CMINDX 0) is excluded.
			final int oldest = number >= counter ? 1 : (counter - number);
			final int newest = newestToSkip >= counter ? counter : (counter - newestToSkip);
			D=DB.DBFetch();
			StringBuilder sql=new StringBuilder("SELECT CMDATA,CMDATE FROM CMBKLG WHERE CMNAME='"+channelName+"'");
			sql.append(" AND CMINDX >="+oldest);
			sql.append(" AND CMINDX <="+newest);
			sql.append(" ORDER BY CMINDX");
			final ResultSet R = D.query(sql.toString());
			while(R.next())
				list.add(new Pair<String,Long>(DB.getRes(R, "CMDATA"),Long.valueOf(DB.getLongRes(R, "CMDATE"))));
			R.close();
		}
		catch(final Exception sqle)
		{
			Log.errOut("Journal",sqle);
		}
		finally
		{
			DB.DBDone(D);
		}
		return list;
	}

	/**
	 * Trims each listed channel's back-log: when maxMessages is 0 every row
	 * for the channel is deleted; otherwise rows older than the newest
	 * maxMessages are deleted; independently, rows older than oldestTime
	 * (millis) are deleted when oldestTime > 0. The counter row (CMINDX 0)
	 * is preserved except in the delete-everything branch.
	 *
	 * NOTE(review): the maxMessages == 0 branch also deletes the CMINDX 0
	 * counter row, but the in-memory {@code counters} cache is not cleared —
	 * a later getCounter() would keep using the stale cached value. Confirm
	 * whether this is intended.
	 */
	public void trimBackLogEntries(final String[] channels, final int maxMessages, final long oldestTime)
	{
		for(final String channelName : channels)
		{
			final int counter = getCounter(channelName, false);
			DBConnection D=null;
			try
			{
				D=DB.DBFetch();
				if((maxMessages == 0) && (D != null))
				{
					D.update("DELETE FROM CMBKLG WHERE CMNAME='"+channelName+"'",0);
				}
				else
				if((maxMessages < counter) && (D != null))
				{
					final int oldestCounter = counter - maxMessages;
					D.update("DELETE FROM CMBKLG WHERE CMNAME='"+channelName+"' AND CMINDX != 0 AND CMINDX < "+oldestCounter,0);
				}
				if((oldestTime > 0) && (D != null))
				{
					D.update("DELETE FROM CMBKLG WHERE CMNAME='"+channelName+"' AND CMINDX != 0 AND CMDATE < "+oldestTime,0);
				}
			}
			catch(final Exception sqle)
			{
				Log.errOut("Journal",sqle);
			}
			finally
			{
				DB.DBDone(D);
			}
		}
	}
}
apache-2.0
bia-code/healthy
core/service/src/main/java/com/sfl/coolmonkey/healthy/core/service/expection/ServiceRuntimeException.java
373
package com.sfl.coolmonkey.healthy.core.service.expection;
// NOTE(review): package segment "expection" is a typo for "exception";
// renaming it would break existing callers, so it is left as-is.

/**
 * Base unchecked exception for the healthy core service layer.
 *
 * <p>User: Arthur Asatryan
 * Company: SFL LLC
 * Date: 1/24/17
 * Time: 6:43 PM</p>
 */
public class ServiceRuntimeException extends RuntimeException {

    private static final long serialVersionUID = 752369715061987944L;

    /**
     * @param message description of the failure
     */
    public ServiceRuntimeException(final String message) {
        super(message);
    }
}
apache-2.0
pantisocracy/follow
src/test/mqTest/topic/ReceiveLogsTopicForCritical.java
1768
package mqTest.topic;

import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;
import com.rabbitmq.client.QueueingConsumer;

/**
 * Demo consumer: binds a fresh queue to the "ex_log" topic exchange with
 * pattern "*.critical" and prints every matching message forever.
 */
public class ReceiveLogsTopicForCritical {

    private final static String EXCHANGE_NAME = "ex_log";
    private final static String userName = "admin";
    private final static String password = "admin";
    private final static int portNumber = 5672;
    private final static String host = "120.25.208.221";

    public static void main(String[] args) throws Exception{
        // Create the connection and channel.
        ConnectionFactory factory = new ConnectionFactory();
        factory.setUsername(userName);
        factory.setPassword(password);
        factory.setHost(host);
        factory.setPort(portNumber);
        factory.setVirtualHost("/");
        Connection connection = factory.newConnection();
        Channel channel = connection.createChannel();
        // Declare the (durable) topic exchange.
        channel.exchangeDeclare(EXCHANGE_NAME, "topic",true);
        // Create a server-named queue for this consumer.
        String queueName = channel.queueDeclare().getQueue();
        // Bind for all routing keys ending in ".critical".
        // (The original comment said "kernel-related messages", which does
        // not match this binding pattern.)
        channel.queueBind(queueName, EXCHANGE_NAME, "*.critical");
        QueueingConsumer consumer = new QueueingConsumer(channel);
        channel.basicConsume(queueName,true,consumer);
        // Consume forever; connection/channel are never closed (demo code).
        while(true){
            Thread.sleep(400);
            QueueingConsumer.Delivery delivery = consumer.nextDelivery();
            // NOTE(review): decodes the body with the platform default
            // charset; consider specifying UTF-8 explicitly.
            String message = new String(delivery.getBody());
            String routingKey = delivery.getEnvelope().getRoutingKey();
            System.out.println(" [x] Received routingKey = " + routingKey + ",msg = " + message + ".");
        }
    }
}
apache-2.0
ConsecroMUD/ConsecroMUD
com/suscipio_solutions/consecro_mud/Abilities/Druid/Chant_FurCoat.java
6265
package com.suscipio_solutions.consecro_mud.Abilities.Druid;
import java.util.Vector;

import com.suscipio_solutions.consecro_mud.Abilities.interfaces.Ability;
import com.suscipio_solutions.consecro_mud.Common.interfaces.CMMsg;
import com.suscipio_solutions.consecro_mud.Items.interfaces.Item;
import com.suscipio_solutions.consecro_mud.Items.interfaces.RawMaterial;
import com.suscipio_solutions.consecro_mud.Items.interfaces.Wearable;
import com.suscipio_solutions.consecro_mud.Locales.interfaces.Room;
import com.suscipio_solutions.consecro_mud.MOBS.interfaces.MOB;
import com.suscipio_solutions.consecro_mud.core.CMClass;
import com.suscipio_solutions.consecro_mud.core.CMLib;
import com.suscipio_solutions.consecro_mud.core.interfaces.Environmental;
import com.suscipio_solutions.consecro_mud.core.interfaces.Physical;


/**
 * Druid chant that conjures a temporary fur-coat armor item onto the caster
 * (or, when auto-invoked, onto the given target). The conjured item covers
 * torso/arms/feet/waist/legs, resists cold, and is destroyed again when the
 * chant's effect is un-invoked.
 */
@SuppressWarnings("rawtypes")
public class Chant_FurCoat extends Chant
{
	@Override
	public String ID() { return "Chant_FurCoat"; }
	private final static String localizedName = CMLib.lang().L("Fur Coat");
	@Override
	public String name() { return localizedName; }
	private final static String localizedStaticDisplay = CMLib.lang().L("(Fur Coat)");
	@Override
	public String displayText() { return localizedStaticDisplay; }
	@Override
	public int classificationCode(){return Ability.ACODE_CHANT|Ability.DOMAIN_SHAPE_SHIFTING;}
	@Override
	public int abstractQuality(){return Ability.QUALITY_BENEFICIAL_SELF;}
	@Override
	protected int canAffectCode(){return CAN_MOBS;}

	// The conjured coat; null until invoke() succeeds, destroyed in unInvoke().
	Item theArmor=null;

	@Override
	public void unInvoke()
	{
		// Undo the effects of this chant: destroy the conjured coat and
		// announce its disappearance to the room.
		if(!(affected instanceof MOB))
			return;
		final MOB mob=(MOB)affected;
		if(canBeUninvoked())
			if(theArmor!=null)
			{
				theArmor.destroy();
				mob.location().recoverRoomStats();
			}
		super.unInvoke();
		if(canBeUninvoked())
			if((mob.location()!=null)&&(!mob.amDead()))
				mob.location().show(mob,null,CMMsg.MSG_OK_VISUAL,L("<S-YOUPOSS> fur coat vanishes."));
	}

	@Override
	public void executeMsg(final Environmental myHost, final CMMsg msg)
	{
		super.executeMsg(myHost,msg);
		if((affected!=null)&&(affected instanceof MOB))
		{
			if((msg.amISource((MOB)affected))||msg.amISource(invoker))
			{
				// End the effect when the wearer (or caster) quits or dies.
				if(msg.sourceMinor()==CMMsg.TYP_QUIT)
				{
					unInvoke();
					if(msg.source().playerStats()!=null)
						msg.source().playerStats().setLastUpdated(0);
				}
				else
				if(msg.sourceMinor()==CMMsg.TYP_DEATH)
				{
					unInvoke();
				}
			}
		}
	}

	@Override
	public boolean okMessage(final Environmental myHost, final CMMsg msg)
	{
		if(!super.okMessage(myHost,msg))
			return false;
		if(theArmor==null)
			return true;
		// Shape-shifting ends the chant (the coat cannot follow a new form).
		if((msg.source()==theArmor.owner())
		&&(msg.tool() instanceof Druid_ShapeShift))
		{
			unInvoke();
			return true;
		}
		// If the coat somehow left the wearer's body (inventory, floor, or
		// no owner), the effect is over.
		if((theArmor.amWearingAt(Wearable.IN_INVENTORY)
		||(theArmor.owner()==null)
		||(theArmor.owner() instanceof Room)))
			unInvoke();
		final MOB mob=msg.source();
		if(!msg.amITarget(theArmor))
			return true;
		else
		// Forbid removing or taking the conjured coat directly.
		if((msg.targetMinor()==CMMsg.TYP_REMOVE)
		||(msg.targetMinor()==CMMsg.TYP_GET))
		{
			mob.tell(L("The fur coat cannot be removed from where it is."));
			return false;
		}
		return true;
	}

	@Override
	public int castingQuality(MOB mob, Physical target)
	{
		if(mob!=null)
		{
			if(target instanceof MOB)
			{
				// Pointless on shape-shifted targets or those with no free torso slot.
				if(Druid_ShapeShift.isShapeShifted((MOB)target))
					return Ability.QUALITY_INDIFFERENT;
				if(((MOB)target).freeWearPositions(Wearable.WORN_TORSO,(short)-2048,(short)0)<=0)
					return Ability.QUALITY_INDIFFERENT;
			}
		}
		return super.castingQuality(mob,target);
	}

	@Override
	public boolean invoke(MOB mob, Vector commands, Physical givenTarget, boolean auto, int asLevel)
	{
		MOB target=mob;
		if((auto)&&(givenTarget!=null)&&(givenTarget instanceof MOB))
			target=(MOB)givenTarget;
		if(target.fetchEffect(this.ID())!=null)
		{
			mob.tell(target,null,null,L("<S-NAME> already <S-HAS-HAVE> a fur coat."));
			return false;
		}
		if(Druid_ShapeShift.isShapeShifted(target))
		{
			mob.tell(L("You cannot invoke this chant in your present form."));
			return false;
		}
		if(target.freeWearPositions(Wearable.WORN_TORSO,(short)-2048,(short)0)<=0)
		{
			mob.tell(L("You are already wearing something on your torso!"));
			return false;
		}

		// The invoke method for spells receives as parameters the invoker,
		// and the REMAINING command line parameters, divided into words,
		// and added as String objects to a vector.
		if(!super.invoke(mob,commands,givenTarget,auto,asLevel))
			return false;

		boolean success=proficiencyCheck(mob,0,auto);

		if(success)
		{
			// It worked: conjure the coat, force-wear it across the covered
			// slots (unwearing anything in the way), and attach the effect.
			invoker=mob;
			final CMMsg msg=CMClass.getMsg(mob,target,this,verbalCastCode(mob,target,auto),auto?L("A thick coat of fur appears on <T-NAME>."):L("^S<S-NAME> chant(s) for a thick coat of fur!^?"));
			if(mob.location().okMessage(mob,msg))
			{
				mob.location().send(mob,msg);
				theArmor=CMClass.getArmor("GenArmor");
				theArmor.setName(L("a fur coat"));
				theArmor.setDisplayText("");
				theArmor.setDescription(L("The coat is made of thick black fur."));
				theArmor.setMaterial(RawMaterial.RESOURCE_FUR);
				// Armor rating scales with the caster's qualifying class level.
				theArmor.basePhyStats().setArmor(2*CMLib.ableMapper().qualifyingClassLevel(mob,this));
				final long wornCode=(Wearable.WORN_TORSO|Wearable.WORN_ARMS|Wearable.WORN_FEET|Wearable.WORN_WAIST|Wearable.WORN_LEGS);
				theArmor.setRawProperLocationBitmap(wornCode);
				theArmor.setRawLogicalAnd(true);
				// Unwear anything occupying the slots the coat will cover.
				for(int i=target.numItems()-1;i>=0;i--)
				{
					final Item I=mob.getItem(i);
					if((I.rawWornCode()&wornCode)>0)
						I.unWear();
				}
				final Ability A=CMClass.getAbility("Prop_WearResister");
				if( A != null )
				{
					A.setMiscText("cold");
					theArmor.addNonUninvokableEffect(A);
				}
				theArmor.recoverPhyStats();
				theArmor.text();
				target.addItem(theArmor);
				theArmor.wearAt(wornCode);
				success=beneficialAffect(mob,target,asLevel,0)!=null;
				mob.location().recoverRoomStats();
			}
		}
		else
			return beneficialWordsFizzle(mob,target,L("<S-NAME> chant(s) for a thick coat of fur, but nothing happen(s)."));

		// Return whether it worked.
		return success;
	}
}
apache-2.0
blackcathacker/kc.preclean
coeus-it/src/test/java/org/kuali/kra/irb/auth/ModifyProtocolModuleAuthorizerTestBase.java
3687
/*
 * Copyright 2005-2014 The Kuali Foundation
 *
 * Licensed under the Educational Community License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.opensource.org/licenses/ecl1.php
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.kuali.kra.irb.auth;

import org.kuali.kra.infrastructure.PermissionConstants;
import org.kuali.kra.irb.ProtocolDocument;
import org.kuali.kra.irb.actions.amendrenew.ProtocolAmendRenewModule;
import org.kuali.kra.irb.actions.amendrenew.ProtocolAmendRenewal;
import org.kuali.kra.irb.test.ProtocolFactory;

import java.util.Collections;

/**
 * Test the Modification of a Protocol Module Authorizer.
 *
 * <p>Subclasses supply the module type code under test and the concrete
 * {@link ModifyAmendmentAuthorizer} to exercise.
 */
public abstract class ModifyProtocolModuleAuthorizerTestBase extends ProtocolAuthorizerTestBase {

    /**
     * Test whether the user can modify the protocol if they have the Modify Protocol permission.
     *
     * @param protocolNumber the number of the protocol document to create
     * @param hasPermission whether the stubbed authorization service grants the permission
     * @param expected the authorization outcome the test asserts
     */
    protected void runModifyProtocolTest(String protocolNumber, boolean hasPermission, boolean expected) throws Exception {
        ProtocolDocument document = ProtocolFactory.createProtocolDocument(protocolNumber);
        document.getProtocol().setCorrectionMode(true);
        ProtocolAuthorizer authorizer = createProtocolAuthorizer(document, hasPermission, false, true);
        runTest(document, authorizer, expected);
    }

    /**
     * Test whether the user has permission to modify the specified module in an amendment.
     *
     * @param protocolNumber the number of the protocol document to create
     * @param moduleTypeCode the amend/renew module type code attached to the amendment
     * @param hasPermission whether the stubbed authorization service grants the permission
     * @param expected the authorization outcome the test asserts
     */
    protected void runModifyProtocolAmendmentTest(String protocolNumber, String moduleTypeCode, boolean hasPermission, boolean expected) throws Exception {
        ProtocolDocument document = ProtocolFactory.createProtocolDocument(protocolNumber);

        // Attach an amendment containing a single module of the given type.
        ProtocolAmendRenewal amendRenewal = new ProtocolAmendRenewal();
        ProtocolAmendRenewModule module = new ProtocolAmendRenewModule();
        module.setProtocolModuleTypeCode(moduleTypeCode);
        amendRenewal.addModule(module);
        document.getProtocol().setProtocolAmendRenewal(amendRenewal);

        document.getProtocol().setCorrectionMode(true);
        ProtocolAuthorizer authorizer = createProtocolAuthorizer(document, hasPermission, false, true);
        runTest(document, authorizer, expected);
    }

    /**
     * Return the Protocol Module Type Code that will be tested.
     * @return the module type code to test
     */
    protected abstract String getModuleTypeCode();

    /**
     * Create the Authorizer that will be tested.
     * @return the authorizer to test
     */
    protected abstract ModifyAmendmentAuthorizer createModifyAmendmentAuthorizer();

    /**
     * Builds the subclass-provided authorizer wired with stub services:
     * authorization (grants/denies MODIFY_PROTOCOL per {@code hasPermission}),
     * workflow state, and an amend/renew service reporting this test's module
     * type code as modifiable.
     */
    @Override
    protected ProtocolAuthorizer createProtocolAuthorizer(ProtocolDocument protocolDocument, boolean hasPermission,
            boolean isActionAllowed, boolean isInWorkflow) throws Exception {

        ModifyAmendmentAuthorizer authorizer = createModifyAmendmentAuthorizer();
        authorizer.setKraAuthorizationService(buildKraAuthorizationService(protocolDocument, PermissionConstants.MODIFY_PROTOCOL, hasPermission));
        authorizer.setKraWorkflowService(buildKraWorkflowService(protocolDocument, isInWorkflow));
        authorizer.setProtocolAmendRenewService(buildProtocolAmendRenewService(protocolDocument, Collections.singletonList(getModuleTypeCode())));
        return authorizer;
    }
}
apache-2.0
Synthuse/synthuse-src
src/org/synthuse/controllers/SynthuseConfigDialogControllers.java
5871
package org.synthuse.controllers;

import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;

import javax.swing.JOptionPane;

import org.synthuse.Config;
import org.synthuse.views.SynthuseConfigPanel;

/**
 * Wires the Synthuse configuration dialog ({@link SynthuseConfigPanel}) to the
 * backing {@link Config}: {@link #initializeUI} pushes the persisted settings
 * into the widgets, and {@link #bindActionControllers} attaches listeners that
 * write widget changes back into the configuration.
 */
public class SynthuseConfigDialogControllers {

    /**
     * Message shown after changing a setting that is only re-read at startup.
     * Extracted here because it was previously duplicated in seven handlers.
     */
    private static final String RESTART_NOTICE = "May require restart to be effective";

    /**
     * Populate the dialog's widgets from the current configuration values.
     *
     * @param aSynthuseConfigPanel the dialog panel whose widgets are initialized
     * @param aConfig the configuration supplying the current values
     */
    public static void initializeUI(SynthuseConfigPanel aSynthuseConfigPanel, Config aConfig) {
        aSynthuseConfigPanel.getAlwaysOnTopCheckBox().setSelected(aConfig.isAlwaysOnTop());
        aSynthuseConfigPanel.getDisableFiltersUiaCheckBox().setSelected(aConfig.isFilterUiaDisabled());
        aSynthuseConfigPanel.getDisableUiaBridgeCheckBox().setSelected(aConfig.isUiaBridgeDisabled());
        aSynthuseConfigPanel.getRefreshKeyTextField().setText(Character.toString(aConfig.getRefreshKey()));
        aSynthuseConfigPanel.getStrongTextMatchingCheckBox().setSelected(aConfig.isUseStrongTextMatching());
        aSynthuseConfigPanel.getTargetKeyTextField().setText(Character.toString(aConfig.getTargetKey()));
        aSynthuseConfigPanel.getXPathHighlightTextField().setText(aConfig.getXpathHighlight());
        aSynthuseConfigPanel.getXPathListTextField().setText(aConfig.getXpathList());
    }

    /**
     * Attach an action listener to every editable widget so user edits are
     * written back into the configuration.
     *
     * @param aSynthuseConfigPanel the dialog panel whose widgets are bound
     * @param aConfig the configuration receiving the updated values
     */
    public static void bindActionControllers(final SynthuseConfigPanel aSynthuseConfigPanel, final Config aConfig) {
        aSynthuseConfigPanel.getAlwaysOnTopCheckBox().addActionListener(alwaysOnTopCheckboxActionHandler(aSynthuseConfigPanel, aConfig));
        aSynthuseConfigPanel.getDisableFiltersUiaCheckBox().addActionListener(disableFiltersUiaCheckboxActionHandler(aSynthuseConfigPanel, aConfig));
        aSynthuseConfigPanel.getDisableUiaBridgeCheckBox().addActionListener(disableUiaBridgeCheckboxActionHandler(aSynthuseConfigPanel, aConfig));
        aSynthuseConfigPanel.getRefreshKeyTextField().addActionListener(refreshKeyCodeTextFieldActionHandler(aSynthuseConfigPanel, aConfig));
        aSynthuseConfigPanel.getStrongTextMatchingCheckBox().addActionListener(strongTextMatchingCheckboxActionHandler(aSynthuseConfigPanel, aConfig));
        aSynthuseConfigPanel.getTargetKeyTextField().addActionListener(targetKeyCodeTextFieldActionHandler(aSynthuseConfigPanel, aConfig));
        aSynthuseConfigPanel.getXPathHighlightTextField().addActionListener(xpathHighlightTextFieldActionHandler(aSynthuseConfigPanel, aConfig));
        aSynthuseConfigPanel.getXPathListTextField().addActionListener(xpathListTextFieldActionHandler(aSynthuseConfigPanel, aConfig));
    }

    // Pops the shared "restart required" notice over the dialog.
    private static void showRestartNotice(SynthuseConfigPanel aSynthuseConfigPanel) {
        JOptionPane.showMessageDialog(aSynthuseConfigPanel, RESTART_NOTICE);
    }

    // Saves the XPath list expression; change is only picked up on restart.
    private static ActionListener xpathListTextFieldActionHandler(final SynthuseConfigPanel aSynthuseConfigPanel, final Config aConfig) {
        return new ActionListener() {
            @Override
            public void actionPerformed(ActionEvent aE) {
                aConfig.setXPathList(aSynthuseConfigPanel.getXPathListTextField().getText());
                showRestartNotice(aSynthuseConfigPanel);
            }
        };
    }

    // Saves the XPath highlight expression; change is only picked up on restart.
    private static ActionListener xpathHighlightTextFieldActionHandler(final SynthuseConfigPanel aSynthuseConfigPanel, final Config aConfig) {
        return new ActionListener() {
            @Override
            public void actionPerformed(ActionEvent aE) {
                aConfig.setXPathHighlight(aSynthuseConfigPanel.getXPathHighlightTextField().getText());
                showRestartNotice(aSynthuseConfigPanel);
            }
        };
    }

    // Saves the target hotkey; change is only picked up on restart.
    private static ActionListener targetKeyCodeTextFieldActionHandler(final SynthuseConfigPanel aSynthuseConfigPanel, final Config aConfig) {
        return new ActionListener() {
            @Override
            public void actionPerformed(ActionEvent aE) {
                aConfig.setTargetKey(aSynthuseConfigPanel.getTargetKeyTextField().getText());
                showRestartNotice(aSynthuseConfigPanel);
            }
        };
    }

    // Saves the strong-text-matching flag.
    // NOTE(review): unlike the other handlers this one shows no restart notice —
    // presumably the setting takes effect immediately; confirm with Config usage.
    private static ActionListener strongTextMatchingCheckboxActionHandler(
            final SynthuseConfigPanel aSynthuseConfigPanel, final Config aConfig) {
        return new ActionListener() {
            @Override
            public void actionPerformed(ActionEvent aE) {
                aConfig.setUseStrongTextMatching(aSynthuseConfigPanel.getStrongTextMatchingCheckBox().isSelected());
            }
        };
    }

    // Saves the refresh hotkey; change is only picked up on restart.
    private static ActionListener refreshKeyCodeTextFieldActionHandler(final SynthuseConfigPanel aSynthuseConfigPanel, final Config aConfig) {
        return new ActionListener() {
            @Override
            public void actionPerformed(ActionEvent aE) {
                aConfig.setRefreshKey(aSynthuseConfigPanel.getRefreshKeyTextField().getText());
                showRestartNotice(aSynthuseConfigPanel);
            }
        };
    }

    // Saves the "disable UIA bridge" flag; change is only picked up on restart.
    private static ActionListener disableUiaBridgeCheckboxActionHandler(final SynthuseConfigPanel aSynthuseConfigPanel, final Config aConfig) {
        return new ActionListener() {
            @Override
            public void actionPerformed(ActionEvent aE) {
                aConfig.setDisableUiaBridge(aSynthuseConfigPanel.getDisableUiaBridgeCheckBox().isSelected());
                showRestartNotice(aSynthuseConfigPanel);
            }
        };
    }

    // Saves the "disable UIA filters" flag; change is only picked up on restart.
    private static ActionListener disableFiltersUiaCheckboxActionHandler(final SynthuseConfigPanel aSynthuseConfigPanel, final Config aConfig) {
        return new ActionListener() {
            @Override
            public void actionPerformed(ActionEvent aE) {
                aConfig.setDisableFiltersUia(aSynthuseConfigPanel.getDisableFiltersUiaCheckBox().isSelected());
                showRestartNotice(aSynthuseConfigPanel);
            }
        };
    }

    // Saves the always-on-top flag; change is only picked up on restart.
    private static ActionListener alwaysOnTopCheckboxActionHandler(final SynthuseConfigPanel aSynthuseConfigPanel, final Config aConfig) {
        return new ActionListener() {
            @Override
            public void actionPerformed(ActionEvent aE) {
                aConfig.setAlwaysOnTop(aSynthuseConfigPanel.getAlwaysOnTopCheckBox().isSelected());
                showRestartNotice(aSynthuseConfigPanel);
            }
        };
    }
}
apache-2.0
chenxiaoqi/pickupweb
src/test/java/com/lazyman/pickupweb/test/lombok/MyList.java
486
package com.lazyman.pickupweb.test.lombok; import lombok.ToString; import lombok.experimental.Delegate; import java.util.ArrayList; import java.util.List; /** * <一句话功能简述> * <功能详细描述> * * @author c00286900 * @version [版本号, 2018/12/22] * @see [相关类/方法] * @since [产品/模块版本] */ @ToString public class MyList implements List<String> { @Delegate private List<String> list = new ArrayList<>(); }
apache-2.0
logginghub/core
logginghub-server/src/main/java/com/logginghub/logging/repository/processors/ConsoleLogDataProcessor.java
654
package com.logginghub.logging.repository.processors; import java.io.File; import com.logginghub.logging.LogEvent; import com.logginghub.logging.logeventformatters.SingleLineLogEventTextFormatter; import com.logginghub.logging.repository.LogDataProcessor; public class ConsoleLogDataProcessor implements LogDataProcessor { private SingleLineLogEventTextFormatter formatter = new SingleLineLogEventTextFormatter(); public void onNewLogEvent(LogEvent event) { System.out.println(formatter.format(event)); } public void processingStarted(File resultsFolder) {} public void processingEnded() {} }
apache-2.0
aws/aws-sdk-java
aws-java-sdk-wellarchitected/src/main/java/com/amazonaws/services/wellarchitected/model/transform/ResourceNotFoundExceptionUnmarshaller.java
3440
/*
 * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.wellarchitected.model.transform;

import java.math.*;

import javax.annotation.Generated;

import com.amazonaws.services.wellarchitected.model.*;
import com.amazonaws.transform.SimpleTypeJsonUnmarshallers.*;
import com.amazonaws.transform.*;

import com.fasterxml.jackson.core.JsonToken;
import static com.fasterxml.jackson.core.JsonToken.*;

/**
 * ResourceNotFoundException JSON Unmarshaller.
 *
 * Walks the tokens of the JSON error document and copies the
 * {@code ResourceId} / {@code ResourceType} members onto a new exception
 * instance. Code-generated; do not hand-edit beyond comments.
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class ResourceNotFoundExceptionUnmarshaller extends EnhancedJsonErrorUnmarshaller {

    private ResourceNotFoundExceptionUnmarshaller() {
        // Binds this unmarshaller to the modeled exception class and its wire error code.
        super(com.amazonaws.services.wellarchitected.model.ResourceNotFoundException.class, "ResourceNotFoundException");
    }

    @Override
    public com.amazonaws.services.wellarchitected.model.ResourceNotFoundException unmarshallFromContext(JsonUnmarshallerContext context) throws Exception {
        com.amazonaws.services.wellarchitected.model.ResourceNotFoundException resourceNotFoundException = new com.amazonaws.services.wellarchitected.model.ResourceNotFoundException(
                null);

        // Record where this object starts so the loop can stop once parsing
        // returns to (or above) the starting depth.
        int originalDepth = context.getCurrentDepth();
        String currentParentElement = context.getCurrentParentElement();
        int targetDepth = originalDepth + 1;

        JsonToken token = context.getCurrentToken();
        if (token == null)
            token = context.nextToken();
        if (token == VALUE_NULL) {
            // A JSON null for the whole error body yields no exception object.
            return null;
        }

        while (true) {
            if (token == null)
                break;

            if (token == FIELD_NAME || token == START_OBJECT) {
                // Only fields exactly one level below the object start belong to this exception.
                if (context.testExpression("ResourceId", targetDepth)) {
                    context.nextToken();
                    resourceNotFoundException.setResourceId(context.getUnmarshaller(String.class).unmarshall(context));
                }
                if (context.testExpression("ResourceType", targetDepth)) {
                    context.nextToken();
                    resourceNotFoundException.setResourceType(context.getUnmarshaller(String.class).unmarshall(context));
                }
            } else if (token == END_ARRAY || token == END_OBJECT) {
                if (context.getLastParsedParentElement() == null || context.getLastParsedParentElement().equals(currentParentElement)) {
                    // Back at the depth where parsing began: the object is complete.
                    if (context.getCurrentDepth() <= originalDepth)
                        break;
                }
            }
            token = context.nextToken();
        }

        return resourceNotFoundException;
    }

    private static ResourceNotFoundExceptionUnmarshaller instance;

    // Lazily created shared instance; not synchronized, so concurrent first
    // calls may each construct one (the instances hold no mutable state here).
    public static ResourceNotFoundExceptionUnmarshaller getInstance() {
        if (instance == null)
            instance = new ResourceNotFoundExceptionUnmarshaller();
        return instance;
    }
}
apache-2.0
aws/aws-sdk-java
aws-java-sdk-elasticache/src/main/java/com/amazonaws/services/elasticache/model/ReplicationGroupNotFoundException.java
1257
/* * Copyright 2017-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.elasticache.model; import javax.annotation.Generated; /** * <p> * The specified replication group does not exist. * </p> */ @Generated("com.amazonaws:aws-java-sdk-code-generator") public class ReplicationGroupNotFoundException extends com.amazonaws.services.elasticache.model.AmazonElastiCacheException { private static final long serialVersionUID = 1L; /** * Constructs a new ReplicationGroupNotFoundException with the specified error message. * * @param message * Describes the error encountered. */ public ReplicationGroupNotFoundException(String message) { super(message); } }
apache-2.0
lakshmiDRIP/DRIP
src/main/java/org/drip/sample/semidefinite/DualConstrainedEllipsoidVariance.java
7701
package org.drip.sample.semidefinite;

import org.drip.function.definition.RdToR1;
import org.drip.function.rdtor1.*;
import org.drip.function.rdtor1descent.LineStepEvolutionControl;
import org.drip.function.rdtor1solver.*;
import org.drip.quant.common.FormatUtil;
import org.drip.service.env.EnvManager;

/*
 * -*- mode: java; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 */

/*!
 * Copyright (C) 2017 Lakshmi Krishnamurthy
 * Copyright (C) 2016 Lakshmi Krishnamurthy
 *
 * This file is part of DRIP, a free-software/open-source library for buy/side financial/trading model
 * libraries targeting analysts and developers
 * https://lakshmidrip.github.io/DRIP/
 *
 * DRIP is composed of four main libraries:
 *
 * - DRIP Fixed Income - https://lakshmidrip.github.io/DRIP-Fixed-Income/
 * - DRIP Asset Allocation - https://lakshmidrip.github.io/DRIP-Asset-Allocation/
 * - DRIP Numerical Optimizer - https://lakshmidrip.github.io/DRIP-Numerical-Optimizer/
 * - DRIP Statistical Learning - https://lakshmidrip.github.io/DRIP-Statistical-Learning/
 *
 * - DRIP Fixed Income: Library for Instrument/Trading Conventions, Treasury Futures/Options,
 * Funding/Forward/Overnight Curves, Multi-Curve Construction/Valuation, Collateral Valuation and XVA
 * Metric Generation, Calibration and Hedge Attributions, Statistical Curve Construction, Bond RV
 * Metrics, Stochastic Evolution and Option Pricing, Interest Rate Dynamics and Option Pricing, LMM
 * Extensions/Calibrations/Greeks, Algorithmic Differentiation, and Asset Backed Models and Analytics.
 *
 * - DRIP Asset Allocation: Library for model libraries for MPT framework, Black Litterman Strategy
 * Incorporator, Holdings Constraint, and Transaction Costs.
 *
 * - DRIP Numerical Optimizer: Library for Numerical Optimization and Spline Functionality.
 *
 * - DRIP Statistical Learning: Library for Statistical Evaluation and Machine Learning.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 *
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * DualConstrainedEllipsoidVariance demonstrates the Application of the Interior Point Method for Minimizing
 * the Variance Across The Specified Ellipsoid under both Normalization and first Moment Constraints.
 *
 * @author Lakshmi Krishnamurthy
 */
public class DualConstrainedEllipsoidVariance {

	/**
	 * Entry point: sets up the portfolio data, prints the input matrix, runs the
	 * barrier fixed-point solver under two equality constraints (weights sum to 1,
	 * expected return equals the design return) plus per-asset 0.05..0.65 bound
	 * inequalities, and prints the optimal weights and resulting variance.
	 *
	 * @param astrArgs command-line arguments (unused)
	 * @throws Exception propagated from the environment setup or the solver
	 */
	public static final void main (
		final String[] astrArgs)
		throws Exception
	{
		EnvManager.InitEnv ("");

		String[] astrEntityName = new String[] {
			"IBM",
			"ATT",
			"ALU",
			"QCO",
			"INT",
			"MSF",
			"VER"
		};

		// Per-entity expected returns; the design return below matches QCO's entry.
		double[] adblEntityReturn = new double[] {
			0.0264,
			0.0332,
			0.0400,
			0.0468,
			0.0536,
			0.0604,
			0.0672
		};

		double dblEntityDesignReturn = 0.0468;

		// NOTE(review): unit diagonal — this looks like a correlation matrix being
		// used as the covariance input; confirm against CovarianceEllipsoidMultivariate.
		double[][] aadblEntityCovariance = new double[][] {
			{1.00, 0.76, 0.80, 0.38, 0.60, 0.61, 0.51},
			{0.76, 1.00, 0.65, 0.35, 0.56, 0.43, 0.40},
			{0.80, 0.65, 1.00, 0.68, 0.74, 0.40, 0.51},
			{0.38, 0.35, 0.68, 1.00, 0.72, 0.02, 0.57},
			{0.60, 0.56, 0.74, 0.72, 1.00, 0.31, 0.67},
			{0.61, 0.43, 0.40, 0.02, 0.31, 1.00, 0.39},
			{0.51, 0.40, 0.51, 0.57, 0.67, 0.39, 1.00}
		};

		// Dump the input matrix with entity names as row/column headers.
		System.out.println ("\n\n\t|------------------------------------------------------||");

		String strHeader = "\t| |";

		for (int i = 0; i < astrEntityName.length; ++i)
			strHeader += " " + astrEntityName[i] + " |";

		System.out.println (strHeader + "|");

		System.out.println ("\t|------------------------------------------------------||");

		for (int i = 0; i < astrEntityName.length; ++i) {
			String strDump = "\t| " + astrEntityName[i] + " ";

			for (int j = 0; j < astrEntityName.length; ++j)
				strDump += "|" + FormatUtil.FormatDouble (aadblEntityCovariance[i][j], 1, 2, 1.) + " ";

			System.out.println (strDump + "||");
		}

		System.out.println ("\t|------------------------------------------------------||\n\n");

		double dblEqualityConstraintConstant = -1.;
		int iNumEntity = aadblEntityCovariance.length;

		InteriorPointBarrierControl ipbc = InteriorPointBarrierControl.Standard();

		// Two affine equality constraints: sum of weights equals 1 (unitary
		// coefficients, constant -1) and portfolio return equals the design return.
		RdToR1[] aRdToR1EqualityConstraint = new RdToR1[] {
			new AffineMultivariate (
				ObjectiveConstraintVariateSet.Unitary (iNumEntity),
				dblEqualityConstraintConstant
			),
			new AffineMultivariate (
				adblEntityReturn,
				-1. * dblEntityDesignReturn
			)
		};

		int iNumEqualityConstraint = aRdToR1EqualityConstraint.length;

		// Per-entity lower (0.05) and upper (0.65) weight bounds, expressed over
		// the augmented variate (entities plus the equality-constraint multipliers).
		RdToR1[] aRdToR1InequalityConstraint = new RdToR1[] {
			new AffineBoundMultivariate (false, 0, iNumEntity + iNumEqualityConstraint, 0.05),
			new AffineBoundMultivariate (true, 0, iNumEntity + iNumEqualityConstraint, 0.65),
			new AffineBoundMultivariate (false, 1, iNumEntity + iNumEqualityConstraint, 0.05),
			new AffineBoundMultivariate (true, 1, iNumEntity + iNumEqualityConstraint, 0.65),
			new AffineBoundMultivariate (false, 2, iNumEntity + iNumEqualityConstraint, 0.05),
			new AffineBoundMultivariate (true, 2, iNumEntity + iNumEqualityConstraint, 0.65),
			new AffineBoundMultivariate (false, 3, iNumEntity + iNumEqualityConstraint, 0.05),
			new AffineBoundMultivariate (true, 3, iNumEntity + iNumEqualityConstraint, 0.65),
			new AffineBoundMultivariate (false, 4, iNumEntity + iNumEqualityConstraint, 0.05),
			new AffineBoundMultivariate (true, 4, iNumEntity + iNumEqualityConstraint, 0.65),
			new AffineBoundMultivariate (false, 5, iNumEntity + iNumEqualityConstraint, 0.05),
			new AffineBoundMultivariate (true, 5, iNumEntity + iNumEqualityConstraint, 0.65),
			new AffineBoundMultivariate (false, 6, iNumEntity + iNumEqualityConstraint, 0.05),
			new AffineBoundMultivariate (true, 6, iNumEntity + iNumEqualityConstraint, 0.65)
		};

		// Lagrangian of the variance objective with the equality constraints folded in.
		LagrangianMultivariate ceec = new LagrangianMultivariate (
			new CovarianceEllipsoidMultivariate (aadblEntityCovariance),
			aRdToR1EqualityConstraint
		);

		BarrierFixedPointFinder ifpm = new BarrierFixedPointFinder (
			ceec,
			aRdToR1InequalityConstraint,
			ipbc,
			LineStepEvolutionControl.NocedalWrightStrongWolfe (false)
		);

		// Solve from a uniform starting variate sized for objective plus constraints.
		VariateInequalityConstraintMultiplier vcmt = ifpm.solve (
			ObjectiveConstraintVariateSet.Uniform (
				iNumEntity,
				ceec.constraintFunctionDimension()
			)
		);

		double[] adblOptimalVariate = vcmt.variates();

		System.out.println ("\t|----------------------||");

		System.out.println ("\t| OPTIMAL ENTITIES ||");

		System.out.println ("\t|----------------------||");

		double dblExpectedReturn = 0.;

		// Print each optimal weight and accumulate the achieved portfolio return.
		for (int i = 0; i < iNumEntity; ++i) {
			System.out.println ("\t| " + astrEntityName[i] + " => " + FormatUtil.FormatDouble (adblOptimalVariate[i], 2, 2, 100.) + "% ||");

			dblExpectedReturn += adblOptimalVariate[i] * adblEntityReturn[i];
		}

		System.out.println ("\t|----------------------||\n");

		System.out.println ("\t|------------------------------||");

		System.out.println ("\t| DESIGN RETURN => " + FormatUtil.FormatDouble (dblEntityDesignReturn, 1, 5, 1.) + " ||");

		System.out.println ("\t| EXPECTED RETURN => " + FormatUtil.FormatDouble (dblExpectedReturn, 1, 5, 1.) + " ||");

		System.out.println ("\t| OPTIMAL VARIANCE => " + FormatUtil.FormatDouble (ceec.evaluate (adblOptimalVariate), 1, 5, 1.) + " ||");

		System.out.println ("\t|------------------------------||\n");
	}
}
apache-2.0
fredji97/samza
samza-azure/src/test/java/org/apache/samza/system/eventhub/consumer/TestEventHubSystemConsumer.java
18475
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.samza.system.eventhub.consumer;

import com.microsoft.azure.eventhubs.*;
import org.apache.samza.Partition;
import org.apache.samza.config.MapConfig;
import org.apache.samza.metrics.Counter;
import org.apache.samza.system.IncomingMessageEnvelope;
import org.apache.samza.system.SystemStreamPartition;
import org.apache.samza.system.eventhub.*;
import org.apache.samza.system.eventhub.admin.PassThroughInterceptor;
import org.apache.samza.system.eventhub.producer.SwapFirstLastByteInterceptor;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;

import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;

import static org.apache.samza.system.eventhub.MockEventHubConfigFactory.*;

/**
 * Unit tests for {@link EventHubSystemConsumer}.  Each test builds a mock
 * EventHub client factory pre-loaded with generated {@code EventData}, drives
 * the consumer's partition handlers, then asserts on the polled envelopes and
 * the read-rate / read-error metrics.
 */
@RunWith(PowerMockRunner.class)
@PrepareForTest({EventHubRuntimeInformation.class, PartitionRuntimeInformation.class, EventHubClient.class,
        PartitionReceiver.class, PartitionSender.class})
public class TestEventHubSystemConsumer {
    // Mock EventHub entity (topic) names used as config entity paths.
    private static final String MOCK_ENTITY_1 = "mocktopic1";
    private static final String MOCK_ENTITY_2 = "mocktopic2";

    // Compares polled envelopes against the source EventData with a pass-through interceptor.
    private void verifyEvents(List<IncomingMessageEnvelope> messages, List<EventData> eventDataList) {
        verifyEvents(messages, eventDataList, new PassThroughInterceptor());
    }

    // Checks, pairwise, that each envelope's key/message/offset matches the
    // corresponding EventData after running its bytes through the interceptor.
    private void verifyEvents(List<IncomingMessageEnvelope> messages, List<EventData> eventDataList, Interceptor interceptor) {
        Assert.assertEquals(messages.size(), eventDataList.size());
        for (int i = 0; i < messages.size(); i++) {
            IncomingMessageEnvelope message = messages.get(i);
            EventData eventData = eventDataList.get(i);
            Assert.assertEquals(message.getKey(), eventData.getSystemProperties().getPartitionKey());
            Assert.assertEquals(message.getMessage(), interceptor.intercept(eventData.getBytes()));
            Assert.assertEquals(message.getOffset(), eventData.getSystemProperties().getOffset());
        }
    }

    // Registering several offsets for the same SSP: the consumer should start
    // from the last registration (START_OF_STREAM here).
    @Test
    public void testMultipleRegistersToSameSSP() throws Exception {
        String systemName = "eventhubs";
        String streamName = "testStream";
        int numEvents = 10; // needs to be less than BLOCKING_QUEUE_SIZE
        int partitionId = 0;
        TestMetricsRegistry testMetrics = new TestMetricsRegistry();
        Map<SystemStreamPartition, List<EventData>> eventData = new HashMap<>();
        SystemStreamPartition ssp = new SystemStreamPartition(systemName, streamName, new Partition(partitionId));
        Map<String, Interceptor> interceptors = new HashMap<>();
        interceptors.put(streamName, new PassThroughInterceptor());

        // create EventData
        List<EventData> singlePartitionEventData = MockEventData.generateEventData(numEvents);
        eventData.put(ssp, singlePartitionEventData);

        // Set configs
        Map<String, String> configMap = new HashMap<>();
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_LIST, systemName), streamName);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_NAMESPACE, streamName), EVENTHUB_NAMESPACE);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_KEY_NAME, streamName), EVENTHUB_KEY_NAME);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_TOKEN, streamName), EVENTHUB_KEY);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_ENTITYPATH, streamName), MOCK_ENTITY_1);
        MapConfig config = new MapConfig(configMap);

        MockEventHubClientManagerFactory eventHubClientWrapperFactory = new MockEventHubClientManagerFactory(eventData);

        EventHubSystemConsumer consumer =
                new EventHubSystemConsumer(new EventHubConfig(config), systemName, eventHubClientWrapperFactory,
                        interceptors, testMetrics);

        consumer.register(ssp, "1");
        consumer.register(ssp, EventHubSystemConsumer.END_OF_STREAM);
        consumer.register(ssp, EventHubSystemConsumer.START_OF_STREAM);
        consumer.start();

        // The receiver must have been created at the earliest registered position.
        Assert.assertEquals(EventPosition.fromOffset(EventHubSystemConsumer.START_OF_STREAM, false).toString(),
                eventHubClientWrapperFactory.getPartitionOffset(String.valueOf(partitionId)).toString());
    }

    // Single SSP, pass-through interceptor: all generated events are polled back
    // intact and the read-rate counter equals the event count.
    @Test
    public void testSinglePartitionConsumptionHappyPath() throws Exception {
        String systemName = "eventhubs";
        String streamName = "testStream";
        int numEvents = 10; // needs to be less than BLOCKING_QUEUE_SIZE
        int partitionId = 0;
        TestMetricsRegistry testMetrics = new TestMetricsRegistry();
        Map<SystemStreamPartition, List<EventData>> eventData = new HashMap<>();
        SystemStreamPartition ssp = new SystemStreamPartition(systemName, streamName, new Partition(partitionId));
        Map<String, Interceptor> interceptors = new HashMap<>();
        interceptors.put(streamName, new PassThroughInterceptor());

        // create EventData
        List<EventData> singlePartitionEventData = MockEventData.generateEventData(numEvents);
        eventData.put(ssp, singlePartitionEventData);

        // Set configs
        Map<String, String> configMap = new HashMap<>();
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_LIST, systemName), streamName);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_NAMESPACE, streamName), EVENTHUB_NAMESPACE);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_KEY_NAME, streamName), EVENTHUB_KEY_NAME);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_TOKEN, streamName), EVENTHUB_KEY);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_ENTITYPATH, streamName), MOCK_ENTITY_1);
        MapConfig config = new MapConfig(configMap);

        MockEventHubClientManagerFactory eventHubClientWrapperFactory = new MockEventHubClientManagerFactory(eventData);

        EventHubSystemConsumer consumer =
                new EventHubSystemConsumer(new EventHubConfig(config), systemName, eventHubClientWrapperFactory,
                        interceptors, testMetrics);

        consumer.register(ssp, EventHubSystemConsumer.END_OF_STREAM);
        consumer.start();

        // Mock received data from EventHub
        eventHubClientWrapperFactory.sendToHandlers(consumer.streamPartitionHandlers);

        List<IncomingMessageEnvelope> result = consumer.poll(Collections.singleton(ssp), 1000).get(ssp);
        verifyEvents(result, singlePartitionEventData);
        Assert.assertEquals(testMetrics.getCounters(streamName).size(), 3);
        Assert.assertEquals(testMetrics.getGauges(streamName).size(), 2);

        Map<String, Counter> counters =
                testMetrics.getCounters(streamName).stream().collect(Collectors.toMap(Counter::getName, Function.identity()));
        Assert.assertEquals(counters.get(EventHubSystemConsumer.EVENT_READ_RATE).getCount(), numEvents);
        Assert.assertEquals(counters.get(EventHubSystemConsumer.READ_ERRORS).getCount(), 0);
    }

    // Single SSP with a byte-swapping interceptor: polled payloads must reflect
    // the interceptor's transformation of the raw event bytes.
    @Test
    public void testSinglePartitionConsumptionInterceptor() throws Exception {
        String systemName = "eventhubs";
        String streamName = "testStream";
        int numEvents = 10; // needs to be less than BLOCKING_QUEUE_SIZE
        int partitionId = 0;
        Interceptor interceptor = new SwapFirstLastByteInterceptor();
        TestMetricsRegistry testMetrics = new TestMetricsRegistry();
        Map<SystemStreamPartition, List<EventData>> eventData = new HashMap<>();
        SystemStreamPartition ssp = new SystemStreamPartition(systemName, streamName, new Partition(partitionId));
        Map<String, Interceptor> interceptors = new HashMap<>();
        interceptors.put(streamName, interceptor);

        // create EventData
        List<EventData> singlePartitionEventData = MockEventData.generateEventData(numEvents);
        eventData.put(ssp, singlePartitionEventData);

        // Set configs
        Map<String, String> configMap = new HashMap<>();
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_LIST, systemName), streamName);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_NAMESPACE, streamName), EVENTHUB_NAMESPACE);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_KEY_NAME, streamName), EVENTHUB_KEY_NAME);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_TOKEN, streamName), EVENTHUB_KEY);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_ENTITYPATH, streamName), MOCK_ENTITY_1);
        MapConfig config = new MapConfig(configMap);

        MockEventHubClientManagerFactory eventHubClientWrapperFactory = new MockEventHubClientManagerFactory(eventData);

        EventHubSystemConsumer consumer =
                new EventHubSystemConsumer(new EventHubConfig(config), systemName, eventHubClientWrapperFactory,
                        interceptors, testMetrics);

        consumer.register(ssp, EventHubSystemConsumer.END_OF_STREAM);
        consumer.start();

        // Mock received data from EventHub
        eventHubClientWrapperFactory.sendToHandlers(consumer.streamPartitionHandlers);

        List<IncomingMessageEnvelope> result = consumer.poll(Collections.singleton(ssp), 1000).get(ssp);
        verifyEvents(result, singlePartitionEventData, interceptor);
        Assert.assertEquals(testMetrics.getCounters(streamName).size(), 3);
        Assert.assertEquals(testMetrics.getGauges(streamName).size(), 2);

        Map<String, Counter> counters =
                testMetrics.getCounters(streamName).stream().collect(Collectors.toMap(Counter::getName, Function.identity()));
        Assert.assertEquals(counters.get(EventHubSystemConsumer.EVENT_READ_RATE).getCount(), numEvents);
        Assert.assertEquals(counters.get(EventHubSystemConsumer.READ_ERRORS).getCount(), 0);
    }

    @Test
    public void testMultiPartitionConsumptionPerPartitionConnection() throws Exception {
        testMultiPartitionConsumptionHappyPath(true);
    }

    @Test
    public void testMultiPartitionConsumptionShareConnection() throws Exception {
        testMultiPartitionConsumptionHappyPath(false);
    }

    // Two partitions of one stream; also asserts whether the per-partition
    // connection setting makes the SSPs share (or not share) a client manager.
    private void testMultiPartitionConsumptionHappyPath(boolean perPartitionConnection) throws Exception {
        String systemName = "eventhubs";
        String streamName = "testStream";
        int numEvents = 10; // needs to be less than BLOCKING_QUEUE_SIZE
        int partitionId1 = 0;
        int partitionId2 = 1;
        TestMetricsRegistry testMetrics = new TestMetricsRegistry();
        Map<SystemStreamPartition, List<EventData>> eventData = new HashMap<>();
        SystemStreamPartition ssp1 = new SystemStreamPartition(systemName, streamName, new Partition(partitionId1));
        SystemStreamPartition ssp2 = new SystemStreamPartition(systemName, streamName, new Partition(partitionId2));
        Map<String, Interceptor> interceptor = new HashMap<>();
        interceptor.put(streamName, new PassThroughInterceptor());

        // create EventData
        List<EventData> singlePartitionEventData1 = MockEventData.generateEventData(numEvents);
        List<EventData> singlePartitionEventData2 = MockEventData.generateEventData(numEvents);
        eventData.put(ssp1, singlePartitionEventData1);
        eventData.put(ssp2, singlePartitionEventData2);

        // Set configs
        Map<String, String> configMap = new HashMap<>();
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_LIST, systemName), streamName);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_ENTITYPATH, streamName), MOCK_ENTITY_1);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_NAMESPACE, streamName), EVENTHUB_NAMESPACE);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_KEY_NAME, streamName), EVENTHUB_KEY_NAME);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_TOKEN, streamName), EVENTHUB_KEY);
        configMap.put(String.format(EventHubConfig.CONFIG_PER_PARTITION_CONNECTION, systemName),
                String.valueOf(perPartitionConnection));
        MapConfig config = new MapConfig(configMap);

        MockEventHubClientManagerFactory eventHubClientWrapperFactory = new MockEventHubClientManagerFactory(eventData);

        EventHubSystemConsumer consumer =
                new EventHubSystemConsumer(new EventHubConfig(config), systemName, eventHubClientWrapperFactory,
                        interceptor, testMetrics);

        consumer.register(ssp1, EventHubSystemConsumer.START_OF_STREAM);
        consumer.register(ssp2, EventHubSystemConsumer.START_OF_STREAM);
        consumer.start();

        // Mock received data from EventHub
        eventHubClientWrapperFactory.sendToHandlers(consumer.streamPartitionHandlers);

        Set<SystemStreamPartition> ssps = new HashSet<>();
        ssps.add(ssp1);
        ssps.add(ssp2);
        Map<SystemStreamPartition, List<IncomingMessageEnvelope>> results = consumer.poll(ssps, 1000);
        verifyEvents(results.get(ssp1), singlePartitionEventData1);
        verifyEvents(results.get(ssp2), singlePartitionEventData2);
        Assert.assertEquals(testMetrics.getCounters(streamName).size(), 3);
        Assert.assertEquals(testMetrics.getGauges(streamName).size(), 2);

        Map<String, Counter> counters =
                testMetrics.getCounters(streamName).stream().collect(Collectors.toMap(Counter::getName, Function.identity()));
        Assert.assertEquals(counters.get(EventHubSystemConsumer.EVENT_READ_RATE).getCount(), numEvents * 2);
        Assert.assertEquals(counters.get(EventHubSystemConsumer.READ_ERRORS).getCount(), 0);
        if (perPartitionConnection) {
            Assert.assertNotEquals("perPartitionConnection=true; SSPs should not share the same client",
                    consumer.perPartitionEventHubManagers.get(ssp1), consumer.perPartitionEventHubManagers.get(ssp2));
        } else {
            Assert.assertEquals("perPartitionConnection=false; SSPs should share the same client",
                    consumer.perPartitionEventHubManagers.get(ssp1), consumer.perPartitionEventHubManagers.get(ssp2));
        }
    }

    // Two distinct streams, one partition each: events and per-stream metrics
    // must be tracked independently per stream.
    @Test
    public void testMultiStreamsConsumptionHappyPath() throws Exception {
        String systemName = "eventhubs";
        String streamName1 = "testStream1";
        String streamName2 = "testStream2";
        int numEvents = 10; // needs to be less than BLOCKING_QUEUE_SIZE
        int partitionId = 0;
        TestMetricsRegistry testMetrics = new TestMetricsRegistry();
        Map<SystemStreamPartition, List<EventData>> eventData = new HashMap<>();
        SystemStreamPartition ssp1 = new SystemStreamPartition(systemName, streamName1, new Partition(partitionId));
        SystemStreamPartition ssp2 = new SystemStreamPartition(systemName, streamName2, new Partition(partitionId));
        Map<String, Interceptor> interceptor = new HashMap<>();
        interceptor.put(streamName1, new PassThroughInterceptor());
        interceptor.put(streamName2, new PassThroughInterceptor());

        List<EventData> singlePartitionEventData1 = MockEventData.generateEventData(numEvents);
        List<EventData> singlePartitionEventData2 = MockEventData.generateEventData(numEvents);
        eventData.put(ssp1, singlePartitionEventData1);
        eventData.put(ssp2, singlePartitionEventData2);

        Map<String, String> configMap = new HashMap<>();
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_LIST, systemName),
                String.format("%s,%s", streamName1, streamName2));
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_ENTITYPATH, streamName1), MOCK_ENTITY_1);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_NAMESPACE, streamName1), EVENTHUB_NAMESPACE);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_KEY_NAME, streamName1), EVENTHUB_KEY_NAME);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_TOKEN, streamName1), EVENTHUB_KEY);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_ENTITYPATH, streamName2), MOCK_ENTITY_2);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_NAMESPACE, streamName2), EVENTHUB_NAMESPACE);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_KEY_NAME, streamName2), EVENTHUB_KEY_NAME);
        configMap.put(String.format(EventHubConfig.CONFIG_STREAM_SAS_TOKEN, streamName2), EVENTHUB_KEY);
        MapConfig config = new MapConfig(configMap);

        MockEventHubClientManagerFactory eventHubClientWrapperFactory = new MockEventHubClientManagerFactory(eventData);

        EventHubSystemConsumer consumer =
                new EventHubSystemConsumer(new EventHubConfig(config), systemName, eventHubClientWrapperFactory,
                        interceptor, testMetrics);

        consumer.register(ssp1, EventHubSystemConsumer.START_OF_STREAM);
        consumer.register(ssp2, EventHubSystemConsumer.START_OF_STREAM);
        consumer.start();

        // Mock received data from EventHub
        eventHubClientWrapperFactory.sendToHandlers(consumer.streamPartitionHandlers);

        Set<SystemStreamPartition> ssps = new HashSet<>();
        ssps.add(ssp1);
        ssps.add(ssp2);
        Map<SystemStreamPartition, List<IncomingMessageEnvelope>> results = consumer.poll(ssps, 1000);
        verifyEvents(results.get(ssp1), singlePartitionEventData1);
        verifyEvents(results.get(ssp2), singlePartitionEventData2);
        Assert.assertEquals(testMetrics.getCounters(streamName1).size(), 3);
        Assert.assertEquals(testMetrics.getGauges(streamName1).size(), 2);
        Assert.assertEquals(testMetrics.getCounters(streamName2).size(), 3);
        Assert.assertEquals(testMetrics.getGauges(streamName2).size(), 2);

        Map<String, Counter> counters1 =
                testMetrics.getCounters(streamName1).stream().collect(Collectors.toMap(Counter::getName, Function.identity()));
        Assert.assertEquals(counters1.get(EventHubSystemConsumer.EVENT_READ_RATE).getCount(), numEvents);
        Assert.assertEquals(counters1.get(EventHubSystemConsumer.READ_ERRORS).getCount(), 0);
        Map<String, Counter> counters2 =
                testMetrics.getCounters(streamName2).stream().collect(Collectors.toMap(Counter::getName, Function.identity()));
        Assert.assertEquals(counters2.get(EventHubSystemConsumer.EVENT_READ_RATE).getCount(), numEvents);
        Assert.assertEquals(counters2.get(EventHubSystemConsumer.READ_ERRORS).getCount(), 0);
    }
}
apache-2.0
vichid/Ship-Android-Fast
app/src/main/java/com/example/app/di/modules/ApiModule.java
2545
package com.example.app.di.modules;

import android.content.Context;
import com.example.app.BuildConfig;
import com.example.app.di.qualifiers.ClientCache;
import com.example.data.net.githubapi.GithubApiService;
import com.example.data.net.interceptor.CacheInterceptor;
import com.example.data.net.interceptor.UserAgentInterceptor;
import com.example.domain.executor.ThreadExecutor;
import com.facebook.stetho.okhttp3.StethoInterceptor;
import com.google.gson.Gson;
import dagger.Module;
import dagger.Provides;
import java.io.File;
import javax.inject.Named;
import javax.inject.Singleton;
import okhttp3.Cache;
import okhttp3.OkHttpClient;
import retrofit2.Retrofit;
import retrofit2.adapter.rxjava.RxJavaCallAdapterFactory;
import retrofit2.converter.gson.GsonConverterFactory;
import rx.schedulers.Schedulers;

/**
 * Dagger module wiring the HTTP stack: a singleton Retrofit instance (backed by
 * a cached, intercepted OkHttp client) and the {@link GithubApiService} built
 * from it. All bindings are {@code @Singleton}, so one client/cache pair is
 * shared application-wide.
 */
@Module
public class ApiModule {

    /** On-disk HTTP response cache budget: 20 MiB. */
    private static final int CACHE_SIZE_20MB = 20 * 1024 * 1024;

    /** Retrofit-generated implementation of the GitHub API endpoints. */
    @Provides
    @Singleton
    GithubApiService provideGithubApiService(
            Retrofit retrofit
    ) {
        return retrofit.create(GithubApiService.class);
    }

    /**
     * Singleton Retrofit instance.
     * Gson handles (de)serialization; RxJava call adapter schedules network
     * work on the injected {@link ThreadExecutor}. {@code validateEagerly} is
     * enabled only in debug builds so service-interface mistakes fail fast
     * without slowing release startup.
     */
    @Provides
    @Singleton
    Retrofit provideRetrofit(
            OkHttpClient client,
            @Named("apiUrl") String endPoint,
            Gson gson,
            ThreadExecutor threadExecutor
    ) {
        return new Retrofit.Builder()
                .baseUrl(endPoint)
                .addConverterFactory(GsonConverterFactory.create(gson))
                .addCallAdapterFactory(RxJavaCallAdapterFactory.createWithScheduler(Schedulers.from(threadExecutor)))
                .client(client)
                .validateEagerly(BuildConfig.DEBUG)
                .build();
    }

    /**
     * Singleton OkHttp client: disk cache, User-Agent header, Stetho network
     * inspection, and the cache-control interceptor.
     *
     * NOTE(review): {@code cacheInterceptor} is registered BOTH as an
     * application interceptor and as a network interceptor — presumably so it
     * can rewrite cache headers on network responses and also serve cached
     * responses when offline; confirm against the CacheInterceptor
     * implementation. The HttpLoggingInterceptor lines are intentionally
     * commented out.
     */
    @Provides
    @Singleton
    OkHttpClient provideOkHttpClient(
            @ClientCache Cache cache,
            CacheInterceptor cacheInterceptor,
            //HttpLoggingInterceptor loggingInterceptor,
            @Named("userAgent") String userAgentValue
    ) {
        return new OkHttpClient.Builder()
                .cache(cache)
                //.addInterceptor(loggingInterceptor)
                .addNetworkInterceptor(new UserAgentInterceptor(userAgentValue))
                .addNetworkInterceptor(new StethoInterceptor())
                .addInterceptor(cacheInterceptor)
                .addNetworkInterceptor(cacheInterceptor)
                .build();
    }

    /** 20 MiB HTTP response cache rooted at the injected cache directory. */
    @Provides
    @Singleton
    @ClientCache
    Cache provideCache(
            @ClientCache File path
    ) {
        return new Cache(path, CACHE_SIZE_20MB);
    }

    /** Directory under the app cache dir that backs the OkHttp response cache. */
    @Singleton
    @Provides
    @ClientCache
    File provideCacheFile(Context context) {
        return new File(context.getCacheDir(), "HttpResponseCache");
    }
}
apache-2.0
gallandarakhneorg/afc
core/maths/mathstochastic/src/main/java/org/arakhne/afc/math/stochastic/StochasticGenerator.java
3516
/* * $Id$ * This file is a part of the Arakhne Foundation Classes, http://www.arakhne.org/afc * * Copyright (c) 2000-2012 Stephane GALLAND. * Copyright (c) 2005-10, Multiagent Team, Laboratoire Systemes et Transports, * Universite de Technologie de Belfort-Montbeliard. * Copyright (c) 2013-2020 The original authors, and other authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.arakhne.afc.math.stochastic; import java.util.Random; import org.eclipse.xtext.xbase.lib.Pure; /** * Generator of random values according to stochastic laws. * * @author $Author: cbohrhauer$ * @version $FullVersion$ * @mavengroupid $GroupId$ * @mavenartifactid $ArtifactId$ * @since 13.0 */ public final class StochasticGenerator { private static Random uniformRandomVariableList; /** * Constructor. */ private StochasticGenerator() { // } private static void initRandomNumberList() { if (uniformRandomVariableList == null) { uniformRandomVariableList = new Random(); } } /** Generate a stochastic value according to the given law. * * <p>A probability {@code p} is randomly selected using the specified random number list. * The returned value * is when a randomly selected value inside the set of available values. * * <p>This method uses a {@link UniformStochasticLaw uniform distribution random number generation}. * * @param law is the stochastic law to use. * @return a value which was randomly selected according to a stochastic law. * @throws MathException in case the value could not be computed. 
*/ @Pure public static double generateRandomValue(StochasticLaw law) throws MathException { initRandomNumberList(); return law.inverseF(uniformRandomVariableList); } /** Add a noise to the specified value. * * <p>The returned value is given by: * {@code (value-noise) &lt; value &lt; (value+noise)} * where {@code 0 &lt;= noise &lt;= max(abs(value), noiseLaw(value))}. * The {@code noise} is randomly selected according to the * given random number list. * * <p>This method uses a {@link UniformStochasticLaw uniform distribution random number generation}. * * @param value is the value to noise * @param noiseLaw is the law used to selected tyhe noise amount. * @return the value * @throws MathException is case the value is not valid */ @Pure public static double noiseValue(double value, MathFunction noiseLaw) throws MathException { try { double noise = Math.abs(noiseLaw.f(value)); initRandomNumberList(); noise *= uniformRandomVariableList.nextFloat(); if (uniformRandomVariableList.nextBoolean()) { noise = -noise; } return value + noise; } catch (MathException e) { return value; } } }
apache-2.0
taichi/org.handwerkszeug.riak
src/org/handwerkszeug/riak/util/HttpUtil.java
1243
package org.handwerkszeug.riak.util; import java.text.DateFormat; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Date; import java.util.Locale; import java.util.TimeZone; /** * @author taichi * @see <a href="http://www.ietf.org/rfc/rfc1123.txt">RFC1123 - Requirements for * Internet Hosts -- Application and Support</a> * @see <a href="http://www.ietf.org/rfc/rfc822.txt">RFC822 - STANDARD FOR THE * FORMAT OF ARPA INTERNET TEXT MESSAGES</a> */ public class HttpUtil { /** * RFC822 5. DATE AND TIME SPECIFICATION */ static final String RFC1123_DATEFORMAT = "EEE, dd MMM yyyy HH:mm:ss zzz"; static final TimeZone GMT = TimeZone.getTimeZone("GMT"); public static DateFormat newGMTFormatter() { SimpleDateFormat fmt = new SimpleDateFormat(RFC1123_DATEFORMAT, Locale.ENGLISH); fmt.setTimeZone(GMT); return fmt; } public static String format(Date date) { DateFormat fmt = newGMTFormatter(); return fmt.format(date); } public static Date parse(String date) { try { DateFormat fmt = newGMTFormatter(); return fmt.parse(date); } catch (ParseException e) { throw new IllegalArgumentException(date); } } }
apache-2.0
goodwinnk/intellij-community
platform/lang-impl/src/com/intellij/ide/projectView/impl/nodes/PsiDirectoryNode.java
14076
// Copyright 2000-2018 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.intellij.ide.projectView.impl.nodes;

import com.intellij.ide.IconProvider;
import com.intellij.ide.projectView.PresentationData;
import com.intellij.ide.projectView.ViewSettings;
import com.intellij.ide.projectView.impl.ProjectRootsUtil;
import com.intellij.ide.util.treeView.AbstractTreeNode;
import com.intellij.idea.ActionsBundle;
import com.intellij.openapi.extensions.Extensions;
import com.intellij.openapi.fileTypes.FileTypeRegistry;
import com.intellij.openapi.module.Module;
import com.intellij.openapi.module.ModuleGrouperKt;
import com.intellij.openapi.module.ModuleUtilCore;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.roots.OrderEntry;
import com.intellij.openapi.roots.ProjectFileIndex;
import com.intellij.openapi.roots.ProjectRootManager;
import com.intellij.openapi.roots.libraries.LibraryUtil;
import com.intellij.openapi.roots.ui.configuration.ProjectSettingsService;
import com.intellij.openapi.util.Comparing;
import com.intellij.openapi.util.registry.Registry;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.openapi.vfs.LocalFileSystem;
import com.intellij.openapi.vfs.VfsUtil;
import com.intellij.openapi.vfs.VfsUtilCore;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.pom.NavigatableWithText;
import com.intellij.projectImport.ProjectAttachProcessor;
import com.intellij.psi.PsiDirectory;
import com.intellij.psi.PsiFile;
import com.intellij.psi.PsiManager;
import com.intellij.psi.impl.file.PsiDirectoryFactory;
import com.intellij.ui.SimpleTextAttributes;
import com.intellij.util.IconUtil;
import com.intellij.util.PlatformUtils;
import com.intellij.util.containers.SmartHashSet;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;

import javax.swing.*;
import java.util.Collection;
import java.util.Set;

/**
 * Project-view tree node representing a {@link PsiDirectory}: builds the
 * presentation (name, module markers, icon), answers containment/validity
 * queries, and navigates to the relevant settings page for module/library
 * roots instead of opening a source file.
 */
public class PsiDirectoryNode extends BasePsiNode<PsiDirectory> implements NavigatableWithText {
  // the chain from a parent directory to this one usually contains only one virtual file
  // (guarded by synchronized(chain) because update and canRepresent may run on different threads)
  private final Set<VirtualFile> chain = new SmartHashSet<>();

  // Optional filter restricting which PSI items this node's subtree shows; may be null.
  private final PsiFileSystemItemFilter myFilter;

  public PsiDirectoryNode(Project project, PsiDirectory value, ViewSettings viewSettings) {
    this(project, value, viewSettings, null);
  }

  public PsiDirectoryNode(Project project, PsiDirectory value, ViewSettings viewSettings, @Nullable PsiFileSystemItemFilter filter) {
    super(project, value, viewSettings);
    myFilter = filter;
  }

  @Nullable
  public PsiFileSystemItemFilter getFilter() {
    return myFilter;
  }

  // Whether the "[moduleName]" suffix may be rendered next to a content-root directory.
  protected boolean shouldShowModuleName() {
    return !PlatformUtils.isCidr();
  }

  protected boolean shouldShowSourcesRoot() {
    return true;
  }

  /**
   * Computes this node's presentation. For a module content root the directory
   * name is shown (bold when it matches the module name, otherwise with a
   * "[module]" suffix); other directories delegate naming to
   * ProjectViewDirectoryHelper. Also lazily records the directory chain from
   * the parent node's file down to this one, used later by canRepresent().
   */
  @Override
  protected void updateImpl(@NotNull PresentationData data) {
    Project project = getProject();
    assert project != null : this;
    PsiDirectory psiDirectory = getValue();
    assert psiDirectory != null : this;
    VirtualFile directoryFile = psiDirectory.getVirtualFile();
    Object parentValue = getParentValue();

    synchronized (chain) {
      if (chain.isEmpty()) {
        VirtualFile ancestor = getVirtualFile(parentValue);
        if (ancestor != null) {
          // Collect every directory strictly between the parent's file and this one.
          for (VirtualFile file = directoryFile; file != null && VfsUtilCore.isAncestor(ancestor, file, true); file = file.getParent()) {
            chain.add(file);
          }
        }
        if (chain.isEmpty()) chain.add(directoryFile);
      }
    }

    if (ProjectRootsUtil.isModuleContentRoot(directoryFile, project)) {
      ProjectFileIndex fi = ProjectRootManager.getInstance(project).getFileIndex();
      Module module = fi.getModuleForFile(directoryFile);

      data.setPresentableText(directoryFile.getName());
      if (module != null) {
        if (!(parentValue instanceof Module)) {
          if (!shouldShowModuleName()) {
            data.addText(directoryFile.getName() + " ", SimpleTextAttributes.REGULAR_ATTRIBUTES);
          }
          else if (moduleNameMatchesDirectoryName(module, directoryFile, fi)) {
            // Directory name doubles as the module name — render bold, no suffix.
            data.addText(directoryFile.getName(), SimpleTextAttributes.REGULAR_BOLD_ATTRIBUTES);
          }
          else {
            data.addText(directoryFile.getName() + " ", SimpleTextAttributes.REGULAR_ATTRIBUTES);
            data.addText("[" + module.getName() + "]", SimpleTextAttributes.REGULAR_BOLD_ATTRIBUTES);
          }
        }
        else {
          data.addText(directoryFile.getName(), SimpleTextAttributes.REGULAR_ATTRIBUTES);
        }

        // The URL/location hint is only shown directly under a module or the project node.
        boolean shouldShowUrl = getSettings().isShowURL() && (parentValue instanceof Module || parentValue instanceof Project);
        data.setLocationString(ProjectViewDirectoryHelper.getInstance(project).getLocationString(psiDirectory, shouldShowUrl, shouldShowSourcesRoot()));
        setupIcon(data, psiDirectory);

        return;
      }
    }

    String name = parentValue instanceof Project
                  ? psiDirectory.getVirtualFile().getPresentableUrl()
                  : ProjectViewDirectoryHelper.getInstance(psiDirectory.getProject()).getNodeName(getSettings(), parentValue, psiDirectory);
    if (name == null) {
      // No displayable name means the node is stale; invalidate it.
      setValue(null);
      return;
    }

    data.setPresentableText(name);
    data.setLocationString(ProjectViewDirectoryHelper.getInstance(project).getLocationString(psiDirectory, false, false));

    setupIcon(data, psiDirectory);
  }

  /**
   * True when the module's name is (case-insensitively) the directory name, or
   * — with qualified module names enabled — when the name is
   * "&lt;parentModule&gt;.&lt;directoryName&gt;" and the parent directory is the parent
   * module's content root.
   */
  private static boolean moduleNameMatchesDirectoryName(@NotNull Module module, @NotNull VirtualFile directoryFile, @NotNull ProjectFileIndex fileIndex) {
    if (Registry.is("ide.hide.real.module.name")) return true;
    String moduleName = module.getName();
    String directoryName = directoryFile.getName();
    if (moduleName.equalsIgnoreCase(directoryName)) {
      return true;
    }
    if (ModuleGrouperKt.isQualifiedModuleNamesEnabled(module.getProject()) && StringUtil.endsWithIgnoreCase(moduleName, directoryName)) {
      int parentPrefixLength = moduleName.length() - directoryName.length() - 1;
      if (parentPrefixLength > 0 && moduleName.charAt(parentPrefixLength) == '.') {
        VirtualFile parentDirectory = directoryFile.getParent();
        if (ProjectRootsUtil.isModuleContentRoot(parentDirectory, module.getProject())) {
          Module parentModule = fileIndex.getModuleForFile(parentDirectory);
          if (parentModule != null && parentModule.getName().length() == parentPrefixLength && moduleName.startsWith(parentModule.getName())) {
            return true;
          }
        }
      }
    }
    return false;
  }

  /**
   * Picks the node icon: AppCode uses the file-type icon directly; all other
   * IDEs ask the registered IconProvider extensions, first non-null wins.
   */
  protected void setupIcon(PresentationData data, PsiDirectory psiDirectory) {
    final VirtualFile virtualFile = psiDirectory.getVirtualFile();
    if (PlatformUtils.isAppCode()) {
      final Icon icon = IconUtil.getIcon(virtualFile, 0, myProject);
      if (icon != null) {
        data.setIcon(icon);
      }
    }
    else {
      for (final IconProvider provider : Extensions.getExtensions(IconProvider.EXTENSION_POINT_NAME)) {
        final Icon icon = provider.getIcon(psiDirectory, 0);
        if (icon != null) {
          data.setIcon(icon);
          return;
        }
      }
    }
  }

  @Override
  public Collection<AbstractTreeNode> getChildrenImpl() {
    return ProjectViewDirectoryHelper.getInstance(myProject).getDirectoryChildren(getValue(), getSettings(), true, getFilter());
  }

  @Override
  @SuppressWarnings("deprecation")
  public String getTestPresentation() {
    return "PsiDirectory: " + getValue().getName();
  }

  public boolean isFQNameShown() {
    return ProjectViewDirectoryHelper.getInstance(getProject()).isShowFQName(getSettings(), getParentValue(), getValue());
  }

  /**
   * True when {@code file} lives under this directory and is not hidden by the
   * node filter, nor excluded/ignored (which of the two is checked depends on
   * the "ide.hide.excluded.files" registry flag).
   */
  @Override
  public boolean contains(@NotNull VirtualFile file) {
    final PsiDirectory value = getValue();
    if (value == null) {
      return false;
    }

    VirtualFile directory = value.getVirtualFile();
    if (directory.getFileSystem() instanceof LocalFileSystem) {
      // Normalize jar:// etc. entries to their local counterpart before the ancestry check.
      file = VfsUtil.getLocalFile(file);
    }

    if (!VfsUtilCore.isAncestor(directory, file, false)) {
      return false;
    }

    final Project project = value.getProject();
    PsiFileSystemItemFilter filter = getFilter();
    if (filter != null) {
      PsiFile psiFile = PsiManager.getInstance(project).findFile(file);
      if (psiFile != null && !filter.shouldShow(psiFile)) return false;
      PsiDirectory psiDirectory = PsiManager.getInstance(project).findDirectory(file);
      if (psiDirectory != null && !filter.shouldShow(psiDirectory)) return false;
    }
    if (Registry.is("ide.hide.excluded.files")) {
      final ProjectFileIndex fileIndex = ProjectRootManager.getInstance(project).getFileIndex();
      return !fileIndex.isExcluded(file);
    }
    else {
      return !FileTypeRegistry.getInstance().isFileIgnored(file);
    }
  }

  @Override
  public VirtualFile getVirtualFile() {
    PsiDirectory directory = getValue();
    if (directory == null) return null;
    return directory.getVirtualFile();
  }

  /**
   * @return a virtual file that identifies the given element
   */
  @Nullable
  private static VirtualFile getVirtualFile(Object element) {
    if (element instanceof PsiDirectory) {
      PsiDirectory directory = (PsiDirectory)element;
      return directory.getVirtualFile();
    }
    return element instanceof VirtualFile ? (VirtualFile)element : null;
  }

  /**
   * A node also "represents" any directory in the collapsed chain between its
   * parent node and itself (flattened middle packages), hence the chain check
   * before delegating to the helper.
   */
  @Override
  public boolean canRepresent(final Object element) {
    VirtualFile file = getVirtualFile(element);
    if (file != null) {
      synchronized (chain) {
        if (chain.contains(file)) return true;
      }
    }
    if (super.canRepresent(element)) return true;
    return ProjectViewDirectoryHelper.getInstance(getProject())
      .canRepresent(element, getValue(), getParentValue(), getSettings());
  }

  @Override
  public boolean isValid() {
    if (!super.isValid()) return false;
    return ProjectViewDirectoryHelper.getInstance(getProject())
      .isValidDirectory(getValue(), getParentValue(), getSettings(), getFilter());
  }

  /**
   * Navigation from a directory node opens settings, not an editor, and is
   * only possible for module content roots, source roots, or library roots —
   * and only when the corresponding settings page can be shown.
   */
  @Override
  public boolean canNavigate() {
    VirtualFile file = getVirtualFile();
    Project project = getProject();

    ProjectSettingsService service = ProjectSettingsService.getInstance(myProject);

    return file != null && (ProjectRootsUtil.isModuleContentRoot(file, project) && service.canOpenModuleSettings() ||
                            ProjectRootsUtil.isModuleSourceRoot(file, project) && service.canOpenContentEntriesSettings() ||
                            ProjectRootsUtil.isLibraryRoot(file, project) && service.canOpenModuleLibrarySettings());
  }

  @Override
  public boolean canNavigateToSource() {
    return false;
  }

  /** Opens the settings page matching the directory's role (module / library / content entries). */
  @Override
  public void navigate(final boolean requestFocus) {
    Module module = ModuleUtilCore.findModuleForPsiElement(getValue());
    if (module != null) {
      final VirtualFile file = getVirtualFile();
      final Project project = getProject();
      ProjectSettingsService service = ProjectSettingsService.getInstance(myProject);
      if (ProjectRootsUtil.isModuleContentRoot(file, project)) {
        service.openModuleSettings(module);
      }
      else if (ProjectRootsUtil.isLibraryRoot(file, project)) {
        final OrderEntry orderEntry = LibraryUtil.findLibraryEntry(file, module.getProject());
        if (orderEntry != null) {
          service.openLibraryOrSdkSettings(orderEntry);
        }
      }
      else {
        service.openContentEntriesSettings(module);
      }
    }
  }

  @Override
  public String getNavigateActionText(boolean focusEditor) {
    VirtualFile file = getVirtualFile();
    Project project = getProject();
    if (file != null && project != null) {
      if (ProjectRootsUtil.isModuleContentRoot(file, project) ||
          ProjectRootsUtil.isModuleSourceRoot(file, project)) {
        return ActionsBundle.message("action.ModuleSettings.navigate");
      }
      if (ProjectRootsUtil.isLibraryRoot(file, project)) {
        return ActionsBundle.message("action.LibrarySettings.navigate");
      }
    }
    return null;
  }

  // Sort weight: 20 keeps folders grouped on top; otherwise FQ-named nodes sink below plain ones.
  @Override
  public int getWeight() {
    ViewSettings settings = getSettings();
    if (settings == null || settings.isFoldersAlwaysOnTop()) {
      return 20;
    }
    return isFQNameShown() ? 70 : 0;
  }

  @Override
  public String getTitle() {
    final PsiDirectory directory = getValue();
    if (directory != null) {
      return PsiDirectoryFactory.getInstance(getProject()).getQualifiedName(directory, true);
    }
    return super.getTitle();
  }

  @Override
  public Comparable getSortKey() {
    if (ProjectAttachProcessor.canAttachToProject()) {
      // primary module is always on top; attached modules are sorted alphabetically
      final VirtualFile file = getVirtualFile();
      if (Comparing.equal(file, myProject.getBaseDir())) {
        return ""; // sorts before any other name
      }
      return getTitle();
    }
    return null;
  }

  @Override
  public Comparable getTypeSortKey() {
    VirtualFile file = getVirtualFile();
    if (file != null) {
      String extension = file.getExtension();
      if (extension != null) {
        return new PsiFileNode.ExtensionSortKey(extension);
      }
    }
    return null;
  }

  @Override
  public String getQualifiedNameSortKey() {
    final PsiDirectoryFactory factory = PsiDirectoryFactory.getInstance(getProject());
    return factory.getQualifiedName(getValue(), true);
  }

  @Override
  public int getTypeSortWeight(final boolean sortByType) {
    return 3;
  }

  @Override
  public boolean shouldDrillDownOnEmptyElement() {
    return true;
  }

  // Show the expand arrow optimistically unless the directory is known to be empty.
  @Override
  public boolean isAlwaysShowPlus() {
    final VirtualFile file = getVirtualFile();
    return file == null || file.getChildren().length > 0;
  }
}
apache-2.0
chennemann/ADBConnect
app/src/test/java/de/androidbytes/adbconnect/ExampleUnitTest.java
341
package de.androidbytes.adbconnect; import org.junit.Test; import static org.junit.Assert.*; /** * To work on unit tests, switch the Test Artifact in the Build Variants view. */ public class ExampleUnitTest { @Test public void addition_isCorrect() throws Exception { assertEquals(4, 2 + 2); } }
apache-2.0
neolocn/iReach
app/src/main/java/cn/neolo/app/ireach/model/SettingFragment.java
3433
package cn.neolo.app.ireach.model;

import android.app.Activity;
import android.app.Service;
import android.content.Intent;
import android.content.pm.PackageInfo;
import android.content.pm.PackageManager;
import android.os.Bundle;
import android.support.v7.widget.Toolbar;
import android.view.View;
import android.widget.TextView;

import com.blankj.utilcode.util.ProcessUtils;
import com.blankj.utilcode.util.ServiceUtils;
import com.isnc.facesdk.SuperID;
import com.isnc.facesdk.common.Cache;

import java.util.Map;

import butterknife.BindView;
import butterknife.OnClick;
import cn.bmob.v3.BmobUser;
import cn.neolo.app.ireach.Activity.AboutActivity;
import cn.neolo.app.ireach.Activity.LoginActivity;
import cn.neolo.app.ireach.Activity.MainActivity;
import cn.neolo.app.ireach.Activity.MainTeacherActivity;
import cn.neolo.app.ireach.Activity.MyService;
import cn.neolo.app.ireach.Activity.TeamActivity;
import cn.neolo.app.ireach.Base.RxBaseLazyFragment;
import cn.neolo.app.ireach.R;

/**
 * "Settings and help" fragment: shows the app version, toggles the navigation
 * drawer of whichever main activity hosts it, and handles logout plus the
 * "about app" / "about team" entries.
 *
 * Created by Neolo on 2017.8.20.
 */
public class SettingFragment extends RxBaseLazyFragment {
    @BindView(R.id.toolbar)
    Toolbar mToolbar;
    // Label showing "v<versionName>" of the installed package.
    @BindView(R.id.app_version_code)
    TextView mVersionCode;

    /** Factory method; no arguments are needed, so no Bundle is attached. */
    public static SettingFragment newInstance() {
        return new SettingFragment();
    }

    @Override
    public int getLayoutResId() {
        return R.layout.fragment_setting;
    }

    /**
     * Sets up the toolbar (title is the Chinese UI string for
     * "Settings and help") and wires its navigation icon to the drawer of the
     * hosting activity — the fragment may live in either MainActivity or
     * MainTeacherActivity, hence the two instanceof checks.
     */
    @Override
    public void finishCreateView(Bundle state) {
        mToolbar.setTitle("设置和帮助");
        mToolbar.setNavigationIcon(R.drawable.ic_navigation_drawer);
        mToolbar.setNavigationOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                Activity activity = getActivity();
                if (activity instanceof MainActivity) {
                    ((MainActivity) activity).toggleDrawer();
                }
                if (activity instanceof MainTeacherActivity) {
                    ((MainTeacherActivity) activity).toggleDrawer();
                }
            }
        });
        mVersionCode.setText("v" + getVersionCode());
    }

    /**
     * Returns the package's versionName (despite the method name saying
     * "code").
     *
     * NOTE(review): the {@code assert} below is disabled on Android at
     * runtime, so if the package lookup ever fails this dereferences null —
     * flagged, behavior left unchanged.
     */
    public String getVersionCode() {
        PackageInfo packageInfo = null;
        try {
            packageInfo = getActivity().getPackageManager().getPackageInfo(getActivity().getPackageName(), 0);
        } catch (PackageManager.NameNotFoundException e) {
            e.printStackTrace();
        }
        assert packageInfo != null;
        return packageInfo.versionName;
    }

    // Logout and the other settings entries (original comment: 退出设置).
    @OnClick({R.id.btn_logout, R.id.layout_about_app, R.id.layout_about_me})
    void onClick(View view) {
        switch (view.getId()) {
            case R.id.btn_logout:
                // Clear both the Bmob session and the face-SDK session before
                // returning to the login screen.
                BmobUser.logOut();
                SuperID.faceLogout(getContext());
                // Shut the background service down (original comment: 将进程关闭);
                // stopped via both helpers — presumably belt-and-braces, confirm
                // whether one call suffices.
                ServiceUtils.stopService(MyService.class);
                getActivity().stopService(new Intent(getContext(), MyService.class));
                startActivity(new Intent(getContext(), LoginActivity.class));
                getActivity().finish();
                break;
            case R.id.layout_about_app:
                startActivity(new Intent(getActivity(), AboutActivity.class));
                break;
            case R.id.layout_about_me:
                startActivity(new Intent(getActivity(), TeamActivity.class));
                break;
        }
    }
}
apache-2.0
gosu-lang/old-gosu-repo
gosu-core-api/src/main/java/gw/lang/parser/template/ITemplateObserver.java
617
/* * Copyright 2013 Guidewire Software, Inc. */ package gw.lang.parser.template; import gw.lang.GosuShop; import gw.lang.reflect.IType; import java.io.Writer; public interface ITemplateObserver { public boolean beforeTemplateRender(IType type, Writer writer); public StringEscaper getEscaper(); public void afterTemplateRender(IType type, Writer writer); public static final ITemplateObserverManager MANAGER = GosuShop.makeTemplateObserverManager(); interface ITemplateObserverManager { public void pushTemplateObserver(ITemplateObserver observer); public void popTemplateObserver(); } }
apache-2.0
SensorSink/pond
old-src/main/java/ac/bali/sensorsink/hauler/restlets/SensorServerResource.java
1139
package ac.bali.sensorsink.hauler.restlets; import com.google.appengine.api.datastore.KeyFactory; import org.restlet.Context; import org.restlet.Request; import org.restlet.Response; import org.restlet.resource.ServerResource; public class SensorServerResource extends ServerResource implements SensorResource { private Key key; private DatastoreService datastore; @Override public void init( Context context, Request request, Response response ) { super.init( context, request, response ); String id = (String) request.getAttributes().get( "id" ); key = KeyFactory.createKey( "Sensor", id ); datastore = DatastoreServiceFactory.getDatastoreService(); } @Override public SensorDetails retrieve() throws EntityNotFoundException { Entity entity = datastore.get( key ); return null; } @Override public void remove() { DatastoreService datastore = DatastoreServiceFactory.getDatastoreService(); datastore.delete( key ); } @Override public SensorDetails update() { return null; } }
apache-2.0
fancyerii/chinesesegmentor
src/main/java/com/antbrains/wordseg/luceneanalyzer/StandardTokenizerInterface.java
2039
package com.antbrains.wordseg.luceneanalyzer; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.Reader; import java.io.IOException; interface StandardTokenizerInterface { /** This character denotes the end of file */ public static final int YYEOF = -1; /** * Copies the matched text into the CharTermAttribute */ void getText(CharTermAttribute t); /** * Returns the current position. */ int yychar(); /** * Resets the scanner to read from a new input stream. Does not close the old reader. * * All internal variables are reset, the old input stream <b>cannot</b> be reused (internal buffer * is discarded and lost). Lexical state is set to <tt>ZZ_INITIAL</tt>. * * @param reader * the new input stream */ void yyreset(Reader reader); /** * Returns the length of the matched text region. */ int yylength(); /** * Resumes scanning until the next regular expression is matched, the end of input is encountered * or an I/O-Error occurs. * * @return the next token, {@link #YYEOF} on end of stream * @exception IOException * if any I/O-Error occurs */ int getNextToken() throws IOException; }
apache-2.0
bbssyyuui/zhihu
lib-base/src/main/java/com/zdf/lib_base/widget/recycler/helper/LoadMoreRecyclerHelper.java
3905
package com.zdf.lib_base.widget.recycler.helper; import android.support.annotation.NonNull; import android.support.v7.widget.LinearLayoutManager; import android.support.v7.widget.RecyclerView; import com.zdf.lib_base.widget.recycler.listener.OnLoadMoreListener; import com.zdf.lib_base.widget.recycler.view.LoadMoreView; import org.byteam.superadapter.SuperAdapter; /** * 上拉加载更多的帮助类 * * Created by XiaoFeng on 17/1/29. */ public class LoadMoreRecyclerHelper extends RecyclerView.OnScrollListener { private final int STATE_NONE = 0x00; private final int STATE_LOADING = 0x01; private final int STATE_FAILURE = 0x02; private final int STATE_COMPLETE = 0x03; // 滑到底部里最后一个个数的阀值 private static final int VISIBLE_THRESHOLD = 1; private int state; private boolean enabled = true; private OnLoadMoreListener listener; private SuperAdapter superAdapter; private LoadMoreView loadMoreView; private LinearLayoutManager layoutManager; public LoadMoreRecyclerHelper(@NonNull OnLoadMoreListener listener) { this.listener = listener; } public void setEnabled(boolean enabled) { this.enabled = enabled; } private boolean canLoadMore() { return state != STATE_LOADING && enabled; } private boolean isLoadMoreShown() { return state == STATE_LOADING || state == STATE_FAILURE; } @Override public void onScrolled(final RecyclerView recyclerView, int dx, int dy) { if (canLoadMore() && dy >= 0) { int totalItemCount = layoutManager.getItemCount(); int lastVisibleItem = layoutManager.findLastVisibleItemPosition(); if (lastVisibleItem + VISIBLE_THRESHOLD >= totalItemCount) { onLoadMore(recyclerView); } } } @Override public void onScrollStateChanged(final RecyclerView recyclerView, int newState) { super.onScrollStateChanged(recyclerView, newState); // if (newState == RecyclerView.SCROLL_STATE_IDLE // && shouldLoadMore(recyclerView, 0, 0)) { // onLoadMore(recyclerView); // } } private void onLoadMore(RecyclerView recyclerView) { state = STATE_LOADING; if (loadMoreView != null) { loadMoreView.loading(); } 
recyclerView.post(new Runnable() { @Override public void run() { if (state == STATE_LOADING) { if (superAdapter != null && loadMoreView != null) { superAdapter.addFooterView(loadMoreView); } } } }); if (listener != null) { listener.onLoadMore(); } } public void failed() { state = STATE_FAILURE; if (loadMoreView != null) { loadMoreView.failure(); } if (superAdapter != null) { superAdapter.removeFooterView(); } } public void complete() { state = STATE_COMPLETE; if (loadMoreView != null) { loadMoreView.complete(); } if (superAdapter != null) { superAdapter.removeFooterView(); } } public void attachToRecyclerView(RecyclerView recyclerView) { RecyclerView.Adapter adapter = recyclerView.getAdapter(); if (adapter instanceof SuperAdapter) { superAdapter = (SuperAdapter) adapter; } loadMoreView = new LoadMoreView(recyclerView.getContext()); layoutManager = (LinearLayoutManager) recyclerView.getLayoutManager(); recyclerView.addOnScrollListener(this); } public void detachFromRecyclerView(RecyclerView recyclerView) { superAdapter = null; listener = null; recyclerView.removeOnScrollListener(this); } }
apache-2.0
Popati/Android-BluetoothSPPLibrary-master
app/src/main/java/app/akexorcist/bluetoothspp/SetTemp.java
5184
package app.akexorcist.bluetoothspp; import android.app.Activity; import android.content.Context; import android.content.SharedPreferences; import android.os.Bundle; import android.view.Menu; import android.view.MenuItem; import android.view.View; import android.widget.EditText; import android.widget.Toast; public class SetTemp extends Activity { @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_set_temp); EditText mEdit11 = (EditText)findViewById(R.id.edit11); EditText mEdit12 = (EditText)findViewById(R.id.edit12); EditText mEdit21 = (EditText)findViewById(R.id.edit21); EditText mEdit22 = (EditText)findViewById(R.id.edit22); EditText mEdit31 = (EditText)findViewById(R.id.edit31); EditText mEdit32 = (EditText)findViewById(R.id.edit32); SharedPreferences sp = getSharedPreferences("TempNotification", Context.MODE_PRIVATE); float maxnode1=sp.getFloat("MaxNode1", 25); float minnode1=sp.getFloat("MinNode1", 20); float maxnode2=sp.getFloat("MaxNode2", 25); float minnode2=sp.getFloat("MinNode2", 20); float maxnode3=sp.getFloat("MaxNode3", 25); float minnode3=sp.getFloat("MinNode3", 20); mEdit11.setText(Float.toString(maxnode1)); mEdit12.setText(Float.toString(minnode1)); mEdit21.setText(Float.toString(maxnode2)); mEdit22.setText(Float.toString(minnode2)); mEdit31.setText(Float.toString(maxnode3)); mEdit32.setText(Float.toString(minnode3)); } @Override public boolean onCreateOptionsMenu(Menu menu) { // Inflate the menu; this adds items to the action bar if it is present. getMenuInflater().inflate(R.menu.menu_set_temp, menu); return true; } @Override public boolean onOptionsItemSelected(MenuItem item) { // Handle action bar item clicks here. The action bar will // automatically handle clicks on the Home/Up button, so long // as you specify a parent activity in AndroidManifest.xml. 
int id = item.getItemId(); //noinspection SimplifiableIfStatement if (id == R.id.action_settings) { return true; } return super.onOptionsItemSelected(item); } public void onclickSubmit(View view){ EditText mEdit11n = (EditText)findViewById(R.id.edit11); EditText mEdit12n = (EditText)findViewById(R.id.edit12); EditText mEdit21n = (EditText)findViewById(R.id.edit21); EditText mEdit22n = (EditText)findViewById(R.id.edit22); EditText mEdit31n = (EditText)findViewById(R.id.edit31); EditText mEdit32n = (EditText)findViewById(R.id.edit32); //Toast.makeText(this,mEdit11n.getText().toString(),Toast.LENGTH_SHORT).show(); if(mEdit11n.getText().toString().matches("") || mEdit12n.getText().toString().matches("") ||mEdit21n.getText().toString().matches("") || mEdit22n.getText().toString().matches("") ||mEdit31n.getText().toString().matches("") || mEdit32n.getText().toString().matches("") ){ Toast.makeText(this,"กรุณาใส่ข้อมูลให้ครบทุกช่อง",Toast.LENGTH_SHORT).show(); } if((Double.parseDouble(mEdit11n.getText().toString())) <= (Double.parseDouble(mEdit12n.getText().toString())) || (Double.parseDouble(mEdit21n.getText().toString())) <= (Double.parseDouble(mEdit22n.getText().toString())) || (Double.parseDouble(mEdit31n.getText().toString())) <= (Double.parseDouble(mEdit32n.getText().toString()))) { Toast.makeText(this,"กรุณาใส่ข้อมูลให้ถูกต้อง",Toast.LENGTH_SHORT).show(); } else { float maxnode11 = Float.parseFloat(mEdit11n.getText().toString()); float minnode11 = Float.parseFloat(mEdit12n.getText().toString()); float maxnode22 = Float.parseFloat(mEdit21n.getText().toString()); float minnode22 = Float.parseFloat(mEdit22n.getText().toString()); float maxnode33 = Float.parseFloat(mEdit31n.getText().toString()); float minnode33 = Float.parseFloat(mEdit32n.getText().toString()); SharedPreferences sp = getSharedPreferences("TempNotification", Context.MODE_PRIVATE); SharedPreferences.Editor editor = sp.edit(); editor.clear(); editor.putFloat("MaxNode1", maxnode11); 
editor.putFloat("MinNode1", minnode11); editor.putFloat("MaxNode2", maxnode22); editor.putFloat("MinNode2", minnode22); editor.putFloat("MaxNode3", maxnode33); editor.putFloat("MinNode3", minnode33); editor.commit(); Toast.makeText(getApplicationContext(), "บันทึกข้อมูลเรียบร้อย", Toast.LENGTH_SHORT).show(); finish(); } //Intent intent=new Intent(this,Home.class); //startActivity(intent); } }
apache-2.0
yuluo-ding/alluxio
keyvalue/client/src/main/java/alluxio/client/keyvalue/BaseKeyValueStoreReader.java
3612
/*
 * The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
 * (the "License"). You may not use this work except in compliance with the License, which is
 * available at www.apache.org/licenses/LICENSE-2.0
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
 * either express or implied, as more fully set forth in the License.
 *
 * See the NOTICE file distributed with this work for information regarding copyright ownership.
 */

package alluxio.client.keyvalue;

import alluxio.AlluxioURI;
import alluxio.client.file.FileSystemContext;
import alluxio.exception.AlluxioException;
import alluxio.thrift.PartitionInfo;
import alluxio.util.io.BufferUtils;

import com.google.common.base.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.util.List;

import javax.annotation.concurrent.NotThreadSafe;

/**
 * Default implementation of {@link KeyValueStoreReader} to access an Alluxio key-value store.
 *
 * <p>The partition list is fetched once from the master in the constructor; lookups then
 * binary-search the partitions (ordered by key range) and read from the matching partition.
 */
@NotThreadSafe
class BaseKeyValueStoreReader implements KeyValueStoreReader {
  private static final Logger LOG = LoggerFactory.getLogger(BaseKeyValueStoreReader.class);

  private final InetSocketAddress mMasterAddress = FileSystemContext.INSTANCE.getMasterAddress();
  private final KeyValueMasterClient mMasterClient;
  /** A list of partitions of the store. */
  private final List<PartitionInfo> mPartitions;

  /**
   * Constructs a {@link BaseKeyValueStoreReader} instance.
   *
   * @param uri URI of the key-value store
   * @throws IOException if non-Alluxio error occurs
   * @throws AlluxioException if Alluxio error occurs
   */
  BaseKeyValueStoreReader(AlluxioURI uri) throws IOException, AlluxioException {
    // TODO(binfan): use a thread pool to manage the client.
    LOG.info("Create KeyValueStoreReader for {}", uri);
    mMasterClient = new KeyValueMasterClient(mMasterAddress);
    try {
      mPartitions = mMasterClient.getPartitionInfo(uri);
    } finally {
      // BUG FIX: close the client even when getPartitionInfo throws; previously
      // the client leaked on failure.
      mMasterClient.close();
    }
  }

  @Override
  public void close() {
    // Nothing to release: the master client is already closed in the constructor.
  }

  @Override
  public byte[] get(byte[] key) throws IOException, AlluxioException {
    ByteBuffer value = get(ByteBuffer.wrap(key));
    if (value == null) {
      return null;
    }
    return BufferUtils.newByteArrayFromByteBuffer(value);
  }

  @Override
  public ByteBuffer get(ByteBuffer key) throws IOException, AlluxioException {
    Preconditions.checkNotNull(key, "key");
    // Binary search over partitions ordered by key range.
    int left = 0;
    int right = mPartitions.size();
    while (left < right) {
      // Overflow-safe midpoint (unsigned shift instead of (left + right) / 2).
      int middle = (left + right) >>> 1;
      PartitionInfo partition = mPartitions.get(middle);
      // NOTE: keyStart and keyLimit are both inclusive
      if (key.compareTo(partition.bufferForKeyStart()) < 0) {
        right = middle;
      } else if (key.compareTo(partition.bufferForKeyLimit()) > 0) {
        left = middle + 1;
      } else {
        // The key is either in this partition or not in the key-value store
        long blockId = partition.getBlockId();
        try (KeyValuePartitionReader reader = KeyValuePartitionReader.Factory.create(blockId)) {
          return reader.get(key);
        }
      }
    }
    return null;
  }

  @Override
  public KeyValueIterator iterator() throws IOException, AlluxioException {
    return new KeyValueStoreIterator(mPartitions);
  }

  @Override
  public int size() throws IOException, AlluxioException {
    int totalSize = 0;
    for (PartitionInfo partition : mPartitions) {
      totalSize += partition.getKeyCount();
    }
    return totalSize;
  }
}
apache-2.0
jmptrader/Strata
modules/pricer/src/main/java/com/opengamma/strata/pricer/bond/DiscountingCapitalIndexedBondTradePricer.java
32661
/** * Copyright (C) 2016 - present by OpenGamma Inc. and the OpenGamma group of companies * * Please see distribution for license. */ package com.opengamma.strata.pricer.bond; import java.time.LocalDate; import com.opengamma.strata.basics.ReferenceData; import com.opengamma.strata.basics.StandardId; import com.opengamma.strata.basics.currency.Currency; import com.opengamma.strata.basics.currency.CurrencyAmount; import com.opengamma.strata.basics.currency.MultiCurrencyAmount; import com.opengamma.strata.basics.currency.Payment; import com.opengamma.strata.collect.ArgChecker; import com.opengamma.strata.market.sensitivity.PointSensitivities; import com.opengamma.strata.market.sensitivity.PointSensitivityBuilder; import com.opengamma.strata.pricer.CompoundedRateType; import com.opengamma.strata.pricer.rate.RatesProvider; import com.opengamma.strata.product.bond.BondPaymentPeriod; import com.opengamma.strata.product.bond.CapitalIndexedBondPaymentPeriod; import com.opengamma.strata.product.bond.CapitalIndexedBondYieldConvention; import com.opengamma.strata.product.bond.KnownAmountBondPaymentPeriod; import com.opengamma.strata.product.bond.ResolvedCapitalIndexedBond; import com.opengamma.strata.product.bond.ResolvedCapitalIndexedBondTrade; /** * Pricer for for capital index bond trades. * <p> * This function provides the ability to price a {@link ResolvedCapitalIndexedBondTrade}. * * <h4>Price</h4> * Strata uses <i>decimal prices</i> for bonds in the trade model, pricers and market data. * For example, a price of 99.32% is represented in Strata by 0.9932. */ public class DiscountingCapitalIndexedBondTradePricer { /** * Default implementation. */ public static final DiscountingCapitalIndexedBondTradePricer DEFAULT = new DiscountingCapitalIndexedBondTradePricer(DiscountingCapitalIndexedBondProductPricer.DEFAULT); /** * Pricer for {@link ResolvedCapitalIndexedBond}. */ private final DiscountingCapitalIndexedBondProductPricer productPricer; /** * Creates an instance. 
* * @param productPricer pricer for {@link ResolvedCapitalIndexedBond} */ public DiscountingCapitalIndexedBondTradePricer(DiscountingCapitalIndexedBondProductPricer productPricer) { this.productPricer = ArgChecker.notNull(productPricer, "productPricer"); } //------------------------------------------------------------------------- /** * Calculates the present value of the bond trade. * <p> * The present value of the trade is the value on the valuation date. * The result is expressed using the payment currency of the bond. * <p> * Coupon payments of the underlying product are considered based on the settlement date of the trade. * * @param trade the trade * @param ratesProvider the rates provider, used to determine price index values * @param issuerDiscountFactorsProvider the discount factors provider * @param refData the reference data used to calculate the settlement date * @return the present value of the bond trade */ public CurrencyAmount presentValue( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider, LegalEntityDiscountingProvider issuerDiscountFactorsProvider, ReferenceData refData) { validate(ratesProvider, issuerDiscountFactorsProvider); LocalDate settlementDate = trade.getSettlementDate(); CurrencyAmount pvProduct = productPricer.presentValue(trade.getProduct(), ratesProvider, issuerDiscountFactorsProvider, settlementDate); return presentValueFromProductPresentValue(trade, ratesProvider, issuerDiscountFactorsProvider, pvProduct); } /** * Calculates the present value of the bond trade with z-spread. * <p> * The present value of the trade is the value on the valuation date. * The result is expressed using the payment currency of the bond. * <p> * The z-spread is a parallel shift applied to continuously compounded rates or periodic * compounded rates of the discounting curve. * <p> * Coupon payments of the underlying product are considered based on the settlement date of the trade. 
* * @param trade the trade * @param ratesProvider the rates provider, used to determine price index values * @param issuerDiscountFactorsProvider the discount factors provider * @param refData the reference data used to calculate the settlement date * @param zSpread the z-spread * @param compoundedRateType the compounded rate type * @param periodsPerYear the number of periods per year * @return the present value of the bond trade */ public CurrencyAmount presentValueWithZSpread( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider, LegalEntityDiscountingProvider issuerDiscountFactorsProvider, ReferenceData refData, double zSpread, CompoundedRateType compoundedRateType, int periodsPerYear) { validate(ratesProvider, issuerDiscountFactorsProvider); LocalDate settlementDate = trade.getSettlementDate(); CurrencyAmount pvProduct = productPricer.presentValueWithZSpread( trade.getProduct(), ratesProvider, issuerDiscountFactorsProvider, settlementDate, zSpread, compoundedRateType, periodsPerYear); return presentValueFromProductPresentValue(trade, ratesProvider, issuerDiscountFactorsProvider, pvProduct); } //------------------------------------------------------------------------- /** * Calculates the present value sensitivity of the bond trade. * <p> * The present value sensitivity of the trade is the sensitivity of the present value to * the underlying curves. * <p> * Coupon payments of the underlying product are considered based on the settlement date of the trade. 
* * @param trade the trade * @param ratesProvider the rates provider, used to determine price index values * @param issuerDiscountFactorsProvider the discount factors provider * @param refData the reference data used to calculate the settlement date * @return the present value sensitivity of the bond trade */ public PointSensitivities presentValueSensitivity( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider, LegalEntityDiscountingProvider issuerDiscountFactorsProvider, ReferenceData refData) { validate(ratesProvider, issuerDiscountFactorsProvider); LocalDate settlementDate = trade.getSettlementDate(); PointSensitivityBuilder productSensi = productPricer.presentValueSensitivity(trade.getProduct(), ratesProvider, issuerDiscountFactorsProvider, settlementDate); return presentValueSensitivityFromProductPresentValueSensitivity( trade, ratesProvider, issuerDiscountFactorsProvider, productSensi).build(); } /** * Calculates the present value sensitivity of the bond trade with z-spread. * <p> * The present value sensitivity of the trade is the sensitivity of the present value to * the underlying curves. * <p> * Coupon payments of the underlying product are considered based on the settlement date of the trade. 
* * @param trade the trade * @param ratesProvider the rates provider, used to determine price index values * @param issuerDiscountFactorsProvider the discount factors provider * @param refData the reference data used to calculate the settlement date * @param zSpread the z-spread * @param compoundedRateType the compounded rate type * @param periodsPerYear the number of periods per year * @return the present value sensitivity of the bond trade */ public PointSensitivities presentValueSensitivityWithZSpread( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider, LegalEntityDiscountingProvider issuerDiscountFactorsProvider, ReferenceData refData, double zSpread, CompoundedRateType compoundedRateType, int periodsPerYear) { validate(ratesProvider, issuerDiscountFactorsProvider); LocalDate settlementDate = trade.getSettlementDate(); PointSensitivityBuilder productSensi = productPricer.presentValueSensitivityWithZSpread(trade.getProduct(), ratesProvider, issuerDiscountFactorsProvider, settlementDate, zSpread, compoundedRateType, periodsPerYear); return presentValueSensitivityFromProductPresentValueSensitivity( trade, ratesProvider, issuerDiscountFactorsProvider, productSensi).build(); } //------------------------------------------------------------------------- /** * Calculates the present value of the bond trade from the clean price. * <p> * Since the sign of the settlement notional is opposite to that of the product, negative amount will be returned * for positive quantity of trade. 
* * @param trade the trade * @param ratesProvider the rates provider, used to determine price index values * @param refData the reference data used to calculate the settlement date * @param issuerDiscountFactorsProvider the discount factors provider * @param cleanRealPrice the clean real price * @return the present value of the settlement */ public CurrencyAmount presentValueFromCleanPrice( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider, LegalEntityDiscountingProvider issuerDiscountFactorsProvider, ReferenceData refData, double cleanRealPrice) { validate(ratesProvider, issuerDiscountFactorsProvider); ResolvedCapitalIndexedBond bond = trade.getProduct(); LocalDate standardSettlementDate = bond.calculateSettlementDateFromValuation(ratesProvider.getValuationDate(), refData); LocalDate tradeSettlementDate = trade.getSettlementDate(); StandardId legalEntityId = bond.getLegalEntityId(); Currency currency = bond.getCurrency(); double df = issuerDiscountFactorsProvider .repoCurveDiscountFactors(bond.getSecurityId(), legalEntityId, currency).discountFactor(standardSettlementDate); CurrencyAmount pvStandard = forecastValueStandardFromCleanPrice( bond, ratesProvider, standardSettlementDate, cleanRealPrice).multipliedBy(df); if (standardSettlementDate.isEqual(tradeSettlementDate)) { return presentValueFromProductPresentValue(trade, ratesProvider, issuerDiscountFactorsProvider, pvStandard); } // check coupon payment between two settlement dates IssuerCurveDiscountFactors discountFactors = issuerDiscountFactorsProvider.issuerCurveDiscountFactors(legalEntityId, currency); double pvDiff = 0d; if (standardSettlementDate.isAfter(tradeSettlementDate)) { pvDiff = -productPricer.presentValueCoupon( bond, ratesProvider, discountFactors, tradeSettlementDate, standardSettlementDate); } else { pvDiff = productPricer.presentValueCoupon( bond, ratesProvider, discountFactors, standardSettlementDate, tradeSettlementDate); } return presentValueFromProductPresentValue( trade, 
ratesProvider, issuerDiscountFactorsProvider, pvStandard.plus(pvDiff)); } /** * Calculates the present value of the settlement of the bond trade from the clean price with z-spread. * <p> * Since the sign of the settlement notional is opposite to that of the product, negative amount will be returned * for positive quantity of trade. * <p> * The z-spread is a parallel shift applied to continuously compounded rates or periodic * compounded rates of the discounting curve. * * @param trade the trade * @param ratesProvider the rates provider, used to determine price index values * @param issuerDiscountFactorsProvider the discount factors provider * @param refData the reference data used to calculate the settlement date * @param zSpread the z-spread * @param compoundedRateType the compounded rate type * @param periodsPerYear the number of periods per year * @param cleanRealPrice the clean real price * @return the present value of the settlement */ public CurrencyAmount presentValueFromCleanPriceWithZSpread( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider, LegalEntityDiscountingProvider issuerDiscountFactorsProvider, ReferenceData refData, double cleanRealPrice, double zSpread, CompoundedRateType compoundedRateType, int periodsPerYear) { validate(ratesProvider, issuerDiscountFactorsProvider); ResolvedCapitalIndexedBond bond = trade.getProduct(); LocalDate standardSettlementDate = bond.calculateSettlementDateFromValuation(ratesProvider.getValuationDate(), refData); LocalDate tradeSettlementDate = trade.getSettlementDate(); StandardId legalEntityId = bond.getLegalEntityId(); Currency currency = bond.getCurrency(); double df = issuerDiscountFactorsProvider .repoCurveDiscountFactors(bond.getSecurityId(), legalEntityId, currency).discountFactor(standardSettlementDate); CurrencyAmount pvStandard = forecastValueStandardFromCleanPrice( bond, ratesProvider, standardSettlementDate, cleanRealPrice).multipliedBy(df); if 
(standardSettlementDate.isEqual(tradeSettlementDate)) { return presentValueFromProductPresentValue(trade, ratesProvider, issuerDiscountFactorsProvider, pvStandard); } // check coupon payment between two settlement dates IssuerCurveDiscountFactors discountFactors = issuerDiscountFactorsProvider.issuerCurveDiscountFactors(legalEntityId, currency); double pvDiff = 0d; if (standardSettlementDate.isAfter(tradeSettlementDate)) { pvDiff = -productPricer.presentValueCouponWithZSpread( bond, ratesProvider, discountFactors, tradeSettlementDate, standardSettlementDate, zSpread, compoundedRateType, periodsPerYear); } else { pvDiff = productPricer.presentValueCouponWithZSpread( bond, ratesProvider, discountFactors, standardSettlementDate, tradeSettlementDate, zSpread, compoundedRateType, periodsPerYear); } return presentValueFromProductPresentValue( trade, ratesProvider, issuerDiscountFactorsProvider, pvStandard.plus(pvDiff)); } //------------------------------------------------------------------------- /** * Calculates the present value sensitivity of the settlement of the bond trade from the real clean price. * <p> * The present value sensitivity of the settlement is the sensitivity of the present value to * the underlying curves. 
* * @param trade the trade * @param ratesProvider the rates provider, used to determine price index values * @param refData the reference data used to calculate the settlement date * @param issuerDiscountFactorsProvider the discount factors provider * @param cleanRealPrice the clean real price * @return the present value sensitivity of the settlement */ public PointSensitivities presentValueSensitivityFromCleanPrice( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider, LegalEntityDiscountingProvider issuerDiscountFactorsProvider, ReferenceData refData, double cleanRealPrice) { validate(ratesProvider, issuerDiscountFactorsProvider); ResolvedCapitalIndexedBond bond = trade.getProduct(); LocalDate standardSettlementDate = bond.calculateSettlementDateFromValuation(ratesProvider.getValuationDate(), refData); LocalDate tradeSettlementDate = trade.getSettlementDate(); StandardId legalEntityId = bond.getLegalEntityId(); Currency currency = bond.getCurrency(); RepoCurveDiscountFactors repoDiscountFactors = issuerDiscountFactorsProvider.repoCurveDiscountFactors(bond.getSecurityId(), legalEntityId, currency); double df = repoDiscountFactors.discountFactor(standardSettlementDate); PointSensitivityBuilder dfSensi = repoDiscountFactors.zeroRatePointSensitivity(standardSettlementDate); PointSensitivityBuilder pvSensiStandard = forecastValueSensitivityStandardFromCleanPrice(bond, ratesProvider, standardSettlementDate, cleanRealPrice).multipliedBy(df).combinedWith(dfSensi.multipliedBy( forecastValueStandardFromCleanPrice(bond, ratesProvider, standardSettlementDate, cleanRealPrice) .getAmount())); if (standardSettlementDate.isEqual(tradeSettlementDate)) { return presentValueSensitivityFromProductPresentValueSensitivity( trade, ratesProvider, issuerDiscountFactorsProvider, pvSensiStandard).build(); } // check coupon payment between two settlement dates IssuerCurveDiscountFactors issuerDiscountFactors = 
issuerDiscountFactorsProvider.issuerCurveDiscountFactors(legalEntityId, currency); PointSensitivityBuilder pvSensiDiff = PointSensitivityBuilder.none(); if (standardSettlementDate.isAfter(tradeSettlementDate)) { pvSensiDiff = pvSensiDiff.combinedWith(productPricer.presentValueSensitivityCoupon(bond, ratesProvider, issuerDiscountFactors, tradeSettlementDate, standardSettlementDate).multipliedBy(-1d)); } else { pvSensiDiff = pvSensiDiff.combinedWith(productPricer.presentValueSensitivityCoupon(bond, ratesProvider, issuerDiscountFactors, standardSettlementDate, tradeSettlementDate)); } return presentValueSensitivityFromProductPresentValueSensitivity( trade, ratesProvider, issuerDiscountFactorsProvider, pvSensiStandard.combinedWith(pvSensiDiff)).build(); } /** * Calculates the present value sensitivity of the settlement of the bond trade from the real clean price * with z-spread. * <p> * The present value sensitivity of the settlement is the sensitivity of the present value to * the underlying curves. 
* * @param trade the trade * @param ratesProvider the rates provider, used to determine price index values * @param refData the reference data used to calculate the settlement date * @param issuerDiscountFactorsProvider the discount factors provider * @param zSpread the z-spread * @param compoundedRateType the compounded rate type * @param periodsPerYear the number of periods per year * @param cleanRealPrice the clean real price * @return the present value sensitivity of the settlement */ public PointSensitivities presentValueSensitivityFromCleanPriceWithZSpread( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider, LegalEntityDiscountingProvider issuerDiscountFactorsProvider, ReferenceData refData, double cleanRealPrice, double zSpread, CompoundedRateType compoundedRateType, int periodsPerYear) { validate(ratesProvider, issuerDiscountFactorsProvider); ResolvedCapitalIndexedBond bond = trade.getProduct(); LocalDate standardSettlementDate = bond.calculateSettlementDateFromValuation(ratesProvider.getValuationDate(), refData); LocalDate tradeSettlementDate = trade.getSettlementDate(); StandardId legalEntityId = bond.getLegalEntityId(); Currency currency = bond.getCurrency(); RepoCurveDiscountFactors repoDiscountFactors = issuerDiscountFactorsProvider.repoCurveDiscountFactors(bond.getSecurityId(), legalEntityId, currency); double df = repoDiscountFactors.discountFactor(standardSettlementDate); PointSensitivityBuilder dfSensi = repoDiscountFactors.zeroRatePointSensitivity(standardSettlementDate); PointSensitivityBuilder pvSensiStandard = forecastValueSensitivityStandardFromCleanPrice(bond, ratesProvider, standardSettlementDate, cleanRealPrice).multipliedBy(df).combinedWith(dfSensi.multipliedBy( forecastValueStandardFromCleanPrice(bond, ratesProvider, standardSettlementDate, cleanRealPrice) .getAmount())); if (standardSettlementDate.isEqual(tradeSettlementDate)) { return presentValueSensitivityFromProductPresentValueSensitivity( trade, ratesProvider, 
issuerDiscountFactorsProvider, pvSensiStandard).build(); } // check coupon payment between two settlement dates IssuerCurveDiscountFactors issuerDiscountFactors = issuerDiscountFactorsProvider.issuerCurveDiscountFactors(legalEntityId, currency); PointSensitivityBuilder pvSensiDiff = PointSensitivityBuilder.none(); if (standardSettlementDate.isAfter(tradeSettlementDate)) { pvSensiDiff = pvSensiDiff.combinedWith(productPricer.presentValueSensitivityCouponWithZSpread( bond, ratesProvider, issuerDiscountFactors, tradeSettlementDate, standardSettlementDate, zSpread, compoundedRateType, periodsPerYear) .multipliedBy(-1d)); } else { pvSensiDiff = pvSensiDiff.combinedWith(productPricer.presentValueSensitivityCouponWithZSpread( bond, ratesProvider, issuerDiscountFactors, standardSettlementDate, tradeSettlementDate, zSpread, compoundedRateType, periodsPerYear)); } return presentValueSensitivityFromProductPresentValueSensitivity( trade, ratesProvider, issuerDiscountFactorsProvider, pvSensiStandard.combinedWith(pvSensiDiff)).build(); } //------------------------------------------------------------------------- /** * Calculates the currency exposure of the bond trade. * * @param trade the trade * @param ratesProvider the rates provider, used to determine price index values * @param issuerDiscountFactorsProvider the discount factors provider * @param refData the reference data used to calculate the settlement date * @param cleanRealPrice the clean real price * @return the currency exposure of the trade */ public MultiCurrencyAmount currencyExposureFromCleanPrice( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider, LegalEntityDiscountingProvider issuerDiscountFactorsProvider, ReferenceData refData, double cleanRealPrice) { CurrencyAmount pv = presentValueFromCleanPrice( trade, ratesProvider, issuerDiscountFactorsProvider, refData, cleanRealPrice); return MultiCurrencyAmount.of(pv); } /** * Calculates the currency exposure of the bond trade. 
* * @param trade the trade * @param ratesProvider the rates provider, used to determine price index values * @param issuerDiscountFactorsProvider the discount factors provider * @param refData the reference data used to calculate the settlement date * @return the currency exposure of the trade */ public MultiCurrencyAmount currencyExposure( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider, LegalEntityDiscountingProvider issuerDiscountFactorsProvider, ReferenceData refData) { CurrencyAmount pv = presentValue(trade, ratesProvider, issuerDiscountFactorsProvider, refData); return MultiCurrencyAmount.of(pv); } /** * Calculates the currency exposure of the bond trade with z-spread. * * @param trade the trade * @param ratesProvider the rates provider, used to determine price index values * @param issuerDiscountFactorsProvider the discount factors provider * @param refData the reference data used to calculate the settlement date * @param zSpread the z-spread * @param compoundedRateType the compounded rate type * @param periodsPerYear the number of periods per year * @param cleanRealPrice the clean real price * @return the currency exposure of the trade */ public MultiCurrencyAmount currencyExposureFromCleanPriceWithZSpread( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider, LegalEntityDiscountingProvider issuerDiscountFactorsProvider, ReferenceData refData, double cleanRealPrice, double zSpread, CompoundedRateType compoundedRateType, int periodsPerYear) { CurrencyAmount pv = presentValueFromCleanPriceWithZSpread( trade, ratesProvider, issuerDiscountFactorsProvider, refData, cleanRealPrice, zSpread, compoundedRateType, periodsPerYear); return MultiCurrencyAmount.of(pv); } /** * Calculates the currency exposure of the bond trade with z-spread. 
* * @param trade the trade * @param ratesProvider the rates provider, used to determine price index values * @param issuerDiscountFactorsProvider the discount factors provider * @param refData the reference data used to calculate the settlement date * @param zSpread the z-spread * @param compoundedRateType the compounded rate type * @param periodsPerYear the number of periods per year * @return the currency exposure of the trade */ public MultiCurrencyAmount currencyExposureWithZSpread( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider, LegalEntityDiscountingProvider issuerDiscountFactorsProvider, ReferenceData refData, double zSpread, CompoundedRateType compoundedRateType, int periodsPerYear) { CurrencyAmount pv = presentValueWithZSpread( trade, ratesProvider, issuerDiscountFactorsProvider, refData, zSpread, compoundedRateType, periodsPerYear); return MultiCurrencyAmount.of(pv); } /** * Calculates the current of the bond trade. * * @param trade the trade * @param ratesProvider the rates provider, used to determine price index values * @return the current cash */ public CurrencyAmount currentCash( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider) { LocalDate valuationDate = ratesProvider.getValuationDate(); LocalDate settlementDate = trade.getSettlementDate(); BondPaymentPeriod settle = trade.getSettlement(); CurrencyAmount cashProduct = productPricer.currentCash(trade.getProduct(), ratesProvider, settlementDate); double cashSettle = settle.getPaymentDate().isEqual(valuationDate) ? netAmount(trade, ratesProvider).getAmount() : 0d; return cashProduct.plus(cashSettle); } //------------------------------------------------------------------------- /** * Calculates the net amount of the settlement of the bond trade. * <p> * Since the sign of the settlement notional is opposite to that of the product, negative amount will be returned * for positive quantity of trade. 
* * @param trade the trade * @param ratesProvider the rates provider, used to determine price index values * @return the net amount */ public CurrencyAmount netAmount( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider) { BondPaymentPeriod settlement = trade.getSettlement(); if (settlement instanceof KnownAmountBondPaymentPeriod) { Payment payment = ((KnownAmountBondPaymentPeriod) settlement).getPayment(); return payment.getValue(); } else if (settlement instanceof CapitalIndexedBondPaymentPeriod) { CapitalIndexedBondPaymentPeriod casted = (CapitalIndexedBondPaymentPeriod) settlement; double netAmount = productPricer.getPeriodPricer().forecastValue(casted, ratesProvider); return CurrencyAmount.of(casted.getCurrency(), netAmount); } throw new UnsupportedOperationException("unsupported settlement type"); } //------------------------------------------------------------------------- private CurrencyAmount presentValueSettlement( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider, LegalEntityDiscountingProvider issuerDiscountFactorsProvider) { BondPaymentPeriod settlement = trade.getSettlement(); ResolvedCapitalIndexedBond product = trade.getProduct(); RepoCurveDiscountFactors discountFactors = issuerDiscountFactorsProvider.repoCurveDiscountFactors( product.getSecurityId(), product.getLegalEntityId(), product.getCurrency()); return netAmount(trade, ratesProvider).multipliedBy(discountFactors.discountFactor(settlement.getPaymentDate())); } private CurrencyAmount presentValueFromProductPresentValue( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider, LegalEntityDiscountingProvider issuerDiscountFactorsProvider, CurrencyAmount productPresentValue) { CurrencyAmount pvProduct = productPresentValue.multipliedBy(trade.getQuantity()); CurrencyAmount pvPayment = presentValueSettlement(trade, ratesProvider, issuerDiscountFactorsProvider); return pvProduct.plus(pvPayment); } CurrencyAmount forecastValueStandardFromCleanPrice( 
ResolvedCapitalIndexedBond product, RatesProvider ratesProvider, LocalDate standardSettlementDate, double realCleanPrice) { double notional = product.getNotional(); double netAmountReal = realCleanPrice * notional + product.accruedInterest(standardSettlementDate); double indexRatio = product.getYieldConvention().equals(CapitalIndexedBondYieldConvention.GB_IL_FLOAT) ? 1d : productPricer.indexRatio(product, ratesProvider, standardSettlementDate); return CurrencyAmount.of(product.getCurrency(), indexRatio * netAmountReal); } //------------------------------------------------------------------------- private PointSensitivityBuilder netAmountSensitivity( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider) { BondPaymentPeriod settlement = trade.getSettlement(); if (settlement instanceof KnownAmountBondPaymentPeriod) { return PointSensitivityBuilder.none(); } else if (settlement instanceof CapitalIndexedBondPaymentPeriod) { CapitalIndexedBondPaymentPeriod casted = (CapitalIndexedBondPaymentPeriod) settlement; return productPricer.getPeriodPricer().forecastValueSensitivity(casted, ratesProvider); } throw new UnsupportedOperationException("unsupported settlement type"); } private PointSensitivityBuilder presentValueSensitivitySettlement( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider, LegalEntityDiscountingProvider issuerDiscountFactorsProvider) { BondPaymentPeriod settlement = trade.getSettlement(); ResolvedCapitalIndexedBond product = trade.getProduct(); RepoCurveDiscountFactors discountFactors = issuerDiscountFactorsProvider.repoCurveDiscountFactors( product.getSecurityId(), product.getLegalEntityId(), product.getCurrency()); double df = discountFactors.discountFactor(settlement.getPaymentDate()); double netAmount = netAmount(trade, ratesProvider).getAmount(); PointSensitivityBuilder dfSensi = discountFactors.zeroRatePointSensitivity(settlement.getPaymentDate()).multipliedBy(netAmount); PointSensitivityBuilder naSensi = 
netAmountSensitivity(trade, ratesProvider).multipliedBy(df); return dfSensi.combinedWith(naSensi); } private PointSensitivityBuilder presentValueSensitivityFromProductPresentValueSensitivity( ResolvedCapitalIndexedBondTrade trade, RatesProvider ratesProvider, LegalEntityDiscountingProvider issuerDiscountFactorsProvider, PointSensitivityBuilder productPresnetValueSensitivity) { PointSensitivityBuilder sensiProduct = productPresnetValueSensitivity.multipliedBy(trade.getQuantity()); PointSensitivityBuilder sensiPayment = presentValueSensitivitySettlement(trade, ratesProvider, issuerDiscountFactorsProvider); return sensiProduct.combinedWith(sensiPayment); } PointSensitivityBuilder forecastValueSensitivityStandardFromCleanPrice( ResolvedCapitalIndexedBond product, RatesProvider ratesProvider, LocalDate standardSettlementDate, double realCleanPrice) { if (product.getYieldConvention().equals(CapitalIndexedBondYieldConvention.GB_IL_FLOAT)) { return PointSensitivityBuilder.none(); } double notional = product.getNotional(); double netAmountReal = realCleanPrice * notional + product.accruedInterest(standardSettlementDate); PointSensitivityBuilder indexRatioSensi = productPricer.indexRatioSensitivity(product, ratesProvider, standardSettlementDate); return indexRatioSensi.multipliedBy(netAmountReal); } //------------------------------------------------------------------------- private void validate(RatesProvider ratesProvider, LegalEntityDiscountingProvider issuerDiscountFactorsProvider) { ArgChecker.isTrue(ratesProvider.getValuationDate().isEqual(issuerDiscountFactorsProvider.getValuationDate()), "the rates providers should be for the same date"); } }
apache-2.0
LegNeato/buck
src/com/facebook/buck/ide/intellij/lang/java/JavaTestModuleRule.java
3637
/* * Copyright 2015-present Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ package com.facebook.buck.ide.intellij.lang.java; import com.facebook.buck.core.model.targetgraph.DescriptionWithTargetGraph; import com.facebook.buck.core.model.targetgraph.TargetNode; import com.facebook.buck.core.sourcepath.SourcePath; import com.facebook.buck.ide.intellij.BaseIjModuleRule; import com.facebook.buck.ide.intellij.ModuleBuildContext; import com.facebook.buck.ide.intellij.model.IjModuleFactoryResolver; import com.facebook.buck.ide.intellij.model.IjModuleType; import com.facebook.buck.ide.intellij.model.IjProjectConfig; import com.facebook.buck.ide.intellij.model.folders.IjResourceFolderType; import com.facebook.buck.io.filesystem.ProjectFilesystem; import com.facebook.buck.jvm.core.JavaPackageFinder; import com.facebook.buck.jvm.java.JavaTestDescription; import com.google.common.collect.ImmutableMultimap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.ImmutableSortedSet; import java.nio.file.Path; import java.util.Optional; public class JavaTestModuleRule extends BaseIjModuleRule<JavaTestDescription.CoreArg> { private final JavaPackageFinder packageFinder; public JavaTestModuleRule( ProjectFilesystem projectFilesystem, IjModuleFactoryResolver moduleFactoryResolver, IjProjectConfig projectConfig, JavaPackageFinder packageFinder) { super(projectFilesystem, moduleFactoryResolver, projectConfig); this.packageFinder = packageFinder; } @Override 
public Class<? extends DescriptionWithTargetGraph<?>> getDescriptionClass() { return JavaTestDescription.class; } @Override public void apply(TargetNode<JavaTestDescription.CoreArg, ?> target, ModuleBuildContext context) { Optional<Path> presetResourcesRoot = target.getConstructorArg().getResourcesRoot(); ImmutableSortedSet<SourcePath> resources = target.getConstructorArg().getResources(); ImmutableSet<Path> resourcePaths; if (presetResourcesRoot.isPresent()) { resourcePaths = getResourcePaths(target.getConstructorArg().getResources(), presetResourcesRoot.get()); addResourceFolders( IjResourceFolderType.JAVA_TEST_RESOURCE, resourcePaths, presetResourcesRoot.get(), context); } else { resourcePaths = getResourcePaths(resources); ImmutableMultimap<Path, Path> resourcesRootsToResources = getResourcesRootsToResources(packageFinder, resourcePaths); for (Path resourcesRoot : resourcesRootsToResources.keySet()) { addResourceFolders( IjResourceFolderType.JAVA_TEST_RESOURCE, resourcesRootsToResources.get(resourcesRoot), resourcesRoot, context); } } addDepsAndTestSources(target, true /* wantsPackagePrefix */, context, resourcePaths); JavaLibraryRuleHelper.addCompiledShadowIfNeeded(projectConfig, target, context); } @Override public IjModuleType detectModuleType(TargetNode<JavaTestDescription.CoreArg, ?> targetNode) { return IjModuleType.JAVA_MODULE; } }
apache-2.0
perezd/bazel
src/test/java/com/google/devtools/build/lib/profiler/ProfilerTest.java
29626
// Copyright 2015 The Bazel Authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package com.google.devtools.build.lib.profiler; import static com.google.common.collect.ImmutableList.toImmutableList; import static com.google.common.collect.MoreCollectors.onlyElement; import static com.google.common.truth.Truth.assertThat; import static com.google.devtools.build.lib.profiler.Profiler.Format.JSON_TRACE_FILE_FORMAT; import static org.junit.Assert.assertThrows; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; import com.google.devtools.build.lib.bugreport.BugReporter; import com.google.devtools.build.lib.clock.BlazeClock; import com.google.devtools.build.lib.clock.Clock; import com.google.devtools.build.lib.clock.JavaClock; import com.google.devtools.build.lib.profiler.Profiler.SlowTask; import com.google.devtools.build.lib.testutil.ManualClock; import com.google.devtools.build.lib.testutil.TestUtils; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.OutputStream; import java.time.Duration; import java.util.ArrayList; import java.util.List; import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import 
org.junit.runners.JUnit4; /** Unit tests for the profiler. */ @RunWith(JUnit4.class) public final class ProfilerTest { private final Profiler profiler = Profiler.instance(); private final ManualClock clock = new ManualClock(); @Before public void setManualClock() { BlazeClock.setClock(clock); } @AfterClass public static void resetBlazeClock() { BlazeClock.setClock(new JavaClock()); } @After public void forceStopToAvoidPoisoningTheProfiler() { // If a test does not stop the profiler, e.g., due to a test failure, all subsequent tests fail // because the profiler is still running, so we force-stop the profiler here. try { profiler.stop(); } catch (IOException e) { throw new RuntimeException(e); } } private static ImmutableSet<ProfilerTask> getAllProfilerTasks() { return ImmutableSet.copyOf(ProfilerTask.values()); } private static ImmutableSet<ProfilerTask> getSlowestProfilerTasks() { ImmutableSet.Builder<ProfilerTask> profiledTasksBuilder = ImmutableSet.builder(); for (ProfilerTask profilerTask : ProfilerTask.values()) { if (profilerTask.collectsSlowestInstances()) { profiledTasksBuilder.add(profilerTask); } } return profiledTasksBuilder.build(); } private ByteArrayOutputStream start(ImmutableSet<ProfilerTask> tasks, Profiler.Format format) throws IOException { ByteArrayOutputStream buffer = new ByteArrayOutputStream(); profiler.start( tasks, buffer, format, "dummy_output_base", UUID.randomUUID(), false, BlazeClock.instance(), BlazeClock.nanoTime(), /*slimProfile=*/ false, /*includePrimaryOutput=*/ false, /*includeTargetLabel=*/ false, /*collectTaskHistograms=*/ true, BugReporter.defaultInstance()); return buffer; } private void startUnbuffered(ImmutableSet<ProfilerTask> tasks) throws IOException { profiler.start( tasks, null, null, "dummy_output_base", UUID.randomUUID(), false, BlazeClock.instance(), BlazeClock.nanoTime(), /*slimProfile=*/ false, /*includePrimaryOutput=*/ false, /*includeTargetLabel=*/ false, /*collectTaskHistograms=*/ true, 
BugReporter.defaultInstance()); } @Test public void testProfilerActivation() throws Exception { assertThat(profiler.isActive()).isFalse(); start(getAllProfilerTasks(), JSON_TRACE_FILE_FORMAT); assertThat(profiler.isActive()).isTrue(); profiler.stop(); assertThat(profiler.isActive()).isFalse(); } @Test public void testProfiler() throws Exception { ByteArrayOutputStream buffer = start(getAllProfilerTasks(), JSON_TRACE_FILE_FORMAT); profiler.logSimpleTask(BlazeClock.instance().nanoTime(), ProfilerTask.PHASE, "profiler start"); try (SilentCloseable c = profiler.profile(ProfilerTask.ACTION, "complex task")) { profiler.logEvent(ProfilerTask.PHASE, "event1"); try (SilentCloseable c2 = profiler.profile(ProfilerTask.ACTION_CHECK, "complex subtask")) { // next task takes less than 10 ms and should be only aggregated profiler.logSimpleTask(BlazeClock.instance().nanoTime(), ProfilerTask.VFS_STAT, "stat1"); long startTime = BlazeClock.instance().nanoTime(); clock.advanceMillis(20); // this one will take at least 20 ms and should be present profiler.logSimpleTask(startTime, ProfilerTask.VFS_STAT, "stat2"); } } profiler.stop(); // all other calls to profiler should be ignored profiler.logEvent(ProfilerTask.PHASE, "should be ignored"); JsonProfile jsonProfile = new JsonProfile(new ByteArrayInputStream(buffer.toByteArray())); assertThat(removeUsageEvents(jsonProfile.getTraceEvents())) .hasSize( 2 /* thread names */ + 2 /* thread indices */ + 2 /* build phase marker */ + 1 /* VFS event, the first is too short */ + 2 /* action + action dependency checking */ + 1 /* action counters */ + 1 /* finishing */); assertThat( jsonProfile.getTraceEvents().stream() .filter(traceEvent -> "thread_name".equals(traceEvent.name())) .collect(Collectors.toList())) .hasSize(2); assertThat( jsonProfile.getTraceEvents().stream() .filter(traceEvent -> "thread_sort_index".equals(traceEvent.name())) .collect(Collectors.toList())) .hasSize(2); assertThat( jsonProfile.getTraceEvents().stream() 
.filter(traceEvent -> ProfilerTask.PHASE.description.equals(traceEvent.category())) .collect(Collectors.toList())) .hasSize(2); TraceEvent vfsStat = Iterables.getOnlyElement( jsonProfile.getTraceEvents().stream() .filter( traceEvent -> ProfilerTask.VFS_STAT.description.equals(traceEvent.category())) .collect(Collectors.toList())); assertThat(vfsStat.duration()).isEqualTo(Duration.ofMillis(20)); assertThat( jsonProfile.getTraceEvents().stream() .filter( traceEvent -> traceEvent.category() != null && traceEvent.category().startsWith("action")) .collect(Collectors.toList())) .hasSize(2); assertThat(Iterables.filter(jsonProfile.getTraceEvents(), t -> t.name().equals("action count"))) .hasSize(1); assertThat( jsonProfile.getTraceEvents().stream() .filter(traceEvent -> "Finishing".equals(traceEvent.name())) .collect(Collectors.toList())) .hasSize(1); } @Test public void testProfilerRecordingAllEvents() throws Exception { ByteArrayOutputStream buffer = new ByteArrayOutputStream(); profiler.start( getAllProfilerTasks(), buffer, JSON_TRACE_FILE_FORMAT, "dummy_output_base", UUID.randomUUID(), true, clock, clock.nanoTime(), /*slimProfile=*/ false, /*includePrimaryOutput=*/ false, /*includeTargetLabel=*/ false, /*collectTaskHistograms=*/ true, BugReporter.defaultInstance()); try (SilentCloseable c = profiler.profile(ProfilerTask.ACTION, "action task")) { // Next task takes less than 10 ms but should be recorded anyway. 
long before = clock.nanoTime(); clock.advanceMillis(1); profiler.logSimpleTask(before, ProfilerTask.VFS_STAT, "stat1"); } profiler.stop(); JsonProfile jsonProfile = new JsonProfile(new ByteArrayInputStream(buffer.toByteArray())); assertThat(jsonProfile.getTraceEvents()) .hasSize( 2 /* thread names */ + 2 /* thread sort indices */ + 1 /* VFS */ + 1 /* action */ + 1 /* action counters */ + 1 /* finishing */); TraceEvent vfsStat = Iterables.getOnlyElement( jsonProfile.getTraceEvents().stream() .filter( traceEvent -> ProfilerTask.VFS_STAT.description.equals(traceEvent.category())) .collect(Collectors.toList())); assertThat(vfsStat.duration().toMillis()).isLessThan(ProfilerTask.VFS_STAT.minDuration); assertThat(Iterables.filter(jsonProfile.getTraceEvents(), t -> t.name().equals("action count"))) .hasSize(1); } @Test public void testProfilerRecordingOnlySlowestEvents() throws Exception { ByteArrayOutputStream buffer = new ByteArrayOutputStream(); profiler.start( getSlowestProfilerTasks(), buffer, JSON_TRACE_FILE_FORMAT, "dummy_output_base", UUID.randomUUID(), true, BlazeClock.instance(), BlazeClock.instance().nanoTime(), /*slimProfile=*/ false, /*includePrimaryOutput=*/ false, /*includeTargetLabel=*/ false, /*collectTaskHistograms=*/ true, BugReporter.defaultInstance()); profiler.logSimpleTask(10000, 20000, ProfilerTask.VFS_STAT, "stat"); // Unlike the VFS_STAT event above, the remote execution event will not be recorded since we // don't record the slowest remote exec events (see ProfilerTask.java). 
profiler.logSimpleTask(20000, 30000, ProfilerTask.REMOTE_EXECUTION, "remote execution"); assertThat(profiler.isProfiling(ProfilerTask.VFS_STAT)).isTrue(); assertThat(profiler.isProfiling(ProfilerTask.REMOTE_EXECUTION)).isFalse(); profiler.stop(); JsonProfile jsonProfile = new JsonProfile(new ByteArrayInputStream(buffer.toByteArray())); assertThat(jsonProfile.getTraceEvents()) .hasSize(2 /*threads */ + 2 /*threads sort index */ + 1 /*VFS */); assertThat( jsonProfile.getTraceEvents().stream() .filter( traceEvent -> !"thread_name".equals(traceEvent.name()) && !"thread_sort_index".equals(traceEvent.name())) .collect(Collectors.toList())) .hasSize(1); } @Test public void testSlowestTasks() throws Exception { startUnbuffered(getAllProfilerTasks()); profiler.logSimpleTaskDuration( Profiler.nanoTimeMaybe(), Duration.ofSeconds(10), ProfilerTask.LOCAL_PARSE, "foo"); Iterable<SlowTask> slowestTasks = profiler.getSlowestTasks(); assertThat(slowestTasks).hasSize(1); SlowTask task = slowestTasks.iterator().next(); assertThat(task.type).isEqualTo(ProfilerTask.LOCAL_PARSE); profiler.stop(); } @Test public void testGetSlowestTasksCapped() throws Exception { startUnbuffered(getSlowestProfilerTasks()); // Add some fast tasks - these shouldn't show up in the slowest. for (int i = 0; i < 30; i++) { profiler.logSimpleTask( /*startTimeNanos=*/ 1, /*stopTimeNanos=*/ ProfilerTask.VFS_STAT.minDuration + 10, ProfilerTask.VFS_STAT, "stat"); } // Add some slow tasks we expect to show up in the slowest. List<Long> expectedSlowestDurations = new ArrayList<>(); for (int i = 0; i < 30; i++) { long fakeDuration = ProfilerTask.VFS_STAT.minDuration + i + 10_000; profiler.logSimpleTask( /*startTimeNanos=*/ 1, /*stopTimeNanos=*/ fakeDuration + 1, ProfilerTask.VFS_STAT, "stat"); expectedSlowestDurations.add(fakeDuration); } // Sprinkle in a whole bunch of fast tasks from different thread ids - necessary because // internally aggregation is sharded across several aggregators, sharded by thread id. 
// It's possible all these threads wind up in the same shard, we'll take our chances. ImmutableList.Builder<Thread> threadsBuilder = ImmutableList.builder(); try { for (int i = 0; i < 32; i++) { Thread thread = new Thread( () -> { for (int j = 0; j < 100; j++) { profiler.logSimpleTask( /*startTimeNanos=*/ 1, /*stopTimeNanos=*/ ProfilerTask.VFS_STAT.minDuration + j + 1, ProfilerTask.VFS_STAT, "stat"); } }); threadsBuilder.add(thread); thread.start(); } } finally { threadsBuilder.build().forEach( t -> { try { t.join(TestUtils.WAIT_TIMEOUT_MILLISECONDS); } catch (InterruptedException e) { t.interrupt(); // This'll go ahead and interrupt all the others. The thread we just interrupted is // lightweight enough that it's reasonable to assume it'll exit. Thread.currentThread().interrupt(); } }); } ImmutableList<SlowTask> slowTasks = ImmutableList.copyOf(profiler.getSlowestTasks()); assertThat(slowTasks).hasSize(30); ImmutableList<Long> slowestDurations = slowTasks.stream().map(SlowTask::getDurationNanos).collect(toImmutableList()); assertThat(slowestDurations).containsExactlyElementsIn(expectedSlowestDurations); } @Test public void testProfilerRecordsNothing() throws Exception { ByteArrayOutputStream buffer = new ByteArrayOutputStream(); profiler.start( ImmutableSet.of(), buffer, JSON_TRACE_FILE_FORMAT, "dummy_output_base", UUID.randomUUID(), true, BlazeClock.instance(), BlazeClock.instance().nanoTime(), /*slimProfile=*/ false, /*includePrimaryOutput=*/ false, /*includeTargetLabel=*/ false, /*collectTaskHistograms=*/ true, BugReporter.defaultInstance()); profiler.logSimpleTask(10000, 20000, ProfilerTask.VFS_STAT, "stat"); assertThat(ProfilerTask.VFS_STAT.collectsSlowestInstances()).isTrue(); assertThat(profiler.isProfiling(ProfilerTask.VFS_STAT)).isFalse(); profiler.stop(); JsonProfile jsonProfile = new JsonProfile(new ByteArrayInputStream(buffer.toByteArray())); assertThat(jsonProfile.getTraceEvents()).hasSize(2); assertThat( jsonProfile.getTraceEvents().stream() .filter( 
traceEvent -> !"thread_name".equals(traceEvent.name()) && !"thread_sort_index".equals(traceEvent.name())) .collect(Collectors.toList())) .isEmpty(); } @Test public void testConcurrentProfiling() throws Exception { ByteArrayOutputStream buffer = start(getAllProfilerTasks(), JSON_TRACE_FILE_FORMAT); Thread thread1 = new Thread( () -> { for (int i = 0; i < 10000; i++) { Profiler.instance().logEvent(ProfilerTask.INFO, "thread1"); } }); Thread thread2 = new Thread( () -> { for (int i = 0; i < 10000; i++) { Profiler.instance().logEvent(ProfilerTask.INFO, "thread2"); } }); try (SilentCloseable c = profiler.profile(ProfilerTask.PHASE, "main task")) { profiler.logEvent(ProfilerTask.INFO, "starting threads"); thread1.start(); thread2.start(); thread2.join(); thread1.join(); profiler.logEvent(ProfilerTask.INFO, "joined"); } profiler.stop(); JsonProfile jsonProfile = new JsonProfile(new ByteArrayInputStream(buffer.toByteArray())); assertThat(removeUsageEvents(jsonProfile.getTraceEvents())) .hasSize( 4 /* thread names */ + 4 /* thread indices */ + 1 /* main task phase marker */ + 2 /* starting, joining events */ + 2 * 10000 /* thread1/thread2 events */ + 1 /* finishing */); long tid1 = jsonProfile.getTraceEvents().stream() .filter(traceEvent -> "thread1".equals(traceEvent.name())) .map(TraceEvent::threadId) .distinct() .collect(onlyElement()); long tid2 = jsonProfile.getTraceEvents().stream() .filter(traceEvent -> "thread2".equals(traceEvent.name())) .map(TraceEvent::threadId) .distinct() .collect(onlyElement()); assertThat(tid1).isNotEqualTo(tid2); assertThat(tid1).isEqualTo(thread1.getId()); assertThat(tid2).isEqualTo(thread2.getId()); } @Test public void testPhaseTasks() throws Exception { ByteArrayOutputStream buffer = start(getAllProfilerTasks(), JSON_TRACE_FILE_FORMAT); Thread thread1 = new Thread( () -> { for (int i = 0; i < 100; i++) { Profiler.instance().logEvent(ProfilerTask.INFO, "thread1"); } }); profiler.markPhase(ProfilePhase.INIT); // Empty phase. 
profiler.markPhase(ProfilePhase.TARGET_PATTERN_EVAL); thread1.start(); thread1.join(); clock.advanceMillis(1); profiler.markPhase(ProfilePhase.ANALYZE); Thread thread2 = new Thread( () -> { try (SilentCloseable c = profiler.profile(ProfilerTask.INFO, "complex task")) { for (int i = 0; i < 100; i++) { Profiler.instance().logEvent(ProfilerTask.INFO, "thread2a"); } } try { profiler.markPhase(ProfilePhase.EXECUTE); } catch (InterruptedException e) { throw new IllegalStateException(e); } for (int i = 0; i < 100; i++) { Profiler.instance().logEvent(ProfilerTask.INFO, "thread2b"); } }); thread2.start(); thread2.join(); profiler.logEvent(ProfilerTask.INFO, "last task"); clock.advanceMillis(1); profiler.stop(); JsonProfile jsonProfile = new JsonProfile(new ByteArrayInputStream(buffer.toByteArray())); List<TraceEvent> filteredEvents = removeUsageEvents(jsonProfile.getTraceEvents()); assertThat(filteredEvents) .hasSize( 4 /* thread names */ + 4 /* threads sort index */ + 4 /* build phase marker */ + 3 * 100 /* thread1, thread2a, thread2b */ + 1 /* complex task */ + 1 /* last task */ + 1 /* finishing */); assertThat(getTraceEventsForPhase(ProfilePhase.INIT, filteredEvents)).isEmpty(); assertThat(getTraceEventsForPhase(ProfilePhase.TARGET_PATTERN_EVAL, filteredEvents)) .hasSize(100); // thread1 assertThat(getTraceEventsForPhase(ProfilePhase.ANALYZE, filteredEvents)) .hasSize(101); // complex task and thread2a assertThat(getTraceEventsForPhase(ProfilePhase.EXECUTE, filteredEvents)) .hasSize(102); // thread2b + last task + finishing } // Filter out CPU and memory usage events. These are non-deterministic depending on the duration // of the profile. private static List<TraceEvent> removeUsageEvents(List<TraceEvent> events) { return events.stream().filter(e -> !e.name().contains("usage")).collect(Collectors.toList()); } /** * Extracts all events for a given phase. * * <p>Excludes thread_name and thread_sort_index events. 
*/ private static List<TraceEvent> getTraceEventsForPhase( ProfilePhase phase, List<TraceEvent> traceEvents) { List<TraceEvent> filteredEvents = new ArrayList<>(); boolean foundPhase = false; for (TraceEvent traceEvent : traceEvents) { if (ProfilerTask.PHASE.description.equals(traceEvent.category())) { if (foundPhase) { break; } else if (phase.description.equals(traceEvent.name())) { foundPhase = true; continue; } } if (foundPhase && !"thread_name".equals(traceEvent.name()) && !"thread_sort_index".equals(traceEvent.name())) { filteredEvents.add(traceEvent); } } return filteredEvents; } @Test public void testResilenceToNonDecreasingNanoTimes() throws Exception { final long initialNanoTime = BlazeClock.instance().nanoTime(); final AtomicInteger numNanoTimeCalls = new AtomicInteger(0); Clock badClock = new Clock() { @Override public long currentTimeMillis() { return BlazeClock.instance().currentTimeMillis(); } @Override public long nanoTime() { return initialNanoTime - numNanoTimeCalls.addAndGet(1); } }; profiler.start( getAllProfilerTasks(), new ByteArrayOutputStream(), JSON_TRACE_FILE_FORMAT, "dummy_output_base", UUID.randomUUID(), false, badClock, initialNanoTime, /*slimProfile=*/ false, /*includePrimaryOutput=*/ false, /*includeTargetLabel=*/ false, /*collectTaskHistograms=*/ true, BugReporter.defaultInstance()); profiler.logSimpleTask(badClock.nanoTime(), ProfilerTask.INFO, "some task"); profiler.stop(); } /** Checks that the histograms are cleared in the stop call. 
*/ @Test public void testEmptyTaskHistograms() throws Exception { startUnbuffered(getAllProfilerTasks()); profiler.logSimpleTaskDuration( Profiler.nanoTimeMaybe(), Duration.ofSeconds(10), ProfilerTask.INFO, "foo"); for (StatRecorder recorder : profiler.tasksHistograms) { assertThat(recorder).isNotNull(); } profiler.stop(); for (StatRecorder recorder : profiler.tasksHistograms) { assertThat(recorder).isNull(); } } @Test public void testTaskHistograms() throws Exception { startUnbuffered(getAllProfilerTasks()); profiler.logSimpleTaskDuration( Profiler.nanoTimeMaybe(), Duration.ofSeconds(10), ProfilerTask.INFO, "foo"); ImmutableList<StatRecorder> histograms = profiler.getTasksHistograms(); StatRecorder infoStatRecorder = histograms.get(ProfilerTask.INFO.ordinal()); assertThat(infoStatRecorder.isEmpty()).isFalse(); // This is the only provided API to get the contents of the StatRecorder. assertThat(infoStatRecorder.toString()).contains("'INFO'"); assertThat(infoStatRecorder.toString()).contains("Count: 1"); assertThat(infoStatRecorder.toString()).contains("[8192..16384 ms]"); // The stop() call is here because the histograms are cleared in the stop call. See the // documentation of {@link Profiler#getTasksHistograms}. 
profiler.stop(); } @Test public void testIOExceptionInOutputStreamBinaryFormat() throws Exception { OutputStream failingOutputStream = new OutputStream() { @Override public void write(int b) throws IOException { throw new IOException("Expected failure."); } }; profiler.start( getAllProfilerTasks(), failingOutputStream, JSON_TRACE_FILE_FORMAT, "dummy_output_base", UUID.randomUUID(), false, BlazeClock.instance(), BlazeClock.instance().nanoTime(), /*slimProfile=*/ false, /*includePrimaryOutput=*/ false, /*includeTargetLabel=*/ false, /*collectTaskHistograms=*/ true, BugReporter.defaultInstance()); profiler.logSimpleTaskDuration( Profiler.nanoTimeMaybe(), Duration.ofSeconds(10), ProfilerTask.INFO, "foo"); IOException expected = assertThrows(IOException.class, profiler::stop); assertThat(expected).hasMessageThat().isEqualTo("Expected failure."); } @Test public void testIOExceptionInOutputStreamJsonFormat() throws Exception { OutputStream failingOutputStream = new OutputStream() { @Override public void write(int b) throws IOException { throw new IOException("Expected failure."); } }; profiler.start( getAllProfilerTasks(), failingOutputStream, JSON_TRACE_FILE_FORMAT, "dummy_output_base", UUID.randomUUID(), false, BlazeClock.instance(), BlazeClock.instance().nanoTime(), /*slimProfile=*/ false, /*includePrimaryOutput=*/ false, /*includeTargetLabel=*/ false, /*collectTaskHistograms=*/ true, BugReporter.defaultInstance()); profiler.logSimpleTaskDuration( Profiler.nanoTimeMaybe(), Duration.ofSeconds(10), ProfilerTask.INFO, "foo"); IOException expected = assertThrows(IOException.class, profiler::stop); assertThat(expected).hasMessageThat().isEqualTo("Expected failure."); } @Test public void testPrimaryOutputForAction() throws Exception { ByteArrayOutputStream buffer = new ByteArrayOutputStream(); profiler.start( getAllProfilerTasks(), buffer, JSON_TRACE_FILE_FORMAT, "dummy_output_base", UUID.randomUUID(), true, clock, clock.nanoTime(), /*slimProfile=*/ false, 
/*includePrimaryOutput=*/ true, /*includeTargetLabel=*/ false, /*collectTaskHistograms=*/ true, BugReporter.defaultInstance()); try (SilentCloseable c = profiler.profileAction(ProfilerTask.ACTION, "test", "foo.out", "")) { profiler.logEvent(ProfilerTask.PHASE, "event1"); } profiler.stop(); JsonProfile jsonProfile = new JsonProfile(new ByteArrayInputStream(buffer.toByteArray())); assertThat( jsonProfile.getTraceEvents().stream() .filter(traceEvent -> "foo.out".equals(traceEvent.primaryOutputPath())) .collect(Collectors.toList())) .hasSize(1); } @Test public void testTargetLabelForAction() throws Exception { ByteArrayOutputStream buffer = new ByteArrayOutputStream(); profiler.start( getAllProfilerTasks(), buffer, JSON_TRACE_FILE_FORMAT, "dummy_output_base", UUID.randomUUID(), true, clock, clock.nanoTime(), /*slimProfile=*/ false, /*includePrimaryOutput=*/ false, /*includeTargetLabel=*/ true, /*collectTaskHistograms=*/ true, BugReporter.defaultInstance()); try (SilentCloseable c = profiler.profileAction(ProfilerTask.ACTION, "test", "foo.out", "//foo:bar")) { profiler.logEvent(ProfilerTask.PHASE, "event1"); } profiler.stop(); JsonProfile jsonProfile = new JsonProfile(new ByteArrayInputStream(buffer.toByteArray())); assertThat( jsonProfile.getTraceEvents().stream() .filter(traceEvent -> "//foo:bar".equals(traceEvent.targetLabel())) .collect(Collectors.toList())) .hasSize(1); } private ByteArrayOutputStream getJsonProfileOutputStream(boolean slimProfile) throws IOException { ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); profiler.start( getAllProfilerTasks(), outputStream, JSON_TRACE_FILE_FORMAT, "dummy_output_base", UUID.randomUUID(), false, BlazeClock.instance(), BlazeClock.instance().nanoTime(), slimProfile, /*includePrimaryOutput=*/ false, /*includeTargetLabel=*/ false, /*collectTaskHistograms=*/ true, BugReporter.defaultInstance()); long curTime = Profiler.nanoTimeMaybe(); for (int i = 0; i < 100_000; i++) { Duration duration; if (i % 100 == 0) { 
duration = Duration.ofSeconds(1); } else { duration = Duration.ofMillis(i % 250); } profiler.logSimpleTaskDuration(curTime, duration, ProfilerTask.INFO, "foo"); curTime += duration.toNanos(); } profiler.stop(); return outputStream; } @Test public void testSlimProfileSize() throws Exception { ByteArrayOutputStream fatOutputStream = getJsonProfileOutputStream(/*slimProfile=*/ false); String fatOutput = fatOutputStream.toString(); assertThat(fatOutput).doesNotContain("merged"); ByteArrayOutputStream slimOutputStream = getJsonProfileOutputStream(/*slimProfile=*/ true); String slimOutput = slimOutputStream.toString(); assertThat(slimOutput).contains("merged"); long fatProfileLen = fatOutputStream.size(); long slimProfileLen = slimOutputStream.size(); assertThat(fatProfileLen).isAtLeast(8 * slimProfileLen); long fatProfileLineCount = fatOutput.split("\n").length; long slimProfileLineCount = slimOutput.split("\n").length; assertThat(fatProfileLineCount).isAtLeast(8 * slimProfileLineCount); } }
apache-2.0
Limseunghwan/oss
app/models/PostingComment.java
2110
/** * Yobi, Project Hosting SW * * Copyright 2013 NAVER Corp. * http://yobi.io * * @author Yi EungJun * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package models; import models.enumeration.ResourceType; import models.resource.Resource; import javax.persistence.Entity; import javax.persistence.ManyToOne; @Entity public class PostingComment extends Comment { private static final long serialVersionUID = 1L; public static final Finder<Long, PostingComment> find = new Finder<>(Long.class, PostingComment.class); @ManyToOne public Posting posting; public PostingComment(Posting posting, User author, String contents) { super(author, contents); this.posting = posting; } /** * @see Comment#getParent() */ public AbstractPosting getParent() { return posting; } /** * @see Comment#asResource() */ @Override public Resource asResource() { return new Resource() { @Override public String getId() { return id.toString(); } @Override public Project getProject() { return posting.project; } @Override public ResourceType getType() { return ResourceType.NONISSUE_COMMENT; } @Override public Long getAuthorId() { return authorId; } @Override public Resource getContainer() { return posting.asResource(); } }; } }
apache-2.0
blipinsk/RxAnimationBinding
rxanimationbinding/src/main/java/com/bartoszlipinski/rxanimationbinding/ValueAnimatorUpdateListenerOnSubscribe.java
1679
/** * Copyright 2016 Bartosz Lipinski * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.bartoszlipinski.rxanimationbinding; import android.animation.ValueAnimator; import rx.Observable; import rx.Subscriber; final class ValueAnimatorUpdateListenerOnSubscribe implements Observable.OnSubscribe<ValueAnimator> { private final ValueAnimator animator; ValueAnimatorUpdateListenerOnSubscribe(ValueAnimator animator) { this.animator = animator; } @Override public void call(final Subscriber<? super ValueAnimator> subscriber) { final ValueAnimator.AnimatorUpdateListener listener = new ValueAnimator.AnimatorUpdateListener() { @Override public void onAnimationUpdate(ValueAnimator animator) { if (!subscriber.isUnsubscribed()) { subscriber.onNext(animator); } } }; animator.addUpdateListener(listener); subscriber.add(new OnUnsubscribedCallback() { @Override protected void onUnsubscribe() { animator.removeUpdateListener(listener); } }); } }
apache-2.0
vivantech/kc_fixes
src/main/java/org/kuali/kra/s2s/generator/impl/EDGEPA427V1_0Generator.java
4221
/* * Copyright 2005-2014 The Kuali Foundation. * * Licensed under the Educational Community License, Version 1.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.opensource.org/licenses/ecl1.php * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kuali.kra.s2s.generator.impl; import gov.grants.apply.forms.edGEPA427V10.GEPA427AttachmentsDocument; import gov.grants.apply.forms.edGEPA427V10.GEPA427AttachmentsDocument.GEPA427Attachments; import gov.grants.apply.system.attachmentsV10.AttachedFileDataType; import org.apache.xmlbeans.XmlObject; import org.kuali.kra.proposaldevelopment.bo.Narrative; import org.kuali.kra.proposaldevelopment.document.ProposalDevelopmentDocument; import org.kuali.kra.s2s.util.S2SConstants; /** * * This class is used to generate XML Document object for grants.gov EDGEPA427V1.0. This form is generated using XMLBean API's * generated by compiling EDGEPA427V1.0 schema. * * @author Kuali Research Administration Team (kualidev@oncourse.iu.edu) */ public class EDGEPA427V1_0Generator extends EDGEPA427BaseGenerator { /** * * This method returns GEPA427AttachmentsDocument object based on proposal development document which contains the * GEPA427AttachmentsDocument informations about GEPA type attachment for a particular proposal * * @return gepa427Document(GEPA427AttachmentsDocument) {@link XmlObject} of type GEPA427AttachmentsDocument. 
*/ private GEPA427AttachmentsDocument getGEPA427Attachments() { GEPA427AttachmentsDocument gepa427Document = GEPA427AttachmentsDocument.Factory.newInstance(); GEPA427Attachments gepAttachments = GEPA427Attachments.Factory.newInstance(); gepAttachments.setFormVersion(S2SConstants.FORMVERSION_1_0); AttachedFileDataType attachedFileDataType = null; for (Narrative narrative : pdDoc.getDevelopmentProposal().getNarratives()) { if (narrative.getNarrativeTypeCode() != null && Integer.parseInt(narrative.getNarrativeTypeCode()) == NARRATIVE_TYPE_ED_GEPA427) { attachedFileDataType = getAttachedFileType(narrative); if(attachedFileDataType != null){ gepAttachments.setAttachments(attachedFileDataType); break; } } } gepa427Document.setGEPA427Attachments(gepAttachments); return gepa427Document; } /** * This method creates {@link XmlObject} of type {@link GEPA427AttachmentsDocument} by populating data from the given * {@link ProposalDevelopmentDocument} * * @param proposalDevelopmentDocument for which the {@link XmlObject} needs to be created * @return {@link XmlObject} which is generated using the given {@link ProposalDevelopmentDocument} * @see org.kuali.kra.s2s.generator.S2SFormGenerator#getFormObject(ProposalDevelopmentDocument) * */ public XmlObject getFormObject(ProposalDevelopmentDocument proposalDevelopmentDocument) { this.pdDoc = proposalDevelopmentDocument; return getGEPA427Attachments(); } /** * This method typecasts the given {@link XmlObject} to the required generator type and returns back the document of that * generator type. 
* * @param xmlObject which needs to be converted to the document type of the required generator * @return {@link XmlObject} document of the required generator type * @see org.kuali.kra.s2s.generator.S2SFormGenerator#getFormObject(XmlObject) */ public XmlObject getFormObject(XmlObject xmlObject) { GEPA427AttachmentsDocument gepa427Document = GEPA427AttachmentsDocument.Factory.newInstance(); GEPA427Attachments gepAttachments = (GEPA427Attachments) xmlObject; gepa427Document.setGEPA427Attachments(gepAttachments); return gepa427Document; } }
apache-2.0
prestodb/presto
presto-main/src/main/java/com/facebook/presto/operator/aggregation/ParametricAggregation.java
10204
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.facebook.presto.operator.aggregation; import com.facebook.presto.bytecode.DynamicClassLoader; import com.facebook.presto.common.type.Type; import com.facebook.presto.common.type.TypeSignature; import com.facebook.presto.metadata.BoundVariables; import com.facebook.presto.metadata.FunctionAndTypeManager; import com.facebook.presto.metadata.SqlAggregationFunction; import com.facebook.presto.operator.ParametricImplementationsGroup; import com.facebook.presto.operator.aggregation.AggregationMetadata.AccumulatorStateDescriptor; import com.facebook.presto.operator.aggregation.AggregationMetadata.ParameterMetadata; import com.facebook.presto.operator.aggregation.AggregationMetadata.ParameterMetadata.ParameterType; import com.facebook.presto.operator.aggregation.state.StateCompiler; import com.facebook.presto.spi.PrestoException; import com.facebook.presto.spi.function.AccumulatorStateFactory; import com.facebook.presto.spi.function.AccumulatorStateSerializer; import com.facebook.presto.spi.function.Signature; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; import java.lang.invoke.MethodHandle; import java.util.List; import java.util.Optional; import static com.facebook.presto.metadata.SignatureBinder.applyBoundVariables; import static com.facebook.presto.operator.ParametricFunctionHelpers.bindDependencies; import static 
com.facebook.presto.operator.aggregation.AggregationUtils.generateAggregationName; import static com.facebook.presto.operator.aggregation.state.StateCompiler.generateStateSerializer; import static com.facebook.presto.spi.StandardErrorCode.AMBIGUOUS_FUNCTION_CALL; import static com.facebook.presto.spi.StandardErrorCode.FUNCTION_IMPLEMENTATION_MISSING; import static com.google.common.base.Throwables.throwIfUnchecked; import static com.google.common.collect.ImmutableList.toImmutableList; import static java.lang.String.format; import static java.util.Objects.requireNonNull; public class ParametricAggregation extends SqlAggregationFunction { final AggregationHeader details; final ParametricImplementationsGroup<AggregationImplementation> implementations; public ParametricAggregation( Signature signature, AggregationHeader details, ParametricImplementationsGroup<AggregationImplementation> implementations) { super(signature, details.getVisibility()); this.details = requireNonNull(details, "details is null"); this.implementations = requireNonNull(implementations, "implementations is null"); } @Override public InternalAggregationFunction specialize(BoundVariables variables, int arity, FunctionAndTypeManager functionAndTypeManager) { // Bind variables Signature boundSignature = applyBoundVariables(getSignature(), variables, arity); // Find implementation matching arguments AggregationImplementation concreteImplementation = findMatchingImplementation(boundSignature, variables, functionAndTypeManager); // Build argument and return Types from signatures List<Type> inputTypes = boundSignature.getArgumentTypes().stream().map(functionAndTypeManager::getType).collect(toImmutableList()); Type outputType = functionAndTypeManager.getType(boundSignature.getReturnType()); // Create classloader for additional aggregation dependencies Class<?> definitionClass = concreteImplementation.getDefinitionClass(); DynamicClassLoader classLoader = new 
DynamicClassLoader(definitionClass.getClassLoader(), getClass().getClassLoader()); // Build state factory and serializer Class<?> stateClass = concreteImplementation.getStateClass(); AccumulatorStateSerializer<?> stateSerializer = getAccumulatorStateSerializer(concreteImplementation, variables, functionAndTypeManager, stateClass, classLoader); AccumulatorStateFactory<?> stateFactory = StateCompiler.generateStateFactory(stateClass, classLoader); // Bind provided dependencies to aggregation method handlers MethodHandle inputHandle = bindDependencies(concreteImplementation.getInputFunction(), concreteImplementation.getInputDependencies(), variables, functionAndTypeManager); MethodHandle combineHandle = bindDependencies(concreteImplementation.getCombineFunction(), concreteImplementation.getCombineDependencies(), variables, functionAndTypeManager); MethodHandle outputHandle = bindDependencies(concreteImplementation.getOutputFunction(), concreteImplementation.getOutputDependencies(), variables, functionAndTypeManager); // Build metadata of input parameters List<ParameterMetadata> parametersMetadata = buildParameterMetadata(concreteImplementation.getInputParameterMetadataTypes(), inputTypes); // Generate Aggregation name String aggregationName = generateAggregationName(getSignature().getNameSuffix(), outputType.getTypeSignature(), signaturesFromTypes(inputTypes)); // Collect all collected data in Metadata AggregationMetadata metadata = new AggregationMetadata( aggregationName, parametersMetadata, inputHandle, combineHandle, outputHandle, ImmutableList.of(new AccumulatorStateDescriptor( stateClass, stateSerializer, stateFactory)), outputType); // Create specialized InternalAggregationFunction for Presto return new InternalAggregationFunction(getSignature().getNameSuffix(), inputTypes, ImmutableList.of(stateSerializer.getSerializedType()), outputType, details.isDecomposable(), details.isOrderSensitive(), new LazyAccumulatorFactoryBinder(metadata, classLoader)); } 
@VisibleForTesting public ParametricImplementationsGroup<AggregationImplementation> getImplementations() { return implementations; } @Override public String getDescription() { return details.getDescription().orElse(""); } private AggregationImplementation findMatchingImplementation(Signature boundSignature, BoundVariables variables, FunctionAndTypeManager functionAndTypeManager) { Optional<AggregationImplementation> foundImplementation = Optional.empty(); if (implementations.getExactImplementations().containsKey(boundSignature)) { foundImplementation = Optional.of(implementations.getExactImplementations().get(boundSignature)); } else { for (AggregationImplementation candidate : implementations.getGenericImplementations()) { if (candidate.areTypesAssignable(boundSignature, variables, functionAndTypeManager)) { if (foundImplementation.isPresent()) { throw new PrestoException(AMBIGUOUS_FUNCTION_CALL, format("Ambiguous function call (%s) for %s", variables, getSignature())); } foundImplementation = Optional.of(candidate); } } } if (!foundImplementation.isPresent()) { throw new PrestoException(FUNCTION_IMPLEMENTATION_MISSING, format("Unsupported type parameters (%s) for %s", variables, getSignature())); } return foundImplementation.get(); } private static AccumulatorStateSerializer<?> getAccumulatorStateSerializer(AggregationImplementation implementation, BoundVariables variables, FunctionAndTypeManager functionAndTypeManager, Class<?> stateClass, DynamicClassLoader classLoader) { AccumulatorStateSerializer<?> stateSerializer; Optional<MethodHandle> stateSerializerFactory = implementation.getStateSerializerFactory(); if (stateSerializerFactory.isPresent()) { try { MethodHandle factoryHandle = bindDependencies(stateSerializerFactory.get(), implementation.getStateSerializerFactoryDependencies(), variables, functionAndTypeManager); stateSerializer = (AccumulatorStateSerializer<?>) factoryHandle.invoke(); } catch (Throwable t) { throwIfUnchecked(t); throw new 
RuntimeException(t); } } else { stateSerializer = generateStateSerializer(stateClass, classLoader); } return stateSerializer; } private static List<TypeSignature> signaturesFromTypes(List<Type> types) { return types .stream() .map(Type::getTypeSignature) .collect(toImmutableList()); } private static List<ParameterMetadata> buildParameterMetadata(List<ParameterType> parameterMetadataTypes, List<Type> inputTypes) { ImmutableList.Builder<ParameterMetadata> builder = ImmutableList.builder(); int inputId = 0; for (ParameterType parameterMetadataType : parameterMetadataTypes) { switch (parameterMetadataType) { case STATE: case BLOCK_INDEX: builder.add(new ParameterMetadata(parameterMetadataType)); break; case INPUT_CHANNEL: case BLOCK_INPUT_CHANNEL: case NULLABLE_BLOCK_INPUT_CHANNEL: builder.add(new ParameterMetadata(parameterMetadataType, inputTypes.get(inputId++))); break; } } return builder.build(); } }
apache-2.0
Sargul/dbeaver
plugins/org.jkiss.dbeaver.ext.postgresql/src/org/jkiss/dbeaver/ext/postgresql/sql/PostgreEscapeStringRule.java
2825
/* * DBeaver - Universal Database Manager * Copyright (C) 2010-2021 DBeaver Corp and others * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jkiss.dbeaver.ext.postgresql.sql; import org.jkiss.dbeaver.model.sql.parser.tokens.SQLTokenType; import org.jkiss.dbeaver.model.text.parser.*; /** * This rule matches string literals with C-Style escapes, as * described in <b>4.1.2.2</b> chapter of PostgreSQL documentation. * * @see <a href="https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-STRINGS-ESCAPE">4.1.2.2. 
String Constants with C-Style Escapes</a> */ public class PostgreEscapeStringRule implements TPPredicateRule { private final TPToken stringToken = new TPTokenDefault(SQLTokenType.T_STRING); @Override public TPToken getSuccessToken() { return stringToken; } @Override public TPToken evaluate(TPCharacterScanner scanner, boolean resume) { int ch; int chRead = 2; if (scanner.getColumn() > 0) { scanner.unread(); if (Character.isLetterOrDigit(ch = scanner.read()) || ch == '_') { // Previous character is a part of identifier, we // don't want to take a bite of it by accident return TPTokenAbstract.UNDEFINED; } } if ((ch = scanner.read()) != 'e' && ch != 'E') { scanner.unread(); return TPTokenAbstract.UNDEFINED; } if (scanner.read() != '\'') { scanner.unread(); scanner.unread(); return TPTokenAbstract.UNDEFINED; } do { ch = scanner.read(); chRead++; if (ch == '\\') { ch = scanner.read(); chRead++; if (ch == '\'') { // Don't care about other escape sequences continue; } } if (ch == '\'') { return stringToken; } } while (ch != TPCharacterScanner.EOF); while (chRead-- > 0) { scanner.unread(); } return TPTokenAbstract.UNDEFINED; } @Override public TPToken evaluate(TPCharacterScanner scanner) { return evaluate(scanner, false); } }
apache-2.0
Sargul/dbeaver
plugins/org.jkiss.dbeaver.ext.oracle/src/org/jkiss/dbeaver/ext/oracle/model/OracleRole.java
3480
/*
 * DBeaver - Universal Database Manager
 * Copyright (C) 2010-2021 DBeaver Corp and others
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jkiss.dbeaver.ext.oracle.model;

import org.jkiss.code.NotNull;
import org.jkiss.code.Nullable;
import org.jkiss.dbeaver.DBException;
import org.jkiss.dbeaver.Log;
import org.jkiss.dbeaver.model.access.DBARole;
import org.jkiss.dbeaver.model.exec.jdbc.JDBCPreparedStatement;
import org.jkiss.dbeaver.model.exec.jdbc.JDBCResultSet;
import org.jkiss.dbeaver.model.exec.jdbc.JDBCSession;
import org.jkiss.dbeaver.model.exec.jdbc.JDBCStatement;
import org.jkiss.dbeaver.model.impl.jdbc.JDBCUtils;
import org.jkiss.dbeaver.model.impl.jdbc.cache.JDBCObjectCache;
import org.jkiss.dbeaver.model.meta.Association;
import org.jkiss.dbeaver.model.meta.Property;
import org.jkiss.dbeaver.model.runtime.DBRProgressMonitor;
import org.jkiss.dbeaver.model.struct.DBSObject;

import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.Collection;

/**
 * OracleRole - represents a single Oracle role as read from the data
 * dictionary (columns ROLE and PASSWORD_REQUIRED).
 */
public class OracleRole extends OracleGrantee implements DBARole {
    private static final Log log = Log.getLog(OracleRole.class);

    // Both values are read once from the catalog result set in the constructor
    // and never reassigned, so they are declared final (immutable by default).
    private final String name;
    private final String authentication;
    // Lazily populated cache of DBA_ROLE_PRIVS rows granting this role.
    private final UserCache userCache = new UserCache();

    public OracleRole(OracleDataSource dataSource, ResultSet resultSet) {
        super(dataSource);
        this.name = JDBCUtils.safeGetString(resultSet, "ROLE");
        this.authentication = JDBCUtils.safeGetStringTrimmed(resultSet, "PASSWORD_REQUIRED");
    }

    @NotNull
    @Override
    @Property(viewable = true, order = 2)
    public String getName() {
        return name;
    }

    /**
     * @return the role's PASSWORD_REQUIRED catalog value (authentication mode),
     *         possibly {@code null} if absent in the result set
     */
    @Property(viewable = true, order = 3)
    public String getAuthentication() {
        return authentication;
    }

    /**
     * Lists the grantees of this role (one {@link OraclePrivUser} per
     * DBA_ROLE_PRIVS row), loading and caching them on first access.
     *
     * @param monitor progress monitor for the catalog read
     * @throws DBException if the privilege query fails
     */
    @Association
    public Collection<OraclePrivUser> getUserPrivs(DBRProgressMonitor monitor) throws DBException {
        return userCache.getAllObjects(monitor, this);
    }

    @Nullable
    @Override
    public DBSObject refreshObject(@NotNull DBRProgressMonitor monitor) throws DBException {
        // Invalidate cached grantees so the next access re-reads the catalog.
        userCache.clearCache();
        return super.refreshObject(monitor);
    }

    /** Cache of DBA_ROLE_PRIVS rows for a single role, keyed by GRANTED_ROLE. */
    static class UserCache extends JDBCObjectCache<OracleRole, OraclePrivUser> {
        @NotNull
        @Override
        protected JDBCStatement prepareObjectsStatement(@NotNull JDBCSession session, @NotNull OracleRole owner) throws SQLException {
            // Role name is bound as a parameter, never concatenated into SQL.
            final JDBCPreparedStatement dbStat = session.prepareStatement(
                "SELECT * FROM DBA_ROLE_PRIVS WHERE GRANTED_ROLE=? ORDER BY GRANTEE");
            dbStat.setString(1, owner.getName());
            return dbStat;
        }

        @Override
        protected OraclePrivUser fetchObject(@NotNull JDBCSession session, @NotNull OracleRole owner, @NotNull JDBCResultSet resultSet) throws SQLException, DBException {
            return new OraclePrivUser(owner, resultSet);
        }
    }
}
apache-2.0
quarkusio/quarkus
integration-tests/funqy-amazon-lambda/src/main/java/io/quarkus/funqy/test/NoArgFun.java
163
package io.quarkus.funqy.test;

import io.quarkus.funqy.Funq;

/**
 * Funqy test function that accepts no arguments.
 */
public class NoArgFun {

    /** Fixed payload echoed by the function (matches the function's name). */
    private static final String RESPONSE = "noArgFun";

    /**
     * Zero-argument Funqy endpoint.
     *
     * @return the literal string {@code "noArgFun"}
     */
    @Funq
    public String noArgFun() {
        return RESPONSE;
    }
}
apache-2.0
iamtowne/yscardII
src/main/java/com/yscardII/framework/hibernate/bo/AGcard.java
3061
package com.yscardII.framework.hibernate.bo;

import java.io.Serializable;
import java.sql.Date;
import java.sql.Timestamp;

import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.Table;

// Card table shard "ag" (maps to physical table ag_card).
@SuppressWarnings("serial")
@Entity
@Table(name = "ag_card")
public class AGcard implements Serializable {

    private Integer card_id;        // surrogate primary key
    private Binfor binfor;          // owning merchant
    private Userinfor userinfor;    // owning user
    private String card;            // card number
    private Double card_balance;    // current balance
    private Integer card_v_stat;    // verification status code
    private Integer card_state;     // card state code
    private Timestamp card_log_tmd; // last-log timestamp
    private String u_iphone;        // user phone number

    @Id
    @GeneratedValue(strategy = GenerationType.AUTO)
    public Integer getCard_id() {
        return card_id;
    }

    public void setCard_id(Integer card_id) {
        this.card_id = card_id;
    }

    /** Owning merchant; lazily loaded via FK column b_id. */
    @ManyToOne(fetch = FetchType.LAZY)
    @JoinColumn(name = "b_id")
    public Binfor getBinfor() {
        return binfor;
    }

    public void setBinfor(Binfor binfor) {
        this.binfor = binfor;
    }

    /** Owning user; lazily loaded via FK column u_id. */
    @ManyToOne(fetch = FetchType.LAZY)
    @JoinColumn(name = "u_id")
    public Userinfor getUserinfor() {
        return userinfor;
    }

    public void setUserinfor(Userinfor userinfor) {
        this.userinfor = userinfor;
    }

    public String getCard() {
        return card;
    }

    public void setCard(String card) {
        this.card = card;
    }

    public Double getCard_balance() {
        return card_balance;
    }

    public void setCard_balance(Double card_balance) {
        this.card_balance = card_balance;
    }

    public Integer getCard_v_stat() {
        return card_v_stat;
    }

    public void setCard_v_stat(Integer card_v_stat) {
        this.card_v_stat = card_v_stat;
    }

    public Integer getCard_state() {
        return card_state;
    }

    public void setCard_state(Integer card_state) {
        this.card_state = card_state;
    }

    public Timestamp getCard_log_tmd() {
        return card_log_tmd;
    }

    public void setCard_log_tmd(Timestamp card_log_tmd) {
        this.card_log_tmd = card_log_tmd;
    }

    public String getU_iphone() {
        return u_iphone;
    }

    public void setU_iphone(String u_iphone) {
        this.u_iphone = u_iphone;
    }

    /**
     * hashCode based on card_id and card, mirroring {@link #equals(Object)}.
     * FIX: the {@code @Override} annotation was commented out here while
     * equals carried it; restored for consistency (the method did and still
     * does override {@link Object#hashCode()}).
     */
    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + ((card_id == null) ? 0 : card_id.hashCode());
        result = prime * result + ((card == null) ? 0 : card.hashCode());
        return result;
    }

    /**
     * Equality based on card_id and card (same fields as {@link #hashCode()}).
     * NOTE(review): both fields are mutable, so instances should not be used as
     * keys in hash-based collections while their id/card can still change.
     */
    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        final AGcard other = (AGcard) obj;
        if (card_id == null) {
            if (other.card_id != null)
                return false;
        } else if (!card_id.equals(other.card_id))
            return false;
        if (card == null) {
            if (other.card != null)
                return false;
        } else if (!card.equals(other.card))
            return false;
        return true;
    }
}
apache-2.0
GoogleCloudPlatform/google-cloud-eclipse
plugins/com.google.cloud.tools.eclipse.appengine.facets/src/com/google/cloud/tools/eclipse/appengine/facets/ui/navigator/AppEngineActionProvider.java
3089
/* * Copyright 2018 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.cloud.tools.eclipse.appengine.facets.ui.navigator; import com.google.common.base.Preconditions; import org.eclipse.jface.action.IMenuManager; import org.eclipse.jface.viewers.IStructuredSelection; import org.eclipse.ui.IActionBars; import org.eclipse.ui.IWorkbench; import org.eclipse.ui.IWorkbenchPage; import org.eclipse.ui.IWorkbenchWindow; import org.eclipse.ui.PlatformUI; import org.eclipse.ui.actions.ActionContext; import org.eclipse.ui.actions.OpenFileAction; import org.eclipse.ui.navigator.CommonActionProvider; import org.eclipse.ui.navigator.ICommonActionConstants; import org.eclipse.ui.navigator.ICommonActionExtensionSite; /** Provides App Engine relevant actions. */ public class AppEngineActionProvider extends CommonActionProvider { private OpenFileAction openFileAction; @Override public void init(ICommonActionExtensionSite aSite) { super.init(aSite); openFileAction = new OpenFileAction(getWorkbenchPage()); } @Override public void fillContextMenu(IMenuManager menu) { menu.add(openFileAction); } @Override public void fillActionBars(IActionBars actionBars) { if (openFileAction.isEnabled()) { actionBars.setGlobalActionHandler(ICommonActionConstants.OPEN, openFileAction); } } /** Provides opportunity for actions to update based on current selection. 
*/ @Override public void setContext(ActionContext context) { if (context != null && context.getSelection() instanceof IStructuredSelection) { IStructuredSelection selection = (IStructuredSelection) context.getSelection(); openFileAction.selectionChanged(selection); } } private IWorkbenchPage getWorkbenchPage() { IWorkbenchPage page = getActionSite().getViewSite().getAdapter(IWorkbenchPage.class); if (page != null) { return page; } IWorkbenchWindow window = getActionSite().getViewSite().getAdapter(IWorkbenchWindow.class); if (window != null) { return window.getActivePage(); } IWorkbench workbench = getActionSite().getViewSite().getAdapter(IWorkbench.class); if (workbench == null) { workbench = PlatformUI.getWorkbench(); } Preconditions.checkNotNull(workbench); window = workbench.getActiveWorkbenchWindow(); if (window == null) { Preconditions.checkState(workbench.getWorkbenchWindowCount() > 0); window = workbench.getWorkbenchWindows()[0]; } Preconditions.checkNotNull(window); return window.getActivePage(); } }
apache-2.0
markpollack/spring-cloud-skipper
spring-cloud-skipper-server-core/src/test/java/org/springframework/cloud/skipper/server/deployer/AppDeploymentRequestFactoryTests.java
5405
/* * Copyright 2017 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.cloud.skipper.server.deployer; import org.junit.Test; import org.springframework.cloud.deployer.resource.support.DelegatingResourceLoader; import org.springframework.cloud.skipper.SkipperException; import org.springframework.cloud.skipper.domain.SpringCloudDeployerApplicationManifest; import org.springframework.cloud.skipper.domain.SpringCloudDeployerApplicationSpec; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Fail.fail; import static org.mockito.Matchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; /** * @author Ilayaperumal Gopinathan */ public class AppDeploymentRequestFactoryTests { @Test @SuppressWarnings("unchecked") public void testGetResourceExceptionHandler() { DelegatingResourceLoader resourceLoader = mock(DelegatingResourceLoader.class); AppDeploymentRequestFactory appDeploymentRequestFactory = new AppDeploymentRequestFactory(resourceLoader); when(resourceLoader.getResource(anyString())).thenThrow(Exception.class); SpringCloudDeployerApplicationManifest applicationSpec = mock(SpringCloudDeployerApplicationManifest.class); SpringCloudDeployerApplicationSpec springCloudDeployerApplicationSpec = mock(SpringCloudDeployerApplicationSpec.class); when(applicationSpec.getSpec()).thenReturn(springCloudDeployerApplicationSpec); String specResource = 
"http://test"; when(springCloudDeployerApplicationSpec.getResource()).thenReturn(specResource); when(springCloudDeployerApplicationSpec.getApplicationProperties()).thenReturn(null); try { appDeploymentRequestFactory.createAppDeploymentRequest(applicationSpec, "release1", "1.0.0"); fail("SkipperException is expected to be thrown."); } catch (SkipperException e) { assertThat(e.getMessage()).contains("Could not load Resource " + specResource + "."); } } @Test public void testGetResourceLocation() { SpringCloudDeployerApplicationSpec springBootAppSpec1 = mock(SpringCloudDeployerApplicationSpec.class); String mavenSpecResource = "maven://org.springframework.cloud.stream.app:log-sink-rabbit"; String mavenSpecVersion = "1.2.0.RELEASE"; when(springBootAppSpec1.getResource()).thenReturn(mavenSpecResource); when(springBootAppSpec1.getVersion()).thenReturn(mavenSpecVersion); SpringCloudDeployerApplicationSpec springBootAppSpec2 = mock(SpringCloudDeployerApplicationSpec.class); String dockerSpecResource = "docker:springcloudstream/log-sink-rabbit"; String dockerSpecVersion = "1.2.0.RELEASE"; when(springBootAppSpec2.getResource()).thenReturn(dockerSpecResource); when(springBootAppSpec2.getVersion()).thenReturn(dockerSpecVersion); SpringCloudDeployerApplicationSpec springBootAppSpec3 = mock(SpringCloudDeployerApplicationSpec.class); String httpSpecResource = "http://repo.spring.io/libs-release/org/springframework/cloud/stream/app/" + "log-sink-rabbit/1.2.0.RELEASE/log-sink-rabbit"; when(springBootAppSpec3.getResource()).thenReturn(httpSpecResource); when(springBootAppSpec3.getVersion()).thenReturn("1.2.0.RELEASE"); assertThat(AppDeploymentRequestFactory.getResourceLocation(springBootAppSpec1.getResource(), springBootAppSpec1.getVersion())) .isEqualTo(String.format("%s:%s", mavenSpecResource, mavenSpecVersion)); assertThat(AppDeploymentRequestFactory.getResourceLocation(springBootAppSpec2.getResource(), springBootAppSpec2.getVersion())) .isEqualTo(String.format("%s:%s", 
dockerSpecResource, dockerSpecVersion)); assertThat(AppDeploymentRequestFactory.getResourceLocation(springBootAppSpec3.getResource(), springBootAppSpec3.getVersion())) .isEqualTo(httpSpecResource + "-1.2.0.RELEASE.jar"); SpringCloudDeployerApplicationSpec springBootAppSpec4 = mock(SpringCloudDeployerApplicationSpec.class); String mavenSpecResource2 = "maven://org.springframework.cloud.stream.app:log-sink-rabbit:1.2.0.RELEASE"; String mavenSpecVersion2 = "1.2.0.RELEASE"; when(springBootAppSpec4.getResource()).thenReturn(mavenSpecResource2); when(springBootAppSpec4.getVersion()).thenReturn(mavenSpecVersion2); assertThat(AppDeploymentRequestFactory.getResourceLocation(springBootAppSpec4.getResource(), springBootAppSpec4.getVersion())) .isEqualTo(mavenSpecResource2); String mavenSpecResource3 = "maven://org.springframework.cloud.stream.app:log-sink-rabbit:1.2.0.RELEASE"; SpringCloudDeployerApplicationSpec springBootAppSpec5 = mock(SpringCloudDeployerApplicationSpec.class); when(springBootAppSpec5.getResource()).thenReturn(mavenSpecResource3); when(springBootAppSpec5.getVersion()).thenReturn(null); assertThat(AppDeploymentRequestFactory.getResourceLocation(springBootAppSpec4.getResource(), springBootAppSpec4.getVersion())) .isEqualTo(mavenSpecResource3); } }
apache-2.0
Cyboot/jpad
src/de/timweb/jpad/core/Gamepad.java
2513
package de.timweb.jpad.core;

import java.util.Arrays;

import org.lwjgl.input.Controller;

/**
 * Snapshot of one gamepad's state: two analog sticks and up to 20 buttons,
 * refreshed from an LWJGL {@link Controller} via {@link #update(Controller)}.
 */
public class Gamepad {
	public static final String	LEFT_STICK	= "Left Stick";
	public static final String	RIGHT_STICK	= "Right Stick";

	private final String		name;
	private final Stick			stickLeft	= new Stick(LEFT_STICK);
	private final Stick			stickRight	= new Stick(RIGHT_STICK);
	private final Button[]		buttons;

	public Gamepad() {
		this("DefaultGamepad");
	}

	public Gamepad(final String name) {
		this.name = name;
		// Fixed capacity of 20 button slots, pre-filled with indexed Buttons.
		buttons = new Button[20];
		for (int i = 0; i < buttons.length; i++) {
			buttons[i] = new Button(i);
		}
	}

	/**
	 * Copies the controller's current axis and button values into this pad.
	 *
	 * @param controller LWJGL controller to read from
	 */
	void update(final Controller controller) {
		// PS3-style pads report the stick axes on swapped pairs; detection is a
		// case-sensitive substring match on the pad name — TODO confirm intended.
		if (name.contains("ps3")) {
			stickRight.valueX = controller.getAxisValue(1);
			stickRight.valueY = controller.getAxisValue(0);
			stickLeft.valueX = controller.getAxisValue(3);
			stickLeft.valueY = controller.getAxisValue(2);
		} else {
			stickLeft.valueX = controller.getAxisValue(1);
			stickLeft.valueY = controller.getAxisValue(0);
			stickRight.valueX = controller.getAxisValue(3);
			stickRight.valueY = controller.getAxisValue(2);
		}
		// BUG FIX: clamp to the local array size. A controller reporting more
		// than 20 buttons previously caused an ArrayIndexOutOfBoundsException.
		final int buttonCount = Math.min(controller.getButtonCount(), buttons.length);
		for (int i = 0; i < buttonCount; i++) {
			buttons[i].name = controller.getButtonName(i);
			buttons[i].value = controller.isButtonPressed(i);
		}
	}

	/**
	 * @param buttonIndex index into the 20-slot button array (caller must keep
	 *            it in range 0..19)
	 * @return whether that button was pressed at the last {@link #update}
	 */
	public boolean isPressed(final int buttonIndex) {
		return buttons[buttonIndex].value;
	}

	public Stick getStickLeft() {
		return stickLeft;
	}

	public Stick getStickRight() {
		return stickRight;
	}

	public String getName() {
		return name;
	}

	@Override
	public String toString() {
		return stickLeft.toString() + ", " + stickRight.toString() + ", " + Arrays.toString(buttons);
	}

	/** One analog stick: a name plus the last sampled X/Y axis values. */
	public static class Stick {
		String	name;
		float	valueX;
		float	valueY;

		public Stick(final String name) {
			this.name = name;
		}

		public String getName() {
			return name;
		}

		public float getValueX() {
			return valueX;
		}

		public float getValueY() {
			return valueY;
		}

		@Override
		public String toString() {
			return "[" + name + ", x=" + valueX + ", y=" + valueY + "]";
		}
	}

	/** One button: its slot index, reported name and last pressed state. */
	public static class Button {
		int		index;
		String	name;
		boolean	value;

		public Button(final int index) {
			this.index = index;
		}

		public int getIndex() {
			return index;
		}

		public String getName() {
			return name;
		}

		public boolean isValue() {
			return value;
		}

		@Override
		public String toString() {
			// '#' marks a pressed button, '-' a released one.
			String x = "-";
			if (value)
				x = "#";
			return "[" + index + "] " + x;
		}
	}
}
apache-2.0
googleads/googleads-java-lib
modules/dfp_appengine/src/main/java/com/google/api/ads/admanager/jaxws/v202111/ContentServiceInterfacegetContentByStatement.java
3595
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// NOTE: JAXB-generated request wrapper for the Ad Manager ContentService
// getContentByStatement operation. Do not hand-edit the code; regenerate
// from the WSDL instead.
package com.google.api.ads.admanager.jaxws.v202111;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlType;

/**
 * Gets a {@link ContentPage} of {@link Content} objects that satisfy the given
 * {@link Statement#query}. The following fields are supported for filtering:
 *
 * <table>
 * <tr>
 * <th scope="col">PQL Property</th> <th scope="col">Object Property</th>
 * </tr>
 * <tr>
 * <td>{@code id}</td>
 * <td>{@link Content#id}</td>
 * </tr>
 * <tr>
 * <td>{@code status}</td>
 * <td>{@link Content#status}</td>
 * </tr>
 * <tr>
 * <td>{@code name}</td>
 * <td>{@link Content#name}</td>
 * </tr>
 * <tr>
 * <td>{@code lastModifiedDateTime}</td>
 * <td>{@link Content#lastModifiedDateTime}</td>
 * </tr>
 * <tr>
 * <td>{@code lastDaiIngestDateTime}</td>
 * <td>{@link Content#lastDaiIngestDateTime}</td>
 * </tr>
 * <tr>
 * <td>{@code daiIngestStatus}</td>
 * <td>{@link Content#daiIngestStatus}</td>
 * </tr>
 * </table>
 *
 * @param statement a Publisher Query Language statement used to filter a set of content
 * @return the content that matches the given filter
 *
 *
 * <p>Java class for getContentByStatement element declaration.
 *
 * <p>The following schema fragment specifies the expected content contained within this class.
 *
 * <pre>
 * &lt;element name="getContentByStatement">
 *   &lt;complexType>
 *     &lt;complexContent>
 *       &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
 *         &lt;sequence>
 *           &lt;element name="statement" type="{https://www.google.com/apis/ads/publisher/v202111}Statement" minOccurs="0"/>
 *         &lt;/sequence>
 *       &lt;/restriction>
 *     &lt;/complexContent>
 *   &lt;/complexType>
 * &lt;/element>
 * </pre>
 *
 *
 */
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "", propOrder = {
    "statement"
})
@XmlRootElement(name = "getContentByStatement")
public class ContentServiceInterfacegetContentByStatement {

    // Optional in the schema (minOccurs="0"), hence may be null.
    protected Statement statement;

    /**
     * Gets the value of the statement property.
     *
     * @return
     *     possible object is
     *     {@link Statement }
     *
     */
    public Statement getStatement() {
        return statement;
    }

    /**
     * Sets the value of the statement property.
     *
     * @param value
     *     allowed object is
     *     {@link Statement }
     *
     */
    public void setStatement(Statement value) {
        this.statement = value;
    }

}
apache-2.0
TVLuke/DynamixContextInterfaceLibrary
src/org/ambientdynamix/contextplugins/action/meta/IContextActio.java
1141
/* * Copyright (C) Institute of Telematics, Lukas Ruge * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.ambientdynamix.contextplugins.action.meta; /** * For now this is a dummy interface that allows for actions to implement their own interface even though this interface doesn't do anything. * * The intent is to later have IContext with some commom methods and split that up into IContextInfo and IContextActio being two interfaces that * entail methoids specific to Informing on Context or Acting in Context. * * @author lukas * */ public interface IContextActio { }
apache-2.0
OurOpenProject/ourProject
app/src/main/java/com/ourproject/ui/sq/adapter/TopicAdapter.java
2093
package com.ourproject.ui.sq.adapter;

import android.content.Context;
import android.content.Intent;
import android.support.v7.widget.RecyclerView;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ImageView;

import com.bumptech.glide.Glide;
import com.ourproject.R;
import com.ourproject.R2;
import com.ourproject.ui.sq.activity.TuiJanDetailActivity;
import com.ourproject.ui.sq.bean.SqBean;

import java.util.List;

import butterknife.BindView;
import butterknife.ButterKnife;

/**
 * RecyclerView adapter that renders one image card per community topic and
 * opens the topic's detail screen when the card is tapped.
 *
 * Created by admin on 2017/4/10.
 */
public class TopicAdapter extends RecyclerView.Adapter<TopicAdapter.TopicViewHolder> {

    // Topic rows backing the list; may be null, in which case the list is empty.
    private List<SqBean.DataBeanX.ShequTopicsBean.DataBean> data;
    // Context used for inflation, Glide loads, and starting the detail activity.
    private Context context;

    public TopicAdapter(List<SqBean.DataBeanX.ShequTopicsBean.DataBean> data, Context context) {
        this.data = data;
        this.context = context;
    }

    @Override
    public TopicViewHolder onCreateViewHolder(ViewGroup parent, int viewType) {
        View itemView = LayoutInflater.from(context).inflate(R.layout.fragment_jx_detail, parent, false);
        return new TopicViewHolder(itemView);
    }

    @Override
    public void onBindViewHolder(TopicViewHolder holder, final int position) {
        // NOTE(review): the bind-time position is captured in the listener; if
        // items can move after binding this may go stale — confirm whether
        // holder.getAdapterPosition() should be used at click time instead.
        holder.img_jx.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                // Open the topic detail screen, passing the topic's id.
                Intent intent = new Intent(context, TuiJanDetailActivity.class);
                intent.putExtra("postId", data.get(position).getId());
                context.startActivity(intent);
            }
        });
        // Load the topic image into the card.
        Glide.with(context).load(data.get(position).getImage()).into(holder.img_jx);
    }

    @Override
    public int getItemCount() {
        // Null-safe: a missing list renders as an empty adapter.
        return data != null ? data.size() : 0;
    }

    /** Holds the single image view of a topic card, bound via ButterKnife. */
    class TopicViewHolder extends RecyclerView.ViewHolder {

        @BindView(R2.id.img_jx)
        ImageView img_jx;

        public TopicViewHolder(View itemView) {
            super(itemView);
            ButterKnife.bind(this, itemView);
        }
    }
}
apache-2.0
asiaon123/hsweb-framework
hsweb-system/hsweb-system-authorization/hsweb-system-authorization-service/hsweb-system-authorization-service-simple/src/main/java/org/hswebframework/web/service/authorization/simple/SimpleMenuGroupService.java
5444
/*
 * Copyright 2016 http://www.hswebframework.org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.hswebframework.web.service.authorization.simple;

import org.hswebframework.web.commons.entity.TreeSupportEntity;
import org.hswebframework.web.dao.authorization.MenuGroupDao;
import org.hswebframework.web.entity.authorization.MenuEntity;
import org.hswebframework.web.entity.authorization.MenuGroupBindEntity;
import org.hswebframework.web.entity.authorization.MenuGroupEntity;
import org.hswebframework.web.id.IDGenerator;
import org.hswebframework.web.service.AbstractTreeSortService;
import org.hswebframework.web.service.DefaultDSLUpdateService;
import org.hswebframework.web.service.authorization.MenuGroupBindService;
import org.hswebframework.web.service.authorization.MenuGroupService;
import org.hswebframework.web.service.authorization.MenuService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.cache.annotation.CacheConfig;
import org.springframework.cache.annotation.CacheEvict;
import org.springframework.cache.annotation.Cacheable;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;

import java.util.LinkedList;
import java.util.List;
import java.util.stream.Collectors;

import static org.hswebframework.web.service.authorization.simple.CacheConstants.MENU_CACHE_NAME;

/**
 * Default menu-group service implementation. Manages menu groups as a sorted
 * tree and caches query results in the shared menu cache; every mutation
 * evicts the whole cache to keep group/menu lookups consistent.
 *
 * @author hsweb-generator-online
 */
@Service("menuGroupService")
@CacheConfig(cacheNames = MENU_CACHE_NAME)
public class SimpleMenuGroupService extends AbstractTreeSortService<MenuGroupEntity, String>
        implements MenuGroupService {
    @Autowired
    private MenuGroupDao menuGroupDao;
    @Autowired
    private MenuService menuService;
    @Autowired
    private MenuGroupBindService menuGroupBindService;

    @Override
    protected IDGenerator<String> getIDGenerator() {
        // MD5-based string ids for new groups.
        return IDGenerator.MD5;
    }

    @Override
    public MenuGroupDao getDao() {
        return menuGroupDao;
    }

    /**
     * Resolves the distinct menus bound to any of the given group ids.
     * Results are cached per group-id-list hash; an empty/missing binding
     * yields an empty list rather than null.
     */
    @Override
    @Cacheable(key = "'group-id-list:'+(#groupId==null?0:#groupId.hashCode())")
    public List<MenuEntity> getMenuByGroupId(List<String> groupId) {
        List<MenuGroupBindEntity> bindEntities = menuGroupBindService.selectByPk(groupId);
        if (bindEntities == null || bindEntities.isEmpty()) {
            return new LinkedList<>();
        }
        return menuService.selectByPk(bindEntities.stream()
                .map(MenuGroupBindEntity::getMenuId)
                .distinct()
                .collect(Collectors.toList()));
    }

    /**
     * Creates a group (forced to enabled status) and batch-inserts its menu
     * bindings, stamping each binding with the new group id.
     */
    @Override
    @CacheEvict(allEntries = true)
    public String insert(MenuGroupEntity entity) {
        entity.setStatus((byte) 1);
        String id = super.insert(entity);
        List<MenuGroupBindEntity> bindEntities = entity.getBindInfo();
        if (bindEntities != null && !bindEntities.isEmpty()) {
            TreeSupportEntity.forEach(bindEntities, bindEntity -> {
                bindEntity.setGroupId(id);
                // NOTE(review): this re-sets status on the GROUP entity for every
                // binding; it looks like bindEntity.setStatus was intended — confirm
                // against MenuGroupBindEntity's API.
                entity.setStatus((byte) 1);
            });
            menuGroupBindService.insertBatch(bindEntities);
        }
        return id;
    }

    /**
     * Updates a group and fully replaces its bindings: existing bindings for
     * the group are deleted, then the supplied set is re-inserted.
     */
    @Override
    @CacheEvict(allEntries = true)
    public int updateByPk(MenuGroupEntity entity) {
        int size = super.updateByPk(entity);
        List<MenuGroupBindEntity> bindEntities = entity.getBindInfo();
        if (bindEntities != null && !bindEntities.isEmpty()) {
            TreeSupportEntity.forEach(bindEntities, bindEntity -> {
                bindEntity.setGroupId(entity.getId());
            });
            menuGroupBindService.deleteByGroupId(entity.getId());
            menuGroupBindService.insertBatch(bindEntities);
        }
        return size;
    }

    // Batch update; delegates to the parent but must still evict the cache.
    @CacheEvict(allEntries = true)
    @Override
    public int updateByPk(List<MenuGroupEntity> data) {
        return super.updateByPk(data);
    }

    // Keyed update; note this variant does NOT touch bindings (unlike
    // updateByPk(MenuGroupEntity) above).
    @Override
    @CacheEvict(allEntries = true)
    public int updateByPk(String id, MenuGroupEntity entity) {
        return super.updateByPk(id, entity);
    }

    @Override
    @CacheEvict(allEntries = true)
    public int deleteByPk(String id) {
        return super.deleteByPk(id);
    }

    /** Marks the group enabled (status = 1) after validating the id. */
    @Override
    @CacheEvict(allEntries = true)
    public void enable(String id) {
        tryValidateProperty(StringUtils.hasLength(id), MenuGroupEntity.id, "{id_is_null}");
        createUpdate()
                .set(MenuGroupEntity.status, 1)
                .where(MenuGroupEntity.id, id)
                .exec();
    }

    /** Marks the group disabled (status = 0) after validating the id. */
    @Override
    @CacheEvict(allEntries = true)
    public void disable(String id) {
        tryValidateProperty(StringUtils.hasLength(id), MenuGroupEntity.id, "{id_is_null}");
        DefaultDSLUpdateService
                .createUpdate(getDao())
                .set(MenuGroupEntity.status, 0)
                .where(MenuGroupEntity.id, id)
                .exec();
    }
}
apache-2.0
DICE-UNC/indexing
src/databook/edsl/map/DmapDhasPart.java
757
/** Code generated by EriLex */
// NOTE: generated EDSL node — one constructor/visitor case of the Dmap AST.
// Do not hand-edit; regenerate with EriLex instead.
package databook.edsl.map;

public class DmapDhasPart<__t,__E> extends Dmap<__t,__E> {
    // Child nodes, retained for visitor access after being passed to super.
    DarrayTail<__t,__E> i0;
    Dmap<__t,__E> i1;

    public DmapDhasPart(
        final DarrayTail<__t,__E> i0,
        final Dmap<__t,__E> i1) {
        // Tags this node "map"/"hasPart" in the underlying AST representation.
        super(new erilex.tree.ASTValueData(
            "map",
            "map")
            , new erilex.data.generic.Tree(
            new erilex.tree.ASTValueData(
            "hasPart",
            "hasPart")), i0, i1);
        this.i0=i0;
        this.i1=i1;
    }

    // Visitor dispatch: forwards to the visitor's overload for this node type.
    public java.lang.Object accept(
        final Visitor v,
        final java.lang.String key) {
        return v.visit(key, this);
    }

    // Static factory mirroring the constructor (generated fluent-API entry).
    public static <__t,__E> DmapDhasPart<__t,__E> hasPart(
        final DarrayTail<__t,__E> i0,
        final Dmap<__t,__E> i1) {
        return new DmapDhasPart<__t,__E>(
            i0,
            i1);
    }
}
apache-2.0
bonigarcia/webdrivermanager
src/main/java/io/github/bonigarcia/wdm/config/ConfigKey.java
1567
/*
 * (C) Copyright 2018 Boni Garcia (https://bonigarcia.github.io/)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package io.github.bonigarcia.wdm.config;

/**
 * Configuration key class: couples a key name with its value type, its
 * current value, and the default that {@link #reset()} restores.
 *
 * @author Boni Garcia
 * @since 2.2.0
 */
public class ConfigKey<T> {

    String name;
    Class<T> type;
    T value;
    T defaultValue;

    public ConfigKey(Class<T> type) {
        this.type = type;
    }

    public ConfigKey(String name, Class<T> type) {
        this(type);
        this.name = name;
    }

    public ConfigKey(String name, Class<T> type, T value) {
        this(name, type);
        // The initial value doubles as the default restored by reset().
        this.value = value;
        this.defaultValue = value;
    }

    public String getName() {
        return name;
    }

    public Class<T> getType() {
        return type;
    }

    public T getValue() {
        return value;
    }

    /** Restores the value captured at construction (null for the 1/2-arg constructors). */
    public void reset() {
        value = defaultValue;
    }

    /** Unchecked by design: callers supply untyped values read from properties. */
    @SuppressWarnings("unchecked")
    public void setValue(Object value) {
        this.value = (T) value;
    }

}
apache-2.0
lshmouse/hbase
hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
101830
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hbase.security.access; import static org.apache.hadoop.hbase.AuthUtil.toGroupEntry; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; import java.security.PrivilegedAction; import java.util.Arrays; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; 
import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.CoprocessorService; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.CountRequest; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.CountResponse; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.HelloRequest; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.HelloResponse; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.IncrementCountRequest; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.IncrementCountResponse; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.NoopRequest; import 
org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.NoopResponse; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.PingRequest; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.PingResponse; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.PingService; import org.apache.hadoop.hbase.exceptions.HBaseException; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.CheckPermissionsRequest; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost; import org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.access.Permission.Action; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.SecurityTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import 
org.junit.experimental.categories.Category;

import com.google.protobuf.BlockingRpcChannel;
import com.google.protobuf.RpcCallback;
import com.google.protobuf.RpcController;
import com.google.protobuf.Service;
import com.google.protobuf.ServiceException;

/**
 * Performs authorization checks for common operations, according to different
 * levels of authorized users.
 */
@Category({SecurityTests.class, LargeTests.class})
public class TestAccessController extends SecureTestUtil {
  private static final Log LOG = LogFactory.getLog(TestAccessController.class);

  static {
    // Crank ACL-related loggers up to TRACE so every authorization decision made during a
    // failing run is visible in the test output.
    Logger.getLogger(AccessController.class).setLevel(Level.TRACE);
    Logger.getLogger(AccessControlFilter.class).setLevel(Level.TRACE);
    Logger.getLogger(TableAuthManager.class).setLevel(Level.TRACE);
  }

  // Shared table created once in setUpTableAndUserPermissions() and reused by the tests.
  private static TableName TEST_TABLE = TableName.valueOf("testtable1");
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static Configuration conf;

  /** The systemUserConnection created here is tied to the system user. In case, you are planning
   * to create AccessTestAction, DON'T use this systemUserConnection as the 'doAs' user
   * gets eclipsed by the system user. */
  private static Connection systemUserConnection;

  // user with all permissions
  private static User SUPERUSER;
  // user granted with all global permission
  private static User USER_ADMIN;
  // user with rw permissions on column family.
  private static User USER_RW;
  // user with read-only permissions
  private static User USER_RO;
  // user is table owner. will have all permissions on table
  private static User USER_OWNER;
  // user with create table permissions alone
  private static User USER_CREATE;
  // user with no permissions
  private static User USER_NONE;
  // user with admin rights on the column family
  private static User USER_ADMIN_CF;

  private static final String GROUP_ADMIN = "group_admin";
  private static final String GROUP_CREATE = "group_create";
  private static final String GROUP_READ = "group_read";
  private static final String GROUP_WRITE = "group_write";

  // Users whose only permissions come through group membership (granted in
  // setUpTableAndUserPermissions() via toGroupEntry()).
  private static User USER_GROUP_ADMIN;
  private static User USER_GROUP_CREATE;
  private static User USER_GROUP_READ;
  private static User USER_GROUP_WRITE;

  // TODO: convert this test to cover the full matrix in
  // https://hbase.apache.org/book/appendix_acl_matrix.html
  // creating all Scope x Permission combinations
  private static TableName TEST_TABLE2 = TableName.valueOf("testtable2");
  private static byte[] TEST_FAMILY = Bytes.toBytes("f1");
  private static byte[] TEST_QUALIFIER = Bytes.toBytes("q1");
  private static byte[] TEST_ROW = Bytes.toBytes("r1");

  // Coprocessor environments used to invoke the AccessController's pre* hooks directly.
  private static MasterCoprocessorEnvironment CP_ENV;
  private static AccessController ACCESS_CONTROLLER;
  private static RegionServerCoprocessorEnvironment RSCP_ENV;
  private static RegionCoprocessorEnvironment RCP_ENV;

  /**
   * Starts a secured mini cluster, wires the AccessController into the master and a region
   * server, creates the test users, and delegates table/grant setup to
   * {@link #setUpTableAndUserPermissions()}. Runs once for the whole class.
   */
  @BeforeClass
  public static void setupBeforeClass() throws Exception {
    // setup configuration
    conf = TEST_UTIL.getConfiguration();
    // Enable security
    enableSecurity(conf);
    // In this particular test case, we can't use SecureBulkLoadEndpoint because its doAs will fail
    // to move a file for a random user
    conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName());
    // Verify enableSecurity sets up what we require
    verifyConfiguration(conf);
    // Enable EXEC permission checking
    conf.setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true);
    TEST_UTIL.startMiniCluster();
    MasterCoprocessorHost cpHost =
        TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
    cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf);
    ACCESS_CONTROLLER = (AccessController) cpHost.findCoprocessor(AccessController.class.getName());
    CP_ENV = cpHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER,
        Coprocessor.PRIORITY_HIGHEST, 1, conf);
    RegionServerCoprocessorHost rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0)
        .getRegionServerCoprocessorHost();
    RSCP_ENV = rsHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER,
        Coprocessor.PRIORITY_HIGHEST, 1, conf);

    // Wait for the ACL table to become available
    TEST_UTIL.waitUntilAllRegionsAssigned(AccessControlLists.ACL_TABLE_NAME);

    // create a set of test users
    SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" });
    USER_ADMIN = User.createUserForTesting(conf, "admin2", new String[0]);
    USER_RW = User.createUserForTesting(conf, "rwuser", new String[0]);
    USER_RO = User.createUserForTesting(conf, "rouser", new String[0]);
    USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]);
    USER_CREATE = User.createUserForTesting(conf, "tbl_create", new String[0]);
    USER_NONE = User.createUserForTesting(conf, "nouser", new String[0]);
    USER_ADMIN_CF = User.createUserForTesting(conf, "col_family_admin", new String[0]);

    USER_GROUP_ADMIN =
        User.createUserForTesting(conf, "user_group_admin", new String[] { GROUP_ADMIN });
    USER_GROUP_CREATE =
        User.createUserForTesting(conf, "user_group_create", new String[] { GROUP_CREATE });
    USER_GROUP_READ =
        User.createUserForTesting(conf, "user_group_read", new String[] { GROUP_READ });
    USER_GROUP_WRITE =
        User.createUserForTesting(conf, "user_group_write", new String[] { GROUP_WRITE });

    systemUserConnection = TEST_UTIL.getConnection();
    setUpTableAndUserPermissions();
  }

  /** Drops the test table, verifies the ACL state is clean, and stops the mini cluster. */
  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    cleanUp();
    TEST_UTIL.shutdownMiniCluster();
  }
  /**
   * Creates the shared test table (one family, pre-split at "s", owned by USER_OWNER) and
   * installs the baseline grants every test relies on: global grants for USER_ADMIN and the
   * four group entries, and table/family-scoped grants for USER_RW, USER_CREATE, USER_RO and
   * USER_ADMIN_CF. Finishes by asserting exactly 5 table-level permission entries exist.
   */
  private static void setUpTableAndUserPermissions() throws Exception {
    HTableDescriptor htd = new HTableDescriptor(TEST_TABLE);
    HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
    hcd.setMaxVersions(100);
    htd.addFamily(hcd);
    htd.setOwner(USER_OWNER);
    createTable(TEST_UTIL, htd, new byte[][] { Bytes.toBytes("s") });

    // Region-level coprocessor environment for tests that drive region hooks directly.
    Region region = TEST_UTIL.getHBaseCluster().getRegions(TEST_TABLE).get(0);
    RegionCoprocessorHost rcpHost = region.getCoprocessorHost();
    RCP_ENV = rcpHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER,
        Coprocessor.PRIORITY_HIGHEST, 1, conf);

    // Set up initial grants
    grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(),
      Permission.Action.ADMIN,
      Permission.Action.CREATE,
      Permission.Action.READ,
      Permission.Action.WRITE);

    grantOnTable(TEST_UTIL, USER_RW.getShortName(),
      TEST_TABLE, TEST_FAMILY, null,
      Permission.Action.READ,
      Permission.Action.WRITE);

    // USER_CREATE is USER_RW plus CREATE permissions
    grantOnTable(TEST_UTIL, USER_CREATE.getShortName(),
      TEST_TABLE, null, null,
      Permission.Action.CREATE,
      Permission.Action.READ,
      Permission.Action.WRITE);

    grantOnTable(TEST_UTIL, USER_RO.getShortName(),
      TEST_TABLE, TEST_FAMILY, null,
      Permission.Action.READ);

    grantOnTable(TEST_UTIL, USER_ADMIN_CF.getShortName(),
      TEST_TABLE, TEST_FAMILY, null,
      Permission.Action.ADMIN,
      Permission.Action.CREATE);

    grantGlobal(TEST_UTIL, toGroupEntry(GROUP_ADMIN), Permission.Action.ADMIN);
    grantGlobal(TEST_UTIL, toGroupEntry(GROUP_CREATE), Permission.Action.CREATE);
    grantGlobal(TEST_UTIL, toGroupEntry(GROUP_READ), Permission.Action.READ);
    grantGlobal(TEST_UTIL, toGroupEntry(GROUP_WRITE), Permission.Action.WRITE);

    // 5 table-scoped entries: USER_RW, USER_CREATE, USER_RO, USER_ADMIN_CF plus the owner.
    assertEquals(5, AccessControlLists.getTablePermissions(conf, TEST_TABLE).size());
    try {
      assertEquals(5, AccessControlClient.getUserPermissions(systemUserConnection,
          TEST_TABLE.toString()).size());
    } catch (Throwable e) {
      // Best-effort cross-check through the client API; failure here is logged, not fatal.
      LOG.error("error during call of AccessControlClient.getUserPermissions. ", e);
    }
  }

  /**
   * Deletes the shared test table (tolerating tests that already deleted it) and verifies
   * that no table- or namespace-level permissions survive.
   */
  private static void cleanUp() throws Exception {
    // Clean the _acl_ table
    try {
      deleteTable(TEST_UTIL, TEST_TABLE);
    } catch (TableNotFoundException ex) {
      // Test deleted the table, no problem
      LOG.info("Test deleted table " + TEST_TABLE);
    }
    // Verify all table/namespace permissions are erased
    assertEquals(0, AccessControlLists.getTablePermissions(conf, TEST_TABLE).size());
    assertEquals(0,
      AccessControlLists.getNamespacePermissions(conf, TEST_TABLE.getNamespaceAsString()).size());
  }

  /** Table creation: allowed for superuser, global admin and CREATE-group; denied otherwise. */
  @Test
  public void testTableCreate() throws Exception {
    AccessTestAction createTable = new AccessTestAction() {
      @Override
      public Object run() throws Exception {
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testnewtable"));
        htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
        ACCESS_CONTROLLER.preCreateTable(ObserverContext.createAndPrepare(CP_ENV, null), htd, null);
        return null;
      }
    };

    // verify that superuser can create tables
    verifyAllowed(createTable, SUPERUSER, USER_ADMIN, USER_GROUP_CREATE);

    // all others should be denied
    verifyDenied(createTable, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_ADMIN,
      USER_GROUP_READ, USER_GROUP_WRITE);
  }

  /** Table schema modification: allowed for admin/create-level users and the owner. */
  @Test
  public void testTableModify() throws Exception {
    AccessTestAction modifyTable = new AccessTestAction() {
      @Override
      public Object run() throws Exception {
        HTableDescriptor htd = new HTableDescriptor(TEST_TABLE);
        htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
        // Per-user family name keeps repeated runs under different users from colliding.
        htd.addFamily(new HColumnDescriptor("fam_" + User.getCurrent().getShortName()));
        ACCESS_CONTROLLER.preModifyTable(ObserverContext.createAndPrepare(CP_ENV, null),
          TEST_TABLE, htd);
        return null;
      }
    };

    verifyAllowed(modifyTable, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, USER_GROUP_CREATE,
      USER_GROUP_ADMIN);
    verifyDenied(modifyTable, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE);
  }

  /** Table deletion: allowed for admin/create-level users and the owner. */
  @Test
  public void testTableDelete() throws Exception {
    AccessTestAction deleteTable = new AccessTestAction() {
      @Override
      public Object run() throws Exception {
        ACCESS_CONTROLLER
          .preDeleteTable(ObserverContext.createAndPrepare(CP_ENV, null), TEST_TABLE);
        return null;
      }
    };

    verifyAllowed(deleteTable, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, USER_GROUP_CREATE,
      USER_GROUP_ADMIN);
    verifyDenied(deleteTable, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE);
  }
ACCESS_CONTROLLER .preDeleteTable(ObserverContext.createAndPrepare(CP_ENV, null), TEST_TABLE); return null; } }; verifyAllowed(deleteTable, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, USER_GROUP_CREATE, USER_GROUP_ADMIN); verifyDenied(deleteTable, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE); } @Test public void testTableTruncate() throws Exception { AccessTestAction truncateTable = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER .preTruncateTable(ObserverContext.createAndPrepare(CP_ENV, null), TEST_TABLE); return null; } }; verifyAllowed(truncateTable, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, USER_GROUP_CREATE, USER_GROUP_ADMIN); verifyDenied(truncateTable, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE); } @Test public void testAddColumn() throws Exception { final HColumnDescriptor hcd = new HColumnDescriptor("fam_new"); AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preAddColumnFamily(ObserverContext.createAndPrepare(CP_ENV, null), TEST_TABLE, hcd); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, USER_GROUP_CREATE, USER_GROUP_ADMIN); verifyDenied(action, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE); } @Test public void testModifyColumn() throws Exception { final HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY); hcd.setMaxVersions(10); AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preModifyColumnFamily(ObserverContext.createAndPrepare(CP_ENV, null), TEST_TABLE, hcd); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, USER_ADMIN_CF, USER_GROUP_CREATE, USER_GROUP_ADMIN); verifyDenied(action, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE); } @Test public void testDeleteColumn() throws Exception { AccessTestAction 
action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preDeleteColumnFamily(ObserverContext.createAndPrepare(CP_ENV, null), TEST_TABLE, TEST_FAMILY); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, USER_ADMIN_CF, USER_GROUP_CREATE, USER_GROUP_ADMIN); verifyDenied(action, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE); } @Test public void testTableDisable() throws Exception { AccessTestAction disableTable = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preDisableTable(ObserverContext.createAndPrepare(CP_ENV, null), TEST_TABLE); return null; } }; AccessTestAction disableAclTable = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preDisableTable(ObserverContext.createAndPrepare(CP_ENV, null), AccessControlLists.ACL_TABLE_NAME); return null; } }; verifyAllowed(disableTable, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, USER_GROUP_CREATE, USER_GROUP_ADMIN); verifyDenied(disableTable, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE); // No user should be allowed to disable _acl_ table verifyDenied(disableAclTable, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_GROUP_CREATE, USER_GROUP_ADMIN, USER_GROUP_READ, USER_GROUP_WRITE); } @Test public void testTableEnable() throws Exception { AccessTestAction enableTable = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER .preEnableTable(ObserverContext.createAndPrepare(CP_ENV, null), TEST_TABLE); return null; } }; verifyAllowed(enableTable, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, USER_GROUP_CREATE, USER_GROUP_ADMIN); verifyDenied(enableTable, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE); } @Test public void testMove() throws Exception { List<HRegionLocation> regions; try (RegionLocator locator = 
systemUserConnection.getRegionLocator(TEST_TABLE)) { regions = locator.getAllRegionLocations(); } HRegionLocation location = regions.get(0); final HRegionInfo hri = location.getRegionInfo(); final ServerName server = location.getServerName(); AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preMove(ObserverContext.createAndPrepare(CP_ENV, null), hri, server, server); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test public void testAssign() throws Exception { List<HRegionLocation> regions; try (RegionLocator locator = systemUserConnection.getRegionLocator(TEST_TABLE)) { regions = locator.getAllRegionLocations(); } HRegionLocation location = regions.get(0); final HRegionInfo hri = location.getRegionInfo(); AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preAssign(ObserverContext.createAndPrepare(CP_ENV, null), hri); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test public void testUnassign() throws Exception { List<HRegionLocation> regions; try (RegionLocator locator = systemUserConnection.getRegionLocator(TEST_TABLE)) { regions = locator.getAllRegionLocations(); } HRegionLocation location = regions.get(0); final HRegionInfo hri = location.getRegionInfo(); AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preUnassign(ObserverContext.createAndPrepare(CP_ENV, null), hri, false); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_RW, USER_RO, 
USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test public void testRegionOffline() throws Exception { List<HRegionLocation> regions; try (RegionLocator locator = systemUserConnection.getRegionLocator(TEST_TABLE)) { regions = locator.getAllRegionLocations(); } HRegionLocation location = regions.get(0); final HRegionInfo hri = location.getRegionInfo(); AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preRegionOffline(ObserverContext.createAndPrepare(CP_ENV, null), hri); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test public void testBalance() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preBalance(ObserverContext.createAndPrepare(CP_ENV, null)); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test public void testBalanceSwitch() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preBalanceSwitch(ObserverContext.createAndPrepare(CP_ENV, null), true); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test public void testShutdown() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preShutdown(ObserverContext.createAndPrepare(CP_ENV, null)); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); 
  /** Stopping the master: allowed only for superuser and global admins. */
  @Test
  public void testStopMaster() throws Exception {
    AccessTestAction action = new AccessTestAction() {
      @Override
      public Object run() throws Exception {
        ACCESS_CONTROLLER.preStopMaster(ObserverContext.createAndPrepare(CP_ENV, null));
        return null;
      }
    };

    verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN);
    verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ,
      USER_GROUP_WRITE, USER_GROUP_CREATE);
  }

  /**
   * Asserts the given action succeeds for every user with WRITE on the test family
   * (including owner and create-level users) and fails for all others.
   */
  private void verifyWrite(AccessTestAction action) throws Exception {
    verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, USER_RW,
      USER_GROUP_WRITE);
    verifyDenied(action, USER_NONE, USER_RO, USER_GROUP_ADMIN, USER_GROUP_READ,
      USER_GROUP_CREATE);
  }

  /** Region split: allowed for superuser, global admins and the table owner. */
  @Test
  public void testSplit() throws Exception {
    AccessTestAction action = new AccessTestAction() {
      @Override
      public Object run() throws Exception {
        ACCESS_CONTROLLER.preSplit(ObserverContext.createAndPrepare(RCP_ENV, null));
        return null;
      }
    };

    verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN);
    verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ,
      USER_GROUP_WRITE, USER_GROUP_CREATE);
  }

  /** Split with an explicit split row: same allowed set as {@link #testSplit()}. */
  @Test
  public void testSplitWithSplitRow() throws Exception {
    AccessTestAction action = new AccessTestAction() {
      @Override
      public Object run() throws Exception {
        ACCESS_CONTROLLER.preSplit(
            ObserverContext.createAndPrepare(RCP_ENV, null),
            TEST_ROW);
        return null;
      }
    };
    verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN);
    verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ,
      USER_GROUP_WRITE, USER_GROUP_CREATE);
  }

  /**
   * Region merge via the region-server hook: allowed for superuser, global admins and the
   * owner. Uses a throwaway multi-region table that is deleted afterwards.
   */
  @Test
  public void testMergeRegions() throws Exception {
    final TableName tname = TableName.valueOf("testMergeRegions");
    createTestTable(tname);
    try {
      final List<HRegion> regions = TEST_UTIL.getHBaseCluster().findRegionsForTable(tname);
      assertTrue("not enough regions: " + regions.size(), regions.size() >= 2);

      AccessTestAction action = new AccessTestAction() {
        @Override
        public Object run() throws Exception {
          ACCESS_CONTROLLER.preMerge(ObserverContext.createAndPrepare(RSCP_ENV, null),
            regions.get(0), regions.get(1));
          return null;
        }
      };

      verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN);
      verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ,
        USER_GROUP_WRITE, USER_GROUP_CREATE);
    } finally {
      // Always drop the temporary table so later tests start from a clean state.
      deleteTable(TEST_UTIL, tname);
    }
  }

  /** Flush: allowed for admin/create-level users and the owner. */
  @Test
  public void testFlush() throws Exception {
    AccessTestAction action = new AccessTestAction() {
      @Override
      public Object run() throws Exception {
        ACCESS_CONTROLLER.preFlush(ObserverContext.createAndPrepare(RCP_ENV, null));
        return null;
      }
    };

    verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, USER_GROUP_CREATE,
      USER_GROUP_ADMIN);
    verifyDenied(action, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE);
  }

  /** Compaction: same allowed set as flush. */
  @Test
  public void testCompact() throws Exception {
    AccessTestAction action = new AccessTestAction() {
      @Override
      public Object run() throws Exception {
        ACCESS_CONTROLLER.preCompact(ObserverContext.createAndPrepare(RCP_ENV, null), null, null,
          ScanType.COMPACT_RETAIN_DELETES);
        return null;
      }
    };

    verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, USER_GROUP_CREATE,
      USER_GROUP_ADMIN);
    verifyDenied(action, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE);
  }

  /**
   * Asserts the given action succeeds for every user with READ on the test family and fails
   * for all others.
   */
  private void verifyRead(AccessTestAction action) throws Exception {
    verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, USER_RW, USER_RO,
      USER_GROUP_READ);
    verifyDenied(action, USER_NONE, USER_GROUP_CREATE, USER_GROUP_ADMIN, USER_GROUP_WRITE);
  }

  /** Asserts the given action succeeds only for users holding both READ and WRITE. */
  private void verifyReadWrite(AccessTestAction action) throws Exception {
    verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, USER_RW);
    verifyDenied(action, USER_NONE, USER_RO, USER_GROUP_ADMIN, USER_GROUP_CREATE,
      USER_GROUP_READ, USER_GROUP_WRITE);
  }
public void testRead() throws Exception {
  // get action
  AccessTestAction getAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      Get g = new Get(TEST_ROW);
      g.addFamily(TEST_FAMILY);
      try (Connection conn = ConnectionFactory.createConnection(conf);
          Table t = conn.getTable(TEST_TABLE)) {
        t.get(g);
      }
      return null;
    }
  };
  verifyRead(getAction);

  // action for scanning
  AccessTestAction scanAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      Scan s = new Scan();
      s.addFamily(TEST_FAMILY);
      try (Connection conn = ConnectionFactory.createConnection(conf);
          Table table = conn.getTable(TEST_TABLE)) {
        ResultScanner scanner = table.getScanner(s);
        try {
          for (Result r = scanner.next(); r != null; r = scanner.next()) {
            // do nothing
          }
        } catch (IOException e) {
          // NOTE(review): iteration errors are intentionally ignored here;
          // presumably authorization failures already surface from getScanner()
          // above — confirm before relying on this.
        } finally {
          scanner.close();
        }
      }
      return null;
    }
  };
  verifyRead(scanAction);
}

@Test
// test put, delete, increment
public void testWrite() throws Exception {
  // put action
  AccessTestAction putAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      Put p = new Put(TEST_ROW);
      p.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(1));
      try (Connection conn = ConnectionFactory.createConnection(conf);
          Table t = conn.getTable(TEST_TABLE)) {
        t.put(p);
      }
      return null;
    }
  };
  verifyWrite(putAction);

  // delete action
  AccessTestAction deleteAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      Delete d = new Delete(TEST_ROW);
      d.deleteFamily(TEST_FAMILY);
      try (Connection conn = ConnectionFactory.createConnection(conf);
          Table t = conn.getTable(TEST_TABLE)) {
        t.delete(d);
      }
      return null;
    }
  };
  verifyWrite(deleteAction);

  // increment action
  AccessTestAction incrementAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      Increment inc = new Increment(TEST_ROW);
      inc.addColumn(TEST_FAMILY, TEST_QUALIFIER, 1);
      try (Connection conn = ConnectionFactory.createConnection(conf);
          Table t = conn.getTable(TEST_TABLE);) {
t.increment(inc); } return null; } }; verifyWrite(incrementAction); } @Test public void testReadWrite() throws Exception { // action for checkAndDelete AccessTestAction checkAndDeleteAction = new AccessTestAction() { @Override public Object run() throws Exception { Delete d = new Delete(TEST_ROW); d.deleteFamily(TEST_FAMILY); try(Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE);) { t.checkAndDelete(TEST_ROW, TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("test_value"), d); } return null; } }; verifyReadWrite(checkAndDeleteAction); // action for checkAndPut() AccessTestAction checkAndPut = new AccessTestAction() { @Override public Object run() throws Exception { Put p = new Put(TEST_ROW); p.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(1)); try(Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE);) { t.checkAndPut(TEST_ROW, TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("test_value"), p); } return null; } }; verifyReadWrite(checkAndPut); } @Test public void testBulkLoad() throws Exception { try { FileSystem fs = TEST_UTIL.getTestFileSystem(); final Path dir = TEST_UTIL.getDataTestDirOnTestFS("testBulkLoad"); fs.mkdirs(dir); // need to make it globally writable // so users creating HFiles have write permissions fs.setPermission(dir, FsPermission.valueOf("-rwxrwxrwx")); AccessTestAction bulkLoadAction = new AccessTestAction() { @Override public Object run() throws Exception { int numRows = 3; // Making the assumption that the test table won't split between the range byte[][][] hfileRanges = { { { (byte) 0 }, { (byte) 9 } } }; Path bulkLoadBasePath = new Path(dir, new Path(User.getCurrent().getName())); new BulkLoadHelper(bulkLoadBasePath).bulkLoadHFile(TEST_TABLE, TEST_FAMILY, TEST_QUALIFIER, hfileRanges, numRows); return null; } }; // User performing bulk loads must have privilege to read table metadata // (ADMIN or CREATE) verifyAllowed(bulkLoadAction, SUPERUSER, USER_ADMIN, 
USER_OWNER, USER_CREATE, USER_GROUP_CREATE); verifyDenied(bulkLoadAction, USER_RW, USER_NONE, USER_RO, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_ADMIN); } finally { // Reinit after the bulk upload TEST_UTIL.getHBaseAdmin().disableTable(TEST_TABLE); TEST_UTIL.getHBaseAdmin().enableTable(TEST_TABLE); } } public class BulkLoadHelper { private final FileSystem fs; private final Path loadPath; private final Configuration conf; public BulkLoadHelper(Path loadPath) throws IOException { fs = TEST_UTIL.getTestFileSystem(); conf = TEST_UTIL.getConfiguration(); loadPath = loadPath.makeQualified(fs); this.loadPath = loadPath; } private void createHFile(Path path, byte[] family, byte[] qualifier, byte[] startKey, byte[] endKey, int numRows) throws IOException { HFile.Writer writer = null; long now = System.currentTimeMillis(); try { HFileContext context = new HFileContextBuilder().build(); writer = HFile.getWriterFactory(conf, new CacheConfig(conf)) .withPath(fs, path) .withFileContext(context) .create(); // subtract 2 since numRows doesn't include boundary keys for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, true, numRows-2)) { KeyValue kv = new KeyValue(key, family, qualifier, now, key); writer.append(kv); } } finally { if(writer != null) writer.close(); } } private void bulkLoadHFile( TableName tableName, byte[] family, byte[] qualifier, byte[][][] hfileRanges, int numRowsPerRange) throws Exception { Path familyDir = new Path(loadPath, Bytes.toString(family)); fs.mkdirs(familyDir); int hfileIdx = 0; for (byte[][] range : hfileRanges) { byte[] from = range[0]; byte[] to = range[1]; createHFile(new Path(familyDir, "hfile_"+(hfileIdx++)), family, qualifier, from, to, numRowsPerRange); } //set global read so RegionServer can move it setPermission(loadPath, FsPermission.valueOf("-rwxrwxrwx")); try (Connection conn = ConnectionFactory.createConnection(conf); Admin admin = conn.getAdmin(); RegionLocator locator = conn.getRegionLocator(tableName); Table table = 
conn.getTable(tableName)) { TEST_UTIL.waitUntilAllRegionsAssigned(tableName); LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf); loader.doBulkLoad(loadPath, admin, table, locator); } } public void setPermission(Path dir, FsPermission perm) throws IOException { if(!fs.getFileStatus(dir).isDirectory()) { fs.setPermission(dir,perm); } else { for(FileStatus el : fs.listStatus(dir)) { fs.setPermission(el.getPath(), perm); setPermission(el.getPath() , perm); } } } } @Test public void testAppend() throws Exception { AccessTestAction appendAction = new AccessTestAction() { @Override public Object run() throws Exception { byte[] row = TEST_ROW; byte[] qualifier = TEST_QUALIFIER; Put put = new Put(row); put.add(TEST_FAMILY, qualifier, Bytes.toBytes(1)); Append append = new Append(row); append.add(TEST_FAMILY, qualifier, Bytes.toBytes(2)); try(Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE)) { t.put(put); t.append(append); } return null; } }; verifyAllowed(appendAction, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, USER_RW, USER_GROUP_WRITE); verifyDenied(appendAction, USER_RO, USER_NONE, USER_GROUP_CREATE, USER_GROUP_READ, USER_GROUP_ADMIN); } @Test public void testGrantRevoke() throws Exception { AccessTestAction grantAction = new AccessTestAction() { @Override public Object run() throws Exception { try(Connection conn = ConnectionFactory.createConnection(conf); Table acl = conn.getTable(AccessControlLists.ACL_TABLE_NAME)) { BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.grant(protocol, USER_RO.getShortName(), TEST_TABLE, TEST_FAMILY, null, Action.READ); } return null; } }; AccessTestAction revokeAction = new AccessTestAction() { @Override public Object run() throws Exception { try(Connection conn = ConnectionFactory.createConnection(conf); Table acl = 
conn.getTable(AccessControlLists.ACL_TABLE_NAME)) { BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.revoke(protocol, USER_RO.getShortName(), TEST_TABLE, TEST_FAMILY, null, Action.READ); } return null; } }; AccessTestAction getTablePermissionsAction = new AccessTestAction() { @Override public Object run() throws Exception { try(Connection conn = ConnectionFactory.createConnection(conf); Table acl = conn.getTable(AccessControlLists.ACL_TABLE_NAME)){ BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.getUserPermissions(protocol, TEST_TABLE); } return null; } }; AccessTestAction getGlobalPermissionsAction = new AccessTestAction() { @Override public Object run() throws Exception { try(Connection conn = ConnectionFactory.createConnection(conf); Table acl = conn.getTable(AccessControlLists.ACL_TABLE_NAME);) { BlockingRpcChannel service = acl.coprocessorService(HConstants.EMPTY_START_ROW); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.getUserPermissions(protocol); } return null; } }; verifyAllowed(grantAction, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(grantAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); try { verifyAllowed(revokeAction, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(revokeAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); verifyAllowed(getTablePermissionsAction, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(getTablePermissionsAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); 
verifyAllowed(getGlobalPermissionsAction, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(getGlobalPermissionsAction, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } finally { // Cleanup, Grant the revoked permission back to the user grantOnTable(TEST_UTIL, USER_RO.getShortName(), TEST_TABLE, TEST_FAMILY, null, Permission.Action.READ); } } @Test public void testPostGrantRevoke() throws Exception { final TableName tableName = TableName.valueOf("TempTable"); final byte[] family1 = Bytes.toBytes("f1"); final byte[] family2 = Bytes.toBytes("f2"); final byte[] qualifier = Bytes.toBytes("q"); // create table Admin admin = TEST_UTIL.getHBaseAdmin(); if (admin.tableExists(tableName)) { deleteTable(TEST_UTIL, tableName); } HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(family1)); htd.addFamily(new HColumnDescriptor(family2)); createTable(TEST_UTIL, htd); try { // create temp users User tblUser = User.createUserForTesting(TEST_UTIL.getConfiguration(), "tbluser", new String[0]); User gblUser = User.createUserForTesting(TEST_UTIL.getConfiguration(), "gbluser", new String[0]); // prepare actions: AccessTestAction putActionAll = new AccessTestAction() { @Override public Object run() throws Exception { Put p = new Put(Bytes.toBytes("a")); p.add(family1, qualifier, Bytes.toBytes("v1")); p.add(family2, qualifier, Bytes.toBytes("v2")); try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(tableName);) { t.put(p); } return null; } }; AccessTestAction putAction1 = new AccessTestAction() { @Override public Object run() throws Exception { Put p = new Put(Bytes.toBytes("a")); p.add(family1, qualifier, Bytes.toBytes("v1")); try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(tableName)) { t.put(p); } return null; } }; AccessTestAction putAction2 = new AccessTestAction() { @Override public Object run() 
throws Exception { Put p = new Put(Bytes.toBytes("a")); p.add(family2, qualifier, Bytes.toBytes("v2")); try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(tableName);) { t.put(p); } return null; } }; AccessTestAction getActionAll = new AccessTestAction() { @Override public Object run() throws Exception { Get g = new Get(TEST_ROW); g.addFamily(family1); g.addFamily(family2); try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(tableName);) { t.get(g); } return null; } }; AccessTestAction getAction1 = new AccessTestAction() { @Override public Object run() throws Exception { Get g = new Get(TEST_ROW); g.addFamily(family1); try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(tableName)) { t.get(g); } return null; } }; AccessTestAction getAction2 = new AccessTestAction() { @Override public Object run() throws Exception { Get g = new Get(TEST_ROW); g.addFamily(family2); try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(tableName)) { t.get(g); } return null; } }; AccessTestAction deleteActionAll = new AccessTestAction() { @Override public Object run() throws Exception { Delete d = new Delete(TEST_ROW); d.deleteFamily(family1); d.deleteFamily(family2); try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(tableName)) { t.delete(d); } return null; } }; AccessTestAction deleteAction1 = new AccessTestAction() { @Override public Object run() throws Exception { Delete d = new Delete(TEST_ROW); d.deleteFamily(family1); try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(tableName)) { t.delete(d); } return null; } }; AccessTestAction deleteAction2 = new AccessTestAction() { @Override public Object run() throws Exception { Delete d = new Delete(TEST_ROW); d.deleteFamily(family2); try (Connection conn = ConnectionFactory.createConnection(conf); Table t = 
conn.getTable(tableName)) {
          t.delete(d);
        }
        return null;
      }
    };

    // initial check:
    verifyDenied(tblUser, getActionAll, getAction1, getAction2);
    verifyDenied(tblUser, putActionAll, putAction1, putAction2);
    verifyDenied(tblUser, deleteActionAll, deleteAction1, deleteAction2);

    verifyDenied(gblUser, getActionAll, getAction1, getAction2);
    verifyDenied(gblUser, putActionAll, putAction1, putAction2);
    verifyDenied(gblUser, deleteActionAll, deleteAction1, deleteAction2);

    // grant table read permission
    grantGlobal(TEST_UTIL, gblUser.getShortName(), Permission.Action.READ);
    grantOnTable(TEST_UTIL, tblUser.getShortName(), tableName, null, null,
      Permission.Action.READ);

    // check
    verifyAllowed(tblUser, getActionAll, getAction1, getAction2);
    verifyDenied(tblUser, putActionAll, putAction1, putAction2);
    verifyDenied(tblUser, deleteActionAll, deleteAction1, deleteAction2);

    verifyAllowed(gblUser, getActionAll, getAction1, getAction2);
    verifyDenied(gblUser, putActionAll, putAction1, putAction2);
    verifyDenied(gblUser, deleteActionAll, deleteAction1, deleteAction2);

    // grant table write permission while revoking read permissions
    grantGlobal(TEST_UTIL, gblUser.getShortName(), Permission.Action.WRITE);
    grantOnTable(TEST_UTIL, tblUser.getShortName(), tableName, null, null,
      Permission.Action.WRITE);

    verifyDenied(tblUser, getActionAll, getAction1, getAction2);
    verifyAllowed(tblUser, putActionAll, putAction1, putAction2);
    verifyAllowed(tblUser, deleteActionAll, deleteAction1, deleteAction2);

    verifyDenied(gblUser, getActionAll, getAction1, getAction2);
    verifyAllowed(gblUser, putActionAll, putAction1, putAction2);
    verifyAllowed(gblUser, deleteActionAll, deleteAction1, deleteAction2);

    // revoke table permissions
    revokeGlobal(TEST_UTIL, gblUser.getShortName());
    revokeFromTable(TEST_UTIL, tblUser.getShortName(), tableName, null, null);

    verifyDenied(tblUser, getActionAll, getAction1, getAction2);
    verifyDenied(tblUser, putActionAll, putAction1, putAction2);
    verifyDenied(tblUser, deleteActionAll, deleteAction1, deleteAction2);

    verifyDenied(gblUser, getActionAll, getAction1, getAction2);
    verifyDenied(gblUser, putActionAll, putAction1, putAction2);
    verifyDenied(gblUser, deleteActionAll, deleteAction1, deleteAction2);

    // grant column family read permission
    grantGlobal(TEST_UTIL, gblUser.getShortName(), Permission.Action.READ);
    grantOnTable(TEST_UTIL, tblUser.getShortName(), tableName, family1, null,
      Permission.Action.READ);

    // Access should be denied for family2
    verifyAllowed(tblUser, getActionAll, getAction1);
    verifyDenied(tblUser, getAction2);
    verifyDenied(tblUser, putActionAll, putAction1, putAction2);
    verifyDenied(tblUser, deleteActionAll, deleteAction1, deleteAction2);

    verifyAllowed(gblUser, getActionAll, getAction1, getAction2);
    verifyDenied(gblUser, putActionAll, putAction1, putAction2);
    verifyDenied(gblUser, deleteActionAll, deleteAction1, deleteAction2);

    // grant column family write permission
    grantGlobal(TEST_UTIL, gblUser.getShortName(), Permission.Action.WRITE);
    grantOnTable(TEST_UTIL, tblUser.getShortName(), tableName, family2, null,
      Permission.Action.WRITE);

    // READ from family1, WRITE to family2 are allowed
    verifyAllowed(tblUser, getActionAll, getAction1);
    verifyAllowed(tblUser, putAction2, deleteAction2);
    verifyDenied(tblUser, getAction2);
    verifyDenied(tblUser, putActionAll, putAction1);
    verifyDenied(tblUser, deleteActionAll, deleteAction1);

    verifyDenied(gblUser, getActionAll, getAction1, getAction2);
    verifyAllowed(gblUser, putActionAll, putAction1, putAction2);
    verifyAllowed(gblUser, deleteActionAll, deleteAction1, deleteAction2);

    // revoke column family permission
    revokeGlobal(TEST_UTIL, gblUser.getShortName());
    revokeFromTable(TEST_UTIL, tblUser.getShortName(), tableName, family2, null);

    // Revoke on family2 should not have impact on family1 permissions
    verifyAllowed(tblUser, getActionAll, getAction1);
    verifyDenied(tblUser, getAction2);
    verifyDenied(tblUser, putActionAll, putAction1, putAction2);
    verifyDenied(tblUser, deleteActionAll,
deleteAction1, deleteAction2); // Should not have access as global permissions are completely revoked verifyDenied(gblUser, getActionAll, getAction1, getAction2); verifyDenied(gblUser, putActionAll, putAction1, putAction2); verifyDenied(gblUser, deleteActionAll, deleteAction1, deleteAction2); } finally { // delete table deleteTable(TEST_UTIL, tableName); } } private boolean hasFoundUserPermission(UserPermission userPermission, List<UserPermission> perms) { return perms.contains(userPermission); } @Test public void testPostGrantRevokeAtQualifierLevel() throws Exception { final TableName tableName = TableName.valueOf("testGrantRevokeAtQualifierLevel"); final byte[] family1 = Bytes.toBytes("f1"); final byte[] family2 = Bytes.toBytes("f2"); final byte[] qualifier = Bytes.toBytes("q"); // create table Admin admin = TEST_UTIL.getHBaseAdmin(); if (admin.tableExists(tableName)) { deleteTable(TEST_UTIL, tableName); } HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(family1)); htd.addFamily(new HColumnDescriptor(family2)); createTable(TEST_UTIL, htd); try { // create temp users User user = User.createUserForTesting(TEST_UTIL.getConfiguration(), "user", new String[0]); AccessTestAction getQualifierAction = new AccessTestAction() { @Override public Object run() throws Exception { Get g = new Get(TEST_ROW); g.addColumn(family1, qualifier); try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(tableName)) { t.get(g); } return null; } }; AccessTestAction putQualifierAction = new AccessTestAction() { @Override public Object run() throws Exception { Put p = new Put(TEST_ROW); p.add(family1, qualifier, Bytes.toBytes("v1")); try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(tableName)) { t.put(p); } return null; } }; AccessTestAction deleteQualifierAction = new AccessTestAction() { @Override public Object run() throws Exception { Delete d = new Delete(TEST_ROW); 
d.deleteColumn(family1, qualifier); // d.deleteFamily(family1); try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(tableName)) { t.delete(d); } return null; } }; revokeFromTable(TEST_UTIL, user.getShortName(), tableName, family1, null); verifyDenied(user, getQualifierAction); verifyDenied(user, putQualifierAction); verifyDenied(user, deleteQualifierAction); grantOnTable(TEST_UTIL, user.getShortName(), tableName, family1, qualifier, Permission.Action.READ); verifyAllowed(user, getQualifierAction); verifyDenied(user, putQualifierAction); verifyDenied(user, deleteQualifierAction); // only grant write permission // TODO: comment this portion after HBASE-3583 grantOnTable(TEST_UTIL, user.getShortName(), tableName, family1, qualifier, Permission.Action.WRITE); verifyDenied(user, getQualifierAction); verifyAllowed(user, putQualifierAction); verifyAllowed(user, deleteQualifierAction); // grant both read and write permission grantOnTable(TEST_UTIL, user.getShortName(), tableName, family1, qualifier, Permission.Action.READ, Permission.Action.WRITE); verifyAllowed(user, getQualifierAction); verifyAllowed(user, putQualifierAction); verifyAllowed(user, deleteQualifierAction); // revoke family level permission won't impact column level revokeFromTable(TEST_UTIL, user.getShortName(), tableName, family1, qualifier); verifyDenied(user, getQualifierAction); verifyDenied(user, putQualifierAction); verifyDenied(user, deleteQualifierAction); } finally { // delete table deleteTable(TEST_UTIL, tableName); } } @Test public void testPermissionList() throws Exception { final TableName tableName = TableName.valueOf("testPermissionList"); final byte[] family1 = Bytes.toBytes("f1"); final byte[] family2 = Bytes.toBytes("f2"); final byte[] qualifier = Bytes.toBytes("q"); // create table Admin admin = TEST_UTIL.getHBaseAdmin(); if (admin.tableExists(tableName)) { deleteTable(TEST_UTIL, tableName); } HTableDescriptor htd = new HTableDescriptor(tableName); 
htd.addFamily(new HColumnDescriptor(family1)); htd.addFamily(new HColumnDescriptor(family2)); htd.setOwner(USER_OWNER); createTable(TEST_UTIL, htd); try { List<UserPermission> perms; Table acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); perms = ProtobufUtil.getUserPermissions(protocol, tableName); } finally { acl.close(); } UserPermission ownerperm = new UserPermission(Bytes.toBytes(USER_OWNER.getName()), tableName, null, Action.values()); assertTrue("Owner should have all permissions on table", hasFoundUserPermission(ownerperm, perms)); User user = User.createUserForTesting(TEST_UTIL.getConfiguration(), "user", new String[0]); byte[] userName = Bytes.toBytes(user.getShortName()); UserPermission up = new UserPermission(userName, tableName, family1, qualifier, Permission.Action.READ); assertFalse("User should not be granted permission: " + up.toString(), hasFoundUserPermission(up, perms)); // grant read permission grantOnTable(TEST_UTIL, user.getShortName(), tableName, family1, qualifier, Permission.Action.READ); acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); perms = ProtobufUtil.getUserPermissions(protocol, tableName); } finally { acl.close(); } UserPermission upToVerify = new UserPermission(userName, tableName, family1, qualifier, Permission.Action.READ); assertTrue("User should be granted permission: " + upToVerify.toString(), hasFoundUserPermission(upToVerify, perms)); upToVerify = new UserPermission(userName, tableName, family1, qualifier, Permission.Action.WRITE); assertFalse("User should not be granted permission: " + upToVerify.toString(), 
hasFoundUserPermission(upToVerify, perms)); // grant read+write grantOnTable(TEST_UTIL, user.getShortName(), tableName, family1, qualifier, Permission.Action.WRITE, Permission.Action.READ); acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); perms = ProtobufUtil.getUserPermissions(protocol, tableName); } finally { acl.close(); } upToVerify = new UserPermission(userName, tableName, family1, qualifier, Permission.Action.WRITE, Permission.Action.READ); assertTrue("User should be granted permission: " + upToVerify.toString(), hasFoundUserPermission(upToVerify, perms)); // revoke revokeFromTable(TEST_UTIL, user.getShortName(), tableName, family1, qualifier, Permission.Action.WRITE, Permission.Action.READ); acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); perms = ProtobufUtil.getUserPermissions(protocol, tableName); } finally { acl.close(); } assertFalse("User should not be granted permission: " + upToVerify.toString(), hasFoundUserPermission(upToVerify, perms)); // disable table before modification admin.disableTable(tableName); User newOwner = User.createUserForTesting(conf, "new_owner", new String[] {}); htd.setOwner(newOwner); admin.modifyTable(tableName, htd); acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); perms = ProtobufUtil.getUserPermissions(protocol, tableName); } finally { acl.close(); } UserPermission newOwnerperm = new UserPermission(Bytes.toBytes(newOwner.getName()), 
tableName, null, Action.values()); assertTrue("New owner should have all permissions on table", hasFoundUserPermission(newOwnerperm, perms)); } finally { // delete table deleteTable(TEST_UTIL, tableName); } } @Test public void testGlobalPermissionList() throws Exception { List<UserPermission> perms; Table acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel service = acl.coprocessorService(HConstants.EMPTY_START_ROW); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); perms = ProtobufUtil.getUserPermissions(protocol); } finally { acl.close(); } UserPermission adminPerm = new UserPermission(Bytes.toBytes(USER_ADMIN.getShortName()), AccessControlLists.ACL_TABLE_NAME, null, null, Bytes.toBytes("ACRW")); assertTrue("Only global users and user admin has permission on table _acl_ per setup", perms.size() == 5 && hasFoundUserPermission(adminPerm, perms)); } /** global operations */ private void verifyGlobal(AccessTestAction action) throws Exception { verifyAllowed(action, SUPERUSER); verifyDenied(action, USER_CREATE, USER_RW, USER_NONE, USER_RO); } @Test public void testCheckPermissions() throws Exception { // -------------------------------------- // test global permissions AccessTestAction globalAdmin = new AccessTestAction() { @Override public Void run() throws Exception { checkGlobalPerms(TEST_UTIL, Permission.Action.ADMIN); return null; } }; // verify that only superuser can admin verifyGlobal(globalAdmin); // -------------------------------------- // test multiple permissions AccessTestAction globalReadWrite = new AccessTestAction() { @Override public Void run() throws Exception { checkGlobalPerms(TEST_UTIL, Permission.Action.READ, Permission.Action.WRITE); return null; } }; verifyGlobal(globalReadWrite); // -------------------------------------- // table/column/qualifier level permissions final byte[] TEST_Q1 = Bytes.toBytes("q1"); final byte[] TEST_Q2 = Bytes.toBytes("q2"); 
User userTable = User.createUserForTesting(conf, "user_check_perms_table", new String[0]);
  User userColumn = User.createUserForTesting(conf, "user_check_perms_family", new String[0]);
  User userQualifier = User.createUserForTesting(conf, "user_check_perms_q", new String[0]);

  grantOnTable(TEST_UTIL, userTable.getShortName(), TEST_TABLE, null, null,
    Permission.Action.READ);
  grantOnTable(TEST_UTIL, userColumn.getShortName(), TEST_TABLE, TEST_FAMILY, null,
    Permission.Action.READ);
  grantOnTable(TEST_UTIL, userQualifier.getShortName(), TEST_TABLE, TEST_FAMILY, TEST_Q1,
    Permission.Action.READ);

  try {
    AccessTestAction tableRead = new AccessTestAction() {
      @Override
      public Void run() throws Exception {
        checkTablePerms(TEST_UTIL, TEST_TABLE, null, null, Permission.Action.READ);
        return null;
      }
    };

    AccessTestAction columnRead = new AccessTestAction() {
      @Override
      public Void run() throws Exception {
        checkTablePerms(TEST_UTIL, TEST_TABLE, TEST_FAMILY, null, Permission.Action.READ);
        return null;
      }
    };

    AccessTestAction qualifierRead = new AccessTestAction() {
      @Override
      public Void run() throws Exception {
        checkTablePerms(TEST_UTIL, TEST_TABLE, TEST_FAMILY, TEST_Q1,
          Permission.Action.READ);
        return null;
      }
    };

    AccessTestAction multiQualifierRead = new AccessTestAction() {
      @Override
      public Void run() throws Exception {
        checkTablePerms(TEST_UTIL, TEST_TABLE, new Permission[] {
            new TablePermission(TEST_TABLE, TEST_FAMILY, TEST_Q1, Permission.Action.READ),
            new TablePermission(TEST_TABLE, TEST_FAMILY, TEST_Q2, Permission.Action.READ), });
        return null;
      }
    };

    AccessTestAction globalAndTableRead = new AccessTestAction() {
      @Override
      public Void run() throws Exception {
        checkTablePerms(TEST_UTIL, TEST_TABLE, new Permission[] {
            new Permission(Permission.Action.READ),
            new TablePermission(TEST_TABLE, null, (byte[]) null, Permission.Action.READ), });
        return null;
      }
    };

    AccessTestAction noCheck = new AccessTestAction() {
      @Override
      public Void run() throws Exception {
        checkTablePerms(TEST_UTIL, TEST_TABLE, new Permission[0]);
        return null;
      }
    };

    verifyAllowed(tableRead, SUPERUSER, userTable);
    verifyDenied(tableRead, userColumn, userQualifier);

    verifyAllowed(columnRead, SUPERUSER, userTable, userColumn);
    verifyDenied(columnRead, userQualifier);

    verifyAllowed(qualifierRead, SUPERUSER, userTable, userColumn, userQualifier);

    verifyAllowed(multiQualifierRead, SUPERUSER, userTable, userColumn);
    verifyDenied(multiQualifierRead, userQualifier);

    verifyAllowed(globalAndTableRead, SUPERUSER);
    verifyDenied(globalAndTableRead, userTable, userColumn, userQualifier);

    verifyAllowed(noCheck, SUPERUSER, userTable, userColumn, userQualifier);

    // --------------------------------------
    // test family level multiple permissions
    AccessTestAction familyReadWrite = new AccessTestAction() {
      @Override
      public Void run() throws Exception {
        checkTablePerms(TEST_UTIL, TEST_TABLE, TEST_FAMILY, null, Permission.Action.READ,
          Permission.Action.WRITE);
        return null;
      }
    };

    verifyAllowed(familyReadWrite, SUPERUSER, USER_OWNER, USER_CREATE, USER_RW);
    verifyDenied(familyReadWrite, USER_NONE, USER_RO);

    // --------------------------------------
    // check for wrong table region
    CheckPermissionsRequest checkRequest = CheckPermissionsRequest
        .newBuilder()
        .addPermission(
          AccessControlProtos.Permission
              .newBuilder()
              .setType(AccessControlProtos.Permission.Type.Table)
              .setTablePermission(
                AccessControlProtos.TablePermission.newBuilder()
                    .setTableName(ProtobufUtil.toProtoTableName(TEST_TABLE))
                    .addAction(AccessControlProtos.Permission.Action.CREATE))).build();
    Table acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME);
    try {
      BlockingRpcChannel channel = acl.coprocessorService(new byte[0]);
      AccessControlService.BlockingInterface protocol =
        AccessControlService.newBlockingStub(channel);
      try {
        // but ask for TablePermissions for TEST_TABLE
        protocol.checkPermissions(null, checkRequest);
        fail("this should have thrown CoprocessorException");
      } catch (ServiceException ex) {
        // expected
      }
    }
finally { acl.close(); } } finally { revokeFromTable(TEST_UTIL, userTable.getShortName(), TEST_TABLE, null, null, Permission.Action.READ); revokeFromTable(TEST_UTIL, userColumn.getShortName(), TEST_TABLE, TEST_FAMILY, null, Permission.Action.READ); revokeFromTable(TEST_UTIL, userQualifier.getShortName(), TEST_TABLE, TEST_FAMILY, TEST_Q1, Permission.Action.READ); } } @Test public void testStopRegionServer() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preStopRegionServer(ObserverContext.createAndPrepare(RSCP_ENV, null)); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test public void testRollWALWriterRequest() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preRollWALWriterRequest(ObserverContext.createAndPrepare(RSCP_ENV, null)); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test public void testOpenRegion() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preOpen(ObserverContext.createAndPrepare(RCP_ENV, null)); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER, USER_GROUP_CREATE, USER_GROUP_READ, USER_GROUP_WRITE); } @Test public void testCloseRegion() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preClose(ObserverContext.createAndPrepare(RCP_ENV, null), false); return null; 
} }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER, USER_GROUP_CREATE, USER_GROUP_READ, USER_GROUP_WRITE); } @Test public void testSnapshot() throws Exception { Admin admin = TEST_UTIL.getHBaseAdmin(); final HTableDescriptor htd = admin.getTableDescriptor(TEST_TABLE); SnapshotDescription.Builder builder = SnapshotDescription.newBuilder(); builder.setName(TEST_TABLE.getNameAsString() + "-snapshot"); builder.setTable(TEST_TABLE.getNameAsString()); final SnapshotDescription snapshot = builder.build(); AccessTestAction snapshotAction = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preSnapshot(ObserverContext.createAndPrepare(CP_ENV, null), snapshot, htd); return null; } }; AccessTestAction deleteAction = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preDeleteSnapshot(ObserverContext.createAndPrepare(CP_ENV, null), snapshot); return null; } }; AccessTestAction restoreAction = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preRestoreSnapshot(ObserverContext.createAndPrepare(CP_ENV, null), snapshot, htd); return null; } }; AccessTestAction cloneAction = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preCloneSnapshot(ObserverContext.createAndPrepare(CP_ENV, null), null, null); return null; } }; verifyAllowed(snapshotAction, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(snapshotAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); verifyAllowed(cloneAction, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(deleteAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); verifyAllowed(restoreAction, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(restoreAction, 
USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); verifyAllowed(deleteAction, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(cloneAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test public void testSnapshotWithOwner() throws Exception { Admin admin = TEST_UTIL.getHBaseAdmin(); final HTableDescriptor htd = admin.getTableDescriptor(TEST_TABLE); SnapshotDescription.Builder builder = SnapshotDescription.newBuilder(); builder.setName(TEST_TABLE.getNameAsString() + "-snapshot"); builder.setTable(TEST_TABLE.getNameAsString()); builder.setOwner(USER_OWNER.getName()); final SnapshotDescription snapshot = builder.build(); AccessTestAction snapshotAction = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preSnapshot(ObserverContext.createAndPrepare(CP_ENV, null), snapshot, htd); return null; } }; verifyAllowed(snapshotAction, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(snapshotAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); AccessTestAction deleteAction = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preDeleteSnapshot(ObserverContext.createAndPrepare(CP_ENV, null), snapshot); return null; } }; verifyAllowed(deleteAction, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(deleteAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); AccessTestAction restoreAction = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preRestoreSnapshot(ObserverContext.createAndPrepare(CP_ENV, null), snapshot, htd); return null; } }; verifyAllowed(restoreAction, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(restoreAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, 
USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); AccessTestAction cloneAction = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preCloneSnapshot(ObserverContext.createAndPrepare(CP_ENV, null), null, null); return null; } }; // Clone by snapshot owner is not allowed , because clone operation creates a new table, // which needs global admin permission. verifyAllowed(cloneAction, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(cloneAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test public void testGlobalAuthorizationForNewRegisteredRS() throws Exception { LOG.debug("Test for global authorization for a new registered RegionServer."); MiniHBaseCluster hbaseCluster = TEST_UTIL.getHBaseCluster(); final Admin admin = TEST_UTIL.getHBaseAdmin(); HTableDescriptor htd = new HTableDescriptor(TEST_TABLE2); htd.addFamily(new HColumnDescriptor(TEST_FAMILY)); createTable(TEST_UTIL, htd); // Starting a new RegionServer. JVMClusterUtil.RegionServerThread newRsThread = hbaseCluster .startRegionServer(); final HRegionServer newRs = newRsThread.getRegionServer(); // Move region to the new RegionServer. 
List<HRegionLocation> regions; try (RegionLocator locator = systemUserConnection.getRegionLocator(TEST_TABLE2)) { regions = locator.getAllRegionLocations(); } HRegionLocation location = regions.get(0); final HRegionInfo hri = location.getRegionInfo(); final ServerName server = location.getServerName(); try (Table table = systemUserConnection.getTable(TEST_TABLE2)) { AccessTestAction moveAction = new AccessTestAction() { @Override public Object run() throws Exception { admin.move(hri.getEncodedNameAsBytes(), Bytes.toBytes(newRs.getServerName().getServerName())); return null; } }; SUPERUSER.runAs(moveAction); final int RETRIES_LIMIT = 10; int retries = 0; while (newRs.getOnlineRegions(TEST_TABLE2).size() < 1 && retries < RETRIES_LIMIT) { LOG.debug("Waiting for region to be opened. Already retried " + retries + " times."); try { Thread.sleep(1000); } catch (InterruptedException e) { } retries++; if (retries == RETRIES_LIMIT - 1) { fail("Retry exhaust for waiting region to be opened."); } } // Verify write permission for user "admin2" who has the global // permissions. 
AccessTestAction putAction = new AccessTestAction() { @Override public Object run() throws Exception { Put put = new Put(Bytes.toBytes("test")); put.add(TEST_FAMILY, Bytes.toBytes("qual"), Bytes.toBytes("value")); table.put(put); return null; } }; USER_ADMIN.runAs(putAction); } } @Test public void testTableDescriptorsEnumeration() throws Exception { User TABLE_ADMIN = User.createUserForTesting(conf, "UserA", new String[0]); // Grant TABLE ADMIN privs grantOnTable(TEST_UTIL, TABLE_ADMIN.getShortName(), TEST_TABLE, null, null, Permission.Action.ADMIN); try { AccessTestAction listTablesAction = new AccessTestAction() { @Override public Object run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); Admin admin = conn.getAdmin()) { return Arrays.asList(admin.listTables()); } } }; AccessTestAction getTableDescAction = new AccessTestAction() { @Override public Object run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); Admin admin = conn.getAdmin();) { return admin.getTableDescriptor(TEST_TABLE); } } }; verifyAllowed(listTablesAction, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, TABLE_ADMIN, USER_GROUP_CREATE, USER_GROUP_ADMIN); verifyIfEmptyList(listTablesAction, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE); verifyAllowed(getTableDescAction, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, TABLE_ADMIN, USER_GROUP_CREATE, USER_GROUP_ADMIN); verifyDenied(getTableDescAction, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE); } finally { // Cleanup, revoke TABLE ADMIN privs revokeFromTable(TEST_UTIL, TABLE_ADMIN.getShortName(), TEST_TABLE, null, null, Permission.Action.ADMIN); } } @Test public void testTableNameEnumeration() throws Exception { AccessTestAction listTablesAction = new AccessTestAction() { @Override public Object run() throws Exception { Connection unmanagedConnection = 
ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); Admin admin = unmanagedConnection.getAdmin(); try { return Arrays.asList(admin.listTableNames()); } finally { admin.close(); unmanagedConnection.close(); } } }; verifyAllowed(listTablesAction, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_GROUP_CREATE, USER_GROUP_ADMIN, USER_GROUP_READ, USER_GROUP_WRITE); verifyIfEmptyList(listTablesAction, USER_NONE); } @Test public void testTableDeletion() throws Exception { User TABLE_ADMIN = User.createUserForTesting(conf, "TestUser", new String[0]); final TableName tname = TableName.valueOf("testTableDeletion"); createTestTable(tname); // Grant TABLE ADMIN privs grantOnTable(TEST_UTIL, TABLE_ADMIN.getShortName(), tname, null, null, Permission.Action.ADMIN); AccessTestAction deleteTableAction = new AccessTestAction() { @Override public Object run() throws Exception { Connection unmanagedConnection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); Admin admin = unmanagedConnection.getAdmin(); try { deleteTable(TEST_UTIL, admin, tname); } finally { admin.close(); unmanagedConnection.close(); } return null; } }; verifyDenied(deleteTableAction, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE); verifyAllowed(deleteTableAction, TABLE_ADMIN); } private void createTestTable(TableName tname) throws Exception { HTableDescriptor htd = new HTableDescriptor(tname); HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY); hcd.setMaxVersions(100); htd.addFamily(hcd); htd.setOwner(USER_OWNER); createTable(TEST_UTIL, htd, new byte[][] { Bytes.toBytes("s") }); } @Test public void testNamespaceUserGrant() throws Exception { AccessTestAction getAction = new AccessTestAction() { @Override public Object run() throws Exception { try(Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE);) { return t.get(new Get(TEST_ROW)); } } }; String namespace = TEST_TABLE.getNamespaceAsString(); // 
Grant namespace READ to USER_NONE, this should supersede any table permissions grantOnNamespace(TEST_UTIL, USER_NONE.getShortName(), namespace, Permission.Action.READ); // Now USER_NONE should be able to read verifyAllowed(getAction, USER_NONE); // Revoke namespace READ to USER_NONE revokeFromNamespace(TEST_UTIL, USER_NONE.getShortName(), namespace, Permission.Action.READ); verifyDenied(getAction, USER_NONE); } @Test public void testAccessControlClientGrantRevoke() throws Exception { // Create user for testing, who has no READ privileges by default. User testGrantRevoke = User.createUserForTesting(conf, "testGrantRevoke", new String[0]); AccessTestAction getAction = new AccessTestAction() { @Override public Object run() throws Exception { try(Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE);) { return t.get(new Get(TEST_ROW)); } } }; verifyDenied(getAction, testGrantRevoke); // Grant table READ permissions to testGrantRevoke. try { grantOnTableUsingAccessControlClient(TEST_UTIL, systemUserConnection, testGrantRevoke.getShortName(), TEST_TABLE, null, null, Permission.Action.READ); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.grant. ", e); } // Now testGrantRevoke should be able to read also verifyAllowed(getAction, testGrantRevoke); // Revoke table READ permission to testGrantRevoke. try { revokeFromTableUsingAccessControlClient(TEST_UTIL, systemUserConnection, testGrantRevoke.getShortName(), TEST_TABLE, null, null, Permission.Action.READ); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.revoke ", e); } // Now testGrantRevoke shouldn't be able read verifyDenied(getAction, testGrantRevoke); } @Test public void testAccessControlClientGlobalGrantRevoke() throws Exception { // Create user for testing, who has no READ privileges by default. 
User testGlobalGrantRevoke = User.createUserForTesting(conf, "testGlobalGrantRevoke", new String[0]); AccessTestAction getAction = new AccessTestAction() { @Override public Object run() throws Exception { try(Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE)) { return t.get(new Get(TEST_ROW)); } } }; verifyDenied(getAction, testGlobalGrantRevoke); // Grant table READ permissions to testGlobalGrantRevoke. String userName = testGlobalGrantRevoke.getShortName(); try { grantGlobalUsingAccessControlClient(TEST_UTIL, systemUserConnection, userName, Permission.Action.READ); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.grant. ", e); } try { // Now testGlobalGrantRevoke should be able to read also verifyAllowed(getAction, testGlobalGrantRevoke); // Revoke table READ permission to testGlobalGrantRevoke. try { revokeGlobalUsingAccessControlClient(TEST_UTIL, systemUserConnection, userName, Permission.Action.READ); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.revoke ", e); } // Now testGlobalGrantRevoke shouldn't be able read verifyDenied(getAction, testGlobalGrantRevoke); } finally { revokeGlobal(TEST_UTIL, userName, Permission.Action.READ); } } @Test public void testAccessControlClientGrantRevokeOnNamespace() throws Exception { // Create user for testing, who has no READ privileges by default. 
User testNS = User.createUserForTesting(conf, "testNS", new String[0]); AccessTestAction getAction = new AccessTestAction() { @Override public Object run() throws Exception { try(Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE);) { return t.get(new Get(TEST_ROW)); } } }; verifyDenied(getAction, testNS); String userName = testNS.getShortName(); String namespace = TEST_TABLE.getNamespaceAsString(); // Grant namespace READ to testNS, this should supersede any table permissions try { grantOnNamespaceUsingAccessControlClient(TEST_UTIL, systemUserConnection, userName, namespace, Permission.Action.READ); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.grant. ", e); } try { // Now testNS should be able to read also verifyAllowed(getAction, testNS); // Revoke namespace READ to testNS, this should supersede any table permissions try { revokeFromNamespaceUsingAccessControlClient(TEST_UTIL, systemUserConnection, userName, namespace, Permission.Action.READ); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.revoke ", e); } // Now testNS shouldn't be able read verifyDenied(getAction, testNS); } finally { revokeFromNamespace(TEST_UTIL, userName, namespace, Permission.Action.READ); } } public static class PingCoprocessor extends PingService implements Coprocessor, CoprocessorService { @Override public void start(CoprocessorEnvironment env) throws IOException { } @Override public void stop(CoprocessorEnvironment env) throws IOException { } @Override public Service getService() { return this; } @Override public void ping(RpcController controller, PingRequest request, RpcCallback<PingResponse> callback) { callback.run(PingResponse.newBuilder().setPong("Pong!").build()); } @Override public void count(RpcController controller, CountRequest request, RpcCallback<CountResponse> callback) { callback.run(CountResponse.newBuilder().build()); } @Override public void increment(RpcController 
controller, IncrementCountRequest requet, RpcCallback<IncrementCountResponse> callback) { callback.run(IncrementCountResponse.newBuilder().build()); } @Override public void hello(RpcController controller, HelloRequest request, RpcCallback<HelloResponse> callback) { callback.run(HelloResponse.newBuilder().setResponse("Hello!").build()); } @Override public void noop(RpcController controller, NoopRequest request, RpcCallback<NoopResponse> callback) { callback.run(NoopResponse.newBuilder().build()); } } @Test public void testCoprocessorExec() throws Exception { // Set up our ping endpoint service on all regions of our test table for (JVMClusterUtil.RegionServerThread thread: TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads()) { HRegionServer rs = thread.getRegionServer(); for (Region region: rs.getOnlineRegions(TEST_TABLE)) { region.getCoprocessorHost().load(PingCoprocessor.class, Coprocessor.PRIORITY_USER, conf); } } // Create users for testing, and grant EXEC privileges on our test table // only to user A User userA = User.createUserForTesting(conf, "UserA", new String[0]); User userB = User.createUserForTesting(conf, "UserB", new String[0]); grantOnTable(TEST_UTIL, userA.getShortName(), TEST_TABLE, null, null, Permission.Action.EXEC); try { // Create an action for invoking our test endpoint AccessTestAction execEndpointAction = new AccessTestAction() { @Override public Object run() throws Exception { try (Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE);) { BlockingRpcChannel service = t.coprocessorService(HConstants.EMPTY_BYTE_ARRAY); PingCoprocessor.newBlockingStub(service).noop(null, NoopRequest.newBuilder().build()); } return null; } }; String namespace = TEST_TABLE.getNamespaceAsString(); // Now grant EXEC to the entire namespace to user B grantOnNamespace(TEST_UTIL, userB.getShortName(), namespace, Permission.Action.EXEC); // User B should now be allowed also verifyAllowed(execEndpointAction, userA, userB); 
revokeFromNamespace(TEST_UTIL, userB.getShortName(), namespace, Permission.Action.EXEC); // Verify that EXEC permission is checked correctly verifyDenied(execEndpointAction, userB); verifyAllowed(execEndpointAction, userA); } finally { // Cleanup, revoke the userA privileges revokeFromTable(TEST_UTIL, userA.getShortName(), TEST_TABLE, null, null, Permission.Action.EXEC); } } @Test public void testReservedCellTags() throws Exception { AccessTestAction putWithReservedTag = new AccessTestAction() { @Override public Object run() throws Exception { try(Connection conn = ConnectionFactory.createConnection(conf); Table t = conn.getTable(TEST_TABLE);) { KeyValue kv = new KeyValue(TEST_ROW, TEST_FAMILY, TEST_QUALIFIER, HConstants.LATEST_TIMESTAMP, HConstants.EMPTY_BYTE_ARRAY, new Tag[] { new Tag(AccessControlLists.ACL_TAG_TYPE, ProtobufUtil.toUsersAndPermissions(USER_OWNER.getShortName(), new Permission(Permission.Action.READ)).toByteArray()) }); t.put(new Put(TEST_ROW).add(kv)); } return null; } }; // Current user is superuser verifyAllowed(putWithReservedTag, User.getCurrent()); // No other user should be allowed verifyDenied(putWithReservedTag, USER_OWNER, USER_ADMIN, USER_CREATE, USER_RW, USER_RO); } @Test public void testSetQuota() throws Exception { AccessTestAction setUserQuotaAction = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preSetUserQuota(ObserverContext.createAndPrepare(CP_ENV, null), null, null); return null; } }; AccessTestAction setUserTableQuotaAction = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preSetUserQuota(ObserverContext.createAndPrepare(CP_ENV, null), null, TEST_TABLE, null); return null; } }; AccessTestAction setUserNamespaceQuotaAction = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preSetUserQuota(ObserverContext.createAndPrepare(CP_ENV, null), null, (String)null, null); return null; } }; AccessTestAction 
setTableQuotaAction = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preSetTableQuota(ObserverContext.createAndPrepare(CP_ENV, null), TEST_TABLE, null); return null; } }; AccessTestAction setNamespaceQuotaAction = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preSetNamespaceQuota(ObserverContext.createAndPrepare(CP_ENV, null), null, null); return null; } }; verifyAllowed(setUserQuotaAction, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(setUserQuotaAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); verifyAllowed(setUserTableQuotaAction, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(setUserTableQuotaAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); verifyAllowed(setUserNamespaceQuotaAction, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(setUserNamespaceQuotaAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); verifyAllowed(setTableQuotaAction, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(setTableQuotaAction, USER_CREATE, USER_RW, USER_RO, USER_NONE); verifyAllowed(setNamespaceQuotaAction, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(setNamespaceQuotaAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } @Test public void testGetNamespacePermission() throws Exception { String namespace = "testGetNamespacePermission"; NamespaceDescriptor desc = NamespaceDescriptor.create(namespace).build(); createNamespace(TEST_UTIL, desc); grantOnNamespace(TEST_UTIL, USER_NONE.getShortName(), namespace, Permission.Action.READ); try { List<UserPermission> namespacePermissions = AccessControlClient.getUserPermissions( systemUserConnection, 
AccessControlLists.toNamespaceEntry(namespace)); assertTrue(namespacePermissions != null); assertTrue(namespacePermissions.size() == 1); } catch (Throwable thw) { throw new HBaseException(thw); } deleteNamespace(TEST_UTIL, namespace); } @Test public void testTruncatePerms() throws Exception { try { List<UserPermission> existingPerms = AccessControlClient.getUserPermissions( systemUserConnection, TEST_TABLE.getNameAsString()); assertTrue(existingPerms != null); assertTrue(existingPerms.size() > 1); TEST_UTIL.getHBaseAdmin().disableTable(TEST_TABLE); TEST_UTIL.truncateTable(TEST_TABLE); TEST_UTIL.waitTableAvailable(TEST_TABLE); List<UserPermission> perms = AccessControlClient.getUserPermissions( systemUserConnection, TEST_TABLE.getNameAsString()); assertTrue(perms != null); assertEquals(existingPerms.size(), perms.size()); } catch (Throwable e) { throw new HBaseIOException(e); } } private PrivilegedAction<List<UserPermission>> getPrivilegedAction(final String regex) { return new PrivilegedAction<List<UserPermission>>() { @Override public List<UserPermission> run() { try(Connection conn = ConnectionFactory.createConnection(conf);) { return AccessControlClient.getUserPermissions(conn, regex); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.getUserPermissions.", e); return null; } } }; } @Test public void testAccessControlClientUserPerms() throws Exception { TableName tname = TableName.valueOf("testAccessControlClientUserPerms"); createTestTable(tname); try { final String regex = tname.getNameWithNamespaceInclAsString(); User testUserPerms = User.createUserForTesting(conf, "testUserPerms", new String[0]); assertEquals(0, testUserPerms.runAs(getPrivilegedAction(regex)).size()); // Grant TABLE ADMIN privs to testUserPerms grantOnTable(TEST_UTIL, testUserPerms.getShortName(), tname, null, null, Action.ADMIN); List<UserPermission> perms = testUserPerms.runAs(getPrivilegedAction(regex)); assertNotNull(perms); // Superuser, testUserPerms 
assertEquals(2, perms.size()); } finally { deleteTable(TEST_UTIL, tname); } } @Test public void testAccessControllerUserPermsRegexHandling() throws Exception { User testRegexHandler = User.createUserForTesting(conf, "testRegexHandling", new String[0]); final String REGEX_ALL_TABLES = ".*"; final String tableName = "testRegex"; final TableName table1 = TableName.valueOf(tableName); final byte[] family = Bytes.toBytes("f1"); // create table in default ns Admin admin = TEST_UTIL.getHBaseAdmin(); HTableDescriptor htd = new HTableDescriptor(table1); htd.addFamily(new HColumnDescriptor(family)); createTable(TEST_UTIL, htd); // creating the ns and table in it String ns = "testNamespace"; NamespaceDescriptor desc = NamespaceDescriptor.create(ns).build(); final TableName table2 = TableName.valueOf(ns, tableName); createNamespace(TEST_UTIL, desc); htd = new HTableDescriptor(table2); htd.addFamily(new HColumnDescriptor(family)); createTable(TEST_UTIL, htd); // Verify that we can read sys-tables String aclTableName = AccessControlLists.ACL_TABLE_NAME.getNameAsString(); assertEquals(5, SUPERUSER.runAs(getPrivilegedAction(aclTableName)).size()); assertEquals(0, testRegexHandler.runAs(getPrivilegedAction(aclTableName)).size()); // Grant TABLE ADMIN privs to testUserPerms assertEquals(0, testRegexHandler.runAs(getPrivilegedAction(REGEX_ALL_TABLES)).size()); grantOnTable(TEST_UTIL, testRegexHandler.getShortName(), table1, null, null, Action.ADMIN); assertEquals(2, testRegexHandler.runAs(getPrivilegedAction(REGEX_ALL_TABLES)).size()); grantOnTable(TEST_UTIL, testRegexHandler.getShortName(), table2, null, null, Action.ADMIN); assertEquals(4, testRegexHandler.runAs(getPrivilegedAction(REGEX_ALL_TABLES)).size()); // USER_ADMIN, testUserPerms must have a row each. 
assertEquals(2, testRegexHandler.runAs(getPrivilegedAction(tableName)).size()); assertEquals(2, testRegexHandler.runAs(getPrivilegedAction( NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR + TableName.NAMESPACE_DELIM + tableName) ).size()); assertEquals(2, testRegexHandler.runAs(getPrivilegedAction( ns + TableName.NAMESPACE_DELIM + tableName)).size()); assertEquals(0, testRegexHandler.runAs(getPrivilegedAction("notMatchingAny")).size()); deleteTable(TEST_UTIL, table1); deleteTable(TEST_UTIL, table2); deleteNamespace(TEST_UTIL, ns); } private void verifyAnyCreate(AccessTestAction action) throws Exception { verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, USER_ADMIN_CF, USER_GROUP_CREATE); verifyDenied(action, USER_NONE, USER_RO, USER_RW, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_ADMIN); } @Test public void testPrepareAndCleanBulkLoad() throws Exception { AccessTestAction prepareBulkLoadAction = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.prePrepareBulkLoad(ObserverContext.createAndPrepare(RCP_ENV, null), null); return null; } }; AccessTestAction cleanupBulkLoadAction = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preCleanupBulkLoad(ObserverContext.createAndPrepare(RCP_ENV, null), null); return null; } }; verifyAnyCreate(prepareBulkLoadAction); verifyAnyCreate(cleanupBulkLoadAction); } @Test public void testReplicateLogEntries() throws Exception { AccessTestAction replicateLogEntriesAction = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preReplicateLogEntries(ObserverContext.createAndPrepare(RSCP_ENV, null), null, null); ACCESS_CONTROLLER.postReplicateLogEntries(ObserverContext.createAndPrepare(RSCP_ENV, null), null, null); return null; } }; verifyAllowed(replicateLogEntriesAction, SUPERUSER, USER_ADMIN, USER_GROUP_WRITE); verifyDenied(replicateLogEntriesAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, 
USER_OWNER, USER_GROUP_READ, USER_GROUP_ADMIN, USER_GROUP_CREATE); } }
apache-2.0
ox-it/cucm-http-api
src/main/java/com/cisco/axl/api/_8/GetCcdAdvertisingServiceRes.java
3634
package com.cisco.axl.api._8; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlType; /** * <p>Java class for GetCcdAdvertisingServiceRes complex type. * * <p>The following schema fragment specifies the expected content contained within this class. * * <pre> * &lt;complexType name="GetCcdAdvertisingServiceRes"> * &lt;complexContent> * &lt;extension base="{http://www.cisco.com/AXL/API/8.0}APIResponse"> * &lt;sequence> * &lt;element name="return"> * &lt;complexType> * &lt;complexContent> * &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType"> * &lt;sequence> * &lt;element name="ccdAdvertisingService" type="{http://www.cisco.com/AXL/API/8.0}RCcdAdvertisingService"/> * &lt;/sequence> * &lt;/restriction> * &lt;/complexContent> * &lt;/complexType> * &lt;/element> * &lt;/sequence> * &lt;/extension> * &lt;/complexContent> * &lt;/complexType> * </pre> * * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "GetCcdAdvertisingServiceRes", propOrder = { "_return" }) public class GetCcdAdvertisingServiceRes extends APIResponse { @XmlElement(name = "return", required = true) protected GetCcdAdvertisingServiceRes.Return _return; /** * Gets the value of the return property. * * @return * possible object is * {@link GetCcdAdvertisingServiceRes.Return } * */ public GetCcdAdvertisingServiceRes.Return getReturn() { return _return; } /** * Sets the value of the return property. * * @param value * allowed object is * {@link GetCcdAdvertisingServiceRes.Return } * */ public void setReturn(GetCcdAdvertisingServiceRes.Return value) { this._return = value; } /** * <p>Java class for anonymous complex type. * * <p>The following schema fragment specifies the expected content contained within this class. 
* * <pre> * &lt;complexType> * &lt;complexContent> * &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType"> * &lt;sequence> * &lt;element name="ccdAdvertisingService" type="{http://www.cisco.com/AXL/API/8.0}RCcdAdvertisingService"/> * &lt;/sequence> * &lt;/restriction> * &lt;/complexContent> * &lt;/complexType> * </pre> * * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "", propOrder = { "ccdAdvertisingService" }) public static class Return { @XmlElement(required = true) protected RCcdAdvertisingService ccdAdvertisingService; /** * Gets the value of the ccdAdvertisingService property. * * @return * possible object is * {@link RCcdAdvertisingService } * */ public RCcdAdvertisingService getCcdAdvertisingService() { return ccdAdvertisingService; } /** * Sets the value of the ccdAdvertisingService property. * * @param value * allowed object is * {@link RCcdAdvertisingService } * */ public void setCcdAdvertisingService(RCcdAdvertisingService value) { this.ccdAdvertisingService = value; } } }
apache-2.0
CapeSepias/big-data-plugin
legacy/src/main/java/org/pentaho/hbase/mapping/MappingEditor.java
44809
/******************************************************************************* * * Pentaho Big Data * * Copyright (C) 2002-2015 by Pentaho : http://www.pentaho.com * ******************************************************************************* * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ package org.pentaho.hbase.mapping; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.TreeSet; import org.eclipse.jface.dialogs.MessageDialog; import org.eclipse.swt.SWT; import org.eclipse.swt.custom.CCombo; import org.eclipse.swt.events.DisposeEvent; import org.eclipse.swt.events.DisposeListener; import org.eclipse.swt.events.FocusEvent; import org.eclipse.swt.events.FocusListener; import org.eclipse.swt.events.KeyAdapter; import org.eclipse.swt.events.KeyEvent; import org.eclipse.swt.events.SelectionAdapter; import org.eclipse.swt.events.SelectionEvent; import org.eclipse.swt.layout.FormAttachment; import org.eclipse.swt.layout.FormData; import org.eclipse.swt.layout.FormLayout; import org.eclipse.swt.widgets.Button; import org.eclipse.swt.widgets.Composite; import org.eclipse.swt.widgets.Label; import org.eclipse.swt.widgets.Shell; import org.eclipse.swt.widgets.Table; import org.eclipse.swt.widgets.TableItem; import org.pentaho.di.core.Const; import 
org.pentaho.di.core.namedcluster.model.NamedCluster; import org.pentaho.di.core.row.RowMetaInterface; import org.pentaho.di.core.row.ValueMetaInterface; import org.pentaho.di.trans.TransMeta; import org.pentaho.di.trans.steps.hbaseinput.HBaseInputData; import org.pentaho.di.trans.steps.hbaseinput.Messages; import org.pentaho.di.ui.core.PropsUI; import org.pentaho.di.ui.core.dialog.ErrorDialog; import org.pentaho.di.ui.core.gui.GUIResource; import org.pentaho.di.ui.core.namedcluster.NamedClusterUIHelper; import org.pentaho.di.ui.core.namedcluster.NamedClusterWidget; import org.pentaho.di.ui.core.widget.ColumnInfo; import org.pentaho.di.ui.core.widget.ComboValuesSelectionListener; import org.pentaho.di.ui.core.widget.TableView; import org.pentaho.hbase.shim.api.HBaseValueMeta; import org.pentaho.hbase.shim.api.Mapping; import org.pentaho.hbase.shim.spi.HBaseConnection; /** * A re-usable composite for creating and editing table mappings for HBase. Also has the (optional) ability to create * the table if the table for which the mapping is being created does not exist. When creating a new table, the name * supplied may be optionally suffixed with some parameters for compression and bloom filter type. If no parameters are * supplied then the HBase defaults of no compression and no bloom filter(s) are used. The table name may be suffixed * with * * @[NONE | GZ | LZO][@[NONE | ROW | ROWCOL]] for compression and bloom filter type respectively. Note that LZO * compression requires LZO libraries to be installed on the HBase nodes. 
* * @author Mark Hall (mhall{[at]}pentaho{[dot]}com) */ public class MappingEditor extends Composite implements ConfigurationProducer { protected Shell m_shell; protected Composite m_parent; protected boolean m_allowTableCreate; protected NamedClusterWidget namedClusterWidget; // table name line protected CCombo m_existingTableNamesCombo; protected Button m_getTableNames; protected boolean m_familiesInvalidated; // mapping name line protected CCombo m_existingMappingNamesCombo; // fields view protected TableView m_fieldsView; protected ColumnInfo m_keyCI; protected ColumnInfo m_familyCI; protected ColumnInfo m_typeCI; protected Button m_saveBut; protected Button m_deleteBut; protected Button m_getFieldsBut; protected Button m_keyValueTupleBut; protected MappingAdmin m_admin; protected ConfigurationProducer m_configProducer; protected FieldProducer m_incomingFieldsProducer; /** default family name to use when creating a new table using incoming fields */ protected static final String DEFAULT_FAMILY = "Family1"; protected String m_currentConfiguration = ""; protected boolean m_connectionProblem; protected TransMeta m_transMeta; public MappingEditor( Shell shell, Composite parent, ConfigurationProducer configProducer, FieldProducer fieldProducer, int tableViewStyle, boolean allowTableCreate, PropsUI props, TransMeta transMeta ) { super( parent, SWT.NONE ); m_shell = shell; m_parent = parent; m_transMeta = transMeta; boolean showConnectWidgets = false; m_configProducer = configProducer; if ( m_configProducer != null ) { m_currentConfiguration = m_configProducer.getCurrentConfiguration(); } else { showConnectWidgets = true; m_configProducer = this; } m_incomingFieldsProducer = fieldProducer; m_allowTableCreate = allowTableCreate; int middle = props.getMiddlePct(); int margin = Const.MARGIN; FormLayout controlLayout = new FormLayout(); controlLayout.marginWidth = 3; controlLayout.marginHeight = 3; setLayout( controlLayout ); props.setLook( this ); if ( showConnectWidgets 
) { Label namedClusterLabel = new Label( this, SWT.RIGHT ); namedClusterLabel.setText( Messages.getString( "MappingDialog.NamedCluster.Label" ) ); props.setLook( namedClusterLabel ); FormData fd = new FormData(); fd.left = new FormAttachment( 0, 0 ); fd.top = new FormAttachment( 0, 10 ); fd.right = new FormAttachment( middle, -margin ); namedClusterLabel.setLayoutData( fd ); namedClusterWidget = NamedClusterUIHelper.getNamedClusterUIFactory().createNamedClusterWidget( this, false ); namedClusterWidget.initiate(); props.setLook( namedClusterWidget.getComposite() ); fd = new FormData(); fd.left = new FormAttachment( middle, 0 ); fd.top = new FormAttachment( 0, margin ); fd.right = new FormAttachment( 100, 0 ); namedClusterWidget.getComposite().setLayoutData( fd ); m_currentConfiguration = m_configProducer.getCurrentConfiguration(); } parent.addDisposeListener( new DisposeListener() { @Override public void widgetDisposed( DisposeEvent de ) { try { resetConnection(); } catch ( Exception e ) { // we have to swallow it. 
} } } ); // table names Label tableNameLab = new Label( this, SWT.RIGHT ); tableNameLab.setText( Messages.getString( "MappingDialog.TableName.Label" ) ); props.setLook( tableNameLab ); FormData fd = new FormData(); fd.left = new FormAttachment( 0, 0 ); if ( showConnectWidgets ) { fd.top = new FormAttachment( namedClusterWidget.getComposite(), margin ); } else { fd.top = new FormAttachment( 0, margin ); } fd.right = new FormAttachment( middle, -margin ); tableNameLab.setLayoutData( fd ); m_getTableNames = new Button( this, SWT.PUSH | SWT.CENTER ); props.setLook( m_getTableNames ); m_getTableNames.setText( Messages.getString( "MappingDialog.TableName.GetTableNames" ) ); fd = new FormData(); fd.right = new FormAttachment( 100, 0 ); if ( showConnectWidgets ) { fd.top = new FormAttachment( namedClusterWidget.getComposite(), 0 ); } else { fd.top = new FormAttachment( 0, 0 ); } m_getTableNames.setLayoutData( fd ); m_getTableNames.addSelectionListener( new SelectionAdapter() { @Override public void widgetSelected( SelectionEvent e ) { populateTableCombo( false ); } } ); m_existingTableNamesCombo = new CCombo( this, SWT.BORDER ); props.setLook( m_existingTableNamesCombo ); fd = new FormData(); fd.left = new FormAttachment( middle, 0 ); fd.right = new FormAttachment( m_getTableNames, -margin ); if ( showConnectWidgets ) { fd.top = new FormAttachment( namedClusterWidget.getComposite(), margin ); } else { fd.top = new FormAttachment( 0, margin ); } m_existingTableNamesCombo.setLayoutData( fd ); // allow or disallow table creation by enabling/disabling the ability // to type into this combo m_existingTableNamesCombo.setEditable( m_allowTableCreate ); // mapping names Label mappingNameLab = new Label( this, SWT.RIGHT ); mappingNameLab.setText( Messages.getString( "MappingDialog.MappingName.Label" ) ); props.setLook( mappingNameLab ); fd = new FormData(); fd.left = new FormAttachment( 0, 0 ); fd.top = new FormAttachment( m_getTableNames, margin ); fd.right = new FormAttachment( 
middle, -margin ); mappingNameLab.setLayoutData( fd ); m_existingMappingNamesCombo = new CCombo( this, SWT.BORDER ); props.setLook( m_existingMappingNamesCombo ); fd = new FormData(); fd.left = new FormAttachment( middle, 0 ); fd.top = new FormAttachment( m_getTableNames, margin ); fd.right = new FormAttachment( 100, 0 ); m_existingMappingNamesCombo.setLayoutData( fd ); m_existingTableNamesCombo.addSelectionListener( new SelectionAdapter() { @Override public void widgetSelected( SelectionEvent e ) { m_familiesInvalidated = true; populateMappingComboAndFamilyStuff(); } @Override public void widgetDefaultSelected( SelectionEvent e ) { m_familiesInvalidated = true; populateMappingComboAndFamilyStuff(); } } ); m_existingTableNamesCombo.addKeyListener( new KeyAdapter() { @Override public void keyPressed( KeyEvent e ) { m_familiesInvalidated = true; } } ); m_existingTableNamesCombo.addFocusListener( new FocusListener() { public void focusGained( FocusEvent e ) { // populateTableCombo(false); } public void focusLost( FocusEvent e ) { m_familiesInvalidated = true; populateMappingComboAndFamilyStuff(); } } ); m_existingMappingNamesCombo.addSelectionListener( new SelectionAdapter() { @Override public void widgetSelected( SelectionEvent e ) { loadTableViewFromMapping(); } @Override public void widgetDefaultSelected( SelectionEvent e ) { loadTableViewFromMapping(); } } ); // fields ColumnInfo[] colinf = new ColumnInfo[] { new ColumnInfo( Messages.getString( "HBaseInputDialog.Fields.FIELD_ALIAS" ), ColumnInfo.COLUMN_TYPE_TEXT, false ), new ColumnInfo( Messages.getString( "HBaseInputDialog.Fields.FIELD_KEY" ), ColumnInfo.COLUMN_TYPE_CCOMBO, true ), new ColumnInfo( Messages.getString( "HBaseInputDialog.Fields.FIELD_FAMILY" ), ColumnInfo.COLUMN_TYPE_CCOMBO, true ), new ColumnInfo( Messages.getString( "HBaseInputDialog.Fields.FIELD_NAME" ), ColumnInfo.COLUMN_TYPE_TEXT, false ), new ColumnInfo( Messages.getString( "HBaseInputDialog.Fields.FIELD_TYPE" ), 
ColumnInfo.COLUMN_TYPE_CCOMBO, true ), new ColumnInfo( Messages.getString( "HBaseInputDialog.Fields.FIELD_INDEXED" ), ColumnInfo.COLUMN_TYPE_TEXT, false ), }; m_keyCI = colinf[1]; m_keyCI.setComboValues( new String[] { "N", "Y" } ); m_familyCI = colinf[2]; m_familyCI.setComboValues( new String[] { "" } ); m_typeCI = colinf[4]; // default types for non-key fields m_typeCI.setComboValues( new String[] { "String", "Integer", "Long", "Float", "Double", "Date", "BigNumber", "Serializable", "Binary" } ); m_keyCI.setComboValuesSelectionListener( new ComboValuesSelectionListener() { public String[] getComboValues( TableItem tableItem, int rowNr, int colNr ) { tableItem.setText( 5, "" ); return m_keyCI.getComboValues(); } } ); m_typeCI.setComboValuesSelectionListener( new ComboValuesSelectionListener() { public String[] getComboValues( TableItem tableItem, int rowNr, int colNr ) { String[] comboValues = null; String keyOrNot = tableItem.getText( 2 ); if ( Const.isEmpty( keyOrNot ) || keyOrNot.equalsIgnoreCase( "N" ) ) { comboValues = new String[] { "String", "Integer", "Long", "Float", "Double", "Boolean", "Date", "BigNumber", "Serializable", "Binary" }; } else { comboValues = new String[] { "String", "Integer", "UnsignedInteger", "Long", "UnsignedLong", "Date", "UnsignedDate", "Binary" }; } return comboValues; } } ); m_saveBut = new Button( this, SWT.PUSH | SWT.CENTER ); props.setLook( m_saveBut ); m_saveBut.setText( Messages.getString( "MappingDialog.SaveMapping" ) ); m_saveBut.setToolTipText( Messages.getString( "MappingDialog.SaveMapping.TipText" ) ); fd = new FormData(); fd.left = new FormAttachment( 0, margin ); fd.bottom = new FormAttachment( 100, -margin * 2 ); m_saveBut.setLayoutData( fd ); m_saveBut.addSelectionListener( new SelectionAdapter() { @Override public void widgetSelected( SelectionEvent e ) { saveMapping(); } } ); m_deleteBut = new Button( this, SWT.PUSH | SWT.CENTER ); props.setLook( m_deleteBut ); m_deleteBut.setText( Messages.getString( 
"MappingDialog.DeleteMapping" ) ); fd = new FormData(); fd.left = new FormAttachment( m_saveBut, margin ); fd.bottom = new FormAttachment( 100, -margin * 2 ); m_deleteBut.setLayoutData( fd ); m_deleteBut.addSelectionListener( new SelectionAdapter() { @Override public void widgetSelected( SelectionEvent e ) { deleteMapping(); } } ); if ( m_allowTableCreate ) { m_getFieldsBut = new Button( this, SWT.PUSH | SWT.CENTER ); props.setLook( m_getFieldsBut ); m_getFieldsBut.setText( Messages.getString( "MappingDialog.GetIncomingFields" ) ); fd = new FormData(); fd.right = new FormAttachment( 100, 0 ); fd.bottom = new FormAttachment( 100, -margin * 2 ); m_getFieldsBut.setLayoutData( fd ); m_getFieldsBut.addSelectionListener( new SelectionAdapter() { @Override public void widgetSelected( SelectionEvent e ) { populateTableWithIncomingFields(); } } ); } else { m_keyValueTupleBut = new Button( this, SWT.PUSH | SWT.CENTER ); props.setLook( m_keyValueTupleBut ); m_keyValueTupleBut.setText( Messages.getString( "MappingDialog.KeyValueTemplate" ) ); m_keyValueTupleBut.setToolTipText( Messages.getString( "MappingDialog.KeyValueTemplate.TipText" ) ); fd = new FormData(); fd.right = new FormAttachment( 100, 0 ); fd.bottom = new FormAttachment( 100, -margin * 2 ); m_keyValueTupleBut.setLayoutData( fd ); m_keyValueTupleBut.addSelectionListener( new SelectionAdapter() { @Override public void widgetSelected( SelectionEvent e ) { populateTableWithTupleTemplate(); } } ); } m_fieldsView = new TableView( transMeta, this, tableViewStyle, colinf, 1, null, props ); fd = new FormData(); fd.top = new FormAttachment( m_existingMappingNamesCombo, margin * 2 ); fd.bottom = new FormAttachment( m_saveBut, -margin * 2 ); fd.left = new FormAttachment( 0, 0 ); fd.right = new FormAttachment( 100, 0 ); m_fieldsView.setLayoutData( fd ); } private void populateTableWithTupleTemplate() { Table table = m_fieldsView.table; Set<String> existingRowAliases = new HashSet<String>(); for ( int i = 0; i < 
table.getItemCount(); i++ ) { TableItem tableItem = table.getItem( i ); String alias = tableItem.getText( 1 ); if ( !Const.isEmpty( alias ) ) { existingRowAliases.add( alias ); } } int choice = 0; if ( existingRowAliases.size() > 0 ) { // Ask what we should do with existing mapping data MessageDialog md = new MessageDialog( m_shell, Messages.getString( "MappingDialog.GetFieldsChoice.Title" ), null, Messages .getString( "MappingDialog.GetFieldsChoice.Message", "" + existingRowAliases.size(), "" + 5 ), MessageDialog.WARNING, new String[] { Messages.getString( "MappingOutputDialog.ClearAndAdd" ), Messages.getString( "MappingOutputDialog.Cancel" ), }, 0 ); MessageDialog.setDefaultImage( GUIResource.getInstance().getImageSpoon() ); int idx = md.open(); choice = idx & 0xFF; } if ( choice == 1 || choice == 255 /* 255 = escape pressed */ ) { return; // Cancel } m_fieldsView.clearAll(); TableItem item = new TableItem( table, SWT.NONE ); item.setText( 1, "KEY" ); item.setText( 2, "Y" ); item = new TableItem( table, SWT.NONE ); item.setText( 1, "Family" ); item.setText( 2, "N" ); item.setText( 5, "String" ); item = new TableItem( table, SWT.NONE ); item.setText( 1, "Column" ); item.setText( 2, "N" ); item = new TableItem( table, SWT.NONE ); item.setText( 1, "Value" ); item.setText( 2, "N" ); item = new TableItem( table, SWT.NONE ); item.setText( 1, "Timestamp" ); item.setText( 2, "N" ); item.setText( 5, "Long" ); m_fieldsView.removeEmptyRows(); m_fieldsView.setRowNums(); m_fieldsView.optWidth( true ); } private void populateTableWithIncomingFields() { if ( m_incomingFieldsProducer != null ) { RowMetaInterface incomingRowMeta = m_incomingFieldsProducer.getIncomingFields(); Table table = m_fieldsView.table; if ( incomingRowMeta != null ) { Set<String> existingRowAliases = new HashSet<String>(); for ( int i = 0; i < table.getItemCount(); i++ ) { TableItem tableItem = table.getItem( i ); String alias = tableItem.getText( 1 ); if ( !Const.isEmpty( alias ) ) { 
existingRowAliases.add( alias ); } } int choice = 0; if ( existingRowAliases.size() > 0 ) { // Ask what we should do with existing mapping data MessageDialog md = new MessageDialog( m_shell, Messages.getString( "MappingDialog.GetFieldsChoice.Title" ), null, Messages .getString( "MappingDialog.GetFieldsChoice.Message", "" + existingRowAliases.size(), "" + incomingRowMeta.size() ), MessageDialog.WARNING, new String[] { Messages.getString( "MappingDialog.AddNew" ), Messages.getString( "MappingOutputDialog.Add" ), Messages.getString( "MappingOutputDialog.ClearAndAdd" ), Messages.getString( "MappingOutputDialog.Cancel" ), }, 0 ); MessageDialog.setDefaultImage( GUIResource.getInstance().getImageSpoon() ); int idx = md.open(); choice = idx & 0xFF; } if ( choice == 3 || choice == 255 /* 255 = escape pressed */ ) { return; // Cancel } if ( choice == 2 ) { m_fieldsView.clearAll(); } for ( int i = 0; i < incomingRowMeta.size(); i++ ) { ValueMetaInterface vm = incomingRowMeta.getValueMeta( i ); boolean addIt = true; if ( choice == 0 ) { // only add if its not already in the table if ( existingRowAliases.contains( vm.getName() ) ) { addIt = false; } } if ( addIt ) { TableItem item = new TableItem( m_fieldsView.table, SWT.NONE ); item.setText( 1, vm.getName() ); item.setText( 2, "N" ); if ( m_familyCI.getComboValues()[0].length() > 0 ) { // use existing first column family name as the default item.setText( 3, m_familyCI.getComboValues()[0] ); } else { // default item.setText( 3, DEFAULT_FAMILY ); } item.setText( 4, vm.getName() ); item.setText( 5, vm.getTypeDesc() ); if ( vm.getType() == ValueMetaInterface.TYPE_INTEGER ) { item.setText( 5, "Long" ); } if ( vm.getType() == ValueMetaInterface.TYPE_NUMBER ) { item.setText( 5, "Double" ); } if ( vm.getStorageType() == ValueMetaInterface.STORAGE_TYPE_INDEXED ) { Object[] indexValus = vm.getIndex(); String indexValsS = HBaseValueMeta.objectIndexValuesToString( indexValus ); item.setText( 6, indexValsS ); } } } 
m_fieldsView.removeEmptyRows(); m_fieldsView.setRowNums(); m_fieldsView.optWidth( true ); } } } private void populateTableCombo( boolean force ) { if ( m_configProducer == null ) { return; } if ( m_connectionProblem ) { if ( !m_currentConfiguration.equals( m_configProducer.getCurrentConfiguration() ) ) { // try again - perhaps the user has corrected connection information m_connectionProblem = false; m_currentConfiguration = m_configProducer.getCurrentConfiguration(); } } if ( ( m_existingTableNamesCombo.getItemCount() == 0 || force ) && !m_connectionProblem ) { String existingName = m_existingTableNamesCombo.getText(); m_existingTableNamesCombo.removeAll(); try { resetConnection(); HBaseConnection hbAdmin = m_configProducer.getHBaseConnection(); hbAdmin.checkHBaseAvailable(); m_admin = new MappingAdmin( hbAdmin ); List<String> tables = hbAdmin.listTableNames(); for ( String currentTableName : tables ) { m_existingTableNamesCombo.add( currentTableName ); } // restore any previous value if ( !Const.isEmpty( existingName ) ) { m_existingTableNamesCombo.setText( existingName ); } } catch ( Exception e ) { m_connectionProblem = true; showConnectionErrorDialog( e ); } } } private void resetConnection() throws Exception { if ( m_admin != null && m_admin.getConnection() != null ) { m_admin.getConnection().close(); } m_admin = null; } private void showConnectionErrorDialog( Exception ex ) { new ErrorDialog( m_shell, Messages.getString( "MappingDialog.Error.Title.UnableToConnect" ), Messages .getString( "MappingDialog.Error.Message.UnableToConnect" ) + "\n\n", ex ); } private void deleteMapping() { String tableName = ""; if ( !Const.isEmpty( m_existingTableNamesCombo.getText().trim() ) ) { tableName = m_existingTableNamesCombo.getText().trim(); if ( tableName.indexOf( '@' ) > 0 ) { tableName = tableName.substring( 0, tableName.indexOf( '@' ) ); } } if ( Const.isEmpty( tableName ) || Const.isEmpty( m_existingMappingNamesCombo.getText().trim() ) ) { MessageDialog.openError( 
m_shell, Messages.getString( "MappingDialog.Error.Title.MissingTableMappingName" ), Messages.getString( "MappingDialog.Error.Message.MissingTableMappingName" ) ); return; } try { boolean ok = MessageDialog.openConfirm( m_shell, Messages.getString( "MappingDialog.Info.Title.ConfirmDelete" ), Messages .getString( "MappingDialog.Info.Message.ConfirmDelete", m_existingMappingNamesCombo.getText().trim(), tableName ) ); if ( ok ) { boolean result = m_admin.deleteMapping( m_existingTableNamesCombo.getText().trim(), m_existingMappingNamesCombo.getText() .trim() ); if ( result ) { MessageDialog.openConfirm( m_shell, Messages.getString( "MappingDialog.Info.Title.MappingDeleted" ), Messages .getString( "MappingDialog.Info.Message.MappingDeleted", m_existingMappingNamesCombo.getText().trim(), tableName ) ); // make sure that the list of mappings for the selected table gets // updated. populateMappingComboAndFamilyStuff(); } else { MessageDialog.openError( m_shell, Messages.getString( "MappingDialog.Error.Title.DeleteMapping" ), Messages .getString( "MappingDialog.Error.Message.DeleteMapping", m_existingMappingNamesCombo.getText().trim(), tableName ) ); } } return; } catch ( Exception ex ) { MessageDialog.openError( m_shell, Messages.getString( "MappingDialog.Error.Title.DeleteMapping" ), Messages .getString( "MappingDialog.Error.Message.DeleteMappingIO", m_existingMappingNamesCombo.getText().trim(), tableName, ex.getMessage() ) ); } } public Mapping getMapping( boolean performChecksAndShowGUIErrorDialog, List<String> problems ) { String tableName = ""; if ( !Const.isEmpty( m_existingTableNamesCombo.getText().trim() ) ) { tableName = m_existingTableNamesCombo.getText().trim(); if ( tableName.indexOf( '@' ) > 0 ) { tableName = tableName.substring( 0, tableName.indexOf( '@' ) ); } } // empty table name or mapping name does not force an abort if ( performChecksAndShowGUIErrorDialog && ( Const.isEmpty( m_existingMappingNamesCombo.getText().trim() ) || Const.isEmpty( tableName ) ) ) 
{ MessageDialog.openError( m_shell, Messages.getString( "MappingDialog.Error.Title.MissingTableMappingName" ), Messages.getString( "MappingDialog.Error.Message.MissingTableMappingName" ) ); if ( problems != null ) { problems.add( Messages.getString( "MappingDialog.Error.Message.MissingTableMappingName" ) ); } return null; } // do we have any non-empty rows in the table? if ( m_fieldsView.nrNonEmpty() == 0 && performChecksAndShowGUIErrorDialog ) { MessageDialog.openError( m_shell, Messages.getString( "MappingDialog.Error.Title.NoFieldsDefined" ), Messages .getString( "MappingDialog.Error.Message.NoFieldsDefined" ) ); if ( problems != null ) { problems.add( Messages.getString( "MappingDialog.Error.Message.NoFieldsDefined" ) ); } return null; } // do we have a key defined in the table? Mapping theMapping = new Mapping( tableName, m_existingMappingNamesCombo.getText().trim() ); boolean keyDefined = false; boolean moreThanOneKey = false; List<String> missingFamilies = new ArrayList<String>(); List<String> missingColumnNames = new ArrayList<String>(); List<String> missingTypes = new ArrayList<String>(); int nrNonEmpty = m_fieldsView.nrNonEmpty(); // is the mapping a tuple mapping? 
boolean isTupleMapping = false; int tupleIdCount = 0; if ( nrNonEmpty == 5 ) { for ( int i = 0; i < nrNonEmpty; i++ ) { if ( m_fieldsView.getNonEmpty( i ).getText( 1 ).equals( "KEY" ) || m_fieldsView.getNonEmpty( i ).getText( 1 ).equals( "Family" ) || m_fieldsView.getNonEmpty( i ).getText( 1 ).equals( "Column" ) || m_fieldsView.getNonEmpty( i ).getText( 1 ).equals( "Value" ) || m_fieldsView.getNonEmpty( i ).getText( 1 ).equals( "Timestamp" ) ) { tupleIdCount++; } } } if ( tupleIdCount == 5 ) { isTupleMapping = true; theMapping.setTupleMapping( true ); } for ( int i = 0; i < nrNonEmpty; i++ ) { TableItem item = m_fieldsView.getNonEmpty( i ); boolean isKey = false; String alias = null; if ( !Const.isEmpty( item.getText( 1 ) ) ) { alias = item.getText( 1 ).trim(); } if ( !Const.isEmpty( item.getText( 2 ) ) ) { isKey = item.getText( 2 ).trim().equalsIgnoreCase( "Y" ); if ( isKey && keyDefined ) { // more than one key, break here moreThanOneKey = true; break; } if ( isKey ) { keyDefined = true; } } // String family = null; String family = ""; if ( !Const.isEmpty( item.getText( 3 ) ) ) { family = item.getText( 3 ); } else { if ( !isKey && !isTupleMapping ) { missingFamilies.add( item.getText( 0 ) ); } } // String colName = null; String colName = ""; if ( !Const.isEmpty( item.getText( 4 ) ) ) { colName = item.getText( 4 ); } else { if ( !isKey && !isTupleMapping ) { missingColumnNames.add( item.getText( 0 ) ); } } String type = null; if ( !Const.isEmpty( item.getText( 5 ) ) ) { type = item.getText( 5 ); } else { missingTypes.add( item.getText( 0 ) ); } String indexedVals = null; if ( !Const.isEmpty( item.getText( 6 ) ) ) { indexedVals = item.getText( 6 ); } // only add if we have all data and its all correct if ( isKey && !moreThanOneKey ) { if ( Const.isEmpty( alias ) ) { // pop up an error dialog - key must have an alias because it does not // belong to a column family or have a column name if ( performChecksAndShowGUIErrorDialog ) { MessageDialog.openError( m_shell, 
Messages.getString( "MappingDialog.Error.Title.NoAliasForKey" ), Messages .getString( "MappingDialog.Error.Message.NoAliasForKey" ) ); } if ( problems != null ) { problems.add( Messages.getString( "MappingDialog.Error.Message.NoAliasForKey" ) ); } return null; } if ( Const.isEmpty( type ) ) { // pop up an error dialog - must have a type for the key if ( performChecksAndShowGUIErrorDialog ) { MessageDialog.openError( m_shell, Messages.getString( "MappingDialog.Error.Title.NoTypeForKey" ), Messages .getString( "MappingDialog.Error.Message.NoTypeForKey" ) ); } if ( problems != null ) { problems.add( Messages.getString( "MappingDialog.Error.Message.NoTypeForKey" ) ); } return null; } if ( moreThanOneKey ) { // popup an error and then return if ( performChecksAndShowGUIErrorDialog ) { MessageDialog.openError( m_shell, Messages.getString( "MappingDialog.Error.Title.MoreThanOneKey" ), Messages.getString( "MappingDialog.Error.Message.MoreThanOneKey" ) ); } if ( problems != null ) { problems.add( Messages.getString( "MappingDialog.Error.Message.MoreThanOneKey" ) ); } return null; } if ( isTupleMapping ) { theMapping.setKeyName( alias ); theMapping.setTupleFamilies( family ); } else { theMapping.setKeyName( alias ); } try { theMapping.setKeyTypeAsString( type ); } catch ( Exception ex ) { // Ignore } } else { // don't bother adding if there are any errors if ( missingFamilies.size() == 0 && missingColumnNames.size() == 0 && missingTypes.size() == 0 ) { String combinedName = family + HBaseValueMeta.SEPARATOR + colName; if ( !Const.isEmpty( alias ) ) { combinedName += ( HBaseValueMeta.SEPARATOR + alias ); } else { // just use the column name as the alias combinedName += ( HBaseValueMeta.SEPARATOR + colName ); } HBaseValueMeta vm = new HBaseValueMeta( combinedName, 0, -1, -1 ); try { vm.setHBaseTypeFromString( type ); } catch ( IllegalArgumentException e ) { // TODO pop up an error dialog for this one return null; } if ( vm.isString() && indexedVals != null && 
indexedVals.length() > 0 ) { Object[] vals = HBaseValueMeta.stringIndexListToObjects( indexedVals ); vm.setIndex( vals ); vm.setStorageType( ValueMetaInterface.STORAGE_TYPE_INDEXED ); } try { theMapping.addMappedColumn( vm, isTupleMapping ); } catch ( Exception ex ) { // popup an error if this family:column is already in the mapping // and // then return. if ( performChecksAndShowGUIErrorDialog ) { MessageDialog.openError( m_shell, Messages.getString( "MappingDialog.Error.Title.DuplicateColumn" ), Messages.getString( "MappingDialog.Error.Message1.DuplicateColumn" ) + family + HBaseValueMeta.SEPARATOR + colName + Messages.getString( "MappingDialog.Error.Message2.DuplicateColumn" ) ); } if ( problems != null ) { problems.add( Messages.getString( "MappingDialog.Error.Message1.DuplicateColumn" ) + family + HBaseValueMeta.SEPARATOR + colName + Messages.getString( "MappingDialog.Error.Message2.DuplicateColumn" ) ); } return null; } } } } // now check for any errors in our Lists if ( !keyDefined ) { if ( performChecksAndShowGUIErrorDialog ) { MessageDialog.openError( m_shell, Messages.getString( "MappingDialog.Error.Title.NoKeyDefined" ), Messages .getString( "MappingDialog.Error.Message.NoKeyDefined" ) ); } if ( problems != null ) { problems.add( Messages.getString( "MappingDialog.Error.Message.NoKeyDefined" ) ); } return null; } if ( missingFamilies.size() > 0 || missingColumnNames.size() > 0 || missingTypes.size() > 0 ) { StringBuffer buff = new StringBuffer(); buff.append( Messages.getString( "MappingDialog.Error.Message.IssuesPreventingSaving" ) + ":\n\n" ); if ( missingFamilies.size() > 0 ) { buff.append( Messages.getString( "MappingDialog.Error.Message.FamilyIssue" ) + ":\n" ); buff.append( missingFamilies.toString() ).append( "\n\n" ); } if ( missingColumnNames.size() > 0 ) { buff.append( Messages.getString( "MappingDialog.Error.Message.ColumnIssue" ) + ":\n" ); buff.append( missingColumnNames.toString() ).append( "\n\n" ); } if ( missingTypes.size() > 0 ) { 
buff.append( Messages.getString( "MappingDialog.Error.Message.TypeIssue" ) + ":\n" ); buff.append( missingTypes.toString() ).append( "\n\n" ); } if ( performChecksAndShowGUIErrorDialog ) { MessageDialog.openError( m_shell, Messages.getString( "MappingDialog.Error.Title.IssuesPreventingSaving" ), buff.toString() ); } if ( problems != null ) { problems.add( buff.toString() ); } return null; } return theMapping; } private void saveMapping() { Mapping theMapping = getMapping( true, null ); if ( theMapping == null ) { // some problem with the mapping (user will have been informed via dialog) return; } String tableName = theMapping.getTableName(); if ( m_allowTableCreate ) { // check for existence of the table. If table doesn't exist // prompt for creation HBaseConnection hbAdmin = m_admin.getConnection(); try { if ( !hbAdmin.tableExists( tableName ) ) { boolean result = MessageDialog.openConfirm( m_shell, "Create table", "Table \"" + tableName + "\" does not exist. Create it?" ); if ( !result ) { return; } if ( theMapping.getMappedColumns().size() == 0 ) { MessageDialog.openError( m_shell, "No columns defined", "A HBase table requires at least one column family to be defined." 
); return; } // collect up all the column families so that we can create the table Set<String> cols = theMapping.getMappedColumns().keySet(); Set<String> families = new TreeSet<String>(); for ( String col : cols ) { String family = theMapping.getMappedColumns().get( col ).getColumnFamily(); families.add( family ); } // do we have additional parameters supplied in the table name field // String compression = Compression.Algorithm.NONE.getName(); String compression = null; // String bloomFilter = "NONE"; String bloomFilter = null; String[] opts = m_existingTableNamesCombo.getText().trim().split( "@" ); if ( opts.length > 1 ) { compression = opts[1]; if ( opts.length == 3 ) { bloomFilter = opts[2]; } } Properties creationProps = new Properties(); if ( compression != null ) { creationProps.setProperty( HBaseConnection.COL_DESCRIPTOR_COMPRESSION_KEY, compression ); } if ( bloomFilter != null ) { creationProps.setProperty( HBaseConnection.COL_DESCRIPTOR_BLOOM_FILTER_KEY, bloomFilter ); } List<String> familyList = new ArrayList<String>(); for ( String fam : families ) { familyList.add( fam ); } // create the table hbAdmin.createTable( tableName, familyList, creationProps ); // refresh the table combo populateTableCombo( true ); } } catch ( Exception ex ) { new ErrorDialog( m_shell, Messages.getString( "MappingDialog.Error.Title.ErrorCreatingTable" ), Messages .getString( "MappingDialog.Error.Message.ErrorCreatingTable" ) + " \"" + m_existingTableNamesCombo.getText().trim() + "\"", ex ); return; } } try { // now check to see if the mapping exists if ( m_admin.mappingExists( tableName, m_existingMappingNamesCombo.getText().trim() ) ) { // prompt for overwrite boolean result = MessageDialog.openConfirm( m_shell, Messages.getString( "MappingDialog.Info.Title.MappingExists" ), Messages.getString( "MappingDialog.Info.Message1.MappingExists" ) + m_existingMappingNamesCombo.getText().trim() + Messages.getString( "MappingDialog.Info.Message2.MappingExists" ) + tableName + 
Messages.getString( "MappingDialog.Info.Message3.MappingExists" ) ); if ( !result ) { return; } } // finally add the mapping. m_admin.putMapping( theMapping, true ); MessageDialog.openConfirm( m_shell, Messages.getString( "MappingDialog.Info.Title.MappingSaved" ), Messages .getString( "MappingDialog.Info.Message1.MappingSaved" ) + m_existingMappingNamesCombo.getText().trim() + Messages.getString( "MappingDialog.Info.Message2.MappingSaved" ) + tableName + Messages.getString( "MappingDialog.Info.Message3.MappingSaved" ) ); } catch ( Exception ex ) { // inform the user via popup new ErrorDialog( m_shell, Messages.getString( "MappingDialog.Error.Title.ErrorSaving" ), Messages .getString( "MappingDialog.Error.Message.ErrorSaving" ), ex ); } } public void setMapping( Mapping mapping ) { if ( mapping == null ) { return; } m_fieldsView.clearAll(); // do the key first TableItem keyItem = new TableItem( m_fieldsView.table, SWT.NONE ); if ( !Const.isEmpty( mapping.getKeyName() ) ) { keyItem.setText( 1, mapping.getKeyName() ); } keyItem.setText( 2, "Y" ); if ( mapping.getKeyType() != null && !Const.isEmpty( mapping.getKeyType().toString() ) ) { keyItem.setText( 5, mapping.getKeyType().toString() ); } if ( mapping.isTupleMapping() && !Const.isEmpty( mapping.getTupleFamilies() ) ) { keyItem.setText( 3, mapping.getTupleFamilies() ); } // the rest of the fields in the mapping Map<String, HBaseValueMeta> mappedFields = mapping.getMappedColumns(); for ( String alias : mappedFields.keySet() ) { HBaseValueMeta vm = mappedFields.get( alias ); TableItem item = new TableItem( m_fieldsView.table, SWT.NONE ); item.setText( 1, alias ); item.setText( 2, "N" ); item.setText( 3, vm.getColumnFamily() ); item.setText( 4, vm.getColumnName() ); if ( vm.isInteger() ) { if ( vm.getIsLongOrDouble() ) { item.setText( 5, "Long" ); } else { item.setText( 5, "Integer" ); } } else if ( vm.isNumber() ) { if ( vm.getIsLongOrDouble() ) { item.setText( 5, "Double" ); } else { item.setText( 5, "Float" ); } } 
else { item.setText( 5, vm.getTypeDesc() ); } if ( vm.getStorageType() == ValueMetaInterface.STORAGE_TYPE_INDEXED ) { item.setText( 6, HBaseValueMeta.objectIndexValuesToString( vm.getIndex() ) ); } } m_fieldsView.removeEmptyRows(); m_fieldsView.setRowNums(); m_fieldsView.optWidth( true ); } private void loadTableViewFromMapping() { String tableName = ""; if ( !Const.isEmpty( m_existingTableNamesCombo.getText().trim() ) ) { tableName = m_existingTableNamesCombo.getText().trim(); if ( tableName.indexOf( '@' ) > 0 ) { tableName = tableName.substring( 0, tableName.indexOf( '@' ) ); } } try { if ( m_admin.mappingExists( tableName, m_existingMappingNamesCombo.getText().trim() ) ) { Mapping mapping = m_admin.getMapping( tableName, m_existingMappingNamesCombo.getText().trim() ); setMapping( mapping ); } } catch ( Exception ex ) { // inform the user via popup new ErrorDialog( m_shell, Messages.getString( "MappingDialog.Error.Title.ErrorLoadingMapping" ), Messages .getString( "MappingDialog.Error.Message.ErrorLoadingMapping" ), ex ); } } private void populateMappingComboAndFamilyStuff() { String tableName = ""; if ( !Const.isEmpty( m_existingTableNamesCombo.getText().trim() ) ) { tableName = m_existingTableNamesCombo.getText().trim(); if ( tableName.indexOf( '@' ) > 0 ) { tableName = tableName.substring( 0, tableName.indexOf( '@' ) ); } } // defaults if we fail to connect, table doesn't exist etc.. 
m_familyCI.setComboValues( new String[] { "" } ); m_existingMappingNamesCombo.removeAll(); if ( m_admin != null && !Const.isEmpty( tableName ) ) { try { // first get the existing mapping names (if any) List<String> mappingNames = m_admin.getMappingNames( tableName ); for ( String m : mappingNames ) { m_existingMappingNamesCombo.add( m ); } // now get family information for this table HBaseConnection hbAdmin = m_admin.getConnection(); if ( hbAdmin.tableExists( tableName ) ) { List<String> colFams = hbAdmin.getTableFamiles( tableName ); String[] familyNames = colFams.toArray( new String[1] ); m_familyCI.setComboValues( familyNames ); } else { m_familyCI.setComboValues( new String[] { "" } ); } m_familiesInvalidated = false; return; } catch ( Exception e ) { showConnectionErrorDialog( e ); } } } public HBaseConnection getHBaseConnection() throws Exception { HBaseConnection conf = null; String zookeeperHosts = null; String zookeeperPort = null; NamedCluster nc = namedClusterWidget.getSelectedNamedCluster(); if ( nc != null ) { zookeeperHosts = m_transMeta.environmentSubstitute( nc.getZooKeeperHost() ); zookeeperPort = m_transMeta.environmentSubstitute( nc.getZooKeeperPort() ); } conf = HBaseInputData.getHBaseConnection( zookeeperHosts, zookeeperPort, null, null, null ); return conf; } public String getCurrentConfiguration() { String host = ""; String port = ""; NamedCluster nc = namedClusterWidget.getSelectedNamedCluster(); if ( nc != null ) { host = m_transMeta.environmentSubstitute( nc.getZooKeeperHost() ); port = m_transMeta.environmentSubstitute( nc.getZooKeeperPort() ); } return host + ":" + port; } @Override public void dispose() { // TODO Auto-generated method stub super.dispose(); } }
apache-2.0
googleapis/java-grafeas
src/main/java/io/grafeas/v1/SlsaProvenanceOrBuilder.java
5709
/* * Copyright 2019 The Grafeas Authors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: grafeas/v1/slsa_provenance.proto package io.grafeas.v1; public interface SlsaProvenanceOrBuilder extends // @@protoc_insertion_point(interface_extends:grafeas.v1.SlsaProvenance) com.google.protobuf.MessageOrBuilder { /** * * * <pre> * required * </pre> * * <code>.grafeas.v1.SlsaProvenance.SlsaBuilder builder = 1;</code> * * @return Whether the builder field is set. */ boolean hasBuilder(); /** * * * <pre> * required * </pre> * * <code>.grafeas.v1.SlsaProvenance.SlsaBuilder builder = 1;</code> * * @return The builder. */ io.grafeas.v1.SlsaProvenance.SlsaBuilder getBuilder(); /** * * * <pre> * required * </pre> * * <code>.grafeas.v1.SlsaProvenance.SlsaBuilder builder = 1;</code> */ io.grafeas.v1.SlsaProvenance.SlsaBuilderOrBuilder getBuilderOrBuilder(); /** * * * <pre> * Identifies the configuration used for the build. * When combined with materials, this SHOULD fully describe the build, * such that re-running this recipe results in bit-for-bit identical output * (if the build is reproducible). * </pre> * * <code>.grafeas.v1.SlsaProvenance.SlsaRecipe recipe = 2;</code> * * @return Whether the recipe field is set. */ boolean hasRecipe(); /** * * * <pre> * Identifies the configuration used for the build. 
* When combined with materials, this SHOULD fully describe the build, * such that re-running this recipe results in bit-for-bit identical output * (if the build is reproducible). * </pre> * * <code>.grafeas.v1.SlsaProvenance.SlsaRecipe recipe = 2;</code> * * @return The recipe. */ io.grafeas.v1.SlsaProvenance.SlsaRecipe getRecipe(); /** * * * <pre> * Identifies the configuration used for the build. * When combined with materials, this SHOULD fully describe the build, * such that re-running this recipe results in bit-for-bit identical output * (if the build is reproducible). * </pre> * * <code>.grafeas.v1.SlsaProvenance.SlsaRecipe recipe = 2;</code> */ io.grafeas.v1.SlsaProvenance.SlsaRecipeOrBuilder getRecipeOrBuilder(); /** * <code>.grafeas.v1.SlsaProvenance.SlsaMetadata metadata = 3;</code> * * @return Whether the metadata field is set. */ boolean hasMetadata(); /** * <code>.grafeas.v1.SlsaProvenance.SlsaMetadata metadata = 3;</code> * * @return The metadata. */ io.grafeas.v1.SlsaProvenance.SlsaMetadata getMetadata(); /** <code>.grafeas.v1.SlsaProvenance.SlsaMetadata metadata = 3;</code> */ io.grafeas.v1.SlsaProvenance.SlsaMetadataOrBuilder getMetadataOrBuilder(); /** * * * <pre> * The collection of artifacts that influenced the build including sources, * dependencies, build tools, base images, and so on. This is considered to be * incomplete unless metadata.completeness.materials is true. Unset or null is * equivalent to empty. * </pre> * * <code>repeated .grafeas.v1.SlsaProvenance.Material materials = 4;</code> */ java.util.List<io.grafeas.v1.SlsaProvenance.Material> getMaterialsList(); /** * * * <pre> * The collection of artifacts that influenced the build including sources, * dependencies, build tools, base images, and so on. This is considered to be * incomplete unless metadata.completeness.materials is true. Unset or null is * equivalent to empty. 
* </pre> * * <code>repeated .grafeas.v1.SlsaProvenance.Material materials = 4;</code> */ io.grafeas.v1.SlsaProvenance.Material getMaterials(int index); /** * * * <pre> * The collection of artifacts that influenced the build including sources, * dependencies, build tools, base images, and so on. This is considered to be * incomplete unless metadata.completeness.materials is true. Unset or null is * equivalent to empty. * </pre> * * <code>repeated .grafeas.v1.SlsaProvenance.Material materials = 4;</code> */ int getMaterialsCount(); /** * * * <pre> * The collection of artifacts that influenced the build including sources, * dependencies, build tools, base images, and so on. This is considered to be * incomplete unless metadata.completeness.materials is true. Unset or null is * equivalent to empty. * </pre> * * <code>repeated .grafeas.v1.SlsaProvenance.Material materials = 4;</code> */ java.util.List<? extends io.grafeas.v1.SlsaProvenance.MaterialOrBuilder> getMaterialsOrBuilderList(); /** * * * <pre> * The collection of artifacts that influenced the build including sources, * dependencies, build tools, base images, and so on. This is considered to be * incomplete unless metadata.completeness.materials is true. Unset or null is * equivalent to empty. * </pre> * * <code>repeated .grafeas.v1.SlsaProvenance.Material materials = 4;</code> */ io.grafeas.v1.SlsaProvenance.MaterialOrBuilder getMaterialsOrBuilder(int index); }
apache-2.0
KRMAssociatesInc/eHMP
lib/mvi/org/hl7/v3/ParameterizedDataTypeEventRelatedInterval.java
1298
package org.hl7.v3; import javax.xml.bind.annotation.XmlEnum; import javax.xml.bind.annotation.XmlEnumValue; import javax.xml.bind.annotation.XmlType; /** * <p>Java class for ParameterizedDataTypeEventRelatedInterval. * * <p>The following schema fragment specifies the expected content contained within this class. * <p> * <pre> * &lt;simpleType name="ParameterizedDataTypeEventRelatedInterval"> * &lt;restriction base="{urn:hl7-org:v3}cs"> * &lt;enumeration value="EIVL&lt;T>"/> * &lt;/restriction> * &lt;/simpleType> * </pre> * */ @XmlType(name = "ParameterizedDataTypeEventRelatedInterval") @XmlEnum public enum ParameterizedDataTypeEventRelatedInterval { @XmlEnumValue("EIVL<T>") EIVL_T("EIVL<T>"); private final String value; ParameterizedDataTypeEventRelatedInterval(String v) { value = v; } public String value() { return value; } public static ParameterizedDataTypeEventRelatedInterval fromValue(String v) { for (ParameterizedDataTypeEventRelatedInterval c: ParameterizedDataTypeEventRelatedInterval.values()) { if (c.value.equals(v)) { return c; } } throw new IllegalArgumentException(v); } }
apache-2.0
PurelyApplied/geode
geode-core/src/distributedTest/java/org/apache/geode/internal/cache/ha/HARQueueNewImplDUnitTest.java
51287
/* * Licensed to the Apache Software Foundation (ASF) under one or more contributor license * agreements. See the NOTICE file distributed with this work for additional information regarding * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the License. You may obtain a * copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package org.apache.geode.internal.cache.ha; import static org.apache.geode.cache.Region.Entry; import static org.apache.geode.cache.Region.SEPARATOR; import static org.apache.geode.distributed.ConfigurationProperties.DELTA_PROPAGATION; import static org.apache.geode.distributed.ConfigurationProperties.LOCATORS; import static org.apache.geode.distributed.ConfigurationProperties.MCAST_PORT; import static org.apache.geode.internal.cache.CacheServerImpl.generateNameForClientMsgsRegion; import static org.apache.geode.test.awaitility.GeodeAwaitility.await; import static org.apache.geode.test.dunit.Assert.assertEquals; import static org.apache.geode.test.dunit.Assert.assertNotNull; import static org.apache.geode.test.dunit.Assert.assertNull; import static org.apache.geode.test.dunit.Assert.assertTrue; import static org.apache.geode.test.dunit.Assert.fail; import static org.apache.geode.test.dunit.NetworkUtils.getServerHostName; import java.io.File; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Properties; import java.util.Set; import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; import org.apache.geode.LogWriter; import 
org.apache.geode.cache.AttributesFactory; import org.apache.geode.cache.Cache; import org.apache.geode.cache.CacheFactory; import org.apache.geode.cache.DataPolicy; import org.apache.geode.cache.DiskStoreFactory; import org.apache.geode.cache.EntryEvent; import org.apache.geode.cache.Region; import org.apache.geode.cache.RegionAttributes; import org.apache.geode.cache.Scope; import org.apache.geode.cache.server.CacheServer; import org.apache.geode.cache.util.CacheListenerAdapter; import org.apache.geode.cache30.ClientServerTestCase; import org.apache.geode.distributed.DistributedSystem; import org.apache.geode.internal.AvailablePort; import org.apache.geode.internal.cache.CacheServerImpl; import org.apache.geode.internal.cache.LocalRegion; import org.apache.geode.internal.cache.tier.sockets.CacheServerTestUtil; import org.apache.geode.internal.cache.tier.sockets.ClientUpdateMessage; import org.apache.geode.internal.cache.tier.sockets.ConflationDUnitTestHelper; import org.apache.geode.internal.cache.tier.sockets.HAEventWrapper; import org.apache.geode.test.awaitility.GeodeAwaitility; import org.apache.geode.test.dunit.Host; import org.apache.geode.test.dunit.VM; import org.apache.geode.test.dunit.WaitCriterion; import org.apache.geode.test.dunit.internal.JUnit4DistributedTestCase; import org.apache.geode.test.junit.categories.ClientSubscriptionTest; /** * This DUnit contains various tests to ensure new implementation of ha region queues works as * expected. 
* * @since GemFire 5.7 */ @Category({ClientSubscriptionTest.class}) public class HARQueueNewImplDUnitTest extends JUnit4DistributedTestCase { private static final String regionName = HARQueueNewImplDUnitTest.class.getSimpleName(); private static final Map map = new HashMap(); private static Cache cache = null; private static VM serverVM0 = null; private static VM serverVM1 = null; private static VM clientVM1 = null; private static VM clientVM2 = null; private static LogWriter logger = null; private static int numOfCreates = 0; private static int numOfUpdates = 0; private static int numOfInvalidates = 0; private static Object[] deletedValues = null; private int PORT1; private int PORT2; /** * Sets up the test. */ @Override public final void postSetUp() throws Exception { map.clear(); final Host host = Host.getHost(0); serverVM0 = host.getVM(0); serverVM1 = host.getVM(1); clientVM1 = host.getVM(2); clientVM2 = host.getVM(3); PORT1 = serverVM0.invoke( () -> HARQueueNewImplDUnitTest.createServerCache(HARegionQueue.HA_EVICTION_POLICY_MEMORY)); PORT2 = serverVM1.invoke( () -> HARQueueNewImplDUnitTest.createServerCache(HARegionQueue.HA_EVICTION_POLICY_ENTRY)); numOfCreates = 0; numOfUpdates = 0; numOfInvalidates = 0; } /** * Tears down the test. 
*/ @Override public final void preTearDown() throws Exception { map.clear(); closeCache(); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.closeCache()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.closeCache()); // Unset the isSlowStartForTesting flag serverVM0.invoke(() -> ConflationDUnitTestHelper.unsetIsSlowStart()); serverVM1.invoke(() -> ConflationDUnitTestHelper.unsetIsSlowStart()); // then close the servers serverVM0.invoke(() -> HARQueueNewImplDUnitTest.closeCache()); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.closeCache()); disconnectAllFromDS(); } private void createCache(Properties props) throws Exception { props.setProperty(DELTA_PROPAGATION, "false"); DistributedSystem ds = getSystem(props); ds.disconnect(); ds = getSystem(props); assertNotNull(ds); cache = CacheFactory.create(ds); assertNotNull(cache); } public static Integer createServerCache() throws Exception { return createServerCache(null); } public static Integer createServerCache(String ePolicy) throws Exception { return createServerCache(ePolicy, new Integer(1)); } public static Integer createServerCache(String ePolicy, Integer cap) throws Exception { new HARQueueNewImplDUnitTest().createCache(new Properties()); AttributesFactory factory = new AttributesFactory(); factory.setScope(Scope.DISTRIBUTED_ACK); factory.setDataPolicy(DataPolicy.REPLICATE); RegionAttributes attrs = factory.create(); cache.createRegion(regionName, attrs); logger = cache.getLogger(); int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); CacheServer server1 = cache.addCacheServer(); server1.setPort(port); server1.setNotifyBySubscription(true); if (ePolicy != null) { File overflowDirectory = new File("bsi_overflow_" + port); overflowDirectory.mkdir(); DiskStoreFactory dsf = cache.createDiskStoreFactory(); File[] dirs1 = new File[] {overflowDirectory}; server1.getClientSubscriptionConfig().setEvictionPolicy(ePolicy); server1.getClientSubscriptionConfig().setCapacity(cap.intValue()); // specify 
diskstore for this server server1.getClientSubscriptionConfig() .setDiskStoreName(dsf.setDiskDirs(dirs1).create("bsi").getName()); } server1.start(); return new Integer(server1.getPort()); } public static Integer createOneMoreBridgeServer(Boolean notifyBySubscription) throws Exception { int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET); CacheServer server1 = cache.addCacheServer(); server1.setPort(port); server1.setNotifyBySubscription(notifyBySubscription.booleanValue()); server1.getClientSubscriptionConfig() .setEvictionPolicy(HARegionQueue.HA_EVICTION_POLICY_MEMORY); // let this server to use default diskstore server1.start(); return new Integer(server1.getPort()); } public static void createClientCache(String host, Integer port1, Integer port2, String rLevel, Boolean addListener) throws Exception { CacheServerTestUtil.disableShufflingOfEndpoints(); Properties props = new Properties(); props.setProperty(MCAST_PORT, "0"); props.setProperty(LOCATORS, ""); new HARQueueNewImplDUnitTest().createCache(props); AttributesFactory factory = new AttributesFactory(); ClientServerTestCase.configureConnectionPool(factory, host, port1.intValue(), port2.intValue(), true, Integer.parseInt(rLevel), 2, null, 1000, 250); factory.setScope(Scope.LOCAL); if (addListener.booleanValue()) { factory.addCacheListener(new CacheListenerAdapter() { @Override public void afterInvalidate(EntryEvent event) { logger.fine("Invalidate Event: <" + event.getKey() + ", " + event.getNewValue() + ">"); numOfInvalidates++; } @Override public void afterCreate(EntryEvent event) { logger.fine("Create Event: <" + event.getKey() + ", " + event.getNewValue() + ">"); numOfCreates++; } @Override public void afterUpdate(EntryEvent event) { logger.fine("Update Event: <" + event.getKey() + ", " + event.getNewValue() + ">"); numOfUpdates++; } }); } RegionAttributes attrs = factory.create(); cache.createRegion(regionName, attrs); logger = cache.getLogger(); } public static void 
createClientCache(String host, Integer port1, Integer port2, String rLevel) throws Exception { createClientCache(host, port1, port2, rLevel, Boolean.FALSE); } public static void registerInterestListAll() { try { Region r = cache.getRegion("/" + regionName); assertNotNull(r); r.registerInterest("ALL_KEYS"); } catch (Exception ex) { fail("failed in registerInterestListAll", ex); } } public static void registerInterestList() { try { Region r = cache.getRegion("/" + regionName); assertNotNull(r); r.registerInterest("k1"); r.registerInterest("k3"); r.registerInterest("k5"); } catch (Exception ex) { fail("failed while registering keys", ex); } } public static void putEntries() { try { Region r = cache.getRegion("/" + regionName); assertNotNull(r); r.put("k1", "pv1"); r.put("k2", "pv2"); r.put("k3", "pv3"); r.put("k4", "pv4"); r.put("k5", "pv5"); } catch (Exception ex) { fail("failed in putEntries()", ex); } } public static void createEntries() { try { Region r = cache.getRegion("/" + regionName); assertNotNull(r); r.create("k1", "v1"); r.create("k2", "v2"); r.create("k3", "v3"); r.create("k4", "v4"); r.create("k5", "v5"); } catch (Exception ex) { fail("failed in createEntries()", ex); } } public static void createEntries(Long num) { try { Region r = cache.getRegion("/" + regionName); assertNotNull(r); for (long i = 0; i < num.longValue(); i++) { r.create("k" + i, "v" + i); } } catch (Exception ex) { fail("failed in createEntries(Long)", ex); } } public static void putHeavyEntries(Integer num) { try { byte[] val = null; Region r = cache.getRegion("/" + regionName); assertNotNull(r); for (long i = 0; i < num.intValue(); i++) { val = new byte[1024 * 1024 * 5]; // 5 MB r.put("k0", val); } } catch (Exception ex) { fail("failed in putHeavyEntries(Long)", ex); } } /** * This test verifies that the client-messages-region does not store duplicate * ClientUpdateMessageImpl instances, during a normal put path as well as the GII path. 
*/ @Test public void testClientMsgsRegionSize() throws Exception { // slow start for dispatcher serverVM0.invoke(() -> ConflationDUnitTestHelper.setIsSlowStart("30000")); serverVM1.invoke(() -> ConflationDUnitTestHelper.setIsSlowStart("30000")); createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1"); final String client1Host = getServerHostName(clientVM1.getHost()); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client1Host, new Integer(PORT1), new Integer(PORT2), "1")); final String client2Host = getServerHostName(clientVM2.getHost()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client2Host, new Integer(PORT1), new Integer(PORT2), "1")); registerInterestListAll(); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.stopServer()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.createEntries()); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.startServer()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.verifyRegionSize(new Integer(5), new Integer(5), new Integer(PORT1))); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.verifyRegionSize(new Integer(5), new Integer(5), new Integer(PORT2))); } /** * This test verifies that the ha-region-queues increment the reference count of their respective * HAEventWrapper instances in the client-messages-region correctly, during put as well as GII * path. 
*/ @Test public void testRefCountForNormalAndGIIPut() throws Exception { // slow start for dispatcher serverVM0.invoke(() -> ConflationDUnitTestHelper.setIsSlowStart("240000")); serverVM1.invoke(() -> ConflationDUnitTestHelper.setIsSlowStart("240000")); createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1"); final String client1Host = getServerHostName(clientVM1.getHost()); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client1Host, new Integer(PORT1), new Integer(PORT2), "1")); final String client2Host = getServerHostName(clientVM2.getHost()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client2Host, new Integer(PORT1), new Integer(PORT2), "1")); registerInterestListAll(); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.stopServer()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.createEntries()); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.startServer()); serverVM1.invoke(() -> ValidateRegionSizes(PORT2)); serverVM0.invoke(() -> ValidateRegionSizes(PORT1)); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.updateMapForVM0()); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.updateMapForVM1()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.verifyQueueData(new Integer(5), new Integer(5), new Integer(PORT1))); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.verifyQueueData(new Integer(5), new Integer(5), new Integer(PORT2))); } private void ValidateRegionSizes(int port) { await().untilAsserted(() -> { Region region = cache.getRegion("/" + regionName); Region msgsRegion = cache.getRegion(CacheServerImpl.generateNameForClientMsgsRegion(port)); int clientMsgRegionSize = msgsRegion.size(); int regionSize = region.size(); assertTrue( "Region sizes were not as expected after 60 seconds elapsed. 
Actual region size = " + regionSize + "Actual client msg region size = " + clientMsgRegionSize, true == ((5 == clientMsgRegionSize) && (5 == regionSize))); }); } /** * This test verifies that the ha-region-queues decrement the reference count of their respective * HAEventWrapper instances in the client-messages-region correctly, after the events have been * peeked and removed from the queue. */ @Test public void testRefCountForPeekAndRemove() throws Exception { serverVM0.invoke(() -> ConflationDUnitTestHelper.setIsSlowStart("30000")); createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1"); final String client1Host = getServerHostName(clientVM1.getHost()); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client1Host, new Integer(PORT1), new Integer(PORT2), "1")); final String client2Host = getServerHostName(clientVM2.getHost()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client2Host, new Integer(PORT1), new Integer(PORT2), "1")); registerInterestListAll(); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.createEntries()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.verifyRegionSize(new Integer(5), new Integer(5), new Integer(PORT1))); serverVM0.invoke(() -> ConflationDUnitTestHelper.unsetIsSlowStart()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest .waitTillMessagesAreDispatched(new Integer(PORT1), new Long(5000))); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.verifyRegionSize(new Integer(5), new Integer(0), new Integer(PORT1))); } /** * This test verifies that the processing of the QRM messages results in decrementing the * reference count of corresponding HAEventWrapper instances, correctly. 
*/ @Test public void testRefCountForQRM() throws Exception { serverVM0.invoke(() -> ConflationDUnitTestHelper.setIsSlowStart("30000")); createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1"); final String client1Host = getServerHostName(clientVM1.getHost()); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client1Host, new Integer(PORT1), new Integer(PORT2), "1")); final String client2Host = getServerHostName(clientVM2.getHost()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client2Host, new Integer(PORT1), new Integer(PORT2), "1")); registerInterestListAll(); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.stopServer()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.createEntries()); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.startServer()); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.verifyRegionSize(new Integer(5), new Integer(5), new Integer(PORT2))); serverVM0.invoke(() -> ConflationDUnitTestHelper.unsetIsSlowStart()); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.verifyRegionSize(new Integer(5), new Integer(0), new Integer(PORT2))); } /** * This test verifies that the destruction of a ha-region (caused by proxy/client disconnect), * causes the reference count of all HAEventWrapper instances belonging to the ha-region-queue to * be decremented by one, and makes it visible to the client-messages-region. 
*/ @Test public void testRefCountForDestroy() throws Exception { // slow start for dispatcher serverVM0.invoke(() -> ConflationDUnitTestHelper.setIsSlowStart("30000")); serverVM1.invoke(() -> ConflationDUnitTestHelper.setIsSlowStart("30000")); createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1"); final String client1Host = getServerHostName(clientVM1.getHost()); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client1Host, new Integer(PORT1), new Integer(PORT2), "1")); final String client2Host = getServerHostName(clientVM2.getHost()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client2Host, new Integer(PORT1), new Integer(PORT2), "1")); registerInterestListAll(); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); // 1. stop the second server serverVM1.invoke(() -> HARQueueNewImplDUnitTest.stopServer()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.createEntries()); // 3. start the second server. 
serverVM1.invoke(() -> HARQueueNewImplDUnitTest.startServer()); Thread.sleep(3000); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.closeCache()); Thread.sleep(1000); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.updateMap1()); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.updateMap1()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.verifyQueueData(new Integer(5), new Integer(5), new Integer(PORT1))); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.verifyQueueData(new Integer(5), new Integer(5), new Integer(PORT2))); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.closeCache()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.updateMap2()); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.updateMap2()); Thread.sleep(1000); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.verifyQueueData(new Integer(5), new Integer(5), new Integer(PORT1))); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.verifyQueueData(new Integer(5), new Integer(5), new Integer(PORT2))); } /** * Addresses the bug 39179. If a clientUpdateMessage is dispatched to the client while its GII was * under way, then it should not be put into the HARegionQueue of a client at receiving server * side. 
*/ @Test public void testConcurrentGIIAndDispatch() throws Exception { serverVM0.invoke(() -> ConflationDUnitTestHelper.setIsSlowStart("40000")); serverVM1.invoke(() -> ConflationDUnitTestHelper.setIsSlowStart("40000")); createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1"); final String client1Host = getServerHostName(clientVM1.getHost()); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client1Host, new Integer(PORT1), new Integer(PORT2), "1")); final String client2Host = getServerHostName(clientVM2.getHost()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client2Host, new Integer(PORT1), new Integer(PORT2), "1")); registerInterestListAll(); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.registerInterestListAll()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.registerInterestListAll()); // 1. stop the second server serverVM1.invoke(() -> HARQueueNewImplDUnitTest.stopServer()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.createEntries()); serverVM0.invoke(HARQueueNewImplDUnitTest.class, "makeValuesOfSomeKeysNullInClientMsgsRegion", new Object[] {new Integer(PORT1), new String[] {"k1", "k3"}}); // 3. start the second server. 
serverVM1.invoke(() -> HARQueueNewImplDUnitTest.startServer()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.verifyRegionSize(new Integer(5), new Integer(3), new Integer(PORT1))); serverVM1.invoke(HARQueueNewImplDUnitTest.class, "verifyNullValuesInCMR", new Object[] {new Integer(3), new Integer(PORT2), new String[] {"k1", "k3"}}); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.verifyRegionSize(new Integer(5), new Integer(3), new Integer(PORT2))); serverVM0.invoke(HARQueueNewImplDUnitTest.class, "populateValuesOfSomeKeysInClientMsgsRegion", new Object[] {new Integer(PORT1), new String[] {"k1", "k3"}}); serverVM0.invoke(() -> ConflationDUnitTestHelper.unsetIsSlowStart()); serverVM1.invoke(() -> ConflationDUnitTestHelper.unsetIsSlowStart()); } /** * This test verifies that when two BridgeServerImpl instances are created in a single VM, they do * share the client-messages-region. */ @Test public void testTwoBridgeServersInOneVMDoShareCMR() throws Exception { // slow start for dispatcher serverVM0.invoke(() -> ConflationDUnitTestHelper.setIsSlowStart("30000")); Integer port3 = (Integer) serverVM0 .invoke(() -> HARQueueNewImplDUnitTest.createOneMoreBridgeServer(Boolean.TRUE)); createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), port3, "0"); final String client1Host = getServerHostName(clientVM1.getHost()); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client1Host, new Integer(PORT1), new Integer(PORT2), "1")); final String client2Host = getServerHostName(clientVM2.getHost()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client2Host, new Integer(PORT1), new Integer(PORT2), "1")); registerInterestListAll(); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.createEntries()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.verifyRegionSize(new Integer(5), new 
Integer(5), new Integer(PORT1))); serverVM0.invoke( () -> HARQueueNewImplDUnitTest.verifyRegionSize(new Integer(5), new Integer(5), port3)); } /** * This test verifies that two clients, connected to two cache servers with different * notifyBySubscription values, on a single VM, receive updates/invalidates depending upon their * notifyBySubscription value. */ @Test public void testUpdatesWithTwoBridgeServersInOneVM() throws Exception { Integer port3 = (Integer) serverVM0 .invoke(() -> HARQueueNewImplDUnitTest.createOneMoreBridgeServer(Boolean.FALSE)); createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1", Boolean.TRUE); final String client1Host = getServerHostName(clientVM1.getHost()); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client1Host, port3, new Integer(PORT2), "1", Boolean.TRUE)); registerInterestListAll(); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.registerInterestListAll()); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.createEntries()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.putEntries()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest .waitTillMessagesAreDispatched(new Integer(PORT1), new Long(5000))); serverVM0.invoke( () -> HARQueueNewImplDUnitTest.waitTillMessagesAreDispatched(port3, new Long(5000))); // expect updates verifyUpdatesReceived(new Integer(5), Boolean.TRUE, new Long(5000)); // expect invalidates clientVM1.invoke(() -> HARQueueNewImplDUnitTest.verifyUpdatesReceived(new Integer(5), Boolean.TRUE, new Long(5000))); } /** * This test verifies that the HAEventWrapper instances present in the client-messages-region give * up the references to their respective ClientUpdateMessageImpl instances. 
*/ @Test public void testHAEventWrapperDoesNotHoldCUMOnceInsideCMR() throws Exception { // slow start for dispatcher serverVM0.invoke(() -> ConflationDUnitTestHelper.setIsSlowStart("30000")); createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1"); final String client1Host = getServerHostName(clientVM1.getHost()); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client1Host, new Integer(PORT1), new Integer(PORT2), "1")); final String client2Host = getServerHostName(clientVM2.getHost()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client2Host, new Integer(PORT1), new Integer(PORT2), "1")); registerInterestListAll(); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.stopServer()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.createEntries(new Long(1000))); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.startServer()); Thread.sleep(2000); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.verifyNullCUMReference(new Integer(PORT1))); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.verifyNullCUMReference(new Integer(PORT2))); } /** * This test verifies that client-messages-regions are not created for the cache servers who have * eviction policy as 'none'. Instead, such cache servers will have simple HashMap structures. * Also, it verifies that such a structure (referred to as haContainer, in general) is destroyed * when its cache server is stopped. 
*/ @Test public void testCMRNotCreatedForNoneEvictionPolicy() throws Exception { serverVM0.invoke(() -> HARQueueNewImplDUnitTest.closeCache()); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.closeCache()); Thread.sleep(2000); PORT1 = ((Integer) serverVM0.invoke( () -> HARQueueNewImplDUnitTest.createServerCache(HARegionQueue.HA_EVICTION_POLICY_NONE))) .intValue(); PORT2 = ((Integer) serverVM1.invoke( () -> HARQueueNewImplDUnitTest.createServerCache(HARegionQueue.HA_EVICTION_POLICY_NONE))) .intValue(); Boolean isRegion = Boolean.FALSE; // slow start for dispatcher serverVM0.invoke(() -> ConflationDUnitTestHelper.setIsSlowStart("30000")); createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1"); final String client1Host = getServerHostName(clientVM1.getHost()); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client1Host, new Integer(PORT1), new Integer(PORT2), "1")); final String client2Host = getServerHostName(clientVM2.getHost()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client2Host, new Integer(PORT1), new Integer(PORT2), "1")); registerInterestListAll(); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); serverVM0 .invoke(() -> HARQueueNewImplDUnitTest.verifyHaContainerType(isRegion, new Integer(PORT1))); serverVM1 .invoke(() -> HARQueueNewImplDUnitTest.verifyHaContainerType(isRegion, new Integer(PORT2))); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.stopOneBridgeServer(new Integer(PORT1))); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.stopOneBridgeServer(new Integer(PORT2))); serverVM0.invoke( () -> HARQueueNewImplDUnitTest.verifyHaContainerDestroyed(isRegion, new Integer(PORT1))); serverVM1.invoke( () -> HARQueueNewImplDUnitTest.verifyHaContainerDestroyed(isRegion, new Integer(PORT2))); } /** * This test verifies that client-messages-regions are created for the cache servers who 
have * eviction policy either as 'mem' or as 'entry'. Also, it verifies that such a * client-messages-region is destroyed when its cache server is stopped. */ @Test public void testCMRCreatedForMemOrEntryEvictionPolicy() throws Exception { Boolean isRegion = Boolean.TRUE; // slow start for dispatcher serverVM0.invoke(() -> ConflationDUnitTestHelper.setIsSlowStart("30000")); createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1"); final String client1Host = getServerHostName(clientVM1.getHost()); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client1Host, new Integer(PORT1), new Integer(PORT2), "1")); final String client2Host = getServerHostName(clientVM1.getHost()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client2Host, new Integer(PORT1), new Integer(PORT2), "1")); registerInterestListAll(); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); serverVM0 .invoke(() -> HARQueueNewImplDUnitTest.verifyHaContainerType(isRegion, new Integer(PORT1))); serverVM1 .invoke(() -> HARQueueNewImplDUnitTest.verifyHaContainerType(isRegion, new Integer(PORT2))); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.stopOneBridgeServer(new Integer(PORT1))); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.stopOneBridgeServer(new Integer(PORT2))); serverVM0.invoke( () -> HARQueueNewImplDUnitTest.verifyHaContainerDestroyed(isRegion, new Integer(PORT1))); serverVM1.invoke( () -> HARQueueNewImplDUnitTest.verifyHaContainerDestroyed(isRegion, new Integer(PORT2))); } /** * This test verifies that the Cache.rootRegions() method does not return the * client-messages-region of any of the cache's attached cache servers. 
*/ @Test public void testCMRNotReturnedByRootRegionsMethod() throws Exception { createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1"); final String client1Host = getServerHostName(clientVM1.getHost()); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client1Host, new Integer(PORT1), new Integer(PORT2), "1")); final String client2Host = getServerHostName(clientVM2.getHost()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client2Host, new Integer(PORT1), new Integer(PORT2), "1")); registerInterestListAll(); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.registerInterestList()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.createEntries()); serverVM0.invoke( () -> HARQueueNewImplDUnitTest.verifyRootRegionsDoesNotReturnCMR(new Integer(PORT1))); serverVM1.invoke( () -> HARQueueNewImplDUnitTest.verifyRootRegionsDoesNotReturnCMR(new Integer(PORT2))); } /** * This test verifies that the memory footprint of the ha region queues is less when ha-overflow * is enabled (with an appropriate value of haCapacity) compared to when it is disabled, for the * same amount of data feed. 
*/ @Ignore("TODO") @Test public void testMemoryFootprintOfHARegionQueuesWithAndWithoutOverflow() throws Exception { serverVM0.invoke(() -> HARQueueNewImplDUnitTest.closeCache()); serverVM1.invoke(() -> HARQueueNewImplDUnitTest.closeCache()); Thread.sleep(2000); Integer numOfEntries = new Integer(30); PORT1 = ((Integer) serverVM0.invoke(() -> HARQueueNewImplDUnitTest .createServerCache(HARegionQueue.HA_EVICTION_POLICY_MEMORY, new Integer(30)))).intValue(); PORT2 = ((Integer) serverVM1.invoke( () -> HARQueueNewImplDUnitTest.createServerCache(HARegionQueue.HA_EVICTION_POLICY_NONE))) .intValue(); serverVM0.invoke(() -> ConflationDUnitTestHelper.setIsSlowStart("60000")); serverVM1.invoke(() -> ConflationDUnitTestHelper.setIsSlowStart("60000")); createClientCache(getServerHostName(Host.getHost(0)), new Integer(PORT1), new Integer(PORT2), "1"); final String client1Host = getServerHostName(clientVM1.getHost()); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client1Host, new Integer(PORT1), new Integer(PORT2), "1")); final String client2Host = getServerHostName(clientVM2.getHost()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.createClientCache(client2Host, new Integer(PORT1), new Integer(PORT2), "1")); registerInterestListAll(); clientVM1.invoke(() -> HARQueueNewImplDUnitTest.registerInterestListAll()); clientVM2.invoke(() -> HARQueueNewImplDUnitTest.registerInterestListAll()); serverVM0.invoke(() -> HARQueueNewImplDUnitTest.putHeavyEntries(numOfEntries)); Long usedMemInVM0 = (Long) serverVM0.invoke(() -> HARQueueNewImplDUnitTest .getUsedMemoryAndVerifyRegionSize(new Integer(1), numOfEntries, new Integer(PORT1))); Long usedMemInVM1 = (Long) serverVM1.invoke(() -> HARQueueNewImplDUnitTest .getUsedMemoryAndVerifyRegionSize(new Integer(1), numOfEntries, new Integer(-1))); serverVM0.invoke(() -> ConflationDUnitTestHelper.unsetIsSlowStart()); serverVM1.invoke(() -> ConflationDUnitTestHelper.unsetIsSlowStart()); logger.fine("Used Mem: " + 
usedMemInVM1.longValue() + "(without overflow), " + usedMemInVM0.longValue() + "(with overflow)"); assertTrue(usedMemInVM0.longValue() < usedMemInVM1.longValue()); } private static void verifyNullCUMReference(Integer port) { Region r = cache.getRegion("/" + CacheServerImpl.generateNameForClientMsgsRegion(port.intValue())); assertNotNull(r); Object[] arr = r.keySet().toArray(); for (int i = 0; i < arr.length; i++) { assertNull(((HAEventWrapper) arr[i]).getClientUpdateMessage()); } } private static void verifyHaContainerDestroyed(Boolean isRegion, Integer port) { Map r = cache.getRegion("/" + CacheServerImpl.generateNameForClientMsgsRegion(port.intValue())); if (isRegion.booleanValue()) { if (r != null) { assertTrue(((Region) r).isDestroyed()); } } else { r = ((CacheServerImpl) cache.getCacheServers().toArray()[0]).getAcceptor() .getCacheClientNotifier().getHaContainer(); if (r != null) { assertTrue(r.isEmpty()); } } } static Long getUsedMemoryAndVerifyRegionSize(Integer rSize, Integer haContainerSize, Integer port) { Long retVal = null; try { retVal = new Long(Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()); if (port.intValue() != -1) { verifyRegionSize(rSize, haContainerSize, port); } else { verifyRegionSize(rSize, haContainerSize); } } catch (Exception e) { fail("failed in getUsedMemory()" + e); } return retVal; } private static void setHACapacity(Integer cap) { try { Iterator iter = cache.getCacheServers().iterator(); if (iter.hasNext()) { CacheServer server = (CacheServer) iter.next(); server.getClientSubscriptionConfig().setCapacity(cap.intValue()); } } catch (Exception e) { fail("failed in setHACapacity()" + e); } } private static void stopOneBridgeServer(Integer port) { try { Iterator iter = cache.getCacheServers().iterator(); if (iter.hasNext()) { CacheServer server = (CacheServer) iter.next(); if (server.getPort() == port.intValue()) { server.stop(); } } } catch (Exception e) { fail("failed in stopOneBridgeServer()" + e); } } public 
static void stopServer() { try { Iterator iter = cache.getCacheServers().iterator(); if (iter.hasNext()) { CacheServer server = (CacheServer) iter.next(); server.stop(); } } catch (Exception e) { fail("failed in stopServer()" + e); } } public static void updateMapForVM0() { try { map.put("k1", new Long(3)); map.put("k2", new Long(1)); map.put("k3", new Long(3)); map.put("k4", new Long(1)); map.put("k5", new Long(3)); } catch (Exception e) { fail("failed in updateMapForVM0()" + e); } } public static void updateMap1() { try { map.put("k1", new Long(2)); map.put("k2", new Long(1)); map.put("k3", new Long(2)); map.put("k4", new Long(1)); map.put("k5", new Long(2)); } catch (Exception e) { fail("failed in updateMap1()" + e); } } public static void updateMap2() { try { map.put("k1", new Long(1)); map.put("k2", new Long(1)); map.put("k3", new Long(1)); map.put("k4", new Long(1)); map.put("k5", new Long(1)); } catch (Exception e) { fail("failed in updateMap2()" + e); } } public static void updateMapForVM1() { try { updateMapForVM0(); } catch (Exception e) { fail("failed in updateMapForVM1()" + e); } } public static void printMsg(String msg) { try { logger.fine(msg); } catch (Exception e) { fail("failed in printMsg()" + e); } } public static void haQueuePut() { Set set = HARegionQueue.getDispatchedMessagesMapForTesting().keySet(); Iterator iter = set.iterator(); logger.fine("# of HAQueues: " + set.size()); while (iter.hasNext()) { // HARegion haRegion = (HARegion) cache.getRegion(Region.SEPARATOR + (String) iter.next()); // haRegion.getOwner().put(); } } public static void verifyNullValuesInCMR(final Integer numOfEntries, final Integer port, String[] keys) { final Region msgsRegion = cache.getRegion(generateNameForClientMsgsRegion(port.intValue())); WaitCriterion wc = new WaitCriterion() { String excuse; @Override public boolean done() { int sz = msgsRegion.size(); return sz == numOfEntries.intValue(); } @Override public String description() { return excuse; } }; 
GeodeAwaitility.await().untilAsserted(wc); Set entries = msgsRegion.entrySet(); Iterator iter = entries.iterator(); for (; iter.hasNext();) { Entry entry = (Entry) iter.next(); ClientUpdateMessage cum = (ClientUpdateMessage) entry.getValue(); for (int i = 0; i < keys.length; i++) { logger.fine("cum.key: " + cum.getKeyToConflate()); // assert that the keys are not present in entries set assertTrue(!keys[i].equals(cum.getKeyToConflate())); } } } public static void makeValuesOfSomeKeysNullInClientMsgsRegion(Integer port, String[] keys) { Region msgsRegion = cache.getRegion(CacheServerImpl.generateNameForClientMsgsRegion(port.intValue())); assertNotNull(msgsRegion); Set entries = msgsRegion.entrySet(); Iterator iter = entries.iterator(); deletedValues = new Object[keys.length]; while (iter.hasNext()) { Region.Entry entry = (Region.Entry) iter.next(); ClientUpdateMessage cum = (ClientUpdateMessage) entry.getValue(); for (int i = 0; i < keys.length; i++) { if (keys[i].equals(cum.getKeyToConflate())) { logger.fine("HARQueueNewImplDUnit: Removing " + cum.getKeyOfInterest()); deletedValues[i] = msgsRegion.remove(entry.getKey()); } } } } public static void populateValuesOfSomeKeysInClientMsgsRegion(Integer port, String[] keys) { Region msgsRegion = cache.getRegion(CacheServerImpl.generateNameForClientMsgsRegion(port.intValue())); assertNotNull(msgsRegion); for (int i = 0; i < keys.length; i++) { logger.fine("HARQueueNewImplDUnit: populating " + deletedValues[i]); msgsRegion.put(keys[1], deletedValues[i]); } } public static void startServer() { try { Iterator iter = cache.getCacheServers().iterator(); if (iter.hasNext()) { CacheServer server = (CacheServer) iter.next(); server.start(); } } catch (Exception e) { fail("failed in startServer()" + e); } } public static void verifyQueueData(Integer regionsize, Integer msgsRegionsize, Integer port) { try { // Get the clientMessagesRegion and check the size. 
Region msgsRegion = cache.getRegion(CacheServerImpl.generateNameForClientMsgsRegion(port.intValue())); Region region = cache.getRegion("/" + regionName); logger.fine( "size<serverRegion, clientMsgsRegion>: " + region.size() + ", " + msgsRegion.size()); assertEquals(regionsize.intValue(), region.size()); assertEquals(msgsRegionsize.intValue(), msgsRegion.size()); Iterator iter = msgsRegion.entrySet().iterator(); while (iter.hasNext()) { await().untilAsserted(() -> { Region.Entry entry = (Region.Entry) iter.next(); HAEventWrapper wrapper = (HAEventWrapper) entry.getKey(); ClientUpdateMessage cum = (ClientUpdateMessage) entry.getValue(); Object key = cum.getKeyOfInterest(); logger.fine("key<feedCount, regionCount>: " + key + "<" + ((Long) map.get(key)).longValue() + ", " + wrapper.getReferenceCount() + ">"); assertEquals(((Long) map.get(key)).longValue(), wrapper.getReferenceCount()); }); } } catch (Exception e) { fail("failed in verifyQueueData()" + e); } } public static void verifyRegionSize(final Integer regionSize, final Integer msgsRegionsize, final Integer port) { WaitCriterion wc = new WaitCriterion() { String excuse; @Override public boolean done() { try { // Get the clientMessagesRegion and check the size. 
Region region = cache.getRegion("/" + regionName); // logger.fine("size<serverRegion, clientMsgsRegion>: " + region.size() // + ", " + msgsRegion.size()); int sz = region.size(); if (regionSize.intValue() != sz) { excuse = "expected regionSize = " + regionSize + ", actual = " + sz; return false; } Iterator iter = cache.getCacheServers().iterator(); if (iter.hasNext()) { CacheServerImpl server = (CacheServerImpl) iter.next(); Map msgsRegion = server.getAcceptor().getCacheClientNotifier().getHaContainer(); // Region msgsRegion = cache.getRegion(BridgeServerImpl // .generateNameForClientMsgsRegion(port.intValue())); sz = msgsRegion.size(); if (msgsRegionsize.intValue() != sz) { excuse = "expected msgsRegionsize = " + msgsRegionsize + ", actual = " + sz; return false; } } return true; } catch (Exception e) { excuse = "Caught exception " + e; return false; } } @Override public String description() { return excuse; } }; GeodeAwaitility.await().untilAsserted(wc); } public static void verifyRegionSize(final Integer regionSize, final Integer msgsRegionsize) { WaitCriterion wc = new WaitCriterion() { String excuse; @Override public boolean done() { try { // Get the clientMessagesRegion and check the size. 
Region region = cache.getRegion("/" + regionName); int sz = region.size(); if (regionSize.intValue() != sz) { excuse = "Expected regionSize = " + regionSize.intValue() + ", actual = " + sz; return false; } Iterator iter = cache.getCacheServers().iterator(); if (!iter.hasNext()) { return true; } CacheServerImpl server = (CacheServerImpl) iter.next(); sz = server.getAcceptor().getCacheClientNotifier().getHaContainer().size(); if (sz != msgsRegionsize.intValue()) { excuse = "Expected msgsRegionsize = " + msgsRegionsize.intValue() + ", actual = " + sz; return false; } return true; } catch (Exception e) { excuse = "failed due to " + e; return false; } } @Override public String description() { return excuse; } }; GeodeAwaitility.await().untilAsserted(wc); } public static void verifyHaContainerType(Boolean isRegion, Integer port) { try { Map haMap = cache.getRegion(CacheServerImpl.generateNameForClientMsgsRegion(port.intValue())); if (isRegion.booleanValue()) { assertNotNull(haMap); assertTrue(haMap instanceof LocalRegion); haMap = ((CacheServerImpl) cache.getCacheServers().toArray()[0]).getAcceptor() .getCacheClientNotifier().getHaContainer(); assertNotNull(haMap); assertTrue(haMap instanceof HAContainerRegion); } else { assertNull(haMap); haMap = ((CacheServerImpl) cache.getCacheServers().toArray()[0]).getAcceptor() .getCacheClientNotifier().getHaContainer(); assertNotNull(haMap); assertTrue(haMap instanceof HAContainerMap); } logger.fine("haContainer: " + haMap); } catch (Exception e) { fail("failed in verifyHaContainerType()" + e); } } public static void verifyRootRegionsDoesNotReturnCMR(Integer port) { try { String cmrName = CacheServerImpl.generateNameForClientMsgsRegion(port.intValue()); Map haMap = cache.getRegion(cmrName); assertNotNull(haMap); String rName = ""; Iterator iter = cache.rootRegions().iterator(); while (iter.hasNext()) { rName = ((Region) iter.next()).getName(); if (cmrName.equals(rName)) { throw new AssertionError( "Cache.rootRegions() method 
should not return the client_messages_region."); } logger.fine("Region name returned from cache.rootRegions(): " + rName); } } catch (Exception e) { fail("failed in verifyRootRegionsDoesNotReturnCMR()" + e); } } public static void verifyUpdatesReceived(final Integer num, Boolean isUpdate, Long waitLimit) { try { if (isUpdate.booleanValue()) { WaitCriterion ev = new WaitCriterion() { @Override public boolean done() { return num.intValue() == numOfUpdates; } @Override public String description() { return null; } }; GeodeAwaitility.await().untilAsserted(ev); } else { WaitCriterion ev = new WaitCriterion() { @Override public boolean done() { return num.intValue() == numOfInvalidates; } @Override public String description() { return null; } }; GeodeAwaitility.await().untilAsserted(ev); } } catch (Exception e) { fail("failed in verifyUpdatesReceived()" + e); } } public static void waitTillMessagesAreDispatched(Integer port, Long waitLimit) { try { Map haContainer = null; haContainer = cache.getRegion( SEPARATOR + generateNameForClientMsgsRegion(port.intValue())); if (haContainer == null) { Object[] servers = cache.getCacheServers().toArray(); for (int i = 0; i < servers.length; i++) { if (port.intValue() == ((CacheServerImpl) servers[i]).getPort()) { haContainer = ((CacheServerImpl) servers[i]).getAcceptor().getCacheClientNotifier() .getHaContainer(); break; } } } final Map m = haContainer; WaitCriterion ev = new WaitCriterion() { @Override public boolean done() { return m.size() == 0; } @Override public String description() { return null; } }; GeodeAwaitility.await().untilAsserted(ev); } catch (Exception e) { fail("failed in waitTillMessagesAreDispatched()" + e); } } public static void closeCache() { if (cache != null && !cache.isClosed()) { cache.close(); cache.getDistributedSystem().disconnect(); } } }
apache-2.0
jmuk/toolkit
src/main/java/com/google/api/codegen/transformer/nodejs/NodeJSModelTypeNameConverter.java
5909
/* Copyright 2016 Google Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.transformer.nodejs; import com.google.api.codegen.transformer.ModelTypeNameConverter; import com.google.api.codegen.util.TypeName; import com.google.api.codegen.util.TypeNameConverter; import com.google.api.codegen.util.TypedValue; import com.google.api.codegen.util.nodejs.NodeJSTypeTable; import com.google.api.tools.framework.model.EnumValue; import com.google.api.tools.framework.model.ProtoElement; import com.google.api.tools.framework.model.TypeRef; import com.google.common.collect.ImmutableMap; import com.google.protobuf.DescriptorProtos.FieldDescriptorProto.Type; public class NodeJSModelTypeNameConverter implements ModelTypeNameConverter { /** * A map from primitive types in proto to NodeJS counterparts. 
*/ private static final ImmutableMap<Type, String> PRIMITIVE_TYPE_MAP = ImmutableMap.<Type, String>builder() .put(Type.TYPE_BOOL, "boolean") .put(Type.TYPE_DOUBLE, "number") .put(Type.TYPE_FLOAT, "number") .put(Type.TYPE_INT64, "number") .put(Type.TYPE_UINT64, "number") .put(Type.TYPE_SINT64, "number") .put(Type.TYPE_FIXED64, "number") .put(Type.TYPE_SFIXED64, "number") .put(Type.TYPE_INT32, "number") .put(Type.TYPE_UINT32, "number") .put(Type.TYPE_SINT32, "number") .put(Type.TYPE_FIXED32, "number") .put(Type.TYPE_SFIXED32, "number") .put(Type.TYPE_STRING, "String") .put(Type.TYPE_BYTES, "String") .build(); /** * A map from primitive types in proto to zero value in NodeJS */ private static final ImmutableMap<Type, String> PRIMITIVE_ZERO_VALUE = ImmutableMap.<Type, String>builder() .put(Type.TYPE_BOOL, "false") .put(Type.TYPE_DOUBLE, "0.0") .put(Type.TYPE_FLOAT, "0.0") .put(Type.TYPE_INT64, "0") .put(Type.TYPE_UINT64, "0") .put(Type.TYPE_SINT64, "0") .put(Type.TYPE_FIXED64, "0") .put(Type.TYPE_SFIXED64, "0") .put(Type.TYPE_INT32, "0") .put(Type.TYPE_UINT32, "0") .put(Type.TYPE_SINT32, "0") .put(Type.TYPE_FIXED32, "0") .put(Type.TYPE_SFIXED32, "0") .put(Type.TYPE_STRING, "\'\'") .put(Type.TYPE_BYTES, "\'\'") .build(); private TypeNameConverter typeNameConverter; public NodeJSModelTypeNameConverter(String implicitPackageName) { this.typeNameConverter = new NodeJSTypeTable(implicitPackageName); } @Override public TypeName getTypeName(TypeRef type) { if (type.isMap()) { return new TypeName("Object"); } else if (type.isRepeated()) { TypeName elementTypeName = getTypeNameForElementType(type); return new TypeName("", "", "%i[]", elementTypeName); } else { return getTypeNameForElementType(type); } } /** * Returns the NodeJS representation of a type, without cardinality. If the type is a primitive, * getTypeNameForElementType returns it in unboxed form. 
*/ @Override public TypeName getTypeNameForElementType(TypeRef type) { String primitiveTypeName = PRIMITIVE_TYPE_MAP.get(type.getKind()); if (primitiveTypeName != null) { return new TypeName(primitiveTypeName); } switch (type.getKind()) { case TYPE_MESSAGE: return getTypeName(type.getMessageType()); case TYPE_ENUM: return getTypeName(type.getEnumType()); default: throw new IllegalArgumentException("unknown type kind: " + type.getKind()); } } @Override public TypeName getTypeName(ProtoElement elem) { return typeNameConverter.getTypeName(elem.getFullName()); } /** * Returns the NodeJS representation of a zero value for that type, to be used in code sample doc. */ @Override public TypedValue getZeroValue(TypeRef type) { // Don't call getTypeName; we don't need to import these. if (type.isMap()) { return TypedValue.create(new TypeName("Object"), "{}"); } if (type.isRepeated()) { return TypedValue.create(new TypeName("Array"), "[]"); } if (PRIMITIVE_ZERO_VALUE.containsKey(type.getKind())) { return TypedValue.create(getTypeName(type), PRIMITIVE_ZERO_VALUE.get(type.getKind())); } if (type.isMessage()) { return TypedValue.create(getTypeName(type), "{}"); } if (type.isEnum()) { EnumValue enumValue = type.getEnumType().getValues().get(0); return TypedValue.create(getTypeName(type), "%s." + enumValue.getSimpleName()); } return TypedValue.create(new TypeName(""), "null"); } @Override public String renderPrimitiveValue(TypeRef type, String value) { Type primitiveType = type.getKind(); if (!PRIMITIVE_TYPE_MAP.containsKey(primitiveType)) { throw new IllegalArgumentException( "Initial values are only supported for primitive types, got type " + type + ", with value " + value); } switch (primitiveType) { case TYPE_BOOL: return value.toLowerCase(); case TYPE_STRING: case TYPE_BYTES: return "\'" + value + "\'"; default: // Types that do not need to be modified (e.g. TYPE_INT32) are handled // here return value; } } }
apache-2.0
PamelaLiu/AtomicBomb
AtomicBomb/app/src/test/java/com/jwl/atomicbomb/ExampleUnitTest.java
311
package com.jwl.atomicbomb; import org.junit.Test; import static org.junit.Assert.*; /** * To work on unit tests, switch the Test Artifact in the Build Variants view. */ public class ExampleUnitTest { @Test public void addition_isCorrect() throws Exception { assertEquals(4, 2 + 2); } }
apache-2.0
suoluo/inforshuttle
inforshuttle-mongo/src/main/java/com/inforshuttle/mongo/util/MongoDBTest.java
12588
package com.inforshuttle.mongo.util; import java.util.List; import java.util.Set; import java.util.regex.Pattern; import org.bson.types.ObjectId; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.springframework.context.support.ClassPathXmlApplicationContext; import org.springframework.data.mongodb.core.MongoTemplate; import com.mongodb.BasicDBObject; import com.mongodb.DBCollection; import com.mongodb.DBCursor; import com.mongodb.DBObject; import com.mongodb.QueryBuilder; import com.mongodb.QueryOperators; public class MongoDBTest { DBCollection userCollection = null; ClassPathXmlApplicationContext context; private MongoTemplate mongoTemplate; @Before public void setUp() throws Exception { context = new ClassPathXmlApplicationContext("spring-mongodb.xml"); mongoTemplate = (MongoTemplate) context.getBean("mongoTemplate"); userCollection = mongoTemplate.getCollection("users"); } @After public void tearDown(){ context.close(); } /** * 查询所有的集合名称 */ @Test public void testGetAllCollections() { Set<String> collectionNames = mongoTemplate.getCollectionNames(); for (String name : collectionNames) { System.out.println("collectionName:" + name); } } /** * 查询所有的用户信息 */ @Test public void testFind() { testInitTestData(); // find方法查询所有的数据并返回一个游标对象 DBCursor cursor = userCollection.find(); while (cursor.hasNext()) { print(cursor.next()); } // 获取数据总条数 int sum = cursor.count(); System.out.println("sum===" + sum); } /** * 查询第一条数据 */ public void testFindOne() { testInitTestData(); // 只查询第一条数据 DBObject oneUser = userCollection.findOne(); print(oneUser); } /** * 条件查询 */ public void testConditionQuery() { testInitTestData(); // 查询id=50a1ed9965f413fa025166db DBObject oneUser = userCollection.findOne(new BasicDBObject("_id", new ObjectId("50a1ed9965f413fa025166db"))); print(oneUser); // 查询age=24 List<DBObject> userList1 = userCollection.find(new BasicDBObject("age", 24)).toArray(); print(" find age=24: "); printList(userList1); // 查询age>=23 List<DBObject> 
userList2 = userCollection.find(new BasicDBObject("age", new BasicDBObject("$gte", 23))) .toArray(); print(" find age>=23: "); printList(userList2); // 查询age<=20 List<DBObject> userList3 = userCollection.find(new BasicDBObject("age", new BasicDBObject("$lte", 20))) .toArray(); print(" find age<=20: "); printList(userList3); // 查询age!=25 List<DBObject> userList4 = userCollection.find(new BasicDBObject("age", new BasicDBObject("$ne", 25))) .toArray(); print(" find age!=25: "); printList(userList4); // 查询age in[23,24,27] List<DBObject> userList5 = userCollection .find(new BasicDBObject("age", new BasicDBObject(QueryOperators.IN, new int[] { 23, 24, 27 }))) .toArray(); print(" find agein[23,24,27]: "); printList(userList5); // 查询age not in[23,24,27] List<DBObject> userList6 = userCollection .find(new BasicDBObject("age", new BasicDBObject(QueryOperators.NIN, new int[] { 23, 24, 27 }))) .toArray(); print(" find age not in[23,24,27]: "); printList(userList6); // 查询29>age>=20 List<DBObject> userList7 = userCollection .find(new BasicDBObject("age", new BasicDBObject("$gte", 20).append("$lt", 29))).toArray(); print(" find 29>age>=20: "); printList(userList7); // 查询age>24 and name="zhangguochen" BasicDBObject query = new BasicDBObject(); query.put("age", new BasicDBObject("$gt", 24)); query.put("name", "zhangguochen"); List<DBObject> userList8 = userCollection.find(query).toArray(); print(" find age>24 and name='zhangguochen':"); printList(userList8); // 和上面的查询一样,用的是QueryBuilder对象 QueryBuilder queryBuilder = new QueryBuilder(); queryBuilder.and("age").greaterThan(24); queryBuilder.and("name").equals("zhangguochen"); List<DBObject> userList82 = userCollection.find(queryBuilder.get()).toArray(); print(" QueryBuilder find age>24 and name='zhangguochen':"); printList(userList82); // 查询所有的用户,并按照年龄升序排列 List<DBObject> userList9 = userCollection.find().sort(new BasicDBObject("age", 1)).toArray(); print(" find all sort age asc: "); printList(userList9); // 查询特定字段 DBObject query1 = 
new BasicDBObject();// 要查的条件 query.put("age", new BasicDBObject("$gt", 20)); DBObject field = new BasicDBObject();// 要查的哪些字段 field.put("name", true); field.put("age", true); List<DBObject> userList10 = userCollection.find(query1, field).toArray(); print(" select name,age where age>20"); printList(userList10); // 查询部分数据 DBObject query2 = new BasicDBObject();// 查询条件 query2.put("age", new BasicDBObject("$lt", 27)); DBObject fields = new BasicDBObject();// 查询字段 fields.put("name", true); fields.put("age", true); List<DBObject> userList11 = userCollection.find(query2, fields).skip(1).limit(1).toArray(); print(" select age,name from user skip 1 limit 1:"); printList(userList11); // 模糊查询 DBObject fuzzy_query = new BasicDBObject(); String keyWord = "zhang"; Pattern pattern = Pattern.compile("^" + keyWord + ".*$", Pattern.CASE_INSENSITIVE); fuzzy_query.put("name", pattern); // 根据name like zhang%查询 List<DBObject> userList12 = userCollection.find(fuzzy_query).toArray(); print(" select * from user where name like 'zhang*'"); printList(userList12); } /** * 删除用户数据 */ public void testRemoveUser() { testInitTestData(); DBObject query = new BasicDBObject(); // 删除age>24的数据 query.put("age", new BasicDBObject("$gt", 24)); userCollection.remove(query); printList(userCollection.find().toArray()); } /** * 修改用户数据 */ public void testUpdateUser() { // update(query,set,false,true); // query:需要修改的数据查询条件,相当于关系型数据库where后的语句 // set:需要设的值,相当于关系型数据库的set语句 // false:需要修改的数据如果不存在,是否插入新数据,false不插入,true插入 // true:如果查询出多条则不进行修改,false:只修改第一条 testInitTestData(); // 整体更新 DBObject query = new BasicDBObject(); query.put("age", new BasicDBObject("$gt", 15)); DBObject set = userCollection.findOne(query);// 一定是查询出来的DBObject,否则会丢掉一些列,整体更新 set.put("name", "Abc"); set.put("age", 19); set.put("interest", new String[] { "hadoop", "study", "mongodb" }); DBObject zhangguochenAddress = new BasicDBObject(); zhangguochenAddress.put("address", "henan"); set.put("home", zhangguochenAddress); userCollection.update(query, // 
需要修改的数据条件 set, // 需要赋的值 false, // 数据如果不存在,是否新建 false);// false只修改第一条,true如果有多条就不修改 printList(userCollection.find().toArray()); // 局部更新,只更改某些列 // 加上$set会是局部更新,不会丢掉某些列,只把name更新为"jindazhong",年龄更新为123 BasicDBObject set1 = new BasicDBObject("$set", new BasicDBObject("name", "jindazhong").append("age", 123)); userCollection.update(query, // 需要修改的数据条件 set1, // 需要赋的值 false, // 数据如果不存在,是否新建 false);// false只修改第一条,true如果有多条就不修改 printList(userCollection.find().toArray()); // 批量更新 // user.updateMulti(new BasicDBObject("age",new // BasicDBObject("$gt",16)), // new BasicDBObject("$set", new // BasicDBObject("name","jindazhong").append("age", 123))); // printList(user.find().toArray()); } /** * 初始化测试数据 */ public void testInitTestData() { userCollection.drop(); DBObject zhangguochen = new BasicDBObject(); zhangguochen.put("name", "zhangguochen"); zhangguochen.put("age", 25); zhangguochen.put("interest", new String[] { "hadoop", "study", "mongodb" }); DBObject zhangguochenAddress = new BasicDBObject(); zhangguochenAddress.put("address", "henan"); zhangguochen.put("home", zhangguochenAddress); DBObject jindazhong = new BasicDBObject(); jindazhong.put("name", "jindazhong"); jindazhong.put("age", 21); jindazhong.put("interest", new String[] { "hadoop", "mongodb" }); jindazhong.put("wife", "小龙女"); DBObject jindazhongAddress = new BasicDBObject(); jindazhongAddress.put("address", "shanghai"); jindazhong.put("home", jindazhongAddress); DBObject yangzhi = new BasicDBObject(); yangzhi.put("name", "yangzhi"); yangzhi.put("age", 22); yangzhi.put("interest", new String[] { "shopping", "sing", "hadoop" }); DBObject yangzhiAddress = new BasicDBObject(); yangzhiAddress.put("address", "hubei"); yangzhi.put("home", yangzhiAddress); DBObject diaoyouwei = new BasicDBObject(); diaoyouwei.put("name", "diaoyouwei"); diaoyouwei.put("age", 23); diaoyouwei.put("interest", new String[] { "notejs", "sqoop" }); DBObject diaoyouweiAddress = new BasicDBObject(); diaoyouweiAddress.put("address", "shandong"); 
diaoyouwei.put("home", diaoyouweiAddress); DBObject cuichongfei = new BasicDBObject(); cuichongfei.put("name", "cuichongfei"); cuichongfei.put("age", 24); cuichongfei.put("interest", new String[] { "ebsdi", "dq" }); cuichongfei.put("wife", "凤姐"); DBObject cuichongfeiAddress = new BasicDBObject(); cuichongfeiAddress.put("address", "shanxi"); cuichongfei.put("home", cuichongfeiAddress); DBObject huanghu = new BasicDBObject(); huanghu.put("name", "huanghu"); huanghu.put("age", 25); huanghu.put("interest", new String[] { "shopping", "study" }); huanghu.put("wife", "黄蓉"); DBObject huanghuAddress = new BasicDBObject(); huanghuAddress.put("address", "guangdong"); huanghu.put("home", huanghuAddress); DBObject houchangren = new BasicDBObject(); houchangren.put("name", "houchangren"); houchangren.put("age", 26); houchangren.put("interest", new String[] { "dota", "dq" }); DBObject houchangrenAddress = new BasicDBObject(); houchangrenAddress.put("address", "shandong"); houchangren.put("home", houchangrenAddress); DBObject wangjuntao = new BasicDBObject(); wangjuntao.put("name", "wangjuntao"); wangjuntao.put("age", 27); wangjuntao.put("interest", new String[] { "sport", "study" }); wangjuntao.put("wife", "王语嫣"); DBObject wangjuntaoAddress = new BasicDBObject(); wangjuntaoAddress.put("address", "hebei"); wangjuntao.put("home", wangjuntaoAddress); DBObject miaojiagui = new BasicDBObject(); miaojiagui.put("name", "miaojiagui"); miaojiagui.put("age", 28); miaojiagui.put("interest", new String[] { "hadoop", "study", "linux" }); miaojiagui.put("wife", null); DBObject miaojiaguiAddress = new BasicDBObject(); miaojiaguiAddress.put("address", "未知"); miaojiagui.put("home", miaojiaguiAddress); DBObject longzhen = new BasicDBObject(); longzhen.put("name", "longzhen"); longzhen.put("age", 29); longzhen.put("interest", new String[] { "study", "cook" }); longzhen.put("wife", null); DBObject longzhenAddress = new BasicDBObject(); longzhenAddress.put("address", "sichuan"); longzhen.put("home", 
longzhenAddress); userCollection.insert(zhangguochen); userCollection.insert(jindazhong); userCollection.insert(yangzhi); userCollection.insert(diaoyouwei); userCollection.insert(cuichongfei); userCollection.insert(huanghu); userCollection.insert(houchangren); userCollection.insert(wangjuntao); userCollection.insert(miaojiagui); userCollection.insert(longzhen); } public void testRemove() { userCollection.drop(); } /** * 打印数据 * * @param object */ public void print(Object object) { System.out.println(object); } /** * 打印列表 * * @param objectList */ public void printList(List<DBObject> objectList) { for (Object object : objectList) { print(object); } } }
apache-2.0
google/framework-for-osdu
osdu-r2/os-qa/src/test/java/com/osdu/file_service/e2e/GetFilesLocationTests.java
17133
/* * Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.osdu.file_service.e2e; import com.osdu.core.data.provider.DataProviders; import com.osdu.core.endpoints.factories.FactoriesHolder; import com.osdu.core.reporter.TestReporter; import io.qameta.allure.Description; import io.qameta.allure.restassured.AllureRestAssured; import io.restassured.response.Response; import lombok.SneakyThrows; import org.apache.commons.lang3.StringUtils; import org.hamcrest.Matchers; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.format.DateTimeFormat; import org.joda.time.format.DateTimeFormatter; import org.testng.Assert; import org.testng.annotations.Test; import java.util.*; import static com.osdu.common.FilesKeeper.*; import static com.osdu.core.data.parser.JsonParser.readJson; import static com.osdu.core.data.provider.TestData.*; import static com.osdu.core.data.provider.TestData.LOCATION; import static io.restassured.RestAssured.given; import static org.apache.http.HttpStatus.*; public class GetFilesLocationTests extends BaseFileService { FactoriesHolder factoriesHolder = new FactoriesHolder(); String timePattern = "yyyy-MM-dd'T'HH:mm:ss.SSS+'0000'"; /** * File service paths */ String getLocation = factoriesHolder.remoteFactoryCreator().getFileService("getLocationFunction"); String getFileLocation = factoriesHolder.remoteFactoryCreator().getFileService("getFileLocationFunction"); String getFilesListFunction = 
factoriesHolder.remoteFactoryCreator().getFileService("getFileListFunction"); @SneakyThrows @Test(dataProvider = "testedData", dataProviderClass = DataProviders.class) @Description("Valid flow send request with all required fields and with auth token") public void i1_checkNewCreatedFileIsReturnedByFilesList(Map<String, String> data) { DateTime dateTimeBeforeFileCreation = DateTime.now(DateTimeZone.UTC); TestReporter.reportStep("Create time range and save time before file creation: %s", dateTimeBeforeFileCreation); String uniqueID = UUID.randomUUID().toString(); TestReporter.reportStep("Create unique id %s", uniqueID); String bodyRequestWithTheUniqueId = String.format((readJson(requestFileServicePath).toString()), uniqueID); TestReporter.reportStep("Insert unique id into request %s", bodyRequestWithTheUniqueId); given() .filter(new AllureRestAssured()) .spec(baseRequestSpec(specifiedHeadersSet())) .body(bodyRequestWithTheUniqueId) .when() .post(getLocation) .then() .statusCode(SC_OK) .and() .assertThat().body(FILE_ID, Matchers.is(uniqueID)) .assertThat().body(SIGNED_URL, Matchers.notNullValue()) .log() .all(); Response fileLocation = given() .filter(new AllureRestAssured()) .spec(baseRequestSpec(specifiedHeadersSet())) .body(bodyRequestWithTheUniqueId) .when() .post(getFileLocation); fileLocation .then() .statusCode(SC_OK) .and() .assertThat().body(DRIVER, Matchers.is(data.get(DRIVER))) .assertThat().body(LOCATION, Matchers.notNullValue()) .log() .all(); String receivedUrl = fileLocation .then() .extract() .path(LOCATION); //Delay in order to avoid test failing because of the fast speed of the tests running Thread.sleep(4000); DateTime dateTimeAfterFileCreation = DateTime.now(DateTimeZone.UTC); TestReporter.reportStep("Create time range and save time after file creation", dateTimeAfterFileCreation); String requestForFiles = String.format((readJson(requestTemplateForFiles).toString()), dateTimeBeforeFileCreation.toString(), data.get(USER_ID), 
dateTimeAfterFileCreation.toString()); System.out.println(requestForFiles); Response filesLocation = given() .filter(new AllureRestAssured()) .spec(baseRequestSpec(specifiedHeadersSet())) .body(requestForFiles) .when() .post(getFilesListFunction); filesLocation .then() .statusCode(SC_OK) .and() // .assertThat().body(GET_LOCATION_FROM_FILES, Matchers.contains(receivedUrl)) // .assertThat().body(GET_FILE_ID_FROM_FILES, Matchers.contains(uniqueID)) // .assertThat().body(GET_CREATOR_FROM_FILES, Matchers.contains(data.get(USER_ID))) .log() .all(); List<HashMap<String, String>> jsonArray = filesLocation .then() .extract() .path(CONTENT); System.out.println(jsonArray); HashMap<String, String> contentValues = jsonArray.get(0); Assert.assertTrue(contentValues.values().contains(data.get(USER_ID))); Assert.assertTrue(contentValues.values().contains(uniqueID)); Assert.assertTrue(contentValues.values().contains(receivedUrl)); String receivedTimeSavedIntoString = contentValues.get(GET_CREATION_TIME_FROM_FILES); DateTimeFormatter formatter = DateTimeFormat.forPattern(timePattern); DateTime parsedReceivedString = formatter .withZone(DateTimeZone.UTC) .parseDateTime(receivedTimeSavedIntoString); TestReporter.reportStep("Time before file creation: %s", dateTimeBeforeFileCreation); TestReporter.reportStep("Exactly time for created file and parsed: %s", parsedReceivedString); TestReporter.reportStep("Time after file creation: %s", dateTimeAfterFileCreation); TestReporter.reportStep("Verify received time in the following rage"); Assert.assertTrue(parsedReceivedString.isAfter(dateTimeBeforeFileCreation) && parsedReceivedString.isBefore(dateTimeAfterFileCreation)); } @Test(dataProvider = "testedData", dataProviderClass = DataProviders.class) @Description("Send request with all required fields and without auth tokens") public void i2_checkFilesListAccessWithoutHeaders(Map<String, String> data) { Response filesLocation = given() .filter(new AllureRestAssured()) .spec(baseRequestSpec(new 
HashMap<>())) .body(readJson(requestForFilesWithTime).toString()) .when() .post(getFilesListFunction); filesLocation .then() .statusCode(SC_UNAUTHORIZED) .and() .log() .all(); } @Test(dataProvider = "testedData", dataProviderClass = DataProviders.class) @Description("Send request without timeTo field and with auth token") public void i3_checkFilesListAccessWithoutOnOfTheRequiredFields(Map<String, String> data) { Response filesLocation = given() .filter(new AllureRestAssured()) .spec(baseRequestSpec(specifiedHeadersSet())) .body(readJson(requestForFilesWithoutTime).toString()) .when() .post(getFilesListFunction); filesLocation .then() .statusCode(SC_BAD_REQUEST) .and() .log() .all(); } @Test(dataProvider = "testedData", dataProviderClass = DataProviders.class) @Description("Valid flow send request with all required fields and with auth token") public void i4_checkFilesListAccessWithoutOnOfTheRequiredFields(Map<String, String> data) { Response filesLocation = given() .filter(new AllureRestAssured()) .spec(baseRequestSpec(specifiedHeadersSet())) .body(readJson(requestForFilesWithoutItems).toString()) .when() .post(getFilesListFunction); filesLocation .then() .statusCode(SC_BAD_REQUEST) .and() .log() .all(); } @Test(dataProvider = "testedData", dataProviderClass = DataProviders.class) @Description("Send empty request with auth token") public void i5_checkFilesListAccessWithEmptyRequest(Map<String, String> data) { given() .filter(new AllureRestAssured()) .spec(baseRequestSpec(specifiedHeadersSet())) .body(StringUtils.EMPTY) .when() .post(getFilesListFunction) .then() .statusCode(SC_BAD_REQUEST) .and() .log() .all(); } @Test(dataProvider = "testedData", dataProviderClass = DataProviders.class) @Description("Send request with string for PageNum and with auth token") public void i6_checkFilesListAccessWithFieldsTypeMismatch(Map<String, String> data) { given() .filter(new AllureRestAssured()) .spec(baseRequestSpec(specifiedHeadersSet())) 
.body(readJson(requestForFilesWithPageNumFieldMismatch).toString()) .when() .post(getFilesListFunction) .then() .statusCode(SC_BAD_REQUEST) .and() .assertThat().body(MESSAGE, Matchers.containsString(data.get(ERROR_TYPE_MISMATCH))) .log() .all(); } @Test(dataProvider = "testedData", dataProviderClass = DataProviders.class) @Description("Send request with string for Items and with auth token") public void i7_checkFilesListAccessWithFieldsTypeMismatch(Map<String, String> data) { given() .filter(new AllureRestAssured()) .spec(baseRequestSpec(specifiedHeadersSet())) .body(readJson(requestForFilesWithItemsFieldMismatch).toString()) .when() .post(getFilesListFunction) .then() .statusCode(SC_BAD_REQUEST) .and() .assertThat().body(MESSAGE, Matchers.containsString(data.get(ERROR_TYPE_MISMATCH))) .log() .all(); } @Test(dataProvider = "testedData", dataProviderClass = DataProviders.class) @Description("Send request with string for UserId and with auth token") public void i8_checkFilesListAccessWithFieldsTypeMismatch(Map<String, String> data) { given() .filter(new AllureRestAssured()) .spec(baseRequestSpec(specifiedHeadersSet())) .body(readJson(requestForFilesWithUserIdFieldMismatch).toString()) .when() .post(getFilesListFunction) .then() .statusCode(SC_BAD_REQUEST) .and() .assertThat().body(MESSAGE, Matchers.containsString(data.get(ERROR_TYPE_MISMATCH))) .log() .all(); } @Test(dataProvider = "testedData", dataProviderClass = DataProviders.class) @Description("Send request with invalid data format and with auth token") public void i9_checkFilesListAccessWithInvalidData(Map<String, String> data) { given() .filter(new AllureRestAssured()) .spec(baseRequestSpec(specifiedHeadersSet())) .body(readJson(requestForFilesWithInvalidData).toString()) .when() .post(getFilesListFunction) .then() .statusCode(SC_BAD_REQUEST) .and() .assertThat().body(MESSAGE, Matchers.containsString(data.get(ERROR_TIME_PARSING))) .log() .all(); } @Test(dataProvider = "testedData", dataProviderClass = 
DataProviders.class) @Description("Send request with 'time from' before 'time to' and with auth token") public void i10_checkFilesListAccessWithWrongTimeRange(Map<String, String> data) { given() .filter(new AllureRestAssured()) .spec(baseRequestSpec(specifiedHeadersSet())) .body(readJson(requestForFilesWithWrongTimeRange).toString()) .when() .post(getFilesListFunction) .then() .statusCode(SC_BAD_REQUEST) .and() .assertThat().body(MESSAGE, Matchers.containsString(data.get(ERROR_CONSTRAINT_VIOLATION))) .log() .all(); } @Test(dataProvider = "testedData", dataProviderClass = DataProviders.class) @Description("Send request with not existed time and with auth token") public void i11_checkFilesListAccessWithNotExistedTime(Map<String, String> data) { given() .filter(new AllureRestAssured()) .spec(baseRequestSpec(specifiedHeadersSet())) .body(readJson(requestForFilesWithNotExistedTime).toString()) .when() .post(getFilesListFunction) .then() .statusCode(SC_BAD_REQUEST) .and() .assertThat().body(MESSAGE, Matchers.containsString(data.get(ERROR_TIME_PARSING))) .log() .all(); } @Test(dataProvider = "testedData", dataProviderClass = DataProviders.class) @Description("Send request with not existed time and with auth token") public void i12_checkFilesListAccessWithNegativeItemsNumber(Map<String, String> data) { given() .filter(new AllureRestAssured()) .spec(baseRequestSpec(specifiedHeadersSet())) .body(readJson(requestForFilesWithNegativeItemsNumber).toString()) .when() .post(getFilesListFunction) .then() .statusCode(SC_BAD_REQUEST) .and() .assertThat().body(MESSAGE, Matchers.containsString(data.get(ERROR_CONSTRAINT_VIOLATION))) .log() .all(); } @Test(dataProvider = "testedData", dataProviderClass = DataProviders.class) @Description("Send request with not existed time and with auth token") public void i13_checkFilesListAccessWithHugePageNumValue(Map<String, String> data) { given() .filter(new AllureRestAssured()) .spec(baseRequestSpec(specifiedHeadersSet())) 
.body(readJson(requestForFilesWithHugePageNumValue).toString()) .when() .post(getFilesListFunction) .then() .statusCode(SC_BAD_REQUEST) .and() .assertThat().body(MESSAGE, Matchers.containsString(data.get(EXCEPTION))) .log() .all(); } @Test(dataProvider = "testedData", dataProviderClass = DataProviders.class) @Description("Send request without data time and with auth token") public void i14_checkFilesListAccessWithoutDataTime(Map<String, String> data) { given() .filter(new AllureRestAssured()) .spec(baseRequestSpec(specifiedHeadersSet())) .body(readJson(requestForFilesWithoutDataTime).toString()) .when() .post(getFilesListFunction) .then() .statusCode(SC_BAD_REQUEST) .and() .assertThat().body(MESSAGE, Matchers.containsString(data.get(ERROR_CONSTRAINT_VIOLATION))) .log() .all(); } @Test(dataProvider = "testedData", dataProviderClass = DataProviders.class) @Description("Send request with invalid json and with auth token") public void i15_checkFilesListAccessWithInvalidJson(Map<String, String> data) { given() .filter(new AllureRestAssured()) .spec(baseRequestSpec(specifiedHeadersSet())) .body(readJson(requestForFilesWithInvalidJson).toString().replaceAll(",", StringUtils.EMPTY)) .when() .post(getFilesListFunction) .then() .statusCode(SC_BAD_REQUEST) .and() .assertThat().body(MESSAGE, Matchers.containsString(data.get(ERROR_JSON_PARSING))) .log() .all(); } }
apache-2.0
charles-cooper/idylfin
src/com/opengamma/analytics/math/minimization/NelderMeadDownhillSimplexMinimizer.java
2029
/** * Copyright (C) 2009 - present by OpenGamma Inc. and the OpenGamma group of companies * * Please see distribution for license. */ package com.opengamma.analytics.math.minimization; import org.apache.commons.lang.Validate; import org.apache.commons.math.FunctionEvaluationException; import org.apache.commons.math.analysis.MultivariateRealFunction; import org.apache.commons.math.optimization.GoalType; import org.apache.commons.math.optimization.MultivariateRealOptimizer; import org.apache.commons.math.optimization.OptimizationException; import org.apache.commons.math.optimization.direct.NelderMead; import com.opengamma.analytics.math.MathException; import com.opengamma.analytics.math.function.Function1D; import com.opengamma.analytics.math.matrix.DoubleMatrix1D; import com.opengamma.analytics.math.util.wrapper.CommonsMathWrapper; /** * This class is a wrapper for the <a href="http://commons.apache.org/math/api-2.1/org/apache/commons/math/optimization/direct/NelderMead.html">Commons Math library implementation</a> * of the Nelder-Mead downhill simplex method. */ public class NelderMeadDownhillSimplexMinimizer implements Minimizer<Function1D<DoubleMatrix1D, Double>, DoubleMatrix1D> { private static final GoalType MINIMIZER = GoalType.MINIMIZE; /** * {@inheritDoc} */ @Override public DoubleMatrix1D minimize(final Function1D<DoubleMatrix1D, Double> function, final DoubleMatrix1D startPosition) { Validate.notNull(function, "function"); Validate.notNull(startPosition, "start position"); final MultivariateRealOptimizer optimizer = new NelderMead(); final MultivariateRealFunction commonsFunction = CommonsMathWrapper.wrapMultivariate(function); try { return new DoubleMatrix1D(CommonsMathWrapper.unwrap(optimizer.optimize(commonsFunction, MINIMIZER, startPosition.getData()))); } catch (final OptimizationException e) { throw new MathException(e); } catch (final FunctionEvaluationException e) { throw new MathException(e); } } }
apache-2.0
gruter/tajo-cdh
tajo-core/src/test/java/org/apache/tajo/engine/query/TestAlterTablespace.java
2455
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.tajo.engine.query; import org.apache.tajo.IntegrationTest; import org.apache.tajo.QueryTestCaseBase; import org.apache.tajo.catalog.proto.CatalogProtos; import org.junit.Test; import org.junit.experimental.categories.Category; import java.util.List; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @Category(IntegrationTest.class) public class TestAlterTablespace extends QueryTestCaseBase { @Test public final void testAlterLocation() throws Exception { if (!testingCluster.isHCatalogStoreRunning()) { ////////////////////////////////////////////////////////////////////////////// // Create two table spaces ////////////////////////////////////////////////////////////////////////////// assertFalse(catalog.existTablespace("space1")); assertTrue(catalog.createTablespace("space1", "hdfs://xxx.com/warehouse")); assertTrue(catalog.existTablespace("space1")); // pre verification CatalogProtos.TablespaceProto space1 = catalog.getTablespace("space1"); assertEquals("space1", space1.getSpaceName()); assertEquals("hdfs://xxx.com/warehouse", space1.getUri()); executeString("ALTER TABLESPACE space1 LOCATION 
'hdfs://yyy.com/warehouse';"); // Verify ALTER TABLESPACE space1 space1 = catalog.getTablespace("space1"); assertEquals("space1", space1.getSpaceName()); assertEquals("hdfs://yyy.com/warehouse", space1.getUri()); assertTrue(catalog.dropTablespace("space1")); assertFalse(catalog.existTablespace("space1")); } } }
apache-2.0
holtsoftware/potential-octo-wallhack
School Java/homework/week3/AWTAnimate1-16.26.java
2727
// Adam Holt // week3 // Exercise 16.26 // Illustrate a basic animation of a filled arc drawn full circle. import java.awt.*; public class AWTAnimate1 { public static void main(String[] args){ AnimationFrame frame = new AnimationFrame(); frame.addWindowListener(new WindowCloser()); frame.setVisible(true); // Hand over to the frame to run forever. frame.run(); } } // Repeatedly request a canvas to paint itself. class AnimationFrame extends Frame { public AnimationFrame(){ // Set the frame's size. final int width = 200, height = 200; setSize(width,height); setBackground(Color.white); add(getCanvas()); } public void run(){ // Animate the drawing 50 times per second. final int sleepTime = 100/50; AnimatedArcCanvas canvas = getCanvas(); while(true){ try{ Thread.sleep(sleepTime); // Perform the next animation step. canvas.step(); } catch(InterruptedException e){ } } } protected AnimatedArcCanvas getCanvas(){ return animationCanvas; } private AnimatedArcCanvas animationCanvas = new AnimatedArcCanvas(); } // Animate an arc through a full circle before starting again. class AnimatedArcCanvas extends Canvas { public void paint(Graphics g){ Dimension size = getSize(); final int x = size.width/4, y = size.height/4; final int width = size.width/2, height = size.height/2; final int sweep = getAngle(); ///////////////////////////////////////////////////////////////////////////////////////////////////////////// g.fillArc(x,y,width,height,sweep,4);// This is what i changed ///////////////////////////////////////////////////////////////////////////////////////////////////////////// } // Perform the next animation step. public void step(){ // Update the angle. setAngle(getAngle()+angleIncrement); // Clear last step and draw the current arc repaint(); } protected int getAngle(){ return angle; } protected void setAngle(int a){ if(Math.abs(a) <= circleDegrees){ angle = a; } else{ // Time to restart. angle = 0; } } // Always start at the 3 o'clock position. 
private final int startAngle = 0; // The number of degrees in a circle. private final int circleDegrees = 360; // Update the angle by a fixed amount. final int angleIncrement = 12; // How many degrees of sweep for the arc. private int angle = 0; }
apache-2.0
cping/LGame
Java/Examples/towerdefence(0.5)/src/org/test/towerdefense/ScreenState.java
241
package org.test.towerdefense; public enum ScreenState { TransitionOn, Active, TransitionOff, Hidden; public int getValue() { return this.ordinal(); } public static ScreenState forValue(int value) { return values()[value]; } }
apache-2.0
ppbizapps/kairosdb
src/test/java/org/kairosdb/core/aggregator/SamplerAggregatorTest.java
4726
/* * Copyright 2016 KairosDB Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kairosdb.core.aggregator; import org.junit.Test; import org.kairosdb.core.DataPoint; import org.kairosdb.core.datapoints.DoubleDataPointFactoryImpl; import org.kairosdb.core.datapoints.LongDataPoint; import org.kairosdb.core.datastore.DataPointGroup; import org.kairosdb.testing.ListDataPointGroup; import static org.hamcrest.CoreMatchers.equalTo; import static org.junit.Assert.assertThat; public class SamplerAggregatorTest { @Test(expected = NullPointerException.class) public void test_nullSet_invalid() { new SamplerAggregator(new DoubleDataPointFactoryImpl()).aggregate(null); } @Test public void test_steadyRate() { ListDataPointGroup group = new ListDataPointGroup("rate"); group.addDataPoint(new LongDataPoint(1, 10)); group.addDataPoint(new LongDataPoint(2, 20)); group.addDataPoint(new LongDataPoint(3, 30)); group.addDataPoint(new LongDataPoint(4, 40)); SamplerAggregator samplerAggregator = new SamplerAggregator(new DoubleDataPointFactoryImpl()); DataPointGroup results = samplerAggregator.aggregate(group); DataPoint dp = results.next(); assertThat(dp.getTimestamp(), equalTo(2L)); assertThat(dp.getDoubleValue(), equalTo(20.0)); dp = results.next(); assertThat(dp.getTimestamp(), equalTo(3L)); assertThat(dp.getDoubleValue(), equalTo(30.0)); dp = results.next(); assertThat(dp.getTimestamp(), equalTo(4L)); assertThat(dp.getDoubleValue(), equalTo(40.0)); } @Test public void 
test_changingRate() { ListDataPointGroup group = new ListDataPointGroup("rate"); group.addDataPoint(new LongDataPoint(1, 10)); group.addDataPoint(new LongDataPoint(2, 10)); group.addDataPoint(new LongDataPoint(3, 5)); group.addDataPoint(new LongDataPoint(4, 20)); SamplerAggregator samplerAggregator = new SamplerAggregator(new DoubleDataPointFactoryImpl()); DataPointGroup results = samplerAggregator.aggregate(group); DataPoint dp = results.next(); assertThat(dp.getTimestamp(), equalTo(2L)); assertThat(dp.getDoubleValue(), equalTo(10.0)); dp = results.next(); assertThat(dp.getTimestamp(), equalTo(3L)); assertThat(dp.getDoubleValue(), equalTo(5.0)); dp = results.next(); assertThat(dp.getTimestamp(), equalTo(4L)); assertThat(dp.getDoubleValue(), equalTo(20.0)); } @Test public void test_changingPeriod() { ListDataPointGroup group = new ListDataPointGroup("rate"); group.addDataPoint(new LongDataPoint(1, 10)); group.addDataPoint(new LongDataPoint(2, 10)); group.addDataPoint(new LongDataPoint(4, 10)); group.addDataPoint(new LongDataPoint(6, 20)); SamplerAggregator samplerAggregator = new SamplerAggregator(new DoubleDataPointFactoryImpl()); DataPointGroup results = samplerAggregator.aggregate(group); DataPoint dp = results.next(); assertThat(dp.getTimestamp(), equalTo(2L)); assertThat(dp.getDoubleValue(), equalTo(10.0)); dp = results.next(); assertThat(dp.getTimestamp(), equalTo(4L)); assertThat(dp.getDoubleValue(), equalTo(5.0)); dp = results.next(); assertThat(dp.getTimestamp(), equalTo(6L)); assertThat(dp.getDoubleValue(), equalTo(10.0)); } @Test(expected = IllegalStateException.class) public void test_dataPointsAtSameTime() { ListDataPointGroup group = new ListDataPointGroup("rate"); group.addDataPoint(new LongDataPoint(1, 10)); group.addDataPoint(new LongDataPoint(1, 15)); group.addDataPoint(new LongDataPoint(2, 5)); group.addDataPoint(new LongDataPoint(2, 20)); group.addDataPoint(new LongDataPoint(3, 30)); SamplerAggregator samplerAggregator = new 
SamplerAggregator(new DoubleDataPointFactoryImpl()); DataPointGroup results = samplerAggregator.aggregate(group); DataPoint dp = results.next(); } }
apache-2.0
gspandy/dubbo
dubbo-common/src/test/java/com/alibaba/dubbo/common/extensionloader/ext7/impl/Ext7Impl.java
907
/*
 * Copyright 1999-2011 Alibaba Group.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.alibaba.dubbo.common.extensionloader.ext7.impl;

import com.alibaba.dubbo.common.URL;
import com.alibaba.dubbo.common.extensionloader.ext7.Ext7;

/**
 * Minimal {@link Ext7} implementation living under the test sources; it
 * exists so ExtensionLoader tests have a concrete extension to discover.
 * The behavior of {@code echo} itself is irrelevant to those tests.
 *
 * @author ding.lid
 */
public class Ext7Impl implements Ext7 {
    /**
     * Stub echo: ignores both the URL and the input string and always
     * returns the empty string.
     */
    public String echo(URL url, String s) {
        return "";
    }
}
apache-2.0
Wechat-Group/WxJava
weixin-java-mp/src/main/java/me/chanjar/weixin/mp/bean/material/WxMpMaterialNewsBatchGetResult.java
911
package me.chanjar.weixin.mp.bean.material;

import lombok.Data;
import me.chanjar.weixin.mp.util.json.WxMpGsonBuilder;

import java.io.Serializable;
import java.util.Date;
import java.util.List;

/**
 * Result of a batch-get request for permanent "news" (article) material.
 * Getters, setters, equals and hashCode are generated by Lombok's
 * {@code @Data}; JSON serialization goes through the project's shared
 * Gson builder so field naming matches the WeChat wire format.
 */
@Data
public class WxMpMaterialNewsBatchGetResult implements Serializable {
  private static final long serialVersionUID = -1617952797921001666L;

  // Total number of news materials available on the account — presumably the
  // WeChat API's total_count field; TODO confirm against the API docs.
  private int totalCount;
  // Number of items actually returned in this batch.
  private int itemCount;
  // The returned news entries themselves.
  private List<WxMaterialNewsBatchGetNewsItem> items;

  @Override
  public String toString() {
    return WxMpGsonBuilder.create().toJson(this);
  }

  /**
   * A single entry of the batch result: a media id plus the article content
   * and its last update time.
   */
  @Data
  public static class WxMaterialNewsBatchGetNewsItem implements Serializable {
    private static final long serialVersionUID = -5227864606579602345L;

    // Identifier used to reference this material in other API calls.
    private String mediaId;
    // When the material was last modified.
    private Date updateTime;
    // The article payload itself.
    private WxMpMaterialNews content;

    @Override
    public String toString() {
      return WxMpGsonBuilder.create().toJson(this);
    }
  }
}
apache-2.0
xjtu3c/GranuleJ
GOP/GranuleJIDE/src/AST/EnhancedForStmt.java
26304
package AST;

import java.util.HashSet;
import java.util.LinkedHashSet;
import java.io.File;
import java.util.*;
import beaver.*;
import java.util.ArrayList;
import java.util.zip.*;
import java.io.*;
import java.util.Stack;
import java.util.regex.Pattern;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import javax.xml.transform.stream.StreamResult;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.Transformer;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Element;
import org.w3c.dom.Document;
import java.util.HashMap;
import java.util.Map.Entry;
import javax.xml.transform.TransformerException;
import javax.xml.parsers.ParserConfigurationException;
import java.util.Collection;

/**
 * AST node for the Java 5 enhanced for ("foreach") statement.
 *
 * NOTE(review): this file is machine-generated by JastAdd (see the @declaredat
 * tags pointing at .jrag/.ast aspect files). Do not hand-edit the logic —
 * change the aspect sources and regenerate instead. The many
 * {@code *_computed}/{@code *_values} members implement JastAdd's memoized
 * attribute evaluation; their reset/statement order is load-bearing.
 *
 * @ast node
 * @declaredat EnhancedFor.ast:1
 */
public class EnhancedForStmt extends BranchTargetStmt implements Cloneable, VariableScope {
  /**
   * Resets all memoized attribute caches on this node.
   * @apilvl low-level
   */
  public void flushCache() {
    super.flushCache();
    targetOf_ContinueStmt_values = null;
    targetOf_BreakStmt_values = null;
    canCompleteNormally_computed = false;
    isDAafter_Variable_values = null;
    isDUafter_Variable_values = null;
    cond_label_computed = false;
    update_label_computed = false;
    end_label_computed = false;
    extraLocalIndex_computed = false;
  }
  /**
   * @apilvl internal
   */
  public void flushCollectionCache() {
    super.flushCollectionCache();
  }
  /**
   * Clones the node, dropping memoized attribute values so the copy
   * re-evaluates them in its own context.
   * @apilvl internal
   */
  @SuppressWarnings({"unchecked", "cast"})
  public EnhancedForStmt clone() throws CloneNotSupportedException {
    EnhancedForStmt node = (EnhancedForStmt)super.clone();
    node.targetOf_ContinueStmt_values = null;
    node.targetOf_BreakStmt_values = null;
    node.canCompleteNormally_computed = false;
    node.isDAafter_Variable_values = null;
    node.isDUafter_Variable_values = null;
    node.cond_label_computed = false;
    node.update_label_computed = false;
    node.end_label_computed = false;
    node.extraLocalIndex_computed = false;
    node.in$Circle(false);
    node.is$Final(false);
    return node;
  }
  /**
   * Shallow copy: clones this node and its child array, but not the
   * children themselves.
   * @apilvl internal
   */
  @SuppressWarnings({"unchecked", "cast"})
  public EnhancedForStmt copy() {
    try {
      EnhancedForStmt node = (EnhancedForStmt)clone();
      if(children != null) node.children = (ASTNode[])children.clone();
      return node;
    } catch (CloneNotSupportedException e) {
    }
    System.err.println("Error: Could not clone node of type " + getClass().getName() + "!");
    return null;
  }
  /**
   * Deep copy: recursively copies the entire subtree.
   * @apilvl low-level
   */
  @SuppressWarnings({"unchecked", "cast"})
  public EnhancedForStmt fullCopy() {
    EnhancedForStmt res = (EnhancedForStmt)copy();
    for(int i = 0; i < getNumChildNoTransform(); i++) {
      ASTNode node = getChildNoTransform(i);
      if(node != null) node = node.fullCopy();
      res.setChild(node, i);
    }
    return res;
  }
  /**
   * Semantic check: the iterated expression must be an array or an
   * Iterable, and its element type must be assignable to the loop
   * variable's type.
   * @ast method
   * @aspect EnhancedFor
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:15
   */
  public void typeCheck() {
    if (!getExpr().type().isArrayDecl() && !getExpr().type().isIterable()) {
      error("type " + getExpr().type().name() + " of expression in foreach is neither array type nor java.lang.Iterable");
    }
    else if(getExpr().type().isArrayDecl() && !getExpr().type().componentType().assignConversionTo(getVariableDeclaration().type(), null))
      error("parameter of type " + getVariableDeclaration().type().typeName() + " can not be assigned an element of type " + getExpr().type().componentType().typeName());
    else if(getExpr().type().isIterable() && !getExpr().type().isUnknown()) {
      // Element type of an Iterable is the return type of iterator().next().
      MethodDecl iterator = (MethodDecl)getExpr().type().memberMethods("iterator").iterator().next();
      MethodDecl next = (MethodDecl)iterator.type().memberMethods("next").iterator().next();
      TypeDecl componentType = next.type();
      if(!componentType.assignConversionTo(getVariableDeclaration().type(), null))
        error("parameter of type " + getVariableDeclaration().type().typeName() + " can not be assigned an element of type " + componentType.typeName());
    }
  }
  /**
   * Pretty-prints this statement as "for (mods Type name : expr) stmt".
   * @ast method
   * @aspect EnhancedFor
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:58
   */
  public void toString(StringBuffer s) {
    s.append(indent());
    s.append("for (");
    getVariableDeclaration().getModifiers().toString(s);
    getVariableDeclaration().getTypeAccess().toString(s);
    s.append(" " + getVariableDeclaration().name());
    s.append(" : ");
    getExpr().toString(s);
    s.append(") ");
    getStmt().toString(s);
  }
  /**
   * Bytecode generation: the array form uses an index counter in an
   * extra local slot; the Iterable form fetches an Iterator and loops on
   * hasNext()/next().
   * @ast method
   * @aspect EnhancedForToBytecode
   * @declaredat D:\zhh\JastAddJ\Java1.5Backend\EnhancedForCodegen.jrag:24
   */
  public void createBCode(CodeGeneration gen) {
    if(getExpr().type().isArrayDecl()) {
      // Array case: extraLocalIndex() holds the array ref, the next slot the index.
      getExpr().createBCode(gen);
      gen.emitStoreReference(extraLocalIndex());
      IntegerLiteral.push(gen, 0);
      gen.emit(Bytecode.ISTORE).add(extraLocalIndex()+1);
      gen.addLabel(cond_label());
      gen.emit(Bytecode.ILOAD).add(extraLocalIndex()+1);
      gen.emitLoadReference(extraLocalIndex());
      gen.emit(Bytecode.ARRAYLENGTH);
      gen.emitCompare(Bytecode.IF_ICMPGE, end_label());
      gen.emitLoadReference(extraLocalIndex());
      gen.emit(Bytecode.ILOAD).add(extraLocalIndex()+1);
      gen.emit(getExpr().type().componentType().arrayLoad());
      getExpr().type().componentType().emitCastTo(gen, getVariableDeclaration().type());
      getVariableDeclaration().type().emitStoreLocal(gen, getVariableDeclaration().localNum());
      getStmt().createBCode(gen);
      gen.addLabel(update_label());
      gen.emit(Bytecode.IINC).add(extraLocalIndex()+1).add(1);
      gen.emitGoto(cond_label());
      gen.addLabel(end_label());
    }
    else {
      // Iterable case: extraLocalIndex() holds the Iterator.
      getExpr().createBCode(gen);
      iteratorMethod().emitInvokeMethod(gen, lookupType("java.lang", "Iterable"));
      gen.emitStoreReference(extraLocalIndex());
      gen.addLabel(cond_label());
      gen.emitLoadReference(extraLocalIndex());
      hasNextMethod().emitInvokeMethod(gen, lookupType("java.util", "Iterator"));
      gen.emitCompare(Bytecode.IFEQ, end_label());
      gen.emitLoadReference(extraLocalIndex());
      nextMethod().emitInvokeMethod(gen, lookupType("java.util", "Iterator"));
      // Primitive loop variables need an unboxing cast from next()'s Object result.
      VariableDeclaration obj = getVariableDeclaration();
      if (obj.type().isPrimitive()) {
        gen.emitCheckCast(obj.type().boxed());
        obj.type().boxed().emitCastTo(gen, obj.type());
        obj.type().emitStoreLocal(gen, obj.localNum());
      } else {
        gen.emitCheckCast(obj.type());
        gen.emitStoreReference(obj.localNum());
      }
      getStmt().createBCode(gen);
      gen.addLabel(update_label());
      gen.emitGoto(cond_label());
      gen.addLabel(end_label());
    }
  }
  /**
   * Looks up java.lang.Iterable.iterator() for invocation.
   * @ast method
   * @aspect EnhancedForToBytecode
   * @declaredat D:\zhh\JastAddJ\Java1.5Backend\EnhancedForCodegen.jrag:72
   */
  private MethodDecl iteratorMethod() {
    TypeDecl typeDecl = lookupType("java.lang", "Iterable");
    for (Iterator iter = typeDecl.memberMethods("iterator").iterator(); iter.hasNext();) {
      MethodDecl m = (MethodDecl)iter.next();
      if (m.getNumParameter() == 0) {
        return m;
      }
    }
    throw new Error("Could not find java.lang.Iterable.iterator()");
  }
  /**
   * Looks up java.util.Iterator.hasNext() for invocation.
   * @ast method
   * @aspect EnhancedForToBytecode
   * @declaredat D:\zhh\JastAddJ\Java1.5Backend\EnhancedForCodegen.jrag:82
   */
  private MethodDecl hasNextMethod() {
    TypeDecl typeDecl = lookupType("java.util", "Iterator");
    for (Iterator iter = typeDecl.memberMethods("hasNext").iterator(); iter.hasNext();) {
      MethodDecl m = (MethodDecl)iter.next();
      if (m.getNumParameter() == 0) {
        return m;
      }
    }
    throw new Error("Could not find java.util.Collection.hasNext()");
  }
  /**
   * Looks up java.util.Iterator.next() for invocation.
   * @ast method
   * @aspect EnhancedForToBytecode
   * @declaredat D:\zhh\JastAddJ\Java1.5Backend\EnhancedForCodegen.jrag:92
   */
  private MethodDecl nextMethod() {
    TypeDecl typeDecl = lookupType("java.util", "Iterator");
    for (Iterator iter = typeDecl.memberMethods("next").iterator(); iter.hasNext();) {
      MethodDecl m = (MethodDecl)iter.next();
      if (m.getNumParameter() == 0) {
        return m;
      }
    }
    throw new Error("Could not find java.util.Collection.next()");
  }
  /**
   * @ast method
   * @declaredat EnhancedFor.ast:1
   */
  public EnhancedForStmt() {
    super();
  }
  /**
   * Children: 0 = loop variable, 1 = iterated expression, 2 = body.
   * @ast method
   * @declaredat EnhancedFor.ast:7
   */
  public EnhancedForStmt(VariableDeclaration p0, Expr p1, Stmt p2) {
    setChild(p0, 0);
    setChild(p1, 1);
    setChild(p2, 2);
  }
  /**
   * @apilvl low-level
   * @ast method
   * @declaredat EnhancedFor.ast:15
   */
  protected int numChildren() {
    return 3;
  }
  /**
   * @apilvl internal
   * @ast method
   * @declaredat EnhancedFor.ast:21
   */
  public boolean mayHaveRewrite() {
    return false;
  }
  /**
   * Setter for VariableDeclaration
   * @apilvl high-level
   * @ast method
   * @declaredat EnhancedFor.ast:5
   */
  public void setVariableDeclaration(VariableDeclaration node) {
    setChild(node, 0);
  }
  /**
   * Getter for VariableDeclaration
   * @apilvl high-level
   * @ast method
   * @declaredat EnhancedFor.ast:12
   */
  public VariableDeclaration getVariableDeclaration() {
    return (VariableDeclaration)getChild(0);
  }
  /**
   * @apilvl low-level
   * @ast method
   * @declaredat EnhancedFor.ast:18
   */
  public VariableDeclaration getVariableDeclarationNoTransform() {
    return (VariableDeclaration)getChildNoTransform(0);
  }
  /**
   * Setter for Expr
   * @apilvl high-level
   * @ast method
   * @declaredat EnhancedFor.ast:5
   */
  public void setExpr(Expr node) {
    setChild(node, 1);
  }
  /**
   * Getter for Expr
   * @apilvl high-level
   * @ast method
   * @declaredat EnhancedFor.ast:12
   */
  public Expr getExpr() {
    return (Expr)getChild(1);
  }
  /**
   * @apilvl low-level
   * @ast method
   * @declaredat EnhancedFor.ast:18
   */
  public Expr getExprNoTransform() {
    return (Expr)getChildNoTransform(1);
  }
  /**
   * Setter for Stmt
   * @apilvl high-level
   * @ast method
   * @declaredat EnhancedFor.ast:5
   */
  public void setStmt(Stmt node) {
    setChild(node, 2);
  }
  /**
   * Getter for Stmt
   * @apilvl high-level
   * @ast method
   * @declaredat EnhancedFor.ast:12
   */
  public Stmt getStmt() {
    return (Stmt)getChild(2);
  }
  /**
   * @apilvl low-level
   * @ast method
   * @declaredat EnhancedFor.ast:18
   */
  public Stmt getStmtNoTransform() {
    return (Stmt)getChildNoTransform(2);
  }
  /**
   * Variable lookup scoped to this statement: the loop variable shadows
   * outer declarations of the same name.
   * @attribute syn
   * @aspect EnhancedFor
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:50
   */
  @SuppressWarnings({"unchecked", "cast"})
  public SimpleSet localLookupVariable(String name) {
    ASTNode$State state = state();
    SimpleSet localLookupVariable_String_value = localLookupVariable_compute(name);
    return localLookupVariable_String_value;
  }
  /**
   * @apilvl internal
   */
  private SimpleSet localLookupVariable_compute(String name) {
    if(getVariableDeclaration().name().equals(name)) {
      return SimpleSet.emptySet.add(getVariableDeclaration());
    }
    return lookupVariable(name);
  }
  protected java.util.Map targetOf_ContinueStmt_values;
  /**
   * True iff an unlabeled continue targets this loop (memoized per stmt).
   * @attribute syn
   * @aspect EnhancedFor
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:75
   */
  @SuppressWarnings({"unchecked", "cast"})
  public boolean targetOf(ContinueStmt stmt) {
    Object _parameters = stmt;
    if(targetOf_ContinueStmt_values == null) targetOf_ContinueStmt_values = new java.util.HashMap(4);
    if(targetOf_ContinueStmt_values.containsKey(_parameters)) {
      return ((Boolean)targetOf_ContinueStmt_values.get(_parameters)).booleanValue();
    }
    ASTNode$State state = state();
    int num = state.boundariesCrossed;
    boolean isFinal = this.is$Final();
    boolean targetOf_ContinueStmt_value = targetOf_compute(stmt);
    if(isFinal && num == state().boundariesCrossed) targetOf_ContinueStmt_values.put(_parameters, Boolean.valueOf(targetOf_ContinueStmt_value));
    return targetOf_ContinueStmt_value;
  }
  /**
   * @apilvl internal
   */
  private boolean targetOf_compute(ContinueStmt stmt) {
    return !stmt.hasLabel();
  }
  protected java.util.Map targetOf_BreakStmt_values;
  /**
   * True iff an unlabeled break targets this loop (memoized per stmt).
   * @attribute syn
   * @aspect EnhancedFor
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:76
   */
  @SuppressWarnings({"unchecked", "cast"})
  public boolean targetOf(BreakStmt stmt) {
    Object _parameters = stmt;
    if(targetOf_BreakStmt_values == null) targetOf_BreakStmt_values = new java.util.HashMap(4);
    if(targetOf_BreakStmt_values.containsKey(_parameters)) {
      return ((Boolean)targetOf_BreakStmt_values.get(_parameters)).booleanValue();
    }
    ASTNode$State state = state();
    int num = state.boundariesCrossed;
    boolean isFinal = this.is$Final();
    boolean targetOf_BreakStmt_value = targetOf_compute(stmt);
    if(isFinal && num == state().boundariesCrossed) targetOf_BreakStmt_values.put(_parameters, Boolean.valueOf(targetOf_BreakStmt_value));
    return targetOf_BreakStmt_value;
  }
  /**
   * @apilvl internal
   */
  private boolean targetOf_compute(BreakStmt stmt) {
    return !stmt.hasLabel();
  }
  /**
   * Unlike a basic for, a foreach can always complete normally when reachable.
   * @attribute syn
   * @aspect EnhancedFor
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:79
   */
  @SuppressWarnings({"unchecked", "cast"})
  public boolean canCompleteNormally() {
    if(canCompleteNormally_computed) {
      return canCompleteNormally_value;
    }
    ASTNode$State state = state();
    int num = state.boundariesCrossed;
    boolean isFinal = this.is$Final();
    canCompleteNormally_value = canCompleteNormally_compute();
    if(isFinal && num == state().boundariesCrossed) canCompleteNormally_computed = true;
    return canCompleteNormally_value;
  }
  /**
   * @apilvl internal
   */
  private boolean canCompleteNormally_compute() {
    return reachable();
  }
  /**
   * Definite-assignment after the whole statement (memoized per variable).
   * @attribute syn
   * @aspect EnhancedFor
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:83
   */
  @SuppressWarnings({"unchecked", "cast"})
  public boolean isDAafter(Variable v) {
    Object _parameters = v;
    if(isDAafter_Variable_values == null) isDAafter_Variable_values = new java.util.HashMap(4);
    if(isDAafter_Variable_values.containsKey(_parameters)) {
      return ((Boolean)isDAafter_Variable_values.get(_parameters)).booleanValue();
    }
    ASTNode$State state = state();
    int num = state.boundariesCrossed;
    boolean isFinal = this.is$Final();
    boolean isDAafter_Variable_value = isDAafter_compute(v);
    if(isFinal && num == state().boundariesCrossed) isDAafter_Variable_values.put(_parameters, Boolean.valueOf(isDAafter_Variable_value));
    return isDAafter_Variable_value;
  }
  /**
   * @apilvl internal
   */
  private boolean isDAafter_compute(Variable v) {
    if(!getExpr().isDAafter(v))
      return false;
    /*
    for(Iterator iter = targetBreaks().iterator(); iter.hasNext(); ) {
      BreakStmt stmt = (BreakStmt)iter.next();
      if(!stmt.isDAafterReachedFinallyBlocks(v))
        return false;
    }
    */
    return true;
  }
  /**
   * Definite-unassignment after the whole statement (memoized per variable).
   * @attribute syn
   * @aspect EnhancedFor
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:99
   */
  @SuppressWarnings({"unchecked", "cast"})
  public boolean isDUafter(Variable v) {
    Object _parameters = v;
    if(isDUafter_Variable_values == null) isDUafter_Variable_values = new java.util.HashMap(4);
    if(isDUafter_Variable_values.containsKey(_parameters)) {
      return ((Boolean)isDUafter_Variable_values.get(_parameters)).booleanValue();
    }
    ASTNode$State state = state();
    int num = state.boundariesCrossed;
    boolean isFinal = this.is$Final();
    boolean isDUafter_Variable_value = isDUafter_compute(v);
    if(isFinal && num == state().boundariesCrossed) isDUafter_Variable_values.put(_parameters, Boolean.valueOf(isDUafter_Variable_value));
    return isDUafter_Variable_value;
  }
  /**
   * @apilvl internal
   */
  private boolean isDUafter_compute(Variable v) {
    if(!getExpr().isDUafter(v))
      return false;
    for(Iterator iter = targetBreaks().iterator(); iter.hasNext(); ) {
      BreakStmt stmt = (BreakStmt)iter.next();
      if(!stmt.isDUafterReachedFinallyBlocks(v))
        return false;
    }
    return true;
  }
  /**
   * This statement is a valid target for a continue label.
   * @attribute syn
   * @aspect EnhancedFor
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:114
   */
  @SuppressWarnings({"unchecked", "cast"})
  public boolean continueLabel() {
    ASTNode$State state = state();
    boolean continueLabel_value = continueLabel_compute();
    return continueLabel_value;
  }
  /**
   * @apilvl internal
   */
  private boolean continueLabel_compute() {
    return true;
  }
  /**
   * @apilvl internal
   */
  protected boolean cond_label_computed = false;
  /**
   * @apilvl internal
   */
  protected int cond_label_value;
  /**
   * Bytecode label of the loop-condition check (allocated once per node).
   * @attribute syn
   * @aspect EnhancedForToBytecode
   * @declaredat D:\zhh\JastAddJ\Java1.5Backend\EnhancedForCodegen.jrag:12
   */
  @SuppressWarnings({"unchecked", "cast"})
  public int cond_label() {
    if(cond_label_computed) {
      return cond_label_value;
    }
    ASTNode$State state = state();
    int num = state.boundariesCrossed;
    boolean isFinal = this.is$Final();
    cond_label_value = cond_label_compute();
    if(isFinal && num == state().boundariesCrossed) cond_label_computed = true;
    return cond_label_value;
  }
  /**
   * @apilvl internal
   */
  private int cond_label_compute() {
    return hostType().constantPool().newLabel();
  }
  /**
   * @apilvl internal
   */
  protected boolean update_label_computed = false;
  /**
   * @apilvl internal
   */
  protected int update_label_value;
  /**
   * Bytecode label of the index/iterator update step.
   * @attribute syn
   * @aspect EnhancedForToBytecode
   * @declaredat D:\zhh\JastAddJ\Java1.5Backend\EnhancedForCodegen.jrag:13
   */
  @SuppressWarnings({"unchecked", "cast"})
  public int update_label() {
    if(update_label_computed) {
      return update_label_value;
    }
    ASTNode$State state = state();
    int num = state.boundariesCrossed;
    boolean isFinal = this.is$Final();
    update_label_value = update_label_compute();
    if(isFinal && num == state().boundariesCrossed) update_label_computed = true;
    return update_label_value;
  }
  /**
   * @apilvl internal
   */
  private int update_label_compute() {
    return hostType().constantPool().newLabel();
  }
  /**
   * @apilvl internal
   */
  protected boolean end_label_computed = false;
  /**
   * @apilvl internal
   */
  protected int end_label_value;
  /**
   * Bytecode label just past the loop.
   * @attribute syn
   * @aspect EnhancedForToBytecode
   * @declaredat D:\zhh\JastAddJ\Java1.5Backend\EnhancedForCodegen.jrag:14
   */
  @SuppressWarnings({"unchecked", "cast"})
  public int end_label() {
    if(end_label_computed) {
      return end_label_value;
    }
    ASTNode$State state = state();
    int num = state.boundariesCrossed;
    boolean isFinal = this.is$Final();
    end_label_value = end_label_compute();
    if(isFinal && num == state().boundariesCrossed) end_label_computed = true;
    return end_label_value;
  }
  /**
   * @apilvl internal
   */
  private int end_label_compute() {
    return hostType().constantPool().newLabel();
  }
  /**
   * @apilvl internal
   */
  protected boolean extraLocalIndex_computed = false;
  /**
   * @apilvl internal
   */
  protected int extraLocalIndex_value;
  /**
   * Local-variable slot used for the hidden array-ref/iterator (see createBCode).
   * @attribute syn
   * @aspect EnhancedForToBytecode
   * @declaredat D:\zhh\JastAddJ\Java1.5Backend\EnhancedForCodegen.jrag:16
   */
  @SuppressWarnings({"unchecked", "cast"})
  public int extraLocalIndex() {
    if(extraLocalIndex_computed) {
      return extraLocalIndex_value;
    }
    ASTNode$State state = state();
    int num = state.boundariesCrossed;
    boolean isFinal = this.is$Final();
    extraLocalIndex_value = extraLocalIndex_compute();
    if(isFinal && num == state().boundariesCrossed) extraLocalIndex_computed = true;
    return extraLocalIndex_value;
  }
  /**
   * @apilvl internal
   */
  private int extraLocalIndex_compute() {
    return localNum();
  }
  /**
   * break jumps to the end of the loop.
   * @attribute syn
   * @aspect EnhancedForToBytecode
   * @declaredat D:\zhh\JastAddJ\Java1.5Backend\EnhancedForCodegen.jrag:21
   */
  @SuppressWarnings({"unchecked", "cast"})
  public int break_label() {
    ASTNode$State state = state();
    int break_label_value = break_label_compute();
    return break_label_value;
  }
  /**
   * @apilvl internal
   */
  private int break_label_compute() {
    return end_label();
  }
  /**
   * continue jumps to the update step.
   * @attribute syn
   * @aspect EnhancedForToBytecode
   * @declaredat D:\zhh\JastAddJ\Java1.5Backend\EnhancedForCodegen.jrag:22
   */
  @SuppressWarnings({"unchecked", "cast"})
  public int continue_label() {
    ASTNode$State state = state();
    int continue_label_value = continue_label_compute();
    return continue_label_value;
  }
  /**
   * @apilvl internal
   */
  private int continue_label_compute() {
    return update_label();
  }
  /**
   * Inherited variable lookup delegated to the parent scope.
   * @attribute inh
   * @aspect EnhancedFor
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:38
   */
  @SuppressWarnings({"unchecked", "cast"})
  public SimpleSet lookupVariable(String name) {
    ASTNode$State state = state();
    SimpleSet lookupVariable_String_value = getParent().Define_SimpleSet_lookupVariable(this, null, name);
    return lookupVariable_String_value;
  }
  /**
   * All three children resolve variables through this statement's scope.
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:41
   * @apilvl internal
   */
  public SimpleSet Define_SimpleSet_lookupVariable(ASTNode caller, ASTNode child, String name) {
    if(caller == getStmtNoTransform()) {
      return localLookupVariable(name);
    }
    if(caller == getExprNoTransform()) {
      return localLookupVariable(name);
    }
    if(caller == getVariableDeclarationNoTransform()) {
      return localLookupVariable(name);
    }
    return getParent().Define_SimpleSet_lookupVariable(this, caller, name);
  }
  /**
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:43
   * @apilvl internal
   */
  public NameType Define_NameType_nameType(ASTNode caller, ASTNode child) {
    if(caller == getVariableDeclarationNoTransform()) {
      return NameType.TYPE_NAME;
    }
    return getParent().Define_NameType_nameType(this, caller);
  }
  /**
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:48
   * @apilvl internal
   */
  public VariableScope Define_VariableScope_outerScope(ASTNode caller, ASTNode child) {
    if(caller == getStmtNoTransform()) {
      return this;
    }
    if(caller == getExprNoTransform()) {
      return this;
    }
    if(caller == getVariableDeclarationNoTransform()) {
      return this;
    }
    return getParent().Define_VariableScope_outerScope(this, caller);
  }
  /**
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:71
   * @apilvl internal
   */
  public boolean Define_boolean_isMethodParameter(ASTNode caller, ASTNode child) {
    if(caller == getVariableDeclarationNoTransform()) {
      return false;
    }
    return getParent().Define_boolean_isMethodParameter(this, caller);
  }
  /**
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:72
   * @apilvl internal
   */
  public boolean Define_boolean_isConstructorParameter(ASTNode caller, ASTNode child) {
    if(caller == getVariableDeclarationNoTransform()) {
      return false;
    }
    return getParent().Define_boolean_isConstructorParameter(this, caller);
  }
  /**
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:73
   * @apilvl internal
   */
  public boolean Define_boolean_isExceptionHandlerParameter(ASTNode caller, ASTNode child) {
    if(caller == getVariableDeclarationNoTransform()) {
      return false;
    }
    return getParent().Define_boolean_isExceptionHandlerParameter(this, caller);
  }
  /**
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:80
   * @apilvl internal
   */
  public boolean Define_boolean_reachable(ASTNode caller, ASTNode child) {
    if(caller == getStmtNoTransform()) {
      return reachable();
    }
    return getParent().Define_boolean_reachable(this, caller);
  }
  /**
   * The loop variable is definitely assigned inside the body; the expression
   * sees the surrounding assignment state.
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:97
   * @apilvl internal
   */
  public boolean Define_boolean_isDAbefore(ASTNode caller, ASTNode child, Variable v) {
    if(caller == getStmtNoTransform()) {
      return getExpr().isDAafter(v);
    }
    if(caller == getExprNoTransform()) {
      return v == getVariableDeclaration() || isDAbefore(v);
    }
    return getParent().Define_boolean_isDAbefore(this, caller, v);
  }
  /**
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:111
   * @apilvl internal
   */
  public boolean Define_boolean_isDUbefore(ASTNode caller, ASTNode child, Variable v) {
    if(caller == getStmtNoTransform()) {
      return getExpr().isDUafter(v);
    }
    if(caller == getExprNoTransform()) {
      return v != getVariableDeclaration() && isDUbefore(v);
    }
    return getParent().Define_boolean_isDUbefore(this, caller, v);
  }
  /**
   * @declaredat D:\zhh\JastAddJ\Java1.5Frontend\EnhancedFor.jrag:113
   * @apilvl internal
   */
  public boolean Define_boolean_insideLoop(ASTNode caller, ASTNode child) {
    if(caller == getStmtNoTransform()) {
      return true;
    }
    return getParent().Define_boolean_insideLoop(this, caller);
  }
  /**
   * Local slot numbering: the body starts after the loop variable; the loop
   * variable itself starts after the hidden extra slot(s) — 2 for the array
   * form (ref + index), 1 for the iterator form.
   * @declaredat D:\zhh\JastAddJ\Java1.5Backend\EnhancedForCodegen.jrag:18
   * @apilvl internal
   */
  public int Define_int_localNum(ASTNode caller, ASTNode child) {
    if(caller == getStmtNoTransform()) {
      return getVariableDeclaration().localNum() + getVariableDeclaration().type().size();
    }
    if(caller == getVariableDeclarationNoTransform()) {
      return localNum() + (getExpr().type().isArrayDecl() ? 2 : 1);
    }
    return getParent().Define_int_localNum(this, caller);
  }
  /**
   * @apilvl internal
   */
  public ASTNode rewriteTo() {
    return super.rewriteTo();
  }
}
apache-2.0
google/j2cl
transpiler/javatests/com/google/j2cl/integration/allsimplebridges/Tester641.java
1432
/*
 * Copyright 2017 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.google.j2cl.integration.allsimplebridges;

import static com.google.j2cl.integration.testing.Asserts.assertTrue;

import jsinterop.annotations.JsType;

/**
 * Integration test from the "allsimplebridges" suite (the numeric suffix
 * suggests these testers are generated combinatorially — TODO confirm):
 * C1 overloads get(Object)/get(String) over a @JsType interface whose
 * get(String) has a default implementation, and the checks verify that
 * dispatch — including through the interface — reaches C1's methods.
 */
public class Tester641 {
  @JsType
  static interface I1 {
    // Default implementation that C1.get(String) must override.
    default String get(String value) {
      return "I1.get";
    }
  }

  @SuppressWarnings("unchecked")
  static class C1 implements I1 {
    C1() {}

    @SuppressWarnings("MissingOverride")
    public String get(Object value) {
      return "C1.get";
    }

    @SuppressWarnings("MissingOverride")
    public String get(String value) {
      return "C1.get";
    }
  }

  @SuppressWarnings("unchecked")
  public static void test() {
    C1 s = new C1();
    // Static dispatch picks the matching overload on C1 for both argument types.
    assertTrue(s.get(new Object()).equals("C1.get"));
    assertTrue(s.get("").equals("C1.get"));
    // Dispatch through the interface must reach C1.get(String), not I1's default.
    assertTrue(((I1) s).get("").equals("C1.get"));
  }
}
apache-2.0
Crigges/WurstScript
de.peeeq.wurstscript/src/de/peeeq/wurstscript/translation/imoptimizer/ImOptimizer.java
3705
package de.peeeq.wurstscript.translation.imoptimizer;

import java.util.List;

import com.google.common.collect.Lists;

import de.peeeq.wurstscript.intermediateLang.optimizer.ConstantAndCopyPropagation;
import de.peeeq.wurstscript.intermediateLang.optimizer.LocalMerger;
import de.peeeq.wurstscript.intermediateLang.optimizer.SimpleRewrites;
import de.peeeq.wurstscript.intermediateLang.optimizer.TempMerger;
import de.peeeq.wurstscript.jassIm.ImFunction;
import de.peeeq.wurstscript.jassIm.ImProg;
import de.peeeq.wurstscript.jassIm.ImSet;
import de.peeeq.wurstscript.jassIm.ImSetArray;
import de.peeeq.wurstscript.jassIm.ImSetArrayTuple;
import de.peeeq.wurstscript.jassIm.ImSetTuple;
import de.peeeq.wurstscript.jassIm.ImStmt;
import de.peeeq.wurstscript.translation.imtranslation.ImTranslator;
import de.peeeq.wurstscript.utils.Pair;

/**
 * Coordinates the optimization passes run on the intermediate (Im) program held by an
 * {@link ImTranslator}: dead-code removal ("garbage"), inlining, local peephole-style
 * optimizations, null-setting, and name compression.
 */
public class ImOptimizer {

    /** Translator owning the Im program and the call/usage analysis results. */
    ImTranslator trans;

    public ImOptimizer(ImTranslator trans) {
        this.trans = trans;
    }

    /**
     * Final optimization step: removes unused code, then compresses names
     * (presumably to shorten identifiers in the generated output — confirm in ImCompressor).
     */
    public void optimize() {
        removeGarbage();
        ImCompressor compressor = new ImCompressor(trans);
        compressor.compressNames();
    }

    /**
     * Inlines globals and functions. Garbage is removed before inlining (less work for the
     * inliner) and after it (inlined functions may have become unreachable).
     */
    public void doInlining() {
        // remove garbage to reduce work for the inliner
        removeGarbage();
        GlobalsInliner globalsInliner = new GlobalsInliner(trans);
        globalsInliner.inlineGlobals();
        ImInliner inliner = new ImInliner(trans);
        inliner.doInlining();
        trans.assertProperties();
        // remove garbage, because inlined functions can be removed
        removeGarbage();
    }

    /**
     * Runs the local (per-function) optimization passes, bracketed by garbage removal
     * so each pass sees a minimal program.
     */
    public void localOptimizations() {
        removeGarbage();
        new TempMerger(trans).optimize();
        new ConstantAndCopyPropagation(trans).optimize();
        new SimpleRewrites(trans).optimize();
        new LocalMerger(trans).optimize();
        new UselessFunctionCallsRemover(trans).optimize();
        removeGarbage();
    }

    /**
     * Runs the null-setting pass (see NullSetter) and re-checks translator invariants.
     */
    public void doNullsetting() {
        NullSetter ns = new NullSetter(trans);
        ns.optimize();
        trans.assertProperties();
    }

    /**
     * Iteratively removes dead code until a fixed point is reached (capped at 100 iterations):
     * unread globals, functions unreachable from main/config, assignments to unread variables
     * (replaced by their right-hand side to preserve side effects), and unread locals.
     */
    public void removeGarbage() {
        if (trans.isUnitTestMode()) {
            // return;  // NOTE: early-exit for unit tests is disabled; the guard is currently a no-op
        }
        boolean changes = true;
        int iterations = 0;
        while (changes && iterations++ < 100) {
            changes = false;
            ImProg prog = trans.imProg();
            // refresh reachability / read-variable analysis before pruning
            trans.calculateCallRelationsAndUsedVariables();
            // keep only used variables
            changes |= prog.getGlobals().retainAll(trans.getReadVariables());
            // keep only functions reachable from main and config
            changes |= prog.getFunctions().retainAll(trans.getUsedFunctions());
            for (ImFunction f: prog.getFunctions()) {
                // remove set statements to unread variables
                // (collect first, then replace, to avoid mutating the tree mid-visit)
                final List<Pair<ImStmt, ImStmt>> replacements = Lists.newArrayList();
                f.accept(new ImFunction.DefaultVisitor() {
                    @Override
                    public void visit(ImSet e) {
                        if (!trans.getReadVariables().contains(e.getLeft())) {
                            replacements.add(Pair.<ImStmt,ImStmt>create(e, e.getRight()));
                        }
                    }

                    @Override
                    public void visit(ImSetArrayTuple e) {
                        if (!trans.getReadVariables().contains(e.getLeft())) {
                            replacements.add(Pair.<ImStmt,ImStmt>create(e, e.getRight()));
                        }
                    }

                    @Override
                    public void visit(ImSetArray e) {
                        if (!trans.getReadVariables().contains(e.getLeft())) {
                            replacements.add(Pair.<ImStmt,ImStmt>create(e, e.getRight()));
                        }
                    }

                    @Override
                    public void visit(ImSetTuple e) {
                        if (!trans.getReadVariables().contains(e.getLeft())) {
                            replacements.add(Pair.<ImStmt,ImStmt>create(e, e.getRight()));
                        }
                    }
                });
                for (Pair<ImStmt, ImStmt> pair : replacements) {
                    changes = true;
                    // detach the RHS from the old statement, then splice it in place of the
                    // assignment so any side effects of the expression are preserved
                    pair.getB().setParent(null);
                    pair.getA().replaceWith(pair.getB());
                }
                // keep only read local variables
                changes |= f.getLocals().retainAll(trans.getReadVariables());
            }
        }
    }
}
apache-2.0
cniweb/ant-contrib
ant-contrib/src/main/java/net/sf/antcontrib/logic/Switch.java
5130
/*
 * Copyright (c) 2001-2004 Ant-Contrib project. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.sf.antcontrib.logic;

import java.util.Vector;

import org.apache.tools.ant.BuildException;
import org.apache.tools.ant.Task;
import org.apache.tools.ant.taskdefs.Sequential;

/***
 * Task definition for the ANT task to switch on a particular value.
 *
 * <pre>
 *
 * Usage:
 *
 *   Task declaration in the project:
 *   <code>
 *     &lt;taskdef name="switch" classname="net.sf.antcontrib.logic.Switch" /&gt;
 *   </code>
 *
 *   Task calling syntax:
 *    <code>
 *     &lt;switch value="value" [caseinsensitive="true|false"] &gt;
 *       &lt;case value="val"&gt;
 *         &lt;property name="propname" value="propvalue" /&gt; |
 *         &lt;antcall target="targetname" /&gt; |
 *         any other tasks
 *       &lt;/case&gt;
 *      [
 *       &lt;default&gt;
 *         &lt;property name="propname" value="propvalue" /&gt; |
 *         &lt;antcall target="targetname" /&gt; |
 *         any other tasks
 *       &lt;/default&gt;
 *      ]
 *     &lt;/switch&gt;
 *    </code>
 *
 *
 *   Attributes:
 *       value           -&gt; The value to switch on
 *       caseinsensitive -&gt; Should we do case insensitive comparisons?
 *                          (default is false)
 *
 *   Subitems:
 *       case     --&gt; An individual case to consider, if the value that
 *                    is being switched on matches to value attribute of
 *                    the case, then the nested tasks will be executed.
 *       default  --&gt; The default case for when no match is found.
 *
 *
 * Crude Example:
 *
 *     <code>
 *     &lt;switch value=&quot;${foo}&quot;&gt;
 *       &lt;case value=&quot;bar&quot;&gt;
 *         &lt;echo message=&quot;The value of property foo is bar&quot; /&gt;
 *       &lt;/case&gt;
 *       &lt;case value=&quot;baz&quot;&gt;
 *         &lt;echo message=&quot;The value of property foo is baz&quot; /&gt;
 *       &lt;/case&gt;
 *       &lt;default&gt;
 *         &lt;echo message=&quot;The value of property foo is not sensible&quot; /&gt;
 *       &lt;/default&gt;
 *     &lt;/switch&gt;
 *     </code>
 *
 * </pre>
 *
 * @author <a href="mailto:mattinger@yahoo.com">Matthew Inger</a>
 * @author <a href="mailto:stefan.bodewig@freenet.de">Stefan Bodewig</a>
 */
public class Switch extends Task {
    /** The value being switched on; required. */
    private String value;
    /** Registered &lt;case&gt; children, in declaration order. */
    private Vector cases;
    /** Optional &lt;default&gt; child; at most one allowed. */
    private Sequential defaultCase;
    /** When true, case values are compared ignoring character case. */
    private boolean caseInsensitive;

    /***
     * Default Constructor
     */
    public Switch() {
        cases = new Vector();
    }

    /***
     * Selects the matching case (or the default) and performs its nested tasks.
     * If several cases match, the last matching one wins.
     *
     * @throws BuildException if no value was set, no cases/default were supplied,
     *         a case is missing its value, or nothing matched and no default exists
     */
    public void execute() throws BuildException {
        if (value == null)
            throw new BuildException("Value is missing");
        if (cases.size() == 0 && defaultCase == null)
            throw new BuildException("No cases supplied");

        Sequential selectedCase = defaultCase;

        int sz = cases.size();
        for (int i = 0; i < sz; i++) {
            Case c = (Case) (cases.elementAt(i));

            String cvalue = c.value;
            if (cvalue == null) {
                throw new BuildException("Value is required for case.");
            }
            String mvalue = value;

            // NOTE: toUpperCase() without an explicit Locale is locale-sensitive
            // (e.g. Turkish dotted/dotless i); kept as-is for backward compatibility.
            if (caseInsensitive) {
                cvalue = cvalue.toUpperCase();
                mvalue = mvalue.toUpperCase();
            }

            if (cvalue.equals(mvalue) && c != defaultCase)
                selectedCase = c;
        }

        if (selectedCase == null) {
            throw new BuildException("No case matched the value " + value
                    + " and no default has been specified.");
        }

        selectedCase.perform();
    }

    /***
     * Sets the value being switched on
     *
     * @param value the value to match against the &lt;case&gt; values
     */
    public void setValue(String value) {
        this.value = value;
    }

    /***
     * Controls whether comparisons ignore character case.
     *
     * @param c true for case-insensitive matching
     */
    public void setCaseInsensitive(boolean c) {
        caseInsensitive = c;
    }

    /***
     * An individual &lt;case&gt; branch: a task sequence guarded by a value.
     */
    public final class Case extends Sequential {
        private String value;

        public Case() {
            super();
        }

        public void setValue(String value) {
            this.value = value;
        }

        public void execute() throws BuildException {
            super.execute();
        }

        /***
         * Two cases are equal when their values are equal.
         * <p>
         * Fixed to honor the {@link Object#equals(Object)} contract: the previous
         * implementation cast its argument unconditionally (ClassCastException for
         * non-Case arguments) and threw NullPointerException when a value was null.
         */
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (!(o instanceof Case)) {
                return false;
            }
            Case c = (Case) o;
            return value == null ? c.value == null : value.equals(c.value);
        }

        /***
         * Consistent with {@link #equals(Object)}; previously missing entirely,
         * which broke the equals/hashCode contract for hashed collections.
         */
        public int hashCode() {
            return value == null ? 0 : value.hashCode();
        }
    }

    /***
     * Creates the &lt;case&gt; tag
     */
    public Switch.Case createCase() throws BuildException {
        Switch.Case res = new Switch.Case();
        cases.addElement(res);
        return res;
    }

    /***
     * Creates the &lt;default&gt; tag
     *
     * @throws BuildException if more than one default case is declared
     */
    public void addDefault(Sequential res) throws BuildException {
        if (defaultCase != null)
            throw new BuildException("Cannot specify multiple default cases");

        defaultCase = res;
    }
}
apache-2.0
sjaco002/vxquery
vxquery-core/src/main/java/org/apache/vxquery/runtime/functions/json/LibjnDescendantPairsScalarEvaluatorFactory.java
1733
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.vxquery.runtime.functions.json;

import org.apache.hyracks.algebricks.common.exceptions.AlgebricksException;
import org.apache.hyracks.algebricks.runtime.base.IScalarEvaluator;
import org.apache.hyracks.algebricks.runtime.base.IScalarEvaluatorFactory;
import org.apache.hyracks.api.context.IHyracksTaskContext;
import org.apache.vxquery.runtime.functions.base.AbstractTaggedValueArgumentScalarEvaluatorFactory;

/**
 * Factory producing {@link LibjnDescendantPairsScalarEvaluator} instances for the
 * libjn "descendant-pairs" function (per the class name — actual semantics live in
 * the evaluator class).
 */
public class LibjnDescendantPairsScalarEvaluatorFactory extends AbstractTaggedValueArgumentScalarEvaluatorFactory {
    // Factories are shipped to task sites, hence Serializable.
    private static final long serialVersionUID = 1L;

    /**
     * @param args factories for the evaluators producing this function's arguments
     */
    public LibjnDescendantPairsScalarEvaluatorFactory(IScalarEvaluatorFactory[] args) {
        super(args);
    }

    /**
     * Creates one evaluator instance bound to the given task context and argument evaluators.
     */
    @Override
    protected IScalarEvaluator createEvaluator(IHyracksTaskContext ctx, IScalarEvaluator[] args)
            throws AlgebricksException {
        return new LibjnDescendantPairsScalarEvaluator(ctx, args);
    }
}
apache-2.0
lmjacksoniii/hazelcast
hazelcast/src/main/java/com/hazelcast/internal/util/LockGuard.java
3342
/*
 * Copyright (c) 2008-2016, Hazelcast, Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.hazelcast.internal.util;

import com.hazelcast.nio.Address;
import com.hazelcast.util.Clock;
import com.hazelcast.util.Preconditions;

/**
 * Immutable lock state with an owner and a lease expiry, usable as the value behind a
 * cluster-wide lock. The {@code lockOwnerId} identifies who holds the lock (for example
 * a transaction ID); the expiry time is derived from the lease passed at construction.
 * The {@link #NOT_LOCKED} constant represents the unlocked state.
 */
public class LockGuard {

    /** Shared instance representing "no one holds the lock". */
    public static final LockGuard NOT_LOCKED = new LockGuard();

    /** ID of the owner that acquired this lock. */
    private final String lockOwnerId;
    /** Owner endpoint; only used for logging/diagnostics. */
    private final Address lockOwner;
    /** Absolute expiry time of the lease, in wall-clock millis. */
    private final long lockExpiryTime;

    private LockGuard() {
        lockOwner = null;
        lockOwnerId = null;
        lockExpiryTime = 0L;
    }

    public LockGuard(Address lockOwner, String lockOwnerId, long leaseTime) {
        Preconditions.checkNotNull(lockOwner);
        Preconditions.checkNotNull(lockOwnerId);
        Preconditions.checkPositive(leaseTime, "Lease time should be positive!");
        this.lockOwner = lockOwner;
        this.lockOwnerId = lockOwnerId;
        this.lockExpiryTime = toLockExpiry(leaseTime);
    }

    /** Converts a relative lease into an absolute expiry, clamping on long overflow. */
    private static long toLockExpiry(long leaseTime) {
        long expiry = Clock.currentTimeMillis() + leaseTime;
        return expiry < 0L ? Long.MAX_VALUE : expiry;
    }

    /** Returns true when some owner holds this lock (possibly with an expired lease). */
    public boolean isLocked() {
        return lockOwner != null;
    }

    /** Returns true when a lease was set and the current time is past it. */
    public boolean isLeaseExpired() {
        if (lockExpiryTime <= 0L) {
            return false;
        }
        return Clock.currentTimeMillis() > lockExpiryTime;
    }

    /**
     * Returns true when the given owner may acquire the lock: it is free,
     * its lease has expired, or the owner already holds it (re-entrancy).
     */
    public boolean allowsLock(String ownerId) {
        Preconditions.checkNotNull(ownerId);
        if (isLeaseExpired() || !isLocked()) {
            return true;
        }
        return allowsUnlock(ownerId);
    }

    /** Returns true when the given owner is the one holding this lock. */
    public boolean allowsUnlock(String ownerId) {
        Preconditions.checkNotNull(ownerId);
        return ownerId.equals(lockOwnerId);
    }

    public Address getLockOwner() {
        return lockOwner;
    }

    public String getLockOwnerId() {
        return lockOwnerId;
    }

    public long getLockExpiryTime() {
        return lockExpiryTime;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("LockGuard{");
        sb.append("lockOwner=").append(lockOwner);
        sb.append(", lockOwnerId='").append(lockOwnerId).append('\'');
        sb.append(", lockExpiryTime=").append(lockExpiryTime);
        sb.append('}');
        return sb.toString();
    }
}
apache-2.0
couchbaselabs/couchbase-java-cache
src/main/java/com/couchbase/client/jcache/CacheEventManager.java
10963
/*
 * Copyright (c) 2015 Couchbase, Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the
 * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
 * either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.couchbase.client.jcache;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArraySet;

import javax.cache.configuration.CacheEntryListenerConfiguration;
import javax.cache.event.CacheEntryCreatedListener;
import javax.cache.event.CacheEntryEvent;
import javax.cache.event.CacheEntryEventFilter;
import javax.cache.event.CacheEntryExpiredListener;
import javax.cache.event.CacheEntryListener;
import javax.cache.event.CacheEntryListenerException;
import javax.cache.event.CacheEntryRemovedListener;
import javax.cache.event.CacheEntryUpdatedListener;
import javax.cache.event.EventType;

/**
 * This class manages registration of event listeners (through their {@link CacheEntryListenerConfiguration}). It also
 * manages dispatching of events. First events are queued using {@link #queueEvent(CacheEntryEvent)}. Once all events
 * have been prepared, {@link #dispatch()} is called to notify the adequate listeners.
 * <p>
 * The event concrete class for this implementation is {@link CouchbaseCacheEntryEvent}.
 *
 * @author Simon Baslé
 * @since 1.0
 */
public class CacheEventManager<K, V> {

    /** Registered listeners, one entry per listener configuration. */
    private final Set<ListenerEntry<K, V>> entries = new CopyOnWriteArraySet<ListenerEntry<K, V>>();

    // Queued events keyed by the listener interface they will be delivered to.
    // NOTE(review): queues are never cleared after dispatch(), so events queued
    // earlier appear to be re-delivered by later dispatch() calls — confirm whether
    // callers use a fresh manager per batch or whether this accumulates.
    private final Map<Class<? extends CacheEntryListener>, List<CacheEntryEvent<K, V>>> eventQueues =
            new ConcurrentHashMap<Class<? extends CacheEntryListener>, List<CacheEntryEvent<K, V>>>();

    /**
     * Requests that the specified event be prepared for dispatching to their specific type of listeners.
     *
     * @param event the event to be dispatched
     * @throws NullPointerException if the event is null
     * @throws IllegalArgumentException if the event carries an unknown {@link EventType}
     */
    public void queueEvent(CacheEntryEvent<K, V> event) {
        if (event == null) {
            throw new NullPointerException("event can't be null");
        }
        // Map the event type to the listener interface that must receive it.
        Class<? extends CacheEntryListener> listenerClass;
        switch (event.getEventType()) {
            case CREATED:
                listenerClass = CacheEntryCreatedListener.class;
                break;
            case UPDATED:
                listenerClass = CacheEntryUpdatedListener.class;
                break;
            case REMOVED:
                listenerClass = CacheEntryRemovedListener.class;
                break;
            case EXPIRED:
                listenerClass = CacheEntryExpiredListener.class;
                break;
            default:
                throw new IllegalArgumentException("Unknown event type " + event.getEventType());
        }

        // Lazily create the queue for this listener type; the synchronized block only
        // guards the get-or-create, the add itself happens outside the lock.
        List<CacheEntryEvent<K, V>> queue;
        synchronized (this) {
            queue = eventQueues.get(listenerClass);
            if (queue == null) {
                queue = new ArrayList<CacheEntryEvent<K, V>>();
                eventQueues.put(listenerClass, queue);
            }
        }
        queue.add(event);
    }

    /**
     * Applies the listener's configured filter (if any) to the queued events.
     * With no filter the full list is returned unchanged.
     */
    private Iterable<CacheEntryEvent<K, V>> filterEvents(ListenerEntry<K, V> listenerEntry,
            List<CacheEntryEvent<K, V>> allEvents) {
        CacheEntryEventFilter<? super K, ? super V> filter = listenerEntry.getFilter();
        List<CacheEntryEvent<K, V>> filteredEvents;
        if (filter == null) {
            filteredEvents = allEvents;
        } else {
            filteredEvents = new ArrayList<CacheEntryEvent<K, V>>(allEvents.size());
            for (CacheEntryEvent<K, V> event : allEvents) {
                if (filter.evaluate(event)) {
                    filteredEvents.add(event);
                }
            }
        }
        return filteredEvents;
    }

    /** Delivers queued CREATED events to every registered {@link CacheEntryCreatedListener}. */
    protected void dispatchForCreate() {
        List<CacheEntryEvent<K, V>> events = eventQueues.get(CacheEntryCreatedListener.class);
        if (events == null) {
            return;
        }
        for (ListenerEntry<K, V> entry: entries) {
            if (entry.getListener() instanceof CacheEntryCreatedListener) {
                ((CacheEntryCreatedListener) entry.getListener()).onCreated(filterEvents(entry, events));
            }
        }
    }

    /** Delivers queued UPDATED events to every registered {@link CacheEntryUpdatedListener}. */
    protected void dispatchForUpdate() {
        List<CacheEntryEvent<K, V>> events = eventQueues.get(CacheEntryUpdatedListener.class);
        if (events == null) {
            return;
        }
        for (ListenerEntry entry: entries) {
            if (entry.getListener() instanceof CacheEntryUpdatedListener) {
                ((CacheEntryUpdatedListener) entry.getListener()).onUpdated(filterEvents(entry, events));
            }
        }
    }

    /** Delivers queued REMOVED events to every registered {@link CacheEntryRemovedListener}. */
    protected void dispatchForRemove() {
        List<CacheEntryEvent<K, V>> events = eventQueues.get(CacheEntryRemovedListener.class);
        if (events == null) {
            return;
        }
        for (ListenerEntry entry: entries) {
            if (entry.getListener() instanceof CacheEntryRemovedListener) {
                ((CacheEntryRemovedListener) entry.getListener()).onRemoved(filterEvents(entry, events));
            }
        }
    }

    /** Delivers queued EXPIRED events to every registered {@link CacheEntryExpiredListener}. */
    protected void dispatchForExpiry() {
        List<CacheEntryEvent<K, V>> events = eventQueues.get(CacheEntryExpiredListener.class);
        if (events == null) {
            return;
        }
        for (ListenerEntry entry: entries) {
            if (entry.getListener() instanceof CacheEntryExpiredListener) {
                ((CacheEntryExpiredListener) entry.getListener()).onExpired(filterEvents(entry, events));
            }
        }
    }

    /**
     * Dispatches the queued events to the registered listeners.
     * Order is fixed: expiry, create, update, remove. Any listener exception aborts the
     * remaining dispatches and is rethrown as a {@link CacheEntryListenerException}.
     */
    public void dispatch() {
        try {
            dispatchForExpiry();
            dispatchForCreate();
            dispatchForUpdate();
            dispatchForRemove();
        } catch (Exception e) {
            if (e instanceof CacheEntryListenerException) {
                throw (CacheEntryListenerException) e;
            } else {
                throw new CacheEntryListenerException("Exception on listener execution", e);
            }
        }
    }

    /**
     * Utility method to create a {@link CouchbaseCacheEntryEvent}, {@link #queueEvent(CacheEntryEvent) queue} it and
     * {@link #dispatch() dispatch} it one call.
     *
     * @param type the type of the event to create.
     * @param key the key impacted by the event.
     * @param value the value corresponding to the key after the event.
     * @param oldValueOrNull the old value before the event or null if not applicable.
     * @param source the cache in which the event happened.
     */
    public void queueAndDispatch(EventType type, K key, V value, V oldValueOrNull, CouchbaseCache source) {
        queueEvent(new CouchbaseCacheEntryEvent<K, V>(type, key, value, oldValueOrNull, source));
        dispatch();
    }

    /**
     * Utility method to create a {@link CouchbaseCacheEntryEvent}, {@link #queueEvent(CacheEntryEvent) queue} it and
     * {@link #dispatch() dispatch} it one call. The old value is not applicable / set to null.
     *
     * @param type the type of the event to create.
     * @param key the key impacted by the event.
     * @param value the value corresponding to the key after the event.
     * @param source the cache in which the event happened.
     */
    public void queueAndDispatch(EventType type, K key, V value, CouchbaseCache source) {
        queueEvent(new CouchbaseCacheEntryEvent<K, V>(type, key, value, source));
        dispatch();
    }

    /**
     * Register a new listener using the given configuration.
     *
     * @param config the configuration for the listener.
     */
    public void addListener(CacheEntryListenerConfiguration<K, V> config) {
        ListenerEntry<K, V> entry = new ListenerEntry<K, V>(config);
        this.entries.add(entry);
    }

    /**
     * Deregister a listener that was created using the given configuration.
     *
     * @param config The configuration used to register and create the listener.
     */
    public void removeListener(CacheEntryListenerConfiguration<K, V> config) {
        // Removal relies on ListenerEntry.equals comparing listener/filter/flags
        // built from an equivalent configuration.
        ListenerEntry<K, V> toRemove = new ListenerEntry<K, V>(config);
        entries.remove(toRemove);
    }

    /**
     * Immutable pairing of a listener with its filter and delivery flags, as produced
     * from a {@link CacheEntryListenerConfiguration}. Equality is structural so entries
     * can be removed by rebuilding them from the original configuration.
     */
    protected static final class ListenerEntry<K, V> {

        private final CacheEntryListener<? super K, ? super V> listener;
        private final CacheEntryEventFilter<? super K, ? super V> filter;
        private final boolean isOldValueRequired;
        private final boolean isSynchronous;

        public ListenerEntry(CacheEntryListenerConfiguration<? super K, ? super V> configuration) {
            this.listener = configuration.getCacheEntryListenerFactory().create();
            this.filter = configuration.getCacheEntryEventFilterFactory() == null ? null
                    : configuration.getCacheEntryEventFilterFactory().create();
            this.isOldValueRequired = configuration.isOldValueRequired();
            this.isSynchronous = configuration.isSynchronous();
        }

        public CacheEntryListener<? super K, ? super V> getListener() {
            return listener;
        }

        public CacheEntryEventFilter<? super K, ? super V> getFilter() {
            return filter;
        }

        public boolean isOldValueRequired() {
            return isOldValueRequired;
        }

        public boolean isSynchronous() {
            return isSynchronous;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }

            ListenerEntry that = (ListenerEntry) o;

            if (isOldValueRequired != that.isOldValueRequired) {
                return false;
            }
            if (isSynchronous != that.isSynchronous) {
                return false;
            }
            if (filter != null ? !filter.equals(that.filter) : that.filter != null) {
                return false;
            }
            return listener.equals(that.listener);
        }

        @Override
        public int hashCode() {
            int result = listener.hashCode();
            result = 31 * result + (filter != null ? filter.hashCode() : 0);
            result = 31 * result + (isOldValueRequired ? 1 : 0);
            result = 31 * result + (isSynchronous ? 1 : 0);
            return result;
        }
    }
}
apache-2.0
xuzha/elasticsearch
test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
97056
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.test; import com.carrotsearch.randomizedtesting.RandomizedContext; import com.carrotsearch.randomizedtesting.RandomizedTest; import com.carrotsearch.randomizedtesting.annotations.TestGroup; import com.carrotsearch.randomizedtesting.generators.RandomInts; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.http.impl.client.HttpClients; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; import 
org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse; import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.ClearScrollResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.AdminClient; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; import 
org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.env.Environment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.MergeSchedulerConfig; import org.elasticsearch.index.MockEngineFactoryPlugin; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.mapper.DocumentMapper; import 
org.elasticsearch.index.mapper.internal.TimestampFieldMapper; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesRequestCache; import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.node.NodeMocksPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.MockSearchService; import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.client.RandomizingClient; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.rest.client.http.HttpRequestBuilder; import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.transport.AssertingLocalTransport; import org.elasticsearch.test.transport.MockTransportService; import org.hamcrest.Matchers; import org.joda.time.DateTimeZone; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; import java.io.IOException; import java.io.InputStream; import java.lang.annotation.Annotation; import java.lang.annotation.ElementType; import java.lang.annotation.Inherited; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.URL; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.IdentityHashMap; import java.util.List; import java.util.Map; import java.util.Random; import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import 
java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.BooleanSupplier; import java.util.function.Function; import static org.elasticsearch.client.Requests.syncedFlushRequest; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.util.CollectionUtils.eagerPartition; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.XContentTestUtils.convertToMap; import static org.elasticsearch.test.XContentTestUtils.differenceBetweenMapsIgnoringArrayOrder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; import static org.hamcrest.Matchers.emptyArray; import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; /** * {@link ESIntegTestCase} is an abstract base class to run integration * tests against a JVM private Elasticsearch Cluster. The test class supports 2 different * cluster scopes. * <ul> * <li>{@link Scope#TEST} - uses a new cluster for each individual test method.</li> * <li>{@link Scope#SUITE} - uses a cluster shared across all test methods in the same suite</li> * </ul> * <p> * The most common test scope is {@link Scope#SUITE} which shares a cluster per test suite. * <p> * If the test methods need specific node settings or change persistent and/or transient cluster settings {@link Scope#TEST} * should be used. 
To configure a scope for the test cluster the {@link ClusterScope} annotation * should be used, here is an example: * <pre> * * {@literal @}NodeScope(scope=Scope.TEST) public class SomeIT extends ESIntegTestCase { * public void testMethod() {} * } * </pre> * <p> * If no {@link ClusterScope} annotation is present on an integration test the default scope is {@link Scope#SUITE} * <p> * A test cluster creates a set of nodes in the background before the test starts. The number of nodes in the cluster is * determined at random and can change across tests. The {@link ClusterScope} allows configuring the initial number of nodes * that are created before the tests start. * <pre> * {@literal @}NodeScope(scope=Scope.SUITE, numDataNodes=3) * public class SomeIT extends ESIntegTestCase { * public void testMethod() {} * } * </pre> * <p> * Note, the {@link ESIntegTestCase} uses randomized settings on a cluster and index level. For instance * each test might use different directory implementation for each test or will return a random client to one of the * nodes in the cluster for each call to {@link #client()}. Test failures might only be reproducible if the correct * system properties are passed to the test execution environment. * <p> * This class supports the following system properties (passed with -Dkey=value to the application) * <ul> * <li>-D{@value #TESTS_CLIENT_RATIO} - a double value in the interval [0..1] which defines the ration between node and transport clients used</li> * <li>-D{@value #TESTS_ENABLE_MOCK_MODULES} - a boolean value to enable or disable mock modules. This is * useful to test the system without asserting modules that to make sure they don't hide any bugs in production.</li> * <li> - a random seed used to initialize the index random context. 
* </ul> */ @LuceneTestCase.SuppressFileSystems("ExtrasFS") // doesn't work with potential multi data path from test cluster yet public abstract class ESIntegTestCase extends ESTestCase { /** * Property that controls whether ThirdParty Integration tests are run (not the default). */ public static final String SYSPROP_THIRDPARTY = "tests.thirdparty"; /** * Annotation for third-party integration tests. * <p> * These are tests the require a third-party service in order to run. They * may require the user to manually configure an external process (such as rabbitmq), * or may additionally require some external configuration (e.g. AWS credentials) * via the {@code tests.config} system property. */ @Inherited @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.TYPE) @TestGroup(enabled = false, sysProperty = ESIntegTestCase.SYSPROP_THIRDPARTY) public @interface ThirdParty { } /** node names of the corresponding clusters will start with these prefixes */ public static final String SUITE_CLUSTER_NODE_PREFIX = "node_s"; public static final String TEST_CLUSTER_NODE_PREFIX = "node_t"; /** * Key used to set the transport client ratio via the commandline -D{@value #TESTS_CLIENT_RATIO} */ public static final String TESTS_CLIENT_RATIO = "tests.client.ratio"; /** * Key used to eventually switch to using an external cluster and provide its transport addresses */ public static final String TESTS_CLUSTER = "tests.cluster"; /** * Key used to retrieve the index random seed from the index settings on a running node. * The value of this seed can be used to initialize a random context for a specific index. * It's set once per test via a generic index template. */ public static final Setting<Long> INDEX_TEST_SEED_SETTING = Setting.longSetting("index.tests.seed", 0, Long.MIN_VALUE, Property.IndexScope); /** * A boolean value to enable or disable mock modules. This is useful to test the * system without asserting modules that to make sure they don't hide any bugs in * production. 
* * @see ESIntegTestCase */ public static final String TESTS_ENABLE_MOCK_MODULES = "tests.enable_mock_modules"; /** * Threshold at which indexing switches from frequently async to frequently bulk. */ private static final int FREQUENT_BULK_THRESHOLD = 300; /** * Threshold at which bulk indexing will always be used. */ private static final int ALWAYS_BULK_THRESHOLD = 3000; /** * Maximum number of async operations that indexRandom will kick off at one time. */ private static final int MAX_IN_FLIGHT_ASYNC_INDEXES = 150; /** * Maximum number of documents in a single bulk index request. */ private static final int MAX_BULK_INDEX_REQUEST_SIZE = 1000; /** * Default minimum number of shards for an index */ protected static final int DEFAULT_MIN_NUM_SHARDS = 1; /** * Default maximum number of shards for an index */ protected static final int DEFAULT_MAX_NUM_SHARDS = 10; /** * The current cluster depending on the configured {@link Scope}. * By default if no {@link ClusterScope} is configured this will hold a reference to the suite cluster. 
*/ private static TestCluster currentCluster; private static final double TRANSPORT_CLIENT_RATIO = transportClientRatio(); private static final Map<Class<?>, TestCluster> clusters = new IdentityHashMap<>(); private static ESIntegTestCase INSTANCE = null; // see @SuiteScope private static Long SUITE_SEED = null; @BeforeClass public static void beforeClass() throws Exception { SUITE_SEED = randomLong(); initializeSuiteScope(); } protected final void beforeInternal() throws Exception { final Scope currentClusterScope = getCurrentClusterScope(); switch (currentClusterScope) { case SUITE: assert SUITE_SEED != null : "Suite seed was not initialized"; currentCluster = buildAndPutCluster(currentClusterScope, SUITE_SEED); break; case TEST: currentCluster = buildAndPutCluster(currentClusterScope, randomLong()); break; default: fail("Unknown Scope: [" + currentClusterScope + "]"); } cluster().beforeTest(random(), getPerTestTransportClientRatio()); cluster().wipe(excludeTemplates()); randomIndexTemplate(); } private void printTestMessage(String message) { if (isSuiteScopedTest(getClass()) && (getTestName().equals("<unknown>"))) { logger.info("[{}]: {} suite", getTestClass().getSimpleName(), message); } else { logger.info("[{}#{}]: {} test", getTestClass().getSimpleName(), getTestName(), message); } } /** * Creates a randomized index template. This template is used to pass in randomized settings on a * per index basis. Allows to enable/disable the randomization for number of shards and replicas */ public void randomIndexTemplate() throws IOException { // TODO move settings for random directory etc here into the index based randomized settings. 
if (cluster().size() > 0) { Settings.Builder randomSettingsBuilder = setRandomIndexSettings(random(), Settings.builder()); if (isInternalCluster()) { // this is only used by mock plugins and if the cluster is not internal we just can't set it randomSettingsBuilder.put(INDEX_TEST_SEED_SETTING.getKey(), random().nextLong()); } randomSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, numberOfShards()) .put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas()); // if the test class is annotated with SuppressCodecs("*"), it means don't use lucene's codec randomization // otherwise, use it, it has assertions and so on that can find bugs. SuppressCodecs annotation = getClass().getAnnotation(SuppressCodecs.class); if (annotation != null && annotation.value().length == 1 && "*".equals(annotation.value()[0])) { randomSettingsBuilder.put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)); } else { randomSettingsBuilder.put("index.codec", CodecService.LUCENE_DEFAULT_CODEC); } XContentBuilder mappings = null; if (frequently() && randomDynamicTemplates()) { mappings = XContentFactory.jsonBuilder().startObject().startObject("_default_"); if (randomBoolean()) { mappings.startObject(TimestampFieldMapper.NAME) .field("enabled", randomBoolean()); mappings.endObject(); } mappings.endObject().endObject(); } for (String setting : randomSettingsBuilder.internalMap().keySet()) { assertThat("non index. prefix setting set on index template, its a node setting...", setting, startsWith("index.")); } // always default delayed allocation to 0 to make sure we have tests are not delayed randomSettingsBuilder.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), 0); if (randomBoolean()) { randomSettingsBuilder.put(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey(), randomBoolean() ? 
IndexModule.INDEX_QUERY_CACHE : IndexModule.NONE_QUERY_CACHE); } if (randomBoolean()) { randomSettingsBuilder.put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), randomBoolean()); } PutIndexTemplateRequestBuilder putTemplate = client().admin().indices() .preparePutTemplate("random_index_template") .setTemplate("*") .setOrder(0) .setSettings(randomSettingsBuilder); if (mappings != null) { logger.info("test using _default_ mappings: [{}]", mappings.bytes().toUtf8()); putTemplate.addMapping("_default_", mappings); } assertAcked(putTemplate.execute().actionGet()); } } protected Settings.Builder setRandomIndexSettings(Random random, Settings.Builder builder) { setRandomIndexMergeSettings(random, builder); setRandomIndexTranslogSettings(random, builder); if (random.nextBoolean()) { builder.put(MergeSchedulerConfig.AUTO_THROTTLE_SETTING.getKey(), false); } if (random.nextBoolean()) { builder.put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), random.nextBoolean()); } if (random.nextBoolean()) { builder.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "checksum", "true")); } if (randomBoolean()) { // keep this low so we don't stall tests builder.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), RandomInts.randomIntBetween(random, 1, 15) + "ms"); } return builder; } private static Settings.Builder setRandomIndexMergeSettings(Random random, Settings.Builder builder) { if (random.nextBoolean()) { builder.put(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING.getKey(), random.nextBoolean() ? 
random.nextDouble() : random.nextBoolean()); } switch (random.nextInt(4)) { case 3: final int maxThreadCount = RandomInts.randomIntBetween(random, 1, 4); final int maxMergeCount = RandomInts.randomIntBetween(random, maxThreadCount, maxThreadCount + 4); builder.put(MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING.getKey(), maxMergeCount); builder.put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), maxThreadCount); break; } return builder; } private static Settings.Builder setRandomIndexTranslogSettings(Random random, Settings.Builder builder) { if (random.nextBoolean()) { builder.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 300), ByteSizeUnit.MB)); } if (random.nextBoolean()) { builder.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)); // just don't flush } if (random.nextBoolean()) { builder.put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), RandomPicks.randomFrom(random, Translog.Durability.values())); } if (random.nextBoolean()) { builder.put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), RandomInts.randomIntBetween(random, 100, 5000), TimeUnit.MILLISECONDS); } return builder; } private TestCluster buildWithPrivateContext(final Scope scope, final long seed) throws Exception { return RandomizedContext.current().runWithPrivateRandomness(new com.carrotsearch.randomizedtesting.Randomness(seed), new Callable<TestCluster>() { @Override public TestCluster call() throws Exception { return buildTestCluster(scope, seed); } }); } private TestCluster buildAndPutCluster(Scope currentClusterScope, long seed) throws Exception { final Class<?> clazz = this.getClass(); TestCluster testCluster = clusters.remove(clazz); // remove this cluster first clearClusters(); // all leftovers are gone by now... 
this is really just a double safety if we miss something somewhere switch (currentClusterScope) { case SUITE: if (testCluster == null) { // only build if it's not there yet testCluster = buildWithPrivateContext(currentClusterScope, seed); } break; case TEST: // close the previous one and create a new one IOUtils.closeWhileHandlingException(testCluster); testCluster = buildTestCluster(currentClusterScope, seed); break; } clusters.put(clazz, testCluster); return testCluster; } private static void clearClusters() throws IOException { if (!clusters.isEmpty()) { IOUtils.close(clusters.values()); clusters.clear(); } } protected final void afterInternal(boolean afterClass) throws Exception { boolean success = false; try { final Scope currentClusterScope = getCurrentClusterScope(); clearDisruptionScheme(); try { if (cluster() != null) { if (currentClusterScope != Scope.TEST) { MetaData metaData = client().admin().cluster().prepareState().execute().actionGet().getState().getMetaData(); assertThat("test leaves persistent cluster metadata behind: " + metaData.persistentSettings().getAsMap(), metaData .persistentSettings().getAsMap().size(), equalTo(0)); assertThat("test leaves transient cluster metadata behind: " + metaData.transientSettings().getAsMap(), metaData .transientSettings().getAsMap().size(), equalTo(0)); } ensureClusterSizeConsistency(); ensureClusterStateConsistency(); if (isInternalCluster()) { // check no pending cluster states are leaked for (Discovery discovery : internalCluster().getInstances(Discovery.class)) { if (discovery instanceof ZenDiscovery) { final ZenDiscovery zenDiscovery = (ZenDiscovery) discovery; assertBusy(new Runnable() { @Override public void run() { assertThat("still having pending states: " + Strings.arrayToDelimitedString(zenDiscovery.pendingClusterStates(), "\n"), zenDiscovery.pendingClusterStates(), emptyArray()); } }); } } } beforeIndexDeletion(); cluster().wipe(excludeTemplates()); // wipe after to make sure we fail in the test that 
didn't ack the delete if (afterClass || currentClusterScope == Scope.TEST) { cluster().close(); } cluster().assertAfterTest(); } } finally { if (currentClusterScope == Scope.TEST) { clearClusters(); // it is ok to leave persistent / transient cluster state behind if scope is TEST } } success = true; } finally { if (!success) { // if we failed here that means that something broke horribly so we should clear all clusters // TODO: just let the exception happen, WTF is all this horseshit // afterTestRule.forceFailure(); } } } /** * @return An exclude set of index templates that will not be removed in between tests. */ protected Set<String> excludeTemplates() { return Collections.emptySet(); } protected void beforeIndexDeletion() { cluster().beforeIndexDeletion(); } public static TestCluster cluster() { return currentCluster; } public static boolean isInternalCluster() { return (currentCluster instanceof InternalTestCluster); } public static InternalTestCluster internalCluster() { if (!isInternalCluster()) { throw new UnsupportedOperationException("current test cluster is immutable"); } return (InternalTestCluster) currentCluster; } public ClusterService clusterService() { return internalCluster().clusterService(); } public static Client client() { return client(null); } public static Client client(@Nullable String node) { if (node != null) { return internalCluster().client(node); } Client client = cluster().client(); if (frequently()) { client = new RandomizingClient(client, random()); } return client; } public static Client dataNodeClient() { Client client = internalCluster().dataNodeClient(); if (frequently()) { client = new RandomizingClient(client, random()); } return client; } public static Iterable<Client> clients() { return cluster().getClients(); } protected int minimumNumberOfShards() { return DEFAULT_MIN_NUM_SHARDS; } protected int maximumNumberOfShards() { return DEFAULT_MAX_NUM_SHARDS; } protected int numberOfShards() { return 
between(minimumNumberOfShards(), maximumNumberOfShards()); } protected int minimumNumberOfReplicas() { return 0; } protected int maximumNumberOfReplicas() { //use either 0 or 1 replica, yet a higher amount when possible, but only rarely int maxNumReplicas = Math.max(0, cluster().numDataNodes() - 1); return frequently() ? Math.min(1, maxNumReplicas) : maxNumReplicas; } protected int numberOfReplicas() { return between(minimumNumberOfReplicas(), maximumNumberOfReplicas()); } public void setDisruptionScheme(ServiceDisruptionScheme scheme) { internalCluster().setDisruptionScheme(scheme); } public void clearDisruptionScheme() { if (isInternalCluster()) { internalCluster().clearDisruptionScheme(); } } /** * Returns a settings object used in {@link #createIndex(String...)} and {@link #prepareCreate(String)} and friends. * This method can be overwritten by subclasses to set defaults for the indices that are created by the test. * By default it returns a settings object that sets a random number of shards. Number of shards and replicas * can be controlled through specific methods. */ public Settings indexSettings() { Settings.Builder builder = Settings.builder(); int numberOfShards = numberOfShards(); if (numberOfShards > 0) { builder.put(SETTING_NUMBER_OF_SHARDS, numberOfShards).build(); } int numberOfReplicas = numberOfReplicas(); if (numberOfReplicas >= 0) { builder.put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas).build(); } // 30% of the time if (randomInt(9) < 3) { final String dataPath = randomAsciiOfLength(10); logger.info("using custom data_path for index: [{}]", dataPath); builder.put(IndexMetaData.SETTING_DATA_PATH, dataPath); } // always default delayed allocation to 0 to make sure we have tests are not delayed builder.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), 0); return builder.build(); } /** * Creates one or more indices and asserts that the indices are acknowledged. 
If one of the indices * already exists this method will fail and wipe all the indices created so far. */ public final void createIndex(String... names) { List<String> created = new ArrayList<>(); for (String name : names) { boolean success = false; try { assertAcked(prepareCreate(name)); created.add(name); success = true; } finally { if (!success && !created.isEmpty()) { cluster().wipeIndices(created.toArray(new String[created.size()])); } } } } /** * Creates a new {@link CreateIndexRequestBuilder} with the settings obtained from {@link #indexSettings()}. */ public final CreateIndexRequestBuilder prepareCreate(String index) { return client().admin().indices().prepareCreate(index).setSettings(indexSettings()); } /** * Creates a new {@link CreateIndexRequestBuilder} with the settings obtained from {@link #indexSettings()}. * The index that is created with this builder will only be allowed to allocate on the number of nodes passed to this * method. * <p> * This method uses allocation deciders to filter out certain nodes to allocate the created index on. It defines allocation * rules based on <code>index.routing.allocation.exclude._name</code>. * </p> */ public final CreateIndexRequestBuilder prepareCreate(String index, int numNodes) { return prepareCreate(index, numNodes, Settings.builder()); } /** * Creates a new {@link CreateIndexRequestBuilder} with the settings obtained from {@link #indexSettings()}. * The index that is created with this builder will only be allowed to allocate on the number of nodes passed to this * method. * <p> * This method uses allocation deciders to filter out certain nodes to allocate the created index on. It defines allocation * rules based on <code>index.routing.allocation.exclude._name</code>. 
* </p> */ public CreateIndexRequestBuilder prepareCreate(String index, int numNodes, Settings.Builder settingsBuilder) { internalCluster().ensureAtLeastNumDataNodes(numNodes); Settings.Builder builder = Settings.builder().put(indexSettings()).put(settingsBuilder.build()); if (numNodes > 0) { getExcludeSettings(index, numNodes, builder); } return client().admin().indices().prepareCreate(index).setSettings(builder.build()); } private Settings.Builder getExcludeSettings(String index, int num, Settings.Builder builder) { String exclude = String.join(",", internalCluster().allDataNodesButN(num)); builder.put("index.routing.allocation.exclude._name", exclude); return builder; } /** * Waits until all nodes have no pending tasks. */ public void waitNoPendingTasksOnAll() throws Exception { assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get()); assertBusy(new Runnable() { @Override public void run() { for (Client client : clients()) { ClusterHealthResponse clusterHealth = client.admin().cluster().prepareHealth().setLocal(true).get(); assertThat("client " + client + " still has in flight fetch", clusterHealth.getNumberOfInFlightFetch(), equalTo(0)); PendingClusterTasksResponse pendingTasks = client.admin().cluster().preparePendingClusterTasks().setLocal(true).get(); assertThat("client " + client + " still has pending tasks " + pendingTasks.prettyPrint(), pendingTasks, Matchers.emptyIterable()); clusterHealth = client.admin().cluster().prepareHealth().setLocal(true).get(); assertThat("client " + client + " still has in flight fetch", clusterHealth.getNumberOfInFlightFetch(), equalTo(0)); } } }); assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get()); } /** * Waits till a (pattern) field name mappings concretely exists on all nodes. Note, this waits for the current * started shards and checks for concrete mappings. 
*/ public void assertConcreteMappingsOnAll(final String index, final String type, final String... fieldNames) throws Exception { Set<String> nodes = internalCluster().nodesInclude(index); assertThat(nodes, Matchers.not(Matchers.emptyIterable())); for (String node : nodes) { IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); IndexService indexService = indicesService.indexService(resolveIndex(index)); assertThat("index service doesn't exists on " + node, indexService, notNullValue()); DocumentMapper documentMapper = indexService.mapperService().documentMapper(type); assertThat("document mapper doesn't exists on " + node, documentMapper, notNullValue()); for (String fieldName : fieldNames) { Collection<String> matches = documentMapper.mappers().simpleMatchToFullName(fieldName); assertThat("field " + fieldName + " doesn't exists on " + node, matches, Matchers.not(emptyIterable())); } } assertMappingOnMaster(index, type, fieldNames); } /** * Waits for the given mapping type to exists on the master node. */ public void assertMappingOnMaster(final String index, final String type, final String... 
fieldNames) throws Exception { GetMappingsResponse response = client().admin().indices().prepareGetMappings(index).setTypes(type).get(); ImmutableOpenMap<String, MappingMetaData> mappings = response.getMappings().get(index); assertThat(mappings, notNullValue()); MappingMetaData mappingMetaData = mappings.get(type); assertThat(mappingMetaData, notNullValue()); Map<String, Object> mappingSource = mappingMetaData.getSourceAsMap(); assertFalse(mappingSource.isEmpty()); assertTrue(mappingSource.containsKey("properties")); for (String fieldName : fieldNames) { Map<String, Object> mappingProperties = (Map<String, Object>) mappingSource.get("properties"); if (fieldName.indexOf('.') != -1) { fieldName = fieldName.replace(".", ".properties."); } assertThat("field " + fieldName + " doesn't exists in mapping " + mappingMetaData.source().string(), XContentMapValues.extractValue(fieldName, mappingProperties), notNullValue()); } } /** Ensures the result counts are as expected, and logs the results if different */ public void assertResultsAndLogOnFailure(long expectedResults, SearchResponse searchResponse) { if (searchResponse.getHits().getTotalHits() != expectedResults) { StringBuilder sb = new StringBuilder("search result contains ["); sb.append(searchResponse.getHits().getTotalHits()).append("] results. expected [").append(expectedResults).append("]"); String failMsg = sb.toString(); for (SearchHit hit : searchResponse.getHits().getHits()) { sb.append("\n-> _index: [").append(hit.getIndex()).append("] type [").append(hit.getType()) .append("] id [").append(hit.id()).append("]"); } logger.warn("{}", sb); fail(failMsg); } } /** * Restricts the given index to be allocated on <code>n</code> nodes using the allocation deciders. * Yet if the shards can't be allocated on any other node shards for this index will remain allocated on * more than <code>n</code> nodes. 
*/ public void allowNodes(String index, int n) { assert index != null; internalCluster().ensureAtLeastNumDataNodes(n); Settings.Builder builder = Settings.builder(); if (n > 0) { getExcludeSettings(index, n, builder); } Settings build = builder.build(); if (!build.getAsMap().isEmpty()) { logger.debug("allowNodes: updating [{}]'s setting to [{}]", index, build.toDelimitedString(';')); client().admin().indices().prepareUpdateSettings(index).setSettings(build).execute().actionGet(); } } /** * Ensures the cluster has a green state via the cluster health API. This method will also wait for relocations. * It is useful to ensure that all action on the cluster have finished and all shards that were currently relocating * are now allocated and started. */ public ClusterHealthStatus ensureGreen(String... indices) { return ensureGreen(TimeValue.timeValueSeconds(30), indices); } /** * Ensures the cluster has a green state via the cluster health API. This method will also wait for relocations. * It is useful to ensure that all action on the cluster have finished and all shards that were currently relocating * are now allocated and started. * * @param timeout time out value to set on {@link org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest} */ public ClusterHealthStatus ensureGreen(TimeValue timeout, String... indices) { ClusterHealthResponse actionGet = client().admin().cluster() .health(Requests.clusterHealthRequest(indices).timeout(timeout).waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForRelocatingShards(0)).actionGet(); if (actionGet.isTimedOut()) { logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); fail("timed out waiting for green state"); } assertThat(actionGet.getStatus(), equalTo(ClusterHealthStatus.GREEN)); logger.debug("indices {} are green", indices.length == 0 ? 
"[_all]" : indices); return actionGet.getStatus(); } /** * Waits for all relocating shards to become active using the cluster health API. */ public ClusterHealthStatus waitForRelocation() { return waitForRelocation(null); } /** * Waits for all relocating shards to become active and the cluster has reached the given health status * using the cluster health API. */ public ClusterHealthStatus waitForRelocation(ClusterHealthStatus status) { ClusterHealthRequest request = Requests.clusterHealthRequest().waitForRelocatingShards(0); if (status != null) { request.waitForStatus(status); } ClusterHealthResponse actionGet = client().admin().cluster() .health(request).actionGet(); if (actionGet.isTimedOut()) { logger.info("waitForRelocation timed out (status={}), cluster state:\n{}\n{}", status, client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); assertThat("timed out waiting for relocation", actionGet.isTimedOut(), equalTo(false)); } if (status != null) { assertThat(actionGet.getStatus(), equalTo(status)); } return actionGet.getStatus(); } /** * Waits until at least a give number of document is visible for searchers * * @param numDocs number of documents to wait for. * @return the actual number of docs seen. */ public long waitForDocs(final long numDocs) throws InterruptedException { return waitForDocs(numDocs, null); } /** * Waits until at least a give number of document is visible for searchers * * @param numDocs number of documents to wait for * @param indexer a {@link org.elasticsearch.test.BackgroundIndexer}. If supplied it will be first checked for documents indexed. * This saves on unneeded searches. * @return the actual number of docs seen. 
*/ public long waitForDocs(final long numDocs, final @Nullable BackgroundIndexer indexer) throws InterruptedException { // indexing threads can wait for up to ~1m before retrying when they first try to index into a shard which is not STARTED. return waitForDocs(numDocs, 90, TimeUnit.SECONDS, indexer); } /** * Waits until at least a give number of document is visible for searchers * * @param numDocs number of documents to wait for * @param maxWaitTime if not progress have been made during this time, fail the test * @param maxWaitTimeUnit the unit in which maxWaitTime is specified * @param indexer a {@link org.elasticsearch.test.BackgroundIndexer}. If supplied it will be first checked for documents indexed. * This saves on unneeded searches. * @return the actual number of docs seen. */ public long waitForDocs(final long numDocs, int maxWaitTime, TimeUnit maxWaitTimeUnit, final @Nullable BackgroundIndexer indexer) throws InterruptedException { final AtomicLong lastKnownCount = new AtomicLong(-1); long lastStartCount = -1; BooleanSupplier testDocs = () -> { if (indexer != null) { lastKnownCount.set(indexer.totalIndexedDocs()); } if (lastKnownCount.get() >= numDocs) { try { long count = client().prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet().getHits().totalHits(); if (count == lastKnownCount.get()) { // no progress - try to refresh for the next time client().admin().indices().prepareRefresh().get(); } lastKnownCount.set(count); } catch (Throwable e) { // count now acts like search and barfs if all shards failed... logger.debug("failed to executed count", e); return false; } logger.debug("[{}] docs visible for search. waiting for [{}]", lastKnownCount.get(), numDocs); } else { logger.debug("[{}] docs indexed. 
waiting for [{}]", lastKnownCount.get(), numDocs); } return lastKnownCount.get() >= numDocs; }; while (!awaitBusy(testDocs, maxWaitTime, maxWaitTimeUnit)) { if (lastStartCount == lastKnownCount.get()) { // we didn't make any progress fail("failed to reach " + numDocs + "docs"); } lastStartCount = lastKnownCount.get(); } return lastKnownCount.get(); } /** * Sets the cluster's minimum master node and make sure the response is acknowledge. * Note: this doesn't guarantee that the new setting has taken effect, just that it has been received by all nodes. */ public void setMinimumMasterNodes(int n) { assertTrue(client().admin().cluster().prepareUpdateSettings().setTransientSettings( Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), n)) .get().isAcknowledged()); } /** * Ensures the cluster has a yellow state via the cluster health API. */ public ClusterHealthStatus ensureYellow(String... indices) { ClusterHealthResponse actionGet = client().admin().cluster() .health(Requests.clusterHealthRequest(indices).waitForRelocatingShards(0).waitForYellowStatus().waitForEvents(Priority.LANGUID)).actionGet(); if (actionGet.isTimedOut()) { logger.info("ensureYellow timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); assertThat("timed out waiting for yellow", actionGet.isTimedOut(), equalTo(false)); } logger.debug("indices {} are yellow", indices.length == 0 ? "[_all]" : indices); return actionGet.getStatus(); } /** * Prints the current cluster state as debug logging. */ public void logClusterState() { logger.debug("cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint()); } /** * Prints the segments info for the given indices as debug logging. */ public void logSegmentsState(String... 
indices) throws Exception { IndicesSegmentResponse segsRsp = client().admin().indices().prepareSegments(indices).get(); logger.debug("segments {} state: \n{}", indices.length == 0 ? "[_all]" : indices, segsRsp.toXContent(JsonXContent.contentBuilder().prettyPrint(), ToXContent.EMPTY_PARAMS).string()); } /** * Prints current memory stats as info logging. */ public void logMemoryStats() { logger.info("memory: {}", XContentHelper.toString(client().admin().cluster().prepareNodesStats().clear().setJvm(true).get())); } void ensureClusterSizeConsistency() { if (cluster() != null) { // if static init fails the cluster can be null logger.trace("Check consistency for [{}] nodes", cluster().size()); assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(cluster().size())).get()); } } /** * Verifies that all nodes that have the same version of the cluster state as master have same cluster state */ protected void ensureClusterStateConsistency() throws IOException { if (cluster() != null) { ClusterState masterClusterState = client().admin().cluster().prepareState().all().get().getState(); byte[] masterClusterStateBytes = ClusterState.Builder.toBytes(masterClusterState); // remove local node reference masterClusterState = ClusterState.Builder.fromBytes(masterClusterStateBytes, null); Map<String, Object> masterStateMap = convertToMap(masterClusterState); int masterClusterStateSize = ClusterState.Builder.toBytes(masterClusterState).length; String masterId = masterClusterState.nodes().getMasterNodeId(); for (Client client : cluster().getClients()) { ClusterState localClusterState = client.admin().cluster().prepareState().all().setLocal(true).get().getState(); byte[] localClusterStateBytes = ClusterState.Builder.toBytes(localClusterState); // remove local node reference localClusterState = ClusterState.Builder.fromBytes(localClusterStateBytes, null); final Map<String, Object> localStateMap = convertToMap(localClusterState); final int 
localClusterStateSize = ClusterState.Builder.toBytes(localClusterState).length; // Check that the non-master node has the same version of the cluster state as the master and // that the master node matches the master (otherwise there is no requirement for the cluster state to match) if (masterClusterState.version() == localClusterState.version() && masterId.equals(localClusterState.nodes().getMasterNodeId())) { try { assertEquals("clusterstate UUID does not match", masterClusterState.stateUUID(), localClusterState.stateUUID()); // We cannot compare serialization bytes since serialization order of maps is not guaranteed // but we can compare serialization sizes - they should be the same assertEquals("clusterstate size does not match", masterClusterStateSize, localClusterStateSize); // Compare JSON serialization assertNull("clusterstate JSON serialization does not match", differenceBetweenMapsIgnoringArrayOrder(masterStateMap, localStateMap)); } catch (AssertionError error) { logger.error("Cluster state from master:\n{}\nLocal cluster state:\n{}", masterClusterState.toString(), localClusterState.toString()); throw error; } } } } } /** * Ensures the cluster is in a searchable state for the given indices. This means a searchable copy of each * shard is available on the cluster. */ protected ClusterHealthStatus ensureSearchable(String... indices) { // this is just a temporary thing but it's easier to change if it is encapsulated. 
return ensureGreen(indices);
    }

    // Waits until the cluster has exactly nodeCount nodes, using a 30s timeout.
    protected void ensureStableCluster(int nodeCount) {
        ensureStableCluster(nodeCount, TimeValue.timeValueSeconds(30));
    }

    // Waits until the cluster has exactly nodeCount nodes within the given timeout.
    protected void ensureStableCluster(int nodeCount, TimeValue timeValue) {
        ensureStableCluster(nodeCount, timeValue, false, null);
    }

    // Waits until the cluster has exactly nodeCount nodes, issuing the health request
    // through viaNode (or a random node when null), with a 30s timeout.
    protected void ensureStableCluster(int nodeCount, @Nullable String viaNode) {
        ensureStableCluster(nodeCount, TimeValue.timeValueSeconds(30), false, viaNode);
    }

    /**
     * Waits until the cluster consists of exactly {@code nodeCount} nodes with no
     * relocating shards, failing the test if that state is not reached in time.
     *
     * @param nodeCount the expected number of nodes
     * @param timeValue how long to wait before failing
     * @param local     whether the health request is evaluated on the local node only
     * @param viaNode   the node to send the health request through; a random node when {@code null}
     */
    protected void ensureStableCluster(int nodeCount, TimeValue timeValue, boolean local, @Nullable String viaNode) {
        if (viaNode == null) {
            viaNode = randomFrom(internalCluster().getNodeNames());
        }
        logger.debug("ensuring cluster is stable with [{}] nodes. access node: [{}]. timeout: [{}]", nodeCount, viaNode, timeValue);
        ClusterHealthResponse clusterHealthResponse = client(viaNode).admin().cluster().prepareHealth()
                .setWaitForEvents(Priority.LANGUID)
                .setWaitForNodes(Integer.toString(nodeCount))
                .setTimeout(timeValue)
                .setLocal(local)
                .setWaitForRelocatingShards(0)
                .get();
        if (clusterHealthResponse.isTimedOut()) {
            // Include the full cluster state in the failure message for diagnosis.
            ClusterStateResponse stateResponse = client(viaNode).admin().cluster().prepareState().get();
            fail("failed to reach a stable cluster of [" + nodeCount + "] nodes. Tried via [" + viaNode + "]. 
last cluster state:\n" + stateResponse.getState().prettyPrint()); } assertThat(clusterHealthResponse.isTimedOut(), is(false)); } /** * Syntactic sugar for: * <pre> * client().prepareIndex(index, type).setSource(source).execute().actionGet(); * </pre> */ protected final IndexResponse index(String index, String type, XContentBuilder source) { return client().prepareIndex(index, type).setSource(source).execute().actionGet(); } /** * Syntactic sugar for: * <pre> * client().prepareIndex(index, type).setSource(source).execute().actionGet(); * </pre> */ protected final IndexResponse index(String index, String type, String id, Map<String, Object> source) { return client().prepareIndex(index, type, id).setSource(source).execute().actionGet(); } /** * Syntactic sugar for: * <pre> * client().prepareGet(index, type, id).execute().actionGet(); * </pre> */ protected final GetResponse get(String index, String type, String id) { return client().prepareGet(index, type, id).execute().actionGet(); } /** * Syntactic sugar for: * <pre> * return client().prepareIndex(index, type, id).setSource(source).execute().actionGet(); * </pre> */ protected final IndexResponse index(String index, String type, String id, XContentBuilder source) { return client().prepareIndex(index, type, id).setSource(source).execute().actionGet(); } /** * Syntactic sugar for: * <pre> * return client().prepareIndex(index, type, id).setSource(source).execute().actionGet(); * </pre> */ protected final IndexResponse index(String index, String type, String id, Object... source) { return client().prepareIndex(index, type, id).setSource(source).execute().actionGet(); } /** * Syntactic sugar for: * <pre> * return client().prepareIndex(index, type, id).setSource(source).execute().actionGet(); * </pre> * <p> * where source is a String. 
*/
    protected final IndexResponse index(String index, String type, String id, String source) {
        return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
    }

    /**
     * Waits for relocations and refreshes all indices in the cluster.
     *
     * @param indices the indices to refresh; an empty array means all indices
     * @see #waitForRelocation()
     */
    protected final RefreshResponse refresh(String... indices) {
        waitForRelocation();
        // TODO RANDOMIZE with flush?
        RefreshResponse actionGet = client().admin().indices().prepareRefresh(indices).execute().actionGet();
        assertNoFailures(actionGet);
        return actionGet;
    }

    /**
     * Flushes and refreshes all indices in the cluster
     */
    protected final void flushAndRefresh(String... indices) {
        flush(indices);
        refresh(indices);
    }

    /**
     * Flush some or all indices in the cluster.
     */
    protected final FlushResponse flush(String... indices) {
        waitForRelocation();
        FlushResponse actionGet = client().admin().indices().prepareFlush(indices).setWaitIfOngoing(true).execute().actionGet();
        for (ShardOperationFailedException failure : actionGet.getShardFailures()) {
            // A flush may legitimately fail while a shard is unavailable; any other
            // failure status is a real test failure.
            assertThat("unexpected flush failure " + failure.reason(), failure.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
        }
        return actionGet;
    }

    /**
     * Waits for all relocations and force merge all indices in the cluster to 1 segment.
     */
    protected ForceMergeResponse forceMerge() {
        waitForRelocation();
        ForceMergeResponse actionGet = client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet();
        assertNoFailures(actionGet);
        return actionGet;
    }

    /**
     * Returns <code>true</code> iff the given index exists otherwise <code>false</code>
     */
    protected boolean indexExists(String index) {
        IndicesExistsResponse actionGet = client().admin().indices().prepareExists(index).execute().actionGet();
        return actionGet.isExists();
    }

    /**
     * Syntactic sugar for enabling allocation for <code>indices</code>
     */
    protected final void enableAllocation(String...
indices) { client().admin().indices().prepareUpdateSettings(indices).setSettings(Settings.builder().put( EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all" )).get(); } /** * Syntactic sugar for disabling allocation for <code>indices</code> */ protected final void disableAllocation(String... indices) { client().admin().indices().prepareUpdateSettings(indices).setSettings(Settings.builder().put( EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none" )).get(); } /** * Returns a random admin client. This client can either be a node or a transport client pointing to any of * the nodes in the cluster. */ protected AdminClient admin() { return client().admin(); } /** * Convenience method that forwards to {@link #indexRandom(boolean, List)}. */ public void indexRandom(boolean forceRefresh, IndexRequestBuilder... builders) throws InterruptedException, ExecutionException { indexRandom(forceRefresh, Arrays.asList(builders)); } public void indexRandom(boolean forceRefresh, boolean dummyDocuments, IndexRequestBuilder... builders) throws InterruptedException, ExecutionException { indexRandom(forceRefresh, dummyDocuments, Arrays.asList(builders)); } private static final String RANDOM_BOGUS_TYPE = "RANDOM_BOGUS_TYPE______"; /** * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either * indexes them in a blocking or async fashion. This is very useful to catch problems that relate to internal document * ids or index segment creations. Some features might have bug when a given document is the first or the last in a * segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index * layout. * * @param forceRefresh if <tt>true</tt> all involved indices are refreshed once the documents are indexed. 
Additionally if <tt>true</tt> * some empty dummy documents are may be randomly inserted into the document list and deleted once all documents are indexed. * This is useful to produce deleted documents on the server side. * @param builders the documents to index. * @see #indexRandom(boolean, boolean, java.util.List) */ public void indexRandom(boolean forceRefresh, List<IndexRequestBuilder> builders) throws InterruptedException, ExecutionException { indexRandom(forceRefresh, forceRefresh, builders); } /** * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either * indexes they in a blocking or async fashion. This is very useful to catch problems that relate to internal document * ids or index segment creations. Some features might have bug when a given document is the first or the last in a * segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index * layout. * * @param forceRefresh if <tt>true</tt> all involved indices are refreshed once the documents are indexed. * @param dummyDocuments if <tt>true</tt> some empty dummy documents may be randomly inserted into the document list and deleted once * all documents are indexed. This is useful to produce deleted documents on the server side. * @param builders the documents to index. */ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, List<IndexRequestBuilder> builders) throws InterruptedException, ExecutionException { indexRandom(forceRefresh, dummyDocuments, true, builders); } /** * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either * indexes they in a blocking or async fashion. This is very useful to catch problems that relate to internal document * ids or index segment creations. Some features might have bug when a given document is the first or the last in a * segment or if only one document is in a segment etc. 
This method prevents issues like this by randomizing the index * layout. * * @param forceRefresh if <tt>true</tt> all involved indices are refreshed once the documents are indexed. * @param dummyDocuments if <tt>true</tt> some empty dummy documents may be randomly inserted into the document list and deleted once * all documents are indexed. This is useful to produce deleted documents on the server side. * @param maybeFlush if <tt>true</tt> this method may randomly execute full flushes after index operations. * @param builders the documents to index. */ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean maybeFlush, List<IndexRequestBuilder> builders) throws InterruptedException, ExecutionException { Random random = random(); Set<String> indicesSet = new HashSet<>(); for (IndexRequestBuilder builder : builders) { indicesSet.add(builder.request().index()); } Set<Tuple<String, String>> bogusIds = new HashSet<>(); if (random.nextBoolean() && !builders.isEmpty() && dummyDocuments) { builders = new ArrayList<>(builders); final String[] indices = indicesSet.toArray(new String[indicesSet.size()]); // inject some bogus docs final int numBogusDocs = scaledRandomIntBetween(1, builders.size() * 2); final int unicodeLen = between(1, 10); for (int i = 0; i < numBogusDocs; i++) { String id = randomRealisticUnicodeOfLength(unicodeLen) + Integer.toString(dummmyDocIdGenerator.incrementAndGet()); String index = RandomPicks.randomFrom(random, indices); bogusIds.add(new Tuple<>(index, id)); builders.add(client().prepareIndex(index, RANDOM_BOGUS_TYPE, id).setSource("{}")); } } final String[] indices = indicesSet.toArray(new String[indicesSet.size()]); Collections.shuffle(builders, random()); final CopyOnWriteArrayList<Tuple<IndexRequestBuilder, Throwable>> errors = new CopyOnWriteArrayList<>(); List<CountDownLatch> inFlightAsyncOperations = new ArrayList<>(); // If you are indexing just a few documents then frequently do it one at a time. 
If many then frequently in bulk. if (builders.size() < FREQUENT_BULK_THRESHOLD ? frequently() : builders.size() < ALWAYS_BULK_THRESHOLD ? rarely() : false) { if (frequently()) { logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), true, false); for (IndexRequestBuilder indexRequestBuilder : builders) { indexRequestBuilder.execute(new PayloadLatchedActionListener<IndexResponse, IndexRequestBuilder>(indexRequestBuilder, newLatch(inFlightAsyncOperations), errors)); postIndexAsyncActions(indices, inFlightAsyncOperations, maybeFlush); } } else { logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), false, false); for (IndexRequestBuilder indexRequestBuilder : builders) { indexRequestBuilder.execute().actionGet(); postIndexAsyncActions(indices, inFlightAsyncOperations, maybeFlush); } } } else { List<List<IndexRequestBuilder>> partition = eagerPartition(builders, Math.min(MAX_BULK_INDEX_REQUEST_SIZE, Math.max(1, (int) (builders.size() * randomDouble())))); logger.info("Index [{}] docs async: [{}] bulk: [{}] partitions [{}]", builders.size(), false, true, partition.size()); for (List<IndexRequestBuilder> segmented : partition) { BulkRequestBuilder bulkBuilder = client().prepareBulk(); for (IndexRequestBuilder indexRequestBuilder : segmented) { bulkBuilder.add(indexRequestBuilder); } BulkResponse actionGet = bulkBuilder.execute().actionGet(); assertThat(actionGet.hasFailures() ? 
actionGet.buildFailureMessage() : "", actionGet.hasFailures(), equalTo(false));
            }
        }
        // Wait for every async indexing request fired above to complete.
        for (CountDownLatch operation : inFlightAsyncOperations) {
            operation.await();
        }
        final List<Throwable> actualErrors = new ArrayList<>();
        for (Tuple<IndexRequestBuilder, Throwable> tuple : errors) {
            if (ExceptionsHelper.unwrapCause(tuple.v2()) instanceof EsRejectedExecutionException) {
                tuple.v1().execute().actionGet(); // re-index if rejected
            } else {
                actualErrors.add(tuple.v2());
            }
        }
        assertThat(actualErrors, emptyIterable());
        if (!bogusIds.isEmpty()) {
            // delete the bogus types again - it might trigger merges or at least holes in the segments and enforces deleted docs!
            for (Tuple<String, String> doc : bogusIds) {
                assertTrue("failed to delete a dummy doc [" + doc.v1() + "][" + doc.v2() + "]",
                    client().prepareDelete(doc.v1(), RANDOM_BOGUS_TYPE, doc.v2()).get().isFound());
            }
        }
        if (forceRefresh) {
            assertNoFailures(client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get());
        }
    }

    // Monotonic suffix appended to the ids of bogus docs injected by indexRandom,
    // so ids stay unique across invocations. NOTE(review): "dummmy" (three m's) is
    // the field's actual historical name; renaming would touch other collapsed lines.
    private AtomicInteger dummmyDocIdGenerator = new AtomicInteger();

    /** Disables an index block for the specified index */
    public static void disableIndexBlock(String index, String block) {
        Settings settings = Settings.builder().put(block, false).build();
        client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
    }

    /** Enables an index block for the specified index */
    public static void enableIndexBlock(String index, String block) {
        Settings settings = Settings.builder().put(block, true).build();
        client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
    }

    /** Sets or unsets the cluster read_only mode **/
    public static void setClusterReadOnly(boolean value) {
        Settings settings = Settings.builder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), value).build();
        assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get());
    }

    private static CountDownLatch
newLatch(List<CountDownLatch> latches) {
        CountDownLatch l = new CountDownLatch(1);
        latches.add(l);
        return l;
    }

    /**
     * Maybe refresh, force merge, or flush then always make sure there aren't too many in flight async operations.
     */
    private void postIndexAsyncActions(String[] indices, List<CountDownLatch> inFlightAsyncOperations, boolean maybeFlush) throws InterruptedException {
        if (rarely()) {
            if (rarely()) {
                // Occasionally fire an async refresh.
                client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute(
                    new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
            } else if (maybeFlush && rarely()) {
                // Randomly pick between a regular flush and a synced flush.
                if (randomBoolean()) {
                    client().admin().indices().prepareFlush(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute(
                        new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
                } else {
                    client().admin().indices().syncedFlush(syncedFlushRequest(indices).indicesOptions(IndicesOptions.lenientExpandOpen()),
                        new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
                }
            } else if (rarely()) {
                client().admin().indices().prepareForceMerge(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).setMaxNumSegments(between(1, 10)).setFlush(maybeFlush && randomBoolean()).execute(
                    new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
            }
        }
        // Cap the number of in-flight async operations by awaiting a randomly chosen one.
        while (inFlightAsyncOperations.size() > MAX_IN_FLIGHT_ASYNC_INDEXES) {
            int waitFor = between(0, inFlightAsyncOperations.size() - 1);
            inFlightAsyncOperations.remove(waitFor).await();
        }
    }

    /**
     * The scope of a test cluster used together with
     * {@link ESIntegTestCase.ClusterScope} annotations on {@link ESIntegTestCase} subclasses.
     */
    public enum Scope {
        /**
         * A cluster shared across all method in a single test suite
         */
        SUITE,
        /**
         * A test exclusive test cluster
         */
        TEST
    }

    /**
     * Defines a cluster scope for a {@link ESIntegTestCase} subclass. 
* By default if no {@link ClusterScope} annotation is present {@link ESIntegTestCase.Scope#SUITE} is used * together with randomly chosen settings like number of nodes etc. */ @Retention(RetentionPolicy.RUNTIME) @Target({ElementType.TYPE}) public @interface ClusterScope { /** * Returns the scope. {@link ESIntegTestCase.Scope#SUITE} is default. */ Scope scope() default Scope.SUITE; /** * Returns the number of nodes in the cluster. Default is <tt>-1</tt> which means * a random number of nodes is used, where the minimum and maximum number of nodes * are either the specified ones or the default ones if not specified. */ int numDataNodes() default -1; /** * Returns the minimum number of nodes in the cluster. Default is <tt>-1</tt>. * Ignored when {@link ClusterScope#numDataNodes()} is set. */ int minNumDataNodes() default -1; /** * Returns the maximum number of nodes in the cluster. Default is <tt>-1</tt>. * Ignored when {@link ClusterScope#numDataNodes()} is set. */ int maxNumDataNodes() default -1; /** * Returns the number of client nodes in the cluster. Default is {@link InternalTestCluster#DEFAULT_NUM_CLIENT_NODES}, a * negative value means that the number of client nodes will be randomized. */ int numClientNodes() default InternalTestCluster.DEFAULT_NUM_CLIENT_NODES; /** * Returns the transport client ratio. By default this returns <code>-1</code> which means a random * ratio in the interval <code>[0..1]</code> is used. */ double transportClientRatio() default -1; /** * Return whether or not to enable dynamic templates for the mappings. 
*/
        boolean randomDynamicTemplates() default true;
    }

    // Counts the latch down on completion (success or failure); failures are routed
    // through addError(Throwable) so subclasses can collect them.
    private class LatchedActionListener<Response> implements ActionListener<Response> {
        private final CountDownLatch latch;

        public LatchedActionListener(CountDownLatch latch) {
            this.latch = latch;
        }

        @Override
        public final void onResponse(Response response) {
            latch.countDown();
        }

        @Override
        public final void onFailure(Throwable t) {
            try {
                logger.info("Action Failed", t);
                addError(t);
            } finally {
                // Always release the latch, even if error collection throws.
                latch.countDown();
            }
        }

        // Hook for subclasses; the base listener only logs the failure.
        protected void addError(Throwable t) {
        }
    }

    // Records each failure together with the request builder that produced it, so
    // callers can retry or report the failing request.
    private class PayloadLatchedActionListener<Response, T> extends LatchedActionListener<Response> {
        private final CopyOnWriteArrayList<Tuple<T, Throwable>> errors;
        private final T builder;

        public PayloadLatchedActionListener(T builder, CountDownLatch latch, CopyOnWriteArrayList<Tuple<T, Throwable>> errors) {
            super(latch);
            this.errors = errors;
            this.builder = builder;
        }

        @Override
        protected void addError(Throwable t) {
            errors.add(new Tuple<>(builder, t));
        }
    }

    /**
     * Clears the given scroll Ids
     */
    public void clearScroll(String... scrollIds) {
        ClearScrollResponse clearResponse = client().prepareClearScroll()
                .setScrollIds(Arrays.asList(scrollIds)).get();
        assertThat(clearResponse.isSucceeded(), equalTo(true));
    }

    // Walks up the class hierarchy looking for the annotation, stopping once we
    // reach ESIntegTestCase (or Object) without finding one.
    private static <A extends Annotation> A getAnnotation(Class<?> clazz, Class<A> annotationClass) {
        if (clazz == Object.class || clazz == ESIntegTestCase.class) {
            return null;
        }
        A annotation = clazz.getAnnotation(annotationClass);
        if (annotation != null) {
            return annotation;
        }
        return getAnnotation(clazz.getSuperclass(), annotationClass);
    }

    private Scope getCurrentClusterScope() {
        return getCurrentClusterScope(this.getClass());
    }

    private static Scope getCurrentClusterScope(Class<?> clazz) {
        ClusterScope annotation = getAnnotation(clazz, ClusterScope.class);
        // if we are not annotated assume suite!
        return annotation == null ?
Scope.SUITE : annotation.scope(); } private int getNumDataNodes() { ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class); return annotation == null ? -1 : annotation.numDataNodes(); } private int getMinNumDataNodes() { ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class); return annotation == null || annotation.minNumDataNodes() == -1 ? InternalTestCluster.DEFAULT_MIN_NUM_DATA_NODES : annotation.minNumDataNodes(); } private int getMaxNumDataNodes() { ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class); return annotation == null || annotation.maxNumDataNodes() == -1 ? InternalTestCluster.DEFAULT_MAX_NUM_DATA_NODES : annotation.maxNumDataNodes(); } private int getNumClientNodes() { ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class); return annotation == null ? InternalTestCluster.DEFAULT_NUM_CLIENT_NODES : annotation.numClientNodes(); } private boolean randomDynamicTemplates() { ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class); return annotation == null || annotation.randomDynamicTemplates(); } /** * This method is used to obtain settings for the <tt>Nth</tt> node in the cluster. * Nodes in this cluster are associated with an ordinal number such that nodes can * be started with specific configurations. This method might be called multiple * times with the same ordinal and is expected to return the same value for each invocation. * In other words subclasses must ensure this method is idempotent. 
*/ protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder() // Default the watermarks to absurdly low to prevent the tests // from failing on nodes without enough disk space .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b") .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b") .put("script.indexed", "true") .put("script.inline", "true") // wait short time for other active shards before actually deleting, default 30s not needed in tests .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT.getKey(), new TimeValue(1, TimeUnit.SECONDS)); return builder.build(); } /** * Returns a collection of plugins that should be loaded on each node. */ protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.emptyList(); } /** * Returns a collection of plugins that should be loaded when creating a transport client. */ protected Collection<Class<? extends Plugin>> transportClientPlugins() { return Collections.emptyList(); } /** Helper method to create list of plugins without specifying generic types. */ @SafeVarargs @SuppressWarnings("varargs") // due to type erasure, the varargs type is non-reifiable, which causes this warning protected final Collection<Class<? extends Plugin>> pluginList(Class<? extends Plugin>... plugins) { return Arrays.asList(plugins); } /** * This method is used to obtain additional settings for clients created by the internal cluster. * These settings will be applied on the client in addition to some randomized settings defined in * the cluster. These settings will also override any other settings the internal cluster might * add by default. 
*/ protected Settings transportClientSettings() { return Settings.EMPTY; } private ExternalTestCluster buildExternalCluster(String clusterAddresses) throws IOException { String[] stringAddresses = clusterAddresses.split(","); TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; int i = 0; for (String stringAddress : stringAddresses) { URL url = new URL("http://" + stringAddress); InetAddress inetAddress = InetAddress.getByName(url.getHost()); transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(inetAddress, url.getPort())); } return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), transportClientPlugins(), transportAddresses); } protected Settings externalClusterClientSettings() { return Settings.EMPTY; } protected TestCluster buildTestCluster(Scope scope, long seed) throws IOException { String clusterAddresses = System.getProperty(TESTS_CLUSTER); if (Strings.hasLength(clusterAddresses)) { if (scope == Scope.TEST) { throw new IllegalArgumentException("Cannot run TEST scope test with " + TESTS_CLUSTER); } return buildExternalCluster(clusterAddresses); } final String nodePrefix; switch (scope) { case TEST: nodePrefix = TEST_CLUSTER_NODE_PREFIX; break; case SUITE: nodePrefix = SUITE_CLUSTER_NODE_PREFIX; break; default: throw new ElasticsearchException("Scope not supported: " + scope); } NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() { @Override public Settings nodeSettings(int nodeOrdinal) { return Settings.builder().put(NetworkModule.HTTP_ENABLED.getKey(), false). put(ESIntegTestCase.this.nodeSettings(nodeOrdinal)).build(); } @Override public Collection<Class<? extends Plugin>> nodePlugins() { return ESIntegTestCase.this.nodePlugins(); } @Override public Settings transportClientSettings() { return ESIntegTestCase.this.transportClientSettings(); } @Override public Collection<Class<? 
extends Plugin>> transportClientPlugins() { return ESIntegTestCase.this.transportClientPlugins(); } }; int numDataNodes = getNumDataNodes(); int minNumDataNodes; int maxNumDataNodes; if (numDataNodes >= 0) { minNumDataNodes = maxNumDataNodes = numDataNodes; } else { minNumDataNodes = getMinNumDataNodes(); maxNumDataNodes = getMaxNumDataNodes(); } SuppressLocalMode noLocal = getAnnotation(this.getClass(), SuppressLocalMode.class); SuppressNetworkMode noNetwork = getAnnotation(this.getClass(), SuppressNetworkMode.class); String nodeMode = InternalTestCluster.configuredNodeMode(); if (noLocal != null && noNetwork != null) { throw new IllegalStateException("Can't suppress both network and local mode"); } else if (noLocal != null) { nodeMode = "network"; } else if (noNetwork != null) { nodeMode = "local"; } Collection<Class<? extends Plugin>> mockPlugins = getMockPlugins(); return new InternalTestCluster(nodeMode, seed, createTempDir(), minNumDataNodes, maxNumDataNodes, InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", nodeConfigurationSource, getNumClientNodes(), InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix, mockPlugins, getClientWrapper()); } /** * Returns a function that allows to wrap / filter all clients that are exposed by the test cluster. This is useful * for debugging or request / response pre and post processing. It also allows to intercept all calls done by the test * framework. By default this method returns an identity function {@link Function#identity()}. */ protected Function<Client,Client> getClientWrapper() { return Function.identity(); } /** Return the mock plugins the cluster should use */ protected Collection<Class<? extends Plugin>> getMockPlugins() { final ArrayList<Class<? 
extends Plugin>> mocks = new ArrayList<>(); if (randomBoolean()) { // sometimes run without those completely if (randomBoolean()) { mocks.add(MockTransportService.TestPlugin.class); } if (randomBoolean()) { mocks.add(MockFSIndexStore.TestPlugin.class); } if (randomBoolean()) { mocks.add(NodeMocksPlugin.class); } if (randomBoolean()) { mocks.add(MockEngineFactoryPlugin.class); } if (randomBoolean()) { mocks.add(MockSearchService.TestPlugin.class); } if (randomBoolean()) { mocks.add(AssertingLocalTransport.TestPlugin.class); } } mocks.add(TestSeedPlugin.class); return Collections.unmodifiableList(mocks); } public static final class TestSeedPlugin extends Plugin { @Override public String name() { return "test-seed-plugin"; } @Override public String description() { return "a test plugin that registers index.tests.seed as an index setting"; } public void onModule(SettingsModule module) { module.registerSetting(INDEX_TEST_SEED_SETTING); } } /** * Returns the client ratio configured via */ private static double transportClientRatio() { String property = System.getProperty(TESTS_CLIENT_RATIO); if (property == null || property.isEmpty()) { return Double.NaN; } return Double.parseDouble(property); } /** * Returns the transport client ratio from the class level annotation or via * {@link System#getProperty(String)} if available. If both are not available this will * return a random ratio in the interval <tt>[0..1]</tt> */ protected double getPerTestTransportClientRatio() { final ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class); double perTestRatio = -1; if (annotation != null) { perTestRatio = annotation.transportClientRatio(); } if (perTestRatio == -1) { return Double.isNaN(TRANSPORT_CLIENT_RATIO) ? 
randomDouble() : TRANSPORT_CLIENT_RATIO; } assert perTestRatio >= 0.0 && perTestRatio <= 1.0; return perTestRatio; } /** * Returns a random JODA Time Zone based on Java Time Zones */ public static DateTimeZone randomDateTimeZone() { DateTimeZone timeZone; // It sounds like some Java Time Zones are unknown by JODA. For example: Asia/Riyadh88 // We need to fallback in that case to a known time zone try { timeZone = DateTimeZone.forTimeZone(RandomizedTest.randomTimeZone()); } catch (IllegalArgumentException e) { timeZone = DateTimeZone.forOffsetHours(randomIntBetween(-12, 12)); } return timeZone; } /** * Returns path to a random directory that can be used to create a temporary file system repo */ public Path randomRepoPath() { if (currentCluster instanceof InternalTestCluster) { return randomRepoPath(((InternalTestCluster) currentCluster).getDefaultSettings()); } else if (currentCluster instanceof CompositeTestCluster) { return randomRepoPath(((CompositeTestCluster) currentCluster).internalCluster().getDefaultSettings()); } throw new UnsupportedOperationException("unsupported cluster type"); } /** * Returns path to a random directory that can be used to create a temporary file system repo */ public static Path randomRepoPath(Settings settings) { Environment environment = new Environment(settings); Path[] repoFiles = environment.repoFiles(); assert repoFiles.length > 0; Path path; do { path = repoFiles[0].resolve(randomAsciiOfLength(10)); } while (Files.exists(path)); return path; } protected NumShards getNumShards(String index) { MetaData metaData = client().admin().cluster().prepareState().get().getState().metaData(); assertThat(metaData.hasIndex(index), equalTo(true)); int numShards = Integer.valueOf(metaData.index(index).getSettings().get(SETTING_NUMBER_OF_SHARDS)); int numReplicas = Integer.valueOf(metaData.index(index).getSettings().get(SETTING_NUMBER_OF_REPLICAS)); return new NumShards(numShards, numReplicas); } /** * Asserts that all shards are allocated on 
nodes matching the given node pattern. */
    public Set<String> assertAllShardsOnNodes(String index, String... pattern) {
        Set<String> nodes = new HashSet<>();
        ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
        for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
            for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
                for (ShardRouting shardRouting : indexShardRoutingTable) {
                    // Only consider currently-assigned shards of the requested index.
                    if (shardRouting.currentNodeId() != null && index.equals(shardRouting.getIndexName())) {
                        String name = clusterState.nodes().get(shardRouting.currentNodeId()).getName();
                        nodes.add(name);
                        assertThat("Allocated on new node: " + name, Regex.simpleMatch(pattern, name), is(true));
                    }
                }
            }
        }
        return nodes;
    }

    // Immutable holder for an index's shard math: primaries, replicas and the totals
    // derived from them.
    protected static class NumShards {
        public final int numPrimaries;
        public final int numReplicas;
        public final int totalNumShards;
        public final int dataCopies;

        private NumShards(int numPrimaries, int numReplicas) {
            this.numPrimaries = numPrimaries;
            this.numReplicas = numReplicas;
            this.dataCopies = numReplicas + 1; // each replica plus the primary
            this.totalNumShards = numPrimaries * dataCopies;
        }
    }

    // Suite-scoped tests keep a static INSTANCE alive across test methods; when it
    // is null the cluster lifecycle runs per test method instead.
    private static boolean runTestScopeLifecycle() {
        return INSTANCE == null;
    }

    @Before
    public final void before() throws Exception {
        if (runTestScopeLifecycle()) {
            printTestMessage("setup");
            beforeInternal();
        }
        printTestMessage("starting");
    }

    @After
    public final void after() throws Exception {
        printTestMessage("finished");
        // Deleting indices is going to clear search contexts implicitly so we
        // need to check that there are no more in-flight search contexts before
        // we remove indices
        super.ensureAllSearchContextsReleased();
        if (runTestScopeLifecycle()) {
            printTestMessage("cleaning up after");
            afterInternal(false);
            printTestMessage("cleaned up after");
        }
    }

    @AfterClass
    public static void afterClass() throws Exception {
        if (!runTestScopeLifecycle()) {
            try {
                INSTANCE.printTestMessage("cleaning up after");
                INSTANCE.afterInternal(true);
            } finally {
                INSTANCE = 
null; } } else { clearClusters(); } SUITE_SEED = null; currentCluster = null; } private static void initializeSuiteScope() throws Exception { Class<?> targetClass = getTestClass(); /** * Note we create these test class instance via reflection * since JUnit creates a new instance per test and that is also * the reason why INSTANCE is static since this entire method * must be executed in a static context. */ assert INSTANCE == null; if (isSuiteScopedTest(targetClass)) { // note we need to do this this way to make sure this is reproducible INSTANCE = (ESIntegTestCase) targetClass.newInstance(); boolean success = false; try { INSTANCE.printTestMessage("setup"); INSTANCE.beforeInternal(); INSTANCE.setupSuiteScopeCluster(); success = true; } finally { if (!success) { afterClass(); } } } else { INSTANCE = null; } } /** * Compute a routing key that will route documents to the <code>shard</code>-th shard * of the provided index. */ protected String routingKeyForShard(String index, String type, int shard) { return internalCluster().routingKeyForShard(resolveIndex(index), type, shard, random()); } /** * Return settings that could be used to start a node that has the given zipped home directory. */ protected Settings prepareBackwardsDataDir(Path backwardsIndex, Object... 
settings) throws IOException { Path indexDir = createTempDir(); Path dataDir = indexDir.resolve("data"); try (InputStream stream = Files.newInputStream(backwardsIndex)) { TestUtil.unzip(stream, indexDir); } assertTrue(Files.exists(dataDir)); // list clusters in the datapath, ignoring anything from extrasfs final Path[] list; try (DirectoryStream<Path> stream = Files.newDirectoryStream(dataDir)) { List<Path> dirs = new ArrayList<>(); for (Path p : stream) { if (!p.getFileName().toString().startsWith("extra")) { dirs.add(p); } } list = dirs.toArray(new Path[0]); } if (list.length != 1) { StringBuilder builder = new StringBuilder("Backwards index must contain exactly one cluster\n"); for (Path line : list) { builder.append(line.toString()).append('\n'); } throw new IllegalStateException(builder.toString()); } Path src = list[0]; Path dest = dataDir.resolve(internalCluster().getClusterName()); assertTrue(Files.exists(src)); Files.move(src, dest); assertFalse(Files.exists(src)); assertTrue(Files.exists(dest)); Settings.Builder builder = Settings.builder() .put(settings) .put(Environment.PATH_DATA_SETTING.getKey(), dataDir.toAbsolutePath()); Path configDir = indexDir.resolve("config"); if (Files.exists(configDir)) { builder.put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath()); } return builder.build(); } protected HttpRequestBuilder httpClient() { final NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get(); final NodeInfo[] nodes = nodeInfos.getNodes(); assertTrue(nodes.length > 0); TransportAddress publishAddress = randomFrom(nodes).getHttp().address().publishAddress(); assertEquals(1, publishAddress.uniqueAddressTypeId()); InetSocketAddress address = ((InetSocketTransportAddress) publishAddress).address(); return new HttpRequestBuilder(HttpClients.createDefault()).host(NetworkAddress.format(address.getAddress())).port(address.getPort()); } /** * This method is executed iff the test is annotated with {@link 
SuiteScopeTestCase} * before the first test of this class is executed. * * @see SuiteScopeTestCase */ protected void setupSuiteScopeCluster() throws Exception { } private static boolean isSuiteScopedTest(Class<?> clazz) { return clazz.getAnnotation(SuiteScopeTestCase.class) != null; } /** * If a test is annotated with {@link SuiteScopeTestCase} * the checks and modifications that are applied to the used test cluster are only done after all tests * of this class are executed. This also has the side-effect of a suite level setup method {@link #setupSuiteScopeCluster()} * that is executed in a separate test instance. Variables that need to be accessible across test instances must be static. */ @Retention(RetentionPolicy.RUNTIME) @Inherited @Target(ElementType.TYPE) public @interface SuiteScopeTestCase { } /** * If used the test will never run in local mode. */ @Retention(RetentionPolicy.RUNTIME) @Inherited @Target(ElementType.TYPE) public @interface SuppressLocalMode { } /** * If used the test will never run in network mode */ @Retention(RetentionPolicy.RUNTIME) @Inherited @Target(ElementType.TYPE) public @interface SuppressNetworkMode { } public static Index resolveIndex(String index) { GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().setIndices(index).get(); assertTrue("index " + index + " not found", getIndexResponse.getSettings().containsKey(index)); String uuid = getIndexResponse.getSettings().get(index).get(IndexMetaData.SETTING_INDEX_UUID); return new Index(index, uuid); } }
apache-2.0
ederign/kie-wb-common
kie-wb-common-stunner/kie-wb-common-stunner-extensions/kie-wb-common-stunner-svg/kie-wb-common-stunner-svg-gen/src/main/java/org/kie/workbench/common/stunner/svg/gen/translator/impl/SVGRectTranslator.java
2656
/* * Copyright 2017 Red Hat, Inc. and/or its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.kie.workbench.common.stunner.svg.gen.translator.impl; import org.kie.workbench.common.stunner.svg.gen.exception.TranslatorException; import org.kie.workbench.common.stunner.svg.gen.model.impl.RectDefinition; import org.kie.workbench.common.stunner.svg.gen.translator.SVGTranslatorContext; import org.kie.workbench.common.stunner.svg.gen.translator.css.SVGAttributeParserUtils; import org.w3c.dom.Element; public class SVGRectTranslator extends AbstractSVGShapeTranslator<Element, RectDefinition> { public static final String WIDTH = "width"; public static final String HEIGHT = "height"; public static final String RX = "rx"; public static final String RY = "ry"; @Override public Class<Element> getInputType() { return Element.class; } @Override public RectDefinition doTranslate(final Element rectElement, final SVGTranslatorContext context) throws TranslatorException { final String rx = rectElement.getAttribute(RX); final String ry = rectElement.getAttribute(RY); final String width = rectElement.getAttribute(WIDTH); final String height = rectElement.getAttribute(HEIGHT); return new RectDefinition(getId(rectElement), SVGAttributeParserUtils.toPixelValue(width), SVGAttributeParserUtils.toPixelValue(height), getCornerRadius(rx, ry)); } @Override public String getTagName() { return "rect"; } private double getCornerRadius(final String rx, final String ry) { final double cx = 
SVGAttributeParserUtils.toPixelValue(rx, 0d); final double cy = SVGAttributeParserUtils.toPixelValue(ry, 0d); return cx > cy ? cx : cy; } }
apache-2.0
makedonsky94/SwipeToClose
app/src/main/java/com/makedonsky/example/DetailImageActivity.java
664
package com.makedonsky.example; import android.os.Bundle; import android.support.annotation.Nullable; import android.support.v7.app.AppCompatActivity; import com.makedonsky.widget.SwipeLayout; import com.makedonsky.widget.SwipeToClose; public class DetailImageActivity extends AppCompatActivity { @Override protected void onCreate(@Nullable Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.detail_image); SwipeToClose .with(this) .withShadowAlpha(1.0f, 0.0f) .withDirection(SwipeLayout.DIRECTION_VERTICAL) .bind(); } }
apache-2.0
divdavem/attester-maven-plugin
src/main/java/com/ariatemplates/attester/maven/RunAttester.java
16121
/* * Copyright 2012 Amadeus s.a.s. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.ariatemplates.attester.maven; import java.io.File; import java.io.FileNotFoundException; import java.util.LinkedList; import java.util.List; import org.apache.maven.model.Dependency; import org.apache.maven.plugin.MojoExecutionException; import org.apache.maven.plugin.MojoFailureException; public abstract class RunAttester extends RunNode { /** * Configuration file to use. More information about the format to use is * documented <a href="https://github.com/ariatemplates/attester#usage" * >here</a>. * * Note that most options can be configured directly from the pom.xml, * without the need for an external configuration file. * * @parameter */ public File configFile; /** * First directory to serve as the root of the web server. This directory is * usually the one containing Javascript tests. (Passed through * <code>--config.resources./</code> to <a * href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter expression="${basedir}/src/test/webapp" */ public File testSourceDirectory; /** * Second directory to serve as the root of the web server. In case a file * is requested and not present in <a * href="#testSourceDirectory">testSourceDirectory</a>, it is looked for in * this directory. This directory is usually the one containing the main * Javascript files of the application. 
If it exists, it is also used as the * default value for the <a * href="#coverageRootDirectory">coverageRootDirectory</a> option. (Passed * through <code>--config.resources./</code> to <a * href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter expression="${basedir}/src/main/webapp" */ public File warSourceDirectory; /** * Third directory to serve as the root of the web server. In case a file is * requested and not present in either <a * href="#warSourceDirectory">warSourceDirectory</a> or <a * href="#testSourceDirectory">testSourceDirectory</a>, it is looked for in * this directory. This directory usually contains any external library * needed by the application. (Passed through * <code>--config.resources./</code> to <a * href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter * expression="${project.build.directory}/${project.build.finalName}" */ public File webappDirectory; /** * Root directory for code coverage instrumentation. Files inside this * directory and matching one of the <a * href="#coverageIncludes">coverageIncludes</a> patterns and not matching * any <a href="#coverageExcludes">coverageExcludes</a> patterns will be * instrumented for code coverage when requested to the web server. Note * that for code coverage instrumentation to be effective, this directory or * one of its parents has to be configured to be served by the web server * (e.g. through <a href="#testSourceDirectory">testSourceDirectory</a>, <a * href="#warSourceDirectory">warSourceDirectory</a> or <a * href="#webappDirectory">webappDirectory</a>). The default value for this * parameter is the value of the <a * href="#warSourceDirectory">warSourceDirectory</a> parameter. 
(Passed * through <code>--config.coverage.files.rootDirectory</code> to <a * href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter */ public File coverageRootDirectory; /** * List of file patterns to be included for code coverage instrumentation. * See <a href="#coverageRootDirectory">coverageRootDirectory</a> for more * information. This property is ignored if <a * href="#coverageRootDirectory">coverageRootDirectory</a> does not exist or * is not a directory. (Passed through * <code>--config.coverage.files.includes</code> to <a * href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter */ public String[] coverageIncludes = new String[] { "**/*.js" }; /** * List of file patterns to be excluded from code coverage instrumentation. * See <a href="#coverageRootDirectory">coverageRootDirectory</a> for more * information. This property is ignored if <a * href="#coverageRootDirectory">coverageRootDirectory</a> does not exist or * is not a directory. (Passed through * <code>--config.coverage.files.excludes</code> to <a * href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter */ public String[] coverageExcludes; /** * List of browsers to test. (Passed through <code>--config.browsers</code> * to <a href="https://github.com/ariatemplates/attester#usage" * >attester</a>) * * @parameter */ public String[] browsers; /** * Aria Templates bootstrap file. (Passed through * <code>--config.tests.aria-templates.bootstrap</code> to <a * href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter expression="/aria/aria-templates-${at.version}.js" */ public String ariaTemplatesBootstrap; /** * Aria Templates extra scripts to be included before running each test. 
* (Passed through <code>--config.tests.aria-templates.extraScripts</code> to * <a href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter */ public String[] ariaTemplatesExtraScripts; /** * Aria Templates single test classpath to include. If this parameter is * defined, ariaTemplatesClasspathsIncludes and * ariaTemplatesClasspathsExcludes are ignored. It allows to easily run a * single test from the command line. (Passed through * <code>--config.tests.aria-templates.classpaths.includes</code> to <a * href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter expression="${attester.ariaTemplatesClasspath}" */ public String ariaTemplatesClasspath; /** * Aria Templates test classpaths to include. (Passed through * <code>--config.tests.aria-templates.classpaths.includes</code> to <a * href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter */ public String[] ariaTemplatesClasspathsIncludes = new String[] { "MainTestSuite" }; /** * Aria Templates test classpaths to exclude. (Passed through * <code>--config.tests.aria-templates.classpaths.excludes</code> to <a * href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter */ public String[] ariaTemplatesClasspathsExcludes; /** * Directory for the set of JUnit-style report files. (Passed through * <code>--config.test-reports.xml-directory</code> to <a * href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter expression="${project.build.directory}/jstestdriver" */ public File xmlReportsDirectory; /** * Single JUnit-style file report. (Passed through * <code>--config.test-reports.xml-file</code> to <a * href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter expression="${project.build.directory}/atjstestsReport.xml" */ public File xmlReportFile; /** * JSON file report. 
(Passed through * <code>--config.test-reports.json-file</code> to <a * href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter expression="${project.build.directory}/atjstestsReport.json" */ public File jsonReportFile; /** * JSON coverage file report. (Passed through * <code>--config.coverage-reports.json-file</code> to <a * href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter * expression="${project.build.directory}/atjstestsCoverageReport.json" */ public File jsonCoverageReportFile; /** * <a href="http://ltp.sourceforge.net/coverage/lcov/geninfo.1.php">lcov</a> * coverage file report. (Passed through * <code>--config.coverage-reports.lcov-file</code> to <a * href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter expression= * "${project.build.directory}/jstestdriver/jsTestDriver.conf-coverage.dat" */ public File lcovCoverageReportFile; /** * Path to the attester directory. If not defined, attester is extracted * from the the following maven artifact: * <code>com.ariatemplates.attester:attester:zip:project</code> * * @parameter expression="${com.ariatemplates.attester.path}" */ public File attesterPath; /** * Parent directory in which the directory containing the extracted attester files * will be created, if it does not already exist. * Files are extracted from the following maven artifact: * <code>com.ariatemplates.attester:attester:zip:project</code> * * This parameter is only used if attesterPath is not defined. * * @parameter expression="${project.build.directory}" */ public File attesterExtractionParentDirectory; /** * Host for the internal web server. (Passed through <code>--host</code> to * <a href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter expression="${attester.host}" */ public String host; /** * Port for the internal web server. 
(Passed through <code>--port</code> to * <a href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter expression="${attester.port}" */ public Integer port; /** * Enables or disables colors. (Passes <code>--colors</code> or * <code>--no-colors</code> to <a href="https://github.com/ariatemplates/attester#usage" >attester</a>) * * @parameter expression="${attester.colors}" */ public boolean colors = false; private static final String PATH_IN_ATTESTER_DIRECTORY = "bin" + File.separator + "attester.js"; protected File attesterJsMainFile; public static Dependency getAttesterDependency() { Dependency dependency = new Dependency(); dependency.setGroupId("com.ariatemplates.attester"); dependency.setArtifactId("attester"); dependency.setVersion(RunAttester.class.getPackage().getImplementationVersion()); dependency.setClassifier("project"); dependency.setType("zip"); return dependency; } protected File extractDependency(File property, Dependency dependency, String pathAfterProperty, String pathAfterDependency) { File res; try { if (property != null) { res = new File(property, pathAfterProperty); } else { ArtifactExtractor extractor = new ArtifactExtractor(); extractor.outputParentDirectory = attesterExtractionParentDirectory; extractor.setLog(this.getLog()); String outputDirectory = extractor.inplaceExtractDependency(session.getLocalRepository(), dependency); res = new File(outputDirectory, pathAfterDependency); } if (!res.exists()) { throw new FileNotFoundException("Could not find file: " + res.getAbsolutePath()); } } catch (Exception e) { throw new RuntimeException("Failed to find or extract " + dependency.getArtifactId(), e); } return res; } protected void extractAttester() { attesterJsMainFile = extractDependency(attesterPath, getAttesterDependency(), PATH_IN_ATTESTER_DIRECTORY, "attester" + File.separator + PATH_IN_ATTESTER_DIRECTORY); } @Override public void execute() throws MojoExecutionException, MojoFailureException { extractAttester(); 
super.execute(); } protected void addMultipleOptions(List<String> optionsArray, String optionName, String[] array) { if (array != null) { for (String item : array) { optionsArray.add(optionName); optionsArray.add(item); } } } @Override protected List<String> getNodeArguments() { List<String> res = new LinkedList<String>(); res.add(attesterJsMainFile.getAbsolutePath()); if (configFile != null) { res.add(configFile.getAbsolutePath()); } if (port != null) { res.add("--port"); res.add(port.toString()); } if (host != null) { res.add("--host"); res.add(host); } res.add(colors ? "--colors" : "--no-colors"); if (testSourceDirectory != null && testSourceDirectory.isDirectory()) { res.add("--config.resources./"); res.add(testSourceDirectory.getAbsolutePath()); } if (warSourceDirectory != null && warSourceDirectory.isDirectory()) { res.add("--config.resources./"); res.add(warSourceDirectory.getAbsolutePath()); if (coverageRootDirectory == null) { coverageRootDirectory = warSourceDirectory; } } if (webappDirectory != null && webappDirectory.isDirectory()) { res.add("--config.resources./"); res.add(webappDirectory.getAbsolutePath()); } if (coverageRootDirectory != null && coverageRootDirectory.isDirectory()) { res.add("--config.coverage.files.rootDirectory"); res.add(coverageRootDirectory.getAbsolutePath()); addMultipleOptions(res, "--config.coverage.files.includes", coverageIncludes); addMultipleOptions(res, "--config.coverage.files.excludes", coverageExcludes); } res.add("--config.test-reports.xml-directory"); res.add(xmlReportsDirectory.getAbsolutePath()); res.add("--config.test-reports.xml-file"); res.add(xmlReportFile.getAbsolutePath()); res.add("--config.test-reports.json-file"); res.add(jsonReportFile.getAbsolutePath()); res.add("--config.coverage-reports.json-file"); res.add(jsonCoverageReportFile.getAbsolutePath()); res.add("--config.coverage-reports.lcov-file"); res.add(lcovCoverageReportFile.getAbsolutePath()); addMultipleOptions(res, "--config.browsers", browsers); 
res.add("--config.tests.aria-templates.bootstrap"); res.add(ariaTemplatesBootstrap); if (ariaTemplatesClasspath != null) { res.add("--config.tests.aria-templates.classpaths.includes"); res.add(ariaTemplatesClasspath); } else { addMultipleOptions(res, "--config.tests.aria-templates.classpaths.includes", ariaTemplatesClasspathsIncludes); addMultipleOptions(res, "--config.tests.aria-templates.classpaths.excludes", ariaTemplatesClasspathsExcludes); } addMultipleOptions(res, "--config.tests.aria-templates.extraScripts", ariaTemplatesExtraScripts); addExtraAttesterOptions(res); res.addAll(arguments); return res; } protected void addExtraAttesterOptions(List<String> list) { } }
apache-2.0
UniTime/unitime
JavaSource/org/unitime/timetable/onlinesectioning/solver/ComputeSuggestionsAction.java
28511
/* * Licensed to The Apereo Foundation under one or more contributor license * agreements. See the NOTICE file distributed with this work for * additional information regarding copyright ownership. * * The Apereo Foundation licenses this file to you under the Apache License, * Version 2.0 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. * */ package org.unitime.timetable.onlinesectioning.solver; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.Hashtable; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeSet; import org.cpsolver.ifs.assignment.Assignment; import org.cpsolver.ifs.assignment.AssignmentComparator; import org.cpsolver.ifs.assignment.AssignmentMap; import org.cpsolver.studentsct.extension.StudentQuality; import org.cpsolver.studentsct.heuristics.selection.BranchBoundSelection.BranchBoundNeighbour; import org.cpsolver.studentsct.model.Config; import org.cpsolver.studentsct.model.Course; import org.cpsolver.studentsct.model.CourseRequest; import org.cpsolver.studentsct.model.Enrollment; import org.cpsolver.studentsct.model.FreeTimeRequest; import org.cpsolver.studentsct.model.Request; import org.cpsolver.studentsct.model.SctAssignment; import org.cpsolver.studentsct.model.Section; import org.cpsolver.studentsct.model.Student; import org.cpsolver.studentsct.model.Subpart; import 
org.cpsolver.studentsct.online.MaxOverExpectedConstraint; import org.cpsolver.studentsct.online.OnlineReservation; import org.cpsolver.studentsct.online.OnlineSectioningModel; import org.cpsolver.studentsct.online.expectations.MinimizeConflicts; import org.cpsolver.studentsct.online.expectations.NeverOverExpected; import org.cpsolver.studentsct.online.expectations.OverExpectedCriterion; import org.cpsolver.studentsct.online.selection.BestPenaltyCriterion; import org.cpsolver.studentsct.online.selection.MultiCriteriaBranchAndBoundSelection; import org.cpsolver.studentsct.online.selection.MultiCriteriaBranchAndBoundSuggestions; import org.cpsolver.studentsct.online.selection.SuggestionsBranchAndBound; import org.cpsolver.studentsct.reservation.IndividualRestriction; import org.cpsolver.studentsct.reservation.Reservation; import org.cpsolver.studentsct.reservation.Restriction; import org.joda.time.Days; import org.joda.time.LocalDate; import org.unitime.localization.impl.Localization; import org.unitime.timetable.defaults.ApplicationProperty; import org.unitime.timetable.gwt.resources.StudentSectioningMessages; import org.unitime.timetable.gwt.server.DayCode; import org.unitime.timetable.gwt.shared.ClassAssignmentInterface; import org.unitime.timetable.gwt.shared.CourseRequestInterface; import org.unitime.timetable.gwt.shared.SectioningException; import org.unitime.timetable.onlinesectioning.OnlineSectioningLog; import org.unitime.timetable.onlinesectioning.OnlineSectioningServer; import org.unitime.timetable.onlinesectioning.OnlineSectioningHelper; import org.unitime.timetable.onlinesectioning.OnlineSectioningServer.Lock; import org.unitime.timetable.onlinesectioning.basic.GetAssignment; import org.unitime.timetable.onlinesectioning.model.XCourse; import org.unitime.timetable.onlinesectioning.model.XCourseId; import org.unitime.timetable.onlinesectioning.model.XCourseRequest; import org.unitime.timetable.onlinesectioning.model.XDistribution; import 
org.unitime.timetable.onlinesectioning.model.XDistributionType; import org.unitime.timetable.onlinesectioning.model.XEnrollment; import org.unitime.timetable.onlinesectioning.model.XOffering; import org.unitime.timetable.onlinesectioning.model.XRequest; import org.unitime.timetable.onlinesectioning.model.XReservationType; import org.unitime.timetable.onlinesectioning.model.XSection; import org.unitime.timetable.onlinesectioning.model.XStudent; import org.unitime.timetable.solver.studentsct.StudentSolver; /** * @author Tomas Muller */ public class ComputeSuggestionsAction extends FindAssignmentAction { private static final long serialVersionUID = 1L; private static StudentSectioningMessages MSG = Localization.create(StudentSectioningMessages.class); private ClassAssignmentInterface.ClassAssignment iSelection; private double iValue = 0.0; private String iFilter = null; public ComputeSuggestionsAction forRequest(CourseRequestInterface request) { super.forRequest(request); return this; } public ComputeSuggestionsAction withAssignment(Collection<ClassAssignmentInterface.ClassAssignment> assignment) { super.withAssignment(assignment); return this; } public ComputeSuggestionsAction withSelection(ClassAssignmentInterface.ClassAssignment selectedAssignment) { iSelection = selectedAssignment; return this; } public ComputeSuggestionsAction withFilter(String filter) { iFilter = filter; return this; } public ClassAssignmentInterface.ClassAssignment getSelection() { return iSelection; } public String getFilter() { return iFilter; } @Override public List<ClassAssignmentInterface> execute(OnlineSectioningServer server, OnlineSectioningHelper helper) { long t0 = System.currentTimeMillis(); OverExpectedCriterion overExpected = server.getOverExpectedCriterion(); if ((getRequest().areSpaceConflictsAllowed() || getRequest().areTimeConflictsAllowed() || getRequest().areLinkedConflictsAllowed()) && server.getConfig().getPropertyBoolean("OverExpected.MinimizeConflicts", false)) { 
overExpected = new MinimizeConflicts(server.getConfig(), overExpected); } OnlineSectioningModel model = new OnlineSectioningModel(server.getConfig(), overExpected); Assignment<Request, Enrollment> assignment = new AssignmentMap<Request, Enrollment>(); boolean linkedClassesMustBeUsed = server.getConfig().getPropertyBoolean("LinkedClasses.mustBeUsed", false); OnlineSectioningLog.Action.Builder action = helper.getAction(); if (getRequest().getStudentId() != null) action.setStudent( OnlineSectioningLog.Entity.newBuilder() .setUniqueId(getRequest().getStudentId())); Student student = new Student(getRequest().getStudentId() == null ? -1l : getRequest().getStudentId()); Set<IdPair> enrolled = null; Lock readLock = server.readLock(); ClassAssignmentInterface unavailabilities = null; boolean checkDeadlines = server.getConfig().getPropertyBoolean("FindAssignment.CheckDeadlines", false) && !getRequest().areDeadlineConflictsAllowed(); Integer currentDateIndex = null; if (server.getConfig().getPropertyBoolean("FindAssignment.AvoidPastSections", true)) currentDateIndex = Days.daysBetween(new LocalDate(server.getAcademicSession().getDatePatternFirstDate()), new LocalDate()).getDays() + server.getConfig().getPropertyInt("FindAssignment.AvoidPastOffset", 0); boolean onlineOnlyFilter = true; if (helper.hasAdminPermission() && server.getConfig().getPropertyBoolean("Load.OnlineOnlyAdminOverride", false)) onlineOnlyFilter = false; else if (helper.hasAvisorPermission() && server.getConfig().getPropertyBoolean("Load.OnlineOnlyAdvisorOverride", false)) onlineOnlyFilter = false; try { XStudent original = (getRequest().getStudentId() == null ? 
null : server.getStudent(getRequest().getStudentId())); if (original != null) { unavailabilities = new ClassAssignmentInterface(); GetAssignment.fillUnavailabilitiesIn(unavailabilities, original, server, helper, null); Collections.reverse(unavailabilities.getCourseAssignments()); student.setExternalId(original.getExternalId()); student.setName(original.getName()); student.setNeedShortDistances(original.hasAccomodation(server.getDistanceMetric().getShortDistanceAccommodationReference())); student.setAllowDisabled(original.isAllowDisabled()); if (server instanceof StudentSolver) student.setMaxCredit(original.getMaxCredit()); action.getStudentBuilder().setUniqueId(original.getStudentId()).setExternalId(original.getExternalId()).setName(original.getName()); enrolled = new HashSet<IdPair>(); for (XRequest r: original.getRequests()) { if (r instanceof XCourseRequest && ((XCourseRequest)r).getEnrollment() != null) { XEnrollment e = ((XCourseRequest)r).getEnrollment(); for (Long s: e.getSectionIds()) enrolled.add(new IdPair(e.getCourseId(), s)); } } OnlineSectioningLog.Enrollment.Builder enrollment = OnlineSectioningLog.Enrollment.newBuilder(); enrollment.setType(OnlineSectioningLog.Enrollment.EnrollmentType.STORED); for (XRequest oldRequest: original.getRequests()) { if (oldRequest instanceof XCourseRequest && ((XCourseRequest)oldRequest).getEnrollment() != null) { XCourseRequest cr = (XCourseRequest)oldRequest; XOffering offering = server.getOffering(cr.getEnrollment().getOfferingId()); for (XSection section: offering.getSections(cr.getEnrollment())) enrollment.addSection(OnlineSectioningHelper.toProto(section, cr.getEnrollment())); } } action.addEnrollment(enrollment); } Map<Long, Section> classTable = new HashMap<Long, Section>(); Set<XDistribution> distributions = new HashSet<XDistribution>(); if (getAssignment() != null) getRequest().moveActiveSubstitutionsUp(); for (CourseRequestInterface.Request c: getRequest().getCourses()) addRequest(server, model, assignment, 
student, original, c, false, true, classTable, distributions, getAssignment() != null, getAssignment() != null, checkDeadlines, currentDateIndex, onlineOnlyFilter); if (student.getRequests().isEmpty()) throw new SectioningException(MSG.exceptionNoCourse()); for (CourseRequestInterface.Request c: getRequest().getAlternatives()) addRequest(server, model, assignment, student, original, c, true, true, classTable, distributions, getAssignment() != null, getAssignment() != null, checkDeadlines, currentDateIndex, onlineOnlyFilter); if (helper.isAlternativeCourseEnabled()) { for (Request r: student.getRequests()) { if (r.isAlternative() || !(r instanceof CourseRequest)) continue; CourseRequest cr = (CourseRequest)r; if (cr.getCourses().size() == 1) { XCourse course = server.getCourse(cr.getCourses().get(0).getId()); Long altCourseId = (course == null ? null : course.getAlternativeCourseId()); if (altCourseId != null) { boolean hasCourse = false; for (Request x: student.getRequests()) if (x instanceof CourseRequest) for (Course c: ((CourseRequest)x).getCourses()) if (c.getId() == altCourseId) { hasCourse = true; break; } if (!hasCourse) { XCourseId ci = server.getCourse(altCourseId); if (ci != null) { XOffering x = server.getOffering(ci.getOfferingId()); if (x != null) { cr.getCourses().add(clone(x, server.getEnrollments(x.getOfferingId()), ci.getCourseId(), student.getId(), original, classTable, server, model, getAssignment() != null, checkDeadlines, currentDateIndex, onlineOnlyFilter)); distributions.addAll(x.getDistributions()); } } } } } } } if (student.getExternalId() != null && !student.getExternalId().isEmpty()) { Collection<Long> offeringIds = server.getInstructedOfferings(student.getExternalId()); if (offeringIds != null) for (Long offeringId: offeringIds) { XOffering offering = server.getOffering(offeringId); if (offering != null) offering.fillInUnavailabilities(student); } } model.addStudent(student); model.setStudentQuality(new 
StudentQuality(server.getDistanceMetric(), model.getProperties())); // model.setDistanceConflict(new DistanceConflict(server.getDistanceMetric(), model.getProperties())); // model.setTimeOverlaps(new TimeOverlapsCounter(null, model.getProperties())); for (XDistribution link: distributions) { if (link.getDistributionType() == XDistributionType.LinkedSections) { List<Section> sections = new ArrayList<Section>(); for (Long sectionId: link.getSectionIds()) { Section x = classTable.get(sectionId); if (x != null) sections.add(x); } if (sections.size() >= 2) model.addLinkedSections(linkedClassesMustBeUsed, sections); } } } finally { readLock.release(); } long t1 = System.currentTimeMillis(); Hashtable<CourseRequest, Set<Section>> preferredSectionsForCourse = new Hashtable<CourseRequest, Set<Section>>(); Hashtable<CourseRequest, Set<Section>> requiredSectionsForCourse = new Hashtable<CourseRequest, Set<Section>>(); HashSet<FreeTimeRequest> requiredFreeTimes = new HashSet<FreeTimeRequest>(); ArrayList<ClassAssignmentInterface> ret = new ArrayList<ClassAssignmentInterface>(); ClassAssignmentInterface messages = new ClassAssignmentInterface(); ret.add(messages); OnlineSectioningLog.Enrollment.Builder requested = OnlineSectioningLog.Enrollment.newBuilder(); requested.setType(OnlineSectioningLog.Enrollment.EnrollmentType.PREVIOUS); for (ClassAssignmentInterface.ClassAssignment a: getAssignment()) if (a != null && a.isAssigned()) requested.addSection(OnlineSectioningHelper.toProto(a)); action.addEnrollment(requested); Request selectedRequest = null; Section selectedSection = null; double selectedPenalty = 0; Enrollment enrollmentArray[] = new Enrollment[student.getRequests().size()]; int idx = 0; for (Iterator<Request> e = student.getRequests().iterator(); e.hasNext();) { Request r = (Request)e.next(); OnlineSectioningLog.Request.Builder rq = OnlineSectioningHelper.toProto(r); if (r instanceof CourseRequest) { CourseRequest cr = (CourseRequest)r; // Experimental: provide student 
with a blank override that allows for overlaps as well as over-limit if (getRequest().areTimeConflictsAllowed() || getRequest().areSpaceConflictsAllowed() || getRequest().areLinkedConflictsAllowed()) { for (Course course: cr.getCourses()) { XCourse xc = server.getCourse(course.getId()); boolean time = getRequest().areTimeConflictsAllowed() && xc.areTimeConflictOverridesAllowed(); boolean space = getRequest().areSpaceConflictsAllowed() && xc.areSpaceConflictOverridesAllowed(); boolean linked = getRequest().areLinkedConflictsAllowed() && xc.areLinkedConflictOverridesAllowed(); boolean hasNeverIncludedReservation = false; if (!server.getConfig().getPropertyBoolean("Reservations.NeverIncludedAllowOverride", false)) for (Reservation res: course.getOffering().getReservations()) { if (res.neverIncluded()) hasNeverIncludedReservation = true; } if (!hasNeverIncludedReservation && (time || space || linked)) { OnlineReservation dummy = new OnlineReservation(XReservationType.Dummy.ordinal(), -3l, course.getOffering(), -100, space, 1, true, true, time, true, true); dummy.setBreakLinkedSections(linked); for (Config g: course.getOffering().getConfigs()) { dummy.addConfig(g); for (Subpart s: g.getSubparts()) { for (Section x: s.getSections()) { dummy.addSection(x, false); } } } } if ((time || space || linked) && server.getConfig().getPropertyBoolean("Restrictions.AllowOverride", false)) { if (course.getOffering().hasRestrictions()) { Restriction restriction = new IndividualRestriction(-3l, course.getOffering(), student.getId()); for (Config c: course.getOffering().getConfigs()) restriction.addConfig(c); } } } } if (!getSelection().isFreeTime() && cr.getCourse(getSelection().getCourseId()) != null) { selectedRequest = r; if (getSelection().getClassId() != null) { Section section = cr.getSection(getSelection().getClassId()); if (section != null) selectedSection = section; } } HashSet<Section> preferredSections = new HashSet<Section>(); HashSet<Section> requiredSections = new 
HashSet<Section>(); a: for (ClassAssignmentInterface.ClassAssignment a: getAssignment()) { if (a != null && !a.isFreeTime() && cr.getCourse(a.getCourseId()) != null && a.getClassId() != null) { Section section = cr.getSection(a.getClassId()); boolean hasIndividualReservation = false; if (section != null && section.getLimit() == 0) { for (Reservation res: cr.getReservations(cr.getCourse(a.getCourseId()))) { if (!res.canAssignOverLimit()) continue; Set<Section> sect = res.getSections(section.getSubpart()); if (sect == null || sect.contains(section)) hasIndividualReservation = true; } } if (section == null || (section.getLimit() == 0 && !hasIndividualReservation)) { messages.addMessage((a.isSaved() ? "Enrolled class " : a.isPinned() ? "Required class " : "Previously selected class ") + a.getSubject() + " " + a.getCourseNbr() + " " + a.getSubpart() + " " + a.getSection() + " is no longer available."); if (getSelection().getCourseId() != null && cr.getCourse(getSelection().getCourseId()) != null) continue a; } selectedPenalty += model.getOverExpected(assignment, enrollmentArray, idx, section, cr); if (a.isPinned() && !getSelection().equals(a)) requiredSections.add(section); preferredSections.add(section); rq.addSection(OnlineSectioningHelper.toProto(section, cr.getCourse(a.getCourseId())).setPreference( getSelection().equals(a) ? OnlineSectioningLog.Section.Preference.SELECTED : a.isPinned() ? 
OnlineSectioningLog.Section.Preference.REQUIRED : OnlineSectioningLog.Section.Preference.PREFERRED)); } } preferredSectionsForCourse.put(cr, preferredSections); requiredSectionsForCourse.put(cr, requiredSections); if (!preferredSections.isEmpty()) { Section section = preferredSections.iterator().next(); enrollmentArray[idx] = new Enrollment(cr, 0, section.getSubpart().getConfig(), preferredSections, assignment); } } else { FreeTimeRequest ft = (FreeTimeRequest)r; if (getSelection().isFreeTime() && ft.getTime() != null && ft.getTime().getStartSlot() == getSelection().getStart() && ft.getTime().getLength() == getSelection().getLength() && ft.getTime().getDayCode() == DayCode.toInt(DayCode.toDayCodes(getSelection().getDays()))) { selectedRequest = r; for (OnlineSectioningLog.Time.Builder ftb: rq.getFreeTimeBuilderList()) ftb.setPreference(OnlineSectioningLog.Section.Preference.SELECTED); } else for (ClassAssignmentInterface.ClassAssignment a: getAssignment()) { if (a != null && a.isFreeTime() && a.isPinned() && ft.getTime() != null && ft.getTime().getStartSlot() == a.getStart() && ft.getTime().getLength() == a.getLength() && ft.getTime().getDayCode() == DayCode.toInt(DayCode.toDayCodes(a.getDays()))) { requiredFreeTimes.add(ft); for (OnlineSectioningLog.Time.Builder ftb: rq.getFreeTimeBuilderList()) ftb.setPreference(OnlineSectioningLog.Section.Preference.REQUIRED); } } } idx++; action.addRequest(rq); } long t2 = System.currentTimeMillis(); if (selectedRequest == null) return new ArrayList<ClassAssignmentInterface>(); SuggestionsBranchAndBound suggestionBaB = null; boolean avoidOverExpected = server.getAcademicSession().isSectioningEnabled(); if (avoidOverExpected && helper.getUser() != null && helper.getUser().hasType() && helper.getUser().getType() != OnlineSectioningLog.Entity.EntityType.STUDENT) avoidOverExpected = false; String override = ApplicationProperty.OnlineSchedulingAllowOverExpected.value(); if (override != null) avoidOverExpected = 
"false".equalsIgnoreCase(override); double maxOverExpected = -1.0; if (avoidOverExpected && !(model.getOverExpectedCriterion() instanceof NeverOverExpected)) { long x0 = System.currentTimeMillis(); MultiCriteriaBranchAndBoundSelection selection = new MultiCriteriaBranchAndBoundSelection(model.getProperties()); selection.setModel(model); selection.setPreferredSections(preferredSectionsForCourse); selection.setRequiredSections(requiredSectionsForCourse); selection.setRequiredFreeTimes(requiredFreeTimes); selection.setTimeout(100); BranchBoundNeighbour neighbour = selection.select(assignment, student, new BestPenaltyCriterion(student, model)); long x1 = System.currentTimeMillis(); if (neighbour != null) { maxOverExpected = 0; for (int i = 0; i < neighbour.getAssignment().length; i++) { Enrollment enrollment = neighbour.getAssignment()[i]; if (enrollment != null && enrollment.getAssignments() != null && enrollment.isCourseRequest()) for (Section section: enrollment.getSections()) maxOverExpected += model.getOverExpected(assignment, neighbour.getAssignment(), i, section, enrollment.getRequest()); } if (maxOverExpected < selectedPenalty) maxOverExpected = selectedPenalty; helper.debug("Maximum number of over-expected sections limited to " + maxOverExpected + " (computed in " + (x1 - x0) + " ms)."); } } SuggestionsFilter filter = null; if (getFilter() != null && !getFilter().isEmpty()) { filter = new SuggestionsFilter(getFilter(), server.getAcademicSession().getDatePatternFirstDate()); } if (maxOverExpected >= 0.0) model.addGlobalConstraint(new MaxOverExpectedConstraint(maxOverExpected)); if (server.getConfig().getPropertyBoolean("StudentWeights.MultiCriteria", true)) { suggestionBaB = new MultiCriteriaBranchAndBoundSuggestions( model.getProperties(), student, assignment, requiredSectionsForCourse, requiredFreeTimes, preferredSectionsForCourse, selectedRequest, selectedSection, filter, maxOverExpected, 
server.getConfig().getPropertyBoolean("StudentWeights.PriorityWeighting", true)); } else { suggestionBaB = new SuggestionsBranchAndBound(model.getProperties(), student, assignment, requiredSectionsForCourse, requiredFreeTimes, preferredSectionsForCourse, selectedRequest, selectedSection, filter, maxOverExpected); } helper.debug("Using " + (server.getConfig().getPropertyBoolean("StudentWeights.MultiCriteria", true) ? "multi-criteria ": "") + (server.getConfig().getPropertyBoolean("StudentWeights.PriorityWeighting", true) ? "priority" : "equal") + " weighting model" + " with " + server.getConfig().getPropertyInt("Suggestions.Timeout", 5000) +" ms time limit" + (maxOverExpected < 0 ? "" : ", maximal over-expected of " + maxOverExpected) + " and maximal depth of " + server.getConfig().getPropertyInt("Suggestions.MaxDepth", 4) + "."); TreeSet<SuggestionsBranchAndBound.Suggestion> suggestions = suggestionBaB.computeSuggestions(); iValue = (suggestions.isEmpty() ? 0.0 : - suggestions.first().getValue()); long t3 = System.currentTimeMillis(); helper.debug(" -- suggestion B&B took "+suggestionBaB.getTime()+"ms"+(suggestionBaB.isTimeoutReached()?", timeout reached":"")); for (SuggestionsBranchAndBound.Suggestion suggestion : suggestions) { ClassAssignmentInterface ca = convert(server, assignment, suggestion.getEnrollments(), requiredSectionsForCourse, requiredFreeTimes, true, model.getStudentQuality(), enrolled); if (unavailabilities != null) for (ClassAssignmentInterface.CourseAssignment u: unavailabilities.getCourseAssignments()) ca.getCourseAssignments().add(0, u); ret.add(ca); OnlineSectioningLog.Enrollment.Builder solution = OnlineSectioningLog.Enrollment.newBuilder(); solution.setType(OnlineSectioningLog.Enrollment.EnrollmentType.COMPUTED); solution.setValue(- suggestion.getValue()); for (Enrollment e: suggestion.getEnrollments()) { if (e != null && e.getAssignments() != null) for (SctAssignment section: e.getAssignments()) 
solution.addSection(OnlineSectioningHelper.toProto(section, e)); } action.addEnrollment(solution); } // No suggestions -- compute conflicts with message if (suggestions.isEmpty() && selectedRequest != null && selectedRequest instanceof CourseRequest) { TreeSet<Enrollment> overlap = new TreeSet<Enrollment>(new Comparator<Enrollment>() { @Override public int compare(Enrollment o1, Enrollment o2) { return o1.getRequest().compareTo(o2.getRequest()); } }); Hashtable<CourseRequest, TreeSet<Section>> overlapingSections = new Hashtable<CourseRequest, TreeSet<Section>>(); CourseRequest request = (CourseRequest)selectedRequest; Course course = request.getCourses().get(0); Collection<Enrollment> avEnrls = request.getAvaiableEnrollmentsSkipSameTime(assignment); for (Iterator<Enrollment> e = avEnrls.iterator(); e.hasNext();) { Enrollment enrl = e.next(); for (Request q: enrl.getStudent().getRequests()) { if (q.equals(request)) continue; Enrollment x = assignment.getValue(q); if (x == null || x.getAssignments() == null || x.getAssignments().isEmpty()) continue; for (Iterator<SctAssignment> i = x.getAssignments().iterator(); i.hasNext();) { SctAssignment a = i.next(); if (a.isOverlapping(enrl.getAssignments())) { overlap.add(x); if (x.getRequest() instanceof CourseRequest) { CourseRequest cr = (CourseRequest)x.getRequest(); TreeSet<Section> ss = overlapingSections.get(cr); if (ss == null) { ss = new TreeSet<Section>(new AssignmentComparator<Section, Request, Enrollment>(assignment)); overlapingSections.put(cr, ss); } ss.add((Section)a); } } } } } TreeSet<String> overlapMessages = new TreeSet<String>(); for (Iterator<Enrollment> i = overlap.iterator(); i.hasNext();) { Enrollment q = i.next(); String ov = null; if (q.getRequest() instanceof FreeTimeRequest) { ov = OnlineSectioningHelper.toString((FreeTimeRequest)q.getRequest()); } else { CourseRequest cr = (CourseRequest)q.getRequest(); Course o = q.getCourse(); ov = MSG.course(o.getSubjectArea(), o.getCourseNumber()); if 
(overlapingSections.get(cr).size() == 1) for (Iterator<Section> j = overlapingSections.get(cr).iterator(); j.hasNext();) { Section s = j.next(); ov += " " + s.getSubpart().getName(); } } overlapMessages.add(ov); } if (!overlapMessages.isEmpty()) { String overlapMessage = null; for (Iterator<String> i = overlapMessages.iterator(); i.hasNext(); ) { String ov = i.next(); if (overlapMessage == null) overlapMessage = ov; else if (i.hasNext()) { overlapMessage += MSG.conflictWithMiddle(ov); } else { overlapMessage += MSG.conflictWithLast(ov); } } messages.addMessage(MSG.suggestionsNoChoicesCourseIsConflicting(MSG.course(course.getSubjectArea(), course.getCourseNumber()), overlapMessage)); } else if (course.getLimit() == 0) { messages.addMessage(MSG.suggestionsNoChoicesCourseIsFull(MSG.course(course.getSubjectArea(), course.getCourseNumber()))); } if (student.hasMaxCredit()) { float assignedCredit = 0; for (Request q: student.getRequests()) { if (q.equals(request)) continue; Enrollment e = q.getAssignment(assignment); if (e != null) assignedCredit += e.getCredit(); } Float minCred = null; for (Course c: request.getCourses()) { if (c.hasCreditValue() && (minCred == null || minCred > c.getCreditValue())) minCred = c.getCreditValue(); } if (minCred != null && assignedCredit + minCred > student.getMaxCredit()) messages.addMessage(MSG.conflictOverMaxCredit(student.getMaxCredit())); } } long t4 = System.currentTimeMillis(); helper.debug("Sectioning took "+(t4-t0)+"ms (model "+(t1-t0)+"ms, solver init "+(t2-t1)+"ms, sectioning "+(t3-t2)+"ms, conversion "+(t4-t3)+"ms)"); return ret; } @Override public String name() { return "suggestions"; } public double value() { return iValue; } }
apache-2.0
torakiki/sambox
src/main/java/org/sejda/sambox/pdmodel/graphics/shading/AxialShadingPaint.java
2390
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.sejda.sambox.pdmodel.graphics.shading; import java.awt.Color; import java.awt.PaintContext; import java.awt.Rectangle; import java.awt.RenderingHints; import java.awt.geom.AffineTransform; import java.awt.geom.Rectangle2D; import java.awt.image.ColorModel; import java.io.IOException; import org.sejda.sambox.util.Matrix; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * AWT Paint for axial shading. * */ public class AxialShadingPaint extends ShadingPaint<PDShadingType2> { private static final Logger LOG = LoggerFactory.getLogger(AxialShadingPaint.class); /** * Constructor. 
* * @param shadingType2 the shading resources * @param matrix the pattern matrix concatenated with that of the parent content stream */ AxialShadingPaint(PDShadingType2 shadingType2, Matrix matrix) { super(shadingType2, matrix); } @Override public int getTransparency() { return 0; } @Override public PaintContext createContext(ColorModel cm, Rectangle deviceBounds, Rectangle2D userBounds, AffineTransform xform, RenderingHints hints) { try { return new AxialShadingContext(shading, cm, xform, matrix, deviceBounds); } catch (IOException e) { LOG.error("An error occurred while painting", e); return new Color(0, 0, 0, 0).createContext(cm, deviceBounds, userBounds, xform, hints); } } }
apache-2.0
lmjacksoniii/hazelcast
hazelcast/src/test/java/com/hazelcast/internal/metrics/impl/DoubleGaugeImplTest.java
3425
package com.hazelcast.internal.metrics.impl; import com.hazelcast.internal.metrics.DoubleGauge; import com.hazelcast.internal.metrics.DoubleProbeFunction; import com.hazelcast.internal.metrics.LongProbeFunction; import com.hazelcast.internal.metrics.Probe; import com.hazelcast.logging.Logger; import com.hazelcast.test.HazelcastSerialClassRunner; import com.hazelcast.test.annotation.QuickTest; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; import static com.hazelcast.internal.metrics.ProbeLevel.INFO; import static com.hazelcast.internal.metrics.ProbeLevel.MANDATORY; import static org.junit.Assert.assertEquals; @RunWith(HazelcastSerialClassRunner.class) @Category(QuickTest.class) public class DoubleGaugeImplTest { private MetricsRegistryImpl metricsRegistry; @Before public void setup() { metricsRegistry = new MetricsRegistryImpl(Logger.getLogger(MetricsRegistryImpl.class), INFO); } class SomeObject { @Probe long longField = 10; @Probe double doubleField = 10.8; } // ============ readDouble =========================== @Test public void whenNoProbeAvailable() { DoubleGauge gauge = metricsRegistry.newDoubleGauge("foo"); double actual = gauge.read(); assertEquals(0, actual, 0.1); } @Test public void whenProbeThrowsException() { metricsRegistry.register(this, "foo", MANDATORY, new DoubleProbeFunction() { @Override public double get(Object o) { throw new RuntimeException(); } }); DoubleGauge gauge = metricsRegistry.newDoubleGauge("foo"); double actual = gauge.read(); assertEquals(0, actual, 0.1); } @Test public void whenDoubleProbe() { metricsRegistry.register(this, "foo", MANDATORY, new DoubleProbeFunction() { @Override public double get(Object o) { return 10; } }); DoubleGauge gauge = metricsRegistry.newDoubleGauge("foo"); double actual = gauge.read(); assertEquals(10, actual, 0.1); } @Test public void whenLongProbe() { metricsRegistry.register(this, "foo", MANDATORY, new 
LongProbeFunction() { @Override public long get(Object o) throws Exception { return 10; } }); DoubleGauge gauge = metricsRegistry.newDoubleGauge("foo"); double actual = gauge.read(); assertEquals(10, actual, 0.1); } @Test public void whenLongGaugeField() { SomeObject someObject = new SomeObject(); metricsRegistry.scanAndRegister(someObject, "foo"); DoubleGauge gauge = metricsRegistry.newDoubleGauge("foo.longField"); assertEquals(someObject.longField, gauge.read(), 0.1); } @Test public void whenDoubleGaugeField() { SomeObject someObject = new SomeObject(); metricsRegistry.scanAndRegister(someObject, "foo"); DoubleGauge gauge = metricsRegistry.newDoubleGauge("foo.doubleField"); assertEquals(someObject.doubleField, gauge.read(), 0.1); } }
apache-2.0
vikkyrk/incubator-beam
sdks/java/io/hadoop/jdk1.8-tests/src/test/java/org/apache/beam/sdk/io/hadoop/inputformat/integration/tests/HIFIOCassandraIT.java
8456
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.beam.sdk.io.hadoop.inputformat.integration.tests;

import com.datastax.driver.core.Row;
import java.io.Serializable;
import org.apache.beam.sdk.io.common.HashingFn;
import org.apache.beam.sdk.io.hadoop.inputformat.HadoopInputFormatIO;
import org.apache.beam.sdk.io.hadoop.inputformat.custom.options.HIFTestOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.testing.PAssert;
import org.apache.beam.sdk.testing.TestPipeline;
import org.apache.beam.sdk.transforms.Combine;
import org.apache.beam.sdk.transforms.Count;
import org.apache.beam.sdk.transforms.SimpleFunction;
import org.apache.beam.sdk.transforms.Values;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputFormat;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;

/**
 * A test of {@link org.apache.beam.sdk.io.hadoop.inputformat.HadoopInputFormatIO} on an
 * independent Cassandra instance.
 *
 * <p>This test requires a running instance of Cassandra, and the test dataset must exist in
 * the database.
 *
 * <p>You can run this test by doing the following:
 * <pre>
 *  mvn -e -Pio-it verify -pl sdks/java/io/hadoop/jdk1.8-tests/HIFIOCassandraIT
 *  -DintegrationTestPipelineOptions='[
 *  "--cassandraServerIp=1.2.3.4",
 *  "--cassandraServerPort=port",
 *  "--cassandraUserName=user",
 *  "--cassandraPassword=mypass" ]'
 * </pre>
 *
 * <p>If you want to run this with a runner besides directrunner, there are profiles for dataflow
 * and spark in the jdk1.8-tests pom. You'll want to activate those in addition to the normal test
 * runner invocation pipeline options.
 */
@RunWith(JUnit4.class)
public class HIFIOCassandraIT implements Serializable {

  // Keyspace/table of the pre-loaded YCSB dataset the assertions below are hard-coded against.
  private static final String CASSANDRA_KEYSPACE = "ycsb";
  private static final String CASSANDRA_TABLE = "usertable";
  // Hadoop Configuration property keys consumed by Cassandra's CqlInputFormat.
  private static final String CASSANDRA_THRIFT_PORT_PROPERTY = "cassandra.input.thrift.port";
  private static final String CASSANDRA_THRIFT_ADDRESS_PROPERTY = "cassandra.input.thrift.address";
  private static final String CASSANDRA_PARTITIONER_CLASS_PROPERTY =
      "cassandra.input.partitioner.class";
  private static final String CASSANDRA_KEYSPACE_PROPERTY = "cassandra.input.keyspace";
  private static final String CASSANDRA_COLUMNFAMILY_PROPERTY = "cassandra.input.columnfamily";
  private static final String CASSANDRA_PARTITIONER_CLASS_VALUE = "Murmur3Partitioner";
  private static final String USERNAME = "cassandra.username";
  private static final String PASSWORD = "cassandra.password";
  private static final String INPUT_KEYSPACE_USERNAME_CONFIG = "cassandra.input.keyspace.username";
  private static final String INPUT_KEYSPACE_PASSWD_CONFIG = "cassandra.input.keyspace.passwd";
  // Populated once per class from the integration-test pipeline options (see setUp()).
  private static HIFTestOptions options;
  @Rule public final transient TestPipeline pipeline = TestPipeline.create();

  @BeforeClass
  public static void setUp() {
    PipelineOptionsFactory.register(HIFTestOptions.class);
    options = TestPipeline.testingPipelineOptions().as(HIFTestOptions.class);
  }

  /**
   * This test reads data from the Cassandra instance and verifies if data is read successfully.
   */
  @Test
  public void testHIFReadForCassandra() {
    // Expected hashcode is evaluated during insertion time one time and hardcoded here.
    String expectedHashCode = "1a30ad400afe4ebf5fde75f5d2d95408";
    Long expectedRecordsCount = 1000L;
    Configuration conf = getConfiguration(options);
    PCollection<KV<Long, String>> cassandraData = pipeline.apply(HadoopInputFormatIO
        .<Long, String>read().withConfiguration(conf).withValueTranslation(myValueTranslate));
    PAssert.thatSingleton(cassandraData.apply("Count", Count.<KV<Long, String>>globally()))
        .isEqualTo(expectedRecordsCount);
    PCollection<String> textValues = cassandraData.apply(Values.<String>create());
    // Verify the output values using checksum comparison.
    PCollection<String> consolidatedHashcode =
        textValues.apply(Combine.globally(new HashingFn()).withoutDefaults());
    PAssert.that(consolidatedHashcode).containsInAnyOrder(expectedHashCode);
    pipeline.run().waitUntilFinish();
  }

  // Flattens a Cassandra Row into a pipe-separated string of y_id plus the ten
  // YCSB field columns, so results can be counted and checksummed as plain text.
  SimpleFunction<Row, String> myValueTranslate = new SimpleFunction<Row, String>() {
    @Override
    public String apply(Row input) {
      return input.getString("y_id") + "|" + input.getString("field0") + "|"
          + input.getString("field1") + "|" + input.getString("field2") + "|"
          + input.getString("field3") + "|" + input.getString("field4") + "|"
          + input.getString("field5") + "|" + input.getString("field6") + "|"
          + input.getString("field7") + "|" + input.getString("field8") + "|"
          + input.getString("field9");
    }
  };

  /**
   * This test reads data from the Cassandra instance based on query and verifies if data is read
   * successfully.
   */
  @Test
  public void testHIFReadForCassandraQuery() {
    // Expected checksum/row count for the single row matched by the CQL filter below.
    String expectedHashCode = "7bead6d6385c5f4dd0524720cd320b49";
    Long expectedNumRows = 1L;
    Configuration conf = getConfiguration(options);
    // The token(y_id) placeholders are filled in by CqlInputFormat per input split.
    conf.set("cassandra.input.cql", "select * from " + CASSANDRA_KEYSPACE + "." + CASSANDRA_TABLE
        + " where token(y_id) > ? and token(y_id) <= ? "
        + "and field0 = 'user48:field0:431531'");
    PCollection<KV<Long, String>> cassandraData =
        pipeline.apply(HadoopInputFormatIO.<Long, String>read().withConfiguration(conf)
            .withValueTranslation(myValueTranslate));
    PAssert.thatSingleton(cassandraData.apply("Count", Count.<KV<Long, String>>globally()))
        .isEqualTo(expectedNumRows);
    PCollection<String> textValues = cassandraData.apply(Values.<String>create());
    // Verify the output values using checksum comparison.
    PCollection<String> consolidatedHashcode =
        textValues.apply(Combine.globally(new HashingFn()).withoutDefaults());
    PAssert.that(consolidatedHashcode).containsInAnyOrder(expectedHashCode);
    pipeline.run().waitUntilFinish();
  }

  /**
   * Returns Hadoop configuration for reading data from Cassandra. To read data from Cassandra
   * using HadoopInputFormatIO, following properties must be set: InputFormat class, InputFormat
   * key class, InputFormat value class, Thrift address, Thrift port, partitioner class, keyspace
   * and columnfamily name.
   */
  private static Configuration getConfiguration(HIFTestOptions options) {
    Configuration conf = new Configuration();
    conf.set(CASSANDRA_THRIFT_PORT_PROPERTY, options.getCassandraServerPort().toString());
    conf.set(CASSANDRA_THRIFT_ADDRESS_PROPERTY, options.getCassandraServerIp());
    conf.set(CASSANDRA_PARTITIONER_CLASS_PROPERTY, CASSANDRA_PARTITIONER_CLASS_VALUE);
    conf.set(CASSANDRA_KEYSPACE_PROPERTY, CASSANDRA_KEYSPACE);
    conf.set(CASSANDRA_COLUMNFAMILY_PROPERTY, CASSANDRA_TABLE);
    // Set user name and password if Cassandra instance has security configured.
    conf.set(USERNAME, options.getCassandraUserName());
    conf.set(PASSWORD, options.getCassandraPassword());
    conf.set(INPUT_KEYSPACE_USERNAME_CONFIG, options.getCassandraUserName());
    conf.set(INPUT_KEYSPACE_PASSWD_CONFIG, options.getCassandraPassword());
    conf.setClass("mapreduce.job.inputformat.class",
        org.apache.cassandra.hadoop.cql3.CqlInputFormat.class, InputFormat.class);
    conf.setClass("key.class", java.lang.Long.class, Object.class);
    conf.setClass("value.class", com.datastax.driver.core.Row.class, Object.class);
    return conf;
  }
}
apache-2.0
FlxRobin/presto
presto-main/src/test/java/com/facebook/presto/metadata/TestNativeMetadata.java
6041
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.facebook.presto.metadata; import com.facebook.presto.spi.ConnectorColumnHandle; import com.facebook.presto.spi.ColumnMetadata; import com.facebook.presto.spi.ConnectorMetadata; import com.facebook.presto.spi.ConnectorSession; import com.facebook.presto.spi.ConnectorTableHandle; import com.facebook.presto.spi.ConnectorTableMetadata; import com.facebook.presto.spi.SchemaTableName; import com.facebook.presto.spi.SchemaTablePrefix; import com.facebook.presto.split.NativePartitionKey; import com.facebook.presto.type.TypeRegistry; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.skife.jdbi.v2.DBI; import org.skife.jdbi.v2.Handle; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeMethod; import org.testng.annotations.Test; import java.util.List; import java.util.Locale; import java.util.Map; import static com.facebook.presto.metadata.MetadataUtil.TableMetadataBuilder.tableMetadataBuilder; import static com.facebook.presto.spi.type.BigintType.BIGINT; import static com.facebook.presto.spi.type.DoubleType.DOUBLE; import static com.facebook.presto.spi.type.TimeZoneKey.UTC_KEY; import static com.facebook.presto.spi.type.VarcharType.VARCHAR; import static io.airlift.testing.Assertions.assertInstanceOf; import static org.testng.Assert.assertEquals; import static org.testng.Assert.assertNull; @Test(singleThreaded = true) public class 
TestNativeMetadata { private static final ConnectorSession SESSION = new ConnectorSession("user", "test", "default", "default", UTC_KEY, Locale.ENGLISH, null, null); private static final SchemaTableName DEFAULT_TEST_ORDERS = new SchemaTableName("test", "orders"); private Handle dummyHandle; private ConnectorMetadata metadata; @BeforeMethod public void setupDatabase() throws Exception { TypeRegistry typeRegistry = new TypeRegistry(); DBI dbi = new DBI("jdbc:h2:mem:test" + System.nanoTime()); dbi.registerMapper(new TableColumnMapper(typeRegistry)); dbi.registerMapper(new ColumnMetadataMapper(typeRegistry)); dbi.registerMapper(new NativePartitionKey.Mapper(typeRegistry)); dummyHandle = dbi.open(); metadata = new NativeMetadata(new NativeConnectorId("default"), dbi, new DatabaseShardManager(dbi)); } @AfterMethod public void cleanupDatabase() { dummyHandle.close(); } @Test public void testCreateTable() { assertNull(metadata.getTableHandle(SESSION, DEFAULT_TEST_ORDERS)); ConnectorTableHandle tableHandle = metadata.createTable(SESSION, getOrdersTable()); assertInstanceOf(tableHandle, NativeTableHandle.class); assertEquals(((NativeTableHandle) tableHandle).getTableId(), 1); ConnectorTableMetadata table = metadata.getTableMetadata(tableHandle); assertTableEqual(table, getOrdersTable()); ConnectorColumnHandle columnHandle = metadata.getColumnHandle(tableHandle, "orderkey"); assertInstanceOf(columnHandle, NativeColumnHandle.class); assertEquals(((NativeColumnHandle) columnHandle).getColumnId(), 1); } @Test public void testListTables() { metadata.createTable(SESSION, getOrdersTable()); List<SchemaTableName> tables = metadata.listTables(SESSION, null); assertEquals(tables, ImmutableList.of(DEFAULT_TEST_ORDERS)); } @Test public void testListTableColumns() { metadata.createTable(SESSION, getOrdersTable()); Map<SchemaTableName, List<ColumnMetadata>> columns = metadata.listTableColumns(SESSION, new SchemaTablePrefix()); assertEquals(columns, ImmutableMap.of(DEFAULT_TEST_ORDERS, 
getOrdersTable().getColumns())); } @Test public void testListTableColumnsFiltering() { metadata.createTable(SESSION, getOrdersTable()); Map<SchemaTableName, List<ColumnMetadata>> filterCatalog = metadata.listTableColumns(SESSION, new SchemaTablePrefix()); Map<SchemaTableName, List<ColumnMetadata>> filterSchema = metadata.listTableColumns(SESSION, new SchemaTablePrefix("test")); Map<SchemaTableName, List<ColumnMetadata>> filterTable = metadata.listTableColumns(SESSION, new SchemaTablePrefix("test", "orders")); assertEquals(filterCatalog, filterSchema); assertEquals(filterCatalog, filterTable); } private static ConnectorTableMetadata getOrdersTable() { return tableMetadataBuilder(DEFAULT_TEST_ORDERS) .column("orderkey", BIGINT) .column("custkey", BIGINT) .column("totalprice", DOUBLE) .column("orderdate", VARCHAR) .build(); } private static void assertTableEqual(ConnectorTableMetadata actual, ConnectorTableMetadata expected) { assertEquals(actual.getTable(), expected.getTable()); List<ColumnMetadata> actualColumns = actual.getColumns(); List<ColumnMetadata> expectedColumns = expected.getColumns(); assertEquals(actualColumns.size(), expectedColumns.size()); for (int i = 0; i < actualColumns.size(); i++) { ColumnMetadata actualColumn = actualColumns.get(i); ColumnMetadata expectedColumn = expectedColumns.get(i); assertEquals(actualColumn.getName(), expectedColumn.getName()); assertEquals(actualColumn.getType(), expectedColumn.getType()); } } }
apache-2.0
freeVM/freeVM
enhanced/buildtest/tests/functional/src/test/functional/org/apache/harmony/test/func/reg/jit/btest2795/Interface2795.java
1119
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** */ /* * Please, note that you need to compile Abstract2795 and Interface2795 classes * after you compiled Test2795_1 and Test2795_2 classes to reproduce the * failure. Please, see makefile for more details. */ package org.apache.harmony.test.func.reg.jit.btest2795; interface Interface2795 { }
apache-2.0
alucardxh/BaseFramework
src/main/java/com/baseframework/utils/HttpClientUtils2.java
9266
package com.baseframework.utils; import java.nio.charset.Charset; import java.util.List; import java.util.concurrent.TimeUnit; import javax.script.ScriptEngine; import javax.script.ScriptEngineManager; import javax.script.ScriptException; import org.apache.http.HttpEntity; import org.apache.http.HttpResponse; import org.apache.http.HttpStatus; import org.apache.http.client.HttpClient; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.entity.UrlEncodedFormEntity; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.conn.HttpClientConnectionManager; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.entity.mime.MultipartEntityBuilder; import org.apache.http.entity.mime.content.ByteArrayBody; import org.apache.http.entity.mime.content.StringBody; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; import org.apache.http.message.BasicNameValuePair; import org.apache.http.util.EntityUtils; public class HttpClientUtils2 { /** 连接超时时间 */ private static final int defaultConnectionTimeout = 10000; /** 回应超时时间 */ private static final int defaultSoTimeout = 30000; /** 闲置连接超时时间 */ private static final int defaultIdleConnTimeout = 60000; private static final int defaultMaxConnPerHost = 30; private static final int defaultMaxTotalConn = 80; /** 默认等待HttpConnectionManager返回连接超时(只有在达到最大连接数时起作用)*/ private static final long defaultHttpConnectionManagerTimeout = 3 * 1000; private static PoolingHttpClientConnectionManager connectionManager; private static RequestConfig requestConfig; static { connectionManager = new PoolingHttpClientConnectionManager(); connectionManager.setDefaultMaxPerRoute(defaultMaxConnPerHost); connectionManager.setMaxTotal(defaultMaxTotalConn); //new 
IdleConnectionMonitorThread(connectionManager).start(); } public static HttpClient getHttpClient() { requestConfig = RequestConfig.custom().setConnectTimeout(defaultConnectionTimeout).setSocketTimeout(defaultSoTimeout).build(); CloseableHttpClient httpClient = HttpClients.createMinimal(connectionManager); return httpClient; } public static String doPost(HttpClient httpClient, String url, List<BasicNameValuePair> params) { String body = ""; try { HttpPost httpPost = new HttpPost(url); httpPost.setConfig(requestConfig); if (params != null) { HttpEntity entity = new UrlEncodedFormEntity(params, "UTF-8"); httpPost.setEntity(entity); } HttpResponse response = httpClient.execute(httpPost); if (response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) { body = EntityUtils.toString(response.getEntity(),"UTF-8"); } httpPost.releaseConnection(); } catch (Exception e) { e.printStackTrace(); } return body; } public static String doPostJson(HttpClient httpClient, String url, String json) { String body = ""; try { HttpPost httpPost = new HttpPost(url); httpPost.setConfig(requestConfig); httpPost.addHeader("Content-type","application/json; charset=UTF-8"); httpPost.setHeader("Accept", "application/json"); StringEntity stringEntity = new StringEntity(json, Charset.forName("UTF-8")); httpPost.setEntity(stringEntity); HttpResponse response = httpClient.execute(httpPost); if (response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) { body = EntityUtils.toString(response.getEntity(),"UTF-8"); } httpPost.releaseConnection(); } catch (Exception e) { e.printStackTrace(); } return body; } public static String doPostXml(HttpClient httpClient, String url, String xml) { String body = ""; try { HttpPost httpPost = new HttpPost(url); httpPost.setConfig(requestConfig); httpPost.addHeader("Content-Type", "text/xml; charset=UTF-8"); httpPost.setEntity(new StringEntity(xml, "UTF-8")); HttpResponse response = httpClient.execute(httpPost); if (response.getStatusLine().getStatusCode() == 
HttpStatus.SC_OK) { body = EntityUtils.toString(response.getEntity(),"UTF-8"); } httpPost.releaseConnection(); } catch (Exception e) { e.printStackTrace(); } return body; } public static String doGet(HttpClient httpClient, String url) { String body = ""; try { HttpGet httpGet = new HttpGet(url); httpGet.setConfig(requestConfig); HttpResponse response = httpClient.execute(httpGet); if (response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) { body = EntityUtils.toString(response.getEntity(),"UTF-8"); } httpGet.releaseConnection(); } catch (Exception e) { e.printStackTrace(); } return body; } public static byte[] getFile(HttpClient httpClient, String url) { byte[] file = null; try { HttpGet httpGet = new HttpGet(url); httpGet.setConfig(requestConfig); HttpResponse response = httpClient.execute(httpGet); if (response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) { file = EntityUtils.toByteArray(response.getEntity()); } httpGet.releaseConnection(); } catch (Exception e) { e.printStackTrace(); } return file; } public static String uploadFile(HttpClient httpClient, String url, List<BasicNameValuePair> params, String filename, byte[] data) { String body = ""; try { //File file = new File(filename); //FileUtils.writeByteArrayToFile(file, data); MultipartEntityBuilder builder = MultipartEntityBuilder.create(); for (BasicNameValuePair pair : params) { builder.addPart(pair.getName(), new StringBody(pair.getValue(), ContentType.MULTIPART_FORM_DATA)); } builder.addPart(filename,new ByteArrayBody(data, filename)); HttpEntity entity = builder.build(); HttpPost httpPost = new HttpPost(url); httpPost.setConfig(requestConfig); httpPost.setEntity(entity); httpClient.execute(httpPost); HttpResponse response = httpClient.execute(httpPost); if (response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) { body = EntityUtils.toString(response.getEntity(),"UTF-8"); } httpPost.releaseConnection(); } catch (Exception e) { e.printStackTrace(); } return body; } private static 
class IdleConnectionMonitorThread extends Thread { private final HttpClientConnectionManager connMgr; private volatile boolean shutdown; public IdleConnectionMonitorThread(HttpClientConnectionManager connMgr) { super(); this.connMgr = connMgr; } @Override public void run() { try { while (!shutdown) { synchronized (this) { wait(5000); System.out.println("清空失效连接..."); // 关闭失效连接 connMgr.closeExpiredConnections(); //关闭空闲超过30秒的连接 connMgr.closeIdleConnections(defaultIdleConnTimeout, TimeUnit.SECONDS); } } } catch (InterruptedException ex) { } } public void shutdown() { shutdown = true; synchronized (this) { notifyAll(); } } } public static void main(String[] args) throws ScriptException { /*ScriptEngineManager sem = new ScriptEngineManager(); ScriptEngine engine = sem.getEngineByExtension("js"); try{ //直接解析 Object res = engine.eval("unescape('%u4E1C%u54F2%u65ED')"); System.out.println(res); }catch(Exception ex){ ex.printStackTrace(); } String xh=engine.eval("escape('1333')").toString(); String name= engine.eval("escape('东哲旭')").toString(); String team=engine.eval("escape('all')").toString(); HttpClient hc = HttpClientUtils2.getHttpClient(); String s = HttpClientUtils2.doGet(hc, "http://www.xa83zx.cn/kscx/search.asp?xh="+xh+"&stuname="+name+"&team="+team); System.out.println(s);*/ ScriptEngineManager sem = new ScriptEngineManager(); ScriptEngine engine = sem.getEngineByExtension("js"); /* try{ //直接解析 Object res = engine.eval("unescape('%u4E1C%u54F2%u65ED')"); System.out.println(res); }catch(Exception ex){ ex.printStackTrace(); } */ String xh=engine.eval("escape('1333')").toString(); String name= engine.eval("escape('东哲旭')").toString(); String team=engine.eval("escape('all')").toString(); HttpClient hc = HttpClientUtils2.getHttpClient(); //String s = HttpClientUtils.doGet(hc, "http://www.xa83zx.cn/kscx/search.asp?xh="+xh+"&stuname="+name+"&team="+team); String s = HttpClientUtils2.doGet(hc, "http://www.xa83zx.cn/kscx/search.asp?xh="+xh+"&stuname="+name+"&team="+team); 
Object responStr=engine.eval("unescape('"+s+"')"); System.out.println(responStr); } }
apache-2.0
googleapis/java-automl
proto-google-cloud-automl-v1/src/main/java/com/google/cloud/automl/v1/TextSentimentDatasetMetadata.java
19789
/* * Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Generated by the protocol buffer compiler. DO NOT EDIT! // source: google/cloud/automl/v1/text.proto package com.google.cloud.automl.v1; /** * * * <pre> * Dataset metadata for text sentiment. * </pre> * * Protobuf type {@code google.cloud.automl.v1.TextSentimentDatasetMetadata} */ public final class TextSentimentDatasetMetadata extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:google.cloud.automl.v1.TextSentimentDatasetMetadata) TextSentimentDatasetMetadataOrBuilder { private static final long serialVersionUID = 0L; // Use TextSentimentDatasetMetadata.newBuilder() to construct. 
private TextSentimentDatasetMetadata(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { super(builder); } private TextSentimentDatasetMetadata() {} @java.lang.Override @SuppressWarnings({"unused"}) protected java.lang.Object newInstance(UnusedPrivateParameter unused) { return new TextSentimentDatasetMetadata(); } @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return this.unknownFields; } private TextSentimentDatasetMetadata( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { this(); if (extensionRegistry == null) { throw new java.lang.NullPointerException(); } com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; case 8: { sentimentMax_ = input.readInt32(); break; } default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.automl.v1.TextProto .internal_static_google_cloud_automl_v1_TextSentimentDatasetMetadata_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.automl.v1.TextProto .internal_static_google_cloud_automl_v1_TextSentimentDatasetMetadata_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.automl.v1.TextSentimentDatasetMetadata.class, 
com.google.cloud.automl.v1.TextSentimentDatasetMetadata.Builder.class); } public static final int SENTIMENT_MAX_FIELD_NUMBER = 1; private int sentimentMax_; /** * * * <pre> * Required. A sentiment is expressed as an integer ordinal, where higher value * means a more positive sentiment. The range of sentiments that will be used * is between 0 and sentiment_max (inclusive on both ends), and all the values * in the range must be represented in the dataset before a model can be * created. * sentiment_max value must be between 1 and 10 (inclusive). * </pre> * * <code>int32 sentiment_max = 1;</code> * * @return The sentimentMax. */ @java.lang.Override public int getSentimentMax() { return sentimentMax_; } private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) return true; if (isInitialized == 0) return false; memoizedIsInitialized = 1; return true; } @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (sentimentMax_ != 0) { output.writeInt32(1, sentimentMax_); } unknownFields.writeTo(output); } @java.lang.Override public int getSerializedSize() { int size = memoizedSize; if (size != -1) return size; size = 0; if (sentimentMax_ != 0) { size += com.google.protobuf.CodedOutputStream.computeInt32Size(1, sentimentMax_); } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; } @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof com.google.cloud.automl.v1.TextSentimentDatasetMetadata)) { return super.equals(obj); } com.google.cloud.automl.v1.TextSentimentDatasetMetadata other = (com.google.cloud.automl.v1.TextSentimentDatasetMetadata) obj; if (getSentimentMax() != other.getSentimentMax()) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @java.lang.Override public 
int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (37 * hash) + SENTIMENT_MAX_FIELD_NUMBER; hash = (53 * hash) + getSentimentMax(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static com.google.cloud.automl.v1.TextSentimentDatasetMetadata parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.automl.v1.TextSentimentDatasetMetadata parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.automl.v1.TextSentimentDatasetMetadata parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.automl.v1.TextSentimentDatasetMetadata parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.automl.v1.TextSentimentDatasetMetadata parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static com.google.cloud.automl.v1.TextSentimentDatasetMetadata parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static com.google.cloud.automl.v1.TextSentimentDatasetMetadata parseFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static 
com.google.cloud.automl.v1.TextSentimentDatasetMetadata parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.automl.v1.TextSentimentDatasetMetadata parseDelimitedFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); } public static com.google.cloud.automl.v1.TextSentimentDatasetMetadata parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( PARSER, input, extensionRegistry); } public static com.google.cloud.automl.v1.TextSentimentDatasetMetadata parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); } public static com.google.cloud.automl.v1.TextSentimentDatasetMetadata parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3.parseWithIOException( PARSER, input, extensionRegistry); } @java.lang.Override public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder( com.google.cloud.automl.v1.TextSentimentDatasetMetadata prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } @java.lang.Override public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * * * <pre> * Dataset metadata for text sentiment. * </pre> * * Protobuf type {@code google.cloud.automl.v1.TextSentimentDatasetMetadata} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements // @@protoc_insertion_point(builder_implements:google.cloud.automl.v1.TextSentimentDatasetMetadata) com.google.cloud.automl.v1.TextSentimentDatasetMetadataOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return com.google.cloud.automl.v1.TextProto .internal_static_google_cloud_automl_v1_TextSentimentDatasetMetadata_descriptor; } @java.lang.Override protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return com.google.cloud.automl.v1.TextProto .internal_static_google_cloud_automl_v1_TextSentimentDatasetMetadata_fieldAccessorTable .ensureFieldAccessorsInitialized( com.google.cloud.automl.v1.TextSentimentDatasetMetadata.class, com.google.cloud.automl.v1.TextSentimentDatasetMetadata.Builder.class); } // Construct using com.google.cloud.automl.v1.TextSentimentDatasetMetadata.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} } @java.lang.Override public Builder clear() { super.clear(); sentimentMax_ = 0; return this; } @java.lang.Override public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return com.google.cloud.automl.v1.TextProto .internal_static_google_cloud_automl_v1_TextSentimentDatasetMetadata_descriptor; } 
@java.lang.Override public com.google.cloud.automl.v1.TextSentimentDatasetMetadata getDefaultInstanceForType() { return com.google.cloud.automl.v1.TextSentimentDatasetMetadata.getDefaultInstance(); } @java.lang.Override public com.google.cloud.automl.v1.TextSentimentDatasetMetadata build() { com.google.cloud.automl.v1.TextSentimentDatasetMetadata result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @java.lang.Override public com.google.cloud.automl.v1.TextSentimentDatasetMetadata buildPartial() { com.google.cloud.automl.v1.TextSentimentDatasetMetadata result = new com.google.cloud.automl.v1.TextSentimentDatasetMetadata(this); result.sentimentMax_ = sentimentMax_; onBuilt(); return result; } @java.lang.Override public Builder clone() { return super.clone(); } @java.lang.Override public Builder setField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.setField(field, value); } @java.lang.Override public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { return super.clearField(field); } @java.lang.Override public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { return super.clearOneof(oneof); } @java.lang.Override public Builder setRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { return super.setRepeatedField(field, index, value); } @java.lang.Override public Builder addRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { return super.addRepeatedField(field, value); } @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof com.google.cloud.automl.v1.TextSentimentDatasetMetadata) { return mergeFrom((com.google.cloud.automl.v1.TextSentimentDatasetMetadata) other); } else { super.mergeFrom(other); return this; } } public Builder 
mergeFrom(com.google.cloud.automl.v1.TextSentimentDatasetMetadata other) { if (other == com.google.cloud.automl.v1.TextSentimentDatasetMetadata.getDefaultInstance()) return this; if (other.getSentimentMax() != 0) { setSentimentMax(other.getSentimentMax()); } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; } @java.lang.Override public final boolean isInitialized() { return true; } @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { com.google.cloud.automl.v1.TextSentimentDatasetMetadata parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (com.google.cloud.automl.v1.TextSentimentDatasetMetadata) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int sentimentMax_; /** * * * <pre> * Required. A sentiment is expressed as an integer ordinal, where higher value * means a more positive sentiment. The range of sentiments that will be used * is between 0 and sentiment_max (inclusive on both ends), and all the values * in the range must be represented in the dataset before a model can be * created. * sentiment_max value must be between 1 and 10 (inclusive). * </pre> * * <code>int32 sentiment_max = 1;</code> * * @return The sentimentMax. */ @java.lang.Override public int getSentimentMax() { return sentimentMax_; } /** * * * <pre> * Required. A sentiment is expressed as an integer ordinal, where higher value * means a more positive sentiment. The range of sentiments that will be used * is between 0 and sentiment_max (inclusive on both ends), and all the values * in the range must be represented in the dataset before a model can be * created. * sentiment_max value must be between 1 and 10 (inclusive). 
* </pre> * * <code>int32 sentiment_max = 1;</code> * * @param value The sentimentMax to set. * @return This builder for chaining. */ public Builder setSentimentMax(int value) { sentimentMax_ = value; onChanged(); return this; } /** * * * <pre> * Required. A sentiment is expressed as an integer ordinal, where higher value * means a more positive sentiment. The range of sentiments that will be used * is between 0 and sentiment_max (inclusive on both ends), and all the values * in the range must be represented in the dataset before a model can be * created. * sentiment_max value must be between 1 and 10 (inclusive). * </pre> * * <code>int32 sentiment_max = 1;</code> * * @return This builder for chaining. */ public Builder clearSentimentMax() { sentimentMax_ = 0; onChanged(); return this; } @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); } @java.lang.Override public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return super.mergeUnknownFields(unknownFields); } // @@protoc_insertion_point(builder_scope:google.cloud.automl.v1.TextSentimentDatasetMetadata) } // @@protoc_insertion_point(class_scope:google.cloud.automl.v1.TextSentimentDatasetMetadata) private static final com.google.cloud.automl.v1.TextSentimentDatasetMetadata DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new com.google.cloud.automl.v1.TextSentimentDatasetMetadata(); } public static com.google.cloud.automl.v1.TextSentimentDatasetMetadata getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser<TextSentimentDatasetMetadata> PARSER = new com.google.protobuf.AbstractParser<TextSentimentDatasetMetadata>() { @java.lang.Override public TextSentimentDatasetMetadata parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
com.google.protobuf.InvalidProtocolBufferException { return new TextSentimentDatasetMetadata(input, extensionRegistry); } }; public static com.google.protobuf.Parser<TextSentimentDatasetMetadata> parser() { return PARSER; } @java.lang.Override public com.google.protobuf.Parser<TextSentimentDatasetMetadata> getParserForType() { return PARSER; } @java.lang.Override public com.google.cloud.automl.v1.TextSentimentDatasetMetadata getDefaultInstanceForType() { return DEFAULT_INSTANCE; } }
apache-2.0
jimv39/qvcsos
qvcse-qvcslib/src/main/java/com/qumasoft/qvcslib/logfileaction/SetCommentPrefix.java
1251
/* Copyright 2004-2014 Jim Voris * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.qumasoft.qvcslib.logfileaction; /** * Set comment prefix action. * @author Jim Voris */ public class SetCommentPrefix extends ActionType { private final String commentPrefix; /** * Creates a new instance of LogfileActionSetCommentPrefix. * @param commentPfx comment prefix. */ public SetCommentPrefix(final String commentPfx) { super("Set Comment Prefix", ActionType.SET_COMMENT_PREFIX); commentPrefix = commentPfx; } /** * Get the comment prefix. * @return the comment prefix. */ String getCommentPrefix() { return commentPrefix; } }
apache-2.0