text
stringlengths 7
1.01M
|
|---|
package com.example.wenda01.fragments.jq;
import android.os.Bundle;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.support.v4.view.ViewPager;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.Button;
import android.widget.LinearLayout;
import android.widget.TextView;
import com.example.wenda01.R;
import com.example.wenda01.adapters.CardPagerAdapter;
import com.example.wenda01.adapters.ImageAdapter;
import com.example.wenda01.beans.jq.JqOneResult;
import com.example.wenda01.beans.ShowItem;
import com.example.wenda01.fragments.base.JqFabFragment;
import com.example.wenda01.views.Card.CardItem;
import com.example.wenda01.utils.ShadowTransformer;
import com.google.gson.Gson;
import java.util.ArrayList;
import java.util.List;
import okhttp3.FormBody;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.RequestBody;
import okhttp3.Response;
public class JqContentFragment extends JqFabFragment implements View.OnClickListener {

    // Detail screen for a single solar term (节气): shows its sections either as a
    // card ViewPager ("card mode") or as a flat RecyclerView ("list mode"), plus a
    // horizontal strip of images for the term.

    private TextView textTitle;           // title bar; shows the solar term name once loaded
    private RecyclerView recyclerView;    // list-mode content
    private int id;                       // solar term id, passed in via fragment arguments
    private int showType = 0;             // display-mode counter; the actual toggle is driven by view visibility
    private String name;                  // solar term name
    private String jieshao;               // introduction
    private String qihou;                 // climate
    private String shenghuo;              // daily life
    private String jingshen;              // mental well-being
    private String fangbing;              // disease prevention
    private String yinshi;                // diet
    private String yaoshan;               // medicinal food
    // User-visible section labels: introduction, climate, life, spirit, prevention, diet, medicinal food.
    private String[] arr = {"介绍", "气候", "生活", "精神", "防病", "饮食", "药膳"};
    // User-visible card titles: overview, health preservation, diet therapy.
    private String[] arrTitle = {"简介", "养生", "食疗"};
    private List<ShowItem> list;   // list-mode content (all sections)
    private List<ShowItem> list1;  // card mode: overview sections
    private List<ShowItem> list2;  // card mode: health-preservation sections
    private List<ShowItem> list3;  // card mode: diet-therapy sections
    private CardPagerAdapter mCardAdapter;
    private ViewPager mViewPager;
    private ShowItemAdapter showItemAdapter;
    private ShadowTransformer mCardShadowTransformer;
    private LinearLayout layoutList;
    private LinearLayout layoutCard;
    private Button buttonChange;
    private RecyclerView recyclerImage;
    private List<Integer> images;  // drawable resource ids for the image strip
    private ImageAdapter imageAdapter;

    @Nullable
    @Override
    public View onCreateView(@NonNull LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) {
        view = inflater.inflate(R.layout.jq_content_frag, container, false);
        preWork();
        setSDFab(view, this);
        setInitData();
        return view;
    }

    @Override
    public void onClick(View v) {
        // Give the base-class FAB handler first chance at the click.
        if (onClickFab(v)) {
            return;
        }
        switch (v.getId()) {
            case R.id.button_change:
                changeShowLayout();
                break;
        }
    }

    /** Toggles between card mode and list mode; the button label names the OTHER mode. */
    private void changeShowLayout() {
        showType = (showType + 1) % 2;
        if (layoutCard.getVisibility() == View.VISIBLE) {
            layoutList.setVisibility(View.VISIBLE);
            layoutCard.setVisibility(View.GONE);
            buttonChange.setText("卡片模式");
        } else {
            layoutList.setVisibility(View.GONE);
            layoutCard.setVisibility(View.VISIBLE);
            buttonChange.setText("列表模式");
        }
    }

    /** Looks up all views and wires adapters; card mode is shown initially. */
    private void preWork() {
        layoutList = view.findViewById(R.id.jq_content_list);
        layoutCard = view.findViewById(R.id.jq_content_card);
        buttonChange = view.findViewById(R.id.button_change);
        textTitle = view.findViewById(R.id.s_title);
        recyclerView = view.findViewById(R.id.s_recycler);
        recyclerImage = view.findViewById(R.id.recycler_image);
        buttonChange.setText("列表模式");
        textTitle.setText("节气");
        list = new ArrayList<>();
        list1 = new ArrayList<>();
        list2 = new ArrayList<>();
        list3 = new ArrayList<>();
        images = new ArrayList<>();
        LinearLayoutManager layoutManager = new LinearLayoutManager(getActivity());
        recyclerView.setLayoutManager(layoutManager);
        showItemAdapter = new ShowItemAdapter(list);
        recyclerView.setAdapter(showItemAdapter);
        LinearLayoutManager imageManager = new LinearLayoutManager(getActivity());
        imageManager.setOrientation(LinearLayoutManager.HORIZONTAL);
        recyclerImage.setLayoutManager(imageManager);
        imageAdapter = new ImageAdapter(images);
        recyclerImage.setAdapter(imageAdapter);
        mViewPager = (ViewPager) view.findViewById(R.id.viewPager);
        mCardAdapter = new CardPagerAdapter(this);
        layoutList.setVisibility(View.GONE);
        layoutCard.setVisibility(View.VISIBLE);
        buttonChange.setOnClickListener(this);
        // The wheel view only exists in the two-pane (tablet) layout.
        if (view.findViewById(R.id.jq_wheel) != null) {
            isTwoPane = true;
        } else {
            isTwoPane = false;
        }
    }

    /** Reads the initial parameters passed by the hosting activity, then loads the data. */
    private void setInitData() {
        // assumes the hosting activity always supplies arguments — TODO confirm; a
        // null Bundle here would NPE before any request is sent.
        id = getArguments().getInt("id");
        isTwoPane = getArguments().getBoolean("isTwoPane");
        sendReq4JqAll();
    }

    /** Fetches this solar term's details from the server on a background thread. */
    private void sendReq4JqAll() {
        // NOTE(review): plain http plus hard-coded member_id/key in source — these
        // look like shared demo credentials; consider moving them out of the code.
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    OkHttpClient client = new OkHttpClient();
                    RequestBody requestBody = new FormBody.Builder()
                            .add("requestType", "2")
                            .add("id", "" + id)
                            .add("member_id", "88")
                            .add("key_tz", "lblc6wcj3ogh0uyhfek53b5z")
                            .build();
                    Request request = new Request.Builder()
                            .url("http://miaolangzhong.com/erzhentang/saas100Business/Food24.do")
                            .post(requestBody)
                            .build();
                    Response response = client.newCall(request).execute();
                    String responseData = response.body().string();
                    show(responseData);
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }).start();
    }

    /** Delivers the raw server response to the UI thread for parsing. */
    public void show(final String s) {
        // The request runs on a background thread; the fragment may have been
        // detached by the time it completes, in which case getActivity() is null.
        if (getActivity() == null) {
            return;
        }
        getActivity().runOnUiThread(new Runnable() {
            @Override
            public void run() {
                parseJSONWithGSON(s);
            }
        });
    }

    /** Parses the JSON payload and populates both display modes. Runs on the UI thread. */
    private void parseJSONWithGSON(String jsonData) {
        Gson gson = new Gson();
        JqOneResult jqOneResult = gson.fromJson(jsonData, JqOneResult.class);
        name = jqOneResult.getRec().getName();
        jieshao = jqOneResult.getRec().getJieshao();
        qihou = jqOneResult.getRec().getQihou();
        shenghuo = jqOneResult.getRec().getShenghuo();
        jingshen = jqOneResult.getRec().getJingshen();
        fangbing = jqOneResult.getRec().getFangbing();
        yinshi = jqOneResult.getRec().getYinshi();
        yaoshan = jqOneResult.getRec().getYaoshan();
        textTitle.setText(name);
        ShowItem jS = new ShowItem(arr[0], jieshao);
        ShowItem qH = new ShowItem(arr[1], qihou);
        ShowItem sH = new ShowItem(arr[2], shenghuo);
        ShowItem jShen = new ShowItem(arr[3], jingshen);
        ShowItem fB = new ShowItem(arr[4], fangbing);
        ShowItem yS = new ShowItem(arr[5], yinshi);
        ShowItem yShan = new ShowItem(arr[6], yaoshan);
        // List mode shows every section; the three card lists partition the same items.
        list.add(jS);
        list.add(qH);
        list.add(sH);
        list.add(jShen);
        list.add(fB);
        list.add(yS);
        list.add(yShan);
        list1.add(jS);
        list1.add(qH);
        list2.add(sH);
        list2.add(jShen);
        list2.add(fB);
        list3.add(yS);
        list3.add(yShan);
        setCard();
        showItemAdapter.notifyDataSetChanged();
        setImageAdapter();
    }

    /** Zero-pads the solar term id to two digits, e.g. 3 -> "03", 12 -> "12". */
    private String idPrefix() {
        return id < 10 ? "0" + id : String.valueOf(id);
    }

    /** Fills the image strip with the term's three drawables (named "jq" + padded id + 1..3). */
    private void setImageAdapter() {
        String prefix = idPrefix();
        for (int i = 1; i <= 3; i++) {
            images.add(getResources().getIdentifier("jq" + prefix + i, "drawable", getContext().getPackageName()));
        }
        imageAdapter.notifyDataSetChanged();
    }

    /** Builds the three cards (overview / health / diet) and configures the ViewPager. */
    private void setCard() {
        String prefix = idPrefix();
        mCardAdapter.addCardItem(new CardItem(arrTitle[0], list1, getResources().getIdentifier("jq" + prefix + 1, "drawable", getContext().getPackageName())));
        mCardAdapter.addCardItem(new CardItem(arrTitle[1], list2, getResources().getIdentifier("jq" + prefix + 2, "drawable", getContext().getPackageName())));
        mCardAdapter.addCardItem(new CardItem(arrTitle[2], list3, getResources().getIdentifier("jq" + prefix + 3, "drawable", getContext().getPackageName())));
        mCardShadowTransformer = new ShadowTransformer(mViewPager, mCardAdapter);
        mCardShadowTransformer.enableScaling(true);
        mViewPager.setAdapter(mCardAdapter);
        if (isTwoPane) {
            mViewPager.setPageMargin(getResources().getDimensionPixelSize(R.dimen.page_margin));
        } else {
            mViewPager.setPageMargin(getResources().getDimensionPixelSize(R.dimen.page_margin_small));
        }
        mViewPager.setPageTransformer(false, mCardShadowTransformer);
        mViewPager.setOffscreenPageLimit(3);
    }

    /** RecyclerView adapter for list mode: a tappable section title over collapsible content. */
    class ShowItemAdapter extends RecyclerView.Adapter<ShowItemAdapter.ViewHolder> {

        private List<ShowItem> mList;

        class ViewHolder extends RecyclerView.ViewHolder {
            TextView titleText;
            TextView contentText;

            public ViewHolder(View view) {
                super(view);
                titleText = view.findViewById(R.id.item_name);
                contentText = view.findViewById(R.id.item_content);
                // Tapping the title collapses/expands the section body.
                titleText.setOnClickListener(new View.OnClickListener() {
                    @Override
                    public void onClick(View v) {
                        if (contentText.getVisibility() == View.VISIBLE) {
                            contentText.setVisibility(View.GONE);
                        } else {
                            contentText.setVisibility(View.VISIBLE);
                        }
                    }
                });
                // Tapping the body sends its text to the base-class FAB report flow.
                contentText.setOnClickListener(new View.OnClickListener() {
                    @Override
                    public void onClick(View v) {
                        reportString = contentText.getText().toString().trim();
                        doFabReport(reportString, false, true);
                    }
                });
            }
        }

        public ShowItemAdapter(List<ShowItem> list) {
            mList = list;
        }

        @NonNull
        @Override
        public ViewHolder onCreateViewHolder(@NonNull ViewGroup parent, int viewType) {
            View view = LayoutInflater.from(parent.getContext()).inflate(R.layout.jq_show_item, parent, false);
            final ViewHolder holder = new ViewHolder(view);
            // Intentional no-op listener: the row consumes clicks so they don't fall
            // through to the list, while title/content handle their own taps.
            view.setOnClickListener(new View.OnClickListener() {
                @Override
                public void onClick(View v) {
                }
            });
            return holder;
        }

        @Override
        public void onBindViewHolder(ViewHolder holder, int position) {
            ShowItem showItem = mList.get(position);
            holder.titleText.setText(showItem.getName());
            holder.contentText.setText(showItem.getContent());
        }

        @Override
        public int getItemCount() {
            return mList.size();
        }
    }
}
|
package com.nls.security;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContext;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.security.core.userdetails.UserDetails;
/**
 * Static helpers for interrogating the Spring Security context.
 */
public final class SecurityUtils {

    private SecurityUtils() {
        // utility class — no instances
    }

    /**
     * Get the login of the current user.
     *
     * @return the login of the current user, or {@code null} when no one is authenticated
     */
    public static String getCurrentUserLogin() {
        Authentication authentication = SecurityContextHolder.getContext().getAuthentication();
        if (authentication == null) {
            return null;
        }
        Object principal = authentication.getPrincipal();
        if (principal instanceof UserDetails) {
            return ((UserDetails) principal).getUsername();
        }
        if (principal instanceof String) {
            return (String) principal;
        }
        return null;
    }

    /**
     * Check if a user is authenticated.
     *
     * @return true if the user is authenticated (i.e. not anonymous), false otherwise
     */
    public static boolean isAuthenticated() {
        Authentication authentication = SecurityContextHolder.getContext().getAuthentication();
        return authentication != null
                && authentication.getAuthorities().stream()
                        .noneMatch(granted -> granted.getAuthority().equals(AuthoritiesConstants.ANONYMOUS));
    }

    /**
     * If the current user has a specific authority (security role).
     *
     * <p>The name of this method comes from the isUserInRole() method in the Servlet API</p>
     *
     * @param authority the authority to check
     * @return true if the current user has the authority, false otherwise
     */
    public static boolean isCurrentUserInRole(String authority) {
        Authentication authentication = SecurityContextHolder.getContext().getAuthentication();
        return authentication != null
                && authentication.getAuthorities().stream()
                        .anyMatch(granted -> granted.getAuthority().equals(authority));
    }
}
|
package com.marticles.airnet.mainservice.controller;
import com.marticles.airnet.mainservice.model.User;
import com.marticles.airnet.mainservice.model.UserLocal;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.web.bind.annotation.GetMapping;
/**
 * @author Marticles
 * @description InfoController — serves the static informational pages
 * @date 2019/3/5
 */
@Controller
public class InfoController {

    @Autowired
    private UserLocal userLocal;

    /** API documentation page; rendered without login status. */
    @GetMapping("/about-api")
    public String aboutApi(Model model) {
        return "/about-api";
    }

    /** "About AirNet" page, with the current login status exposed to the template. */
    @GetMapping("/about-airnet")
    public String aboutAirNet(Model model) {
        setUserLoginStatus(model, userLocal.getUser());
        return "/about-airnet";
    }

    /** General info page, with the current login status exposed to the template. */
    @GetMapping("/info")
    public String info(Model model) {
        setUserLoginStatus(model, userLocal.getUser());
        return "/info";
    }

    /** Publishes "isLogin" as the string "true"/"false" for template consumption. */
    private void setUserLoginStatus(Model model, User user) {
        model.addAttribute("isLogin", user != null ? "true" : "false");
    }
}
|
package com.ceiba.reserva.comando.manejador;
import com.ceiba.manejador.ManejadorComando;
import com.ceiba.reserva.servicio.ServicioEliminarReserva;
import org.springframework.stereotype.Component;
@Component
public class ManejadorEliminarReserva implements ManejadorComando<Long> {
private final ServicioEliminarReserva servicioEliminarReserva;
public ManejadorEliminarReserva(ServicioEliminarReserva servicioEliminarReserva) {
this.servicioEliminarReserva = servicioEliminarReserva;
}
public void ejecutar(Long idReserva){
this.servicioEliminarReserva.ejecutar(idReserva);
}
}
|
/*******************************************************************************
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*******************************************************************************/
/* This file has been modified by Open Source Strategies, Inc. */
package org.ofbiz.accounting.thirdparty.authorizedotnet;
import java.math.BigDecimal;
import java.sql.Timestamp;
import com.ibm.icu.util.Calendar;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import javolution.util.FastMap;
import org.ofbiz.accounting.payment.PaymentGatewayServices;
import org.ofbiz.base.util.Debug;
import org.ofbiz.base.util.HttpClient;
import org.ofbiz.base.util.HttpClientException;
import org.ofbiz.base.util.UtilDateTime;
import org.ofbiz.base.util.UtilFormatOut;
import org.ofbiz.base.util.UtilMisc;
import org.ofbiz.base.util.UtilProperties;
import org.ofbiz.base.util.UtilValidate;
import org.ofbiz.entity.Delegator;
import org.ofbiz.entity.GenericEntityException;
import org.ofbiz.entity.GenericValue;
import org.ofbiz.service.DispatchContext;
import org.ofbiz.service.ModelService;
import org.ofbiz.service.ServiceUtil;
public class AIMPaymentServices {
// Conventional OFBiz module tag used by the Debug logger throughout this class.
public static final String module = AIMPaymentServices.class.getName();
// The list of refund failure response codes that would cause the ccRefund service
// to attempt to void the refund's associated authorization transaction. This list
// contains the responses where the voiding does not need to be done within a certain
// time limit
private static final List<String> VOIDABLE_RESPONSES_NO_TIME_LIMIT = UtilMisc.toList("50");
// A list of refund failure response codes that would cause the ccRefund service
// to first check whether the refund's associated authorization transaction has occurred
// within a certain time limit, and if so, cause it to void the transaction
private static final List<String> VOIDABLE_RESPONSES_TIME_LIMIT = UtilMisc.toList("54");
// The number of days in the time limit when one can safely consider an unsettled
// transaction to be still valid
private static final int TIME_LIMIT_VERIFICATION_DAYS = 120;
// Shared gateway configuration, read by isTestMode(). NOTE(review): presumably
// assigned inside buildAIMProperties — the assignment is not visible in this chunk; confirm.
private static Properties AIMProperties = null;
// Decides whether a failed refund's response code should make ccRefund attempt to
// void the refund's associated authorization transaction (see the two code lists above).
private static boolean isVoidableResponse(String responseCode) {
    if (VOIDABLE_RESPONSES_NO_TIME_LIMIT.contains(responseCode)) {
        return true;
    }
    return VOIDABLE_RESPONSES_TIME_LIMIT.contains(responseCode);
}
/**
 * Service: authorize (AUTH_ONLY) a credit-card payment via Authorize.Net AIM.
 * Builds the gateway request from the service context, validates it, posts it,
 * and folds the gateway reply into the returned service result map.
 */
public static Map<String, Object> ccAuth(DispatchContext ctx, Map<String, Object> context) {
    Delegator delegator = ctx.getDelegator();
    Map<String, Object> results = ServiceUtil.returnSuccess();
    Map<String, Object> request = FastMap.newInstance();
    // Each build* helper mutates props/request in place; the call order matters.
    Properties props = buildAIMProperties(context, delegator);
    buildMerchantInfo(context, props, request);
    buildGatewayResponeConfig(context, props, request);
    buildCustomerBillingInfo(context, props, request);
    buildEmailSettings(context, props, request);
    buildInvoiceInfo(context, props, request);
    props.put("transType", "AUTH_ONLY");
    buildAuthTransaction(context, props, request);
    Map<String, Object> validateResults = validateRequest(context, props, request);
    String respMsg = (String)validateResults.get(ModelService.RESPONSE_MESSAGE);
    if (ModelService.RESPOND_ERROR.equals(respMsg)) {
        results.put(ModelService.ERROR_MESSAGE, "Validation Failed - invalid values");
        return results;
    }
    Map<String, Object> reply = processCard(request, props);
    //now we need to process the result
    processAuthTransResult(reply, results);
    return results;
}
/**
 * Service: capture a previously authorized transaction (PRIOR_AUTH_CAPTURE).
 * Looks up the credit card and auth transaction for the order payment preference,
 * posts the capture, and errors out when the gateway returns no captureRefNum.
 */
public static Map<String, Object> ccCapture(DispatchContext ctx, Map<String, Object> context) {
    Delegator delegator = ctx.getDelegator();
    GenericValue orderPaymentPreference = (GenericValue) context.get("orderPaymentPreference");
    GenericValue creditCard = null;
    try {
        creditCard = delegator.getRelatedOne("CreditCard",orderPaymentPreference);
    } catch (GenericEntityException e) {
        Debug.logError(e, module);
        return ServiceUtil.returnError("Unable to obtain cc information from payment preference");
    }
    GenericValue authTransaction = PaymentGatewayServices.getAuthTransaction(orderPaymentPreference);
    if (authTransaction == null) {
        return ServiceUtil.returnError("No authorization transaction found for the OrderPaymentPreference; cannot Capture");
    }
    // The build* helpers below read these two values out of the context.
    context.put("creditCard", creditCard);
    context.put("authTransaction", authTransaction);
    Map<String, Object> results = ServiceUtil.returnSuccess();
    Map<String, Object> request = FastMap.newInstance();
    Properties props = buildAIMProperties(context, delegator);
    buildMerchantInfo(context, props, request);
    buildGatewayResponeConfig(context, props, request);
    buildCustomerBillingInfo(context, props, request);
    buildEmailSettings(context, props, request);
    request.put("x_Invoice_Num","Order " + orderPaymentPreference.getString("orderId"));
    // PRIOR_AUTH_CAPTURE is the right one to use, since we already have an authorization from the authTransaction.
    // CAPTURE_ONLY is a "force" transaction to be used if there is no prior authorization
    props.put("transType", "PRIOR_AUTH_CAPTURE");
    //props.put("transType","CAPTURE_ONLY");
    props.put("cardtype", (String)creditCard.get("cardType"));
    buildCaptureTransaction(context,props,request);
    Map<String, Object> validateResults = validateRequest(context, props, request);
    String respMsg = (String)validateResults.get(ModelService.RESPONSE_MESSAGE);
    if (ModelService.RESPOND_ERROR.equals(respMsg)) {
        results.put(ModelService.ERROR_MESSAGE, "Validation Failed - invalid values");
        return results;
    }
    Map<String, Object> reply = processCard(request, props);
    processCaptureTransResult(reply, results);
    // if there is no captureRefNum, then the capture failed
    if (results.get("captureRefNum") == null) {
        return ServiceUtil.returnError((String) results.get("captureMessage"));
    }
    return results;
}
/**
 * Service: refund (CREDIT) against a previously authorized/captured transaction.
 * If the refund fails with a "voidable" response code (see the code lists above),
 * falls back to voiding the associated authorization — but only for a full-amount
 * refund, and (for time-limited codes) only when the auth is recent enough.
 */
public static Map<String, Object> ccRefund(DispatchContext ctx, Map<String, Object> context) {
    Delegator delegator = ctx.getDelegator();
    GenericValue orderPaymentPreference = (GenericValue) context.get("orderPaymentPreference");
    GenericValue creditCard = null;
    try {
        creditCard = delegator.getRelatedOne("CreditCard", orderPaymentPreference);
    } catch (GenericEntityException e) {
        Debug.logError(e, module);
        return ServiceUtil.returnError("Unable to obtain cc information from payment preference");
    }
    GenericValue authTransaction = PaymentGatewayServices.getAuthTransaction(orderPaymentPreference);
    if (authTransaction == null) {
        return ServiceUtil.returnError("No authorization transaction found for the OrderPaymentPreference; cannot Refund");
    }
    // The build* helpers below read these two values out of the context.
    context.put("creditCard",creditCard);
    context.put("authTransaction",authTransaction);
    Map<String, Object> results = ServiceUtil.returnSuccess();
    Map<String, Object> request = FastMap.newInstance();
    Properties props = buildAIMProperties(context, delegator);
    buildMerchantInfo(context, props, request);
    buildGatewayResponeConfig(context, props, request);
    buildCustomerBillingInfo(context, props, request);
    buildEmailSettings(context, props, request);
    buildInvoiceInfo(context, props, request);
    props.put("transType", "CREDIT");
    props.put("cardtype", (String)creditCard.get("cardType"));
    buildRefundTransaction(context, props, request);
    Map<String, Object> validateResults = validateRequest(context, props, request);
    String respMsg = (String)validateResults.get(ModelService.RESPONSE_MESSAGE);
    if (ModelService.RESPOND_ERROR.equals(respMsg)) {
        results.put(ModelService.ERROR_MESSAGE, "Validation Failed - invalid values");
        return results;
    }
    Map<String, Object> reply = processCard(request, props);
    results.putAll(processRefundTransResult(reply));
    boolean refundResult = ((Boolean)results.get("refundResult")).booleanValue();
    String refundFlag = (String)results.get("refundFlag");
    // Since the refund failed, we are going to void the previous authorization against
    // which ccRefunds attempted to issue the refund. This happens because Authorize.NET requires
    // that settled transactions need to be voided the same day. unfortunately they provide no method for
    // determining what transactions can be voided and what can be refunded, so we'll have to try it with timestamps
    if (!refundResult && isVoidableResponse(refundFlag)) {
        boolean canDoVoid = false;
        if (VOIDABLE_RESPONSES_TIME_LIMIT.contains(refundFlag)) {
            // We are calculating the timestamp that is at the beginning of a time limit,
            // since we can safely assume that, within this time limit, an unsettled transaction
            // can still be considered valid
            Calendar startCalendar = UtilDateTime.toCalendar(UtilDateTime.nowTimestamp());
            startCalendar.add(Calendar.DATE, -TIME_LIMIT_VERIFICATION_DAYS);
            Timestamp startTimestamp = new java.sql.Timestamp(startCalendar.getTime().getTime());
            Timestamp authTimestamp = authTransaction.getTimestamp("transactionDate");
            if (startTimestamp.before(authTimestamp)) {
                canDoVoid = true;
            }
        } else {
            // Since there's no time limit to check, the voiding of the transaction will go
            // through as usual
            canDoVoid = true;
        }
        if (canDoVoid) {
            Debug.logWarning("Refund was unsuccessful; will now attempt a VOID transaction.", module);
            // A void cancels the whole authorization, so it is only equivalent to the
            // requested refund when the two amounts match exactly.
            BigDecimal authAmountObj = authTransaction.getBigDecimal("amount");
            BigDecimal refundAmountObj = (BigDecimal)context.get("refundAmount");
            BigDecimal authAmount = authAmountObj != null ? authAmountObj : BigDecimal.ZERO;
            BigDecimal refundAmount = refundAmountObj != null ? refundAmountObj : BigDecimal.ZERO;
            if (authAmount.compareTo(refundAmount) == 0) {
                reply = voidTransaction(authTransaction, context, delegator);
                if (ServiceUtil.isError(reply)) {
                    return reply;
                }
                results = ServiceUtil.returnSuccess();
                results.putAll(processRefundTransResult(reply));
                return results;
            } else {
                // TODO: Modify the code to (a) do a void of the whole transaction, and (b)
                // create a new auth-capture of the difference.
                return ServiceUtil.returnFailure("Cannot perform a VOID transaction: authAmount [" + authAmount + "] is different than voidAmount [" + refundAmount + "]");
            }
        }
    }
    return results;
}
/**
 * Service: release (void) the authorization held by an order payment preference.
 * Errors out when no authorization transaction exists to void.
 */
public static Map<String, Object> ccRelease(DispatchContext ctx, Map<String, Object> context) {
    Delegator delegator = ctx.getDelegator();
    GenericValue orderPaymentPreference = (GenericValue) context.get("orderPaymentPreference");
    GenericValue authTransaction = PaymentGatewayServices.getAuthTransaction(orderPaymentPreference);
    if (authTransaction == null) {
        return ServiceUtil.returnError("No authorization transaction found for the OrderPaymentPreference [ID = " + orderPaymentPreference.getString("orderPaymentPreferenceId") + "]; cannot void");
    }
    Map<String, Object> reply = voidTransaction(authTransaction, context, delegator);
    if (ServiceUtil.isError(reply)) {
        return reply;
    }
    Map<String, Object> results = ServiceUtil.returnSuccess();
    results.putAll(processReleaseTransResult(reply));
    return results;
}
/**
 * Posts a VOID transaction for the given authorization. Shared by ccRelease and
 * the ccRefund fallback path. Mutates the passed context by storing the auth
 * transaction under "authTransaction" for the build* helpers.
 */
private static Map<String, Object> voidTransaction(GenericValue authTransaction, Map<String, Object> context, Delegator delegator) {
    context.put("authTransaction", authTransaction);
    Map<String, Object> results = ServiceUtil.returnSuccess();
    Map<String, Object> request = FastMap.newInstance();
    Properties props = buildAIMProperties(context, delegator);
    buildMerchantInfo(context, props, request);
    buildGatewayResponeConfig(context, props, request);
    buildEmailSettings(context, props, request);
    props.put("transType", "VOID");
    buildVoidTransaction(context, props, request);
    Map<String, Object> validateResults = validateRequest(context, props, request);
    String respMsg = (String)validateResults.get(ModelService.RESPONSE_MESSAGE);
    if (ModelService.RESPOND_ERROR.equals(respMsg)) {
        results.put(ModelService.ERROR_MESSAGE, "Validation Failed - invalid values");
        return results;
    }
    return processCard(request, props);
}
/**
 * Service: stand-alone credit. Not supported by the AIM 3.1 integration, so this
 * always returns an error result.
 */
public static Map<String, Object> ccCredit(DispatchContext ctx, Map<String, Object> context) {
    Map<String, Object> response = FastMap.newInstance();
    response.put(ModelService.RESPONSE_MESSAGE, ModelService.RESPOND_ERROR);
    response.put(ModelService.ERROR_MESSAGE, "Authorize.net ccCredit unsupported with version 3.1");
    return response;
}
/**
 * Service: combined authorize-and-capture (AUTH_CAPTURE) in one gateway call.
 * Mirrors ccAuth's request construction, then requires a captureRefNum in the
 * processed reply — its absence means the capture failed.
 */
public static Map<String, Object> ccAuthCapture(DispatchContext ctx, Map<String, Object> context) {
    Delegator delegator = ctx.getDelegator();
    Map<String, Object> results = ServiceUtil.returnSuccess();
    Map<String, Object> request = FastMap.newInstance();
    // Each build* helper mutates props/request in place; the call order matters.
    Properties props = buildAIMProperties(context, delegator);
    buildMerchantInfo(context, props, request);
    buildGatewayResponeConfig(context, props, request);
    buildCustomerBillingInfo(context, props, request);
    buildEmailSettings(context, props, request);
    buildInvoiceInfo(context, props, request);
    props.put("transType", "AUTH_CAPTURE");
    buildAuthTransaction(context, props, request);
    Map<String, Object> validateResults = validateRequest(context, props, request);
    String respMsg = (String)validateResults.get(ModelService.RESPONSE_MESSAGE);
    if (ModelService.RESPOND_ERROR.equals(respMsg)) {
        results.put(ModelService.ERROR_MESSAGE, "Validation Failed - invalid values");
        return results;
    }
    Map<String, Object> reply = processCard(request, props);
    //now we need to process the result
    processAuthCaptureTransResult(reply, results);
    // if there is no captureRefNum, then the capture failed
    if (results.get("captureRefNum") == null) {
        return ServiceUtil.returnError((String) results.get("captureMessage"));
    }
    return results;
}
/**
 * Posts the assembled request to the Authorize.Net gateway URL and wraps the raw
 * reply in an AuthorizeResponse. Sets "authResult" TRUE only on an APPROVED
 * response code.
 *
 * NOTE(review): an HttpClientException is only logged — the method still returns
 * RESPOND_SUCCESS with no "authResult"/"authorizeResponse" entries; callers'
 * process*TransResult helpers presumably tolerate that — confirm.
 */
private static Map<String, Object> processCard(Map<String, Object> request, Properties props) {
    Map<String, Object> result = FastMap.newInstance();
    String url = props.getProperty("url");
    if (UtilValidate.isEmpty(url)) {
        return ServiceUtil.returnFailure("No payment.authorizedotnet.url found.");
    }
    if (isTestMode()) {
        Debug.logInfo("TEST Authorize.net using url [" + url + "]", module);
        Debug.logInfo("TEST Authorize.net request string " + request.toString(),module);
        Debug.logInfo("TEST Authorize.net properties string " + props.toString(),module);
    }
    try {
        HttpClient httpClient = new HttpClient(url, request);
        String certificateAlias = props.getProperty("certificateAlias");
        httpClient.setClientCertificateAlias(certificateAlias);
        String httpResponse = httpClient.post();
        Debug.logInfo("transaction response: " + httpResponse,module);
        AuthorizeResponse ar = new AuthorizeResponse(httpResponse);
        String resp = ar.getResponseCode();
        if (resp.equals(AuthorizeResponse.APPROVED)) {
            result.put("authResult", Boolean.TRUE);
        } else {
            result.put("authResult", Boolean.FALSE);
            Debug.logInfo("responseCode: " + ar.getResponseField(AuthorizeResponse.RESPONSE_CODE),module);
            Debug.logInfo("responseReason: " + ar.getResponseField(AuthorizeResponse.RESPONSE_REASON_CODE),module);
            Debug.logInfo("reasonText: " + ar.getResponseField(AuthorizeResponse.RESPONSE_REASON_TEXT),module);
        }
        result.put("httpResponse", httpResponse);
        result.put("authorizeResponse", ar);
    } catch (HttpClientException e) {
        Debug.logInfo(e, "Could not complete Authorize.Net transaction: " + e.toString(),module);
    }
    result.put(ModelService.RESPONSE_MESSAGE, ModelService.RESPOND_SUCCESS);
    return result;
}
// True when the shared gateway configuration enables test mode ("testReq" flag).
private static boolean isTestMode() {
    String testFlag = (String) AIMProperties.get("testReq");
    return "true".equalsIgnoreCase(testFlag);
}
/**
 * Assembles the gateway configuration Properties for an Authorize.Net AIM
 * call. Each value is read either from the PaymentGatewayAuthorizeNet entity
 * (when paymentGatewayConfigId is set) or from the payment.properties-style
 * resource named by "paymentConfig".
 *
 * @param context   service context; may contain paymentGatewayConfigId,
 *                  paymentConfig and creditCard entries
 * @param delegator entity delegator used for gateway-config lookups
 * @return the populated Properties; also cached in the static AIMProperties
 *         field on first use
 */
private static Properties buildAIMProperties(Map<String, Object> context, Delegator delegator) {
    String paymentGatewayConfigId = (String) context.get("paymentGatewayConfigId");
    String configStr = (String) context.get("paymentConfig");
    if (configStr == null) {
        configStr = "payment.properties";
    }
    GenericValue cc = (GenericValue) context.get("creditCard");
    String url = getPaymentGatewayConfigValue(delegator, paymentGatewayConfigId, "transactionUrl", configStr, "payment.authorizedotnet.url");
    String certificateAlias = getPaymentGatewayConfigValue(delegator, paymentGatewayConfigId, "certificateAlias", configStr, "payment.authorizedotnet.certificateAlias");
    String ver = getPaymentGatewayConfigValue(delegator, paymentGatewayConfigId, "apiVersion", configStr, "payment.authorizedotnet.version");
    String delimited = getPaymentGatewayConfigValue(delegator, paymentGatewayConfigId, "delimitedData", configStr, "payment.authorizedotnet.delimited");
    String delimiter = getPaymentGatewayConfigValue(delegator, paymentGatewayConfigId, "delimiterChar", configStr, "payment.authorizedotnet.delimiter");
    String method = getPaymentGatewayConfigValue(delegator, paymentGatewayConfigId, "method", configStr, "payment.authorizedotnet.method");
    String emailCustomer = getPaymentGatewayConfigValue(delegator, paymentGatewayConfigId, "emailCustomer", configStr, "payment.authorizedotnet.emailcustomer");
    String emailMerchant = getPaymentGatewayConfigValue(delegator, paymentGatewayConfigId, "emailMerchant", configStr, "payment.authorizedotnet.emailmerchant");
    String testReq = getPaymentGatewayConfigValue(delegator, paymentGatewayConfigId, "testMode", configStr, "payment.authorizedotnet.test");
    String relay = getPaymentGatewayConfigValue(delegator, paymentGatewayConfigId, "relayResponse", configStr, "payment.authorizedotnet.relay");
    String tranKey = getPaymentGatewayConfigValue(delegator, paymentGatewayConfigId, "tranKey", configStr, "payment.authorizedotnet.trankey");
    String login = getPaymentGatewayConfigValue(delegator, paymentGatewayConfigId, "userId", configStr, "payment.authorizedotnet.login");
    String password = getPaymentGatewayConfigValue(delegator, paymentGatewayConfigId, "pwd", configStr, "payment.authorizedotnet.password");
    String transDescription = getPaymentGatewayConfigValue(delegator, paymentGatewayConfigId, "transDescription", configStr, "payment.authorizedotnet.transdescription");
    String duplicateWindow = getPaymentGatewayConfigValue(delegator, paymentGatewayConfigId, "duplicateWindow", configStr, "payment.authorizedotnet.duplicateWindow");
    // API version defaults to 3.0 when unconfigured
    if (UtilValidate.isEmpty(ver)) {
        ver = "3.0";
    }
    if (UtilValidate.isEmpty(login)) {
        Debug.logInfo("the login property in " + configStr + " is not configured.", module);
    }
    if (UtilValidate.isEmpty(password) && !("3.1".equals(ver))) {
        Debug.logInfo("The password property in " + configStr + " is not configured.", module);
    }
    if ("3.1".equals(ver)) {
        // FIX: use UtilValidate.isEmpty for consistency with the other checks above
        if (UtilValidate.isEmpty(tranKey)) {
            Debug.logInfo("Trankey property required for version 3.1 reverting to 3.0", module);
            ver = "3.0";
        }
    }
    Properties props = new Properties();
    props.put("url", url);
    props.put("certificateAlias", certificateAlias);
    props.put("ver", ver);
    props.put("delimited", delimited);
    props.put("delimiter", delimiter);
    props.put("method", method);
    props.put("emailCustomer", emailCustomer);
    props.put("emailMerchant", emailMerchant);
    props.put("testReq", testReq);
    props.put("relay", relay);
    props.put("transDescription", transDescription);
    props.put("login", login);
    props.put("password", password);
    props.put("trankey", tranKey);
    props.put("duplicateWindow", duplicateWindow);
    if (cc != null) {
        // FIX: guard against a null cardType; Properties.put (Hashtable) throws an NPE on null values
        String cardType = (String) cc.get("cardType");
        if (cardType != null) {
            props.put("cardtype", cardType);
        }
    }
    // cache the first-built configuration for isTestMode()
    if (AIMProperties == null) {
        AIMProperties = props;
    }
    if (isTestMode()) {
        Debug.logInfo("Created Authorize.Net properties file: " + props.toString(), module);
    }
    return props;
}
/**
 * Adds the merchant credentials and API version to the AIM request
 * (x_Login, x_Tran_Key, x_Password, x_Version, x_duplicate_window).
 */
private static void buildMerchantInfo(Map<String, Object> params, Properties props, Map<String, Object> AIMRequest) {
    AIMRequest.put("x_Login", props.getProperty("login"));
    String tranKey = props.getProperty("trankey");
    if (UtilValidate.isNotEmpty(tranKey)) {
        AIMRequest.put("x_Tran_Key", tranKey);
    }
    AIMRequest.put("x_Password", props.getProperty("password"));
    AIMRequest.put("x_Version", props.getProperty("ver"));
    String dupWindow = props.getProperty("duplicateWindow");
    if (UtilValidate.isNotEmpty(dupWindow)) {
        AIMRequest.put("x_duplicate_window", dupWindow);
    }
}
/**
 * Adds the response-format settings (delimited data flag and delimiter
 * character) to the AIM request.
 */
private static void buildGatewayResponeConfig(Map<String, Object> params, Properties props, Map<String, Object> AIMRequest) {
    String delimitedFlag = props.getProperty("delimited");
    String delimiterChar = props.getProperty("delimiter");
    AIMRequest.put("x_Delim_Data", delimitedFlag);
    AIMRequest.put("x_Delim_Char", delimiterChar);
}
/**
 * Adds the customer billing name and postal-address fields (x_First_Name,
 * x_Last_Name, x_Company, x_Address, x_City, x_State, x_Zip, x_Country) to
 * the AIM request.
 *
 * <p>Two input shapes are supported: for captures, "orderPaymentPreference"
 * is present and the credit card / address are looked up from it; for
 * authorizations, "billToParty" and "billingAddress" are passed directly.
 * Entity-lookup failures are logged and swallowed, leaving the request
 * without billing fields.
 */
private static void buildCustomerBillingInfo(Map<String, Object> params, Properties props, Map<String, Object> AIMRequest) {
    try {
        // this would be used in the case of a capture, where one of the parameters is an OrderPaymentPreference
        if (params.get("orderPaymentPreference") != null) {
            GenericValue opp = (GenericValue) params.get("orderPaymentPreference");
            if ("CREDIT_CARD".equals(opp.getString("paymentMethodTypeId"))) {
                // sometimes the ccAuthCapture interface is used, in which case the creditCard is passed directly
                GenericValue creditCard = (GenericValue) params.get("creditCard");
                // fall back to the preference's own card when none was passed or it doesn't match
                if (creditCard == null || ! (opp.get("paymentMethodId").equals(creditCard.get("paymentMethodId")))) {
                    creditCard = opp.getRelatedOne("CreditCard");
                }
                AIMRequest.put("x_First_Name", UtilFormatOut.checkNull(creditCard.getString("firstNameOnCard")));
                AIMRequest.put("x_Last_Name", UtilFormatOut.checkNull(creditCard.getString("lastNameOnCard")));
                AIMRequest.put("x_Company", UtilFormatOut.checkNull(creditCard.getString("companyNameOnCard")));
                // billing address is only sent when the card has an associated contact mech
                if (UtilValidate.isNotEmpty(creditCard.getString("contactMechId"))) {
                    GenericValue address = creditCard.getRelatedOne("PostalAddress");
                    if (address != null) {
                        AIMRequest.put("x_Address", UtilFormatOut.checkNull(address.getString("address1")));
                        AIMRequest.put("x_City", UtilFormatOut.checkNull(address.getString("city")));
                        AIMRequest.put("x_State", UtilFormatOut.checkNull(address.getString("stateProvinceGeoId")));
                        AIMRequest.put("x_Zip", UtilFormatOut.checkNull(address.getString("postalCode")));
                        AIMRequest.put("x_Country", UtilFormatOut.checkNull(address.getString("countryGeoId")));
                    }
                }
            } else {
                Debug.logWarning("Payment preference " + opp + " is not a credit card", module);
            }
        } else {
            // this would be the case for an authorization
            GenericValue cp = (GenericValue)params.get("billToParty");
            GenericValue ba = (GenericValue)params.get("billingAddress");
            AIMRequest.put("x_First_Name", UtilFormatOut.checkNull(cp.getString("firstName")));
            AIMRequest.put("x_Last_Name", UtilFormatOut.checkNull(cp.getString("lastName")));
            AIMRequest.put("x_Address", UtilFormatOut.checkNull(ba.getString("address1")));
            AIMRequest.put("x_City", UtilFormatOut.checkNull(ba.getString("city")));
            AIMRequest.put("x_State", UtilFormatOut.checkNull(ba.getString("stateProvinceGeoId")));
            AIMRequest.put("x_Zip", UtilFormatOut.checkNull(ba.getString("postalCode")));
            AIMRequest.put("x_Country", UtilFormatOut.checkNull(ba.getString("countryGeoId")));
        }
        return;
    } catch (GenericEntityException ex) {
        // best-effort: log and continue with whatever billing fields were already set
        Debug.logError("Cannot build customer information for " + params + " due to error: " + ex.getMessage(), module);
        return;
    }
}
/**
 * Adds the gateway e-mail notification flags, plus the customer's e-mail
 * address when a "billToEmail" contact mech is available.
 */
private static void buildEmailSettings(Map<String, Object> params, Properties props, Map<String, Object> AIMRequest) {
    AIMRequest.put("x_Email_Customer", props.getProperty("emailCustomer"));
    AIMRequest.put("x_Email_Merchant", props.getProperty("emailMerchant"));
    GenericValue billToEmail = (GenericValue) params.get("billToEmail");
    if (billToEmail != null) {
        AIMRequest.put("x_Email", UtilFormatOut.checkNull(billToEmail.getString("infoString")));
    }
}
/**
 * Adds the invoice number ("Order " + orderId) and transaction description to
 * the AIM request. When no orderId is passed directly, it is taken from the
 * orderPaymentPreference if present.
 */
private static void buildInvoiceInfo(Map<String, Object> params, Properties props, Map<String, Object> AIMRequest) {
    String orderId = UtilFormatOut.checkNull((String) params.get("orderId"));
    if (UtilValidate.isEmpty(orderId)) {
        GenericValue opp = (GenericValue) params.get("orderPaymentPreference");
        if (UtilValidate.isNotEmpty(opp)) {
            orderId = (String) opp.get("orderId");
        }
    }
    AIMRequest.put("x_Invoice_Num", "Order " + orderId);
    AIMRequest.put("x_Description", UtilFormatOut.checkNull(props.getProperty("transDescription")));
}
/**
 * Adds the authorization transaction fields (amount, currency, method, type,
 * card number, expiry and optional CVV) to the AIM request.
 */
private static void buildAuthTransaction(Map<String, Object> params, Properties props, Map<String, Object> AIMRequest) {
    GenericValue creditCard = (GenericValue) params.get("creditCard");
    BigDecimal processAmount = (BigDecimal) params.get("processAmount");
    AIMRequest.put("x_Amount", processAmount.toString());
    AIMRequest.put("x_Currency_Code", (String) params.get("currency"));
    AIMRequest.put("x_Method", props.getProperty("method"));
    AIMRequest.put("x_Type", props.getProperty("transType"));
    AIMRequest.put("x_Card_Num", UtilFormatOut.checkNull(creditCard.getString("cardNumber")));
    AIMRequest.put("x_Exp_Date", UtilFormatOut.checkNull(creditCard.getString("expireDate")));
    String securityCode = (String) params.get("cardSecurityCode");
    // CVV is optional and only sent when supplied
    if (UtilValidate.isNotEmpty(securityCode)) {
        AIMRequest.put("x_card_code", securityCode);
    }
}
/**
 * Adds the capture transaction fields to the AIM request, including the
 * transaction id and auth code from the prior authorization.
 */
private static void buildCaptureTransaction(Map<String, Object> params, Properties props, Map<String, Object> AIMRequest) {
    GenericValue authTrans = (GenericValue) params.get("authTransaction");
    GenericValue creditCard = (GenericValue) params.get("creditCard");
    BigDecimal captureAmount = (BigDecimal) params.get("captureAmount");
    AIMRequest.put("x_Amount", captureAmount.toString());
    AIMRequest.put("x_Currency_Code", (String) params.get("currency"));
    AIMRequest.put("x_Method", props.getProperty("method"));
    AIMRequest.put("x_Type", props.getProperty("transType"));
    AIMRequest.put("x_Card_Num", UtilFormatOut.checkNull(creditCard.getString("cardNumber")));
    AIMRequest.put("x_Exp_Date", UtilFormatOut.checkNull(creditCard.getString("expireDate")));
    // reference the original authorization so the gateway captures against it
    AIMRequest.put("x_Trans_ID", authTrans.get("referenceNum"));
    AIMRequest.put("x_Auth_Code", authTrans.get("gatewayCode"));
}
/**
 * Adds the refund transaction fields to the AIM request, referencing the
 * original transaction's id and auth code.
 */
private static void buildRefundTransaction(Map<String, Object> params, Properties props, Map<String, Object> AIMRequest) {
    GenericValue at = (GenericValue) params.get("authTransaction");
    GenericValue cc = (GenericValue) params.get("creditCard");
    String currency = (String) params.get("currency");
    String amount = ((BigDecimal) params.get("refundAmount")).toString();
    String number = UtilFormatOut.checkNull(cc.getString("cardNumber"));
    String expDate = UtilFormatOut.checkNull(cc.getString("expireDate"));
    AIMRequest.put("x_Amount", amount);
    AIMRequest.put("x_Currency_Code", currency);
    AIMRequest.put("x_Method", props.getProperty("method"));
    AIMRequest.put("x_Type", props.getProperty("transType"));
    AIMRequest.put("x_Card_Num", number);
    AIMRequest.put("x_Exp_Date", expDate);
    AIMRequest.put("x_Trans_ID", at.get("referenceNum"));
    AIMRequest.put("x_Auth_Code", at.get("gatewayCode"));
    // FIX: log label previously said "buildCaptureTransaction." (copy-paste error)
    Debug.logInfo("buildRefundTransaction. " + at.toString(), module);
}
/**
 * Adds the void transaction fields to the AIM request, referencing the
 * original transaction's id and auth code. No amount or card data is needed.
 */
private static void buildVoidTransaction(Map<String, Object> params, Properties props, Map<String, Object> AIMRequest) {
    GenericValue authTrans = (GenericValue) params.get("authTransaction");
    AIMRequest.put("x_Currency_Code", (String) params.get("currency"));
    AIMRequest.put("x_Method", props.getProperty("method"));
    AIMRequest.put("x_Type", props.getProperty("transType"));
    AIMRequest.put("x_Trans_ID", authTrans.get("referenceNum"));
    AIMRequest.put("x_Auth_Code", authTrans.get("gatewayCode"));
    Debug.logInfo("buildVoidTransaction. " + authTrans.toString(), module);
}
/**
 * Request validation hook. Currently a no-op placeholder: no field-level
 * checks are performed and a success result is always returned.
 */
private static Map<String, Object> validateRequest(Map<String, Object> params, Properties props, Map<String, Object> AIMRequest) {
    Map<String, Object> validationResult = FastMap.newInstance();
    validationResult.put(ModelService.RESPONSE_MESSAGE, ModelService.RESPOND_SUCCESS);
    return validationResult;
}
/**
 * Maps the raw gateway reply into the service result fields for an
 * authorization (authResult, authFlag, authMessage, and on success the
 * auth code, reference number, CVV/AVS codes and processed amount).
 *
 * @param reply   output of processCard(); must contain "authorizeResponse" and "authResult"
 * @param results service result Map to populate (mutated in place)
 */
private static void processAuthTransResult(Map<String, Object> reply, Map<String, Object> results) {
    AuthorizeResponse ar = (AuthorizeResponse) reply.get("authorizeResponse");
    Boolean authResult = (Boolean) reply.get("authResult");
    // FIX: drop the deprecated Boolean(boolean) constructor; Booleans are immutable and reusable
    results.put("authResult", authResult);
    results.put("authFlag", ar.getReasonCode());
    results.put("authMessage", ar.getReasonText());
    if (authResult.booleanValue()) { // passed
        results.put("authCode", ar.getResponseField(AuthorizeResponse.AUTHORIZATION_CODE));
        results.put("authRefNum", ar.getResponseField(AuthorizeResponse.TRANSACTION_ID));
        results.put("cvCode", ar.getResponseField(AuthorizeResponse.CID_RESPONSE_CODE));
        results.put("avsCode", ar.getResponseField(AuthorizeResponse.AVS_RESULT_CODE));
        results.put("processAmount", new BigDecimal(ar.getResponseField(AuthorizeResponse.AMOUNT)));
    } else {
        results.put("authCode", ar.getResponseCode());
        results.put("processAmount", BigDecimal.ZERO);
        results.put("authRefNum", AuthorizeResponse.ERROR);
    }
    Debug.logInfo("processAuthTransResult: " + results.toString(), module);
}
/**
 * Maps the raw gateway reply into the service result fields for a capture
 * (captureResult, captureFlag, captureMessage, captureRefNum, and on success
 * the capture code and amount).
 *
 * @param reply   output of processCard(); must contain "authorizeResponse" and "authResult"
 * @param results service result Map to populate (mutated in place)
 */
private static void processCaptureTransResult(Map<String, Object> reply, Map<String, Object> results) {
    AuthorizeResponse ar = (AuthorizeResponse) reply.get("authorizeResponse");
    Boolean captureResult = (Boolean) reply.get("authResult");
    // FIX: drop the deprecated Boolean(boolean) constructor
    results.put("captureResult", captureResult);
    results.put("captureFlag", ar.getReasonCode());
    results.put("captureMessage", ar.getReasonText());
    results.put("captureRefNum", ar.getResponseField(AuthorizeResponse.TRANSACTION_ID));
    if (captureResult.booleanValue()) { // passed
        results.put("captureCode", ar.getResponseField(AuthorizeResponse.AUTHORIZATION_CODE));
        results.put("captureAmount", new BigDecimal(ar.getResponseField(AuthorizeResponse.AMOUNT)));
    } else {
        results.put("captureAmount", BigDecimal.ZERO);
    }
    Debug.logInfo("processCaptureTransResult: " + results.toString(), module);
}
/**
 * Maps the raw gateway reply into a fresh service result for a refund
 * (refundResult, refundFlag, refundMessage, refundRefNum, and on success the
 * refund code and amount).
 *
 * @param reply output of processCard(); must contain "authorizeResponse" and "authResult"
 * @return the populated result Map
 */
private static Map<String, Object> processRefundTransResult(Map<String, Object> reply) {
    Map<String, Object> results = FastMap.newInstance();
    AuthorizeResponse ar = (AuthorizeResponse) reply.get("authorizeResponse");
    // FIX: renamed misleading local (was "captureResult") and dropped the
    // deprecated Boolean(boolean) constructor
    Boolean refundResult = (Boolean) reply.get("authResult");
    results.put("refundResult", refundResult);
    results.put("refundFlag", ar.getReasonCode());
    results.put("refundMessage", ar.getReasonText());
    results.put("refundRefNum", ar.getResponseField(AuthorizeResponse.TRANSACTION_ID));
    if (refundResult.booleanValue()) { // passed
        results.put("refundCode", ar.getResponseField(AuthorizeResponse.AUTHORIZATION_CODE));
        results.put("refundAmount", new BigDecimal(ar.getResponseField(AuthorizeResponse.AMOUNT)));
    } else {
        results.put("refundAmount", BigDecimal.ZERO);
    }
    Debug.logInfo("processRefundTransResult: " + results.toString(), module);
    return results;
}
/**
 * Maps the raw gateway reply into a fresh service result for a release/void
 * (releaseResult, releaseFlag, releaseMessage, releaseRefNum, and on success
 * the release code and amount).
 *
 * @param reply output of processCard(); must contain "authorizeResponse" and "authResult"
 * @return the populated result Map
 */
private static Map<String, Object> processReleaseTransResult(Map<String, Object> reply) {
    Map<String, Object> results = FastMap.newInstance();
    AuthorizeResponse ar = (AuthorizeResponse) reply.get("authorizeResponse");
    // FIX: renamed misleading local (was "captureResult") and dropped the
    // deprecated Boolean(boolean) constructor
    Boolean releaseResult = (Boolean) reply.get("authResult");
    results.put("releaseResult", releaseResult);
    results.put("releaseFlag", ar.getReasonCode());
    results.put("releaseMessage", ar.getReasonText());
    results.put("releaseRefNum", ar.getResponseField(AuthorizeResponse.TRANSACTION_ID));
    if (releaseResult.booleanValue()) { // passed
        results.put("releaseCode", ar.getResponseField(AuthorizeResponse.AUTHORIZATION_CODE));
        results.put("releaseAmount", new BigDecimal(ar.getResponseField(AuthorizeResponse.AMOUNT)));
    } else {
        results.put("releaseAmount", BigDecimal.ZERO);
    }
    Debug.logInfo("processReleaseTransResult: " + results.toString(), module);
    return results;
}
/**
 * Maps the raw gateway reply into the service result fields for a combined
 * auth+capture: both the auth* and capture* result fields are populated from
 * the single gateway response.
 *
 * @param reply   output of processCard(); must contain "authorizeResponse" and "authResult"
 * @param results service result Map to populate (mutated in place)
 */
private static void processAuthCaptureTransResult(Map<String, Object> reply, Map<String, Object> results) {
    AuthorizeResponse ar = (AuthorizeResponse) reply.get("authorizeResponse");
    Boolean authResult = (Boolean) reply.get("authResult");
    // FIX: drop the deprecated Boolean(boolean) constructor
    results.put("authResult", authResult);
    results.put("authFlag", ar.getReasonCode());
    results.put("authMessage", ar.getReasonText());
    // a single combined transaction succeeds or fails for both phases at once
    results.put("captureResult", authResult);
    results.put("captureFlag", ar.getReasonCode());
    results.put("captureMessage", ar.getReasonText());
    results.put("captureRefNum", ar.getResponseField(AuthorizeResponse.TRANSACTION_ID));
    if (authResult.booleanValue()) { // passed
        results.put("authCode", ar.getResponseField(AuthorizeResponse.AUTHORIZATION_CODE));
        results.put("authRefNum", ar.getResponseField(AuthorizeResponse.TRANSACTION_ID));
        results.put("cvCode", ar.getResponseField(AuthorizeResponse.CID_RESPONSE_CODE));
        results.put("avsCode", ar.getResponseField(AuthorizeResponse.AVS_RESULT_CODE));
        results.put("processAmount", new BigDecimal(ar.getResponseField(AuthorizeResponse.AMOUNT)));
    } else {
        results.put("authCode", ar.getResponseCode());
        results.put("processAmount", BigDecimal.ZERO);
        results.put("authRefNum", AuthorizeResponse.ERROR);
    }
    // FIX: log label previously said "processAuthTransResult" (copy-paste error)
    Debug.logInfo("processAuthCaptureTransResult: " + results.toString(), module);
}
/**
 * Reads a gateway configuration value, preferring the
 * PaymentGatewayAuthorizeNet entity row (when a paymentGatewayConfigId is
 * given) and otherwise falling back to the named properties resource.
 *
 * @param delegator entity delegator for the config lookup
 * @param paymentGatewayConfigId optional gateway config row id
 * @param paymentGatewayConfigParameterName field name on the config entity
 * @param resource properties file name used as fallback
 * @param parameterName property key used as fallback
 * @return the trimmed value, or "" when not configured
 */
private static String getPaymentGatewayConfigValue(Delegator delegator, String paymentGatewayConfigId, String paymentGatewayConfigParameterName,
        String resource, String parameterName) {
    String configValue = "";
    if (UtilValidate.isNotEmpty(paymentGatewayConfigId)) {
        try {
            GenericValue gatewayConfig = delegator.findOne("PaymentGatewayAuthorizeNet", UtilMisc.toMap("paymentGatewayConfigId", paymentGatewayConfigId), false);
            if (UtilValidate.isNotEmpty(gatewayConfig)) {
                Object fieldValue = gatewayConfig.get(paymentGatewayConfigParameterName);
                if (fieldValue != null) {
                    configValue = fieldValue.toString().trim();
                }
            }
        } catch (GenericEntityException e) {
            Debug.logError(e, module);
        }
    } else {
        String propertyValue = UtilProperties.getPropertyValue(resource, parameterName);
        if (propertyValue != null) {
            configValue = propertyValue.trim();
        }
    }
    return configValue;
}
}
|
package com.atguigu.gmall.sms.entity;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.io.Serializable;
import java.util.Date;
import lombok.Data;
/**
 * Seckill (flash-sale) SKU notification subscription entity, mapped to the
 * sms_seckill_sku_notice table. A row records a member's request to be
 * notified (SMS or e-mail) about a given SKU's seckill session.
 *
 * @author daiyuquan
 * @email 1206445365@qq.com
 * @date 2019-12-03 12:59:40
 */
@ApiModel
@Data
@TableName("sms_seckill_sku_notice")
public class SeckillSkuNoticeEntity implements Serializable {
    private static final long serialVersionUID = 1L;
    /**
     * Primary key id
     */
    @TableId
    @ApiModelProperty(name = "id",value = "id")
    private Long id;
    /**
     * Id of the subscribing member (member_id)
     */
    @ApiModelProperty(name = "memberId",value = "member_id")
    private Long memberId;
    /**
     * Id of the SKU the member subscribed to (sku_id)
     */
    @ApiModelProperty(name = "skuId",value = "sku_id")
    private Long skuId;
    /**
     * Id of the seckill session/round the SKU belongs to
     */
    @ApiModelProperty(name = "sessionId",value = "活动场次id")
    private Long sessionId;
    /**
     * When the member subscribed.
     * NOTE(review): "subcribeTime" is misspelled ("subscribe") but renaming it
     * would break the generated column mapping and Lombok accessors — keep as-is.
     */
    @ApiModelProperty(name = "subcribeTime",value = "订阅时间")
    private Date subcribeTime;
    /**
     * When the notification was sent (null until sent — TODO confirm)
     */
    @ApiModelProperty(name = "sendTime",value = "发送时间")
    private Date sendTime;
    /**
     * Notification channel: 0 = SMS, 1 = e-mail
     */
    @ApiModelProperty(name = "noticeType",value = "通知方式[0-短信,1-邮件]")
    private Integer noticeType;
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.shardingsphere.test.sql.parser.parameterized.jaxb.cases.domain.segment.impl.output;
import lombok.Getter;
import lombok.Setter;
import org.apache.shardingsphere.test.sql.parser.parameterized.jaxb.cases.domain.segment.AbstractExpectedSQLSegment;
import org.apache.shardingsphere.test.sql.parser.parameterized.jaxb.cases.domain.segment.impl.projection.impl.column.ExpectedColumnProjection;
import javax.xml.bind.annotation.XmlElement;
import java.util.LinkedList;
import java.util.List;
/**
 * Expected output column segment for SQL parser test cases: holds the list of
 * expected column projections deserialized from {@code <column-projection>}
 * child elements in the test-case XML.
 */
@Getter
@Setter
public final class ExpectedOutputColumn extends AbstractExpectedSQLSegment {
    // Populated by JAXB; defaults to an empty list when no elements are present.
    @XmlElement(name = "column-projection")
    private final List<ExpectedColumnProjection> columnProjections = new LinkedList<>();
}
|
package zefryuuko.chat.commdata;
/**
 * A container that stores a request from client/server. Used for requests that do not need any parameters.
 */
public class RequestData extends CommData
{
    // The request identifier carried by this message; immutable once constructed.
    private final String request;
    /**
     * Creates a parameterless request message.
     *
     * @param request the request identifier to send
     */
    public RequestData(String request)
    {
        super("RequestData");
        this.request = request;
    }
    /**
     * @return the request identifier carried by this message
     */
    public String getRequest()
    {
        return request;
    }
}
|
package org.ael.plugin.aop.annotation;
import java.lang.annotation.*;
/**
 * Marks a class or method for exception-enhancement interception by the AOP
 * plugin. Placed on a class, every method in that class is intercepted; placed
 * on a single method, only that method is intercepted.
 *
 * @author aorxsr
 * @date 2020/2/19
 */
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.METHOD, ElementType.TYPE})
@Documented
public @interface ExceptionEnhance {
    // Name of the enhancement (handler) method to invoke; empty string means the default.
    // NOTE(review): semantics inferred from the attribute name — confirm against the AOP plugin.
    String enhanceMethodName() default "";
}
|
/**
* copyright Ed Sweeney, 2012, 2013 all rights reserved
*/
package com.onextent.augie.ments;
import android.app.Dialog;
import android.app.DialogFragment;
import android.os.Bundle;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.CheckBox;
import android.widget.CompoundButton;
import android.widget.CompoundButton.OnCheckedChangeListener;
import android.widget.TextView;
import com.onextent.android.codeable.Code;
import com.onextent.android.codeable.CodeableException;
import com.onextent.android.codeable.CodeableHandler;
import com.onextent.augie.AugLog;
import com.onextent.augie.AugieActivity;
import com.onextent.augie.R;
/**
 * Settings dialog for the GPS augiement: shows whether GPS is enabled (read
 * only) and the current latitude/longitude, live-updated via GPS_UPDATE events
 * while the dialog is visible.
 */
public class GPSDialog extends DialogFragment {
    // Root view inflated from R.layout.gps_settings; read by the event handler.
    private View myview;
    @Override
    public void onPause() {
        super.onPause();
        // stop receiving GPS update events while not visible
        AugieActivity activity = (AugieActivity) getActivity();
        activity.unlisten(GPS.GPS_UPDATE_AUGIE_NAME, gpsEventHandler);
    }
    @Override
    public void onResume() {
        super.onResume();
        // subscribe to GPS update events so the lat/long fields stay current
        AugieActivity activity = (AugieActivity) getActivity();
        activity.listen(GPS.GPS_UPDATE_AUGIE_NAME, gpsEventHandler);
    }
    // The GPS augiement of the current mode, resolved in onCreateView.
    GPS augiement;
    ViewGroup container;
    @Override
    public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
        this.container = container;
        // look up the GPS augiement from the currently active mode
        AugieActivity activity = (AugieActivity) getActivity();
        augiement = (GPS)
                activity.getModeManager().getCurrentMode()
                .getAugiements()
                .get(GPS.AUGIE_NAME);
        Dialog d = getDialog();
        if (d != null) d.setTitle(augiement.getMeta().getUIName() + " GPS");
        myview = inflater.inflate(R.layout.gps_settings, container, false);
        try {
            setEnabledUI();
            // seed the position fields with the last known fix
            TextView lat = (TextView) myview.findViewById(R.id.latitude);
            TextView longitude = (TextView) myview.findViewById(R.id.longitude);
            lat.setText(Double.toString(augiement.getLatitude()));
            longitude.setText(Double.toString(augiement.getLongitude()));
        } catch (Exception e) {
            // best-effort: a failure here leaves the dialog with empty fields
            AugLog.e( e.toString(), e);
        }
        return myview;
    }
    // Updates the lat/long text fields whenever a GPS update event arrives.
    private CodeableHandler gpsEventHandler = new CodeableHandler() {
        @Override
        public void onCode(Code code) {
            AugLog.d( "ejs got dialog gps: " + code);
            TextView lat = (TextView) myview.findViewById(R.id.latitude);
            TextView longitude = (TextView) myview.findViewById(R.id.longitude);
            try {
                lat.setText(code.getString(GPS.LATITUDE_KEY));
                longitude.setText(code.getString(GPS.LONGITUDE_KEY));
            } catch (CodeableException e) {
                AugLog.e( e.toString(), e);
            }
        }
    };
    /**
     * Configures the read-only "GPS enabled" checkbox and its explanatory text.
     */
    private void setEnabledUI() {
        CheckBox cbox = (CheckBox) myview.findViewById(R.id.gpsEnabled);
        boolean isEnabled = augiement.isEnabled();
        cbox.setChecked(isEnabled);
        cbox.setEnabled(false); //read only
        if (isEnabled)
            cbox.setText("GPS enabled via " + augiement.getProvider());
        else
            cbox.setText("You must enable GPS to use this Augiement.");
        cbox.setOnCheckedChangeListener(new OnCheckedChangeListener() {
            @Override
            public void onCheckedChanged(CompoundButton buttonView, boolean isChecked) {
                //noop
            }
        });
    }
}
|
package com.caldeira.blog.config.security;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.HttpMethod;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.config.annotation.authentication.builders.AuthenticationManagerBuilder;
import org.springframework.security.config.annotation.web.builders.HttpSecurity;
import org.springframework.security.config.annotation.web.builders.WebSecurity;
import org.springframework.security.config.annotation.web.configuration.EnableWebSecurity;
import org.springframework.security.config.annotation.web.configuration.WebSecurityConfigurerAdapter;
import org.springframework.security.config.http.SessionCreationPolicy;
import org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder;
import org.springframework.security.web.authentication.UsernamePasswordAuthenticationFilter;
import com.caldeira.blog.repository.UserRepository;
/**
 * Spring Security configuration for the blog API: stateless JWT-style
 * authentication via a token filter, with public read access to posts and
 * categories and open auth/signup endpoints.
 *
 * NOTE(review): WebSecurityConfigurerAdapter is deprecated in newer Spring
 * Security versions — consider migrating to a SecurityFilterChain bean when
 * the framework is upgraded.
 */
@EnableWebSecurity
@Configuration
public class SecurityConfigurations extends WebSecurityConfigurerAdapter {
    @Autowired
    private AuthenticationService authenticationService;
    @Autowired
    private TokenService tokenService;
    @Autowired
    UserRepository userRepository;
    // Exposes the AuthenticationManager as a bean so the auth controller can use it.
    @Override
    @Bean
    protected AuthenticationManager authenticationManager() throws Exception {
        return super.authenticationManager();
    }
    // Authentication configurations
    @Override
    protected void configure(AuthenticationManagerBuilder auth) throws Exception {
        // user lookup via AuthenticationService; passwords stored with BCrypt
        auth.userDetailsService(authenticationService).passwordEncoder(new BCryptPasswordEncoder());
    }
    // Authorization configurations
    @Override
    protected void configure(HttpSecurity http) throws Exception {
        // public GETs for content, public POSTs for login/signup; everything else
        // requires a valid token. Sessions are stateless, so CSRF is disabled and
        // the token filter runs before the username/password filter.
        http.authorizeRequests()
            .antMatchers(HttpMethod.GET, "/category/*").permitAll()
            .antMatchers(HttpMethod.GET, "/post/*").permitAll()
            .antMatchers(HttpMethod.GET, "/post").permitAll()
            .antMatchers(HttpMethod.GET, "/").permitAll()
            .antMatchers(HttpMethod.POST, "/auth").permitAll()
            .antMatchers(HttpMethod.POST, "/auth/signup").permitAll()
            .anyRequest().authenticated()
            .and().csrf().disable()
            .sessionManagement().sessionCreationPolicy(SessionCreationPolicy.STATELESS)
            .and().addFilterBefore(new AuthenticationViaTokenFilter(tokenService, userRepository),
                    UsernamePasswordAuthenticationFilter.class);
    }
    // Static resources configurer (js, css, img, etc.)
    @Override
    public void configure(WebSecurity web) throws Exception {
        web.ignoring().antMatchers("/**.html", "/v2/api-docs", "/webjars/**",
                "/configuration/**", "/swagger-resources/**");
    }
}
|
package com.nulabinc.backlog4j.internal.json.activities;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.commons.lang3.builder.ToStringBuilder;
/**
* @author nulab-inc
*/
/**
 * Backlog activity of type 18 ("pull request added"), deserialized from the
 * activity JSON with a {@link PullRequestContent} payload.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
public class PullRequestAddedActivity extends ActivityJSONImpl {
    // Fixed activity-type discriminator for this subclass.
    private int type = 18;
    @JsonDeserialize(as=PullRequestContent.class)
    private PullRequestContent content;
    @Override
    public Type getType() {
        // map the numeric discriminator onto the Type enum
        return Type.valueOf(this.type);
    }
    @Override
    public PullRequestContent getContent() {
        return this.content;
    }
    // equals/hashCode/toString follow the commons-lang builder pattern used by
    // the other activity classes; equality is by exact class, type and content.
    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (obj == this) {
            return true;
        }
        if (obj.getClass() != getClass()) {
            return false;
        }
        PullRequestAddedActivity rhs = (PullRequestAddedActivity) obj;
        return new EqualsBuilder()
                .append(this.type, rhs.type)
                .append(this.content, rhs.content)
                .isEquals();
    }
    @Override
    public int hashCode() {
        return new HashCodeBuilder()
                .append(type)
                .append(content)
                .toHashCode();
    }
    @Override
    public String toString() {
        return new ToStringBuilder(this)
                .append("type", type)
                .append("content", content)
                .toString();
    }
}
|
package net.cogzmc.permissions.command.impl.verbs;
import lombok.Getter;
import net.cogzmc.core.Core;
import net.cogzmc.core.modular.command.ArgumentRequirementException;
import net.cogzmc.core.modular.command.CommandException;
import net.cogzmc.core.player.CGroup;
import net.cogzmc.core.player.COfflinePlayer;
import net.cogzmc.permissions.command.Verb;
import net.cogzmc.permissions.command.impl.PermissionName;
import org.bukkit.command.CommandSender;
/**
 * Permissions command verb that adds a player to an existing group
 * ("addgroup <group>").
 */
@Getter
@PermissionName("addgroup")
public final class PlayerAddGroupVerb extends Verb<COfflinePlayer> {
    // Command aliases this verb responds to.
    private final String[] names = new String[]{"addgroup"};
    // Exactly one argument is required: the group name.
    private final Integer requiredArguments = 1;
    /**
     * Adds the target player to the named permission group.
     *
     * @param sender the command sender that receives the success message
     * @param target the (possibly offline) player being modified
     * @param args   args[0] is the group name
     * @throws CommandException if the named group does not exist
     */
    @Override
    protected void perform(CommandSender sender, COfflinePlayer target, String[] args) throws CommandException {
        CGroup group = Core.getPermissionsManager().getGroup(args[0]);
        if (group == null) throw new ArgumentRequirementException("The group you specified was invalid!");
        target.addToGroup(group);
        sendSuccessMessage("Added " + target.getName() + " to the group " + group.getName(), sender);
    }
}
|
/* Copyright 2009-2019 David Hadka
*
* This file is part of the MOEA Framework.
*
* The MOEA Framework is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or (at your
* option) any later version.
*
* The MOEA Framework is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
* License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the MOEA Framework. If not, see <http://www.gnu.org/licenses/>.
*/
package org.moeaframework.analysis.plot;
import javax.swing.JFrame;
import org.junit.Ignore;
import org.junit.Test;
import org.moeaframework.Analyzer;
import org.moeaframework.Executor;
import org.moeaframework.Instrumenter;
import org.moeaframework.analysis.collector.Accumulator;
import org.moeaframework.core.NondominatedPopulation;
/**
* Tests the {@link Plot} class. These tests do not check for the correctness
* of the plots, only that the code runs without error.
*/
public class PlotTest {
    /**
     * Detects whether the current call originates from a JUnit runner by
     * scanning the stack trace for org.junit classes. Used by runTest to
     * decide between a disposable frame and a blocking dialog.
     */
    public static boolean isJUnitTest() {
        StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();
        for (StackTraceElement element : stackTrace) {
            if (element.getClassName().startsWith("org.junit.")) {
                return true;
            }
        }
        return false;
    }
    // Smoke test: an empty plot must render without error.
    @Test
    @Ignore("Fails on TravisCI due to missing display")
    public void testEmpty() {
        runTest(new Plot());
    }
    // Smoke test: scatter, line, stacked and area series on one plot.
    @Test
    @Ignore("Fails on TravisCI due to missing display")
    public void testBasicShapes() {
        runTest(new Plot()
                .scatter("Points", new double[] { 0, 1, 2 }, new double[] { 0, 1, 2 })
                .line("Line", new double[] { 0, 1, 2 }, new double[] { 0, 1, 2 })
                .stacked("Stacked 1", new double[] { 0.5, 1.5 }, new double[] { 0.5, 0.6 })
                .stacked("Stacked 2", new double[] { 0.5, 1.5 }, new double[] { 0.3, 0.2 })
                .area("Area", new double[] { 0, 1, 2 }, new double[] { 0, 0.5, 0 })
                .setTitle("Basic Shapes")
                .setXLabel("X")
                .setYLabel("Y"));
    }
    // Smoke test: series whose x-values are not in ascending order.
    @Test
    @Ignore("Fails on TravisCI due to missing display")
    public void testOutOfOrder() {
        runTest(new Plot()
                .scatter("Points", new double[] { 0, 2, 1 }, new double[] { 0, 1, 2 })
                .line("Line", new double[] { 0, 2, 1 }, new double[] { 0, 1, 2 })
                .area("Area", new double[] { 0, 2, 1 }, new double[] { 0, 0.5, 0 }));
    }
    // Smoke test: plotting a Pareto front from a short NSGA-II run on UF1.
    @Test
    @Ignore("Fails on TravisCI due to missing display")
    public void testParetoFront() {
        NondominatedPopulation result = new Executor()
                .withProblem("UF1")
                .withAlgorithm("NSGAII")
                .withMaxEvaluations(20)
                .withProperty("populationSize", 20)
                .run();
        runTest(new Plot().add("NSGAII", result));
    }
    // Smoke test: plotting Analyzer results for several algorithms on ZDT1.
    @Test
    @Ignore("Fails on TravisCI due to missing display")
    public void testAnalyzer() {
        String problem = "ZDT1";
        String[] algorithms = { "NSGAII", "eMOEA", "OMOPSO" };
        Executor executor = new Executor()
                .withProblem(problem)
                .withMaxEvaluations(10000);
        Analyzer analyzer = new Analyzer()
                .withProblem(problem)
                .includeGenerationalDistance()
                .includeAdditiveEpsilonIndicator()
                .includeInvertedGenerationalDistance();
        for (String algorithm : algorithms) {
            analyzer.addAll(algorithm,
                    executor.withAlgorithm(algorithm).runSeeds(10));
        }
        runTest(new Plot().add(analyzer));
    }
    // Smoke test: plotting collector data accumulated by an Instrumenter.
    @Test
    @Ignore("Fails on TravisCI due to missing display")
    public void testAccumulator() {
        Instrumenter instrumenter = new Instrumenter()
                .withProblem("UF1")
                .withFrequency(100)
                .attachElapsedTimeCollector()
                .attachGenerationalDistanceCollector();
        new Executor()
                .withProblem("UF1")
                .withAlgorithm("NSGAII")
                .withMaxEvaluations(10000)
                .withInstrumenter(instrumenter)
                .run();
        Accumulator accumulator = instrumenter.getLastAccumulator();
        runTest(new Plot().add(accumulator));
    }
    /**
     * Renders the plot: under JUnit the frame is created and immediately
     * disposed (no display interaction); when run via main() a blocking
     * dialog is shown instead.
     */
    public void runTest(Plot plot) {
        if (isJUnitTest()) {
            JFrame frame = plot.show();
            frame.dispose();
        } else {
            plot.showDialog();
        }
    }
    // Interactive entry point: shows each plot in a dialog for manual inspection.
    public static void main(String[] args) {
        new PlotTest().testEmpty();
        new PlotTest().testBasicShapes();
        new PlotTest().testOutOfOrder();
        new PlotTest().testParetoFront();
        new PlotTest().testAnalyzer();
        new PlotTest().testAccumulator();
    }
}
|
/*
* Copyright 2003-2018 MarkLogic Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.marklogic.mapreduce;
/**
* Configuration property names and other constants used in the
* package. Use these property names in your Hadoop configuration
* to set MarkLogic specific properties. Properties may be set
 * either in a Hadoop configuration file or programmatically.
*
* <p>
* Use the <code>mapreduce.marklogic.input.*</code> properties when
* using MarkLogic Server as an input source. Use the
* <code>mapreduce.marklogic.output.*</code> properties when using
* MarkLogic Server to store your results.
* </p>
*
* @author jchen
*/
public interface MarkLogicConstants {
// input-related config property names
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the MarkLogic Server user name
* under which input queries and operations run. Required if using
* MarkLogic Server for input.
*/
static final String INPUT_USERNAME =
"mapreduce.marklogic.input.username";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the cleartext password to use for
* authentication with {@link #INPUT_USERNAME input.username}.
* Required if using MarkLogic Server for input.
*/
static final String INPUT_PASSWORD =
"mapreduce.marklogic.input.password";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the MarkLogic Server host to use for
* input operations. Required if using MarkLogic Server for input.
*/
static final String INPUT_HOST =
"mapreduce.marklogic.input.host";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the port number of the input XDBC
* server on the MarkLogic Server host specified by the
* {@link #INPUT_HOST input.host} property. Required if using
* MarkLogic Server for input.
*
* <p>
* <strong>NOTE:</strong> Within a cluster, all nodes supplying
* MapReduce input data must use the same XDBC server port number.
* </p>
*/
static final String INPUT_PORT =
"mapreduce.marklogic.input.port";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies whether the connection to the input server is
* SSL enabled; false is assumed if not set.
*/
static final String INPUT_USE_SSL = "mapreduce.marklogic.input.usessl";
/**
 * The config property name (<code>{@value}</code>)
 * which, if set, specifies the SSL protocol which will be used if
 * {@link #INPUT_USE_SSL input.usessl} is set to true.
 */
static final String INPUT_SSL_PROTOCOL =
    "mapreduce.marklogic.input.sslprotocol";
/**
 * The config property name (<code>{@value}</code>)
 * which, if set, specifies the name of the class implementing
 * {@link SslConfigOptions} which will be used if
 * {@link #INPUT_USE_SSL input.usessl} is set to true.
 */
static final String INPUT_SSL_OPTIONS_CLASS =
    "mapreduce.marklogic.input.ssloptionsclass";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the document selection portion of the
* path expression used to retrieve data from the server. Only
* used if using MarkLogic Server for input in <code>basic</code> mode.
*
* <p>
* The XQuery path expression step given in this property must
* select a sequence of document nodes. To further refine the
* input selection to nodes or values within the documents, use
* {@link #SUBDOCUMENT_EXPRESSION input.subdocumentexpr}. If
* this property is not set, <code>fn:collection()</code> is used.
* For more information, see the overview.
* </p>
*
* <p>
* This property is only usable when <code>basic</code> mode is
* specified with the {@link #INPUT_MODE input.mode} property. If
* more powerful input customization is needed, use
* <code>advanced</code> mode and specify a complete input query
* with the {@link #INPUT_QUERY input.query} property.
* </p>
*
* <p>
* The path expression step given in this property must be
* <em>searchable</em>. A searchable expression is one which can
* be optimized using indexes. See the <em>Query and Performance
* Tuning Guide</em> for more information on searchable path
* expressions.
* </p>
*
* <p>The following selects all documents:</p>
*
* <pre class="codesample">
* <property>
* <name>mapreduce.marklogic.input.documentselector</name>
* <value>fn:collection()</value>
* </property>
* </pre>
*/
static final String DOCUMENT_SELECTOR =
"mapreduce.marklogic.input.documentselector";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the path expression used to retrieve
* sub-document records from the server. Used only if using MarkLogic
* Server for input in <code>basic</code> mode. If not set,
* the document nodes selected by the {@link #DOCUMENT_SELECTOR
* document selector} are used.
*
* <p>
* The XQuery path expression step given in this property should
* select a sequence of nodes or atomic values from the set of
* documents selected by the path step given in the
* {@link #DOCUMENT_SELECTOR input.documentselector} property.
* For more information, see the overview.
* </p>
* <p>
* This property is only usable when <code>basic</code> mode is
* specified with the {@link #INPUT_MODE input.mode} property. If
* more powerful input customization is needed, use
* <code>advanced</code> mode and specify a complete input query
* with the {@link #INPUT_QUERY input.query} property.
* </p>
*
* <p>The following would select all documents containing hrefs:</p>
*
* <pre class="codesample">
* <property>
* <name>mapreduce.marklogic.input.documentselector</name>
* <value>fn:collection()</value>
* </property>
* <property>
* <name>mapreduce.marklogic.input.subdocumentexpr</name>
* <value>//wp:a[@href]</value>
* </property>
* </pre>
*/
static final String SUBDOCUMENT_EXPRESSION =
"mapreduce.marklogic.input.subdocumentexpr";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the name of the class implementing
* {@link com.marklogic.mapreduce.functions.LexiconFunction LexiconFunction}
* which will be used to generate input.
*/
static final String INPUT_LEXICON_FUNCTION_CLASS =
"mapreduce.marklogic.input.lexiconfunctionclass";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies a list of namespaces to use when
* evaluating the path expression constructed from the
* {@link #DOCUMENT_SELECTOR input.documentselector} and
* {@link #SUBDOCUMENT_EXPRESSION input.subdocumentexpr} properties.
*
* <p>Specify the namespaces as comma separated alias-URI pairs.
* For example:
* </p>
*
* <pre class="codesample">
* <property>
* <name>mapreduce.marklogic.input.namespace</name>
* <value>wp, "http://www.mediawiki.org.xml/export-0.4/"</value>
* </property>
* </pre>
*
* <p>
* If a namespace URI includes a comma, you must set this
* property programmatically, rather than in a config file.
* </p>
*/
static final String PATH_NAMESPACE =
"mapreduce.marklogic.input.namespace";
/**
 * The config property name (<code>{@value}</code>)
 * which, if set, specifies the query MarkLogic Server uses
 * to generate input splits. This property is required (and only
 * usable) in <code>advanced</code> mode; see the
 * {@link #INPUT_MODE input.mode} property for details.
 * <p>
 * The split query must return a sequence of (forest id, record
 * count, hostname) tuples. The host name and forest id identify
 * the forest associated with the split. The count is an estimate
 * of the number of key-value pairs in the split.
 * </p>
 * <p>
 * The default split query used in <code>basic</code> input mode
 * computes a rough estimate based on the number of documents in
 * the database.
 * </p>
 */
static final String SPLIT_QUERY =
    "mapreduce.marklogic.input.splitquery";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the maximum number of fragments per
* input split. Optional. Default: {@value #DEFAULT_MAX_SPLIT_SIZE}.
* The default should be suitable for most applications.
*/
static final String MAX_SPLIT_SIZE =
"mapreduce.marklogic.input.maxsplitsize";
/**
* Not yet Implemented.
*
* <p>
* The config property name (<code>{@value}</code>)
* which, if set, specifies the name of the MarkLogic Server
* database from which to create input splits.
* </p>
*/
static final String INPUT_DATABASE_NAME =
"mapreduce.marklogic.input.databasename";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the name of the class of the map
* input keys for {@link KeyValueInputFormat}. Optional.
* Default: {@link org.apache.hadoop.io.Text}.
*/
static final String INPUT_KEY_CLASS =
"mapreduce.marklogic.input.keyclass";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the name of the class of the map
* input value for {@link KeyValueInputFormat}, {@link ValueInputFormat}
* and {@link DocumentInputFormat}.
* Optional. Default: {@link org.apache.hadoop.io.Text} for
* {@link KeyValueInputFormat} and {@link ValueInputFormat},
* {@link DatabaseDocument} for {@link DocumentInputFormat}.
*/
static final String INPUT_VALUE_CLASS =
"mapreduce.marklogic.input.valueclass";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies whether to use basic or advanced
* input query mode. Allowable values are <code>basic</code> and
* <code>advanced</code>. Optional. Default: <code>basic</code>.
*
* <p><em>Only basic mode is supported at this time.</em></p>
*
* <p>
* Basic mode enables use of the
* {@link #DOCUMENT_SELECTOR input.documentselector},
* {@link #SUBDOCUMENT_EXPRESSION input.subdocumentexpr}, and
* {@link #PATH_NAMESPACE input.namespace} properties. Advanced
* mode enables use of the {@link #INPUT_QUERY input.query} and
* {@link #SPLIT_QUERY input.splitquery} properties.
* </p>
*/
static final String INPUT_MODE =
"mapreduce.marklogic.input.mode";
/**
* Value string of basic mode for {@link #INPUT_MODE input.mode}.
*/
static final String BASIC_MODE = "basic";
/**
* Value string of advanced mode for {@link #INPUT_MODE input.mode}.
*/
static final String ADVANCED_MODE = "advanced";
/**
 * The config property name (<code>{@value}</code>)
 * which, if set, specifies the query used to retrieve input
 * records from MarkLogic Server. This property is required
 * when <code>advanced</code> is specified in the
 * {@link #INPUT_MODE input.mode} property.
 *
 * <p>
 * The value of this property must be a fully formed query,
 * suitable for evaluation by <code>xdmp:eval</code>, and
 * must return a sequence. The items in the sequence depend
 * on the {@link org.apache.hadoop.mapreduce.InputFormat InputFormat}
 * subclass configured for the job. For details, see
 * "Advanced Input Mode" in the <em>Hadoop MapReduce Connector
 * Developer's Guide</em>.
 * </p>
 */
static final String INPUT_QUERY =
    "mapreduce.marklogic.input.query";
/**
 * The config property name (<code>{@value}</code>)
 * which, if set, specifies data retrieval from MarkLogic Server at the
 * specified timestamp.
 */
static final String INPUT_QUERY_TIMESTAMP =
    "mapreduce.marklogic.input.querytimestamp";
/**
* The config property name (<code>{@value}</code>)
* which, if set to true, specifies that the input query declares and
* references external variables <code>{@value #SPLIT_START_VARNAME}</code>
* and <code>{@value #SPLIT_END_VARNAME}</code> under the
* namespace {@value #MR_NAMESPACE}. The connector binds to
* these variables with the start and end of an input split
* instead of constraining the query with the split range.
*
* <p>
* For details, see "Optimizing Your Input Query" in the <em>Hadoop
* MapReduce Connector Developer's Guide</em>.
* </p>
*/
static final String BIND_SPLIT_RANGE =
"mapreduce.marklogic.input.bindsplitrange";
/**
* The namespace ({@value}) in which the split range external variables
* are defined.
*
* <p>
* The split range variables <code>{@value #SPLIT_START_VARNAME}</code>
* and <code>{@value #SPLIT_END_VARNAME}</code> are in this namespace when
* using advanced input mode and <code>{@value #BIND_SPLIT_RANGE}</code>
* is true. Declare a namespace prefix for this namespace in your input
* query and qualify references to <code>{@value #SPLIT_START_VARNAME}</code>
* and <code>{@value #SPLIT_END_VARNAME}</code> by the prefix. For details,
* see "Optimizing Your Input Query" in the <em>Hadoop MapReduce Connector
* Developer's Guide</em>.
* </p>
*/
static final String MR_NAMESPACE = "http://marklogic.com/hadoop";
/**
* Use this external variable name (<code>{@value}</code>) in your advanced
* mode input query to access the start value of the record range in an
* input split when <code>{@value #BIND_SPLIT_RANGE}</code> is true.
*
* <p>
* The variable must be declared and referenced in the namespace
* <code>{@value #MR_NAMESPACE}</code>. For details, see
* "Optimizing Your Input Query" in the <em>Hadoop MapReduce Connector
* Developer's Guide</em>.
* </p>
*/
static final String SPLIT_START_VARNAME = "splitstart";
/**
* Use this external variable name (<code>{@value}</code>) in your advanced
* mode input query to access the end value of the record range in an input
* split when <code>{@value #BIND_SPLIT_RANGE}</code> is true.
*
* <p>
* The variable must be declared and referenced in the namespace
* <code>{@value #MR_NAMESPACE}</code>. For details, see
* "Optimizing Your Input Query" in the <em>Hadoop MapReduce Connector
* Developer's Guide</em>.
* </p>
*/
static final String SPLIT_END_VARNAME = "splitend";
/**
* The config property name (<code>{@value}</code>) which, if
* set, specifies the ratio of the number of retrieved
* records to the number of accessed fragments. Optional.
* Default: 1.0 (one record per fragment) for documents,
* 100 for nodes and values.
*
* <p>
* The record to fragment ratio is used for progress estimate.
* </p>
*/
static final String RECORD_TO_FRAGMENT_RATIO =
"mapreduce.marklogic.input.recordtofragmentratio";
/**
* The config property name (<code>{@value}</code>) which, if
* set, specifies whether to format data with indentation retrieved from
* MarkLogic. Optional. Valid values: TRUE, FALSE, SERVERDEFAULT.
* Default: false.
*/
static final String INDENTED =
"mapreduce.marklogic.input.indented";
/**
* The config property name (<code>{@value}</code>)
* which, if set, indicates to only include documents with one or many of
* specified collection URIs when using {@link ForestInputFormat}.
*/
static final String COLLECTION_FILTER =
"mapreduce.marklogic.input.filter.collection";
/**
* The config property name (<code>{@value}</code>)
* which, if set, indicates to only include documents with one of
* specified directory URIs when using {@link ForestInputFormat}.
*/
static final String DIRECTORY_FILTER =
"mapreduce.marklogic.input.filter.directory";
/**
* The config property name (<code>{@value}</code>)
* which, if set, indicates to only include documents matching the cts
* query {@link MarkLogicInputFormat}.
*/
static final String QUERY_FILTER =
"mapreduce.marklogic.input.filter.query";
/**
* The config property name (<code>{@value}</code>)
* which, if set, indicates to only include documents with one of
* specified types when using {@link ForestInputFormat}.
*/
static final String TYPE_FILTER =
"mapreduce.marklogic.input.filter.type";
static final String EXTRACT_URI = "mapreduce.marklogic.input.extracturi";
// output-related config property names
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the MarkLogic Server user name
* under which output operations run. Required if using MarkLogic
* Server for output.
*/
static final String OUTPUT_USERNAME =
"mapreduce.marklogic.output.username";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the cleartext password to use for
* authentication with {@link #OUTPUT_USERNAME output.username}.
* Required if using MarkLogic Server for output.
*/
static final String OUTPUT_PASSWORD =
"mapreduce.marklogic.output.password";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the MarkLogic Server host to use for
* output operations. Required if using MarkLogic Server for
* output.
*/
static final String OUTPUT_HOST =
"mapreduce.marklogic.output.host";
/** Internal use only. */
static final String OUTPUT_FOREST_HOST =
"mapreduce.marklogic.output.hostforests";
/**
 * The config property name (<code>{@value}</code>)
 * which, if set, specifies the port number of the output MarkLogic
 * Server specified by the {@link #OUTPUT_HOST output.host} property.
 * Required if using MarkLogic Server for output.
 */
static final String OUTPUT_PORT =
    "mapreduce.marklogic.output.port";
/**
 * The config property name (<code>{@value}</code>)
 * which, if set, specifies the MarkLogic Server database to use for
 * output operations. The default value is the target database assigned
 * to the AppServer.
 */
static final String OUTPUT_DATABASE_NAME =
    "mapreduce.marklogic.output.databasename";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies whether the connection to the output server is
* SSL enabled; false is assumed if not set.
*/
static final String OUTPUT_USE_SSL = "mapreduce.marklogic.output.usessl";
/**
 * The config property name (<code>{@value}</code>)
 * which, if set, specifies the SSL protocol which will be used if
 * {@link #OUTPUT_USE_SSL output.usessl} is set to true.
 */
static final String OUTPUT_SSL_PROTOCOL =
    "mapreduce.marklogic.output.sslprotocol";
/**
 * The config property name (<code>{@value}</code>)
 * which, if set, specifies the name of the class implementing
 * {@link SslConfigOptions} which will be used if
 * {@link #OUTPUT_USE_SSL output.usessl} is set to true.
 */
static final String OUTPUT_SSL_OPTIONS_CLASS =
    "mapreduce.marklogic.output.ssloptionsclass";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the MarkLogic Server database directory
* where output documents are created.
* <p>
* If {@link #OUTPUT_CLEAN_DIR output.cleandir} is false (the default)
* then an error occurs if the directory already exists. If {@link
* #OUTPUT_CLEAN_DIR output.cleandir} is true, then the directory
* is removed as part of the job submission process.
* </p>
*/
static final String OUTPUT_DIRECTORY =
"mapreduce.marklogic.output.content.directory";
/**
* The config property name (<code>{@value}</code>) which, if set,
* specifies the charset encoding to be used by the server when loading
* this document. The encoding provided will be passed to the server at
* document load time and must be a name that it recognizes. The document
* byte stream will be transcoded to UTF-8 for storage.
*/
static final String OUTPUT_CONTENT_ENCODING =
"mapreduce.marklogic.output.content.encoding";
/**
* Default output content encoding
*/
static final String DEFAULT_OUTPUT_CONTENT_ENCODING = "UTF-8";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies a comma-separated list of collections
* to which generated output documents are added. Optional. Relevant
* only when using MarkLogic Server for output with
* {@link ContentOutputFormat}.
*
* <p>Example:</p>
*
* <pre class="codesample">
* <property>
* <name>mapreduce.marklogic.output.content.collection</name>
* <value>latest,top10</value>
* </property>
* </pre>
*/
static final String OUTPUT_COLLECTION =
"mapreduce.marklogic.output.content.collection";
/** The default graph to which RDF triples are loaded. **/
static final String OUTPUT_GRAPH =
    "mapreduce.marklogic.output.rdf.graph";
/** The graph that overrides any graph specified inside the RDF data. **/
static final String OUTPUT_OVERRIDE_GRAPH =
    "mapreduce.marklogic.output.rdf.overridegraph";
/**
 * The config property name (<code>{@value}</code>)
 * which, if set, specifies a comma-separated list of role-capability
 * pairs to associate with created output documents. Optional. If
 * not set, the default permissions for
 * {@link #OUTPUT_USERNAME output.username} are used. Relevant
 * only when using MarkLogic Server for output with
 * {@link ContentOutputFormat}.
 *
 * <p>Example:</p>
 *
 * <pre class="codesample">
 * &lt;property&gt;
 * &lt;name&gt;mapreduce.marklogic.output.content.permission&lt;/name&gt;
 * &lt;value&gt;dls-user,update,dls-user,read&lt;/value&gt;
 * &lt;/property&gt;
 * </pre>
 *
 * <p>
 * See "URI Privileges and Permissions on Documents" in the
 * <em>Understanding and Using Security Guide</em> for more
 * information about roles and capabilities.
 * </p>
 *
 * <p>
 * If the property value includes a comma embedded in the
 * role name, you must set this property in your code,
 * rather than in a configuration file.
 * </p>
 */
static final String OUTPUT_PERMISSION =
    "mapreduce.marklogic.output.content.permission";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the document quality for created
* output documents. Optional. Relevant only when using MarkLogic
* Server for output with {@link ContentOutputFormat}.
*
* <p>
* Quality affects the search relevance of a document. The
* value must be a positive or negative integer. For more
* information about document quality, see "Relevance Scores:
* Understanding and Customizing" in the <em>Search Developer's
* Guide</em>.
* </p>
*/
static final String OUTPUT_QUALITY =
"mapreduce.marklogic.output.content.quality";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies whether to use streaming to insert
* content. When streaming is set to true, the content will
* not be fully buffered in memory, hence will consume less
* memory but will disable auto-retry if there is a problem
* inserting the content.
*/
static final String OUTPUT_STREAMING =
"mapreduce.marklogic.output.content.streaming";
/**
* The config property name (<code>{@value}</code>)
* which, if set, indicates whether or not to remove the output
* directory. Only applicable to {@link ContentOutputFormat}.
* Default: false.
*
* <p>
* When set to true, the output directory specified by the
* {@link #OUTPUT_DIRECTORY output.content.directory} property
* is removed. When set to false, an exception is thrown if
* the output content directory already exists.
* </p>
*/
static final String OUTPUT_CLEAN_DIR =
"mapreduce.marklogic.output.content.cleandir";
/**
* The config property name (<code>{@value}</code>)
* which, if set, indicates whether or not to use the fast load mode
* to load content into MarkLogic. Default: false.
*
* <p>
* Setting it to true when the documents
* to be loaded already exist may cause XDMP-DBDUPURI error if the
* original documents were inserted when the database had a different
* forest count. The fast load mode will always be
* used if "mapreduce.marklogic.output.content.directory" is set.
* </p>
*/
static final String OUTPUT_FAST_LOAD =
"mapreduce.marklogic.output.content.fastload";
/**
* The config property name (<code>{@value}</code>)
* which, if set, indicates what node operation to perform
* during output. Required if using MarkLogic Server for output
* with NodeOutputFormat. Valid choices: INSERT_BEFORE, INSERT_AFTER,
* INSERT_CHILD, REPLACE.
*
* @see NodeOpType
* @see NodeOutputFormat
*/
static final String NODE_OPERATION_TYPE =
"mapreduce.marklogic.output.node.optype";
/**
* The config property name (<code>{@value}</code>)
* which, if set to true, causes {@link PropertyOutputFormat}
* to create document properties for reduce output
* key-value pairs even when no document exists with
* the target URI. Default: false.
*
* <p>
* By default, {@link PropertyOutputFormat} does not create a
* property for a document URI unless the document already
* exists.
* </p>
*/
static final String OUTPUT_PROPERTY_ALWAYS_CREATE =
"mapreduce.marklogic.output.property.alwayscreate";
/**
* The config property name (<code>{@value}</code>)
* which, if set, indicates the namespace used for output.
* This is used only in NodeOutputFormat, and is used for
* resolving element names in the node path.
*/
static final String OUTPUT_NAMESPACE =
"mapreduce.marklogic.output.node.namespace";
/**
* The config property name (<code>{@value}</code>)
* which, if set, indicates whether the job is running in local or
* distributed mode.
*/
static final String EXECUTION_MODE = "mapreduce.marklogic.mode";
static final String MODE_DISTRIBUTED = "distributed";
static final String MODE_LOCAL = "local";
/**
* The default maximum split size for input splits, used if
* {@link #MAX_SPLIT_SIZE input.maxsplitsize} is not specified.
*/
static final long DEFAULT_MAX_SPLIT_SIZE = 50000;
/**
* The default maximum split size for input splits, used if
* {@link #MAX_SPLIT_SIZE input.maxsplitsize} is not specified
* and running in local mode.
*/
static final long DEFAULT_LOCAL_MAX_SPLIT_SIZE = 20000;
/**
* The config property name (<code>{@value}</code>)
* which, if set, indicates what property operation to perform
* during output when using {@link PropertyOutputFormat}. Ignored
* if not using {@link PropertyOutputFormat}. Optional. Valid choices:
* SET_PROPERTY, ADD_PROPERTY. Default: SET_PROPERTY.
*
* @see PropertyOpType
* @see PropertyOutputFormat
* @see PropertyWriter
*/
static final String PROPERTY_OPERATION_TYPE =
"mapreduce.marklogic.output.property.optype";
/**
* Default property operation type.
*/
static final String DEFAULT_PROPERTY_OPERATION_TYPE = "SET_PROPERTY";
/**
* The config property name (<code>{@value}</code>)
* which, if set, indicates type of content to be inserted when using
* ContentOutputFormat. Optional. Valid choices: XML, JSON, TEXT, BINARY,
* MIXED, UNKNOWN.
* Default: XML.
*/
static final String CONTENT_TYPE =
"mapreduce.marklogic.output.content.type";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the data type of the
* output keys for {@link KeyValueOutputFormat}. Optional.
* Default: xs:string.
*/
static final String OUTPUT_KEY_TYPE =
"mapreduce.marklogic.output.keytype";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the data type of the map
* output value for {@link KeyValueOutputFormat}.
* Optional. Default: xs:string.
*/
static final String OUTPUT_VALUE_TYPE =
"mapreduce.marklogic.output.valuetype";
/**
 * The config property name (<code>{@value}</code>)
 * which, if set, specifies the statement to execute against MarkLogic
 * Server. This property is required for KeyValueOutputFormat.
 *
 * <p>
 * The statement is allowed to declare and reference two external variables
 * "key" and "value" under namespace "http://marklogic.com/hadoop", which
 * will be bound by the connector with the output key and value in the user
 * specified data type.
 * </p>
 */
static final String OUTPUT_QUERY =
    "mapreduce.marklogic.output.query";
/**
* Value string of the output key external variable name.
*/
static final String OUTPUT_KEY_VARNAME = "key";
/**
* The config property name (<code>{@value}</code>) which, if set,
* specifies the language name to associate with inserted documents. A
* value of <code>en</code> indicates that the document is in english. The
* default is null, which indicates to use the server default.
*/
static final String OUTPUT_CONTENT_LANGUAGE =
"mapreduce.marklogic.output.content.language";
/**
* The config property name (<code>{@value}</code>) which, if set,
* specifies the namespace to associate with inserted documents. The
* default is null, which indicates that the default namespace should
* be used.
*/
static final String OUTPUT_CONTENT_NAMESPACE =
"mapreduce.marklogic.output.content.namespace";
/**
* Value string of the output value external variable name.
*/
static final String OUTPUT_VALUE_VARNAME = "value";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the document repair level for this options object.
*/
static final String OUTPUT_XML_REPAIR_LEVEL =
"mapreduce.marklogic.output.content.repairlevel";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies whether to tolerate insertion errors and
* make sure all successful inserts are committed.
*/
static final String OUTPUT_TOLERATE_ERRORS =
"mapreduce.marklogic.output.content.tolerateerrors";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the partition
* where output documents are created.
*/
static final String OUTPUT_PARTITION =
"mapreduce.marklogic.output.partition";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies a comma separated list of regex pattern and
* string pairs, 1st to match a uri segment, 2nd the string to replace
* with, with the 2nd one in ''.
*/
static final String OUTPUT_URI_REPLACE =
"mapreduce.marklogic.output.urireplace";
/**
 * The config property name (<code>{@value}</code>)
 * which, if set, specifies a string to prepend to all document URIs.
 * <p>
 * NOTE(review): the property value uses an underscore
 * ("output_uriprefix") rather than the dot-separated convention used by
 * every other property in this interface — confirm whether this is
 * intentional before changing it, since existing configurations may
 * depend on the current spelling.
 * </p>
 */
static final String OUTPUT_URI_PREFIX =
    "mapreduce.marklogic.output_uriprefix";
/**
 * The config property name (<code>{@value}</code>)
 * which, if set, specifies a string to append to all document URIs.
 * <p>
 * NOTE(review): same underscore naming as
 * {@link #OUTPUT_URI_PREFIX} — confirm intentional.
 * </p>
 */
static final String OUTPUT_URI_SUFFIX =
    "mapreduce.marklogic.output_urisuffix";
/**
* Default output XML repair level
*/
static final String DEFAULT_OUTPUT_XML_REPAIR_LEVEL = "DEFAULT";
/**
* Default content type.
*/
static final String DEFAULT_CONTENT_TYPE = "XML";
/**
* The config property name (<code>{@value}</code>)
* which, if set, indicates the number of records in one request.
* Optional. Currently only applies to ContentOutputFormat.
*/
static final String BATCH_SIZE =
"mapreduce.marklogic.output.batchsize";
/**
* Default batch size.
*/
static final int DEFAULT_BATCH_SIZE = 100;
/**
* The config property name (<code>{@value}</code>)
* which, if set, indicates the number of requests in one transaction.
* Optional.
*/
static final String TXN_SIZE =
"mapreduce.marklogic.output.transactionsize";
/**
* The config property name (<code>{@value}</code>)
* which, if set, indicates assignment policy for output documents.
* Optional.
*/
static final String ASSIGNMENT_POLICY =
"mapreduce.marklogic.output.assignmentpolicy";
/**
* The config property name (<code>{@value}</code>)
* which, if set, indicates temporal collection for documents.
* Optional.
*/
static final String TEMPORAL_COLLECTION =
"mapreduce.marklogic.output.temporalcollection";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies the query language will be used for input query and split query.
* Optional. Valid values: XQuery, Javascript.
* Default: XQuery.
*
* @author mattsun
*
*/
static final String INPUT_QUERY_LANGUAGE =
"mapreduce.marklogic.input.querylanguage";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specified the query language will be used for output query.
* Optional. Valid values: XQuery, Javascript.
* Default: XQuery.
*/
static final String OUTPUT_QUERY_LANGUAGE =
"mapreduce.marklogic.output.querylanguage";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies a comma-separated list of
* redaction rule collection URIs.
* Optional. If not set, no data will be redacted.
*/
static final String REDACTION_RULE_COLLECTION =
"mapreduce.marklogic.input.redaction.rules";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies whether to copy document collections from
* source to destination.
*/
static final String COPY_COLLECTIONS =
"mapreduce.marklogic.copycollections";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies whether to copy document quality from
* source to destination.
*/
static final String COPY_QUALITY =
"mapreduce.marklogic.copyquality";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies whether to copy document metadata from
* source to destination.
*/
static final String COPY_METADATA =
"mapreduce.marklogic.copymetadata";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies whether to restrict input hosts that
* mlcp will connect to.
*/
static final String INPUT_RESTRICT_HOSTS =
"mapreduce.marklogic.input.restricthosts";
/**
* The config property name (<code>{@value}</code>)
* which, if set, specifies whether to restrict output hosts that
* mlcp will connecot to.
*/
static final String OUTPUT_RESTRICT_HOSTS =
"mapreduce.marklogic.output.restricthosts";
/**
* Minimum MarkLogic version to accept node-update permissions.
*/
static final long MIN_NODEUPDATE_VERSION =
9000040L;
}
|
package com.soccer.web.payment.dao;
import org.apache.ibatis.annotations.Mapper;
import com.soccer.web.channel.vo.ChannelVO;
import com.soccer.web.payment.vo.PaymentVO;
/**
 * MyBatis mapper for payment persistence operations.
 *
 * NOTE(review): all methods declare a raw {@code throws Exception}; a
 * narrower exception type would be preferable, but changing it would break
 * existing implementers/callers, so it is left as-is.
 */
@Mapper
public interface PaymentMapper {
    // Record a payment for the given user.
    void payment(Integer userIdx) throws Exception;
    // Look up an existing payment for the user (used to prevent duplicate payments).
    PaymentVO payCheck(Integer userIdx) throws Exception;
    // Decrement the user's remaining channel-creation count.
    void paymentUpdate(ChannelVO channelVO) throws Exception;
}
|
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See License.txt in the repository root.
package com.microsoft.tfs.core.pendingcheckin;
import com.microsoft.tfs.core.checkinpolicies.PolicyEvaluator;
import com.microsoft.tfs.core.checkinpolicies.PolicyEvaluatorState;
import com.microsoft.tfs.core.checkinpolicies.PolicyFailure;
import com.microsoft.tfs.util.Check;
/**
 * <p>
 * Immutable holder for the outcome of a checkin evaluation: detected
 * conflicts, checkin-note failures, policy failures, and the policy
 * evaluator's final state (plus any exception it raised).
 * </p>
 *
 * @since TEE-SDK-10.1
 * @threadsafety conditionally thread-safe
 */
public class CheckinEvaluationResult {
    // All state is assigned once in the constructor and never mutated;
    // callers must not modify the returned arrays.
    private final CheckinConflict[] checkinConflicts;
    private final CheckinNoteFailure[] checkinNoteFailures;
    private final PolicyFailure[] checkinPolicyFailures;
    private final PolicyEvaluatorState evaluatorState;
    private final Exception evaluationException;

    /**
     * @param conflicts
     *        the conflicts detected in the evaluation (must not be
     *        <code>null</code>)
     * @param noteFailures
     *        checkin notes that were not supplied correctly (must not be
     *        <code>null</code>)
     * @param policyFailures
     *        checkin policy evaluation failures (must not be
     *        <code>null</code>)
     * @param policyEvaluatorState
     *        the final state of the policy evaluator after evaluation (may
     *        be <code>null</code> if policies were not evaluated)
     * @param policyEvaluationException
     *        an exception generated during checkin policy evaluation (may
     *        be <code>null</code>)
     */
    public CheckinEvaluationResult(
        final CheckinConflict[] conflicts,
        final CheckinNoteFailure[] noteFailures,
        final PolicyFailure[] policyFailures,
        final PolicyEvaluatorState policyEvaluatorState,
        final Exception policyEvaluationException) {
        Check.notNull(conflicts, "conflicts"); //$NON-NLS-1$
        Check.notNull(noteFailures, "noteFailures"); //$NON-NLS-1$
        Check.notNull(policyFailures, "policyFailures"); //$NON-NLS-1$

        this.checkinConflicts = conflicts;
        this.checkinNoteFailures = noteFailures;
        this.checkinPolicyFailures = policyFailures;
        this.evaluatorState = policyEvaluatorState;
        this.evaluationException = policyEvaluationException;
    }

    /**
     * @return the conflicts found during evaluation. Do not modify the
     *         returned objects to ensure thread-safety.
     */
    public CheckinConflict[] getConflicts() {
        return checkinConflicts;
    }

    /**
     * @return the check-in note failures found during evaluation. Do not
     *         modify the returned objects to ensure thread-safety.
     */
    public CheckinNoteFailure[] getNoteFailures() {
        return checkinNoteFailures;
    }

    /**
     * @return the check-in policy failures found during evaluation. Do not
     *         modify the returned objects to ensure thread-safety.
     */
    public PolicyFailure[] getPolicyFailures() {
        return checkinPolicyFailures;
    }

    /**
     * @return the state of the {@link PolicyEvaluator} after policy
     *         evaluation, which may be <code>null</code> if check-in
     *         policies were not evaluated.
     */
    public PolicyEvaluatorState getPolicyEvaluatorState() {
        return evaluatorState;
    }

    /**
     * @return the {@link Exception} that occurred during policy evaluation,
     *         if there was one (otherwise <code>null</code>).
     */
    public Exception getPolicyEvaluationException() {
        return evaluationException;
    }
}
|
/*******************************************************************************
* Copyright 2012 Internet2
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package edu.internet2.middleware.grouper.ws.soap_v2_3;
/**
 * <pre>
 * results for the get attributeAssignments call.
 *
 * result code:
 * code of the result for this attribute assignment overall
 * SUCCESS: means everything ok
 * INSUFFICIENT_PRIVILEGES: not allowed
 * INVALID_QUERY: bad inputs
 * EXCEPTION: something bad happened
 * </pre>
 * @author mchyzer
 */
public class WsGetAttributeAssignmentsResults {

  // NOTE: field and accessor names are kept exactly as-is; the WS layer
  // maps this bean to SOAP/REST payloads, so renaming would change the
  // wire format. Members are grouped per property for readability.

  /** attribute def references in the assignments or inputs (and able to be read) */
  private WsAttributeDef[] wsAttributeDefs;

  /**
   * attribute def references in the assignments or inputs (and able to be read)
   * @return attribute defs
   */
  public WsAttributeDef[] getWsAttributeDefs() {
    return this.wsAttributeDefs;
  }

  /**
   * attribute def references in the assignments or inputs (and able to be read)
   * @param wsAttributeDefs1
   */
  public void setWsAttributeDefs(WsAttributeDef[] wsAttributeDefs1) {
    this.wsAttributeDefs = wsAttributeDefs1;
  }

  /** attribute def names referenced in the assignments or inputs (and able to read) */
  private WsAttributeDefName[] wsAttributeDefNames;

  /**
   * attribute def names referenced in the assignments or inputs (and able to read)
   * @return attribute def names
   */
  public WsAttributeDefName[] getWsAttributeDefNames() {
    return this.wsAttributeDefNames;
  }

  /**
   * attribute def names referenced in the assignments or inputs (and able to read)
   * @param wsAttributeDefNames1
   */
  public void setWsAttributeDefNames(WsAttributeDefName[] wsAttributeDefNames1) {
    this.wsAttributeDefNames = wsAttributeDefNames1;
  }

  /** the assignments being queried */
  private WsAttributeAssign[] wsAttributeAssigns;

  /**
   * the assignments being queried
   * @return the assignments being queried
   */
  public WsAttributeAssign[] getWsAttributeAssigns() {
    return this.wsAttributeAssigns;
  }

  /**
   * the assignments being queried
   * @param wsAttributeAssigns1
   */
  public void setWsAttributeAssigns(WsAttributeAssign[] wsAttributeAssigns1) {
    this.wsAttributeAssigns = wsAttributeAssigns1;
  }

  /** attributes of subjects returned, in same order as the data */
  private String[] subjectAttributeNames;

  /**
   * attributes of subjects returned, in same order as the data
   * @return the attributeNames
   */
  public String[] getSubjectAttributeNames() {
    return this.subjectAttributeNames;
  }

  /**
   * attributes of subjects returned, in same order as the data
   * @param attributeNamesa the attributeNames to set
   */
  public void setSubjectAttributeNames(String[] attributeNamesa) {
    this.subjectAttributeNames = attributeNamesa;
  }

  /** groups that are in the results */
  private WsGroup[] wsGroups;

  /**
   * groups that are in the results
   * @return the wsGroups
   */
  public WsGroup[] getWsGroups() {
    return this.wsGroups;
  }

  /**
   * groups that are in the results
   * @param wsGroup1 the wsGroups to set
   */
  public void setWsGroups(WsGroup[] wsGroup1) {
    this.wsGroups = wsGroup1;
  }

  /** stems that are in the results */
  private WsStem[] wsStems;

  /**
   * stems that are in the results
   * @return stems
   */
  public WsStem[] getWsStems() {
    return this.wsStems;
  }

  /**
   * stems that are in the results
   * @param wsStems1
   */
  public void setWsStems(WsStem[] wsStems1) {
    this.wsStems = wsStems1;
  }

  /** memberships for each assignment sent in */
  private WsMembership[] wsMemberships;

  /**
   * memberships for each assignment sent in
   * @return the results
   */
  public WsMembership[] getWsMemberships() {
    return this.wsMemberships;
  }

  /**
   * memberships for each assignment sent in
   * @param results1 the results to set
   */
  public void setWsMemberships(WsMembership[] results1) {
    this.wsMemberships = results1;
  }

  /** subjects that are in the results */
  private WsSubject[] wsSubjects;

  /**
   * subjects that are in the results
   * @return the subjects
   */
  public WsSubject[] getWsSubjects() {
    return this.wsSubjects;
  }

  /**
   * subjects that are in the results
   * @param wsSubjects1
   */
  public void setWsSubjects(WsSubject[] wsSubjects1) {
    this.wsSubjects = wsSubjects1;
  }

  /** metadata about the result (result code etc.) */
  private WsResultMeta resultMetadata = new WsResultMeta();

  /**
   * @return the resultMetadata
   */
  public WsResultMeta getResultMetadata() {
    return this.resultMetadata;
  }

  /**
   * @param resultMetadata1 the resultMetadata to set
   */
  public void setResultMetadata(WsResultMeta resultMetadata1) {
    this.resultMetadata = resultMetadata1;
  }

  /** metadata about the response (timing, server version, etc.) */
  private WsResponseMeta responseMetadata = new WsResponseMeta();

  /**
   * @see edu.internet2.middleware.grouper.ws.rest.WsResponseBean#getResponseMetadata()
   * @return the response metadata
   */
  public WsResponseMeta getResponseMetadata() {
    return this.responseMetadata;
  }

  /**
   * @param responseMetadata1 the responseMetadata to set
   */
  public void setResponseMetadata(WsResponseMeta responseMetadata1) {
    this.responseMetadata = responseMetadata1;
  }
}
|
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.hive;
import com.facebook.presto.Session;
import com.facebook.presto.common.Subfield;
import com.facebook.presto.common.predicate.Domain;
import com.facebook.presto.common.predicate.Range;
import com.facebook.presto.common.predicate.TupleDomain;
import com.facebook.presto.common.predicate.ValueSet;
import com.facebook.presto.common.type.ArrayType;
import com.facebook.presto.cost.StatsProvider;
import com.facebook.presto.metadata.FunctionAndTypeManager;
import com.facebook.presto.metadata.Metadata;
import com.facebook.presto.spi.ColumnHandle;
import com.facebook.presto.spi.ConnectorTableLayoutHandle;
import com.facebook.presto.spi.plan.AggregationNode;
import com.facebook.presto.spi.plan.PlanNode;
import com.facebook.presto.spi.plan.TableScanNode;
import com.facebook.presto.spi.relation.CallExpression;
import com.facebook.presto.spi.relation.ConstantExpression;
import com.facebook.presto.spi.relation.RowExpression;
import com.facebook.presto.spi.relation.VariableReferenceExpression;
import com.facebook.presto.sql.analyzer.FeaturesConfig;
import com.facebook.presto.sql.planner.Plan;
import com.facebook.presto.sql.planner.assertions.ExpectedValueProvider;
import com.facebook.presto.sql.planner.assertions.MatchResult;
import com.facebook.presto.sql.planner.assertions.Matcher;
import com.facebook.presto.sql.planner.assertions.PlanMatchPattern;
import com.facebook.presto.sql.planner.assertions.SymbolAliases;
import com.facebook.presto.sql.planner.plan.JoinNode;
import com.facebook.presto.sql.planner.plan.SemiJoinNode;
import com.facebook.presto.sql.relational.FunctionResolution;
import com.facebook.presto.sql.tree.FunctionCall;
import com.facebook.presto.sql.tree.LongLiteral;
import com.facebook.presto.sql.tree.StringLiteral;
import com.facebook.presto.testing.QueryRunner;
import com.facebook.presto.tests.AbstractTestQueryFramework;
import com.facebook.presto.tests.DistributedQueryRunner;
import com.google.common.base.Functions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import io.airlift.slice.Slice;
import io.airlift.slice.Slices;
import org.testng.annotations.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import static com.facebook.presto.SystemSessionProperties.JOIN_REORDERING_STRATEGY;
import static com.facebook.presto.SystemSessionProperties.OPTIMIZE_METADATA_QUERIES;
import static com.facebook.presto.SystemSessionProperties.PUSHDOWN_DEREFERENCE_ENABLED;
import static com.facebook.presto.common.function.OperatorType.EQUAL;
import static com.facebook.presto.common.predicate.Domain.create;
import static com.facebook.presto.common.predicate.Domain.multipleValues;
import static com.facebook.presto.common.predicate.Domain.notNull;
import static com.facebook.presto.common.predicate.Domain.singleValue;
import static com.facebook.presto.common.predicate.Range.greaterThan;
import static com.facebook.presto.common.predicate.TupleDomain.withColumnDomains;
import static com.facebook.presto.common.predicate.ValueSet.ofRanges;
import static com.facebook.presto.common.type.BigintType.BIGINT;
import static com.facebook.presto.common.type.BooleanType.BOOLEAN;
import static com.facebook.presto.common.type.DoubleType.DOUBLE;
import static com.facebook.presto.common.type.VarcharType.VARCHAR;
import static com.facebook.presto.common.type.VarcharType.createVarcharType;
import static com.facebook.presto.expressions.LogicalRowExpressions.TRUE_CONSTANT;
import static com.facebook.presto.hive.HiveColumnHandle.ColumnType.SYNTHESIZED;
import static com.facebook.presto.hive.HiveColumnHandle.isPushedDownSubfield;
import static com.facebook.presto.hive.HiveQueryRunner.HIVE_CATALOG;
import static com.facebook.presto.hive.HiveSessionProperties.COLLECT_COLUMN_STATISTICS_ON_WRITE;
import static com.facebook.presto.hive.HiveSessionProperties.PARQUET_DEREFERENCE_PUSHDOWN_ENABLED;
import static com.facebook.presto.hive.HiveSessionProperties.PARTIAL_AGGREGATION_PUSHDOWN_ENABLED;
import static com.facebook.presto.hive.HiveSessionProperties.PARTIAL_AGGREGATION_PUSHDOWN_FOR_VARIABLE_LENGTH_DATATYPES_ENABLED;
import static com.facebook.presto.hive.HiveSessionProperties.PUSHDOWN_FILTER_ENABLED;
import static com.facebook.presto.hive.HiveSessionProperties.RANGE_FILTERS_ON_SUBSCRIPTS_ENABLED;
import static com.facebook.presto.hive.HiveSessionProperties.SHUFFLE_PARTITIONED_COLUMNS_FOR_TABLE_WRITE;
import static com.facebook.presto.hive.TestHiveIntegrationSmokeTest.assertRemoteExchangesCount;
import static com.facebook.presto.parquet.ParquetTypeUtils.pushdownColumnNameForSubfield;
import static com.facebook.presto.sql.analyzer.TypeSignatureProvider.fromTypes;
import static com.facebook.presto.sql.planner.assertions.MatchResult.NO_MATCH;
import static com.facebook.presto.sql.planner.assertions.MatchResult.match;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.aggregation;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.any;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.anySymbol;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.anyTree;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.equiJoinClause;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.exchange;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.expression;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.filter;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.globalAggregation;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.join;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.node;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.output;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.project;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.strictTableScan;
import static com.facebook.presto.sql.planner.assertions.PlanMatchPattern.values;
import static com.facebook.presto.sql.planner.optimizations.PlanNodeSearcher.searchFrom;
import static com.facebook.presto.sql.planner.plan.ExchangeNode.Scope.LOCAL;
import static com.facebook.presto.sql.planner.plan.ExchangeNode.Scope.REMOTE_STREAMING;
import static com.facebook.presto.sql.planner.plan.ExchangeNode.Type.GATHER;
import static com.facebook.presto.sql.planner.plan.JoinNode.Type.INNER;
import static com.google.common.base.MoreObjects.toStringHelper;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static com.google.common.collect.ImmutableMap.toImmutableMap;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import static io.airlift.slice.Slices.utf8Slice;
import static io.airlift.tpch.TpchTable.CUSTOMER;
import static io.airlift.tpch.TpchTable.LINE_ITEM;
import static io.airlift.tpch.TpchTable.NATION;
import static io.airlift.tpch.TpchTable.ORDERS;
import static java.lang.String.format;
import static java.util.Objects.requireNonNull;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
public class TestHiveLogicalPlanner
extends AbstractTestQueryFramework
{
@Override
protected QueryRunner createQueryRunner()
throws Exception
{
return HiveQueryRunner.createQueryRunner(
ImmutableList.of(ORDERS, LINE_ITEM, CUSTOMER, NATION),
ImmutableMap.of("experimental.pushdown-subfields-enabled", "true"),
Optional.empty());
}
    /**
     * Verifies that with filter pushdown enabled, predicates written inside
     * CTEs are pushed into each table scan as tuple domains: the
     * orderpriority filter onto the partitioned orders table, and the
     * shipmode filter (plus the ds value implied by the join) onto the
     * unpartitioned lineitem table.
     */
    @Test
    public void testRepeatedFilterPushdown()
    {
        QueryRunner queryRunner = getQueryRunner();
        try {
            // Partitioned table: the same orders slice duplicated into two ds partitions.
            queryRunner.execute("CREATE TABLE orders_partitioned WITH (partitioned_by = ARRAY['ds']) AS " +
                    "SELECT orderkey, orderpriority, '2019-11-01' as ds FROM orders WHERE orderkey < 1000 " +
                    "UNION ALL " +
                    "SELECT orderkey, orderpriority, '2019-11-02' as ds FROM orders WHERE orderkey < 1000");
            // Unpartitioned counterpart: ds is an ordinary column here.
            queryRunner.execute("CREATE TABLE lineitem_unpartitioned AS " +
                    "SELECT orderkey, linenumber, shipmode, '2019-11-01' as ds FROM lineitem WHERE orderkey < 1000 " +
                    "UNION ALL " +
                    "SELECT orderkey, linenumber, shipmode, '2019-11-02' as ds FROM lineitem WHERE orderkey < 1000 ");
            // Expected pushed-down domain for the orders side of the join.
            TupleDomain<String> ordersDomain = withColumnDomains(ImmutableMap.of(
                    "orderpriority", singleValue(createVarcharType(15), utf8Slice("1-URGENT"))));
            // Expected pushed-down domain for the lineitem side.
            // NOTE(review): the ds = '2019-11-02' entry is presumably derived by the
            // optimizer from a.ds > '2019-11-01' through the a.ds = b.ds join -- confirm.
            TupleDomain<String> lineitemDomain = withColumnDomains(ImmutableMap.of(
                    "shipmode", singleValue(createVarcharType(10), utf8Slice("MAIL")),
                    "ds", singleValue(createVarcharType(10), utf8Slice("2019-11-02"))));
            assertPlan(pushdownFilterEnabled(),
                    "WITH a AS (\n" +
                    " SELECT ds, orderkey\n" +
                    " FROM orders_partitioned\n" +
                    " WHERE orderpriority = '1-URGENT' AND ds > '2019-11-01'\n" +
                    "),\n" +
                    "b AS (\n" +
                    " SELECT ds, orderkey, linenumber\n" +
                    " FROM lineitem_unpartitioned\n" +
                    " WHERE shipmode = 'MAIL'\n" +
                    ")\n" +
                    "SELECT * FROM a LEFT JOIN b ON a.ds = b.ds",
                    // Both scans appear under the join with their filters fully
                    // absorbed into the scan (TRUE_CONSTANT remaining predicate).
                    anyTree(node(JoinNode.class,
                            anyTree(tableScan("orders_partitioned", ordersDomain, TRUE_CONSTANT, ImmutableSet.of("orderpriority"))),
                            anyTree(tableScan("lineitem_unpartitioned", lineitemDomain, TRUE_CONSTANT, ImmutableSet.of("shipmode", "ds"))))));
        }
        finally {
            queryRunner.execute("DROP TABLE IF EXISTS orders_partitioned");
            queryRunner.execute("DROP TABLE IF EXISTS lineitem_unpartitioned");
        }
    }
    /**
     * Exercises filter pushdown on plain scans. For each case it checks:
     * (1) without the session property, the filter remains a FilterNode
     * above the scan; (2) with pushdown enabled, the filter is absorbed
     * into the table layout, split into a tuple-domain part and a
     * "remaining" row-expression part (e.g. mod(orderkey, 2) = 1).
     * Predicates that are statically NULL or FALSE collapse to an empty
     * ValuesNode.
     */
    @Test
    public void testPushdownFilter()
    {
        Session pushdownFilterEnabled = pushdownFilterEnabled();
        // Only domain predicates
        // Default session: filter stays in the plan above the scan.
        assertPlan("SELECT linenumber FROM lineitem WHERE partkey = 10",
                output(exchange(project(
                        filter("partkey = 10",
                                strictTableScan("lineitem", identityMap("linenumber", "partkey")))))));
        // Pushdown enabled: no FilterNode; the partkey domain lives in the layout.
        assertPlan(pushdownFilterEnabled, "SELECT linenumber FROM lineitem WHERE partkey = 10",
                output(exchange(
                        strictTableScan("lineitem", identityMap("linenumber")))),
                plan -> assertTableLayout(plan, "lineitem", withColumnDomains(ImmutableMap.of(new Subfield("partkey", ImmutableList.of()), singleValue(BIGINT, 10L))), TRUE_CONSTANT, ImmutableSet.of("partkey")));
        // Same, but the filter column is also projected out.
        assertPlan(pushdownFilterEnabled, "SELECT partkey, linenumber FROM lineitem WHERE partkey = 10",
                output(exchange(
                        strictTableScan("lineitem", identityMap("partkey", "linenumber")))),
                plan -> assertTableLayout(plan, "lineitem", withColumnDomains(ImmutableMap.of(new Subfield("partkey", ImmutableList.of()), singleValue(BIGINT, 10L))), TRUE_CONSTANT, ImmutableSet.of("partkey")));
        // Only remaining predicate
        assertPlan("SELECT linenumber FROM lineitem WHERE mod(orderkey, 2) = 1",
                output(exchange(project(
                        filter("mod(orderkey, 2) = 1",
                                strictTableScan("lineitem", identityMap("linenumber", "orderkey")))))));
        // Remaining predicate is NULL
        assertPlan(pushdownFilterEnabled, "SELECT linenumber FROM lineitem WHERE cardinality(NULL) > 0",
                output(values("linenumber")));
        assertPlan(pushdownFilterEnabled, "SELECT linenumber FROM lineitem WHERE orderkey > 10 AND cardinality(NULL) > 0",
                output(values("linenumber")));
        // Remaining predicate is always FALSE
        assertPlan(pushdownFilterEnabled, "SELECT linenumber FROM lineitem WHERE cardinality(ARRAY[1]) > 1",
                output(values("linenumber")));
        assertPlan(pushdownFilterEnabled, "SELECT linenumber FROM lineitem WHERE orderkey > 10 AND cardinality(ARRAY[1]) > 1",
                output(values("linenumber")));
        // TupleDomain predicate is always FALSE
        assertPlan(pushdownFilterEnabled, "SELECT linenumber FROM lineitem WHERE orderkey = 1 AND orderkey = 2",
                output(values("linenumber")));
        assertPlan(pushdownFilterEnabled, "SELECT linenumber FROM lineitem WHERE orderkey = 1 AND orderkey = 2 AND linenumber % 2 = 1",
                output(values("linenumber")));
        // Build the expected "remaining" predicate mod(orderkey, 2) = 1 as a
        // RowExpression, matching what the layout should carry after pushdown.
        FunctionAndTypeManager functionAndTypeManager = getQueryRunner().getMetadata().getFunctionAndTypeManager();
        FunctionResolution functionResolution = new FunctionResolution(functionAndTypeManager);
        RowExpression remainingPredicate = new CallExpression(EQUAL.name(),
                functionResolution.comparisonFunction(EQUAL, BIGINT, BIGINT),
                BOOLEAN,
                ImmutableList.of(
                        new CallExpression("mod",
                                functionAndTypeManager.lookupFunction("mod", fromTypes(BIGINT, BIGINT)),
                                BIGINT,
                                ImmutableList.of(
                                        new VariableReferenceExpression("orderkey", BIGINT),
                                        constant(2))),
                        constant(1)));
        // Non-deterministic-free but non-domain predicate: pushed down as the
        // remaining predicate with an empty tuple domain.
        assertPlan(pushdownFilterEnabled, "SELECT linenumber FROM lineitem WHERE mod(orderkey, 2) = 1",
                output(exchange(
                        strictTableScan("lineitem", identityMap("linenumber")))),
                plan -> assertTableLayout(plan, "lineitem", TupleDomain.all(), remainingPredicate, ImmutableSet.of("orderkey")));
        assertPlan(pushdownFilterEnabled, "SELECT orderkey, linenumber FROM lineitem WHERE mod(orderkey, 2) = 1",
                output(exchange(
                        strictTableScan("lineitem", identityMap("orderkey", "linenumber")))),
                plan -> assertTableLayout(plan, "lineitem", TupleDomain.all(), remainingPredicate, ImmutableSet.of("orderkey")));
        // A mix of domain and remaining predicates
        assertPlan("SELECT linenumber FROM lineitem WHERE partkey = 10 AND mod(orderkey, 2) = 1",
                output(exchange(project(
                        filter("partkey = 10 AND mod(orderkey, 2) = 1",
                                strictTableScan("lineitem", identityMap("linenumber", "orderkey", "partkey")))))));
        assertPlan(pushdownFilterEnabled, "SELECT linenumber FROM lineitem WHERE partkey = 10 AND mod(orderkey, 2) = 1",
                output(exchange(
                        strictTableScan("lineitem", identityMap("linenumber")))),
                plan -> assertTableLayout(plan, "lineitem", withColumnDomains(ImmutableMap.of(new Subfield("partkey", ImmutableList.of()), singleValue(BIGINT, 10L))), remainingPredicate, ImmutableSet.of("partkey", "orderkey")));
        assertPlan(pushdownFilterEnabled, "SELECT partkey, orderkey, linenumber FROM lineitem WHERE partkey = 10 AND mod(orderkey, 2) = 1",
                output(exchange(
                        strictTableScan("lineitem", identityMap("partkey", "orderkey", "linenumber")))),
                plan -> assertTableLayout(plan, "lineitem", withColumnDomains(ImmutableMap.of(new Subfield("partkey", ImmutableList.of()), singleValue(BIGINT, 10L))), remainingPredicate, ImmutableSet.of("partkey", "orderkey")));
    }
    /**
     * Verifies partition pruning with filter pushdown: predicates on the
     * partition column ds -- written directly, or through date() casts and
     * ranges -- are converted into explicit partition-value constraints on
     * the scan.
     */
    @Test
    public void testPartitionPruning()
    {
        QueryRunner queryRunner = getQueryRunner();
        // orderkey % 7 spreads rows across seven ds partitions:
        // 2019-11-01 through 2019-11-07.
        queryRunner.execute("CREATE TABLE test_partition_pruning WITH (partitioned_by = ARRAY['ds']) AS " +
                "SELECT orderkey, CAST(to_iso8601(date_add('DAY', orderkey % 7, date('2019-11-01'))) AS VARCHAR) AS ds FROM orders WHERE orderkey < 1000");
        Session pushdownFilterEnabled = pushdownFilterEnabled();
        try {
            // Direct equality on the partition column.
            assertPlan(pushdownFilterEnabled, "SELECT * FROM test_partition_pruning WHERE ds = '2019-11-01'",
                    anyTree(tableScanWithConstraint("test_partition_pruning", ImmutableMap.of("ds", singleValue(VARCHAR, utf8Slice("2019-11-01"))))));
            // Equality through a date() cast still prunes to one partition.
            assertPlan(pushdownFilterEnabled, "SELECT * FROM test_partition_pruning WHERE date(ds) = date('2019-11-01')",
                    anyTree(tableScanWithConstraint("test_partition_pruning", ImmutableMap.of("ds", singleValue(VARCHAR, utf8Slice("2019-11-01"))))));
            // BETWEEN on date(ds) enumerates the matching partition values.
            assertPlan(pushdownFilterEnabled, "SELECT * FROM test_partition_pruning WHERE date(ds) BETWEEN date('2019-11-02') AND date('2019-11-04')",
                    anyTree(tableScanWithConstraint("test_partition_pruning", ImmutableMap.of("ds", multipleValues(VARCHAR, utf8Slices("2019-11-02", "2019-11-03", "2019-11-04"))))));
            // Open-ended ranges (string comparison and date() comparison).
            assertPlan(pushdownFilterEnabled, "SELECT * FROM test_partition_pruning WHERE ds < '2019-11-05'",
                    anyTree(tableScanWithConstraint("test_partition_pruning", ImmutableMap.of("ds", multipleValues(VARCHAR, utf8Slices("2019-11-01", "2019-11-02", "2019-11-03", "2019-11-04"))))));
            assertPlan(pushdownFilterEnabled, "SELECT * FROM test_partition_pruning WHERE date(ds) > date('2019-11-02')",
                    anyTree(tableScanWithConstraint("test_partition_pruning", ImmutableMap.of("ds", multipleValues(VARCHAR, utf8Slices("2019-11-03", "2019-11-04", "2019-11-05", "2019-11-06", "2019-11-07"))))));
            // Conjunction of both forms intersects the partition sets.
            assertPlan(pushdownFilterEnabled, "SELECT * FROM test_partition_pruning WHERE ds < '2019-11-05' AND date(ds) > date('2019-11-02')",
                    anyTree(tableScanWithConstraint("test_partition_pruning", ImmutableMap.of("ds", multipleValues(VARCHAR, utf8Slices("2019-11-03", "2019-11-04"))))));
        }
        finally {
            queryRunner.execute("DROP TABLE test_partition_pruning");
        }
    }
    /**
     * Verifies the optimize-metadata-queries rewrite: aggregations that
     * touch only partition columns (DISTINCT ds, MAX(ds), ...) are folded
     * into constant ValuesNodes from partition metadata, while queries that
     * also reference data columns (orderkey) fall back to a real table
     * scan. Covered for both single- and multi-partition-column tables.
     */
    @Test
    public void testOptimizeMetadataQueries()
    {
        QueryRunner queryRunner = getQueryRunner();
        Session optimizeMetadataQueries = Session.builder(this.getQueryRunner().getDefaultSession())
                .setSystemProperty(OPTIMIZE_METADATA_QUERIES, Boolean.toString(true))
                .setCatalogSessionProperty(HIVE_CATALOG, PUSHDOWN_FILTER_ENABLED, Boolean.toString(true))
                .build();
        // orderkey % 7 yields seven ds partitions: 2020-10-01 .. 2020-10-07.
        queryRunner.execute(
                "CREATE TABLE test_optimize_metadata_queries WITH (partitioned_by = ARRAY['ds']) AS " +
                "SELECT orderkey, CAST(to_iso8601(date_add('DAY', orderkey % 7, date('2020-10-01'))) AS VARCHAR) AS ds FROM orders WHERE orderkey < 1000");
        // Same layout plus a second (constant) partition column "value".
        queryRunner.execute(
                "CREATE TABLE test_optimize_metadata_queries_multiple_partition_columns WITH (partitioned_by = ARRAY['ds', 'value']) AS " +
                "SELECT orderkey, CAST(to_iso8601(date_add('DAY', orderkey % 7, date('2020-10-01'))) AS VARCHAR) AS ds, 1 AS value FROM orders WHERE orderkey < 1000");
        try {
            // DISTINCT on the partition column alone folds to constant values.
            assertPlan(
                    optimizeMetadataQueries,
                    "SELECT DISTINCT ds FROM test_optimize_metadata_queries",
                    anyTree(values(
                            ImmutableList.of("ds"),
                            ImmutableList.of(
                                    ImmutableList.of(new StringLiteral("2020-10-01")),
                                    ImmutableList.of(new StringLiteral("2020-10-02")),
                                    ImmutableList.of(new StringLiteral("2020-10-03")),
                                    ImmutableList.of(new StringLiteral("2020-10-04")),
                                    ImmutableList.of(new StringLiteral("2020-10-05")),
                                    ImmutableList.of(new StringLiteral("2020-10-06")),
                                    ImmutableList.of(new StringLiteral("2020-10-07"))))));
            // Partition-column predicate is applied during folding.
            assertPlan(
                    optimizeMetadataQueries,
                    "SELECT DISTINCT ds FROM test_optimize_metadata_queries WHERE ds > '2020-10-04'",
                    anyTree(values(
                            ImmutableList.of("ds"),
                            ImmutableList.of(
                                    ImmutableList.of(new StringLiteral("2020-10-05")),
                                    ImmutableList.of(new StringLiteral("2020-10-06")),
                                    ImmutableList.of(new StringLiteral("2020-10-07"))))));
            // A predicate on a data column (orderkey) prevents folding; the
            // plan keeps a table scan carrying the orderkey domain.
            assertPlan(
                    optimizeMetadataQueries,
                    "SELECT DISTINCT ds FROM test_optimize_metadata_queries WHERE ds = '2020-10-04' AND orderkey > 200",
                    anyTree(tableScan(
                            "test_optimize_metadata_queries",
                            withColumnDomains(ImmutableMap.of("orderkey", Domain.create(ValueSet.ofRanges(Range.greaterThan(BIGINT, 200L)), false))),
                            TRUE_CONSTANT,
                            ImmutableSet.of("orderkey"))));
            // NOTE(review): this assertion is byte-identical to the previous
            // one -- looks like a copy-paste duplicate; confirm whether a
            // different query/expectation was intended here.
            assertPlan(
                    optimizeMetadataQueries,
                    "SELECT DISTINCT ds FROM test_optimize_metadata_queries WHERE ds = '2020-10-04' AND orderkey > 200",
                    anyTree(tableScan(
                            "test_optimize_metadata_queries",
                            withColumnDomains(ImmutableMap.of("orderkey", Domain.create(ValueSet.ofRanges(Range.greaterThan(BIGINT, 200L)), false))),
                            TRUE_CONSTANT,
                            ImmutableSet.of("orderkey"))));
            // Multi-partition-column table: DISTINCT ds still folds.
            assertPlan(
                    optimizeMetadataQueries,
                    "SELECT DISTINCT ds FROM test_optimize_metadata_queries_multiple_partition_columns",
                    anyTree(values(
                            ImmutableList.of("ds"),
                            ImmutableList.of(
                                    ImmutableList.of(new StringLiteral("2020-10-01")),
                                    ImmutableList.of(new StringLiteral("2020-10-02")),
                                    ImmutableList.of(new StringLiteral("2020-10-03")),
                                    ImmutableList.of(new StringLiteral("2020-10-04")),
                                    ImmutableList.of(new StringLiteral("2020-10-05")),
                                    ImmutableList.of(new StringLiteral("2020-10-06")),
                                    ImmutableList.of(new StringLiteral("2020-10-07"))))));
            assertPlan(
                    optimizeMetadataQueries,
                    "SELECT DISTINCT ds FROM test_optimize_metadata_queries_multiple_partition_columns WHERE ds > '2020-10-04'",
                    anyTree(values(
                            ImmutableList.of("ds"),
                            ImmutableList.of(
                                    ImmutableList.of(new StringLiteral("2020-10-05")),
                                    ImmutableList.of(new StringLiteral("2020-10-06")),
                                    ImmutableList.of(new StringLiteral("2020-10-07"))))));
            // Data-column predicate again blocks folding on the multi-column table.
            assertPlan(
                    optimizeMetadataQueries,
                    "SELECT DISTINCT ds FROM test_optimize_metadata_queries_multiple_partition_columns WHERE ds = '2020-10-04' AND orderkey > 200",
                    anyTree(tableScan(
                            "test_optimize_metadata_queries_multiple_partition_columns",
                            withColumnDomains(ImmutableMap.of("orderkey", Domain.create(ValueSet.ofRanges(Range.greaterThan(BIGINT, 200L)), false))),
                            TRUE_CONSTANT,
                            ImmutableSet.of("orderkey"))));
            // GROUP BY over partition columns folds to (ds, value) constant rows.
            assertPlan(
                    optimizeMetadataQueries,
                    "SELECT ds, MAX(value) FROM test_optimize_metadata_queries_multiple_partition_columns WHERE ds > '2020-10-04' GROUP BY ds",
                    anyTree(values(
                            ImmutableList.of("ds", "value"),
                            ImmutableList.of(
                                    ImmutableList.of(new StringLiteral("2020-10-05"), new LongLiteral("1")),
                                    ImmutableList.of(new StringLiteral("2020-10-06"), new LongLiteral("1")),
                                    ImmutableList.of(new StringLiteral("2020-10-07"), new LongLiteral("1"))))));
            // Global MAX over partition columns folds to projected constants,
            // in either output order.
            assertPlan(
                    optimizeMetadataQueries,
                    "SELECT MAX(ds), MAX(value) FROM test_optimize_metadata_queries_multiple_partition_columns WHERE ds > '2020-10-04'",
                    anyTree(
                            project(
                                    ImmutableMap.of(
                                            "max", expression("'2020-10-07'"),
                                            "max_2", expression("1")),
                                    any(values()))));
            assertPlan(
                    optimizeMetadataQueries,
                    "SELECT MAX(value), MAX(ds) FROM test_optimize_metadata_queries_multiple_partition_columns WHERE ds > '2020-10-04'",
                    anyTree(
                            project(
                                    ImmutableMap.of(
                                            "max", expression("1"),
                                            "max_2", expression("'2020-10-07'")),
                                    any(values()))));
        }
        finally {
            queryRunner.execute("DROP TABLE IF EXISTS test_optimize_metadata_queries");
            queryRunner.execute("DROP TABLE IF EXISTS test_optimize_metadata_queries_multiple_partition_columns");
        }
    }
/**
 * Verifies that, with {@code optimize_metadata_queries} enabled, scalar subqueries of the form
 * {@code (SELECT max(ds) ...)} / {@code (SELECT min(ds) ...)} over a partition column are folded
 * into a constant single-value constraint on the outer table scan.
 */
@Test
public void testMetadataAggregationFolding()
{
    QueryRunner runner = getQueryRunner();
    // Reader session with metadata-query optimization turned on.
    Session foldingSession = Session.builder(runner.getDefaultSession())
            .setSystemProperty(OPTIMIZE_METADATA_QUERIES, Boolean.toString(true))
            .build();
    // Writer session used only to create the partitioned fixture tables.
    Session writerSession = Session.builder(runner.getDefaultSession())
            .setCatalogSessionProperty(HIVE_CATALOG, SHUFFLE_PARTITIONED_COLUMNS_FOR_TABLE_WRITE, Boolean.toString(true))
            .build();

    // orderkey % 7 yields ds values 2020-07-01 .. 2020-07-07.
    runner.execute(
            writerSession,
            "CREATE TABLE test_metadata_aggregation_folding WITH (partitioned_by = ARRAY['ds']) AS " +
                    "SELECT orderkey, CAST(to_iso8601(date_add('DAY', orderkey % 7, date('2020-07-01'))) AS VARCHAR) AS ds FROM orders WHERE orderkey < 1000");
    // orderkey % 200 yields a much larger partition range, up to 2021-01-16.
    runner.execute(
            writerSession,
            "CREATE TABLE test_metadata_aggregation_folding_more_partitions WITH (partitioned_by = ARRAY['ds']) AS " +
                    "SELECT orderkey, CAST(to_iso8601(date_add('DAY', orderkey % 200, date('2020-07-01'))) AS VARCHAR) AS ds FROM orders WHERE orderkey < 1000");
    // Same layout as the first table, plus an extra NULL partition inserted below.
    runner.execute(
            writerSession,
            "CREATE TABLE test_metadata_aggregation_folding_null_partitions WITH (partitioned_by = ARRAY['ds']) AS " +
                    "SELECT orderkey, CAST(to_iso8601(date_add('DAY', orderkey % 7, date('2020-07-01'))) AS VARCHAR) AS ds FROM orders WHERE orderkey < 1000");
    runner.execute(
            writerSession,
            "INSERT INTO test_metadata_aggregation_folding_null_partitions SELECT 0 as orderkey, null AS ds");

    try {
        // max(ds) folds to the newest partition value.
        assertPlan(
                foldingSession,
                "SELECT * FROM test_metadata_aggregation_folding WHERE ds = (SELECT max(ds) from test_metadata_aggregation_folding)",
                anyTree(
                        join(INNER, ImmutableList.of(),
                                tableScan("test_metadata_aggregation_folding", getSingleValueColumnDomain("ds", "2020-07-07"), TRUE_CONSTANT, ImmutableSet.of("ds")),
                                anyTree(any()))));
        // min(ds) folds to the oldest partition value.
        assertPlan(
                foldingSession,
                "SELECT * FROM test_metadata_aggregation_folding WHERE ds = (SELECT min(ds) from test_metadata_aggregation_folding)",
                anyTree(
                        join(INNER, ImmutableList.of(),
                                tableScan("test_metadata_aggregation_folding", getSingleValueColumnDomain("ds", "2020-07-01"), TRUE_CONSTANT, ImmutableSet.of("ds")),
                                anyTree(any()))));
        // Folding also applies to a table with many partitions.
        assertPlan(
                foldingSession,
                "SELECT * FROM test_metadata_aggregation_folding_more_partitions WHERE ds = (SELECT max(ds) from test_metadata_aggregation_folding_more_partitions)",
                anyTree(
                        join(INNER, ImmutableList.of(),
                                tableScan("test_metadata_aggregation_folding_more_partitions", getSingleValueColumnDomain("ds", "2021-01-16"), TRUE_CONSTANT, ImmutableSet.of("ds")),
                                anyTree(any()))));
        assertPlan(
                foldingSession,
                "SELECT * FROM test_metadata_aggregation_folding_more_partitions WHERE ds = (SELECT min(ds) from test_metadata_aggregation_folding_more_partitions)",
                anyTree(
                        join(INNER, ImmutableList.of(),
                                tableScan("test_metadata_aggregation_folding_more_partitions", getSingleValueColumnDomain("ds", "2020-07-01"), TRUE_CONSTANT, ImmutableSet.of("ds")),
                                anyTree(any()))));
        // The NULL partition in the subquery's table does not change the folded max/min value.
        assertPlan(
                foldingSession,
                "SELECT * FROM test_metadata_aggregation_folding WHERE ds = (SELECT max(ds) from test_metadata_aggregation_folding_null_partitions)",
                anyTree(
                        join(INNER, ImmutableList.of(),
                                tableScan("test_metadata_aggregation_folding", getSingleValueColumnDomain("ds", "2020-07-07"), TRUE_CONSTANT, ImmutableSet.of("ds")),
                                anyTree(any()))));
        assertPlan(
                foldingSession,
                "SELECT * FROM test_metadata_aggregation_folding WHERE ds = (SELECT min(ds) from test_metadata_aggregation_folding_null_partitions)",
                anyTree(
                        join(INNER, ImmutableList.of(),
                                tableScan("test_metadata_aggregation_folding", getSingleValueColumnDomain("ds", "2020-07-01"), TRUE_CONSTANT, ImmutableSet.of("ds")),
                                anyTree(any()))));
    }
    finally {
        runner.execute("DROP TABLE IF EXISTS test_metadata_aggregation_folding");
        runner.execute("DROP TABLE IF EXISTS test_metadata_aggregation_folding_more_partitions");
        runner.execute("DROP TABLE IF EXISTS test_metadata_aggregation_folding_null_partitions");
    }
}
/**
 * Builds a {@code TupleDomain} that constrains {@code column} to exactly the single VARCHAR
 * {@code value}.
 */
private static TupleDomain<String> getSingleValueColumnDomain(String column, String value)
{
    Domain domain = singleValue(VARCHAR, utf8Slice(value));
    return withColumnDomains(ImmutableMap.of(column, domain));
}
/**
 * Converts each input string to a UTF-8 {@code Slice}, preserving order, and returns them as an
 * immutable list.
 */
private static List<Slice> utf8Slices(String... values)
{
    ImmutableList.Builder<Slice> slices = ImmutableList.builder();
    for (String value : values) {
        slices.add(Slices.utf8Slice(value));
    }
    return slices.build();
}
/**
 * Returns a plan pattern that matches a {@code TableScanNode} on {@code tableName} whose current
 * constraint — re-keyed from column handles to plain column names — equals
 * {@code expectedConstraint} exactly.
 */
private static PlanMatchPattern tableScanWithConstraint(String tableName, Map<String, Domain> expectedConstraint)
{
    return PlanMatchPattern.tableScan(tableName).with(new Matcher() {
        @Override
        public boolean shapeMatches(PlanNode node)
        {
            return node instanceof TableScanNode;
        }

        @Override
        public MatchResult detailMatches(PlanNode node, StatsProvider stats, Session session, Metadata metadata, SymbolAliases symbolAliases)
        {
            // Re-key the scan's constraint from HiveColumnHandle to column name for comparison.
            TupleDomain<String> actualConstraint = ((TableScanNode) node)
                    .getCurrentConstraint()
                    .transform(HiveColumnHandle.class::cast)
                    .transform(HiveColumnHandle::getName);
            return expectedConstraint.equals(actualConstraint.getDomains().get()) ? match() : NO_MATCH;
        }
    });
}
// Verifies that equality and IS NOT NULL predicates on array, map and row subfields are pushed
// down as Subfield-keyed domains.
@Test
public void testPushdownFilterOnSubfields()
{
    // One column of each subscriptable shape: array, map, and a row that itself nests a row,
    // an array and two maps.
    assertUpdate("CREATE TABLE test_pushdown_filter_on_subfields(" +
            "id bigint, " +
            "a array(bigint), " +
            "b map(varchar, bigint), " +
            "c row(" +
            "a bigint, " +
            "b row(x bigint), " +
            "c array(bigint), " +
            "d map(bigint, bigint), " +
            "e map(varchar, bigint)))");

    // Array subscript with a literal index.
    assertPushdownFilterOnSubfields("SELECT * FROM test_pushdown_filter_on_subfields WHERE a[1] = 1",
            ImmutableMap.of(new Subfield("a[1]"), singleValue(BIGINT, 1L)));
    // The constant index expression 1 + 1 is expected to fold to a[2] before pushdown.
    assertPushdownFilterOnSubfields("SELECT * FROM test_pushdown_filter_on_subfields where a[1 + 1] = 1",
            ImmutableMap.of(new Subfield("a[2]"), singleValue(BIGINT, 1L)));
    // Map subscript with a varchar key.
    assertPushdownFilterOnSubfields("SELECT * FROM test_pushdown_filter_on_subfields WHERE b['foo'] = 1",
            ImmutableMap.of(new Subfield("b[\"foo\"]"), singleValue(BIGINT, 1L)));
    // A constant key expression (concat) is expected to fold to the literal key.
    assertPushdownFilterOnSubfields("SELECT * FROM test_pushdown_filter_on_subfields WHERE b[concat('f','o', 'o')] = 1",
            ImmutableMap.of(new Subfield("b[\"foo\"]"), singleValue(BIGINT, 1L)));
    // Row dereferences, including nested rows, arrays and maps inside the row.
    assertPushdownFilterOnSubfields("SELECT * FROM test_pushdown_filter_on_subfields WHERE c.a = 1",
            ImmutableMap.of(new Subfield("c.a"), singleValue(BIGINT, 1L)));
    assertPushdownFilterOnSubfields("SELECT * FROM test_pushdown_filter_on_subfields WHERE c.b.x = 1",
            ImmutableMap.of(new Subfield("c.b.x"), singleValue(BIGINT, 1L)));
    assertPushdownFilterOnSubfields("SELECT * FROM test_pushdown_filter_on_subfields WHERE c.c[5] = 1",
            ImmutableMap.of(new Subfield("c.c[5]"), singleValue(BIGINT, 1L)));
    assertPushdownFilterOnSubfields("SELECT * FROM test_pushdown_filter_on_subfields WHERE c.d[5] = 1",
            ImmutableMap.of(new Subfield("c.d[5]"), singleValue(BIGINT, 1L)));
    assertPushdownFilterOnSubfields("SELECT * FROM test_pushdown_filter_on_subfields WHERE c.e[concat('f', 'o', 'o')] = 1",
            ImmutableMap.of(new Subfield("c.e[\"foo\"]"), singleValue(BIGINT, 1L)));
    assertPushdownFilterOnSubfields("SELECT * FROM test_pushdown_filter_on_subfields WHERE c.e['foo'] = 1",
            ImmutableMap.of(new Subfield("c.e[\"foo\"]"), singleValue(BIGINT, 1L)));
    // IS NOT NULL becomes a notNull domain on each referenced subfield.
    assertPushdownFilterOnSubfields("SELECT * FROM test_pushdown_filter_on_subfields WHERE c.a IS NOT NULL AND c.c IS NOT NULL",
            ImmutableMap.of(new Subfield("c.a"), notNull(BIGINT), new Subfield("c.c"), notNull(new ArrayType(BIGINT))));

    // TupleDomain predicate is always FALSE
    // Contradictory predicates are expected to reduce the plan to a values node (no table scan).
    assertPlan(pushdownFilterEnabled(), "SELECT id FROM test_pushdown_filter_on_subfields WHERE c.a = 1 AND c.a = 2",
            output(values("id")));

    assertUpdate("DROP TABLE test_pushdown_filter_on_subfields");
}
// Verifies subfield pruning for array subscripts, including arrays of arrays, arrays of rows,
// and arrays used with UNNEST.
@Test
public void testPushdownArraySubscripts()
{
    assertUpdate("CREATE TABLE test_pushdown_array_subscripts(id bigint, " +
            "a array(bigint), " +
            "b array(array(varchar)), " +
            "y array(row(a bigint, b varchar, c double, d row(d1 bigint, d2 double))), " +
            "z array(array(row(p bigint, e row(e1 bigint, e2 varchar)))))");

    // Shared subscript scenarios (filter/project, joins, aggregations, set ops, windows).
    assertPushdownSubscripts("test_pushdown_array_subscripts");

    // Unnest
    // Unnesting all of b: no subfield expected for b, only a[1] is pruned.
    assertPushdownSubfields("SELECT t.b, a[1] FROM test_pushdown_array_subscripts CROSS JOIN UNNEST(b) as t(b)", "test_pushdown_array_subscripts",
            ImmutableMap.of("a", toSubfields("a[1]")));
    // Unnesting b[1]: only that element of b is needed.
    assertPushdownSubfields("SELECT t.b, a[1] FROM test_pushdown_array_subscripts CROSS JOIN UNNEST(b[1]) as t(b)", "test_pushdown_array_subscripts",
            ImmutableMap.of("a", toSubfields("a[1]"), "b", toSubfields("b[1]")));
    // Subscript on the unnested element maps to a wildcard on the outer array: b[*][2].
    assertPushdownSubfields("SELECT t.b[2], a[1] FROM test_pushdown_array_subscripts CROSS JOIN UNNEST(b) as t(b)", "test_pushdown_array_subscripts",
            ImmutableMap.of("a", toSubfields("a[1]"), "b", toSubfields("b[*][2]")));
    assertPushdownSubfields("SELECT id, grouping(index), sum(length(b[1][2])) FROM test_pushdown_array_subscripts CROSS JOIN UNNEST(a) as t(index) GROUP BY grouping sets ((index, id), (index))", "test_pushdown_array_subscripts",
            ImmutableMap.of("b", toSubfields("b[1][2]")));
    assertPushdownSubfields("SELECT id, b[1] FROM test_pushdown_array_subscripts CROSS JOIN UNNEST(a) as t(unused)", "test_pushdown_array_subscripts",
            ImmutableMap.of("b", toSubfields("b[1]")));

    // No subfield pruning
    // Subscripting a function result (array_sort): no pruning expected.
    assertPushdownSubfields("SELECT array_sort(a)[1] FROM test_pushdown_array_subscripts", "test_pushdown_array_subscripts",
            ImmutableMap.of());
    // Non-constant index b[index]: no pruning expected.
    assertPushdownSubfields("SELECT id FROM test_pushdown_array_subscripts CROSS JOIN UNNEST(a) as t(index) WHERE a[1] > 10 AND cardinality(b[index]) = 2", "test_pushdown_array_subscripts",
            ImmutableMap.of());

    assertUpdate("DROP TABLE test_pushdown_array_subscripts");
}
/**
 * Verifies subfield pruning for map subscripts, including nested maps, maps used with UNNEST,
 * and maps with varchar keys.
 */
@Test
public void testPushdownMapSubscripts()
{
    assertUpdate("CREATE TABLE test_pushdown_map_subscripts(id bigint, " +
            "a map(bigint, bigint), " +
            "b map(bigint, map(bigint, varchar)), " +
            "c map(varchar, bigint), " +
            "y map(bigint, row(a bigint, b varchar, c double, d row(d1 bigint, d2 double)))," +
            "z map(bigint, map(bigint, row(p bigint, e row(e1 bigint, e2 varchar)))))");

    // Shared subscript scenarios (filter/project, joins, aggregations, set ops, windows).
    assertPushdownSubscripts("test_pushdown_map_subscripts");

    // Unnest
    // Unnesting all of b: no subfield expected for b, only a[1] is pruned.
    assertPushdownSubfields("SELECT t.b, a[1] FROM test_pushdown_map_subscripts CROSS JOIN UNNEST(b) as t(k, b)", "test_pushdown_map_subscripts",
            ImmutableMap.of("a", toSubfields("a[1]")));
    assertPushdownSubfields("SELECT t.b, a[1] FROM test_pushdown_map_subscripts CROSS JOIN UNNEST(b[1]) as t(k, b)", "test_pushdown_map_subscripts",
            ImmutableMap.of("a", toSubfields("a[1]"), "b", toSubfields("b[1]")));
    // Subscript on the unnested value maps to a wildcard on the outer map: b[*][2].
    assertPushdownSubfields("SELECT t.b[2], a[1] FROM test_pushdown_map_subscripts CROSS JOIN UNNEST(b) as t(k, b)", "test_pushdown_map_subscripts",
            ImmutableMap.of("a", toSubfields("a[1]"), "b", toSubfields("b[*][2]")));
    assertPushdownSubfields("SELECT id, b[1] FROM test_pushdown_map_subscripts CROSS JOIN UNNEST(a) as t(unused_k, unused_v)", "test_pushdown_map_subscripts",
            ImmutableMap.of("b", toSubfields("b[1]")));

    // Map with varchar keys
    assertPushdownSubfields("SELECT c['cat'] FROM test_pushdown_map_subscripts", "test_pushdown_map_subscripts",
            ImmutableMap.of("c", toSubfields("c[\"cat\"]")));
    // Key computed via JSON functions: no pruning expected.
    assertPushdownSubfields("SELECT c[JSON_EXTRACT_SCALAR(JSON_PARSE('{}'),'$.a')] FROM test_pushdown_map_subscripts", "test_pushdown_map_subscripts",
            ImmutableMap.of());
    assertPushdownSubfields("SELECT mod(c['cat'], 2) FROM test_pushdown_map_subscripts WHERE c['dog'] > 10", "test_pushdown_map_subscripts",
            ImmutableMap.of("c", toSubfields("c[\"cat\"]", "c[\"dog\"]")));

    // No subfield pruning
    assertPushdownSubfields("SELECT map_keys(a)[1] FROM test_pushdown_map_subscripts", "test_pushdown_map_subscripts",
            ImmutableMap.of());

    assertUpdate("DROP TABLE test_pushdown_map_subscripts");
}
/**
 * Runs a common battery of subscript-pruning scenarios against {@code tableName}; the table must
 * provide columns id, a, b, y and z with subscriptable types (see the array and map callers).
 * Each assertion states which subfields are expected to be pushed into the table scan.
 */
private void assertPushdownSubscripts(String tableName)
{
    // Filter and project
    assertPushdownSubfields(format("SELECT a[1] FROM %s", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]")));
    assertPushdownSubfields(format("SELECT a[1] + 10 FROM %s", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]")));
    assertPushdownSubfields(format("SELECT a[1] + mod(a[2], 3) FROM %s", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]", "a[2]")));
    assertPushdownSubfields(format("SELECT a[1] FROM %s WHERE a[2] > 10", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]", "a[2]")));
    assertPushdownSubfields(format("SELECT a[1] FROM %s WHERE mod(a[2], 3) = 1", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]", "a[2]")));
    assertPushdownSubfields(format("SELECT a[1], b[2][3] FROM %s", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]"), "b", toSubfields("b[2][3]")));
    // cardinality(b[1]) needs all of b[1]; the narrower b[1][2] is expected to be subsumed.
    assertPushdownSubfields(format("SELECT cardinality(b[1]), b[1][2] FROM %s", tableName), tableName,
            ImmutableMap.of("b", toSubfields("b[1]")));
    assertPushdownSubfields(format("CREATE TABLE x AS SELECT id, a[1] as a1 FROM %s", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]")));
    assertPushdownSubfields(format("CREATE TABLE x AS SELECT id FROM %s WHERE a[1] > 10", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]")));
    assertPushdownSubfields(format("SELECT a[1] FROM %s ORDER BY id LIMIT 1", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]")));

    // Sort
    assertPushdownSubfields(format("SELECT a[1] FROM %s ORDER BY a[2]", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]", "a[2]")));

    // Join
    assertPlan(format("SELECT l.orderkey, a.a[1] FROM lineitem l, %s a WHERE l.linenumber = a.id", tableName),
            anyTree(
                    node(JoinNode.class,
                            anyTree(tableScan("lineitem", ImmutableMap.of())),
                            anyTree(tableScan(tableName, ImmutableMap.of("a", toSubfields("a[1]")))))));
    assertPlan(format("SELECT l.orderkey, a.a[1] FROM lineitem l, %s a WHERE l.linenumber = a.id AND a.a[2] > 10", tableName),
            anyTree(
                    node(JoinNode.class,
                            anyTree(tableScan("lineitem", ImmutableMap.of())),
                            anyTree(tableScan(tableName, ImmutableMap.of("a", toSubfields("a[1]", "a[2]")))))));

    // Semi join
    assertPlan(format("SELECT a[1] FROM %s WHERE a[2] IN (SELECT a[3] FROM %s)", tableName, tableName),
            anyTree(node(SemiJoinNode.class,
                    anyTree(tableScan(tableName, ImmutableMap.of("a", toSubfields("a[1]", "a[2]")))),
                    anyTree(tableScan(tableName, ImmutableMap.of("a", toSubfields("a[3]")))))));

    // Aggregation
    assertPushdownSubfields(format("SELECT id, min(a[1]) FROM %s GROUP BY 1", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]")));
    assertPushdownSubfields(format("SELECT id, min(a[1]) FROM %s GROUP BY 1, a[2]", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]", "a[2]")));
    assertPushdownSubfields(format("SELECT id, min(a[1]) FROM %s GROUP BY 1 HAVING max(a[2]) > 10", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]", "a[2]")));
    assertPushdownSubfields(format("SELECT id, min(mod(a[1], 3)) FROM %s GROUP BY 1", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]")));
    assertPushdownSubfields(format("SELECT id, min(a[1]) FILTER (WHERE a[2] > 10) FROM %s GROUP BY 1", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]", "a[2]")));
    assertPushdownSubfields(format("SELECT id, min(a[1] + length(b[2][3])) * avg(a[4]) FROM %s GROUP BY 1", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]", "a[4]"), "b", toSubfields("b[2][3]")));
    assertPushdownSubfields(format("SELECT min(a[1]) FROM %s GROUP BY id", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]")));
    // Dereferences applied to arbitrary()'s result are expected to be pushed through to the
    // underlying column (y[1].a etc.).
    assertPushdownSubfields(format("SELECT arbitrary(y[1]).a FROM %s GROUP BY id", tableName), tableName,
            ImmutableMap.of("y", toSubfields("y[1].a")));
    assertPushdownSubfields(format("SELECT arbitrary(y[1]).d.d1 FROM %s GROUP BY id", tableName), tableName,
            ImmutableMap.of("y", toSubfields("y[1].d.d1")));
    assertPushdownSubfields(format("SELECT arbitrary(y[2].d).d1 FROM %s GROUP BY id", tableName), tableName,
            ImmutableMap.of("y", toSubfields("y[2].d.d1")));
    assertPushdownSubfields(format("SELECT arbitrary(y[3].d.d1) FROM %s GROUP BY id", tableName), tableName,
            ImmutableMap.of("y", toSubfields("y[3].d.d1")));
    assertPushdownSubfields(format("SELECT arbitrary(z[1][2]).e.e1 FROM %s GROUP BY id", tableName), tableName,
            ImmutableMap.of("z", toSubfields("z[1][2].e.e1")));
    assertPushdownSubfields(format("SELECT arbitrary(z[2][3].e).e2 FROM %s GROUP BY id", tableName), tableName,
            ImmutableMap.of("z", toSubfields("z[2][3].e.e2")));

    // Union
    assertPlan(format("SELECT a[1] FROM %s UNION ALL SELECT a[2] FROM %s", tableName, tableName),
            anyTree(exchange(
                    anyTree(tableScan(tableName, ImmutableMap.of("a", toSubfields("a[1]")))),
                    anyTree(tableScan(tableName, ImmutableMap.of("a", toSubfields("a[2]")))))));
    assertPlan(format("SELECT a[1] FROM (SELECT * FROM %s UNION ALL SELECT * FROM %s)", tableName, tableName),
            anyTree(exchange(
                    anyTree(tableScan(tableName, ImmutableMap.of("a", toSubfields("a[1]")))),
                    anyTree(tableScan(tableName, ImmutableMap.of("a", toSubfields("a[1]")))))));
    assertPlan(format("SELECT a[1] FROM (SELECT * FROM %s WHERE a[2] > 10 UNION ALL SELECT * FROM %s)", tableName, tableName),
            anyTree(exchange(
                    anyTree(tableScan(tableName, ImmutableMap.of("a", toSubfields("a[1]", "a[2]")))),
                    anyTree(tableScan(tableName, ImmutableMap.of("a", toSubfields("a[1]")))))));

    // Except
    assertPlan(format("SELECT a[1] FROM %s EXCEPT SELECT a[2] FROM %s", tableName, tableName),
            anyTree(exchange(
                    anyTree(tableScan(tableName, ImmutableMap.of("a", toSubfields("a[1]")))),
                    anyTree(tableScan(tableName, ImmutableMap.of("a", toSubfields("a[2]")))))));

    // Intersect
    assertPlan(format("SELECT a[1] FROM %s INTERSECT SELECT a[2] FROM %s", tableName, tableName),
            anyTree(exchange(
                    anyTree(tableScan(tableName, ImmutableMap.of("a", toSubfields("a[1]")))),
                    anyTree(tableScan(tableName, ImmutableMap.of("a", toSubfields("a[2]")))))));

    // Window function
    assertPushdownSubfields(format("SELECT id, first_value(a[1]) over (partition by a[2] order by b[1][2]) FROM %s", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]", "a[2]"), "b", toSubfields("b[1][2]")));
    assertPushdownSubfields(format("SELECT count(*) over (partition by a[1] order by a[2] rows between a[3] preceding and a[4] preceding) FROM %s", tableName), tableName,
            ImmutableMap.of("a", toSubfields("a[1]", "a[2]", "a[3]", "a[4]")));

    // no subfield pruning
    // Non-constant index a[id]: no pruning expected.
    assertPushdownSubfields(format("SELECT a[id] FROM %s", tableName), tableName,
            ImmutableMap.of());
    assertPushdownSubfields(format("SELECT a[1] FROM (SELECT DISTINCT * FROM %s) LIMIT 10", tableName), tableName,
            ImmutableMap.of());

    // No pass through subfield pruning
    // Unlike arbitrary(), min() is not expected to pass dereferences through: the whole
    // y[1] / z[1][2] element is read.
    assertPushdownSubfields(format("SELECT id, min(y[1]).a FROM %s GROUP BY 1", tableName), tableName,
            ImmutableMap.of("y", toSubfields("y[1]")));
    assertPushdownSubfields(format("SELECT id, min(y[1]).a, min(y[1].d).d1 FROM %s GROUP BY 1", tableName), tableName,
            ImmutableMap.of("y", toSubfields("y[1]")));
    assertPushdownSubfields(format("SELECT id, min(z[1][2]).e.e1 FROM %s GROUP BY 1", tableName), tableName,
            ImmutableMap.of("z", toSubfields("z[1][2]")));
}
/**
 * Verifies subfield pruning for row (struct) dereferences: only the referenced nested fields of
 * x and y should be pushed into the table scan, across projections, filters, joins,
 * aggregations, UNNEST (current and legacy) and case-insensitive references.
 */
@Test
public void testPushdownSubfields()
{
    assertUpdate("CREATE TABLE test_pushdown_struct_subfields(id bigint, x row(a bigint, b varchar, c double, d row(d1 bigint, d2 double)), y array(row(a bigint, b varchar, c double, d row(d1 bigint, d2 double))))");

    assertPushdownSubfields("SELECT t.a, t.d.d1, x.a FROM test_pushdown_struct_subfields CROSS JOIN UNNEST(y) as t(a, b, c, d)", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.a"), "y", toSubfields("y[*].a", "y[*].d.d1")));

    assertPushdownSubfields("SELECT x.a, mod(x.d.d1, 2) FROM test_pushdown_struct_subfields", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.a", "x.d.d1")));

    // Reading x.d in full subsumes the narrower x.d.d1 / x.d.d2 references.
    assertPushdownSubfields("SELECT x.d, mod(x.d.d1, 2), x.d.d2 FROM test_pushdown_struct_subfields", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.d")));

    assertPushdownSubfields("SELECT x.a FROM test_pushdown_struct_subfields WHERE x.b LIKE 'abc%'", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.a", "x.b")));

    assertPushdownSubfields("SELECT x.a FROM test_pushdown_struct_subfields WHERE x.a > 10 AND x.b LIKE 'abc%'", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.a", "x.b")));

    // Join
    // (An unused local Session was removed here; all assertions use the default session.)
    assertPlan("SELECT l.orderkey, x.a, mod(x.d.d1, 2) FROM lineitem l, test_pushdown_struct_subfields a WHERE l.linenumber = a.id",
            anyTree(
                    node(JoinNode.class,
                            anyTree(tableScan("lineitem", ImmutableMap.of())),
                            anyTree(tableScan("test_pushdown_struct_subfields", ImmutableMap.of("x", toSubfields("x.a", "x.d.d1")))))));
    assertPlan("SELECT l.orderkey, x.a, mod(x.d.d1, 2) FROM lineitem l, test_pushdown_struct_subfields a WHERE l.linenumber = a.id AND x.a > 10",
            anyTree(
                    node(JoinNode.class,
                            anyTree(tableScan("lineitem", ImmutableMap.of())),
                            anyTree(tableScan("test_pushdown_struct_subfields", ImmutableMap.of("x", toSubfields("x.a", "x.d.d1")))))));

    // Aggregation
    assertPushdownSubfields("SELECT id, min(x.a) FROM test_pushdown_struct_subfields GROUP BY 1", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.a")));
    assertPushdownSubfields("SELECT id, min(mod(x.a, 3)) FROM test_pushdown_struct_subfields GROUP BY 1", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.a")));
    assertPushdownSubfields("SELECT id, min(x.a) FILTER (WHERE x.b LIKE 'abc%') FROM test_pushdown_struct_subfields GROUP BY 1", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.a", "x.b")));
    assertPushdownSubfields("SELECT id, min(x.a + length(y[2].b)) * avg(x.d.d1) FROM test_pushdown_struct_subfields GROUP BY 1", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.a", "x.d.d1"), "y", toSubfields("y[2].b")));
    // arbitrary() passes dereferences through to the underlying column.
    assertPushdownSubfields("SELECT id, arbitrary(x.a) FROM test_pushdown_struct_subfields GROUP BY 1", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.a")));
    assertPushdownSubfields("SELECT id, arbitrary(x).a FROM test_pushdown_struct_subfields GROUP BY 1", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.a")));
    assertPushdownSubfields("SELECT id, arbitrary(x).d.d1 FROM test_pushdown_struct_subfields GROUP BY 1", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.d.d1")));
    assertPushdownSubfields("SELECT id, arbitrary(x.d).d1 FROM test_pushdown_struct_subfields GROUP BY 1", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.d.d1")));
    assertPushdownSubfields("SELECT id, arbitrary(x.d.d2) FROM test_pushdown_struct_subfields GROUP BY 1", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.d.d2")));

    // Unnest
    assertPushdownSubfields("SELECT t.a, t.d.d1, x.a FROM test_pushdown_struct_subfields CROSS JOIN UNNEST(y) as t(a, b, c, d)", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.a"), "y", toSubfields("y[*].a", "y[*].d.d1")));
    assertPushdownSubfields("SELECT t.*, x.a FROM test_pushdown_struct_subfields CROSS JOIN UNNEST(y) as t(a, b, c, d)", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.a"), "y", toSubfields("y[*].a", "y[*].b", "y[*].c", "y[*].d")));
    assertPushdownSubfields("SELECT id, x.a FROM test_pushdown_struct_subfields CROSS JOIN UNNEST(y) as t(a, b, c, d)", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.a")));

    // Legacy unnest
    Session legacyUnnest = Session.builder(getSession()).setSystemProperty("legacy_unnest", "true").build();
    assertPushdownSubfields(legacyUnnest, "SELECT t.y.a, t.y.d.d1, x.a FROM test_pushdown_struct_subfields CROSS JOIN UNNEST(y) as t(y)", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.a"), "y", toSubfields("y[*].a", "y[*].d.d1")));
    assertPushdownSubfields(legacyUnnest, "SELECT t.*, x.a FROM test_pushdown_struct_subfields CROSS JOIN UNNEST(y) as t(y)", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.a")));
    assertPushdownSubfields(legacyUnnest, "SELECT id, x.a FROM test_pushdown_struct_subfields CROSS JOIN UNNEST(y) as t(y)", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.a")));

    // Case sensitivity
    // x.A / x.B resolve to the same subfields as x.a / x.b.
    assertPushdownSubfields("SELECT x.a, x.b, x.A + 2 FROM test_pushdown_struct_subfields WHERE x.B LIKE 'abc%'", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.a", "x.b")));

    // No pass-through subfield pruning
    assertPushdownSubfields("SELECT id, min(x.d).d1 FROM test_pushdown_struct_subfields GROUP BY 1", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.d")));
    assertPushdownSubfields("SELECT id, min(x.d).d1, min(x.d.d2) FROM test_pushdown_struct_subfields GROUP BY 1", "test_pushdown_struct_subfields",
            ImmutableMap.of("x", toSubfields("x.d")));

    assertUpdate("DROP TABLE test_pushdown_struct_subfields");
}
// Assorted subfield-pruning scenarios mixing arrays, maps (bigint and varchar keys) and deeply
// nested rows in a single table.
@Test
public void testPushdownSubfieldsAssorted()
{
    assertUpdate("CREATE TABLE test_pushdown_subfields(" +
            "id bigint, " +
            "a array(bigint), " +
            "b map(bigint, bigint), " +
            "c map(varchar, bigint), " +
            "d row(d1 bigint, d2 array(bigint), d3 map(bigint, bigint), d4 row(x double, y double)), " +
            "w array(array(row(p bigint, e row(e1 bigint, e2 varchar)))), " +
            "x row(a bigint, b varchar, c double, d row(d1 bigint, d2 double)), " +
            "y array(row(a bigint, b varchar, c double, d row(d1 bigint, d2 double))), " +
            "z row(a bigint, b varchar, c double))");

    // Mixed subscripts and dereferences in the projection.
    assertPushdownSubfields("SELECT id, a[1], mod(a[2], 3), b[10], c['cat'] + c['dog'], d.d1 * d.d2[5] / d.d3[2], d.d4.x FROM test_pushdown_subfields", "test_pushdown_subfields",
            ImmutableMap.of(
                    "a", toSubfields("a[1]", "a[2]"),
                    "b", toSubfields("b[10]"),
                    "c", toSubfields("c[\"cat\"]", "c[\"dog\"]"),
                    "d", toSubfields("d.d1", "d.d2[5]", "d.d3[2]", "d.d4.x")));

    // Subfields referenced only in the WHERE clause are pruned as well.
    assertPushdownSubfields("SELECT count(*) FROM test_pushdown_subfields WHERE a[1] > a[2] AND b[1] * c['cat'] = 5 AND d.d4.x IS NULL", "test_pushdown_subfields",
            ImmutableMap.of(
                    "a", toSubfields("a[1]", "a[2]"),
                    "b", toSubfields("b[1]"),
                    "c", toSubfields("c[\"cat\"]"),
                    "d", toSubfields("d.d4.x")));

    // b and c are referenced in full (cardinality/map_keys/UNNEST), so no subfields are expected
    // for them; only a and d are pruned.
    assertPushdownSubfields("SELECT a[1], cardinality(b), map_keys(c), k, v, d.d3[5] FROM test_pushdown_subfields CROSS JOIN UNNEST(c) as t(k, v)", "test_pushdown_subfields",
            ImmutableMap.of(
                    "a", toSubfields("a[1]"),
                    "d", toSubfields("d.d3[5]")));

    // Subfield pruning should pass-through arbitrary() function
    assertPushdownSubfields("SELECT id, " +
            "arbitrary(x.a), " +
            "arbitrary(x).a, " +
            "arbitrary(x).d.d1, " +
            "arbitrary(x.d).d1, " +
            "arbitrary(x.d.d2), " +
            "arbitrary(y[1]).a, " +
            "arbitrary(y[1]).d.d1, " +
            "arbitrary(y[2]).d.d1, " +
            "arbitrary(y[3].d.d1), " +
            "arbitrary(z).c, " +
            "arbitrary(w[1][2]).e.e1, " +
            "arbitrary(w[2][3].e.e2) " +
            "FROM test_pushdown_subfields " +
            "GROUP BY 1", "test_pushdown_subfields",
            ImmutableMap.of("x", toSubfields("x.a", "x.d.d1", "x.d.d2"),
                    "y", toSubfields("y[1].a", "y[1].d.d1", "y[2].d.d1", "y[3].d.d1"),
                    "z", toSubfields("z.c"),
                    "w", toSubfields("w[1][2].e.e1", "w[2][3].e.e2")));

    // Subfield pruning should not pass-through other aggregate functions e.g. min() function
    assertPushdownSubfields("SELECT id, " +
            "min(x.d).d1, " +
            "min(x.d.d2), " +
            "min(z).c, " +
            "min(z.b), " +
            "min(y[1]).a, " +
            "min(y[1]).d.d1, " +
            "min(y[2].d.d1), " +
            "min(w[1][2]).e.e1, " +
            "min(w[2][3].e.e2) " +
            "FROM test_pushdown_subfields " +
            "GROUP BY 1", "test_pushdown_subfields",
            ImmutableMap.of("x", toSubfields("x.d"),
                    "y", toSubfields("y[1]", "y[2].d.d1"),
                    "w", toSubfields("w[1][2]", "w[2][3].e.e2")));

    assertUpdate("DROP TABLE test_pushdown_subfields");
}
/**
 * Verifies interaction of filter pushdown with subfield pruning: without filter pushdown the
 * scan must retain both the projected subscript (a[1]) and the filter's subscript (a[2]); with
 * filter pushdown enabled only a[1] is expected.
 */
@Test
public void testPushdownFilterAndSubfields()
{
    assertUpdate("CREATE TABLE test_pushdown_filter_and_subscripts(id bigint, a array(bigint), b array(array(varchar)))");

    Session pushdownFilterEnabled = Session.builder(getQueryRunner().getDefaultSession())
            .setCatalogSessionProperty(HIVE_CATALOG, PUSHDOWN_FILTER_ENABLED, "true")
            .build();

    try {
        assertPushdownSubfields("SELECT a[1] FROM test_pushdown_filter_and_subscripts WHERE a[2] > 10", "test_pushdown_filter_and_subscripts",
                ImmutableMap.of("a", toSubfields("a[1]", "a[2]")));

        assertPushdownSubfields(pushdownFilterEnabled, "SELECT a[1] FROM test_pushdown_filter_and_subscripts WHERE a[2] > 10", "test_pushdown_filter_and_subscripts",
                ImmutableMap.of("a", toSubfields("a[1]")));
    }
    finally {
        // Drop in finally so a failed assertion does not leak the table into later tests.
        assertUpdate("DROP TABLE IF EXISTS test_pushdown_filter_and_subscripts");
    }
}
/**
 * With {@code virtual_bucket_count} set on the catalog session, an aggregation over the hidden
 * "$path" column should plan with exactly one remote exchange (a gather) above the scan.
 */
@Test
public void testVirtualBucketing()
{
    try {
        assertUpdate("CREATE TABLE test_virtual_bucket(a bigint, b bigint)");
        Session virtualBucketSession = Session.builder(getSession())
                .setCatalogSessionProperty(HIVE_CATALOG, "virtual_bucket_count", "2")
                .build();
        assertPlan(
                virtualBucketSession,
                "SELECT COUNT(DISTINCT(\"$path\")) FROM test_virtual_bucket",
                anyTree(
                        exchange(REMOTE_STREAMING, GATHER, anyTree(
                                tableScan("test_virtual_bucket", ImmutableMap.of())))),
                assertRemoteExchangesCount(1, getSession(), (DistributedQueryRunner) getQueryRunner()));
    }
    finally {
        // Cleanup runs even when the plan assertion fails.
        assertUpdate("DROP TABLE IF EXISTS test_virtual_bucket");
    }
}
// TODO: plan verification https://github.com/prestodb/presto/issues/16031
/**
 * Materialized view over a two-partition base table where only the '2020-01-01' partition has
 * been refreshed into the view; the view query must return the same row count as querying the
 * base table directly. Uses named table/view locals for consistency with the sibling
 * materialized-view tests.
 */
@Test(enabled = false)
public void testMaterializedViewOptimization()
{
    QueryRunner queryRunner = getQueryRunner();
    String table = "orders_partitioned";
    String view = "test_orders_view";
    try {
        queryRunner.execute(format("CREATE TABLE %s WITH (partitioned_by = ARRAY['ds']) AS " +
                "SELECT orderkey, orderpriority, '2020-01-01' as ds FROM orders WHERE orderkey < 1000 " +
                "UNION ALL " +
                "SELECT orderkey, orderpriority, '2019-01-02' as ds FROM orders WHERE orderkey > 1000", table));

        assertUpdate(format("CREATE MATERIALIZED VIEW %s WITH (partitioned_by = ARRAY['ds']) " +
                "AS SELECT orderkey, orderpriority, ds FROM %s", view, table));
        assertTrue(getQueryRunner().tableExists(getSession(), view));

        // Materialize only the '2020-01-01' partition (255 rows); '2019-01-02' stays unmaterialized.
        assertUpdate(format("INSERT INTO %s(orderkey, orderpriority, ds) " +
                "select orderkey, orderpriority, ds from %s where ds='2020-01-01'", view, table), 255);

        String viewQuery = format("SELECT orderkey from %s where orderkey < 10000", view);
        String baseQuery = format("SELECT orderkey from %s where orderkey < 10000", table);
        // getExplainPlan(viewQuery, LOGICAL);
        assertEquals(computeActual(viewQuery).getRowCount(), computeActual(baseQuery).getRowCount());
    }
    finally {
        queryRunner.execute("DROP TABLE IF EXISTS " + view);
        queryRunner.execute("DROP TABLE IF EXISTS " + table);
    }
}
// enable after https://github.com/prestodb/presto/pull/15996
/**
 * Materialized view where every row of the base table has been refreshed into the view; the
 * view-backed query must return the same row count as querying the base table directly.
 */
@Test(enabled = false)
public void testMaterializedViewOptimizationFullyMaterialized()
{
    QueryRunner queryRunner = getQueryRunner();
    String baseTable = "orders_partitioned_fully_materialized";
    String view = "orders_view_fully_materialized";
    try {
        queryRunner.execute(format("CREATE TABLE %s WITH (partitioned_by = ARRAY['ds']) AS " +
                "SELECT orderkey, orderpriority, '2020-01-01' as ds FROM orders WHERE orderkey < 1000 " +
                "UNION ALL " +
                "SELECT orderkey, orderpriority, '2019-01-02' as ds FROM orders WHERE orderkey > 1000", baseTable));

        assertUpdate(format("CREATE MATERIALIZED VIEW %s WITH (partitioned_by = ARRAY['ds']) " +
                "AS SELECT orderkey, orderpriority, ds FROM %s", view, baseTable));
        assertTrue(getQueryRunner().tableExists(getSession(), view));

        // Refresh the view with every row of the base table (15000 rows expected).
        assertUpdate(format("INSERT INTO %s(orderkey, orderpriority, ds) select orderkey, orderpriority, ds from %s", view, baseTable), 15000);

        String queryOnView = format("SELECT orderkey from %s where orderkey < 10000", view);
        String queryOnBase = format("SELECT orderkey from %s where orderkey < 10000", baseTable);
        // getExplainPlan(queryOnView, LOGICAL);
        assertEquals(computeActual(queryOnView).getRowCount(), computeActual(queryOnBase).getRowCount());
    }
    finally {
        queryRunner.execute("DROP TABLE IF EXISTS " + view);
        queryRunner.execute("DROP TABLE IF EXISTS " + baseTable);
    }
}
/**
 * Materialized view with no refreshed partitions (nothing inserted into the view); the
 * view-backed query must still return the same row count as querying the base table directly.
 */
@Test(enabled = false)
public void testMaterializedViewOptimizationNotMaterialized()
{
    QueryRunner queryRunner = getQueryRunner();
    String baseTable = "orders_partitioned_not_materialized";
    String view = "orders_partitioned_view_not_materialized";
    try {
        queryRunner.execute(format("CREATE TABLE %s WITH (partitioned_by = ARRAY['ds']) AS " +
                "SELECT orderkey, orderpriority, '2020-01-01' as ds FROM orders WHERE orderkey < 1000 " +
                "UNION ALL " +
                "SELECT orderkey, orderpriority, '2019-01-02' as ds FROM orders WHERE orderkey > 1000", baseTable));

        assertUpdate(format("CREATE MATERIALIZED VIEW %s WITH (partitioned_by = ARRAY['ds']) " +
                "AS SELECT orderkey, orderpriority, ds FROM %s", view, baseTable));
        assertTrue(getQueryRunner().tableExists(getSession(), view));

        String queryOnView = format("SELECT orderkey from %s where orderkey < 10000", view);
        String queryOnBase = format("SELECT orderkey from %s where orderkey < 10000", baseTable);
        // getExplainPlan(queryOnView, LOGICAL);
        assertEquals(computeActual(queryOnView).getRowCount(), computeActual(queryOnBase).getRowCount());
    }
    finally {
        queryRunner.execute("DROP TABLE IF EXISTS " + view);
        queryRunner.execute("DROP TABLE IF EXISTS " + baseTable);
    }
}
/**
 * Materialized view over a base table that contains a NULL partition value.
 * Only the '2020-01-01' partition is materialized; the query against the view
 * must still return the same row count as the base table, covering the dated
 * and NULL partitions that were never refreshed.
 */
@Test(enabled = false)
public void testMaterializedViewOptimizationWithNullPartition()
{
    QueryRunner queryRunner = getQueryRunner();
    String baseTable = "orders_partitioned_null_partition";
    String view = "orders_partitioned_view_null_partition";
    try {
        // Three 'ds' partitions: two dated ones plus one NULL partition.
        queryRunner.execute(format("CREATE TABLE %s WITH (partitioned_by = ARRAY['ds']) AS " +
                "SELECT orderkey, orderpriority, '2020-01-01' as ds FROM orders WHERE orderkey < 500 " +
                "UNION ALL " +
                "SELECT orderkey, orderpriority, '2019-01-02' as ds FROM orders WHERE orderkey > 500 and orderkey < 1000 " +
                "UNION ALL " +
                "SELECT orderkey, orderpriority, NULL as ds FROM orders WHERE orderkey > 1000 and orderkey < 1500", baseTable));
        assertUpdate(format("CREATE MATERIALIZED VIEW %s WITH (partitioned_by = ARRAY['ds']) AS " +
                "SELECT orderkey, orderpriority, ds FROM %s", view, baseTable));
        assertTrue(getQueryRunner().tableExists(getSession(), view));
        // Materialize only ds='2020-01-01' (127 rows); the other two
        // partitions (including the NULL one) remain un-materialized.
        assertUpdate(format("INSERT INTO %s(orderkey, orderpriority, ds) " +
                "select orderkey, orderpriority, ds from %s where ds='2020-01-01'", view, baseTable), 127);
        String viewQuery = format("SELECT orderkey from %s where orderkey < 10000", view);
        String baseQuery = format("SELECT orderkey from %s where orderkey < 10000", baseTable);
        // getExplainPlan(viewQuery, LOGICAL);
        assertEquals(computeActual(viewQuery).getRowCount(), computeActual(baseQuery).getRowCount());
    }
    finally {
        queryRunner.execute("DROP TABLE IF EXISTS " + view);
        queryRunner.execute("DROP TABLE IF EXISTS " + baseTable);
    }
}
@Test(enabled = false)
public void testMaterializedViewWithLessGranularity()
{
    QueryRunner runner = getQueryRunner();
    String base = "orders_partitioned_less_granularity";
    String mv = "orders_partitioned_view_less_granularity";
    try {
        // Base table partitioned on (orderpriority, ds); the view below uses
        // only 'ds', i.e. a coarser partitioning than its base table.
        runner.execute(format("CREATE TABLE %s WITH (partitioned_by = ARRAY['orderpriority', 'ds']) AS " +
                "SELECT orderkey, orderpriority, '2020-01-01' as ds FROM orders WHERE orderkey < 1000 " +
                "UNION ALL " +
                "SELECT orderkey, orderpriority, '2019-01-02' as ds FROM orders WHERE orderkey > 1000", base));
        assertUpdate(format("CREATE MATERIALIZED VIEW %s WITH (partitioned_by = ARRAY['ds']) AS " +
                "SELECT orderkey, orderpriority, ds FROM %s", mv, base));
        assertTrue(runner.tableExists(getSession(), mv));
        // Materialize just the 2020-01-01 slice (255 rows).
        assertUpdate(format("INSERT INTO %s(orderkey, orderpriority, ds) " +
                "select orderkey, orderpriority, ds from %s where ds='2020-01-01'", mv, base), 255);
        String sqlOnView = format("SELECT orderkey from %s where orderkey < 10000", mv);
        String sqlOnBase = format("SELECT orderkey from %s where orderkey < 10000", base);
        // getExplainPlan(sqlOnView, LOGICAL);
        // Same row count whether the query goes through the view or the base table.
        assertEquals(computeActual(sqlOnView).getRowCount(), computeActual(sqlOnBase).getRowCount());
    }
    finally {
        runner.execute("DROP TABLE IF EXISTS " + mv);
        runner.execute("DROP TABLE IF EXISTS " + base);
    }
}
/**
 * Materialized view whose partition columns (ds, orderstatus) differ from the
 * base table's (ds, orderpriority). Only the '2020-01-01' slice is
 * materialized; a query against the view must match the base table's row count.
 */
@Test(enabled = false)
public void testMaterializedViewWithDifferentPartitions()
{
    QueryRunner queryRunner = getQueryRunner();
    String baseTable = "orders_partitioned_different_partitions";
    String view = "orders_partitioned_view_different_partitions";
    try {
        // Base table partitioned on (ds, orderpriority).
        queryRunner.execute(format("CREATE TABLE %s WITH (partitioned_by = ARRAY['ds', 'orderpriority']) AS " +
                "SELECT orderkey, orderstatus, '2020-01-01' as ds, orderpriority FROM orders WHERE orderkey < 1000 " +
                "UNION ALL " +
                "SELECT orderkey, orderstatus, '2019-01-02' as ds, orderpriority FROM orders WHERE orderkey > 1000", baseTable));
        // View partitioned on (ds, orderstatus) instead.
        assertUpdate(format("CREATE MATERIALIZED VIEW %s WITH (partitioned_by = ARRAY['ds', 'orderstatus']) AS " +
                "SELECT orderkey, orderpriority, ds, orderstatus FROM %s", view, baseTable));
        assertTrue(getQueryRunner().tableExists(getSession(), view));
        // Materialize only the 2020-01-01 partitions (255 rows).
        assertUpdate(format("INSERT INTO %s(orderkey, orderpriority, ds, orderstatus) " +
                "select orderkey, orderpriority, ds, orderstatus from %s where ds='2020-01-01'", view, baseTable), 255);
        String viewQuery = format("SELECT orderkey from %s where orderkey < 10000", view);
        String baseQuery = format("SELECT orderkey from %s where orderkey < 10000", baseTable);
        // getExplainPlan(viewQuery, LOGICAL);
        assertEquals(computeActual(viewQuery).getRowCount(), computeActual(baseQuery).getRowCount());
    }
    finally {
        queryRunner.execute("DROP TABLE IF EXISTS " + view);
        queryRunner.execute("DROP TABLE IF EXISTS " + baseTable);
    }
}
/**
 * Materialized view over a join where only one side (customer) carries an
 * alias — exercises resolution of aliased vs. fully-qualified column
 * references in the view definition. The view is partially materialized
 * (rows with nationkey 24 or regionkey 1 are excluded from the refresh).
 */
@Test(enabled = false)
public void testMaterializedViewJoinsWithOneTableAlias()
{
    QueryRunner queryRunner = getQueryRunner();
    String view = "view_join_with_one_alias";
    String table1 = "nation_partitioned_join_with_one_alias";
    String table2 = "customer_partitioned_join_with_one_alias";
    try {
        queryRunner.execute(format("CREATE TABLE %s WITH (partitioned_by = ARRAY['nationkey', 'regionkey']) AS " +
                "SELECT name, nationkey, regionkey FROM nation", table1));
        queryRunner.execute(format("CREATE TABLE %s WITH (partitioned_by = ARRAY['nationkey']) AS SELECT custkey," +
                " name, mktsegment, nationkey FROM customer", table2));
        // table1 is referenced by its full name, table2 through the alias "customer".
        assertUpdate(format("CREATE MATERIALIZED VIEW %s WITH (partitioned_by = ARRAY['marketsegment', " +
                "'nationkey', 'regionkey']) AS SELECT %s.name AS nationname, " +
                "customer.custkey, customer.name AS customername, UPPER(customer.mktsegment) AS marketsegment, customer.nationkey, regionkey " +
                "FROM %s JOIN %s customer ON (%s.nationkey = customer.nationkey)",
                view, table1, table1, table2, table1));
        // Partial refresh: skip nationkey 24 and regionkey 1 (1200 rows inserted).
        assertUpdate(format("INSERT INTO %s(nationname, custkey, customername, marketsegment, nationkey, regionkey) " +
                "SELECT %s.name AS nationname, customer.custkey, customer.name AS customername, UPPER(customer.mktsegment) " +
                "AS marketsegment, customer.nationkey, regionkey FROM %s JOIN %s customer ON (%s.nationkey = customer.nationkey) " +
                "WHERE customer.nationkey != 24 and %s.regionkey != 1",
                view, table1, table1, table2, table1, table1), 1200);
        String viewQuery = format("SELECT nationname, custkey from %s", view);
        String baseQuery = format("SELECT %s.name AS nationname, customer.custkey FROM %s JOIN %s customer ON (%s.nationkey = customer.nationkey)",
                table1, table1, table2, table1);
        // getExplainPlan(viewQuery, LOGICAL);
        assertEquals(computeActual(viewQuery).getRowCount(), computeActual(baseQuery).getRowCount());
    }
    finally {
        queryRunner.execute("DROP TABLE IF EXISTS " + view);
        queryRunner.execute("DROP TABLE IF EXISTS " + table1);
        queryRunner.execute("DROP TABLE IF EXISTS " + table2);
    }
}
/**
 * Materialized view containing a derived aggregate field,
 * SUM(discount * extendedprice), grouped by the partition columns
 * (ds, shipmode). Only the '2020-01-01' slice is materialized.
 */
@Test(enabled = false)
public void testMaterializedViewOptimizationWithDerivedFields()
{
    QueryRunner queryRunner = getQueryRunner();
    String baseTable = "lineitem_partitioned_derived_fields";
    String view = "lineitem_partitioned_view_derived_fields";
    try {
        queryRunner.execute(format("CREATE TABLE %s WITH (partitioned_by = ARRAY['ds', 'shipmode']) AS " +
                "SELECT discount, extendedprice, '2020-01-01' as ds, shipmode FROM lineitem WHERE orderkey < 1000 " +
                "UNION ALL " +
                "SELECT discount, extendedprice, '2020-01-02' as ds, shipmode FROM lineitem WHERE orderkey > 1000", baseTable));
        assertUpdate(format(
                "CREATE MATERIALIZED VIEW %s WITH (partitioned_by = ARRAY['ds', 'shipmode']) AS " +
                        "SELECT SUM(discount*extendedprice) as _discount_multi_extendedprice_, ds, shipmode FROM %s group by ds, shipmode",
                view, baseTable));
        assertTrue(getQueryRunner().tableExists(getSession(), view));
        // Refresh only ds='2020-01-01': one row per (ds, shipmode) group (7 rows).
        assertUpdate(format("INSERT INTO %s(_discount_multi_extendedprice_, ds, shipmode) " +
                "select SUM(discount*extendedprice), ds, shipmode from %s where ds='2020-01-01' group by ds, shipmode",
                view, baseTable), 7);
        String viewQuery = format("SELECT sum(_discount_multi_extendedprice_) from %s group by ds, shipmode", view);
        String baseQuery = format("SELECT sum(discount * extendedprice) as _discount_multi_extendedprice_ from %s group by ds, shipmode", baseTable);
        // getExplainPlan(viewQuery, LOGICAL);
        assertEquals(computeActual(viewQuery).getRowCount(), computeActual(baseQuery).getRowCount());
    }
    finally {
        queryRunner.execute("DROP TABLE IF EXISTS " + view);
        queryRunner.execute("DROP TABLE IF EXISTS " + baseTable);
    }
}
/**
 * Same derived-aggregate scenario as above, but the view renames one of the
 * base partition columns (shipmode AS view_shipmode) and partitions on the
 * aliased name — exercising column-name mapping between view and base table.
 */
@Test(enabled = false)
public void testMaterializedViewOptimizationWithDerivedFieldsWithAlias()
{
    QueryRunner queryRunner = getQueryRunner();
    String baseTable = "lineitem_partitioned_derived_fields_with_alias";
    String view = "lineitem_partitioned_view_derived_fields_with_alias";
    try {
        queryRunner.execute(format("CREATE TABLE %s WITH (partitioned_by = ARRAY['ds', 'shipmode']) AS " +
                "SELECT discount, extendedprice, '2020-01-01' as ds, shipmode FROM lineitem WHERE orderkey < 1000 " +
                "UNION ALL " +
                "SELECT discount, extendedprice, '2020-01-02' as ds, shipmode FROM lineitem WHERE orderkey > 1000 ", baseTable));
        // View partition column 'view_shipmode' is an alias of base 'shipmode'.
        assertUpdate(format("CREATE MATERIALIZED VIEW %s WITH (partitioned_by = ARRAY['ds', 'view_shipmode']) " +
                "AS SELECT SUM(discount*extendedprice) as _discount_multi_extendedprice_, ds, shipmode as view_shipmode " +
                "FROM %s group by ds, shipmode", view, baseTable));
        assertTrue(getQueryRunner().tableExists(getSession(), view));
        // Refresh only ds='2020-01-01' (7 rows, one per ship mode).
        assertUpdate(format("INSERT INTO %s(_discount_multi_extendedprice_, ds, view_shipmode) " +
                "select SUM(discount*extendedprice), ds, shipmode from %s where ds='2020-01-01' group by ds, shipmode",
                view, baseTable), 7);
        String viewQuery = format("SELECT sum(_discount_multi_extendedprice_) from %s group by ds", view);
        String baseQuery = format("SELECT sum(discount * extendedprice) as _discount_multi_extendedprice_ from %s group by ds", baseTable);
        // getExplainPlan(viewQuery, LOGICAL);
        assertEquals(computeActual(viewQuery).getRowCount(), computeActual(baseQuery).getRowCount());
    }
    finally {
        queryRunner.execute("DROP TABLE IF EXISTS " + view);
        queryRunner.execute("DROP TABLE IF EXISTS " + baseTable);
    }
}
//FIXME: It does not work as column map in the materialized View metadata is not correct. It only contains 1 map instead of 2.
// https://github.com/prestodb/presto/pull/15996
/**
 * Materialized view defined as an inner join of two 'ds'-partitioned tables.
 * Disabled until the view's column-map metadata is fixed (see FIXME above).
 */
@Test(enabled = false)
public void testMaterializedViewForJoin()
{
    QueryRunner queryRunner = getQueryRunner();
    String table1 = "orders_key_partitioned_join";
    String table2 = "orders_price_partitioned_join";
    String view = "orders_view_join";
    try {
        queryRunner.execute(format("CREATE TABLE %s WITH (partitioned_by = ARRAY['ds']) AS " +
                "SELECT orderkey, '2020-01-01' as ds FROM orders WHERE orderkey < 1000 " +
                "UNION ALL " +
                "SELECT orderkey, '2019-01-02' as ds FROM orders WHERE orderkey > 1000", table1));
        queryRunner.execute(format("CREATE TABLE %s WITH (partitioned_by = ARRAY['ds']) AS " +
                "SELECT totalprice, '2020-01-01' as ds FROM orders WHERE orderkey < 1000 " +
                "UNION ALL " +
                "SELECT totalprice, '2019-01-02' as ds FROM orders WHERE orderkey > 1000", table2));
        assertUpdate(format("CREATE MATERIALIZED VIEW %s WITH (partitioned_by = ARRAY['ds']) " +
                "AS SELECT t1.orderkey as view_orderkey, t2.totalprice as view_totalprice, t1.ds" +
                " FROM %s t1 inner join %s t2 ON t1.ds=t2.ds", view, table1, table2));
        assertTrue(queryRunner.tableExists(getSession(), view));
        // Fixed: the original INSERT listed columns (view_orderpriority,
        // view_orderstatus) that this view does not define, and the projection
        // contained a duplicated "SELECT SELECT" keyword.
        assertUpdate(format("INSERT INTO %s(view_orderkey, view_totalprice, ds) " +
                "SELECT t1.orderkey as view_orderkey, t2.totalprice as view_totalprice, t1.ds " +
                " FROM %s t1 inner join %s t2 ON t1.ds=t2.ds" +
                " where t1.ds='2020-01-01'", view, table1, table2), 65025);
        String viewQuery = format("SELECT view_orderkey, view_totalprice from %s where view_orderkey < 10000", view);
        String baseQuery = format("SELECT t1.orderkey, t2.totalprice FROM %s t1 inner join %s t2 ON t1.ds=t2.ds " +
                "where t1.orderkey < 10000", table1, table2);
        // getExplainPlan(viewQuery, LOGICAL);
        // Consistent with the other materialized-view tests: the view query
        // must return the same number of rows as the equivalent base query.
        assertEquals(computeActual(viewQuery).getRowCount(), computeActual(baseQuery).getRowCount());
    }
    finally {
        queryRunner.execute("DROP TABLE IF EXISTS " + view);
        queryRunner.execute("DROP TABLE IF EXISTS " + table1);
        queryRunner.execute("DROP TABLE IF EXISTS " + table2);
    }
}
/**
 * Materialized view over a table partitioned on a DOUBLE column
 * ('totalprice'). Only the partitions with totalprice &lt; 65000 (3 rows) are
 * materialized; the view query must still match the base table's row count.
 */
@Test(enabled = false)
public void testMaterializedViewOptimizationWithDoublePartition()
{
    QueryRunner queryRunner = getQueryRunner();
    String table = "orders_partitioned_double_partition";
    String view = "orders_view_double_partition";
    try {
        queryRunner.execute(format("CREATE TABLE %s WITH (partitioned_by = ARRAY['totalprice']) AS " +
                "SELECT orderkey, orderpriority, totalprice FROM orders WHERE orderkey < 10 ", table));
        assertUpdate(format("CREATE MATERIALIZED VIEW %s WITH (partitioned_by = ARRAY['totalprice']) " +
                "AS SELECT orderkey, orderpriority, totalprice FROM %s", view, table));
        assertTrue(getQueryRunner().tableExists(getSession(), view));
        // Partial refresh: only partitions with totalprice below 65000 (3 rows).
        assertUpdate(format("INSERT INTO %s(orderkey, orderpriority, totalprice) " +
                "select orderkey, orderpriority, totalprice from %s where totalprice<65000", view, table), 3);
        // Fixed local-variable typo: was "viwQuery" (the commented-out
        // getExplainPlan call below already referenced "viewQuery").
        String viewQuery = format("SELECT orderkey from %s where orderkey < 10000", view);
        String baseQuery = format("SELECT orderkey from %s where orderkey < 10000", table);
        // getExplainPlan(viewQuery, LOGICAL);
        assertEquals(computeActual(viewQuery).getRowCount(), computeActual(baseQuery).getRowCount());
    }
    finally {
        queryRunner.execute("DROP TABLE IF EXISTS " + view);
        queryRunner.execute("DROP TABLE IF EXISTS " + table);
    }
}
/**
 * Materialized view over an inner join of two tables, each partitioned on two
 * columns; the view itself is partitioned on three columns drawn from both
 * join sides. Only the '2020-01-01' slice is materialized.
 */
@Test(enabled = false)
public void testMaterializedViewForJoinWithMultiplePartitions()
{
    QueryRunner queryRunner = getQueryRunner();
    String view = "order_view_join_with_multiple_partitions";
    String table1 = "orders_key_partitioned_join_with_multiple_partitions";
    String table2 = "orders_price_partitioned_join_with_multiple_partitions";
    try {
        queryRunner.execute(format("CREATE TABLE %s WITH (partitioned_by = ARRAY['ds', 'orderpriority']) AS " +
                "SELECT orderkey, '2020-01-01' as ds, orderpriority FROM orders WHERE orderkey < 1000 " +
                "UNION ALL " +
                "SELECT orderkey, '2019-01-02' as ds , orderpriority FROM orders WHERE orderkey > 1000 and orderkey < 2000", table1));
        queryRunner.execute(format("CREATE TABLE %s WITH (partitioned_by = ARRAY['ds', 'orderstatus']) AS " +
                "SELECT totalprice, '2020-01-01' as ds, orderstatus FROM orders WHERE orderkey < 1000 " +
                "UNION ALL " +
                "SELECT totalprice, '2019-01-02' as ds, orderstatus FROM orders WHERE orderkey > 1000 and orderkey < 2000", table2));
        // View partition columns come from both join inputs:
        // ds and orderpriority from t1, orderstatus from t2.
        assertUpdate(format("CREATE MATERIALIZED VIEW %s WITH (partitioned_by = ARRAY['ds', 'view_orderpriority', 'view_orderstatus']) " +
                "AS SELECT t1.orderkey as view_orderkey, t2.totalprice as view_totalprice, " +
                "t1.ds as ds, t1.orderpriority as view_orderpriority, t2.orderstatus as view_orderstatus " +
                " FROM %s t1 inner join %s t2 ON t1.ds=t2.ds", view, table1, table2));
        // Refresh only ds='2020-01-01' (65025 rows = the cross of the two
        // ~255-row 2020-01-01 slices joined on ds).
        assertUpdate(format("INSERT INTO %s(view_orderkey, view_totalprice, ds, view_orderpriority, view_orderstatus) " +
                "SELECT t1.orderkey as view_orderkey, t2.totalprice as view_totalprice, t1.ds as ds, t1.orderpriority as view_orderpriority, " +
                "t2.orderstatus as view_orderstatus FROM %s t1 inner join %s t2 ON t1.ds=t2.ds" +
                " where t1.ds='2020-01-01'", view, table1, table2), 65025);
        String viewQuery = format("SELECT view_orderkey from %s where view_orderkey < 10000", view);
        String baseQuery = format("SELECT t1.orderkey FROM %s t1" +
                " inner join %s t2 ON t1.ds=t2.ds where t1.orderkey < 10000", table1, table2);
        // getExplainPlan(viewQuery, LOGICAL);
        assertEquals(computeActual(viewQuery).getRowCount(), computeActual(baseQuery).getRowCount());
    }
    finally {
        queryRunner.execute("DROP TABLE IF EXISTS " + view);
        queryRunner.execute("DROP TABLE IF EXISTS " + table1);
        queryRunner.execute("DROP TABLE IF EXISTS " + table2);
    }
}
/**
 * Creating a materialized view over a FULL OUTER JOIN must be rejected —
 * only inner joins are supported for materialized views.
 */
// NOTE(review): method name misspells "Materialized"; left unchanged to keep
// test identifiers stable in reports.
@Test(enabled = false)
public void testMaterialziedViewFullOuterJoin()
{
    QueryRunner queryRunner = getQueryRunner();
    String view = "order_view_full_outer_join";
    String table1 = "orders_key_partitioned_full_outer_join";
    String table2 = "orders_price_partitioned_full_outer_join";
    try {
        queryRunner.execute(format("CREATE TABLE %s WITH (partitioned_by = ARRAY['ds', 'orderpriority']) AS " +
                "SELECT orderkey, '2020-01-01' as ds, orderpriority FROM orders WHERE orderkey < 1000 " +
                "UNION ALL " +
                "SELECT orderkey, '2019-01-02' as ds , orderpriority FROM orders WHERE orderkey > 1000 and orderkey < 2000", table1));
        queryRunner.execute(format("CREATE TABLE %s WITH (partitioned_by = ARRAY['ds', 'orderstatus']) AS " +
                "SELECT totalprice, '2020-01-01' as ds, orderstatus FROM orders WHERE orderkey < 1000 " +
                "UNION ALL " +
                "SELECT totalprice, '2019-01-02' as ds, orderstatus FROM orders WHERE orderkey > 1000 and orderkey < 2000", table2));
        // The CREATE itself must fail with the "only inner join" error.
        assertQueryFails(format("CREATE MATERIALIZED VIEW %s WITH (partitioned_by = ARRAY['ds', 'view_orderpriority', 'view_orderstatus']) " +
                "AS SELECT t1.orderkey as view_orderkey, t2.totalprice as view_totalprice, " +
                "t1.ds as ds, t1.orderpriority as view_orderpriority, t2.orderstatus as view_orderstatus " +
                " FROM %s t1 full outer join %s t2 ON t1.ds=t2.ds", view, table1, table2),
                ".*Only inner join is supported for materialized view.*");
    }
    finally {
        queryRunner.execute("DROP TABLE IF EXISTS " + view);
        queryRunner.execute("DROP TABLE IF EXISTS " + table1);
        queryRunner.execute("DROP TABLE IF EXISTS " + table2);
    }
}
// Make sure subfield pruning doesn't interfere with cost-based optimizer
@Test
public void testPushdownSubfieldsAndJoinReordering()
{
    // Collect column statistics on write so the cost-based optimizer has
    // stats available for join-reordering decisions.
    Session collectStatistics = Session.builder(getSession())
            .setCatalogSessionProperty(HIVE_CATALOG, COLLECT_COLUMN_STATISTICS_ON_WRITE, "true")
            .build();
    getQueryRunner().execute(collectStatistics, "CREATE TABLE orders_ex AS SELECT orderkey, custkey, array[custkey] as keys FROM orders");
    try {
        // Two sessions, differing only in the join-reordering strategy.
        Session joinReorderingOn = Session.builder(pushdownFilterEnabled())
                .setSystemProperty(JOIN_REORDERING_STRATEGY, FeaturesConfig.JoinReorderingStrategy.AUTOMATIC.name())
                .build();
        Session joinReorderingOff = Session.builder(pushdownFilterEnabled())
                .setSystemProperty(JOIN_REORDERING_STRATEGY, FeaturesConfig.JoinReorderingStrategy.ELIMINATE_CROSS_JOINS.name())
                .build();
        // Reordering OFF: join keeps the textual order (orders_ex, lineitem).
        assertPlan(joinReorderingOff, "SELECT sum(custkey) FROM orders_ex o, lineitem l WHERE o.orderkey = l.orderkey",
                anyTree(join(INNER, ImmutableList.of(equiJoinClause("o_orderkey", "l_orderkey")),
                        anyTree(PlanMatchPattern.tableScan("orders_ex", ImmutableMap.of("o_orderkey", "orderkey"))),
                        anyTree(PlanMatchPattern.tableScan("lineitem", ImmutableMap.of("l_orderkey", "orderkey"))))));
        assertPlan(joinReorderingOff, "SELECT sum(keys[1]) FROM orders_ex o, lineitem l WHERE o.orderkey = l.orderkey",
                anyTree(join(INNER, ImmutableList.of(equiJoinClause("o_orderkey", "l_orderkey")),
                        anyTree(PlanMatchPattern.tableScan("orders_ex", ImmutableMap.of("o_orderkey", "orderkey"))),
                        anyTree(PlanMatchPattern.tableScan("lineitem", ImmutableMap.of("l_orderkey", "orderkey"))))));
        // Reordering ON: CBO flips the sides (lineitem, orders_ex), and
        // subfield access (keys[1]) must not change that decision.
        assertPlan(joinReorderingOn, "SELECT sum(custkey) FROM orders_ex o, lineitem l WHERE o.orderkey = l.orderkey",
                anyTree(join(INNER, ImmutableList.of(equiJoinClause("l_orderkey", "o_orderkey")),
                        anyTree(PlanMatchPattern.tableScan("lineitem", ImmutableMap.of("l_orderkey", "orderkey"))),
                        anyTree(PlanMatchPattern.tableScan("orders_ex", ImmutableMap.of("o_orderkey", "orderkey"))))));
        assertPlan(joinReorderingOn, "SELECT sum(keys[1]) FROM orders_ex o, lineitem l WHERE o.orderkey = l.orderkey",
                anyTree(join(INNER, ImmutableList.of(equiJoinClause("l_orderkey", "o_orderkey")),
                        anyTree(PlanMatchPattern.tableScan("lineitem", ImmutableMap.of("l_orderkey", "orderkey"))),
                        anyTree(PlanMatchPattern.tableScan("orders_ex", ImmutableMap.of("o_orderkey", "orderkey"))))));
        // Same contrast on a lineitem/orders join with filters on both sides.
        assertPlan(joinReorderingOff, "SELECT l.discount, l.orderkey, o.totalprice FROM lineitem l, orders o WHERE l.orderkey = o.orderkey AND l.quantity < 2 AND o.totalprice BETWEEN 0 AND 200000",
                anyTree(
                        node(JoinNode.class,
                                anyTree(tableScan("lineitem", ImmutableMap.of())),
                                anyTree(tableScan("orders", ImmutableMap.of())))));
        assertPlan(joinReorderingOn, "SELECT l.discount, l.orderkey, o.totalprice FROM lineitem l, orders o WHERE l.orderkey = o.orderkey AND l.quantity < 2 AND o.totalprice BETWEEN 0 AND 200000",
                anyTree(
                        node(JoinNode.class,
                                anyTree(tableScan("orders", ImmutableMap.of())),
                                anyTree(tableScan("lineitem", ImmutableMap.of())))));
        // Predicate on a subfield (keys[1] > 0) must not break either strategy.
        assertPlan(joinReorderingOff, "SELECT keys[1] FROM orders_ex o, lineitem l WHERE o.orderkey = l.orderkey AND o.keys[1] > 0",
                anyTree(join(INNER, ImmutableList.of(equiJoinClause("o_orderkey", "l_orderkey")),
                        anyTree(PlanMatchPattern.tableScan("orders_ex", ImmutableMap.of("o_orderkey", "orderkey"))),
                        anyTree(PlanMatchPattern.tableScan("lineitem", ImmutableMap.of("l_orderkey", "orderkey"))))));
        assertPlan(joinReorderingOn, "SELECT keys[1] FROM orders_ex o, lineitem l WHERE o.orderkey = l.orderkey AND o.keys[1] > 0",
                anyTree(join(INNER, ImmutableList.of(equiJoinClause("l_orderkey", "o_orderkey")),
                        anyTree(PlanMatchPattern.tableScan("lineitem", ImmutableMap.of("l_orderkey", "orderkey"))),
                        anyTree(PlanMatchPattern.tableScan("orders_ex", ImmutableMap.of("o_orderkey", "orderkey"))))));
    }
    finally {
        assertUpdate("DROP TABLE orders_ex");
    }
}
/**
 * Dereference pushdown into Parquet scans: queries that access only subfields
 * of nested row/array columns should produce table scans reading just those
 * subfields (asserted via assertParquetDereferencePushDown / the plan's
 * pushed-down column map), across projections, filters, joins, aggregations,
 * UNNEST, and case-insensitive references.
 */
@Test
public void testParquetDereferencePushDown()
{
    // Nested schema: x is a row with a nested row d; y is an array of the same row type.
    assertUpdate("CREATE TABLE test_pushdown_nestedcolumn_parquet(" +
            "id bigint, " +
            "x row(a bigint, b varchar, c double, d row(d1 bigint, d2 double))," +
            "y array(row(a bigint, b varchar, c double, d row(d1 bigint, d2 double)))) " +
            "with (format = 'PARQUET')");
    // Simple projections of subfields.
    assertParquetDereferencePushDown("SELECT x.a FROM test_pushdown_nestedcolumn_parquet",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.a"));
    assertParquetDereferencePushDown("SELECT x.a, mod(x.d.d1, 2) FROM test_pushdown_nestedcolumn_parquet",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.a", "x.d.d1"));
    // Selecting x.d whole subsumes x.d.d1/x.d.d2 — only x.d is pushed down.
    assertParquetDereferencePushDown("SELECT x.d, mod(x.d.d1, 2), x.d.d2 FROM test_pushdown_nestedcolumn_parquet",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.d"));
    // Subfields referenced only in WHERE are pushed down too.
    assertParquetDereferencePushDown("SELECT x.a FROM test_pushdown_nestedcolumn_parquet WHERE x.b LIKE 'abc%'",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.a", "x.b"));
    // A range predicate on a pushed-down subfield becomes a column domain.
    assertParquetDereferencePushDown("SELECT x.a FROM test_pushdown_nestedcolumn_parquet WHERE x.a > 10 AND x.b LIKE 'abc%'",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.a", "x.b"),
            withColumnDomains(ImmutableMap.of(pushdownColumnNameForSubfield(nestedColumn("x.a")), create(ofRanges(greaterThan(BIGINT, 10L)), false))),
            ImmutableSet.of(pushdownColumnNameForSubfield(nestedColumn("x.a"))),
            TRUE_CONSTANT);
    // Join
    assertPlan(withParquetDereferencePushDownEnabled(), "SELECT l.orderkey, x.a, mod(x.d.d1, 2) FROM lineitem l, test_pushdown_nestedcolumn_parquet a WHERE l.linenumber = a.id",
            anyTree(
                    node(JoinNode.class,
                            anyTree(tableScan("lineitem", ImmutableMap.of())),
                            anyTree(tableScanParquetDeferencePushDowns("test_pushdown_nestedcolumn_parquet", nestedColumnMap("x.a", "x.d.d1"))))));
    assertPlan(withParquetDereferencePushDownEnabled(), "SELECT l.orderkey, x.a, mod(x.d.d1, 2) FROM lineitem l, test_pushdown_nestedcolumn_parquet a WHERE l.linenumber = a.id AND x.a > 10",
            anyTree(
                    node(JoinNode.class,
                            anyTree(tableScan("lineitem", ImmutableMap.of())),
                            anyTree(tableScanParquetDeferencePushDowns(
                                    "test_pushdown_nestedcolumn_parquet",
                                    nestedColumnMap("x.a", "x.d.d1"),
                                    withColumnDomains(ImmutableMap.of(pushdownColumnNameForSubfield(nestedColumn("x.a")), create(ofRanges(greaterThan(BIGINT, 10L)), false))),
                                    ImmutableSet.of(pushdownColumnNameForSubfield(nestedColumn("x.a"))),
                                    TRUE_CONSTANT)))));
    // Self-Join and Table scan assignments
    // (second scan gets a disambiguated synthetic column name x_1.a)
    assertPlan(withParquetDereferencePushDownEnabled(), "SELECT t1.x.a, t2.x.a FROM test_pushdown_nestedcolumn_parquet t1, test_pushdown_nestedcolumn_parquet t2 where t1.id = t2.id",
            anyTree(
                    node(JoinNode.class,
                            anyTree(tableScanParquetDeferencePushDowns("test_pushdown_nestedcolumn_parquet", nestedColumnMap("x.a"))),
                            anyTree(tableScanParquetDeferencePushDowns("test_pushdown_nestedcolumn_parquet", nestedColumnMap("x_1.a"))))));
    // Aggregation
    assertParquetDereferencePushDown("SELECT id, min(x.a) FROM test_pushdown_nestedcolumn_parquet GROUP BY 1",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.a"));
    assertParquetDereferencePushDown("SELECT id, min(mod(x.a, 3)) FROM test_pushdown_nestedcolumn_parquet GROUP BY 1",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.a"));
    assertParquetDereferencePushDown("SELECT id, min(x.a) FILTER (WHERE x.b LIKE 'abc%') FROM test_pushdown_nestedcolumn_parquet GROUP BY 1",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.a", "x.b"));
    assertParquetDereferencePushDown("SELECT id, min(x.a + 1) * avg(x.d.d1) FROM test_pushdown_nestedcolumn_parquet GROUP BY 1",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.a", "x.d.d1"));
    assertParquetDereferencePushDown("SELECT id, arbitrary(x.a) FROM test_pushdown_nestedcolumn_parquet GROUP BY 1",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.a"));
    // Dereference can't be pushed down, but the subfield pushdown will help in pruning the number of columns to read
    assertPushdownSubfields("SELECT id, arbitrary(x.a) FROM test_pushdown_nestedcolumn_parquet GROUP BY 1",
            "test_pushdown_nestedcolumn_parquet",
            ImmutableMap.of("x", toSubfields("x.a")));
    // Dereference can't be pushed down, but the subfield pushdown will help in pruning the number of columns to read
    assertPushdownSubfields("SELECT id, arbitrary(x).d.d1 FROM test_pushdown_nestedcolumn_parquet GROUP BY 1",
            "test_pushdown_nestedcolumn_parquet",
            ImmutableMap.of("x", toSubfields("x.d.d1")));
    assertParquetDereferencePushDown("SELECT id, arbitrary(x.d).d1 FROM test_pushdown_nestedcolumn_parquet GROUP BY 1",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.d"));
    assertParquetDereferencePushDown("SELECT id, arbitrary(x.d.d2) FROM test_pushdown_nestedcolumn_parquet GROUP BY 1",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.d.d2"));
    // Unnest
    assertParquetDereferencePushDown("SELECT t.a, t.d.d1, x.a FROM test_pushdown_nestedcolumn_parquet CROSS JOIN UNNEST(y) as t(a, b, c, d)",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.a"));
    assertParquetDereferencePushDown("SELECT t.*, x.a FROM test_pushdown_nestedcolumn_parquet CROSS JOIN UNNEST(y) as t(a, b, c, d)",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.a"));
    assertParquetDereferencePushDown("SELECT id, x.a FROM test_pushdown_nestedcolumn_parquet CROSS JOIN UNNEST(y) as t(a, b, c, d)",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.a"));
    // Legacy unnest
    Session legacyUnnest = Session.builder(withParquetDereferencePushDownEnabled())
            .setSystemProperty("legacy_unnest", "true")
            .build();
    assertParquetDereferencePushDown(legacyUnnest, "SELECT t.y.a, t.y.d.d1, x.a FROM test_pushdown_nestedcolumn_parquet CROSS JOIN UNNEST(y) as t(y)",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.a"));
    assertParquetDereferencePushDown(legacyUnnest, "SELECT t.*, x.a FROM test_pushdown_nestedcolumn_parquet CROSS JOIN UNNEST(y) as t(y)",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.a"));
    assertParquetDereferencePushDown(legacyUnnest, "SELECT id, x.a FROM test_pushdown_nestedcolumn_parquet CROSS JOIN UNNEST(y) as t(y)",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.a"));
    // Case sensitivity
    // (x.A / x.B resolve to the same lowercase subfields as x.a / x.b)
    assertParquetDereferencePushDown("SELECT x.a, x.b, x.A + 2 FROM test_pushdown_nestedcolumn_parquet WHERE x.B LIKE 'abc%'",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.a", "x.b"));
    // No pass-through nested column pruning
    assertParquetDereferencePushDown("SELECT id, min(x.d).d1 FROM test_pushdown_nestedcolumn_parquet GROUP BY 1",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.d"));
    assertParquetDereferencePushDown("SELECT id, min(x.d).d1, min(x.d.d2) FROM test_pushdown_nestedcolumn_parquet GROUP BY 1",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.d"));
    // Test pushdown of filters on dereference columns
    assertParquetDereferencePushDown("SELECT id, x.d.d1 FROM test_pushdown_nestedcolumn_parquet WHERE x.d.d1 = 1",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.d.d1"),
            withColumnDomains(ImmutableMap.of(
                    pushdownColumnNameForSubfield(nestedColumn("x.d.d1")), singleValue(BIGINT, 1L))),
            ImmutableSet.of(pushdownColumnNameForSubfield(nestedColumn("x.d.d1"))),
            TRUE_CONSTANT);
    assertParquetDereferencePushDown("SELECT id, x.d.d1 FROM test_pushdown_nestedcolumn_parquet WHERE x.d.d1 = 1 and x.d.d2 = 5.0",
            "test_pushdown_nestedcolumn_parquet",
            nestedColumnMap("x.d.d1", "x.d.d2"),
            withColumnDomains(ImmutableMap.of(
                    pushdownColumnNameForSubfield(nestedColumn("x.d.d1")), singleValue(BIGINT, 1L),
                    pushdownColumnNameForSubfield(nestedColumn("x.d.d2")), singleValue(DOUBLE, 5.0))),
            ImmutableSet.of(
                    pushdownColumnNameForSubfield(nestedColumn("x.d.d1")),
                    pushdownColumnNameForSubfield(nestedColumn("x.d.d2"))),
            TRUE_CONSTANT);
    assertUpdate("DROP TABLE test_pushdown_nestedcolumn_parquet");
}
/**
 * Bucket pruning on bucketed tables: equality predicates covering all
 * bucketing columns should restrict the scan to specific bucket ids
 * (assertBucketFilter), while range/inequality predicates — or predicates
 * covering only part of a composite bucketing key — must leave the scan
 * unfiltered (assertNoBucketFilter).
 */
@Test
public void testBucketPruning()
{
    QueryRunner queryRunner = getQueryRunner();
    // Case 1: single bucketing column (orderkey), 11 buckets.
    queryRunner.execute("CREATE TABLE orders_bucketed WITH (bucket_count = 11, bucketed_by = ARRAY['orderkey']) AS " +
            "SELECT * FROM orders");
    try {
        // Point and IN-list predicates prune to the matching bucket ids.
        assertPlan(getSession(), "SELECT * FROM orders_bucketed WHERE orderkey = 100",
                anyTree(PlanMatchPattern.tableScan("orders_bucketed")),
                plan -> assertBucketFilter(plan, "orders_bucketed", 11, ImmutableSet.of(1)));
        assertPlan(getSession(), "SELECT * FROM orders_bucketed WHERE orderkey = 100 OR orderkey = 101",
                anyTree(PlanMatchPattern.tableScan("orders_bucketed")),
                plan -> assertBucketFilter(plan, "orders_bucketed", 11, ImmutableSet.of(1, 2)));
        assertPlan(getSession(), "SELECT * FROM orders_bucketed WHERE orderkey IN (100, 101, 133)",
                anyTree(PlanMatchPattern.tableScan("orders_bucketed")),
                plan -> assertBucketFilter(plan, "orders_bucketed", 11, ImmutableSet.of(1, 2)));
        // No predicate, range, or != predicate: no bucket filter possible.
        assertPlan(getSession(), "SELECT * FROM orders_bucketed",
                anyTree(PlanMatchPattern.tableScan("orders_bucketed")),
                plan -> assertNoBucketFilter(plan, "orders_bucketed", 11));
        assertPlan(getSession(), "SELECT * FROM orders_bucketed WHERE orderkey > 100",
                anyTree(PlanMatchPattern.tableScan("orders_bucketed")),
                plan -> assertNoBucketFilter(plan, "orders_bucketed", 11));
        assertPlan(getSession(), "SELECT * FROM orders_bucketed WHERE orderkey != 100",
                anyTree(PlanMatchPattern.tableScan("orders_bucketed")),
                plan -> assertNoBucketFilter(plan, "orders_bucketed", 11));
    }
    finally {
        queryRunner.execute("DROP TABLE orders_bucketed");
    }
    // Case 2: composite bucketing key (orderkey, custkey) — both columns must
    // be constrained by equality for pruning to apply.
    queryRunner.execute("CREATE TABLE orders_bucketed WITH (bucket_count = 11, bucketed_by = ARRAY['orderkey', 'custkey']) AS " +
            "SELECT * FROM orders");
    try {
        assertPlan(getSession(), "SELECT * FROM orders_bucketed WHERE orderkey = 101 AND custkey = 280",
                anyTree(PlanMatchPattern.tableScan("orders_bucketed")),
                plan -> assertBucketFilter(plan, "orders_bucketed", 11, ImmutableSet.of(1)));
        assertPlan(getSession(), "SELECT * FROM orders_bucketed WHERE orderkey IN (101, 71) AND custkey = 280",
                anyTree(PlanMatchPattern.tableScan("orders_bucketed")),
                plan -> assertBucketFilter(plan, "orders_bucketed", 11, ImmutableSet.of(1, 6)));
        assertPlan(getSession(), "SELECT * FROM orders_bucketed WHERE orderkey IN (101, 71) AND custkey IN (280, 34)",
                anyTree(PlanMatchPattern.tableScan("orders_bucketed")),
                plan -> assertBucketFilter(plan, "orders_bucketed", 11, ImmutableSet.of(1, 2, 6, 8)));
        // An extra predicate on a non-bucketing column doesn't disable pruning.
        assertPlan(getSession(), "SELECT * FROM orders_bucketed WHERE orderkey = 101 AND custkey = 280 AND orderstatus <> '0'",
                anyTree(PlanMatchPattern.tableScan("orders_bucketed")),
                plan -> assertBucketFilter(plan, "orders_bucketed", 11, ImmutableSet.of(1)));
        // Only part of the composite key constrained: no pruning.
        assertPlan(getSession(), "SELECT * FROM orders_bucketed WHERE orderkey = 101",
                anyTree(PlanMatchPattern.tableScan("orders_bucketed")),
                plan -> assertNoBucketFilter(plan, "orders_bucketed", 11));
        assertPlan(getSession(), "SELECT * FROM orders_bucketed WHERE custkey = 280",
                anyTree(PlanMatchPattern.tableScan("orders_bucketed")),
                plan -> assertNoBucketFilter(plan, "orders_bucketed", 11));
        assertPlan(getSession(), "SELECT * FROM orders_bucketed WHERE orderkey = 101 AND custkey > 280",
                anyTree(PlanMatchPattern.tableScan("orders_bucketed")),
                plan -> assertNoBucketFilter(plan, "orders_bucketed", 11));
    }
    finally {
        queryRunner.execute("DROP TABLE orders_bucketed");
    }
}
@Test
public void testAddRequestedColumnsToLayout()
{
    String table = "test_add_requested_columns_to_layout";
    // Table with a scalar, a deeply nested row column, and a varchar column.
    assertUpdate(format("CREATE TABLE %s(" +
            "id bigint, " +
            "a row(d1 bigint, d2 array(bigint), d3 map(bigint, bigint), d4 row(x double, y double)), " +
            "b varchar )", table));
    try {
        // Single top-level column requested.
        assertPlan(getSession(), format("SELECT b FROM %s", table),
                anyTree(PlanMatchPattern.tableScan(table)),
                plan -> assertRequestedColumnsInLayout(plan, table, ImmutableSet.of("b")));
        // Two top-level columns.
        assertPlan(getSession(), format("SELECT id, b FROM %s", table),
                anyTree(PlanMatchPattern.tableScan(table)),
                plan -> assertRequestedColumnsInLayout(plan, table, ImmutableSet.of("id", "b")));
        // Whole nested column requested as-is.
        assertPlan(getSession(), format("SELECT id, a FROM %s", table),
                anyTree(PlanMatchPattern.tableScan(table)),
                plan -> assertRequestedColumnsInLayout(plan, table, ImmutableSet.of("id", "a")));
        // Only specific subfields of the nested column appear in the layout.
        assertPlan(getSession(), format("SELECT a.d1, a.d4.x FROM %s", table),
                anyTree(PlanMatchPattern.tableScan(table)),
                plan -> assertRequestedColumnsInLayout(plan, table, ImmutableSet.of("a.d1", "a.d4.x")));
    }
    finally {
        assertUpdate(format("DROP TABLE %s", table));
    }
}
@Test
public void testPartialAggregatePushdown()
{
    QueryRunner queryRunner = getQueryRunner();
    try {
        queryRunner.execute("CREATE TABLE orders_partitioned_parquet WITH (partitioned_by = ARRAY['ds'], format='PARQUET') AS " +
                "SELECT orderkey, orderpriority, comment, '2019-11-01' as ds FROM orders WHERE orderkey < 1000 " +
                "UNION ALL " +
                "SELECT orderkey, orderpriority, comment, '2019-11-02' as ds FROM orders WHERE orderkey < 1000");
        // count(*) should be pushed down: the FINAL aggregation sits directly above the scan.
        Map<Optional<String>, ExpectedValueProvider<FunctionCall>> aggregations = ImmutableMap.of(Optional.of("count"),
                PlanMatchPattern.functionCall("count", false, ImmutableList.of(anySymbol())));
        assertPlan(partialAggregatePushdownEnabled(),
                "select count(*) from orders_partitioned_parquet",
                anyTree(aggregation(globalAggregation(), aggregations, ImmutableMap.of(), Optional.empty(), AggregationNode.Step.FINAL,
                        exchange(LOCAL, GATHER,
                                new PlanMatchPattern[] {exchange(REMOTE_STREAMING, GATHER,
                                        new PlanMatchPattern[] {tableScan("orders_partitioned_parquet", ImmutableMap.of())})}))));
        // Fixed: the "min" entry previously matched a "max" function call (copy-paste error).
        // Also removed an unused "groupByKey" local that was never referenced.
        aggregations = ImmutableMap.of(
                Optional.of("count_1"),
                PlanMatchPattern.functionCall("count", false, ImmutableList.of(anySymbol())),
                Optional.of("max"),
                PlanMatchPattern.functionCall("max", false, ImmutableList.of(anySymbol())),
                Optional.of("min"),
                PlanMatchPattern.functionCall("min", false, ImmutableList.of(anySymbol())));
        assertPlan(partialAggregatePushdownEnabled(),
                "select count(orderkey), max(orderpriority), min(ds) from orders_partitioned_parquet",
                anyTree(new PlanMatchPattern[] {aggregation(globalAggregation(), aggregations, ImmutableMap.of(), Optional.empty(), AggregationNode.Step.FINAL,
                        exchange(LOCAL, GATHER,
                                new PlanMatchPattern[] {exchange(REMOTE_STREAMING, GATHER,
                                        new PlanMatchPattern[] {tableScan(
                                                "orders_partitioned_parquet",
                                                ImmutableMap.of("orderkey",
                                                        ImmutableSet.of(),
                                                        "orderpriority",
                                                        ImmutableSet.of(),
                                                        "ds",
                                                        ImmutableSet.of()))})}))}));
        // Negative tests: a row-level filter disables aggregate pushdown.
        assertPlan(partialAggregatePushdownEnabled(),
                "select count(orderkey), max(orderpriority), min(ds) from orders_partitioned_parquet where orderkey = 100",
                anyTree(PlanMatchPattern.tableScan("orders_partitioned_parquet")),
                plan -> assertNoAggregatedColumns(plan, "orders_partitioned_parquet"));
        // Removed a dead reassignment of "aggregations" here: the map built for the
        // arbitrary() case was never used by any subsequent assertion.
        assertPlan(partialAggregatePushdownEnabled(),
                "select count(orderkey), arbitrary(orderpriority), min(ds) from orders_partitioned_parquet",
                anyTree(PlanMatchPattern.tableScan("orders_partitioned_parquet")),
                plan -> assertNoAggregatedColumns(plan, "orders_partitioned_parquet"));
        assertPlan(partialAggregatePushdownEnabled(),
                "select count(orderkey), max(orderpriority), min(ds) from orders_partitioned_parquet where ds = '2019-11-01' and orderkey = 100",
                anyTree(PlanMatchPattern.tableScan("orders_partitioned_parquet")),
                plan -> assertNoAggregatedColumns(plan, "orders_partitioned_parquet"));
        // Without the variable-length-datatype property, min/max over varchar/varbinary
        // columns must not be pushed down.
        Session session = Session.builder(getQueryRunner().getDefaultSession())
                .setCatalogSessionProperty(HIVE_CATALOG, PARTIAL_AGGREGATION_PUSHDOWN_ENABLED, "true")
                .build();
        queryRunner.execute("CREATE TABLE variable_length_table(col1 varchar, col2 varchar(100), col3 varbinary) with (format='PARQUET')");
        queryRunner.execute("INSERT INTO variable_length_table values ('foo','bar',cast('baz' as varbinary))");
        assertPlan(session,
                "select min(col1) from variable_length_table",
                anyTree(PlanMatchPattern.tableScan("variable_length_table")),
                plan -> assertNoAggregatedColumns(plan, "variable_length_table"));
        assertPlan(session,
                "select max(col3) from variable_length_table",
                anyTree(PlanMatchPattern.tableScan("variable_length_table")),
                plan -> assertNoAggregatedColumns(plan, "variable_length_table"));
    }
    finally {
        queryRunner.execute("DROP TABLE IF EXISTS orders_partitioned_parquet");
        queryRunner.execute("DROP TABLE IF EXISTS variable_length_table");
    }
}
// Converts each textual subfield path into a Subfield, returning an immutable set.
private static Set<Subfield> toSubfields(String... subfieldPaths)
{
    ImmutableSet.Builder<Subfield> subfields = ImmutableSet.builder();
    for (String path : subfieldPaths) {
        subfields.add(new Subfield(path));
    }
    return subfields.build();
}
// Asserts that the plan for {@code query} scans {@code tableName} with exactly the
// given required subfields per column (via HiveTableScanMatcher).
private void assertPushdownSubfields(String query, String tableName, Map<String, Set<Subfield>> requiredSubfields)
{
    assertPlan(query, anyTree(tableScan(tableName, requiredSubfields)));
}
// Same as the single-argument overload, but plans the query under an explicit session.
private void assertPushdownSubfields(Session session, String query, String tableName, Map<String, Set<Subfield>> requiredSubfields)
{
    assertPlan(session, query, anyTree(tableScan(tableName, requiredSubfields)));
}
// Builds a plan pattern matching a scan of {@code expectedTableName} whose column
// handles carry exactly the expected required subfields.
private static PlanMatchPattern tableScan(String expectedTableName, Map<String, Set<Subfield>> expectedRequiredSubfields)
{
    return PlanMatchPattern.tableScan(expectedTableName).with(new HiveTableScanMatcher(expectedRequiredSubfields));
}
// Matches a scan with the given dereference pushdowns and no predicates.
// NOTE(review): "Deference" looks like a typo for "Dereference", but the name is kept
// since callers in this file use it.
private static PlanMatchPattern tableScanParquetDeferencePushDowns(String expectedTableName, Map<String, Subfield> expectedDeferencePushDowns)
{
    return PlanMatchPattern.tableScan(expectedTableName).with(new HiveParquetDereferencePushdownMatcher(expectedDeferencePushDowns, TupleDomain.all(), ImmutableSet.of(), TRUE_CONSTANT));
}
// Overload that additionally matches the layout's domain predicate, predicate columns
// and remaining predicate.
private static PlanMatchPattern tableScanParquetDeferencePushDowns(String expectedTableName, Map<String, Subfield> expectedDeferencePushDowns,
        TupleDomain<String> domainPredicate, Set<String> predicateColumns, RowExpression remainingPredicate)
{
    return PlanMatchPattern.tableScan(expectedTableName).with(new HiveParquetDereferencePushdownMatcher(expectedDeferencePushDowns, domainPredicate, predicateColumns, remainingPredicate));
}
// True when the node is a TableScanNode over the named Hive table.
private static boolean isTableScanNode(PlanNode node, String tableName)
{
    if (!(node instanceof TableScanNode)) {
        return false;
    }
    HiveTableHandle tableHandle = (HiveTableHandle) ((TableScanNode) node).getTable().getConnectorHandle();
    return tableHandle.getTableName().equals(tableName);
}
// Asserts that, with pushdown filter and subfield range filters enabled, the query on
// the fixed test table produces a layout whose domain predicate equals the given
// per-subfield domains and whose predicate columns are the subfields' root columns.
private void assertPushdownFilterOnSubfields(String query, Map<Subfield, Domain> predicateDomains)
{
    String tableName = "test_pushdown_filter_on_subfields";
    assertPlan(pushdownFilterAndNestedColumnFilterEnabled(), query,
            output(exchange(PlanMatchPattern.tableScan(tableName))),
            plan -> assertTableLayout(
                    plan,
                    tableName,
                    withColumnDomains(predicateDomains),
                    TRUE_CONSTANT,
                    predicateDomains.keySet().stream().map(Subfield::getRootName).collect(toImmutableSet())));
}
// Convenience overload using the default dereference-pushdown-enabled session.
private void assertParquetDereferencePushDown(String query, String tableName, Map<String, Subfield> expectedDeferencePushDowns)
{
    assertParquetDereferencePushDown(withParquetDereferencePushDownEnabled(), query, tableName, expectedDeferencePushDowns);
}
// Overload that also verifies the layout's predicates alongside the dereference pushdowns.
private void assertParquetDereferencePushDown(String query, String tableName, Map<String, Subfield> expectedDeferencePushDowns, TupleDomain<String> domainPredicate,
        Set<String> predicateColumns, RowExpression remainingPredicate)
{
    assertPlan(withParquetDereferencePushDownEnabled(), query,
            anyTree(tableScanParquetDeferencePushDowns(tableName, expectedDeferencePushDowns, domainPredicate, predicateColumns, remainingPredicate)));
}
// Overload taking an explicit session (e.g. with extra properties toggled).
private void assertParquetDereferencePushDown(Session session, String query, String tableName, Map<String, Subfield> expectedDeferencePushDowns)
{
    assertPlan(session, query, anyTree(tableScanParquetDeferencePushDowns(tableName, expectedDeferencePushDowns)));
}
// Session with the Hive connector's filter pushdown enabled.
private Session pushdownFilterEnabled()
{
    return Session.builder(getQueryRunner().getDefaultSession())
            .setCatalogSessionProperty(HIVE_CATALOG, PUSHDOWN_FILTER_ENABLED, "true")
            .build();
}
// Session with both filter pushdown and range filters on subscripts (nested columns) enabled.
private Session pushdownFilterAndNestedColumnFilterEnabled()
{
    return Session.builder(getQueryRunner().getDefaultSession())
            .setCatalogSessionProperty(HIVE_CATALOG, PUSHDOWN_FILTER_ENABLED, "true")
            .setCatalogSessionProperty(HIVE_CATALOG, RANGE_FILTERS_ON_SUBSCRIPTS_ENABLED, "true")
            .build();
}
// Session enabling both the engine-level dereference pushdown and the Hive Parquet
// dereference pushdown.
private Session withParquetDereferencePushDownEnabled()
{
    return Session.builder(getQueryRunner().getDefaultSession())
            .setSystemProperty(PUSHDOWN_DEREFERENCE_ENABLED, "true")
            .setCatalogSessionProperty(HIVE_CATALOG, PARQUET_DEREFERENCE_PUSHDOWN_ENABLED, "true")
            .build();
}
// Session enabling partial aggregation pushdown, including for variable-length datatypes.
private Session partialAggregatePushdownEnabled()
{
    return Session.builder(getQueryRunner().getDefaultSession())
            .setCatalogSessionProperty(HIVE_CATALOG, PARTIAL_AGGREGATION_PUSHDOWN_ENABLED, "true")
            .setCatalogSessionProperty(HIVE_CATALOG, PARTIAL_AGGREGATION_PUSHDOWN_FOR_VARIABLE_LENGTH_DATATYPES_ENABLED, "true")
            .build();
}
// Wraps a long literal as a BIGINT ConstantExpression for expected-predicate construction.
private RowExpression constant(long value)
{
    return new ConstantExpression(value, BIGINT);
}
// Builds an immutable map sending each supplied value to itself.
// Like the stream/toImmutableMap form, duplicate values cause an exception at build time.
private static Map<String, String> identityMap(String... values)
{
    ImmutableMap.Builder<String, String> map = ImmutableMap.builder();
    for (String value : values) {
        map.put(value, value);
    }
    return map.build();
}
// Asserts that the (single) scan of tableName carries a HiveTableLayoutHandle with
// exactly the given predicate columns, domain predicate and remaining predicate.
private void assertTableLayout(Plan plan, String tableName, TupleDomain<Subfield> domainPredicate, RowExpression remainingPredicate, Set<String> predicateColumnNames)
{
    TableScanNode tableScan = searchFrom(plan.getRoot())
            .where(node -> isTableScanNode(node, tableName))
            .findOnlyElement();
    assertTrue(tableScan.getTable().getLayout().isPresent());
    HiveTableLayoutHandle layoutHandle = (HiveTableLayoutHandle) tableScan.getTable().getLayout().get();
    assertEquals(layoutHandle.getPredicateColumns().keySet(), predicateColumnNames);
    assertEquals(layoutHandle.getDomainPredicate(), domainPredicate);
    // Fixed: this assertion was accidentally duplicated on two consecutive lines.
    assertEquals(layoutHandle.getRemainingPredicate(), remainingPredicate);
}
// Asserts that the scan of tableName has a bucket filter keeping exactly bucketsToKeep
// out of readBucketCount buckets.
private void assertBucketFilter(Plan plan, String tableName, int readBucketCount, Set<Integer> bucketsToKeep)
{
    TableScanNode tableScan = searchFrom(plan.getRoot())
            .where(node -> isTableScanNode(node, tableName))
            .findOnlyElement();
    assertTrue(tableScan.getTable().getLayout().isPresent());
    HiveTableLayoutHandle layoutHandle = (HiveTableLayoutHandle) tableScan.getTable().getLayout().get();
    assertTrue(layoutHandle.getBucketHandle().isPresent());
    assertTrue(layoutHandle.getBucketFilter().isPresent());
    assertEquals(layoutHandle.getBucketHandle().get().getReadBucketCount(), readBucketCount);
    assertEquals(layoutHandle.getBucketFilter().get().getBucketsToKeep(), bucketsToKeep);
}
// Asserts that the scan of tableName is bucketed with readBucketCount buckets but has
// no bucket filter.
private void assertNoBucketFilter(Plan plan, String tableName, int readBucketCount)
{
    TableScanNode tableScan = searchFrom(plan.getRoot())
            .where(node -> isTableScanNode(node, tableName))
            .findOnlyElement();
    assertTrue(tableScan.getTable().getLayout().isPresent());
    HiveTableLayoutHandle layoutHandle = (HiveTableLayoutHandle) tableScan.getTable().getLayout().get();
    // Fixed: assert presence explicitly so a missing bucket handle fails with a clear
    // assertion error instead of a NoSuchElementException from Optional.get().
    assertTrue(layoutHandle.getBucketHandle().isPresent());
    assertEquals(layoutHandle.getBucketHandle().get().getReadBucketCount(), readBucketCount);
    assertFalse(layoutHandle.getBucketFilter().isPresent());
}
// Asserts that no column assigned by the scan of tableName is an AGGREGATED column
// and none carries a partial aggregation.
private void assertNoAggregatedColumns(Plan plan, String tableName)
{
    TableScanNode tableScan = searchFrom(plan.getRoot())
            .where(node -> isTableScanNode(node, tableName))
            .findOnlyElement();
    tableScan.getAssignments().values().forEach(columnHandle -> {
        assertTrue(columnHandle instanceof HiveColumnHandle);
        HiveColumnHandle hiveColumnHandle = (HiveColumnHandle) columnHandle;
        assertFalse(hiveColumnHandle.getColumnType() == HiveColumnHandle.ColumnType.AGGREGATED);
        assertFalse(hiveColumnHandle.getPartialAggregation().isPresent());
    });
}
// Asserts that the layout's requested columns equal expectedRequestedColumns, where a
// column with required subfields contributes each serialized subfield path rather than
// its bare name.
private void assertRequestedColumnsInLayout(Plan plan, String tableName, Set<String> expectedRequestedColumns)
{
    TableScanNode tableScan = searchFrom(plan.getRoot())
            .where(node -> isTableScanNode(node, tableName))
            .findOnlyElement();
    assertTrue(tableScan.getTable().getLayout().isPresent());
    HiveTableLayoutHandle layoutHandle = (HiveTableLayoutHandle) tableScan.getTable().getLayout().get();
    assertTrue(layoutHandle.getRequestedColumns().isPresent());
    Set<HiveColumnHandle> requestedColumns = layoutHandle.getRequestedColumns().get();
    List<String> actualRequestedColumns = new ArrayList<>();
    for (HiveColumnHandle column : requestedColumns) {
        if (!column.getRequiredSubfields().isEmpty()) {
            // Subfield-pruned column: report each requested subfield path.
            column.getRequiredSubfields().stream().map(Subfield::serialize).forEach(actualRequestedColumns::add);
        }
        else {
            // Whole column requested.
            actualRequestedColumns.add(column.getName());
        }
    }
    // Size comparison detects duplicates before comparing as a set.
    Set<String> requestedColumnsSet = ImmutableSet.copyOf(actualRequestedColumns);
    assertEquals(requestedColumnsSet.size(), actualRequestedColumns.size(), "There should be no duplicates in the requested column list");
    assertEquals(requestedColumnsSet, expectedRequestedColumns);
}
// Matches a scan of tableName whose HiveTableLayoutHandle carries exactly the given
// predicate columns, domain predicate (string keys promoted to Subfield) and remaining
// predicate.
private static PlanMatchPattern tableScan(String tableName, TupleDomain<String> domainPredicate, RowExpression remainingPredicate, Set<String> predicateColumnNames)
{
    return PlanMatchPattern.tableScan(tableName).with(new Matcher() {
        @Override
        public boolean shapeMatches(PlanNode node)
        {
            return node instanceof TableScanNode;
        }

        @Override
        public MatchResult detailMatches(PlanNode node, StatsProvider stats, Session session, Metadata metadata, SymbolAliases symbolAliases)
        {
            TableScanNode tableScan = (TableScanNode) node;
            Optional<ConnectorTableLayoutHandle> layout = tableScan.getTable().getLayout();
            if (!layout.isPresent()) {
                return NO_MATCH;
            }
            HiveTableLayoutHandle layoutHandle = (HiveTableLayoutHandle) layout.get();
            if (!Objects.equals(layoutHandle.getPredicateColumns().keySet(), predicateColumnNames) ||
                    !Objects.equals(layoutHandle.getDomainPredicate(), domainPredicate.transform(Subfield::new)) ||
                    !Objects.equals(layoutHandle.getRemainingPredicate(), remainingPredicate)) {
                return NO_MATCH;
            }
            return match();
        }
    });
}
// Matcher verifying that every column assigned by a scan carries exactly the expected
// required subfields: columns listed in the map must match their expected set, and all
// other columns must have no required subfields.
private static final class HiveTableScanMatcher
        implements Matcher
{
    // Expected required subfields keyed by column name.
    private final Map<String, Set<Subfield>> requiredSubfields;

    private HiveTableScanMatcher(Map<String, Set<Subfield>> requiredSubfields)
    {
        this.requiredSubfields = requireNonNull(requiredSubfields, "requiredSubfields is null");
    }

    @Override
    public boolean shapeMatches(PlanNode node)
    {
        return node instanceof TableScanNode;
    }

    @Override
    public MatchResult detailMatches(PlanNode node, StatsProvider stats, Session session, Metadata metadata, SymbolAliases symbolAliases)
    {
        TableScanNode tableScan = (TableScanNode) node;
        for (ColumnHandle column : tableScan.getAssignments().values()) {
            HiveColumnHandle hiveColumn = (HiveColumnHandle) column;
            String columnName = hiveColumn.getName();
            if (requiredSubfields.containsKey(columnName)) {
                // Expected-pruned column: the actual subfields must match exactly.
                if (!requiredSubfields.get(columnName).equals(ImmutableSet.copyOf(hiveColumn.getRequiredSubfields()))) {
                    return NO_MATCH;
                }
            }
            else {
                // Any other column must not have been subfield-pruned.
                if (!hiveColumn.getRequiredSubfields().isEmpty()) {
                    return NO_MATCH;
                }
            }
        }
        return match();
    }

    @Override
    public String toString()
    {
        return toStringHelper(this)
                .add("requiredSubfields", requiredSubfields)
                .toString();
    }
}
private static final class HiveParquetDereferencePushdownMatcher
implements Matcher
{
private final Map<String, Subfield> dereferenceColumns;
private final TupleDomain<String> domainPredicate;
private final Set<String> predicateColumns;
private final RowExpression remainingPredicate;
private HiveParquetDereferencePushdownMatcher(
Map<String, Subfield> dereferenceColumns,
TupleDomain<String> domainPredicate,
Set<String> predicateColumns,
RowExpression remainingPredicate)
{
this.dereferenceColumns = requireNonNull(dereferenceColumns, "dereferenceColumns is null");
this.domainPredicate = requireNonNull(domainPredicate, "domainPredicate is null");
this.predicateColumns = requireNonNull(predicateColumns, "predicateColumns is null");
this.remainingPredicate = requireNonNull(remainingPredicate, "remainingPredicate is null");
}
@Override
public boolean shapeMatches(PlanNode node)
{
return node instanceof TableScanNode;
}
@Override
public MatchResult detailMatches(PlanNode node, StatsProvider stats, Session session, Metadata metadata, SymbolAliases symbolAliases)
{
TableScanNode tableScan = (TableScanNode) node;
for (ColumnHandle column : tableScan.getAssignments().values()) {
HiveColumnHandle hiveColumn = (HiveColumnHandle) column;
String columnName = hiveColumn.getName();
if (dereferenceColumns.containsKey(columnName)) {
if (hiveColumn.getColumnType() != SYNTHESIZED ||
hiveColumn.getRequiredSubfields().size() != 1 ||
!hiveColumn.getRequiredSubfields().get(0).equals(dereferenceColumns.get(columnName))) {
return NO_MATCH;
}
dereferenceColumns.remove(columnName);
}
else {
if (isPushedDownSubfield(hiveColumn)) {
return NO_MATCH;
}
}
}
if (!dereferenceColumns.isEmpty()) {
return NO_MATCH;
}
Optional<ConnectorTableLayoutHandle> layout = tableScan.getTable().getLayout();
if (!layout.isPresent()) {
return NO_MATCH;
}
HiveTableLayoutHandle layoutHandle = (HiveTableLayoutHandle) layout.get();
if (!Objects.equals(layoutHandle.getPredicateColumns().keySet(), predicateColumns) ||
!Objects.equals(layoutHandle.getDomainPredicate(), domainPredicate.transform(Subfield::new)) ||
!Objects.equals(layoutHandle.getRemainingPredicate(), remainingPredicate)) {
return NO_MATCH;
}
return match();
}
@Override
public String toString()
{
return toStringHelper(this)
.add("dereferenceColumns", dereferenceColumns)
.add("domainPredicate", domainPredicate)
.add("predicateColumns", predicateColumns)
.add("remainingPredicate", remainingPredicate)
.toString();
}
}
// Maps each column path to its Subfield, keyed by the synthesized pushdown column name.
// Duplicate paths fail at collection time, exactly as before.
private static Map<String, Subfield> nestedColumnMap(String... columns)
{
    return Arrays.stream(columns)
            .map(column -> nestedColumn(column))
            .collect(Collectors.toMap(
                    subfield -> pushdownColumnNameForSubfield(subfield),
                    subfield -> subfield));
}
// Parses a dotted column path (e.g. "a.b.c") into a Subfield.
private static Subfield nestedColumn(String column)
{
    return new Subfield(column);
}
}
|
//79. Word Search - https://leetcode.com/problems/word-search/
//By Ratna Priya
class Solution {
    // Sentinel marking a cell as visited during DFS. Fixed: the previous sentinel ' '
    // broke for words containing a space, because a visited cell could re-match (and be
    // reused for) a ' ' character in the word. '\0' cannot occur in a search word.
    private static final char VISITED = '\0';

    /**
     * Returns true when {@code word} can be traced through horizontally/vertically
     * adjacent cells of {@code board}, using each cell at most once.
     * An empty word is trivially found.
     */
    public boolean exist(char[][] board, String word) {
        if (word.isEmpty()) {
            return true;
        }
        for (int i = 0; i < board.length; i++) {
            for (int j = 0; j < board[i].length; j++) {
                if (board[i][j] == word.charAt(0) && dfs(board, i, j, 0, word)) {
                    return true;
                }
            }
        }
        return false;
    }

    /**
     * Depth-first search from cell (i, j), matching {@code word} starting at index
     * {@code count}. The board is restored before returning, so callers see it unchanged.
     */
    public boolean dfs(char[][] board, int i, int j, int count, String word) {
        if (count == word.length()) {
            return true;
        }
        if (i < 0 || j < 0 || i >= board.length || j >= board[i].length || board[i][j] != word.charAt(count)) {
            return false;
        }
        char temp = board[i][j];
        board[i][j] = VISITED; // mark so this cell is not reused on the current path
        boolean found = dfs(board, i + 1, j, count + 1, word)
                || dfs(board, i - 1, j, count + 1, word)
                || dfs(board, i, j + 1, count + 1, word)
                || dfs(board, i, j - 1, count + 1, word);
        board[i][j] = temp; // backtrack
        return found;
    }
}
|
/*
* Copyright (c) 2011-2019 Pivotal Software Inc, All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.hnettylibrary.netty.http.websocket;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.websocketx.WebSocketFrame;
import io.netty.handler.codec.http.websocketx.WebSocketFrameAggregator;
import reactor.core.publisher.Flux;
import reactor.netty.NettyInbound;
import reactor.util.annotation.Nullable;
/**
* A websocket framed inbound
*
* @author Stephane Maldini
* @author Simon Baslé
* @since 0.6
*/
public interface WebsocketInbound extends NettyInbound {

	/**
	 * Returns the websocket subprotocol negotiated by the client and server during
	 * the websocket handshake, or null if none was requested.
	 *
	 * @return the subprotocol, or null
	 */
	@Nullable
	String selectedSubprotocol();

	/**
	 * Returns the websocket remote headers sent during the handshake.
	 *
	 * @return the websocket remote headers sent during the handshake
	 */
	HttpHeaders headers();

	/**
	 * Turns this {@link WebsocketInbound} into aggregating mode, which only produces
	 * fully formed frames from fragments received over the wire.
	 *
	 * <p>Aggregates up to 65,536 bytes per frame.
	 *
	 * @return this inbound
	 */
	default WebsocketInbound aggregateFrames() {
		return aggregateFrames(65_536);
	}

	/**
	 * Turns this {@link WebsocketInbound} into aggregating mode, which only produces
	 * fully formed frames from fragments received over the wire.
	 *
	 * @param maxContentLength the maximum frame length
	 *
	 * @return this inbound
	 */
	default WebsocketInbound aggregateFrames(int maxContentLength) {
		// Append a Netty aggregator to the connection's pipeline.
		withConnection(c -> c.addHandlerLast(new WebSocketFrameAggregator(maxContentLength)));
		return this;
	}

	/**
	 * @return a {@link Flux} of fully formed {@link WebSocketFrame} content
	 */
	default Flux<WebSocketFrame> receiveFrames() {
		return receiveObject().ofType(WebSocketFrame.class);
	}
}
|
package ipmn.batch.service.impl;
import org.apache.ibatis.session.SqlSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import ipmn.batch.service.CronElkSecuAuditService;
import ipmn.batch.vo.CronResultVO;
import ipmn.batch.vo.CronSecuAuditVO;
@Service("CronElkSecuAuditService")
public class CronElkSecuAuditServiceImpl implements CronElkSecuAuditService {
    private static final Logger logger = LoggerFactory.getLogger(CronElkSecuAuditServiceImpl.class);

    @Autowired
    private SqlSession sqlSession;

    // NOTE(review): method names use UpperCamelCase because they implement the
    // CronElkSecuAuditService interface; renaming would break the contract.

    /** Records a batch result row via the "CronSecuAudit.CronSecuAuditResult" statement. */
    public void CronSecuAuditResult(CronResultVO vo) {
        // Fixed: log message previously said "CronBatchResult" (copy-paste).
        logger.debug("======== CronSecuAuditResult start =======");
        sqlSession.insert("CronSecuAudit.CronSecuAuditResult", vo);
    }

    /** Returns the audit row count from the "CronSecuAudit.CronSecuAuditCnt" statement. */
    public int CronSecuAuditCnt() throws Exception {
        logger.debug("======== CronSecuAuditCnt start =======");
        // selectOne returns the count directly; no mutable local needed.
        return sqlSession.selectOne("CronSecuAudit.CronSecuAuditCnt");
    }

    /** Inserts one audit row via the "CronSecuAudit.CronSecuAuditInsert" statement. */
    public void CronSecuAuditInsert(CronSecuAuditVO vo) throws Exception {
        // Fixed: log message previously said "CronSolidInsert" (copy-paste).
        logger.debug("======== CronSecuAuditInsert start =======");
        sqlSession.insert("CronSecuAudit.CronSecuAuditInsert", vo);
    }

    /** Deletes audit rows via the "CronSecuAudit.CronSecuAuditDelete" statement. */
    public void CronSecuAuditDelete() throws Exception {
        logger.debug("======== CronSecuAuditDelete start =======");
        sqlSession.delete("CronSecuAudit.CronSecuAuditDelete");
    }
}
|
package com.atguigu.gmall.pms.mapper;
import com.atguigu.gmall.pms.entity.BrandEntity;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import org.apache.ibatis.annotations.Mapper;
/**
* 品牌
*
* @author fengge
* @email fengge@atguigu.com
* @date 2020-10-12 21:14:25
*/
@Mapper
public interface BrandMapper extends BaseMapper<BrandEntity> {
    // Inherits the generic CRUD operations for BrandEntity from MyBatis-Plus BaseMapper;
    // no custom SQL methods are declared here.
}
|
/*
* arcus-java-client : Arcus Java client
* Copyright 2010-2014 NAVER Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.spy.memcached.ops;
/**
 * Operation for fetching b+tree elements for multiple keys in one request.
 */
public interface BTreeGetBulkOperation extends KeyedOperation {

  /**
   * Callback receiving per-element and per-key results of a get-bulk operation.
   *
   * @param <K> key type used by the caller
   */
  interface Callback<K> extends OperationCallback {

    // Delivers one element: its owning key, subkey, flags, eflag bytes and data bytes.
    void gotElement(String key, Object subkey, int flags, byte[] eflag, byte[] data);

    // Delivers the per-key summary: element count and operation status.
    // NOTE(review): invocation order relative to gotElement is protocol-defined —
    // confirm against the operation implementation before relying on it.
    void gotKey(String key, int elementCount, OperationStatus status);
  }
}
|
/*
* BasicGraphRenderer.java Created on February 15, 2008, 1:38 PM To change this
* template, choose Tools | Template Manager and open the template in the
* editor.
*/
package netQuant.display;
import java.awt.AlphaComposite;
import java.awt.BasicStroke;
import java.awt.Color;
import java.awt.Dimension;
import java.awt.Font;
import java.awt.Graphics;
import java.awt.Graphics2D;
import java.awt.RenderingHints;
import java.awt.Shape;
import java.util.Iterator;
import javax.swing.JPanel;
import netQuant.NetQuantSettings;
import netQuant.graph.Edge;
import netQuant.graph.Graph;
import netQuant.graph.Node;
/**
 * Swing panel that renders a {@link netQuant.graph.Graph}: edges are painted first
 * (optionally alpha-faded or stroked by confidence), then nodes with their labels.
 *
 * @author Sean
 */
public class BasicGraphRenderer extends JPanel {
    /** Creates a new instance of BasicGraphRenderer. */
    public BasicGraphRenderer() {
        // Per-edge-type vertical offsets so parallel edges of different types
        // do not overlap exactly.
        typecorrections = new double[10];
        typecorrections[0] = 1;
        typecorrections[1] = -4;
        typecorrections[2] = 3;
        typecorrections[3] = -2;
        typecorrections[4] = 0; // fixed: was "-0", which is just 0 for ints
        typecorrections[5] = -3;
        typecorrections[6] = 2;
        typecorrections[7] = 4;
        typecorrections[8] = -1;
        typecorrections[9] = -5;
    }

    // Whether edges are faded via alpha compositing (true) or stroked by confidence (false).
    public boolean alpha = true;
    public boolean arrow = true; // show arrow or not
    private final double[] typecorrections;

    /**
     * The default color of edges
     */
    public Color basicEdgeColor = Color.gray;

    // Node size, was final, but we decided to make it settable
    public int nodeSize = 10;

    // correction for node size
    /**
     * The correction for nodes, i.e if the node size is changed, the node will
     * still center on its location
     */
    protected int correct = (int) (nodeSize / 2.);

    /**
     * The default font color
     */
    public Color fontColor = Color.black;
    public Color fadedfontColor = Color.LIGHT_GRAY;
    public Color diffColor = Color.WHITE;
    public Color mediatorColor = Color.GRAY;

    /**
     * The current font size
     */
    public int fontSize = 5;

    // the graph object
    public Graph graph;
    public boolean hideWhenMove = false;
    public boolean label = true;

    // the initial font
    public Font nodeFont = new Font("TimesNewRoman", Font.PLAIN, 10);
    public boolean showAnnotation = false;
    public boolean showConfidence = true;

    // the attributes holder class
    private NetQuantSettings stringSettings = null;

    /**
     * Clear graphics
     *
     * @param g
     *            Graphics g
     */
    public void clear(Graphics g) {
        super.paintComponent(g);
    }

    /**
     * Returns the font color
     *
     * @return Font color
     */
    public Color getFontColor() {
        return fontColor;
    }

    public Graph getGraph() {
        return graph;
    }

    /**
     * Returns number of nodes and edges
     *
     * @return returns number of [nodes, edges, unique edges]
     */
    public String[] getGraphData() {
        String[] result = new String[3];
        // Fixed: use Integer.toString instead of the deprecated new Integer(...) boxing
        // constructor.
        result[0] = Integer.toString(graph.getNodeSize());
        result[1] = Integer.toString(graph.getEdgeSize());
        result[2] = Integer.toString(graph.getUniqueEdgeSize());
        return result;
    }

    // the applications dimensions
    final int defaultSize = 1200;
    protected int panelWidth = defaultSize;
    protected int panelHeight = defaultSize;

    // Set new dimension
    public Dimension dims = new Dimension(defaultSize, defaultSize);

    @Override
    public Dimension getMinimumSize() {
        return dims;
    }

    public int getPanelHeight() {
        return panelHeight;
    }

    public int getPanelWidth() {
        return panelWidth;
    }

    /**
     * Apparently these functions must be overridden, according to
     * http://www.javaworld.com/javaworld/jw-09-2000/jw-0922-javatraps.html
     */
    @Override
    public Dimension getPreferredSize() {
        return dims;
    }

    /**
     * Returns a stroke for an edge based on its confidence: solid above 0.67,
     * long dashes above 0.33, short dashes otherwise.
     */
    protected BasicStroke getStroke(float conf) {
        float[] dashPattern1 = { 3, 2, 3, 2 };
        float[] dashPattern2 = { 1, 4, 1, 4 };
        if (conf > 0.67)
            return new BasicStroke();
        if (conf > 0.33)
            return new BasicStroke(1, BasicStroke.CAP_BUTT, BasicStroke.JOIN_MITER, 10, dashPattern1, 0);
        return new BasicStroke(1, BasicStroke.CAP_BUTT, BasicStroke.JOIN_MITER, 10, dashPattern2, 0);
    }

    /**
     * Returns a stroke scaled by confidence; edge types >= 3 are dashed.
     */
    protected BasicStroke getStroke(float conf, int type) {
        float[] dashPattern = { 3, 2, 3, 2 };
        if (type < 3)
            return new BasicStroke(conf * 5);
        else
            return new BasicStroke(conf * 5, BasicStroke.CAP_BUTT, BasicStroke.JOIN_MITER, 10, dashPattern, 0);
    }

    public void setPanelWidth(int panelWidth) {
        this.panelWidth = panelWidth;
    }

    public void setPanelHeight(int panelHeight) {
        this.panelHeight = panelHeight;
    }

    /*
     * Composite is used for fading effects
     */
    /**
     * Get the composite ranging from 1.0f (opaque) to 0.0f (invisible)
     *
     * @param alpha
     *            Float 0.0 to 1.0
     * @return The composite
     */
    protected AlphaComposite makeComposite(float alpha) {
        int type = AlphaComposite.SRC_OVER;
        return (AlphaComposite.getInstance(type, alpha));
    }

    /**
     * Paints the component
     *
     * @param g
     *            current graphics
     */
    @Override
    public synchronized void paintComponent(Graphics g) {
        Graphics2D g2d = prePaint(g);
        paintNet(g2d);
    }

    // Enables antialiasing and clears the panel before painting the network.
    protected Graphics2D prePaint(final Graphics g) {
        Graphics2D g2d = (Graphics2D) g;
        g2d.setRenderingHint(RenderingHints.KEY_TEXT_ANTIALIASING, RenderingHints.VALUE_TEXT_ANTIALIAS_ON);
        g2d.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
        g2d.setStroke(new BasicStroke(1));
        clear(g2d);
        return g2d;
    }

    /**
     * Paints a single edge between its two end nodes, offset vertically by edge type.
     * Edges of type 10 are not drawn.
     */
    public void paintEdge(Graphics2D g, Edge e) {
        Node from = graph.getNode(e.getFromName());
        // NOTE(review): "to" is resolved via the raw field e.n2 while "from" uses a
        // getter — presumably equivalent; confirm against the Edge API.
        Node to = graph.getNode(e.n2);
        if (e.getType() != 10) {
            int type = e.getType() - 1;
            int x1 = (int) from.getX();
            int y1 = (int) (from.getY() + typecorrections[type]);
            int x2 = (int) to.getX();
            int y2 = (int) (to.getY() + typecorrections[type]);
            // handle confidence: either fade via alpha or vary the stroke
            if (showConfidence) {
                if (alpha) {
                    g.setComposite(makeComposite(e.getConf()));
                    // Size-5 nodes render their edges nearly transparent;
                    // size-0 nodes hide their edges completely.
                    if (from.getSize() == 5 || to.getSize() == 5) {
                        g.setComposite(makeComposite(0.12f));
                    }
                    if (from.getSize() == 0 || to.getSize() == 0) {
                        g.setComposite(makeComposite(0.0f));
                    }
                } else {
                    g.setStroke(getStroke(e.getConf()));
                }
            }
            g.setPaint(basicEdgeColor);
            if (pretty) {
                // Fixed: Integer.valueOf instead of the deprecated new Integer(...)
                g.setPaint(getStringSettings().getColor(Integer.valueOf(e.getType())));
                PaintTools.paintPath(g, x1, y1, x2, y2, e.getOrientation(), BasicGraphPanel.offset, arrow);
            } else {
                g.drawLine(x1, y1, x2, y2);
            }
        }
    }

    // paint simple or more "pretty"
    public boolean pretty = false;

    /**
     * Paints nodes and edges
     *
     * @param g2d
     *            Graphics2D g2d
     */
    public void paintNet(Graphics2D g2d) {
        // Raw iterators retained: the Graph iterator API's generics are not visible here.
        for (Iterator edges = graph.edgesIterator(); edges.hasNext();) {
            Edge e = (Edge) edges.next();
            paintEdge(g2d, e);
        }
        // make sure confidence is restored
        g2d.setComposite(makeComposite(1.0f));
        for (Iterator it = graph.nodesIterator(); it.hasNext();) {
            Node node = (Node) it.next();
            paintNode(g2d, node);
        }
    }

    /**
     * Paints out Node n
     *
     * @param g
     *            Graphics2D
     * @param n
     *            Node n
     */
    public void paintNode(Graphics2D g, Node n) {
        g.setStroke(new BasicStroke(1));
        g.setPaint(n.getColor());
        // Offset by "correct" so the shape is centered on the node position.
        int x = (int) n.getX() - correct;
        int y = (int) n.getY() - correct;
        Shape shape = PaintTools.getShape(n.getShape(), x, y, n.getSize());
        g.fill(shape);
        // Outline color depends on node size and fixed state.
        g.setPaint(Color.BLACK);
        if (n.getSize() == 5)
            g.setPaint(Color.GRAY);
        if (n.getSize() == 0)
            g.setPaint(Color.WHITE);
        if (n.isFixed())
            g.setPaint(Color.yellow);
        g.draw(shape);
        // Label color mirrors the size-based fading used for edges.
        g.setPaint(fontColor);
        if (n.getSize() == 5)
            g.setPaint(fadedfontColor);
        if (n.getSize() == 0)
            g.setPaint(diffColor);
        g.setFont(nodeFont);
        if (label)
            if (showAnnotation) {
                g.drawString(n.getAnnotation(), x - 6, y - 4);
            } else
                g.drawString(n.getLabel(), x - 6, y - 4);
    }

    public NetQuantSettings getStringSettings() {
        return stringSettings;
    }

    public void setStringSettings(NetQuantSettings stringSettings) {
        this.stringSettings = stringSettings;
    }
}
|
package com.warumono.repositories;
import java.util.List;
import org.jooq.DSLContext;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import org.springframework.transaction.annotation.Transactional;
import com.warumono.databases.public_.tables.Author;
import com.warumono.databases.public_.tables.AuthorBook;
import com.warumono.databases.public_.tables.Book;
import com.warumono.models.AuthorBookModel;
@Repository
public class AuthorBookRepository
{
Author author = Author.AUTHOR;
Book book = Book.BOOK;
AuthorBook authorBook = AuthorBook.AUTHOR_BOOK;
@Autowired
private DSLContext dsl;
@Transactional
public boolean insert(AuthorBookModel authorBookModel)
{
return dsl.insertInto(authorBook, authorBook.AUTHOR_ID, authorBook.BOOK_ID)
.values(authorBookModel.getAuthorId(), authorBookModel.getBookId())
.execute() == 1;
}
@Transactional
public boolean update(AuthorBookModel oldAuthorBookModel, AuthorBookModel newAuthorBookModel)
{
return dsl.update(authorBook)
.set(authorBook.AUTHOR_ID, newAuthorBookModel.getAuthorId())
.set(authorBook.AUTHOR_ID, newAuthorBookModel.getBookId())
.where(authorBook.AUTHOR_ID.eq(oldAuthorBookModel.getAuthorId()))
.and(authorBook.BOOK_ID.eq(oldAuthorBookModel.getBookId()))
.execute() == 1;
}
@Transactional
public boolean deleteByAuthorId(Long authorId)
{
return dsl.deleteFrom(authorBook)
.where(authorBook.AUTHOR_ID.eq(authorId))
.execute() > 0;
}
@Transactional
public boolean deleteByBookId(Long bookId)
{
return dsl.deleteFrom(authorBook)
.where(authorBook.BOOK_ID.eq(bookId))
.execute() > 0;
}
@Transactional
public boolean delete(Long authorId, Long bookId)
{
return dsl.deleteFrom(authorBook)
.where(authorBook.AUTHOR_ID.eq(authorId))
.and(authorBook.BOOK_ID.eq(bookId))
.execute() == 1;
}
@Transactional(readOnly = true)
public List<com.warumono.databases.public_.tables.pojos.AuthorBook> selectAll()
{
return dsl.selectFrom(authorBook)
.fetchInto(com.warumono.databases.public_.tables.pojos.AuthorBook.class);
}
@Transactional(readOnly = true)
public com.warumono.databases.public_.tables.pojos.AuthorBook selectOneById(Long authorId, Long bookId)
{
return dsl.selectFrom(authorBook)
.where(authorBook.AUTHOR_ID.eq(authorId))
.and(authorBook.BOOK_ID.eq(bookId))
.fetchOneInto(com.warumono.databases.public_.tables.pojos.AuthorBook.class);
}
public List<com.warumono.databases.public_.tables.pojos.AuthorBook> selectByAuthorId(Long authorId)
{
return dsl.select()
.from(authorBook)
.join(author)
.on(author.ID.eq(authorBook.AUTHOR_ID))
.where(authorBook.AUTHOR_ID.eq(authorId))
.fetchInto(com.warumono.databases.public_.tables.pojos.AuthorBook.class);
}
public List<com.warumono.databases.public_.tables.pojos.AuthorBook> selectByBookId(Long bookId)
{
return dsl.select()
.from(authorBook)
.join(book)
.on(book.ID.eq(authorBook.BOOK_ID))
.where(authorBook.BOOK_ID.eq(bookId))
.fetchInto(com.warumono.databases.public_.tables.pojos.AuthorBook.class);
}
}
|
package com.snowplowanalytics.client.nsq;
import com.google.common.base.Preconditions;
import io.netty.channel.EventLoopGroup;
import io.netty.handler.ssl.SslContext;
import org.apache.logging.log4j.LogManager;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Optional;
public class NSQConfig {
public enum Compression {NO_COMPRESSION, DEFLATE}
private String clientId;
private String hostname;
private boolean featureNegotiation = true;
private Integer heartbeatInterval = null;
private Integer outputBufferSize = null;
private Integer outputBufferTimeout = null;
private boolean tlsV1 = false;
private Compression compression = Compression.NO_COMPRESSION;
private Integer deflateLevel = null;
private Integer sampleRate = null;
private Optional<Integer> maxInFlight = Optional.empty();
private String userAgent = null;
private Integer msgTimeout = null;
private SslContext sslContext = null;
private EventLoopGroup eventLoopGroup = null;
public NSQConfig() {
try {
clientId = InetAddress.getLocalHost().getHostName();
hostname = InetAddress.getLocalHost().getCanonicalHostName();
userAgent = "JavaNSQClient";
} catch (UnknownHostException e) {
LogManager.getLogger(this).error("Local host name could not resolved", e);
}
}
public void setClientId(String clientId) {
this.clientId = clientId;
}
public boolean isFeatureNegotiation() {
return featureNegotiation;
}
public void setFeatureNegotiation(final boolean featureNegotiation) {
this.featureNegotiation = featureNegotiation;
}
public Integer getHeartbeatInterval() {
return heartbeatInterval;
}
public void setHeartbeatInterval(final Integer heartbeatInterval) {
this.heartbeatInterval = heartbeatInterval;
}
public Integer getOutputBufferSize() {
return outputBufferSize;
}
public NSQConfig setMaxInFlight(final int maxInFlight) {
this.maxInFlight = Optional.of(maxInFlight);
return this;
}
public Optional<Integer> getMaxInFlight() {
return maxInFlight;
}
public void setOutputBufferSize(final Integer outputBufferSize) {
this.outputBufferSize = outputBufferSize;
}
public Integer getOutputBufferTimeout() {
return outputBufferTimeout;
}
public void setOutputBufferTimeout(final Integer outputBufferTimeout) {
this.outputBufferTimeout = outputBufferTimeout;
}
public boolean isTlsV1() {
return tlsV1;
}
public Compression getCompression() {
return compression;
}
public void setCompression(final Compression compression) {
this.compression = compression;
}
public Integer getDeflateLevel() {
return deflateLevel;
}
public void setDeflateLevel(final Integer deflateLevel) {
this.deflateLevel = deflateLevel;
}
public Integer getSampleRate() {
return sampleRate;
}
public void setSampleRate(final Integer sampleRate) {
this.sampleRate = sampleRate;
}
public String getUserAgent() {
return userAgent;
}
public void setUserAgent(final String userAgent) {
this.userAgent = userAgent;
}
public Integer getMsgTimeout() {
return msgTimeout;
}
public void setMsgTimeout(final Integer msgTimeout) {
this.msgTimeout = msgTimeout;
}
public SslContext getSslContext() {
return sslContext;
}
public void setSslContext(SslContext sslContext) {
Preconditions.checkNotNull(sslContext);
tlsV1 = true;
this.sslContext = sslContext;
}
public EventLoopGroup getEventLoopGroup() {
return eventLoopGroup;
}
public void setEventLoopGroup(final EventLoopGroup eventLoopGroup) {
this.eventLoopGroup = eventLoopGroup;
}
@Override
public String toString() {
StringBuffer buffer = new StringBuffer();
buffer.append("{\"client_id\":\"" + clientId + "\", ");
buffer.append("\"hostname\":\"" + hostname + "\", ");
buffer.append("\"feature_negotiation\": true, ");
if (getHeartbeatInterval() != null) {
buffer.append("\"heartbeat_interval\":" + getHeartbeatInterval().toString() + ", ");
}
if (getOutputBufferSize() != null) {
buffer.append("\"output_buffer_size\":" + getOutputBufferSize().toString() + ", ");
}
if (getOutputBufferTimeout() != null) {
buffer.append("\"output_buffer_timeout\":" + getOutputBufferTimeout().toString() + ", ");
}
if (isTlsV1()) {
buffer.append("\"tls_v1\":" + isTlsV1() + ", ");
}
if (getCompression() == Compression.DEFLATE) {
buffer.append("\"deflate\": true, ");
}
if (getDeflateLevel() != null) {
buffer.append("\"deflate_level\":" + getDeflateLevel().toString() + ", ");
}
if (getSampleRate() != null) {
buffer.append("\"sample_rate\":" + getSampleRate().toString() + ", ");
}
if (getMsgTimeout() != null) {
buffer.append("\"msg_timeout\":" + getMsgTimeout().toString() + ", ");
}
buffer.append("\"user_agent\": \"" + userAgent + "\"}");
return buffer.toString();
}
}
|
/*
* Copyright 2018 Caleb.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gestionEscolar;
import java.io.IOException;
import java.net.URL;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.time.LocalDate;
import java.util.Optional;
import java.util.ResourceBundle;
import java.util.logging.Level;
import java.util.logging.Logger;
import javafx.collections.FXCollections;
import javafx.collections.ObservableList;
import javafx.event.ActionEvent;
import javafx.fxml.FXML;
import javafx.fxml.FXMLLoader;
import javafx.fxml.Initializable;
import javafx.scene.Node;
import javafx.scene.Parent;
import javafx.scene.Scene;
import javafx.scene.control.Alert;
import javafx.scene.control.Button;
import javafx.scene.control.ButtonType;
import javafx.scene.control.ComboBox;
import javafx.scene.control.DatePicker;
import javafx.scene.control.TableView;
import javafx.scene.control.TextField;
import javafx.scene.image.Image;
import javafx.scene.input.MouseEvent;
import javafx.scene.layout.Pane;
import javafx.scene.paint.Color;
import javafx.scene.text.Text;
import javafx.stage.Modality;
import javafx.stage.Stage;
import javafx.stage.WindowEvent;
import jefeAcademia.ProfesoresController;
import poliasistenciafx.ConsultarDatos;
import poliasistenciafx.Huella;
import poliasistenciafx.HuellaDigitalController;
import poliasistenciafx.baseDeDatos;
import poliasistenciafx.validaciones;
/**
* FXML Controller class
*
* @author Caleb
*/
public class RegistrarAlumnoController implements Initializable {
/**
* Initializes the controller class.
*/
@FXML
Text textInicio, textAlumnos;
@FXML
Button buttonContinuar, buttonCancelar, buttonAgregarHuella, buttonBorrarHuella, buttonGuardar;
@FXML
Pane paneDatosPersonales, paneHuellaDigital;
@FXML
TextField textfieldNombre, textfieldPaterno, textfieldMaterno, textfieldBoleta;
@FXML
ComboBox comboboxGenero;
@FXML
DatePicker datePickerNacimiento;
@FXML
private TableView<Huella> tableviewHuellas;
ObservableList<Huella> datos;
int pasoRegistro = 1, idPer = 0;
String mensajeBase = "";
ConsultarDatos consultar;
@Override
public void initialize(URL url, ResourceBundle rb) {
consultar = new ConsultarDatos();
datos = FXCollections.observableArrayList();
comboboxGenero.getItems().addAll(
"Masculino",
"Femenino",
"Indefinido"
);
textInicio.setOnMouseEntered((MouseEvent me) -> {
textInicio.setUnderline(true);
textInicio.setFill(Color.BLUE);
});
textInicio.setOnMouseExited((MouseEvent me) -> {
textInicio.setUnderline(false);
textInicio.setFill(Color.BLACK);
});
textInicio.setOnMouseClicked((MouseEvent me) -> {
irAInicio();
});
textAlumnos.setOnMouseEntered((MouseEvent me) -> {
textAlumnos.setUnderline(true);
textAlumnos.setFill(Color.BLUE);
});
textAlumnos.setOnMouseExited((MouseEvent me) -> {
textAlumnos.setUnderline(false);
textAlumnos.setFill(Color.BLACK);
});
textAlumnos.setOnMouseClicked((MouseEvent me) -> {
irAAlumnos();
});
buttonBorrarHuella.setDisable(true);
}
@FXML
public void ejecutarAccion(ActionEvent e) {
if (e.getSource().equals(buttonContinuar)){
registrarProfesor();
}
if (e.getSource().equals(buttonCancelar)) {
irAAlumnos();
}
if(e.getSource().equals(buttonGuardar)){
switch(buttonGuardar.getText()){
case "Omitir":
omitirGuardarHuella();
break;
case "Guardar y Salir":
Stage stageRegistrarAlumno = (Stage) (textAlumnos.getScene().getWindow());
FXMLLoader Alumnos = new FXMLLoader(getClass().getResource("Alumnos.fxml"));
try {
Scene sceneAlumnos = new Scene(Alumnos.load());
stageRegistrarAlumno.setScene(sceneAlumnos);
} catch (IOException ex) {
Logger.getLogger(ProfesoresController.class.getName()).log(Level.SEVERE, null, ex);
}
break;
}
}
}
@FXML
public void mostrarRegistroHuella(ActionEvent e) throws IOException{
Stage stage = new Stage();
HuellaDigitalController huella = new HuellaDigitalController(idPer);
FXMLLoader huellaDigital = new FXMLLoader(getClass().getResource("/poliasistenciafx/HuellaDigital.fxml"));
huellaDigital.setController(huella);
Parent root = huellaDigital.load();
stage.setScene(new Scene(root));
stage.setTitle("Huella Digital");
stage.getIcons().add(new Image("/imagenes/poliAsistencia.png"));
stage.initModality(Modality.WINDOW_MODAL);
stage.initOwner(((Node)e.getSource()).getScene().getWindow());
stage.setOnHidden((WindowEvent evento) -> {
huella.cerrar();
actualizarTabla();
});
stage.showAndWait();
}
@FXML
public void borrarHuella(ActionEvent e){
Huella huellax = tableviewHuellas.getSelectionModel().getSelectedItem();
if(huellax != null){
borrarHuellaDigital(Integer.parseInt(huellax.getId()), huellax.getNombre());
}
}
public void registrarProfesor(){
String nom, pat, mat, bol, fecha;
int gen;
nom = textfieldNombre.getText();
pat = textfieldPaterno.getText();
mat = textfieldMaterno.getText();
bol = textfieldBoleta.getText();
gen = comboboxGenero.getSelectionModel().getSelectedIndex() + 1;
LocalDate nacimiento = datePickerNacimiento.getValue();
if(nacimiento == null)
fecha = "";
else
fecha = nacimiento.toString();
System.out.println(fecha);
validaciones val = new validaciones();
if (val.soloLet(nom, "el nombre", 250)) {
if (val.soloLet(pat, "el apellido paterno", 250)) {
if (val.soloLet(mat, "el apellido materno", 250)) {
if (val.boleta(bol)) {
if (val.validarFecha(fecha)) {
if (gen != -1) {
baseDeDatos bd = new baseDeDatos();
try {
bd.conectar();
if (bol != null) {
ResultSet rs = bd.ejecuta("call spGuardaAlumnos("+gen+", '"+pat+"', '"+ mat+"', '"+nom+"', '"+fecha+"', 'sinasignar', '"+bol+"', 1);");
while (rs.next()) {
idPer = rs.getInt("idP");
mensajeBase = rs.getString("msj");
}
System.out.println("IDP: " + idPer);
}
} catch (SQLException ex) {
Logger.getLogger(ConsultarDatos.class.getName()).log(Level.SEVERE, null, ex);
}
if (idPer > 0) {
pasoRegistro++;
inicializarTabla();
paneDatosPersonales.setVisible(false);
paneHuellaDigital.setVisible(true);
buttonCancelar.setVisible(false);
}
} else {
comboboxGenero.requestFocus();
crearDialogo("Error", "Seleccione un genero", null);
}
} else {
datePickerNacimiento.requestFocus();
crearDialogo("Error", "Introduzca una fecha valida", null);
}
} else {
textfieldBoleta.requestFocus();
crearDialogo("Error", val.err(), null);
}
} else {
textfieldMaterno.requestFocus();
crearDialogo("Error", val.err(), null);
}
} else {
crearDialogo("Error", val.err(), null);
textfieldPaterno.requestFocus();
}
} else {
crearDialogo("Error", val.err(), null);
textfieldNombre.requestFocus();
}
}
public void irAAlumnos() {
Alert alert = new Alert(Alert.AlertType.CONFIRMATION);
alert.setTitle("Confirmar accion");
alert.setHeaderText("Estas seguro de que quieres cancelar el registro?");
alert.setContentText("Se perderan los datos ingresados");
Optional<ButtonType> resultado = alert.showAndWait();
if (resultado.get() == ButtonType.OK) {
Stage stageRegistrarAlumno = (Stage) (textAlumnos.getScene().getWindow());
FXMLLoader Alumnos = new FXMLLoader(getClass().getResource("Alumnos.fxml"));
try {
Scene sceneAlumnos = new Scene(Alumnos.load());
stageRegistrarAlumno.setScene(sceneAlumnos);
} catch (IOException ex) {
Logger.getLogger(ProfesoresController.class.getName()).log(Level.SEVERE, null, ex);
}
} else {
}
}
public void borrarHuellaDigital(int idHuella, String numeroHuella){
Alert alert = new Alert(Alert.AlertType.CONFIRMATION);
alert.setTitle("Confirmar acción");
alert.setHeaderText("¿Estas seguro de que quieres borrar esta huella digital?");
alert.setContentText("Huella a borrar: "+ numeroHuella);
Optional<ButtonType> resultado = alert.showAndWait();
if (resultado.get() == ButtonType.OK) {
if(consultar.borrarHuella(idHuella)){
Alert alertOk = new Alert(Alert.AlertType.INFORMATION);
alertOk.setTitle("PoliAsistencia");
alertOk.setHeaderText(null);
alertOk.setContentText("Huella Borrada");
alertOk.showAndWait();
actualizarTabla();
}
else{
Alert alertError = new Alert(Alert.AlertType.ERROR);
alertError.setTitle("PoliAsistencia");
alertError.setHeaderText("Error al borrar la huella digital");
alertError.setContentText("Lo sentimos, pero no se puede borrar la huella digital");
alertError.showAndWait();
actualizarTabla();
}
} else {
}
}
public void omitirGuardarHuella() {
Alert alert = new Alert(Alert.AlertType.CONFIRMATION);
alert.setTitle("Confirmar accion");
alert.setHeaderText("Estas seguro de que quieres omitir el registro de las huellas digitales?");
alert.setContentText("Puedes configurar tus hellas digitales posteriormente en el apartado de modificacón de datos");
Optional<ButtonType> resultado = alert.showAndWait();
if (resultado.get() == ButtonType.OK) {
Stage stageRegistrarAlumno = (Stage) (textAlumnos.getScene().getWindow());
FXMLLoader Alumnos = new FXMLLoader(getClass().getResource("Alumnos.fxml"));
try {
Scene sceneAlumnos = new Scene(Alumnos.load());
stageRegistrarAlumno.setScene(sceneAlumnos);
} catch (IOException ex) {
Logger.getLogger(ProfesoresController.class.getName()).log(Level.SEVERE, null, ex);
}
} else {
}
}
public void irAInicio() {
Alert alert = new Alert(Alert.AlertType.CONFIRMATION);
alert.setTitle("Confirmar accion");
alert.setHeaderText("Estas seguro de que quieres cancelar el registro?");
alert.setContentText("Se perderan los datos ingresados");
Optional<ButtonType> resultado = alert.showAndWait();
if (resultado.get() == ButtonType.OK) {
Stage stageRegistrarProfesor = (Stage) (textInicio.getScene().getWindow());
FXMLLoader inicioGestion = new FXMLLoader(getClass().getResource("Inicio.fxml"));
try {
Scene sceneInicioGestion = new Scene(inicioGestion.load());
stageRegistrarProfesor.setScene(sceneInicioGestion);
} catch (IOException ex) {
Logger.getLogger(ProfesoresController.class.getName()).log(Level.SEVERE, null, ex);
}
} else {
}
}
public void crearDialogo(String titulo, String header, String contexto) {
Alert alert = new Alert(Alert.AlertType.WARNING);
alert.setTitle(titulo);
alert.setHeaderText(header);
alert.setContentText(contexto);
alert.showAndWait();
}
public void inicializarTabla(){
datos = consultar.obtenerHuellasDigitales(idPer);
tableviewHuellas.setItems(datos);
tableviewHuellas.getSelectionModel().selectedItemProperty().addListener((observable, viejaSeleccion, nuevaSeleccion) -> {
buttonBorrarHuella.setDisable(nuevaSeleccion == null);
});
}
public void actualizarTabla(){
datos.removeAll(datos);
datos = consultar.obtenerHuellasDigitales(idPer);
tableviewHuellas.setItems(datos);
if(!datos.isEmpty()){
buttonGuardar.setText("Guardar y Salir");
buttonGuardar.setStyle("-fx-text-fill: #2196F3");
if(datos.size() == 10){
buttonAgregarHuella.setDisable(true);
}
else{
buttonAgregarHuella.setDisable(false);
}
}
else{
buttonGuardar.setText("Omitir");
buttonGuardar.setStyle("-fx-text-fill: #f44242");
}
}
}
|
package com.michaelflisar.changelog;
import com.michaelflisar.changelog.tags.ChangelogTagBugfix;
import com.michaelflisar.changelog.tags.ChangelogTagInfo;
import com.michaelflisar.changelog.tags.ChangelogTagNew;
import com.michaelflisar.changelog.tags.IChangelogTag;
import java.util.HashSet;
/**
* Created by flisar on 07.03.2018.
*/
public class ChangelogSetup {
private final HashSet<IChangelogTag> mValidTags = new HashSet<>();
private static ChangelogSetup INSTANCE = null;
private ChangelogSetup() {
// register default tag types
mValidTags.add(new ChangelogTagInfo());
mValidTags.add(new ChangelogTagNew());
mValidTags.add(new ChangelogTagBugfix());
}
public static ChangelogSetup get() {
if (INSTANCE == null) {
INSTANCE = new ChangelogSetup();
}
return INSTANCE;
}
public ChangelogSetup clearTags() {
mValidTags.clear();
return this;
}
public ChangelogSetup registerTag(IChangelogTag tag) {
mValidTags.add(tag);
return this;
}
public IChangelogTag findTag(String tag) {
for (IChangelogTag t : mValidTags) {
if (t.getXMLTagName().equals(tag)) {
return t;
}
}
return null;
}
}
|
package io.github.braully.constant;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Individual attribute
*
* @author braully
*/
@Target(value = {ElementType.METHOD, ElementType.FIELD, ElementType.TYPE})
@Retention(value = RetentionPolicy.RUNTIME)
public @interface Attr {
/**
* array of string {name, val}
*
* @return
*/
public String[] value() default {"", ""};
/**
* Name of attribute
*
* @return
*/
public String name() default "";
/**
* Value of atribute
*
* @return
*/
public String val() default "";
}
|
package redis.clients.jedis;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import redis.clients.jedis.util.JedisByteHashMap;
import redis.clients.jedis.util.SafeEncoder;
public final class BuilderFactory {
public static final Builder<Double> DOUBLE = new Builder<Double>() {
@Override
public Double build(Object data) {
String string = STRING.build(data);
if (string == null) return null;
try {
return Double.valueOf(string);
} catch (NumberFormatException e) {
if (string.equals("inf") || string.equals("+inf")) return Double.POSITIVE_INFINITY;
if (string.equals("-inf")) return Double.NEGATIVE_INFINITY;
throw e;
}
}
@Override
public String toString() {
return "double";
}
};
public static final Builder<Boolean> BOOLEAN = new Builder<Boolean>() {
@Override
public Boolean build(Object data) {
return ((Long) data) == 1;
}
@Override
public String toString() {
return "boolean";
}
};
public static final Builder<byte[]> BYTE_ARRAY = new Builder<byte[]>() {
@Override
public byte[] build(Object data) {
return ((byte[]) data); // deleted == 1
}
@Override
public String toString() {
return "byte[]";
}
};
public static final Builder<Long> LONG = new Builder<Long>() {
@Override
public Long build(Object data) {
return (Long) data;
}
@Override
public String toString() {
return "long";
}
};
public static final Builder<String> STRING = new Builder<String>() {
@Override
public String build(Object data) {
return data == null ? null : SafeEncoder.encode((byte[]) data);
}
@Override
public String toString() {
return "string";
}
};
public static final Builder<List<String>> STRING_LIST = new Builder<List<String>>() {
@Override
@SuppressWarnings("unchecked")
public List<String> build(Object data) {
if (null == data) {
return null;
}
List<byte[]> l = (List<byte[]>) data;
final ArrayList<String> result = new ArrayList<>(l.size());
for (final byte[] barray : l) {
if (barray == null) {
result.add(null);
} else {
result.add(SafeEncoder.encode(barray));
}
}
return result;
}
@Override
public String toString() {
return "List<String>";
}
};
public static final Builder<Map<String, String>> STRING_MAP = new Builder<Map<String, String>>() {
@Override
@SuppressWarnings("unchecked")
public Map<String, String> build(Object data) {
final List<byte[]> flatHash = (List<byte[]>) data;
final Map<String, String> hash = new HashMap<>(flatHash.size()/2, 1);
final Iterator<byte[]> iterator = flatHash.iterator();
while (iterator.hasNext()) {
hash.put(SafeEncoder.encode(iterator.next()), SafeEncoder.encode(iterator.next()));
}
return hash;
}
@Override
public String toString() {
return "Map<String, String>";
}
};
public static final Builder<Map<String, String>> PUBSUB_NUMSUB_MAP = new Builder<Map<String, String>>() {
@Override
@SuppressWarnings("unchecked")
public Map<String, String> build(Object data) {
final List<Object> flatHash = (List<Object>) data;
final Map<String, String> hash = new HashMap<>(flatHash.size()/2, 1);
final Iterator<Object> iterator = flatHash.iterator();
while (iterator.hasNext()) {
hash.put(SafeEncoder.encode((byte[]) iterator.next()),
String.valueOf((Long) iterator.next()));
}
return hash;
}
@Override
public String toString() {
return "PUBSUB_NUMSUB_MAP<String, String>";
}
};
public static final Builder<Set<String>> STRING_SET = new Builder<Set<String>>() {
@Override
@SuppressWarnings("unchecked")
public Set<String> build(Object data) {
if (null == data) {
return null;
}
List<byte[]> l = (List<byte[]>) data;
final Set<String> result = new HashSet<>(l.size(), 1);
for (final byte[] barray : l) {
if (barray == null) {
result.add(null);
} else {
result.add(SafeEncoder.encode(barray));
}
}
return result;
}
@Override
public String toString() {
return "Set<String>";
}
};
public static final Builder<List<byte[]>> BYTE_ARRAY_LIST = new Builder<List<byte[]>>() {
@Override
@SuppressWarnings("unchecked")
public List<byte[]> build(Object data) {
if (null == data) {
return null;
}
List<byte[]> l = (List<byte[]>) data;
return l;
}
@Override
public String toString() {
return "List<byte[]>";
}
};
public static final Builder<Set<byte[]>> BYTE_ARRAY_ZSET = new Builder<Set<byte[]>>() {
@Override
@SuppressWarnings("unchecked")
public Set<byte[]> build(Object data) {
if (null == data) {
return null;
}
List<byte[]> l = (List<byte[]>) data;
final Set<byte[]> result = new LinkedHashSet<>(l);
for (final byte[] barray : l) {
if (barray == null) {
result.add(null);
} else {
result.add(barray);
}
}
return result;
}
@Override
public String toString() {
return "ZSet<byte[]>";
}
};
public static final Builder<Map<byte[], byte[]>> BYTE_ARRAY_MAP = new Builder<Map<byte[], byte[]>>() {
@Override
@SuppressWarnings("unchecked")
public Map<byte[], byte[]> build(Object data) {
final List<byte[]> flatHash = (List<byte[]>) data;
final Map<byte[], byte[]> hash = new JedisByteHashMap();
final Iterator<byte[]> iterator = flatHash.iterator();
while (iterator.hasNext()) {
hash.put(iterator.next(), iterator.next());
}
return hash;
}
@Override
public String toString() {
return "Map<byte[], byte[]>";
}
};
public static final Builder<Set<String>> STRING_ZSET = new Builder<Set<String>>() {
@Override
@SuppressWarnings("unchecked")
public Set<String> build(Object data) {
if (null == data) {
return null;
}
List<byte[]> l = (List<byte[]>) data;
final Set<String> result = new LinkedHashSet<>(l.size(), 1);
for (final byte[] barray : l) {
if (barray == null) {
result.add(null);
} else {
result.add(SafeEncoder.encode(barray));
}
}
return result;
}
@Override
public String toString() {
return "ZSet<String>";
}
};
public static final Builder<Set<Tuple>> TUPLE_ZSET = new Builder<Set<Tuple>>() {
@Override
@SuppressWarnings("unchecked")
public Set<Tuple> build(Object data) {
if (null == data) {
return null;
}
List<byte[]> l = (List<byte[]>) data;
final Set<Tuple> result = new LinkedHashSet<>(l.size()/2, 1);
Iterator<byte[]> iterator = l.iterator();
while (iterator.hasNext()) {
result.add(new Tuple(iterator.next(), DOUBLE.build(iterator.next())));
}
return result;
}
@Override
public String toString() {
return "ZSet<Tuple>";
}
};
public static final Builder<Tuple> TUPLE = new Builder<Tuple>() {
@Override
@SuppressWarnings("unchecked")
public Tuple build(Object data) {
List<byte[]> l = (List<byte[]>) data; // never null
if (l.isEmpty()) {
return null;
}
return new Tuple(l.get(0), DOUBLE.build(l.get(1)));
}
@Override
public String toString() {
return "Tuple";
}
};
public static final Builder<Object> EVAL_RESULT = new Builder<Object>() {
@Override
public Object build(Object data) {
return evalResult(data);
}
@Override
public String toString() {
return "Eval<Object>";
}
private Object evalResult(Object result) {
if (result instanceof byte[]) return SafeEncoder.encode((byte[]) result);
if (result instanceof List<?>) {
List<?> list = (List<?>) result;
List<Object> listResult = new ArrayList<>(list.size());
for (Object bin : list) {
listResult.add(evalResult(bin));
}
return listResult;
}
return result;
}
};
public static final Builder<Object> EVAL_BINARY_RESULT = new Builder<Object>() {
@Override
public Object build(Object data) {
return evalResult(data);
}
@Override
public String toString() {
return "Eval<Object>";
}
private Object evalResult(Object result) {
if (result instanceof List<?>) {
List<?> list = (List<?>) result;
List<Object> listResult = new ArrayList<>(list.size());
for (Object bin : list) {
listResult.add(evalResult(bin));
}
return listResult;
}
return result;
}
};
public static final Builder<List<GeoCoordinate>> GEO_COORDINATE_LIST = new Builder<List<GeoCoordinate>>() {
@Override
public List<GeoCoordinate> build(Object data) {
if (null == data) {
return null;
}
return interpretGeoposResult((List<Object>) data);
}
@Override
public String toString() {
return "List<GeoCoordinate>";
}
private List<GeoCoordinate> interpretGeoposResult(List<Object> responses) {
List<GeoCoordinate> responseCoordinate = new ArrayList<>(responses.size());
for (Object response : responses) {
if (response == null) {
responseCoordinate.add(null);
} else {
List<Object> respList = (List<Object>) response;
GeoCoordinate coord = new GeoCoordinate(DOUBLE.build(respList.get(0)),
DOUBLE.build(respList.get(1)));
responseCoordinate.add(coord);
}
}
return responseCoordinate;
}
};
public static final Builder<List<GeoRadiusResponse>> GEORADIUS_WITH_PARAMS_RESULT = new Builder<List<GeoRadiusResponse>>() {
@Override
public List<GeoRadiusResponse> build(Object data) {
if (data == null) {
return null;
}
List<Object> objectList = (List<Object>) data;
List<GeoRadiusResponse> responses = new ArrayList<>(objectList.size());
if (objectList.isEmpty()) {
return responses;
}
if (objectList.get(0) instanceof List<?>) {
// list of members with additional informations
GeoRadiusResponse resp;
for (Object obj : objectList) {
List<Object> informations = (List<Object>) obj;
resp = new GeoRadiusResponse((byte[]) informations.get(0));
int size = informations.size();
for (int idx = 1; idx < size; idx++) {
Object info = informations.get(idx);
if (info instanceof List<?>) {
// coordinate
List<Object> coord = (List<Object>) info;
resp.setCoordinate(new GeoCoordinate(DOUBLE.build(coord.get(0)),
DOUBLE.build(coord.get(1))));
} else {
// distance
resp.setDistance(DOUBLE.build(info));
}
}
responses.add(resp);
}
} else {
// list of members
for (Object obj : objectList) {
responses.add(new GeoRadiusResponse((byte[]) obj));
}
}
return responses;
}
@Override
public String toString() {
return "GeoRadiusWithParamsResult";
}
};
public static final Builder<List<Module>> MODULE_LIST = new Builder<List<Module>>() {
@Override
public List<Module> build(Object data) {
if (data == null) {
return null;
}
List<List<Object>> objectList = (List<List<Object>>) data;
List<Module> responses = new ArrayList<>(objectList.size());
if (objectList.isEmpty()) {
return responses;
}
for (List<Object> moduleResp: objectList) {
Module m = new Module(SafeEncoder.encode((byte[]) moduleResp.get(1)), ((Long) moduleResp.get(3)).intValue());
responses.add(m);
}
return responses;
}
@Override
public String toString() {
return "List<Module>";
}
};
public static final Builder<List<Long>> LONG_LIST = new Builder<List<Long>>() {
@Override
@SuppressWarnings("unchecked")
public List<Long> build(Object data) {
if (null == data) {
return null;
}
return (List<Long>) data;
}
@Override
public String toString() {
return "List<Long>";
}
};
public static final Builder<StreamEntryID> STREAM_ENTRY_ID = new Builder<StreamEntryID>() {
@Override
@SuppressWarnings("unchecked")
public StreamEntryID build(Object data) {
if (null == data) {
return null;
}
String id = SafeEncoder.encode((byte[])data);
return new StreamEntryID(id);
}
@Override
public String toString() {
return "StreamEntryID";
}
};
public static final Builder<List<StreamEntry>> STREAM_ENTRY_LIST = new Builder<List<StreamEntry>>() {
    /**
     * Decodes a list of stream entries. Each reply element is a two-part list:
     * index 0 holds the entry id as a {@code byte[]}, index 1 a flat list of
     * alternating field/value byte arrays.
     *
     * @param data the raw reply; may be {@code null}
     * @return the decoded entries, or {@code null} for a null reply
     */
    @Override
    @SuppressWarnings("unchecked")
    public List<StreamEntry> build(Object data) {
        if (null == data) {
            return null;
        }
        List<ArrayList<Object>> objectList = (List<ArrayList<Object>>) data;
        // Only a capacity hint; one entry is added per reply element.
        List<StreamEntry> responses = new ArrayList<>(objectList.size()/2);
        if (objectList.isEmpty()) {
            return responses;
        }
        for(ArrayList<Object> res : objectList) {
            String entryIdString = SafeEncoder.encode((byte[])res.get(0));
            StreamEntryID entryID = new StreamEntryID(entryIdString);
            List<byte[]> hash = (List<byte[]>)res.get(1);
            Iterator<byte[]> hashIterator = hash.iterator();
            Map<String, String> map = new HashMap<>(hash.size()/2);
            while(hashIterator.hasNext()) {
                // Consumes two elements per iteration: field name first, then its
                // value (Java evaluates the put() arguments left-to-right).
                map.put(SafeEncoder.encode(hashIterator.next()), SafeEncoder.encode(hashIterator.next()));
            }
            responses.add(new StreamEntry(entryID, map));
        }
        return responses;
    }
    @Override
    public String toString() {
        return "List<StreamEntry>";
    }
};
public static final Builder<StreamEntry> STREAM_ENTRY = new Builder<StreamEntry>() {
    /**
     * Decodes a single stream entry: reply index 0 is the entry id, index 1 a
     * flat list of alternating field/value byte arrays.
     *
     * @param data the raw reply; may be {@code null}
     * @return the decoded entry, or {@code null} for a null or empty reply
     */
    @Override
    @SuppressWarnings("unchecked")
    public StreamEntry build(Object data) {
        if (data == null) {
            return null;
        }
        List<Object> objectList = (List<Object>) data;
        if (objectList.isEmpty()) {
            return null;
        }
        StreamEntryID entryID =
                new StreamEntryID(SafeEncoder.encode((byte[]) objectList.get(0)));
        List<byte[]> hash = (List<byte[]>) objectList.get(1);
        Map<String, String> map = new HashMap<>(hash.size() / 2);
        Iterator<byte[]> hashIterator = hash.iterator();
        while (hashIterator.hasNext()) {
            // Consume the field name first, then its value (pairs are flattened).
            String field = SafeEncoder.encode(hashIterator.next());
            map.put(field, SafeEncoder.encode(hashIterator.next()));
        }
        return new StreamEntry(entryID, map);
    }

    @Override
    public String toString() {
        return "StreamEntry";
    }
};
public static final Builder<List<StreamPendingEntry>> STREAM_PENDING_ENTRY_LIST = new Builder<List<StreamPendingEntry>>() {
    /**
     * Decodes a pending-entries reply. Each element is a 4-part list:
     * [entry id, consumer name, idle time (ms), delivery count].
     *
     * @param data the raw reply; may be {@code null}
     * @return the decoded pending entries, or {@code null} for a null reply
     */
    @Override
    @SuppressWarnings("unchecked")
    public List<StreamPendingEntry> build(Object data) {
        if (null == data) {
            return null;
        }
        List<Object> streamsEntries = (List<Object>)data;
        List<StreamPendingEntry> result = new ArrayList<>(streamsEntries.size());
        for(Object streamObj : streamsEntries) {
            List<Object> stream = (List<Object>)streamObj;
            String id = SafeEncoder.encode((byte[])stream.get(0));
            String consumerName = SafeEncoder.encode((byte[])stream.get(1));
            long idleTime = BuilderFactory.LONG.build(stream.get(2));
            long deliveredTimes = BuilderFactory.LONG.build(stream.get(3));
            result.add(new StreamPendingEntry(new StreamEntryID(id), consumerName, idleTime, deliveredTimes));
        }
        return result;
    }
    @Override
    public String toString() {
        return "List<StreamPendingEntry>";
    }
};
public static final Builder<StreamInfo> STREAM_INFO = new Builder<StreamInfo>() {
    // Maps each stream-info reply field name to the builder that decodes its value.
    Map<String,Builder> mappingFunctions = createDecoderMap();
    private Map<String, Builder> createDecoderMap() {
        Map<String,Builder> tempMappingFunctions = new HashMap<>();
        tempMappingFunctions.put(StreamInfo.LAST_GENERATED_ID,STREAM_ENTRY_ID);
        tempMappingFunctions.put(StreamInfo.FIRST_ENTRY,STREAM_ENTRY);
        tempMappingFunctions.put(StreamInfo.LENGTH, LONG);
        tempMappingFunctions.put(StreamInfo.RADIX_TREE_KEYS, LONG);
        tempMappingFunctions.put(StreamInfo.RADIX_TREE_NODES, LONG);
        tempMappingFunctions.put(StreamInfo.LAST_ENTRY,STREAM_ENTRY);
        tempMappingFunctions.put(StreamInfo.GROUPS, LONG);
        return tempMappingFunctions;
    }
    /**
     * Decodes a stream-info reply (a flat list of alternating field names and
     * values) into a {@link StreamInfo}.
     *
     * @param data the raw reply; may be {@code null}
     * @return the decoded info, or {@code null} for a null reply
     */
    @Override
    @SuppressWarnings("unchecked")
    public StreamInfo build(Object data) {
        if (null == data) {
            return null;
        }
        List<Object> streamsEntries = (List<Object>)data;
        Iterator<Object> iterator = streamsEntries.iterator();
        StreamInfo streamInfo = new StreamInfo(
        createMapFromDecodingFunctions(iterator,mappingFunctions));
        return streamInfo;
    }
    @Override
    public String toString() {
        return "StreamInfo";
    }
};
public static final Builder<List<StreamGroupInfo>> STREAM_GROUP_INFO_LIST = new Builder<List<StreamGroupInfo>>() {
    // Maps each group-info reply field name to the builder that decodes its value.
    Map<String,Builder> mappingFunctions = createDecoderMap();
    private Map<String, Builder> createDecoderMap() {
        Map<String,Builder> tempMappingFunctions = new HashMap<>();
        tempMappingFunctions.put(StreamGroupInfo.NAME,STRING);
        tempMappingFunctions.put(StreamGroupInfo.CONSUMERS, LONG);
        tempMappingFunctions.put(StreamGroupInfo.PENDING, LONG);
        tempMappingFunctions.put(StreamGroupInfo.LAST_DELIVERED,STREAM_ENTRY_ID);
        return tempMappingFunctions;
    }
    /**
     * Decodes a list of group-info replies; each element is itself a flat list
     * of alternating field names and values.
     *
     * @param data the raw reply; may be {@code null}
     * @return one {@link StreamGroupInfo} per group, or {@code null} for a null reply
     */
    @Override
    @SuppressWarnings("unchecked")
    public List<StreamGroupInfo> build(Object data) {
        if (null == data) {
            return null;
        }
        List<StreamGroupInfo> list = new ArrayList<>();
        List<Object> streamsEntries = (List<Object>)data;
        Iterator<Object> groupsArray = streamsEntries.iterator();
        while (groupsArray.hasNext()) {
            List<Object> groupInfo = (List<Object>) groupsArray.next();
            Iterator<Object> groupInfoIterator = groupInfo.iterator();
            StreamGroupInfo streamGroupInfo = new StreamGroupInfo(
            createMapFromDecodingFunctions(groupInfoIterator,mappingFunctions));
            list.add(streamGroupInfo);
        }
        return list;
    }
    @Override
    public String toString() {
        return "List<StreamGroupInfo>";
    }
};
public static final Builder<List<StreamConsumersInfo>> STREAM_CONSUMERS_INFO_LIST = new Builder<List<StreamConsumersInfo>>() {
    // Maps each consumer-info reply field name to the builder that decodes its value.
    Map<String, Builder> mappingFunctions = createDecoderMap();
    private Map<String, Builder> createDecoderMap() {
        Map<String, Builder> tempMappingFunctions = new HashMap<>();
        tempMappingFunctions.put(StreamConsumersInfo.NAME, STRING);
        tempMappingFunctions.put(StreamConsumersInfo.IDLE, LONG);
        // Fixed: was StreamGroupInfo.PENDING, a copy-paste from the group-info
        // decoder above; the consumer decoder must key on its own constant.
        tempMappingFunctions.put(StreamConsumersInfo.PENDING, LONG);
        // NOTE(review): LAST_DELIVERED is a group-info field and does not occur
        // in consumer-info replies; kept as a harmless dead entry pending
        // confirmation that nothing relies on it.
        tempMappingFunctions.put(StreamGroupInfo.LAST_DELIVERED, STRING);
        return tempMappingFunctions;
    }
    /**
     * Decodes a list of consumer-info replies; each element is itself a flat
     * list of alternating field names and values.
     *
     * @param data the raw reply; may be {@code null}
     * @return one {@link StreamConsumersInfo} per consumer, or {@code null} for a null reply
     */
    @Override
    @SuppressWarnings("unchecked")
    public List<StreamConsumersInfo> build(Object data) {
        if (null == data) {
            return null;
        }
        List<StreamConsumersInfo> list = new ArrayList<>();
        List<Object> streamsEntries = (List<Object>) data;
        Iterator<Object> consumersArray = streamsEntries.iterator();
        while (consumersArray.hasNext()) {
            List<Object> consumerInfo = (List<Object>) consumersArray.next();
            Iterator<Object> consumerInfoIterator = consumerInfo.iterator();
            StreamConsumersInfo streamConsumersInfo = new StreamConsumersInfo(
                    createMapFromDecodingFunctions(consumerInfoIterator, mappingFunctions));
            list.add(streamConsumersInfo);
        }
        return list;
    }
    @Override
    public String toString() {
        return "List<StreamConsumersInfo>";
    }
};
public static final Builder<Object> OBJECT = new Builder<Object>() {
    /** Identity builder: returns the raw reply object unchanged. */
    @Override
    public Object build(Object data) {
        return data;
    }
    @Override
    public String toString() {
        return "Object";
    }
};
// Utility class: all members are static, so instantiation is forbidden.
private BuilderFactory() {
    throw new InstantiationError( "Must not instantiate this class" );
}
/**
 * Consumes alternating key/value elements from {@code iterator}, decoding each
 * value with the builder registered under its key.
 *
 * @param iterator reply elements in key, value, key, value... order
 * @param mappingFunctions decoder to use per known key
 * @return decoded values keyed by their reply field name
 */
private static Map<String,Object> createMapFromDecodingFunctions( Iterator<Object> iterator, Map<String,Builder> mappingFunctions) {
    Map<String,Object> resultMap = new HashMap<>();
    while (iterator.hasNext()) {
        String mapKey = STRING.build(iterator.next());
        if (mappingFunctions.containsKey(mapKey)) {
            resultMap.put(mapKey, mappingFunctions.get(mapKey).build(iterator.next()));
        } else { //For future - if we don't find an element in our builder map
            // Unknown key: try every registered builder and keep the first one
            // that does not throw ClassCastException.
            Object unknownData = iterator.next();
            for (Builder b:mappingFunctions.values()) {
                try {
                    resultMap.put(mapKey,b.build(unknownData));
                    // NOTE(review): stops at the first non-throwing builder, so
                    // the decoded type depends on map iteration order — confirm
                    // this best-effort fallback is intended before tightening.
                    break;
                } catch (ClassCastException e) {
                    //We continue with next builder
                }
            }
        }
    }
    return resultMap;
}
}
|
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package io.netty.channel.socket.nio;
import io.netty.buffer.ByteBuf;
import io.netty.channel.AddressedEnvelope;
import io.netty.channel.Channel;
import io.netty.channel.ChannelException;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelMetadata;
import io.netty.channel.ChannelOption;
import io.netty.channel.ChannelOutboundBuffer;
import io.netty.channel.ChannelPromise;
import io.netty.channel.DefaultAddressedEnvelope;
import io.netty.channel.RecvByteBufAllocator;
import io.netty.channel.nio.AbstractNioMessageChannel;
import io.netty.channel.socket.DatagramChannelConfig;
import io.netty.channel.socket.DatagramPacket;
import io.netty.channel.socket.InternetProtocolFamily;
import io.netty.util.internal.ObjectUtil;
import io.netty.util.internal.SocketUtils;
import io.netty.util.internal.PlatformDependent;
import io.netty.util.internal.StringUtil;
import io.netty.util.internal.SuppressJava6Requirement;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.NetworkInterface;
import java.net.SocketAddress;
import java.net.SocketException;
import java.nio.ByteBuffer;
import java.nio.channels.DatagramChannel;
import java.nio.channels.MembershipKey;
import java.nio.channels.SelectionKey;
import java.nio.channels.spi.SelectorProvider;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
/**
* An NIO datagram {@link Channel} that sends and receives an
* {@link AddressedEnvelope AddressedEnvelope<ByteBuf, SocketAddress>}.
*
* @see AddressedEnvelope
* @see DatagramPacket
*/
public final class NioDatagramChannel
        extends AbstractNioMessageChannel implements io.netty.channel.socket.DatagramChannel {

    // hasDisconnect = true: a datagram channel supports disconnect().
    private static final ChannelMetadata METADATA = new ChannelMetadata(true);
    private static final SelectorProvider DEFAULT_SELECTOR_PROVIDER = SelectorProvider.provider();
    // Appended to the error raised for unsupported outbound message types.
    private static final String EXPECTED_TYPES =
            " (expected: " + StringUtil.simpleClassName(DatagramPacket.class) + ", " +
            StringUtil.simpleClassName(AddressedEnvelope.class) + '<' +
            StringUtil.simpleClassName(ByteBuf.class) + ", " +
            StringUtil.simpleClassName(SocketAddress.class) + ">, " +
            StringUtil.simpleClassName(ByteBuf.class) + ')';

    private final DatagramChannelConfig config;

    // Multicast membership keys per group address; lazily created and guarded
    // by synchronized (this).
    private Map<InetAddress, List<MembershipKey>> memberships;

    private static DatagramChannel newSocket(SelectorProvider provider) {
        try {
            /**
             * Use the {@link SelectorProvider} to open {@link SocketChannel} and so remove condition in
             * {@link SelectorProvider#provider()} which is called by each DatagramChannel.open() otherwise.
             *
             * See <a href="https://github.com/netty/netty/issues/2308">#2308</a>.
             */
            return provider.openDatagramChannel();
        } catch (IOException e) {
            throw new ChannelException("Failed to open a socket.", e);
        }
    }

    // Opens a channel bound to the given protocol family; falls back to the
    // family-less overload when ipFamily is null. Requires Java 7+.
    @SuppressJava6Requirement(reason = "Usage guarded by java version check")
    private static DatagramChannel newSocket(SelectorProvider provider, InternetProtocolFamily ipFamily) {
        if (ipFamily == null) {
            return newSocket(provider);
        }
        checkJavaVersion();
        try {
            return provider.openDatagramChannel(ProtocolFamilyConverter.convert(ipFamily));
        } catch (IOException e) {
            throw new ChannelException("Failed to open a socket.", e);
        }
    }

    // Protocol-family selection and multicast membership need NIO.2 (Java 7).
    private static void checkJavaVersion() {
        if (PlatformDependent.javaVersion() < 7) {
            throw new UnsupportedOperationException("Only supported on java 7+.");
        }
    }

    /**
     * Create a new instance which will use the Operation Systems default {@link InternetProtocolFamily}.
     */
    public NioDatagramChannel() {
        this(newSocket(DEFAULT_SELECTOR_PROVIDER));
    }

    /**
     * Create a new instance using the given {@link SelectorProvider}
     * which will use the Operation Systems default {@link InternetProtocolFamily}.
     */
    public NioDatagramChannel(SelectorProvider provider) {
        this(newSocket(provider));
    }

    /**
     * Create a new instance using the given {@link InternetProtocolFamily}. If {@code null} is used it will depend
     * on the Operation Systems default which will be chosen.
     */
    public NioDatagramChannel(InternetProtocolFamily ipFamily) {
        this(newSocket(DEFAULT_SELECTOR_PROVIDER, ipFamily));
    }

    /**
     * Create a new instance using the given {@link SelectorProvider} and {@link InternetProtocolFamily}.
     * If {@link InternetProtocolFamily} is {@code null} it will depend on the Operation Systems default
     * which will be chosen.
     */
    public NioDatagramChannel(SelectorProvider provider, InternetProtocolFamily ipFamily) {
        this(newSocket(provider, ipFamily));
    }

    /**
     * Create a new instance from the given {@link DatagramChannel}.
     */
    public NioDatagramChannel(DatagramChannel socket) {
        // Register for OP_READ; no parent channel (datagram channels are not accepted).
        super(null, socket, SelectionKey.OP_READ);
        config = new NioDatagramChannelConfig(this, socket);
    }

    @Override
    public ChannelMetadata metadata() {
        return METADATA;
    }

    @Override
    public DatagramChannelConfig config() {
        return config;
    }

    // Active when open and either bound, or (with the deprecated
    // ACTIVE_ON_REGISTRATION option) merely registered.
    @Override
    @SuppressWarnings("deprecation")
    public boolean isActive() {
        DatagramChannel ch = javaChannel();
        return ch.isOpen() && (
                config.getOption(ChannelOption.DATAGRAM_CHANNEL_ACTIVE_ON_REGISTRATION) && isRegistered()
                || ch.socket().isBound());
    }

    @Override
    public boolean isConnected() {
        return javaChannel().isConnected();
    }

    @Override
    protected DatagramChannel javaChannel() {
        return (DatagramChannel) super.javaChannel();
    }

    @Override
    protected SocketAddress localAddress0() {
        return javaChannel().socket().getLocalSocketAddress();
    }

    @Override
    protected SocketAddress remoteAddress0() {
        return javaChannel().socket().getRemoteSocketAddress();
    }

    @Override
    protected void doBind(SocketAddress localAddress) throws Exception {
        doBind0(localAddress);
    }

    // Binds via the channel on Java 7+, via the legacy socket otherwise.
    private void doBind0(SocketAddress localAddress) throws Exception {
        if (PlatformDependent.javaVersion() >= 7) {
            SocketUtils.bind(javaChannel(), localAddress);
        } else {
            javaChannel().socket().bind(localAddress);
        }
    }

    // Connects (fixes the default remote peer); closes the channel if the
    // connect attempt throws.
    @Override
    protected boolean doConnect(SocketAddress remoteAddress,
            SocketAddress localAddress) throws Exception {
        if (localAddress != null) {
            doBind0(localAddress);
        }
        boolean success = false;
        try {
            javaChannel().connect(remoteAddress);
            success = true;
            return true;
        } finally {
            if (!success) {
                doClose();
            }
        }
    }

    // Never reached: doConnect() always completes immediately for datagrams.
    @Override
    protected void doFinishConnect() throws Exception {
        throw new Error();
    }

    @Override
    protected void doDisconnect() throws Exception {
        javaChannel().disconnect();
    }

    @Override
    protected void doClose() throws Exception {
        javaChannel().close();
    }

    // Receives at most one datagram into an allocated buffer and wraps it in a
    // DatagramPacket. Returns 1 on success, 0 when nothing was pending; the
    // buffer is released unless ownership was handed to the packet.
    @Override
    protected int doReadMessages(List<Object> buf) throws Exception {
        DatagramChannel ch = javaChannel();
        DatagramChannelConfig config = config();
        RecvByteBufAllocator.Handle allocHandle = unsafe().recvBufAllocHandle();

        ByteBuf data = allocHandle.allocate(config.getAllocator());
        allocHandle.attemptedBytesRead(data.writableBytes());
        boolean free = true;
        try {
            ByteBuffer nioData = data.internalNioBuffer(data.writerIndex(), data.writableBytes());
            int pos = nioData.position();
            InetSocketAddress remoteAddress = (InetSocketAddress) ch.receive(nioData);
            if (remoteAddress == null) {
                return 0;
            }

            allocHandle.lastBytesRead(nioData.position() - pos);
            buf.add(new DatagramPacket(data.writerIndex(data.writerIndex() + allocHandle.lastBytesRead()),
                    localAddress(), remoteAddress));
            free = false;
            return 1;
        } catch (Throwable cause) {
            // Rethrows the cause; the return below is only to satisfy the compiler.
            PlatformDependent.throwException(cause);
            return -1;
        } finally {
            if (free) {
                data.release();
            }
        }
    }

    // Writes one message: send() to the envelope's recipient, or write() on a
    // connected channel when no recipient is present. Empty payloads count as
    // written; returns false when the kernel accepted zero bytes.
    @Override
    protected boolean doWriteMessage(Object msg, ChannelOutboundBuffer in) throws Exception {
        final SocketAddress remoteAddress;
        final ByteBuf data;
        if (msg instanceof AddressedEnvelope) {
            @SuppressWarnings("unchecked")
            AddressedEnvelope<ByteBuf, SocketAddress> envelope = (AddressedEnvelope<ByteBuf, SocketAddress>) msg;
            remoteAddress = envelope.recipient();
            data = envelope.content();
        } else {
            data = (ByteBuf) msg;
            remoteAddress = null;
        }

        final int dataLen = data.readableBytes();
        if (dataLen == 0) {
            return true;
        }

        final ByteBuffer nioData = data.nioBufferCount() == 1 ? data.internalNioBuffer(data.readerIndex(), dataLen)
                : data.nioBuffer(data.readerIndex(), dataLen);
        final int writtenBytes;
        if (remoteAddress != null) {
            writtenBytes = javaChannel().send(nioData, remoteAddress);
        } else {
            writtenBytes = javaChannel().write(nioData);
        }
        return writtenBytes > 0;
    }

    // Converts outbound messages to direct single-buffer form (copying when
    // needed); rejects anything that is not a DatagramPacket, ByteBuf, or
    // AddressedEnvelope<ByteBuf, ...>.
    @Override
    protected Object filterOutboundMessage(Object msg) {
        if (msg instanceof DatagramPacket) {
            DatagramPacket p = (DatagramPacket) msg;
            ByteBuf content = p.content();
            if (isSingleDirectBuffer(content)) {
                return p;
            }
            return new DatagramPacket(newDirectBuffer(p, content), p.recipient());
        }

        if (msg instanceof ByteBuf) {
            ByteBuf buf = (ByteBuf) msg;
            if (isSingleDirectBuffer(buf)) {
                return buf;
            }
            return newDirectBuffer(buf);
        }

        if (msg instanceof AddressedEnvelope) {
            @SuppressWarnings("unchecked")
            AddressedEnvelope<Object, SocketAddress> e = (AddressedEnvelope<Object, SocketAddress>) msg;
            if (e.content() instanceof ByteBuf) {
                ByteBuf content = (ByteBuf) e.content();
                if (isSingleDirectBuffer(content)) {
                    return e;
                }
                return new DefaultAddressedEnvelope<ByteBuf, SocketAddress>(newDirectBuffer(e, content), e.recipient());
            }
        }

        throw new UnsupportedOperationException(
                "unsupported message type: " + StringUtil.simpleClassName(msg) + EXPECTED_TYPES);
    }

    /**
     * Checks if the specified buffer is a direct buffer and is composed of a single NIO buffer.
     * (We check this because otherwise we need to make it a non-composite buffer.)
     */
    private static boolean isSingleDirectBuffer(ByteBuf buf) {
        return buf.isDirect() && buf.nioBufferCount() == 1;
    }

    @Override
    protected boolean continueOnWriteError() {
        // Continue on write error as a DatagramChannel can write to multiple remote peers
        //
        // See https://github.com/netty/netty/issues/2665
        return true;
    }

    @Override
    public InetSocketAddress localAddress() {
        return (InetSocketAddress) super.localAddress();
    }

    @Override
    public InetSocketAddress remoteAddress() {
        return (InetSocketAddress) super.remoteAddress();
    }

    @Override
    public ChannelFuture joinGroup(InetAddress multicastAddress) {
        return joinGroup(multicastAddress, newPromise());
    }

    // Joins on the interface of the channel's local address; any-source join.
    @Override
    public ChannelFuture joinGroup(InetAddress multicastAddress, ChannelPromise promise) {
        try {
            return joinGroup(
                    multicastAddress,
                    NetworkInterface.getByInetAddress(localAddress().getAddress()),
                    null, promise);
        } catch (SocketException e) {
            promise.setFailure(e);
        }
        return promise;
    }

    @Override
    public ChannelFuture joinGroup(
            InetSocketAddress multicastAddress, NetworkInterface networkInterface) {
        return joinGroup(multicastAddress, networkInterface, newPromise());
    }

    @Override
    public ChannelFuture joinGroup(
            InetSocketAddress multicastAddress, NetworkInterface networkInterface,
            ChannelPromise promise) {
        return joinGroup(multicastAddress.getAddress(), networkInterface, null, promise);
    }

    @Override
    public ChannelFuture joinGroup(
            InetAddress multicastAddress, NetworkInterface networkInterface, InetAddress source) {
        return joinGroup(multicastAddress, networkInterface, source, newPromise());
    }

    // Joins the multicast group (source-specific when source != null) and
    // records the resulting MembershipKey so leaveGroup()/block() can find it.
    @SuppressJava6Requirement(reason = "Usage guarded by java version check")
    @Override
    public ChannelFuture joinGroup(
            InetAddress multicastAddress, NetworkInterface networkInterface,
            InetAddress source, ChannelPromise promise) {
        checkJavaVersion();

        ObjectUtil.checkNotNull(multicastAddress, "multicastAddress");
        ObjectUtil.checkNotNull(networkInterface, "networkInterface");

        try {
            MembershipKey key;
            if (source == null) {
                key = javaChannel().join(multicastAddress, networkInterface);
            } else {
                key = javaChannel().join(multicastAddress, networkInterface, source);
            }

            synchronized (this) {
                List<MembershipKey> keys = null;
                if (memberships == null) {
                    memberships = new HashMap<InetAddress, List<MembershipKey>>();
                } else {
                    keys = memberships.get(multicastAddress);
                }
                if (keys == null) {
                    keys = new ArrayList<MembershipKey>();
                    memberships.put(multicastAddress, keys);
                }
                keys.add(key);
            }

            promise.setSuccess();
        } catch (Throwable e) {
            promise.setFailure(e);
        }

        return promise;
    }

    @Override
    public ChannelFuture leaveGroup(InetAddress multicastAddress) {
        return leaveGroup(multicastAddress, newPromise());
    }

    @Override
    public ChannelFuture leaveGroup(InetAddress multicastAddress, ChannelPromise promise) {
        try {
            return leaveGroup(
                    multicastAddress, NetworkInterface.getByInetAddress(localAddress().getAddress()), null, promise);
        } catch (SocketException e) {
            promise.setFailure(e);
        }
        return promise;
    }

    @Override
    public ChannelFuture leaveGroup(
            InetSocketAddress multicastAddress, NetworkInterface networkInterface) {
        return leaveGroup(multicastAddress, networkInterface, newPromise());
    }

    @Override
    public ChannelFuture leaveGroup(
            InetSocketAddress multicastAddress,
            NetworkInterface networkInterface, ChannelPromise promise) {
        return leaveGroup(multicastAddress.getAddress(), networkInterface, null, promise);
    }

    @Override
    public ChannelFuture leaveGroup(
            InetAddress multicastAddress, NetworkInterface networkInterface, InetAddress source) {
        return leaveGroup(multicastAddress, networkInterface, source, newPromise());
    }

    // Drops every recorded membership matching interface and source, removing
    // the group's entry once its key list is empty. Always succeeds, even if
    // no matching membership was found.
    @SuppressJava6Requirement(reason = "Usage guarded by java version check")
    @Override
    public ChannelFuture leaveGroup(
            InetAddress multicastAddress, NetworkInterface networkInterface, InetAddress source,
            ChannelPromise promise) {
        checkJavaVersion();

        ObjectUtil.checkNotNull(multicastAddress, "multicastAddress");
        ObjectUtil.checkNotNull(networkInterface, "networkInterface");

        synchronized (this) {
            if (memberships != null) {
                List<MembershipKey> keys = memberships.get(multicastAddress);
                if (keys != null) {
                    Iterator<MembershipKey> keyIt = keys.iterator();

                    while (keyIt.hasNext()) {
                        MembershipKey key = keyIt.next();
                        if (networkInterface.equals(key.networkInterface())) {
                            if (source == null && key.sourceAddress() == null ||
                                source != null && source.equals(key.sourceAddress())) {
                                key.drop();
                                keyIt.remove();
                            }
                        }
                    }
                    if (keys.isEmpty()) {
                        memberships.remove(multicastAddress);
                    }
                }
            }
        }

        promise.setSuccess();
        return promise;
    }

    /**
     * Block the given sourceToBlock address for the given multicastAddress on the given networkInterface
     */
    @Override
    public ChannelFuture block(
            InetAddress multicastAddress, NetworkInterface networkInterface,
            InetAddress sourceToBlock) {
        return block(multicastAddress, networkInterface, sourceToBlock, newPromise());
    }

    /**
     * Block the given sourceToBlock address for the given multicastAddress on the given networkInterface
     */
    @SuppressJava6Requirement(reason = "Usage guarded by java version check")
    @Override
    public ChannelFuture block(
            InetAddress multicastAddress, NetworkInterface networkInterface,
            InetAddress sourceToBlock, ChannelPromise promise) {
        checkJavaVersion();

        ObjectUtil.checkNotNull(multicastAddress, "multicastAddress");
        ObjectUtil.checkNotNull(sourceToBlock, "sourceToBlock");
        ObjectUtil.checkNotNull(networkInterface, "networkInterface");

        synchronized (this) {
            if (memberships != null) {
                // NOTE(review): keys may be null if no membership exists for this
                // group, which would NPE in the for-loop below — TODO confirm
                // callers only block() after a successful joinGroup().
                List<MembershipKey> keys = memberships.get(multicastAddress);
                for (MembershipKey key: keys) {
                    if (networkInterface.equals(key.networkInterface())) {
                        try {
                            key.block(sourceToBlock);
                        } catch (IOException e) {
                            promise.setFailure(e);
                        }
                    }
                }
            }
        }
        promise.setSuccess();
        return promise;
    }

    /**
     * Block the given sourceToBlock address for the given multicastAddress
     *
     */
    @Override
    public ChannelFuture block(InetAddress multicastAddress, InetAddress sourceToBlock) {
        return block(multicastAddress, sourceToBlock, newPromise());
    }

    /**
     * Block the given sourceToBlock address for the given multicastAddress
     *
     */
    @Override
    public ChannelFuture block(
            InetAddress multicastAddress, InetAddress sourceToBlock, ChannelPromise promise) {
        try {
            return block(
                    multicastAddress,
                    NetworkInterface.getByInetAddress(localAddress().getAddress()),
                    sourceToBlock, promise);
        } catch (SocketException e) {
            promise.setFailure(e);
        }
        return promise;
    }

    @Override
    @Deprecated
    protected void setReadPending(boolean readPending) {
        super.setReadPending(readPending);
    }

    // Package-visible bridge so NioDatagramChannelConfig can clear the
    // protected read-pending flag.
    void clearReadPending0() {
        clearReadPending();
    }

    @Override
    protected boolean closeOnReadError(Throwable cause) {
        // We do not want to close on SocketException when using DatagramChannel as we usually can continue receiving.
        // See https://github.com/netty/netty/issues/5893
        if (cause instanceof SocketException) {
            return false;
        }
        return super.closeOnReadError(cause);
    }
}
|
/*
* Copyright 2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.api.internal.artifacts.repositories.metadata;
import com.google.common.collect.ImmutableList;
import org.gradle.caching.internal.BuildCacheHasher;
/**
* An immutable, usable representation of metadata sources.
*/
public interface ImmutableMetadataSources {
    /** The immutable, ordered list of metadata sources to consult. */
    ImmutableList<MetadataSource<?>> sources();

    /**
     * Feeds an identifier for this set of sources into the given hasher —
     * presumably so the source configuration participates in build-cache keys;
     * confirm against the callers.
     */
    void appendId(BuildCacheHasher hasher);
}
|
package baylandtag.av_combobox_edit_in_table;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.net.URL;
import javafx.scene.control.Alert;
import javafx.scene.control.Alert.AlertType;
import javafx.scene.control.Label;
import javafx.scene.control.TextArea;
import javafx.scene.layout.GridPane;
import javafx.scene.layout.Priority;
public class Utils {

    /**
     * Shows a modal error dialog for the given exception. The header is the
     * exception message, the content is the cause's message (when a cause is
     * present), and an expandable pane holds the full stack trace.
     */
    public static void showException(Exception ex) {
        Alert alert = new Alert(AlertType.ERROR);
        alert.setTitle("Error");
        alert.setHeaderText(ex.getMessage());
        if (ex.getCause() != null) {
            alert.setContentText(ex.getCause().getMessage());
        }

        // Render the stack trace into a string for the expandable section.
        StringWriter stackTraceWriter = new StringWriter();
        ex.printStackTrace(new PrintWriter(stackTraceWriter));

        Label label = new Label("The exception stacktrace was:");

        TextArea textArea = new TextArea(stackTraceWriter.toString());
        textArea.setEditable(false);
        textArea.setWrapText(true);
        textArea.setMaxWidth(Double.MAX_VALUE);
        textArea.setMaxHeight(Double.MAX_VALUE);
        GridPane.setVgrow(textArea, Priority.ALWAYS);
        GridPane.setHgrow(textArea, Priority.ALWAYS);

        GridPane expContent = new GridPane();
        expContent.setMaxWidth(Double.MAX_VALUE);
        expContent.add(label, 0, 0);
        expContent.add(textArea, 0, 1);

        alert.getDialogPane().setExpandableContent(expContent);
        alert.showAndWait();
    }

    /**
     * Resolves a classpath resource that lives in the same package as this
     * class, by absolute resource path.
     */
    public static URL getResourceInCurrentPackage(String name) {
        return Utils.class.getResource(
                "/" + Utils.class.getPackage().getName().replace('.', '/') + "/" + name);
    }
}
|
/*
* ====================================================================
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*
*/
package com.epam.reportportal.apache.http.config;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import com.epam.reportportal.apache.http.annotation.ThreadSafe;
/**
* Generic registry of items keyed by low-case string ID.
*
* @since 4.3
*/
@ThreadSafe
public final class Registry<I> implements Lookup<I> {
private final Map<String, I> map;
Registry(final Map<String, I> map) {
super();
this.map = new ConcurrentHashMap<String, I>(map);
}
public I lookup(final String key) {
if (key == null) {
return null;
}
return map.get(key.toLowerCase(Locale.US));
}
@Override
public String toString() {
return map.toString();
}
}
|
/*
* Copyright (C) 2017 BiaoWu
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.biao.intent.router;
import android.app.Fragment;
import android.content.Intent;
/**
* @author biaowu.
*/
class FragmentCallImpl extends CommonCallImpl {

    // The fragment on whose behalf activities are started. Made final: it is
    // assigned once in the constructor and never reassigned.
    private final Fragment fragment;

    FragmentCallImpl(Fragment fragment) {
        super(fragment.getActivity());
        this.fragment = fragment;
    }

    /**
     * Starts the activity through the fragment so that the result is delivered
     * to the fragment's {@code onActivityResult} rather than the host activity's.
     */
    @Override
    protected void startActivityForResult(Intent intent, int requestCode) {
        fragment.startActivityForResult(intent, requestCode);
    }
}
|
package com.bstek.urule.springboot.service.impl;
import com.bstek.urule.Utils;
import com.bstek.urule.runtime.KnowledgePackage;
import com.bstek.urule.runtime.KnowledgeSession;
import com.bstek.urule.runtime.KnowledgeSessionFactory;
import com.bstek.urule.runtime.service.KnowledgeService;
import com.bstek.urule.springboot.entity.Customer;
import com.bstek.urule.springboot.service.InvokeService;
import org.springframework.stereotype.Service;
@Service
public class InvokeServiceImpl implements InvokeService {

    /**
     * Demonstrates a rule-engine invocation: loads the "demo/jclKno" knowledge
     * package, inserts a sample {@link Customer}, runs the "JCL001" decision
     * flow, and prints the resulting score.
     *
     * @throws Exception if the knowledge package cannot be loaded or executed
     */
    @Override
    public void invokeDemo() throws Exception {
        // Look up the KnowledgeService bean from the Spring context.
        KnowledgeService knowledgeService =
                (KnowledgeService) Utils.getApplicationContext().getBean(KnowledgeService.BEAN_ID);
        // Fetch the knowledge package; the id format is "project-name/package-id".
        KnowledgePackage pkg = knowledgeService.getKnowledge("demo/jclKno");
        // A session is the runtime context in which the package's rules execute.
        KnowledgeSession knowledgeSession = KnowledgeSessionFactory.newKnowledgeSession(pkg);

        Customer customer = new Customer();
        customer.setAge(20);
        customer.setLevel(15);
        // Insert the business fact so the rules can match against it.
        knowledgeSession.insert(customer);
        // Run the decision flow identified by its id.
        // (Alternatively, fireRules() would evaluate all matching rules directly.)
        knowledgeSession.startProcess("JCL001");
        // Print the score the rules computed for this customer.
        System.out.println(customer.getScore());
    }
}
|
/** Copyright 2020 bejson.com */
package com.haige.gulimall.product.vo.spuinfo;
import lombok.Data;
import java.math.BigDecimal;
/**
* Auto-generated: 2020-05-31 11:3:26
*
* @author bejson.com (i@bejson.com)
* @website http://www.bejson.com/java2pojo/
*/
@Data
public class Bounds {

    // Presumably the purchase-bonus amount granted for this SPU — confirm
    // against the member/coupon service that consumes this VO.
    private BigDecimal buyBounds;
    // Presumably the growth-points amount granted for this SPU — confirm.
    private BigDecimal growBounds;
}
|
package com.unifig.bi.analysis.service;
import com.baomidou.mybatisplus.service.IService;
import com.unifig.bi.analysis.model.StSmsPromotion;
import java.sql.Date;
import java.util.Map;
/**
* <p>
* 活动类表 服务类
* </p>
*
*
* @since 2019-03-21
*/
public interface StSmsPromotionService extends IService<StSmsPromotion> {

    /**
     * Builds chart data for one promotion over a date range.
     *
     * @param startDate   inclusive start of the reporting window
     * @param stopDate    end of the reporting window
     * @param promotionId id of the promotion to report on
     * @return chart data keyed by name — presumably axis/series names for a
     *         line chart; confirm against the consuming controller
     */
    Map<String, Object> line(Date startDate, Date stopDate, String promotionId);
}
|
/* Generated by: ParserGeneratorCC: Do not edit this line. StringProvider.java Version 1.1 */
/* ParserGeneratorCCOptions:KEEP_LINE_COLUMN=true */
package dev.jorel.fortelangprime.parser;
import java.io.IOException;
// Generated by ParserGeneratorCC — adapts an in-memory String to the parser's
// Provider (pull-reader) interface.
public class StringProvider implements Provider
{
  // Backing string; set to null by close() to release the reference.
  private String m_sStr;
  // Read cursor into m_sStr.
  private int m_nPos = 0;
  // Total length of the backing string, captured at construction.
  private final int m_nLen;

  public StringProvider(final String sStr)
  {
    m_sStr = sStr;
    m_nLen = sStr.length();
  }

  /**
   * Copies up to {@code nLen} characters (bounded by the destination's
   * remaining capacity and the characters left in the string) into
   * {@code aDest} starting at {@code nOfs}.
   *
   * @return the number of characters copied, or -1 at end of input
   */
  public int read (final char[] aDest, final int nOfs, final int nLen) throws IOException
  {
    final int nLeft = m_nLen - m_nPos;
    if (nLeft <= 0)
      return -1;

    // Clamp to the smallest of: destination capacity, requested count, remaining input.
    int nCharsRead = aDest.length - nOfs;
    if (nLen < nCharsRead)
      nCharsRead = nLen;
    if (nLeft < nCharsRead)
      nCharsRead = nLeft;

    m_sStr.getChars(m_nPos, m_nPos + nCharsRead, aDest, nOfs);
    m_nPos += nCharsRead;
    return nCharsRead;
  }

  // Releases the backing string; subsequent read() would NPE on the length
  // check? No — m_nLen is cached, so read() after close() returns -1 once the
  // cursor is exhausted, or NPEs in getChars() otherwise. Callers must not
  // read after close().
  public void close()
  {
    m_sStr = null;
  }
}
/* ParserGeneratorCC - OriginalChecksum=0191f9316d535cd904a9e5bd7474ef17 (do not edit this line) */
|
/*
* Copyright 2017-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onosproject.netconf;
import static com.google.common.base.Preconditions.checkNotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Listener to listen for event about specific Device.
*/
public class FilteringNetconfDeviceOutputEventListener
        implements NetconfDeviceOutputEventListener {

    private static final Logger log =
            LoggerFactory.getLogger(FilteringNetconfDeviceOutputEventListener.class);

    // The single device this listener cares about; used by isRelevant() to filter events.
    private final NetconfDeviceInfo deviceInfo;

    public FilteringNetconfDeviceOutputEventListener(NetconfDeviceInfo deviceInfo) {
        this.deviceInfo = checkNotNull(deviceInfo);
    }

    @Override
    public void event(NetconfDeviceOutputEvent event) {
        // Log level encodes severity: routine replies at DEBUG, notifications at
        // INFO, anything abnormal (errors, closed sessions) at WARN.
        switch (event.type()) {
            case DEVICE_REPLY:
                log.debug("Device {} has reply: {}", deviceInfo, event.getMessagePayload());
                break;
            case DEVICE_NOTIFICATION:
                log.info("Device {} has notification: {}", deviceInfo, event.getMessagePayload());
                break;
            case DEVICE_UNREGISTERED:
                log.warn("Device {} has closed session", deviceInfo);
                break;
            case DEVICE_ERROR:
                log.warn("Device {} has error: {}", deviceInfo, event.getMessagePayload());
                break;
            case SESSION_CLOSED:
                log.warn("Device {} has closed Session: {}", deviceInfo, event.getMessagePayload());
                break;
            default:
                log.warn("Wrong event type {} ", event.type());
        }
    }

    @Override
    public boolean isRelevant(NetconfDeviceOutputEvent event) {
        // Only events originating from "our" device are relevant.
        return deviceInfo.equals(event.getDeviceInfo());
    }
}
|
package com.accengage.samples.geofences;
public class GeofenceItem {
private static String mID;
private static String mServerId;
private static String mExternalId;
private static String mName;
private static String mLatitude;
private static String mLongitude;
private static String mRadius;
private static String mDetectedTime;
private static String mNotifiedTime;
private static String mDetectedCount;
private static String mDeviceLatitude;
private static String mDeviceLongitude;
private static String mDistance;
public GeofenceItem () {
}
public String getId() {
return mID;
}
public void setId(String Id) {
mID = Id;
}
public String getServerId() {
return mServerId;
}
public void setServerId(String serverId) {
mServerId = serverId;
}
public String getExternalId() {
return mExternalId;
}
public void setExternalId(String externalId) {
mExternalId = externalId;
}
public String getName() {
return mName;
}
public void setName(String name) {
GeofenceItem.mName = name;
}
public String getLatitude() {
return mLatitude;
}
public void setLatitude(String latitude) {
GeofenceItem.mLatitude = latitude;
}
public String getLongitude() {
return mLongitude;
}
public void setLongitude(String longitude) {
GeofenceItem.mLongitude = longitude;
}
public String getRadius() {
return mRadius;
}
public void setRadius(String radius) {
GeofenceItem.mRadius = radius;
}
public String getDetectedTime() {
return mDetectedTime;
}
public void setDetectedTime(String detectedTime) {
mDetectedTime = detectedTime;
}
public String getNotifiedTime() {
return mNotifiedTime;
}
public void setNotifiedTime(String notifiedTime) {
mNotifiedTime = notifiedTime;
}
public String getDetectedCount() {
return mDetectedCount;
}
public void setDetectedCount(String detectedCount) {
mDetectedCount = detectedCount;
}
public String getDeviceLatitude() {
return mDeviceLatitude;
}
public void setDeviceLatitude(String deviceLatitude) {
mDeviceLatitude = deviceLatitude;
}
public String getDeviceLongitude() {
return mDeviceLongitude;
}
public void setDeviceLongitude(String deviceLongitude) {
mDeviceLongitude = deviceLongitude;
}
public String getDistance() {
return mDistance;
}
public void setDistance(String distance) {
GeofenceItem.mDistance = distance;
}
}
|
package com.chicago.server.domain;
import lombok.Builder;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.Setter;
import org.hibernate.annotations.CreationTimestamp;
import org.hibernate.annotations.Type;
import javax.persistence.*;
import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.List;
@Entity
@Getter @Setter
@NoArgsConstructor
public class News {

    @Id @GeneratedValue(strategy = GenerationType.IDENTITY)
    @Column(name = "news_id")
    private Long id;

    private String title;

    // Stored as a CLOB/TEXT column so article bodies are not length-limited.
    @Lob
    @Type(type = "text")
    private String content;

    private String writer;

    // Set once by Hibernate at insert time; never updated afterwards.
    @Column(updatable = false)
    @CreationTimestamp
    private LocalDateTime writtenTime;

    // NOTE(review): a commented-out @Enumerated(EnumType.STRING) suggests this
    // was meant to become an enum — currently a plain int; confirm intended range.
    private int reliability;

    // Inverse side of Comment.news; not persisted from this side.
    @OneToMany(mappedBy = "news")
    private List<Comment> comments = new ArrayList<>();

    // Inverse side of Likes.news; not persisted from this side.
    @OneToMany(mappedBy = "news")
    private List<Likes> likes = new ArrayList<>();

    /**
     * Builder for creating a news article; id, timestamp, comments and likes
     * are managed by JPA/Hibernate and intentionally not settable here.
     */
    @Builder
    public News(String title, String content, String writer){
        this.title = title;
        this.content = content;
        this.writer = writer;
    }
}
|
package com.plkpiotr.kanban.domain;
import javax.persistence.*;
/**
* Represents a tasks.
* A tasks belongs to a project and employee.
*/
@Entity
@Table(name = "tasks")
public class Task {

    @Id
    @Column(columnDefinition = "serial")
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    private Integer id;

    // Short code, max 5 chars — presumably a kanban column key; TODO confirm valid values.
    @Column(nullable = false, length = 5)
    private String category;

    // Task description, max 64 chars, required.
    @Column(nullable = false, length = 64)
    private String content;

    // Owning side of the task→project association (FK column id_project).
    @ManyToOne
    @JoinColumn(name = "id_project")
    private Project project;

    // Owning side of the task→employee association (FK column id_employee).
    @ManyToOne
    @JoinColumn(name = "id_employee")
    private Employee employee;

    public Integer getId() {
        return id;
    }

    public void setId(Integer id) {
        this.id = id;
    }

    public String getCategory() {
        return category;
    }

    public void setCategory(String category) {
        this.category = category;
    }

    public String getContent() {
        return content;
    }

    public void setContent(String content) {
        this.content = content;
    }

    public Project getProject() {
        return project;
    }

    public void setProject(Project project) {
        this.project = project;
    }

    public Employee getEmployee() {
        return employee;
    }

    public void setEmployee(Employee employee) {
        this.employee = employee;
    }
}
|
/*
* Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.customerprofiles.model.transform;
import javax.annotation.Generated;
import com.amazonaws.SdkClientException;
import com.amazonaws.services.customerprofiles.model.*;
import com.amazonaws.protocol.*;
import com.amazonaws.annotation.SdkInternalApi;
/**
* ListIdentityResolutionJobsRequestMarshaller
*/
// GENERATED by the AWS Java SDK code generator — do not hand-edit.
@Generated("com.amazonaws:aws-java-sdk-code-generator")
@SdkInternalApi
public class ListIdentityResolutionJobsRequestMarshaller {

    // DomainName travels in the URL path; next-token and max-results as query params.
    private static final MarshallingInfo<String> DOMAINNAME_BINDING = MarshallingInfo.builder(MarshallingType.STRING).marshallLocation(MarshallLocation.PATH)
            .marshallLocationName("DomainName").build();
    private static final MarshallingInfo<String> NEXTTOKEN_BINDING = MarshallingInfo.builder(MarshallingType.STRING)
            .marshallLocation(MarshallLocation.QUERY_PARAM).marshallLocationName("next-token").build();
    private static final MarshallingInfo<Integer> MAXRESULTS_BINDING = MarshallingInfo.builder(MarshallingType.INTEGER)
            .marshallLocation(MarshallLocation.QUERY_PARAM).marshallLocationName("max-results").build();

    // Stateless, so a single shared instance suffices.
    private static final ListIdentityResolutionJobsRequestMarshaller instance = new ListIdentityResolutionJobsRequestMarshaller();

    public static ListIdentityResolutionJobsRequestMarshaller getInstance() {
        return instance;
    }

    /**
     * Marshall the given parameter object.
     */
    public void marshall(ListIdentityResolutionJobsRequest listIdentityResolutionJobsRequest, ProtocolMarshaller protocolMarshaller) {
        if (listIdentityResolutionJobsRequest == null) {
            throw new SdkClientException("Invalid argument passed to marshall(...)");
        }
        try {
            protocolMarshaller.marshall(listIdentityResolutionJobsRequest.getDomainName(), DOMAINNAME_BINDING);
            protocolMarshaller.marshall(listIdentityResolutionJobsRequest.getNextToken(), NEXTTOKEN_BINDING);
            protocolMarshaller.marshall(listIdentityResolutionJobsRequest.getMaxResults(), MAXRESULTS_BINDING);
        } catch (Exception e) {
            // SDK convention: wrap any marshalling failure, preserving the cause.
            throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
        }
    }
}
|
package br.itau.decolar.model;
/**
 * Simple data holder for a customer ("cliente") record.
 *
 * <p>Fields were previously package-private by accident; they are now
 * {@code private} so all access goes through the getters/setters
 * (the public getter/setter API is unchanged).
 */
public class Clientes {

    private int codigo;        // internal customer code
    private String nome;       // full name
    // NOTE(review): a Brazilian CPF has 11 digits; int overflows above
    // 2_147_483_647 and drops leading zeros. Should become String (or long)
    // in a follow-up — type kept as int here to preserve the public API.
    private int cpf;
    private String telefone;   // phone number, free-form
    private String email;

    public int getCodigo() {
        return codigo;
    }

    public void setCodigo(int codigo) {
        this.codigo = codigo;
    }

    public String getNome() {
        return nome;
    }

    public void setNome(String nome) {
        this.nome = nome;
    }

    public int getCpf() {
        return cpf;
    }

    public void setCpf(int cpf) {
        this.cpf = cpf;
    }

    public String getTelefone() {
        return telefone;
    }

    public void setTelefone(String telefone) {
        this.telefone = telefone;
    }

    public String getEmail() {
        return email;
    }

    public void setEmail(String email) {
        this.email = email;
    }
}
|
package org.kohsuke.github;
import com.fasterxml.jackson.annotation.JsonSetter;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.IOException;
import java.io.Reader;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Map;
/**
* Base type for types used in databinding of the event payload.
*
* @see GitHub#parseEventPayload(Reader, Class) GitHub#parseEventPayload(Reader, Class)
* @see GHEventInfo#getPayload(Class) GHEventInfo#getPayload(Class)
* @see <a href="https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads">Webhook events
* and payloads</a>
*/
@SuppressFBWarnings(value = { "UWF_UNWRITTEN_FIELD", "NP_UNWRITTEN_FIELD" }, justification = "JSON API")
public class GHEventPayload extends GitHubInteractiveObject {
// https://docs.github.com/en/free-pro-team@latest/developers/webhooks-and-events/webhook-events-and-payloads#webhook-payload-object-common-properties
// Webhook payload object common properties: action, sender, repository, organization, installation
    // Common webhook payload properties (action, sender, repository,
    // organization, installation) — bound directly to these fields by Jackson.
    private String action;
    private GHUser sender;
    private GHRepository repository;
    private GHOrganization organization;
    private GHAppInstallation installation;

    // Package-private: payload instances are created only by databinding.
    GHEventPayload() {
    }
    /**
     * Gets the action for the triggered event. Most but not all webhook payloads contain an action property that
     * contains the specific activity that triggered the event.
     *
     * @return event action
     */
    public String getAction() {
        return action;
    }

    /**
     * Gets the sender or {@code null} if accessed via the events API.
     *
     * @return the sender or {@code null} if accessed via the events API.
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHUser getSender() {
        return sender;
    }

    /**
     * Sets sender.
     *
     * @param sender
     *            the sender
     * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
     */
    @Deprecated
    public void setSender(GHUser sender) {
        // Throws unconditionally: payloads are read-only snapshots; Jackson binds the field directly.
        throw new RuntimeException("Do not use this method.");
    }

    /**
     * Gets repository.
     *
     * @return the repository
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHRepository getRepository() {
        return repository;
    }

    /**
     * Sets repository.
     *
     * @param repository
     *            the repository
     * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
     */
    @Deprecated
    public void setRepository(GHRepository repository) {
        // Throws unconditionally: payloads are read-only snapshots; Jackson binds the field directly.
        throw new RuntimeException("Do not use this method.");
    }

    /**
     * Gets organization.
     *
     * @return the organization
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHOrganization getOrganization() {
        return organization;
    }

    /**
     * Sets organization.
     *
     * @param organization
     *            the organization
     * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
     */
    @Deprecated
    public void setOrganization(GHOrganization organization) {
        // Throws unconditionally: payloads are read-only snapshots; Jackson binds the field directly.
        throw new RuntimeException("Do not use this method.");
    }

    /**
     * Gets installation
     *
     * @return the installation
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHAppInstallation getInstallation() {
        return installation;
    }
    // Re-binds this payload and every contained object to the given GitHub
    // connection so subsequent API calls on them are authenticated.
    // "root" is declared on a supertype (GitHubInteractiveObject) — not visible in this chunk.
    void wrapUp(GitHub root) {
        this.root = root;
        if (sender != null) {
            sender.wrapUp(root);
        }
        if (repository != null) {
            repository.wrap(root);
        }
        if (organization != null) {
            organization.wrapUp(root);
        }
        if (installation != null) {
            installation.wrapUp(root);
        }
    }
// List of events that still need to be added:
// ContentReferenceEvent
// DeployKeyEvent DownloadEvent FollowEvent ForkApplyEvent GitHubAppAuthorizationEvent GistEvent GollumEvent
// InstallationEvent InstallationRepositoriesEvent IssuesEvent LabelEvent MarketplacePurchaseEvent MemberEvent
// MembershipEvent MetaEvent MilestoneEvent OrganizationEvent OrgBlockEvent PackageEvent PageBuildEvent
// ProjectCardEvent ProjectColumnEvent ProjectEvent RepositoryDispatchEvent RepositoryImportEvent
// RepositoryVulnerabilityAlertEvent SecurityAdvisoryEvent StarEvent StatusEvent TeamEvent TeamAddEvent WatchEvent
/**
* A check run event has been created, rerequested, completed, or has a requested_action.
*
* @see <a href="https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#check_run">
* check_run event</a>
* @see <a href="https://docs.github.com/en/rest/reference/checks#check-runs">Check Runs</a>
*/
public static class CheckRun extends GHEventPayload {
    private int number;
    private GHCheckRun checkRun;
    private GHRequestedAction requestedAction;

    /**
     * Gets number.
     *
     * @return the number
     */
    public int getNumber() {
        return number;
    }

    /**
     * Sets Check Run object
     *
     * @param currentCheckRun
     *            the check run object
     * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
     */
    @Deprecated
    public void setCheckRun(GHCheckRun currentCheckRun) {
        throw new RuntimeException("Do not use this method.");
    }

    /**
     * Gets Check Run object
     *
     * @return the current checkRun object
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHCheckRun getCheckRun() {
        return checkRun;
    }

    /**
     * Sets the Requested Action object.
     *
     * @param currentRequestedAction
     *            the current action
     * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
     */
    @Deprecated
    public void setRequestedAction(GHRequestedAction currentRequestedAction) {
        throw new RuntimeException("Do not use this method.");
    }

    /**
     * Sets the Requested Action object.
     *
     * <p>Kept for backward compatibility only: this overload was accidentally
     * named {@code setCheckRun} (copy-paste error); use
     * {@link #setRequestedAction(GHRequestedAction)} instead.
     *
     * @param currentRequestedAction
     *            the current action
     * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
     */
    @Deprecated
    public void setCheckRun(GHRequestedAction currentRequestedAction) {
        throw new RuntimeException("Do not use this method.");
    }

    /**
     * Gets the Requested Action object
     *
     * @return the requested action
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHRequestedAction getRequestedAction() {
        return requestedAction;
    }

    @Override
    void wrapUp(GitHub root) {
        super.wrapUp(root);
        // checkRun is null when the JSON had no "check_run" key, i.e. this is
        // not actually a check_run event.
        if (checkRun == null)
            throw new IllegalStateException(
                    "Expected check_run payload, but got something else. Maybe we've got another type of event?");
        // Prefer binding to the concrete repository when present; fall back to root.
        GHRepository repository = getRepository();
        if (repository != null) {
            checkRun.wrap(repository);
        } else {
            checkRun.wrap(root);
        }
    }
}
/**
* A check suite event has been requested, rerequested or completed.
*
* @see <a href="https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#check_suite">
* check_suite event</a>
* @see <a href="https://docs.github.com/en/rest/reference/checks#check-suites">Check Suites</a>
*/
public static class CheckSuite extends GHEventPayload {
    private GHCheckSuite checkSuite;

    /**
     * Gets the Check Suite object
     *
     * @return the Check Suite object
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHCheckSuite getCheckSuite() {
        return checkSuite;
    }

    @Override
    void wrapUp(GitHub root) {
        super.wrapUp(root);
        // checkSuite is null when the JSON had no "check_suite" key, i.e. this
        // is not actually a check_suite event.
        if (checkSuite == null)
            throw new IllegalStateException(
                    "Expected check_suite payload, but got something else. Maybe we've got another type of event?");
        // Prefer binding to the concrete repository when present; fall back to root.
        GHRepository repository = getRepository();
        if (repository != null) {
            checkSuite.wrap(repository);
        } else {
            checkSuite.wrap(root);
        }
    }
}
/**
* An installation has been installed, uninstalled, or its permissions have been changed.
*
* @see <a href=
* "https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#installation">
* installation event</a>
* @see <a href="https://docs.github.com/en/rest/reference/apps#installations">GitHub App Installation</a>
*/
public static class Installation extends GHEventPayload {
    private List<GHRepository> repositories;

    /**
     * Gets the repositories this installation grants access to.
     *
     * @return an unmodifiable view of the repositories; empty if the payload listed none
     */
    public List<GHRepository> getRepositories() {
        // Null-safe: wrapUp() below already treats a null list as valid, so the
        // getter must not NPE when the payload carried no "repositories" array.
        return repositories == null
                ? Collections.<GHRepository>emptyList()
                : Collections.unmodifiableList(repositories);
    }

    @Override
    void wrapUp(GitHub root) {
        super.wrapUp(root);
        if (getInstallation() == null) {
            // Fixed copy-paste error: the message previously said "check_suite".
            throw new IllegalStateException(
                    "Expected installation payload, but got something else. Maybe we've got another type of event?");
        }
        if (repositories != null && !repositories.isEmpty()) {
            try {
                for (GHRepository singleRepo : repositories) { // wrap each repository
                    singleRepo.wrap(root);
                    singleRepo.populate();
                }
            } catch (IOException e) {
                throw new GHException("Failed to refresh repositories", e);
            }
        }
    }
}
/**
* A repository has been added or removed from an installation.
*
* @see <a href=
* "https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#installation_repositories">
* installation_repositories event</a>
* @see <a href="https://docs.github.com/en/rest/reference/apps#installations">GitHub App installation</a>
*/
public static class InstallationRepositories extends GHEventPayload {
    private String repositorySelection;
    private List<GHRepository> repositoriesAdded;
    private List<GHRepository> repositoriesRemoved;

    /**
     * Gets installation selection
     *
     * @return the installation selection
     */
    public String getRepositorySelection() {
        return repositorySelection;
    }

    /**
     * Gets repositories added
     *
     * @return an unmodifiable view of the repositories; empty if none were listed
     */
    public List<GHRepository> getRepositoriesAdded() {
        return unmodifiableOrEmpty(repositoriesAdded);
    }

    /**
     * Gets repositories removed
     *
     * @return an unmodifiable view of the repositories; empty if none were listed
     */
    public List<GHRepository> getRepositoriesRemoved() {
        return unmodifiableOrEmpty(repositoriesRemoved);
    }

    // Null-safe wrapper: either list may be absent from the payload.
    private static List<GHRepository> unmodifiableOrEmpty(List<GHRepository> list) {
        return list == null ? Collections.<GHRepository>emptyList() : Collections.unmodifiableList(list);
    }

    @Override
    void wrapUp(GitHub root) {
        super.wrapUp(root);
        if (getInstallation() == null) {
            // Fixed copy-paste error: the message previously said "check_suite".
            throw new IllegalStateException(
                    "Expected installation_repositories payload, but got something else. Maybe we've got another type of event?");
        }
        // Only the list matching the action needs wrapping.
        List<GHRepository> repositories;
        if ("added".equals(getAction()))
            repositories = repositoriesAdded;
        else // action == "removed"
            repositories = repositoriesRemoved;
        if (repositories != null && !repositories.isEmpty()) {
            try {
                for (GHRepository singleRepo : repositories) { // wrap each repository
                    singleRepo.wrap(root);
                    singleRepo.populate();
                }
            } catch (IOException e) {
                throw new GHException("Failed to refresh repositories", e);
            }
        }
    }
}
/**
* A pull request status has changed.
*
* @see <a href=
* "https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#pull_request">
* pull_request event</a>
* @see <a href="https://docs.github.com/en/rest/reference/pulls">Pull Requests</a>
*/
@SuppressFBWarnings(value = { "NP_UNWRITTEN_FIELD" }, justification = "JSON API")
public static class PullRequest extends GHEventPayload {
    private int number;
    private GHPullRequest pullRequest;
    private GHLabel label;
    private GHPullRequestChanges changes;

    /**
     * Gets number.
     *
     * @return the number
     */
    public int getNumber() {
        return number;
    }

    /**
     * Gets pull request.
     *
     * @return the pull request
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHPullRequest getPullRequest() {
        // Re-attach the root on every access so the returned object can make
        // API calls ("root" is inherited from a supertype, not visible here).
        pullRequest.root = root;
        return pullRequest;
    }

    /**
     * Gets the added or removed label for labeled/unlabeled events.
     *
     * @return label the added or removed label
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHLabel getLabel() {
        return label;
    }

    /**
     * Get changes (for action="edited")
     *
     * @return changes
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHPullRequestChanges getChanges() {
        return changes;
    }

    @Override
    void wrapUp(GitHub root) {
        super.wrapUp(root);
        // pullRequest is null when the JSON had no "pull_request" key.
        if (pullRequest == null)
            throw new IllegalStateException(
                    "Expected pull_request payload, but got something else. Maybe we've got another type of event?");
        // Prefer binding to the concrete repository when present; fall back to root.
        GHRepository repository = getRepository();
        if (repository != null) {
            pullRequest.wrapUp(repository);
        } else {
            pullRequest.wrapUp(root);
        }
    }
}
/**
* A review was added to a pull request
*
* @see <a href=
* "https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#pull_request_review">
* pull_request_review event</a>
* @see <a href="https://docs.github.com/en/rest/reference/pulls#reviews">Pull Request Reviews</a>
*/
public static class PullRequestReview extends GHEventPayload {
    private GHPullRequestReview review;
    private GHPullRequest pullRequest;

    /**
     * Gets review.
     *
     * @return the review
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHPullRequestReview getReview() {
        return review;
    }

    /**
     * Gets pull request.
     *
     * @return the pull request
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHPullRequest getPullRequest() {
        return pullRequest;
    }

    @Override
    void wrapUp(GitHub root) {
        super.wrapUp(root);
        if (review == null)
            throw new IllegalStateException(
                    "Expected pull_request_review payload, but got something else. Maybe we've got another type of event?");
        // Link the review to its pull request before wrapping the PR itself.
        // NOTE(review): pullRequest is not null-checked here (unlike "review");
        // a payload missing pull_request would NPE — confirm upstream guarantees.
        review.wrapUp(pullRequest);
        GHRepository repository = getRepository();
        if (repository != null) {
            pullRequest.wrapUp(repository);
        } else {
            pullRequest.wrapUp(root);
        }
    }
}
/**
* A review comment was added to a pull request
*
* @see <a href=
* "https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#pull_request_review_comment">
* pull_request_review_comment event</a>
* @see <a href="https://docs.github.com/en/rest/reference/pulls#review-comments">Pull Request Review Comments</a>
*/
public static class PullRequestReviewComment extends GHEventPayload {
    private GHPullRequestReviewComment comment;
    private GHPullRequest pullRequest;

    /**
     * Gets comment.
     *
     * @return the comment
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHPullRequestReviewComment getComment() {
        return comment;
    }

    /**
     * Gets pull request.
     *
     * @return the pull request
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHPullRequest getPullRequest() {
        return pullRequest;
    }

    @Override
    void wrapUp(GitHub root) {
        super.wrapUp(root);
        if (comment == null)
            throw new IllegalStateException(
                    "Expected pull_request_review_comment payload, but got something else. Maybe we've got another type of event?");
        // Link the comment to its pull request before wrapping the PR itself.
        // NOTE(review): pullRequest is not null-checked here — confirm upstream guarantees.
        comment.wrapUp(pullRequest);
        GHRepository repository = getRepository();
        if (repository != null) {
            pullRequest.wrapUp(repository);
        } else {
            pullRequest.wrapUp(root);
        }
    }
}
/**
* A Issue has been assigned, unassigned, labeled, unlabeled, opened, edited, milestoned, demilestoned, closed, or
* reopened.
*
* @see <a href="https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#issues">
* issues events</a>
* @see <a href="https://docs.github.com/en/rest/reference/issues#comments">Issues Comments</a>
*/
public static class Issue extends GHEventPayload {
    private GHIssue issue;
    private GHLabel label;
    private GHIssueChanges changes;

    /**
     * Gets issue.
     *
     * @return the issue
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHIssue getIssue() {
        return issue;
    }

    /**
     * Sets issue.
     *
     * @param issue
     *            the issue
     * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
     */
    @Deprecated
    public void setIssue(GHIssue issue) {
        throw new RuntimeException("Do not use this method.");
    }

    /**
     * Gets the added or removed label for labeled/unlabeled events.
     *
     * @return label the added or removed label
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHLabel getLabel() {
        return label;
    }

    /**
     * Get changes (for action="edited")
     *
     * @return changes
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHIssueChanges getChanges() {
        return changes;
    }

    @Override
    void wrapUp(GitHub root) {
        super.wrapUp(root);
        // NOTE(review): unlike the check_run/check_suite payloads, "issue" is
        // not null-checked here; a wrong payload type would NPE below.
        GHRepository repository = getRepository();
        if (repository != null) {
            issue.wrap(repository);
        } else {
            issue.wrap(root);
        }
    }
}
/**
* A comment was added to an issue
*
* @see <a href=
* "https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#issue_comment">
* issue_comment event</a>
* @see <a href="https://docs.github.com/en/rest/reference/issues#comments">Issue Comments</a>
*/
public static class IssueComment extends GHEventPayload {
    private GHIssueComment comment;
    private GHIssue issue;

    /**
     * Gets comment.
     *
     * @return the comment
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHIssueComment getComment() {
        return comment;
    }

    /**
     * Sets comment.
     *
     * @param comment
     *            the comment
     * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
     */
    @Deprecated
    public void setComment(GHIssueComment comment) {
        throw new RuntimeException("Do not use this method.");
    }

    /**
     * Gets issue.
     *
     * @return the issue
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHIssue getIssue() {
        return issue;
    }

    /**
     * Sets issue.
     *
     * @param issue
     *            the issue
     * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
     */
    @Deprecated
    public void setIssue(GHIssue issue) {
        throw new RuntimeException("Do not use this method.");
    }

    @Override
    void wrapUp(GitHub root) {
        super.wrapUp(root);
        // Wrap the issue first, then link the comment to the wrapped issue.
        // NOTE(review): neither "issue" nor "comment" is null-checked here.
        GHRepository repository = getRepository();
        if (repository != null) {
            issue.wrap(repository);
        } else {
            issue.wrap(root);
        }
        comment.wrapUp(issue);
    }
}
/**
* A comment was added to a commit
*
* @see <a href=
* "https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#commit_comment">
* commit comment</a>
* @see <a href="https://docs.github.com/en/rest/reference/repos#comments">Comments</a>
*/
public static class CommitComment extends GHEventPayload {
    private GHCommitComment comment;

    /**
     * Gets comment.
     *
     * @return the comment
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHCommitComment getComment() {
        return comment;
    }

    /**
     * Sets comment.
     *
     * @param comment
     *            the comment
     * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
     */
    @Deprecated
    public void setComment(GHCommitComment comment) {
        throw new RuntimeException("Do not use this method.");
    }

    @Override
    void wrapUp(GitHub root) {
        super.wrapUp(root);
        // The comment is only wrapped when a repository is present; there is
        // no root-only fallback here (unlike the check_run/check_suite payloads).
        GHRepository repository = getRepository();
        if (repository != null) {
            comment.wrap(repository);
        }
    }
}
/**
* A repository, branch, or tag was created
*
* @see <a href="https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#create">
* create event</a>
* @see <a href="https://docs.github.com/en/rest/reference/git">Git data</a>
*/
public static class Create extends GHEventPayload {
    // The name of the created ref (branch/tag name).
    private String ref;
    // Kind of ref created — presumably "repository", "branch" or "tag"; confirm against webhook docs.
    private String refType;
    private String masterBranch;
    private String description;

    /**
     * Gets ref.
     *
     * @return the ref
     */
    public String getRef() {
        return ref;
    }

    /**
     * Gets ref type.
     *
     * @return the ref type
     */
    public String getRefType() {
        return refType;
    }

    /**
     * Gets default branch.
     *
     * Name is an artifact of when "master" was the most common default.
     *
     * @return the default branch
     */
    public String getMasterBranch() {
        return masterBranch;
    }

    /**
     * Gets description.
     *
     * @return the description
     */
    public String getDescription() {
        return description;
    }
}
/**
* A branch, or tag was deleted
*
* @see <a href="https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#delete">
* delete event</a>
* @see <a href="https://docs.github.com/en/rest/reference/git">Git data</a>
*/
public static class Delete extends GHEventPayload {
    // The name of the deleted ref (branch/tag name).
    private String ref;
    // Kind of ref deleted — presumably "branch" or "tag"; confirm against webhook docs.
    private String refType;

    /**
     * Gets ref.
     *
     * @return the ref
     */
    public String getRef() {
        return ref;
    }

    /**
     * Gets ref type.
     *
     * @return the ref type
     */
    public String getRefType() {
        return refType;
    }
}
/**
* A deployment
*
* @see <a href="https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#deployment">
* deployment event</a>
* @see <a href="https://docs.github.com/en/rest/reference/repos#deployments">Deployments</a>
*/
public static class Deployment extends GHEventPayload {
    private GHDeployment deployment;

    /**
     * Gets deployment.
     *
     * @return the deployment
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHDeployment getDeployment() {
        return deployment;
    }

    /**
     * Sets deployment.
     *
     * @param deployment
     *            the deployment
     * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
     */
    @Deprecated
    public void setDeployment(GHDeployment deployment) {
        throw new RuntimeException("Do not use this method.");
    }

    @Override
    void wrapUp(GitHub root) {
        super.wrapUp(root);
        // Deployment is only wrapped when a repository is present.
        // NOTE(review): "deployment" itself is not null-checked — would NPE on a wrong payload type.
        GHRepository repository = getRepository();
        if (repository != null) {
            deployment.wrap(repository);
        }
    }
}
/**
* A deployment status
*
* @see <a href=
* "https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#deployment_status">
* deployment_status event</a>
* @see <a href="https://docs.github.com/en/rest/reference/repos#deployments">Deployments</a>
*/
public static class DeploymentStatus extends GHEventPayload {
    private GHDeploymentStatus deploymentStatus;
    private GHDeployment deployment;

    /**
     * Gets deployment status.
     *
     * @return the deployment status
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHDeploymentStatus getDeploymentStatus() {
        return deploymentStatus;
    }

    /**
     * Sets deployment status.
     *
     * @param deploymentStatus
     *            the deployment status
     * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
     */
    @Deprecated
    public void setDeploymentStatus(GHDeploymentStatus deploymentStatus) {
        throw new RuntimeException("Do not use this method.");
    }

    /**
     * Gets deployment.
     *
     * @return the deployment
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHDeployment getDeployment() {
        return deployment;
    }

    /**
     * Sets deployment.
     *
     * @param deployment
     *            the deployment
     * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
     */
    @Deprecated
    public void setDeployment(GHDeployment deployment) {
        throw new RuntimeException("Do not use this method.");
    }

    @Override
    void wrapUp(GitHub root) {
        super.wrapUp(root);
        // Both objects are bound only when a repository is present.
        // NOTE(review): neither "deployment" nor "deploymentStatus" is
        // null-checked — would NPE on a wrong payload type.
        GHRepository repository = getRepository();
        if (repository != null) {
            deployment.wrap(repository);
            deploymentStatus.lateBind(repository);
        }
    }
}
/**
* A user forked a repository
*
* @see <a href="https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#fork"> fork
* event</a>
* @see <a href="https://docs.github.com/en/rest/reference/repos#forks">Forks</a>
*/
public static class Fork extends GHEventPayload {
    // The newly created fork repository.
    private GHRepository forkee;

    /**
     * Gets forkee.
     *
     * @return the forkee
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHRepository getForkee() {
        return forkee;
    }

    /**
     * Sets forkee.
     *
     * @param forkee
     *            the forkee
     * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
     */
    @Deprecated
    public void setForkee(GHRepository forkee) {
        throw new RuntimeException("Do not use this method.");
    }

    @Override
    void wrapUp(GitHub root) {
        super.wrapUp(root);
        // NOTE(review): "forkee" is wrapped unconditionally — would NPE on a wrong payload type.
        forkee.wrap(root);
    }
}
/**
* A ping.
*
* <a href="https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#ping"> ping
* event</a>
*/
public static class Ping extends GHEventPayload {
    // A ping event carries no fields beyond the common GHEventPayload ones.
}
/**
* A repository was made public.
*
* @see <a href="https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#public">
* public event</a>
*/
public static class Public extends GHEventPayload {
    // A public event carries no fields beyond the common GHEventPayload ones.
}
/**
* A commit was pushed.
*
* @see <a href="https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#push"> push
* event</a>
*/
public static class Push extends GHEventPayload {
    private String head, before;
    private boolean created, deleted, forced;
    private String ref;
    private int size;
    private List<PushCommit> commits;
    private Pusher pusher;
    private String compare;
    /**
     * The SHA of the HEAD commit on the repository
     *
     * @return the head
     */
    public String getHead() {
        return head;
    }
    /**
     * This is undocumented, but it looks like this captures the commit that the ref was pointing to before the
     * push.
     *
     * @return the before
     */
    public String getBefore() {
        return before;
    }
    // Jackson alias: the payload names this field "after"; we expose it as "head".
    @JsonSetter // alias
    private void setAfter(String after) {
        head = after;
    }
    /**
     * The full Git ref that was pushed. Example: “refs/heads/main”
     *
     * @return the ref
     */
    public String getRef() {
        return ref;
    }
    /**
     * The number of commits in the push. Is this always the same as {@code getCommits().size()}?
     *
     * @return the size
     */
    public int getSize() {
        return size;
    }
    /**
     * Is created boolean.
     *
     * @return true when this push created the ref
     */
    public boolean isCreated() {
        return created;
    }
    /**
     * Is deleted boolean.
     *
     * @return true when this push deleted the ref
     */
    public boolean isDeleted() {
        return deleted;
    }
    /**
     * Is forced boolean.
     *
     * @return true when this was a force push
     */
    public boolean isForced() {
        return forced;
    }
    /**
     * The list of pushed commits.
     *
     * @return the commits (unmodifiable view)
     */
    public List<PushCommit> getCommits() {
        // NOTE(review): NPEs if "commits" was absent from the JSON — confirm
        // that every push payload includes the array (possibly empty).
        return Collections.unmodifiableList(commits);
    }
    /**
     * Gets pusher.
     *
     * @return the pusher
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public Pusher getPusher() {
        return pusher;
    }
    /**
     * Sets pusher.
     *
     * <p>Never call this: the field is populated by Jackson during deserialization only.
     *
     * @param pusher
     *            the pusher
     * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
     */
    @Deprecated
    public void setPusher(Pusher pusher) {
        throw new RuntimeException("Do not use this method.");
    }
    /**
     * Gets compare.
     *
     * @return URL comparing the before/after state of the ref
     */
    public String getCompare() {
        return compare;
    }
    /**
     * The committer identity ("pusher") of a push event payload.
     */
    public static class Pusher {
        private String name, email;
        /**
         * Gets name.
         *
         * @return the name
         */
        public String getName() {
            return name;
        }
        /**
         * Sets name.
         *
         * <p>Never call this: the field is populated by Jackson during deserialization only.
         *
         * @param name
         *            the name
         * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
         */
        @Deprecated
        public void setName(String name) {
            throw new RuntimeException("Do not use this method.");
        }
        /**
         * Gets email.
         *
         * @return the email
         */
        public String getEmail() {
            return email;
        }
        /**
         * Sets email.
         *
         * <p>Never call this: the field is populated by Jackson during deserialization only.
         *
         * @param email
         *            the email
         * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
         */
        @Deprecated
        public void setEmail(String email) {
            throw new RuntimeException("Do not use this method.");
        }
    }
    /**
     * Commit in a push. Note: sha is an alias for id.
     */
    public static class PushCommit {
        private GitUser author;
        private GitUser committer;
        private String url, sha, message, timestamp;
        private boolean distinct;
        private List<String> added, removed, modified;
        /**
         * Gets author.
         *
         * @return the author
         */
        public GitUser getAuthor() {
            return author;
        }
        /**
         * Gets committer.
         *
         * @return the committer
         */
        public GitUser getCommitter() {
            return committer;
        }
        /**
         * Points to the commit API resource.
         *
         * @return the url
         */
        public String getUrl() {
            return url;
        }
        /**
         * Gets sha (id).
         *
         * @return the sha
         */
        public String getSha() {
            return sha;
        }
        // Jackson alias: the payload names this field "id"; we expose it as "sha".
        @JsonSetter
        private void setId(String id) {
            sha = id;
        }
        /**
         * Gets message.
         *
         * @return the commit message
         */
        public String getMessage() {
            return message;
        }
        /**
         * Whether this commit is distinct from any that have been pushed before.
         *
         * @return the boolean
         */
        public boolean isDistinct() {
            return distinct;
        }
        /**
         * Gets added.
         *
         * @return paths added by this commit (unmodifiable view)
         */
        public List<String> getAdded() {
            return Collections.unmodifiableList(added);
        }
        /**
         * Gets removed.
         *
         * @return paths removed by this commit (unmodifiable view)
         */
        public List<String> getRemoved() {
            return Collections.unmodifiableList(removed);
        }
        /**
         * Gets modified.
         *
         * @return paths modified by this commit (unmodifiable view)
         */
        public List<String> getModified() {
            return Collections.unmodifiableList(modified);
        }
        /**
         * Obtains the timestamp of the commit
         *
         * @return the timestamp, parsed from the payload's timestamp string
         */
        public Date getTimestamp() {
            return GitHubClient.parseDate(timestamp);
        }
    }
}
/**
* A release was added to the repo
*
* @see <a href="https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#release">
* release event</a>
* @see <a href="https://docs.github.com/en/rest/reference/repos#releases">Releases</a>
*/
@SuppressFBWarnings(value = { "UWF_FIELD_NOT_INITIALIZED_IN_CONSTRUCTOR", "NP_UNWRITTEN_FIELD" },
        justification = "Constructed by JSON deserialization")
public static class Release extends GHEventPayload {
    // The release the event refers to, bound from the JSON payload.
    private GHRelease release;
    /**
     * Gets release.
     *
     * @return the release
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHRelease getRelease() {
        return release;
    }
    /**
     * Sets release.
     *
     * <p>Never call this: the field is populated by Jackson during deserialization only.
     *
     * @param release
     *            the release
     * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
     */
    @Deprecated
    public void setRelease(GHRelease release) {
        throw new RuntimeException("Do not use this method.");
    }
}
/**
* A repository was created, deleted, made public, or made private.
*
* @see <a href="https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#repository">
* repository event</a>
* @see <a href="https://docs.github.com/en/rest/reference/repos">Repositories</a>
*/
public static class Repository extends GHEventPayload {
    // A repository event carries no fields beyond the common GHEventPayload ones.
}
/**
* A git commit status was changed.
*
* @see <a href="https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#status">
* status event</a>
* @see <a href="https://docs.github.com/en/rest/reference/repos#statuses">Repository Statuses</a>
*/
public static class Status extends GHEventPayload {
    private String context;
    private String description;
    private GHCommitState state;
    private GHCommit commit;
    private String targetUrl;
    /**
     * Gets the status content.
     *
     * @return status content
     */
    public String getContext() {
        return context;
    }
    /**
     * The optional link added to the status.
     *
     * @return a url
     */
    public String getTargetUrl() {
        return targetUrl;
    }
    /**
     * Gets the status description.
     *
     * @return status description
     */
    public String getDescription() {
        return description;
    }
    /**
     * Gets the status state.
     *
     * @return status state
     */
    public GHCommitState getState() {
        return state;
    }
    /**
     * Sets the status stage.
     *
     * <p>Never call this: the field is populated by Jackson during deserialization only.
     *
     * @param state
     *            status state
     * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
     */
    @Deprecated
    public void setState(GHCommitState state) {
        throw new RuntimeException("Do not use this method.");
    }
    /**
     * Gets the commit associated with the status event.
     *
     * @return commit
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHCommit getCommit() {
        return commit;
    }
    /**
     * Sets the commit associated with the status event.
     *
     * <p>Never call this: the field is populated by Jackson during deserialization only.
     *
     * @param commit
     *            commit
     * @deprecated Do not use this method. It was added due to incomplete understanding of Jackson binding.
     */
    @Deprecated
    public void setCommit(GHCommit commit) {
        throw new RuntimeException("Do not use this method.");
    }
    @Override
    void wrapUp(GitHub root) {
        super.wrapUp(root);
        // "state" is mandatory in a status payload, so a missing value means the
        // JSON was not actually a status event — fail fast with a clear message.
        if (state == null) {
            throw new IllegalStateException(
                    "Expected status payload, but got something else. Maybe we've got another type of event?");
        }
        GHRepository repository = getRepository();
        if (repository != null) {
            commit.wrapUp(repository);
        }
    }
}
/**
* Occurs when someone triggered a workflow run or sends a POST request to the "Create a workflow dispatch event"
* endpoint.
*
* @see <a href=
* "https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#workflow_dispatch">
* workflow dispatch event</a>
* @see <a href=
* "https://docs.github.com/en/actions/reference/events-that-trigger-workflows#workflow_dispatch">Events that
* trigger workflows</a>
*/
public static class WorkflowDispatch extends GHEventPayload {
    private Map<String, Object> inputs;
    private String ref;
    private String workflow;
    /**
     * Gets the map of input parameters passed to the workflow.
     *
     * @return the map of input parameters (unmodifiable view)
     */
    public Map<String, Object> getInputs() {
        // NOTE(review): NPEs if "inputs" was absent from the JSON — confirm the
        // payload always includes it (possibly empty).
        return Collections.unmodifiableMap(inputs);
    }
    /**
     * Gets the ref of the branch (e.g. refs/heads/main)
     *
     * @return the ref of the branch
     */
    public String getRef() {
        return ref;
    }
    /**
     * Gets the path of the workflow file (e.g. .github/workflows/hello-world-workflow.yml).
     *
     * @return the path of the workflow file
     */
    public String getWorkflow() {
        return workflow;
    }
}
/**
* A workflow run was requested or completed.
*
* @see <a href=
* "https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#workflow_run">
* workflow run event</a>
* @see <a href="https://docs.github.com/en/rest/reference/actions#workflow-runs">Actions Workflow Runs</a>
*/
public static class WorkflowRun extends GHEventPayload {
    private GHWorkflowRun workflowRun;
    private GHWorkflow workflow;
    /**
     * Gets the workflow run.
     *
     * @return the workflow run
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHWorkflowRun getWorkflowRun() {
        return workflowRun;
    }
    /**
     * Gets the associated workflow.
     *
     * @return the associated workflow
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected")
    public GHWorkflow getWorkflow() {
        return workflow;
    }
    @Override
    void wrapUp(GitHub root) {
        super.wrapUp(root);
        // Both objects are mandatory in a workflow_run payload; fail fast with a
        // clear message when the JSON was actually some other event type.
        if (workflowRun == null || workflow == null) {
            throw new IllegalStateException(
                    "Expected workflow and workflow_run payload, but got something else. Maybe we've got another type of event?");
        }
        GHRepository repository = getRepository();
        if (repository == null) {
            throw new IllegalStateException("Repository must not be null");
        }
        workflowRun.wrapUp(repository);
        workflow.wrapUp(repository);
    }
}
/**
* A label was created, edited or deleted.
*
* @see <a href= "https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#label">
* label event</a>
*/
public static class Label extends GHEventPayload {
    private GHLabel label;
    // Populated only for action="edited"; null otherwise.
    private GHLabelChanges changes;
    /**
     * Gets the label.
     *
     * @return the label
     */
    @SuppressFBWarnings(value = { "EI_EXPOSE_REP" }, justification = "Expected behavior")
    public GHLabel getLabel() {
        return label;
    }
    /**
     * Gets changes (for action="edited")
     *
     * @return the previous values of the edited fields, or null when the action is not "edited"
     */
    public GHLabelChanges getChanges() {
        return changes;
    }
}
}
|
package stream.consumer;
import org.springframework.cloud.stream.annotation.EnableBinding;
import org.springframework.cloud.stream.annotation.StreamListener;
import org.springframework.cloud.stream.messaging.Sink;
import org.springframework.messaging.handler.annotation.SendTo;
import org.springframework.stereotype.Component;
import stream.channal.LogSink;
import stream.channal.LogSource;
import stream.channal.MySink;
/**
* @author hall
* @date 2021-01-16 16:22
*/
@Component
@EnableBinding({Sink.class, MySink.class, LogSource.class, LogSink.class})
public class MessageConsumer {
    /**
     * Consumes messages arriving on the default {@link Sink#INPUT} channel.
     *
     * @param message the received message
     */
    @StreamListener(Sink.INPUT)
    public void receive(String message) {
        System.out.println("message = " + message);
    }
    /**
     * Listens on {@code MySink.MY_INPUT} and, via {@code @SendTo}, forwards the
     * processed result to {@code LogSource.LOG_OUTPUT}.
     *
     * @param message the received message
     * @return the log message content
     */
    @StreamListener(MySink.MY_INPUT)
    @SendTo(LogSource.LOG_OUTPUT)
    public String receiveMyInput(String message) {
        System.out.println("my message = " + message);
        // NOTE(review): produces e.g. "logabc" — if "log abc" was intended,
        // a separator is missing here; confirm with the downstream consumer.
        return "log" + message;
    }
    /**
     * Consumes the log messages produced by {@link #receiveMyInput(String)}.
     *
     * @param message the received log message
     */
    @StreamListener(LogSink.LOG_INPUT)
    public void logMessage(String message) {
        System.out.println("log message = " + message);
    }
}
|
package com.example.CustomExpandableListView;
import android.os.Bundle;
import android.view.View;
import android.widget.ExpandableListView;
import android.widget.ExpandableListView.OnGroupClickListener;
public class MainActivity extends BaseActivity {
    private ExpandableListView elv;
    // Static demo data. In a real application this would be fetched from a
    // database or a web API and handed to the adapter.
    private static final String[] groupname = {"Bangalore","Mysore","Kodagu"};
    // Child item titles, one inner array per group (indices align with groupname).
    private static final String[][] data = {{"Vidhanasouda","Cubbon park","Lalbagh"},
            {"Palace","Chamundi Hills","Zoo"},
            {"Abbey Falls","Talakaveri"}};
    // Child item detail text (addresses), same shape as "data".
    private static final String[][] listinfo = {{"Dr Ambedkar rd,, Sampangi Ramnagar, Bangalore, Karnataka","Kasturba Road, Bangalore, Karnataka","Lal Bagh Road, Lalbagh, Mavalli, Bangalore, Karnataka"},
            {"Sayyaji Rao Rd, Mysore, Karnataka","Mysore","Ittige gudu, Mysore, Karnataka"},
            {"Kodagu,Karnataka","Kodagu,Karnataka"}};
    //private static final int[] ImgBckgrnd = {R.drawable.bangalore,R.drawable.mysore,R.drawable.coorg};
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        elv = (ExpandableListView) findViewById(R.id.lvExp1);
        elv.setFocusable(false);
        // The same adapter setup can be used from an Activity or a Fragment.
        elv.setAdapter(new CustomELVAdapter(this, MainActivity.this, groupname, listinfo,data));
        elv.setOnGroupClickListener(new OnGroupClickListener() {
            @Override
            public boolean onGroupClick(ExpandableListView parent, View v,
                    int groupPosition, long id) {
                // Return true here to consume the click and prevent the default
                // expand/collapse; false keeps the default behavior.
                return false;
            }
        });
    }
}
|
package org.jabref.gui.externalfiles;
import java.net.URL;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import org.jabref.Globals;
import org.jabref.gui.BasePanel;
import org.jabref.gui.DialogService;
import org.jabref.gui.actions.SimpleCommand;
import org.jabref.gui.fieldeditors.LinkedFileViewModel;
import org.jabref.gui.util.BackgroundTask;
import org.jabref.logic.importer.FulltextFetchers;
import org.jabref.logic.l10n.Localization;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.LinkedFile;
import org.jabref.preferences.JabRefPreferences;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Try to download fulltext PDF for selected entry(ies) by following URL or DOI link.
*/
public class FindFullTextAction extends SimpleCommand {
    private static final Logger LOGGER = LoggerFactory.getLogger(FindFullTextAction.class);
    // The minimum number of selected entries above which the user is asked for confirmation
    private static final int WARNING_LIMIT = 5;
    private final BasePanel basePanel;
    private final DialogService dialogService;

    public FindFullTextAction(BasePanel basePanel) {
        this.basePanel = basePanel;
        this.dialogService = basePanel.frame().getDialogService();
    }

    @Override
    public void execute() {
        BackgroundTask.wrap(this::findFullTexts)
                      .onSuccess(this::downloadFullTexts)
                      .executeWith(Globals.TASK_EXECUTOR);
    }

    /**
     * Looks up a full text URL for every selected entry.
     *
     * @return a map from the (possibly empty) lookup result to its entry; never
     *         {@code null} — an empty map signals "nothing to do" (no selection
     *         or user canceled). Returning null here previously caused an NPE in
     *         {@link #downloadFullTexts(Map)} via the onSuccess callback.
     */
    private Map<Optional<URL>, BibEntry> findFullTexts() {
        if (!basePanel.getSelectedEntries().isEmpty()) {
            basePanel.output(Localization.lang("Looking for full text document..."));
        } else {
            LOGGER.debug("No entry selected for fulltext download.");
        }
        // Each lookup issues at least one network request per entry, so confirm
        // with the user before hammering publishers for a large selection.
        if (basePanel.getSelectedEntries().size() >= WARNING_LIMIT) {
            boolean confirmDownload = dialogService.showConfirmationDialogAndWait(
                    Localization.lang("Look up full text documents"),
                    Localization.lang(
                            "You are about to look up full text documents for %0 entries.",
                            String.valueOf(basePanel.getSelectedEntries().size())) + "\n"
                            + Localization.lang("JabRef will send at least one request per entry to a publisher.")
                            + "\n"
                            + Localization.lang("Do you still want to continue?"),
                    Localization.lang("Look up full text documents"),
                    Localization.lang("Cancel"));
            if (!confirmDownload) {
                basePanel.output(Localization.lang("Operation canceled."));
                return new ConcurrentHashMap<>();
            }
        }
        Map<Optional<URL>, BibEntry> downloads = new ConcurrentHashMap<>();
        // NOTE(review): entries for which no URL is found all map to the same
        // Optional.empty() key, so only the last such entry survives in the map
        // — confirm whether per-entry "not found" notifications are required.
        for (BibEntry entry : basePanel.getSelectedEntries()) {
            FulltextFetchers fetchers = new FulltextFetchers(Globals.prefs.getImportFormatPreferences());
            downloads.put(fetchers.findFullTextPDF(entry), entry);
        }
        return downloads;
    }

    /**
     * Downloads and links the documents located by {@link #findFullTexts()}.
     *
     * @param downloads map from lookup result to entry; empty means nothing to do
     */
    private void downloadFullTexts(Map<Optional<URL>, BibEntry> downloads) {
        List<Optional<URL>> finishedTasks = new ArrayList<>();
        for (Map.Entry<Optional<URL>, BibEntry> download : downloads.entrySet()) {
            BibEntry entry = download.getValue();
            Optional<URL> result = download.getKey();
            if (result.isPresent()) {
                // A configured file directory is required to store the PDF.
                Optional<Path> dir = basePanel.getBibDatabaseContext().getFirstExistingFileDir(Globals.prefs.getFilePreferences());
                if (!dir.isPresent()) {
                    dialogService.showErrorDialogAndWait(Localization.lang("Directory not found"),
                            Localization.lang("Main file directory not set!") + " " + Localization.lang("Preferences")
                                    + " -> " + Localization.lang("File"));
                    return;
                }
                // Download full text
                addLinkedFileFromURL(result.get(), entry);
            } else {
                dialogService.notify(Localization.lang("No full text document found for entry %0.",
                        entry.getCiteKeyOptional().orElse(Localization.lang("undefined"))));
            }
            finishedTasks.add(result);
        }
        // Remove processed results after iteration to avoid mutating the map
        // while it is being traversed.
        for (Optional<URL> result : finishedTasks) {
            downloads.remove(result);
        }
    }

    /**
     * This method attaches a linked file from a URL (if not already linked) to an entry using the key and value pair
     * from the findFullTexts map
     *
     * @param url the url "key"
     * @param entry the entry "value"
     */
    private void addLinkedFileFromURL(URL url, BibEntry entry) {
        LinkedFile newLinkedFile = new LinkedFile(url, "");
        if (!entry.getFiles().contains(newLinkedFile)) {
            LinkedFileViewModel onlineFile = new LinkedFileViewModel(
                    newLinkedFile,
                    entry,
                    basePanel.getBibDatabaseContext(),
                    Globals.TASK_EXECUTOR,
                    dialogService,
                    JabRefPreferences.getInstance());
            onlineFile.download();
            entry.addFile(onlineFile.getFile());
            dialogService.notify(Localization.lang("Finished downloading full text document for entry %0.",
                    entry.getCiteKeyOptional().orElse(Localization.lang("undefined"))));
        } else {
            dialogService.notify(Localization.lang("Full text document for entry %0 already linked.",
                    entry.getCiteKeyOptional().orElse(Localization.lang("undefined"))));
        }
    }
}
|
package pl.straburzynski.packt.ebook.controller;
import org.junit.Before;
import org.junit.Test;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import org.springframework.http.MediaType;
import org.springframework.test.web.servlet.MockMvc;
import org.springframework.test.web.servlet.MvcResult;
import org.springframework.test.web.servlet.setup.MockMvcBuilders;
import pl.straburzynski.packt.ebook.exception.InvalidEbookException;
import pl.straburzynski.packt.ebook.exception.LoginFailedException;
import pl.straburzynski.packt.ebook.model.Ebook;
import pl.straburzynski.packt.ebook.service.EbookService;
import pl.straburzynski.packt.ebook.service.SlackService;
import static org.hamcrest.Matchers.is;
import static org.mockito.Mockito.*;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.*;
/**
 * Standalone MockMvc unit tests for {@code PacktController}: ebook data
 * retrieval, Slack notification, credential check and ebook claiming.
 * Collaborating services are Mockito mocks injected into the controller.
 */
public class PacktControllerTest {
    private MockMvc mockMvc;
    @Mock
    private EbookService ebookService;
    @Mock
    private SlackService slackService;
    @InjectMocks
    private PacktController packtController;
    @Before
    public void setUp() {
        // Initialize the @Mock/@InjectMocks fields and build a standalone
        // MockMvc instance (no Spring context needed).
        MockitoAnnotations.initMocks(this);
        mockMvc = MockMvcBuilders.standaloneSetup(packtController).build();
    }
    // GET /packt/today-ebook returns the ebook data as JSON on success.
    @Test
    public void getTodayFreeEbookData_successfully() throws Exception {
        Ebook ebook = Ebook.builder()
                .title("Test title")
                .bookUrl("http://www.packtpub.com/application-development/test-ebook")
                .description("Test description")
                .imageUrl("https://test.com/123.png").build();
        when(ebookService.getTodayFreeEbookDataFromPackt()).thenReturn(ebook);
        mockMvc.perform(get("/packt/today-ebook"))
                .andExpect(status().is2xxSuccessful())
                .andExpect(jsonPath("$.title", is(ebook.getTitle())))
                .andExpect(jsonPath("$.bookUrl", is(ebook.getBookUrl())))
                .andExpect(jsonPath("$.description", is(ebook.getDescription())))
                .andExpect(jsonPath("$.imageUrl", is(ebook.getImageUrl())));
        verify(ebookService, times(1)).getTodayFreeEbookDataFromPackt();
        verifyNoMoreInteractions(ebookService);
    }
    // An InvalidEbookException from the service maps to HTTP 500.
    @Test
    public void getTodayFreeEbookData_emptyEbookData() throws Exception {
        doThrow(new InvalidEbookException("Description is empty")).when(ebookService).getTodayFreeEbookDataFromPackt();
        mockMvc.perform(get("/packt/today-ebook"))
                .andExpect(status().isInternalServerError());
        verify(ebookService, times(1)).getTodayFreeEbookDataFromPackt();
        verifyNoMoreInteractions(ebookService);
    }
    // POST /packt/send-to-slack delegates to SlackService exactly once.
    @Test
    public void sendMessageToSlack_successfully() throws Exception {
        doNothing().when(slackService).sendMessageToSlack();
        mockMvc.perform(
                post("/packt/send-to-slack")
                        .contentType(MediaType.APPLICATION_JSON_UTF8_VALUE))
                .andExpect(status().is2xxSuccessful());
        verify(slackService, times(1)).sendMessageToSlack();
        verifyNoMoreInteractions(slackService);
    }
    // POST /packt/check-credentials echoes the logged-in user name on success.
    @Test
    public void checkCredentials_userExists() throws Exception {
        String user = "Joe Doe";
        when(ebookService.checkLogin()).thenReturn(user);
        mockMvc.perform(
                post("/packt/check-credentials")
                        .contentType(MediaType.APPLICATION_JSON_UTF8_VALUE))
                .andExpect(status().is2xxSuccessful())
                .andExpect(content().string(user));
        verify(ebookService, times(1)).checkLogin();
        verifyNoMoreInteractions(ebookService);
    }
    // A LoginFailedException from the service maps to a 4xx client error.
    @Test
    public void checkCredentials_userDoesNotExist() throws Exception {
        when(ebookService.checkLogin()).thenThrow(new LoginFailedException("Login failed"));
        mockMvc.perform(
                post("/packt/check-credentials")
                        .contentType(MediaType.APPLICATION_JSON_UTF8_VALUE))
                .andExpect(status().is4xxClientError());
        verify(ebookService, times(1)).checkLogin();
        verifyNoMoreInteractions(ebookService);
    }
    // POST /packt/claim-ebook delegates to EbookService exactly once.
    @Test
    public void claimEbook_successfully() throws Exception {
        doNothing().when(ebookService).claimFreeEbookFromPackt();
        mockMvc.perform(
                post("/packt/claim-ebook")
                        .contentType(MediaType.APPLICATION_JSON_UTF8_VALUE))
                .andExpect(status().is2xxSuccessful());
        verify(ebookService, times(1)).claimFreeEbookFromPackt();
        verifyNoMoreInteractions(ebookService);
    }
}
|
package netty;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.zip.*;
/**
* 解压缩字符串工具类
*
* @author zhanglinbo 20160827
*/
/**
 * Utilities for compressing/decompressing strings with GZIP and ZIP, optionally
 * Base64-encoding the compressed bytes for safe text transport.
 *
 * <p>Fixes over the previous version:
 * <ul>
 *   <li>Uses {@link java.util.Base64} instead of the unsupported internal
 *       {@code sun.misc.BASE64Encoder/Decoder} API (removed in Java 9+). The
 *       MIME variant is used because, like the old encoder, it wraps lines at
 *       76 characters and its decoder tolerates line separators, so previously
 *       produced data still decodes.</li>
 *   <li>Encodes/decodes text as UTF-8 explicitly instead of relying on the
 *       platform default charset, so output is stable across systems.</li>
 *   <li>Uses try-with-resources instead of hand-written finally blocks.</li>
 * </ul>
 *
 * @author zhanglinbo 20160827
 */
public class ZipUtil {

    /**
     * Compresses a string with GZIP, then Base64-encodes the result.
     *
     * @param primStr the string to compress; returned unchanged when null or empty
     * @return the Base64 (MIME, line-wrapped) encoding of the GZIP-compressed UTF-8 bytes
     */
    public static String gzip(String primStr) {
        if (primStr == null || primStr.length() == 0) {
            return primStr;
        }
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        // close() flushes the GZIP trailer before we read out.toByteArray()
        try (GZIPOutputStream gzip = new GZIPOutputStream(out)) {
            gzip.write(primStr.getBytes(StandardCharsets.UTF_8));
        } catch (IOException e) {
            e.printStackTrace();
        }
        return Base64.getMimeEncoder().encodeToString(out.toByteArray());
    }

    /**
     * Compresses a string with GZIP and returns the raw compressed bytes
     * (no Base64 step).
     *
     * @param primStr the string to compress
     * @return the GZIP-compressed UTF-8 bytes, or null when primStr is null
     *         (previously a null argument threw a NullPointerException)
     */
    public static byte[] gzip1(String primStr) {
        if (primStr == null) {
            return null;
        }
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (GZIPOutputStream gzip = new GZIPOutputStream(out)) {
            gzip.write(primStr.getBytes(StandardCharsets.UTF_8));
        } catch (IOException e) {
            e.printStackTrace();
        }
        return out.toByteArray();
    }

    /**
     * Decompresses a string produced by {@link #gzip(String)}: Base64-decodes
     * it first, then GZIP-decompresses.
     *
     * @param compressedStr the Base64-encoded compressed string
     * @return the decompressed string, or null when the input is null or
     *         decompression fails
     * @throws IllegalArgumentException if the input is not valid Base64
     */
    public static String gunzip(String compressedStr) {
        if (compressedStr == null) {
            return null;
        }
        // MIME decoder ignores line separators, matching the lenient behavior
        // of the old sun.misc.BASE64Decoder.
        byte[] compressed = Base64.getMimeDecoder().decode(compressedStr);
        return gunzip(compressed);
    }

    /**
     * Decompresses raw GZIP bytes into a string.
     *
     * @param compressedStr the GZIP-compressed bytes
     * @return the decompressed UTF-8 string, or null when the input is null or
     *         decompression fails
     */
    public static String gunzip(byte[] compressedStr) {
        if (compressedStr == null) {
            return null;
        }
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        String decompressed = null;
        try (ByteArrayInputStream in = new ByteArrayInputStream(compressedStr);
             GZIPInputStream ginzip = new GZIPInputStream(in)) {
            byte[] buffer = new byte[1024];
            int offset;
            while ((offset = ginzip.read(buffer)) != -1) {
                out.write(buffer, 0, offset);
            }
            decompressed = out.toString(StandardCharsets.UTF_8.name());
        } catch (IOException e) {
            e.printStackTrace();
        }
        return decompressed;
    }

    /**
     * Compresses a string with ZIP (single entry named "0"), then
     * Base64-encodes the result.
     *
     * @param str the text to compress
     * @return the Base64 (MIME) encoding of the ZIP archive, or null when the
     *         input is null or compression fails
     */
    public static final String zip(String str) {
        if (str == null)
            return null;
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        // close() also writes the ZIP central directory, producing a complete archive
        try (ZipOutputStream zout = new ZipOutputStream(out)) {
            zout.putNextEntry(new ZipEntry("0"));
            zout.write(str.getBytes(StandardCharsets.UTF_8));
            zout.closeEntry();
        } catch (IOException e) {
            e.printStackTrace();
            return null;
        }
        return Base64.getMimeEncoder().encodeToString(out.toByteArray());
    }

    /**
     * Decompresses a string produced by {@link #zip(String)}: Base64-decodes
     * it first, then extracts the single ZIP entry.
     *
     * @param compressedStr the Base64-encoded ZIP archive
     * @return the decompressed string, or null when the input is null or
     *         decompression fails
     * @throws IllegalArgumentException if the input is not valid Base64
     */
    public static final String unzip(String compressedStr) {
        if (compressedStr == null) {
            return null;
        }
        byte[] compressed = Base64.getMimeDecoder().decode(compressedStr);
        String decompressed = null;
        try (ByteArrayInputStream in = new ByteArrayInputStream(compressed);
             ZipInputStream zin = new ZipInputStream(in);
             ByteArrayOutputStream out = new ByteArrayOutputStream()) {
            zin.getNextEntry();
            byte[] buffer = new byte[1024];
            int offset;
            while ((offset = zin.read(buffer)) != -1) {
                out.write(buffer, 0, offset);
            }
            decompressed = out.toString(StandardCharsets.UTF_8.name());
        } catch (IOException e) {
            e.printStackTrace();
        }
        return decompressed;
    }
}
|
package mapping;
// Generated 25 kwi 2019, 13:51:48 by Hibernate Tools 4.3.1
import java.util.HashSet;
import java.util.Set;
/**
* Uczen generated by hbm2java
*/
/**
 * Student ("Uczen") entity mapped by Hibernate (originally generated by
 * hbm2java). A plain JavaBean keyed by PESEL and linked to an authorization
 * record, a school class, and collections of parents, grades, attendance
 * entries and class-membership records.
 */
public class Uczen implements java.io.Serializable {

    private long pesel;                        // national ID, primary key
    private Autoryzacja autoryzacja;           // login/authorization record
    private Klasa klasa;                       // school class
    private String imie;                       // first name
    private String nazwisko;                   // last name
    private Set rodzics = new HashSet(0);      // parents
    private Set ocenas = new HashSet(0);       // grades
    private Set obecnoscs = new HashSet(0);    // attendance entries
    private Set skladKlasies = new HashSet(0); // class-membership records

    /** No-arg constructor required by Hibernate. */
    public Uczen() {
    }

    /** Minimal constructor linking only the authorization record. */
    public Uczen(Autoryzacja autoryzacja) {
        this.autoryzacja = autoryzacja;
    }

    /** Full constructor populating every mapped property. */
    public Uczen(Autoryzacja autoryzacja, Klasa klasa, String imie, String nazwisko, Set rodzics, Set ocenas, Set obecnoscs, Set skladKlasies) {
        this.autoryzacja = autoryzacja;
        this.klasa = klasa;
        this.imie = imie;
        this.nazwisko = nazwisko;
        this.rodzics = rodzics;
        this.ocenas = ocenas;
        this.obecnoscs = obecnoscs;
        this.skladKlasies = skladKlasies;
    }

    // Bean accessors below are one-liners; Hibernate relies on these exact
    // property names, so none of them may be renamed.

    public long getPesel() { return pesel; }

    public void setPesel(long pesel) { this.pesel = pesel; }

    public Autoryzacja getAutoryzacja() { return autoryzacja; }

    public void setAutoryzacja(Autoryzacja autoryzacja) { this.autoryzacja = autoryzacja; }

    public Klasa getKlasa() { return klasa; }

    public void setKlasa(Klasa klasa) { this.klasa = klasa; }

    public String getImie() { return imie; }

    public void setImie(String imie) { this.imie = imie; }

    public String getNazwisko() { return nazwisko; }

    public void setNazwisko(String nazwisko) { this.nazwisko = nazwisko; }

    public Set getRodzics() { return rodzics; }

    public void setRodzics(Set rodzics) { this.rodzics = rodzics; }

    public Set getOcenas() { return ocenas; }

    public void setOcenas(Set ocenas) { this.ocenas = ocenas; }

    public Set getObecnoscs() { return obecnoscs; }

    public void setObecnoscs(Set obecnoscs) { this.obecnoscs = obecnoscs; }

    public Set getSkladKlasies() { return skladKlasies; }

    public void setSkladKlasies(Set skladKlasies) { this.skladKlasies = skladKlasies; }
}
|
package com.github.tangyi.exam.service;
import com.github.pagehelper.PageInfo;
import com.github.tangyi.common.core.constant.CommonConstant;
import com.github.tangyi.common.core.service.CrudService;
import com.github.tangyi.common.security.utils.SysUtil;
import com.github.tangyi.exam.api.constants.AnswerConstant;
import com.github.tangyi.exam.api.dto.SubjectDto;
import com.github.tangyi.exam.api.module.ExaminationSubject;
import com.github.tangyi.exam.api.module.SubjectChoices;
import com.github.tangyi.exam.api.module.SubjectOption;
import com.github.tangyi.exam.mapper.SubjectChoicesMapper;
import com.github.tangyi.exam.utils.AnswerHandlerUtil;
import com.github.tangyi.exam.utils.SubjectUtil;
import lombok.AllArgsConstructor;
import org.apache.commons.collections4.CollectionUtils;
import org.springframework.beans.BeanUtils;
import org.springframework.cache.annotation.CacheEvict;
import org.springframework.cache.annotation.Cacheable;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import java.util.List;
/**
* 选择题service
*
* @author tangyi
* @date 2018/11/8 21:23
*/
@AllArgsConstructor
@Service
public class SubjectChoicesService extends CrudService<SubjectChoicesMapper, SubjectChoices>
implements ISubjectService {
private final SubjectOptionService subjectOptionService;
private final ExaminationSubjectService examinationSubjectService;
/**
 * Looks up a choice question and loads its option list.
 *
 * @param subjectChoices subjectChoices (the id is used for lookup and as the cache key)
 * @return the SubjectChoices with its options populated, or null when not found
 * @author tangyi
 * @date 2019/1/3 14:24
 */
@Override
@Cacheable(value = "subjectChoices#" + CommonConstant.CACHE_EXPIRE, key = "#subjectChoices.id")
public SubjectChoices get(SubjectChoices subjectChoices) {
    SubjectChoices subject = super.get(subjectChoices);
    // Attach the options belonging to this question
    if (subject != null) {
        SubjectOption subjectOption = new SubjectOption();
        subjectOption.setSubjectChoicesId(subject.getId());
        List<SubjectOption> options = subjectOptionService.getBySubjectChoicesId(subjectOption);
        subject.setOptions(options);
    }
    return subject;
}
/**
 * Counts the questions belonging to an examination.
 *
 * <p>NOTE(review): currently a stub that always returns 0 — confirm whether
 * callers rely on this or a real implementation is still pending.
 *
 * @param subjectChoices subjectChoices
 * @return the question count (always 0 in this implementation)
 * @author tangyi
 * @date 2019/01/23 20:19
 */
int getExaminationTotalSubject(SubjectChoices subjectChoices) {
    return 0;
}
/**
 * Inserts a new choice question.
 *
 * @param subjectChoices subjectChoices
 * @return number of rows inserted
 * @author tangyi
 * @date 2019/01/23 20:03
 */
@Override
@Transactional
public int insert(SubjectChoices subjectChoices) {
    return super.insert(subjectChoices);
}
/**
* 更新题目
*
* @param subjectChoices subjectChoices
* @return int
* @author tangyi
* @date 2019/1/3 14:24
*/
@Override
@Transactional
@CacheEvict(value = "subjectChoices", key = "#subjectChoices.id")
public int update(SubjectChoices subjectChoices) {
// 更新选项
this.insertOptions(subjectChoices);
return super.update(subjectChoices);
}
/**
* 根据ID查询
*
* @param examinationId examinationId
* @param subjectChoices subjectChoices
* @return SubjectChoices
* @author tangyi
* @date 2019-09-14 16:47
*/
public SubjectChoices getByCurrentId(Long examinationId, SubjectChoices subjectChoices) {
ExaminationSubject examinationSubject = new ExaminationSubject();
examinationSubject.setExaminationId(examinationId);
examinationSubject.setSubjectId(subjectChoices.getId());
examinationSubject = examinationSubjectService.findByExaminationIdAndSubjectId(examinationSubject);
if (examinationSubject == null)
return null;
return this.getSubjectChoicesById(examinationSubject.getSubjectId());
}
/**
* 根据上一题ID查询下一题
*
* @param examinationId examinationId
* @param subjectChoices subjectChoices
* @return SubjectChoices
* @author tangyi
* @date 2019-09-14 16:47
*/
public SubjectChoices getByPreviousId(Long examinationId, SubjectChoices subjectChoices) {
ExaminationSubject examinationSubject = new ExaminationSubject();
examinationSubject.setExaminationId(examinationId);
examinationSubject.setSubjectId(subjectChoices.getId());
examinationSubject = examinationSubjectService.getByPreviousId(examinationSubject);
if (examinationSubject == null)
return null;
return this.getSubjectChoicesById(examinationSubject.getSubjectId());
}
/**
* 根据当前题目ID查询上一题
*
* @param examinationId examinationId
* @param subjectChoices subjectChoices
* @return SubjectChoices
* @author tangyi
* @date 2019/10/07 20:40:16
*/
public SubjectChoices getPreviousByCurrentId(Long examinationId, SubjectChoices subjectChoices) {
ExaminationSubject examinationSubject = new ExaminationSubject();
examinationSubject.setExaminationId(examinationId);
examinationSubject.setSubjectId(subjectChoices.getId());
examinationSubject = examinationSubjectService.getPreviousByCurrentId(examinationSubject);
if (examinationSubject == null)
return null;
return this.getSubjectChoicesById(examinationSubject.getSubjectId());
}
/**
* 删除题目
*
* @param subjectChoices subjectChoices
* @return int
* @author tangyi
* @date 2019/1/3 14:24
*/
@Override
@Transactional
@CacheEvict(value = "subjectChoices", key = "#subjectChoices.id")
public int delete(SubjectChoices subjectChoices) {
int update;
if ((update = super.delete(subjectChoices)) > 0)
this.deleteOptionAndRelation(subjectChoices.getId());
return update;
}
/**
* 物理删除
*
* @param subjectChoices subjectChoices
* @return int
* @author tangyi
* @date 2019/06/16 22:44
*/
@Transactional
@CacheEvict(value = "subjectChoices", key = "#subjectChoices.id")
public int physicalDelete(SubjectChoices subjectChoices) {
int update;
if ((update = this.dao.physicalDelete(subjectChoices)) > 0)
this.deleteOptionAndRelation(subjectChoices.getId());
return update;
}
/**
* 批量删除题目
*
* @param ids ids
* @return int
* @author tangyi
* @date 2019/1/3 14:24
*/
@Override
@Transactional
@CacheEvict(value = "subjectChoices", allEntries = true)
public int deleteAll(Long[] ids) {
int update;
if ((update = super.deleteAll(ids)) > 0) {
for (Long id : ids)
this.deleteOptionAndRelation(id);
}
return update;
}
/**
* 物理批量删除
*
* @param ids ids
* @return int
* @author tangyi
* @date 2019/06/16 22:44
*/
@Transactional
@CacheEvict(value = "subjectChoices", allEntries = true)
public int physicalDeleteAll(Long[] ids) {
int update;
if ((update = this.dao.physicalDeleteAll(ids)) > 0) {
for (Long id : ids)
this.deleteOptionAndRelation(id);
}
return update;
}
/**
* 删除题目的选项和与考试的关联
*
* @param subjectId subjectId
* @author tangyi
* @date 2019/06/16 22:09
*/
@Transactional
public void deleteOptionAndRelation(Long subjectId) {
// 删除选项
SubjectOption option = new SubjectOption();
option.setSubjectChoicesId(subjectId);
subjectOptionService.deleteBySubjectChoicesId(option);
// 删除关联关系
ExaminationSubject examinationSubject = new ExaminationSubject();
examinationSubject.setSubjectId(subjectId);
examinationSubjectService.deleteBySubjectId(examinationSubject);
}
/**
* 根据ID查询
*
* @param id id
* @return SubjectDto
* @author tangyi
* @date 2019/06/16 17:36
*/
@Override
public SubjectDto getSubject(Long id) {
SubjectChoices subject = this.get(id);
// 查找选项信息
if (subject != null) {
SubjectOption subjectOption = new SubjectOption();
subjectOption.setSubjectChoicesId(subject.getId());
List<SubjectOption> options = subjectOptionService.getBySubjectChoicesId(subjectOption);
subject.setOptions(options);
}
return SubjectUtil.subjectChoicesToDto(subject, true);
}
/**
* 根据上一题ID查询下一题
*
* @param examinationId examinationId
* @param previousId previousId
* @param nextType 0:下一题,1:上一题
* @return SubjectDto
* @author tangyi
* @date 2019/09/14 16:35
*/
@Override
@Transactional
public SubjectDto getNextByCurrentIdAndType(Long examinationId, Long previousId, Integer nextType) {
SubjectChoices subjectChoices = new SubjectChoices();
subjectChoices.setId(previousId);
if (AnswerConstant.CURRENT.equals(nextType)) {
subjectChoices = this.getByCurrentId(examinationId, subjectChoices);
} else if (AnswerConstant.NEXT.equals(nextType)) {
subjectChoices = this.getByPreviousId(examinationId, subjectChoices);
} else {
subjectChoices = this.getPreviousByCurrentId(examinationId, subjectChoices);
}
return SubjectUtil.subjectChoicesToDto(subjectChoices, true);
}
/**
* 保存
*
* @param subjectDto subjectDto
* @return int
* @author tangyi
* @date 2019/06/16 17:50
*/
@Override
@Transactional
@CacheEvict(value = "subjectChoices", key = "#subjectDto.id")
public int insertSubject(SubjectDto subjectDto) {
SubjectChoices subjectChoices = new SubjectChoices();
BeanUtils.copyProperties(subjectDto, subjectChoices);
subjectChoices.setAnswer(subjectDto.getAnswer().getAnswer());
subjectChoices.setChoicesType(subjectDto.getType());
insertOptions(subjectChoices);
return this.insert(subjectChoices);
}
/**
* 保存选项
* @param subjectChoices subjectChoices
* @author tangyi
* @date 2020/01/17 22:30:48
*/
@Transactional
public void insertOptions(SubjectChoices subjectChoices) {
if (CollectionUtils.isNotEmpty(subjectChoices.getOptions())) {
SubjectOption subjectOption = new SubjectOption();
subjectOption.setSubjectChoicesId(subjectChoices.getId());
subjectOptionService.deleteBySubjectChoicesId(subjectOption);
// 初始化
subjectChoices.getOptions().forEach(option -> {
option.setCommonValue(subjectChoices.getCreator(), subjectChoices.getApplicationCode(),
subjectChoices.getTenantCode());
option.setSubjectChoicesId(subjectChoices.getId());
});
// 批量插入
subjectOptionService.insertBatch(subjectChoices.getOptions());
}
}
/**
* 更新,包括更新选项
*
* @param subjectDto subjectDto
* @return int
* @author tangyi
* @date 2019/06/16 17:50
*/
@Override
@Transactional
@CacheEvict(value = "subjectChoices", key = "#subjectDto.id")
public int updateSubject(SubjectDto subjectDto) {
SubjectChoices subjectChoices = new SubjectChoices();
BeanUtils.copyProperties(subjectDto, subjectChoices);
subjectChoices.setCommonValue(SysUtil.getUser(), SysUtil.getSysCode(), SysUtil.getTenantCode());
// 参考答案
subjectChoices.setAnswer(AnswerHandlerUtil.replaceComma(subjectDto.getAnswer().getAnswer()));
return this.update(subjectChoices);
}
/**
* 删除
*
* @param subjectDto subjectDto
* @return int
* @author tangyi
* @date 2019/06/16 17:50
*/
@Override
@Transactional
@CacheEvict(value = "subjectChoices", key = "#subjectDto.id")
public int deleteSubject(SubjectDto subjectDto) {
SubjectChoices subjectChoices = new SubjectChoices();
BeanUtils.copyProperties(subjectDto, subjectChoices);
return this.delete(subjectChoices);
}
/**
* 物理删除
*
* @param subjectDto subjectDto
* @return int
* @author tangyi
* @date 2019/06/16 22:50
*/
@Override
@Transactional
@CacheEvict(value = "subjectChoices", key = "#subjectDto.id")
public int physicalDeleteSubject(SubjectDto subjectDto) {
SubjectChoices subjectChoices = new SubjectChoices();
BeanUtils.copyProperties(subjectDto, subjectChoices);
return this.physicalDelete(subjectChoices);
}
/**
* 批量删除
*
* @param ids ids
* @return int
* @author tangyi
* @date 2019/06/16 17:50
*/
@Override
@Transactional
@CacheEvict(value = "subjectChoices", allEntries = true)
public int deleteAllSubject(Long[] ids) {
return this.deleteAll(ids);
}
/**
* 物理删除
*
* @param ids ids
* @return int
* @author tangyi
* @date 2019/06/16 22:51
*/
@Override
@Transactional
@CacheEvict(value = "subjectChoices", allEntries = true)
public int physicalDeleteAllSubject(Long[] ids) {
return this.physicalDeleteAll(ids);
}
/**
* 查询列表
*
* @param subjectDto subjectDto
* @return List
* @author tangyi
* @date 2019/06/16 18:16
*/
@Override
public List<SubjectDto> findSubjectList(SubjectDto subjectDto) {
SubjectChoices subjectChoices = new SubjectChoices();
BeanUtils.copyProperties(subjectDto, subjectChoices);
return SubjectUtil.subjectChoicesToDto(this.findList(subjectChoices), true);
}
/**
* 查询分页列表
*
* @param pageInfo pageInfo
* @param subjectDto subjectDto
* @return PageInfo
* @author tangyi
* @date 2019/06/16 18:16
*/
@Override
public PageInfo<SubjectDto> findSubjectPage(PageInfo pageInfo, SubjectDto subjectDto) {
SubjectChoices subjectChoices = new SubjectChoices();
BeanUtils.copyProperties(subjectDto, subjectChoices);
// 选择题类型:单选或多选
if (subjectDto.getType() != null)
subjectChoices.setChoicesType(subjectDto.getType());
PageInfo<SubjectChoices> subjectChoicesPageInfo = this.findPage(pageInfo, subjectChoices);
List<SubjectDto> subjectDtos = SubjectUtil.subjectChoicesToDto(subjectChoicesPageInfo.getList(), true);
PageInfo<SubjectDto> subjectDtoPageInfo = new PageInfo<>();
subjectDtoPageInfo.setList(subjectDtos);
subjectDtoPageInfo.setTotal(subjectChoicesPageInfo.getTotal());
subjectDtoPageInfo.setPageSize(subjectChoicesPageInfo.getPageSize());
subjectDtoPageInfo.setPageNum(subjectChoicesPageInfo.getPageNum());
return subjectDtoPageInfo;
}
/**
* 根据ID批量查询
*
* @param ids ids
* @return List
* @author tangyi
* @date 2019/06/16 18:16
*/
@Override
public List<SubjectDto> findSubjectListById(Long[] ids) {
return SubjectUtil.subjectChoicesToDto(this.findListById(ids), true);
}
/**
* 根据题目ID查询题目信息和选项
*
* @param subjectId subjectId
* @return SubjectChoices
* @author tangyi
* @date 2019/10/07 21:03:43
*/
private SubjectChoices getSubjectChoicesById(Long subjectId) {
SubjectChoices subjectChoices = new SubjectChoices();
subjectChoices.setId(subjectId);
subjectChoices = this.get(subjectChoices);
SubjectOption subjectOption = new SubjectOption();
subjectOption.setSubjectChoicesId(subjectChoices.getId());
List<SubjectOption> options = subjectOptionService.getBySubjectChoicesId(subjectOption);
subjectChoices.setOptions(options);
return subjectChoices;
}
}
|
package com.petbattle.config;
import lombok.Getter;
import lombok.Setter;
/**
 * Credential pair (username/password) for connecting to an Infinispan cluster.
 *
 * <p>NOTE(review): the class name keeps the original "Infinspan" spelling for
 * source compatibility with existing callers.
 */
@Getter
@Setter
public class InfinspanCreds {

    private String username;
    private String password;

    /** No-arg constructor for frameworks and serialization. */
    public InfinspanCreds() {
    }

    /**
     * Builds a fully-populated credential pair.
     *
     * @param username account name
     * @param password account secret
     */
    public InfinspanCreds(String username, String password) {
        this.username = username;
        this.password = password;
    }
}
|
package com.zhjl.tech.attest.annotationdemo.atlog;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Platform logging annotation: marks a method (or parameter) so an aspect can
 * record its invocation in the platform log.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target({ElementType.METHOD,ElementType.PARAMETER})
public @interface ZhijlLog {

    /** Free-form log description. */
    String value() default "";

    /** HTTP request method of the logged call. NOTE(review): name keeps the "rquest" typo for compatibility. */
    String rquestMethod() default "";

    //traceID
    // String traceIdLocal() default "";

    /**
     * How to locate the bizId; only two levels of lookup are supported.
     * Form: p[$(index)], or p[$(index)].$(method)
     * p[0].getOrdersn — call getOrdersn on parameter 0 to obtain the bizId
     * p[2] — use parameter 2 directly as the bizId
     * @return
     */
    // String traceLocation() default "";
    // String Sn() default "";
    // String Sntype() default "";

    /** Expression locating the order serial number. */
    String orderSn() default "";

    /** Expression locating the channel order serial number. */
    String ChannelorderSn() default "";

    // tokens used when parsing the p[i].method expressions above
    String SPLIT = ".";
    String START = "[";
    String END = "]";
}
|
/*
* Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.stepfunctions.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
 * Details recorded when an activity starts during a Step Functions execution.
 *
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/states-2016-11-23/ActivityStartedEventDetails" target="_top">AWS
 *      API Documentation</a>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class ActivityStartedEventDetails implements Serializable, Cloneable, StructuredPojo {

    /**
     * Name of the worker the task is assigned to; workers report this name
     * when calling <a>GetActivityTask</a>.
     */
    private String workerName;

    /**
     * Sets the assigned worker's name (as reported via <a>GetActivityTask</a>).
     *
     * @param workerName the worker's name
     */
    public void setWorkerName(String workerName) {
        this.workerName = workerName;
    }

    /**
     * @return the assigned worker's name (as reported via <a>GetActivityTask</a>)
     */
    public String getWorkerName() {
        return this.workerName;
    }

    /**
     * Fluent variant of {@link #setWorkerName(String)}.
     *
     * @param workerName the worker's name
     * @return this object, so calls can be chained
     */
    public ActivityStartedEventDetails withWorkerName(String workerName) {
        setWorkerName(workerName);
        return this;
    }

    /**
     * Debug-oriented string form; sensitive members would appear redacted.
     *
     * @return a string representation of this object
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("{");
        if (getWorkerName() != null) {
            sb.append("WorkerName: ").append(getWorkerName());
        }
        return sb.append("}").toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof ActivityStartedEventDetails)) {
            return false;
        }
        ActivityStartedEventDetails other = (ActivityStartedEventDetails) obj;
        String mine = getWorkerName();
        String theirs = other.getWorkerName();
        return (mine == null) ? (theirs == null) : mine.equals(theirs);
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        String worker = getWorkerName();
        // prime * 1 + elementHash — same value the generated code produced
        return prime + ((worker == null) ? 0 : worker.hashCode());
    }

    @Override
    public ActivityStartedEventDetails clone() {
        try {
            return (ActivityStartedEventDetails) super.clone();
        } catch (CloneNotSupportedException e) {
            // cannot happen: the class implements Cloneable
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }

    @com.amazonaws.annotation.SdkInternalApi
    @Override
    public void marshall(ProtocolMarshaller protocolMarshaller) {
        com.amazonaws.services.stepfunctions.model.transform.ActivityStartedEventDetailsMarshaller.getInstance().marshall(this, protocolMarshaller);
    }
}
|
package com.alipay.api.domain;
import com.alipay.api.AlipayObject;
import com.alipay.api.internal.mapping.ApiField;
/**
 * A sale product exposed through the Alipay API.
 *
 * @author auto create
 * @since 1.0, 2013-12-17 15:55:22
 */
public class SaleProduct extends AlipayObject {

    private static final long serialVersionUID = 8528973599919994729L;

    /** Product source channel, e.g. TAOBAO or ALIPAY. */
    @ApiField("channel_type")
    private String channelType;

    /** Database id of this sale product. */
    @ApiField("id")
    private String id;

    /** Face value of the product. */
    @ApiField("market_price")
    private String marketPrice;

    /** Provider of the recharge product. */
    @ApiField("product_provider")
    private ProductProvider productProvider;

    /** Selling price. */
    @ApiField("sale_price")
    private String salePrice;

    /** Product status: 0 = unavailable, 1 = available. */
    @ApiField("status")
    private String status;

    public String getChannelType() {
        return channelType;
    }

    public void setChannelType(String channelType) {
        this.channelType = channelType;
    }

    public String getId() {
        return id;
    }

    public void setId(String id) {
        this.id = id;
    }

    public String getMarketPrice() {
        return marketPrice;
    }

    public void setMarketPrice(String marketPrice) {
        this.marketPrice = marketPrice;
    }

    public ProductProvider getProductProvider() {
        return productProvider;
    }

    public void setProductProvider(ProductProvider productProvider) {
        this.productProvider = productProvider;
    }

    public String getSalePrice() {
        return salePrice;
    }

    public void setSalePrice(String salePrice) {
        this.salePrice = salePrice;
    }

    public String getStatus() {
        return status;
    }

    public void setStatus(String status) {
        this.status = status;
    }
}
|
package gui;
import java.util.ArrayList;
import java.util.List;
import cellsociety_team17.Grid;
import cellsociety_team17.Init;
import cellsociety_team17.SegregationInit;
import cellsociety_team17.LifeInit;
import cellsociety_team17.Main;
import cellsociety_team17.Processor;
import cellsociety_team17.SegregationGrid;
import cellsociety_team17.SegregationProcessor;
import cellsociety_team17.WaTorInit;
import cellsociety_team17.WaTorProcessor;
import javafx.animation.KeyFrame;
import javafx.animation.Timeline;
import javafx.beans.value.ChangeListener;
import javafx.beans.value.ObservableValue;
import javafx.scene.chart.XYChart.Series;
import javafx.util.Duration;
/**
 * Model of the running cell-society simulation: owns the JavaFX timeline,
 * the active grid/processor, and the bookkeeping needed to step, pause,
 * restyle (rectangle/triangle cells) and switch simulations.
 */
public class SimModel {

    // animation driver; recreated on every reinitTimeline()
    private Timeline timeline;
    private SimView simView;
    // private Grid segGrid;
    // one processor per simulation type, indexed by CURRENT_SIM
    private Processor[] processorArray;
    // private SegregationInit initArray;
    //
    // temp for fire, until InitClass is extracted
    // private Grid fireGrid;
    // private FireInit initArray;
    // shapes currently drawn on screen; removed and rebuilt each frame
    private List<CellShape> listOfCellRectangles;
    private int CELLSHAPE_KEY = 0; // 0 is rectangle, 1 is triangle
    // temp for life, until InitClass is extracted
    private Grid grid;
    // one initializer per simulation type, indexed by CURRENT_SIM
    private Init[] initArray;
    // per-state cell counts for the current frame (feeds the graph)
    private double[] stateCount;

    /**
     * @return the stateCount
     */
    public double[] getStateCount() {
        return stateCount;
    }

    /**
     * @param stateCount the stateCount to set
     */
    public void setStateCount(double[] stateCount) {
        this.stateCount = stateCount;
    }

    /**
     * @return the initArray
     */
    public Init[] getInitArray() {
        return initArray;
    }

    private int CURRENT_SIM; // Set to Fire

    /**
     * @return the CURRENT_SIM
     */
    public int getCurrentSim() {
        return CURRENT_SIM;
    }

    private int FRAMES_PER_SECOND = 1;

    /**
     * @return the fRAMES_PER_SECOND
     */
    public int getFPS() {
        return FRAMES_PER_SECOND;
    }

    /**
     * @param fRAMES_PER_SECOND the fRAMES_PER_SECOND to set
     */
    public void setFPS(int fps) {
        FRAMES_PER_SECOND = fps;
    }

    // baseline frame length in ms, derived from the initial FPS
    private final double DEFAULT_FRAME_DURATION = 1000 / FRAMES_PER_SECOND;

    /**
     * @return the defaultFrameDuration
     */
    public double getDefaultFrameDuration() {
        return DEFAULT_FRAME_DURATION;
    }

    private double FRAME_DURATION = DEFAULT_FRAME_DURATION;

    /**
     * @param fRAME_DURATION the fRAME_DURATION to set
     */
    public void setFrameDuration(double frameDuration) {
        FRAME_DURATION = frameDuration;
    }

    /**
     * @return the frame duration.
     *
     */
    public double getFrameDuration() {
        return FRAME_DURATION;
    }

    /**
     * Wires the model to its collaborators and selects the active simulation.
     *
     * @param view       the view to render into
     * @param simTime    the timeline that drives iterate()
     * @param process    processors, one per simulation type
     * @param x          initializers, one per simulation type
     * @param currentSim index of the simulation to run
     */
    public void init(SimView view, Timeline simTime, Processor[] process, Init[] x, int currentSim){
        timeline=simTime;
        simView=view;
        processorArray = process;
        initArray = x;
        CURRENT_SIM = currentSim;
    }

    /**
     * Advances the simulation one frame: updates state counts, generates the
     * next grid, and redraws the cell shapes in the configured shape style.
     */
    public void iterate(){
        increaseFrameCount(simView);
        // Remove all Cell Rectangles
        stateCount = processorArray[CURRENT_SIM].getStateCount();
        // Print number of states
        //for(int i=0;i<(int)processorArray[CURRENT_SIM].getStateCount().length;i++){
        ////System.out.print((int) processorArray[CURRENT_SIM].getStateCount()[i]+ " ");
        //}
        //System.out.println(" ");
        // Checks
        checkWindowWidth();
        checkWindowHeight();
        grid = processorArray[CURRENT_SIM].generateNextGrid();
        processorArray[CURRENT_SIM].initProcessor(grid, initArray[CURRENT_SIM]);
        processorArray[CURRENT_SIM].setCurrentGrid(grid);
        // drop the previous frame's shapes before drawing the new grid
        if(listOfCellRectangles!=null){
            simView.getRoot().getChildren().removeAll(listOfCellRectangles);
        }
        if(CELLSHAPE_KEY==0){
            listOfCellRectangles = simView.generateGridRectangles(grid);
        }
        else if(CELLSHAPE_KEY==1){
            listOfCellRectangles = simView.generateGridTriangles(grid);
        }
    }

    /**
     * Increments the view's frame counter by one.
     *
     * @param simView the view whose counter is advanced
     */
    private void increaseFrameCount(SimView simView) {
        // Increase the frame by one for each iteration
        simView.setStepFrame(simView.getStepFrame()+1);
    }

    // resets the view's frame counter to zero (used when the timeline restarts)
    private void resetFrameCount(SimView simView){
        simView.setStepFrame(0);
    }

    /**
     * Installs a listener that tracks scene-height changes in the view.
     * NOTE(review): called every iterate(), so listeners accumulate — confirm
     * whether a single registration was intended.
     */
    private void checkWindowHeight() {
        simView.getScene().heightProperty().addListener(new ChangeListener<Number>(){
            @Override
            public void changed(ObservableValue<? extends Number> arg0, Number oldHeight, Number newHeight) {
                simView.setCurrentHeight((double) newHeight);
                //////System.out.println("New Height: "+ simView.getCurrentHeight());
                // Check if window resized beyond screen's current dimension
                // Grid resize?
            }
        });
    }

    /**
     * Installs a listener that tracks scene-width changes in the view.
     * NOTE(review): called every iterate(), so listeners accumulate — confirm
     * whether a single registration was intended.
     */
    private void checkWindowWidth() {
        simView.getScene().widthProperty().addListener(new ChangeListener<Number>(){
            @Override
            public void changed(ObservableValue<? extends Number> arg0, Number oldWidth, Number newWidth) {
                simView.setCurrentWidth((double) newWidth);
                //////System.out.println("New Width: "+ simView.getCurrentWidth());
                // Check if window resized beyond screen's current dimension
                // Grid resize?
            }
        });
    }

    /** Resumes the animation. */
    public void play(){
        timeline.play();
    }

    /** Pauses the animation, keeping the current frame. */
    public void pause(){
        timeline.pause();
    }

    /** Stops the timeline and advances exactly one frame. */
    public void step(){
        timeline.stop();
        iterate();
    }

    /** Switches the cell rendering to rectangles and restarts the timeline. */
    public void rectShape(){
        timeline.stop();
        CELLSHAPE_KEY = 0;
        // re-add the background so it sits below the freshly drawn cells
        simView.getRoot().getChildren().remove(simView.getGridBackground());
        simView.getRoot().getChildren().add(simView.getGridBackground());
        reinitLineChart();
        reinitTimeline();
    }

    /** Switches the cell rendering to triangles and restarts the timeline. */
    public void triangleShape(){
        simView.getRoot().getChildren().remove(simView.getGridBackground());
        simView.getRoot().getChildren().add(simView.getGridBackground());
        timeline.stop();
        CELLSHAPE_KEY = 1;
        reinitLineChart();
        reinitTimeline();
    }

    /**
     * Rescales the frame duration by the slider's ratio and restarts playback.
     *
     * @param sliderRatio multiplier applied to the default frame duration
     */
    public void adjustSpeed(double sliderRatio){
        timeline.stop();
        setFrameDuration(getDefaultFrameDuration()*sliderRatio);
        reinitKeyFrame();
    }
    //
    // public void switchToFire(){
    // timeline.stop();
    // CURRENT_SIM=0;
    // reinitTimeline();
    // }
    //
    // public void switchToLife(){
    // timeline.stop();
    // CURRENT_SIM = 1;
    // reinitTimeline();
    // }
    //
    // public void switchToWaTor(){
    // timeline.stop();
    // CURRENT_SIM = 2;
    // reinitTimeline();
    // }
    // public void switchToSeg(){
    // timeline.stop();
    // CURRENT_SIM = 3;
    // reinitTimeline();
    // }
    //
    // public void switchToSugar(){
    // timeline.stop();
    // CURRENT_SIM = 4;
    // reinitTimeline();
    // }

    /**
     * Switches to a different simulation, resetting the graph and timeline.
     *
     * @param currentSim index of the simulation to activate
     */
    public void switchSim(int currentSim){
        timeline.stop();
        CURRENT_SIM=currentSim;
        reinitLineChart();
        //simView.getGraphGUI().getLineChart().getData().addAll(simView.getGraphGUI().getSeriesArray());
        reinitTimeline();
    }

    /**
     * Replaces the view's line chart with a fresh, empty GraphGUI instance.
     */
    private void reinitLineChart() {
        simView.getGraphGUI().getLineChart().getData().clear();
        simView.getRoot().getChildren().remove(simView.getGraphGUI().getLineChart());
        simView.setGraphGUI(new GraphGUI());
        simView.getRoot().setRight(simView.getGraphGUI().getLineChart());
    }

    /**
     * Rebuilds the timeline from scratch: resets the frame counter, recreates
     * the processor/initializer arrays, re-wires the model, and starts playing.
     */
    private void reinitTimeline() {
        resetFrameCount(simView);
        timeline = new Timeline();
        initArray = Main.initInitClassArray();
        processorArray = Main.initProcessorArray();
        init(simView, timeline, processorArray, initArray, CURRENT_SIM);
        reinitKeyFrame();
    }

    // attaches an indefinite key frame (duration = FRAME_DURATION) and plays
    private void reinitKeyFrame() {
        KeyFrame frame = new KeyFrame(Duration.millis(getFrameDuration()),e->iterate());
        timeline.setCycleCount(Timeline.INDEFINITE);
        timeline.getKeyFrames().add(frame);
        timeline.play();
    }

    // restores the active simulation's initial grid and restarts the timeline
    void resetToInitial(){
        initArray[CURRENT_SIM].setGrid();
        reinitTimeline();
    }
}
|
package cn.iocoder.dashboard.framework.sms.core.client;
import cn.iocoder.dashboard.common.core.KeyValue;
import cn.iocoder.dashboard.framework.sms.core.client.dto.SmsReceiveRespDTO;
import cn.iocoder.dashboard.framework.sms.core.client.dto.SmsSendRespDTO;
import cn.iocoder.dashboard.framework.sms.core.client.dto.SmsTemplateRespDTO;
import java.util.List;
/**
 * SMS client abstraction: one implementation per SMS channel/provider.
 *
 * @author zzf
 * @date 2021/1/25 14:14
 */
public interface SmsClient {

    /**
     * Returns the channel id this client sends through.
     *
     * @return channel id
     */
    Long getId();

    /**
     * Sends an SMS message.
     *
     * @param logId          id of the send-log record
     * @param mobile         recipient phone number
     * @param apiTemplateId  template id in the provider's SMS API
     * @param templateParams template parameters; a List is used so parameter order is preserved
     * @return the send result
     */
    SmsCommonResult<SmsSendRespDTO> sendSms(Long logId, String mobile, String apiTemplateId,
                                            List<KeyValue<String, Object>> templateParams);

    /**
     * Parses a delivery-status callback payload from the provider.
     *
     * @param text raw callback payload
     * @return parsed receive statuses
     * @throws Throwable when the payload cannot be parsed
     */
    List<SmsReceiveRespDTO> parseSmsReceiveStatus(String text) throws Throwable;

    /**
     * Fetches the given SMS template from the provider.
     *
     * @param apiTemplateId template id in the provider's SMS API
     * @return the template
     */
    SmsCommonResult<SmsTemplateRespDTO> getSmsTemplate(String apiTemplateId);
}
|
/*
* Tencent is pleased to support the open source community by making Angel available.
*
* Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
*
* Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* https://opensource.org/licenses/BSD-3-Clause
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*
*/
package com.tencent.angel.ml.objective;
import com.tencent.angel.ml.RegTree.RegTDataStore;
import com.tencent.angel.ml.RegTree.GradPair;
import com.tencent.angel.ml.param.RegTParam;
import com.tencent.angel.ml.utils.MathUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import java.util.ArrayList;
import java.util.List;
/**
 * Softmax objective for multi-class classification.
 *
 * <p>Outputs the predicted class index when {@code outputProb == false}, or
 * the full probability distribution when {@code outputProb == true}.
 */
public class SoftmaxMultiClassObj implements ObjFunc {
    private static final Log LOG = LogFactory.getLog(SoftmaxMultiClassObj.class);

    public RegTParam param;
    public int numClass;
    private boolean outputProb;

    /**
     * @param param      regression-tree parameters (supplies numClass)
     * @param outputProb whether predictions are probabilities or class indices
     */
    public SoftmaxMultiClassObj(RegTParam param, boolean outputProb) {
        this.param = param;
        this.numClass = param.numClass;
        this.outputProb = outputProb;
    }

    /** Defaults to probability output. */
    public SoftmaxMultiClassObj(RegTParam param) {
        this(param, true);
    }

    /**
     * Computes first/second-order gradients of the softmax cross-entropy loss
     * for each (instance, class) pair.
     *
     * @param preds     flat predictions, laid out instance-major: numClass
     *                  scores per instance
     * @param dataStore labels and instance weights
     * @param iteration current boosting iteration (unused here)
     * @return one GradPair per (instance, class), in the same layout as preds
     */
    @Override
    public List<GradPair> calGrad(float[] preds, RegTDataStore dataStore, int iteration) {
        assert preds.length == this.numClass * dataStore.labels.length;
        List<GradPair> rec = new ArrayList<GradPair>(preds.length);
        int ndata = preds.length / numClass;
        // sentinel -1 means "no invalid label seen"; otherwise holds the offending label
        int labelError = -1;
        float[] tmp = new float[numClass];
        for (int insIdx = 0; insIdx < ndata; insIdx++) {
            System.arraycopy(preds, insIdx * numClass, tmp, 0, numClass);
            MathUtils.softmax(tmp);
            int label = (int) dataStore.labels[insIdx];
            if (label < 0 || label >= numClass) {
                labelError = label;
                label = 0; // fall back to class 0 so training can continue
            }
            float wt = dataStore.getWeight(insIdx);
            for (int k = 0; k < numClass; ++k) {
                float p = tmp[k];
                // second-order term of softmax cross-entropy, weighted
                float h = 2.0f * p * (1.0f - p) * wt;
                if (label == k) {
                    rec.add(new GradPair((p - 1.0f) * wt, h));
                } else {
                    rec.add(new GradPair(p * wt, h));
                }
            }
        }
        // BUG FIX: the previous guard (labelError >= 0 && labelError < numClass)
        // could never be true — labelError is only ever assigned an
        // OUT-of-range label (or stays -1) — so invalid labels were silently
        // swallowed. Report whenever the sentinel was overwritten.
        if (labelError != -1) {
            LOG.error(String.format("SoftmaxMultiClassObj: label must be in [0, num_class), "
                    + "numClass = %d, but found %d in label", numClass, labelError));
        }
        return rec;
    }

    /**
     * Transforms raw scores in place (softmax) or extracts argmax indices.
     *
     * @param preds flat predictions, numClass scores per instance; overwritten
     *              with probabilities when {@code prob} is true
     * @param prob  true = normalize preds to probabilities (returns empty list);
     *              false = return the argmax class index per instance
     * @return argmax indices when {@code prob} is false, otherwise empty
     */
    public List<Float> transform(List<Float> preds, boolean prob) {
        List<Float> rec = new ArrayList<Float>();
        int ndata = preds.size() / numClass;
        float[] tmp = new float[numClass];
        for (int insIdx = 0; insIdx < ndata; insIdx++) {
            for (int k = 0; k < numClass; k++) {
                tmp[k] = preds.get(insIdx * numClass + k);
            }
            if (!prob) {
                rec.add((float) MathUtils.findMaxIndex(tmp));
            } else {
                MathUtils.softmax(tmp);
                for (int k = 0; k < numClass; k++) {
                    preds.set(insIdx * numClass + k, tmp[k]);
                }
            }
        }
        return rec;
    }

    /** @return the default evaluation metric name: multi-class error */
    @Override
    public String defaultEvalMetric() {
        return "merror";
    }

    /**
     * Transforms prediction values for output (called only at prediction
     * time); honours the configured outputProb flag.
     *
     * @param preds prediction values, transformed in place
     */
    @Override
    public void transPred(List<Float> preds) {
        this.transform(preds, this.outputProb);
    }

    /**
     * Transforms prediction values for evaluation; always uses probabilities.
     *
     * @param preds prediction values, transformed in place
     */
    @Override
    public void transEval(List<Float> preds) {
        this.transform(preds, true);
    }

    /**
     * Converts a probability back to a margin (used to map a user-set
     * base_score into margin space). Not supported for softmax: returns 0.
     *
     * @param base_score probability-space base score
     * @return always 0
     */
    @Override
    public float prob2Margin(float base_score) {
        return 0;
    }
}
|
package com.icthh.xm.ms.configuration.web.rest;
import com.icthh.xm.commons.tenant.TenantContext;
import com.icthh.xm.commons.tenant.TenantContextHolder;
import com.icthh.xm.commons.tenant.TenantKey;
import com.icthh.xm.ms.configuration.service.ConfigurationService;
import lombok.SneakyThrows;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.web.servlet.WebMvcTest;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.test.web.servlet.MockMvc;
import java.util.Optional;
import static com.icthh.xm.ms.configuration.config.Constants.*;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.*;
import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post;
import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status;
/**
 * MVC slice tests for ConfigurationClientResource's refresh endpoint:
 * a trailing path refreshes one config, no path refreshes the whole tenant.
 */
@RunWith(SpringRunner.class)
@WebMvcTest(controllers = ConfigurationClientResource.class, secure = false)
@ContextConfiguration(classes = {ConfigurationClientResource.class})
public class ConfigurationClientResourceMvcTest {

    @MockBean
    private ConfigurationAdminResource configurationAdminResource;
    @MockBean
    private ConfigurationService configurationService;
    @MockBean
    private TenantContextHolder tenantContextHolder;
    @Autowired
    private MockMvc mockMvc;

    // POST .../refresh/<path> must refresh exactly that tenant-scoped config
    @Test
    @SneakyThrows
    public void ifPathPassedRefreshOnlyOneConfig() {
        // stub the tenant context so the resource resolves tenant "INTTEST"
        TenantContext context = mock(TenantContext.class);
        when(context.getTenantKey()).thenReturn(Optional.ofNullable(TenantKey.valueOf("INTTEST")));
        when(tenantContextHolder.getContext()).thenReturn(context);
        String testPath = "/test/folder/subfolder/documentname";
        mockMvc.perform(post(API_PREFIX + PROFILE + REFRESH + testPath))
                .andExpect(status().is2xxSuccessful());
        // the refreshed path must be prefixed with the tenant's config root
        verify(configurationService).refreshConfiguration(eq(CONFIG + TENANTS + "/INTTEST" + testPath));
        verifyNoMoreInteractions(configurationService);
    }

    // POST .../refresh without a path must refresh every tenant configuration
    @Test
    @SneakyThrows
    public void ifPathNotPassedRefreshAll() {
        // stub the tenant context so the resource resolves tenant "INTTEST"
        TenantContext context = mock(TenantContext.class);
        when(context.getTenantKey()).thenReturn(Optional.ofNullable(TenantKey.valueOf("INTTEST")));
        when(tenantContextHolder.getContext()).thenReturn(context);
        mockMvc.perform(post(API_PREFIX + PROFILE + REFRESH))
                .andExpect(status().is2xxSuccessful());
        verify(configurationService).refreshTenantConfigurations();
        verifyNoMoreInteractions(configurationService);
    }
}
|
/**
* Copyright (C) 2015 The Gravitee team (http://gravitee.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gravitee.repository.redis.management.internal.impl;
import io.gravitee.repository.redis.management.internal.DictionaryRedisRepository;
import io.gravitee.repository.redis.management.internal.ViewRedisRepository;
import io.gravitee.repository.redis.management.model.RedisDictionary;
import io.gravitee.repository.redis.management.model.RedisView;
import org.springframework.stereotype.Component;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
/**
 * Redis-backed repository for dictionaries. All entries live in a single
 * Redis hash under {@code REDIS_KEY}, with the dictionary id as the hash field.
 *
 * @author David BRASSELY (david.brassely at graviteesource.com)
 * @author GraviteeSource Team
 */
@Component
public class DictionaryRedisRepositoryImpl extends AbstractRedisRepository implements DictionaryRedisRepository {

    private static final String REDIS_KEY = "dictionary";

    @Override
    public Set<RedisDictionary> findAll() {
        return redisTemplate.opsForHash().entries(REDIS_KEY)
                .values()
                .stream()
                .map(value -> convert(value, RedisDictionary.class))
                .collect(Collectors.toSet());
    }

    @Override
    public RedisDictionary findById(final String dictionaryId) {
        Object raw = redisTemplate.opsForHash().get(REDIS_KEY, dictionaryId);
        return (raw == null) ? null : convert(raw, RedisDictionary.class);
    }

    @Override
    public RedisDictionary saveOrUpdate(final RedisDictionary dictionary) {
        redisTemplate.opsForHash().put(REDIS_KEY, dictionary.getId(), dictionary);
        return dictionary;
    }

    @Override
    public void delete(final String dictionary) {
        redisTemplate.opsForHash().delete(REDIS_KEY, dictionary);
    }
}
|
package br.com.rbg.pedidovenda.controller;
import java.io.Serializable;
import java.math.BigDecimal;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Map;
import javax.annotation.PostConstruct;
import javax.enterprise.context.RequestScoped;
import javax.inject.Inject;
import javax.inject.Named;
import org.primefaces.model.chart.Axis;
import org.primefaces.model.chart.AxisType;
import org.primefaces.model.chart.CartesianChartModel;
import org.primefaces.model.chart.CategoryAxis;
import org.primefaces.model.chart.ChartSeries;
import org.primefaces.model.chart.LineChartModel;
import org.primefaces.model.chart.PieChartModel;
import br.com.rbg.pedidovenda.model.Usuario;
import br.com.rbg.pedidovenda.repository.Pedidos;
import br.com.rbg.pedidovenda.security.UsuarioLogado;
import br.com.rbg.pedidovenda.security.UsuarioSistema;
/**
 * Backing bean that builds the "orders created" charts: a line chart of order
 * totals per day over the last 30 days (all orders vs. the logged-in user's
 * orders) and a pie chart of totals per user.
 */
@Named
@RequestScoped
public class GraficoPedidosCriadosBean implements Serializable {

    private static final long serialVersionUID = 1L;

    // SimpleDateFormat is mutable and NOT thread-safe, so it must not be shared
    // through a static field. One instance per request-scoped bean is safe.
    private final DateFormat dateFormat = new SimpleDateFormat("dd/MM");

    @Inject
    private Pedidos pedidos;

    @Inject
    @UsuarioLogado
    private UsuarioSistema usuarioLogado;

    private LineChartModel lineModel;
    private PieChartModel pieModel;

    /** Builds both chart models once per request. */
    @PostConstruct
    public void init() {
        this.lineModel = new LineChartModel();
        this.pieModel = new PieChartModel();

        createLineModels();
        adicionarSerie("Todos os pedidos", null);
        adicionarSerie("Meus pedidos", usuarioLogado.getUsuario());

        createPieModel();
        adicionarPieChart();
    }

    /** Configures title, legend, animation and axes of the line chart. */
    private void createLineModels() {
        lineModel.setTitle("Pedidos Criados");
        lineModel.setLegendPosition("e");
        lineModel.setAnimate(true);

        lineModel.getAxes().put(AxisType.X, new CategoryAxis("Período"));

        Axis yAxis = lineModel.getAxis(AxisType.Y);
        yAxis.setLabel("Valor total dos pedidos");
        yAxis.setMin(0);
    }

    /**
     * Adds one series with the order totals of the last 30 days.
     *
     * @param rotulo    series label
     * @param criadoPor filter orders by creator, or {@code null} for all orders
     */
    private void adicionarSerie(String rotulo, Usuario criadoPor) {
        Map<Date, BigDecimal> valoresPorData = this.pedidos.valoresTotaisPorData(30, criadoPor);

        ChartSeries series = new ChartSeries(rotulo);
        series.setLabel(rotulo);

        // Iterate entries instead of keySet()+get() to avoid a second lookup per key.
        for (Map.Entry<Date, BigDecimal> entry : valoresPorData.entrySet()) {
            series.set(dateFormat.format(entry.getKey()), entry.getValue());
        }

        this.lineModel.addSeries(series);
    }

    /** Fills the pie chart with the total order value per user. */
    private void adicionarPieChart() {
        Map<String, BigDecimal> valoresPorVendedor = this.pedidos.valorTotalPorUsuario();
        for (Map.Entry<String, BigDecimal> entry : valoresPorVendedor.entrySet()) {
            pieModel.set(entry.getKey(), entry.getValue());
        }
    }

    /** Configures title, legend and data labels of the pie chart. */
    private void createPieModel() {
        pieModel.setTitle("Pedidos Criados");
        pieModel.setLegendPosition("e");
        pieModel.setFill(true);
        pieModel.setShowDataLabels(true);
    }

    public LineChartModel getLineModel() {
        return lineModel;
    }

    public PieChartModel getPieModel() {
        return pieModel;
    }
}
|
/*
*
*/
package xy.reflect.ui.info.menu;
import java.util.HashMap;
import java.util.Map;
/**
 * Base class of menu model element specifications.
 * <p>
 * Default behavior: {@link #getName()} and {@link #getOnlineHelp()} are
 * unsupported and must be overridden by subclasses that need them; specific
 * properties are held in a plain mutable map.
 *
 * @author olitank
 *
 */
public abstract class AbstractMenuElementInfo implements IMenuElementInfo {
// Free-form property bag; exposed (and replaceable) as-is, not defensively copied.
private Map<String, Object> specificProperties = new HashMap<String, Object>();
/** Unsupported by default; override in subclasses that have a name. */
@Override
public String getName() {
throw new UnsupportedOperationException();
}
/** Unsupported by default; override in subclasses that provide help text. */
@Override
public String getOnlineHelp() {
throw new UnsupportedOperationException();
}
/** Returns the live (mutable) property map — callers can modify it in place. */
@Override
public Map<String, Object> getSpecificProperties() {
return specificProperties;
}
/** Replaces the whole property map; the given map is stored without copying. */
public void setSpecificProperties(Map<String, Object> specificProperties) {
this.specificProperties = specificProperties;
}
}
|
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.intellij.schema.utils;
import com.intellij.lang.ASTNode;
import com.intellij.psi.PsiElement;
import com.intellij.psi.PsiWhiteSpace;
import com.intellij.psi.impl.source.tree.LeafPsiElement;
import com.intellij.psi.tree.IElementType;
import java.util.Arrays;
import java.util.stream.Collectors;
/** A cursor over the child tokens (AST nodes) of a PSI element. */
public class Tokens {

    private final ASTNode[] nodes;
    private int position = 0;

    private Tokens(PsiElement element) {
        nodes = element.getNode().getChildren(null);
    }

    /** Creates a token cursor over the children of the given element. */
    public static Tokens of(PsiElement element) {
        return new Tokens(element);
    }

    /**
     * Advances past the current token if it is of one of the given types.
     *
     * @return true if the current token matched and the cursor advanced,
     *         false if it did not match and nothing was changed
     */
    public boolean skip(IElementType... tokenTypes) {
        boolean matches = is(tokenTypes);
        if (matches)
            position++;
        return matches;
    }

    /**
     * Advances past the current token if it is whitespace.
     *
     * @return true if the current token was whitespace and the cursor advanced,
     *         false if it was not and nothing was changed
     */
    public boolean skipWhitespace() {
        boolean matches = isWhitespace();
        if (matches)
            position++;
        return matches;
    }

    /** Returns whether the current token is of one of the given types. */
    public boolean is(IElementType... tokenTypes) {
        ASTNode node = current();
        if (node == null) return false;
        for (IElementType type : tokenTypes) {
            if (node.getElementType() == type) return true;
        }
        return false;
    }

    /** Returns whether the current token is whitespace. */
    public boolean isWhitespace() {
        ASTNode node = current();
        return node != null && node.getPsi() instanceof PsiWhiteSpace;
    }

    /** Returns whether the current token is a leaf element. */
    public boolean isElement() {
        return current() instanceof LeafPsiElement;
    }

    /** Returns and consumes the current token if it has a required type; throws otherwise. */
    public ASTNode require(IElementType... tokenTypes) {
        if (!is(tokenTypes))
            throw new IllegalArgumentException("Expected " + toString(tokenTypes) + " but got " + current());
        ASTNode node = current();
        position++;
        return node;
    }

    /** Returns and consumes the current token if it is a leaf element; throws otherwise. */
    public ASTNode requireElement() {
        if (!isElement())
            throw new IllegalArgumentException("Expected an element but got " + current().getClass());
        ASTNode node = current();
        position++;
        return node;
    }

    /** Consumes the current token if it is whitespace; throws otherwise. */
    public void requireWhitespace() {
        if (!isWhitespace())
            throw new IllegalArgumentException("Expected whitespace, but got " + current());
        position++;
    }

    /** Returns the current token (AST node), or null if the end has been reached. */
    public ASTNode current() {
        return position < nodes.length ? nodes[position] : null;
    }

    private String toString(IElementType[] tokens) {
        return Arrays.stream(tokens).map(IElementType::getDebugName).collect(Collectors.joining(", "));
    }

    /** For debugging: prints the remaining tokens to standard out. */
    public void dump() {
        for (int j = position; j < nodes.length; j++) {
            System.out.println(nodes[j]);
        }
    }
}
|
package com.subgraph.orchid.directory.consensus;
import com.subgraph.orchid.ConsensusDocument;
import com.subgraph.orchid.data.HexDigest;

import java.util.Objects;
/**
 * A directory authority certificate that must be fetched, identified by the
 * authority identity digest and the signing-key digest.
 * <p>
 * Equality and hashing are based on the two digests only; the download
 * failure counter is mutable bookkeeping and deliberately excluded.
 */
public class RequiredCertificateImpl implements ConsensusDocument.RequiredCertificate {

    private final HexDigest identity;
    private final HexDigest signingKey;
    private int downloadFailureCount;

    public RequiredCertificateImpl(HexDigest identity, HexDigest signingKey) {
        this.identity = identity;
        this.signingKey = signingKey;
    }

    @Override
    public int hashCode() {
        // Objects.hash produces the same 31-based, null-safe combination the
        // previous hand-rolled version computed.
        return Objects.hash(identity, signingKey);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null || getClass() != obj.getClass())
            return false;
        RequiredCertificateImpl other = (RequiredCertificateImpl) obj;
        return Objects.equals(identity, other.identity)
                && Objects.equals(signingKey, other.signingKey);
    }

    /** Records one failed attempt to download this certificate. */
    public void incrementDownloadFailureCount() {
        downloadFailureCount += 1;
    }

    public int getDownloadFailureCount() {
        return downloadFailureCount;
    }

    public HexDigest getAuthorityIdentity() {
        return identity;
    }

    public HexDigest getSigningKey() {
        return signingKey;
    }
}
|
package net.orekyuu.riho.animation;
import java.time.Duration;
import java.time.Instant;
/**
 * An animation whose progress accelerates over time (quadratic ease-in):
 * slow near the start, fastest near the end.
 */
public class EaseInAnimation extends Animation {
public EaseInAnimation(Instant startTime, Duration duration) {
super(startTime, duration);
}
public EaseInAnimation(Instant startTime, Duration duration, Duration delay) {
super(startTime, duration, delay);
}
/**
 * Maps a linear progress rate to an eased value via rate^2.
 * NOTE(review): returns valueFrom + valueTo * rate^2, which is only a correct
 * interpolation if the superclass treats valueTo as a delta relative to
 * valueFrom rather than an absolute target value — confirm against Animation;
 * otherwise this should be valueFrom + (valueTo - valueFrom) * rate^2.
 */
@Override
public double getValue(double rate) {
double distance = rate * rate;
return valueFrom + (valueTo * distance);
}
}
|
package org.elmlang.intellijplugin.psi.impl;
import com.intellij.extapi.psi.ASTWrapperPsiElement;
import com.intellij.lang.ASTNode;
import com.intellij.psi.PsiElementVisitor;
import com.intellij.psi.PsiReference;
import org.elmlang.intellijplugin.psi.ElmVisitor;
import org.elmlang.intellijplugin.psi.references.ElmReference;
import org.jetbrains.annotations.Contract;
import org.jetbrains.annotations.NotNull;
import java.util.Arrays;
import java.util.stream.Stream;
/**
 * Base PSI element for the Elm plugin. Collects references contributed by all
 * Elm descendants and re-anchors each of them to this element.
 */
public abstract class ElmPsiElement extends ASTWrapperPsiElement {

    public ElmPsiElement(@NotNull ASTNode node) {
        super(node);
    }

    /** Returns all references from this element's Elm descendants as an array. */
    @NotNull
    @Contract(
            pure = true
    )
    public PsiReference[] getReferences() {
        return this.getReferencesStream().toArray(PsiReference[]::new);
    }

    public void accept(@NotNull PsiElementVisitor visitor) {
        if (visitor instanceof ElmVisitor) {
            ((ElmVisitor) visitor).visitPsiElement(this);
        } else {
            super.accept(visitor);
        }
    }

    /**
     * Streams the references of all Elm children, re-anchored to this element.
     * Uses flatMap instead of reduce(Stream.empty(), Stream::concat): repeated
     * Stream.concat builds a deeply nested stream that the Stream.concat javadoc
     * warns may cause StackOverflowError on deep trees. Encounter order is the
     * same as the concat-based version.
     */
    public Stream<ElmReference> getReferencesStream() {
        return Arrays.stream(this.getChildren())
                .filter(c -> c instanceof ElmPsiElement)
                .flatMap(c -> getReferencesFromChild((ElmPsiElement) c));
    }

    /** References of one child, re-anchored so they resolve relative to this ancestor. */
    private Stream<ElmReference> getReferencesFromChild(ElmPsiElement element) {
        return element.getReferencesStream()
                .map(r -> r.referenceInAncestor(this));
    }
}
|
/*
* This file was last modified at 2020.04.15 22:24 by Victor N. Skurikhin.
* This is free and unencumbered software released into the public domain.
* For more information, please refer to <http://unlicense.org>
* UserOnlyLoginRoServiceImplTest.java
* $Id: f706ee3deaece0b979d1f1569695b00c7c3da6b4 $
*/
package su.svn.showcase.services.impl;
import org.jboss.weld.junit5.WeldInitiator;
import org.jboss.weld.junit5.WeldJunit5Extension;
import org.jboss.weld.junit5.WeldSetup;
import org.jboss.weld.junit5.auto.AddPackages;
import org.junit.jupiter.api.*;
import org.junit.jupiter.api.extension.ExtendWith;
import su.svn.showcase.converters.UserOnlyLoginConverter;
import su.svn.showcase.converters.user.UserOnlyLoginBaseConverter;
import su.svn.showcase.dao.UserLoginDao;
import su.svn.showcase.dao.jpa.UserLoginDaoEjb;
import su.svn.showcase.domain.UserLogin;
import su.svn.showcase.dto.UserOnlyLoginDto;
import su.svn.showcase.exceptions.ErrorCase;
import su.svn.showcase.services.CrudService;
import su.svn.showcase.services.UserOnlyLoginRoService;
import su.svn.showcase.services.impl.support.EntityManagerFactoryProducer;
import su.svn.showcase.services.impl.support.EntityManagerProducer;
import su.svn.showcase.services.impl.support.JtaEnvironment;
import su.svn.utils.InputStreamUtil;
import javax.ejb.EJB;
import javax.enterprise.context.RequestScoped;
import javax.enterprise.inject.spi.BeanManager;
import javax.enterprise.inject.spi.InjectionPoint;
import javax.inject.Inject;
import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.Persistence;
import javax.transaction.UserTransaction;
import java.io.InputStream;
import java.time.LocalDateTime;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.function.Function;
import static su.svn.showcase.domain.TestData.*;
import static su.svn.showcase.dto.TestData.cloneUserOnlyLoginBaseDto1;
import static su.svn.showcase.services.impl.support.EntityManagerFactoryProducer.configure;
/**
 * Unit tests for UserOnlyLoginRoServiceImpl, run inside a Weld SE container
 * (weld-junit5) with a JTA environment and a real JPA EntityManager from the
 * "PgPU" persistence unit. Test data is loaded and removed via native SQL
 * scripts named after this class (resourceNamePrefix + "_setUp.sql" /
 * "_tearDown.sql").
 */
@DisplayName("A UserOnlyLoginRoServiceImplTest unit test cases")
@AddPackages(value = {UserLoginDao.class, CrudService.class})
@ExtendWith({JtaEnvironment.class, WeldJunit5Extension.class})
class UserOnlyLoginRoServiceImplTest {
// Used to locate the SQL resources for this test on the classpath.
static final Class<?> tClass = UserOnlyLoginRoServiceImplTest.class;
static final String resourceNamePrefix = "/META-INF/sql/" + tClass.getSimpleName();
// Id of the user login row inserted by the _setUp.sql script.
static final UUID UUID10 = UUID.fromString("00000000-0000-0000-0000-000000000010");
// Concrete instances handed out by the fake @EJB factory below.
static final UserLoginDao userLoginDaoEjb = new UserLoginDaoEjb();
static final UserOnlyLoginConverter userOnlyLoginConverter = new UserOnlyLoginBaseConverter();
static final UserOnlyLoginRoService userOnlyLoginRoService = new UserOnlyLoginRoServiceImpl();
// Maps @EJB(beanName = ...) names to the instances above.
private final Map<String, Object> ejbMap = new HashMap<String, Object>() {{
put("UserLoginDaoEjb", userLoginDaoEjb);
put("UserOnlyLoginBaseConverter", userOnlyLoginConverter);
put("UserOnlyLoginRoService", userOnlyLoginRoService);
}};
// Resolves @EJB injection points by bean name against ejbMap.
private Function<InjectionPoint, Object> ejbFactory() {
return ip -> {
String name = ip.getAnnotated().getAnnotation(EJB.class).beanName();
System.err.println("beanName: " + name);
return ejbMap.get(name);
};
}
@Inject
private BeanManager beanManager;
// NOTE(review): this field initializer runs before CDI injection, so
// beanManager is still null when configure(beanManager) is evaluated —
// confirm that configure() tolerates a null bean manager.
private EntityManagerFactory emf = Persistence.createEntityManagerFactory("PgPU", configure(beanManager));
// Boots the Weld container with the beans under test plus the EM producers,
// wiring @EJB and persistence injection points to the factories above.
@WeldSetup
private
WeldInitiator weld = WeldInitiator.from(
UserLoginDaoEjb.class,
UserOnlyLoginRoServiceImpl.class,
EntityManagerFactoryProducer.class,
EntityManagerProducer.class)
.activate(RequestScoped.class)
.setEjbFactory(ejbFactory())
.setPersistenceContextFactory(injectionPoint -> emf.createEntityManager())
.setPersistenceUnitFactory(injectionPoint -> emf)
.inject(userLoginDaoEjb)
.inject(userOnlyLoginRoService)
.inject(this)
.build();
@Inject
private EntityManager entityManager;
@Inject
private UserTransaction userTransaction;
// The service under test, injected via the fake @EJB factory.
@EJB(beanName = "UserOnlyLoginRoService")
UserOnlyLoginRoService service;
private UserLogin entity;
private UserOnlyLoginDto dto;
// Loads the per-test fixture rows and prepares template entity/dto clones.
@BeforeEach
void setUp() throws Exception {
InputStream is = tClass.getResourceAsStream(resourceNamePrefix + "_setUp.sql");
userTransaction.begin();
InputStreamUtil.readAndExecuteLine(is, sql ->
entityManager.createNativeQuery(sql).executeUpdate());
userTransaction.commit();
entity = cloneUserLogin1();
dto = cloneUserOnlyLoginBaseDto1();
}
// Rolls back any transaction a test left open, then removes the fixture rows.
@AfterEach
void tearDown() throws Exception {
try {
userTransaction.rollback();
} catch (Exception ignored) {}
InputStream is = tClass.getResourceAsStream(resourceNamePrefix + "_tearDown.sql");
userTransaction.begin();
InputStreamUtil.readAndExecuteLine(is, sql ->
entityManager.createNativeQuery(sql).executeUpdate());
userTransaction.commit();
}
static UUID UUID1 = UUID.randomUUID();
static LocalDateTime NOW1 = LocalDateTime.now();
// The service is read-only: create must be rejected with ErrorCase.
@Test
void create() throws Exception {
UserOnlyLoginDto loginDto = UserOnlyLoginDto.builder()
.id(UUID1)
.login("loginTest" + UUID1)
.build();
userTransaction.begin();
Assertions.assertThrows(ErrorCase.class, () -> service.create(loginDto));
userTransaction.rollback();
}
// NOTE(review): only checks that the call completes; the result is never asserted.
@Test
void readById() throws Exception {
userTransaction.begin();
UserOnlyLoginDto test = service.readById(UUID10);
userTransaction.rollback();
}
// NOTE(review): only checks that the call completes; the result is printed, not asserted.
@Test
void readByLogin() throws Exception {
userTransaction.begin();
UserOnlyLoginDto test = service.readByLogin("loginTest10");
userTransaction.rollback();
System.out.println("test = " + test);
}
// NOTE(review): only checks that the call completes; the result is never asserted.
@Test
void readRange() throws Exception {
userTransaction.begin();
List<UserOnlyLoginDto> test = service.readRange(0, Integer.MAX_VALUE);
userTransaction.rollback();
}
// The service is read-only: update must be rejected with ErrorCase.
@Test
void update() throws Exception {
UserOnlyLoginDto loginDto = UserOnlyLoginDto.builder()
.id(UUID10)
.login("loginTest" + UUID10)
.build();
userTransaction.begin();
Assertions.assertThrows(ErrorCase.class, () -> service.update(loginDto));
userTransaction.rollback();
}
// The service is read-only: delete must be rejected with ErrorCase.
@Test
void delete() throws Exception {
userTransaction.begin();
Assertions.assertThrows(ErrorCase.class, () -> service.delete(UUID10));
userTransaction.rollback();
}
// Exactly one row is inserted by _setUp.sql, so count() must be 1.
@Test
void count() throws Exception {
userTransaction.begin();
Assertions.assertEquals(1, service.count());
userTransaction.rollback();
}
}
|
package org.azizkhani.core;
import org.springframework.beans.BeansException;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
/**
* This class provides an application-wide access to the
* Spring ApplicationContext! The ApplicationContext is
* injected in a static method of the class "AppContext".
*
* Use AppContext.getApplicationContext() to get access
* to all Spring Beans.
*
* @author Siegfried Bolz
*/
public class ApplicationContextProvider implements ApplicationContextAware {
public void setApplicationContext(ApplicationContext ctx) throws BeansException {
// Wiring the ApplicationContext into a static method
AppContext.setApplicationContext(ctx);
}
} // .EOF
|
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package androidx.lifecycle;
import static androidx.lifecycle.Lifecycle.Event.ON_ANY;
import static androidx.lifecycle.Lifecycle.Event.ON_CREATE;
import static androidx.lifecycle.Lifecycle.Event.ON_DESTROY;
import static androidx.lifecycle.Lifecycle.Event.ON_PAUSE;
import static androidx.lifecycle.Lifecycle.Event.ON_RESUME;
import static androidx.lifecycle.Lifecycle.Event.ON_START;
import static androidx.lifecycle.Lifecycle.Event.ON_STOP;
import static androidx.lifecycle.Lifecycle.State.CREATED;
import static androidx.lifecycle.Lifecycle.State.INITIALIZED;
import static androidx.lifecycle.Lifecycle.State.RESUMED;
import static androidx.lifecycle.Lifecycle.State.STARTED;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.when;
import androidx.annotation.NonNull;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import org.mockito.InOrder;
import org.mockito.Mockito;
@RunWith(JUnit4.class)
public class FullLifecycleObserverTest {

    private LifecycleOwner mOwner;
    private Lifecycle mLifecycle;

    @Before
    public void initMocks() {
        mOwner = mock(LifecycleOwner.class);
        mLifecycle = mock(Lifecycle.class);
        when(mOwner.getLifecycle()).thenReturn(mLifecycle);
    }

    /** Each lifecycle event must be dispatched to the matching typed callback, in order. */
    @Test
    public void eachEvent() {
        FullLifecycleObserver obj = mock(FullLifecycleObserver.class);
        FullLifecycleObserverAdapter observer = new FullLifecycleObserverAdapter(obj, null);
        when(mLifecycle.getCurrentState()).thenReturn(CREATED);
        observer.onStateChanged(mOwner, ON_CREATE);
        InOrder inOrder = Mockito.inOrder(obj);
        inOrder.verify(obj).onCreate(mOwner);
        reset(obj);

        when(mLifecycle.getCurrentState()).thenReturn(STARTED);
        observer.onStateChanged(mOwner, ON_START);
        inOrder.verify(obj).onStart(mOwner);
        reset(obj);

        when(mLifecycle.getCurrentState()).thenReturn(RESUMED);
        observer.onStateChanged(mOwner, ON_RESUME);
        inOrder.verify(obj).onResume(mOwner);
        reset(obj);

        when(mLifecycle.getCurrentState()).thenReturn(STARTED);
        observer.onStateChanged(mOwner, ON_PAUSE);
        inOrder.verify(obj).onPause(mOwner);
        reset(obj);

        when(mLifecycle.getCurrentState()).thenReturn(CREATED);
        observer.onStateChanged(mOwner, ON_STOP);
        inOrder.verify(obj).onStop(mOwner);
        reset(obj);

        when(mLifecycle.getCurrentState()).thenReturn(INITIALIZED);
        observer.onStateChanged(mOwner, ON_DESTROY);
        inOrder.verify(obj).onDestroy(mOwner);
        reset(obj);
    }

    /**
     * An observer implementing both FullLifecycleObserver and
     * LifecycleEventObserver must receive the typed callback first, then the
     * generic onStateChanged, for every event.
     */
    @Test
    public void fullLifecycleObserverAndLifecycleEventObserver() {
        class AllObservers implements FullLifecycleObserver, LifecycleEventObserver {
            @Override
            public void onCreate(LifecycleOwner owner) {
            }

            @Override
            public void onStart(LifecycleOwner owner) {
            }

            @Override
            public void onResume(LifecycleOwner owner) {
            }

            @Override
            public void onPause(LifecycleOwner owner) {
            }

            @Override
            public void onStop(LifecycleOwner owner) {
            }

            @Override
            public void onDestroy(LifecycleOwner owner) {
            }

            @Override
            public void onStateChanged(@NonNull LifecycleOwner source,
                    @NonNull Lifecycle.Event event) {
            }
        }

        AllObservers obj = mock(AllObservers.class);
        FullLifecycleObserverAdapter observer = new FullLifecycleObserverAdapter(obj, obj);
        when(mLifecycle.getCurrentState()).thenReturn(CREATED);
        observer.onStateChanged(mOwner, ON_CREATE);
        InOrder inOrder = Mockito.inOrder(obj);
        inOrder.verify(obj).onCreate(mOwner);
        inOrder.verify(obj).onStateChanged(mOwner, ON_CREATE);
        reset(obj);

        when(mLifecycle.getCurrentState()).thenReturn(STARTED);
        observer.onStateChanged(mOwner, ON_START);
        inOrder.verify(obj).onStart(mOwner);
        inOrder.verify(obj).onStateChanged(mOwner, ON_START);
        reset(obj);

        when(mLifecycle.getCurrentState()).thenReturn(RESUMED);
        observer.onStateChanged(mOwner, ON_RESUME);
        inOrder.verify(obj).onResume(mOwner);
        inOrder.verify(obj).onStateChanged(mOwner, ON_RESUME);
        reset(obj);

        when(mLifecycle.getCurrentState()).thenReturn(STARTED);
        observer.onStateChanged(mOwner, ON_PAUSE);
        inOrder.verify(obj).onPause(mOwner);
        inOrder.verify(obj).onStateChanged(mOwner, ON_PAUSE);
        reset(obj);

        when(mLifecycle.getCurrentState()).thenReturn(CREATED);
        observer.onStateChanged(mOwner, ON_STOP);
        inOrder.verify(obj).onStop(mOwner);
        inOrder.verify(obj).onStateChanged(mOwner, ON_STOP);
        reset(obj);

        when(mLifecycle.getCurrentState()).thenReturn(INITIALIZED);
        observer.onStateChanged(mOwner, ON_DESTROY);
        inOrder.verify(obj).onDestroy(mOwner);
        inOrder.verify(obj).onStateChanged(mOwner, ON_DESTROY);
        reset(obj);
    }

    /**
     * Documents (as a compile-level check) that a FullLifecycleObserver may also
     * declare OnLifecycleEvent annotations, whose methods must never be invoked
     * through the adapter.
     * Fix: this method was missing the {@code @Test} annotation and never ran.
     */
    @Test
    public void fullLifecycleObserverAndAnnotations() {
        @SuppressWarnings("deprecation")
        class AnnotatedFullLifecycleObserver implements FullLifecycleObserver {
            @OnLifecycleEvent(ON_ANY)
            public void onAny() {
                throw new IllegalStateException("Annotations in FullLifecycleObserver "
                        + "must not be called");
            }

            @Override
            public void onCreate(LifecycleOwner owner) {
            }

            @Override
            public void onStart(LifecycleOwner owner) {
            }

            @Override
            public void onResume(LifecycleOwner owner) {
            }

            @Override
            public void onPause(LifecycleOwner owner) {
            }

            @Override
            public void onStop(LifecycleOwner owner) {
            }

            @Override
            public void onDestroy(LifecycleOwner owner) {
            }
        }
    }
}
|
package adapterpattern.classadapter;
/**
 * Class adapter (adapter pattern via inheritance): adapts {@code Source} to
 * the {@code Targetable} interface by extending Source and supplying method2.
 */
public class Adapter extends Source implements Targetable {
// NOTE(review): presumably implements Targetable.method2 — confirm against the interface.
public void method2() {
System.out.println("method2 is working");
}
}
|
package me.zhengjie.utils;
import javax.crypto.Cipher;
import javax.crypto.SecretKey;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.DESKeySpec;
import javax.crypto.spec.IvParameterSpec;
import java.nio.charset.StandardCharsets;
/**
 * Symmetric DES/CBC/PKCS5Padding encryption helpers using a fixed key that is
 * also reused as the CBC initialization vector.
 * <p>
 * Thread-safe: a fresh Cipher is created per call, replacing the previous
 * shared static Cipher field that could be corrupted by interleaved
 * encrypt/decrypt calls from different threads.
 * <p>
 * SECURITY NOTE(review): DES with a hard-coded key/IV provides no real
 * confidentiality; prefer AES-GCM with a managed key for new code.
 *
 * @author Zheng Jie
 * @date 2018-11-23
 */
public class EncryptUtils {

    /** Fixed key material, also used as the CBC IV (both must be 8 bytes for DES). */
    private static final String KEY = "Passw0rd";

    private static final IvParameterSpec IV =
            new IvParameterSpec(KEY.getBytes(StandardCharsets.UTF_8));

    public static void main(String[] args) throws Exception {
        System.out.println(desEncrypt("viki1230.i"));
    }

    /**
     * Creates a freshly initialized DES cipher for the given mode.
     *
     * @param mode Cipher.ENCRYPT_MODE or Cipher.DECRYPT_MODE
     */
    private static Cipher newCipher(int mode) throws Exception {
        SecretKeyFactory keyFactory = SecretKeyFactory.getInstance("DES");
        SecretKey secretKey = keyFactory.generateSecret(
                new DESKeySpec(KEY.getBytes(StandardCharsets.UTF_8)));
        Cipher cipher = Cipher.getInstance("DES/CBC/PKCS5Padding");
        cipher.init(mode, secretKey, IV);
        return cipher;
    }

    /** Rejects null/empty input explicitly instead of failing obscurely downstream. */
    private static void requireText(String source) {
        if (source == null || source.isEmpty()) {
            throw new IllegalArgumentException("source must not be null or empty");
        }
    }

    /**
     * Symmetric encryption: encrypts the UTF-8 bytes of {@code source} and
     * returns the ciphertext as an upper-case hex string.
     *
     * @param source plaintext, must be non-null and non-empty
     * @throws IllegalArgumentException if source is null or empty
     */
    public static String desEncrypt(String source) throws Exception {
        requireText(source);
        Cipher cipher = newCipher(Cipher.ENCRYPT_MODE);
        return byte2hex(cipher.doFinal(source.getBytes(StandardCharsets.UTF_8))).toUpperCase();
    }

    /**
     * Symmetric decryption: decodes the hex ciphertext and returns the
     * decrypted text. Decodes the plaintext bytes as UTF-8 to mirror
     * desEncrypt (the previous version used the platform default charset,
     * which corrupted non-ASCII round trips on some platforms).
     *
     * @param source hex-encoded ciphertext, must be non-null and non-empty
     * @throws IllegalArgumentException if source is null, empty, or of odd length
     */
    public static String desDecrypt(String source) throws Exception {
        requireText(source);
        byte[] src = hex2byte(source.getBytes());
        Cipher cipher = newCipher(Cipher.DECRYPT_MODE);
        return new String(cipher.doFinal(src), StandardCharsets.UTF_8);
    }

    /** Converts bytes to a lower-case hex string, zero-padding single digits. */
    private static String byte2hex(byte[] inStr) {
        StringBuilder out = new StringBuilder(inStr.length * 2);
        for (byte b : inStr) {
            String stmp = Integer.toHexString(b & 0xFF);
            if (stmp.length() == 1) {
                out.append("0");
            }
            out.append(stmp);
        }
        return out.toString();
    }

    /** Parses a hex byte sequence (two hex digits per output byte). */
    private static byte[] hex2byte(byte[] b) {
        if ((b.length % 2) != 0) {
            throw new IllegalArgumentException("长度不是偶数");
        }
        byte[] b2 = new byte[b.length / 2];
        for (int n = 0; n < b.length; n += 2) {
            b2[n / 2] = (byte) Integer.parseInt(new String(b, n, 2), 16);
        }
        return b2;
    }
}
|
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.subscription;
import com.yahoo.foo.AppConfig;
import com.yahoo.foo.TestNonstringConfig;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
/**
 * Tests different aspects of the ConfigInstance class and its underlying Nodes.
 *
 * @author gjoranv
 */
public class ConfigInstanceTest {

    private final ConfigSourceSet sourceSet = new ConfigSourceSet("config-instance-test");

    /**
     * Verifies that the subscriber's configure() method is only
     * called once upon subscribe, even if there are more than one
     * subscribers to the same ConfigInstance. This has previously
     * been a problem, since ConfigInstance.subscribe() called
     * configureSubscriber(), which configures all subscribers to the
     * instance. Now, the new method configureSubscriber(Subscriber)
     * is called instead.
     */
    @Test
    public void testConfigureOnlyOnceUponSubscribe() {
        final String configId = "raw:times 1\n";
        AppService service1 = new AppService(configId, sourceSet);
        AppService service2 = new AppService(configId, sourceSet);

        assertEquals(1, service1.timesConfigured());
        assertEquals(1, service2.timesConfigured());

        service1.cancelSubscription();
        service2.cancelSubscription();
    }

    /**
     * Verifies that values set in previous setConfig() calls are
     * retained when the payload in a new setConfig() call does not
     * overwrite them.
     */
    @Test
    @Ignore
    public void testRetainOldValuesOnConfigUpdates() {
        AppConfig config = new AppConfig(new AppConfig.Builder());
        //config.setConfig(Arrays.asList("message \"one\"", "times 333"), "", 0L);
        assertEquals("one", config.message());
        assertEquals(333, config.times());
        //config.setConfig(Arrays.asList("message \"two\""), "", 0L);
        assertEquals("two", config.message());
        assertEquals("config.times retains previously set value", 333, config.times());
        //config.setConfig(Arrays.asList("times 666"), "", 0L);
        assertEquals("config.message retains previously set value", "two", config.message());
        assertEquals(666, config.times());
    }

    /**
     * Verifies that an exception is thrown when one attempts to set an
     * illegal config value for parameters that have default values.
     */
    @Test
    public void testFailUponIllegalValue() {
        verifyIllegalValue("i notAnInt");
        verifyIllegalValue("i 3.0");
        verifyIllegalValue("d noDouble");
        verifyIllegalValue("d 3.0.");
        // verifyIllegalValue("b notTrueOrFalse");
        //verifyIllegalValue("b 1");
        //verifyIllegalValue("b 0");
        verifyIllegalValue("e undeclaredEnumValue");
        verifyIllegalValue("e 0");
    }

    /** Subscribes with the given raw payload line and expects payload parsing to fail. */
    private void verifyIllegalValue(String line) {
        String configId = "raw:" + line + "\n";
        try {
            new TestNonstring(configId);
            fail("Expected ConfigurationRuntimeException when setting a parameter value of wrong type.");
        } catch (RuntimeException expected) {
            verifyException(expected, "Not able to create config builder for payload", "Got ConfigurationRuntimeException for the wrong reason. " +
                    "Expected to fail when setting a parameter value of wrong type.");
        }
    }

    /** Walks the cause chain and fails unless some cause message contains {@code expected}. */
    private void verifyException(Throwable throwable, String expected, String failMessage) {
        Throwable t = throwable;
        boolean ok = false;
        while (t != null) {
            if (t.getMessage() != null && t.getMessage().contains(expected)) {
                ok = true;
                break;
            }
            t = t.getCause();
        }
        if (!ok) {
            throwable.printStackTrace();
            fail(failMessage);
        }
    }

    /**
     * Subscribes to TestNonstringConfig for the given config id, forcing the
     * payload to be parsed. Made a static nested class: it does not use the
     * enclosing test instance, so it should not hold a hidden reference to it.
     */
    private static class TestNonstring {
        public TestNonstring(String configId) {
            ConfigSubscriber subscriber = new ConfigSubscriber();
            ConfigHandle<TestNonstringConfig> handle = subscriber.subscribe(TestNonstringConfig.class, configId);
            subscriber.nextConfig(false);
            handle.getConfig();
        }
    }
}
|
package examples2;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
/**
 * Demonstrates basic java.util.Map usage: put (which returns the previous
 * value for the key), overwriting an entry, iterating over entries, and
 * key/value lookup. All output goes to standard error.
 */
public class MapsInJava {

    public static void main(String[] args) {
        final Map<String, String> pairs = new HashMap<String, String>(); // <1>

        // put() returns the value previously mapped to the key, or null. // <2>
        System.err.println("Put[1]: " + pairs.put("key1", "value1"));
        System.err.println("Put[2]: " + pairs.put("key1", "newValue1"));
        pairs.put("key2", "value2");

        // Iterate over entries so key and value come from a single lookup. // <3>
        for (Entry<String, String> entry : pairs.entrySet()) {
            System.err.println(entry.getKey() + '=' + entry.getValue());
        }

        // Membership test plus direct value lookup. // <4>
        System.err.println("Contains:" + pairs.containsKey("key2") + ", value=" + pairs.get("key2"));
    }
}
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.impl;
import org.apache.camel.Endpoint;
import org.apache.camel.Producer;
/**
* A service pool for {@link Producer}.
* <p/>
* For instance camel-mina and camel-ftp leverages this to allow a pool of producers so we
* can support concurrent producers in a thread safe manner.
*
* @version
*/
public class DefaultProducerServicePool extends DefaultServicePool<Endpoint, Producer> {
    // Uses the superclass's default capacity.
    public DefaultProducerServicePool() {
    }
    // Creates a pool bounded to the given capacity; sizing is handled by DefaultServicePool.
    public DefaultProducerServicePool(int capacity) {
        super(capacity);
    }
}
|
/*
* Copyright (c) 2016, The National Archives <pronom@nationalarchives.gov.uk>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following
* conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of the The National Archives nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package uk.gov.nationalarchives.droid.core.interfaces.archive;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import org.junit.Test;
import uk.gov.nationalarchives.droid.core.interfaces.AsynchDroid;
import uk.gov.nationalarchives.droid.core.interfaces.IdentificationRequest;
import uk.gov.nationalarchives.droid.core.interfaces.RequestIdentifier;
import uk.gov.nationalarchives.droid.core.interfaces.resource.RequestMetaData;
/**
* @author rflitcroft
*
*/
public class GZipArchiveHandlerTest {

    /**
     * Verifies that handling a GZIP request builds a new identification request
     * for the entry inside the archive — with the GZIP-scheme URI and an
     * identifier whose parent is the original request's node id — and submits
     * that request to the droid core.
     */
    @Test
    public void testHandleGZipFile() throws Exception {
        final Path file = Paths.get(getClass().getResource("/testXmlFile.xml.gz").toURI());
        IdentificationRequest request = mock(IdentificationRequest.class);
        IdentificationRequestFactory factory = mock(IdentificationRequestFactory.class);
        URI expectedUri = ArchiveFileUtils.toGZipUri(file.toUri());
        RequestIdentifier identifier = new RequestIdentifier(file.toUri());
        identifier.setAncestorId(10L);
        identifier.setParentId(20L);
        identifier.setNodeId(30L);
        // The inner entry's parent is the node id (30) of the enclosing request.
        RequestIdentifier expectedIdentifier = new RequestIdentifier(expectedUri);
        expectedIdentifier.setAncestorId(10L);
        expectedIdentifier.setParentId(30L);
        when(factory.newRequest(any(RequestMetaData.class), eq(expectedIdentifier)))
            .thenReturn(request);
        AsynchDroid droidCore = mock(AsynchDroid.class);
        GZipArchiveHandler handler = new GZipArchiveHandler();
        handler.setFactory(factory);
        handler.setDroidCore(droidCore);
        IdentificationRequest originalRequest = mock(IdentificationRequest.class);
        when(originalRequest.getIdentifier()).thenReturn(identifier);
        when(originalRequest.getSourceInputStream()).thenReturn(Files.newInputStream(file));
        handler.handle(originalRequest);
        verify(droidCore).submit(request);
    }
}
|
package com.efortunetech;
import android.content.Context;
import android.os.AsyncTask;
import android.text.TextUtils;
import android.util.Log;
import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLConnection;
import java.util.List;
import java.util.Map;
import java.util.UUID;
/**
* Created by jackymok on 7/1/16.
* Edited by Pan on 2018/07/26
* Edited by Lau on 2019/05/30
*/
public class asyncNetwork extends AsyncTask<String, Void, String> {

    /** Legacy callback: receives only the raw response body. */
    public interface OnAsyncTaskCompleted {
        void onAsyncTaskCompleted(String response);
    }

    /** Newer callback: receives the raw response body plus the HTTP response code. */
    public interface OnAsyncNetworkCompleted {
        void onAsyncTaskCompleted(String response, int responseCode);
    }

    // Single connect/read timeout used by every request flavour, in milliseconds.
    private static final int TIMEOUT_MS = 15000;

    private String json = "";
    private Context context;
    private String url;
    private String data = "";
    // HTTP status of the multipart request; stays -1 until a response arrives.
    private int responseCode = -1;
    private Map<String, String> fields;
    private Map<String, File> files;
    private OnAsyncTaskCompleted listener;
    private OnAsyncNetworkCompleted newListener;

    /** Plain GET request; {@code callback} receives the response body. */
    public asyncNetwork(OnAsyncTaskCompleted callback, Context c, String u) {
        context = c;
        url = u;
        listener = callback;
    }

    /** POST request with raw body {@code d}; {@code callback} receives the response body. */
    public asyncNetwork(OnAsyncTaskCompleted callback, Context c, String u, String d) {
        context = c;
        url = u;
        listener = callback;
        data = d;
    }

    /** Fire-and-forget GET request with no callback. */
    public asyncNetwork(Context c, String u) {
        context = c;
        url = u;
    }

    /** Multipart POST carrying form fields only. */
    public asyncNetwork(OnAsyncNetworkCompleted callback, Context c, String u, Map<String, String> params) {
        init(callback, c, u, params, null);
    }

    /** Multipart POST carrying form fields and file attachments. */
    public asyncNetwork(OnAsyncNetworkCompleted callback, Context c, String u, Map<String, String> params, Map<String, File> files) {
        init(callback, c, u, params, files);
    }

    private void init(OnAsyncNetworkCompleted callback, Context c, String u, Map<String, String> params, Map<String, File> files) {
        this.context = c;
        this.url = u;
        this.fields = params;
        this.files = files;
        this.newListener = callback;
    }

    /**
     * Dispatches to the request flavour implied by the constructor used, then reads
     * the entire response into {@link #json}. Returns the response text, or the
     * empty string if the request failed.
     */
    @Override
    protected String doInBackground(String... params) {
        InputStream myStream;
        if (TextUtils.isEmpty(data) && listener != null) {
            myStream = getStream(url);
        } else if (!TextUtils.isEmpty(data) && listener != null) {
            myStream = getStream(url, data);
        } else {
            myStream = getStream(url, fields, files);
        }
        if (myStream != null) {
            BufferedReader reader = null;
            try {
                StringBuilder sb = new StringBuilder();
                // Decode explicitly as UTF-8 instead of the platform default charset.
                reader = new BufferedReader(new InputStreamReader(myStream, "UTF-8"));
                String newLine = System.getProperty("line.separator");
                String line;
                while ((line = reader.readLine()) != null) {
                    sb.append(line);
                    sb.append(newLine);
                }
                json = sb.toString();
            } catch (Exception e) {
                Log.e("Buffer Error", "Error converting result " + e.toString());
            } finally {
                // Always release the connection's stream (previously leaked).
                try {
                    if (reader != null) {
                        reader.close();
                    } else {
                        myStream.close();
                    }
                } catch (Exception ignored) {
                    // Nothing sensible to do if close itself fails.
                }
            }
        } else {
            Log.e("ORM", "ASyncNetwork doInBackground Exception ");
        }
        return json;
    }

    /** Opens a plain GET connection and returns its stream, or {@code null} on failure. */
    private InputStream getStream(String u) {
        try {
            URL requestUrl = new URL(u);
            URLConnection urlConnection = requestUrl.openConnection();
            urlConnection.setConnectTimeout(TIMEOUT_MS);
            urlConnection.setRequestProperty("connection", "close");
            return urlConnection.getInputStream();
        } catch (Exception ex) {
            Log.e("ORM", ex.toString());
            return null;
        }
    }

    /** POSTs {@code data} as the raw request body and returns the response stream, or {@code null} on failure. */
    private InputStream getStream(String u, String data) {
        try {
            URL requestUrl = new URL(u);
            HttpURLConnection urlConnection = (HttpURLConnection) requestUrl.openConnection();
            // Timeout must be configured before getOutputStream() opens the connection;
            // the original set it after writing, when it could no longer take effect.
            urlConnection.setConnectTimeout(TIMEOUT_MS);
            urlConnection.setDoOutput(true);
            OutputStreamWriter writer = new OutputStreamWriter(urlConnection.getOutputStream());
            try {
                writer.write(data);
                writer.flush();
            } finally {
                // Closing the writer ends the request body (previously leaked).
                writer.close();
            }
            return urlConnection.getInputStream();
        } catch (Exception ex) {
            Log.e("ORM", ex.toString());
            return null;
        }
    }

    /**
     * Executes a multipart/form-data POST containing {@code params} and {@code files}.
     * Records the HTTP status in {@link #responseCode} and returns the response
     * stream on HTTP 200, otherwise {@code null}.
     */
    private InputStream getStream(String u, Map<String, String> params, Map<String, File> files) {
        String BOUNDARY = UUID.randomUUID().toString();
        String PREFIX = "--";
        String LINE_END = "\r\n";
        Log.i("ORM", "getStream: map");
        Log.i("ORM", "getStream: params" + params);
        try {
            URL requestUrl = new URL(u);
            HttpURLConnection conn = (HttpURLConnection) requestUrl.openConnection();
            conn.setReadTimeout(TIMEOUT_MS);
            conn.setConnectTimeout(TIMEOUT_MS);
            conn.setDoInput(true);
            conn.setUseCaches(false);
            conn.setRequestProperty("Accept", "*/*");
            conn.setRequestProperty("Connection", "Keep-alive");
            conn.setRequestProperty("Content-Type", "multipart/form-data;boundary=" + BOUNDARY);
            boolean hasBody = (params != null && !params.isEmpty()) || (files != null && !files.isEmpty());
            if (hasBody) {
                conn.setDoOutput(true);
                DataOutputStream outputStream = new DataOutputStream(conn.getOutputStream());
                try {
                    if (params != null && !params.isEmpty()) {
                        Log.i("ORM", "getStream: params write");
                        StringBuilder sb = new StringBuilder();
                        for (Map.Entry<String, String> entry : params.entrySet()) {
                            sb.append(PREFIX).append(BOUNDARY).append(LINE_END);
                            sb.append("Content-Disposition:form-data;name=\"").append(entry.getKey()).append("\"").append(LINE_END);
                            sb.append("Content-Transfer-Encoding:8bit").append(LINE_END);
                            sb.append(LINE_END);
                            sb.append(entry.getValue()).append(LINE_END);
                        }
                        // Encode field values as UTF-8 rather than the platform default,
                        // matching the charset declared for file parts below.
                        outputStream.write(sb.toString().getBytes("UTF-8"));
                    }
                    if (files != null && !files.isEmpty()) {
                        Log.i("ORM", "getStream: file write");
                        for (Map.Entry<String, File> file : files.entrySet()) {
                            StringBuilder header = new StringBuilder();
                            header.append(PREFIX).append(BOUNDARY).append(LINE_END);
                            header.append("Content-Disposition:form-data;name=\"").append(file.getKey())
                                    .append("\";filename=\"").append(file.getValue().getName()).append("\"").append(LINE_END);
                            header.append("Content-Type:application/octet-stream;charset=UTF-8").append(LINE_END);
                            header.append(LINE_END);
                            outputStream.write(header.toString().getBytes("UTF-8"));
                            InputStream is = new FileInputStream(file.getValue());
                            try {
                                byte[] buffer = new byte[1024];
                                int len;
                                while ((len = is.read(buffer)) != -1) {
                                    outputStream.write(buffer, 0, len);
                                }
                            } finally {
                                // Close the file even if copying fails part-way (previously leaked).
                                is.close();
                            }
                            outputStream.write(LINE_END.getBytes("UTF-8"));
                        }
                    }
                    Log.i("ORM", "getStream: end write");
                    // Closing boundary: --BOUNDARY--CRLF.
                    outputStream.write((PREFIX + BOUNDARY + PREFIX + LINE_END).getBytes("UTF-8"));
                    outputStream.flush();
                } finally {
                    outputStream.close();
                }
            }
            responseCode = conn.getResponseCode();
            return responseCode == 200 ? conn.getInputStream() : null;
        } catch (Exception e) {
            e.printStackTrace();
            return null;
        }
    }

    /** Delivers the response to whichever callback flavour was registered. */
    @Override
    protected void onPostExecute(String result) {
        if (listener != null) {
            listener.onAsyncTaskCompleted(result);
        }
        if (newListener != null) {
            newListener.onAsyncTaskCompleted(result, responseCode);
        }
    }
}
|
/*
* Copyright 2019 JimVoris.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.qumasoft.qvcslib;
import com.qumasoft.qvcslib.commandargs.CreateArchiveCommandArgs;
import java.util.Collection;
import javax.swing.event.ChangeListener;
import org.junit.After;
import org.junit.AfterClass;
import static org.junit.Assert.assertEquals;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
/**
*
* @author JimVoris
*/
public class DirectoryManagerForRootTest {

    public DirectoryManagerForRootTest() {
    }

    @BeforeClass
    public static void setUpClass() {
    }

    @AfterClass
    public static void tearDownClass() {
    }

    @Before
    public void setUp() {
    }

    @After
    public void tearDown() {
    }

    /**
     * Verifies that a user name set on the root directory manager is read back unchanged.
     */
    @Test
    public void testGetSetUserName() {
        DirectoryManagerForRoot rootManager = new DirectoryManagerForRoot();
        String userName = "testUser";
        rootManager.setUserName(userName);
        assertEquals(userName, rootManager.getUserName());
    }

    /**
     * Verifies that the root directory manager reports an empty appended path.
     */
    @Test
    public void testGetAppendedPath() {
        DirectoryManagerForRoot rootManager = new DirectoryManagerForRoot();
        assertEquals("", rootManager.getAppendedPath());
    }

    /**
     * Verifies that adding a (null) change listener does not throw.
     */
    @Test
    public void testAddChangeListener() {
        DirectoryManagerForRoot rootManager = new DirectoryManagerForRoot();
        ChangeListener nullListener = null;
        rootManager.addChangeListener(nullListener);
    }

    /**
     * Verifies that removing a (null) change listener does not throw.
     */
    @Test
    public void testRemoveChangeListener() {
        DirectoryManagerForRoot rootManager = new DirectoryManagerForRoot();
        ChangeListener nullListener = null;
        rootManager.removeChangeListener(nullListener);
    }

    /**
     * Verifies that the root directory manager reports a count of zero.
     */
    @Test
    public void testGetCount() {
        DirectoryManagerForRoot rootManager = new DirectoryManagerForRoot();
        assertEquals(0, rootManager.getCount());
    }

    /**
     * Verifies that the root directory manager has no merged info collection.
     */
    @Test
    public void testGetMergedInfoCollection() {
        DirectoryManagerForRoot rootManager = new DirectoryManagerForRoot();
        Collection<MergedInfoInterface> mergedInfos = rootManager.getMergedInfoCollection();
        assertEquals(null, mergedInfos);
    }

    /**
     * Verifies that the root directory manager reports the default project name.
     */
    @Test
    public void testGetProjectName() {
        DirectoryManagerForRoot rootManager = new DirectoryManagerForRoot();
        assertEquals(QVCSConstants.QWIN_DEFAULT_PROJECT_NAME, rootManager.getProjectName());
    }

    /**
     * Verifies that the root directory manager reports the trunk branch name.
     */
    @Test
    public void testGetBranchName() {
        DirectoryManagerForRoot rootManager = new DirectoryManagerForRoot();
        assertEquals(QVCSConstants.QVCS_TRUNK_BRANCH, rootManager.getBranchName());
    }

    /**
     * Verifies that looking up merged info for any workfile name yields null.
     */
    @Test
    public void testGetMergedInfo() {
        DirectoryManagerForRoot rootManager = new DirectoryManagerForRoot();
        MergedInfoInterface mergedInfo = rootManager.getMergedInfo("test");
        assertEquals(null, mergedInfo);
    }

    /**
     * Verifies that the root directory manager reports no changes.
     */
    @Test
    public void testGetHasChanged() {
        DirectoryManagerForRoot rootManager = new DirectoryManagerForRoot();
        assertEquals(false, rootManager.getHasChanged());
    }

    /**
     * Verifies that creating an archive on the root directory manager reports failure.
     */
    @Test
    public void testCreateArchive() {
        DirectoryManagerForRoot rootManager = new DirectoryManagerForRoot();
        CreateArchiveCommandArgs noArgs = null;
        assertEquals(false, rootManager.createArchive(noArgs, ""));
    }
}
|
/*
* Copyright 2008 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.aqnote.app.barcode.core.datamatrix.decoder;
import com.aqnote.app.barcode.core.FormatException;
import com.aqnote.app.barcode.core.common.BitSource;
import com.aqnote.app.barcode.core.common.DecoderResult;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
* <p>Data Matrix Codes can encode text as bits in one of several modes, and can use multiple modes
* in one Data Matrix Code. This class decodes the bits back into text.</p>
*
* <p>See ISO 16022:2006, 5.2.1 - 5.2.9.2</p>
*
* @author bbrown@google.com (Brian Brown)
* @author Sean Owen
*/
final class DecodedBitStreamParser {

    /** Encodation modes defined by ISO 16022:2006 that a symbol may switch between. */
    private enum Mode {
        PAD_ENCODE, // Not really a mode
        ASCII_ENCODE,
        C40_ENCODE,
        TEXT_ENCODE,
        ANSIX12_ENCODE,
        EDIFACT_ENCODE,
        BASE256_ENCODE
    }

    /**
     * See ISO 16022:2006, Annex C Table C.1
     * The C40 Basic Character Set (*'s used for placeholders for the shift values)
     */
    private static final char[] C40_BASIC_SET_CHARS = {
        '*', '*', '*', ' ', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
        'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
        'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'
    };

    private static final char[] C40_SHIFT2_SET_CHARS = {
        '!', '"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.',
        '/', ':', ';', '<', '=', '>', '?', '@', '[', '\\', ']', '^', '_'
    };

    /**
     * See ISO 16022:2006, Annex C Table C.2
     * The Text Basic Character Set (*'s used for placeholders for the shift values)
     */
    private static final char[] TEXT_BASIC_SET_CHARS = {
        '*', '*', '*', ' ', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
        'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
        'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'
    };

    // Shift 2 for Text is the same encoding as C40
    private static final char[] TEXT_SHIFT2_SET_CHARS = C40_SHIFT2_SET_CHARS;

    private static final char[] TEXT_SHIFT3_SET_CHARS = {
        '`', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
        'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '{', '|', '}', '~', (char) 127
    };

    private DecodedBitStreamParser() {
    }

    /**
     * Decodes the codewords of a Data Matrix symbol back into text and raw byte segments.
     * Starts in ASCII encodation and follows each latch/unlatch into the other modes
     * until a PAD codeword is reached or the bits are exhausted.
     *
     * @param bytes the corrected codewords
     * @return the decoded result, including any Base 256 byte segments
     * @throws FormatException if the bit stream violates the ISO 16022 encodation rules
     */
    static DecoderResult decode(byte[] bytes) throws FormatException {
        BitSource bits = new BitSource(bytes);
        StringBuilder result = new StringBuilder(100);
        StringBuilder resultTrailer = new StringBuilder(0);
        List<byte[]> byteSegments = new ArrayList<>(1);
        Mode mode = Mode.ASCII_ENCODE;
        do {
            if (mode == Mode.ASCII_ENCODE) {
                mode = decodeAsciiSegment(bits, result, resultTrailer);
            } else {
                switch (mode) {
                    case C40_ENCODE:
                        decodeC40Segment(bits, result);
                        break;
                    case TEXT_ENCODE:
                        decodeTextSegment(bits, result);
                        break;
                    case ANSIX12_ENCODE:
                        decodeAnsiX12Segment(bits, result);
                        break;
                    case EDIFACT_ENCODE:
                        decodeEdifactSegment(bits, result);
                        break;
                    case BASE256_ENCODE:
                        decodeBase256Segment(bits, result, byteSegments);
                        break;
                    default:
                        throw FormatException.getFormatInstance();
                }
                // Every non-ASCII segment unlatches back to ASCII encodation.
                mode = Mode.ASCII_ENCODE;
            }
        } while (mode != Mode.PAD_ENCODE && bits.available() > 0);
        if (resultTrailer.length() > 0) {
            result.append(resultTrailer);
        }
        return new DecoderResult(bytes, result.toString(), byteSegments.isEmpty() ? null : byteSegments, null);
    }

    /**
     * See ISO 16022:2006, 5.2.3 and Annex C, Table C.2
     */
    private static Mode decodeAsciiSegment(BitSource bits,
                                           StringBuilder result,
                                           StringBuilder resultTrailer) throws FormatException {
        boolean upperShift = false;
        do {
            int oneByte = bits.readBits(8);
            if (oneByte == 0) {
                throw FormatException.getFormatInstance();
            } else if (oneByte <= 128) { // ASCII data (ASCII value + 1)
                if (upperShift) {
                    oneByte += 128;
                    //upperShift = false;
                }
                result.append((char) (oneByte - 1));
                return Mode.ASCII_ENCODE;
            } else if (oneByte == 129) { // Pad
                return Mode.PAD_ENCODE;
            } else if (oneByte <= 229) { // 2-digit data 00-99 (Numeric Value + 130)
                int value = oneByte - 130;
                if (value < 10) { // pad with '0' for single digit values
                    result.append('0');
                }
                result.append(value);
            } else if (oneByte == 230) { // Latch to C40 encodation
                return Mode.C40_ENCODE;
            } else if (oneByte == 231) { // Latch to Base 256 encodation
                return Mode.BASE256_ENCODE;
            } else if (oneByte == 232) {
                // FNC1
                result.append((char) 29); // translate as ASCII 29
            } else if (oneByte == 233 || oneByte == 234) {
                // Structured Append, Reader Programming
                // Ignore these symbols for now
                //throw ReaderException.getInstance();
            } else if (oneByte == 235) { // Upper Shift (shift to Extended ASCII)
                upperShift = true;
            } else if (oneByte == 236) { // 05 Macro
                result.append("[)>\u001E05\u001D");
                resultTrailer.insert(0, "\u001E\u0004");
            } else if (oneByte == 237) { // 06 Macro
                result.append("[)>\u001E06\u001D");
                resultTrailer.insert(0, "\u001E\u0004");
            } else if (oneByte == 238) { // Latch to ANSI X12 encodation
                return Mode.ANSIX12_ENCODE;
            } else if (oneByte == 239) { // Latch to Text encodation
                return Mode.TEXT_ENCODE;
            } else if (oneByte == 240) { // Latch to EDIFACT encodation
                return Mode.EDIFACT_ENCODE;
            } else if (oneByte == 241) { // ECI Character
                // TODO(bbrown): I think we need to support ECI
                //throw ReaderException.getInstance();
                // Ignore this symbol for now
            } else if (oneByte >= 242) { // Not to be used in ASCII encodation
                // ... but work around encoders that end with 254, latch back to ASCII
                if (oneByte != 254 || bits.available() != 0) {
                    throw FormatException.getFormatInstance();
                }
            }
        } while (bits.available() > 0);
        return Mode.ASCII_ENCODE;
    }

    /**
     * See ISO 16022:2006, 5.2.5 and Annex C, Table C.1
     */
    private static void decodeC40Segment(BitSource bits, StringBuilder result) throws FormatException {
        // Three C40 values are encoded in a 16-bit value as
        // (1600 * C1) + (40 * C2) + C3 + 1
        // TODO(bbrown): The Upper Shift with C40 doesn't work in the 4 value scenario all the time
        boolean upperShift = false;
        int[] cValues = new int[3];
        int shift = 0;
        do {
            // If there is only one byte left then it will be encoded as ASCII
            if (bits.available() == 8) {
                return;
            }
            int firstByte = bits.readBits(8);
            if (firstByte == 254) { // Unlatch codeword
                return;
            }
            parseTwoBytes(firstByte, bits.readBits(8), cValues);
            for (int i = 0; i < 3; i++) {
                int cValue = cValues[i];
                switch (shift) {
                    case 0:
                        if (cValue < 3) {
                            shift = cValue + 1;
                        } else if (cValue < C40_BASIC_SET_CHARS.length) {
                            char c40char = C40_BASIC_SET_CHARS[cValue];
                            if (upperShift) {
                                result.append((char) (c40char + 128));
                                upperShift = false;
                            } else {
                                result.append(c40char);
                            }
                        } else {
                            throw FormatException.getFormatInstance();
                        }
                        break;
                    case 1:
                        if (upperShift) {
                            result.append((char) (cValue + 128));
                            upperShift = false;
                        } else {
                            result.append((char) cValue);
                        }
                        shift = 0;
                        break;
                    case 2:
                        if (cValue < C40_SHIFT2_SET_CHARS.length) {
                            char c40char = C40_SHIFT2_SET_CHARS[cValue];
                            if (upperShift) {
                                result.append((char) (c40char + 128));
                                upperShift = false;
                            } else {
                                result.append(c40char);
                            }
                        } else if (cValue == 27) { // FNC1
                            result.append((char) 29); // translate as ASCII 29
                        } else if (cValue == 30) { // Upper Shift
                            upperShift = true;
                        } else {
                            throw FormatException.getFormatInstance();
                        }
                        shift = 0;
                        break;
                    case 3:
                        if (upperShift) {
                            result.append((char) (cValue + 224));
                            upperShift = false;
                        } else {
                            result.append((char) (cValue + 96));
                        }
                        shift = 0;
                        break;
                    default:
                        throw FormatException.getFormatInstance();
                }
            }
        } while (bits.available() > 0);
    }

    /**
     * See ISO 16022:2006, 5.2.6 and Annex C, Table C.2
     */
    private static void decodeTextSegment(BitSource bits, StringBuilder result) throws FormatException {
        // Three Text values are encoded in a 16-bit value as
        // (1600 * C1) + (40 * C2) + C3 + 1
        // TODO(bbrown): The Upper Shift with Text doesn't work in the 4 value scenario all the time
        boolean upperShift = false;
        int[] cValues = new int[3];
        int shift = 0;
        do {
            // If there is only one byte left then it will be encoded as ASCII
            if (bits.available() == 8) {
                return;
            }
            int firstByte = bits.readBits(8);
            if (firstByte == 254) { // Unlatch codeword
                return;
            }
            parseTwoBytes(firstByte, bits.readBits(8), cValues);
            for (int i = 0; i < 3; i++) {
                int cValue = cValues[i];
                switch (shift) {
                    case 0:
                        if (cValue < 3) {
                            shift = cValue + 1;
                        } else if (cValue < TEXT_BASIC_SET_CHARS.length) {
                            char textChar = TEXT_BASIC_SET_CHARS[cValue];
                            if (upperShift) {
                                result.append((char) (textChar + 128));
                                upperShift = false;
                            } else {
                                result.append(textChar);
                            }
                        } else {
                            throw FormatException.getFormatInstance();
                        }
                        break;
                    case 1:
                        if (upperShift) {
                            result.append((char) (cValue + 128));
                            upperShift = false;
                        } else {
                            result.append((char) cValue);
                        }
                        shift = 0;
                        break;
                    case 2:
                        // Shift 2 for Text is the same encoding as C40
                        if (cValue < TEXT_SHIFT2_SET_CHARS.length) {
                            char textChar = TEXT_SHIFT2_SET_CHARS[cValue];
                            if (upperShift) {
                                result.append((char) (textChar + 128));
                                upperShift = false;
                            } else {
                                result.append(textChar);
                            }
                        } else if (cValue == 27) { // FNC1
                            result.append((char) 29); // translate as ASCII 29
                        } else if (cValue == 30) { // Upper Shift
                            upperShift = true;
                        } else {
                            throw FormatException.getFormatInstance();
                        }
                        shift = 0;
                        break;
                    case 3:
                        if (cValue < TEXT_SHIFT3_SET_CHARS.length) {
                            char textChar = TEXT_SHIFT3_SET_CHARS[cValue];
                            if (upperShift) {
                                result.append((char) (textChar + 128));
                                upperShift = false;
                            } else {
                                result.append(textChar);
                            }
                            shift = 0;
                        } else {
                            throw FormatException.getFormatInstance();
                        }
                        break;
                    default:
                        throw FormatException.getFormatInstance();
                }
            }
        } while (bits.available() > 0);
    }

    /**
     * See ISO 16022:2006, 5.2.7
     */
    private static void decodeAnsiX12Segment(BitSource bits,
                                             StringBuilder result) throws FormatException {
        // Three ANSI X12 values are encoded in a 16-bit value as
        // (1600 * C1) + (40 * C2) + C3 + 1
        int[] cValues = new int[3];
        do {
            // If there is only one byte left then it will be encoded as ASCII
            if (bits.available() == 8) {
                return;
            }
            int firstByte = bits.readBits(8);
            if (firstByte == 254) { // Unlatch codeword
                return;
            }
            parseTwoBytes(firstByte, bits.readBits(8), cValues);
            for (int i = 0; i < 3; i++) {
                int cValue = cValues[i];
                if (cValue == 0) { // X12 segment terminator <CR>
                    result.append('\r');
                } else if (cValue == 1) { // X12 segment separator *
                    result.append('*');
                } else if (cValue == 2) { // X12 sub-element separator >
                    result.append('>');
                } else if (cValue == 3) { // space
                    result.append(' ');
                } else if (cValue < 14) { // 0 - 9
                    result.append((char) (cValue + 44));
                } else if (cValue < 40) { // A - Z
                    result.append((char) (cValue + 51));
                } else {
                    throw FormatException.getFormatInstance();
                }
            }
        } while (bits.available() > 0);
    }

    /**
     * Unpacks a two-codeword pair into its three values per ISO 16022:2006 5.2.5:
     * the 16-bit quantity encodes (1600 * C1) + (40 * C2) + C3 + 1.
     *
     * @param firstByte  first codeword of the pair
     * @param secondByte second codeword of the pair
     * @param result     receives C1, C2, C3 in positions 0-2
     */
    private static void parseTwoBytes(int firstByte, int secondByte, int[] result) {
        int fullBitValue = (firstByte << 8) + secondByte - 1;
        int temp = fullBitValue / 1600;
        result[0] = temp;
        fullBitValue -= temp * 1600;
        temp = fullBitValue / 40;
        result[1] = temp;
        result[2] = fullBitValue - temp * 40;
    }

    /**
     * See ISO 16022:2006, 5.2.8 and Annex C Table C.3
     */
    private static void decodeEdifactSegment(BitSource bits, StringBuilder result) {
        do {
            // If there is only two or less bytes left then it will be encoded as ASCII
            if (bits.available() <= 16) {
                return;
            }
            for (int i = 0; i < 4; i++) {
                int edifactValue = bits.readBits(6);
                // Check for the unlatch character
                if (edifactValue == 0x1F) { // 011111
                    // Read rest of byte, which should be 0, and stop
                    int bitsLeft = 8 - bits.getBitOffset();
                    if (bitsLeft != 8) {
                        bits.readBits(bitsLeft);
                    }
                    return;
                }
                if ((edifactValue & 0x20) == 0) { // no 1 in the leading (6th) bit
                    edifactValue |= 0x40; // Add a leading 01 to the 6 bit binary value
                }
                result.append((char) edifactValue);
            }
        } while (bits.available() > 0);
    }

    /**
     * See ISO 16022:2006, 5.2.9 and Annex B, B.2
     */
    private static void decodeBase256Segment(BitSource bits,
                                             StringBuilder result,
                                             Collection<byte[]> byteSegments)
        throws FormatException {
        // Figure out how long the Base 256 Segment is.
        int codewordPosition = 1 + bits.getByteOffset(); // position is 1-indexed
        int d1 = unrandomize255State(bits.readBits(8), codewordPosition++);
        int count;
        if (d1 == 0) { // Read the remainder of the symbol
            count = bits.available() / 8;
        } else if (d1 < 250) {
            count = d1;
        } else {
            count = 250 * (d1 - 249) + unrandomize255State(bits.readBits(8), codewordPosition++);
        }
        // We're seeing NegativeArraySizeException errors from users.
        if (count < 0) {
            throw FormatException.getFormatInstance();
        }
        byte[] bytes = new byte[count];
        for (int i = 0; i < count; i++) {
            // Have seen this particular error in the wild, such as at
            // http://www.bcgen.com/demo/IDAutomationStreamingDataMatrix.aspx?MODE=3&D=Fred&PFMT=3&PT=F&X=0.3&O=0&LM=0.2
            if (bits.available() < 8) {
                throw FormatException.getFormatInstance();
            }
            bytes[i] = (byte) unrandomize255State(bits.readBits(8), codewordPosition++);
        }
        byteSegments.add(bytes);
        // Base 256 data is Latin-1 text; StandardCharsets avoids the
        // impossible UnsupportedEncodingException of the string-name overload.
        result.append(new String(bytes, StandardCharsets.ISO_8859_1));
    }

    /**
     * See ISO 16022:2006, Annex B, B.2
     */
    private static int unrandomize255State(int randomizedBase256Codeword,
                                           int base256CodewordPosition) {
        int pseudoRandomNumber = ((149 * base256CodewordPosition) % 255) + 1;
        int tempVariable = randomizedBase256Codeword - pseudoRandomNumber;
        return tempVariable >= 0 ? tempVariable : tempVariable + 256;
    }
}
|
/*
* Copyright (c) 2010, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.wso2.integrationstudio.samples.menu.contributors;
import org.eclipse.jface.action.Action;
import org.eclipse.swt.widgets.Shell;
import org.wso2.integrationstudio.logging.core.IIntegrationStudioLog;
import org.wso2.integrationstudio.logging.core.Logger;
import org.wso2.integrationstudio.samples.Activator;
import org.wso2.integrationstudio.samples.contributor.IIntegrationStudioSampleContributor;
import org.wso2.integrationstudio.utils.data.ITemporaryFileTag;
import org.wso2.integrationstudio.utils.file.FileUtils;
public abstract class SampleAction extends Action {
private static IIntegrationStudioLog log=Logger.getLog(Activator.PLUGIN_ID);
private IIntegrationStudioSampleContributor contributor;
private Shell shell;
public SampleAction(Shell shell,IIntegrationStudioSampleContributor contributor) {
super(contributor.getCaption(),contributor.getImage());
setToolTipText(contributor.getToolTip());
setContributor(contributor);
setShell(shell);
}
public void setContributor(IIntegrationStudioSampleContributor contributor) {
this.contributor = contributor;
}
public IIntegrationStudioSampleContributor getContributor() {
return contributor;
}
public void run() {
ITemporaryFileTag tempTag = FileUtils.createNewTempTag();
if (getContributor().isCustomCreateSample()){
try {
getContributor().createSample(getShell());
} catch (Exception e) {
log.error(e);
}
}else{
execute();
}
tempTag.clearAndEnd();
}
public abstract void execute();
public void setShell(Shell shell) {
this.shell = shell;
}
public Shell getShell() {
return shell;
}
}
|
package seedu.address.storage;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Optional;
import seedu.address.commons.exceptions.DataConversionException;
import seedu.address.model.ReadOnlyTaskCollection;
import seedu.address.model.TaskCollection;
/**
* Represents a storage for {@link TaskCollection}.
*/
public interface ImportExportStorage {
/**
 * Returns the file path of the data file used for import/export.
 */
Path getTaskCollectionFilePath();
/**
 * Returns TaskCollection data as a {@link ReadOnlyTaskCollection}. Returns {@code Optional.empty()}
 * if import file is not found.
 *
 * @param filePath location of import file.
 * @throws DataConversionException if the data in storage is not in the expected format.
 * @throws IOException if there was any problem when reading from the storage.
 */
Optional<ReadOnlyTaskCollection> importTaskCollection(Path filePath) throws DataConversionException, IOException;
/**
 * Saves the given {@link ReadOnlyTaskCollection} to the path specified.
 * @param taskCollection cannot be null. The task collection to be saved.
 * @param filePath the destination to save the file
 * @param shouldOverwrite if the file exists, whether the file should be overwritten.
 * @param isCsvFormat whether the task collection should be saved as a CSV or a XML file.
 * @throws IOException if there was any problem writing to the destination file.
 */
void exportTaskCollection(ReadOnlyTaskCollection taskCollection, Path filePath, boolean shouldOverwrite,
boolean isCsvFormat)
throws IOException;
}
|
/**
* Copyright (C) 2015 The Gravitee team (http://gravitee.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gravitee.gateway.policy;
import io.gravitee.gateway.api.ExecutionContext;
import io.gravitee.gateway.api.Request;
import io.gravitee.gateway.api.Response;
import io.gravitee.gateway.policy.impl.PolicyChain;
import java.util.Collections;
import java.util.Iterator;
/**
* A no-op policy chain used to chain an empty policy collection.
* It immediately returns a successful result when invoking its <code>doNext</code> method.
*
* @author David BRASSELY (david.brassely at graviteesource.com)
* @author GraviteeSource Team
*/
public class NoOpPolicyChain extends PolicyChain {

    public NoOpPolicyChain(ExecutionContext executionContext) {
        // There are no policies to execute for this chain.
        super(Collections.emptyList(), executionContext);
    }

    /**
     * Immediately reports a successful chain result; no policy is ever invoked.
     */
    @Override
    public void doNext(Request request, Response response) {
        resultHandler.handle(SUCCESS_POLICY_CHAIN);
    }

    @Override
    protected void execute(Policy policy, Object... args) throws PolicyChainException {
        // Nothing to do
    }

    /**
     * Returns an empty iterator rather than {@code null}, so any caller that
     * iterates the chain defensively does not hit a NullPointerException.
     * {@link #doNext} is overridden above, so iteration is never actually used.
     */
    @Override
    protected Iterator<Policy> iterator() {
        return Collections.emptyIterator();
    }
}
|
/*
* SoapUI, Copyright (C) 2004-2016 SmartBear Software
*
* Licensed under the EUPL, Version 1.1 or - as soon as they will be approved by the European Commission - subsequent
* versions of the EUPL (the "Licence");
* You may not use this work except in compliance with the Licence.
* You may obtain a copy of the Licence at:
*
* http://ec.europa.eu/idabc/eupl
*
* Unless required by applicable law or agreed to in writing, software distributed under the Licence is
* distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the Licence for the specific language governing permissions and limitations
* under the Licence.
*/
package com.eviware.soapui.impl.wsdl.actions.iface.tools.soapui;
import com.eviware.soapui.SoapUI;
import com.eviware.soapui.impl.wsdl.WsdlProject;
import com.eviware.soapui.impl.wsdl.actions.iface.tools.support.AbstractToolsAction;
import com.eviware.soapui.impl.wsdl.actions.iface.tools.support.ArgumentBuilder;
import com.eviware.soapui.impl.wsdl.actions.iface.tools.support.ProcessToolRunner;
import com.eviware.soapui.impl.wsdl.actions.iface.tools.support.ToolHost;
import com.eviware.soapui.impl.wsdl.loadtest.WsdlLoadTest;
import com.eviware.soapui.impl.wsdl.support.HelpUrls;
import com.eviware.soapui.model.iface.Interface;
import com.eviware.soapui.model.support.ModelSupport;
import com.eviware.soapui.model.testsuite.LoadTest;
import com.eviware.soapui.model.testsuite.TestCase;
import com.eviware.soapui.model.testsuite.TestSuite;
import com.eviware.soapui.support.StringUtils;
import com.eviware.soapui.support.UISupport;
import com.eviware.soapui.support.types.StringToStringMap;
import com.eviware.x.form.XForm;
import com.eviware.x.form.XFormDialog;
import com.eviware.x.form.XFormDialogBuilder;
import com.eviware.x.form.XFormFactory;
import com.eviware.x.form.XFormField;
import com.eviware.x.form.XFormFieldListener;
import com.eviware.x.form.XFormTextField;
import com.eviware.x.impl.swing.JTextAreaFormField;
import org.apache.log4j.Logger;
import javax.swing.Action;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;
/**
* Invokes SoapUI TestRunner tool
*
* @author Ole.Matzura
*/
public class LoadTestRunnerAction extends AbstractToolsAction<WsdlProject> {
private static final String ALL_VALUE = "<all>";
protected static final String ENVIRONMENT = "Environment";
protected static final String ENDPOINT = "Endpoint";
protected static final String HOSTPORT = "Host:Port";
private static final String LIMIT = "Limit";
private static final String TESTSUITE = "TestSuite";
private static final String TESTCASE = "TestCase";
private static final String LOADTEST = "LoadTest";
private static final String THREADCOUNT = "ThreadCount";
protected static final String USERNAME = "Username";
protected static final String PASSWORD = "Password";
protected static final String DOMAIN = "Domain";
private static final String PRINTREPORTSTATISTICS = "Print Report Statistics";
private static final String ROOTFOLDER = "Root Folder";
private static final String TESTRUNNERPATH = "TestRunner Path";
private static final String SAVEPROJECT = "Save Project";
private static final String ADDSETTINGS = "Add Settings";
private static final String PROJECTPASSWORD = "Project Password";
private static final String SAVEAFTER = "Save After";
protected static final String WSSTYPE = "WSS Password Type";
private static final String OPEN_REPORT = "Open Report";
private static final String GENERATEREPORTSEACHTESTCASE = "Report to Generate";
private static final String REPORTFORMAT = "Report Format(s)";
private static final String GLOBALPROPERTIES = "Global Properties";
private static final String SYSTEMPROPERTIES = "System Properties";
private static final String PROJECTPROPERTIES = "Project Properties";
private XForm mainForm;
private final static Logger log = Logger.getLogger(LoadTestRunnerAction.class);
public static final String SOAPUI_ACTION_ID = "LoadTestRunnerAction";
protected XForm advForm;
private XForm propertyForm;
private XForm reportForm;
private boolean updating;
private boolean proVersion;
/** Sets the action's menu caption and tooltip; no other initialization. */
public LoadTestRunnerAction() {
super("Launch LoadTestRunner", "Launch command-line LoadTestRunner for this project");
}
/**
 * Builds the multi-tab dialog (Basic / Overrides / Reports / Properties)
 * used to collect LoadTestRunner launch options.
 *
 * @param modelItem the project to launch for
 * @return the configured dialog, or null when there is no project
 */
protected XFormDialog buildDialog(WsdlProject modelItem) {
if (modelItem == null) {
return null;
}
proVersion = isProVersion(modelItem);
XFormDialogBuilder builder = XFormFactory.createDialogBuilder("Launch LoadTestRunner");
mainForm = builder.createForm("Basic");
// The TestSuite/TestCase combos cascade: changing either refreshes the others.
mainForm.addComboBox(TESTSUITE, new String[]{}, "The TestSuite to run").addFormFieldListener(
new XFormFieldListener() {
public void valueChanged(XFormField sourceField, String newValue, String oldValue) {
updateCombos();
}
});
mainForm.addComboBox(TESTCASE, new String[]{}, "The TestCase to run").addFormFieldListener(
new XFormFieldListener() {
public void valueChanged(XFormField sourceField, String newValue, String oldValue) {
updateCombos();
}
});
mainForm.addComboBox(LOADTEST, new String[]{}, "The LoadTest to run");
mainForm.addSeparator();
XFormTextField path = mainForm.addTextField(TESTRUNNERPATH, "Folder containing TestRunner.bat to use",
XForm.FieldType.FOLDER);
// Default the runner location to the soapui.home system property when set.
path.setValue(System.getProperty("soapui.home", ""));
mainForm.addCheckBox(SAVEPROJECT, "Saves project before running").setEnabled(!modelItem.isRemote());
mainForm.addCheckBox(SAVEAFTER, "Sets to save the project file after tests have been run");
mainForm.addCheckBox(ADDSETTINGS, "Adds global settings to command-line");
mainForm.addSeparator();
mainForm.addTextField(PROJECTPASSWORD, "Set project password", XForm.FieldType.PASSWORD);
mainForm.addTextField(SOAPUISETTINGSPASSWORD, "Set soapui-settings.xml password", XForm.FieldType.PASSWORD);
advForm = builder.createForm("Overrides");
// Pro-only fields are still added, but disabled, in the Core edition.
advForm.addComboBox(ENVIRONMENT, new String[]{"Default"}, "The environment to set for all requests")
.setEnabled(proVersion);
advForm.addComboBox(ENDPOINT, new String[]{""}, "endpoint to forward to");
advForm.addTextField(HOSTPORT, "Host:Port to use for requests", XForm.FieldType.TEXT);
advForm.addTextField(LIMIT, "Limit for LoadTest", XForm.FieldType.TEXT);
advForm.addTextField(THREADCOUNT, "ThreadCount for LoadTest", XForm.FieldType.TEXT);
advForm.addSeparator();
advForm.addTextField(USERNAME, "The username to set for all requests", XForm.FieldType.TEXT);
advForm.addTextField(PASSWORD, "The password to set for all requests", XForm.FieldType.PASSWORD);
advForm.addTextField(DOMAIN, "The domain to set for all requests", XForm.FieldType.TEXT);
advForm.addComboBox(WSSTYPE, new String[]{"", "Text", "Digest"}, "The username to set for all requests");
reportForm = builder.createForm("Reports");
createReportTab();
propertyForm = builder.createForm("Properties");
propertyForm.addComponent(GLOBALPROPERTIES, createTextArea());
propertyForm.addComponent(SYSTEMPROPERTIES, createTextArea());
propertyForm.addComponent(PROJECTPROPERTIES, createTextArea());
setToolsSettingsAction(null);
buildArgsForm(builder, false, "TestRunner");
return builder.buildDialog(buildDefaultActions(HelpUrls.TESTRUNNER_HELP_URL, modelItem),
"Specify arguments for launching SoapUI LoadTestRunner", UISupport.TOOL_ICON);
}
/**
 * Populates the "Reports" tab; Pro-only report options are added in a
 * disabled state when running the Core edition.
 */
private void createReportTab() {
reportForm.addCheckBox(PRINTREPORTSTATISTICS, "Creates a report statistics in the specified folder");
reportForm.addTextField(ROOTFOLDER, "Folder for reporting", XForm.FieldType.FOLDER);
reportForm.addCheckBox(OPEN_REPORT, "Opens generated report(s) in browser (SoapUI Pro only)").setEnabled(
proVersion);
reportForm.addTextField(GENERATEREPORTSEACHTESTCASE, "Report to Generate (SoapUI Pro only)",
XForm.FieldType.TEXT).setEnabled(proVersion);
reportForm.addTextField(REPORTFORMAT, "Choose report format(s), comma-separated (SoapUI Pro only)",
XForm.FieldType.TEXT).setEnabled(proVersion);
}
/**
 * Creates a 40x4 text-area form field for entering name=value property
 * pairs, separated by spaces or newlines.
 */
private JTextAreaFormField createTextArea() {
    JTextAreaFormField field = new JTextAreaFormField();
    field.setWidth(40);
    field.getTextArea().setRows(4);
    field.setToolTip("name=value pairs separated by space or enter");
    return field;
}
/**
 * Relabels the inherited run option as "Launch" for this dialog.
 */
protected Action createRunOption(WsdlProject modelItem) {
Action action = super.createRunOption(modelItem);
action.putValue(Action.NAME, "Launch");
return action;
}
/**
 * Populates the combo options (endpoints, test suites that contain load
 * tests) and, when invoked for a specific {@link WsdlLoadTest}, preselects
 * its suite/case/loadtest in the dialog.
 */
protected StringToStringMap initValues(WsdlProject modelItem, Object param) {
if (modelItem != null && mainForm != null) {
// Collect distinct endpoints across all interfaces; the leading null
// entry means "do not override the endpoint".
List<String> endpoints = new ArrayList<String>();
for (Interface iface : modelItem.getInterfaceList()) {
for (String endpoint : iface.getEndpoints()) {
if (!endpoints.contains(endpoint)) {
endpoints.add(endpoint);
}
}
}
endpoints.add(0, null);
advForm.setOptions(ENDPOINT, endpoints.toArray());
// Drop suites that contain no load tests at all.
List<TestSuite> testSuites = modelItem.getTestSuiteList();
for (int c = 0; c < testSuites.size(); c++) {
int cnt = 0;
for (TestCase testCase : testSuites.get(c).getTestCaseList()) {
cnt += testCase.getLoadTestCount();
}
if (cnt == 0) {
testSuites.remove(c);
c--;
}
}
mainForm.setOptions(TESTSUITE, ModelSupport.getNames(new String[]{ALL_VALUE}, testSuites));
} else if (mainForm != null) {
// NOTE(review): ENDPOINT is declared on advForm (see buildDialog), but this
// branch sets it on mainForm — looks inconsistent; verify which form owns it.
mainForm.setOptions(ENDPOINT, new String[]{null});
}
initEnvironment(modelItem);
StringToStringMap values = super.initValues(modelItem, param);
updateCombos();
if (mainForm != null && param instanceof WsdlLoadTest) {
// Launched from a specific LoadTest: preselect its suite/case/loadtest.
mainForm.getFormField(TESTSUITE).setValue(((WsdlLoadTest) param).getTestCase().getTestSuite().getName());
mainForm.getFormField(TESTCASE).setValue(((WsdlLoadTest) param).getTestCase().getName());
mainForm.getFormField(LOADTEST).setValue(((WsdlLoadTest) param).getName());
values.put(TESTSUITE, mainForm.getComponentValue(TESTSUITE));
values.put(TESTCASE, mainForm.getComponentValue(TESTCASE));
values.put(LOADTEST, mainForm.getComponentValue(LOADTEST));
mainForm.getComponent(SAVEPROJECT).setEnabled(!modelItem.isRemote());
}
return values;
}
/**
 * Launches the external loadtestrunner process with the arguments built
 * from the dialog values.
 *
 * @throws Exception if saving the project or starting the tool fails
 */
protected void generate(StringToStringMap values, ToolHost toolHost, WsdlProject modelItem) throws Exception {
String testRunnerDir = mainForm.getComponentValue(TESTRUNNERPATH);
ProcessBuilder builder = new ProcessBuilder();
ArgumentBuilder args = buildArgs(modelItem);
builder.command(args.getArgs());
// Run from the configured TestRunner folder, or the current dir when unset.
if (StringUtils.isNullOrEmpty(testRunnerDir)) {
builder.directory(new File("."));
} else {
builder.directory(new File(testRunnerDir));
}
// The external runner reads the project from disk, so it must exist on file.
if (mainForm.getComponentValue(SAVEPROJECT).equals(Boolean.TRUE.toString())) {
modelItem.save();
} else if (StringUtils.isNullOrEmpty(modelItem.getPath())) {
UISupport.showErrorMessage("Project [" + modelItem.getName() + "] has not been saved to file.");
return;
}
if (log.isDebugEnabled()) {
log.debug("Launching loadtestrunner in directory [" + builder.directory() + "] with arguments ["
+ args.toString() + "]");
}
toolHost.run(new ProcessToolRunner(builder, "SoapUI LoadTestRunner", modelItem, args));
}
/**
 * Translates the dialog values into loadtestrunner command-line arguments.
 * When no dialog exists (headless use) only the script invocation is returned.
 *
 * @param modelItem the project whose file path is appended as the final argument
 * @throws IOException if the runner script cannot be prepared
 */
private ArgumentBuilder buildArgs(WsdlProject modelItem) throws IOException {
XFormDialog dialog = getDialog();
if (dialog == null) {
ArgumentBuilder builder = new ArgumentBuilder(new StringToStringMap());
builder.startScript("loadtestrunner", ".bat", ".sh");
return builder;
}
StringToStringMap values = dialog.getValues();
ArgumentBuilder builder = new ArgumentBuilder(values);
builder.startScript("loadtestrunner", ".bat", ".sh");
builder.addString(ENDPOINT, "-e", "");
builder.addString(HOSTPORT, "-h", "");
// "<all>" selections are expressed by omitting the corresponding flag.
if (!values.get(TESTSUITE).equals(ALL_VALUE)) {
builder.addString(TESTSUITE, "-s", "");
}
if (!values.get(TESTCASE).equals(ALL_VALUE)) {
builder.addString(TESTCASE, "-c", "");
}
if (!values.get(LOADTEST).equals(ALL_VALUE)) {
builder.addString(LOADTEST, "-l", "");
}
builder.addString(LIMIT, "-m", "");
builder.addString(THREADCOUNT, "-n", "");
builder.addString(USERNAME, "-u", "");
// Shadowed values keep passwords out of the visible command line and logs.
builder.addStringShadow(PASSWORD, "-p", "");
builder.addString(DOMAIN, "-d", "");
builder.addBoolean(PRINTREPORTSTATISTICS, "-r");
builder.addString(ROOTFOLDER, "-f", "");
builder.addStringShadow(PROJECTPASSWORD, "-x", "");
builder.addStringShadow(SOAPUISETTINGSPASSWORD, "-v", "");
builder.addBoolean(SAVEAFTER, "-S");
builder.addString(WSSTYPE, "-w", "");
if (proVersion) {
builder.addBoolean(OPEN_REPORT, "-o");
builder.addString(GENERATEREPORTSEACHTESTCASE, "-R", "");
builder.addStrings(REPORTFORMAT, "-F", ",");
builder.addString(ENVIRONMENT, "-E", "");
}
addPropertyArguments(builder);
if (dialog.getBooleanValue(ADDSETTINGS)) {
try {
// Export the current global settings and pass the resulting file via -t.
builder.addBoolean(ADDSETTINGS, "-t" + SoapUI.saveSettings());
} catch (Exception e) {
SoapUI.logError(e);
}
}
builder.addArgs(new String[]{modelItem.getPath()});
addToolArgs(values, builder);
return builder;
}
/**
 * Refreshes the TestCase and LoadTest combo options to match the currently
 * selected TestSuite/TestCase. The {@code updating} flag suppresses the
 * re-entrant listener callbacks triggered by setOptions().
 */
private void updateCombos() {
if (updating) {
return;
}
updating = true;
List<String> testCases = new ArrayList<String>();
List<String> loadTests = new ArrayList<String>();
// null when "<all>" (or nothing) is selected -> include every suite below.
TestSuite ts = getModelItem().getTestSuiteByName(mainForm.getComponentValue(TESTSUITE));
String testCaseName = mainForm.getComponentValue(TESTCASE);
if (ALL_VALUE.equals(testCaseName)) {
testCaseName = null;
}
for (TestSuite testSuite : getModelItem().getTestSuiteList()) {
if (ts != null && testSuite != ts) {
continue;
}
for (TestCase testCase : testSuite.getTestCaseList()) {
// Only test cases that actually contain load tests are offered.
if (testCase.getLoadTestCount() == 0) {
continue;
}
if (!testCases.contains(testCase.getName())) {
testCases.add(testCase.getName());
}
// Collect load tests only from the selected test case (when one is set).
if (testCaseName != null && !testCase.getName().equals(testCaseName)) {
continue;
}
for (LoadTest loadTest : testCase.getLoadTestList()) {
if (!loadTests.contains(loadTest.getName())) {
loadTests.add(loadTest.getName());
}
}
}
}
testCases.add(0, ALL_VALUE);
mainForm.setOptions(TESTCASE, testCases.toArray());
loadTests.add(0, ALL_VALUE);
mainForm.setOptions(LOADTEST, loadTests.toArray());
updating = false;
}
/**
 * Determines whether the given project comes from the Pro edition by
 * inspecting its runtime class name.
 *
 * @param modelItem the project to inspect; must not be null
 * @return true when the project class is the Pro variant ("WsdlProjectPro")
 */
private boolean isProVersion(WsdlProject modelItem) {
// Class-name sniffing avoids a compile-time dependency on the Pro jar.
return modelItem.getClass().getName().contains("WsdlProjectPro");
}
/**
 * Appends the -G/-D/-P property definitions gathered from the
 * "Properties" tab to the command line.
 */
private void addPropertyArguments(ArgumentBuilder builder) {
    List<String> props = new ArrayList<String>();
    addProperties(props, GLOBALPROPERTIES, "-G");
    addProperties(props, SYSTEMPROPERTIES, "-D");
    addProperties(props, PROJECTPROPERTIES, "-P");
    builder.addArgs(props.toArray(new String[props.size()]));
}
/**
 * Splits the whitespace-separated name=value pairs entered for
 * {@code propertyDomain} and adds each one as {@code arg<token>}.
 */
private void addProperties(List<String> propertyArguments, String propertyDomain, String arg) {
    for (StringTokenizer tokens = new StringTokenizer(getDialog().getValue(propertyDomain)); tokens.hasMoreTokens(); ) {
        propertyArguments.add(arg + tokens.nextToken());
    }
}
/**
 * Extension hook: subclasses populate the Environment combo here.
 * Intentionally empty in this edition.
 */
protected void initEnvironment(final WsdlProject modelItem) {
}
}
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.mledger.impl;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNotEquals;
import static org.testng.Assert.assertNull;
import static org.testng.Assert.assertTrue;
import static org.testng.Assert.fail;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import com.google.common.collect.Range;
import com.google.common.collect.Sets;
import java.lang.reflect.Field;
import java.nio.charset.Charset;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.BitSet;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import lombok.Cleanup;
import org.apache.bookkeeper.client.BKException;
import org.apache.bookkeeper.client.BookKeeper;
import org.apache.bookkeeper.client.BookKeeper.DigestType;
import org.apache.bookkeeper.client.LedgerEntry;
import org.apache.bookkeeper.mledger.AsyncCallbacks;
import org.apache.bookkeeper.mledger.AsyncCallbacks.AddEntryCallback;
import org.apache.bookkeeper.mledger.AsyncCallbacks.DeleteCallback;
import org.apache.bookkeeper.mledger.AsyncCallbacks.MarkDeleteCallback;
import org.apache.bookkeeper.mledger.AsyncCallbacks.ReadEntriesCallback;
import org.apache.bookkeeper.mledger.Entry;
import org.apache.bookkeeper.mledger.ManagedCursor;
import org.apache.bookkeeper.mledger.ManagedCursor.IndividualDeletedEntries;
import org.apache.bookkeeper.mledger.ManagedLedger;
import org.apache.bookkeeper.mledger.ManagedLedgerConfig;
import org.apache.bookkeeper.mledger.ManagedLedgerException;
import org.apache.bookkeeper.mledger.ManagedLedgerException.MetaStoreException;
import org.apache.bookkeeper.mledger.ManagedLedgerFactory;
import org.apache.bookkeeper.mledger.ManagedLedgerFactoryConfig;
import org.apache.bookkeeper.mledger.Position;
import org.apache.bookkeeper.mledger.impl.ManagedCursorImpl.VoidCallback;
import org.apache.bookkeeper.mledger.impl.MetaStore.MetaStoreCallback;
import org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedCursorInfo;
import org.apache.bookkeeper.mledger.proto.MLDataFormats.PositionInfo;
import org.apache.bookkeeper.test.MockedBookKeeperTestCase;
import org.apache.pulsar.metadata.api.Stat;
import org.apache.pulsar.common.api.proto.IntRange;
import org.apache.zookeeper.KeeperException.Code;
import org.apache.zookeeper.MockZooKeeper;
import org.awaitility.Awaitility;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
public class ManagedCursorTest extends MockedBookKeeperTestCase {
private static final Charset Encoding = Charsets.UTF_8;
// Runs annotated tests twice: once with open range sets enabled, once disabled.
@DataProvider(name = "useOpenRangeSet")
public static Object[][] useOpenRangeSet() {
return new Object[][] { { Boolean.TRUE }, { Boolean.FALSE } };
}
// An empty ledger yields no entries; after one add, exactly one entry is
// returned and subsequent reads are empty again. Also pins the cursor's
// toString() format.
@Test(timeOut = 20000)
void readFromEmptyLedger() throws Exception {
ManagedLedger ledger = factory.open("my_test_ledger");
ManagedCursor c1 = ledger.openCursor("c1");
List<Entry> entries = c1.readEntries(10);
assertEquals(entries.size(), 0);
entries.forEach(e -> e.release());
ledger.addEntry("test".getBytes(Encoding));
entries = c1.readEntries(10);
assertEquals(entries.size(), 1);
entries.forEach(e -> e.release());
entries = c1.readEntries(10);
assertEquals(entries.size(), 0);
entries.forEach(e -> e.release());
// Test string representation
assertEquals(c1.toString(), "ManagedCursorImpl{ledger=my_test_ledger, name=c1, ackPos=3:-1, readPos=3:1}");
}
// Two cursors independently read the same two entries (spread over two
// ledgers via maxEntriesPerLedger=1); a second read on each returns nothing.
@Test(timeOut = 20000)
void readTwice() throws Exception {
ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(1));
ManagedCursor c1 = ledger.openCursor("c1");
ManagedCursor c2 = ledger.openCursor("c2");
ledger.addEntry("entry-1".getBytes(Encoding));
ledger.addEntry("entry-2".getBytes(Encoding));
List<Entry> entries = c1.readEntries(2);
assertEquals(entries.size(), 2);
entries.forEach(e -> e.release());
entries = c1.readEntries(2);
assertEquals(entries.size(), 0);
entries.forEach(e -> e.release());
entries = c2.readEntries(2);
assertEquals(entries.size(), 2);
entries.forEach(e -> e.release());
entries = c2.readEntries(2);
assertEquals(entries.size(), 0);
entries.forEach(e -> e.release());
}
// Same shape as readTwice, but with the entry cache sized to 0 so every
// read goes to BookKeeper; also verifies the payloads round-trip intact.
@Test(timeOut = 20000)
void readWithCacheDisabled() throws Exception {
ManagedLedgerFactoryConfig config = new ManagedLedgerFactoryConfig();
config.setMaxCacheSize(0);
factory = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle(), config);
ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(1));
ManagedCursor c1 = ledger.openCursor("c1");
ManagedCursor c2 = ledger.openCursor("c2");
ledger.addEntry("entry-1".getBytes(Encoding));
ledger.addEntry("entry-2".getBytes(Encoding));
List<Entry> entries = c1.readEntries(2);
assertEquals(entries.size(), 2);
assertEquals(new String(entries.get(0).getData(), Encoding), "entry-1");
assertEquals(new String(entries.get(1).getData(), Encoding), "entry-2");
entries.forEach(e -> e.release());
entries = c1.readEntries(2);
assertEquals(entries.size(), 0);
entries.forEach(e -> e.release());
entries = c2.readEntries(2);
assertEquals(entries.size(), 2);
entries.forEach(e -> e.release());
entries = c2.readEntries(2);
assertEquals(entries.size(), 0);
entries.forEach(e -> e.release());
}
// Entry.getData() may be called repeatedly on the same entry and must keep
// returning equal payloads until the entry is released.
@Test(timeOut = 20000)
void getEntryDataTwice() throws Exception {
ManagedLedger ledger = factory.open("my_test_ledger");
ManagedCursor c1 = ledger.openCursor("c1");
ledger.addEntry("entry-1".getBytes(Encoding));
List<Entry> entries = c1.readEntries(2);
assertEquals(entries.size(), 1);
Entry entry = entries.get(0);
assertEquals(entry.getLength(), "entry-1".length());
byte[] data1 = entry.getData();
byte[] data2 = entry.getData();
assertEquals(data1, data2);
entry.release();
}
// Reading through a cursor after the ledger is closed must fail with
// ManagedLedgerException.
@Test(timeOut = 20000)
void readFromClosedLedger() throws Exception {
ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(1));
ManagedCursor c1 = ledger.openCursor("c1");
ledger.close();
try {
c1.readEntries(2);
fail("ledger is closed, should fail");
} catch (ManagedLedgerException e) {
// ok
}
}
// Each cursor opened between adds sees only the entries appended after it;
// mark-deleting up to the second entry halves c1's remaining count.
@Test(timeOut = 20000)
void testNumberOfEntries() throws Exception {
ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(2));
ManagedCursor c1 = ledger.openCursor("c1");
ledger.addEntry("dummy-entry-1".getBytes(Encoding));
ManagedCursor c2 = ledger.openCursor("c2");
ledger.addEntry("dummy-entry-2".getBytes(Encoding));
ManagedCursor c3 = ledger.openCursor("c3");
ledger.addEntry("dummy-entry-3".getBytes(Encoding));
ManagedCursor c4 = ledger.openCursor("c4");
ledger.addEntry("dummy-entry-4".getBytes(Encoding));
ManagedCursor c5 = ledger.openCursor("c5");
assertEquals(c1.getNumberOfEntries(), 4);
assertTrue(c1.hasMoreEntries());
assertEquals(c2.getNumberOfEntries(), 3);
assertTrue(c2.hasMoreEntries());
assertEquals(c3.getNumberOfEntries(), 2);
assertTrue(c3.hasMoreEntries());
assertEquals(c4.getNumberOfEntries(), 1);
assertTrue(c4.hasMoreEntries());
assertEquals(c5.getNumberOfEntries(), 0);
assertFalse(c5.hasMoreEntries());
List<Entry> entries = c1.readEntries(2);
assertEquals(entries.size(), 2);
c1.markDelete(entries.get(1).getPosition());
assertEquals(c1.getNumberOfEntries(), 2);
entries.forEach(e -> e.release());
}
// Backlog counts ignore the read position (unlike getNumberOfEntries) and
// shrink with markDelete/delete: individual delete of p3 removes one from
// the backlog, and mark-deleting p4 clears it (p3 was already deleted).
@Test(timeOut = 20000)
void testNumberOfEntriesInBacklog() throws Exception {
ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(2));
ManagedCursor c1 = ledger.openCursor("c1");
Position p1 = ledger.addEntry("dummy-entry-1".getBytes(Encoding));
ManagedCursor c2 = ledger.openCursor("c2");
ledger.addEntry("dummy-entry-2".getBytes(Encoding));
ManagedCursor c3 = ledger.openCursor("c3");
Position p3 = ledger.addEntry("dummy-entry-3".getBytes(Encoding));
ManagedCursor c4 = ledger.openCursor("c4");
Position p4 = ledger.addEntry("dummy-entry-4".getBytes(Encoding));
ManagedCursor c5 = ledger.openCursor("c5");
assertEquals(c1.getNumberOfEntriesInBacklog(false), 4);
assertEquals(c2.getNumberOfEntriesInBacklog(false), 3);
assertEquals(c3.getNumberOfEntriesInBacklog(false), 2);
assertEquals(c4.getNumberOfEntriesInBacklog(false), 1);
assertEquals(c5.getNumberOfEntriesInBacklog(false), 0);
List<Entry> entries = c1.readEntries(2);
assertEquals(entries.size(), 2);
entries.forEach(e -> e.release());
assertEquals(c1.getNumberOfEntries(), 2);
assertEquals(c1.getNumberOfEntriesInBacklog(false), 4);
c1.markDelete(p1);
assertEquals(c1.getNumberOfEntries(), 2);
assertEquals(c1.getNumberOfEntriesInBacklog(false), 3);
c1.delete(p3);
assertEquals(c1.getNumberOfEntries(), 1);
assertEquals(c1.getNumberOfEntriesInBacklog(false), 2);
c1.markDelete(p4);
assertEquals(c1.getNumberOfEntries(), 0);
assertEquals(c1.getNumberOfEntriesInBacklog(false), 0);
}
// Corrupts each cursor's messagesConsumedCounter via reflection (set beyond
// the ledger's entries-added counter) so that the backlog computation must
// take its slower fallback path, then re-checks the same expected backlogs.
@Test(timeOut = 20000)
void testNumberOfEntriesInBacklogWithFallback() throws Exception {
ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(2));
ManagedCursor c1 = ledger.openCursor("c1");
ledger.addEntry("dummy-entry-1".getBytes(Encoding));
ManagedCursor c2 = ledger.openCursor("c2");
ledger.addEntry("dummy-entry-2".getBytes(Encoding));
ManagedCursor c3 = ledger.openCursor("c3");
ledger.addEntry("dummy-entry-3".getBytes(Encoding));
ManagedCursor c4 = ledger.openCursor("c4");
ledger.addEntry("dummy-entry-4".getBytes(Encoding));
ManagedCursor c5 = ledger.openCursor("c5");
Field field = ManagedCursorImpl.class.getDeclaredField("messagesConsumedCounter");
field.setAccessible(true);
long counter = ((ManagedLedgerImpl) ledger).getEntriesAddedCounter() + 1;
field.setLong(c1, counter);
field.setLong(c2, counter);
field.setLong(c3, counter);
field.setLong(c4, counter);
field.setLong(c5, counter);
assertEquals(c1.getNumberOfEntriesInBacklog(false), 4);
assertEquals(c2.getNumberOfEntriesInBacklog(false), 3);
assertEquals(c3.getNumberOfEntriesInBacklog(false), 2);
assertEquals(c4.getNumberOfEntriesInBacklog(false), 1);
assertEquals(c5.getNumberOfEntriesInBacklog(false), 0);
}
// Cursor positions survive reopening the ledger through a second factory:
// each cursor resumes with the same number of unread entries it had.
@Test(timeOut = 20000)
void testNumberOfEntriesWithReopen() throws Exception {
ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(1));
ManagedCursor c1 = ledger.openCursor("c1");
ledger.addEntry("dummy-entry-1".getBytes(Encoding));
ManagedCursor c2 = ledger.openCursor("c2");
ledger.addEntry("dummy-entry-2".getBytes(Encoding));
ManagedCursor c3 = ledger.openCursor("c3");
ManagedLedgerFactory factory2 = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle());
ledger = factory2.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(1));
c1 = ledger.openCursor("c1");
c2 = ledger.openCursor("c2");
c3 = ledger.openCursor("c3");
assertEquals(c1.getNumberOfEntries(), 2);
assertTrue(c1.hasMoreEntries());
assertEquals(c2.getNumberOfEntries(), 1);
assertTrue(c2.hasMoreEntries());
assertEquals(c3.getNumberOfEntries(), 0);
assertFalse(c3.hasMoreEntries());
factory2.shutdown();
}
// Happy-path async read: the callback receives the single stored entry and
// a null ctx; the latch guards against the test finishing before the callback.
@Test(timeOut = 20000)
void asyncReadWithoutErrors() throws Exception {
ManagedLedger ledger = factory.open("my_test_ledger");
ManagedCursor cursor = ledger.openCursor("c1");
ledger.addEntry("dummy-entry-1".getBytes(Encoding));
final CountDownLatch counter = new CountDownLatch(1);
cursor.asyncReadEntries(100, new ReadEntriesCallback() {
@Override
public void readEntriesComplete(List<Entry> entries, Object ctx) {
assertNull(ctx);
assertEquals(entries.size(), 1);
entries.forEach(e -> e.release());
counter.countDown();
}
@Override
public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
fail(exception.getMessage());
}
}, null, PositionImpl.latest);
counter.await();
}
// With BookKeeper stopped, the first async read still succeeds because the
// entry is served from the cache; after rewinding and clearing the cache,
// the read must go to (the stopped) BK and fail.
@Test(timeOut = 20000)
void asyncReadWithErrors() throws Exception {
ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger");
ManagedCursor cursor = ledger.openCursor("c1");
ledger.addEntry("dummy-entry-1".getBytes(Encoding));
final CountDownLatch counter = new CountDownLatch(1);
stopBookKeeper();
cursor.asyncReadEntries(100, new ReadEntriesCallback() {
@Override
public void readEntriesComplete(List<Entry> entries, Object ctx) {
entries.forEach(e -> e.release());
counter.countDown();
}
@Override
public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
fail("async-call should not have failed");
}
}, null, PositionImpl.latest);
counter.await();
cursor.rewind();
// Clear the cache to force reading from BK
ledger.entryCache.clear();
final CountDownLatch counter2 = new CountDownLatch(1);
cursor.asyncReadEntries(100, new ReadEntriesCallback() {
@Override
public void readEntriesComplete(List<Entry> entries, Object ctx) {
fail("async-call should have failed");
}
@Override
public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
counter2.countDown();
}
}, null, PositionImpl.latest);
counter2.await();
}
@Test(timeOut = 20000, expectedExceptions = IllegalArgumentException.class)
void asyncReadWithInvalidParameter() throws Exception {
// Requesting 0 entries is an invalid argument: the test expects a synchronous
// IllegalArgumentException (declared via expectedExceptions), not a failure
// delivered through the callback.
ManagedLedger ledger = factory.open("my_test_ledger");
ManagedCursor cursor = ledger.openCursor("c1");
ledger.addEntry("dummy-entry-1".getBytes(Encoding));
final CountDownLatch counter = new CountDownLatch(1);
stopBookKeeper();
cursor.asyncReadEntries(0, new ReadEntriesCallback() {
@Override
public void readEntriesComplete(List<Entry> entries, Object ctx) {
fail("async-call should have failed");
}
@Override
public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
counter.countDown();
}
}, null, PositionImpl.latest);
// Only reached if no exception was thrown synchronously; in that case the
// failure callback must have been invoked instead.
counter.await();
}
@Test(timeOut = 20000)
void testAsyncReadWithMaxSizeByte() throws Exception {
    // Verify that reads are capped by the max-size-bytes hint once the average
    // entry size is known.
    ManagedLedger ledger = factory.open("testAsyncReadWithMaxSizeByte");
    ManagedCursor cursor = ledger.openCursor("c1");

    int written = 0;
    while (written < 100) {
        ledger.addEntry(new byte[1024]);
        written++;
    }

    // First time, since we don't have info, we'll get 1 single entry
    readAndCheck(cursor, 10, 3 * 1024, 1);
    // We should only return 3 entries, based on the max size
    readAndCheck(cursor, 20, 3 * 1024, 3);
    // If maxSize is < avg, we should get 1 entry
    readAndCheck(cursor, 10, 500, 1);
}
private void readAndCheck(ManagedCursor cursor, int numEntriesToRead,
        long maxSizeBytes, int expectedNumRead) throws InterruptedException {
    // Issue one size-capped async read and assert the number of entries
    // actually delivered, blocking until the callback fires.
    CountDownLatch latch = new CountDownLatch(1);
    cursor.asyncReadEntries(numEntriesToRead, maxSizeBytes, new ReadEntriesCallback() {
        @Override
        public void readEntriesComplete(List<Entry> entries, Object ctx) {
            Assert.assertEquals(entries.size(), expectedNumRead);
            entries.forEach(Entry::release);
            latch.countDown();
        }

        @Override
        public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
            fail(exception.getMessage());
        }
    }, null, null);
    latch.await();
}
@Test(timeOut = 20000)
void markDeleteWithErrors() throws Exception {
    // Mark-delete only touches metadata, so it must keep working while
    // BookKeeper itself is unavailable.
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursor cursor = ledger.openCursor("c1");
    ledger.addEntry("dummy-entry-1".getBytes(Encoding));
    List<Entry> readEntries = cursor.readEntries(100);
    stopBookKeeper();
    assertEquals(readEntries.size(), 1);

    // Mark-delete should succeed if BK is down
    cursor.markDelete(readEntries.get(0).getPosition());
    readEntries.forEach(Entry::release);
}
@Test(timeOut = 20000)
void markDeleteWithZKErrors() throws Exception {
    // With both BookKeeper AND the metadata store (ZooKeeper) stopped,
    // mark-delete has nowhere to persist and must fail.
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursor cursor = ledger.openCursor("c1");
    ledger.addEntry("dummy-entry-1".getBytes(Encoding));
    List<Entry> readEntries = cursor.readEntries(100);
    assertEquals(readEntries.size(), 1);

    stopBookKeeper();
    stopZooKeeper();

    try {
        cursor.markDelete(readEntries.get(0).getPosition());
        fail("Should have failed");
    } catch (Exception e) {
        // Expected
    }
    readEntries.forEach(Entry::release);
}
@Test(timeOut = 20000)
void markDeleteAcrossLedgers() throws Exception {
// A cursor whose mark-delete position is in an older, now-closed ledger must be
// able to advance it directly into a later ledger. The factory.close(ml1) call
// is essential: it evicts the cached instance so the reopen below really
// allocates a new underlying ledger.
ManagedLedger ml1 = factory.open("my_test_ledger");
ManagedCursor mc1 = ml1.openCursor("c1");
// open ledger id 3 for ml1
// markDeletePosition for mc1 is 3:-1
// readPosition is 3:0
ml1.close();
mc1.close();
// force removal of this ledger from the cache
factory.close(ml1);
ManagedLedger ml2 = factory.open("my_test_ledger");
ManagedCursor mc2 = ml2.openCursor("c1");
// open ledger id 5 for ml2
// this entry is written at 5:0
Position pos = ml2.addEntry("dummy-entry-1".getBytes(Encoding));
List<Entry> entries = mc2.readEntries(1);
assertEquals(entries.size(), 1);
assertEquals(new String(entries.get(0).getData(), Encoding), "dummy-entry-1");
entries.forEach(e -> e.release());
mc2.delete(pos);
// verify if the markDeletePosition moves from 3:-1 to 5:0
assertEquals(mc2.getMarkDeletedPosition(), pos);
assertEquals(mc2.getMarkDeletedPosition().getNext(), mc2.getReadPosition());
}
@Test(timeOut = 20000)
void testResetCursor() throws Exception {
    // Resetting a cursor to an earlier position within the same ledger must
    // succeed and move the read position to the requested entry.
    ManagedLedger ledger = factory.open("my_test_move_cursor_ledger",
            new ManagedLedgerConfig().setMaxEntriesPerLedger(10));
    ManagedCursor cursor = ledger.openCursor("trc1");
    ledger.addEntry("dummy-entry-1".getBytes(Encoding));
    ledger.addEntry("dummy-entry-2".getBytes(Encoding));
    ledger.addEntry("dummy-entry-3".getBytes(Encoding));
    PositionImpl lastPosition = (PositionImpl) ledger.addEntry("dummy-entry-4".getBytes(Encoding));
    final AtomicBoolean moveStatus = new AtomicBoolean(false);
    PositionImpl resetPosition = new PositionImpl(lastPosition.getLedgerId(), lastPosition.getEntryId() - 2);
    try {
        cursor.resetCursor(resetPosition);
        moveStatus.set(true);
    } catch (Exception e) {
        // Log the exception itself: e.getCause() may be null and would hide
        // the actual failure and its stack trace.
        log.warn("error in reset cursor", e);
    }
    assertTrue(moveStatus.get());
    assertEquals(resetPosition, cursor.getReadPosition());
    cursor.close();
    ledger.close();
}
@Test(timeOut = 20000)
void testResetCursor1() throws Exception {
    // Exercises resetCursor across ledger boundaries: to earliest, to one past
    // a ledger's last entry, to a non-existent future ledger, and to latest.
    ManagedLedger ledger = factory.open("my_test_move_cursor_ledger",
            new ManagedLedgerConfig().setMaxEntriesPerLedger(2));
    ManagedCursor cursor = ledger.openCursor("trc1");
    PositionImpl actualEarliest = (PositionImpl) ledger.addEntry("dummy-entry-1".getBytes(Encoding));
    ledger.addEntry("dummy-entry-2".getBytes(Encoding));
    ledger.addEntry("dummy-entry-3".getBytes(Encoding));
    PositionImpl lastInPrev = (PositionImpl) ledger.addEntry("dummy-entry-4".getBytes(Encoding));
    PositionImpl firstInNext = (PositionImpl) ledger.addEntry("dummy-entry-5".getBytes(Encoding));
    ledger.addEntry("dummy-entry-6".getBytes(Encoding));
    ledger.addEntry("dummy-entry-7".getBytes(Encoding));
    ledger.addEntry("dummy-entry-8".getBytes(Encoding));
    ledger.addEntry("dummy-entry-9".getBytes(Encoding));
    PositionImpl last = (PositionImpl) ledger.addEntry("dummy-entry-10".getBytes(Encoding));
    final AtomicBoolean moveStatus = new AtomicBoolean(false);

    // reset to earliest
    PositionImpl earliest = PositionImpl.earliest;
    try {
        cursor.resetCursor(earliest);
        moveStatus.set(true);
    } catch (Exception e) {
        // Log the exception itself (not e.getCause(), which may be null and
        // would drop the stack trace of the actual failure).
        log.warn("error in reset cursor", e);
    }
    assertTrue(moveStatus.get());
    PositionImpl earliestPos = new PositionImpl(actualEarliest.getLedgerId(), -1);
    assertEquals(earliestPos, cursor.getReadPosition());
    moveStatus.set(false);

    // reset to one after last entry in a ledger should point to the first entry in the next ledger
    PositionImpl resetPosition = new PositionImpl(lastInPrev.getLedgerId(), lastInPrev.getEntryId() + 1);
    try {
        cursor.resetCursor(resetPosition);
        moveStatus.set(true);
    } catch (Exception e) {
        log.warn("error in reset cursor", e);
    }
    assertTrue(moveStatus.get());
    assertEquals(firstInNext, cursor.getReadPosition());
    moveStatus.set(false);

    // reset to a non exist larger ledger should point to the first non-exist entry in the last ledger
    PositionImpl latest = new PositionImpl(last.getLedgerId() + 2, 0);
    try {
        cursor.resetCursor(latest);
        moveStatus.set(true);
    } catch (Exception e) {
        log.warn("error in reset cursor", e);
    }
    assertTrue(moveStatus.get());
    PositionImpl lastPos = new PositionImpl(last.getLedgerId(), last.getEntryId() + 1);
    assertEquals(lastPos, cursor.getReadPosition());
    moveStatus.set(false);

    // reset to latest should point to the first non-exist entry in the last ledger
    PositionImpl anotherLast = PositionImpl.latest;
    try {
        cursor.resetCursor(anotherLast);
        moveStatus.set(true);
    } catch (Exception e) {
        log.warn("error in reset cursor", e);
    }
    assertTrue(moveStatus.get());
    assertEquals(lastPos, cursor.getReadPosition());
    cursor.close();
    ledger.close();
}
@Test(timeOut = 20000)
void testasyncResetCursor() throws Exception {
// Async variant of the cursor reset: the completion callback flips moveStatus
// and releases the latch, after which the read position must equal the target.
ManagedLedger ledger = factory.open("my_test_move_cursor_ledger",
new ManagedLedgerConfig().setMaxEntriesPerLedger(10));
ManagedCursor cursor = ledger.openCursor("tarc1");
ledger.addEntry("dummy-entry-1".getBytes(Encoding));
ledger.addEntry("dummy-entry-2".getBytes(Encoding));
ledger.addEntry("dummy-entry-3".getBytes(Encoding));
PositionImpl lastPosition = (PositionImpl) ledger.addEntry("dummy-entry-4".getBytes(Encoding));
final AtomicBoolean moveStatus = new AtomicBoolean(false);
CountDownLatch countDownLatch = new CountDownLatch(1);
// Target: two entries before the last one, within the same ledger.
PositionImpl resetPosition = new PositionImpl(lastPosition.getLedgerId(), lastPosition.getEntryId() - 2);
cursor.asyncResetCursor(resetPosition, new AsyncCallbacks.ResetCursorCallback() {
@Override
public void resetComplete(Object ctx) {
moveStatus.set(true);
countDownLatch.countDown();
}
@Override
public void resetFailed(ManagedLedgerException exception, Object ctx) {
moveStatus.set(false);
countDownLatch.countDown();
}
});
countDownLatch.await();
assertTrue(moveStatus.get());
assertEquals(resetPosition, cursor.getReadPosition());
cursor.close();
ledger.close();
}
@Test(timeOut = 20000)
void testConcurrentResetCursor() throws Exception {
// Several consumers, each with its own cursor, reset concurrently to different
// positions; the CyclicBarrier makes all tasks start at the same instant.
ManagedLedger ledger = factory.open("my_test_concurrent_move_ledger");
final int Messages = 100;
final int Consumers = 5;
List<Future<AtomicBoolean>> futures = Lists.newArrayList();
@Cleanup("shutdownNow")
ExecutorService executor = Executors.newCachedThreadPool();
// Consumers + 1 parties: the worker threads plus the main thread below.
final CyclicBarrier barrier = new CyclicBarrier(Consumers + 1);
for (int i = 0; i < Messages; i++) {
ledger.addEntry("test".getBytes());
}
final PositionImpl lastPosition = (PositionImpl) ledger.addEntry("dummy-entry-4".getBytes(Encoding));
for (int i = 0; i < Consumers; i++) {
final ManagedCursor cursor = ledger.openCursor("tcrc" + i);
final int idx = i;
futures.add(executor.submit(new Callable<AtomicBoolean>() {
@Override
public AtomicBoolean call() throws Exception {
barrier.await();
final AtomicBoolean moveStatus = new AtomicBoolean(false);
CountDownLatch countDownLatch = new CountDownLatch(1);
// Each consumer targets a distinct position, 5*idx entries before the last.
final PositionImpl resetPosition = new PositionImpl(lastPosition.getLedgerId(),
lastPosition.getEntryId() - (5 * idx));
cursor.asyncResetCursor(resetPosition, new AsyncCallbacks.ResetCursorCallback() {
@Override
public void resetComplete(Object ctx) {
moveStatus.set(true);
PositionImpl pos = (PositionImpl) ctx;
log.info("move to [{}] completed for consumer [{}]", pos.toString(), idx);
countDownLatch.countDown();
}
@Override
public void resetFailed(ManagedLedgerException exception, Object ctx) {
moveStatus.set(false);
PositionImpl pos = (PositionImpl) ctx;
log.warn("move to [{}] failed for consumer [{}]", pos.toString(), idx);
countDownLatch.countDown();
}
});
countDownLatch.await();
assertEquals(resetPosition, cursor.getReadPosition());
cursor.close();
return moveStatus;
}
}));
}
// Release all workers simultaneously, then verify each reset succeeded.
barrier.await();
for (Future<AtomicBoolean> f : futures) {
assertTrue(f.get().get());
}
ledger.close();
}
@Test(timeOut = 20000)
void seekPosition() throws Exception {
    // Seeking within the current ledger to an earlier entry must be accepted.
    ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(10));
    ManagedCursor cursor = ledger.openCursor("c1");

    ledger.addEntry("dummy-entry-1".getBytes(Encoding));
    ledger.addEntry("dummy-entry-2".getBytes(Encoding));
    ledger.addEntry("dummy-entry-3".getBytes(Encoding));
    PositionImpl last = (PositionImpl) ledger.addEntry("dummy-entry-4".getBytes(Encoding));

    PositionImpl target = new PositionImpl(last.getLedgerId(), last.getEntryId() - 1);
    cursor.seek(target);
}
@Test(timeOut = 20000)
void seekPosition2() throws Exception {
    // With 2 entries per ledger, entry 3 lands in a later ledger; seeking
    // across that boundary must work.
    ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(2));
    ManagedCursor cursor = ledger.openCursor("c1");

    ledger.addEntry("dummy-entry-1".getBytes(Encoding));
    ledger.addEntry("dummy-entry-2".getBytes(Encoding));
    PositionImpl target = (PositionImpl) ledger.addEntry("dummy-entry-3".getBytes(Encoding));
    ledger.addEntry("dummy-entry-4".getBytes(Encoding));
    ledger.addEntry("dummy-entry-5".getBytes(Encoding));
    ledger.addEntry("dummy-entry-6".getBytes(Encoding));

    cursor.seek(new PositionImpl(target.getLedgerId(), target.getEntryId()));
}
@Test(timeOut = 20000)
void seekPosition3() throws Exception {
    // Seek to a mid-stream entry, read it, then seek past entry 5 and verify
    // the next read returns entry 6.
    ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(2));
    ManagedCursor cursor = ledger.openCursor("c1");

    ledger.addEntry("dummy-entry-1".getBytes(Encoding));
    ledger.addEntry("dummy-entry-2".getBytes(Encoding));
    ledger.addEntry("dummy-entry-3".getBytes(Encoding));
    PositionImpl target = (PositionImpl) ledger.addEntry("dummy-entry-4".getBytes(Encoding));
    Position entry5 = ledger.addEntry("dummy-entry-5".getBytes(Encoding));
    Position entry6 = ledger.addEntry("dummy-entry-6".getBytes(Encoding));

    cursor.seek(new PositionImpl(target.getLedgerId(), target.getEntryId()));
    assertEquals(cursor.getReadPosition(), target);

    List<Entry> batch = cursor.readEntries(1);
    assertEquals(batch.size(), 1);
    assertEquals(new String(batch.get(0).getData(), Encoding), "dummy-entry-4");
    batch.forEach(Entry::release);

    cursor.seek(entry5.getNext());
    assertEquals(cursor.getReadPosition(), entry6);

    batch = cursor.readEntries(1);
    assertEquals(batch.size(), 1);
    assertEquals(new String(batch.get(0).getData(), Encoding), "dummy-entry-6");
    batch.forEach(Entry::release);
}
@Test(timeOut = 20000)
void seekPosition4() throws Exception {
    // Seeking backwards must rewind the read position without disturbing the
    // mark-delete position.
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursor cursor = ledger.openCursor("c1");

    Position p1 = ledger.addEntry("dummy-entry-1".getBytes(Encoding));
    Position p2 = ledger.addEntry("dummy-entry-2".getBytes(Encoding));
    ledger.addEntry("dummy-entry-3".getBytes(Encoding));
    ledger.addEntry("dummy-entry-4".getBytes(Encoding));
    ledger.addEntry("dummy-entry-5".getBytes(Encoding));
    ledger.addEntry("dummy-entry-6".getBytes(Encoding));

    cursor.markDelete(p1);
    assertEquals(cursor.getMarkDeletedPosition(), p1);
    assertEquals(cursor.getReadPosition(), p2);

    List<Entry> batch = cursor.readEntries(2);
    batch.forEach(Entry::release);

    cursor.seek(p2);
    assertEquals(cursor.getMarkDeletedPosition(), p1);
    assertEquals(cursor.getReadPosition(), p2);
}
@Test(timeOut = 20000)
void rewind() throws Exception {
// Exercises rewind(): after reading, rewind restores getNumberOfEntries() to
// the backlog count; mark-delete shrinks both counters in lockstep.
ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(2));
ManagedCursor c1 = ledger.openCursor("c1");
Position p1 = ledger.addEntry("dummy-entry-1".getBytes(Encoding));
Position p2 = ledger.addEntry("dummy-entry-2".getBytes(Encoding));
Position p3 = ledger.addEntry("dummy-entry-3".getBytes(Encoding));
Position p4 = ledger.addEntry("dummy-entry-4".getBytes(Encoding));
log.debug("p1: {}", p1);
log.debug("p2: {}", p2);
log.debug("p3: {}", p3);
log.debug("p4: {}", p4);
assertEquals(c1.getNumberOfEntries(), 4);
assertEquals(c1.getNumberOfEntriesInBacklog(false), 4);
c1.markDelete(p1);
assertEquals(c1.getNumberOfEntries(), 3);
assertEquals(c1.getNumberOfEntriesInBacklog(false), 3);
// Reading drains getNumberOfEntries() but leaves the backlog untouched
// until a mark-delete acknowledges the entries.
List<Entry> entries = c1.readEntries(10);
assertEquals(entries.size(), 3);
entries.forEach(e -> e.release());
assertEquals(c1.getNumberOfEntries(), 0);
assertEquals(c1.getNumberOfEntriesInBacklog(false), 3);
c1.rewind();
assertEquals(c1.getNumberOfEntries(), 3);
assertEquals(c1.getNumberOfEntriesInBacklog(false), 3);
c1.markDelete(p2);
assertEquals(c1.getNumberOfEntries(), 2);
assertEquals(c1.getNumberOfEntriesInBacklog(false), 2);
entries = c1.readEntries(10);
assertEquals(entries.size(), 2);
entries.forEach(e -> e.release());
assertEquals(c1.getNumberOfEntries(), 0);
assertEquals(c1.getNumberOfEntriesInBacklog(false), 2);
c1.rewind();
assertEquals(c1.getNumberOfEntries(), 2);
c1.markDelete(p4);
assertEquals(c1.getNumberOfEntries(), 0);
assertEquals(c1.getNumberOfEntriesInBacklog(false), 0);
c1.rewind();
assertEquals(c1.getNumberOfEntries(), 0);
// New entries written after a full drain become visible immediately.
ledger.addEntry("dummy-entry-5".getBytes(Encoding));
assertEquals(c1.getNumberOfEntries(), 1);
ledger.addEntry("dummy-entry-6".getBytes(Encoding));
assertEquals(c1.getNumberOfEntries(), 2);
}
@Test(timeOut = 20000)
void markDeleteSkippingMessage() throws Exception {
    // Mark-deleting past unread entries must skip them: the read position
    // jumps directly beyond the acknowledged position.
    ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(10));
    ManagedCursor cursor = ledger.openCursor("c1");

    Position p1 = ledger.addEntry("dummy-entry-1".getBytes(Encoding));
    Position p2 = ledger.addEntry("dummy-entry-2".getBytes(Encoding));
    ledger.addEntry("dummy-entry-3".getBytes(Encoding));
    PositionImpl p4 = (PositionImpl) ledger.addEntry("dummy-entry-4".getBytes(Encoding));

    assertEquals(cursor.getNumberOfEntries(), 4);

    cursor.markDelete(p1);
    assertTrue(cursor.hasMoreEntries());
    assertEquals(cursor.getNumberOfEntries(), 3);
    assertEquals(cursor.getReadPosition(), p2);

    List<Entry> batch = cursor.readEntries(1);
    assertEquals(batch.size(), 1);
    assertEquals(new String(batch.get(0).getData(), Encoding), "dummy-entry-2");
    batch.forEach(Entry::release);

    // Acknowledge everything up to p4, skipping the entries never read.
    cursor.markDelete(p4);
    assertFalse(cursor.hasMoreEntries());
    assertEquals(cursor.getNumberOfEntries(), 0);
    assertEquals(cursor.getReadPosition(), new PositionImpl(p4.getLedgerId(), p4.getEntryId() + 1));
}
@Test(timeOut = 20000)
void removingCursor() throws Exception {
    // Deleting the only cursor makes the backlog reclaimable: a re-created
    // cursor starts empty and GC trimming removes the old ledgers.
    ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(1));
    ManagedCursor cursor = ledger.openCursor("c1");

    ledger.addEntry("dummy-entry-1".getBytes(Encoding));
    ledger.addEntry("dummy-entry-2".getBytes(Encoding));
    ledger.addEntry("dummy-entry-3".getBytes(Encoding));
    ledger.addEntry("dummy-entry-4".getBytes(Encoding));
    ledger.addEntry("dummy-entry-5".getBytes(Encoding));
    ledger.addEntry("dummy-entry-6".getBytes(Encoding));

    assertEquals(cursor.getNumberOfEntries(), 6);
    assertEquals(ledger.getNumberOfEntries(), 6);
    ledger.deleteCursor("c1");

    // Verify that it's a new empty cursor
    cursor = ledger.openCursor("c1");
    assertEquals(cursor.getNumberOfEntries(), 0);

    ledger.addEntry("dummy-entry-7".getBytes(Encoding));

    // Verify that GC trimming kicks in
    while (ledger.getNumberOfEntries() > 2) {
        Thread.sleep(10);
    }
}
@Test(timeOut = 20000)
void cursorPersistence() throws Exception {
// Mark-delete positions must survive a full reopen through a brand-new
// factory (i.e. they are re-read from persisted metadata, not from cache).
ManagedLedger ledger = factory.open("my_test_ledger");
ManagedCursor c1 = ledger.openCursor("c1");
ManagedCursor c2 = ledger.openCursor("c2");
ledger.addEntry("dummy-entry-1".getBytes(Encoding));
ledger.addEntry("dummy-entry-2".getBytes(Encoding));
ledger.addEntry("dummy-entry-3".getBytes(Encoding));
ledger.addEntry("dummy-entry-4".getBytes(Encoding));
ledger.addEntry("dummy-entry-5".getBytes(Encoding));
ledger.addEntry("dummy-entry-6".getBytes(Encoding));
List<Entry> entries = c1.readEntries(3);
Position p1 = entries.get(2).getPosition();
c1.markDelete(p1);
entries.forEach(e -> e.release());
entries = c1.readEntries(4);
Position p2 = entries.get(2).getPosition();
c2.markDelete(p2);
entries.forEach(e -> e.release());
// Reopen
ManagedLedgerFactory factory2 = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle());
ledger = factory2.open("my_test_ledger");
c1 = ledger.openCursor("c1");
c2 = ledger.openCursor("c2");
// Both cursors must come back at the positions persisted before the reopen.
assertEquals(c1.getMarkDeletedPosition(), p1);
assertEquals(c2.getMarkDeletedPosition(), p2);
factory2.shutdown();
}
@Test(timeOut = 20000)
void cursorPersistence2() throws Exception {
    // Mark-delete positions of several cursors (including ones that never
    // acknowledged anything) must survive a reopen with a tiny metadata
    // ledger (1 entry per metadata ledger forces frequent rollovers).
    ManagedLedger ledger = factory.open("my_test_ledger",
            new ManagedLedgerConfig().setMetadataMaxEntriesPerLedger(1));
    ManagedCursor c1 = ledger.openCursor("c1");
    ManagedCursor c2 = ledger.openCursor("c2");
    ManagedCursor c3 = ledger.openCursor("c3");
    Position p0 = c3.getMarkDeletedPosition();
    Position p1 = ledger.addEntry("dummy-entry-1".getBytes(Encoding));
    ManagedCursor c4 = ledger.openCursor("c4");
    Position p2 = ledger.addEntry("dummy-entry-2".getBytes(Encoding));
    Position p3 = ledger.addEntry("dummy-entry-3".getBytes(Encoding));
    Position p4 = ledger.addEntry("dummy-entry-4".getBytes(Encoding));
    Position p5 = ledger.addEntry("dummy-entry-5".getBytes(Encoding));
    ledger.addEntry("dummy-entry-6".getBytes(Encoding));
    c1.markDelete(p1);
    c1.markDelete(p2);
    c1.markDelete(p3);
    c1.markDelete(p4);
    c1.markDelete(p5);
    c2.markDelete(p1);
    // Reopen through the NEW factory: using the original `factory` here would
    // return the cached ManagedLedger instance and never exercise the
    // persisted metadata (this was the bug — factory2 was created but unused).
    ManagedLedgerFactory factory2 = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle());
    ledger = factory2.open("my_test_ledger");
    c1 = ledger.openCursor("c1");
    c2 = ledger.openCursor("c2");
    c3 = ledger.openCursor("c3");
    c4 = ledger.openCursor("c4");
    assertEquals(c1.getMarkDeletedPosition(), p5);
    assertEquals(c2.getMarkDeletedPosition(), p1);
    assertEquals(c3.getMarkDeletedPosition(), p0);
    assertEquals(c4.getMarkDeletedPosition(), p1);
    factory2.shutdown();
}
@Test(timeOut = 20000)
public void asyncMarkDeleteBlocking() throws Exception {
// Issues N concurrent add+markDelete chains; every markDelete must complete
// (no blocking/deadlock), and the last acknowledged position must persist
// across a reopen with a fresh factory.
ManagedLedgerConfig config = new ManagedLedgerConfig();
config.setMaxEntriesPerLedger(10);
config.setMetadataMaxEntriesPerLedger(5);
ManagedLedger ledger = factory.open("my_test_ledger", config);
final ManagedCursor c1 = ledger.openCursor("c1");
final AtomicReference<Position> lastPosition = new AtomicReference<Position>();
final int N = 100;
final CountDownLatch latch = new CountDownLatch(N);
for (int i = 0; i < N; i++) {
ledger.asyncAddEntry("entry".getBytes(Encoding), new AddEntryCallback() {
@Override
public void addFailed(ManagedLedgerException exception, Object ctx) {
}
@Override
public void addComplete(Position position, ByteBuf entryData, Object ctx) {
// Chain a mark-delete right behind each successful add.
lastPosition.set(position);
c1.asyncMarkDelete(position, new MarkDeleteCallback() {
@Override
public void markDeleteFailed(ManagedLedgerException exception, Object ctx) {
}
@Override
public void markDeleteComplete(Object ctx) {
latch.countDown();
}
}, null);
}
}, null);
}
latch.await();
assertEquals(c1.getNumberOfEntries(), 0);
// Reopen
ManagedLedgerFactory factory2 = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle());
ledger = factory2.open("my_test_ledger");
ManagedCursor c2 = ledger.openCursor("c1");
assertEquals(c2.getMarkDeletedPosition(), lastPosition.get());
factory2.shutdown();
}
@Test(timeOut = 20000)
void cursorPersistenceAsyncMarkDeleteSameThread() throws Exception {
// Fires N async mark-deletes in order from the same thread; after all
// complete, the persisted position must be the last one issued.
ManagedLedger ledger = factory.open("my_test_ledger",
new ManagedLedgerConfig().setMetadataMaxEntriesPerLedger(5));
final ManagedCursor c1 = ledger.openCursor("c1");
final int N = 100;
List<Position> positions = Lists.newArrayList();
for (int i = 0; i < N; i++) {
Position p = ledger.addEntry("dummy-entry".getBytes(Encoding));
positions.add(p);
}
Position lastPosition = positions.get(N - 1);
final CountDownLatch latch = new CountDownLatch(N);
for (final Position p : positions) {
c1.asyncMarkDelete(p, new MarkDeleteCallback() {
@Override
public void markDeleteComplete(Object ctx) {
latch.countDown();
}
@Override
public void markDeleteFailed(ManagedLedgerException exception, Object ctx) {
// Count down on failure too so latch.await() cannot hang; the final
// position assertion below still catches any missed acknowledgement.
log.error("Failed to markdelete", exception);
latch.countDown();
}
}, null);
}
latch.await();
// Reopen
ManagedLedgerFactory factory2 = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle());
ledger = factory2.open("my_test_ledger");
ManagedCursor c2 = ledger.openCursor("c1");
assertEquals(c2.getMarkDeletedPosition(), lastPosition);
factory2.shutdown();
}
@Test(timeOut = 20000)
void unorderedMarkDelete() throws Exception {
    // Mark-delete must be monotonic: acknowledging a position earlier than
    // the current mark-delete position is rejected.
    ManagedLedger ledger = factory.open("my_test_ledger");
    final ManagedCursor c1 = ledger.openCursor("c1");

    Position p1 = ledger.addEntry("entry-1".getBytes(Encoding));
    Position p2 = ledger.addEntry("entry-2".getBytes(Encoding));

    c1.markDelete(p2);
    try {
        c1.markDelete(p1);
        fail("Should have thrown exception");
    } catch (ManagedLedgerException e) {
        // ok
    }

    assertEquals(c1.getMarkDeletedPosition(), p2);
}
@Test(timeOut = 20000)
void unorderedAsyncMarkDelete() throws Exception {
// Async counterpart of unorderedMarkDelete: acknowledging p2 then p1 must
// succeed then fail respectively, leaving the mark-delete position at p2.
ManagedLedger ledger = factory.open("my_test_ledger");
final ManagedCursor c1 = ledger.openCursor("c1");
Position p1 = ledger.addEntry("entry-1".getBytes(Encoding));
Position p2 = ledger.addEntry("entry-2".getBytes(Encoding));
final CountDownLatch latch = new CountDownLatch(2);
c1.asyncMarkDelete(p2, new MarkDeleteCallback() {
@Override
public void markDeleteFailed(ManagedLedgerException exception, Object ctx) {
fail();
}
@Override
public void markDeleteComplete(Object ctx) {
latch.countDown();
}
}, null);
// Going backwards to p1 must hit the failure callback.
c1.asyncMarkDelete(p1, new MarkDeleteCallback() {
@Override
public void markDeleteFailed(ManagedLedgerException exception, Object ctx) {
latch.countDown();
}
@Override
public void markDeleteComplete(Object ctx) {
fail();
}
}, null);
latch.await();
assertEquals(c1.getMarkDeletedPosition(), p2);
}
@Test(timeOut = 20000)
void deleteCursor() throws Exception {
// A deleted cursor must reject further operations; re-opening under the same
// name yields a fresh cursor; closing twice must be harmless.
ManagedLedger ledger = factory.open("my_test_ledger");
ManagedCursor c1 = ledger.openCursor("c1");
ledger.addEntry("entry-1".getBytes(Encoding));
Position p2 = ledger.addEntry("entry-2".getBytes(Encoding));
assertEquals(c1.getNumberOfEntries(), 2);
// Remove and recreate the same cursor
ledger.deleteCursor("c1");
try {
c1.readEntries(10);
fail("must fail, the cursor should be closed");
} catch (ManagedLedgerException e) {
// ok
}
try {
c1.markDelete(p2);
fail("must fail, the cursor should be closed");
} catch (ManagedLedgerException e) {
// ok
}
// Recreated cursor starts at the end of the stream — no backlog.
c1 = ledger.openCursor("c1");
assertEquals(c1.getNumberOfEntries(), 0);
c1.close();
try {
c1.readEntries(10);
fail("must fail, the cursor should be closed");
} catch (ManagedLedgerException e) {
// ok
}
// Closing an already-closed cursor must not throw.
c1.close();
}
@Test(timeOut = 20000)
void errorCreatingCursor() throws Exception {
    // Inject failures in both BK (ledger creation) and ZK (cursor z-node
    // creation): openCursor must surface a ManagedLedgerException.
    ManagedLedger ledger = factory.open("my_test_ledger");

    bkc.failAfter(1, BKException.Code.NotEnoughBookiesException);
    zkc.failConditional(Code.SESSIONEXPIRED, (op, path) ->
            path.equals("/managed-ledgers/my_test_ledger/c1") && op == MockZooKeeper.Op.CREATE);

    try {
        ledger.openCursor("c1");
        fail("should have failed");
    } catch (ManagedLedgerException e) {
        // ok
    }
}
@Test
void failDuringRecoveryWithEmptyLedger() throws Exception {
// If cursor-ledger recovery returns an empty ledger, the cursor must fall
// back to the last snapshotted mark-delete position (p2), not the later p3
// that was only recorded in the cursor ledger.
ManagedLedger ledger = factory.open("my_test_ledger");
ManagedCursor cursor = ledger.openCursor("cursor");
ledger.addEntry("entry-1".getBytes());
Position p2 = ledger.addEntry("entry-2".getBytes());
Position p3 = ledger.addEntry("entry-3".getBytes());
cursor.markDelete(p2);
// Do graceful close so snapshot is forced
ledger.close();
// Re-open
ledger = factory.open("my_test_ledger");
cursor = ledger.openCursor("cursor");
// p3 is acknowledged but will be lost when recovery reads an empty ledger.
cursor.markDelete(p3);
// Force-reopen so the recovery will be forced to read from ledger
bkc.returnEmptyLedgerAfter(1);
ManagedLedgerFactoryConfig conf = new ManagedLedgerFactoryConfig();
ManagedLedgerFactory factory2 = new ManagedLedgerFactoryImpl(bkc, zkc, conf);
ledger = factory2.open("my_test_ledger");
cursor = ledger.openCursor("cursor");
// Cursor was rolled back to p2 because of the ledger recovery failure
assertEquals(cursor.getMarkDeletedPosition(), p2);
factory2.shutdown();
}
@Test(timeOut = 20000)
void errorRecoveringCursor() throws Exception {
    // When BK fails during cursor-ledger recovery, the cursor must fall back
    // to the snapshotted read position.
    ManagedLedger ledger = factory.open("my_test_ledger");
    ledger.addEntry("entry".getBytes()); // position unused; removed dead local
    ledger.addEntry("entry".getBytes());
    ManagedCursor c1 = ledger.openCursor("c1");
    Position p3 = ledger.addEntry("entry".getBytes());

    assertEquals(c1.getReadPosition(), p3);

    ManagedLedgerFactory factory2 = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle());
    bkc.failAfter(3, BKException.Code.LedgerRecoveryException);
    ledger = factory2.open("my_test_ledger");
    c1 = ledger.openCursor("c1");

    // Verify the ManagedCursor was rewind back to the snapshotted position
    assertEquals(c1.getReadPosition(), p3);
    factory2.shutdown();
}
@Test(timeOut = 20000)
void errorRecoveringCursor2() throws Exception {
    // A metadata-version error during recovery is not survivable: the reopen
    // itself must fail with a ManagedLedgerException.
    ManagedLedger ledger = factory.open("my_test_ledger");
    ledger.openCursor("c1");

    ManagedLedgerFactory reopenFactory = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle());
    bkc.failAfter(4, BKException.Code.MetadataVersionException);

    try {
        ledger = reopenFactory.open("my_test_ledger");
        fail("should have failed");
    } catch (ManagedLedgerException e) {
        // ok
    }

    reopenFactory.shutdown();
}
@Test(timeOut = 20000)
void errorRecoveringCursor3() throws Exception {
    // Same as errorRecoveringCursor but with a read error: recovery still
    // falls back to the snapshotted read position.
    ManagedLedger ledger = factory.open("my_test_ledger");
    ledger.addEntry("entry".getBytes()); // position unused; removed dead local
    ledger.addEntry("entry".getBytes());
    ManagedCursor c1 = ledger.openCursor("c1");
    Position p3 = ledger.addEntry("entry".getBytes());

    assertEquals(c1.getReadPosition(), p3);

    ManagedLedgerFactory factory2 = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle());
    bkc.failAfter(4, BKException.Code.ReadException);
    ledger = factory2.open("my_test_ledger");
    c1 = ledger.openCursor("c1");

    // Verify the ManagedCursor was rewind back to the snapshotted position
    assertEquals(c1.getReadPosition(), p3);
    factory2.shutdown();
}
@Test(timeOut = 20000)
void testSingleDelete() throws Exception {
    // Individual deletes only advance the mark-delete position once a
    // contiguous prefix of entries has been deleted.
    ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(3));
    ManagedCursor cursor = ledger.openCursor("c1");

    Position p1 = ledger.addEntry("entry1".getBytes());
    Position p2 = ledger.addEntry("entry2".getBytes());
    Position p3 = ledger.addEntry("entry3".getBytes());
    Position p4 = ledger.addEntry("entry4".getBytes());
    Position p5 = ledger.addEntry("entry5".getBytes());
    Position p6 = ledger.addEntry("entry6".getBytes());

    Position p0 = cursor.getMarkDeletedPosition();

    // Deleting p4 leaves a gap, so the mark-delete position stays put.
    cursor.delete(p4);
    assertEquals(cursor.getMarkDeletedPosition(), p0);

    cursor.delete(p1);
    assertEquals(cursor.getMarkDeletedPosition(), p1);

    cursor.delete(p3);

    // Delete will silently succeed
    cursor.delete(p3);
    assertEquals(cursor.getMarkDeletedPosition(), p1);

    // Deleting p2 closes the gap p2..p4, advancing mark-delete to p4.
    cursor.delete(p2);
    assertEquals(cursor.getMarkDeletedPosition(), p4);

    cursor.delete(p5);
    assertEquals(cursor.getMarkDeletedPosition(), p5);

    cursor.close();
    try {
        cursor.delete(p6);
        // Previously missing: without this, the test would pass even if a
        // delete on a closed cursor wrongly succeeded.
        fail("Should have failed, the cursor is closed");
    } catch (ManagedLedgerException e) {
        // Ok
    }
}
@Test(timeOut = 20000)
void testFilteringReadEntries() throws Exception {
// Individually deleted entries must be filtered out of subsequent reads, and
// the backlog count only shrinks by the deleted entry, not by what was read.
ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(3));
ManagedCursor cursor = ledger.openCursor("c1");
/* Position p1 = */ledger.addEntry("entry1".getBytes());
/* Position p2 = */ledger.addEntry("entry2".getBytes());
/* Position p3 = */ledger.addEntry("entry3".getBytes());
/* Position p4 = */ledger.addEntry("entry4".getBytes());
Position p5 = ledger.addEntry("entry5".getBytes());
/* Position p6 = */ledger.addEntry("entry6".getBytes());
assertEquals(cursor.getNumberOfEntries(), 6);
assertEquals(cursor.getNumberOfEntriesInBacklog(false), 6);
List<Entry> entries = cursor.readEntries(3);
assertEquals(entries.size(), 3);
entries.forEach(e -> e.release());
assertEquals(cursor.getNumberOfEntries(), 3);
assertEquals(cursor.getNumberOfEntriesInBacklog(false), 6);
log.info("Deleting {}", p5);
cursor.delete(p5);
assertEquals(cursor.getNumberOfEntries(), 2);
assertEquals(cursor.getNumberOfEntriesInBacklog(false), 5);
// Asking for 3 entries yields only 2: the deleted p5 is skipped.
entries = cursor.readEntries(3);
assertEquals(entries.size(), 2);
entries.forEach(e -> e.release());
assertEquals(cursor.getNumberOfEntries(), 0);
assertEquals(cursor.getNumberOfEntriesInBacklog(false), 5);
}
@Test(timeOut = 20000)
void testReadingAllFilteredEntries() throws Exception {
    // After individually deleting p2 and p3, a read must skip straight to the
    // next surviving entries (p4 and p5).
    ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(3));
    ledger.openCursor("c1");
    ManagedCursor c2 = ledger.openCursor("c2");

    ledger.addEntry("entry1".getBytes());
    Position p2 = ledger.addEntry("entry2".getBytes());
    Position p3 = ledger.addEntry("entry3".getBytes());
    Position p4 = ledger.addEntry("entry4".getBytes());
    Position p5 = ledger.addEntry("entry5".getBytes());

    // Consume the first entry, then delete the two that follow it.
    c2.readEntries(1).get(0).release();
    c2.delete(p2);
    c2.delete(p3);

    List<Entry> batch = c2.readEntries(2);
    assertEquals(batch.size(), 2);
    assertEquals(batch.get(0).getPosition(), p4);
    assertEquals(batch.get(1).getPosition(), p5);
    batch.forEach(Entry::release);
}
@Test(timeOut = 20000)
void testCountingWithDeletedEntries() throws Exception {
// Entry counts must account for individually deleted entries regardless of
// whether the deletions are contiguous or leave gaps.
ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(2));
ManagedCursor cursor = ledger.openCursor("c1");
Position p1 = ledger.addEntry("entry1".getBytes());
/* Position p2 = */ledger.addEntry("entry2".getBytes());
/* Position p3 = */ledger.addEntry("entry3".getBytes());
/* Position p4 = */ledger.addEntry("entry4".getBytes());
Position p5 = ledger.addEntry("entry5".getBytes());
Position p6 = ledger.addEntry("entry6".getBytes());
Position p7 = ledger.addEntry("entry7".getBytes());
Position p8 = ledger.addEntry("entry8".getBytes());
assertEquals(cursor.getNumberOfEntries(), 8);
assertEquals(cursor.getNumberOfEntriesInBacklog(false), 8);
// Deleting the last entry (a gap from the cursor's viewpoint).
cursor.delete(p8);
assertEquals(cursor.getNumberOfEntries(), 7);
assertEquals(cursor.getNumberOfEntriesInBacklog(false), 7);
// Deleting the first entry (contiguous with the mark-delete position).
cursor.delete(p1);
assertEquals(cursor.getNumberOfEntries(), 6);
assertEquals(cursor.getNumberOfEntriesInBacklog(false), 6);
// A contiguous run p5..p7 plus the earlier p8 deletion.
cursor.delete(p7);
cursor.delete(p6);
cursor.delete(p5);
assertEquals(cursor.getNumberOfEntries(), 3);
assertEquals(cursor.getNumberOfEntriesInBacklog(false), 3);
}
@Test(timeOut = 20000, dataProvider = "useOpenRangeSet")
void testMarkDeleteTwice(boolean useOpenRangeSet) throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig()
            .setUnackedRangesOpenCacheSetEnabled(useOpenRangeSet).setMaxEntriesPerLedger(2));
    ManagedCursor cursor = ledger.openCursor("c1");
    Position position = ledger.addEntry("entry1".getBytes());

    // Mark-deleting the same position twice must be a harmless no-op.
    cursor.markDelete(position);
    cursor.markDelete(position);

    assertEquals(cursor.getMarkDeletedPosition(), position);
}
@Test(timeOut = 20000, dataProvider = "useOpenRangeSet")
void testSkipEntries(boolean useOpenRangeSet) throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig()
            .setUnackedRangesOpenCacheSetEnabled(useOpenRangeSet).setMaxEntriesPerLedger(2));
    Position pos;
    ManagedCursor c1 = ledger.openCursor("c1");

    // Skipping on an empty ledger must leave the read position unchanged.
    pos = c1.getReadPosition();
    c1.skipEntries(1, IndividualDeletedEntries.Exclude);
    assertEquals(c1.getReadPosition(), pos);

    // The first entry's position is not needed (the original assigned it to
    // `pos` only to immediately overwrite it); keep only the last position.
    ledger.addEntry("dummy-entry-1".getBytes(Encoding));
    pos = ledger.addEntry("dummy-entry-2".getBytes(Encoding));

    // skip entries in same ledger
    c1.skipEntries(1, IndividualDeletedEntries.Exclude);
    assertEquals(c1.getNumberOfEntries(), 1);

    // skip entries until end of ledger
    c1.skipEntries(1, IndividualDeletedEntries.Exclude);
    assertEquals(c1.getNumberOfEntries(), 0);
    assertEquals(c1.getReadPosition(), pos.getNext());
    assertEquals(c1.getMarkDeletedPosition(), pos);

    // skip entries across ledgers
    for (int i = 0; i < 6; i++) {
        pos = ledger.addEntry("dummy-entry".getBytes(Encoding));
    }
    c1.skipEntries(5, IndividualDeletedEntries.Exclude);
    assertEquals(c1.getNumberOfEntries(), 1);

    // Skipping more than the available entries clamps at the end of the ledger.
    c1.skipEntries(10, IndividualDeletedEntries.Exclude);
    assertEquals(c1.getNumberOfEntries(), 0);
    assertFalse(c1.hasMoreEntries());
    assertEquals(c1.getReadPosition(), pos.getNext());
    assertEquals(c1.getMarkDeletedPosition(), pos);
}
@Test(timeOut = 20000, dataProvider = "useOpenRangeSet")
void testSkipEntriesWithIndividualDeletedMessages(boolean useOpenRangeSet) throws Exception {
    ManagedLedger ledger = factory.open("testSkipEntriesWithIndividualDeletedMessages", new ManagedLedgerConfig()
            .setUnackedRangesOpenCacheSetEnabled(useOpenRangeSet).setMaxEntriesPerLedger(5));
    ManagedCursor c1 = ledger.openCursor("c1");

    // Only the positions actually referenced below are kept (pos1/pos3 were
    // unused locals in the original).
    ledger.addEntry("dummy-entry-1".getBytes(Encoding));
    Position pos2 = ledger.addEntry("dummy-entry-2".getBytes(Encoding));
    ledger.addEntry("dummy-entry-3".getBytes(Encoding));
    Position pos4 = ledger.addEntry("dummy-entry-4".getBytes(Encoding));
    Position pos5 = ledger.addEntry("dummy-entry-5".getBytes(Encoding));

    // delete individual messages
    c1.delete(pos2);
    c1.delete(pos4);

    // Exclude: individually-deleted entries don't count towards the skip,
    // so skipping 3 consumes all 5 entries.
    c1.skipEntries(3, IndividualDeletedEntries.Exclude);
    assertEquals(c1.getNumberOfEntries(), 0);
    assertEquals(c1.getReadPosition(), pos5.getNext());
    assertEquals(c1.getMarkDeletedPosition(), pos5);

    ledger.addEntry("dummy-entry-1".getBytes(Encoding));
    pos2 = ledger.addEntry("dummy-entry-2".getBytes(Encoding));
    ledger.addEntry("dummy-entry-3".getBytes(Encoding));
    pos4 = ledger.addEntry("dummy-entry-4".getBytes(Encoding));
    pos5 = ledger.addEntry("dummy-entry-5".getBytes(Encoding));

    c1.delete(pos2);
    c1.delete(pos4);

    // Include: deleted entries count towards the skip, so one entry remains.
    c1.skipEntries(4, IndividualDeletedEntries.Include);
    assertEquals(c1.getNumberOfEntries(), 1);
    assertEquals(c1.getReadPosition(), pos5);
    assertEquals(c1.getMarkDeletedPosition(), pos4);
}
@Test(timeOut = 20000, dataProvider = "useOpenRangeSet")
void testClearBacklog(boolean useOpenRangeSet) throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig()
            .setUnackedRangesOpenCacheSetEnabled(useOpenRangeSet).setMaxEntriesPerLedger(1));

    // Interleave cursor creation with writes so each cursor starts with a
    // different backlog: c1 -> 3 entries, c2 -> 2 entries, c3 -> 1 entry.
    ManagedCursor c1 = ledger.openCursor("c1");
    ledger.addEntry("dummy-entry-1".getBytes(Encoding));
    ManagedCursor c2 = ledger.openCursor("c2");
    ledger.addEntry("dummy-entry-2".getBytes(Encoding));
    ManagedCursor c3 = ledger.openCursor("c3");
    ledger.addEntry("dummy-entry-3".getBytes(Encoding));

    assertEquals(c1.getNumberOfEntriesInBacklog(false), 3);
    assertEquals(c1.getNumberOfEntries(), 3);
    assertTrue(c1.hasMoreEntries());

    c1.clearBacklog();
    c3.clearBacklog();

    // c1 and c3 are now empty, while c2 keeps its backlog untouched.
    assertEquals(c1.getNumberOfEntriesInBacklog(false), 0);
    assertEquals(c1.getNumberOfEntries(), 0);
    assertFalse(c1.hasMoreEntries());

    assertEquals(c2.getNumberOfEntriesInBacklog(false), 2);
    assertEquals(c2.getNumberOfEntries(), 2);
    assertTrue(c2.hasMoreEntries());

    assertEquals(c3.getNumberOfEntriesInBacklog(false), 0);
    assertEquals(c3.getNumberOfEntries(), 0);
    assertFalse(c3.hasMoreEntries());

    // Reopen through a fresh factory to verify the cleared state was persisted.
    ManagedLedgerFactory factory2 = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle());
    ledger = factory2.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(1));
    c1 = ledger.openCursor("c1");
    c2 = ledger.openCursor("c2");
    c3 = ledger.openCursor("c3");

    assertEquals(c1.getNumberOfEntriesInBacklog(false), 0);
    assertEquals(c1.getNumberOfEntries(), 0);
    assertFalse(c1.hasMoreEntries());

    assertEquals(c2.getNumberOfEntriesInBacklog(false), 2);
    assertEquals(c2.getNumberOfEntries(), 2);
    assertTrue(c2.hasMoreEntries());

    assertEquals(c3.getNumberOfEntriesInBacklog(false), 0);
    assertEquals(c3.getNumberOfEntries(), 0);
    assertFalse(c3.hasMoreEntries());

    factory2.shutdown();
}
@Test(timeOut = 20000, dataProvider = "useOpenRangeSet")
void testRateLimitMarkDelete(boolean useOpenRangeSet) throws Exception {
    // Throttle mark-delete persistence to 1 op/sec.
    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setThrottleMarkDelete(1).setUnackedRangesOpenCacheSetEnabled(useOpenRangeSet);
    ManagedLedger ledger = factory.open("my_test_ledger", config);
    ManagedCursor cursor = ledger.openCursor("c1");

    Position p1 = ledger.addEntry("dummy-entry-1".getBytes(Encoding));
    Position p2 = ledger.addEntry("dummy-entry-2".getBytes(Encoding));
    Position p3 = ledger.addEntry("dummy-entry-3".getBytes(Encoding));
    assertEquals(cursor.getNumberOfEntriesInBacklog(false), 3);

    // All three mark-deletes take effect in memory...
    cursor.markDelete(p1);
    cursor.markDelete(p2);
    cursor.markDelete(p3);
    assertEquals(cursor.getNumberOfEntriesInBacklog(false), 0);

    // ...but after recovering from storage only the first one was persisted,
    // because of the throttle.
    ManagedLedgerFactory factory2 = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle());
    ledger = factory2.open("my_test_ledger", new ManagedLedgerConfig());
    cursor = ledger.openCursor("c1");
    assertEquals(cursor.getNumberOfEntriesInBacklog(false), 2);

    factory2.shutdown();
}
@Test(timeOut = 20000, dataProvider = "useOpenRangeSet")
void deleteSingleMessageTwice(boolean useOpenRangeSet) throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger",
            new ManagedLedgerConfig().setUnackedRangesOpenCacheSetEnabled(useOpenRangeSet));
    ManagedCursor c1 = ledger.openCursor("c1");

    final int count = 4;
    Position[] positions = new Position[count];
    for (int i = 0; i < count; i++) {
        positions[i] = ledger.addEntry(("entry-" + (i + 1)).getBytes(Encoding));
    }

    assertEquals(c1.getNumberOfEntriesInBacklog(false), count);
    assertEquals(c1.getNumberOfEntries(), count);

    // Delete each entry in order, twice: the second delete must be a no-op
    // and leave the cursor state untouched.
    for (int i = 0; i < count; i++) {
        long remaining = count - 1 - i;
        Position expectedRead = (i < count - 1) ? positions[i + 1] : positions[count - 1].getNext();

        c1.delete(positions[i]);
        assertEquals(c1.getNumberOfEntriesInBacklog(false), remaining);
        assertEquals(c1.getNumberOfEntries(), remaining);
        assertEquals(c1.getMarkDeletedPosition(), positions[i]);
        assertEquals(c1.getReadPosition(), expectedRead);

        // Should have no effect since the position is already deleted
        c1.delete(positions[i]);
        assertEquals(c1.getNumberOfEntriesInBacklog(false), remaining);
        assertEquals(c1.getNumberOfEntries(), remaining);
        assertEquals(c1.getMarkDeletedPosition(), positions[i]);
        assertEquals(c1.getReadPosition(), expectedRead);
    }
}
@Test(timeOut = 10000, dataProvider = "useOpenRangeSet")
void testReadEntriesOrWait(boolean useOpenRangeSet) throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger",
            new ManagedLedgerConfig().setUnackedRangesOpenCacheSetEnabled(useOpenRangeSet));

    // One cursor per consumer; each asynchronously waits for a single entry.
    final int Consumers = 10;
    final CountDownLatch counter = new CountDownLatch(Consumers);

    for (int i = 0; i < Consumers; i++) {
        ManagedCursor c = ledger.openCursor("c" + i);

        c.asyncReadEntriesOrWait(1, new ReadEntriesCallback() {
            @Override
            public void readEntriesComplete(List<Entry> entries, Object ctx) {
                assertEquals(entries.size(), 1);
                entries.forEach(e -> e.release());
                counter.countDown();
            }

            @Override
            public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
                // NOTE(review): the latch is not counted down on failure, so a
                // read error makes the test hang until the 10s timeout instead
                // of failing fast — confirm whether this is intentional.
                log.error("Error reading", exception);
            }
        }, null, PositionImpl.latest);
    }

    // A single write should release every waiting consumer.
    ledger.addEntry("test".getBytes());
    counter.await();
}
@Test(timeOut = 20000)
void testReadEntriesOrWaitBlocking() throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger");

    final int Messages = 100;
    final int Consumers = 10;

    List<Future<Void>> futures = Lists.newArrayList();
    @Cleanup("shutdownNow")
    ExecutorService executor = Executors.newCachedThreadPool();
    final CyclicBarrier barrier = new CyclicBarrier(Consumers + 1);

    for (int i = 0; i < Consumers; i++) {
        final ManagedCursor cursor = ledger.openCursor("c" + i);

        // Each consumer blocks on the cursor until it has read all messages.
        // (Lambda instead of an anonymous Callable, consistent with the rest
        // of this file; the cast pins the Callable<Void> overload of submit.)
        futures.add(executor.submit((Callable<Void>) () -> {
            barrier.await();

            int toRead = Messages;
            while (toRead > 0) {
                List<Entry> entries = cursor.readEntriesOrWait(10);
                assertTrue(entries.size() <= 10);
                toRead -= entries.size();
                entries.forEach(Entry::release);
            }

            return null;
        }));
    }

    // Release all consumers, then produce the messages they are waiting for.
    barrier.await();
    for (int i = 0; i < Messages; i++) {
        ledger.addEntry("test".getBytes());
    }

    // Propagate any assertion failure from the consumer threads.
    for (Future<Void> f : futures) {
        f.get();
    }
}
@Test(timeOut = 20000)
void testFindNewestMatching() throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    for (int i = 0; i < 5; i++) {
        ledger.addEntry("not-expired".getBytes(Encoding));
    }

    // No entry matches the predicate, so the search must return null.
    assertNull(cursor.findNewestMatching(
            entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))));
}
@Test(timeOut = 20000)
void testFindNewestMatchingOdd1() throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    // Only the 1st of 5 entries matches.
    Position newest = ledger.addEntry("expired".getBytes(Encoding));
    for (int i = 0; i < 4; i++) {
        ledger.addEntry("not-expired".getBytes(Encoding));
    }

    assertEquals(
            cursor.findNewestMatching(entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))),
            newest);
}
@Test(timeOut = 20000)
void testFindNewestMatchingOdd2() throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    // The newest matching entry is the 2nd of 5.
    ledger.addEntry("expired".getBytes(Encoding));
    Position newest = ledger.addEntry("expired".getBytes(Encoding));
    for (int i = 0; i < 3; i++) {
        ledger.addEntry("not-expired".getBytes(Encoding));
    }

    assertEquals(
            cursor.findNewestMatching(entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))),
            newest);
}
@Test(timeOut = 20000)
void testFindNewestMatchingOdd3() throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    // The newest matching entry is the 3rd of 5.
    for (int i = 0; i < 2; i++) {
        ledger.addEntry("expired".getBytes(Encoding));
    }
    Position newest = ledger.addEntry("expired".getBytes(Encoding));
    for (int i = 0; i < 2; i++) {
        ledger.addEntry("not-expired".getBytes(Encoding));
    }

    assertEquals(
            cursor.findNewestMatching(entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))),
            newest);
}
@Test(timeOut = 20000)
void testFindNewestMatchingOdd4() throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    // The newest matching entry is the 4th of 5.
    for (int i = 0; i < 3; i++) {
        ledger.addEntry("expired".getBytes(Encoding));
    }
    Position newest = ledger.addEntry("expired".getBytes(Encoding));
    ledger.addEntry("not-expired".getBytes(Encoding));

    assertEquals(
            cursor.findNewestMatching(entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))),
            newest);
}
@Test(timeOut = 20000)
void testFindNewestMatchingOdd5() throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    // All 5 entries match; the newest is the last one.
    for (int i = 0; i < 4; i++) {
        ledger.addEntry("expired".getBytes(Encoding));
    }
    Position newest = ledger.addEntry("expired".getBytes(Encoding));

    assertEquals(
            cursor.findNewestMatching(entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))),
            newest);
}
@Test(timeOut = 20000)
void testFindNewestMatchingEven1() throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    // Only the 1st of 4 entries matches.
    Position newest = ledger.addEntry("expired".getBytes(Encoding));
    for (int i = 0; i < 3; i++) {
        ledger.addEntry("not-expired".getBytes(Encoding));
    }

    assertEquals(
            cursor.findNewestMatching(entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))),
            newest);
}
@Test(timeOut = 20000)
void testFindNewestMatchingEven2() throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    // The newest matching entry is the 2nd of 4.
    ledger.addEntry("expired".getBytes(Encoding));
    Position newest = ledger.addEntry("expired".getBytes(Encoding));
    for (int i = 0; i < 2; i++) {
        ledger.addEntry("not-expired".getBytes(Encoding));
    }

    assertEquals(
            cursor.findNewestMatching(entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))),
            newest);
}
@Test(timeOut = 20000)
void testFindNewestMatchingEven3() throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    // The newest matching entry is the 3rd of 4.
    for (int i = 0; i < 2; i++) {
        ledger.addEntry("expired".getBytes(Encoding));
    }
    Position newest = ledger.addEntry("expired".getBytes(Encoding));
    ledger.addEntry("not-expired".getBytes(Encoding));

    assertEquals(
            cursor.findNewestMatching(entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))),
            newest);
}
@Test(timeOut = 20000)
void testFindNewestMatchingEven4() throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    // All 4 entries match; the newest is the last one.
    for (int i = 0; i < 3; i++) {
        ledger.addEntry("expired".getBytes(Encoding));
    }
    Position newest = ledger.addEntry("expired".getBytes(Encoding));

    assertEquals(
            cursor.findNewestMatching(entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))),
            newest);
}
@Test(timeOut = 20000)
void testFindNewestMatchingEdgeCase1() throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    // Searching an empty ledger must return null.
    assertNull(cursor.findNewestMatching(
            entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))));
}
@Test(timeOut = 20000)
void testFindNewestMatchingEdgeCase2() throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    // A single matching entry is found directly.
    Position newest = ledger.addEntry("expired".getBytes(Encoding));

    assertEquals(
            cursor.findNewestMatching(entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))),
            newest);
}
@Test(timeOut = 20000)
void testFindNewestMatchingEdgeCase3() throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    // Matching entry followed by a single non-matching one.
    Position newest = ledger.addEntry("expired".getBytes(Encoding));
    ledger.addEntry("not-expired".getBytes(Encoding));

    assertEquals(
            cursor.findNewestMatching(entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))),
            newest);
}
@Test(timeOut = 20000)
void testFindNewestMatchingEdgeCase4() throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    // Matching entry followed by two non-matching ones.
    Position newest = ledger.addEntry("expired".getBytes(Encoding));
    for (int i = 0; i < 2; i++) {
        ledger.addEntry("not-expired".getBytes(Encoding));
    }

    assertEquals(
            cursor.findNewestMatching(entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))),
            newest);
}
@Test(timeOut = 20000)
void testFindNewestMatchingEdgeCase5() throws Exception {
    ManagedLedger ledger = factory.open("testFindNewestMatchingEdgeCase5");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    // Two matching entries followed by a non-matching one: the 2nd is newest.
    ledger.addEntry("expired".getBytes(Encoding));
    Position newest = ledger.addEntry("expired".getBytes(Encoding));
    ledger.addEntry("not-expired".getBytes(Encoding));

    assertEquals(
            cursor.findNewestMatching(entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))),
            newest);
}
@Test(timeOut = 20000, dataProvider = "useOpenRangeSet")
void testFindNewestMatchingEdgeCase6(boolean useOpenRangeSet) throws Exception {
    ManagedLedger ledger = factory.open("testFindNewestMatchingEdgeCase6", new ManagedLedgerConfig()
            .setUnackedRangesOpenCacheSetEnabled(useOpenRangeSet).setMaxEntriesPerLedger(3));
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    for (int i = 0; i < 3; i++) {
        ledger.addEntry("expired".getBytes(Encoding));
    }
    Position newPosition = ledger.addEntry("expired".getBytes(Encoding));
    ledger.addEntry("not-expired".getBytes(Encoding));

    // Mark-delete the first three entries; the search must still find the
    // newest matching entry beyond the mark-delete position.
    List<Entry> entries = cursor.readEntries(3);
    cursor.markDelete(entries.get(2).getPosition());
    entries.forEach(Entry::release);

    assertEquals(
            cursor.findNewestMatching(entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))),
            newPosition);
}
@Test(timeOut = 20000, dataProvider = "useOpenRangeSet")
void testFindNewestMatchingEdgeCase7(boolean useOpenRangeSet) throws Exception {
    ManagedLedger ledger = factory.open("testFindNewestMatchingEdgeCase7",
            new ManagedLedgerConfig().setUnackedRangesOpenCacheSetEnabled(useOpenRangeSet));
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    for (int i = 0; i < 5; i++) {
        ledger.addEntry("expired".getBytes(Encoding));
    }
    Position lastPosition = ledger.addEntry("expired".getBytes(Encoding));

    // Mix a mark-delete with an individual delete; the search must still
    // reach the last (newest) matching entry.
    List<Entry> entries = cursor.readEntries(4);
    cursor.markDelete(entries.get(0).getPosition());
    cursor.delete(entries.get(2).getPosition());
    entries.forEach(Entry::release);

    assertEquals(
            cursor.findNewestMatching(entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))),
            lastPosition);
}
@Test(timeOut = 20000)
void testFindNewestMatchingEdgeCase8() throws Exception {
    ManagedLedger ledger = factory.open("testFindNewestMatchingEdgeCase8");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    for (int i = 0; i < 5; i++) {
        ledger.addEntry("expired".getBytes(Encoding));
    }
    Position lastPosition = ledger.addEntry("expired".getBytes(Encoding));
    ledger.addEntry("not-expired".getBytes(Encoding));

    // Two adjacent individual deletes must not stop the search early.
    List<Entry> entries = cursor.readEntries(4);
    cursor.delete(entries.get(1).getPosition());
    cursor.delete(entries.get(2).getPosition());
    entries.forEach(Entry::release);

    assertEquals(
            cursor.findNewestMatching(entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))),
            lastPosition);
}
@Test(timeOut = 20000)
void testFindNewestMatchingEdgeCase9() throws Exception {
    ManagedLedger ledger = factory.open("testFindNewestMatchingEdgeCase9");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    for (int i = 0; i < 6; i++) {
        ledger.addEntry("expired".getBytes(Encoding));
    }
    Position lastPosition = ledger.addEntry("expired".getBytes(Encoding));
    ledger.addEntry("not-expired".getBytes(Encoding));

    // Two non-adjacent individual deletes must not stop the search early.
    List<Entry> entries = cursor.readEntries(5);
    cursor.delete(entries.get(1).getPosition());
    cursor.delete(entries.get(3).getPosition());
    entries.forEach(Entry::release);

    assertEquals(
            cursor.findNewestMatching(entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))),
            lastPosition);
}
@Test(timeOut = 20000)
void testFindNewestMatchingEdgeCase10() throws Exception {
    ManagedLedger ledger = factory.open("testFindNewestMatchingEdgeCase10");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    for (int i = 0; i < 6; i++) {
        ledger.addEntry("expired".getBytes(Encoding));
    }
    Position lastPosition = ledger.addEntry("expired".getBytes(Encoding));
    ledger.addEntry("not-expired".getBytes(Encoding));

    // Three scattered individual deletes, including the last read entry.
    List<Entry> entries = cursor.readEntries(7);
    cursor.delete(entries.get(1).getPosition());
    cursor.delete(entries.get(3).getPosition());
    cursor.delete(entries.get(6).getPosition());
    entries.forEach(Entry::release);

    assertEquals(
            cursor.findNewestMatching(entry -> Arrays.equals(entry.getDataAndRelease(), "expired".getBytes(Encoding))),
            lastPosition);
}
@Test(timeOut = 20000, dataProvider = "useOpenRangeSet")
void testIndividuallyDeletedMessages(boolean useOpenRangeSet) throws Exception {
    ManagedLedger ledger = factory.open("testIndividuallyDeletedMessages",
            new ManagedLedgerConfig().setUnackedRangesOpenCacheSetEnabled(useOpenRangeSet));
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    for (int i = 0; i < 5; i++) {
        ledger.addEntry(("entry-" + i).getBytes(Encoding));
    }

    // Individual deletes followed by a mark-delete past them must leave the
    // individually-deleted-entries set empty.
    List<Entry> entries = cursor.readEntries(4);
    cursor.delete(entries.get(1).getPosition());
    cursor.delete(entries.get(2).getPosition());
    cursor.markDelete(entries.get(3).getPosition());
    entries.forEach(Entry::release);

    assertTrue(cursor.isIndividuallyDeletedEntriesEmpty());
}
@Test(timeOut = 20000)
void testIndividuallyDeletedMessages1() throws Exception {
    ManagedLedger ledger = factory.open("testIndividuallyDeletedMessages1");
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    for (int i = 0; i < 5; i++) {
        ledger.addEntry(("entry-" + i).getBytes(Encoding));
    }

    // A mark-delete past an individually-deleted entry must absorb it.
    List<Entry> entries = cursor.readEntries(4);
    cursor.delete(entries.get(1).getPosition());
    cursor.markDelete(entries.get(3).getPosition());
    entries.forEach(Entry::release);

    assertTrue(cursor.isIndividuallyDeletedEntriesEmpty());
}
@Test(timeOut = 20000, dataProvider = "useOpenRangeSet")
void testIndividuallyDeletedMessages2(boolean useOpenRangeSet) throws Exception {
    ManagedLedger ledger = factory.open("testIndividuallyDeletedMessages2",
            new ManagedLedgerConfig().setUnackedRangesOpenCacheSetEnabled(useOpenRangeSet));
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    for (int i = 0; i < 5; i++) {
        ledger.addEntry(("entry-" + i).getBytes(Encoding));
    }

    // Deleting a contiguous prefix (out of order) must collapse into the
    // mark-delete position, leaving no individually-deleted ranges.
    List<Entry> entries = cursor.readEntries(4);
    cursor.delete(entries.get(1).getPosition());
    cursor.delete(entries.get(2).getPosition());
    cursor.delete(entries.get(0).getPosition());
    entries.forEach(Entry::release);

    assertTrue(cursor.isIndividuallyDeletedEntriesEmpty());
}
@Test(timeOut = 20000, dataProvider = "useOpenRangeSet")
void testIndividuallyDeletedMessages3(boolean useOpenRangeSet) throws Exception {
    ManagedLedger ledger = factory.open("testIndividuallyDeletedMessages3",
            new ManagedLedgerConfig().setUnackedRangesOpenCacheSetEnabled(useOpenRangeSet));
    ManagedCursorImpl cursor = (ManagedCursorImpl) ledger.openCursor("c1");

    for (int i = 0; i < 5; i++) {
        ledger.addEntry(("entry-" + i).getBytes(Encoding));
    }

    // A mark-delete that fills the gap before individually-deleted entries
    // must collapse everything into the mark-delete position.
    List<Entry> entries = cursor.readEntries(4);
    cursor.delete(entries.get(1).getPosition());
    cursor.delete(entries.get(2).getPosition());
    cursor.markDelete(entries.get(0).getPosition());
    entries.forEach(Entry::release);

    assertTrue(cursor.isIndividuallyDeletedEntriesEmpty());
}
@Test(timeOut = 20000)
void testFindNewestMatchingAfterLedgerRollover() throws Exception {
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger");
    ManagedCursorImpl c1 = (ManagedCursorImpl) ledger.openCursor("c1");
    ledger.addEntry("first-expired".getBytes(Encoding));
    ledger.addEntry("second".getBytes(Encoding));
    ledger.addEntry("third".getBytes(Encoding));
    ledger.addEntry("fourth".getBytes(Encoding));
    Position last = ledger.addEntry("last-expired".getBytes(Encoding));

    // roll a new ledger
    int numLedgersBefore = ledger.getLedgersInfo().size();
    ledger.getConfig().setMaxEntriesPerLedger(1);
    ledger.rollCurrentLedgerIfFull();
    Awaitility.await().atMost(20, TimeUnit.SECONDS)
            .until(() -> ledger.getLedgersInfo().size() > numLedgersBefore);

    // the algorithm looks for "expired" messages
    // starting from the first, then it moves to the last message
    // if the condition evaluates to true on the last message
    // then we are done
    // there was a bug (https://github.com/apache/pulsar/issues/9082)
    // in which if the last message was in a different ledger
    // the jump from the first message to the last message went
    // to an invalid position and so the search stopped at the first message
    // we want to assert here that the algorithm returns the position of the
    // last message
    // NOTE: argument order fixed to TestNG's (actual, expected) convention,
    // matching every other assertEquals in this file.
    assertEquals(
            c1.findNewestMatching(entry -> {
                byte[] data = entry.getDataAndRelease();
                return Arrays.equals(data, "first-expired".getBytes(Encoding))
                        || Arrays.equals(data, "last-expired".getBytes(Encoding));
            }),
            last);
}
/**
 * Builds an entry payload containing the current wall-clock time in
 * milliseconds, encoded as decimal ASCII.
 *
 * @param msg unused label; kept so call sites read as "publish time of X"
 * @return the current time in millis, as bytes
 */
public static byte[] getEntryPublishTime(String msg) throws Exception {
    return Long.toString(System.currentTimeMillis()).getBytes();
}
/**
 * Synchronously finds the newest entry whose publish time (stored as the
 * entry payload, see {@link #getEntryPublishTime}) is {@code <= timestamp},
 * searching all available entries.
 *
 * @param c1        the cursor to search from
 * @param timestamp inclusive upper bound on the entry publish time
 * @return the position of the newest matching entry, or null if none matches
 * @throws Exception if the underlying async find operation failed
 */
public Position findPositionFromAllEntries(ManagedCursor c1, final long timestamp) throws Exception {
    final CountDownLatch counter = new CountDownLatch(1);

    // Mutable holder for the async callback's result.
    class Result {
        ManagedLedgerException exception = null;
        Position position = null;
    }
    final Result result = new Result();

    AsyncCallbacks.FindEntryCallback findEntryCallback = new AsyncCallbacks.FindEntryCallback() {
        @Override
        public void findEntryComplete(Position position, Object ctx) {
            result.position = position;
            counter.countDown();
        }

        @Override
        public void findEntryFailed(ManagedLedgerException exception, Optional<Position> failedReadPosition,
                Object ctx) {
            result.exception = exception;
            counter.countDown();
        }
    };

    c1.asyncFindNewestMatching(ManagedCursor.FindPositionConstraint.SearchAllAvailableEntries, entry -> {
        try {
            // Entry payload is the publish timestamp in decimal ASCII;
            // parseLong avoids the needless Long boxing of Long.valueOf.
            long publishTime = Long.parseLong(new String(entry.getData()));
            return publishTime <= timestamp;
        } catch (Exception e) {
            log.error("Error de-serializing message for message position find", e);
        } finally {
            entry.release();
        }
        return false;
    }, findEntryCallback, ManagedCursorImpl.FindPositionConstraint.SearchAllAvailableEntries);

    counter.await();
    if (result.exception != null) {
        throw result.exception;
    }
    return result.position;
}
/**
 * Shared driver for the find-newest-matching-all-entries tests: writes a few
 * timestamped entries, records a reference timestamp, reopens the ledger, and
 * asserts the search lands on the expected entry.
 *
 * Timing-sensitive: relies on Thread.sleep() to space publish times apart.
 *
 * @param name             ledger and cursor name (one cursor per test)
 * @param entriesPerLedger max entries per BK ledger for this run
 * @param expectedEntryId  entry id the search is expected to return, within
 *                         the ledger that holds "expectedresetposition"
 */
void internalTestFindNewestMatchingAllEntries(final String name, final int entriesPerLedger,
        final int expectedEntryId) throws Exception {
    final String ledgerAndCursorName = name;
    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setRetentionSizeInMB(10);
    config.setMaxEntriesPerLedger(entriesPerLedger);
    config.setRetentionTime(1, TimeUnit.HOURS);
    ManagedLedger ledger = factory.open(ledgerAndCursorName, config);
    ManagedCursorImpl c1 = (ManagedCursorImpl) ledger.openCursor(ledgerAndCursorName);

    ledger.addEntry(getEntryPublishTime("retained1"));
    // space apart message publish times
    Thread.sleep(100);
    ledger.addEntry(getEntryPublishTime("retained2"));
    Thread.sleep(100);
    ledger.addEntry(getEntryPublishTime("retained3"));
    Thread.sleep(100);
    // This is the entry the search is expected to find for `timestamp`.
    Position newPosition = ledger.addEntry(getEntryPublishTime("expectedresetposition"));
    long timestamp = System.currentTimeMillis();
    long ledgerId = ((PositionImpl) newPosition).getLedgerId();
    Thread.sleep(2);

    // Published strictly after `timestamp`, so it must not be matched.
    ledger.addEntry(getEntryPublishTime("not-read"));
    List<Entry> entries = c1.readEntries(3);
    c1.markDelete(entries.get(2).getPosition());
    c1.close();
    ledger.close();
    entries.forEach(e -> e.release());
    // give timed ledger trimming a chance to run
    Thread.sleep(100);

    // Reopen and verify the search still finds the expected position.
    ledger = factory.open(ledgerAndCursorName, config);
    c1 = (ManagedCursorImpl) ledger.openCursor(ledgerAndCursorName);

    PositionImpl found = (PositionImpl) findPositionFromAllEntries(c1, timestamp);
    assertEquals(found.getLedgerId(), ledgerId);
    assertEquals(found.getEntryId(), expectedEntryId);

    // A timestamp before any entry's publish time must match nothing.
    found = (PositionImpl) findPositionFromAllEntries(c1, 0);
    assertNull(found);
}
@Test(timeOut = 20000)
void testFindNewestMatchingAllEntries() throws Exception {
    // Expected entry id 1 assumes 2 entries per ledger; change both together.
    internalTestFindNewestMatchingAllEntries("testFindNewestMatchingAllEntries", 2, 1);
}
@Test(timeOut = 20000)
void testFindNewestMatchingAllEntries2() throws Exception {
    // Expected entry id 0 assumes 1 entry per ledger; change both together.
    internalTestFindNewestMatchingAllEntries("testFindNewestMatchingAllEntries2", 1, 0);
}
@Test(timeOut = 20000)
void testFindNewestMatchingAllEntriesSingleLedger() throws Exception {
    // Use the config default max-entries-per-ledger so all entries stay in a
    // single ledger; entry id 3 then corresponds to "expectedresetposition".
    ManagedLedgerConfig config = new ManagedLedgerConfig();
    internalTestFindNewestMatchingAllEntries("testFindNewestMatchingAllEntriesSingleLedger",
            config.getMaxEntriesPerLedger(), 3);
}
@Test(timeOut = 20000)
void testReplayEntries() throws Exception {
    // Verifies ManagedCursor replay: re-reading an explicit set of positions,
    // independent of the cursor's current read position.
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursorImpl c1 = (ManagedCursorImpl) ledger.openCursor("c1");
    PositionImpl p1 = (PositionImpl) ledger.addEntry("entry1".getBytes(Encoding));
    PositionImpl p2 = (PositionImpl) ledger.addEntry("entry2".getBytes(Encoding));
    PositionImpl p3 = (PositionImpl) ledger.addEntry("entry3".getBytes(Encoding));
    ledger.addEntry("entry4".getBytes(Encoding));
    // 1. Replay empty position set should return empty entry set
    Set<PositionImpl> positions = Sets.newHashSet();
    assertTrue(c1.replayEntries(positions).isEmpty());
    positions.add(p1);
    positions.add(p3);
    // 2. entries 1 and 3 should be returned, but they can be in any order
    List<Entry> entries = c1.replayEntries(positions);
    assertEquals(entries.size(), 2);
    assertTrue((Arrays.equals(entries.get(0).getData(), "entry1".getBytes(Encoding))
            && Arrays.equals(entries.get(1).getData(), "entry3".getBytes(Encoding)))
            || (Arrays.equals(entries.get(0).getData(), "entry3".getBytes(Encoding))
            && Arrays.equals(entries.get(1).getData(), "entry1".getBytes(Encoding))));
    entries.forEach(Entry::release);
    // 3. Fail on reading non-existing position
    PositionImpl invalidPosition = new PositionImpl(100, 100);
    positions.add(invalidPosition);
    try {
        c1.replayEntries(positions);
        fail("Should fail");
    } catch (ManagedLedgerException e) {
        // expected: the position does not exist in the ledger
    }
    positions.remove(invalidPosition);
    // 4. Positions at or before the mark-delete point (p1) are skipped:
    // after mark-deleting up to p2, replaying {p1, p3} yields only p3.
    c1.markDelete(p2);
    try {
        // as mark-delete is at position: p2 it should read entry : p3
        assertEquals(1, c1.replayEntries(positions).size());
    } catch (ManagedLedgerException e) {
        fail("Should have not failed");
    }
}
@Test(timeOut = 20000)
void testGetLastIndividualDeletedRange() throws Exception {
    // getLastIndividualDeletedRange() must report the highest (open, closed]
    // range of individually-deleted positions tracked by the cursor.
    ManagedLedger ledger = factory.open("test_last_individual_deleted");
    ManagedCursorImpl c1 = (ManagedCursorImpl) ledger.openCursor("c1");
    PositionImpl markDeletedPosition = (PositionImpl) c1.getMarkDeletedPosition();
    for(int i = 0; i < 10; i++) {
        ledger.addEntry(("entry" + i).getBytes(Encoding));
    }
    // Delete two disjoint pairs relative to the mark-delete position:
    // {+1,+2} and {+5,+6}; the latter pair is the "last" range.
    PositionImpl p1 = PositionImpl.get(markDeletedPosition.getLedgerId() , markDeletedPosition.getEntryId() + 1);
    PositionImpl p2 = PositionImpl.get(markDeletedPosition.getLedgerId() , markDeletedPosition.getEntryId() + 2);
    PositionImpl p3 = PositionImpl.get(markDeletedPosition.getLedgerId() , markDeletedPosition.getEntryId() + 5);
    PositionImpl p4 = PositionImpl.get(markDeletedPosition.getLedgerId() , markDeletedPosition.getEntryId() + 6);
    c1.delete(Lists.newArrayList(p1, p2, p3, p4));
    // Ranges are open on the left: (p3 - 1, p4]
    assertEquals(c1.getLastIndividualDeletedRange(), Range.openClosed(PositionImpl.get(p3.getLedgerId(),
            p3.getEntryId() - 1), p4));
    // A new, higher single delete (+8) becomes the new last range (p5 - 1, p5]
    PositionImpl p5 = PositionImpl.get(markDeletedPosition.getLedgerId() , markDeletedPosition.getEntryId() + 8);
    c1.delete(p5);
    assertEquals(c1.getLastIndividualDeletedRange(), Range.openClosed(PositionImpl.get(p5.getLedgerId(),
            p5.getEntryId() - 1), p5));
}
@Test(timeOut = 20000)
void testTrimDeletedEntries() throws ManagedLedgerException, InterruptedException {
    // trimDeletedEntries must drop, in place, every entry whose position was
    // already individually deleted, leaving only still-live entries.
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursorImpl c1 = (ManagedCursorImpl) ledger.openCursor("c1");
    PositionImpl markDeletedPosition = (PositionImpl) c1.getMarkDeletedPosition();
    for(int i = 0; i < 10; i++) {
        ledger.addEntry(("entry" + i).getBytes(Encoding));
    }
    // Individually delete offsets +1, +2, +5, +6 past the mark-delete position.
    PositionImpl p1 = PositionImpl.get(markDeletedPosition.getLedgerId() , markDeletedPosition.getEntryId() + 1);
    PositionImpl p2 = PositionImpl.get(markDeletedPosition.getLedgerId() , markDeletedPosition.getEntryId() + 2);
    PositionImpl p3 = PositionImpl.get(markDeletedPosition.getLedgerId() , markDeletedPosition.getEntryId() + 5);
    PositionImpl p4 = PositionImpl.get(markDeletedPosition.getLedgerId() , markDeletedPosition.getEntryId() + 6);
    c1.delete(Lists.newArrayList(p1, p2, p3, p4));
    // Build zero-payload entries: four at deleted positions, one (+7) at a
    // live position.
    EntryImpl entry1 = EntryImpl.create(p1, ByteBufAllocator.DEFAULT.buffer(0));
    EntryImpl entry2 = EntryImpl.create(p2, ByteBufAllocator.DEFAULT.buffer(0));
    EntryImpl entry3 = EntryImpl.create(p3, ByteBufAllocator.DEFAULT.buffer(0));
    EntryImpl entry4 = EntryImpl.create(p4, ByteBufAllocator.DEFAULT.buffer(0));
    EntryImpl entry5 = EntryImpl.create(markDeletedPosition.getLedgerId() , markDeletedPosition.getEntryId() + 7,
            ByteBufAllocator.DEFAULT.buffer(0));
    List<Entry> entries = Lists.newArrayList(entry1, entry2, entry3, entry4, entry5);
    c1.trimDeletedEntries(entries);
    // Only the live +7 entry must survive the trim.
    assertEquals(entries.size(), 1);
    assertEquals(entries.get(0).getPosition(), PositionImpl.get(markDeletedPosition.getLedgerId() ,
            markDeletedPosition.getEntryId() + 7));
}
@Test(timeOut = 20000)
void outOfOrderAcks() throws Exception {
    // Acknowledging entries in reverse order must still decrement the backlog
    // by exactly one per delete, since each delete is individually tracked.
    ManagedLedger ledger = factory.open("outOfOrderAcks");
    ManagedCursor cursor = ledger.openCursor("c1");
    final int total = 10;
    List<Position> positions = new ArrayList<>();
    for (int i = 0; i < total; i++) {
        positions.add(ledger.addEntry("entry".getBytes()));
    }
    assertEquals(cursor.getNumberOfEntriesInBacklog(false), total);
    // Delete entries 3, 2, 1, 0 — reverse of publish order.
    for (int i = 3; i >= 0; i--) {
        cursor.delete(positions.get(i));
        assertEquals(cursor.getNumberOfEntriesInBacklog(false), total - (4 - i));
    }
}
@Test(timeOut = 20000)
void randomOrderAcks() throws Exception {
    // Deleting entries in a random order must keep the backlog count exact
    // after every individual delete.
    // Use a ledger named after this test: the previous copy-pasted
    // "outOfOrderAcks" name shared a ledger (and its cursor state) with the
    // sibling test when both run against the same metadata store.
    ManagedLedger ledger = factory.open("randomOrderAcks");
    ManagedCursor c1 = ledger.openCursor("c1");
    int N = 10;
    List<Position> positions = new ArrayList<>();
    for (int i = 0; i < N; i++) {
        positions.add(ledger.addEntry("entry".getBytes()));
    }
    assertEquals(c1.getNumberOfEntriesInBacklog(false), N);
    // Randomize the ack sequence
    Collections.shuffle(positions);
    int toDelete = N;
    for (Position p : positions) {
        assertEquals(c1.getNumberOfEntriesInBacklog(false), toDelete);
        c1.delete(p);
        --toDelete;
        assertEquals(c1.getNumberOfEntriesInBacklog(false), toDelete);
    }
}
@Test(timeOut = 20000)
void testGetEntryAfterN() throws Exception {
    // Verifies getNthEntry: fetching the n-th entry after the mark-delete
    // position without moving the read or mark-delete positions.
    ManagedLedger ledger = factory.open("testGetEntryAfterN");
    ManagedCursor c1 = ledger.openCursor("c1");
    // Only the 4th and 5th positions are referenced later; the earlier adds
    // are kept purely for their side effect of appending entries.
    ledger.addEntry("msg1".getBytes());
    ledger.addEntry("msg2".getBytes());
    ledger.addEntry("msg3".getBytes());
    Position pos4 = ledger.addEntry("msg4".getBytes());
    Position pos5 = ledger.addEntry("msg5".getBytes());
    List<Entry> entries = c1.readEntries(4);
    entries.forEach(Entry::release);
    long currentLedger = ((PositionImpl) c1.getMarkDeletedPosition()).getLedgerId();
    // check if the first message is returned for '0'
    Entry e = c1.getNthEntry(1, IndividualDeletedEntries.Exclude);
    assertEquals(e.getDataAndRelease(), "msg1".getBytes());
    // check that if we call get entry for the same position twice, it returns the same entry
    e = c1.getNthEntry(1, IndividualDeletedEntries.Exclude);
    assertEquals(e.getDataAndRelease(), "msg1".getBytes());
    // check for a position 'n' after md position
    e = c1.getNthEntry(3, IndividualDeletedEntries.Exclude);
    assertEquals(e.getDataAndRelease(), "msg3".getBytes());
    // check for the last position
    e = c1.getNthEntry(5, IndividualDeletedEntries.Exclude);
    assertEquals(e.getDataAndRelease(), "msg5".getBytes());
    // check for a position outside the limits of the number of entries that exists, it should return null
    e = c1.getNthEntry(10, IndividualDeletedEntries.Exclude);
    assertNull(e);
    // check that the mark delete and read positions have not been updated after all the previous operations
    assertEquals(c1.getMarkDeletedPosition(), new PositionImpl(currentLedger, -1));
    assertEquals(c1.getReadPosition(), new PositionImpl(currentLedger, 4));
    c1.markDelete(pos4);
    assertEquals(c1.getMarkDeletedPosition(), pos4);
    e = c1.getNthEntry(1, IndividualDeletedEntries.Exclude);
    assertEquals(e.getDataAndRelease(), "msg5".getBytes());
    // release the entry read here too, so its buffer is not leaked
    c1.readEntries(1).forEach(Entry::release);
    c1.markDelete(pos5);
    e = c1.getNthEntry(1, IndividualDeletedEntries.Exclude);
    assertNull(e);
}
@Test(timeOut = 20000)
void testGetEntryAfterNWithIndividualDeletedMessages() throws Exception {
    // getNthEntry must skip individually-deleted entries with Exclude and
    // count them with Include.
    ManagedLedger ledger = factory.open("testGetEnteryAfterNWithIndividualDeletedMessages");
    ManagedCursor c1 = ledger.openCursor("c1");
    // Only positions 3 and 4 are referenced; the other adds exist purely to
    // populate the ledger.
    ledger.addEntry("msg1".getBytes());
    ledger.addEntry("msg2".getBytes());
    Position pos3 = ledger.addEntry("msg3".getBytes());
    Position pos4 = ledger.addEntry("msg4".getBytes());
    ledger.addEntry("msg5".getBytes());
    c1.delete(pos3);
    c1.delete(pos4);
    // Excluding deleted entries the 3rd live entry is msg5 (msg3/msg4 skipped)
    Entry e = c1.getNthEntry(3, IndividualDeletedEntries.Exclude);
    assertEquals(e.getDataAndRelease(), "msg5".getBytes());
    // Including deleted entries the 3rd entry is msg3 itself
    e = c1.getNthEntry(3, IndividualDeletedEntries.Include);
    assertEquals(e.getDataAndRelease(), "msg3".getBytes());
}
@Test(timeOut = 20000)
void cancelReadOperation() throws Exception {
    // Verifies cancelPendingReadRequest(): it returns true only while an
    // asyncReadEntriesOrWait is still parked waiting for new entries.
    ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig().setMaxEntriesPerLedger(1));
    ManagedCursor c1 = ledger.openCursor("c1");
    // No read request so far
    assertFalse(c1.cancelPendingReadRequest());
    CountDownLatch counter = new CountDownLatch(1);
    c1.asyncReadEntriesOrWait(1, new ReadEntriesCallback() {
        @Override
        public void readEntriesComplete(List<Entry> entries, Object ctx) {
            counter.countDown();
        }
        @Override
        public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
            counter.countDown();
        }
    }, null, PositionImpl.latest);
    // The ledger is still empty, so this read is parked and can be cancelled
    assertTrue(c1.cancelPendingReadRequest());
    CountDownLatch counter2 = new CountDownLatch(1);
    c1.asyncReadEntriesOrWait(1, new ReadEntriesCallback() {
        @Override
        public void readEntriesComplete(List<Entry> entries, Object ctx) {
            counter2.countDown();
        }
        @Override
        public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
            counter2.countDown();
        }
    }, null, PositionImpl.latest);
    ledger.addEntry("entry-1".getBytes(Encoding));
    // NOTE(review): fixed sleep assumes the pending read completes within
    // 100 ms — potential flakiness under load.
    Thread.sleep(100);
    // Read operation should have already been completed
    assertFalse(c1.cancelPendingReadRequest());
    counter2.await();
}
@Test(timeOut = 20000)
public void testReopenMultipleTimes() throws Exception {
    // Re-opening a managed ledger whose empty data ledger was deleted must
    // advance the cursor's mark-delete position rather than leave it dangling.
    ManagedLedger ledger = factory.open("testReopenMultipleTimes");
    ManagedCursor c1 = ledger.openCursor("c1");
    Position mdPosition = c1.getMarkDeletedPosition();
    c1.close();
    ledger.close();
    ledger = factory.open("testReopenMultipleTimes");
    c1 = ledger.openCursor("c1");
    // since the empty data ledger will be deleted, the cursor position should also be updated
    assertNotEquals(c1.getMarkDeletedPosition(), mdPosition);
    c1.close();
    ledger.close();
    // A second reopen cycle must also succeed without errors.
    ledger = factory.open("testReopenMultipleTimes");
    c1 = ledger.openCursor("c1");
}
@Test(timeOut = 20000)
public void testOutOfOrderDeletePersistenceWithClose() throws Exception {
    // Individually-deleted (out-of-order) positions must survive a clean
    // close + factory restart and be skipped by subsequent reads.
    ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig());
    ManagedCursor c1 = ledger.openCursor("c1");
    List<Position> addedPositions = new ArrayList<>();
    for (int i = 0; i < 20; i++) {
        Position p = ledger.addEntry(("dummy-entry-" + i).getBytes(Encoding));
        addedPositions.add(p);
    }
    // Acknowledge few messages leaving holes at indexes 2, 5, 7, 8, 9
    c1.delete(addedPositions.get(2));
    c1.delete(addedPositions.get(5));
    c1.delete(addedPositions.get(7));
    c1.delete(addedPositions.get(8));
    c1.delete(addedPositions.get(9));
    assertEquals(c1.getNumberOfEntriesInBacklog(false), 20 - 5);
    ledger.close();
    factory.shutdown();
    // Re-Open with a fresh factory to force recovery from persisted state
    factory = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle());
    ledger = factory.open("my_test_ledger", new ManagedLedgerConfig());
    c1 = ledger.openCursor("c1");
    assertEquals(c1.getNumberOfEntriesInBacklog(false), 20 - 5);
    List<Entry> entries = c1.readEntries(20);
    assertEquals(entries.size(), 20 - 5);
    List<String> entriesStr = entries.stream().map(e -> new String(e.getDataAndRelease(), Encoding))
            .collect(Collectors.toList());
    assertEquals(entriesStr.get(0), "dummy-entry-0");
    assertEquals(entriesStr.get(1), "dummy-entry-1");
    // Entry-2 was deleted
    assertEquals(entriesStr.get(2), "dummy-entry-3");
    assertEquals(entriesStr.get(3), "dummy-entry-4");
    // Entry-5 was deleted, so the next live entry is entry-6
    assertEquals(entriesStr.get(4), "dummy-entry-6");
    assertFalse(c1.hasMoreEntries());
}
@Test(timeOut = 20000)
public void testOutOfOrderDeletePersistenceAfterCrash() throws Exception {
    // Same as testOutOfOrderDeletePersistenceWithClose, but the first factory
    // is NOT shut down — simulating a crash — before a second factory reopens
    // the ledger and must still recover the individually-deleted positions.
    ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig());
    ManagedCursor c1 = ledger.openCursor("c1");
    List<Position> addedPositions = new ArrayList<>();
    for (int i = 0; i < 20; i++) {
        Position p = ledger.addEntry(("dummy-entry-" + i).getBytes(Encoding));
        addedPositions.add(p);
    }
    // Acknowledge few messages leaving holes at indexes 2, 5, 7, 8, 9
    c1.delete(addedPositions.get(2));
    c1.delete(addedPositions.get(5));
    c1.delete(addedPositions.get(7));
    c1.delete(addedPositions.get(8));
    c1.delete(addedPositions.get(9));
    assertEquals(c1.getNumberOfEntriesInBacklog(false), 20 - 5);
    // Re-Open from a second factory without closing the first (crash scenario)
    ManagedLedgerFactory factory2 = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle());
    ledger = factory2.open("my_test_ledger", new ManagedLedgerConfig());
    c1 = ledger.openCursor("c1");
    assertEquals(c1.getNumberOfEntriesInBacklog(false), 20 - 5);
    List<Entry> entries = c1.readEntries(20);
    assertEquals(entries.size(), 20 - 5);
    List<String> entriesStr = entries.stream().map(e -> new String(e.getDataAndRelease(), Encoding))
            .collect(Collectors.toList());
    assertEquals(entriesStr.get(0), "dummy-entry-0");
    assertEquals(entriesStr.get(1), "dummy-entry-1");
    // Entry-2 was deleted
    assertEquals(entriesStr.get(2), "dummy-entry-3");
    assertEquals(entriesStr.get(3), "dummy-entry-4");
    // Entry-5 was deleted, so the next live entry is entry-6
    assertEquals(entriesStr.get(4), "dummy-entry-6");
    assertFalse(c1.hasMoreEntries());
    factory2.shutdown();
}
/**
 * <pre>
 * Verifies that {@link ManagedCursorImpl#createNewMetadataLedger()} cleans up orphan ledgers if it fails to
 * switch to the new ledger.
 * </pre>
 * @throws Exception
 */
@Test(timeOut=5000)
public void testLeakFailedLedgerOfManageCursor() throws Exception {
    ManagedLedgerConfig mlConfig = new ManagedLedgerConfig();
    ManagedLedger ledger = factory.open("my_test_ledger", mlConfig);
    ManagedCursorImpl c1 = (ManagedCursorImpl) ledger.openCursor("c1");
    // First metadata-ledger switch: we only wait for completion here, the
    // callback counts down on success and failure alike.
    CountDownLatch latch = new CountDownLatch(1);
    c1.createNewMetadataLedger(new VoidCallback() {
        @Override
        public void operationComplete() {
            latch.countDown();
        }
        @Override
        public void operationFailed(ManagedLedgerException exception) {
            latch.countDown();
        }
    });
    // update cursor-info with data which makes bad-version for existing managed-cursor
    CountDownLatch latch1 = new CountDownLatch(1);
    String path = "/managed-ledgers/my_test_ledger/c1";
    zkc.setData(path, "".getBytes(), -1, (rc, path1, ctx, stat) -> {
        // updated path
        latch1.countDown();
    }, null);
    latch1.await();
    // try to create ledger again which will fail because managedCursorInfo znode is already updated with different
    // version so, this call will fail with BadVersionException
    CountDownLatch latch2 = new CountDownLatch(1);
    // NOTE(review): assumes the mocked BookKeeper allocates ledger ids
    // sequentially so the ledger created next gets id 6 — confirm if the
    // test setup changes.
    long ledgerId = 6;
    c1.createNewMetadataLedger(new VoidCallback() {
        @Override
        public void operationComplete() {
            latch2.countDown();
        }
        @Override
        public void operationFailed(ManagedLedgerException exception) {
            latch2.countDown();
        }
    });
    // Wait until operation is completed and the failed ledger should have been deleted
    latch2.await();
    try {
        bkc.openLedgerNoRecovery(ledgerId, DigestType.fromApiDigestType(mlConfig.getDigestType()),
                mlConfig.getPassword());
        fail("ledger should have deleted due to update-cursor failure");
    } catch (BKException e) {
        // expected: the orphan ledger was removed after the metadata update failed
    }
}
/**
 * Verifies that the cursor persists individually-unacked ranges into the cursor-ledger when the range count is
 * higher than MaxUnackedRangesToPersistInZk.
 *
 * @throws Exception
 */
@Test(timeOut = 20000)
public void testOutOfOrderDeletePersistenceIntoLedgerWithClose() throws Exception {
    final int totalAddEntries = 100;
    String ledgerName = "my_test_ledger";
    String cursorName = "c1";
    ManagedLedgerConfig managedLedgerConfig = new ManagedLedgerConfig();
    // metaStore is allowed to store only up to 10 deleted entries range
    managedLedgerConfig.setMaxUnackedRangesToPersistInZk(10);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open(ledgerName, managedLedgerConfig);
    ManagedCursorImpl c1 = (ManagedCursorImpl) ledger.openCursor(cursorName);
    List<Position> addedPositions = new ArrayList<>();
    for (int i = 0; i < totalAddEntries; i++) {
        Position p = ledger.addEntry(("dummy-entry-" + i).getBytes(Encoding));
        addedPositions.add(p);
        if (i % 2 == 0) {
            // Acknowledge alternative message to create totalEntries/2 holes
            c1.delete(addedPositions.get(i));
        }
    }
    assertEquals(c1.getNumberOfEntriesInBacklog(false), totalAddEntries / 2);
    // Close ledger to persist individual-deleted positions into cursor-ledger
    ledger.close();
    // verify cursor-ledgerId is updated properly into cursor-metaStore
    CountDownLatch cursorLedgerLatch = new CountDownLatch(1);
    AtomicLong cursorLedgerId = new AtomicLong(0);
    ledger.getStore().asyncGetCursorInfo(ledger.getName(), cursorName, new MetaStoreCallback<ManagedCursorInfo>() {
        @Override
        public void operationComplete(ManagedCursorInfo result, Stat stat) {
            cursorLedgerId.set(result.getCursorsLedgerId());
            cursorLedgerLatch.countDown();
        }
        @Override
        public void operationFailed(MetaStoreException e) {
            // leave cursorLedgerId at 0 so the assertion below fails loudly
            cursorLedgerLatch.countDown();
        }
    });
    cursorLedgerLatch.await();
    assertEquals(cursorLedgerId.get(), c1.getCursorLedger());
    // verify cursor-ledger's last entry has individual-deleted positions
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicInteger individualDeletedMessagesCount = new AtomicInteger(0);
    bkc.asyncOpenLedger(c1.getCursorLedger(), DigestType.CRC32C, "".getBytes(), (rc, lh, ctx) -> {
        if (rc == BKException.Code.OK) {
            long lastEntry = lh.getLastAddConfirmed();
            lh.asyncReadEntries(lastEntry, lastEntry, (rc1, lh1, seq, ctx1) -> {
                try {
                    LedgerEntry entry = seq.nextElement();
                    PositionInfo positionInfo;
                    positionInfo = PositionInfo.parseFrom(entry.getEntry());
                    individualDeletedMessagesCount.set(positionInfo.getIndividualDeletedMessagesCount());
                } catch (Exception e) {
                    // best-effort: a read/parse failure leaves the count at 0
                    // and the assertion below reports the mismatch
                }
                latch.countDown();
            }, null);
        } else {
            latch.countDown();
        }
    }, null);
    latch.await();
    // 50 alternating holes are persisted as 49 ranges — presumably the first
    // deleted entry advances the mark-delete position instead; confirm
    // against ManagedCursorImpl's delete handling.
    assertEquals(individualDeletedMessagesCount.get(), totalAddEntries / 2 - 1);
    // Re-Open
    factory = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle());
    ledger = (ManagedLedgerImpl) factory.open(ledgerName, managedLedgerConfig);
    c1 = (ManagedCursorImpl) ledger.openCursor("c1");
    // verify cursor has been recovered
    assertEquals(c1.getNumberOfEntriesInBacklog(false), totalAddEntries / 2);
    // try to read entries which should only read non-deleted positions
    List<Entry> entries = c1.readEntries(totalAddEntries);
    assertEquals(entries.size(), totalAddEntries / 2);
}
/**
 * Close Cursor without MaxUnackedRangesToPersistInZK: it should store the individually-unacked ranges into ZK.
 *
 * @throws Exception
 */
@Test(timeOut = 20000)
public void testOutOfOrderDeletePersistenceIntoZkWithClose() throws Exception {
    final int totalAddEntries = 100;
    String ledgerName = "my_test_ledger_zk";
    String cursorName = "c1";
    ManagedLedgerConfig managedLedgerConfig = new ManagedLedgerConfig();
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open(ledgerName, managedLedgerConfig);
    ManagedCursorImpl c1 = (ManagedCursorImpl) ledger.openCursor(cursorName);
    List<Position> addedPositions = new ArrayList<>();
    for (int i = 0; i < totalAddEntries; i++) {
        Position p = ledger.addEntry(("dummy-entry-" + i).getBytes(Encoding));
        addedPositions.add(p);
        if (i % 2 == 0) {
            // Acknowledge alternative message to create totalEntries/2 holes
            c1.delete(addedPositions.get(i));
        }
    }
    assertEquals(c1.getNumberOfEntriesInBacklog(false), totalAddEntries / 2);
    // Close ledger to persist individual-deleted positions into cursor-ledger
    ledger.close();
    // verify the individually-deleted ranges were written into the
    // cursor-metaStore (ZK) rather than a cursor ledger
    CountDownLatch latch = new CountDownLatch(1);
    AtomicInteger individualDeletedMessagesCount = new AtomicInteger(0);
    ledger.getStore().asyncGetCursorInfo(ledger.getName(), cursorName, new MetaStoreCallback<ManagedCursorInfo>() {
        @Override
        public void operationComplete(ManagedCursorInfo result, Stat stat) {
            individualDeletedMessagesCount.set(result.getIndividualDeletedMessagesCount());
            latch.countDown();
        }
        @Override
        public void operationFailed(MetaStoreException e) {
            // leave the count at 0 so the assertion below fails loudly
            latch.countDown();
        }
    });
    latch.await();
    // 50 alternating holes are persisted as 49 ranges — presumably the first
    // deleted entry advances the mark-delete position instead; confirm
    // against ManagedCursorImpl's delete handling.
    assertEquals(individualDeletedMessagesCount.get(), totalAddEntries / 2 - 1);
    // Re-Open
    factory = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle());
    ledger = (ManagedLedgerImpl) factory.open(ledgerName, managedLedgerConfig);
    c1 = (ManagedCursorImpl) ledger.openCursor(cursorName);
    // verify cursor has been recovered
    assertEquals(c1.getNumberOfEntriesInBacklog(false), totalAddEntries / 2);
    // try to read entries which should only read non-deleted positions
    List<Entry> entries = c1.readEntries(totalAddEntries);
    assertEquals(entries.size(), totalAddEntries / 2);
}
@Test
public void testInvalidMarkDelete() throws Exception {
    // Mark-deleting / deleting a position that does not exist must fail and
    // must not move the cursor's read or mark-delete positions.
    ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig());
    ManagedCursor cursor = ledger.openCursor("c1");
    Position readPosition = cursor.getReadPosition();
    Position markDeletePosition = cursor.getMarkDeletedPosition();
    List<Position> addedPositions = new ArrayList<>();
    for (int i = 0; i < 20; i++) {
        Position p = ledger.addEntry(("dummy-entry-" + i).getBytes(Encoding));
        addedPositions.add(p);
    }
    // validate: cursor.asyncMarkDelete(..)
    CountDownLatch markDeleteCallbackLatch = new CountDownLatch(1);
    // (100, 100) is outside any ledger written above, so it is invalid
    Position position = PositionImpl.get(100, 100);
    AtomicBoolean markDeleteCallFailed = new AtomicBoolean(false);
    cursor.asyncMarkDelete(position, new MarkDeleteCallback() {
        @Override
        public void markDeleteComplete(Object ctx) {
            markDeleteCallbackLatch.countDown();
        }
        @Override
        public void markDeleteFailed(ManagedLedgerException exception, Object ctx) {
            markDeleteCallFailed.set(true);
            markDeleteCallbackLatch.countDown();
        }
    }, null);
    markDeleteCallbackLatch.await();
    // previously this flag was recorded but never checked: the test could not
    // detect an invalid mark-delete silently succeeding
    assertTrue(markDeleteCallFailed.get());
    assertEquals(readPosition, cursor.getReadPosition());
    assertEquals(markDeletePosition, cursor.getMarkDeletedPosition());
    // validate : cursor.asyncDelete(..)
    CountDownLatch deleteCallbackLatch = new CountDownLatch(1);
    markDeleteCallFailed.set(false);
    cursor.asyncDelete(position, new DeleteCallback() {
        @Override
        public void deleteComplete(Object ctx) {
            deleteCallbackLatch.countDown();
        }
        @Override
        public void deleteFailed(ManagedLedgerException exception, Object ctx) {
            markDeleteCallFailed.set(true);
            deleteCallbackLatch.countDown();
        }
    }, null);
    deleteCallbackLatch.await();
    assertTrue(markDeleteCallFailed.get());
    assertEquals(readPosition, cursor.getReadPosition());
    assertEquals(markDeletePosition, cursor.getMarkDeletedPosition());
}
@Test
public void testEstimatedUnackedSize() throws Exception {
    // getEstimatedSizeSinceMarkDeletePosition() should reflect the total
    // payload bytes past the mark-delete position.
    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    // pass the configured config (it was previously built and then ignored,
    // so the multi-ledger case was never exercised)
    ManagedLedger ledger = factory.open("my_test_ledger", config);
    ManagedCursor cursor = ledger.openCursor("c1");
    byte[] entryData = new byte[5];
    // write 15 entries, saving position of 5th
    for (int i = 0; i < 4; i++) { ledger.addEntry(entryData); }
    Position deleteAt = ledger.addEntry(entryData);
    for (int i = 0; i < 10; i++) { ledger.addEntry(entryData); }
    assertEquals(cursor.getEstimatedSizeSinceMarkDeletePosition(), 15 * entryData.length);
    cursor.markDelete(deleteAt);
    // it's not an estimate if all entries are the same size
    assertEquals(cursor.getEstimatedSizeSinceMarkDeletePosition(), 10 * entryData.length);
}
@Test(timeOut = 20000)
public void testRecoverCursorAheadOfLastPosition() throws Exception {
    // Recovery scenario: the persisted mark-delete position points past the
    // ledger's last position and its ledger no longer exists. The cursor must
    // clamp to the last position and report an empty backlog.
    final String mlName = "my_test_ledger";
    final PositionImpl lastPosition = new PositionImpl(1L, 10L);
    final PositionImpl nextPosition = new PositionImpl(3L, -1L);
    final String cursorName = "my_test_cursor";
    final long cursorsLedgerId = -1L;
    final long markDeleteLedgerId = 2L;
    final long markDeleteEntryId = -1L;
    MetaStore mockMetaStore = mock(MetaStore.class);
    doAnswer(new Answer<Object>() {
        public Object answer(InvocationOnMock invocation) {
            ManagedCursorInfo info = ManagedCursorInfo.newBuilder().setCursorsLedgerId(cursorsLedgerId)
                    .setMarkDeleteLedgerId(markDeleteLedgerId).setMarkDeleteEntryId(markDeleteEntryId)
                    .setLastActive(0L).build();
            Stat stat = mock(Stat.class);
            MetaStoreCallback<ManagedCursorInfo> callback = (MetaStoreCallback<ManagedCursorInfo>) invocation
                    .getArguments()[2];
            // complete the callback inline, so recovery proceeds synchronously
            callback.operationComplete(info, stat);
            return null;
        }
    }).when(mockMetaStore).asyncGetCursorInfo(eq(mlName), eq(cursorName), any(MetaStoreCallback.class));
    ManagedLedgerImpl ml = mock(ManagedLedgerImpl.class);
    when(ml.getName()).thenReturn(mlName);
    when(ml.getStore()).thenReturn(mockMetaStore);
    when(ml.getLastPosition()).thenReturn(lastPosition);
    when(ml.getNextValidLedger(markDeleteLedgerId)).thenReturn(3L);
    when(ml.getNextValidPosition(lastPosition)).thenReturn(nextPosition);
    // the mark-delete ledger is gone — this is the condition under test
    when(ml.ledgerExists(markDeleteLedgerId)).thenReturn(false);
    BookKeeper mockBookKeeper = mock(BookKeeper.class);
    final ManagedCursorImpl cursor = new ManagedCursorImpl(mockBookKeeper, new ManagedLedgerConfig(), ml,
            cursorName);
    // NOTE(review): the assertions live inside the callback; they only run if
    // recover() invokes it (synchronously here, given the inline mock above).
    cursor.recover(new VoidCallback() {
        @Override
        public void operationComplete() {
            assertEquals(cursor.getMarkDeletedPosition(), lastPosition);
            assertEquals(cursor.getReadPosition(), nextPosition);
            assertEquals(cursor.getNumberOfEntries(), 0L);
        }
        @Override
        public void operationFailed(ManagedLedgerException exception) {
            fail("Cursor recovery should not fail");
        }
    });
}
@Test
void testAlwaysInactive() throws Exception {
    // Once a cursor is flagged always-inactive, setActive() becomes a no-op.
    ManagedLedger ledger = factory.open("testAlwaysInactive");
    ManagedCursor cursor = ledger.openCursor("c1");
    assertTrue(cursor.isActive());
    cursor.setAlwaysInactive();
    assertFalse(cursor.isActive());
    cursor.setActive();
    assertFalse(cursor.isActive());
}
@Test
void testNonDurableCursorActive() throws Exception {
    // A non-durable cursor starts active and can be switched to inactive.
    ManagedLedger ledger = factory.open("testInactive");
    ManagedCursor cursor = ledger.newNonDurableCursor(PositionImpl.latest, "c1");
    assertTrue(cursor.isActive());
    cursor.setInactive();
    assertFalse(cursor.isActive());
}
// NOTE(review): "Checkh" is a typo in the method name, kept as-is to avoid
// breaking references to the test name.
@Test
public void deleteMessagesCheckhMarkDelete() throws Exception {
    // The mark-delete position must only advance through a contiguous prefix
    // of individually-deleted entries.
    ManagedLedger ledger = factory.open("my_test_ledger");
    ManagedCursorImpl c1 = (ManagedCursorImpl) ledger.openCursor("c1");
    final int totalEntries = 1000;
    final Position[] positions = new Position[totalEntries];
    for (int i = 0; i < totalEntries; i++) {
        // add entry
        positions[i] = ledger.addEntry(("entry-" + i).getBytes(Encoding));
    }
    assertEquals(c1.getNumberOfEntries(), totalEntries);
    int totalDeletedMessages = 0;
    for (int i = 0; i < totalEntries; i++) {
        // delete every third entry (0, 3, 6, ...), leaving holes
        if ((i % 3) == 0) {
            c1.delete(positions[i]);
            totalDeletedMessages += 1;
        }
    }
    assertEquals(c1.getNumberOfEntriesInBacklog(false), totalEntries - totalDeletedMessages);
    assertEquals(c1.getNumberOfEntries(), totalEntries - totalDeletedMessages);
    // only entry 0 is a contiguous prefix, so mark-delete stops there
    assertEquals(c1.getMarkDeletedPosition(), positions[0]);
    assertEquals(c1.getReadPosition(), positions[1]);
    // delete 1/2 of the messages: fill the remaining holes in the first half
    for (int i = 0; i < totalEntries / 2; i++) {
        // delete entry
        if ((i % 3) != 0) {
            c1.delete(positions[i]);
            totalDeletedMessages += 1;
        }
    }
    // the first half is now fully deleted, so mark-delete advances to its end
    int markDelete = totalEntries / 2 - 1;
    assertEquals(c1.getNumberOfEntriesInBacklog(false), totalEntries - totalDeletedMessages);
    assertEquals(c1.getNumberOfEntries(), totalEntries - totalDeletedMessages);
    assertEquals(c1.getMarkDeletedPosition(), positions[markDelete]);
    assertEquals(c1.getReadPosition(), positions[markDelete + 1]);
}
@Test
public void testBatchIndexDelete() throws ManagedLedgerException, InterruptedException {
    // Verifies batch-index (partial) deletes within a single position: acked
    // index ranges accumulate and, once all indexes of a batch are acked, the
    // whole position is deleted and the ack-set is discarded.
    ManagedLedger ledger = factory.open("test_batch_index_delete");
    ManagedCursor cursor = ledger.openCursor("c1");
    final int totalEntries = 100;
    final Position[] positions = new Position[totalEntries];
    for (int i = 0; i < totalEntries; i++) {
        // add entry
        positions[i] = ledger.addEntry(("entry-" + i).getBytes(Encoding));
    }
    assertEquals(cursor.getNumberOfEntries(), totalEntries);
    // ack indexes [2, 4] of the 10-slot batch at positions[0]
    deleteBatchIndex(cursor, positions[0], 10, Lists.newArrayList(new IntRange().setStart(2).setEnd(4)));
    List<IntRange> deletedIndexes = getAckedIndexRange(cursor.getDeletedBatchIndexesAsLongArray((PositionImpl) positions[0]), 10);
    Assert.assertEquals(1, deletedIndexes.size());
    Assert.assertEquals(2, deletedIndexes.get(0).getStart());
    Assert.assertEquals(4, deletedIndexes.get(0).getEnd());
    // overlapping ack [3, 8] merges with [2, 4] into [2, 8]
    deleteBatchIndex(cursor, positions[0], 10, Lists.newArrayList(new IntRange().setStart(3).setEnd(8)));
    deletedIndexes = getAckedIndexRange(cursor.getDeletedBatchIndexesAsLongArray((PositionImpl) positions[0]), 10);
    Assert.assertEquals(1, deletedIndexes.size());
    Assert.assertEquals(2, deletedIndexes.get(0).getStart());
    Assert.assertEquals(8, deletedIndexes.get(0).getEnd());
    // non-adjacent ack [0, 0] stays a separate range
    deleteBatchIndex(cursor, positions[0], 10, Lists.newArrayList(new IntRange().setStart(0).setEnd(0)));
    deletedIndexes = getAckedIndexRange(cursor.getDeletedBatchIndexesAsLongArray((PositionImpl) positions[0]), 10);
    Assert.assertEquals(2, deletedIndexes.size());
    Assert.assertEquals(0, deletedIndexes.get(0).getStart());
    Assert.assertEquals(0, deletedIndexes.get(0).getEnd());
    Assert.assertEquals(2, deletedIndexes.get(1).getStart());
    Assert.assertEquals(8, deletedIndexes.get(1).getEnd());
    // acking the last missing indexes (1 and 9) completes the batch: the
    // position is fully deleted, its ack-set is gone, and mark-delete moves
    deleteBatchIndex(cursor, positions[0], 10, Lists.newArrayList(new IntRange().setStart(1).setEnd(1)));
    deleteBatchIndex(cursor, positions[0], 10, Lists.newArrayList(new IntRange().setStart(9).setEnd(9)));
    deletedIndexes = getAckedIndexRange(cursor.getDeletedBatchIndexesAsLongArray((PositionImpl) positions[0]), 10);
    Assert.assertNull(deletedIndexes);
    Assert.assertEquals(positions[0], cursor.getMarkDeletedPosition());
    // deleting the whole position discards any partial ack-set for it
    deleteBatchIndex(cursor, positions[1], 10, Lists.newArrayList(new IntRange().setStart(0).setEnd(5)));
    cursor.delete(positions[1]);
    deleteBatchIndex(cursor, positions[1], 10, Lists.newArrayList(new IntRange().setStart(6).setEnd(8)));
    deletedIndexes = getAckedIndexRange(cursor.getDeletedBatchIndexesAsLongArray((PositionImpl) positions[1]), 10);
    Assert.assertNull(deletedIndexes);
    // mark-delete past a position discards its partial ack-set
    deleteBatchIndex(cursor, positions[2], 10, Lists.newArrayList(new IntRange().setStart(0).setEnd(5)));
    cursor.markDelete(positions[3]);
    deletedIndexes = getAckedIndexRange(cursor.getDeletedBatchIndexesAsLongArray((PositionImpl) positions[2]), 10);
    Assert.assertNull(deletedIndexes);
    // resetting the cursor discards partial ack-sets as well
    deleteBatchIndex(cursor, positions[3], 10, Lists.newArrayList(new IntRange().setStart(0).setEnd(5)));
    cursor.resetCursor(positions[0]);
    deletedIndexes = getAckedIndexRange(cursor.getDeletedBatchIndexesAsLongArray((PositionImpl) positions[3]), 10);
    Assert.assertNull(deletedIndexes);
}
@Test
public void testBatchIndexesDeletionPersistAndRecover() throws ManagedLedgerException, InterruptedException {
    // Verifies that partial (batch-index) ack-sets survive a cursor close and
    // a ledger reopen.
    ManagedLedgerConfig managedLedgerConfig = new ManagedLedgerConfig();
    // Make sure the cursor metadata updated by the cursor ledger ID.
    managedLedgerConfig.setMaxUnackedRangesToPersistInZk(-1);
    ManagedLedger ledger = factory.open("test_batch_indexes_deletion_persistent", managedLedgerConfig);
    ManagedCursor cursor = ledger.openCursor("c1");
    final int totalEntries = 100;
    final Position[] positions = new Position[totalEntries];
    for (int i = 0; i < totalEntries; i++) {
        // add entry
        positions[i] = ledger.addEntry(("entry-" + i).getBytes(Encoding));
    }
    assertEquals(cursor.getNumberOfEntries(), totalEntries);
    // partial acks on positions 5 and 6; positions 0-4 fully acked so the
    // mark-delete position can advance to positions[4]
    deleteBatchIndex(cursor, positions[6], 10, Lists.newArrayList(new IntRange().setStart(1).setEnd(3)));
    deleteBatchIndex(cursor, positions[5], 10, Lists.newArrayList(new IntRange().setStart(3).setEnd(6)));
    deleteBatchIndex(cursor, positions[0], 10, Lists.newArrayList(new IntRange().setStart(0).setEnd(9)));
    deleteBatchIndex(cursor, positions[1], 10, Lists.newArrayList(new IntRange().setStart(0).setEnd(9)));
    deleteBatchIndex(cursor, positions[2], 10, Lists.newArrayList(new IntRange().setStart(0).setEnd(9)));
    deleteBatchIndex(cursor, positions[3], 10, Lists.newArrayList(new IntRange().setStart(0).setEnd(9)));
    deleteBatchIndex(cursor, positions[4], 10, Lists.newArrayList(new IntRange().setStart(0).setEnd(9)));
    cursor.close();
    ledger.close();
    // reopen: the partial ack-set on positions[5] must have been recovered
    ledger = factory.open("test_batch_indexes_deletion_persistent", managedLedgerConfig);
    cursor = ledger.openCursor("c1");
    List<IntRange> deletedIndexes = getAckedIndexRange(cursor.getDeletedBatchIndexesAsLongArray((PositionImpl) positions[5]), 10);
    Assert.assertEquals(deletedIndexes.size(), 1);
    Assert.assertEquals(deletedIndexes.get(0).getStart(), 3);
    Assert.assertEquals(deletedIndexes.get(0).getEnd(), 6);
    Assert.assertEquals(cursor.getMarkDeletedPosition(), positions[4]);
    // completing positions[5] advances the mark-delete position to it
    deleteBatchIndex(cursor, positions[5], 10, Lists.newArrayList(new IntRange().setStart(0).setEnd(9)));
    deletedIndexes = getAckedIndexRange(cursor.getDeletedBatchIndexesAsLongArray((PositionImpl) positions[5]), 10);
    Assert.assertNull(deletedIndexes);
    Assert.assertEquals(cursor.getMarkDeletedPosition(), positions[5]);
    // the partial ack-set on positions[6] was also recovered intact
    deletedIndexes = getAckedIndexRange(cursor.getDeletedBatchIndexesAsLongArray((PositionImpl) positions[6]), 10);
    Assert.assertEquals(deletedIndexes.size(), 1);
    Assert.assertEquals(deletedIndexes.get(0).getStart(), 1);
    Assert.assertEquals(deletedIndexes.get(0).getEnd(), 3);
}
/**
 * Acks (deletes) the given batch indexes of a single batch message on the cursor and
 * blocks until the asynchronous delete completes.
 * <p>
 * Ack-set convention used here: a SET bit means the index is still pending, a CLEARED
 * bit means the index has been acked. The method therefore starts from an all-set bit
 * set of {@code batchSize} bits and clears each supplied range; ranges are inclusive
 * on both ends, hence the {@code getEnd() + 1} in the clear call.
 *
 * @param cursor        cursor on which the delete is performed
 * @param position      position of the batch message; its {@code ackSet} field is
 *                      populated only for the duration of the call and reset to null afterwards
 * @param batchSize     total number of entries in the batch
 * @param deleteIndexes inclusive index ranges to ack
 */
private void deleteBatchIndex(ManagedCursor cursor, Position position, int batchSize,
                              List<IntRange> deleteIndexes) throws InterruptedException {
    CountDownLatch latch = new CountDownLatch(1);
    PositionImpl pos = (PositionImpl) position;
    // Start with every index pending, then clear the ranges being acked.
    BitSet bitSet = new BitSet(batchSize);
    bitSet.set(0, batchSize);
    deleteIndexes.forEach(intRange -> {
        bitSet.clear(intRange.getStart(), intRange.getEnd() + 1);
    });
    // Attach the ack-set to the position only for this call.
    pos.ackSet = bitSet.toLongArray();
    cursor.asyncDelete(pos,
        new DeleteCallback() {
            @Override
            public void deleteComplete(Object ctx) {
                latch.countDown();
            }

            @Override
            public void deleteFailed(ManagedLedgerException exception, Object ctx) {
                // NOTE(review): failures are intentionally swallowed here; callers
                // are expected to assert on the resulting cursor state afterwards.
                latch.countDown();
            }
        }, null);
    latch.await();
    pos.ackSet = null;
}
/**
 * Converts a batch-deletion ack bit set (as returned by
 * {@code getDeletedBatchIndexesAsLongArray}) into the list of acked index ranges.
 * A CLEARED bit marks an acked index; each returned {@link IntRange} is inclusive
 * on both ends.
 *
 * @param bitSetLongArray long-array form of the ack bit set, or null if the cursor
 *                        has no batch-deletion info for the position
 * @param batchSize       total number of entries in the batch; scanning stops there
 * @return the acked ranges, or null when {@code bitSetLongArray} is null
 */
private List<IntRange> getAckedIndexRange(long[] bitSetLongArray, int batchSize) {
    if (bitSetLongArray == null) {
        return null;
    }
    List<IntRange> result = new ArrayList<>();
    BitSet bitSet = BitSet.valueOf(bitSetLongArray);
    // BitSet.nextClearBit never returns -1 (it reports the index just past the set
    // when necessary), so only the batchSize bound terminates the scan; the original
    // "!= -1" guard was dead code and has been removed.
    int nextClearBit = bitSet.nextClearBit(0);
    while (nextClearBit <= batchSize) {
        int nextSetBit = bitSet.nextSetBit(nextClearBit);
        if (nextSetBit == -1) {
            // No more pending (set) bits: the trailing cleared region is beyond the batch.
            break;
        }
        result.add(new IntRange().setStart(nextClearBit).setEnd(nextSetBit - 1));
        nextClearBit = bitSet.nextClearBit(nextSetBit);
    }
    return result;
}
/**
 * Verifies that readEntriesOrWait honors the max-size hint: the very first read
 * (no average-entry-size statistics yet) returns a single entry, a later read is
 * capped at maxSize / avgEntrySize entries, and a maxSize below the average still
 * yields one entry rather than zero.
 */
@Test
public void testReadEntriesOrWaitWithMaxSize() throws Exception {
    ManagedLedger ledger = factory.open("testReadEntriesOrWaitWithMaxSize");
    ManagedCursor cursor = ledger.openCursor("c");

    final int entryCount = 20;
    for (int i = 0; i < entryCount; i++) {
        ledger.addEntry(new byte[1024]);
    }

    // No size statistics yet: the cursor conservatively returns one entry.
    List<Entry> batch = cursor.readEntriesOrWait(10, 3 * 1024);
    assertEquals(batch.size(), 1);
    batch.forEach(Entry::release);

    // Average size is now known: a 3 KB budget fits exactly three 1 KB entries.
    batch = cursor.readEntriesOrWait(10, 3 * 1024);
    assertEquals(batch.size(), 3);
    batch.forEach(Entry::release);

    // A budget smaller than the average entry size still returns a single entry.
    batch = cursor.readEntriesOrWait(10, 5);
    assertEquals(batch.size(), 1);
    batch.forEach(Entry::release);
}
/**
 * Verifies that asyncReadEntriesOrWait never reads past the supplied max position:
 * the first read must stop at the 10th entry even though 20 are available, and a
 * second read must return only the remainder up to the last entry.
 */
@Test
public void testReadEntriesOrWaitWithMaxPosition() throws Exception {
    int readMaxNumber = 10;
    int sendNumber = 20;
    ManagedLedger ledger = factory.open("testReadEntriesOrWaitWithMaxPosition");
    ManagedCursor c = ledger.openCursor("c");
    Position position = PositionImpl.earliest;
    Position maxCanReadPosition = PositionImpl.earliest;
    for (int i = 0; i < sendNumber; i++) {
        if (i == readMaxNumber - 1) {
            // Position of the 10th entry: upper bound for the first read.
            position = ledger.addEntry(new byte[1024]);
        } else if (i == sendNumber - 1) {
            // Position of the last entry: upper bound for the second read.
            maxCanReadPosition = ledger.addEntry(new byte[1024]);
        } else {
            ledger.addEntry(new byte[1024]);
        }
    }

    CompletableFuture<Integer> firstRead = new CompletableFuture<>();
    c.asyncReadEntriesOrWait(sendNumber, new ReadEntriesCallback() {
        @Override
        public void readEntriesComplete(List<Entry> entries, Object ctx) {
            int count = entries.size();
            // Release the entries to avoid leaking their buffers in the test.
            entries.forEach(Entry::release);
            firstRead.complete(count);
        }

        @Override
        public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
            firstRead.completeExceptionally(exception);
        }
    }, null, (PositionImpl) position);
    assertEquals(firstRead.get().intValue(), readMaxNumber);

    // BUG FIX: the original reused the already-completed future for the second read,
    // so the final assertion re-checked the first read's result (it only passed because
    // sendNumber - readMaxNumber happened to equal readMaxNumber). Use a fresh future
    // so the second read is actually verified.
    CompletableFuture<Integer> secondRead = new CompletableFuture<>();
    c.asyncReadEntriesOrWait(sendNumber, new ReadEntriesCallback() {
        @Override
        public void readEntriesComplete(List<Entry> entries, Object ctx) {
            int count = entries.size();
            entries.forEach(Entry::release);
            secondRead.complete(count);
        }

        @Override
        public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
            secondRead.completeExceptionally(exception);
        }
    }, null, (PositionImpl) maxCanReadPosition);
    assertEquals(secondRead.get().intValue(), sendNumber - readMaxNumber);
}
/**
 * Verifies that a cursor's mark-delete position — which mark-delete throttling may have
 * applied only in memory — is flushed to the persistent store after the configured
 * cursorPositionFlushSeconds of inactivity, so an abrupt reopen (no graceful close)
 * still observes the latest mark-deleted position.
 */
@Test
public void testFlushCursorAfterInactivity() throws Exception {
    ManagedLedgerConfig config = new ManagedLedgerConfig();
    // Throttle mark-deletes so most updates are applied in memory only.
    config.setThrottleMarkDelete(1.0);
    ManagedLedgerFactoryConfig factoryConfig = new ManagedLedgerFactoryConfig();
    factoryConfig.setCursorPositionFlushSeconds(1);
    ManagedLedgerFactory factory1 = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle(), factoryConfig);
    ManagedLedger ledger1 = factory1.open("testFlushCursorAfterInactivity", config);
    ManagedCursor c1 = ledger1.openCursor("c");

    List<Position> positions = new ArrayList<Position>();
    for (int i = 0; i < 20; i++) {
        positions.add(ledger1.addEntry(new byte[1024]));
    }

    CountDownLatch latch = new CountDownLatch(positions.size());
    // BUG FIX: the original threw a RuntimeException from markDeleteFailed. That exception
    // is raised on the callback thread, never reaches the test thread, and leaves the latch
    // un-counted — so a failure would hang the test forever instead of failing it. Record
    // the failure, always count down, and rethrow on the test thread after await().
    ManagedLedgerException[] failure = new ManagedLedgerException[1];
    positions.forEach(p -> c1.asyncMarkDelete(p, new MarkDeleteCallback() {
        @Override
        public void markDeleteComplete(Object ctx) {
            latch.countDown();
        }

        @Override
        public void markDeleteFailed(ManagedLedgerException exception, Object ctx) {
            failure[0] = exception;
            latch.countDown();
        }
    }, null));

    latch.await();
    if (failure[0] != null) {
        throw new RuntimeException(failure[0]);
    }
    assertEquals(c1.getMarkDeletedPosition(), positions.get(positions.size() - 1));

    Awaitility.await()
            // Give chance to the flush to be automatically triggered.
            // NOTE: this can't be set too low, or it causes issues with ZK thread pool rejecting
            .pollDelay(Duration.ofMillis(2000))
            .untilAsserted(() -> {
                // Abruptly re-open the managed ledger without graceful close
                ManagedLedgerFactory factory2 = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle());
                try {
                    ManagedLedger ledger2 = factory2.open("testFlushCursorAfterInactivity", config);
                    ManagedCursor c2 = ledger2.openCursor("c");
                    assertEquals(c2.getMarkDeletedPosition(), positions.get(positions.size() - 1));
                } finally {
                    factory2.shutdown();
                }
            });

    factory1.shutdown();
}
/**
 * Verifies that individually-deleted entries (which first advance the mark-delete position
 * only in memory) are flushed to the persistent store after cursorPositionFlushSeconds of
 * inactivity: a dirty reopen right away must NOT see the latest position, while a reopen
 * after the flush interval must.
 */
@Test
public void testFlushCursorAfterIndividualDeleteInactivity() throws Exception {
    ManagedLedgerConfig config = new ManagedLedgerConfig();
    // Throttle mark-deletes so most updates are applied in memory only.
    config.setThrottleMarkDelete(1.0);
    ManagedLedgerFactoryConfig factoryConfig = new ManagedLedgerFactoryConfig();
    factoryConfig.setCursorPositionFlushSeconds(1);
    ManagedLedgerFactory factory1 = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle(), factoryConfig);
    ManagedLedger ledger1 = factory1.open("testFlushCursorAfterIndDelInactivity", config);
    ManagedCursor c1 = ledger1.openCursor("c");

    List<Position> positions = new ArrayList<Position>();
    for (int i = 0; i < 20; i++) {
        positions.add(ledger1.addEntry(new byte[1024]));
    }

    CountDownLatch latch = new CountDownLatch(positions.size());
    // BUG FIX: the original threw a RuntimeException from deleteFailed. That exception is
    // raised on the callback thread, never reaches the test thread, and leaves the latch
    // un-counted — so a failure would hang the test forever instead of failing it. Record
    // the failure, always count down, and rethrow on the test thread after await().
    ManagedLedgerException[] failure = new ManagedLedgerException[1];
    positions.forEach(p -> c1.asyncDelete(p, new DeleteCallback() {
        @Override
        public void deleteComplete(Object ctx) {
            latch.countDown();
        }

        @Override
        public void deleteFailed(ManagedLedgerException exception, Object ctx) {
            failure[0] = exception;
            latch.countDown();
        }
    }, null));

    latch.await();
    if (failure[0] != null) {
        throw new RuntimeException(failure[0]);
    }
    assertEquals(c1.getMarkDeletedPosition(), positions.get(positions.size() - 1));

    // reopen the cursor and we should see entries not be flushed
    ManagedLedgerFactory dirtyFactory = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle());
    ManagedLedger ledgerDirty = dirtyFactory.open("testFlushCursorAfterIndDelInactivity", config);
    ManagedCursor dirtyCursor = ledgerDirty.openCursor("c");
    assertNotEquals(dirtyCursor.getMarkDeletedPosition(), positions.get(positions.size() - 1));

    Awaitility.await()
            // Give chance to the flush to be automatically triggered.
            // NOTE: this can't be set too low, or it causes issues with ZK thread pool rejecting
            .pollDelay(Duration.ofMillis(2000))
            .untilAsserted(() -> {
                // Abruptly re-open the managed ledger without graceful close
                ManagedLedgerFactory factory2 = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle());
                try {
                    ManagedLedger ledger2 = factory2.open("testFlushCursorAfterIndDelInactivity", config);
                    ManagedCursor c2 = ledger2.openCursor("c");
                    assertEquals(c2.getMarkDeletedPosition(), positions.get(positions.size() - 1));
                } finally {
                    factory2.shutdown();
                }
            });

    factory1.shutdown();
    dirtyFactory.shutdown();
}
/**
 * Exercises ManagedCursor#checkAndUpdateReadPositionChanged across an empty ledger,
 * reads with mark-deletes, and a cursor recovery cycle after reopening the ledger.
 */
@Test
public void testCursorCheckReadPositionChanged() throws Exception {
    ManagedLedger ledger = factory.open("my_test_ledger", new ManagedLedgerConfig());
    ManagedCursor c1 = ledger.openCursor("c1");

    // On an empty ledger the check always reports a change.
    assertTrue(c1.checkAndUpdateReadPositionChanged());
    assertTrue(c1.checkAndUpdateReadPositionChanged());

    for (int i = 0; i < 4; i++) {
        ledger.addEntry("dummy-entry-1".getBytes(Encoding));
    }

    // Nothing has been read yet, so the read position has not moved.
    assertFalse(c1.checkAndUpdateReadPositionChanged());

    for (Entry entry : c1.readEntries(2)) {
        try {
            c1.markDelete(entry.getPosition());
            entry.release();
        } catch (Exception ignored) {
            // best-effort cleanup
        }
    }

    // Reading advanced the position once...
    assertTrue(c1.checkAndUpdateReadPositionChanged());
    // ...and it has not moved again since that check.
    assertFalse(c1.checkAndUpdateReadPositionChanged());

    c1.close();
    ledger.close();

    // Recover the cursor from its persisted state and repeat the cycle.
    ledger = factory.open("my_test_ledger", new ManagedLedgerConfig());
    ManagedCursor c2 = ledger.openCursor("c1");
    assertTrue(c2.checkAndUpdateReadPositionChanged());
    assertFalse(c2.checkAndUpdateReadPositionChanged());

    for (Entry entry : c2.readEntries(2)) {
        try {
            c2.markDelete(entry.getPosition());
            entry.release();
        } catch (Exception ignored) {
            // best-effort cleanup
        }
    }
    assertTrue(c2.checkAndUpdateReadPositionChanged());

    // Once the read position reaches the tail, the check keeps returning true.
    assertTrue(c2.checkAndUpdateReadPositionChanged());
    assertTrue(c2.checkAndUpdateReadPositionChanged());
    ledger.close();
}
private static final Logger log = LoggerFactory.getLogger(ManagedCursorTest.class);
}
|
/*
* Copyright 2000-2016 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.util.concurrency;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.util.EmptyRunnable;
import com.intellij.util.TimeoutUtil;
import junit.framework.TestCase;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.ide.PooledThreadExecutor;
import java.util.List;
import java.util.Random;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Tests {@link BoundedScheduledExecutorService}: a scheduling executor which must never run
 * more than the configured number of tasks simultaneously on top of a shared backend pool.
 * Covers the parallelism bound for schedule()/submit(), sequential execution for a bound of 1,
 * early cancellation, shutdown()/shutdownNow() semantics and awaitTermination().
 *
 * <p>Change from the original: the raw {@code ScheduledFuture[]}/{@code Future[]} arrays are
 * replaced with wildcard arrays ({@code Future<?>[]}), which are reifiable and keep the
 * compiler's generic checks; no behavior changes.
 */
public class BoundedScheduledExecutorTest extends TestCase {
  private static final Logger LOG = Logger.getInstance(BoundedScheduledExecutorTest.class);

  /** schedule()d tasks must never run with more than {@code maxTasks} parallelism. */
  public void testSchedulesAreReallyBound() throws InterruptedException, ExecutionException {
    ExecutorService backendExecutor = AppExecutorUtil.getAppExecutorService();
    for (int maxTasks=1; maxTasks<5;maxTasks++) {
      LOG.debug("maxTasks = " + maxTasks);
      BoundedScheduledExecutorService executor = createBoundedScheduledExecutor(backendExecutor, maxTasks);
      AtomicInteger running = new AtomicInteger();   // tasks currently inside the body
      AtomicInteger max = new AtomicInteger();       // highest parallelism ever observed
      AtomicInteger executed = new AtomicInteger();  // tasks that ran to completion
      int N = 10000;
      ScheduledFuture<?>[] futures = new ScheduledFuture<?>[N];
      for (int i = 0; i < N; i++) {
        futures[i] = executor.schedule(() -> {
          int r = running.incrementAndGet();
          try {
            TimeoutUtil.sleep(1);
            max.accumulateAndGet(r, Math::max);
            executed.incrementAndGet();
          }
          finally {
            running.decrementAndGet();
          }
        }, i%10, TimeUnit.MILLISECONDS);
      }
      for (ScheduledFuture<?> future : futures) {
        future.get();
      }
      assertEquals(0, executor.shutdownNow().size());
      assertTrue(executor.awaitTermination(N + N + 100000, TimeUnit.MILLISECONDS));
      // With 10000 short tasks the bound is expected to be saturated at least once.
      assertEquals(maxTasks, max.get());
      assertEquals(N, executed.get());
    }
  }

  @NotNull
  private BoundedScheduledExecutorService createBoundedScheduledExecutor(@NotNull ExecutorService backendExecutor, int maxTasks) {
    return new BoundedScheduledExecutorService(getName(), backendExecutor, maxTasks);
  }

  /** submit()ted tasks must obey the same parallelism bound as schedule()d ones. */
  public void testSubmitsAreReallyBound() throws InterruptedException, ExecutionException {
    ExecutorService backendExecutor = AppExecutorUtil.getAppExecutorService();
    for (int maxTasks=1; maxTasks<5;maxTasks++) {
      LOG.debug("maxTasks = " + maxTasks);
      BoundedScheduledExecutorService executor = createBoundedScheduledExecutor(backendExecutor, maxTasks);
      AtomicInteger running = new AtomicInteger();
      AtomicInteger max = new AtomicInteger();
      AtomicInteger executed = new AtomicInteger();
      int N = 10000;
      Future<?>[] futures = new Future<?>[N];
      for (int i = 0; i < N; i++) {
        futures[i] = executor.submit(() -> {
          int r = running.incrementAndGet();
          try {
            TimeoutUtil.sleep(1);
            max.accumulateAndGet(r, Math::max);
            executed.incrementAndGet();
          }
          finally {
            running.decrementAndGet();
          }
        });
      }
      for (Future<?> future : futures) {
        future.get();
      }
      assertEquals(0, executor.shutdownNow().size());
      assertTrue(executor.awaitTermination(N + N+100000, TimeUnit.MILLISECONDS));
      assertEquals(maxTasks, max.get());
      assertEquals(N, executed.get());
    }
  }

  /** schedule(Callable) must deliver the callable's value through the returned future. */
  public void testCallableReallyReturnsValue() throws Exception{
    BoundedScheduledExecutorService executor = createBoundedScheduledExecutor(AppExecutorUtil.getAppExecutorService(), 1);
    Future<Integer> f1 = executor.schedule(() -> 42, 1, TimeUnit.SECONDS);
    Integer result = f1.get();
    assertEquals(42, result.intValue());
    executor.shutdownNow();
    assertTrue(executor.awaitTermination(100, TimeUnit.SECONDS));
  }

  /** Cancelling a delayed task before its delay elapses must prevent it from running at all. */
  public void testEarlyCancelPreventsRunning() throws InterruptedException {
    AtomicBoolean run = new AtomicBoolean();
    BoundedScheduledExecutorService executor = createBoundedScheduledExecutor(AppExecutorUtil.getAppExecutorService(), 1);
    int delayMs = 10*1000;
    Future<?> s1 = executor.schedule(EmptyRunnable.getInstance(), delayMs, TimeUnit.MILLISECONDS);
    Future<Integer> f1 = executor.schedule(() -> {
      run.set(true);
      return 42;
    }, delayMs, TimeUnit.MILLISECONDS);
    f1.cancel(false);
    // Wait past the delay: the cancelled task must not have flipped the flag.
    TimeoutUtil.sleep(delayMs + 1000);
    assertTrue(f1.isDone());
    assertTrue(f1.isCancelled());
    assertFalse(run.get());
    assertTrue(s1.isDone());
    executor.shutdownNow();
    assertTrue(executor.awaitTermination(100, TimeUnit.SECONDS));
  }

  /**
   * Stress test: tasks randomly block on earlier tasks' futures; the observed parallelism
   * must still never exceed the bound, and no deadlock may occur.
   */
  public void testStressWhenSomeTasksCallOtherTasksGet() throws ExecutionException, InterruptedException {
    ExecutorService backendExecutor = AppExecutorUtil.getAppExecutorService();
    for (int maxSimultaneousTasks = 1; maxSimultaneousTasks<20; maxSimultaneousTasks++) {
      LOG.debug("maxSimultaneousTasks = " + maxSimultaneousTasks);
      BoundedScheduledExecutorService executor = createBoundedScheduledExecutor(backendExecutor, maxSimultaneousTasks);
      AtomicInteger running = new AtomicInteger();
      AtomicInteger maxThreads = new AtomicInteger();
      AtomicInteger availableThreads = new AtomicInteger(maxSimultaneousTasks); // to avoid deadlocks when trying to wait inside the pool thread
      try {
        int N = 1000;
        Future<?>[] futures = new Future<?>[N];
        Random random = new Random();
        for (int i = 0; i < N; i++) {
          final int finalI = i;
          final int finalMaxSimultaneousTasks = maxSimultaneousTasks;
          futures[i] = executor.schedule(() -> {
            maxThreads.accumulateAndGet(running.incrementAndGet(), Math::max);
            try {
              int r = random.nextInt(finalMaxSimultaneousTasks);
              int prev = finalI - r;
              if (prev < finalI && prev >= 0) {
                // Only block on an earlier future while at least one worker stays free;
                // otherwise every bounded slot could end up waiting on another.
                if (availableThreads.decrementAndGet() > 0) {
                  try {
                    futures[prev].get();
                  }
                  catch (Exception e) {
                    throw new RuntimeException(e);
                  }
                }
                availableThreads.incrementAndGet();
              }
              TimeoutUtil.sleep(r);
            }
            finally {
              running.decrementAndGet();
            }
          }, i % 10, TimeUnit.MILLISECONDS);
        }
        for (Future<?> future : futures) {
          future.get();
        }
      }
      finally {
        executor.shutdownNow();
        assertTrue(executor.awaitTermination(100, TimeUnit.SECONDS));
      }
      assertTrue("Max threads was: "+maxThreads+" but bound was: "+maxSimultaneousTasks, maxThreads.get() <= maxSimultaneousTasks);
    }
  }

  /** With a bound of 1, zero-delay schedules must execute strictly in submission order. */
  public void testSequentialSchedulesMustExecuteSequentially() throws ExecutionException, InterruptedException {
    BoundedScheduledExecutorService executor = createBoundedScheduledExecutor(AppExecutorUtil.getAppExecutorService(), 1);
    int N = 100000;
    StringBuffer log = new StringBuffer(N*4);  // StringBuffer: appended to from pool threads
    StringBuilder expected = new StringBuilder(N * 4);
    Future<?>[] futures = new Future<?>[N];
    for (int i = 0; i < N; i++) {
      final int finalI = i;
      //noinspection StringConcatenationInsideStringBufferAppend
      futures[i] = executor.schedule(() -> log.append(finalI+" "), 0, TimeUnit.MILLISECONDS);
    }
    for (int i = 0; i < N; i++) {
      expected.append(i).append(" ");
      futures[i].get();
    }
    String logs = log.toString();
    assertEquals(expected.toString(), logs);
    executor.shutdownNow();
    assertTrue(executor.awaitTermination(100, TimeUnit.SECONDS));
  }

  /** shutdownNow() must return and cancel all not-yet-started delayed tasks and reject new ones. */
  public void testShutdownNowMustCancel() throws InterruptedException {
    BoundedScheduledExecutorService executor = createBoundedScheduledExecutor(AppExecutorUtil.getAppExecutorService(), 1);
    int N = 100000;
    StringBuffer log = new StringBuffer(N*4);
    Future<?>[] futures = new Future<?>[N];
    for (int i = 0; i < N; i++) {
      futures[i] = executor.schedule(() -> log.append(" "), 10, TimeUnit.SECONDS);
    }
    List<Runnable> runnables = executor.shutdownNow();
    assertTrue(executor.isShutdown());
    assertEquals(N, runnables.size());
    try {
      executor.schedule(EmptyRunnable.getInstance(), 10, TimeUnit.SECONDS);
      fail("Must reject");
    }
    catch (RejectedExecutionException ignored) {
    }
    try {
      executor.execute(EmptyRunnable.getInstance());
      fail("Must reject");
    }
    catch (RejectedExecutionException ignored) {
    }
    for (int i = 0; i < N; i++) {
      assertTrue(futures[i].isCancelled());
    }
    String logs = log.toString();
    assertEquals("", logs);
    assertTrue(executor.awaitTermination(100, TimeUnit.SECONDS));
  }

  /** shutdown() must cancel pending delayed tasks and reject new submissions. */
  public void testShutdownMustDisableSubmit() throws InterruptedException {
    BoundedScheduledExecutorService executor = createBoundedScheduledExecutor(AppExecutorUtil.getAppExecutorService(), 1);
    int N = 100000;
    StringBuffer log = new StringBuffer(N*4);
    Future<?>[] futures = new Future<?>[N];
    for (int i = 0; i < N; i++) {
      futures[i] = executor.schedule(() -> log.append(" "), 10, TimeUnit.SECONDS);
    }
    executor.shutdown();
    assertTrue(executor.isShutdown());
    try {
      executor.schedule(EmptyRunnable.getInstance(), 10, TimeUnit.SECONDS);
      fail("Must reject");
    }
    catch (RejectedExecutionException ignored) {
    }
    try {
      executor.execute(EmptyRunnable.getInstance());
      fail("Must reject");
    }
    catch (RejectedExecutionException ignored) {
    }
    for (int i = 0; i < N; i++) {
      assertTrue(futures[i].isCancelled());
    }
    String logs = log.toString();
    assertEquals("", logs);
    assertTrue(executor.awaitTermination(100, TimeUnit.SECONDS));
  }

  /** awaitTermination() must block until every already-submitted task has finished. */
  public void testAwaitTerminationDoesWait() throws InterruptedException {
    for (int maxTasks=1; maxTasks<10;maxTasks++) {
      ExecutorService executor = createBoundedScheduledExecutor(PooledThreadExecutor.INSTANCE, maxTasks);
      int N = 100000;
      StringBuffer log = new StringBuffer(N*4);
      Future<?>[] futures = new Future<?>[N];
      for (int i = 0; i < N; i++) {
        futures[i] = executor.submit(() -> log.append(" "));
      }
      executor.shutdown();
      assertTrue(executor.awaitTermination(100, TimeUnit.SECONDS));
      String logs = log.toString();
      assertEquals(N, logs.length());
      for (Future<?> future : futures) {
        assertTrue(future.isDone());
        assertTrue(!future.isCancelled());
      }
    }
  }

  /** awaitTermination() must not report termination while a submitted task is still running. */
  public void testAwaitTerminationDoesNotCompletePrematurely() throws InterruptedException {
    ExecutorService executor2 = createBoundedScheduledExecutor(PooledThreadExecutor.INSTANCE, 1);
    Future<?> future = executor2.submit(() -> TimeoutUtil.sleep(10000));
    executor2.shutdown();
    assertFalse(executor2.awaitTermination(1, TimeUnit.SECONDS));
    assertFalse(future.isDone());
    assertFalse(future.isCancelled());
    assertTrue(executor2.awaitTermination(100, TimeUnit.SECONDS));
    assertTrue(future.isDone());
    assertFalse(future.isCancelled());
  }

  /** shutdown() cancels a still-delayed scheduled task, and awaitTermination() then succeeds. */
  public void testAwaitTerminationOfScheduledTask() throws InterruptedException {
    ScheduledExecutorService executor = createBoundedScheduledExecutor(PooledThreadExecutor.INSTANCE, 1);
    Future<?> future = executor.schedule(() -> TimeoutUtil.sleep(10000), 100, TimeUnit.MILLISECONDS);
    executor.shutdown();
    assertTrue(future.isDone());
    assertTrue(future.isCancelled());
    assertTrue(executor.awaitTermination(100, TimeUnit.SECONDS));
    assertTrue(future.isDone());
    assertTrue(future.isCancelled());
  }
}
|
package com.javarush.task.task05.task0522;
/*
Maximum of constructors
*/
public class Circle {
private double x;
private double y;
private double radius;
//напишите тут ваш код
public Circle() {
this.x = 1.0;
this.y = 1.0;
this.radius = 1.0;
}
public Circle(double x, double y) {
this.x = x;
this.y = y;
this.radius = 1.0;
}
public Circle(double x) {
this.x = x;
this.y = x;
this.radius = 1.0;
}
public Circle(Circle circ) {
this.x = circ.x;
this.y = circ.y;
this.radius = circ.radius;
}
public static void main(String[] args) {
}
}
|
package com.example.shiftschedule;
import android.os.Bundle;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.fragment.app.Fragment;
public class F_schedule_day_view extends Fragment {

    /** Static day-view schedule screen: the fragment only inflates its layout. */
    @Nullable
    @Override
    public View onCreateView(@NonNull LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) {
        final View root = inflater.inflate(R.layout.fragment_schedule_day_view, container, false);
        return root;
    }
}
|
package com.example.android.miwok;
import android.content.Context;
import android.media.AudioManager;
import android.media.MediaPlayer;
import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import android.view.View;
import android.widget.AdapterView;
import android.widget.ListView;
import java.util.ArrayList;
/**
 * Displays the list of Miwok color words and plays the matching pronunciation audio
 * when a list item is tapped. Transient audio focus is requested for each playback,
 * and the {@link MediaPlayer} is released as soon as it is no longer needed.
 */
public class ColorsActivity extends AppCompatActivity {

    // Plays the pronunciation clip for the tapped word; null whenever no clip is loaded.
    private MediaPlayer mMediaPlayer;

    // System service used to request/abandon audio focus around each playback.
    private AudioManager mAudioManager;

    // Reacts to audio-focus changes while a clip is playing: transient loss (including
    // "can duck") pauses and rewinds so the short clip restarts from the beginning when
    // focus returns; permanent loss releases the player entirely.
    private AudioManager.OnAudioFocusChangeListener mOnAudioFocusChangeListener = new AudioManager.OnAudioFocusChangeListener() {
        @Override
        public void onAudioFocusChange(int focusChange) {
            if (focusChange == AudioManager.AUDIOFOCUS_LOSS_TRANSIENT ||
                    focusChange == AudioManager.AUDIOFOCUS_LOSS_TRANSIENT_CAN_DUCK) {
                // Pause and rewind: the clips are short words, so replaying from the
                // start is preferable to resuming mid-word.
                mMediaPlayer.pause();
                mMediaPlayer.seekTo(0);
            } else if (focusChange == AudioManager.AUDIOFOCUS_GAIN) {
                mMediaPlayer.start();
            } else if (focusChange == AudioManager.AUDIOFOCUS_LOSS) {
                releaseMediaPlayer();
            }
        }
    };

    // Releases the player as soon as a clip finishes so its resources are not held.
    private MediaPlayer.OnCompletionListener mCompletionListener = new MediaPlayer.OnCompletionListener() {
        @Override
        public void onCompletion(MediaPlayer mp) {
            releaseMediaPlayer();
        }
    };

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.word_list);

        mAudioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE);

        //Create list of words
        final ArrayList<Word> words = new ArrayList<Word>();
        words.add(new Word("red", "weṭeṭṭi", R.drawable.color_red, R.raw.color_red));
        words.add(new Word("mustard yellow", "chiwiiṭә", R.drawable.color_mustard_yellow, R.raw.color_mustard_yellow));
        words.add(new Word("dusty yellow", "ṭopiisә", R.drawable.color_dusty_yellow, R.raw.color_dusty_yellow));
        words.add(new Word("green", "chokokki", R.drawable.color_green, R.raw.color_green));
        words.add(new Word("brown", "ṭakaakki", R.drawable.color_brown, R.raw.color_brown));
        words.add(new Word("gray", "ṭopoppi", R.drawable.color_gray, R.raw.color_gray));
        words.add(new Word("black", "kululli", R.drawable.color_black, R.raw.color_black));
        words.add(new Word("white", "kelelli", R.drawable.color_white, R.raw.color_white));

        WordAdapter adapter = new WordAdapter(this, words, R.color.category_colors);
        ListView listView = (ListView) findViewById(R.id.list);
        listView.setAdapter(adapter);

        listView.setOnItemClickListener(new AdapterView.OnItemClickListener() {
            @Override
            public void onItemClick(AdapterView<?> adapterView, View view, int position, long l) {
                // Stop and release any clip that is still playing before starting a new one.
                releaseMediaPlayer();
                Word word = words.get(position);
                // Request transient focus: playback only proceeds if the system grants it.
                int result = mAudioManager.requestAudioFocus(mOnAudioFocusChangeListener,
                        AudioManager.STREAM_MUSIC, AudioManager.AUDIOFOCUS_GAIN_TRANSIENT);
                if (result == AudioManager.AUDIOFOCUS_REQUEST_GRANTED) {
                    mMediaPlayer = MediaPlayer.create(ColorsActivity.this, word.getAudioResourceId());
                    mMediaPlayer.start();
                    mMediaPlayer.setOnCompletionListener(mCompletionListener);
                }
            }
        });
    }

    @Override
    protected void onStop() {
        super.onStop();
        // The activity is no longer visible: stop playback and free the player.
        releaseMediaPlayer();
    }

    /**
     * Clean up the media player by releasing its resources.
     */
    private void releaseMediaPlayer() {
        // If the media player is not null, then it may be currently playing a sound.
        if (mMediaPlayer != null) {
            mMediaPlayer.release();
            mMediaPlayer = null;
            // We held focus only while a player existed, so abandon it together with the player.
            mAudioManager.abandonAudioFocus(mOnAudioFocusChangeListener);
        }
    }
}
|
/*
* Licensed to GraphHopper GmbH under one or more contributor
* license agreements. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*
* GraphHopper GmbH licenses this file to you under the Apache License,
* Version 2.0 (the "License"); you may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.graphhopper.jsprit.core.algorithm.recreate;
import com.graphhopper.jsprit.core.problem.JobActivityFactory;
import com.graphhopper.jsprit.core.problem.Location;
import com.graphhopper.jsprit.core.problem.constraint.*;
import com.graphhopper.jsprit.core.problem.constraint.HardActivityConstraint.ConstraintsStatus;
import com.graphhopper.jsprit.core.problem.cost.VehicleRoutingActivityCosts;
import com.graphhopper.jsprit.core.problem.cost.VehicleRoutingTransportCosts;
import com.graphhopper.jsprit.core.problem.driver.Driver;
import com.graphhopper.jsprit.core.problem.job.Break;
import com.graphhopper.jsprit.core.problem.job.Job;
import com.graphhopper.jsprit.core.problem.misc.JobInsertionContext;
import com.graphhopper.jsprit.core.problem.solution.route.VehicleRoute;
import com.graphhopper.jsprit.core.problem.solution.route.activity.BreakActivity;
import com.graphhopper.jsprit.core.problem.solution.route.activity.End;
import com.graphhopper.jsprit.core.problem.solution.route.activity.Start;
import com.graphhopper.jsprit.core.problem.solution.route.activity.TourActivity;
import com.graphhopper.jsprit.core.problem.vehicle.Vehicle;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
/**
* Calculator that calculates the best insertion position for a service.
*
* @author schroeder
*/
final class BreakInsertionCalculator implements JobInsertionCostsCalculator {

    private static final Logger logger = LoggerFactory.getLogger(BreakInsertionCalculator.class);

    // All collaborators below are assigned exactly once in the constructor, so they are
    // final; only the activity factory is injected later via its setter.
    private final HardRouteConstraint hardRouteLevelConstraint;

    private final HardActivityConstraint hardActivityLevelConstraint;

    private final SoftRouteConstraint softRouteConstraint;

    private final SoftActivityConstraint softActivityConstraint;

    private final VehicleRoutingTransportCosts transportCosts;

    private final VehicleRoutingActivityCosts activityCosts;

    private final ActivityInsertionCostsCalculator additionalTransportCostsCalculator;

    // Set after construction via setJobActivityFactory, therefore not final.
    private JobActivityFactory activityFactory;

    private final AdditionalAccessEgressCalculator additionalAccessEgressCalculator;

    public BreakInsertionCalculator(VehicleRoutingTransportCosts routingCosts, VehicleRoutingActivityCosts activityCosts, ActivityInsertionCostsCalculator additionalTransportCostsCalculator, ConstraintManager constraintManager) {
        this.transportCosts = routingCosts;
        this.activityCosts = activityCosts;
        // The constraint manager implements all four constraint interfaces.
        hardRouteLevelConstraint = constraintManager;
        hardActivityLevelConstraint = constraintManager;
        softActivityConstraint = constraintManager;
        softRouteConstraint = constraintManager;
        this.additionalTransportCostsCalculator = additionalTransportCostsCalculator;
        additionalAccessEgressCalculator = new AdditionalAccessEgressCalculator(routingCosts);
        // Parameterized logging avoids building the message (and calling toString())
        // when debug logging is disabled.
        logger.debug("initialise {}", this);
    }

    public void setJobActivityFactory(JobActivityFactory jobActivityFactory) {
        this.activityFactory = jobActivityFactory;
    }

    @Override
    public String toString() {
        return "[name=calculatesServiceInsertion]";
    }

    /**
     * Calculates the marginal cost of inserting job i locally. This is based on the
     * assumption that cost changes can entirely covered by only looking at the predecessor i-1 and its successor i+1.
     */
    @Override
    public InsertionData getInsertionData(final VehicleRoute currentRoute, final Job jobToInsert, final Vehicle newVehicle, double newVehicleDepartureTime, final Driver newDriver, final double bestKnownCosts) {
        Break breakToInsert = (Break) jobToInsert;
        // A break can only be inserted into the route of the vehicle it belongs to,
        // and only into a non-empty route.
        if (newVehicle.getBreak() == null || newVehicle.getBreak() != breakToInsert) {
            return InsertionData.createEmptyInsertionData();
        }
        if (currentRoute.isEmpty()) return InsertionData.createEmptyInsertionData();
        JobInsertionContext insertionContext = new JobInsertionContext(currentRoute, jobToInsert, newVehicle, newDriver, newVehicleDepartureTime);
        int insertionIndex = InsertionData.NO_INDEX;
        BreakActivity breakAct2Insert = (BreakActivity) activityFactory.createActivities(breakToInsert).get(0);
        insertionContext.getAssociatedActivities().add(breakAct2Insert);
        /*
        check hard constraints at route level
         */
        if (!hardRouteLevelConstraint.fulfilled(insertionContext)) {
            return InsertionData.createEmptyInsertionData();
        }
        /*
        check soft constraints at route level
         */
        double additionalICostsAtRouteLevel = softRouteConstraint.getCosts(insertionContext);
        double bestCost = bestKnownCosts;
        additionalICostsAtRouteLevel += additionalAccessEgressCalculator.getCosts(insertionContext);
        /*
        generate new start and end for new vehicle
         */
        Start start = new Start(newVehicle.getStartLocation(), newVehicle.getEarliestDeparture(), Double.MAX_VALUE);
        start.setEndTime(newVehicleDepartureTime);
        End end = new End(newVehicle.getEndLocation(), 0.0, newVehicle.getLatestArrival());

        Location bestLocation = null;

        // Walk every gap between consecutive activities (including start->first and
        // last->end) and, for each gap, try placing the break at either the previous
        // or the next activity's location; keep the cheapest feasible placement.
        TourActivity prevAct = start;
        double prevActStartTime = newVehicleDepartureTime;
        int actIndex = 0;
        Iterator<TourActivity> activityIterator = currentRoute.getActivities().iterator();
        boolean tourEnd = false;
        while (!tourEnd) {
            TourActivity nextAct;
            if (activityIterator.hasNext()) nextAct = activityIterator.next();
            else {
                nextAct = end;
                tourEnd = true;
            }
            boolean breakThis = true;
            List<Location> locations = Arrays.asList(prevAct.getLocation(), nextAct.getLocation());
            for (Location location : locations) {
                breakAct2Insert.setLocation(location);
                breakAct2Insert.setTheoreticalEarliestOperationStartTime(breakToInsert.getTimeWindow().getStart());
                breakAct2Insert.setTheoreticalLatestOperationStartTime(breakToInsert.getTimeWindow().getEnd());
                ConstraintsStatus status = hardActivityLevelConstraint.fulfilled(insertionContext, prevAct, breakAct2Insert, nextAct, prevActStartTime);
                if (status.equals(ConstraintsStatus.FULFILLED)) {
                    //from job2insert induced costs at activity level
                    double additionalICostsAtActLevel = softActivityConstraint.getCosts(insertionContext, prevAct, breakAct2Insert, nextAct, prevActStartTime);
                    double additionalTransportationCosts = additionalTransportCostsCalculator.getCosts(insertionContext, prevAct, nextAct, breakAct2Insert, prevActStartTime);
                    if (additionalICostsAtRouteLevel + additionalICostsAtActLevel + additionalTransportationCosts < bestCost) {
                        bestCost = additionalICostsAtRouteLevel + additionalICostsAtActLevel + additionalTransportationCosts;
                        insertionIndex = actIndex;
                        bestLocation = location;
                    }
                    breakThis = false;
                } else if (status.equals(ConstraintsStatus.NOT_FULFILLED)) {
                    // NOT_FULFILLED (unlike NOT_FULFILLED_BREAK) only rules out this gap,
                    // so keep scanning later gaps.
                    breakThis = false;
                }
            }
            double nextActArrTime = prevActStartTime + transportCosts.getTransportTime(prevAct.getLocation(), nextAct.getLocation(), prevActStartTime, newDriver, newVehicle);
            prevActStartTime = Math.max(nextActArrTime, nextAct.getTheoreticalEarliestOperationStartTime()) + activityCosts.getActivityDuration(nextAct, nextActArrTime, newDriver, newVehicle);
            prevAct = nextAct;
            actIndex++;
            if (breakThis) break;
        }
        if (insertionIndex == InsertionData.NO_INDEX) {
            return InsertionData.createEmptyInsertionData();
        }
        InsertionData insertionData = new InsertionData(bestCost, InsertionData.NO_INDEX, insertionIndex, newVehicle, newDriver);
        breakAct2Insert.setLocation(bestLocation);
        insertionData.getEvents().add(new InsertBreak(currentRoute, newVehicle, breakAct2Insert, insertionIndex));
        insertionData.getEvents().add(new SwitchVehicle(currentRoute, newVehicle, newVehicleDepartureTime));
        insertionData.setVehicleDepartureTime(newVehicleDepartureTime);
        return insertionData;
    }
}
|
/**
* Copyright (C) 2015 The Gravitee team (http://gravitee.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gravitee.gateway.security.keyless;
import io.gravitee.gateway.api.ExecutionContext;
import io.gravitee.gateway.api.Request;
import io.gravitee.gateway.security.core.PluginAuthenticationPolicy;
import io.gravitee.gateway.security.core.AuthenticationPolicy;
import io.gravitee.gateway.security.core.AuthenticationHandler;
import java.util.Collections;
import java.util.List;
/**
* A key-less {@link AuthenticationHandler} meaning that no authentication is required to access
* the public service.
*
* @author David BRASSELY (david.brassely at graviteesource.com)
* @author GraviteeSource Team
*/
public class KeylessAuthenticationHandler implements AuthenticationHandler {

    /** Policy identifier resolved by the policy plugin mechanism. */
    static final String KEYLESS_POLICY = "key-less";

    @Override
    public boolean canHandle(Request request) {
        // Key-less security: every request is accepted, no credentials inspected.
        return true;
    }

    @Override
    public String name() {
        return "key_less";
    }

    @Override
    public int order() {
        return 1000;
    }

    @Override
    public List<AuthenticationPolicy> handle(ExecutionContext executionContext) {
        // A single plugin-backed policy that resolves to the key-less policy id.
        PluginAuthenticationPolicy keylessPolicy = () -> KEYLESS_POLICY;
        return Collections.singletonList(keylessPolicy);
    }
}
|
package cn.wyz.wyzmall.member.service.impl;
import org.springframework.stereotype.Service;
import java.util.Map;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import cn.wyz.common.utils.PageUtils;
import cn.wyz.common.utils.Query;
import cn.wyz.wyzmall.member.dao.MemberStatisticsInfoDao;
import cn.wyz.wyzmall.member.entity.MemberStatisticsInfoEntity;
import cn.wyz.wyzmall.member.service.MemberStatisticsInfoService;
@Service("memberStatisticsInfoService")
public class MemberStatisticsInfoServiceImpl extends ServiceImpl<MemberStatisticsInfoDao, MemberStatisticsInfoEntity> implements MemberStatisticsInfoService {

    /**
     * Runs a paged query over member statistics.
     *
     * @param params request parameters interpreted by {@link Query#getPage} (page, limit, ...)
     * @return the page wrapped in the project's {@code PageUtils} transfer object
     */
    @Override
    public PageUtils queryPage(Map<String, Object> params) {
        IPage<MemberStatisticsInfoEntity> pageRequest = new Query<MemberStatisticsInfoEntity>().getPage(params);
        // Empty wrapper: no filtering conditions are applied.
        QueryWrapper<MemberStatisticsInfoEntity> noFilter = new QueryWrapper<MemberStatisticsInfoEntity>();
        IPage<MemberStatisticsInfoEntity> resultPage = this.page(pageRequest, noFilter);
        return new PageUtils(resultPage);
    }
}
|
/*
* Copyright 2013 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.optaplanner.core.impl.localsearch.decider.acceptor.tabu.size;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import org.junit.jupiter.api.Test;
import org.optaplanner.core.impl.localsearch.scope.LocalSearchPhaseScope;
import org.optaplanner.core.impl.localsearch.scope.LocalSearchStepScope;
import org.optaplanner.core.impl.solver.scope.DefaultSolverScope;
public class ValueRatioTabuSizeStrategyTest {

    @Test
    public void tabuSize() {
        // Phase scope backed by a mocked solver scope; stubbed to report 100 working values.
        LocalSearchPhaseScope phase = new LocalSearchPhaseScope(mock(DefaultSolverScope.class));
        when(phase.getWorkingValueCount()).thenReturn(100);
        LocalSearchStepScope step = new LocalSearchStepScope(phase);

        assertTabuSize(10, 0.1, step);
        assertTabuSize(50, 0.5, step);
        // Rounding
        assertTabuSize(11, 0.1051, step);
        assertTabuSize(10, 0.1049, step);
        // Corner cases
        assertTabuSize(1, 0.0000001, step);
        assertTabuSize(99, 0.9999999, step);
    }

    /** Asserts the tabu size computed for the given ratio against the shared step scope. */
    private static void assertTabuSize(int expected, double ratio, LocalSearchStepScope stepScope) {
        assertEquals(expected, new ValueRatioTabuSizeStrategy(ratio).determineTabuSize(stepScope));
    }
}
|
package com.exasol.csv.customer;
import java.util.List;
import java.util.Random;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
/**
 * Produces {@link Customer} instances with sequential ids and randomly
 * selected names and ages, e.g. to seed CSV test data.
 */
class CustomerFactory {

    /** Pool of first names a customer is randomly assigned from. */
    static final String[] FIRST_NAMES = new String[]{
            "Anderson",
            "Mark",
            "Mike",
            "Angela",
            "Katherine",
            "Stephen",
            "Adam",
            "Marco",
            "Daniela"
    };

    /** Pool of last names a customer is randomly assigned from. */
    static final String[] LAST_NAMES = new String[]{
            "Berger",
            "Lowe",
            "Cantrell",
            "White",
            "Müller",
            "Stevens",
            "Jagger",
            "Schmitz",
            "Quinn"
    };

    /** Exclusive upper bound for generated ages. */
    static final int MAX_AGE = 90;

    private final Random random = new Random();

    /**
     * Creates {@code count} customers with ids 1..count.
     * Returns an empty list when {@code count <= 0}.
     *
     * @param count number of customers to generate
     * @return list of freshly built customers, one per id
     */
    List<Customer> createCustomerList(final int count) {
        // rangeClosed + mapToObj replaces range(1, count + 1).boxed().map(...),
        // avoiding the pointless Integer boxing of the intermediate stream.
        return IntStream.rangeClosed(1, count)
                .mapToObj(this::createCustomer)
                .collect(Collectors.toList());
    }

    /**
     * Creates a single customer with the given id and random attributes.
     * The age is drawn from [0, MAX_AGE); NOTE(review): this includes age 0 —
     * confirm whether a minimum age is intended.
     *
     * @param id identifier assigned to the customer
     * @return the built customer
     */
    Customer createCustomer(final int id) {
        final int age = random.nextInt(MAX_AGE);
        final String firstName = FIRST_NAMES[random.nextInt(FIRST_NAMES.length)];
        final String lastName = LAST_NAMES[random.nextInt(LAST_NAMES.length)];
        return Customer.builder()
                .age(age)
                .firstName(firstName)
                .lastName(lastName)
                .id(id)
                .build();
    }
}
|
package com.koalatea.thehollidayinn.softwareengineeringdaily.data.repositories;
import android.arch.lifecycle.LiveData;
import android.os.AsyncTask;
import com.koalatea.thehollidayinn.softwareengineeringdaily.data.AppDatabase;
import com.koalatea.thehollidayinn.softwareengineeringdaily.data.models.Download;
import java.util.List;
/**
 * Repository for {@link Download} rows: inserts and removals run off the
 * calling thread via {@link AsyncTask}; reads run synchronously.
 */
public class DownloadRepository {

    private final DownloadDao downloadDao;

    public DownloadRepository() {
        AppDatabase db = AppDatabase.getDatabase();
        downloadDao = db.downloadDao();
    }

    /** Persists the given download on a background thread. */
    public void insert(Download download) {
        new InsertAsyncTask(downloadDao).execute(download);
    }

    /** Removes the download matching {@code podcastId} on a background thread, if one exists. */
    public void remove(String podcastId) {
        new RemoveAsyncTask(downloadDao).execute(podcastId);
    }

    /**
     * Returns all stored downloads.
     * NOTE(review): this queries on the calling thread — confirm callers never
     * invoke it from the UI thread.
     */
    public List<Download> getDownloads() {
        return downloadDao.getAll();
    }

    /** Inserts a single {@link Download} in the background. */
    // Renamed from insertAsyncTask: Java class names are UpperCamelCase; the
    // class is private, so the rename is invisible to callers.
    private static class InsertAsyncTask extends AsyncTask<Download, Void, Void> {
        private final DownloadDao downloadAsyncDao;

        InsertAsyncTask(DownloadDao dao) {
            downloadAsyncDao = dao;
        }

        @Override
        protected Void doInBackground(final Download... params) {
            downloadAsyncDao.insertOne(params[0]);
            return null;
        }
    }

    /** Looks up a download by podcast id and deletes it in the background. */
    // Renamed from removeAsyncTask for the same naming-convention reason.
    private static class RemoveAsyncTask extends AsyncTask<String, Void, Void> {
        private final DownloadDao downloadAsyncDao;

        RemoveAsyncTask(DownloadDao dao) {
            downloadAsyncDao = dao;
        }

        @Override
        protected Void doInBackground(final String... params) {
            Download download = downloadAsyncDao.loadById(params[0]);
            // Nothing stored for this id: deletion is a no-op rather than an error.
            if (download == null) {
                return null;
            }
            downloadAsyncDao.delete(download);
            return null;
        }
    }
}
|
/*
* Copyright 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package androidx.emoji2.benchmark.text;
import static org.mockito.Mockito.mock;
import android.graphics.Typeface;
import androidx.annotation.NonNull;
import androidx.emoji2.text.EmojiCompat;
import androidx.emoji2.text.MetadataRepo;
public class NoFontTestEmojiConfig extends EmojiCompat.Config {

    /** Config whose loader completes immediately with metadata built from a mock Typeface. */
    static EmojiCompat.Config emptyConfig() {
        return new NoFontTestEmojiConfig(new ImmediatelyCompletingLoader());
    }

    /** Config whose loader never invokes its callback. */
    static EmojiCompat.Config neverLoadsConfig() {
        return new NoFontTestEmojiConfig(new NeverCompletingLoader());
    }

    /** Config wrapping an arbitrary caller-supplied loader. */
    static EmojiCompat.Config fromLoader(EmojiCompat.MetadataRepoLoader loader) {
        return new NoFontTestEmojiConfig(loader);
    }

    private NoFontTestEmojiConfig(EmojiCompat.MetadataRepoLoader loader) {
        super(loader);
    }

    /** Completes the load synchronously with an empty metadata repo (mocked Typeface). */
    private static class ImmediatelyCompletingLoader implements EmojiCompat.MetadataRepoLoader {
        @Override
        public void load(@NonNull EmojiCompat.MetadataRepoLoaderCallback loaderCallback) {
            loaderCallback.onLoaded(MetadataRepo.create(mock(Typeface.class)));
        }
    }

    /** Never calls back, leaving EmojiCompat initialization pending. */
    private static class NeverCompletingLoader implements EmojiCompat.MetadataRepoLoader {
        @Override
        public void load(@NonNull final EmojiCompat.MetadataRepoLoaderCallback loaderCallback) {
            // Intentionally empty: invoked on the test thread and deliberately a no-op.
        }
    }
}
|
package org.SirTobiSwobi.c3.classifiertrainer.db;
import static org.junit.Assert.*;
import org.junit.Test;
/**
 * Exercises CategorizationManager: storage, lookup by address, lookup by
 * category/document, and deletion.
 */
public class CategorizationManagerTest {

    @Test
    public void test() {
        CategorizationManager cznMan = new CategorizationManager();
        cznMan.setCategorization(new Categorization(0, 0, 0, .8));
        cznMan.setCategorization(new Categorization(1, 3, 2, .2));

        // Fixed: the original messages labelled the by-address and by-category
        // lookups as "Document Categorization", and chained ==-comparisons of
        // doubles inside assertTrue, which yields opaque failure output.
        assertCategorization("categorizationByAddress(0)", cznMan.getCategorizationByAddress(0), 0, 0, 0.8);
        assertCategorization("categoryCategorizations(0)[0]", cznMan.getCategoryCategorizations(0)[0], 0, 0, 0.8);
        assertCategorization("documentCategorizations(0)[0]", cznMan.getDocumentCategorizations(0)[0], 0, 0, 0.8);
        assertCategorization("categorizationByAddress(1)", cznMan.getCategorizationByAddress(1), 3, 2, 0.2);
        assertCategorization("categoryCategorizations(2)[0]", cznMan.getCategoryCategorizations(2)[0], 3, 2, 0.2);

        cznMan.deleteCategorization(1);
        assertFalse("CategorizationManager still contains categorization 1", cznMan.containsCategorization(1));
    }

    /** Asserts document id, category id and probability of one categorization with a labelled message. */
    private static void assertCategorization(String label, Categorization actual,
            long expectedDocId, long expectedCatId, double expectedProbability) {
        assertEquals(label + " document id", expectedDocId, actual.getDocumentId());
        assertEquals(label + " category id", expectedCatId, actual.getCategoryId());
        // Delta 0.0 keeps the original exact-equality semantics for the stored literals.
        assertEquals(label + " probability", expectedProbability, actual.getProbability(), 0.0);
    }
}
|
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.api.ads.dfp.jaxws.v201708;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlType;
/**
*
* The action used for submitting and overbooking {@link Order} objects for approval.
*
*
* <p>Java class for SubmitOrdersForApprovalAndOverbook complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="SubmitOrdersForApprovalAndOverbook">
* <complexContent>
* <extension base="{https://www.google.com/apis/ads/publisher/v201708}SubmitOrdersForApproval">
* <sequence>
* </sequence>
* </extension>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
// Generated JAXB binding (DFP API v201708) — a marker action with no fields of
// its own; all content comes from the SubmitOrdersForApproval base type.
// Do not hand-edit: regenerate from the WSDL/schema instead.
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "SubmitOrdersForApprovalAndOverbook")
public class SubmitOrdersForApprovalAndOverbook
extends SubmitOrdersForApproval
{
}
|
/* ========================================================================
* PlantUML : a free UML diagram generator
* ========================================================================
*
* Project Info: http://plantuml.com
*
* This file is part of Smetana.
* Smetana is a partial translation of Graphviz/Dot sources from C to Java.
*
* (C) Copyright 2009-2017, Arnaud Roques
*
* This translation is distributed under the same Licence as the original C program:
*
*************************************************************************
* Copyright (c) 2011 AT&T Intellectual Property
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors: See CVS logs. Details at http://www.graphviz.org/
*************************************************************************
*
* THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC
* LICENSE ("AGREEMENT"). [Eclipse Public License - v 1.0]
*
* ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES
* RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
*
* You may obtain a copy of the License at
*
* http://www.eclipse.org/legal/epl-v10.html
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package h;
import java.util.Arrays;
import java.util.List;
//2 1emyokhi9lvf2dq2tz1mt5lq6
// Marker interface produced by the Smetana C-to-Java translation: it records
// the original C typedef ("typedef struct GVJ_s GVJ_t") and adds no members
// beyond those inherited from GVJ_s. Do not hand-edit; part of the mechanical
// Graphviz translation.
public interface GVJ_t extends GVJ_s {
// Verbatim text of the C declaration this type was translated from.
public static List<String> DEFINITION = Arrays.asList(
"typedef struct GVJ_s GVJ_t");
}
// typedef struct GVJ_s GVJ_t;
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.