text
stringlengths 7
1.01M
|
|---|
package com.github.zuihou.cloud.ribbon;
import cn.hutool.core.collection.CollUtil;
import cn.hutool.core.util.StrUtil;
import com.alibaba.cloud.nacos.ribbon.NacosServer;
import com.github.zuihou.context.BaseContextHandler;
import com.google.common.base.Optional;
import com.netflix.loadbalancer.AvailabilityFilteringRule;
import com.netflix.loadbalancer.Server;
import lombok.extern.slf4j.Slf4j;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static com.github.zuihou.context.BaseContextConstants.GRAY_VERSION;
/**
* 负载均衡策略:可用于灰度发布 & 访问指定服务
* <p>
* 若请求头中的 grayversion 若为空, 则优先从服务元数据中 没有设置 grayversion 的服务中获取,若所有服务均设置了 grayversion, 则从设置了 grayversion 的服务列表轮训
* 若请求头中的 grayversion 若不为空, 则优先从服务元数据中 设置了 grayversion 的服务中获取,若所有服务均没有了 grayversion, 则从 所有的服务列表轮训
*/
@Slf4j
@Slf4j
public class GrayRule extends AvailabilityFilteringRule {

    /**
     * Chooses a server honoring the gray-release version carried in the request context.
     * <p>
     * If the request carries no {@code GRAY_VERSION}, un-versioned instances are preferred;
     * when every instance is versioned, the versioned ones are used as a fallback.
     * If the request carries a {@code GRAY_VERSION}, instances whose metadata declares that
     * exact version are preferred; an unknown version falls back to the full candidate list.
     *
     * @param key load-balancer key forwarded to the predicate/round-robin chooser
     * @return the selected server, or {@code null} when no eligible server exists
     */
    @Override
    public Server choose(Object key) {
        List<Server> serverList = this.getPredicate().getEligibleServers(this.getLoadBalancer().getAllServers(), key);
        if (CollUtil.isEmpty(serverList)) {
            return null;
        }
        String serviceVersion = BaseContextHandler.get(GRAY_VERSION);
        log.debug("======>GrayMetadataRule: serviceVersion={}", serviceVersion);

        // Partition candidates by whether the Nacos instance metadata declares a gray version.
        List<Server> noMetaServerList = new ArrayList<>();
        List<Server> metaServerList = new ArrayList<>();
        for (Server server : serverList) {
            Map<String, String> metadata = ((NacosServer) server).getInstance().getMetadata();
            String metaVersion = metadata.get(GRAY_VERSION);
            if (StrUtil.isNotEmpty(metaVersion)) {
                metaServerList.add(server);
            } else {
                noMetaServerList.add(server);
            }
        }

        if (StrUtil.isEmpty(serviceVersion)) {
            // No version requested: prefer un-versioned instances, fall back to versioned ones.
            if (noMetaServerList.isEmpty()) {
                return originChoose(metaServerList, key, serviceVersion);
            }
            log.debug("====> 请求未指定服务版本,将无版本号的服务进行负载均衡");
            return originChoose(noMetaServerList, key, serviceVersion);
        }

        // Group versioned instances by their declared gray version and look up the requested one.
        Map<String, List<Server>> listMap = metaServerList.stream().collect(
                Collectors.groupingBy(server -> ((NacosServer) server).getInstance().getMetadata().get(GRAY_VERSION)));
        List<Server> servers = listMap.get(serviceVersion);
        // 前端传递错误的 GRAY_VERSION
        if (CollUtil.isEmpty(servers)) {
            // Build an explicit combined list instead of CollUtil.addAll, which both mutated
            // metaServerList in place and required an unchecked cast of its Collection result.
            List<Server> allServers = new ArrayList<>(metaServerList);
            allServers.addAll(noMetaServerList);
            return originChoose(allServers, key, serviceVersion);
        }
        return originChoose(servers, key, serviceVersion);
    }

    /**
     * Delegates to the default round-robin chooser over the given candidate list.
     *
     * @param serverList     candidate servers (may be empty)
     * @param key            load-balancer key
     * @param serviceVersion requested version, used only for logging
     * @return the chosen server, or {@code null} when the list is empty or nothing is eligible
     */
    private Server originChoose(List<Server> serverList, Object key, String serviceVersion) {
        if (CollUtil.isEmpty(serverList)) {
            log.error("====> 版本号:{}对应的服务列表为空,无法进行负载均衡", serviceVersion);
            return null;
        }
        // Default round-robin rule (Guava Optional is the Ribbon predicate's return type).
        Optional<Server> server = getPredicate().chooseRoundRobinAfterFiltering(serverList, key);
        if (server.isPresent()) {
            return server.get();
        } else {
            return null;
        }
    }
}
|
package it.musichub.skill.rest.ex;
/**
 * Checked exception for failures raised by the MusicHub skill REST layer.
 * Mirrors the four standard {@link Exception} constructors.
 */
public class MusicHubSkillException extends Exception {

    // Explicit serialVersionUID: Exception is Serializable, so pin the wire format.
    private static final long serialVersionUID = 1L;

    /** Creates an exception with no message or cause. */
    public MusicHubSkillException() {
        super();
    }

    /** @param message human-readable detail message */
    public MusicHubSkillException(String message) {
        super(message);
    }

    /** @param cause underlying cause; its toString becomes the detail message */
    public MusicHubSkillException(Throwable cause) {
        super(cause);
    }

    /**
     * @param message human-readable detail message
     * @param cause   underlying cause, preserved for the stack trace
     */
    public MusicHubSkillException(String message, Throwable cause) {
        super(message, cause);
    }
}
|
package com.fitchle.datafitch.mongo;
import com.fitchle.datafitch.mongo.models.MongoDatabase;
import com.mongodb.MongoClient;
import lombok.AllArgsConstructor;
@AllArgsConstructor
/**
 * Entry point for opening MongoDB connections with fixed credentials.
 * Instances are immutable; connection parameters are supplied at construction time.
 */
@AllArgsConstructor
public final class DatafitchMongo {

    // Connection parameters, injected via the Lombok-generated constructor.
    private final String host;
    private final int port;
    private final String username;
    private final String password;

    /**
     * Opens a client connection using this instance's host, port and credentials.
     *
     * @return a connected {@link MongoClient}
     */
    public MongoClient connect() {
        return new MongoDatasource(host, port, username, password).connect();
    }

    /**
     * Wraps a named database bound to this connection configuration.
     *
     * @param name database name
     * @return a {@link MongoDatabase} handle backed by this instance
     */
    public MongoDatabase database(String name) {
        return new MongoDatabase(name, this);
    }
}
|
package cn.ibizlab.util.enums;
/**
* 属性重复值检查
*/
public enum DupCheck {
    /**
     * No duplicate check.
     */
    NONE,
    /**
     * Check all values for duplicates.
     */
    ALL,
    /**
     * Check non-null values only.
     */
    NOTNULL,
    /**
     * Check within a specified range.
     */
    RANGE,
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.shardingsphere.distsql.parser.statement.rql.show;
import lombok.Getter;
import org.apache.shardingsphere.sql.parser.sql.common.segment.generic.SchemaSegment;
/**
* Show encrypt rules statement.
*/
@Getter
@Getter
public final class ShowEncryptRulesStatement extends ShowRulesStatement {

    // Optional table filter; semantics of a null value are decided by the statement handler.
    private final String tableName;

    /**
     * @param tableName table whose encrypt rules should be shown
     * @param schema    schema segment forwarded to {@link ShowRulesStatement}
     */
    public ShowEncryptRulesStatement(final String tableName, final SchemaSegment schema) {
        super(schema);
        this.tableName = tableName;
    }
}
|
package com.chqiuu.proxy.modules.api.query;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import lombok.Data;
import java.io.Serializable;
@Data
@ApiModel(value = "代理IP API分页查询对象")
@Data
@ApiModel(value = "代理IP API分页查询对象")
public class ProxyIpCommonPageQuery implements Serializable {
    private static final long serialVersionUID = 1L;
    /**
     * Sort field parameter.
     */
    @ApiModelProperty(value = "排序参数")
    private String sortParam;
    /**
     * Sort direction: ascending "asc" or descending "desc"; defaults to desc.
     */
    @ApiModelProperty(value = "排序方式:正序asc,倒序desc,默认为desc")
    private String sortord;
    /**
     * Current page number (1-based).
     */
    @ApiModelProperty(value = "当前页")
    private Integer current = 1;
    /**
     * Number of records per page.
     */
    @ApiModelProperty(value = "每页显示条数")
    private Integer size = 10;
    /**
     * HTTPS support flag; maps to column `https` tinyint(4) DEFAULT 0.
     */
    @ApiModelProperty(value = "支持https")
    private Integer https;
    /**
     * HTTP support flag; maps to column `http` tinyint(4) DEFAULT 0.
     */
    @ApiModelProperty(value = "支持http")
    private Integer http;
    /**
     * Anonymity level; maps to column `anonymity` tinyint(4) DEFAULT 0.
     */
    @ApiModelProperty(value = "匿名性")
    private Integer anonymity;
}
|
/* $Id: ModuleInterfaceForTesting1.java 17768 2010-01-11 21:22:14Z linus $
*****************************************************************************
* Copyright (c) 2009 Contributors - see below
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* linus
*****************************************************************************
*
* Some portions of this file was previously release using the BSD License:
*/
// Copyright (c) 2006 The Regents of the University of California. All
// Rights Reserved. Permission to use, copy, modify, and distribute this
// software and its documentation without fee, and without a written
// agreement is hereby granted, provided that the above copyright notice
// and this paragraph appear in all copies. This software program and
// documentation are copyrighted by The Regents of the University of
// California. The software program and documentation are supplied "AS
// IS", without any accompanying services from The Regents. The Regents
// does not warrant that the operation of the program will be
// uninterrupted or error-free. The end-user understands that the program
// was developed for research purposes and is advised not to rely
// exclusively on the program for any reason. IN NO EVENT SHALL THE
// UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
// SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS,
// ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
// THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF
// SUCH DAMAGE. THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE
// PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF
// CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT,
// UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
package org.argouml.moduleloader;
/**
* A module for testing.
*/
public class ModuleInterfaceForTesting1 implements ModuleInterface {

    // Name reported to the module loader via getName().
    static final String TEST_MODULE_NAME = "test-module-153";

    // Shared switch the test harness flips to control whether enable() succeeds.
    private static boolean readyToBeEnabled;

    /**
     * Constructor. Notifies the test harness that this module instance was created.
     */
    public ModuleInterfaceForTesting1() {
        GUITestModuleLoader2.interfaceCreatedForTesting(this);
    }

    /** @return whether enable() is currently allowed to succeed */
    public static boolean isReadyToBeEnabled() {
        return readyToBeEnabled;
    }

    /** Test hook: sets whether enable() will report success. */
    public static void setReadyToBeEnabled(boolean r) {
        readyToBeEnabled = r;
    }

    /** Always reports failure to disable. */
    public boolean disable() {
        // TODO: Auto-generated method stub
        return false;
    }

    /** Succeeds only when the harness has called setReadyToBeEnabled(true). */
    public boolean enable() {
        return readyToBeEnabled;
    }

    /** Always returns null; no module info is provided for this test module. */
    public String getInfo(int type) {
        // TODO: Auto-generated method stub
        return null;
    }

    /** @return the fixed test module name */
    public String getName() {
        return ModuleInterfaceForTesting1.TEST_MODULE_NAME;
    }
}
|
package at.medunigraz.imi.bst.trec.evaluator;
import at.medunigraz.imi.bst.config.TrecConfig;
import at.medunigraz.imi.bst.trec.model.Result;
import at.medunigraz.imi.bst.trec.model.ResultList;
import com.opencsv.CSVWriter;
import de.julielab.ir.ltr.Document;
import de.julielab.ir.ltr.DocumentList;
import de.julielab.ir.ltr.features.IRScoreFeatureKey;
import de.julielab.ir.model.QueryDescription;
import java.io.Closeable;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.function.Function;
/**
 * Writes TREC-format run files (qid Q0 docid rank score runName [treatments...])
 * as tab-separated lines via opencsv.
 */
public class TrecWriter implements Closeable {

    // Intended constraint on run names; validation is currently disabled (see checkRunName).
    private static final String VALID_RUN_NAME_REGEX = "[a-zA-Z0-9]{1,12}";
    // Fixed TREC columns: qid, Q0, docid, rank, score, runName.
    private static final int NUM_FIELDS = 6;
    private static final Function<Result, String> defaultDocIdFunction = r -> String.valueOf(r.getId());
    private static final Function<QueryDescription, String> defaultQueryIdFunction = q -> String.valueOf(q.getNumber());

    private final CSVWriter writer;
    private final String runName;

    /**
     * Opens {@code output} for writing, creating parent directories as needed.
     *
     * @param output  destination run file
     * @param runName run tag written into column 6 of every line
     * @throws RuntimeException      when the run name fails validation
     * @throws IllegalStateException when the output file cannot be opened
     */
    public TrecWriter(File output, String runName) {
        if (!checkRunName(runName)) {
            throw new RuntimeException("Invalid run name!");
        }
        if (!output.getParentFile().exists())
            output.getParentFile().mkdirs();
        this.runName = runName;
        try {
            writer = new CSVWriter(new FileWriter(output), '\t', CSVWriter.NO_QUOTE_CHARACTER, '\\', System.getProperty("line.separator"));
        } catch (IOException e) {
            // Fail fast instead of swallowing: a null writer would NPE later with no context.
            throw new IllegalStateException("Could not open TREC run file " + output, e);
        }
    }

    /**
     * Run-name validation is intentionally disabled; the regex check below was
     * commented out by the original authors. Re-enable with care: existing callers
     * may rely on names outside {@code VALID_RUN_NAME_REGEX}.
     */
    private boolean checkRunName(String runName) {
        //final Pattern valid = Pattern.compile(VALID_RUN_NAME_REGEX);
        //return valid.matcher(runName).matches();
        return true;
    }

    /** Writes all result lists with default doc-id and query-id renderers. */
    public <T extends QueryDescription> void write(List<ResultList<T>> resultListSet) {
        write(resultListSet, null, null);
    }

    /** Writes all result lists with a custom query-id renderer. */
    public <T extends QueryDescription> void write(List<ResultList<T>> resultListSet, Function<QueryDescription, String> queryIdFunction) {
        write(resultListSet, null, queryIdFunction);
    }

    /** Writes all result lists; null renderers fall back to the defaults. */
    public <T extends QueryDescription> void write(List<ResultList<T>> resultListSet, Function<Result, String> docIdFunction, Function<QueryDescription, String> queryIdFunction) {
        for (ResultList<T> resultList : resultListSet) {
            write(resultList, docIdFunction, queryIdFunction);
        }
    }

    /** Converts documents to result lists (using {@code scoreToWrite}) and writes them. */
    public <T extends QueryDescription> void writeDocuments(List<DocumentList<T>> documents, IRScoreFeatureKey scoreToWrite, Function<QueryDescription, String> queryIdFunction) {
        writeDocuments(documents, scoreToWrite, null, queryIdFunction);
    }

    /**
     * Converts each {@link DocumentList} into a {@link ResultList} keyed on the first
     * document's query description, scoring by {@code scoreToWrite}, then writes them.
     */
    public <T extends QueryDescription> void writeDocuments(List<DocumentList<T>> documents, IRScoreFeatureKey scoreToWrite, Function<Result, String> docIdFunction, Function<QueryDescription, String> queryIdFunction) {
        List<ResultList<T>> resultLists = new ArrayList<>();
        for (DocumentList<T> documentList : documents) {
            final ResultList<T> resultList = new ResultList<>(documentList.get(0).getQueryDescription());
            for (Document<T> doc : documentList) {
                final Result result = new Result(doc.getId(), doc.getIrScore(scoreToWrite));
                resultList.add(result);
            }
            resultLists.add(resultList);
        }
        write(resultLists, docIdFunction, queryIdFunction);
    }

    /** Writes one result list with default renderers. */
    public void write(ResultList<?> resultList) {
        write(resultList, null, null);
    }

    /**
     * Writes one result list, one TREC line per result, ranks starting at 1.
     * Up to {@code TrecConfig.MAX_TREATMENTS} quoted treatment strings are appended.
     */
    public void write(ResultList<?> resultList, Function<Result, String> docIdFunction, Function<QueryDescription, String> queryIdFunction) {
        if (queryIdFunction == null)
            queryIdFunction = defaultQueryIdFunction;
        if (docIdFunction == null)
            docIdFunction = defaultDocIdFunction;
        int rank = 1;
        for (Result result : resultList.getResults()) {
            Set<String> treatments = result.getUniqueTreatments();
            String[] entries = new String[NUM_FIELDS + Math.min(TrecConfig.MAX_TREATMENTS, treatments.size())];
            // 0, 1, and 5 are fixed fields, but we set them here because the array size is unknown beforehand.
            entries[0] = queryIdFunction.apply(resultList.getTopic());
            entries[1] = "Q0";
            entries[2] = docIdFunction.apply(result);
            entries[3] = String.valueOf(rank++);
            entries[4] = String.format(Locale.ROOT, "%.6f", result.getScore());
            entries[5] = runName; // XXX must be 1-12 alphanumeric characters
            // Traditional runs should not set the extra stored fields and therefore will have an empty treatment list
            int i = 0;
            for (String treatment : treatments) {
                entries[6 + i++] = String.format("\"%s\"", treatment);
                if (i >= TrecConfig.MAX_TREATMENTS) {
                    break;
                }
            }
            writer.writeNext(entries);
        }
    }

    /** Flushes buffered output to disk. */
    public void flush() {
        try {
            writer.flush();
        } catch (IOException e) {
            // Consistent with close(): surface the failure instead of printing and continuing.
            throw new IllegalStateException(e);
        }
    }

    /** Closes the underlying writer, flushing remaining output. */
    public void close() {
        try {
            writer.close();
        } catch (IOException e) {
            throw new IllegalStateException(e);
        }
    }
}
|
package com.mycloudwear.util;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.os.Build;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.support.annotation.RequiresApi;
import android.util.DisplayMetrics;
import android.util.LruCache;
import android.view.ViewGroup;
import android.widget.ImageView;
import java.lang.reflect.Field;
import java.util.LinkedList;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
/**
* @author kongkongdaren
* @version 1.0.1
* @since 15/5/2019
* Created by Android on 26/6/2018
* The original code was provided by kongkongdaren (https://github.com/kongkongdaren), but in our app
* we only use part of his code to achieve our function.
*/
/**
 * Asynchronous image loader with an LruCache memory cache, a fixed-size thread pool,
 * and a LIFO/FIFO task queue drained by a dedicated Looper thread.
 * <p>
 * Based on code by kongkongdaren (https://github.com/kongkongdaren); only part of the
 * original code is used here.
 *
 * @author kongkongdaren
 * @version 1.0.1
 */
public class ImageLoader {
    // volatile is required for the double-checked locking in getInStance():
    // without it another thread may observe a partially-constructed instance.
    private static volatile ImageLoader mInStance;
    // The core object of the picture cache.
    private LruCache<String, Bitmap> mLruCache;
    // The thread pool that executes decode tasks.
    private ExecutorService mThreadPool;
    private static final int DEFAULT_THREAD_COUNT = 1;
    // Default scheduling strategy of the task queue is LIFO.
    private Type mType = Type.LIFO;
    // The linked list acts as the task queue.
    private LinkedList<Runnable> mTaskQueue;
    private Thread mPoolThread;
    private Handler mPoolThreadHandler;
    // Handler bound to the UI thread (created lazily in loadImage).
    private Handler mUIHandler;
    // Released once mPoolThreadHandler is ready; addTask() waits on it.
    private Semaphore mSemaphorePoolThreadHandler = new Semaphore(0);
    // Limits concurrently dispatched tasks to the pool size.
    private Semaphore mSemaphoreThreadPool;

    /** Task-queue scheduling order. */
    public enum Type {
        FIFO, LIFO;
    }

    /**
     * Declares a loader of images.
     *
     * @param threadCount the number of worker threads
     * @param type        the queue scheduling type
     */
    public ImageLoader(int threadCount, Type type) {
        init(threadCount, type);
    }

    /**
     * Initializes the dispatcher thread, cache, thread pool and task queue.
     *
     * @param threadCount the number of worker threads
     * @param type        the queue scheduling type
     */
    private void init(int threadCount, Type type) {
        // Background thread that polls the task queue via its Handler/Looper.
        mPoolThread = new Thread() {
            @Override
            public void run() {
                Looper.prepare();
                mPoolThreadHandler = new Handler() {
                    @Override
                    public void handleMessage(Message msg) {
                        // Take a task from the queue and hand it to the thread pool.
                        mThreadPool.execute(getTask());
                        try {
                            // Block dispatching until a pool slot frees up.
                            mSemaphoreThreadPool.acquire();
                        } catch (InterruptedException e) {
                            e.printStackTrace();
                        }
                    }
                };
                // Handler is ready: unblock any addTask() waiting for it.
                mSemaphorePoolThreadHandler.release();
                Looper.loop();
            }
        };
        mPoolThread.start();
        // Use one eighth of the maximum available memory for the cache.
        int maxMemory = (int) Runtime.getRuntime().maxMemory();
        int cacheMemory = maxMemory / 8;
        mLruCache = new LruCache<String, Bitmap>(cacheMemory) {
            @Override
            protected int sizeOf(String key, Bitmap value) {
                return value.getRowBytes() * value.getHeight();
            }
        };
        mThreadPool = Executors.newFixedThreadPool(threadCount);
        mTaskQueue = new LinkedList<>();
        mType = type == null ? Type.LIFO : type;
        mSemaphoreThreadPool = new Semaphore(threadCount);
    }

    /**
     * Removes the next task from the queue according to the scheduling type.
     *
     * @return the next task, or null for an unknown type
     */
    private Runnable getTask() {
        if (mType == Type.FIFO) {
            return mTaskQueue.removeFirst();
        } else if (mType == Type.LIFO) {
            return mTaskQueue.removeLast();
        }
        return null;
    }

    /**
     * Returns the singleton loader, creating it with default settings on first use.
     *
     * @return the shared image loader
     */
    public static ImageLoader getInStance() {
        if (mInStance == null) {
            synchronized (ImageLoader.class) {
                if (mInStance == null) {
                    mInStance = new ImageLoader(DEFAULT_THREAD_COUNT, Type.LIFO);
                }
            }
        }
        return mInStance;
    }

    /**
     * Returns the singleton loader, creating it with the given settings on first use.
     * Note: the arguments only take effect for the very first caller.
     *
     * @param threadCount the number of worker threads
     * @param type        the queue scheduling type
     * @return the shared image loader
     */
    public static ImageLoader getInStance(int threadCount, Type type) {
        if (mInStance == null) {
            synchronized (ImageLoader.class) {
                if (mInStance == null) {
                    mInStance = new ImageLoader(threadCount, type);
                }
            }
        }
        return mInStance;
    }

    /**
     * Loads the image at {@code path} into {@code imageView}, using the cache when
     * possible and decoding asynchronously otherwise. The view's tag is used to avoid
     * setting a stale bitmap on a recycled view.
     *
     * @param path      the path of the image file
     * @param imageView the target view
     */
    public void loadImage(final String path, final ImageView imageView) {
        imageView.setTag(path);
        if (mUIHandler == null) {
            mUIHandler = new Handler() {
                @Override
                public void handleMessage(Message msg) {
                    // Apply the decoded bitmap only if the view still shows this path.
                    ImgBeanHolder holder = (ImgBeanHolder) msg.obj;
                    Bitmap bitmap = holder.bitmap;
                    ImageView imageview = holder.imageView;
                    String path = holder.path;
                    if (imageview.getTag().toString().equals(path)) {
                        imageview.setImageBitmap(bitmap);
                    }
                }
            };
        }
        // Try the memory cache first.
        Bitmap bm = getBitmapFromLruCache(path);
        if (bm != null) {
            refreshBitmap(bm, path, imageView);
        } else {
            addTask(() -> {
                // Determine the display size the view needs.
                ImageSize imageSize = getImageViewSize(imageView);
                // Decode a down-sampled bitmap to fit that size.
                Bitmap bm1 = decodeSampledBitmapFromPath(imageSize.width, imageSize.height, path);
                // Cache it for later requests.
                addBitmapToLruCache(path, bm1);
                // Post the result back to the UI handler.
                refreshBitmap(bm1, path, imageView);
                // Free a pool slot so the dispatcher can submit the next task.
                mSemaphoreThreadPool.release();
            });
        }
    }

    /**
     * Posts the bitmap to the UI handler for display.
     *
     * @param bm        the decoded bitmap
     * @param path      the stored path of the image
     * @param imageView the target view
     */
    private void refreshBitmap(Bitmap bm, String path, ImageView imageView) {
        Message message = Message.obtain();
        ImgBeanHolder holder = new ImgBeanHolder();
        holder.bitmap = bm;
        holder.path = path;
        holder.imageView = imageView;
        message.obj = holder;
        mUIHandler.sendMessage(message);
    }

    /**
     * Puts the image into the LruCache if not already present.
     *
     * @param path the stored path of the image (cache key)
     * @param bm   the bitmap to cache; ignored when null
     */
    private void addBitmapToLruCache(String path, Bitmap bm) {
        if (getBitmapFromLruCache(path) == null) {
            if (bm != null) {
                mLruCache.put(path, bm);
            }
        }
    }

    /**
     * Decodes the file at {@code path}, down-sampled to roughly the target size.
     *
     * @param width  target display width
     * @param height target display height
     * @param path   the stored path of the image
     * @return the decoded (possibly down-sampled) bitmap
     */
    private Bitmap decodeSampledBitmapFromPath(int width, int height, String path) {
        // First pass: read only the bounds to learn the raw dimensions.
        BitmapFactory.Options options = new BitmapFactory.Options();
        options.inJustDecodeBounds = true;
        BitmapFactory.decodeFile(path, options);
        // Second pass: decode for real with the computed sample size.
        options.inSampleSize = calculateInSampleSize(options, width, height);
        options.inJustDecodeBounds = false;
        Bitmap bitmap = BitmapFactory.decodeFile(path, options);
        return bitmap;
    }

    /**
     * Calculates the sample size from the raw and target dimensions.
     *
     * @param options options populated by the bounds-only decode
     * @param width   target display width
     * @param height  target display height
     * @return the inSampleSize to use (>= 1)
     */
    private int calculateInSampleSize(BitmapFactory.Options options, int width, int height) {
        int outWidth = options.outWidth;
        int outHeight = options.outHeight;
        int inSampleSize = 1;
        if (outWidth > width || outHeight > height) {
            int widthRadio = Math.round(outWidth * 1.0f / width);
            int heightRadio = Math.round(outHeight * 1.0f / height);
            inSampleSize = Math.max(widthRadio, heightRadio);
        }
        return inSampleSize;
    }

    /**
     * Determines a suitable decode size for the view: actual size, then layout
     * params, then the view's max size (via reflection), then the screen size.
     *
     * @param imageView the target image container
     * @return the resolved width/height
     */
    private ImageSize getImageViewSize(ImageView imageView) {
        ImageSize imageSize = new ImageSize();
        DisplayMetrics displayMetrics = imageView.getContext().getResources().getDisplayMetrics();
        ViewGroup.LayoutParams lp = imageView.getLayoutParams();
        // Actual measured width of the view, if laid out.
        int width = imageView.getWidth();
        if (width <= 0) {
            // Fall back to the declared layout width.
            width = lp.width;
        }
        if (width <= 0) {
            // Fall back to the view's configured maximum width.
            width = getImageViewFieldValue(imageView, "mMaxWidth");
        }
        if (width <= 0) {
            width = displayMetrics.widthPixels;
        }
        // Same cascade for the height.
        int height = imageView.getHeight();
        if (height <= 0) {
            height = lp.height;
        }
        if (height <= 0) {
            height = getImageViewFieldValue(imageView, "mMaxHeight");
        }
        if (height <= 0) {
            height = displayMetrics.heightPixels;
        }
        imageSize.width = width;
        imageSize.height = height;
        return imageSize;
    }

    /**
     * Reads an int field of ImageView by reflection (used for mMaxWidth/mMaxHeight,
     * which have no public getters on older API levels).
     *
     * @return the field value, or 0 when unavailable or out of range
     */
    private static int getImageViewFieldValue(Object object, String fieldName) {
        int value = 0;
        try {
            Field field = ImageView.class.getDeclaredField(fieldName);
            field.setAccessible(true);
            int fieldValue = field.getInt(object);
            if (fieldValue > 0 && fieldValue < Integer.MAX_VALUE) {
                value = fieldValue;
            }
        } catch (NoSuchFieldException e) {
            e.printStackTrace();
        } catch (IllegalAccessException e) {
            e.printStackTrace();
        }
        return value;
    }

    /**
     * Enqueues a task and signals the dispatcher thread, waiting first for its
     * Handler to be initialized if necessary.
     *
     * @param runnable the decode task
     */
    private synchronized void addTask(Runnable runnable) {
        mTaskQueue.add(runnable);
        try {
            if (mPoolThreadHandler == null) {
                // init() may still be constructing the Looper thread; wait for it.
                mSemaphorePoolThreadHandler.acquire();
            }
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        mPoolThreadHandler.sendEmptyMessage(0x110);
    }

    /**
     * Looks up a cached bitmap by its path.
     *
     * @param key the path of the image
     * @return the cached bitmap, or null on a miss
     */
    private Bitmap getBitmapFromLruCache(String key) {
        return mLruCache.get(key);
    }

    /**
     * Target decode size of an image. Static: it needs no reference to the
     * enclosing loader.
     */
    private static class ImageSize {
        int width;
        int height;
    }

    /**
     * Message payload carrying a decoded bitmap to the UI handler. Static: it
     * needs no reference to the enclosing loader.
     */
    private static class ImgBeanHolder {
        Bitmap bitmap;
        ImageView imageView;
        String path;
    }
}
|
package com.freesky.hostapp.service;
import android.os.RemoteException;
import android.util.Log;
import com.freesky.appbridge.service.IDispatchService;
import com.freesky.appbridge.service.IMessageCallback;
import com.freesky.hostapp.BuildConfig;
/**
* Created by letgogo on 2018/11/20.
* 消息分发服务端Binder
*/
/**
 * Server-side Binder that receives client messages and callback registrations
 * and forwards them to the singleton MessageDispatcher.
 */
public class DispatchServiceStub extends IDispatchService.Stub {

    private static final boolean DEBUG = BuildConfig.DEBUG;
    private static final String TAG = DispatchServiceStub.class.getSimpleName();

    public DispatchServiceStub() {
        if (DEBUG) {
            Log.d(TAG, "DispatchServiceStub inited");
        }
    }

    /**
     * Handles a message sent by a client package.
     *
     * @param pkg sender package name
     * @param msg message payload
     * @return the dispatcher's reply for this message
     * @throws RemoteException on Binder transport failure
     */
    @Override
    public String send(String pkg, String msg) throws RemoteException {
        if (DEBUG) {
            Log.d(TAG, "send from pkg: " + pkg + ", msg: " + msg);
        }
        return MessageDispatcher.getInstance().handleClientMessage(new Message(pkg, msg));
    }

    /**
     * Registers a callback so the dispatcher can push messages back to {@code pkg}.
     *
     * @param pkg             package registering the callback
     * @param messageCallback remote callback interface
     * @throws RemoteException on Binder transport failure
     */
    @Override
    public void registerMessageCallback(String pkg,
                                        IMessageCallback messageCallback)
            throws RemoteException {
        if (DEBUG) {
            Log.d(TAG, "registerMessageCallback from pkg: " + pkg + ", messageCallback: " + messageCallback);
        }
        MessageDispatcher.getInstance().registerMessageCallback(pkg, messageCallback);
    }
}
|
package com.netsteadfast.greenstep.po.hbm;
import java.util.Date;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.Table;
import javax.persistence.UniqueConstraint;
import com.netsteadfast.greenstep.base.model.BaseEntity;
import com.netsteadfast.greenstep.base.model.EntityPK;
import com.netsteadfast.greenstep.base.model.EntityUK;
@Entity
@Table(
name="bb_pdca_owner",
uniqueConstraints = {
@UniqueConstraint( columnNames = {"PDCA_OID", "EMP_ID"} )
}
)
// Entity for table bb_pdca_owner: links a PDCA record (PDCA_OID) to an owning
// employee (EMP_ID); the pair is unique per the table's constraint.
public class BbPdcaOwner extends BaseEntity<String> implements java.io.Serializable {
    private static final long serialVersionUID = -7968899280717109235L;
    // Primary key.
    private String oid;
    // Business keys (unique together).
    private String pdcaOid;
    private String empId;
    // Audit columns: creator/created-date, updater/updated-date.
    private String cuserid;
    private Date cdate;
    private String uuserid;
    private Date udate;

    @Override
    @Id
    @EntityPK(name="oid")
    @Column(name="OID")
    public String getOid() {
        return oid;
    }

    @Override
    public void setOid(String oid) {
        this.oid = oid;
    }

    @EntityUK(name="pdcaOid")
    @Column(name="PDCA_OID")
    public String getPdcaOid() {
        return pdcaOid;
    }

    public void setPdcaOid(String pdcaOid) {
        this.pdcaOid = pdcaOid;
    }

    @EntityUK(name="empId")
    @Column(name="EMP_ID")
    public String getEmpId() {
        return empId;
    }

    public void setEmpId(String empId) {
        this.empId = empId;
    }

    @Override
    @Column(name="CUSERID")
    public String getCuserid() {
        return this.cuserid;
    }

    @Override
    public void setCuserid(String cuserid) {
        this.cuserid = cuserid;
    }

    @Override
    @Column(name="CDATE")
    public Date getCdate() {
        return this.cdate;
    }

    @Override
    public void setCdate(Date cdate) {
        this.cdate = cdate;
    }

    @Override
    @Column(name="UUSERID")
    public String getUuserid() {
        return this.uuserid;
    }

    @Override
    public void setUuserid(String uuserid) {
        this.uuserid = uuserid;
    }

    @Override
    @Column(name="UDATE")
    public Date getUdate() {
        return this.udate;
    }

    @Override
    public void setUdate(Date udate) {
        this.udate = udate;
    }
}
|
package ru.soknight.packetinventoryapi.menu.item.page.element.filler;
import lombok.Getter;
import org.bukkit.Material;
import org.bukkit.entity.Player;
import org.bukkit.inventory.ItemStack;
import org.bukkit.inventory.meta.ItemMeta;
import org.bukkit.inventory.meta.SkullMeta;
import org.jetbrains.annotations.NotNull;
import ru.soknight.packetinventoryapi.api.PacketInventoryAPI;
import ru.soknight.packetinventoryapi.item.update.content.ContentUpdateRequest;
import ru.soknight.packetinventoryapi.menu.item.DisplayableMenuItem;
import ru.soknight.packetinventoryapi.menu.item.WrappedItemStack;
import ru.soknight.packetinventoryapi.menu.item.page.element.renderer.SlotItemRenderer;
import ru.soknight.packetinventoryapi.menu.item.regular.RegularMenuItem;
import ru.soknight.packetinventoryapi.nms.NMSAssistant;
import ru.soknight.packetinventoryapi.nms.vanilla.VanillaItem;
import ru.soknight.packetinventoryapi.placeholder.container.list.ListContainer;
import ru.soknight.packetinventoryapi.placeholder.container.string.StringContainer;
import ru.soknight.packetinventoryapi.placeholder.element.ElementPlaceholderReplacer;
import ru.soknight.packetinventoryapi.placeholder.element.LiteElementPlaceholderReplacer;
import ru.soknight.packetinventoryapi.util.Validate;
import java.util.ArrayList;
import java.util.List;
@Getter
public abstract class AbstractPageContentFiller<I extends DisplayableMenuItem> implements PageContentFiller<I> {
private static final String EMPTY_COMPONENT_JSON = "{\"text\":\"\"}";
protected final SlotItemRenderer slotItemRenderer;
protected final boolean replaceWithEmptyItems;
protected final List<ElementPlaceholderReplacer> placeholderReplacers;
protected AbstractPageContentFiller(SlotItemRenderer slotItemRenderer, boolean replaceWithEmptyItems) {
this.slotItemRenderer = slotItemRenderer;
this.replaceWithEmptyItems = replaceWithEmptyItems;
this.placeholderReplacers = new ArrayList<>();
}
@Override
public void fillPageContent(Player viewer, I menuItem, int startIndex, ContentUpdateRequest<?, ?> contentUpdateRequest) {
if(slotItemRenderer == null)
return;
for(int pageIndex = 0; pageIndex < 90; pageIndex++) {
int totalIndex = startIndex + pageIndex;
RegularMenuItem<?, ?> item;
if(menuItem.isRegular())
item = menuItem.asRegularItem();
else if(menuItem.isStateable())
item = menuItem.asStateableItem().getItemFor(viewer, -1, pageIndex, totalIndex);
else
throw new IllegalArgumentException("'menuItem' has unexprected displayable menu item type");
if(item == null)
return;
int[] slots = item.getSlots();
if(slots == null || slots.length == 0)
return;
if(pageIndex >= slots.length)
break;
int slot = slots[pageIndex];
ItemStack itemStack = slotItemRenderer.renderItem(viewer, item, slot, pageIndex, totalIndex);
if(itemStack == null || (!replaceWithEmptyItems && itemStack.getType() == Material.AIR))
continue;
replacePlaceholders(viewer, itemStack, slot, pageIndex, totalIndex);
contentUpdateRequest.set(itemStack, slot, true);
}
}
protected String replacePlaceholders(Player viewer, String original, int slot, int pageIndex, int totalIndex) {
if(placeholderReplacers.isEmpty())
return original;
if(original == null || original.isEmpty())
return original;
StringContainer wrapper = StringContainer.wrap(original, slot);
for(ElementPlaceholderReplacer replacer : placeholderReplacers)
replacer.replace(viewer, wrapper, slot, pageIndex, totalIndex);
return wrapper.getString();
}
protected List<String> replacePlaceholders(Player viewer, List<String> original, int slot, int pageIndex, int totalIndex) {
if(placeholderReplacers.isEmpty())
return original;
if(original == null || original.isEmpty())
return original;
ListContainer wrapper = ListContainer.wrap(original, slot);
for(ElementPlaceholderReplacer replacer : placeholderReplacers)
replacer.replace(viewer, wrapper, slot, pageIndex, totalIndex);
return wrapper.getList();
}
protected void replacePlaceholders(Player viewer, ItemStack item, int slot, int pageIndex, int totalIndex) {
if(item == null || !item.hasItemMeta())
return;
ItemMeta itemMeta = item.getItemMeta();
if(itemMeta.hasDisplayName()) {
String displayName = itemMeta.getDisplayName();
itemMeta.setDisplayName(replacePlaceholders(viewer, displayName, slot, pageIndex, totalIndex));
}
if(itemMeta.hasDisplayName()) {
String displayName = itemMeta.getDisplayName();
String value = replacePlaceholders(viewer, displayName, slot, pageIndex, totalIndex);
if(value.isEmpty()) {
NMSAssistant.getItemStackPatcher().setDisplayName(itemMeta, EMPTY_COMPONENT_JSON);
} else {
itemMeta.setDisplayName(value);
}
}
if(itemMeta.hasLore()) {
List<String> lore = itemMeta.getLore();
itemMeta.setLore(replacePlaceholders(viewer, lore, slot, pageIndex, totalIndex));
}
if(item instanceof WrappedItemStack) {
WrappedItemStack wrapper = (WrappedItemStack) item;
VanillaItem<?, ?> vanillaItem = wrapper.getVanillaItem();
// empty name
String name = vanillaItem.getName();
if(name != null && name.isEmpty()) {
NMSAssistant.getItemStackPatcher().setDisplayName(itemMeta, EMPTY_COMPONENT_JSON);
}
// player head
String playerHead = vanillaItem.getPlayerHead();
if(playerHead != null && !playerHead.isEmpty() && itemMeta instanceof SkullMeta) {
String playerName = replacePlaceholders(viewer, playerHead, slot, pageIndex, totalIndex);
PacketInventoryAPI.getInstance()
.skinsProvidingBus()
.findPlayerSkin(playerName)
.ifPresent(gameProfile -> vanillaItem.assignHeadTexture((SkullMeta) itemMeta, gameProfile));
}
}
item.setItemMeta(itemMeta);
}
/**
 * Hides an item's display name by patching in the empty chat component.
 * Fix: this helper was an empty stub; it now performs the same operation that
 * is done inline elsewhere in this class for the empty-name case.
 *
 * @param itemMeta the meta to patch; must not be {@code null}
 */
private void setEmptyDisplayName(@NotNull ItemMeta itemMeta) {
    NMSAssistant.getItemStackPatcher().setDisplayName(itemMeta, EMPTY_COMPONENT_JSON);
}
/**
 * Registers a lite replacer at the head of the replacer chain.
 * Delegates to {@link #appendReplacerFirst(ElementPlaceholderReplacer)}.
 */
@Override
public PageContentFiller<I> appendReplacerFirst(@NotNull LiteElementPlaceholderReplacer replacer) {
    return appendReplacerFirst((ElementPlaceholderReplacer) replacer);
}
/**
 * Registers a replacer at the head of the replacer chain, so it runs before
 * all previously registered replacers.
 */
@Override
public PageContentFiller<I> appendReplacerFirst(@NotNull ElementPlaceholderReplacer replacer) {
    Validate.notNull(replacer, "replacer");
    this.placeholderReplacers.add(0, replacer);
    return this;
}
/**
 * Registers a lite replacer at the tail of the replacer chain.
 * Delegates to {@link #appendReplacer(ElementPlaceholderReplacer)}.
 */
@Override
public PageContentFiller<I> appendReplacer(@NotNull LiteElementPlaceholderReplacer replacer) {
    return appendReplacer((ElementPlaceholderReplacer) replacer);
}
/**
 * Registers a replacer at the tail of the replacer chain, so it runs after
 * all previously registered replacers.
 */
@Override
public PageContentFiller<I> appendReplacer(@NotNull ElementPlaceholderReplacer replacer) {
    Validate.notNull(replacer, "replacer");
    this.placeholderReplacers.add(replacer);
    return this;
}
/**
 * Removes a previously registered replacer from the chain.
 * No-op if the replacer was never registered.
 */
@Override
public PageContentFiller<I> removeReplacer(@NotNull ElementPlaceholderReplacer replacer) {
    Validate.notNull(replacer, "replacer");
    this.placeholderReplacers.remove(replacer);
    return this;
}
}
|
/*
* Copyright 2015 Benoit LETONDOR
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.benoitletondor.easybudgetapp.model;
import androidx.annotation.Nullable;
/**
* An enum that reference different kind of deletion for a recurring expense
*
* @author Benoit LETONDOR
*/
public enum RecurringExpenseDeleteType
{
    /**
     * Delete all occurrences from a date on
     */
    FROM(0),
    /**
     * Delete all occurrences before a date
     */
    TO(1),
    /**
     * Delete all occurrences
     */
    ALL(2),
    /**
     * Delete this expense occurrence only
     */
    ONE(3);

// ------------------------------------->

    /**
     * Integer value (for serialization)
     */
    private final int value;

    /**
     * Private constructor
     *
     * @param value the integer value used for serialization
     */
    RecurringExpenseDeleteType(int value)
    {
        this.value = value;
    }

// ------------------------------------->

    /**
     * Get the value for serialization
     *
     * @return the integer value associated with this deletion type
     */
    public int getValue()
    {
        return value;
    }

// ------------------------------------->

    /**
     * Retrieve the enum for the given serialized value
     *
     * @param value the serialized integer value
     * @return the matching deletion type, or {@code null} if no type matches
     */
    @Nullable
    public static RecurringExpenseDeleteType fromValue(int value)
    {
        for (RecurringExpenseDeleteType type : values())
        {
            if (value == type.getValue())
            {
                return type;
            }
        }

        return null;
    }
}
|
/*
* Copyright 2002-2005 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.beans.factory.config;
import java.lang.reflect.Field;
import org.springframework.beans.factory.BeanNameAware;
import org.springframework.beans.factory.FactoryBean;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.util.ClassUtils;
/**
* FactoryBean which retrieves a static or non-static field value.
* Typically used for retrieving public static final constants.
*
* <p>Usage example:
*
* <pre>
* // standard definition for exposing a static field, specifying the "staticField" property
* <bean id="myField" class="org.springframework.beans.factory.config.FieldRetrievingFactoryBean">
* <property name="staticField"><value>java.sql.Connection.TRANSACTION_SERIALIZABLE</value></property>
* </bean>
*
* // convenience version that specifies a static field pattern as bean name
* <bean id="java.sql.Connection.TRANSACTION_SERIALIZABLE" class="org.springframework.beans.factory.config.FieldRetrievingFactoryBean"/></pre>
* </pre>
*
* @author Juergen Hoeller
* @since 1.1
* @see #setStaticField
*/
public class FieldRetrievingFactoryBean implements FactoryBean, BeanNameAware, InitializingBean {

    private Class targetClass;

    private Object targetObject;

    private String targetField;

    private String staticField;

    private String beanName;

    // the field we will retrieve
    private Field fieldObject;

    /**
     * Set the target class on which the field is defined.
     * Only necessary when the target field is static; else,
     * a target object needs to be specified anyway.
     * @see #setTargetObject
     * @see #setTargetField
     */
    public void setTargetClass(Class targetClass) {
        this.targetClass = targetClass;
    }

    /**
     * Return the target class on which the field is defined.
     */
    public Class getTargetClass() {
        return targetClass;
    }

    /**
     * Set the target object on which the field is defined.
     * Only necessary when the target field is not static;
     * else, a target class is sufficient.
     * @see #setTargetClass
     * @see #setTargetField
     */
    public void setTargetObject(Object targetObject) {
        this.targetObject = targetObject;
    }

    /**
     * Return the target object on which the field is defined.
     */
    public Object getTargetObject() {
        return targetObject;
    }

    /**
     * Set the name of the field to be retrieved.
     * Refers to either a static field or a non-static field,
     * depending on a target object being set.
     * @see #setTargetClass
     * @see #setTargetObject
     */
    public void setTargetField(String targetField) {
        this.targetField = (targetField != null ? targetField.trim() : null);
    }

    /**
     * Return the name of the field to be retrieved.
     */
    public String getTargetField() {
        return targetField;
    }

    /**
     * Set a fully qualified static field name to retrieve,
     * e.g. "example.MyExampleClass.MY_EXAMPLE_FIELD".
     * Convenient alternative to specifying targetClass and targetField.
     * @see #setTargetClass
     * @see #setTargetField
     */
    public void setStaticField(String staticField) {
        this.staticField = (staticField != null ? staticField.trim() : null);
    }

    /**
     * The bean name of this FieldRetrievingFactoryBean will be interpreted
     * as "staticField" pattern, if neither "targetClass" nor "targetObject"
     * nor "targetField" have been specified.
     * This allows for concise bean definitions with just an id/name.
     */
    public void setBeanName(String beanName) {
        this.beanName = beanName;
    }

    /**
     * Validates the configuration and resolves the {@link Field} to retrieve.
     * <p>Fixes relative to the previous revision:
     * <ul>
     * <li>the trailing-dot check used {@code lastDotIndex == staticField.length()},
     * which can never be true ({@code lastIndexOf} returns at most {@code length() - 1});
     * it now uses {@code length() - 1} so "example.MyClass." fails fast with a clear message;</li>
     * <li>an explicit error is raised when neither a static field expression nor a
     * bean name is available (previously a NullPointerException);</li>
     * <li>the error message said "method name" where "field name" was meant.</li>
     * </ul>
     * <p>Note: {@link Class#getField} only finds public fields.
     * @throws ClassNotFoundException if the class of a static field expression cannot be loaded
     * @throws NoSuchFieldException if the resolved class does not expose the field publicly
     */
    public void afterPropertiesSet() throws ClassNotFoundException, NoSuchFieldException {
        if (this.targetClass != null && this.targetObject != null) {
            throw new IllegalArgumentException("Specify either targetClass or targetObject, not both");
        }

        if (this.targetClass == null && this.targetObject == null) {
            if (this.targetField != null) {
                throw new IllegalArgumentException(
                        "Specify targetClass or targetObject in combination with targetField");
            }

            // If no other property specified, consider bean name as static field expression.
            if (this.staticField == null) {
                this.staticField = this.beanName;
            }
            if (this.staticField == null) {
                throw new IllegalArgumentException(
                        "Either staticField or bean name must be set when no target is specified");
            }

            // Try to parse static field into class and field.
            int lastDotIndex = this.staticField.lastIndexOf('.');
            if (lastDotIndex == -1 || lastDotIndex == this.staticField.length() - 1) {
                throw new IllegalArgumentException(
                        "staticField must be a fully qualified class plus field name: " +
                        "e.g. 'example.MyExampleClass.MY_EXAMPLE_FIELD'");
            }
            String className = this.staticField.substring(0, lastDotIndex);
            String fieldName = this.staticField.substring(lastDotIndex + 1);
            this.targetClass = ClassUtils.forName(className);
            this.targetField = fieldName;
        }

        else if (this.targetField == null) {
            // Either targetClass or targetObject specified.
            throw new IllegalArgumentException("targetField is required");
        }

        // Try to get the exact field (public only, per Class.getField semantics).
        Class targetClass = (this.targetObject != null) ? this.targetObject.getClass() : this.targetClass;
        this.fieldObject = targetClass.getField(this.targetField);
    }

    /**
     * Returns the current value of the resolved field:
     * the instance field value when a target object is set, else the static field value.
     */
    public Object getObject() throws IllegalAccessException {
        if (this.targetObject != null) {
            // instance field
            return this.fieldObject.get(this.targetObject);
        }
        else {
            // class field
            return this.fieldObject.get(null);
        }
    }

    public Class getObjectType() {
        return this.fieldObject.getType();
    }

    public boolean isSingleton() {
        return true;
    }
}
|
/**
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
*/
package com.oracle.bmc.core.requests;
import com.oracle.bmc.core.model.*;
/**
* <b>Example: </b>Click <a href="https://docs.cloud.oracle.com/en-us/iaas/tools/java-sdk-examples/latest/core/DeleteInstanceConsoleConnectionExample.java.html" target="_blank" rel="noopener noreferrer">here</a> to see how to use DeleteInstanceConsoleConnectionRequest.
*/
@javax.annotation.Generated(value = "OracleSDKGenerator", comments = "API Version: 20160918")
@lombok.Builder(
builderClassName = "Builder",
buildMethodName = "buildWithoutInvocationCallback",
toBuilder = true
)
@lombok.ToString(callSuper = true)
@lombok.EqualsAndHashCode(callSuper = true)
@lombok.Getter
// NOTE: generated class (OracleSDKGenerator). Lombok's @Builder generates the
// remaining Builder setters (instanceConsoleConnectionId, ifMatch) and the
// buildWithoutInvocationCallback() method referenced below.
public class DeleteInstanceConsoleConnectionRequest
        extends com.oracle.bmc.requests.BmcRequest<java.lang.Void> {

    /**
     * The OCID of the instance console connection.
     */
    private String instanceConsoleConnectionId;

    /**
     * For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match`
     * parameter to the value of the etag from a previous GET or POST response for that resource. The resource
     * will be updated or deleted only if the etag you provide matches the resource's current etag value.
     *
     */
    private String ifMatch;

    public static class Builder
            implements com.oracle.bmc.requests.BmcRequest.Builder<
                    DeleteInstanceConsoleConnectionRequest, java.lang.Void> {
        // Optional callback applied to the JAX-RS invocation; only honored by build(),
        // not by the Lombok-generated buildWithoutInvocationCallback().
        private com.oracle.bmc.util.internal.Consumer<javax.ws.rs.client.Invocation.Builder>
                invocationCallback = null;
        // Optional per-request retry configuration; null means SDK defaults.
        private com.oracle.bmc.retrier.RetryConfiguration retryConfiguration = null;

        /**
         * Set the invocation callback for the request to be built.
         * @param invocationCallback the invocation callback to be set for the request
         * @return this builder instance
         */
        public Builder invocationCallback(
                com.oracle.bmc.util.internal.Consumer<javax.ws.rs.client.Invocation.Builder>
                        invocationCallback) {
            this.invocationCallback = invocationCallback;
            return this;
        }

        /**
         * Set the retry configuration for the request to be built.
         * @param retryConfiguration the retry configuration to be used for the request
         * @return this builder instance
         */
        public Builder retryConfiguration(
                com.oracle.bmc.retrier.RetryConfiguration retryConfiguration) {
            this.retryConfiguration = retryConfiguration;
            return this;
        }

        /**
         * Copy method to populate the builder with values from the given instance.
         * @return this builder instance
         */
        public Builder copy(DeleteInstanceConsoleConnectionRequest o) {
            instanceConsoleConnectionId(o.getInstanceConsoleConnectionId());
            ifMatch(o.getIfMatch());
            invocationCallback(o.getInvocationCallback());
            retryConfiguration(o.getRetryConfiguration());
            return this;
        }

        /**
         * Build the instance of DeleteInstanceConsoleConnectionRequest as configured by this builder
         *
         * Note that this method takes calls to {@link Builder#invocationCallback(com.oracle.bmc.util.internal.Consumer)} into account,
         * while the method {@link Builder#buildWithoutInvocationCallback} does not.
         *
         * This is the preferred method to build an instance.
         *
         * @return instance of DeleteInstanceConsoleConnectionRequest
         */
        public DeleteInstanceConsoleConnectionRequest build() {
            DeleteInstanceConsoleConnectionRequest request = buildWithoutInvocationCallback();
            request.setInvocationCallback(invocationCallback);
            request.setRetryConfiguration(retryConfiguration);
            return request;
        }
    }
}
|
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.camera.tinyplanet;
import android.graphics.Bitmap;
/**
* TinyPlanet native interface.
*/
public class TinyPlanetNative
{
    // Loads the native implementation of process() once, when the class is
    // first used. Throws UnsatisfiedLinkError if libjni_tinyplanet is missing.
    static
    {
        System.loadLibrary("jni_tinyplanet");
    }

    /**
     * Create a tiny planet.
     *
     * @param in the 360 degree stereographically mapped panoramic input image.
     * @param width the width of the input image.
     * @param height the height of the input image.
     * @param out the resulting tiny planet. The native code writes into this
     *            pre-allocated bitmap; the caller owns both bitmaps.
     * @param outputSize the width and height of the square output image.
     * @param scale the scale factor (used for fast previews).
     * @param angleRadians the angle of the tiny planet in radians.
     */
    public static native void process(Bitmap in, int width, int height, Bitmap out, int outputSize,
                                      float scale, float angleRadians);
}
|
/*
* Copyright 2020 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.kogito.addon.cloudevents.quarkus;
import java.util.concurrent.CompletionStage;
import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.inject.Produces;
import javax.inject.Named;
import org.eclipse.microprofile.reactive.messaging.Incoming;
import org.eclipse.microprofile.reactive.messaging.Message;
import org.kie.kogito.event.KogitoEventStreams;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.quarkus.runtime.Startup;
import io.smallrye.mutiny.Multi;
import io.smallrye.mutiny.operators.multi.processors.BroadcastProcessor;
@Startup
@ApplicationScoped
public class QuarkusCloudEventPublisher {

    private static final Logger LOGGER = LoggerFactory.getLogger(QuarkusCloudEventPublisher.class);

    // Hot stream shared by every subscriber of the internal bus.
    protected BroadcastProcessor<String> processor = BroadcastProcessor.create();

    /**
     * Broadcasts the received/produced messages to subscribers
     *
     * @see <a href="https://smallrye.io/smallrye-mutiny/guides/hot-streams">How to create a hot stream?</a>
     * @return A {@link Multi} message to subscribers
     */
    @Produces
    @ApplicationScoped
    @Named(KogitoEventStreams.PUBLISHER)
    public Multi<String> producerFactory() {
        return this.processor;
    }

    /**
     * Listens to a message published in the {@link KogitoEventStreams#INCOMING} channel
     *
     * @param message the given message in JSON format
     * @return a {@link CompletionStage} after ack-ing the message
     */
    @Incoming(KogitoEventStreams.INCOMING)
    public CompletionStage<Void> onEvent(Message<String> message) {
        LOGGER.debug("Received message from channel {}: {}", KogitoEventStreams.INCOMING, message);
        // Forward the payload to the internal bus before acknowledging.
        produce(message.getPayload());
        CompletionStage<Void> acknowledgement = message.ack();
        return acknowledgement.exceptionally(e -> {
            LOGGER.error("Failed to ack message", e);
            return null;
        });
    }

    /**
     * Produces a message in the internal application bus
     *
     * @param message the given CE message in JSON format
     */
    public void produce(final String message) {
        LOGGER.debug("Producing message to internal bus: {}", message);
        this.processor.onNext(message);
    }
}
|
package de.tu_berlin.mpds.covid_notifier.model;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
import java.time.LocalDateTime;
import java.util.UUID;
@EqualsAndHashCode(callSuper = true)
@ToString(callSuper = true)
@JsonPropertyOrder({"uuid", "personId", "eventType"})
// NOTE(review): the ignored property "occuredOn" is spelled differently from the
// "occurredOn" constructor property — confirm whether this targets a legacy
// misspelled field or is itself a typo.
@JsonIgnoreProperties("occuredOn")
@Getter
public class InfectionReported extends DomainEvent {

    /** Identifier of the person reported as infected. */
    private final Long personId;

    /**
     * Jackson creator: rebuilds the event from its JSON representation,
     * delegating the shared event attributes to {@link DomainEvent}.
     */
    @JsonCreator
    public InfectionReported(@JsonProperty("eventType") String eventType,
                             @JsonProperty("uuid") UUID uuid,
                             @JsonProperty("sequenceNumber") Long sequenceNumber,
                             @JsonProperty("personId") Long personId,
                             @JsonProperty("occurredOn") LocalDateTime occurredOn,
                             @JsonProperty("city") String city) {
        super(eventType, uuid, sequenceNumber, occurredOn, city);
        this.personId = personId;
    }

    /** The event type is the simple class name. */
    @Override
    public String eventType() {
        return getClass().getSimpleName();
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler;
import java.io.StringWriter;
import java.lang.invoke.MethodHandles;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BooleanSupplier;
import java.util.function.Function;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.util.Utils;
import org.apache.solr.util.LogLevel;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.handler.ReplicationHandler.CMD_FETCH_INDEX;
import static org.apache.solr.handler.ReplicationHandler.CMD_GET_FILE_LIST;
import static org.apache.solr.handler.TestReplicationHandler.createAndStartJetty;
import static org.apache.solr.handler.TestReplicationHandler.createNewSolrClient;
import static org.apache.solr.handler.TestReplicationHandler.invokeReplicationCommand;
@LogLevel("org.apache.solr.handler.IndexFetcher=DEBUG")
@SolrTestCaseJ4.SuppressSSL
/**
 * Verifies that index replication clears the local index first when the slave's
 * usable disk space is insufficient, and that searches are temporarily disabled
 * while that happens.
 */
public class TestReplicationHandlerDiskOverFlow extends SolrTestCaseJ4 {
    private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

    JettySolrRunner masterJetty, slaveJetty;
    SolrClient masterClient, slaveClient;
    TestReplicationHandler.SolrInstance master = null, slave = null;

    static String context = "/solr";

    @Before
    public void setUp() throws Exception {
        super.setUp();
        // Test the default (NRT-caching) factory most of the time.
        // (A redundant unconditional setProperty that was immediately
        // overwritten here has been removed.)
        String factory = random().nextInt(100) < 75 ? "solr.NRTCachingDirectoryFactory" : "solr.StandardDirectoryFactory";
        System.setProperty("solr.directoryFactory", factory);
        master = new TestReplicationHandler.SolrInstance(createTempDir("solr-instance").toFile(), "master", null);
        master.setUp();
        masterJetty = createAndStartJetty(master);
        masterClient = createNewSolrClient(masterJetty.getLocalPort());

        slave = new TestReplicationHandler.SolrInstance(createTempDir("solr-instance").toFile(), "slave", masterJetty.getLocalPort());
        slave.setUp();
        slaveJetty = createAndStartJetty(slave);
        slaveClient = createNewSolrClient(slaveJetty.getLocalPort());

        // FIX: was "solr.indexfetcher.sotimeout2" — a typo that silently
        // disabled the socket-timeout override and left the property
        // unmatched by tearDown()'s clearProperty call.
        System.setProperty("solr.indexfetcher.sotimeout", "45000");
    }

    @Override
    @After
    public void tearDown() throws Exception {
        super.tearDown();
        masterJetty.stop();
        slaveJetty.stop();
        masterJetty = slaveJetty = null;
        master = slave = null;
        masterClient.close();
        slaveClient.close();
        masterClient = slaveClient = null;
        System.clearProperty("solr.indexfetcher.sotimeout");
        // Also clear the directory factory set in setUp() so it cannot leak
        // into other tests in the same JVM.
        System.clearProperty("solr.directoryFactory");
    }

    @Test
    public void testDiskOverFlow() throws Exception {
        invokeReplicationCommand(slaveJetty.getLocalPort(), "disablepoll");
        // Index docs: the slave's index is made larger than the master's so
        // the (mocked) free-disk-space check forces a clear-and-fetch.
        System.out.println("MASTER");
        int docsInMaster = 1000;
        long szMaster = indexDocs(masterClient, docsInMaster, 0);
        System.out.println("SLAVE");
        long szSlave = indexDocs(slaveClient, 1200, 1000);

        // Pretend the usable disk space equals exactly the master index size.
        Function<String, Long> originalDiskSpaceprovider = IndexFetcher.usableDiskSpaceProvider;
        IndexFetcher.usableDiskSpaceProvider = new Function<String, Long>() {
            @Override
            public Long apply(String s) {
                return szMaster;
            }
        };

        QueryResponse response;
        CountDownLatch latch = new CountDownLatch(1);
        AtomicBoolean searchDisabledFound = new AtomicBoolean(false);
        try {
            // Pause the fetcher long enough for the query thread below to
            // observe the "search disabled" state.
            IndexFetcher.testWait = new BooleanSupplier() {
                @Override
                public boolean getAsBoolean() {
                    try {
                        latch.await(5, TimeUnit.SECONDS);
                    } catch (InterruptedException e) {
                        // Restore the interrupt flag instead of swallowing it.
                        Thread.currentThread().interrupt();
                    }
                    return true;
                }
            };
            new Thread(() -> {
                for (int i = 0; i < 20; i++) {
                    try {
                        QueryResponse rsp = slaveClient.query(new SolrQuery()
                                .setQuery("*:*")
                                .setRows(0));
                        Thread.sleep(100);
                    } catch (Exception e) {
                        // Guard against a null message (e.g. bare RuntimeException).
                        if (e.getMessage() != null && e.getMessage().contains("Search is temporarily disabled")) {
                            searchDisabledFound.set(true);
                        }
                        latch.countDown();
                        break;
                    }
                }
            }).start();

            response = slaveClient.query(new SolrQuery()
                    .add("qt", "/replication")
                    .add("command", CMD_FETCH_INDEX)
                    .add("wait", "true")
            );
        } finally {
            IndexFetcher.usableDiskSpaceProvider = originalDiskSpaceprovider;
        }
        assertTrue(searchDisabledFound.get());
        assertEquals("OK", response._getStr("status", null));
//    System.out.println("MASTER INDEX: " + szMaster);
//    System.out.println("SLAVE INDEX: " + szSlave);

        response = slaveClient.query(new SolrQuery().setQuery("*:*").setRows(0));
        assertEquals(docsInMaster, response.getResults().getNumFound());

        response = slaveClient.query(new SolrQuery()
                .add("qt", "/replication")
                .add("command", ReplicationHandler.CMD_DETAILS)
        );
        System.out.println("DETAILS" + Utils.writeJson(response, new StringWriter(), true).toString());
        assertEquals("true", response._getStr("details/slave/clearedLocalIndexFirst", null));
    }

    /**
     * Indexes {@code totalDocs} random documents starting at id {@code start},
     * commits, and returns the total on-disk size of the resulting index files
     * as reported by the replication handler's file list.
     */
    private long indexDocs(SolrClient client, int totalDocs, int start) throws Exception {
        for (int i = 0; i < totalDocs; i++)
            TestReplicationHandler.index(client, "id", i + start, "name", TestUtil.randomSimpleString(random(), 1000, 5000));
        client.commit(true, true);
        QueryResponse response = client.query(new SolrQuery()
                .add("qt", "/replication")
                .add("command", "filelist")
                .add("generation", "-1"));
        long totalSize = 0;
        for (Map map : (List<Map>) response.getResponse().get(CMD_GET_FILE_LIST)) {
            Long sz = (Long) map.get(ReplicationHandler.SIZE);
            totalSize += sz;
        }
        return totalSize;
    }
}
|
package org.broadinstitute.hellbender.tools.spark.sv;
import com.google.common.annotations.VisibleForTesting;
import htsjdk.samtools.CigarElement;
import htsjdk.samtools.CigarOperator;
import org.broadinstitute.hellbender.utils.read.GATKRead;
import java.util.*;
import java.util.function.Function;
/**
* Figures out what kind of BreakpointEvidence, if any, a read represents.
*/
public class ReadClassifier implements Function<GATKRead, Iterator<BreakpointEvidence>> {
    @VisibleForTesting static final int ALLOWED_SHORT_FRAGMENT_OVERHANG = 10;
    @VisibleForTesting static final int MIN_SOFT_CLIP_LEN = 30; // minimum length of an interesting soft clip
    @VisibleForTesting static final int MIN_INDEL_LEN = 40; // minimum length of an interesting indel
    private static final byte MIN_QUALITY = 15; // minimum acceptable quality in a soft-clip window
    private static final int MAX_LOW_QUALITY_SCORES = 3; // maximum # of low quality base calls in soft-clip window
    private static final float MAX_ZISH_SCORE = 6.f; // maximum fragment-length "z" score for a normal fragment
    private static final float MIN_CRAZY_ZISH_SCORE = 100.f; // "z" score that's probably associated with a mapping error
    private final ReadMetadata readMetadata;

    public ReadClassifier( final ReadMetadata readMetadata ) {
        this.readMetadata = readMetadata;
    }

    /**
     * Classifies one read: returns an empty iterator for unmapped reads,
     * otherwise collects split-read evidence (soft clips, big indels) and
     * discordant-pair evidence for the read.
     */
    @Override
    public Iterator<BreakpointEvidence> apply( final GATKRead read ) {
        if ( read.isUnmapped() ) return Collections.emptyIterator();

        final List<BreakpointEvidence> evidenceList = new ArrayList<>();
        checkForSplitRead(read, evidenceList);
        checkDiscordantPair(read, evidenceList);
        return evidenceList.iterator();
    }

    // Adds SplitRead evidence for a qualifying leading and/or trailing soft
    // clip, then checks for large indels in the cigar.
    private void checkForSplitRead( final GATKRead read,
                                    final List<BreakpointEvidence> evidenceList ) {
        final List<CigarElement> cigarElements = read.getCigar().getCigarElements();
        if ( hasInitialSoftClip(cigarElements, read) ) {
            evidenceList.add(new BreakpointEvidence.SplitRead(read, readMetadata, true));
        }
        if ( hasFinalSoftClip(cigarElements, read) ) {
            evidenceList.add(new BreakpointEvidence.SplitRead(read, readMetadata, false));
        }
        checkBigIndel(cigarElements, read, evidenceList);
    }

    // True when the cigar begins with a soft clip (optionally preceded by a
    // hard clip) of at least MIN_SOFT_CLIP_LEN whose final MIN_SOFT_CLIP_LEN
    // bases are of acceptable quality.
    private static boolean hasInitialSoftClip( final List<CigarElement> cigarElements, final GATKRead read ) {
        final ListIterator<CigarElement> itr = cigarElements.listIterator();
        if ( !itr.hasNext() ) return false;

        CigarElement firstEle = itr.next();
        // A leading hard clip is transparent: look at the next element.
        if ( firstEle.getOperator() == CigarOperator.HARD_CLIP && itr.hasNext() ) {
            firstEle = itr.next();
        }
        // clipStart is the offset of the last MIN_SOFT_CLIP_LEN clipped bases;
        // >= 0 also enforces that the clip is at least MIN_SOFT_CLIP_LEN long.
        final int clipStart = firstEle.getLength() - MIN_SOFT_CLIP_LEN;
        return firstEle.getOperator() == CigarOperator.SOFT_CLIP &&
                clipStart >= 0 &&
                isHighQualityRegion(read.getBaseQualities(), clipStart);
    }

    // Mirror of hasInitialSoftClip for the end of the read: a trailing soft
    // clip (optionally followed by a hard clip) of sufficient length and quality.
    private static boolean hasFinalSoftClip( final List<CigarElement> cigarElements, final GATKRead read ) {
        final ListIterator<CigarElement> itr = cigarElements.listIterator(cigarElements.size());
        if ( !itr.hasPrevious() ) return false;

        CigarElement lastEle = itr.previous();
        // A trailing hard clip is transparent: look at the previous element.
        if ( lastEle.getOperator() == CigarOperator.HARD_CLIP && itr.hasPrevious() ) {
            lastEle = itr.previous();
        }
        return lastEle.getOperator() == CigarOperator.SOFT_CLIP &&
                lastEle.getLength() >= MIN_SOFT_CLIP_LEN &&
                isHighQualityRegion(read.getBaseQualities(), read.getLength() - lastEle.getLength());
    }

    // Checks the MIN_SOFT_CLIP_LEN-base window starting at idx: at most
    // MAX_LOW_QUALITY_SCORES bases may fall below MIN_QUALITY.
    // Callers guarantee idx + MIN_SOFT_CLIP_LEN <= quals.length.
    private static boolean isHighQualityRegion( final byte[] quals, int idx ) {
        int lowQuals = 0;
        for ( final int end = idx+MIN_SOFT_CLIP_LEN; idx != end; ++idx ) {
            if ( quals[idx] < MIN_QUALITY ) {
                lowQuals += 1;
                if ( lowQuals > MAX_LOW_QUALITY_SCORES ) return false;
            }
        }
        return true;
    }

    // Walks the cigar tracking the reference locus; insertions of at least
    // MIN_INDEL_LEN are reported at their locus, deletions at their midpoint.
    private void checkBigIndel( final List<CigarElement> cigarElements,
                                final GATKRead read,
                                final List<BreakpointEvidence> evidenceList ) {
        int locus = read.getStart();
        for ( final CigarElement ele : cigarElements ) {
            final CigarOperator op = ele.getOperator();
            if ( ele.getLength() >= MIN_INDEL_LEN ) {
                if ( op == CigarOperator.INSERTION ) {
                    evidenceList.add(new BreakpointEvidence.LargeIndel(read, readMetadata, locus));
                } else if ( op == CigarOperator.DELETION ) {
                    evidenceList.add(new BreakpointEvidence.LargeIndel(read, readMetadata, locus+ele.getLength()/2));
                }
            }
            if ( op.consumesReferenceBases() ) {
                locus += ele.getLength();
            }
        }
    }

    // Classifies pair anomalies in priority order: unmapped mate, mates on
    // different (non-ignored) contigs, same-strand pair, "outie" orientation
    // (allowing a short-fragment overhang), and finally a fragment length
    // whose "z-ish" score is abnormal but not so extreme it suggests a
    // mapping error.
    private void checkDiscordantPair( final GATKRead read, final List<BreakpointEvidence> evidenceList ) {
        if ( read.mateIsUnmapped() ) {
            evidenceList.add(new BreakpointEvidence.MateUnmapped(read, readMetadata));
        } else if ( isInterContig(read.getContig(),read.getMateContig()) ) {
            evidenceList.add(new BreakpointEvidence.InterContigPair(read, readMetadata));
        } else if ( read.isReverseStrand() == read.mateIsReverseStrand() ) {
            evidenceList.add(new BreakpointEvidence.SameStrandPair(read, readMetadata));
        } else if ( read.isReverseStrand() ?
                read.getStart() + ALLOWED_SHORT_FRAGMENT_OVERHANG < read.getMateStart() :
                read.getStart() - ALLOWED_SHORT_FRAGMENT_OVERHANG > read.getMateStart() ) {
            evidenceList.add(new BreakpointEvidence.OutiesPair(read, readMetadata));
        } else {
            final float zIshScore =
                    readMetadata.getStatistics(read.getReadGroup()).getZIshScore(Math.abs(read.getFragmentLength()));
            if ( zIshScore > MAX_ZISH_SCORE && zIshScore < MIN_CRAZY_ZISH_SCORE ) {
                evidenceList.add(new BreakpointEvidence.WeirdTemplateSize(read, readMetadata));
            }
            // TODO: see if there's anything we can do about anomalously short fragment sizes
            // (With current fragment sizes and read lengths there aren't enough bases to have a >=50bp insertion
            //  between the mates.)
        }
    }

    // True when the two contigs differ and neither is on the ignore list.
    private boolean isInterContig( final String contigName1, final String contigName2 ) {
        final int contigID1 = readMetadata.getContigID(contigName1);
        final int contigID2 = readMetadata.getContigID(contigName2);
        return !(contigID1 == contigID2 ||
                readMetadata.ignoreCrossContigID(contigID1) ||
                readMetadata.ignoreCrossContigID(contigID2));
    }
}
|
// Copyright (C) 2009 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gerrit.httpd.rpc.project;
import com.google.gerrit.httpd.rpc.RpcServletModule;
import com.google.gerrit.httpd.rpc.UiRpcModule;
import com.google.gerrit.server.config.FactoryModule;
/**
 * Guice servlet module binding the project-administration RPC service and the
 * assisted-inject factories for its individual project operations.
 */
public class ProjectModule extends RpcServletModule {
    public ProjectModule() {
        super(UiRpcModule.PREFIX);
    }

    @Override
    protected void configureServlets() {
        // Register assisted-inject factories for each project operation.
        install(new FactoryModule() {
            @Override
            protected void configure() {
                factory(AddBranch.Factory.class);
                factory(AddRefRight.Factory.class);
                factory(ChangeProjectSettings.Factory.class);
                factory(DeleteBranches.Factory.class);
                factory(DeleteRefRights.Factory.class);
                factory(ListBranches.Factory.class);
                factory(VisibleProjects.Factory.class);
                factory(ProjectDetailFactory.Factory.class);
            }
        });
        // Expose the project admin service over RPC under the UI prefix.
        rpc(ProjectAdminServiceImpl.class);
    }
}
|
/*
* Copyright (C) 2019 Pieter Pauwels, Ghent University
* Modifications Copyright (C) 2020 Giovanni Velludo
*
* This file is part of IFC.JAVA.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package buildingsmart.ifc;
/**
 * Marker interface for the IFC SELECT type IfcSpecularHighlightSelect;
 * implemented by the types that may appear where this select is expected.
 */
public interface IfcSpecularHighlightSelect {
}
|
package org.jabref.logic.integrity;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import org.jabref.logic.bibtexkeypattern.BibtexKeyPatternPreferences;
import org.jabref.logic.journals.JournalAbbreviationRepository;
import org.jabref.model.database.BibDatabaseContext;
import org.jabref.model.entry.BibEntry;
import org.jabref.model.entry.FieldName;
import org.jabref.model.metadata.FilePreferences;
/**
 * Runs all known integrity checks over the entries of a bibliography database and
 * collects the resulting {@link IntegrityMessage}s.
 */
public class IntegrityCheck {

    private final BibDatabaseContext bibDatabaseContext;
    private final FilePreferences filePreferences;
    private final BibtexKeyPatternPreferences bibtexKeyPatternPreferences;
    private final JournalAbbreviationRepository journalAbbreviationRepository;
    private final boolean enforceLegalKey;

    /**
     * Creates a checker bound to the given database context and preferences.
     * All arguments except {@code enforceLegalKey} must be non-null.
     */
    public IntegrityCheck(BibDatabaseContext bibDatabaseContext,
                          FilePreferences filePreferences,
                          BibtexKeyPatternPreferences bibtexKeyPatternPreferences,
                          JournalAbbreviationRepository journalAbbreviationRepository,
                          boolean enforceLegalKey) {
        this.bibDatabaseContext = Objects.requireNonNull(bibDatabaseContext);
        this.filePreferences = Objects.requireNonNull(filePreferences);
        this.bibtexKeyPatternPreferences = Objects.requireNonNull(bibtexKeyPatternPreferences);
        this.journalAbbreviationRepository = Objects.requireNonNull(journalAbbreviationRepository);
        this.enforceLegalKey = enforceLegalKey;
    }

    /**
     * Runs every check over each entry of the database.
     *
     * @return all integrity messages reported by the individual checkers
     */
    public List<IntegrityMessage> checkBibtexDatabase() {
        List<IntegrityMessage> messages = new ArrayList<>();
        bibDatabaseContext.getDatabase().getEntries()
                .forEach(entry -> messages.addAll(checkBibtexEntry(entry)));
        return messages;
    }

    /**
     * Applies all field- and entry-level checkers to a single entry.
     * A {@code null} entry yields an empty result.
     */
    private List<IntegrityMessage> checkBibtexEntry(BibEntry entry) {
        List<IntegrityMessage> messages = new ArrayList<>();
        if (entry == null) {
            return messages;
        }

        FieldCheckers fieldCheckers = new FieldCheckers(bibDatabaseContext, filePreferences, journalAbbreviationRepository, enforceLegalKey);
        fieldCheckers.getAll().forEach(checker -> messages.addAll(checker.check(entry)));

        if (bibDatabaseContext.isBiblatexMode()) {
            messages.addAll(new JournalInAbbreviationListChecker(FieldName.JOURNALTITLE, journalAbbreviationRepository).check(entry));
        } else {
            // These checkers only make sense for plain BibTeX databases.
            messages.addAll(new ASCIICharacterChecker().check(entry));
            messages.addAll(new NoBibtexFieldChecker().check(entry));
            messages.addAll(new BibTeXEntryTypeChecker().check(entry));
            messages.addAll(new JournalInAbbreviationListChecker(FieldName.JOURNAL, journalAbbreviationRepository).check(entry));
        }

        messages.addAll(new BibtexKeyChecker().check(entry));
        messages.addAll(new TypeChecker().check(entry));
        messages.addAll(new BibStringChecker().check(entry));
        messages.addAll(new HTMLCharacterChecker().check(entry));
        messages.addAll(new EntryLinkChecker(bibDatabaseContext.getDatabase()).check(entry));
        messages.addAll(new BibtexkeyDeviationChecker(bibDatabaseContext, bibtexKeyPatternPreferences).check(entry));
        messages.addAll(new BibtexKeyDuplicationChecker(bibDatabaseContext.getDatabase()).check(entry));
        return messages;
    }

    /**
     * A single integrity check: inspects one entry and reports any problems found.
     */
    @FunctionalInterface
    public interface Checker {
        List<IntegrityMessage> check(BibEntry entry);
    }
}
|
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/cloud/bigquery/reservation/v1beta1/reservation.proto
package com.google.cloud.bigquery.reservation.v1beta1;
/**
 * Accessor interface for {@code CreateCapacityCommitmentRequest} messages and builders.
 *
 * <p>Generated by the protocol buffer compiler from
 * {@code google/cloud/bigquery/reservation/v1beta1/reservation.proto}. Do not edit by
 * hand — regenerate from the .proto file instead.
 */
public interface CreateCapacityCommitmentRequestOrBuilder extends
    // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.reservation.v1beta1.CreateCapacityCommitmentRequest)
    com.google.protobuf.MessageOrBuilder {
  /**
   * <pre>
   * Required. Resource name of the parent reservation. E.g.,
   * `projects/myproject/locations/US`
   * </pre>
   *
   * <code>string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }</code>
   * @return The parent.
   */
  java.lang.String getParent();
  /**
   * <pre>
   * Required. Resource name of the parent reservation. E.g.,
   * `projects/myproject/locations/US`
   * </pre>
   *
   * <code>string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }</code>
   * @return The bytes for parent.
   */
  com.google.protobuf.ByteString
      getParentBytes();
  /**
   * <pre>
   * Content of the capacity commitment to create.
   * </pre>
   *
   * <code>.google.cloud.bigquery.reservation.v1beta1.CapacityCommitment capacity_commitment = 2;</code>
   * @return Whether the capacityCommitment field is set.
   */
  boolean hasCapacityCommitment();
  /**
   * <pre>
   * Content of the capacity commitment to create.
   * </pre>
   *
   * <code>.google.cloud.bigquery.reservation.v1beta1.CapacityCommitment capacity_commitment = 2;</code>
   * @return The capacityCommitment.
   */
  com.google.cloud.bigquery.reservation.v1beta1.CapacityCommitment getCapacityCommitment();
  /**
   * <pre>
   * Content of the capacity commitment to create.
   * </pre>
   *
   * <code>.google.cloud.bigquery.reservation.v1beta1.CapacityCommitment capacity_commitment = 2;</code>
   */
  com.google.cloud.bigquery.reservation.v1beta1.CapacityCommitmentOrBuilder getCapacityCommitmentOrBuilder();
  /**
   * <pre>
   * If true, fail the request if another project in the organization has a
   * capacity commitment.
   * </pre>
   *
   * <code>bool enforce_single_admin_project_per_org = 4;</code>
   * @return The enforceSingleAdminProjectPerOrg.
   */
  boolean getEnforceSingleAdminProjectPerOrg();
}
|
/*
* Copyright 2013 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.bitcoin.protocols.channels;
import com.google.bitcoin.core.Coin;
import com.google.bitcoin.core.InsufficientMoneyException;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.protobuf.ByteString;
import org.bitcoin.paymentchannel.Protos;
import javax.annotation.Nullable;
/**
* A class implementing this interface supports the basic operations of a payment channel. An implementation is provided
* in {@link PaymentChannelClient}, but alternative implementations are possible. For example, an implementor might
* send RPCs to a separate (locally installed or even remote) wallet app rather than implementing the algorithm locally.
*/
public interface IPaymentChannelClient {
    /**
     * Called when a message is received from the server. Processes the given message and
     * generates events based on its content.
     *
     * @param msg the protocol message received from the server
     * @throws InsufficientMoneyException if the wallet cannot fund the requested channel
     */
    void receiveMessage(Protos.TwoWayChannelMessage msg) throws InsufficientMoneyException;
    /**
     * <p>Called when the connection to the server terminates.</p>
     *
     * <p>For stateless protocols, this translates to a client not using the channel for the immediate future, but
     * intending to reopen the channel later. There is likely little reason to use this in a stateless protocol.</p>
     *
     * <p>Note that this <b>MUST</b> still be called even after either
     * {@link IPaymentChannelClient.ClientConnection#destroyConnection(com.google.bitcoin.protocols.channels.PaymentChannelCloseException.CloseReason)} or
     * {@link IPaymentChannelClient#settle()} is called, to actually handle the connection close logic.</p>
     */
    void connectionClosed();
    /**
     * <p>Settles the channel, notifying the server it can broadcast the most recent payment transaction.</p>
     *
     * <p>Note that this only generates a CLOSE message for the server and calls
     * {@link IPaymentChannelClient.ClientConnection#destroyConnection(com.google.bitcoin.protocols.channels.PaymentChannelCloseException.CloseReason)}
     * to settle the connection, it does not actually handle connection close logic, and
     * {@link IPaymentChannelClient#connectionClosed()} must still be called after the connection fully settles.</p>
     *
     * @throws IllegalStateException If the connection is not currently open (ie the CLOSE message cannot be sent)
     */
    void settle() throws IllegalStateException;
    /**
     * <p>Called to indicate the connection has been opened and messages can now be generated for the server.</p>
     *
     * <p>Attempts to find a channel to resume and generates a CLIENT_VERSION message for the server based on the
     * result.</p>
     */
    void connectionOpen();
    /**
     * Increments the total value which we pay the server. Note that the amount of money sent may not be the same as the
     * amount of money actually requested. It can be larger if the amount left over in the channel would be too small to
     * be accepted by the Bitcoin network. ValueOutOfRangeException will be thrown, however, if there's not enough money
     * left in the channel to make the payment at all. Only one payment can be in-flight at once. You have to ensure
     * you wait for the previous increase payment future to complete before incrementing the payment again.
     *
     * @param size How many satoshis to increment the payment by (note: not the new total).
     * @param info Information about this update, used to extend this protocol; may be null.
     * @throws ValueOutOfRangeException If the size is negative or would pay more than this channel's total value
     *                                  ({@link PaymentChannelClientConnection#state()}.getTotalValue())
     * @throws IllegalStateException If the channel has been closed or is not yet open
     *                               (see {@link PaymentChannelClientConnection#getChannelOpenFuture()} for the second)
     * @return a future that completes when the server acknowledges receipt and acceptance of the payment.
     */
    ListenableFuture<PaymentIncrementAck> incrementPayment(Coin size, @Nullable ByteString info) throws ValueOutOfRangeException, IllegalStateException;
    /**
     * Implements the connection between this client and the server, providing an interface which allows messages to be
     * sent to the server, requests for the connection to the server to be closed, and a callback which occurs when the
     * channel is fully open.
     */
    interface ClientConnection {
        /**
         * <p>Requests that the given message be sent to the server. There are no blocking requirements for this method,
         * however the order of messages must be preserved.</p>
         *
         * <p>If the send fails, no exception should be thrown, however
         * {@link IPaymentChannelClient#connectionClosed()} should be called immediately. In the case of messages which
         * are a part of initialization, initialization will simply fail and the refund transaction will be broadcasted
         * when it unlocks (if necessary). In the case of a payment message, the payment will be lost however if the
         * channel is resumed it will begin again from the channel value <i>after</i> the failed payment.</p>
         *
         * <p>Called while holding a lock on the {@link com.google.bitcoin.protocols.channels.PaymentChannelClient} object - be careful about reentrancy</p>
         */
        void sendToServer(Protos.TwoWayChannelMessage msg);
        /**
         * <p>Requests that the connection to the server be closed. For stateless protocols, note that after this call,
         * no more messages should be received from the server and this object is no longer usable. A
         * {@link IPaymentChannelClient#connectionClosed()} event should be generated immediately after this call.</p>
         *
         * <p>Called while holding a lock on the {@link com.google.bitcoin.protocols.channels.PaymentChannelClient} object - be careful about reentrancy</p>
         *
         * @param reason The reason for the closure, see the individual values for more details.
         *               It is usually safe to ignore this and treat any value below
         *               {@link com.google.bitcoin.protocols.channels.PaymentChannelCloseException.CloseReason#CLIENT_REQUESTED_CLOSE} as "unrecoverable error" and all others as
         *               "try again once and see if it works then"
         */
        void destroyConnection(PaymentChannelCloseException.CloseReason reason);
        /**
         * <p>Indicates the channel has been successfully opened and
         * {@link IPaymentChannelClient#incrementPayment(Coin, ByteString)}
         * may be called at will.</p>
         *
         * <p>Called while holding a lock on the {@link com.google.bitcoin.protocols.channels.PaymentChannelClient}
         * object - be careful about reentrancy</p>
         *
         * @param wasInitiated If true, the channel is newly opened. If false, it was resumed.
         */
        void channelOpen(boolean wasInitiated);
    }
    /**
     * An implementor of this interface creates payment channel clients that "talk back" with the given connection.
     * The client might be a PaymentChannelClient, or an RPC interface, or something else entirely.
     */
    interface Factory {
        IPaymentChannelClient create(String serverPaymentIdentity, ClientConnection connection);
    }
}
|
package com.uycode.mockapiserver.base;
import com.uycode.mockapiserver.base.statics.SmallDog;
import com.uycode.mockapiserver.base.statics.WolfDog;
/**
* @author Hyper
* @email Hyper-Hack@outlook.com
* @since 2021/4/6
*/
public class Main {
    /**
     * Demo entry point: instantiates a {@code WolfDog} through its {@code SmallDog}
     * supertype and invokes each operation, exercising dynamic dispatch.
     */
    public static void main(String[] args) {
        final SmallDog wolf = new WolfDog();
        wolf.callDog();
        wolf.getDogColor();
        wolf.dogSize();
    }
}
|
/*
* Copyright 2012-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.autoconfigure.amqp;
import java.util.List;
import org.springframework.amqp.rabbit.config.AbstractRabbitListenerContainerFactory;
import org.springframework.amqp.rabbit.config.RetryInterceptorBuilder;
import org.springframework.amqp.rabbit.connection.ConnectionFactory;
import org.springframework.amqp.rabbit.listener.RabbitListenerContainerFactory;
import org.springframework.amqp.rabbit.retry.MessageRecoverer;
import org.springframework.amqp.rabbit.retry.RejectAndDontRequeueRecoverer;
import org.springframework.amqp.support.converter.MessageConverter;
import org.springframework.boot.autoconfigure.amqp.RabbitProperties.ListenerRetry;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.util.Assert;
/**
* Configure {@link RabbitListenerContainerFactory} with sensible defaults.
*
* @param <T> the container factory type.
* @author Gary Russell
* @author Stephane Nicoll
* @since 2.0.0
*/
public abstract class AbstractRabbitListenerContainerFactoryConfigurer<T extends AbstractRabbitListenerContainerFactory<?>> {
    // All collaborators are optional; a null value means "use the framework default".
    private MessageConverter messageConverter;
    private MessageRecoverer messageRecoverer;
    private List<RabbitRetryTemplateCustomizer> retryTemplateCustomizers;
    private RabbitProperties rabbitProperties;
    /**
     * Set the {@link MessageConverter} to use or {@code null} if the out-of-the-box
     * converter should be used.
     * @param messageConverter the {@link MessageConverter}
     */
    protected void setMessageConverter(MessageConverter messageConverter) {
        this.messageConverter = messageConverter;
    }
    /**
     * Set the {@link MessageRecoverer} to use or {@code null} to rely on the default.
     * @param messageRecoverer the {@link MessageRecoverer}
     */
    protected void setMessageRecoverer(MessageRecoverer messageRecoverer) {
        this.messageRecoverer = messageRecoverer;
    }
    /**
     * Set the {@link RabbitRetryTemplateCustomizer} instances to use.
     * @param retryTemplateCustomizers the retry template customizers
     */
    protected void setRetryTemplateCustomizers(
            List<RabbitRetryTemplateCustomizer> retryTemplateCustomizers) {
        this.retryTemplateCustomizers = retryTemplateCustomizers;
    }
    /**
     * Set the {@link RabbitProperties} to use.
     * @param rabbitProperties the {@link RabbitProperties}
     */
    protected void setRabbitProperties(RabbitProperties rabbitProperties) {
        this.rabbitProperties = rabbitProperties;
    }
    /**
     * Return the {@link RabbitProperties} previously set on this configurer.
     * @return the rabbit properties
     */
    protected final RabbitProperties getRabbitProperties() {
        return this.rabbitProperties;
    }
    /**
     * Configure the specified rabbit listener container factory. The factory can be
     * further tuned and default settings can be overridden.
     * @param factory the {@link AbstractRabbitListenerContainerFactory} instance to
     * configure
     * @param connectionFactory the {@link ConnectionFactory} to use
     */
    public abstract void configure(T factory, ConnectionFactory connectionFactory);
    /**
     * Apply the common settings from the given container {@code configuration} to the
     * factory: connection factory, message converter, auto-startup, acknowledge mode,
     * prefetch, requeue policy, idle event interval, missing-queue behaviour and — when
     * enabled — the retry advice chain.
     * @param factory the factory to configure (must not be null)
     * @param connectionFactory the {@link ConnectionFactory} to use (must not be null)
     * @param configuration the container properties to apply (must not be null)
     */
    protected void configure(T factory, ConnectionFactory connectionFactory,
            RabbitProperties.AmqpContainer configuration) {
        Assert.notNull(factory, "Factory must not be null");
        Assert.notNull(connectionFactory, "ConnectionFactory must not be null");
        Assert.notNull(configuration, "Configuration must not be null");
        factory.setConnectionFactory(connectionFactory);
        // Only override the converter when one was explicitly provided.
        if (this.messageConverter != null) {
            factory.setMessageConverter(this.messageConverter);
        }
        factory.setAutoStartup(configuration.isAutoStartup());
        if (configuration.getAcknowledgeMode() != null) {
            factory.setAcknowledgeMode(configuration.getAcknowledgeMode());
        }
        if (configuration.getPrefetch() != null) {
            factory.setPrefetchCount(configuration.getPrefetch());
        }
        if (configuration.getDefaultRequeueRejected() != null) {
            factory.setDefaultRequeueRejected(configuration.getDefaultRequeueRejected());
        }
        if (configuration.getIdleEventInterval() != null) {
            factory.setIdleEventInterval(configuration.getIdleEventInterval().toMillis());
        }
        factory.setMissingQueuesFatal(configuration.isMissingQueuesFatal());
        ListenerRetry retryConfig = configuration.getRetry();
        if (retryConfig.isEnabled()) {
            // Choose stateless vs. stateful retry interception per the configuration.
            RetryInterceptorBuilder<?, ?> builder = (retryConfig.isStateless())
                    ? RetryInterceptorBuilder.stateless()
                    : RetryInterceptorBuilder.stateful();
            RetryTemplate retryTemplate = new RetryTemplateFactory(
                    this.retryTemplateCustomizers).createRetryTemplate(retryConfig,
                            RabbitRetryTemplateCustomizer.Target.LISTENER);
            builder.retryOperations(retryTemplate);
            // When retries are exhausted, fall back to reject-without-requeue unless a
            // custom recoverer was supplied.
            MessageRecoverer recoverer = (this.messageRecoverer != null)
                    ? this.messageRecoverer : new RejectAndDontRequeueRecoverer();
            builder.recoverer(recoverer);
            factory.setAdviceChain(builder.build());
        }
    }
}
|
package com.baomidou.mybatisplus.extension.conditions.query;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.conditions.ChainWrapper;
import com.baomidou.mybatisplus.extension.toolkit.SqlHelper;
import java.util.List;
import java.util.Optional;
/**
* 具有查询方法的定义
* 链式查询
* @author miemie
* @since 2018-12-19
*/
/**
 * Query-side operations for chain-style wrappers.
 *
 * @author miemie
 * @since 2018-12-19
 */
public interface ChainQuery<T> extends ChainWrapper<T> {

    /**
     * Executes the query and returns every matching record.
     *
     * @return the list of matching records
     */
    default List<T> list() {
        return getBaseMapper().selectList(getWrapper());
    }

    /**
     * Executes the query expecting at most one record.
     *
     * @return the single matching record, or {@code null} if none matched
     */
    default T one() {
        return getBaseMapper().selectOne(getWrapper());
    }

    /**
     * Like {@link #one()}, but wraps the (possibly null) result in an {@link Optional}.
     *
     * @return an {@code Optional} holding the single matching record, if any
     * @since 3.3.0
     */
    default Optional<T> oneOpt() {
        return Optional.ofNullable(one());
    }

    /**
     * Counts the records matched by the query.
     *
     * @return the number of matching records
     */
    default Long count() {
        Long rows = SqlHelper.retCount(getBaseMapper().selectCount(getWrapper()));
        return rows;
    }

    /**
     * Checks whether at least one record matches the query.
     *
     * @return {@code true} if any record matches, {@code false} otherwise
     */
    default boolean exists() {
        return count() > 0;
    }

    /**
     * Executes the query with pagination.
     *
     * @param page the page request carrying size/offset information
     * @return the same page object, populated with the matching records
     */
    default <E extends IPage<T>> E page(E page) {
        return getBaseMapper().selectPage(page, getWrapper());
    }
}
|
/*
* Copyright 2011-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.data.neo4j.integration.shared.common;
import org.springframework.beans.factory.annotation.Value;
/**
* @author Gerrit Meier
*/
/**
 * Projection exposing first/last name plus a city value computed via SpEL.
 *
 * @author Gerrit Meier
 */
public interface NamesWithSpELCity {
    String getFirstName();
    String getLastName();
    // Evaluated against the projection target: navigates target.address.city.
    @Value("#{target.address.city}")
    String getCity();
}
|
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.github.stawirej.fluentapi.prepositions.complex;
/**
* Represents an operation that accepts a single input argument, returns no result and substitutes 'closeTo' word.
*
* @param <T> the type of the input to the operation
* @author Piotr Stawirej
*/
@FunctionalInterface
public interface CloseToConsumer<T> {
    /**
     * Performs this operation on the given argument. Behaves like
     * {@code java.util.function.Consumer#accept}, but is named {@code closeTo} so the
     * call site reads as fluent English.
     *
     * @param t the input argument
     */
    void closeTo(T t);
}
|
package com.worldline.bookstore.service;
import com.worldline.bookstore.domain.User;

import io.github.jhipster.config.JHipsterProperties;

import org.apache.commons.lang3.CharEncoding;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.MessageSource;
import org.springframework.mail.javamail.JavaMailSender;
import org.springframework.mail.javamail.MimeMessageHelper;
import org.springframework.scheduling.annotation.Async;
import org.springframework.stereotype.Service;
import org.thymeleaf.context.Context;
import org.thymeleaf.spring5.SpringTemplateEngine;

import javax.mail.internet.MimeMessage;

import java.nio.charset.StandardCharsets;
import java.util.Locale;
/**
* Service for sending e-mails.
* <p>
* We use the @Async annotation to send e-mails asynchronously.
* </p>
*/
@Service
public class MailService {

    private final Logger log = LoggerFactory.getLogger(MailService.class);

    // Names of the template-context variables shared by all templated e-mails.
    private static final String USER = "user";
    private static final String BASE_URL = "baseUrl";

    private final JHipsterProperties jHipsterProperties;
    private final JavaMailSender javaMailSender;
    private final MessageSource messageSource;
    private final SpringTemplateEngine templateEngine;

    public MailService(JHipsterProperties jHipsterProperties, JavaMailSender javaMailSender,
            MessageSource messageSource, SpringTemplateEngine templateEngine) {
        this.jHipsterProperties = jHipsterProperties;
        this.javaMailSender = javaMailSender;
        this.messageSource = messageSource;
        this.templateEngine = templateEngine;
    }

    /**
     * Sends an e-mail asynchronously. Failures are logged, never propagated, so a mail
     * outage cannot break the calling business flow.
     *
     * @param to          recipient address
     * @param subject     subject line
     * @param content     message body
     * @param isMultipart whether to build a multipart message
     * @param isHtml      whether {@code content} is HTML (vs. plain text)
     */
    @Async
    public void sendEmail(String to, String subject, String content, boolean isMultipart, boolean isHtml) {
        log.debug("Send e-mail[multipart '{}' and html '{}'] to '{}' with subject '{}' and content={}",
            isMultipart, isHtml, to, subject, content);
        // Prepare message using a Spring helper
        MimeMessage mimeMessage = javaMailSender.createMimeMessage();
        try {
            // StandardCharsets.UTF_8.name() replaces the deprecated commons-lang3 CharEncoding.UTF_8.
            MimeMessageHelper message = new MimeMessageHelper(mimeMessage, isMultipart, StandardCharsets.UTF_8.name());
            message.setTo(to);
            message.setFrom(jHipsterProperties.getMail().getFrom());
            message.setSubject(subject);
            message.setText(content, isHtml);
            javaMailSender.send(mimeMessage);
            log.debug("Sent e-mail to User '{}'", to);
        } catch (Exception e) {
            // Deliberate best-effort: log and swallow so callers are unaffected.
            log.warn("E-mail could not be sent to user '{}'", to, e);
        }
    }

    /**
     * Renders the given Thymeleaf template in the user's locale and mails the result to
     * the user. Shared by all templated e-mails below.
     *
     * @param user         recipient; its langKey selects the locale
     * @param templateName Thymeleaf template name to render
     * @param titleKey     message-source key for the subject line
     */
    private void sendEmailFromTemplate(User user, String templateName, String titleKey) {
        Locale locale = Locale.forLanguageTag(user.getLangKey());
        Context context = new Context(locale);
        context.setVariable(USER, user);
        context.setVariable(BASE_URL, jHipsterProperties.getMail().getBaseUrl());
        String content = templateEngine.process(templateName, context);
        String subject = messageSource.getMessage(titleKey, null, locale);
        sendEmail(user.getEmail(), subject, content, false, true);
    }

    /**
     * Sends the account-activation e-mail to the given user.
     */
    @Async
    public void sendActivationEmail(User user) {
        log.debug("Sending activation e-mail to '{}'", user.getEmail());
        sendEmailFromTemplate(user, "activationEmail", "email.activation.title");
    }

    /**
     * Sends the account-creation e-mail to the given user.
     */
    @Async
    public void sendCreationEmail(User user) {
        log.debug("Sending creation e-mail to '{}'", user.getEmail());
        // NOTE(review): the original code also used "email.activation.title" for the
        // subject here; kept for backward compatibility — confirm whether a dedicated
        // "email.creation.title" key was intended.
        sendEmailFromTemplate(user, "creationEmail", "email.activation.title");
    }

    /**
     * Sends the password-reset e-mail to the given user.
     */
    @Async
    public void sendPasswordResetMail(User user) {
        log.debug("Sending password reset e-mail to '{}'", user.getEmail());
        sendEmailFromTemplate(user, "passwordResetEmail", "email.reset.title");
    }
}
|
/*
* Copyright 2012-2016 bambooCORE, greenstep of copyright Chen Xin Nien
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------
*
* author: Chen Xin Nien
* contact: chen.xin.nien@gmail.com
*
*/
package com.netsteadfast.greenstep.bsc.dao;
import com.netsteadfast.greenstep.base.dao.IBaseDAO;
import com.netsteadfast.greenstep.po.hbm.BbSwotReportDtl;
/**
 * DAO for {@code BbSwotReportDtl} rows.
 *
 * <p>NOTE(review): the type parameters {@code T} and {@code PK} are declared but unused —
 * the superinterface is bound to the fixed types {@code BbSwotReportDtl} and
 * {@code String} regardless of what callers supply. They are kept only for signature
 * compatibility with existing implementations.
 */
public interface ISwotReportDtlDAO<T extends java.io.Serializable, PK extends java.io.Serializable> extends IBaseDAO<BbSwotReportDtl, String> {
}
|
package com.barentine.totalconnect.ws;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlType;
/**
* <p>Java class for anonymous complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType>
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="SessionID" type="{http://www.w3.org/2001/XMLSchema}string" minOccurs="0"/>
* <element name="LocationID" type="{http://www.w3.org/2001/XMLSchema}long"/>
* <element name="RSIDeviceID" type="{http://www.w3.org/2001/XMLSchema}long"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
// JAXB binding class generated from the schema fragment documented above; prefer
// regenerating over hand-editing.
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "", propOrder = {
    "sessionID",
    "locationID",
    "rsiDeviceID"
})
@XmlRootElement(name = "GetRSIDeviceStatus")
public class GetRSIDeviceStatus {
    @XmlElement(name = "SessionID")
    protected String sessionID;
    @XmlElement(name = "LocationID")
    protected long locationID;
    @XmlElement(name = "RSIDeviceID")
    protected long rsiDeviceID;
    /**
     * Gets the value of the sessionID property.
     *
     * @return
     *     possible object is
     *     {@link String }
     *
     */
    public String getSessionID() {
        return sessionID;
    }
    /**
     * Sets the value of the sessionID property.
     *
     * @param value
     *     allowed object is
     *     {@link String }
     *
     */
    public void setSessionID(String value) {
        this.sessionID = value;
    }
    /**
     * Gets the value of the locationID property.
     *
     * @return the location identifier
     */
    public long getLocationID() {
        return locationID;
    }
    /**
     * Sets the value of the locationID property.
     *
     * @param value the location identifier
     */
    public void setLocationID(long value) {
        this.locationID = value;
    }
    /**
     * Gets the value of the rsiDeviceID property.
     *
     * @return the RSI device identifier
     */
    public long getRSIDeviceID() {
        return rsiDeviceID;
    }
    /**
     * Sets the value of the rsiDeviceID property.
     *
     * @param value the RSI device identifier
     */
    public void setRSIDeviceID(long value) {
        this.rsiDeviceID = value;
    }
}
|
/*
* Copyright (c) 2011-2017, Peter Abeles. All Rights Reserved.
*
* This file is part of BoofCV (http://boofcv.org).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package boofcv.alg.filter.derivative.impl;
import boofcv.core.image.border.ImageBorder_S32;
import boofcv.struct.image.GrayU8;
import boofcv.struct.sparse.GradientValue_I32;
import boofcv.struct.sparse.SparseImageGradient;
/**
* Sparse computation of the Prewitt gradient operator.
*
* @author Peter Abeles
*/
public class GradientSparsePrewitt_U8 implements SparseImageGradient<GrayU8,GradientValue_I32> {
    // image being processed
    GrayU8 input;
    // specifies how the image border is handled; if null, border pixels are unsupported
    ImageBorder_S32<GrayU8> border;
    // storage for computed gradient — reused across calls, so the returned object is
    // overwritten by the next compute() (not thread-safe, do not retain)
    GradientValue_I32 gradient = new GradientValue_I32();
    /**
     * Specifies how border pixels are handled. If null then the border is not handled.
     * @param border how borders are handled
     */
    public GradientSparsePrewitt_U8(ImageBorder_S32<GrayU8> border) {
        this.border = border;
    }
    /**
     * Computes the Prewitt gradient at (x,y) from the pixel's 3x3 neighborhood.
     * Interior pixels are read directly from the pixel array for speed; pixels within
     * one pixel of the image edge go through the border handler (which will NPE if
     * {@code border} is null — guard with {@link #isInBounds}).
     */
    @Override
    public GradientValue_I32 compute(int x, int y) {
        // 3x3 neighborhood samples; the center pixel (a11) is not used by Prewitt.
        int a00,a01,a02,a10,a12,a20,a21,a22;
        if( x >= 1 && y >= 1 && x < input.width - 1 && y < input.height - 1 ) {
            int s = input.stride;
            // index of the top-left neighbor (x-1, y-1) in the packed pixel array
            int tl = input.startIndex + input.stride*(y-1) + x-1;
            // '& 0xFF' converts the signed byte to its unsigned U8 value
            a00 = input.data[tl ] & 0xFF;
            a01 = input.data[tl+1 ] & 0xFF;
            a02 = input.data[tl+2 ] & 0xFF;
            a10 = input.data[tl + s ] & 0xFF;
            a12 = input.data[tl+2 + s ] & 0xFF;
            a20 = input.data[tl + 2*s] & 0xFF;
            a21 = input.data[tl+1 + 2*s] & 0xFF;
            a22 = input.data[tl+2 + 2*s] & 0xFF;
        } else {
            a00 = border.get(x-1,y-1);
            a01 = border.get(x ,y-1);
            a02 = border.get(x+1,y-1);
            a10 = border.get(x-1,y );
            a12 = border.get(x+1,y );
            a20 = border.get(x-1,y+1);
            a21 = border.get(x ,y+1);
            a22 = border.get(x+1,y+1);
        }
        // Prewitt kernels with unit weights:
        // y-gradient = (bottom row sum) - (top row sum)
        gradient.y = -(a00 + a01 + a02);
        gradient.y += (a20 + a21 + a22);
        // x-gradient = (right column sum) - (left column sum)
        gradient.x = -(a00 + a10 + a20);
        gradient.x += (a02 + a12 + a22);
        return gradient;
    }
    @Override
    public Class<GradientValue_I32> getGradientType() {
        return GradientValue_I32.class;
    }
    @Override
    public void setImage(GrayU8 input) {
        this.input = input;
        // keep the border handler in sync with the current image
        if( border != null ) {
            border.setImage(input);
        }
    }
    @Override
    public boolean isInBounds(int x, int y) {
        // With a border handler every pixel is valid; otherwise only interior pixels are.
        return border != null || x >= 1 && y >= 1 && x < input.width - 1 && y < input.height - 1;
    }
}
|
package org.sqlbroker;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.NClob;
import java.sql.PreparedStatement;
import java.sql.Ref;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLXML;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.Calendar;
import java.util.Date;
import root.adt.ListExtractable;
import root.jdbc.SqlType;
import root.lang.Extractable;
import root.lang.StringExtractor;
/**
* - Does not include @Deprecated methods such as <code>void setUnicodeStream()</code>.
*
* TODO:
* + Search for SQLException, there shouldn't be any until the Value classes (done)
* + Go to beginning of Value class declarations and search for index, there shouldn't be any in the Value classes (done)
*
* @author esmith
*/
public final class Parameters implements Extractable {
// <><><><><><><><><><><><><>< Class Attributes ><><><><><><><><><><><><><>
private int index;
private final ListExtractable<Value> values;
// <><><><><><><><><><><><><><>< Constructors ><><><><><><><><><><><><><><>
public Parameters(final int size) {
values = new ListExtractable<>(size);
}
// <><><><><><><><><><><><><><> Public Methods <><><><><><><><><><><><><><>
public final Parameters add(final Array a) {
index++;
values.add(new ArrayValue(index, a));
return this;
}
public final Parameters addAsciiStream(final InputStream is) {
index++;
values.add(new AsciiStreamValue(index, is));
return this;
}
public final Parameters addAsciiStream(final InputStream is, final int length) {
index++;
values.add(new AsciiStreamValue(index, is, length));
return this;
}
public final Parameters add(final BigDecimal b) {
index++;
values.add((b == null) ? new NullValue(index, SqlType.DECIMAL) : new BigDecimalValue(index, b));
return this;
}
public final Parameters addBinaryStream(final InputStream is) {
index++;
values.add(new BinaryStreamValue(index, is));
return this;
}
public final Parameters addBinaryStream(final InputStream is, final int length) {
index++;
values.add(new BinaryStreamValue(index, is, length));
return this;
}
public final Parameters addBlob(final Blob b) {
index++;
values.add(new BlobValue(index, b));
return this;
}
public final Parameters addBlob(final InputStream is) {
index++;
values.add(new BlobValue(index, is));
return this;
}
public final Parameters addBlob(final InputStream is, final long length) {
index++;
values.add(new BlobValue(index, is, length));
return this;
}
public final Parameters add(final boolean b) {
index++;
values.add(new BooleanValue(index, b));
return this;
}
public final Parameters add(final Boolean b) {
index++;
values.add((b == null) ? new NullValue(index, SqlType.BOOLEAN) : new BooleanValue(index, b));
return this;
}
public final Parameters add(final byte b) {
index++;
values.add(new ByteValue(index, b));
return this;
}
public final Parameters add(final Byte b) {
index++;
values.add((b == null) ? new NullValue(index, SqlType.TINYINT) : new ByteValue(index, b));
return this;
}
public final Parameters add(final byte[] b) {
index++;
values.add((b == null) ? new NullValue(index, SqlType.BINARY) : new ByteArrayValue(index, b));
return this;
}
public final Parameters addCharacterStream(final Reader reader) {
index++;
values.add(new CharacterStreamValue(index, reader));
return this;
}
public final Parameters addCharacterStream(final Reader reader, final int length) {
index++;
values.add(new CharacterStreamValue(index, reader, length));
return this;
}
public final Parameters addClob(final Clob c) {
index++;
values.add(new ClobValue(index, c));
return this;
}
public final Parameters addClob(final Reader reader) {
index++;
values.add(new ClobValue(index, reader));
return this;
}
public final Parameters addClob(final Reader reader, final long length) {
index++;
values.add(new ClobValue(index, reader, length));
return this;
}
public final Parameters add(final Date d) {
index++;
values.add((d == null) ? new NullValue(index, SqlType.DATE) : new DateValue(index, d));
return this;
}
public final Parameters add(final Date d, final Calendar cal) {
index++;
values.add((d == null) ? new NullValue(index, SqlType.DATE) : new DateValue(index, d, cal));
return this;
}
public final Parameters add(final double d) {
index++;
values.add(new DoubleValue(index, d));
return this;
}
public final Parameters add(final Double d) {
index++;
values.add((d == null) ? new NullValue(index, SqlType.DOUBLE) : new DoubleValue(index, d));
return this;
}
public final Parameters add(final float f) {
index++;
values.add(new FloatValue(index, f));
return this;
}
public final Parameters add(final Float f) {
index++;
values.add((f == null) ? new NullValue(index, SqlType.FLOAT) : new FloatValue(index, f));
return this;
}
public final Parameters add(final int i) {
index++;
values.add(new IntegerValue(index, i));
return this;
}
public final Parameters add(final Integer i) {
index++;
values.add((i == null) ? new NullValue(index, SqlType.INTEGER) : new IntegerValue(index, i));
return this;
}
public final Parameters add(final long l) {
index++;
values.add(new LongValue(index, l));
return this;
}
public final Parameters add(final Long l) {
index++;
values.add((l == null) ? new NullValue(index, SqlType.BIGINT) : new LongValue(index, l));
return this;
}
public final Parameters addNCharacterStream(final Reader reader) {
index++;
values.add(new NCharacterStreamValue(index, reader));
return this;
}
public final Parameters addNCharacterStream(final Reader reader, final long length) {
index++;
values.add(new NCharacterStreamValue(index, reader, length));
return this;
}
public final Parameters addNClob(final NClob c) {
index++;
values.add(new NClobValue(index, c));
return this;
}
public final Parameters addNClob(final Reader reader) {
index++;
values.add(new NClobValue(index, reader));
return this;
}
public final Parameters addNClob(final Reader reader, final long length) {
index++;
values.add(new NClobValue(index, reader, length));
return this;
}
public final Parameters addNString(final String s) {
index++;
values.add((s == null) ? new NullValue(index, SqlType.NVARCHAR) : new NStringValue(index, s));
return this;
}
public final Parameters add(final Object o) {
index++;
values.add(new ObjectValue(index, o));
return this;
}
public final Parameters add(final Object o, final SqlType targetType) {
index++;
values.add(new ObjectValue(index, o, targetType));
return this;
}
public final Parameters add(final Object o, final SqlType targetType, final int scale) {
index++;
values.add(new ObjectValue(index, o, targetType, scale));
return this;
}
/**
 * Appends each element as an untyped object parameter, in order.
 * An empty varargs call is a harmless no-op.
 *
 * NOTE(review): passing an explicitly null array (e.g. {@code (Object[]) null})
 * throws a NullPointerException here — confirm no caller relies on that.
 *
 * @param objs values to bind via {@code setObject}
 * @return this, for chaining
 */
public final Parameters add(final Object... objs) {
    for (Object o : objs) {
        index++;
        values.add(new ObjectValue(index, o));
    }
    return this;
}
public final Parameters add(final Ref r) {
index++;
values.add(new RefValue(index, r));
return this;
}
public final Parameters add(final RowId r) {
index++;
values.add(new RowIdValue(index, r));
return this;
}
public final Parameters add(final short s) {
index++;
values.add(new ShortValue(index, s));
return this;
}
public final Parameters add(final Short s) {
index++;
values.add((s == null) ? new NullValue(index, SqlType.SMALLINT) : new ShortValue(index, s));
return this;
}
public final Parameters add(final SQLXML s) {
index++;
values.add(new SQLXMLValue(index, s));
return this;
}
public final Parameters add(final String s) {
index++;
values.add((s == null) ? new NullValue(index, SqlType.VARCHAR) : new StringValue(index, s));
return this;
}
public final Parameters add(final Time t) {
index++;
values.add((t == null) ? new NullValue(index, SqlType.TIME) : new TimeValue(index, t));
return this;
}
public final Parameters add(final Time t, final Calendar cal) {
index++;
values.add((t == null) ? new NullValue(index, SqlType.TIME) : new TimeValue(index, t, cal));
return this;
}
public final Parameters add(final Timestamp t) {
index++;
values.add((t == null) ? new NullValue(index, SqlType.TIMESTAMP) : new TimestampValue(index, t));
return this;
}
public final Parameters add(final Timestamp t, final Calendar cal) {
index++;
values.add((t == null) ? new NullValue(index, SqlType.TIMESTAMP) : new TimestampValue(index, t, cal));
return this;
}
public final Parameters add(final URL url) {
index++;
values.add(new URLValue(index, url));
return this;
}
/**
 * @return the number of parameters added so far
 */
public final int getSize() {
    return values.getSize();
}
/**
 * Exposes the internal value list, e.g. for binding onto a statement.
 *
 * @return the live backing list of parameter values (not a copy)
 */
public final ListExtractable<Value> getValues() {
    return values;
}
/**
 * Writes a debug representation ("Parameters:" followed by each value)
 * into the supplied extractor.
 */
@Override
public final void extract(final StringExtractor extractor) {
    extractor.append("Parameters:");
    values.extract(extractor);
}
/**
 * Renders this parameter list via the {@code Extractable} contract.
 */
@Override
public final String toString() {
    // Moderately pre-sized buffer; extract() appends the full representation.
    final StringExtractor buffer = new StringExtractor(256);
    this.extract(buffer);
    return buffer.toString();
}
// <><><><><><><><><><><><><>< Private Classes ><><><><><><><><><><><><><>
/**
 * Base type for a single positional SQL parameter. Each subclass captures
 * one value together with the 1-based JDBC index it binds to, and knows how
 * to apply itself to a {@code PreparedStatement}.
 */
abstract class Value implements Extractable {
    // 1-based JDBC parameter index this value binds to.
    protected final int i;
    Value(final int i) {
        this.i = i;
    }
    /**
     * Binds this value at index {@code i} on the given statement.
     *
     * @param statement the prepared statement to bind on
     * @throws SQLException if the driver rejects the bind
     */
    public abstract void setValue(PreparedStatement statement) throws SQLException;
} // End Value
private final class ArrayValue extends Value {
private final Array a;
private ArrayValue(final int i, final Array a) {
super(i);
this.a = a;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
statement.setArray(i, a);
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(a);
}
} // End ArrayValue
/** ASCII stream parameter, with or without an explicit byte length. */
private final class AsciiStreamValue extends Value {
    // Stream length in bytes; negative means "no length supplied".
    private final int length;
    private final InputStream is;
    private AsciiStreamValue(final int i, final InputStream is) {
        super(i);
        this.is = is;
        // -1 (not 0) marks the absent length, so an explicitly supplied
        // zero-byte length is honored instead of being silently ignored.
        this.length = -1;
    }
    private AsciiStreamValue(final int i, final InputStream is, final int length) {
        super(i);
        this.is = is;
        this.length = length;
    }
    @Override
    public final void setValue(final PreparedStatement statement) throws SQLException {
        if (length >= 0) {
            statement.setAsciiStream(i, is, length);
        } else {
            statement.setAsciiStream(i, is);
        }
    }
    @Override
    public final void extract(final StringExtractor extractor) {
        extractor.append(is);
    }
} // End AsciiStreamValue
private final class BigDecimalValue extends Value {
private final BigDecimal b;
private BigDecimalValue(final int i, final BigDecimal b) {
super(i);
this.b = b;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
statement.setBigDecimal(i, b);
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(b);
}
} // End BigDecimalValue
/** Binary stream parameter, with or without an explicit byte length. */
private final class BinaryStreamValue extends Value {
    // Stream length in bytes; negative means "no length supplied".
    private final int length;
    private final InputStream is;
    private BinaryStreamValue(final int i, final InputStream is) {
        super(i);
        this.is = is;
        // -1 (not 0) marks the absent length, so an explicitly supplied
        // zero-byte length is honored instead of being silently ignored.
        this.length = -1;
    }
    private BinaryStreamValue(final int i, final InputStream is, final int length) {
        super(i);
        this.is = is;
        this.length = length;
    }
    @Override
    public final void setValue(final PreparedStatement statement) throws SQLException {
        if (length >= 0) {
            statement.setBinaryStream(i, is, length);
        } else {
            statement.setBinaryStream(i, is);
        }
    }
    @Override
    public final void extract(final StringExtractor extractor) {
        extractor.append(is);
    }
} // End BinaryStreamValue
/** BLOB parameter: either a Blob object or an input stream (optionally with length). */
private final class BlobValue extends Value {
    private final Blob b;
    private final InputStream is;
    // Stream length in bytes; negative means "no length supplied".
    // Only consulted when is != null.
    private final long length;
    private BlobValue(final int i, final Blob b) {
        super(i);
        this.b = b;
        this.is = null;
        this.length = -1L;
    }
    private BlobValue(final int i, final InputStream is) {
        super(i);
        this.b = null;
        this.is = is;
        // -1 (not 0) marks the absent length, so an explicitly supplied
        // zero-byte length is honored instead of being silently ignored.
        this.length = -1L;
    }
    private BlobValue(final int i, final InputStream is, final long length) {
        super(i);
        this.b = null;
        this.is = is;
        this.length = length;
    }
    @Override
    public final void setValue(final PreparedStatement statement) throws SQLException {
        if (is == null) {
            statement.setBlob(i, b);
        } else if (length >= 0) {
            statement.setBlob(i, is, length);
        } else {
            statement.setBlob(i, is);
        }
    }
    @Override
    public final void extract(final StringExtractor extractor) {
        if (is == null) {
            extractor.append(b);
        } else {
            extractor.append(is);
        }
    }
} // End BlobValue
private final class BooleanValue extends Value {
private final boolean b;
private BooleanValue(final int i, final boolean b) {
super(i);
this.b = b;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
statement.setBoolean(i, b);
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(b);
}
} // End BooleanValue
private final class ByteValue extends Value {
private final byte b;
private ByteValue(final int i, final byte b) {
super(i);
this.b = b;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
statement.setByte(i, b);
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(b);
}
} // End ByteValue
private final class ByteArrayValue extends Value {
private final byte[] b;
private ByteArrayValue(final int i, final byte[] b) {
super(i);
this.b = b;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
statement.setBytes(i, b);
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(b);
}
} // End ByteArrayValue
/** Character stream parameter, with or without an explicit character length. */
private final class CharacterStreamValue extends Value {
    // Stream length in characters; negative means "no length supplied".
    private final int length;
    private final Reader r;
    private CharacterStreamValue(final int i, final Reader r) {
        super(i);
        this.r = r;
        // -1 (not 0) marks the absent length, so an explicitly supplied
        // zero-character length is honored instead of being silently ignored.
        this.length = -1;
    }
    private CharacterStreamValue(final int i, final Reader r, final int length) {
        super(i);
        this.r = r;
        this.length = length;
    }
    @Override
    public final void setValue(final PreparedStatement statement) throws SQLException {
        if (length >= 0) {
            statement.setCharacterStream(i, r, length);
        } else {
            statement.setCharacterStream(i, r);
        }
    }
    @Override
    public final void extract(final StringExtractor extractor) {
        extractor.append(r);
    }
} // End CharacterStreamValue
/** CLOB parameter: either a Clob object or a character reader (optionally with length). */
private final class ClobValue extends Value {
    private final Clob c;
    private final Reader reader;
    // Stream length in characters; negative means "no length supplied".
    // Only consulted when reader != null.
    private final long length;
    private ClobValue(final int i, final Clob c) {
        super(i);
        this.c = c;
        this.reader = null;
        this.length = -1L;
    }
    private ClobValue(final int i, final Reader reader) {
        super(i);
        this.c = null;
        this.reader = reader;
        // -1 (not 0) marks the absent length, so an explicitly supplied
        // zero-character length is honored instead of being silently ignored.
        this.length = -1L;
    }
    private ClobValue(final int i, final Reader reader, final long length) {
        super(i);
        this.c = null;
        this.reader = reader;
        this.length = length;
    }
    @Override
    public final void setValue(final PreparedStatement statement) throws SQLException {
        if (reader == null) {
            statement.setClob(i, c);
        } else if (length >= 0) {
            statement.setClob(i, reader, length);
        } else {
            statement.setClob(i, reader);
        }
    }
    @Override
    public final void extract(final StringExtractor extractor) {
        if (reader == null) {
            extractor.append(c);
        } else {
            extractor.append(reader);
        }
    }
} // End ClobValue
private final class DateValue extends Value {
private final Date d;
private final Calendar cal;
private DateValue(final int i, final Date d) {
super(i);
this.d = d;
this.cal = null;
}
private DateValue(final int i, final Date d, final Calendar cal) {
super(i);
this.d = d;
this.cal = cal;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
if (cal == null) {
statement.setDate(i, new java.sql.Date(d.getTime()));
} else {
statement.setDate(i, new java.sql.Date(d.getTime()), cal);
}
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(d);
}
} // End DateValue
private final class DoubleValue extends Value {
private final double d;
private DoubleValue(final int i, final double d) {
super(i);
this.d = d;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
statement.setDouble(i, d);
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(d);
}
} // End DoubleValue
private final class FloatValue extends Value {
private final float f;
private FloatValue(final int i, final float f) {
super(i);
this.f = f;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
statement.setFloat(i, f);
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(f);
}
} // End FloatValue
private final class IntegerValue extends Value {
private final int j;
private IntegerValue(final int i, final int j) {
super(i);
this.j = j;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
statement.setInt(i, j);
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(j);
}
} // End IntegerValue
private final class LongValue extends Value {
private final long l;
private LongValue(final int i, final long l) {
super(i);
this.l = l;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
statement.setLong(i, l);
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(l);
}
} // End LongValue
/** National character stream parameter, with or without an explicit length. */
private final class NCharacterStreamValue extends Value {
    private final Reader reader;
    // Stream length in characters; negative means "no length supplied".
    private final long length;
    private NCharacterStreamValue(final int i, final Reader reader) {
        super(i);
        this.reader = reader;
        // -1 (not 0) marks the absent length, so an explicitly supplied
        // zero-character length is honored instead of being silently ignored.
        this.length = -1L;
    }
    private NCharacterStreamValue(final int i, final Reader reader, final long length) {
        super(i);
        this.reader = reader;
        this.length = length;
    }
    @Override
    public final void setValue(final PreparedStatement statement) throws SQLException {
        if (length >= 0) {
            statement.setNCharacterStream(i, reader, length);
        } else {
            statement.setNCharacterStream(i, reader);
        }
    }
    @Override
    public final void extract(final StringExtractor extractor) {
        extractor.append(reader);
    }
} // End NCharacterStreamValue
/** NCLOB parameter: either an NClob object or a character reader (optionally with length). */
private final class NClobValue extends Value {
    private final NClob c;
    private final Reader reader;
    // Stream length in characters; negative means "no length supplied".
    // Only consulted when reader != null.
    private final long length;
    private NClobValue(final int i, final NClob c) {
        super(i);
        this.c = c;
        this.reader = null;
        this.length = -1L;
    }
    private NClobValue(final int i, final Reader reader) {
        super(i);
        this.c = null;
        this.reader = reader;
        // -1 (not 0) marks the absent length, so an explicitly supplied
        // zero-character length is honored instead of being silently ignored.
        this.length = -1L;
    }
    private NClobValue(final int i, final Reader reader, final long length) {
        super(i);
        this.c = null;
        this.reader = reader;
        this.length = length;
    }
    @Override
    public final void setValue(final PreparedStatement statement) throws SQLException {
        if (reader == null) {
            statement.setNClob(i, c);
        } else if (length >= 0) {
            statement.setNClob(i, reader, length);
        } else {
            statement.setNClob(i, reader);
        }
    }
    @Override
    public final void extract(final StringExtractor extractor) {
        if (reader == null) {
            extractor.append(c);
        } else {
            extractor.append(reader);
        }
    }
} // End NClobValue
private final class NStringValue extends Value {
private final String s;
private NStringValue(final int i, final String s) {
super(i);
this.s = s;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
statement.setNString(i, s);
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(s);
}
} // End NStringValue
private final class NullValue extends Value {
private final SqlType t;
private NullValue(final int i, final SqlType t) {
super(i);
this.t = t;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
statement.setNull(i, t.getCode());
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(t);
}
} // End NullValue
private final class ObjectValue extends Value {
private final Object o;
private final SqlType targetType;
private final int scale;
private ObjectValue(final int i, final Object o) {
super(i);
this.o = o;
this.targetType = null;
this.scale = -1;
}
private ObjectValue(final int i, final Object o, final SqlType targetType) {
super(i);
this.o = o;
this.targetType = targetType;
this.scale = -1;
}
private ObjectValue(final int i, final Object o, final SqlType targetType, final int scale) {
super(i);
this.o = o;
this.targetType = targetType;
this.scale = scale;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
if (targetType == null) {
statement.setObject(i, o);
} else {
if (scale < 0) {
statement.setObject(i, o, targetType.getCode());
} else {
statement.setObject(i, o, targetType.getCode(), scale);
}
}
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(o);
}
} // End ObjectValue
private final class RefValue extends Value {
private final Ref r;
private RefValue(final int i, final Ref r) {
super(i);
this.r = r;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
statement.setRef(i, r);
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(r);
}
} // End RefValue
private final class RowIdValue extends Value {
private final RowId r;
private RowIdValue(final int i, final RowId r) {
super(i);
this.r = r;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
statement.setRowId(i, r);
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(r);
}
} // End RowIdValue
private final class ShortValue extends Value {
private final short s;
private ShortValue(final int i, final short s) {
super(i);
this.s = s;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
statement.setShort(i, s);
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(s);
}
} // End ShortValue
private final class SQLXMLValue extends Value {
private final SQLXML s;
private SQLXMLValue(final int i, final SQLXML s) {
super(i);
this.s = s;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
statement.setSQLXML(i, s);
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(s);
}
} // End SQLXMLValue
private final class StringValue extends Value {
private final String s;
private StringValue(final int i, final String s) {
super(i);
this.s = s;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
statement.setString(i, s);
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(s);
}
} // End StringValue
private final class TimeValue extends Value {
private final Time t;
private final Calendar cal;
private TimeValue(final int i, final Time t) {
super(i);
this.t = t;
this.cal = null;
}
private TimeValue(final int i, final Time t, final Calendar cal) {
super(i);
this.t = t;
this.cal = cal;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
if (cal == null) {
statement.setTime(i, t);
} else {
statement.setTime(i, t, cal);
}
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(t);
}
} // End TimeValue
private final class TimestampValue extends Value {
private final Timestamp t;
private final Calendar cal;
private TimestampValue(final int i, final Timestamp t) {
super(i);
this.t = t;
this.cal = null;
}
private TimestampValue(final int i, final Timestamp t, final Calendar cal) {
super(i);
this.t = t;
this.cal = cal;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
if (cal == null) {
statement.setTimestamp(i, t);
} else {
statement.setTimestamp(i, t, cal);
}
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(t);
}
} // End TimestampValue
private final class URLValue extends Value {
private final URL url;
private URLValue(final int i, final URL url) {
super(i);
this.url = url;
}
@Override
public final void setValue(final PreparedStatement statement) throws SQLException {
statement.setURL(i, url);
}
@Override
public final void extract(final StringExtractor extractor) {
extractor.append(url);
}
} // End URLValue
} // End Parameters
|
package machines.real.warehouse.behaviours.simple;
import commons.tools.LoggerUtil;
import jade.core.behaviours.OneShotBehaviour;
import machines.real.commons.request.WarehouseRequest;
import machines.real.warehouse.WarehouseAgent;
import machines.real.warehouse.WarehouseHal;
/**
* .
*
* @author <a href="mailto:junfeng_pan96@qq.com">junfeng</a>
* @version 1.0.0.0
* @since 1.8
*/
public class ItemImportBehaviour extends OneShotBehaviour {
    // JADE behaviours are Serializable; pin the serialization contract.
    private static final long serialVersionUID = 1L;
    // Hardware abstraction used to physically move the item.
    private final WarehouseHal hal;
    // Warehouse entry position items are imported from.
    private final Integer posIn;
    // Import request carrying the target storage position; may be null.
    private final WarehouseRequest request;

    /**
     * @param whagent owning warehouse agent; supplies the HAL and the entry position
     * @param request import request to execute; a null request makes action() a no-op
     */
    public ItemImportBehaviour(WarehouseAgent whagent, WarehouseRequest request) {
        super(whagent);
        this.hal = whagent.getHal();
        this.posIn = whagent.getPosIn();
        this.request = request;
    }

    /**
     * Moves the item from the entry position to the requested storage
     * position and logs the outcome. Does nothing when no request was given.
     */
    @Override
    public void action() {
        if (request != null) {
            Integer itemPosition = request.getItemPosition();
            // moveItem reports whether the physical transfer succeeded.
            if (hal.moveItem(posIn, itemPosition)) {
                LoggerUtil.hal.info(String.format("Succeed! Import item to %d", itemPosition));
            } else {
                LoggerUtil.hal.info(String.format("Failed! Import item to %d", itemPosition));
            }
        }
    }
}
|
package com.lijingbo.customview_viewpager.activity;
import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import com.lijingbo.customview_viewpager.R;
/**
 * Entry activity: inflates the main layout hosting the custom ViewPager demo.
 */
public class MainActivity extends AppCompatActivity {
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
    }
}
|
/*
* Copyright (C) 2016 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.syndesis.connector.email.model;
import java.util.Objects;
import io.syndesis.connector.email.EMailConstants;
/**
 * Mutable bean describing a single e-mail message: the addressing fields,
 * the subject, and an opaque content payload.
 */
public final class EMailMessageModel implements EMailConstants {

    private String subject;
    private String from;
    private String to;
    private String cc;
    private String bcc;

    // Content stays an Object (it may be a MimeMultiPart) so the original
    // mail structure is preserved rather than flattened to plain text.
    private Object content;

    public String getSubject() {
        return this.subject;
    }

    public void setSubject(String subject) {
        this.subject = subject;
    }

    public String getFrom() {
        return this.from;
    }

    public void setFrom(String from) {
        this.from = from;
    }

    public String getTo() {
        return this.to;
    }

    public void setTo(String to) {
        this.to = to;
    }

    public String getCc() {
        return this.cc;
    }

    public void setCc(String cc) {
        this.cc = cc;
    }

    public String getBcc() {
        return this.bcc;
    }

    public void setBcc(String bcc) {
        this.bcc = bcc;
    }

    public Object getContent() {
        return this.content;
    }

    public void setContent(Object content) {
        this.content = content;
    }

    @Override
    public int hashCode() {
        // Objects.hash applies the same 31-based accumulation (seed 1) as the
        // previous hand-rolled loop, in the same field order, so hash values
        // are unchanged.
        return Objects.hash(bcc, cc, subject, content, to, from);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof EMailMessageModel)) {
            return false;
        }
        final EMailMessageModel that = (EMailMessageModel) obj;
        return Objects.equals(bcc, that.bcc)
            && Objects.equals(cc, that.cc)
            && Objects.equals(subject, that.subject)
            && Objects.equals(content, that.content)
            && Objects.equals(from, that.from)
            && Objects.equals(to, that.to);
    }

    @Override
    public String toString() {
        return "EMailMessageModel [subject=" + subject + ", from=" + from + ", to=" + to + ", cc=" + cc + ", bcc=" + bcc
            + ", content=" + content + "]";
    }
}
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.procedure;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@Category({MasterTests.class, LargeTests.class})
public class TestWALProcedureStoreOnHDFS {
private static final Log LOG = LogFactory.getLog(TestWALProcedureStoreOnHDFS.class);
protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private WALProcedureStore store;
// Listener registered on the store in setup(): when the WAL layer signals an
// unrecoverable failure, stop the store (abort=true) so the insert loops in
// the tests below terminate.
private ProcedureStore.ProcedureStoreListener stopProcedureListener = new ProcedureStore.ProcedureStoreListener() {
    @Override
    public void postSync() {}
    @Override
    public void abortProcess() {
        LOG.fatal("Abort the Procedure Store");
        store.stop(true);
    }
};
/**
 * Tunes HDFS replication and WAL-roll settings so that low-replication
 * conditions are detected and acted upon quickly by the tests below.
 */
private static void initConfig(Configuration conf) {
    conf.setInt("dfs.replication", 3);
    conf.setInt("dfs.namenode.replication.min", 3);
    // increase the value for slow test-env
    conf.setInt(WALProcedureStore.WAIT_BEFORE_ROLL_CONF_KEY, 1000);
    conf.setInt(WALProcedureStore.ROLL_RETRIES_CONF_KEY, 10);
    conf.setInt(WALProcedureStore.MAX_SYNC_FAILURE_ROLL_CONF_KEY, 10);
}
/**
 * Starts a 3-datanode mini DFS cluster and a WAL procedure store on it.
 * Called manually by each test (after initConfig) rather than via a
 * Before-annotation, so every test can adjust the configuration first.
 */
public void setup() throws Exception {
    MiniDFSCluster dfs = UTIL.startMiniDFSCluster(3);
    Path logDir = new Path(new Path(dfs.getFileSystem().getUri()), "/test-logs");
    store = ProcedureTestingUtility.createWalStore(UTIL.getConfiguration(), logDir);
    store.registerListener(stopProcedureListener);
    store.start(8);
    store.recoverLease();
}
/**
 * Stops the store, deletes its WAL directory, and shuts the mini cluster
 * down. Called manually from a finally block in each test; shutdown
 * failures are only logged so they cannot mask the real test outcome.
 */
public void tearDown() throws Exception {
    store.stop(false);
    UTIL.getDFSCluster().getFileSystem().delete(store.getWALDir(), true);
    try {
        UTIL.shutdownMiniCluster();
    } catch (Exception e) {
        LOG.warn("failure shutting down cluster", e);
    }
}
/**
 * Kills one of the three datanodes and keeps inserting procedures until the
 * store aborts; the expected RuntimeException is thrown by store.insert()
 * once the abort listener has stopped the store.
 */
@Test(timeout=60000, expected=RuntimeException.class)
public void testWalAbortOnLowReplication() throws Exception {
    initConfig(UTIL.getConfiguration());
    setup();
    try {
        assertEquals(3, UTIL.getDFSCluster().getDataNodes().size());
        LOG.info("Stop DataNode");
        UTIL.getDFSCluster().stopDataNode(0);
        assertEquals(2, UTIL.getDFSCluster().getDataNodes().size());
        store.insert(new TestProcedure(1, -1), null);
        // Keep inserting until the low-replication abort stops the store.
        for (long i = 2; store.isRunning(); ++i) {
            assertEquals(2, UTIL.getDFSCluster().getDataNodes().size());
            store.insert(new TestProcedure(i, -1), null);
            Thread.sleep(100);
        }
        assertFalse(store.isRunning());
        // Fixed typo in the failure message ("exeption" -> "exception").
        fail("The store.insert() should throw an exception");
    } finally {
        tearDown();
    }
}
/**
 * Same low-replication abort scenario, but with more writers than sync
 * slots queued up (slowed down by a 2s post-sync sleep). Verifies that at
 * least the in-flight writers — but not all of them — observe the
 * RuntimeException when the store aborts.
 */
@Test(timeout=60000)
public void testWalAbortOnLowReplicationWithQueuedWriters() throws Exception {
    initConfig(UTIL.getConfiguration());
    setup();
    try {
        assertEquals(3, UTIL.getDFSCluster().getDataNodes().size());
        store.registerListener(new ProcedureStore.ProcedureStoreListener() {
            @Override
            public void postSync() {
                // Slow each sync down so writer threads pile up in the queue.
                Threads.sleepWithoutInterrupt(2000);
            }
            @Override
            public void abortProcess() {}
        });
        final AtomicInteger reCount = new AtomicInteger(0);
        Thread[] thread = new Thread[store.getNumThreads() * 2 + 1];
        for (int i = 0; i < thread.length; ++i) {
            final long procId = i + 1;
            thread[i] = new Thread() {
                @Override  // was missing on the anonymous class's run()
                public void run() {
                    try {
                        LOG.debug("[S] INSERT " + procId);
                        store.insert(new TestProcedure(procId, -1), null);
                        LOG.debug("[E] INSERT " + procId);
                    } catch (RuntimeException e) {
                        reCount.incrementAndGet();
                        LOG.debug("[F] INSERT " + procId + ": " + e.getMessage());
                    }
                }
            };
            thread[i].start();
        }
        Thread.sleep(1000);
        LOG.info("Stop DataNode");
        UTIL.getDFSCluster().stopDataNode(0);
        assertEquals(2, UTIL.getDFSCluster().getDataNodes().size());
        for (int i = 0; i < thread.length; ++i) {
            thread[i].join();
        }
        assertFalse(store.isRunning());
        // At least one batch of writers must have failed, but the earliest
        // ones should have completed before the datanode was stopped.
        assertTrue(reCount.toString(), reCount.get() >= store.getNumThreads() &&
            reCount.get() < thread.length);
    } finally {
        tearDown();
    }
}
/**
 * With min-replication relaxed to 1, the store should survive rolling
 * datanode restarts by rolling its WAL instead of aborting; the store must
 * still be running after ~100 inserts interleaved with restarts.
 */
@Test(timeout=60000)
public void testWalRollOnLowReplication() throws Exception {
    initConfig(UTIL.getConfiguration());
    UTIL.getConfiguration().setInt("dfs.namenode.replication.min", 1);
    setup();
    try {
        int dnCount = 0;
        store.insert(new TestProcedure(1, -1), null);
        UTIL.getDFSCluster().restartDataNode(dnCount);
        for (long i = 2; i < 100; ++i) {
            store.insert(new TestProcedure(i, -1), null);
            // Wait for the restarted node to rejoin before continuing.
            waitForNumReplicas(3);
            Thread.sleep(100);
            if ((i % 30) == 0) {
                LOG.info("Restart Data Node");
                // Cycle through the three datanodes round-robin.
                UTIL.getDFSCluster().restartDataNode(++dnCount % 3);
            }
        }
        assertTrue(store.isRunning());
    } finally {
        tearDown();
    }
}
/**
 * Blocks until the mini DFS cluster reports at least {@code numReplicas}
 * DataNodes and every reported DataNode is fully started.
 *
 * @param numReplicas minimum number of DataNodes to wait for
 * @throws Exception if interrupted while sleeping
 */
public void waitForNumReplicas(int numReplicas) throws Exception {
  while (UTIL.getDFSCluster().getDataNodes().size() < numReplicas) {
    Thread.sleep(100);
  }
  // A single pass is sufficient: each DataNode is polled until fully started.
  // (The original wrapped this in an extra numReplicas-times outer loop whose
  // index was never used, repeating the identical full check redundantly.)
  for (DataNode dn : UTIL.getDFSCluster().getDataNodes()) {
    while (!dn.isDatanodeFullyStarted()) {
      Thread.sleep(100);
    }
  }
}
}
|
package com.epam.community.z.spring.testing.importer;
import com.epam.community.z.spring.testing.post.Post;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.cloud.contract.stubrunner.StubFinder;
import org.springframework.cloud.contract.stubrunner.spring.AutoConfigureStubRunner;
import org.springframework.cloud.contract.stubrunner.spring.StubRunnerPort;
import org.springframework.cloud.contract.stubrunner.spring.StubRunnerProperties;
import org.springframework.test.context.junit.jupiter.SpringJUnitConfig;
import org.springframework.test.util.ReflectionTestUtils;
import org.springframework.web.client.RestTemplate;
import java.time.LocalDate;
import java.util.Collection;
import static org.junit.jupiter.api.Assertions.*;
@SpringJUnitConfig(classes = {
    PostImporter.class,
    RestTemplate.class
})
@AutoConfigureStubRunner(
    stubsMode = StubRunnerProperties.StubsMode.LOCAL,
    generateStubs = true,
    classifier = "",
    failOnNoStubs = true,
    ids = {
        "com.epam.community.z:external-post-generator-contract"
    }
)
class PostImporterContractStubTest {

    @Autowired
    private PostImporter importer;

    @Autowired
    private StubFinder stubFinder;

    /** Points the importer at the locally started contract stub before each test. */
    @BeforeEach
    void init() {
        final String url =
            stubFinder.findStubUrl("external-post-generator-contract").toString() + "/post-range";
        ReflectionTestUtils.setField(importer, "importerAddress", url);
    }

    /** Sanity check that the Spring context wires the importer bean. */
    @Test
    void check_contextStarts() {
        assertNotNull(importer);
    }

    /** Verifies the importer issues the contract-defined request and maps the response. */
    @Test
    void importer_sendsRequest() {
        // Fixed: the original used leading-zero literals (01), which Java parses
        // as octal; here 01 == 1 so behavior is unchanged, but 08/09 would not
        // even compile. Also made method visibility consistently package-private,
        // as JUnit 5 recommends.
        final Collection<Post> posts = importer.importPosts(
            LocalDate.of(2020, 1, 1),
            LocalDate.of(2020, 12, 12)
        );
        assertNotNull(posts);
        assertFalse(posts.isEmpty());
    }
}
|
/*
* Copyright 2021 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thoughtworks.go.apiv1.internalenvironments.representers;
import com.thoughtworks.go.api.base.OutputWriter;
import com.thoughtworks.go.config.EnvironmentConfig;
import java.util.List;
import java.util.function.Function;
public class MergedEnvironmentsRepresenter {
    /**
     * Renders the given merged environments as a HAL-style JSON document:
     * an "_embedded" object containing an "environments" list, with each
     * entry delegated to {@link MergedEnvironmentRepresenter}.
     */
    public static void toJSON(OutputWriter outputWriter, List<EnvironmentConfig> allMergedEnvironments) {
        outputWriter.addChild("_embedded", embedded ->
            embedded.addChildList("environments", listWriter ->
                allMergedEnvironments.forEach(env ->
                    listWriter.addChild(writer -> MergedEnvironmentRepresenter.toJSON(writer, env)))));
    }
}
|
// Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
package com.twitter.intellij.pants.service.project.model;
import com.intellij.openapi.util.text.StringUtil;
import com.intellij.util.containers.ContainerUtil;
import com.twitter.intellij.pants.model.PantsSourceType;
import com.twitter.intellij.pants.util.PantsUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import java.util.*;
/**
 * Deserialized description of a single Pants target: its libraries,
 * dependency targets, source roots, and type information.
 */
public class TargetInfo {

  /** No-arg constructor for deserialization frameworks. */
  public TargetInfo() {
  }

  public TargetInfo(
    Set<String> libraries,
    Set<String> targets,
    Set<SourceRoot> roots,
    String target_type,
    Boolean is_code_gen
  ) {
    this.libraries = libraries;
    this.targets = targets;
    this.roots = roots;
    this.target_type = target_type;
    this.is_code_gen = is_code_gen;
  }

  /**
   * List of libraries. Just names.
   */
  protected Set<String> libraries;
  /**
   * List of dependencies.
   */
  protected Set<String> targets;
  /**
   * List of source roots.
   */
  protected Set<SourceRoot> roots;
  /**
   * Target type.
   */
  protected String target_type;
  /**
   * Pants target type.
   * NOTE(review): never assigned in this class; presumably populated by a
   * deserializer — confirm before relying on it.
   */
  private String pants_target_type = null;
  private Boolean is_code_gen;

  public Set<String> getLibraries() {
    return libraries;
  }

  public void setLibraries(Set<String> libraries) {
    this.libraries = libraries;
  }

  public Set<String> getTargets() {
    return targets;
  }

  public void setTargets(Set<String> targets) {
    this.targets = targets;
  }

  public Set<SourceRoot> getRoots() {
    return roots;
  }

  public void setRoots(Set<SourceRoot> roots) {
    this.roots = roots;
  }

  @Nullable
  public String getTargetType() {
    return target_type;
  }

  // Fixed: the original annotated this void setter @Nullable, which is
  // meaningless on a method with no return value.
  public void setTargetType(@NotNull String target_type) {
    this.target_type = target_type;
  }

  @Nullable
  public String getInternalPantsTargetType() {
    return pants_target_type;
  }

  /** True when this target has no libraries, no dependencies, and no roots. */
  public boolean isEmpty() {
    return libraries.isEmpty() && targets.isEmpty() && roots.isEmpty();
  }

  /**
   * @return whether this target is code-generated; {@code false} when the
   *     flag was never populated (the original unboxed a null Boolean and
   *     threw NullPointerException in that case).
   */
  public boolean isCodeGen() {
    return is_code_gen != null && is_code_gen;
  }

  public boolean isScalaTarget() {
    return StringUtil.equals("scala_library", getInternalPantsTargetType());
  }

  public boolean isAnnotationProcessorTarget() {
    return StringUtil.equals("annotation_processor", getInternalPantsTargetType());
  }

  /** True when any library id starts with the Scala standard-library coordinate. */
  public boolean hasScalaLib() {
    for (String libraryId : libraries) {
      if (StringUtil.startsWith(libraryId, "org.scala-lang:scala-library")) {
        return true;
      }
    }
    return false;
  }

  public boolean dependOn(@NotNull String targetName) {
    return targets.contains(targetName);
  }

  @NotNull
  public PantsSourceType getSourcesType() {
    return PantsUtil.getSourceTypeForTargetType(getTargetType());
  }

  public void addDependency(@NotNull String targetName) {
    getTargets().add(targetName);
  }

  public boolean removeDependency(@NotNull String targetName) {
    return getTargets().remove(targetName);
  }

  /** Replaces {@code targetName} with {@code newTargetName} if it was present. */
  public void replaceDependency(@NotNull String targetName, @NotNull String newTargetName) {
    if (removeDependency(targetName)) {
      addDependency(newTargetName);
    }
  }

  /** Set-intersection of two infos; keeps this info's type and code-gen flag. */
  public TargetInfo intersect(@NotNull TargetInfo other) {
    final Collection<String> libs = ContainerUtil.intersection(getLibraries(), other.getLibraries());
    final Collection<String> targets = ContainerUtil.intersection(getTargets(), other.getTargets());
    final Collection<SourceRoot> roots = ContainerUtil.intersection(getRoots(), other.getRoots());
    return new TargetInfo(
      new HashSet<String>(libs),
      new HashSet<String>(targets),
      new HashSet<SourceRoot>(roots),
      getTargetType(),
      is_code_gen
    );
  }

  /** Set-union of two infos; keeps this info's type and code-gen flag. */
  public TargetInfo union(@NotNull TargetInfo other) {
    return new TargetInfo(
      ContainerUtil.union(getLibraries(), other.getLibraries()),
      ContainerUtil.union(getTargets(), other.getTargets()),
      ContainerUtil.union(getRoots(), other.getRoots()),
      getTargetType(),
      is_code_gen
    );
  }

  @Override
  public String toString() {
    return "TargetInfo{" +
      "libraries=" + libraries +
      ", targets=" + targets +
      ", roots=" + roots +
      ", target_type='" + target_type + '\'' +
      ", is_code_gen=" + is_code_gen +
      '}';
  }
}
|
/*
 SPECIAL PERMUTATION
 A special permutation is a permutation of n numbers such that there
 are at least (n - k) indices at which the value equals the index
 of that number (i.e. at least n - k fixed points).
*/
import java.util.Scanner;
class Special_Permutation {
    // Memoized derangement counts: dearrangement[i] = D(i) (mod `mod`),
    // where D(i) counts permutations of i elements with no fixed point.
    static int dearrangement[] = new int[10000];
    public static int mod = 1000000007;

    /**
     * Computes D(number), the derangement count, modulo {@code mod},
     * using the recurrence D(i) = (i - 1) * (D(i-1) + D(i-2)).
     */
    public static int count(int number) {
        dearrangement[0] = 1;
        dearrangement[1] = 0;
        dearrangement[2] = 1;
        for (int i = 3; i <= number; i++) {
            // Fixed: widen to long and reduce mod on every step. The original
            // multiplied unreduced ints, which overflows once derangement
            // counts grow large (D(13) already exceeds Integer.MAX_VALUE).
            long value = (long) (i - 1)
                * ((dearrangement[i - 1] + dearrangement[i - 2]) % mod);
            dearrangement[i] = (int) (value % mod);
        }
        return dearrangement[number];
    }

    /**
     * Computes the binomial coefficient C(n, r) modulo {@code mod} using a
     * single-row Pascal triangle.
     *
     * @return C(n, r) mod {@code mod}, or -1 when n &lt; r
     */
    public static int nCr(int n, int r, int mod) {
        if (n < r) {
            return -1;
        }
        // One row of Pascal's triangle, built in place.
        int Pascal[] = new int[r + 1];
        Pascal[0] = 1;
        for (int i = 1; i <= r; i++) {
            Pascal[i] = 0;
        }
        // Use the identity nCr = (n-1)C(r) + (n-1)C(r-1); iterating j
        // downward lets a single row serve as both the old and new row.
        for (int i = 1; i <= n; i++) {
            int k = (i < r) ? (i) : (r);
            for (int j = k; j > 0; j--) {
                // No long needed: both operands are < mod, so the sum fits in int.
                Pascal[j] = (Pascal[j] + Pascal[j - 1]) % mod;
            }
        }
        return Pascal[r];
    }

    /**
     * Counts permutations of n numbers with at least (n - k) fixed points:
     * sum over i in [n - k, n] of C(n, i) * D(n - i), modulo {@code mod}.
     */
    public static int special_permutation(int n, int k) {
        long ans = 0;
        for (int i = n - k; i <= n; i++) {
            // Choose the i fixed points, then derange the remaining n - i values.
            // Fixed: accumulate in long and reduce mod each step; the original
            // summed raw int products with no mod, overflowing for larger n.
            ans = (ans + (long) nCr(n, i, mod) * count(n - i)) % mod;
        }
        return (int) ans;
    }

    public static void main(String args[]) {
        int number, k;
        Scanner s = new Scanner(System.in);
        number = s.nextInt();
        k = s.nextInt();
        int special = special_permutation(number, k);
        System.out.print("The number of special permutations is " + special);
    }
}
/*
INPUT :
n = 7
k = 3
OUTPUT :
The number of special permutations is 92
*/
|
package org.finos.waltz.service.workflow;
import org.finos.waltz.model.EntityKind;
import org.immutables.value.Value;
/**
* This is a friendly entity ref for users to declare in their scripts
* i.e. ASSESSMENT_DEFINITION, LEGAL_HOLD rather than ASSESSMENT_DEFINITION, 27
*/
@Value.Immutable
public abstract class ContextVariableReference {

    public abstract EntityKind kind();

    public abstract String externalId();

    /**
     * Convenience factory: builds an immutable reference from an entity kind
     * and the entity's external identifier.
     */
    public static ContextVariableReference mkVarRef(EntityKind kind,
                                                    String externalId) {
        return ImmutableContextVariableReference
                .builder()
                .externalId(externalId)
                .kind(kind)
                .build();
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.streams.processor.internals;
/**
 * Immutable test stub of {@link RecordContext} that simply echoes back the
 * record coordinates it was constructed with.
 */
public class RecordContextStub implements RecordContext {

    private final long offset;
    private final long timestamp;
    private final int partition;
    private final String topic;

    /** Creates a stub with sentinel values: -1 offset/timestamp/partition, empty topic. */
    public RecordContextStub() {
        this(-1, -1, -1, "");
    }

    /** Creates a stub carrying the given coordinates verbatim. */
    public RecordContextStub(final long offset, final long timestamp, final int partition, final String topic) {
        this.offset = offset;
        this.timestamp = timestamp;
        this.partition = partition;
        this.topic = topic;
    }

    @Override
    public long offset() { return offset; }

    @Override
    public long timestamp() { return timestamp; }

    @Override
    public int partition() { return partition; }

    @Override
    public String topic() { return topic; }
}
|
package com.nawin.androidmvparchitecture.data.remote;
import okhttp3.OkHttpClient;
/**
* Created by nawin on 6/13/17.
*/
/** Provides networking singletons for the remote data layer. */
public class DataModule {
    /** Base endpoint of the sample MVP REST API. */
    public static final String BASE_URL = "https://androidragger.000webhostapp.com/mvp_android/api.php/";

    /** Creates an OkHttp client with default configuration. */
    public static OkHttpClient getHttpClient() {
        OkHttpClient.Builder builder = new OkHttpClient.Builder();
        return builder.build();
    }
}
|
package org.folio.inventory.domain;
/** Immutable value object describing a holdings record within an instance. */
public class Holding {
  /** Identifier of this holdings record. */
  public final String id;
  /** Identifier of the instance this holding belongs to. */
  public final String instanceId;
  /** Identifier of the holding's permanent location. */
  public final String permanentLocationId;

  /**
   * Creates an immutable holdings record; all values are stored as given
   * (no validation, nulls permitted).
   */
  public Holding(String id, String instanceId, String permanentLocationId) {
    this.id = id;
    this.instanceId = instanceId;
    this.permanentLocationId = permanentLocationId;
  }
}
|
package com.water.scrollscreenshot.task;
import android.content.ContentResolver;
import android.content.ContentValues;
import android.content.Context;
import android.content.res.Resources;
import android.graphics.Bitmap;
import android.media.ExifInterface;
import android.net.Uri;
import android.os.AsyncTask;
import android.os.Build;
import android.os.Environment;
import android.os.ParcelFileDescriptor;
import android.provider.MediaStore;
import android.provider.MediaStore.MediaColumns;
import android.text.format.DateUtils;
import com.water.scrollscreenshot.listener.TaskListener;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.text.SimpleDateFormat;
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Date;
import java.util.Objects;
import java.util.UUID;
/**
 * Background task that writes a screenshot bitmap into the MediaStore
 * Screenshots collection (with EXIF metadata), publishes it, and reports the
 * resulting content Uri to an optional {@link TaskListener}.
 */
public class SaveImageTask extends AsyncTask<Void, Void, Uri> {
    private static final String SCREENSHOT_FILE_NAME_TEMPLATE = "Screenshot_%s.png";
    private static final String SCREENSHOT_ID_TEMPLATE = "Screenshot_%s";
    // NOTE(review): unused in this class; possibly referenced elsewhere — kept as-is.
    private static final String SCREENSHOT_SHARE_SUBJECT_TEMPLATE = "Screenshot (%s)";

    private final Context mContext;
    private Bitmap mBitmap;
    private final String mImageFileName;
    private final long mImageTime;
    private final String mScreenshotId;
    private TaskListener mListener;

    /**
     * @param context used to obtain the ContentResolver for MediaStore access
     * @param bitmap  the screenshot to persist; recycled if the task is cancelled
     */
    public SaveImageTask(Context context, Bitmap bitmap) {
        mContext = context;
        mBitmap = bitmap;
        mImageTime = System.currentTimeMillis();
        String imageDate = new SimpleDateFormat("yyyyMMdd-HHmmss").format(new Date(mImageTime));
        mImageFileName = String.format(SCREENSHOT_FILE_NAME_TEMPLATE, imageDate);
        mScreenshotId = String.format(SCREENSHOT_ID_TEMPLATE, UUID.randomUUID());
    }

    /**
     * Writes the bitmap as a pending MediaStore entry, attaches EXIF metadata,
     * then publishes it by clearing IS_PENDING.
     *
     * @return the published content Uri, or {@code null} on failure or cancellation
     */
    @Override
    protected Uri doInBackground(Void... voids) {
        if (isCancelled()) {
            return null;
        }
        Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
        ContentResolver resolver = mContext.getContentResolver();
        try {
            // Stage the screenshot as a pending entry so other apps do not see
            // it until it is fully written; the expiry reclaims abandoned entries.
            final ContentValues values = new ContentValues();
            values.put(MediaColumns.RELATIVE_PATH, Environment.DIRECTORY_PICTURES
                    + File.separator + Environment.DIRECTORY_SCREENSHOTS);
            values.put(MediaColumns.DISPLAY_NAME, mImageFileName);
            values.put(MediaColumns.MIME_TYPE, "image/png");
            values.put(MediaColumns.DATE_ADDED, mImageTime / 1000);
            values.put(MediaColumns.DATE_MODIFIED, mImageTime / 1000);
            values.put(MediaColumns.DATE_EXPIRES, (mImageTime + DateUtils.DAY_IN_MILLIS) / 1000);
            values.put(MediaColumns.IS_PENDING, 1);
            final Uri uri = resolver.insert(MediaStore.Images.Media.EXTERNAL_CONTENT_URI, values);
            if (uri == null) {
                // Fixed: insert() may return null (e.g. storage unavailable).
                // The original then NPE'd on openOutputStream(uri) and again on
                // resolver.delete(uri, ...) in the rollback path.
                return null;
            }
            try {
                // First, write the actual pixel data for our screenshot.
                try (OutputStream out = resolver.openOutputStream(uri)) {
                    if (!mBitmap.compress(Bitmap.CompressFormat.PNG, 100, out)) {
                        throw new IOException("Failed to compress");
                    }
                }
                // Next, write EXIF metadata to help index the screenshot.
                try (ParcelFileDescriptor pfd = resolver.openFile(uri, "rw", null)) {
                    final ExifInterface exif = new ExifInterface(pfd.getFileDescriptor());
                    exif.setAttribute(ExifInterface.TAG_SOFTWARE,
                            "Android " + Build.DISPLAY);
                    exif.setAttribute(ExifInterface.TAG_IMAGE_WIDTH,
                            Integer.toString(mBitmap.getWidth()));
                    exif.setAttribute(ExifInterface.TAG_IMAGE_LENGTH,
                            Integer.toString(mBitmap.getHeight()));
                    final ZonedDateTime time = ZonedDateTime.ofInstant(
                            Instant.ofEpochMilli(mImageTime), ZoneId.systemDefault());
                    exif.setAttribute(ExifInterface.TAG_DATETIME_ORIGINAL,
                            DateTimeFormatter.ofPattern("yyyy:MM:dd HH:mm:ss").format(time));
                    exif.setAttribute(ExifInterface.TAG_SUBSEC_TIME_ORIGINAL,
                            DateTimeFormatter.ofPattern("SSS").format(time));
                    if (Objects.equals(time.getOffset(), ZoneOffset.UTC)) {
                        exif.setAttribute(ExifInterface.TAG_OFFSET_TIME_ORIGINAL, "+00:00");
                    } else {
                        exif.setAttribute(ExifInterface.TAG_OFFSET_TIME_ORIGINAL,
                                DateTimeFormatter.ofPattern("XXX").format(time));
                    }
                    exif.saveAttributes();
                }
                // Everything went well above, publish it!
                values.clear();
                values.put(MediaColumns.IS_PENDING, 0);
                values.putNull(MediaColumns.DATE_EXPIRES);
                resolver.update(uri, values, null, null);
                return uri;
            } catch (Exception e) {
                // Roll back the pending entry; uri is known non-null here.
                resolver.delete(uri, null);
                throw e;
            }
        } catch (Exception e) {
            // Best-effort save: swallow and signal failure with a null Uri.
            // Fixed: the original still returned the (already deleted) uri
            // after the rollback above, handing callers a dangling Uri.
            return null;
        }
    }

    /**
     * Releases the bitmap if the task is cancelled before completion; the
     * listener is not notified in that case.
     */
    @Override
    protected void onCancelled(Uri uri) {
        if (mBitmap != null) {
            mBitmap.recycle();
            mBitmap = null;
        }
    }

    /** Delivers the resulting Uri (possibly null on failure) to the listener. */
    @Override
    protected void onPostExecute(Uri uri) {
        super.onPostExecute(uri);
        if (mListener != null) {
            mListener.onComplete(uri);
        }
    }

    public void setListener(TaskListener listener) {
        mListener = listener;
    }
}
|
/*
************************************************************************************
* Copyright (C) 2001-2011 encuestame: system online surveys Copyright (C) 2011
* encuestame Development Team.
* Licensed under the Apache Software License version 2.0
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
************************************************************************************
*/
package org.encuestame.persistence.dao.imp;
import java.util.List;
import org.encuestame.persistence.dao.IProjectDao;
import org.encuestame.persistence.domain.Attachment;
import org.encuestame.persistence.domain.Project;
import org.encuestame.persistence.domain.security.Account;
import org.hibernate.HibernateException;
import org.hibernate.SessionFactory;
import org.hibernate.criterion.DetachedCriteria;
import org.hibernate.criterion.Restrictions;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.support.DataAccessUtils;
import org.springframework.stereotype.Repository;
/**
 * Hibernate-backed DAO for {@link Project} entities and their {@link Attachment}s.
 * @author Picado, Juan Carlos juanATencuestame.org
 * @since May 26, 2009
 * @version $Id$
 */
@Repository("projectDaoImp")
public class ProjectDaoImp extends AbstractHibernateDaoSupport implements IProjectDao {
/**
 * Constructor injection of the Hibernate {@link SessionFactory}.
 */
@Autowired
public ProjectDaoImp(SessionFactory sessionFactory) {
setSessionFactory(sessionFactory);
}
/**
 * Find all projects.
 * @return list of every {@link Project}
 * @throws HibernateException hibernate exception
 */
@SuppressWarnings("unchecked")
public List findAll() throws HibernateException {
return getHibernateTemplate().find("from Project");
}
/**
 * Find Projects by {@link Account} id.
 * @param userId user id.
 * @return list of projects owned by that account.
 */
@SuppressWarnings("unchecked")
public List findProjectsByUserID(final Long userId) throws HibernateException{
return getHibernateTemplate().findByNamedParam("from Project where users.id = :userId", "userId", userId);
}
/**
 * Retrieve project by id.
 * @param projectId project id
 * @return {@link Project}, or {@code null} when no row matches
 * @throws HibernateException hibernate exception
 */
public Project getProjectbyId(Long projectId) throws HibernateException {
return (Project) getHibernateTemplate().get(Project.class, projectId);
}
/**
 * Get Projects By Location Id.
 * NOTE(review): this method runs an empty query string and appears
 * unfinished — see the commented-out draft below; confirm before use.
 * @return list of projects.
 * @throws HibernateException HibernateException
 */
@SuppressWarnings("unchecked")
public List getProjectByLocationId() throws HibernateException{
/* final String queryLocProject = "FROM Projects where"
return getHibernateTemplate().fin
final String queryLocation = "FROM CatLocation WHERE tidtype.id =?";*/
return getHibernateTemplate().find("");
}
/**
 * Get Attachment by Id.
 * @param attachmentId attachment id
 * @return matching {@link Attachment}, or {@code null} when none exists
 */
@SuppressWarnings("unchecked")
public Attachment getAttachmentbyId(final Long attachmentId){
final DetachedCriteria criteria = DetachedCriteria.forClass(Attachment.class);
criteria.add(Restrictions.eq("attachmentId", attachmentId));
return (Attachment) DataAccessUtils.uniqueResult(getHibernateTemplate().findByCriteria(criteria));
}
/**
 * Get Attachment List by Project Id.
 * @param projectId project id
 * @return attachments associated with the project
 */
@SuppressWarnings("unchecked")
public List getAttachmentsListbyProject(final Long projectId){
final DetachedCriteria criteria = DetachedCriteria.forClass(Attachment.class);
criteria.add(Restrictions.eq("projectAttachment.proyectId", projectId));
return getHibernateTemplate().findByCriteria(criteria);
}
/**
 * Get Attachment by Name.
 * @param filename attachment file name
 * @return matching {@link Attachment}, or {@code null} when none exists
 */
@SuppressWarnings("unchecked")
public Attachment getAttachmentbyName(final String filename){
final DetachedCriteria criteria = DetachedCriteria.forClass(Attachment.class);
criteria.add(Restrictions.eq("filename", filename));
return (Attachment) DataAccessUtils.uniqueResult(getHibernateTemplate().findByCriteria(criteria));
}
}
|
package codemeans.shopify4j.rest.admin.model.discounts;
import java.util.List;
import lombok.Data;
import lombok.experimental.Accessors;
/**
* @author: yuanwq
* @date: 2021-01-19
*/
/** Wrapper for a collection of price rules returned by the Shopify REST API. */
@Data
@Accessors(chain = true)
public class PriceRuleList {
// The deserialized price rules; Lombok generates getter/chained setter.
private List<PriceRule> priceRules;
}
|
package br.com.agateownz.foodsocial.modules.hashtag.dto.response;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
/** Response DTO exposing a hashtag's id and text. */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class HashtagResponse {
// Database identifier of the hashtag.
private Long id;
// Hashtag text. NOTE(review): presumably stored without the leading '#' — confirm against the mapper.
private String value;
}
|
package org.javaunit.autoparams.generator;
import java.beans.ConstructorProperties;
import java.lang.reflect.Constructor;
import java.lang.reflect.Modifier;
import java.lang.reflect.Parameter;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.lang.reflect.TypeVariable;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Stream;
import org.javaunit.autoparams.Builder;
/**
 * Generates complex (constructor-built) objects: picks a public constructor,
 * recursively generates each constructor argument through the generation
 * context, and invokes the constructor reflectively. Handles both plain
 * classes and parameterized (generic) types.
 */
final class ComplexObjectGenerator implements ObjectGenerator {
@Override
public ObjectContainer generate(ObjectQuery query, ObjectGenerationContext context) {
// Dispatch on the reflective type of the query; anything that is neither a
// Class nor a ParameterizedType is unsupported and yields EMPTY.
if (query.getType() instanceof Class<?>) {
return generateNonGeneric((Class<?>) query.getType(), context);
} else if (query.getType() instanceof ParameterizedType) {
return generateGeneric((ParameterizedType) query.getType(), context);
} else {
return ObjectContainer.EMPTY;
}
}
// Builds an instance of a non-generic class; abstract types cannot be
// instantiated and yield EMPTY.
private ObjectContainer generateNonGeneric(Class<?> type, ObjectGenerationContext context) {
if (isAbstract(type)) {
return ObjectContainer.EMPTY;
}
Constructor<?> constructor = resolveConstructor(type);
// One query per constructor parameter, evaluated lazily in createInstance.
Stream<ObjectQuery> argumentQueries = Arrays
.stream(constructor.getParameters())
.map(ObjectQuery::fromParameter);
return new ObjectContainer(createInstance(constructor, argumentQueries, context));
}
// Builds an instance of a parameterized type by first mapping its type
// variables to the actual type arguments, then resolving each parameter
// query against that mapping. Builder is explicitly excluded.
private ObjectContainer generateGeneric(
ParameterizedType parameterizedType,
ObjectGenerationContext context
) {
Class<?> type = (Class<?>) parameterizedType.getRawType();
if (isAbstract(type) || type.equals(Builder.class)) {
return ObjectContainer.EMPTY;
}
Map<TypeVariable<?>, Type> genericMap = getGenericMap(type, parameterizedType);
Constructor<?> constructor = resolveConstructor(type);
Stream<ObjectQuery> argumentQueries = Arrays
.stream(constructor.getParameters())
.map(parameter -> resolveArgumentQuery(parameter, genericMap));
return new ObjectContainer(createInstance(constructor, argumentQueries, context));
}
// True for interfaces and abstract classes — neither can be constructed.
private boolean isAbstract(Class<?> type) {
return type.isInterface() || Modifier.isAbstract(type.getModifiers());
}
// Constructor selection priority: first a @ConstructorProperties-annotated
// public constructor with the fewest parameters, otherwise any public
// constructor with the fewest parameters.
// NOTE(review): Optional.get() throws NoSuchElementException (no message)
// for a type with no public constructors — confirm that is acceptable.
private Constructor<?> resolveConstructor(Class<?> type) {
return ConstructorResolver
.compose(
t -> Arrays
.stream(t.getConstructors())
.filter(c -> c.isAnnotationPresent(ConstructorProperties.class))
.sorted(Comparator.comparing(c -> c.getParameterCount()))
.findFirst(),
t -> Arrays
.stream(t.getConstructors())
.sorted(Comparator.comparing(c -> c.getParameterCount()))
.findFirst()
)
.resolve(type)
.get();
}
// Maps each declared type variable (e.g. T) to its actual type argument
// (e.g. String) for the given parameterization.
private Map<TypeVariable<?>, Type> getGenericMap(
Class<?> type,
ParameterizedType parameterizedType
) {
HashMap<TypeVariable<?>, Type> map = new HashMap<>();
TypeVariable<?>[] typeVariables = type.getTypeParameters();
Type[] typeValues = parameterizedType.getActualTypeArguments();
for (int i = 0; i < typeVariables.length; i++) {
map.put(typeVariables[i], typeValues[i]);
}
return map;
}
// For a parameter whose declared type is a type variable, substitute the
// actual type from the map (lazily, via the ObjectQuery lambda); otherwise
// use the parameter's own query.
private ObjectQuery resolveArgumentQuery(
Parameter parameter,
Map<TypeVariable<?>, Type> genericMap
) {
return parameter.getParameterizedType() instanceof TypeVariable
? () -> genericMap.get((TypeVariable<?>) parameter.getParameterizedType())
: ObjectQuery.fromParameter(parameter);
}
// Invokes the constructor with freshly generated arguments; any reflective
// failure is rethrown as an unchecked exception with its cause preserved.
private Object createInstance(
Constructor<?> constructor,
Stream<ObjectQuery> argumentQueries,
ObjectGenerationContext context
) {
try {
return constructor.newInstance(generateArguments(argumentQueries, context));
} catch (Exception e) {
throw new RuntimeException(e);
}
}
// Generates each argument through the context's generator, in parameter
// declaration order, unwrapping each container (or throwing if empty).
private Object[] generateArguments(
Stream<ObjectQuery> argumentQueries,
ObjectGenerationContext context
) {
return argumentQueries
.map(query -> context.getGenerator().generate(query, context))
.map(ObjectContainer::unwrapOrElseThrow)
.toArray();
}
}
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.doris.common;
import org.apache.doris.common.io.Text;
import org.apache.doris.common.io.Writable;
import org.apache.doris.persist.gson.GsonUtils;
import com.google.gson.annotations.SerializedName;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
/*
* Currently just used for persisting schema version and schema hash pair
* using GSON
*/
public class SchemaVersionAndHash implements Writable {
    @SerializedName(value = "version")
    public int schemaVersion;
    @SerializedName(value = "hash")
    public int schemaHash;

    public SchemaVersionAndHash(int schemaVersion, int schemaHash) {
        this.schemaVersion = schemaVersion;
        this.schemaHash = schemaHash;
    }

    /** Deserializes an instance from the JSON string written by {@link #write}. */
    public static SchemaVersionAndHash read(DataInput in) throws IOException {
        return GsonUtils.GSON.fromJson(Text.readString(in), SchemaVersionAndHash.class);
    }

    /** Serializes this pair as a GSON JSON string. */
    @Override
    public void write(DataOutput out) throws IOException {
        Text.writeString(out, GsonUtils.GSON.toJson(this));
    }

    /** Renders as {@code version:hash}. */
    @Override
    public String toString() {
        return String.format("%d:%d", schemaVersion, schemaHash);
    }
}
|
/*
* The Plaid API
* The Plaid REST API. Please see https://plaid.com/docs/api for more details.
*
* The version of the OpenAPI document: 2020-09-14_1.39.0
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
package com.plaid.client.model;
import java.util.Objects;
import java.util.Arrays;
import com.google.gson.TypeAdapter;
import com.google.gson.annotations.JsonAdapter;
import com.google.gson.annotations.SerializedName;
import com.google.gson.stream.JsonReader;
import com.google.gson.stream.JsonWriter;
import io.swagger.annotations.ApiModel;
import io.swagger.annotations.ApiModelProperty;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
* Information describing a transaction category
*/
@ApiModel(description = "Information describing a transaction category")
@javax.annotation.Generated(value = "org.openapitools.codegen.languages.JavaClientCodegen", date = "2021-10-15T19:47:39.714Z[GMT]")
// NOTE: generated by OpenAPI Generator (see file header); manual edits here
// will be lost on regeneration.
public class Category {
public static final String SERIALIZED_NAME_CATEGORY_ID = "category_id";
@SerializedName(SERIALIZED_NAME_CATEGORY_ID)
private String categoryId;
public static final String SERIALIZED_NAME_GROUP = "group";
@SerializedName(SERIALIZED_NAME_GROUP)
private String group;
public static final String SERIALIZED_NAME_HIERARCHY = "hierarchy";
@SerializedName(SERIALIZED_NAME_HIERARCHY)
private List<String> hierarchy = new ArrayList<>();
// Fluent setter returning this, enabling Category chaining.
public Category categoryId(String categoryId) {
this.categoryId = categoryId;
return this;
}
/**
 * An identifying number for the category. `category_id` is a Plaid-specific identifier and does not necessarily correspond to merchant category codes.
 * @return categoryId
 **/
@ApiModelProperty(required = true, value = "An identifying number for the category. `category_id` is a Plaid-specific identifier and does not necessarily correspond to merchant category codes.")
public String getCategoryId() {
return categoryId;
}
public void setCategoryId(String categoryId) {
this.categoryId = categoryId;
}
// Fluent setter returning this, enabling Category chaining.
public Category group(String group) {
this.group = group;
return this;
}
/**
 * `place` for physical transactions or `special` for other transactions such as bank charges.
 * @return group
 **/
@ApiModelProperty(required = true, value = "`place` for physical transactions or `special` for other transactions such as bank charges.")
public String getGroup() {
return group;
}
public void setGroup(String group) {
this.group = group;
}
// Fluent setter: replaces the whole hierarchy list.
public Category hierarchy(List<String> hierarchy) {
this.hierarchy = hierarchy;
return this;
}
// Fluent helper: appends a single level to the hierarchy list.
public Category addHierarchyItem(String hierarchyItem) {
this.hierarchy.add(hierarchyItem);
return this;
}
/**
 * A hierarchical array of the categories to which this `category_id` belongs.
 * @return hierarchy
 **/
@ApiModelProperty(required = true, value = "A hierarchical array of the categories to which this `category_id` belongs.")
public List<String> getHierarchy() {
return hierarchy;
}
public void setHierarchy(List<String> hierarchy) {
this.hierarchy = hierarchy;
}
// Value equality over all three serialized fields; kept consistent with hashCode.
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Category category = (Category) o;
return Objects.equals(this.categoryId, category.categoryId) &&
Objects.equals(this.group, category.group) &&
Objects.equals(this.hierarchy, category.hierarchy);
}
@Override
public int hashCode() {
return Objects.hash(categoryId, group, hierarchy);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("class Category {\n");
sb.append("    categoryId: ").append(toIndentedString(categoryId)).append("\n");
sb.append("    group: ").append(toIndentedString(group)).append("\n");
sb.append("    hierarchy: ").append(toIndentedString(hierarchy)).append("\n");
sb.append("}");
return sb.toString();
}
/**
 * Convert the given object to string with each line indented by 4 spaces
 * (except the first line).
 */
private String toIndentedString(Object o) {
if (o == null) {
return "null";
}
return o.toString().replace("\n", "\n    ");
}
}
|
package com.google.android.gms.internal;
import android.util.SparseArray;
import com.google.android.gms.common.api.PendingResult;
import com.google.android.gms.common.api.ResultCallback;
import com.google.android.gms.common.api.ResultCallbacks;
import com.google.android.gms.common.api.ResultStore;
/**
 * Decompiled (obfuscated) {@code ResultStore} implementation that tracks
 * pending results and their callbacks, keyed by integer id.
 * NOTE(review): field names (GA, Gz, zzaiw) are obfuscator output; GA appears
 * to hold callbacks and Gz pending results — verify against original source.
 */
public class zzrr extends ResultStore {
private final SparseArray<ResultCallbacks<?>> GA = new SparseArray();
private final SparseArray<PendingResult<?>> Gz = new SparseArray();
// Lock guarding both sparse arrays.
private final Object zzaiw = new Object();
/**
 * Removes the pending result registered under {@code i}, detaching its
 * callback first so it cannot fire after removal.
 * NOTE(review): the matching GA entry is checked but never removed here —
 * possibly a leak, or cleared elsewhere; confirm before changing.
 */
public void remove(int i) {
synchronized (this.zzaiw) {
PendingResult pendingResult = (PendingResult) this.Gz.get(i);
if (pendingResult != null) {
this.Gz.remove(i);
if (((ResultCallback) this.GA.get(i)) != null) {
pendingResult.setResultCallback(null);
}
}
}
}
/**
 * Drops all callbacks and detaches every tracked pending result from its
 * callback, leaving the pending results themselves registered.
 */
public void zzauq() {
synchronized (this.zzaiw) {
this.GA.clear();
for (int i = 0; i < this.Gz.size(); i++) {
((PendingResult) this.Gz.valueAt(i)).setResultCallback(null);
}
}
}
/**
 * Cancels every tracked pending result, then forwards {@code obj} to
 * {@code ResultStore.zzw}.
 * NOTE(review): zzw's semantics are not visible from this file — confirm
 * what it does with the argument before relying on it.
 */
public void zzy(Object obj) {
synchronized (this.zzaiw) {
for (int i = 0; i < this.Gz.size(); i++) {
((PendingResult) this.Gz.valueAt(i)).cancel();
}
}
ResultStore.zzw(obj);
}
}
|
package Association.Aggregaion.Composition;
import java.util.ArrayList;
/**
 * Demonstrates the difference between composition (a Vehicle owns its Parts)
 * and aggregation (a vehicle pool merely references Vehicles it does not own).
 */
public class Aggregation {
    public static void main(String[] args) {
        // --- Composition: build two cars, each owning its own parts. ---
        ArrayList<Part> bmwParts = new ArrayList<Part>();
        Part bmwEngine = new Part();
        Part bmwBody = new Part();
        bmwParts.add(bmwEngine);
        bmwParts.add(bmwBody);
        Vehicle bmw = new Vehicle(1500, 500, 50, 0.7, bmwParts);

        ArrayList<Part> volvoParts = new ArrayList<Part>();
        Part volvoEngine = new Part();
        Part volvoBody = new Part();
        volvoParts.add(volvoEngine);
        volvoParts.add(volvoBody);
        Vehicle volvo = new Vehicle(1400, 300, 50, 0.6, volvoParts);

        // --- Aggregation: pools share references to the same vehicles. ---
        ArrayList<Vehicle> firstPool = new ArrayList<Vehicle>();
        firstPool.add(volvo);
        firstPool.add(bmw);

        // If firstPool is garbage collected, "volvo" and "bmw" might not become
        // garbage themselves: another pool may still reference them.
        ArrayList<Vehicle> secondPool = new ArrayList<Vehicle>();
        secondPool.add(volvo);
        secondPool.add(bmw);
    }
}
|
// Copyright 2017 The Nomulus Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package google.registry.whois;
import static google.registry.model.EppResourceUtils.loadByForeignKeyCached;
import com.google.common.net.InternetDomainName;
import google.registry.model.domain.DomainResource;
import java.util.Optional;
import org.joda.time.DateTime;
/** Represents a WHOIS lookup on a domain name (i.e. SLD). */
public class DomainLookupCommand extends DomainOrHostLookupCommand {

  /** Whether the response should contain the full (unabridged) WHOIS output. */
  private final boolean fullOutput;

  public DomainLookupCommand(InternetDomainName domainName, boolean fullOutput) {
    super(domainName, "Domain");
    this.fullOutput = fullOutput;
  }

  @Override
  protected Optional<WhoisResponse> getResponse(InternetDomainName domainName, DateTime now) {
    // Optional.ofNullable(...).map(...) replaces the null-check ternary: the
    // mapping lambda only runs when a domain resource was actually found.
    return Optional.ofNullable(
            loadByForeignKeyCached(DomainResource.class, domainName.toString(), now))
        .map(domainResource -> new DomainWhoisResponse(domainResource, fullOutput, now));
  }
}
|
// ------------------------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
// ------------------------------------------------------------------------------
package com.microsoft.graph.requests.extensions;
import com.microsoft.graph.models.extensions.Place;
import com.microsoft.graph.concurrency.ICallback;
import com.microsoft.graph.core.ClientException;
import com.microsoft.graph.http.IHttpRequest;
// **NOTE** This file was generated by a tool and any changes will be overwritten.
/**
 * The interface for the Place Request. Exposes the standard CRUD operations
 * (GET / DELETE / PATCH / POST / PUT), each in an asynchronous (callback) and
 * a synchronous (blocking) variant, plus OData query-option setters.
 */
public interface IPlaceRequest extends IHttpRequest {
    /**
     * Gets the Place from the service asynchronously
     *
     * @param callback the callback to be called after success or failure
     */
    void get(final ICallback<? super Place> callback);
    /**
     * Gets the Place from the service
     *
     * @return the Place from the request
     * @throws ClientException this exception occurs if the request was unable to complete for any reason
     */
    Place get() throws ClientException;
    /**
     * Deletes this item from the service asynchronously
     *
     * @param callback the callback when the deletion action has completed
     */
    void delete(final ICallback<? super Place> callback);
    /**
     * Deletes this item from the service
     *
     * @throws ClientException if there was an exception during the delete operation
     */
    void delete() throws ClientException;
    /**
     * Patches this Place with a source, asynchronously
     *
     * @param sourcePlace the source object with updates
     * @param callback the callback to be called after success or failure
     */
    void patch(final Place sourcePlace, final ICallback<? super Place> callback);
    /**
     * Patches this Place with a source
     *
     * @param sourcePlace the source object with updates
     * @return the updated Place
     * @throws ClientException this exception occurs if the request was unable to complete for any reason
     */
    Place patch(final Place sourcePlace) throws ClientException;
    /**
     * Posts a Place with a new object, asynchronously
     *
     * @param newPlace the new object to create
     * @param callback the callback to be called after success or failure
     */
    void post(final Place newPlace, final ICallback<? super Place> callback);
    /**
     * Posts a Place with a new object
     *
     * @param newPlace the new object to create
     * @return the created Place
     * @throws ClientException this exception occurs if the request was unable to complete for any reason
     */
    Place post(final Place newPlace) throws ClientException;
    /**
     * Puts (creates or replaces) a Place with a new object, asynchronously
     *
     * @param newPlace the object to create/update
     * @param callback the callback to be called after success or failure
     */
    void put(final Place newPlace, final ICallback<? super Place> callback);
    /**
     * Puts (creates or replaces) a Place with a new object
     *
     * @param newPlace the object to create/update
     * @return the created Place
     * @throws ClientException this exception occurs if the request was unable to complete for any reason
     */
    Place put(final Place newPlace) throws ClientException;
    /**
     * Sets the select clause for the request
     *
     * @param value the select clause
     * @return the updated request
     */
    IPlaceRequest select(final String value);
    /**
     * Sets the expand clause for the request
     *
     * @param value the expand clause
     * @return the updated request
     */
    IPlaceRequest expand(final String value);
}
|
/*
* Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.transfer.model;
import java.io.Serializable;
import java.util.Objects;
import javax.annotation.Generated;
import com.amazonaws.AmazonWebServiceRequest;
/**
 * Request object for the <code>ListTagsForResource</code> operation: identifies the resource (by ARN) whose tags
 * should be listed, and carries the optional pagination parameters.
 *
 * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/ListTagsForResource" target="_top">AWS API
 *      Documentation</a>
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class ListTagsForResourceRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable {

    /**
     * <p>
     * Requests the tags associated with a particular Amazon Resource Name (ARN). An ARN is an identifier for a
     * specific Amazon Web Services resource, such as a server, user, or role.
     * </p>
     */
    private String arn;

    /**
     * <p>
     * Specifies the number of tags to return as a response to the <code>ListTagsForResource</code> request.
     * </p>
     */
    private Integer maxResults;

    /**
     * <p>
     * When you request additional results from the <code>ListTagsForResource</code> operation, a
     * <code>NextToken</code> parameter is returned in the input. You can then pass in a subsequent command to the
     * <code>NextToken</code> parameter to continue listing additional tags.
     * </p>
     */
    private String nextToken;

    /**
     * @param arn
     *        the Amazon Resource Name (ARN) of the resource (server, user, or role) whose tags are requested.
     */
    public void setArn(String arn) {
        this.arn = arn;
    }

    /**
     * @return the Amazon Resource Name (ARN) of the resource whose tags are requested.
     */
    public String getArn() {
        return this.arn;
    }

    /**
     * @param arn
     *        the Amazon Resource Name (ARN) of the resource whose tags are requested.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public ListTagsForResourceRequest withArn(String arn) {
        setArn(arn);
        return this;
    }

    /**
     * @param maxResults
     *        the number of tags to return as a response to the <code>ListTagsForResource</code> request.
     */
    public void setMaxResults(Integer maxResults) {
        this.maxResults = maxResults;
    }

    /**
     * @return the number of tags to return as a response to the <code>ListTagsForResource</code> request.
     */
    public Integer getMaxResults() {
        return this.maxResults;
    }

    /**
     * @param maxResults
     *        the number of tags to return as a response to the <code>ListTagsForResource</code> request.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public ListTagsForResourceRequest withMaxResults(Integer maxResults) {
        setMaxResults(maxResults);
        return this;
    }

    /**
     * @param nextToken
     *        the <code>NextToken</code> value returned by a previous <code>ListTagsForResource</code> call; pass it
     *        back to continue listing additional tags.
     */
    public void setNextToken(String nextToken) {
        this.nextToken = nextToken;
    }

    /**
     * @return the <code>NextToken</code> pagination value to pass back to continue listing additional tags.
     */
    public String getNextToken() {
        return this.nextToken;
    }

    /**
     * @param nextToken
     *        the <code>NextToken</code> value returned by a previous <code>ListTagsForResource</code> call; pass it
     *        back to continue listing additional tags.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public ListTagsForResourceRequest withNextToken(String nextToken) {
        setNextToken(nextToken);
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
     * redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        sb.append("{");
        if (getArn() != null)
            sb.append("Arn: ").append(getArn()).append(",");
        if (getMaxResults() != null)
            sb.append("MaxResults: ").append(getMaxResults()).append(",");
        if (getNextToken() != null)
            sb.append("NextToken: ").append(getNextToken());
        sb.append("}");
        return sb.toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        // instanceof is false for null, so no separate null check is needed.
        if (!(obj instanceof ListTagsForResourceRequest)) {
            return false;
        }
        ListTagsForResourceRequest other = (ListTagsForResourceRequest) obj;
        // Objects.equals replaces the generated null-XOR pattern with the standard idiom.
        return Objects.equals(getArn(), other.getArn())
                && Objects.equals(getMaxResults(), other.getMaxResults())
                && Objects.equals(getNextToken(), other.getNextToken());
    }

    @Override
    public int hashCode() {
        // Objects.hash produces exactly the same 31-based accumulation as the
        // previously generated manual computation, so hash values are unchanged.
        return Objects.hash(getArn(), getMaxResults(), getNextToken());
    }

    @Override
    public ListTagsForResourceRequest clone() {
        return (ListTagsForResourceRequest) super.clone();
    }
}
|
/*
* Decompiled with CFR 0.150.
*
* Could not load the following classes:
* net.minecraft.network.Packet
*/
package me.independed.inceptice.event.events;
import net.minecraft.network.Packet;
/**
 * Event carrying a network {@link Packet}.
 */
public class EventNetworkPacketEvent
extends MinecraftEvent {
    /** The packet carried by this event. Public for legacy callers; prefer {@link #getPacket()}. */
    public Packet m_Packet;

    public EventNetworkPacketEvent(Packet packet) {
        this.m_Packet = packet;
    }

    /**
     * @return the packet carried by this event
     */
    public Packet getPacket() {
        return this.m_Packet;
    }

    /**
     * Duplicate accessor kept only for backward compatibility with existing callers.
     *
     * @return the packet carried by this event
     * @deprecated use {@link #getPacket()} instead; this name violates Java naming conventions.
     */
    @Deprecated
    public Packet GetPacket() {
        return getPacket();
    }
}
|
package com.jojolabs.johttp.toolbox;
import android.content.Context;
import android.util.AttributeSet;
import android.view.ViewGroup.LayoutParams;
import android.widget.ImageView.ScaleType;
import org.junit.Before;
import org.junit.Test;
import org.robolectric.RuntimeEnvironment;
import static org.junit.Assert.*;
public class NetworkImageViewTest {
    private NetworkImageView networkImageView;
    private MockImageLoader imageLoader;

    @Before
    public void setUp() throws Exception {
        imageLoader = new MockImageLoader();
        networkImageView = new NetworkImageView(RuntimeEnvironment.application);
    }

    // TODO uncomment once android is more mocked
    // @Test
    // public void setImageUrl_requestsImage() {
    //     networkImageView.setLayoutParams(
    //             new LayoutParams(LayoutParams.WRAP_CONTENT, LayoutParams.WRAP_CONTENT));
    //     networkImageView.setImageUrl("http://foo", imageLoader);
    //     assertEquals("http://foo", imageLoader.lastRequestUrl);
    //     assertEquals(0, imageLoader.lastMaxWidth);
    //     assertEquals(0, imageLoader.lastMaxHeight);
    // }

    /** ImageLoader stub that records the arguments of the most recent get() call. */
    private class MockImageLoader extends ImageLoader {
        public String lastRequestUrl;
        public int lastMaxWidth;
        public int lastMaxHeight;

        public MockImageLoader() {
            super(null, null);
        }

        public ImageContainer get(String requestUrl, ImageListener imageListener, int maxWidth,
                int maxHeight, ScaleType scaleType) {
            lastRequestUrl = requestUrl;
            lastMaxWidth = maxWidth;
            lastMaxHeight = maxHeight;
            return null;
        }
    }

    @Test
    public void publicMethods() throws Exception {
        // Catch-all test to find API-breaking changes.
        assertNotNull(NetworkImageView.class.getConstructor(Context.class));
        assertNotNull(NetworkImageView.class.getConstructor(Context.class, AttributeSet.class));
        assertNotNull(NetworkImageView.class.getConstructor(Context.class, AttributeSet.class,
                int.class));
        assertNotNull(NetworkImageView.class.getMethod("setImageUrl", String.class, ImageLoader.class));
        assertNotNull(NetworkImageView.class.getMethod("setDefaultImageResId", int.class));
        assertNotNull(NetworkImageView.class.getMethod("setErrorImageResId", int.class));
    }
}
|
/**
*
*/
package com.upgrad.pgbde.course6;
import static org.apache.spark.sql.functions.col;
import static org.apache.spark.sql.functions.when;
import java.io.FileWriter;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Objects;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
//import static org.apache.spark.sql.DataFrameNaFunctions.*;
/**
 * Spark job that loads the Twitter "gender classifier" CSV, cleans the gender
 * column and logs basic dataset statistics. All console output is buffered
 * through {@link #log(String)} and flushed to a timestamped file by main().
 *
 * @author raghs
 */
public class FirstTry {
    /**
     * Entry point: runs the current pipeline and flushes the buffered log.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        //firstTry();
        latest();
        //log("Test message to be written to a log file");
        // Write the buffered messages to a file, finally.
        try {
            writeToFile();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /** First experiment: load the CSV with default options and drop null rows. */
    public static void firstTry() {
        // https://stackoverflow.com/questions/35652665/java-io-ioexception-could-not-locate-executable-null-bin-winutils-exe-in-the-ha
        System.setProperty("hadoop.home.dir", "C:\\winutils");
        SparkSession spark = SparkSession.builder().master("local").appName("GenderIdentificationFromTweet").getOrCreate();
        spark.sparkContext().setLogLevel("ERROR");
        Dataset<Row> df = spark.read().option("header", true).option("inferSchema", true).csv("gender-classifier-DFE-791531.csv");
        df.show(15);
        df.printSchema();
        df.describe().show();
        df.show();
        log();
        log("=================================================");
        log("Total # of rows in the dataset :: " + df.count());
        df = df.na().drop();
        log("Total # of rows in the dataset - after dropping null :: " + df.count());
        //df = df.withColumn("gender", when(col("gender").equalTo("brand"), null).otherwise(col("gender")));
        //log("Total # of rows in the dataset - [gender] after dropping 'brand' :: " + df.na().drop().count());
        df = df.na().drop();
        /*df = df.withColumn("gender", when(col("gender").equalTo("unknown"), null).otherwise(col("gender")));
        df= df.na().drop();
        log("Total # of rows in the dataset - [gender] after dropping 'unknown' :: " + df.count());
        df = df.withColumn("gender", when(col("gender").equalTo("0"), null).otherwise(col("gender")));
        df= df.na().drop();
        log("Total # of rows in the dataset - [gender] after dropping '0' :: " + df.count());
        df = df.withColumn("gender", when(col("gender").equalTo(""), null).otherwise(col("gender")));
        df= df.na().drop();
        log("Total # of rows in the dataset - [gender] after dropping 'blank' :: " + df.count());*/
        log("=================================================");
        log();
        df.show(5);
        spark.close();
    }

    /**
     * Logs how many rows of the given column are null.
     *
     * @param df        dataset to inspect
     * @param colName   column whose null count is reported
     * @param condition currently unused; kept for interface compatibility
     */
    private static void checkFilteredCount(Dataset<Row> df, String colName, String condition)
    {
        long nullCount = df.filter(df.col(colName).isNull()).count();
        // Fixed copy-paste bug: the label used to say "genderNullCount" regardless
        // of which column was actually being counted.
        log(colName + " null count : " + nullCount);
    }

    /** Logs counts of missing/placeholder values in the gender column. */
    private static void getStats(Dataset<Row> df) {
        log("*********************************");
        log(" >>> getStats() >>>>>> ");
        log("*********************************");
        //df.filter(df(colName).isNull || df(colName) === "" || df(colName).isNaN).count()
        String colName = "gender";
        // Fixed: the original compared the Column OBJECT itself against String
        // literals (always false), guarding a df.filter(df.col("gender")) call on a
        // non-boolean column that would have thrown had the branch ever been taken.
        // Use proper Column predicates to count missing/placeholder genders instead.
        long missingGenderCount = df.filter(
                col(colName).isNull().or(col(colName).isin("", "blank", "unknown", "0"))).count();
        log("Count of missing gender : " + missingGenderCount);
        long genderNullCount = df.filter(df.col("gender").isNull()).count();
        log("genderNullCount : " + genderNullCount);
        long genderEmptyCount = df.filter(df.col("gender").contains(" ")).count();
        log("genderEmptyCount : " + genderEmptyCount);
        long genderBrandCount = df.filter(df.col("gender").contains("brand")).count();
        log("genderBrandCount : " + genderBrandCount);
        long genderBrandTripeEqualsCount = df.filter(df.col("gender").$eq$eq$eq("brand")).count();
        log("genderBrandTripeEqualsCount : " + genderBrandTripeEqualsCount);
        log("*********************************");
        log(" <<<<< getStats() <<<<<< ");
        log("*********************************");
    }

    /**
     * References
     * ===========
     * https://stackoverflow.com/questions/56005857/fill-null-values-with-empty-string-in-datasetrow-using-apache-spark-in-java?noredirect=1&lq=1
     * https://stackoverflow.com/questions/44671597/how-to-replace-null-values-with-a-specific-value-in-dataframe-using-spark-in-jav/44671923 (Direct Link : https:stackoverflow.com/a/44671923/1001242)
     * https://spark.apache.org/docs/latest/api/java/index.html
     * https://spark.apache.org/docs/latest/api/java/org/apache/spark/sql/DataFrameNaFunctions.html#fill-java.lang.String-java.lang.String:A-
     * https://github.com/eBay/Spark/blob/master/examples/src/main/java/org/apache/spark/examples/ml/JavaDecisionTreeClassificationExample.java
     * [Excellent] https:towardsdatascience.com/apache-spark-mllib-tutorial-7aba8a1dce6e
     * https://spark.apache.org/docs/1.6.1/ml-guide.html#estimators
     * https://spark.apache.org/docs/1.6.1/ml-guide.html#transformers
     * https://stackoverflow.com/questions/17910290/clustering-sparse-vector-and-dense-vector/26706528#26706528
     *
     * [Good One] https://spark.apache.org/docs/latest/ml-features
     * https://en.wikipedia.org/wiki/One-hot
     * https://spark.apache.org/docs/latest/ml-features#onehotencoderestimator
     * https://spark.apache.org/docs/latest/ml-features.html#stringindexer
     * https://spark.apache.org/docs/latest/ml-pipeline.html
     * https://spark.apache.org/docs/latest/ml-tuning.html
     * https://databricks.com/glossary/what-are-ml-pipelines
     * https://spark.apache.org/docs/2.3.0/api/java/org/apache/spark/ml/feature/StringIndexerModel.html
     * https://spark.apache.org/docs/2.1.2/api/java/org/apache/spark/mllib/feature/ChiSqSelector.html
     * https://spark.apache.org/docs/latest/api/java/org/apache/spark/ml/feature/ChiSqSelector.html
     * https://spark.apache.org/docs/latest/mllib-feature-extraction.html#chisqselector
     *
     * https://stackoverflow.com/questions/46064099/failed-to-load-implementation-nativesystemblas-hibench/49811375
     * https://www.quora.com/What-is-more-preferable-in-machine-learning-the-accuracy-of-model-A-is-50-on-training-data-and-97-on-test-data-or-is-model-B-with-80-accuracy-on-train-data-and-75-accuracy-on-test-data-more-detail-in-comment-below-thank-you
     * https://medium.com/machine-learning-intuition/overfitting-what-they-are-regularization-e950c2d66d50
     *
     * https://examples.javacodegeeks.com/apache-spark-machine-learning-tutorial/
     * https://www.javacodegeeks.com/2016/02/apache-spark-machine-learning-tutorial.html
     * https://datascience.stackexchange.com/questions/9159/when-to-choose-linear-regression-or-decision-tree-or-random-forest-regression?newreg=5fdc4aecdeef46f98afb29ac141b50fd
     *
     * https://sparkbyexamples.com/spark/spark-read-csv-file-into-dataframe/
     * https://medium.com/@ManningBooks/ingesting-data-from-files-with-spark-part-1-csv-21b00e3cd270
     * https://www.programcreek.com/java-api-examples/?class=org.apache.spark.sql.Dataset&method=show
     * https://spark.apache.org/docs/latest/api/java/org/apache/spark/sql/DataFrameReader.html
     * https://spark.apache.org/docs/latest/sql-programming-guide.html
     * https://spark.apache.org/docs/latest/rdd-programming-guide.html
     * https://www.analyticsvidhya.com/blog/2019/11/build-machine-learning-pipelines-pyspark/
     *
     */
    public static void latest() {
        // https://stackoverflow.com/questions/35652665/java-io-ioexception-could-not-locate-executable-null-bin-winutils-exe-in-the-ha
        System.setProperty("hadoop.home.dir", "C:\\winutils");
        SparkSession spark = SparkSession.builder().master("local").appName("GenderIdentificationFromTweet").getOrCreate();
        spark.sparkContext().setLogLevel("ERROR");
        // Robust CSV read: multi-line fields, quoted escapes, UTF-8, and malformed rows dropped.
        Dataset<Row> df = spark.read()
                .option("header", true)
                .option("inferSchema", true)
                .option("mode", "DROPMALFORMED")
                //.option("charset", "ASCII")
                .option("charset", "UTF-8")
                .option("encoding", "UTF-8")
                .option("multiLine", "true")
                .option("escape", "\"")
                .option("timestampFormat", "MM/dd/yy HH:mm")
                .csv("gender-classifier-DFE-791531.csv");
        getStats(df);
        df.show(15);
        df.printSchema();
        //df.describe().show();
        //df.show();
        log("================================================================");
        log("=============== AFTER COLUMN SELECTION ======================");
        log("================================================================");
        //"gender:confidence", "profile_yn","profile_yn:confidence","created","description","fav_number","gender_gold","link_color","name","profile_yn_gold","profileimage","retweet_count","sidebar_color","text","tweet_coord","tweet_count","tweet_created","tweet_id","tweet_location","user_timezone"
        //df.select("_unit_state","_trusted_judgments","gender","gender:confidence","profile_yn:confidence","gender_gold","link_color","name","profile_yn_gold","tweet_count","tweet_created","tweet_id","tweet_location","user_timezone");
        /** All columns in the CSV File */
        //df = df.select("_unit_id","_golden","_unit_state","_trusted_judgments","_last_judgment_at","gender","gender:confidence","profile_yn","profile_yn:confidence","created","description","fav_number","gender_gold","link_color","name","profile_yn_gold","profileimage","retweet_count","sidebar_color","text","tweet_coord","tweet_count","tweet_created","tweet_id","tweet_location","user_timezone");
        //df = df.select("gender","gender:confidence","profile_yn:confidence","gender_gold","link_color","name","profile_yn_gold","tweet_count","tweet_id","tweet_location","user_timezone");
        df = df.select("gender","gender:confidence","profile_yn:confidence","description","name","profile_yn_gold","link_color", "tweet_location","user_timezone");
        df.show(15);
        df.printSchema();
        //df.describe().show();
        //df.show();
        log("");
        log("=================================================");
        log("Total # of rows in the dataset :: " + df.count());
        // Fill nulls in selected columns with sentinel values before dropping rows.
        String[] colNames = {"user_timezone"};
        df = df.na().fill("missing_utc", colNames);
        colNames = new String[]{"gender:confidence"};
        df = df.na().fill("missing_gc", colNames);
        //colNames = new String[]{"text"};
        //df = df.na().fill("missing_text", colNames);
        colNames = new String[]{"description"};
        df = df.na().fill("missing_desc", colNames);
        //df = df.withColumn("gender", when(col("gender").equalTo("brand"), null).otherwise(col("gender")));
        //log("Total # of rows in the dataset - [gender] after dropping 'brand' :: " + df.na().drop().count());
        // Convert placeholder gender values to null so na().drop() removes them.
        df = df.withColumn("gender", when(col("gender").equalTo("unknown"), null).otherwise(col("gender")));
        //log("Total # of rows in the dataset - [gender] after dropping 'unknown' :: " + df.count());
        df = df.withColumn("gender", when(col("gender").equalTo("0"), null).otherwise(col("gender")));
        //log("Total # of rows in the dataset - [gender] after dropping '0' :: " + df.count());
        df = df.withColumn("gender", when(col("gender").equalTo(""), null).otherwise(col("gender")));
        log("Total # of rows in the dataset - [gender] after dropping 'blank' :: " + df.count());
        df= df.na().drop();
        log("Total # of rows in the dataset after dropping nulls :: " + df.count());
        log("=================================================");
        log("");
        df.show();
        spark.close();
    }

    /** @return the current timestamp formatted with the default file-name pattern. */
    public static String getNow() {
        return getNow(null);
    }

    /**
     * Formats the current date/time.
     *
     * @param pattern a SimpleDateFormat pattern, or null for the default
     * @return the formatted timestamp
     */
    public static String getNow(String pattern) {
        if(Objects.isNull(pattern))
            // Fixed: 'yyyy' (calendar year) instead of 'YYYY' (week-based year),
            // which yields the wrong year around the new-year boundary.
            pattern = "yyyy-MM-dd_HH-mm-ss_a_z";
        SimpleDateFormat sdf = new SimpleDateFormat(pattern);
        String now = sdf.format(new Date());
        //System.out.println("Current Date and Time is : " + now);
        return now;
    }

    /** Output file name: one per run, stamped with the start time. */
    private static String fileName = getNow() + "_ConsoleOutput-TwitterGenderAnalysis" + ".txt";
    /** Buffer that collects all log messages until writeToFile() flushes them. */
    private static StringBuilder outputMsgBfr = new StringBuilder();
    private static String NEW_LINE = System.getProperty("line.separator");

    /**
     * Appends a timestamped message line to the output buffer.
     *
     * @param msg the message to buffer
     */
    public static void log(String msg) {
        outputMsgBfr
                .append("[" + getNow() + "] - ")
                .append(msg)
                .append(NEW_LINE);
    }

    /** Appends a blank line to the output buffer. */
    public static void log() {
        // Fixed: appending "" was a no-op; append a real line separator so callers
        // actually get the blank line they asked for.
        outputMsgBfr.append(NEW_LINE);
    }

    /**
     * Flushes the buffered log messages to {@link #fileName} (appending) and
     * echoes them to stdout.
     *
     * @throws Exception if the file cannot be written
     */
    public static void writeToFile() throws Exception {
        // try-with-resources guarantees the writer is closed even if write() throws.
        try (FileWriter writer = new FileWriter(fileName, true)) {
            writer.write(outputMsgBfr.toString());
            writer.flush();
        }
        System.out.println("Output is written to the file -> [" + fileName + "]");
        System.out.println();
        System.out.println(outputMsgBfr.toString());
    }
}
|
/**
* Copyright 2019 LinkedIn Corporation. All rights reserved.
* Licensed under the BSD 2-Clause License. See the LICENSE file in the project root for license information.
* See the NOTICE file in the project root for additional information regarding copyright ownership.
*/
package com.linkedin.datastream.server;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.BooleanSupplier;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.Validate;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.codahale.metrics.MetricRegistry;
import com.linkedin.datastream.common.Datastream;
import com.linkedin.datastream.common.DatastreamAlreadyExistsException;
import com.linkedin.datastream.common.DatastreamConstants;
import com.linkedin.datastream.common.DatastreamDestination;
import com.linkedin.datastream.common.DatastreamException;
import com.linkedin.datastream.common.DatastreamMetadataConstants;
import com.linkedin.datastream.common.DatastreamStatus;
import com.linkedin.datastream.common.DatastreamTransientException;
import com.linkedin.datastream.common.DatastreamUtils;
import com.linkedin.datastream.common.ErrorLogger;
import com.linkedin.datastream.common.VerifiableProperties;
import com.linkedin.datastream.metrics.BrooklinCounterInfo;
import com.linkedin.datastream.metrics.BrooklinGaugeInfo;
import com.linkedin.datastream.metrics.BrooklinMeterInfo;
import com.linkedin.datastream.metrics.BrooklinMetricInfo;
import com.linkedin.datastream.metrics.DynamicMetricsManager;
import com.linkedin.datastream.metrics.MetricsAware;
import com.linkedin.datastream.serde.SerDe;
import com.linkedin.datastream.serde.SerDeSet;
import com.linkedin.datastream.server.api.connector.Connector;
import com.linkedin.datastream.server.api.connector.DatastreamDeduper;
import com.linkedin.datastream.server.api.connector.DatastreamValidationException;
import com.linkedin.datastream.server.api.security.AuthorizationException;
import com.linkedin.datastream.server.api.security.Authorizer;
import com.linkedin.datastream.server.api.serde.SerdeAdmin;
import com.linkedin.datastream.server.api.strategy.AssignmentStrategy;
import com.linkedin.datastream.server.api.transport.TransportException;
import com.linkedin.datastream.server.api.transport.TransportProvider;
import com.linkedin.datastream.server.api.transport.TransportProviderAdmin;
import com.linkedin.datastream.server.providers.CheckpointProvider;
import com.linkedin.datastream.server.providers.ZookeeperCheckpointProvider;
import com.linkedin.datastream.server.zk.ZkAdapter;
import static com.linkedin.datastream.common.DatastreamMetadataConstants.CREATION_MS;
import static com.linkedin.datastream.common.DatastreamMetadataConstants.SYSTEM_DESTINATION_PREFIX;
import static com.linkedin.datastream.common.DatastreamMetadataConstants.TTL_MS;
import static com.linkedin.datastream.common.DatastreamUtils.hasValidDestination;
import static com.linkedin.datastream.common.DatastreamUtils.isReuseAllowed;
/**
*
* Coordinator is the object that bridges ZooKeeper with Connector implementations. There is one instance
* of Coordinator for each deployable Brooklin service instance. The Coordinator can connect multiple connectors,
* but each of them must belong to a different type. The Coordinator calls the Connector.getConnectorType() to
* inspect the type of the connectors to make sure that there is only one connector for each type.
*
 * <p> ZooKeeper interactions are wrapped in {@link ZkAdapter}, and depending on the state of the instance, it
* emits callbacks:
*
* <ul>
* <li>{@link Coordinator#onBecomeLeader()} This callback is triggered when this instance becomes the
* leader of the Datastream cluster</li>
*
* <li>{@link Coordinator#onDatastreamAddOrDrop()} Only the Coordinator leader monitors the Datastream definitions
* in ZooKeeper. When there are changes made to datastream definitions through Datastream Management Service,
* this callback will be triggered on the Coordinator Leader so it can reassign datastream tasks among
* live instances.</li>
*
* <li>{@link Coordinator#onLiveInstancesChange()} Only the Coordinator leader monitors the list of
* live instances in the cluster. If there are any instances go online or offline, this callback is triggered
* so the Coordinator leader can reassign datastream tasks among live instances.</li>
*
* <li>{@link Coordinator#onDatastreamUpdate()} This callback is triggered when any updates have been made
* to existing datastreams to schedule an AssignmentChange event for task reassignment if necessary. </li>
*
 * <li>{@link Coordinator#onAssignmentChange()} All Coordinators, including the leader instance, will
 * get notified if the datastream tasks assigned to them are updated through this callback. This is where
 * the Coordinator can trigger the Connector API to notify corresponding connectors</li>
* </ul>
*
*
*
* Coordinator Connector
*
* ┌──────────────┐ ┌─────────────────────────────────────────┐ ┌─────────────────┐
* │ │ │ │ │ │
* │ │ │ │ │ │
* │ │ │ ┌──────────┐ ┌────────────────┐ │ │ │
* │ │ │ │ZkAdapter ├──▶ onBecomeLeader │ │ │ │
* │ │ │ │ │ └────────────────┘ │ │ │
* │ ├───────┼─▶ │ ┌──────────────────┐ │ │ │
* │ │ │ │ ├──▶ onBecomeFollower │ │ │ │
* │ │ │ │ │ └──────────────────┘ │ │ │
* │ │ │ │ │ ┌────────────────────┐ │ │ │
* │ ZooKeeper ├───────┼─▶ ├──▶ onAssignmentChange ├────┼────▶ │
* │ │ │ │ │ └────────────────────┘ │ │ │
* │ │ │ │ │ ┌───────────────────────┐ │ │ │
* │ │ │ │ ├──▶ onLiveInstancesChange │ │ │ │
* │ ├───────┼─▶ │ └───────────────────────┘ │ │ │
* │ │ │ │ │ ┌────────────────────┐ │ │ │
* │ │ │ │ ├──▶ onDatastreamUpdate ├────┼────▶ │
* │ │ │ │ │ └────────────────────┘ │ │ │
* │ │ │ └──────────┘ │ │ │
* │ │ │ │ │ │
* └──────────────┘ │ │ │ │
* └─────────────────────────────────────────┘ └─────────────────┘
*
*/
public class Coordinator implements ZkAdapter.ZkAdapterListener, MetricsAware {
/*
 There are situations where we need to pause a Datastream without taking down the Brooklin Server.
 For example to temporarily stop a misbehaving datastream, or to fix some connectivity issues.
 The coordinator reassigns the tasks of the paused datastream to a dummy instance "PAUSED_INSTANCE",
 effectively suspending processing of the current tasks.
 - In case a datastream is deduped, the tasks are reassigned only if all the datastreams are paused.
 - The tasks status are changed from OK to Paused.
 */
public static final String PAUSED_INSTANCE = "PAUSED_INSTANCE";
// Property-domain prefix for configuration passed down to EventProducer instances.
private static final String EVENT_PRODUCER_CONFIG_DOMAIN = "brooklin.server.eventProducer";
// Metric namespace for all coordinator-level metrics.
private static final String MODULE = Coordinator.class.getSimpleName();
// How long stop() waits for the event thread to exit gracefully before interrupting it (ms).
private static final long EVENT_THREAD_LONG_JOIN_TIMEOUT = 30000L;
// Join timeout between successive interrupt attempts during shutdown (ms).
private static final long EVENT_THREAD_SHORT_JOIN_TIMEOUT = 3000L;
// Max total time to wait for connector onAssignmentChange futures before scheduling a retry.
private static final Duration ASSIGNMENT_TIMEOUT = Duration.ofSeconds(30);
// Metric names emitted via DynamicMetricsManager.
private static final String NUM_REBALANCES = "numRebalances";
private static final String NUM_ERRORS = "numErrors";
private static final String NUM_RETRIES = "numRetries";
private static final String NUM_HEARTBEATS = "numHeartbeats";
private static final String NUM_ASSIGNMENT_CHANGES = "numAssignmentChanges";
private static final String NUM_PARTITION_ASSIGNMENTS = "numPartitionAssignments";
private static final String NUM_PARTITION_MOVEMENTS = "numPartitionMovements";
private static final String NUM_PAUSED_DATASTREAMS_GROUPS = "numPausedDatastreamsGroups";
private static final String MAX_PARTITION_COUNT_IN_TASK = "maxPartitionCountInTask";
private static final String IS_LEADER = "isLeader";
// Connector common metrics
private static final String NUM_DATASTREAMS = "numDatastreams";
private static final String NUM_DATASTREAM_TASKS = "numDatastreamTasks";
// Backing values for the gauges registered in the constructor.
private static final AtomicLong PAUSED_DATASTREAMS_GROUPS = new AtomicLong(0L);
private static final AtomicLong MAX_PARTITION_COUNT = new AtomicLong(0L);
// Shared cache of datastream definitions read from ZooKeeper.
private final CachedDatastreamReader _datastreamCache;
// Sub-domain of the coordinator config handed to each EventProducer.
private final Properties _eventProducerConfig;
// Checkpoint provider used when connectors rely on Brooklin-managed checkpointing.
private final CheckpointProvider _cpProvider;
// Transport provider admins keyed by transport provider name.
private final Map<String, TransportProviderAdmin> _transportProviderAdmins = new HashMap<>();
// Queue of coordinator events consumed by the single event thread.
private final CoordinatorEventBlockingQueue _eventQueue;
private final CoordinatorEventProcessor _eventThread;
// Per-connector single-thread executors used to dispatch onAssignmentChange calls asynchronously.
private final Map<String, ExecutorService> _assignmentChangeThreadPool = new ConcurrentHashMap<>();
private final String _clusterName;
private final CoordinatorConfig _config;
private final ZkAdapter _adapter;
// mapping from connector type to connector Info instance
private final Map<String, ConnectorInfo> _connectors = new HashMap<>();
// Currently assigned datastream tasks by taskName
private final Map<String, DatastreamTask> _assignedDatastreamTasks = new ConcurrentHashMap<>();
private final List<BrooklinMetricInfo> _metrics = new ArrayList<>();
private final DynamicMetricsManager _dynamicMetricsManager;
// One coordinator heartbeat per minute, heartbeat helps detect dead/live-lock
// where no events can be handled if coordinator locks up. This can happen because
// handleEvent is synchronized and downstream code can misbehave.
private final Duration _heartbeatPeriod;
private final Logger _log = LoggerFactory.getLogger(Coordinator.class.getName());
// Single-thread scheduler used for heartbeats and delayed retry events.
private final ScheduledExecutorService _executor = Executors.newSingleThreadScheduledExecutor();
// make sure the scheduled retries are not duplicated
private final AtomicBoolean leaderDatastreamAddOrDeleteEventScheduled = new AtomicBoolean(false);
// make sure the scheduled retries are not duplicated
private final AtomicBoolean leaderDoAssignmentScheduled = new AtomicBoolean(false);
// make sure the scheduled retries are not duplicated
private final AtomicBoolean leaderPartitionAssignmentScheduled = new AtomicBoolean(false);
// SerDe admins keyed by serde name; authorizers keyed by authorizer name.
private final Map<String, SerdeAdmin> _serdeAdmins = new HashMap<>();
private final Map<String, Authorizer> _authorizers = new HashMap<>();
// Set by stop(); read by the event loop to decide when to exit.
private volatile boolean _shutdown = false;
/**
 * Constructor for coordinator
 * @param datastreamCache Cache to maintain all the datastreams in the cluster.
 * @param config Config properties to use while creating coordinator.
 * @throws DatastreamException if coordinator creation fails.
 */
public Coordinator(CachedDatastreamReader datastreamCache, Properties config) throws DatastreamException {
  // Delegate to the main constructor after parsing the raw properties into a CoordinatorConfig.
  this(datastreamCache, new CoordinatorConfig(config));
}
/**
 * Constructor for coordinator
 * @param datastreamCache Cache to maintain all the datastreams in the cluster.
 * @param config Coordinator config to use while creating coordinator.
 * @throws DatastreamException if coordinator creation fails.
 */
public Coordinator(CachedDatastreamReader datastreamCache, CoordinatorConfig config) throws DatastreamException {
  _datastreamCache = datastreamCache;
  _config = config;
  _clusterName = _config.getCluster();
  _heartbeatPeriod = Duration.ofMillis(config.getHeartbeatPeriodMs());
  // ZkAdapter bridges this coordinator to ZooKeeper; "this" is registered as the listener so
  // leadership/assignment callbacks are delivered back to this instance.
  _adapter = new ZkAdapter(_config.getZkAddress(), _clusterName, _config.getDefaultTransportProviderName(),
      _config.getZkSessionTimeout(), _config.getZkConnectionTimeout(), this);
  _eventQueue = new CoordinatorEventBlockingQueue();
  _eventThread = new CoordinatorEventProcessor();
  // Daemon so the JVM can exit even if the event thread has not terminated.
  _eventThread.setDaemon(true);
  _dynamicMetricsManager = DynamicMetricsManager.getInstance();
  _dynamicMetricsManager.registerGauge(MODULE, NUM_PAUSED_DATASTREAMS_GROUPS, PAUSED_DATASTREAMS_GROUPS::get);
  _dynamicMetricsManager.registerGauge(MODULE, IS_LEADER, () -> getIsLeader().getAsBoolean() ? 1 : 0);
  _dynamicMetricsManager.registerGauge(MODULE, MAX_PARTITION_COUNT_IN_TASK, MAX_PARTITION_COUNT::get);
  // Extract the sub-domain of properties destined for EventProducer instances.
  VerifiableProperties coordinatorProperties = new VerifiableProperties(_config.getConfigProperties());
  _eventProducerConfig = coordinatorProperties.getDomainProperties(EVENT_PRODUCER_CONFIG_DOMAIN);
  _cpProvider = new ZookeeperCheckpointProvider(_adapter);
  // Checkpoint-provider metrics may be null; only merge them when present.
  Optional.ofNullable(_cpProvider.getMetricInfos()).ifPresent(_metrics::addAll);
  _metrics.addAll(EventProducer.getMetricInfos());
}
/**
 * Start Coordinator (and all connectors).
 *
 * <p>Starts the event thread, connects to ZooKeeper, then starts every registered connector with
 * its checkpoint provider. Finally queues an assignment-change event (so assignments created during
 * a slow startup are not missed) and schedules the periodic heartbeat.
 */
public void start() {
  _log.info("Starting coordinator");
  _eventThread.start();
  _adapter.connect();
  for (String connectorType : _connectors.keySet()) {
    ConnectorInfo connectorInfo = _connectors.get(connectorType);
    ConnectorWrapper connector = connectorInfo.getConnector();

    // Creating a separate thread pool for making the onAssignmentChange calls to the connector
    _assignmentChangeThreadPool.put(connectorType, Executors.newSingleThreadExecutor());

    // populate the instanceName. We only know the instance name after _adapter.connect()
    connector.setInstanceName(getInstanceName());

    // make sure connector znode exists upon instance start. This way in a brand new cluster
    // we can inspect ZooKeeper and know what connectors are created
    _adapter.addConnectorType(connector.getConnectorType());

    // call connector::start API
    connector.start(connectorInfo.getCheckpointProvider());
  }
  // BUGFIX: previously this was logged inside the loop (once per connector, and before startup
  // had actually completed). Log it exactly once, after all connectors have started.
  _log.info("Coordinator started");

  // now that instance is started, make sure it doesn't miss any assignment created during
  // the slow startup
  _eventQueue.put(CoordinatorEvent.createHandleAssignmentChangeEvent());

  // Queue up one heartbeat per period with an initial delay of 3 periods
  _executor.scheduleAtFixedRate(() -> _eventQueue.put(CoordinatorEvent.HEARTBEAT_EVENT),
      _heartbeatPeriod.toMillis() * 3, _heartbeatPeriod.toMillis(), TimeUnit.MILLISECONDS);
}
/**
 * Stop coordinator (and all connectors)
 *
 * <p>Shutdown order: flag shutdown, unblock and join the event thread (interrupting if needed),
 * stop all connectors, shut down each task's event producer, then disconnect from ZooKeeper.
 */
public void stop() {
  _log.info("Stopping coordinator");
  _shutdown = true;

  // queue a NO_OP event to unblock eventThread if it is waiting on the queue
  _eventQueue.put(CoordinatorEvent.NO_OP_EVENT);

  // wait for eventThread to gracefully finish
  try {
    _eventThread.join(EVENT_THREAD_LONG_JOIN_TIMEOUT);
  } catch (InterruptedException e) {
    // Give up on an orderly shutdown if this thread itself is interrupted.
    _log.warn("Exception caught while waiting event thread to stop", e);
    return;
  }

  // interrupt the thread if it's not gracefully shutdown
  while (_eventThread.isAlive()) {
    try {
      _eventThread.interrupt();
      _eventThread.join(EVENT_THREAD_SHORT_JOIN_TIMEOUT);
    } catch (InterruptedException e) {
      _log.warn("Exception caught while stopping coordinator", e);
      return;
    }
  }

  // Stopping all the connectors so that they stop producing.
  for (String connectorType : _connectors.keySet()) {
    try {
      _connectors.get(connectorType).getConnector().stop();
    } catch (Exception ex) {
      // Best-effort: one failing connector must not block the rest of the shutdown.
      _log.warn(String.format(
          "Connector stop threw an exception for connectorType %s, " + "Swallowing it and continuing shutdown.",
          connectorType), ex);
    }
  }

  // Shutdown the event producer.
  for (DatastreamTask task : _assignedDatastreamTasks.values()) {
    ((EventProducer) task.getEventProducer()).shutdown();
  }

  _adapter.disconnect();
  _log.info("Coordinator stopped");
}
/**
 * Notify all instances in the cluster that some datastreams get updated. We need this because currently
 * Coordinator wouldn't watch the data change within a datastream. So they won't be able to react to
 * a datastream update unless explicitly get notified.
 */
public synchronized void broadcastDatastreamUpdate() {
  // Touching every instance's assignment node fires their ZooKeeper watches.
  _adapter.touchAllInstanceAssignments();
}
/**
 * @return this coordinator's instance name, as assigned by the ZooKeeper adapter after connect.
 */
public String getInstanceName() {
  return _adapter.getInstanceName();
}
/**
 * @return a live view of the datastream tasks currently assigned to this instance.
 */
public Collection<DatastreamTask> getDatastreamTasks() {
  return _assignedDatastreamTasks.values();
}
/**
 * {@inheritDoc}
 * There can only be one leader in a datastream cluster.
 */
@Override
public void onBecomeLeader() {
  _log.info("Coordinator::onBecomeLeader is called");
  // when an instance becomes a leader, make sure we don't miss new datastreams and
  // new assignment tasks that was not finished by the previous leader
  _eventQueue.put(CoordinatorEvent.createHandleDatastreamAddOrDeleteEvent());
  _eventQueue.put(CoordinatorEvent.createLeaderDoAssignmentEvent());
  _log.info("Coordinator::onBecomeLeader completed successfully");
}
/**
 * {@inheritDoc}
 * This method is called when a new datastream server is added or existing datastream server goes down.
 */
@Override
public void onLiveInstancesChange() {
  _log.info("Coordinator::onLiveInstancesChange is called");
  // Membership changed — the leader must redistribute tasks across the surviving instances.
  _eventQueue.put(CoordinatorEvent.createLeaderDoAssignmentEvent());
  _log.info("Coordinator::onLiveInstancesChange completed successfully");
}
/**
 * {@inheritDoc}
 * This method is called when a new datastream is created/deleted.
 */
@Override
public void onDatastreamAddOrDrop() {
  _log.info("Coordinator::onDatastreamAddOrDrop is called");
  // if there are new datastreams created, we need to trigger the topic creation logic
  _eventQueue.put(CoordinatorEvent.createHandleDatastreamAddOrDeleteEvent());
  _eventQueue.put(CoordinatorEvent.createLeaderDoAssignmentEvent());
  _log.info("Coordinator::onDatastreamAddOrDrop completed successfully");
}
/**
 * {@inheritDoc}
 *
 * <p>Invalidates the datastream cache, refreshes the datastream list on every locally assigned
 * task, and queues a datastream-change event so connectors are re-notified.
 */
@Override
public void onDatastreamUpdate() {
  _log.info("Coordinator::onDatastreamUpdate is called");
  // We need this synchronization to protect the updates on _assignedDatastreamTasks
  synchronized (_assignedDatastreamTasks) {
    // On datastream update the CachedDatastreamReader won't refresh its data, so we need to invalidate the cache
    _datastreamCache.invalidateAllCache();
    List<DatastreamGroup> datastreamGroups = _datastreamCache.getDatastreamGroups();
    // Refresh the datastream task
    _assignedDatastreamTasks.values().forEach(task -> {
      // Match each task to its group by task prefix; a missing group is logged, not fatal.
      Optional<DatastreamGroup> dg =
          datastreamGroups.stream().filter(x -> x.getTaskPrefix().equals(task.getTaskPrefix())).findFirst();
      if (dg.isPresent()) {
        ((DatastreamTaskImpl) task).setDatastreams(dg.get().getDatastreams());
      } else {
        _log.warn("Can't find datastream group for task {}", task);
      }
    });
  }
  _eventQueue.put(CoordinatorEvent.createHandleDatastreamChangeEvent());
  _log.info("Coordinator::onDatastreamUpdate completed successfully");
}
/**
 * onPartitionMovement is called when partition movement info has been put into zookeeper
 * @param notifyTimestamp timestamp of the movement notification, forwarded with the queued event
 */
@Override
public void onPartitionMovement(Long notifyTimestamp) {
  _log.info("Coordinator::onPartitionMovement is called");
  _eventQueue.put(CoordinatorEvent.createPartitionMovementEvent(notifyTimestamp));
  _log.info("Coordinator::onPartitionMovement completed successfully");
}
/**
 * {@inheritDoc}
 *
 * To handle assignment change, we need to take the following steps:
 * (1) get a list of all current assignment.
 * (2) inspect the task to find out which connectors are responsible for handling the changed assignment
 * (3) call corresponding connector API so that the connectors can handle the assignment changes.
 *
 */
@Override
public void onAssignmentChange() {
  _log.info("Coordinator::onAssignmentChange is called");
  // The heavy lifting happens asynchronously in handleAssignmentChange via the event thread.
  _eventQueue.put(CoordinatorEvent.createHandleAssignmentChangeEvent());
  _log.info("Coordinator::onAssignmentChange completed successfully");
}
/**
 * Handle a change in this instance's task assignment: read the assigned task names from ZooKeeper,
 * group the tasks by connector type, notify affected connectors (including connectors that lost
 * all their tasks), wait for the callbacks to complete, then persist the new assignment locally.
 *
 * @param isDatastreamUpdate true when triggered by a datastream update, which forces connector
 *                           callbacks even if the task list itself is unchanged
 * @throws TimeoutException if connector callbacks do not complete within ASSIGNMENT_TIMEOUT;
 *                          a retry event is queued before rethrowing
 */
private void handleAssignmentChange(boolean isDatastreamUpdate) throws TimeoutException {
  long startAt = System.currentTimeMillis();

  // when there is any change to the assignment for this instance. Need to find out what is the connector
  // type of the changed assignment, and then call the corresponding callback of the connector instance
  List<String> assignment = _adapter.getInstanceAssignment(_adapter.getInstanceName());
  _log.info("START: Coordinator::handleAssignmentChange. Instance: " + _adapter.getInstanceName() + ", assignment: "
      + assignment + " isDatastreamUpdate: " + isDatastreamUpdate);

  // all datastream tasks for all connector types
  Map<String, List<DatastreamTask>> currentAssignment = new HashMap<>();
  assignment.forEach(ds -> {
    DatastreamTask task = getDatastreamTask(ds);
    if (task != null) {
      // computeIfAbsent replaces the previous containsKey/put pair: same behavior, single lookup.
      currentAssignment.computeIfAbsent(task.getConnectorType(), k -> new ArrayList<>()).add(task);
    }
  });
  _log.info(printAssignmentByType(currentAssignment));

  //
  // diff the currentAssignment with last saved assignment _assignedDatastreamTasksByConnectorType and make sure
  // the affected connectors are notified through the callback. There are following cases:
  // (1) a connector is removed of all assignment. This means the connector type does not exist in
  // currentAssignment, but exist in the previous assignment in _assignedDatastreamTasksByConnectorType
  // (2) there are any changes of assignment for an existing connector type, including datastreamtasks
  // added or removed. We do not handle the case when datastreamtask is updated. This include the
  // case a connector previously doesn't have assignment but now has. This means the connector type
  // is not contained in currentAssignment, but contained in _assignedDatastreamTasksByConnectorType
  //

  // case (1), find connectors that now doesn't handle any tasks
  List<String> oldConnectorList = _assignedDatastreamTasks.values()
      .stream()
      .map(DatastreamTask::getConnectorType)
      .distinct()
      .collect(Collectors.toList());
  List<String> newConnectorList = new ArrayList<>(currentAssignment.keySet());

  List<String> deactivated = new ArrayList<>(oldConnectorList);
  deactivated.removeAll(newConnectorList);
  List<Future<Boolean>> assignmentChangeFutures = deactivated.stream()
      .map(connectorType -> dispatchAssignmentChangeIfNeeded(connectorType, new ArrayList<>(), isDatastreamUpdate))
      .filter(Objects::nonNull)
      .collect(Collectors.toList());

  // case (2) - Dispatch all the assignment changes in a separate thread
  assignmentChangeFutures.addAll(newConnectorList.stream()
      .map(connectorType -> dispatchAssignmentChangeIfNeeded(connectorType, currentAssignment.get(connectorType),
          isDatastreamUpdate))
      .filter(Objects::nonNull)
      .collect(Collectors.toList()));

  // Wait till all the futures are complete or timeout.
  Instant start = Instant.now();
  try {
    for (Future<Boolean> assignmentChangeFuture : assignmentChangeFutures) {
      // Overall deadline across all futures, not per-future.
      if (Duration.between(start, Instant.now()).compareTo(ASSIGNMENT_TIMEOUT) > 0) {
        throw new TimeoutException("Timeout doing assignment");
      }
      try {
        assignmentChangeFuture.get(ASSIGNMENT_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
      } catch (ExecutionException e) {
        _log.warn("onAssignmentChange call threw exception", e);
      }
    }
  } catch (TimeoutException e) {
    // if it's timeout then we will retry
    _log.warn("Timeout when doing the assignment", e);
    if (isDatastreamUpdate) {
      _eventQueue.put(CoordinatorEvent.createHandleDatastreamChangeEvent());
    } else {
      _eventQueue.put(CoordinatorEvent.createHandleAssignmentChangeEvent());
    }
    throw e;
  } catch (InterruptedException e) {
    _log.warn("onAssignmentChange call got interrupted", e);
  } finally {
    // Cancel anything still running; completed futures ignore cancellation.
    assignmentChangeFutures.forEach(future -> future.cancel(true));
  }

  // now save the current assignment
  _assignedDatastreamTasks.clear();
  _assignedDatastreamTasks.putAll(currentAssignment.values()
      .stream()
      .flatMap(Collection::stream)
      .collect(Collectors.toMap(DatastreamTask::getDatastreamTaskName, Function.identity())));

  long endAt = System.currentTimeMillis();
  _log.info(String.format("END: Coordinator::handleAssignmentChange, Duration: %d milliseconds", endAt - startAt));
  _dynamicMetricsManager.createOrUpdateMeter(MODULE, NUM_ASSIGNMENT_CHANGES, 1);
}
/**
 * Resolve a {@link DatastreamTask} by name. Returns the locally cached task when it is already
 * assigned to this instance; otherwise reads it from ZooKeeper and populates its datastreams from
 * the cached datastream groups.
 *
 * @param taskName name of the datastream task
 * @return the resolved task, or {@code null} if ZooKeeper has no such assignment for this instance
 * @throws IllegalStateException if the task exists but no datastream group matches its task prefix
 */
private DatastreamTask getDatastreamTask(String taskName) {
  if (_assignedDatastreamTasks.containsKey(taskName)) {
    return _assignedDatastreamTasks.get(taskName);
  } else {
    DatastreamTaskImpl task = _adapter.getAssignedDatastreamTask(_adapter.getInstanceName(), taskName);
    if (task != null) {
      // BUGFIX: the previous bare Optional.get() threw an uninformative NoSuchElementException
      // when the backing datastream group was missing from the cache; fail with context instead.
      DatastreamGroup dg = _datastreamCache.getDatastreamGroups()
          .stream()
          .filter(x -> x.getTaskPrefix().equals(task.getTaskPrefix()))
          .findFirst()
          .orElseThrow(() -> new IllegalStateException(
              String.format("No datastream group found with task prefix %s for task %s", task.getTaskPrefix(),
                  taskName)));
      task.setDatastreams(dg.getDatastreams());
    }
    return task;
  }
}
/**
 * Dispatch onAssignmentChange to the given connector on its dedicated thread, but only when the
 * assignment actually differs from the previous one (or when forced by a datastream update).
 * New tasks get event producers before dispatch; removed tasks are uninitialized after a
 * successful callback. On callback failure, error and retry events are queued.
 *
 * @param connectorType connector to notify
 * @param assignment new full task list for that connector (may be empty)
 * @param isDatastreamUpdate true to force the callback even when the task list is unchanged
 * @return a future resolving to true on success, false on failure; {@code null} when no dispatch
 *         was needed
 */
private Future<Boolean> dispatchAssignmentChangeIfNeeded(String connectorType, List<DatastreamTask> assignment,
    boolean isDatastreamUpdate) {
  ConnectorInfo connectorInfo = _connectors.get(connectorType);
  ConnectorWrapper connector = connectorInfo.getConnector();

  List<DatastreamTask> addedTasks = new ArrayList<>(assignment);
  List<DatastreamTask> removedTasks;
  List<DatastreamTask> oldAssignment = _assignedDatastreamTasks.values()
      .stream()
      .filter(t -> t.getConnectorType().equals(connectorType))
      .collect(Collectors.toList());

  // if there are any difference in the list of assignment. Note that if there are no difference
  // between the two lists, then the connector onAssignmentChange() is not called.
  addedTasks.removeAll(oldAssignment);
  oldAssignment.removeAll(assignment);
  removedTasks = oldAssignment;

  if (isDatastreamUpdate || !addedTasks.isEmpty() || !removedTasks.isEmpty()) {
    // Populate the event producers before calling the connector with the list of tasks.
    addedTasks.stream().filter(t -> t.getEventProducer() == null).forEach(this::initializeTask);

    // Dispatch the onAssignmentChange to the connector in a separate thread.
    return _assignmentChangeThreadPool.get(connectorType).submit(() -> {
      try {
        connector.onAssignmentChange(assignment);
        // Unassign tasks with producers
        removedTasks.forEach(this::uninitializeTask);
      } catch (Exception ex) {
        _log.warn(String.format("connector.onAssignmentChange for connector %s threw an exception, "
            + "Queuing up a new onAssignmentChange event for retry.", connectorType), ex);
        _eventQueue.put(CoordinatorEvent.createHandleInstanceErrorEvent(ExceptionUtils.getRootCauseMessage(ex)));
        // Requeue the matching event type so the failed notification is retried.
        if (isDatastreamUpdate) {
          _eventQueue.put(CoordinatorEvent.createHandleDatastreamChangeEvent());
        } else {
          _eventQueue.put(CoordinatorEvent.createHandleAssignmentChangeEvent());
        }
        return false;
      }
      return true;
    });
  }
  return null;
}
/**
 * Release per-task resources when a task is unassigned: detach its transport provider and
 * unassign it from the checkpoint provider.
 */
private void uninitializeTask(DatastreamTask t) {
  TransportProviderAdmin tpAdmin = _transportProviderAdmins.get(t.getTransportProviderName());
  tpAdmin.unassignTransportProvider(t);
  _cpProvider.unassignDatastreamTask(t);
}
/**
 * Prepare a newly assigned task: attach SerDes, create its EventProducer bound to the right
 * transport provider and checkpointing mode, and restore previously saved checkpoints.
 */
private void initializeTask(DatastreamTask task) {
  DatastreamTaskImpl taskImpl = (DatastreamTaskImpl) task;
  assignSerdes(taskImpl);
  // Connectors that do their own checkpointing bypass Brooklin-managed checkpoints.
  boolean customCheckpointing = _connectors.get(task.getConnectorType()).isCustomCheckpointing();
  TransportProviderAdmin tpAdmin = _transportProviderAdmins.get(task.getTransportProviderName());
  TransportProvider transportProvider = tpAdmin.assignTransportProvider(task);
  EventProducer producer =
      new EventProducer(task, transportProvider, _cpProvider, _eventProducerConfig, customCheckpointing);
  taskImpl.setEventProducer(producer);
  // Resume from the last committed positions, if any.
  Map<Integer, String> checkpoints = producer.loadCheckpoints(task);
  taskImpl.setCheckpoints(checkpoints);
}
/**
 * Resolve and attach the destination SerDes (key, payload/value, envelope) for the task, based on
 * the first datastream's destination configuration. A null set is assigned when the datastream
 * has no destination; individual SerDes stay null when unset or blank.
 */
private void assignSerdes(DatastreamTaskImpl datastreamTask) {
  Datastream datastream = datastreamTask.getDatastreams().get(0);
  SerDeSet destinationSet = null;
  if (datastream.hasDestination()) {
    DatastreamDestination destination = datastream.getDestination();
    SerDe envelopeSerDe = destination.hasEnvelopeSerDe() && StringUtils.isNotEmpty(destination.getEnvelopeSerDe())
        ? _serdeAdmins.get(destination.getEnvelopeSerDe()).assignSerde(datastreamTask)
        : null;
    SerDe keySerDe = destination.hasKeySerDe() && StringUtils.isNotEmpty(destination.getKeySerDe())
        ? _serdeAdmins.get(destination.getKeySerDe()).assignSerde(datastreamTask)
        : null;
    SerDe valueSerDe = destination.hasPayloadSerDe() && StringUtils.isNotEmpty(destination.getPayloadSerDe())
        ? _serdeAdmins.get(destination.getPayloadSerDe()).assignSerde(datastreamTask)
        : null;
    destinationSet = new SerDeSet(keySerDe, valueSerDe, envelopeSerDe);
  }
  datastreamTask.assignSerDes(destinationSet);
}
/**
 * Single-threaded dispatcher for all coordinator events. Synchronized so only one event is
 * processed at a time; any exception is counted and logged but never escapes (the event loop must
 * keep running).
 *
 * @param event the queued event to process
 */
protected synchronized void handleEvent(CoordinatorEvent event) {
  _log.info("START: Handle event " + event.getType() + ", Instance: " + _adapter.getInstanceName());

  try {
    switch (event.getType()) {
      case LEADER_DO_ASSIGNMENT:
        handleLeaderDoAssignment();
        break;

      case HANDLE_ASSIGNMENT_CHANGE:
        // synchronize between this and onDatastreamUpdate. See more comments there
        synchronized (_assignedDatastreamTasks) {
          handleAssignmentChange(false);
        }
        break;

      case HANDLE_DATASTREAM_CHANGE_WITH_UPDATE:
        // synchronize between this and onDatastreamUpdate. See more comments there
        synchronized (_assignedDatastreamTasks) {
          handleAssignmentChange(true);
        }
        break;

      case HANDLE_ADD_OR_DELETE_DATASTREAM:
        handleDatastreamAddOrDelete();
        break;

      case HANDLE_INSTANCE_ERROR:
        handleInstanceError((CoordinatorEvent.HandleInstanceError) event);
        break;

      case HEARTBEAT:
        handleHeartbeat();
        break;

      case LEADER_PARTITION_ASSIGNMENT:
        if (event.getEventMetadata() == null) {
          _log.error("Datastream group is not found when performing partition assignment, ignore the assignment");
        } else {
          performPartitionAssignment((String) event.getEventMetadata());
        }
        break;

      case LEADER_PARTITION_MOVEMENT:
        performPartitionMovement((Long) event.getEventMetadata());
        break;

      default:
        String errorMessage = String.format("Unknown event type %s.", event.getType());
        ErrorLogger.logAndThrowDatastreamRuntimeException(_log, errorMessage, null);
        break;
    }
  } catch (Exception e) {
    _dynamicMetricsManager.createOrUpdateMeter(MODULE, "handleEvent-" + event.getType(), NUM_ERRORS, 1);
    // BUGFIX: corrected the garbled message (was "ERROR: event + " with a stray '+ ').
    _log.error("ERROR: event " + event + " failed.", e);
  }

  _log.info("END: Handle event " + event);
}
// when we encounter an error, we need to persist the error message in ZooKeeper. We only persist the
// first 10 messages. Why we put this logic in event loop instead of synchronously handle it? This
// is because the same reason that can result in error can also result in the failure of persisting
// the error message.
private void handleInstanceError(CoordinatorEvent.HandleInstanceError event) {
  // The event carries the root-cause message produced at dispatch time.
  String msg = event.getEventData();
  _adapter.zkSaveInstanceError(msg);
}
/**
 * Increment a heartbeat counter as a way to report liveliness of the coordinator
 */
private void handleHeartbeat() {
  // A stalled counter indicates the (synchronized) event loop is stuck.
  _dynamicMetricsManager.createOrUpdateCounter(MODULE, NUM_HEARTBEATS, 1);
}
/**
 * Check if a datastream is either marked as deleting or its TTL has expired.
 * A non-numeric TTL/creation timestamp is logged and treated as "no TTL".
 */
private boolean isDeletingOrExpired(Datastream stream) {
  Map<String, String> metadata = stream.getMetadata();
  boolean ttlElapsed = false;

  // Check TTL only when both the TTL and the creation timestamp are present.
  if (metadata.containsKey(TTL_MS) && metadata.containsKey(CREATION_MS)) {
    try {
      long ttlMs = Long.parseLong(metadata.get(TTL_MS));
      long creationMs = Long.parseLong(metadata.get(CREATION_MS));
      ttlElapsed = System.currentTimeMillis() - creationMs >= ttlMs;
    } catch (NumberFormatException e) {
      _log.error("Ignoring TTL as some metadata is not numeric, CREATION_MS={}, TTL_MS={}",
          metadata.get(CREATION_MS), metadata.get(TTL_MS), e);
    }
  }

  return ttlElapsed || stream.getStatus() == DatastreamStatus.DELETING;
}
/**
 * This method performs two tasks:
 * 1) initializes destination for a newly created datastream and update it in ZooKeeper
 * 2) delete an existing datastream if it is marked as deleted or its TTL has expired.
 *
 * If #2 occurs, it also invalidates the datastream cache for the next assignment.
 *
 * This means TTL is enforced only for below events:
 * 1) new leader is elected
 * 2) a new stream is added
 * 3) an existing stream is deleted
 *
 * Note that expired streams are not handled during rebalancing which is okay because
 * if there are no more streams getting created, there is no pressure to delete streams
 * either. Also, expired streams are excluded from any future task assignments.
 */
private void handleDatastreamAddOrDelete() {
  boolean shouldRetry = false;

  // Get the list of all datastreams
  List<Datastream> allStreams = _datastreamCache.getAllDatastreams(true);

  // do nothing if there are zero datastreams
  if (allStreams.isEmpty()) {
    _log.warn("Received a new datastream event, but there were no datastreams");
    return;
  }

  for (Datastream ds : allStreams) {
    if (ds.getStatus() == DatastreamStatus.INITIALIZING) {
      try {
        if (DatastreamUtils.isConnectorManagedDestination(ds)) {
          _log.info("Connector will manage destination(s) for datastream {}, skipping destination creation.", ds);
        } else {
          createTopic(ds);
        }

        // Set the datastream status as ready for use (both producing and consumption)
        ds.setStatus(DatastreamStatus.READY);
        if (!_adapter.updateDatastream(ds)) {
          _log.warn("Failed to update datastream: {} after initializing. This datastream will not be scheduled for "
              + "producing events ", ds.getName());
          shouldRetry = true;
        }
      } catch (Exception e) {
        // Any failure during initialization is retried later rather than dropped.
        _log.warn("Failed to update the destination of new datastream {}", ds, e);
        shouldRetry = true;
      }
    } else if (isDeletingOrExpired(ds)) {
      _log.info("Trying to hard delete datastream {} (reason={})", ds,
          ds.getStatus() == DatastreamStatus.DELETING ? "deleting" : "expired");
      hardDeleteDatastream(ds, allStreams);
    }
  }

  if (shouldRetry) {
    _dynamicMetricsManager.createOrUpdateMeter(MODULE, "handleDatastreamAddOrDelete", NUM_RETRIES, 1);

    // If there are any failure, we will need to schedule retry if
    // there is no pending retry scheduled already.
    if (leaderDatastreamAddOrDeleteEventScheduled.compareAndSet(false, true)) {
      _log.warn("Schedule retry for handling new datastream");
      _executor.schedule(() -> {
        _eventQueue.put(CoordinatorEvent.createHandleDatastreamAddOrDeleteEvent());
        // Allow further retry scheduling
        leaderDatastreamAddOrDeleteEventScheduled.set(false);
      }, _config.getRetryIntervalMs(), TimeUnit.MILLISECONDS);
    }
  }

  _eventQueue.put(CoordinatorEvent.createLeaderDoAssignmentEvent());
}
/**
 * Permanently remove a datastream. Its tasks and destination topic are deleted only when no other
 * datastream shares the same task prefix (deduped streams keep the shared tasks alive); the
 * datastream node itself is always deleted.
 *
 * @param ds the datastream to hard delete
 * @param allStreams all datastreams in the cluster, used to detect deduped siblings
 */
private void hardDeleteDatastream(Datastream ds, List<Datastream> allStreams) {
  String taskPrefix;
  if (DatastreamUtils.containsTaskPrefix(ds)) {
    taskPrefix = DatastreamUtils.getTaskPrefix(ds);
  } else {
    // Older streams may predate the task-prefix metadata; derive it from the stream itself.
    taskPrefix = DatastreamTaskImpl.getTaskPrefix(ds);
  }
  Optional<Datastream> duplicateStream = allStreams.stream()
      .filter(DatastreamUtils::containsTaskPrefix)
      .filter(x -> !x.getName().equals(ds.getName()) && DatastreamUtils.getTaskPrefix(x).equals(taskPrefix))
      .findFirst();

  if (!duplicateStream.isPresent()) {
    _log.info(
        "No datastream left in the datastream group with taskPrefix {}. Deleting all tasks corresponding to the datastream.",
        taskPrefix);
    _adapter.deleteTasksWithPrefix(_connectors.keySet(), taskPrefix);
    deleteTopic(ds);
  } else {
    _log.info("Found duplicate datastream {} for the datastream to be deleted {}. Not deleting the tasks.",
        duplicateStream.get().getName(), ds.getName());
  }
  _adapter.deleteDatastream(ds.getName());
}
/**
 * Create the destination topic for a datastream via its transport provider, then stamp creation
 * time and (when the transport supports it) retention into the datastream's metadata.
 *
 * @param datastream datastream whose destination should be created
 * @return the destination connection string
 * @throws TransportException if the transport provider fails to create the destination
 */
private String createTopic(Datastream datastream) throws TransportException {
  _transportProviderAdmins.get(datastream.getTransportProviderName()).createDestination(datastream);

  // For deduped datastreams, all destination-related metadata have been copied by
  // populateDatastreamDestinationFromExistingDatastream().
  if (!datastream.getMetadata().containsKey(DatastreamMetadataConstants.DESTINATION_CREATION_MS)) {
    // Set destination creation time and retention
    datastream.getMetadata()
        .put(DatastreamMetadataConstants.DESTINATION_CREATION_MS, String.valueOf(Instant.now().toEpochMilli()));

    try {
      Duration retention = _transportProviderAdmins.get(datastream.getTransportProviderName()).getRetention(datastream);
      if (retention != null) {
        datastream.getMetadata()
            .put(DatastreamMetadataConstants.DESTINATION_RETENTION_MS, String.valueOf(retention.toMillis()));
      }
    } catch (UnsupportedOperationException e) {
      // Retention is optional; a transport without retention support just leaves it unset.
      _log.warn("Transport doesn't support mechanism to get retention, Unable to populate retention in datastream", e);
    }
  }
  return datastream.getDestination().getConnectionString();
}
/**
 * Best-effort delete of the datastream's destination topic. Skips deletion for BYOT and
 * connector-managed destinations; any failure is logged rather than propagated.
 */
private void deleteTopic(Datastream datastream) {
  try {
    if (DatastreamUtils.isUserManagedDestination(datastream)) {
      _log.info("BYOT(bring your own topic), topic will not be deleted");
      return;
    }
    if (DatastreamUtils.isConnectorManagedDestination(datastream)) {
      _log.info("Datastream contains connector-managed destinations, topic will not be deleted");
      return;
    }
    _transportProviderAdmins.get(datastream.getTransportProviderName()).dropDestination(datastream);
  } catch (Exception e) {
    _log.error("Runtime Exception while delete topic", e);
  }
}
/**
 * Fetch all assignable datastreams, grouped by task prefix into {@link DatastreamGroup}s.
 * Streams without a task prefix are logged and excluded.
 *
 * @return one DatastreamGroup per distinct task prefix among assignable streams
 */
private List<DatastreamGroup> fetchDatastreamGroups() {
  // Get all streams that are assignable. Assignable datastreams are the ones:
  // 1) has a valid destination
  // 2) status is READY or PAUSED, STOPPED or other datastream status will NOT get assigned
  // 3) TTL has not expired
  // Note: We do not need to flush the cache, because the datastreams should have been read as part of the
  // handleDatastreamAddOrDelete event (that should occur before handleLeaderDoAssignment)
  List<Datastream> allStreams = _datastreamCache.getAllDatastreams(false)
      .stream()
      .filter(datastream -> datastream.hasStatus() && (datastream.getStatus() == DatastreamStatus.READY
          || datastream.getStatus() == DatastreamStatus.PAUSED) && hasValidDestination(datastream)
          && !isDeletingOrExpired(datastream))
      .collect(Collectors.toList());
  Set<Datastream> invalidDatastreams =
      allStreams.stream().filter(s -> !DatastreamUtils.containsTaskPrefix(s)).collect(Collectors.toSet());
  if (!invalidDatastreams.isEmpty()) {
    _log.error("Datastreams {} are ignored during assignment because they didn't contain task prefixes",
        invalidDatastreams);
  }
  // Process only the streams that contain the taskPrefix.
  Map<String, List<Datastream>> streamsByTaskPrefix = allStreams.stream()
      .filter(s -> !invalidDatastreams.contains(s))
      .collect(Collectors.groupingBy(DatastreamUtils::getTaskPrefix, Collectors.toList()));
  // Iterate the grouped values directly instead of keySet() + get() round-trips.
  return streamsByTaskPrefix.values()
      .stream()
      .map(DatastreamGroup::new)
      .collect(Collectors.toList());
}
/**
 * Leader-only handler that (re)computes the task assignment for all live instances and
 * persists it to ZooKeeper. On success, tasks under dead instances and old unused tasks are
 * cleaned up; on failure, a single retry event is scheduled (deduped via
 * {@code leaderDoAssignmentScheduled}).
 */
private void handleLeaderDoAssignment() {
  boolean succeeded = true;
  List<String> liveInstances = Collections.emptyList();
  Map<String, Set<DatastreamTask>> previousAssignmentByInstance = Collections.emptyMap();
  Map<String, List<DatastreamTask>> newAssignmentsByInstance = Collections.emptyMap();
  try {
    List<DatastreamGroup> datastreamGroups = fetchDatastreamGroups();
    // Let connectors know which (active) datastreams exist before assigning.
    onDatastreamChange(datastreamGroups);
    _log.debug("handleLeaderDoAssignment: final datastreams for task assignment: {}", datastreamGroups);
    // get all current live instances
    liveInstances = _adapter.getLiveInstances();
    // Map between instance to tasks assigned to the instance.
    previousAssignmentByInstance = _adapter.getAllAssignedDatastreamTasks();
    // Map between Instance and the tasks
    newAssignmentsByInstance = performAssignment(liveInstances, previousAssignmentByInstance, datastreamGroups);
    // persist the assigned result to ZooKeeper. This means we will need to compare with the current
    // assignment and do remove and add zNodes accordingly. In the case of ZooKeeper failure (when
    // it failed to create or delete zNodes), we will do our best to continue the current process
    // and schedule a retry. The retry should be able to diff the remaining ZooKeeper work
    _adapter.updateAllAssignments(newAssignmentsByInstance);
  } catch (RuntimeException e) {
    _log.error("handleLeaderDoAssignment: runtime exception.", e);
    succeeded = false;
  }
  _log.info("handleLeaderDoAssignment: completed ");
  // Parameterized logging avoids rendering the (potentially huge) map when DEBUG is disabled.
  _log.debug("handleLeaderDoAssignment: new assignment: {}", newAssignmentsByInstance);
  // clean up tasks under dead instances if everything went well
  if (succeeded) {
    List<String> instances = new ArrayList<>(liveInstances);
    instances.add(PAUSED_INSTANCE);
    _adapter.cleanupDeadInstanceAssignments(instances);
    _adapter.cleanupOldUnusedTasks(previousAssignmentByInstance, newAssignmentsByInstance);
    _dynamicMetricsManager.createOrUpdateMeter(MODULE, NUM_REBALANCES, 1);
  }
  // schedule retry if failure
  if (!succeeded && !leaderDoAssignmentScheduled.get()) {
    _log.info("Schedule retry for leader assigning tasks");
    _dynamicMetricsManager.createOrUpdateMeter(MODULE, "handleLeaderDoAssignment", NUM_RETRIES, 1);
    leaderDoAssignmentScheduled.set(true);
    _executor.schedule(() -> {
      _eventQueue.put(CoordinatorEvent.createLeaderDoAssignmentEvent());
      leaderDoAssignmentScheduled.set(false);
    }, _config.getRetryIntervalMs(), TimeUnit.MILLISECONDS);
  }
}
/**
 * Assign the subscribed partitions to tasks for a particular datastream group.
 * <p>
 * Reads the current task assignment from ZooKeeper, asks the connector's assignment strategy to
 * redistribute the group's partitions across those tasks, and writes the result back. On
 * success, old unused tasks are cleaned up and metrics updated; on any failure a single retry
 * event is scheduled (deduped via {@code leaderPartitionAssignmentScheduled}).
 *
 * @param datastreamGroupName the datastreamGroup that needs to perform the partition assignment
 */
private void performPartitionAssignment(String datastreamGroupName) {
  boolean succeeded = false;
  Map<String, Set<DatastreamTask>> previousAssignmentByInstance = new HashMap<>();
  Map<String, List<DatastreamTask>> newAssignmentsByInstance = new HashMap<>();
  try {
    previousAssignmentByInstance = _adapter.getAllAssignedDatastreamTasks();
    // Work on a copy so the previous assignment is preserved for the cleanup diff below.
    Map<String, Set<DatastreamTask>> assignmentByInstance = new HashMap<>(previousAssignmentByInstance);
    // retrieve the datastreamGroups for validation
    DatastreamGroup toProcessDatastream =
        fetchDatastreamGroups().stream().filter(dg -> datastreamGroupName.equals(dg.getName())).findFirst().orElse(null);
    if (toProcessDatastream != null) {
      AssignmentStrategy strategy = _connectors.get(toProcessDatastream.getConnectorName()).getAssignmentStrategy();
      Connector connectorInstance = _connectors.get(toProcessDatastream.getConnectorName()).getConnector()
          .getConnectorInstance();
      Map<String, Optional<DatastreamGroupPartitionsMetadata>> datastreamPartitions =
          connectorInstance.getDatastreamPartitions();
      if (datastreamPartitions.containsKey(toProcessDatastream.getName())) {
        // An empty Optional means the connector's partition listener hasn't produced metadata
        // yet; surface that as a transient error so the retry path below kicks in.
        DatastreamGroupPartitionsMetadata subscribes = connectorInstance.getDatastreamPartitions()
            .get(toProcessDatastream.getName())
            .orElseThrow(() ->
                new DatastreamTransientException("Subscribed partition is not ready yet for datastream " +
                    toProcessDatastream.getName()));
        assignmentByInstance = strategy.assignPartitions(assignmentByInstance, subscribes);
      } else {
        // The datastream group will not found only when the datastream was just paused/removed but we happened to
        // handle the scheduled LEADER_PARTITION_EVENT. In either case we should just ignore and don't retry.
        _log.warn("partitions for {} is not found, ignore the partition assignment", toProcessDatastream.getName());
      }
    } else {
      _log.warn("datastream group {} is not active, ignore the partition assignment", datastreamGroupName);
    }
    // Convert the Set-valued working assignment into the List-valued form the adapter persists.
    for (String key : assignmentByInstance.keySet()) {
      newAssignmentsByInstance.put(key, new ArrayList<>(assignmentByInstance.get(key)));
    }
    _adapter.updateAllAssignments(newAssignmentsByInstance);
    _log.info("Partition assignment completed: datastreamGroup, assignment {} ", assignmentByInstance);
    succeeded = true;
  } catch (Exception ex) {
    _log.info("Partition assignment failed, Exception: ", ex);
    succeeded = false;
  }
  // schedule retry if failure
  if (succeeded) {
    _adapter.cleanupOldUnusedTasks(previousAssignmentByInstance, newAssignmentsByInstance);
    updateCounterForMaxPartitionInTask(newAssignmentsByInstance);
    _dynamicMetricsManager.createOrUpdateMeter(MODULE, NUM_PARTITION_ASSIGNMENTS, 1);
  } else if (!leaderPartitionAssignmentScheduled.get()) {
    _log.info("Schedule retry for leader assigning tasks");
    _dynamicMetricsManager.createOrUpdateMeter(MODULE, "handleLeaderPartitionAssignment", NUM_RETRIES, 1);
    leaderPartitionAssignmentScheduled.set(true);
    _executor.schedule(() -> {
      _eventQueue.put(CoordinatorEvent.createLeaderPartitionAssignmentEvent(datastreamGroupName));
      leaderPartitionAssignmentScheduled.set(false);
    }, _config.getRetryIntervalMs(), TimeUnit.MILLISECONDS);
  }
}
/**
 * Record the maximum number of partitions assigned to any single task across all instances
 * into the {@code MAX_PARTITION_COUNT} gauge backing value.
 *
 * @param assignments instance name to assigned-tasks mapping
 */
private void updateCounterForMaxPartitionInTask(Map<String, List<DatastreamTask>> assignments) {
  long maxPartitionCount = 0;
  for (List<DatastreamTask> tasks : assignments.values()) {
    // mapToInt(List::size) replaces the redundant map(List::size).mapToInt(v -> v) chain.
    maxPartitionCount = Math.max(maxPartitionCount,
        tasks.stream().map(DatastreamTask::getPartitionsV2).mapToInt(List::size).max().orElse(0));
  }
  _log.info("Max partition count assigned in the task {}", maxPartitionCount);
  MAX_PARTITION_COUNT.getAndSet(maxPartitionCount);
}
/**
 * Notify each connector of the active (non-paused) datastream groups it owns so partition
 * listening can be set up/torn down accordingly.
 */
private void onDatastreamChange(List<DatastreamGroup> datastreamGroups) {
  // Only active datastreams participate in partition listening.
  List<DatastreamGroup> activeGroups = datastreamGroups.stream()
      .filter(group -> !group.isPaused())
      .collect(Collectors.toList());
  _connectors.forEach((connectorType, connectorInfo) -> {
    List<DatastreamGroup> groupsForConnector = activeGroups.stream()
        .filter(group -> group.getConnectorName().equals(connectorType))
        .collect(Collectors.toList());
    connectorInfo.getConnector().getConnectorInstance().handleDatastream(groupsForConnector);
  });
}
/**
 * Move partitions based on the target assignment info stored in ZooKeeper.
 * <p>
 * For every connector: fetches the requested (target) partition movements, drops requests for
 * datastreams that are no longer live, applies the remaining movements through the connector's
 * assignment strategy, persists the new assignment, and finally clears the processed movement
 * requests. A {@link DatastreamTransientException} triggers a scheduled retry; any other
 * exception is logged and dropped (the caller can only verify completion by querying the
 * assignment, since movement is asynchronous).
 *
 * @param notifyTimestamp the timestamp when partition movement is triggered
 */
private void performPartitionMovement(Long notifyTimestamp) {
  boolean shouldRetry = true;
  Map<String, Set<DatastreamTask>> previousAssignmentByInstance = _adapter.getAllAssignedDatastreamTasks();
  Map<String, List<DatastreamTask>> newAssignmentsByInstance = new HashMap<>();
  try {
    // Work on a copy so the previous assignment is preserved for the cleanup diff below.
    Map<String, Set<DatastreamTask>> assignmentByInstance = new HashMap<>(previousAssignmentByInstance);
    List<DatastreamGroup> toCleanup = new ArrayList<>();
    for (String connectorType : _connectors.keySet()) {
      AssignmentStrategy strategy = _connectors.get(connectorType).getAssignmentStrategy();
      Connector connectorInstance = _connectors.get(connectorType).getConnector().getConnectorInstance();
      // Get the partition assignment information
      Map<String, Optional<DatastreamGroupPartitionsMetadata>> datastreamPartitions =
          connectorInstance.getDatastreamPartitions();
      // Get the datastream Group name which have the target assignment
      List<String> toMoveDatastream = _adapter.getDatastreamsNeedPartitionMovement(connectorType);
      // Fetch all live datastreamGroups
      List<DatastreamGroup> liveDatastreamGroups =
          fetchDatastreamGroups().stream().filter(group1 -> connectorType.equals(group1.getConnectorName())).collect(
              Collectors.toList());
      // clean up the datastreams if they are not in the live datastreams
      toMoveDatastream.stream().filter(dgName -> !liveDatastreamGroups.stream().map(DatastreamGroup::getName)
          .collect(Collectors.toList()).contains(dgName)).forEach(obsoleteDs ->
          _adapter.cleanUpPartitionMovement(connectorType, obsoleteDs, notifyTimestamp));
      // Filtered all live datastreamGroup as we process only datastream which have
      // both partition assignment info and the target assignment
      List<DatastreamGroup> toProcessedDatastreamGroups =
          liveDatastreamGroups.stream().filter(group2 -> toMoveDatastream.contains(group2.getName()))
              .filter(group3 -> datastreamPartitions.keySet().contains(group3.getName()))
              .collect(Collectors.toList());
      for (DatastreamGroup dg : toProcessedDatastreamGroups) {
        // Right now we fails the entire partition movement if any failure is encountered in any datastreamGroup
        // The behavior can be improved to enhance the isolation in partition movement from different datastreamGroups
        DatastreamGroupPartitionsMetadata subscribedPartitions = connectorInstance.getDatastreamPartitions().get(dg.getName())
            .orElseThrow(() -> new DatastreamTransientException("partition listener is not ready yet for datastream " + dg.getName()));
        Map<String, Set<String>> suggestedAssignment =
            _adapter.getPartitionMovement(dg.getConnectorName(), dg.getName(), notifyTimestamp);
        assignmentByInstance = strategy.movePartitions(assignmentByInstance, suggestedAssignment,
            subscribedPartitions);
        toCleanup.add(dg);
      }
    }
    // Convert the Set-valued working assignment into the List-valued form the adapter persists.
    for (String key : assignmentByInstance.keySet()) {
      newAssignmentsByInstance.put(key, new ArrayList<>(assignmentByInstance.get(key)));
    }
    _adapter.updateAllAssignments(newAssignmentsByInstance);
    //clean up stored target assignment after the assignment is updated
    for (DatastreamGroup dg : toCleanup) {
      _adapter.cleanUpPartitionMovement(dg.getConnectorName(), dg.getName(), notifyTimestamp);
    }
    _log.info("Partition movement completed: datastreamGroup, assignment {} ", assignmentByInstance);
    shouldRetry = false;
  } catch (DatastreamTransientException ex) {
    _log.warn("Partition movement failed, retry again after a configurable period", ex);
    shouldRetry = true;
  } catch (Exception ex) {
    // We do not retry if it is not transient exception. Unfortunately we don't have a good way to communicate to the
    // caller about individual failure as partition movement is an async process. A caller could only verify if the
    // request is completed by query the assignment
    _log.error("Partition movement failed, Exception: ", ex);
    _dynamicMetricsManager.createOrUpdateMeter(MODULE, "handleLeaderPartitionMovement", NUM_ERRORS, 1);
  }
  if (!shouldRetry) {
    _adapter.cleanupOldUnusedTasks(previousAssignmentByInstance, newAssignmentsByInstance);
    updateCounterForMaxPartitionInTask(newAssignmentsByInstance);
    _dynamicMetricsManager.createOrUpdateMeter(MODULE, NUM_PARTITION_MOVEMENTS, 1);
  } else {
    _log.info("Schedule retry for leader movement tasks");
    _dynamicMetricsManager.createOrUpdateMeter(MODULE, "handleLeaderPartitionMovement", NUM_RETRIES, 1);
    _executor.schedule(() -> {
      _eventQueue.put(CoordinatorEvent.createPartitionMovementEvent(notifyTimestamp));
    }, _config.getRetryIntervalMs(), TimeUnit.MILLISECONDS);
  }
}
/**
 * Compute the new task assignment for all connectors across the live instances.
 * Tasks belonging to paused datastream groups are parked under the virtual
 * {@code PAUSED_INSTANCE}; every assigned task gets a valid zkAdapter and any stale
 * PAUSED task status is reset to OK.
 *
 * @param liveInstances currently live coordinator instances
 * @param previousAssignmentByInstance existing instance-to-tasks assignment
 * @param datastreamGroups all assignable datastream groups
 * @return new instance-to-tasks assignment (including the paused-instance bucket)
 */
private Map<String, List<DatastreamTask>> performAssignment(List<String> liveInstances,
    Map<String, Set<DatastreamTask>> previousAssignmentByInstance, List<DatastreamGroup> datastreamGroups) {
  Map<String, List<DatastreamTask>> newAssignmentsByInstance = new HashMap<>();
  _log.info("handleLeaderDoAssignment: start");
  // Parameterized logging avoids rendering the (large) map unless DEBUG is enabled.
  _log.debug("handleLeaderDoAssignment: assignment before re-balancing: {}", previousAssignmentByInstance);
  Set<DatastreamGroup> pausedDatastreamGroups =
      datastreamGroups.stream().filter(DatastreamGroup::isPaused).collect(Collectors.toSet());
  PAUSED_DATASTREAMS_GROUPS.set(pausedDatastreamGroups.size());
  // If a datastream group is paused, park tasks with the virtual PausedInstance.
  List<DatastreamTask> pausedTasks = pausedTasks(pausedDatastreamGroups, previousAssignmentByInstance);
  if (!pausedTasks.isEmpty()) {
    _log.info("Paused Task count:{}; Task list: {}", pausedTasks.size(), pausedTasks);
  }
  newAssignmentsByInstance.put(PAUSED_INSTANCE, pausedTasks);
  for (String connectorType : _connectors.keySet()) {
    AssignmentStrategy strategy = _connectors.get(connectorType).getAssignmentStrategy();
    List<DatastreamGroup> datastreamsPerConnectorType = datastreamGroups.stream()
        .filter(x -> x.getConnectorName().equals(connectorType))
        .filter(g -> !(pausedDatastreamGroups.contains(g)))
        .collect(Collectors.toList());
    // Get the list of tasks per instance for the given connector type
    // We need to call assign even if the number of datastreams are empty, This is to make sure that
    // the assignments get cleaned up for the deleted datastreams.
    Map<String, Set<DatastreamTask>> tasksByConnectorAndInstance =
        strategy.assign(datastreamsPerConnectorType, liveInstances, previousAssignmentByInstance);
    for (String instance : tasksByConnectorAndInstance.keySet()) {
      newAssignmentsByInstance.computeIfAbsent(instance, (x) -> new ArrayList<>());
      // Add the tasks for this connector type to the instance
      tasksByConnectorAndInstance.get(instance).forEach(task -> {
        // Each task must have a valid zkAdapter
        ((DatastreamTaskImpl) task).setZkAdapter(_adapter);
        if (task.getStatus() != null && DatastreamTaskStatus.Code.PAUSED.equals(task.getStatus().getCode())) {
          // Removed the Paused Status.
          task.setStatus(DatastreamTaskStatus.ok());
        }
        newAssignmentsByInstance.get(instance).add(task);
      });
    }
  }
  return newAssignmentsByInstance;
}
/**
 * Collect the currently assigned tasks that belong to paused datastream groups, flipping any
 * OK (or unset) task status to PAUSED along the way.
 *
 * @param pausedDatastreamGroups groups whose tasks should be parked
 * @param currentlyAssignedDatastream current instance-to-tasks assignment
 * @return tasks belonging to the paused groups
 */
private List<DatastreamTask> pausedTasks(Collection<DatastreamGroup> pausedDatastreamGroups,
    Map<String, Set<DatastreamTask>> currentlyAssignedDatastream) {
  List<DatastreamTask> allAssignedTasks = currentlyAssignedDatastream.values().stream()
      .flatMap(Collection::stream)
      .collect(Collectors.toList());
  List<DatastreamTask> parkedTasks = new ArrayList<>();
  for (DatastreamGroup group : pausedDatastreamGroups) {
    for (DatastreamTask task : allAssignedTasks) {
      if (!group.belongsTo(task)) {
        continue;
      }
      if (task.getStatus() == null || DatastreamTaskStatus.Code.OK.equals(task.getStatus().getCode())) {
        // Mark the task paused so it is parked rather than re-assigned.
        task.setStatus(DatastreamTaskStatus.paused());
      }
      parkedTasks.add(task);
    }
  }
  return parkedTasks;
}
/**
 * Register a connector with the coordinator. A coordinator can host multiple connector types,
 * but at most one connector per type; registering a duplicate type throws.
 *
 * @param connectorName name (type) of the connector
 * @param connector connector implementation to register
 * @param strategy assignment strategy used to distribute this connector's datastream tasks
 * @param customCheckpointing true if the connector does its own checkpointing, in which case
 *                            the coordinator skips ZooKeeper checkpointing for it
 * @param deduper the deduper used by the connector
 * @param authorizerName name of the authorizer configured by the connector
 */
public void addConnector(String connectorName, Connector connector, AssignmentStrategy strategy,
    boolean customCheckpointing, DatastreamDeduper deduper, String authorizerName) {
  Validate.notNull(strategy, "strategy cannot be null");
  Validate.notEmpty(connectorName, "connectorName cannot be empty");
  Validate.notNull(connector, "Connector cannot be null");
  _log.info("Add new connector of type {}, strategy {} with custom checkpointing {} to coordinator", connectorName,
      strategy.getClass().getTypeName(), customCheckpointing);
  if (_connectors.containsKey(connectorName)) {
    String errorMessage = "A connector of type " + connectorName + " already exists.";
    _log.error(errorMessage);
    throw new IllegalArgumentException(errorMessage);
  }
  // Merge any connector-provided metrics into the coordinator's metric list.
  Optional.ofNullable(connector.getMetricInfos()).ifPresent(_metrics::addAll);
  // Partition changes reported by the connector trigger a leader partition-assignment event.
  connector.onPartitionChange(datastreamGroup ->
      _eventQueue.put(CoordinatorEvent.createLeaderPartitionAssignmentEvent(datastreamGroup.getName()))
  );
  ConnectorInfo connectorInfo =
      new ConnectorInfo(connectorName, connector, strategy, customCheckpointing, _cpProvider, deduper, authorizerName);
  _connectors.put(connectorName, connectorInfo);
  // Register common connector metrics.
  // Use connector name for the metrics, as there can be multiple connectors specified in the config that use
  // same connector class.
  _dynamicMetricsManager.registerGauge(connectorName, NUM_DATASTREAMS,
      () -> connectorInfo.getConnector().getNumDatastreams());
  _dynamicMetricsManager.registerGauge(connectorName, NUM_DATASTREAM_TASKS,
      () -> connectorInfo.getConnector().getNumDatastreamTasks());
  _metrics.add(new BrooklinGaugeInfo(MetricRegistry.name(connectorName, NUM_DATASTREAMS)));
  _metrics.add(new BrooklinGaugeInfo(MetricRegistry.name(connectorName, NUM_DATASTREAM_TASKS)));
}
/**
 * Validate updates to the given datastreams against their connector.
 *
 * @param datastreams list of datastreams whose updates are validated; all share one connector
 *                    (enforced by DatastreamResources upstream)
 * @throws DatastreamValidationException if any update is invalid
 */
public void validateDatastreamsUpdate(List<Datastream> datastreams) throws DatastreamValidationException {
  _log.info("About to validate datastreams update: " + datastreams);
  try {
    // DatastreamResources checks ensure we dont have more than one connector type in the updated datastream list
    String connectorName = datastreams.get(0).getConnectorName();
    ConnectorInfo connectorInfo = _connectors.get(connectorName);
    if (connectorInfo == null) {
      throw new DatastreamValidationException("Invalid connector: " + connectorName);
    }
    List<Datastream> existingForConnector = _datastreamCache.getAllDatastreams()
        .stream()
        .filter(d -> d.getConnectorName().equals(connectorName))
        .collect(Collectors.toList());
    connectorInfo.getConnector().validateUpdateDatastreams(datastreams, existingForConnector);
  } catch (Exception e) {
    // Count validation failures before propagating.
    _dynamicMetricsManager.createOrUpdateMeter(MODULE, "validateDatastreamsUpdate", NUM_ERRORS, 1);
    throw e;
  }
}
/**
 * Validate that partition assignment is managed by the connector for this datastream.
 *
 * @param datastream datastream which needs the verification
 * @throws DatastreamValidationException if the connector is unknown or does not support
 *                                       partition management
 */
public void validatePartitionAssignmentSupported(Datastream datastream) throws DatastreamValidationException {
  try {
    String connectorName = datastream.getConnectorName();
    ConnectorInfo connectorInfo = _connectors.get(connectorName);
    if (connectorInfo == null) {
      throw new DatastreamValidationException("Invalid connector: " + connectorName);
    }
    Connector connectorInstance = connectorInfo.getConnector().getConnectorInstance();
    if (connectorInstance.isPartitionManagementSupported()) {
      return;
    }
    String msg = String.format("Partition assignment is not managed by connector, datastream %s",
        datastream.getName());
    _log.error(msg);
    throw new DatastreamValidationException(msg);
  } catch (Exception e) {
    // Count validation failures before propagating.
    _dynamicMetricsManager.createOrUpdateMeter(MODULE, "isPartitionAssignmentSupported", NUM_ERRORS, 1);
    throw e;
  }
}
/**
 * Check that the given datastream update type is supported by the datastream's connector.
 *
 * @param datastream datastream to check against
 * @param updateType type of datastream update to validate
 * @throws DatastreamValidationException if the connector is unknown or rejects the update type
 */
public void isDatastreamUpdateTypeSupported(Datastream datastream, DatastreamConstants.UpdateType updateType)
    throws DatastreamValidationException {
  _log.info("About to validate datastream update type {} for datastream {}", updateType, datastream);
  try {
    String connectorName = datastream.getConnectorName();
    ConnectorInfo connectorInfo = _connectors.get(connectorName);
    if (connectorInfo == null) {
      // Use the already-extracted connectorName for consistency with the sibling validators.
      throw new DatastreamValidationException("Invalid connector: " + connectorName);
    }
    if (!connectorInfo.getConnector().isDatastreamUpdateTypeSupported(datastream, updateType)) {
      throw new DatastreamValidationException(
          String.format("Datastream update of type : %s for datastream: %s is not supported by connector: %s",
              updateType, datastream.getName(), connectorName));
    }
  } catch (Exception e) {
    // Count validation failures before propagating.
    _dynamicMetricsManager.createOrUpdateMeter(MODULE, "isDatastreamUpdateTypeSupported", NUM_ERRORS, 1);
    throw e;
  }
}
/**
 * Initialize the datastream. Datastream management service calls this before writing the
 * datastream into ZooKeeper. Ensures the source has sufficient details, rejects duplicate
 * names, applies the default transport provider, enforces authorization (when configured),
 * and lets the connector and deduper initialize the datastream and its destination.
 *
 * @param datastream datastream for validation; mutated in place (status, transport provider,
 *                   destination, metadata)
 * @throws DatastreamValidationException if the connector is unknown, authorization is
 *                                       misconfigured, or connector-level validation fails
 */
public void initializeDatastream(Datastream datastream) throws DatastreamValidationException {
  datastream.setStatus(DatastreamStatus.INITIALIZING);
  String connectorName = datastream.getConnectorName();
  ConnectorInfo connectorInfo = _connectors.get(connectorName);
  if (connectorInfo == null) {
    String errorMessage = "Invalid connector: " + connectorName;
    _log.error(errorMessage);
    throw new DatastreamValidationException(errorMessage);
  }
  ConnectorWrapper connector = connectorInfo.getConnector();
  DatastreamDeduper deduper = connectorInfo.getDatastreamDeduper();
  // Changing a non-flush cache version to flush version to avoid errors in deduping datastreams which
  // should be deduped, but fail to due to being created back to back and ZK client not syncing with master
  List<Datastream> allDatastreams = _datastreamCache.getAllDatastreams(true)
      .stream()
      .filter(d -> d.getConnectorName().equals(connectorName))
      .collect(Collectors.toList());
  // If datastream of name already exists return error
  if (allDatastreams.stream().anyMatch(x -> x.getName().equals(datastream.getName()))) {
    String errMsg = String.format("Datastream with name %s already exists", datastream.getName());
    _log.error(errMsg);
    throw new DatastreamAlreadyExistsException(errMsg);
  }
  // Fill in the cluster-default transport provider if the stream didn't specify one.
  if (!StringUtils.isEmpty(_config.getDefaultTransportProviderName())) {
    if (!datastream.hasTransportProviderName() || StringUtils.isEmpty(datastream.getTransportProviderName())) {
      datastream.setTransportProviderName(_config.getDefaultTransportProviderName());
    }
  }
  try {
    if (connectorInfo.getAuthorizerName().isPresent()) {
      Authorizer authz = _authorizers.getOrDefault(connectorInfo.getAuthorizerName().get(), null);
      if (authz == null) {
        String errMsg = String.format("No authorizer '%s' was configured", connectorInfo.getAuthorizerName().get());
        _log.error(errMsg);
        throw new DatastreamValidationException(errMsg);
      }
      // Security principals are passed in through OWNER metadata
      // DatastreamResources has validated OWNER key is present
      String principal = datastream.getMetadata().get(DatastreamMetadataConstants.OWNER_KEY);
      // CREATE is already verified through the SSL layer of the HTTP framework (optional)
      // READ is the operation for datastream source-level authorization
      if (!authz.authorize(datastream, Authorizer.Operation.READ, () -> principal)) {
        String errMsg =
            String.format("Consumer %s has not been approved for %s over %s", principal, datastream.getSource(),
                datastream.getTransportProviderName());
        _log.warn(errMsg);
        throw new AuthorizationException(errMsg);
      }
    }
    // Connector hooks: initialize, then destination setup (dedupe/BYOT aware), then post-init.
    connector.initializeDatastream(datastream, allDatastreams);
    initializeDatastreamDestination(connector, datastream, deduper, allDatastreams);
    connector.postDatastreamInitialize(datastream, allDatastreams);
  } catch (Exception e) {
    _dynamicMetricsManager.createOrUpdateMeter(MODULE, "initializeDatastream", NUM_ERRORS, 1);
    throw e;
  }
  datastream.getMetadata().putIfAbsent(CREATION_MS, String.valueOf(Instant.now().toEpochMilli()));
}
/**
 * Resolve and initialize the destination of a new datastream.
 * <p>
 * If the stream allows topic reuse and has no destination yet, attempt to dedupe against an
 * existing datastream (copying its destination and task prefix). For BYOT streams, reject the
 * creation if another stream already uses the same destination. Otherwise create a fresh
 * destination via the stream's transport provider and assign a new task prefix.
 *
 * @param connector wrapper for the connector owning the datastream
 * @param datastream the new datastream; mutated in place (destination, metadata)
 * @param deduper deduper used to find a reusable existing datastream
 * @param allDatastreams all existing datastreams for this connector
 * @throws DatastreamValidationException if the BYOT destination is taken or the transport
 *                                       provider is undefined
 */
private void initializeDatastreamDestination(ConnectorWrapper connector, Datastream datastream,
    DatastreamDeduper deduper, List<Datastream> allDatastreams) throws DatastreamValidationException {
  Optional<Datastream> existingDatastream = Optional.empty();
  // Dedupe datastream only when its destination is not populated and allows reuse
  if (!hasValidDestination(datastream) && isReuseAllowed(datastream)) {
    existingDatastream = deduper.findExistingDatastream(datastream, allDatastreams);
  }
  // For a BYOT datastream, check that the destination is not already in use by other streams
  if (DatastreamUtils.isUserManagedDestination(datastream)) {
    List<Datastream> sameDestinationDatastreams = allDatastreams.stream()
        .filter(
            ds -> ds.getDestination().getConnectionString().equals(datastream.getDestination().getConnectionString()))
        .collect(Collectors.toList());
    if (!sameDestinationDatastreams.isEmpty()) {
      String datastreamNames = sameDestinationDatastreams.stream()
          .map(Datastream::getName)
          .collect(Collectors.joining(", "));
      String errMsg = String.format("Cannot create a BYOT datastream where the destination is being used by other datastream(s): %s",
          datastreamNames);
      _log.error(errMsg);
      throw new DatastreamValidationException(errMsg);
    }
  }
  if (existingDatastream.isPresent()) {
    // Reuse the deduped stream's destination and destination-related metadata.
    populateDatastreamDestinationFromExistingDatastream(datastream, existingDatastream.get());
  } else {
    if (!_transportProviderAdmins.containsKey(datastream.getTransportProviderName())) {
      throw new DatastreamValidationException(
          String.format("Transport provider \"%s\" is undefined", datastream.getTransportProviderName()));
    }
    String destinationName = connector.getDestinationName(datastream);
    _transportProviderAdmins.get(datastream.getTransportProviderName())
        .initializeDestinationForDatastream(datastream, destinationName);
    // Populate the task prefix if it is not already present.
    if (!datastream.getMetadata().containsKey(DatastreamMetadataConstants.TASK_PREFIX)) {
      datastream.getMetadata()
          .put(DatastreamMetadataConstants.TASK_PREFIX, DatastreamTaskImpl.getTaskPrefix(datastream));
    }
    _log.info("Datastream {} has an unique source or topicReuse is set to true, Assigning a new destination {}",
        datastream.getName(), datastream.getDestination());
  }
}
/**
 * Copy destination, destination-related metadata, task prefix, and (if applicable) the PAUSED
 * status from an existing (deduped) datastream onto the new one.
 */
private void populateDatastreamDestinationFromExistingDatastream(Datastream datastream, Datastream existingStream) {
  datastream.setDestination(existingStream.getDestination());
  // Copy over every destination-related ("system destination") metadata entry.
  Map<String, String> existingMetadata = existingStream.getMetadata();
  for (Map.Entry<String, String> entry : existingMetadata.entrySet()) {
    if (entry.getKey().startsWith(SYSTEM_DESTINATION_PREFIX)) {
      datastream.getMetadata().put(entry.getKey(), entry.getValue());
    }
  }
  // If the existing datastream group is paused, also pause this datastream.
  // This is to avoid the creation of a datastream to RESUME event production.
  if (existingStream.getStatus().equals(DatastreamStatus.PAUSED)) {
    datastream.setStatus(DatastreamStatus.PAUSED);
  }
  // Inherit the group's task prefix so the new stream joins the same task group.
  datastream.getMetadata().put(DatastreamMetadataConstants.TASK_PREFIX,
      existingMetadata.get(DatastreamMetadataConstants.TASK_PREFIX));
}
/**
 * Return the coordinator's metric infos (coordinator-level meters/gauges plus any previously
 * registered connector/transport metrics), as an unmodifiable view of {@code _metrics}.
 */
@Override
public List<BrooklinMetricInfo> getMetricInfos() {
  // NOTE(review): each invocation appends these entries to _metrics again; if _metrics is a
  // plain list, repeated calls accumulate duplicates -- confirm callers invoke this only once.
  for (String meterName : new String[]{NUM_REBALANCES, NUM_ASSIGNMENT_CHANGES, NUM_PARTITION_ASSIGNMENTS,
      NUM_PARTITION_MOVEMENTS}) {
    _metrics.add(new BrooklinMeterInfo(buildMetricName(MODULE, meterName)));
  }
  _metrics.add(new BrooklinGaugeInfo(buildMetricName(MODULE, MAX_PARTITION_COUNT_IN_TASK)));
  _metrics.add(new BrooklinMeterInfo(getDynamicMetricPrefixRegex(MODULE) + NUM_ERRORS));
  _metrics.add(new BrooklinMeterInfo(getDynamicMetricPrefixRegex(MODULE) + NUM_RETRIES));
  _metrics.add(new BrooklinCounterInfo(buildMetricName(MODULE, NUM_HEARTBEATS)));
  _metrics.add(new BrooklinGaugeInfo(buildMetricName(MODULE, NUM_PAUSED_DATASTREAMS_GROUPS)));
  _metrics.add(new BrooklinGaugeInfo(buildMetricName(MODULE, IS_LEADER)));
  return Collections.unmodifiableList(_metrics);
}
/**
 * Get the name of the Brooklin cluster this coordinator belongs to.
 *
 * @return the cluster name configured for this coordinator
 */
public String getClusterName() {
  return _clusterName;
}
/**
 * Register a transport provider that the coordinator can assign to datastreams it creates,
 * merging any provider-supplied metrics into the coordinator's metric list.
 *
 * @param transportProviderName name of transport provider
 * @param admin instance of TransportProviderAdmin that the coordinator can assign
 */
public void addTransportProvider(String transportProviderName, TransportProviderAdmin admin) {
  _transportProviderAdmins.put(transportProviderName, admin);
  // getMetricInfos() may return null; only merge when metrics are actually provided.
  List<BrooklinMetricInfo> adminMetrics = admin.getMetricInfos();
  if (adminMetrics != null) {
    _metrics.addAll(adminMetrics);
  }
}
/**
 * Add a Serde that the coordinator can assign to datastreams it creates.
 *
 * @param serdeName name of the Serde; used later as the lookup key
 * @param admin instance of SerdeAdmin that the coordinator can assign
 */
public void addSerde(String serdeName, SerdeAdmin admin) {
  _serdeAdmins.put(serdeName, admin);
}
/**
 * Get a boolean supplier which can be queried to check if the current
 * Coordinator instance is a leader in the Brooklin server cluster. This
 * allows other parts of the server to perform cluster-level operations only
 * on the leader.
 *
 * @return supplier that reflects the live leader-election state of this instance
 */
public BooleanSupplier getIsLeader() {
  // Delegates to the adapter's leader-election state; evaluated lazily at call time.
  return _adapter::isLeader;
}
/**
 * Register an authorizer implementation used to enforce ACLs on datastream CRUD operations.
 * Registering a duplicate name logs a warning and overwrites the previous entry.
 */
public void addAuthorizer(String name, Authorizer authorizer) {
  Validate.notNull(authorizer, "null authorizer");
  if (_authorizers.containsKey(name)) {
    _log.warn("Registering duplicate authorizer with name={}, auth={}", name, authorizer);
  }
  _authorizers.put(name, authorizer);
  // Merge any authorizer-provided metrics into the coordinator's metric list.
  if (authorizer instanceof MetricsAware) {
    List<BrooklinMetricInfo> authorizerMetrics = ((MetricsAware) authorizer).getMetricInfos();
    if (authorizerMetrics != null) {
      _metrics.addAll(authorizerMetrics);
    }
  }
}
// Helper for logging: renders the assignment as "header\nkey: tasks\nkey: tasks" with no
// trailing newline (for an empty map, just the header).
private String printAssignmentByType(Map<String, List<DatastreamTask>> assignment) {
  String header = "Current assignment for instance: " + getInstanceName() + ":";
  if (assignment.isEmpty()) {
    // Matches the original trailing-newline strip: nothing follows the header.
    return header;
  }
  String body = assignment.entrySet().stream()
      .map(entry -> entry.getKey() + ": " + entry.getValue())
      .collect(Collectors.joining("\n"));
  return header + "\n" + body;
}
/**
 * Get connector by name.
 *
 * @param name name of the connector
 * @return instance of the connector (if present), null otherwise
 */
public Connector getConnector(String name) {
  ConnectorInfo connectorInfo = _connectors.get(name);
  return connectorInfo == null ? null : connectorInfo.getConnector().getConnectorInstance();
}
// Dedicated worker thread that drains the coordinator's event queue and
// dispatches each event to handleEvent(). The loop exits when the coordinator
// is shut down (_shutdown) or the thread is interrupted.
private class CoordinatorEventProcessor extends Thread {
@Override
public void run() {
_log.info("START CoordinatorEventProcessor thread");
while (!_shutdown && !isInterrupted()) {
try {
// Blocks until an event is available. The null-check below is
// defensive; take() on a standard BlockingQueue never returns null
// — TODO confirm the concrete queue type.
CoordinatorEvent event = _eventQueue.take();
if (event != null) {
handleEvent(event);
}
} catch (InterruptedException e) {
// Restore the interrupt flag so the loop condition observes it and
// the thread terminates cleanly.
_log.warn("CoordinatorEventProcessor interrupted", e);
interrupt();
} catch (Exception t) {
// Swallow all other failures so one bad event cannot kill the
// processor thread; the error is logged and processing continues.
_log.error("CoordinatorEventProcessor failed", t);
}
}
_log.info("END CoordinatorEventProcessor");
}
}
}
|
package com.bt.ahsanzaman.mapsample.ui.main.adapter;
import android.support.v7.widget.RecyclerView;
import android.text.Html;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.TextView;
import com.bt.ahsanzaman.mapsample.R;
import com.bt.ahsanzaman.mapsample.domain.Steps;
import java.util.ArrayList;
import java.util.List;
import butterknife.BindView;
import butterknife.ButterKnife;
/**
 * Created by Ahsan Zaman on 11-06-2017.
 *
 * RecyclerView adapter that renders turn-by-turn direction steps. Each row
 * shows a 1-based step number and the HTML-formatted instruction text.
 */
public class DirectionsAdapter extends RecyclerView.Adapter<DirectionsHolder> {
    // Backing list of steps; mutated in place by the setItems overloads.
    private final ArrayList<Steps> mSteps;
    // Position of the route whose steps are currently displayed; -1 = none yet.
    private int mPosition;

    public DirectionsAdapter(ArrayList<Steps> steps) {
        // Robustness: never keep a null backing list, so the setItems
        // overloads can clear()/addAll() unconditionally.
        mSteps = steps == null ? new ArrayList<Steps>() : steps;
        mPosition = -1;
    }

    @Override
    public DirectionsHolder onCreateViewHolder(ViewGroup parent, int viewType) {
        LayoutInflater inflater = LayoutInflater.from(parent.getContext());
        View view = inflater.inflate(R.layout.direction_item, parent, false);
        return new DirectionsHolder(view);
    }

    @Override
    public void onBindViewHolder(DirectionsHolder holder, int position) {
        Steps step = mSteps.get(position);
        if (step != null) {
            // Instructions contain HTML markup; render it rather than show raw tags.
            holder.mDirectionsText.setText(Html.fromHtml(step.getInstructions()), TextView.BufferType.NORMAL);
            // 1-based serial number, e.g. "3."
            holder.mDirectionsSNo.setText((position + 1) + ".");
        }
    }

    @Override
    public int getItemCount() {
        return mSteps == null ? 0 : mSteps.size();
    }

    /**
     * Replace the displayed steps.
     * Bug fix: the original mutated the backing list without calling
     * {@link #notifyDataSetChanged()}, so the RecyclerView never refreshed.
     */
    public void setItems(ArrayList<Steps> steps) {
        mSteps.clear();
        if (steps != null) {
            mSteps.addAll(steps);
        }
        notifyDataSetChanged();
    }

    /**
     * Replace the displayed steps for the route at {@code position}; a repeat
     * call with the same position keeps the current list (avoids redundant work).
     * Bug fix: the original never recorded {@code position}, so the
     * same-position guard could never take effect.
     */
    public void setItems(List<Steps> steps, int position) {
        if (mPosition != position) {
            mPosition = position;
            mSteps.clear();
            if (steps != null) {
                mSteps.addAll(steps);
            }
        }
        notifyDataSetChanged();
    }
}
/**
 * ViewHolder for a single direction step row. Views are injected by
 * ButterKnife from the direction_item layout.
 */
class DirectionsHolder extends RecyclerView.ViewHolder{
// HTML-formatted instruction text for the step.
@BindView(R.id.directions_text)
TextView mDirectionsText;
// 1-based serial number label for the step.
@BindView(R.id.directions_s_no)
TextView mDirectionsSNo;
public DirectionsHolder(View itemView) {
super(itemView);
// Bind the @BindView-annotated fields from the inflated row view.
ButterKnife.bind(this, itemView);
}
}
|
package org.jeecg.modules.system.util;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.net.ftp.FTP;
import org.apache.commons.net.ftp.FTPClient;
import org.apache.commons.net.ftp.FTPReply;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.multipart.MultipartFile;
import java.io.IOException;
import java.io.InputStream;
import java.text.SimpleDateFormat;
import java.util.Date;
import static org.apache.shiro.web.servlet.Cookie.ROOT_PATH;
/**
 * @ClassName: FtpClientUtil
 * @Description: Static FTP helper built on Apache Commons Net: uploads
 *               multipart files under a date-based remote directory and
 *               removes files by URL, sharing one static FTPClient.
 * @Author: lijianwei
 * @DATE: 2020/9/28 10:11
 **/
@Slf4j
//@Configuration
public class FtpClientUtil {
// Base remote directory for all uploads.
private static final String FILE_PAKAGE = "/jeecg/upload/";
// Date sub-path (e.g. "202009/28/") computed once at class-load time.
// NOTE(review): never refreshed, so a long-running process keeps uploading
// into the load-day directory — confirm intended.
private static String timePath = new SimpleDateFormat("yyyyMM/dd/").format(new Date());
private static int port;// FTP server port
private static String host;// FTP server host
private static String userName;// FTP user name
private static String password;// FTP password
private static final int CONNECTTIMEOUT = 3000;// connect timeout (ms)
private static final String ENCODE = "UTF-8";
private static final String DEFAULT_ENCODE = "iso-8859-1";
// NOTE(review): Spring does not inject @Value through static setters, and
// @Configuration above is commented out so this class is not a bean — these
// fields are likely never populated; verify how configuration reaches them.
@Value("${jeecg.ftpclient.port}")
public static void setPort(int port) {
FtpClientUtil.port = port;
}
@Value("${jeecg.ftpclient.host}")
public static void setHost(String host) {
FtpClientUtil.host = host;
}
@Value("${jeecg.ftpclient.userName}")
public static void setUserName(String userName) {
FtpClientUtil.userName = userName;
}
@Value("${jeecg.ftpclient.password}")
public static void setPassword(String password) {
FtpClientUtil.password = password;
}
// NOTE(review): initialized eagerly during class loading, i.e. before any
// setter above could possibly run, so connect() sees default host/port.
// Also a single shared client is not safe for concurrent uploads — confirm.
private static FTPClient ftpClient = getFtpClient();
// Returns the cached client, or connects and logs in a new one. Returns
// null on refused connection, failed login, or any exception (note: the
// static field may be left holding a half-initialized client in that case).
private static FTPClient getFtpClient() {
if (ftpClient != null) {
return ftpClient;
} else {
ftpClient = new FTPClient();
ftpClient.setConnectTimeout(CONNECTTIMEOUT);
try {
ftpClient.connect(host, port);
int reply = ftpClient.getReplyCode();
if (!FTPReply.isPositiveCompletion(reply)) {
ftpClient.disconnect();
log.warn("--------------FTPServer refused connection------------");
return null;
}
boolean result = ftpClient.login(userName, password);
if (!result) {
// NOTE(review): logs the FTP password in clear text — should be removed.
log.error("ftpClient登陆失败! userName:{}, password:{}", userName, password);
return null;
}
ftpClient.setFileType(FTP.BINARY_FILE_TYPE);
ftpClient.setBufferSize(1024);
ftpClient.setControlEncoding("UTF-8");
// Passive mode so data connections work from behind client-side NAT/firewalls.
ftpClient.enterLocalPassiveMode();
return ftpClient;
} catch (Exception e) {
log.error(ExceptionUtil.getErrorStack(e));
return null;
}
}
}
// Uploads a multipart file under its original file name; returns the remote
// path on success, null on failure.
public static String upload(MultipartFile file) {
InputStream in = null;
try {
in = file.getInputStream();
} catch (IOException e) {
log.error(ExceptionUtil.getErrorStack(e));
return null;
}
return upload(file.getOriginalFilename(), in);
}
// Uploads a stream into the default date-based directory.
public static String upload(String fileName, InputStream in) {
return upload(FILE_PAKAGE + timePath, fileName, in);
}
// Uploads a stream into the given remote directory. Returns the remote path
// (always FILE_PAKAGE + timePath + fileName, regardless of direc) or null.
// The input stream is always closed before returning.
public static String upload(String direc, String fileName, InputStream input) {
if (null == input || StringUtils.isEmpty(fileName) || StringUtils.isEmpty(direc)) {
log.warn("--------------------文件信息不能为空------------------------");
return null;
}
if (ftpClient == null) {
log.info("--------------ftpclient is null----------------");
return null;
}
try {
createDirectory(direc);
} catch (IOException e) {
log.error("------------------创建ftp目录失败:{}--------------------", direc);
return null;
}
try {
// Re-encode the file name so multibyte names survive the FTP control channel.
boolean result = ftpClient.storeFile(new String(fileName.getBytes(ENCODE), DEFAULT_ENCODE), input);
if (result) {
return FILE_PAKAGE + timePath + fileName;
}
} catch (Exception e) {
log.error("上传文件{}至ftp失败:{}", fileName, ExceptionUtil.getErrorStack(e));
} finally {
try {
input.close();
} catch (IOException e2) {
e2.printStackTrace();
}
// NOTE(review): explicit System.gc() is discouraged and almost certainly
// unnecessary here — consider removing.
System.gc();
}
return null;
}
// Deletes the remote file addressed by the given URL; true on success.
public boolean removeFile(String url) throws Exception {
if (ftpClient == null) {
log.info("--------------ftpclient is null----------------");
return false;
}
try {
// NOTE(review): the magic offset 9 does not match FILE_PAKAGE.length()
// (14), so the computed path keeps part of the prefix — verify against
// the URLs actually passed by callers.
String path = url.substring(url.indexOf(FILE_PAKAGE) + 9);
boolean suc = ftpClient.deleteFile(new String(path.getBytes(ENCODE), DEFAULT_ENCODE));
if (!suc) {
log.info("---------------删除文件:{} 失败!------------------", path);
} else {
log.info("-------------------删除文件:{} 成功!----------------", path);
return true;
}
} catch (Exception e) {
log.error("删除文件异常:{}", ExceptionUtil.getErrorStack(e));
}
return false;
}
// Creates each path segment in turn and descends into it.
// NOTE(review): a leading "/" produces an empty first segment (mkd("")),
// and the client's working directory is left at the deepest level after
// this call — subsequent relative operations depend on that.
private static void createDirectory(String directory) throws IOException {
if (!StringUtils.isEmpty(directory)) {
if (directory.contains("/")) {
String[] dirs = directory.split("/");
for (String dir : dirs) {
ftpClient.mkd(dir);
ftpClient.changeWorkingDirectory(dir);
}
} else {
ftpClient.mkd(directory);
ftpClient.changeWorkingDirectory(directory);
}
}
}
}
|
/*
* Adito
*
* Copyright (C) 2003-2006 3SP LTD. All Rights Reserved
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
package com.adito.core;
import java.awt.Rectangle;
import com.adito.boot.Util;
/**
 * Constructs a fragment of JavaScript to open a link in a new Window with
 * parameters.
 */
public class WindowOpenJavascriptLink implements JavascriptLink {

    // Private instance variables
    private String uri;
    private String windowId;
    private Rectangle bounds;
    private boolean resizable;
    private boolean menuBar;
    private boolean toolBar;
    private boolean scrollBars;
    private boolean location;

    /**
     * Construct a new link that does nothing ({@link #toJavascript()} will
     * return {@code "void();"}).
     */
    public WindowOpenJavascriptLink() {
        this(null, null, null, false, false, false, false, false);
    }

    /**
     * Constructor.
     *
     * @param uri uri to open (must be encoded)
     * @param windowId ID to give window
     * @param bounds window bounds, or <code>null</code> for browser defaults
     * @param resizable resizable
     * @param menuBar show menu bar
     * @param toolBar show tool bar
     * @param scrollBars show scroll bars
     * @param location show location bar
     */
    public WindowOpenJavascriptLink(String uri, String windowId, Rectangle bounds,
        boolean resizable, boolean menuBar, boolean toolBar, boolean scrollBars, boolean location) {
        this.uri = uri;
        this.windowId = windowId;
        this.bounds = bounds;
        this.resizable = resizable;
        this.menuBar = menuBar;
        this.toolBar = toolBar;
        this.scrollBars = scrollBars;
        this.location = location;
    }

    /**
     * Get the URI
     *
     * @return uri
     */
    public String getURI() {
        return uri;
    }

    /**
     * Generate the Javascript fragment.
     *
     * @return javascript fragment to open the window
     */
    public String toJavascript() {
        if (uri == null) {
            return "void();";
        }
        // StringBuilder instead of StringBuffer: this is a method-local,
        // single-threaded buffer, so the synchronized variant buys nothing.
        StringBuilder buf = new StringBuilder();
        buf.append("this.blur(); ");
        buf.append("windowRef = window.open('");
        buf.append(Util.escapeForJavascriptString(uri));
        buf.append("','");
        // NOTE(review): windowId is not escaped; confirm callers never pass
        // user-controlled IDs containing quotes.
        buf.append(windowId);
        buf.append("','");
        if (bounds != null) {
            buf.append("top=");
            buf.append(bounds.y);
            buf.append(",left=");
            buf.append(bounds.x);
            buf.append(",width=");
            buf.append(bounds.width);
            buf.append(",height=");
            buf.append(bounds.height);
            buf.append(",");
        }
        buf.append("location=");
        buf.append(location ? 1 : 0);
        buf.append(",resizable=");
        buf.append(resizable ? 1 : 0);
        buf.append(",toolbar=");
        buf.append(toolBar ? 1 : 0);
        buf.append(",menubar=");
        buf.append(menuBar ? 1 : 0);
        buf.append(",scrollbars=");
        buf.append(scrollBars ? 1 : 0);
        buf.append("'); ");
        buf.append("if(windowRef==null || typeof(windowRef)=='undefined') { ");
        buf.append(" if(setPopupBlocked) { setPopupBlocked(); }");
        buf.append("} else { windowRef.focus(); }");
        return buf.toString();
    }
}
|
/*
* */
package com.synectiks.process.common.testing.completebackend;
/**
 * Controls how often the test backend is reset between tests.
 */
public enum Lifecycle {
/**
 * Reset once per test class (no per-test cleanup; {@link #afterEach} is a
 * no-op). Use this, if you can make sure
 * that the individual tests will not interfere with each other, e.g., by creating test data that
 * would affect the outcome of a different test.
 */
CLASS,
/**
 * Fully reset the backend after every test method. This is the safest
 * way to isolate tests. Test execution will take much longer due to the time it takes to spin up
 * the necessary container, especially the server node itself.
 */
METHOD {
@Override
void afterEach(GraylogBackend backend) {
// Wipe all backend state so the next test starts from a clean slate.
backend.fullReset();
}
};
// Hook invoked after each test; the default (used by CLASS) does nothing.
void afterEach(GraylogBackend backend) {
}
}
|
/**
* This code was generated by
* \ / _ _ _| _ _
* | (_)\/(_)(_|\/| |(/_ v1.0.0
* / /
*/
package com.twilio.twiml.voice;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlProperty;
import com.twilio.twiml.TwiML;
import com.twilio.twiml.TwiMLException;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
/**
 * TwiML wrapper for {@code <break>}
 */
@JsonDeserialize(builder = SsmlBreak.Builder.class)
public class SsmlBreak extends TwiML {
    /** Allowed values for the {@code strength} attribute. */
    public enum Strength {
        NONE("none"),
        X_WEAK("x-weak"),
        WEAK("weak"),
        MEDIUM("medium"),
        STRONG("strong"),
        X_STRONG("x-strong");

        private final String value;

        private Strength(final String value) {
            this.value = value;
        }

        @Override
        public String toString() {
            return value;
        }
    }

    private final SsmlBreak.Strength strength;
    private final String time;

    /**
     * For XML Serialization/Deserialization
     */
    private SsmlBreak() {
        this(new Builder());
    }

    /**
     * Create a new {@code <break>} element
     */
    private SsmlBreak(Builder b) {
        super("break", b);
        this.strength = b.strength;
        this.time = b.time;
    }

    /**
     * Attributes to set on the generated XML element
     *
     * @return A Map of attribute keys to values
     */
    protected Map<String, String> getElementAttributes() {
        // Preserve order of attributes.
        // Bug fix: HashMap does not preserve insertion order; LinkedHashMap does,
        // matching the stated intent ("strength" before "time" in the XML).
        Map<String, String> attrs = new LinkedHashMap<>();
        if (this.getStrength() != null) {
            attrs.put("strength", this.getStrength().toString());
        }
        if (this.getTime() != null) {
            attrs.put("time", this.getTime());
        }
        return attrs;
    }

    /**
     * Set a pause based on strength
     *
     * @return Set a pause based on strength
     */
    public SsmlBreak.Strength getStrength() {
        return strength;
    }

    /**
     * Set a pause to a specific length of time in seconds or milliseconds,
     * available values: [number]s, [number]ms
     *
     * @return Set a pause to a specific length of time in seconds or milliseconds,
     * available values: [number]s, [number]ms
     */
    public String getTime() {
        return time;
    }

    /**
     * Create a new {@code <break>} element
     */
    public static class Builder extends TwiML.Builder<Builder> {
        /**
         * Create and return a {@code <SsmlBreak.Builder>} from an XML string
         */
        public static Builder fromXml(final String xml) throws TwiMLException {
            try {
                return OBJECT_MAPPER.readValue(xml, Builder.class);
            } catch (final JsonProcessingException jpe) {
                throw new TwiMLException(
                    "Failed to deserialize a SsmlBreak.Builder from the provided XML string: " + jpe.getMessage());
            } catch (final Exception e) {
                throw new TwiMLException("Unhandled exception: " + e.getMessage());
            }
        }

        private SsmlBreak.Strength strength;
        private String time;

        /**
         * Set a pause based on strength
         */
        @JacksonXmlProperty(isAttribute = true, localName = "strength")
        public Builder strength(SsmlBreak.Strength strength) {
            this.strength = strength;
            return this;
        }

        /**
         * Set a pause to a specific length of time in seconds or milliseconds,
         * available values: [number]s, [number]ms
         */
        @JacksonXmlProperty(isAttribute = true, localName = "time")
        public Builder time(String time) {
            this.time = time;
            return this;
        }

        /**
         * Create and return resulting {@code <break>} element
         */
        public SsmlBreak build() {
            return new SsmlBreak(this);
        }
    }
}
|
package com.github.wukan1986.kwebspeaker;
import android.app.Activity;
import android.content.Context;
import android.content.SharedPreferences;
import android.view.View;
import android.widget.Button;
import android.widget.LinearLayout;
import android.widget.SeekBar;
import android.widget.TextView;
/**
 * Settings panel that lets the user adjust speech speed and pitch via two seek
 * bars, persists the values in the activity's private preferences, and pushes
 * them to the attached {@link WebSpeaker}.
 */
public class SpeakerView extends LinearLayout implements SeekBar.OnSeekBarChangeListener, View.OnClickListener {
    public static final String TAG = "SpeakerView";
    public static final String Preferences_KEY_Speed = "setSpeedRate";
    public static final String Preferences_KEY_Pitch = "setPitch";
    private final Activity mContext;
    private SeekBar mSpeedSeekBar;
    private SeekBar mPitchSeekBar;
    private TextView mSpeedTxt;
    private TextView mPitchTxt;
    // Assigned externally after construction; may be null until then.
    WebSpeaker mWebSpeaker;
    float mSpeed;
    float mPitch;
    private Button mTestBtn;

    public SpeakerView(Activity context) {
        super(context);
        mContext = context;
        // Restore last saved values (defaults: 1.5x speed, 1.0x pitch).
        this.mSpeed = mContext.getPreferences(Context.MODE_PRIVATE).getFloat(Preferences_KEY_Speed, 1.5f);
        this.mPitch = mContext.getPreferences(Context.MODE_PRIVATE).getFloat(Preferences_KEY_Pitch, 1.0f);
        initView();
    }

    private void initView() {
        mContext.getLayoutInflater().inflate(R.layout.speaker_view, this);
        mSpeedSeekBar = findViewById(R.id.speed_seekbar);
        mPitchSeekBar = findViewById(R.id.pitch_seekbar);
        mSpeedTxt = findViewById(R.id.speed_txt);
        mPitchTxt = findViewById(R.id.pitch_txt);
        // Seek bars map the integer range [0, 20] onto the float range
        // [0.5, 2.5] — see int_2_float / float_2_int.
        mSpeedSeekBar.setMax(20);
        mPitchSeekBar.setMax(20);
        this.mSpeedSeekBar.setOnSeekBarChangeListener(this);
        this.mPitchSeekBar.setOnSeekBarChangeListener(this);
        int pos = float_2_int(this.mSpeed);
        this.mSpeedSeekBar.setProgress(pos);
        pos = float_2_int(this.mPitch);
        this.mPitchSeekBar.setProgress(pos);
        mTestBtn = findViewById(R.id.button_test);
        this.mTestBtn.setOnClickListener(this);
    }

    @Override
    public void onClick(View v) {
        // Bug fix: guard against NPE when the speaker has not been wired up yet
        // (mWebSpeaker is never assigned inside this class).
        if (this.mWebSpeaker != null) {
            this.mWebSpeaker.Speak(1,"朗读测试,欢迎使用侃侃朗读,可选段的网页朗读神器");
        }
    }

    /**
     * Called when the user releases a seek bar: persist the new value and push
     * it to the speaker.
     */
    @Override
    public void onStopTrackingTouch(SeekBar seekBar) {
        SharedPreferences.Editor editor = mContext.getPreferences(Context.MODE_PRIVATE).edit();
        switch (seekBar.getId()) {
            case R.id.speed_seekbar:
                editor.putFloat(Preferences_KEY_Speed, this.mSpeed);
                // apply() persists asynchronously; commit() would block the UI thread.
                editor.apply();
                if (this.mWebSpeaker != null) {
                    this.mWebSpeaker.SetSpeed(this.mSpeed);
                }
                break;
            case R.id.pitch_seekbar:
                editor.putFloat(Preferences_KEY_Pitch, this.mPitch);
                editor.apply();
                if (this.mWebSpeaker != null) {
                    this.mWebSpeaker.SetPitch(this.mPitch);
                }
                break;
        }
    }

    /**
     * Called when the user starts dragging a seek bar; nothing to do.
     */
    @Override
    public void onStartTrackingTouch(SeekBar seekBar) {
    }

    /**
     * Called as the seek bar progress changes: update the cached value and the
     * on-screen label (persisting happens in onStopTrackingTouch).
     */
    @Override
    public void onProgressChanged(SeekBar seekBar, int progress,
                                  boolean fromUser) {
        switch (seekBar.getId()) {
            case R.id.speed_seekbar:
                this.mSpeed = int_2_float(progress);
                mSpeedTxt.setText("当前语速:" + this.mSpeed + "x");
                break;
            case R.id.pitch_seekbar:
                this.mPitch = int_2_float(progress);
                mPitchTxt.setText("当前音调:" + this.mPitch + "x");
                break;
        }
    }

    // Map a float in [0.5, 2.5] to a seek bar position in [0, 20].
    private int float_2_int(float f) {
        int i_min = 0;
        int i_max = 20;
        float f_min = 0.5f;
        float f_max = 2.5f;
        return (int) ((f - f_min) * (i_max - i_min) / (f_max - f_min));
    }

    // Map a seek bar position in [0, 20] to a float in [0.5, 2.5].
    private float int_2_float(int i) {
        int i_min = 0;
        int i_max = 20;
        float f_min = 0.5f;
        float f_max = 2.5f;
        return i * (f_max - f_min) / (i_max - i_min) + f_min;
    }
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.processors.aws.dynamodb;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.nifi.annotation.behavior.SystemResourceConsideration;
import org.apache.nifi.annotation.behavior.InputRequirement;
import org.apache.nifi.annotation.behavior.ReadsAttribute;
import org.apache.nifi.annotation.behavior.ReadsAttributes;
import org.apache.nifi.annotation.behavior.InputRequirement.Requirement;
import org.apache.nifi.annotation.behavior.SupportsBatching;
import org.apache.nifi.annotation.behavior.SystemResource;
import org.apache.nifi.annotation.behavior.WritesAttribute;
import org.apache.nifi.annotation.behavior.WritesAttributes;
import org.apache.nifi.annotation.documentation.CapabilityDescription;
import org.apache.nifi.annotation.documentation.SeeAlso;
import org.apache.nifi.annotation.documentation.Tags;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.dynamodbv2.document.BatchWriteItemOutcome;
import com.amazonaws.services.dynamodbv2.document.DynamoDB;
import com.amazonaws.services.dynamodbv2.document.Item;
import com.amazonaws.services.dynamodbv2.document.TableWriteItems;
import com.amazonaws.services.dynamodbv2.model.AttributeValue;
import com.amazonaws.services.dynamodbv2.model.WriteRequest;
@SupportsBatching
@SeeAlso({DeleteDynamoDB.class, GetDynamoDB.class, PutDynamoDBRecord.class})
@InputRequirement(Requirement.INPUT_REQUIRED)
@Tags({"Amazon", "DynamoDB", "AWS", "Put", "Insert"})
@CapabilityDescription("Puts a document from DynamoDB based on hash and range key. The table can have either hash and range or hash key alone."
    + " Currently the keys supported are string and number and value can be json document. "
    + "In case of hash and range keys both key are required for the operation."
    + " The FlowFile content must be JSON. FlowFile content is mapped to the specified Json Document attribute in the DynamoDB item.")
@WritesAttributes({
    @WritesAttribute(attribute = AbstractDynamoDBProcessor.DYNAMODB_KEY_ERROR_UNPROCESSED, description = "DynamoDB unprocessed keys"),
    @WritesAttribute(attribute = AbstractDynamoDBProcessor.DYNAMODB_RANGE_KEY_VALUE_ERROR, description = "DynamoDB range key error"),
    @WritesAttribute(attribute = AbstractDynamoDBProcessor.DYNAMODB_KEY_ERROR_NOT_FOUND, description = "DynamoDB key not found"),
    @WritesAttribute(attribute = AbstractDynamoDBProcessor.DYNAMODB_ERROR_EXCEPTION_MESSAGE, description = "DynamoDB exception message"),
    @WritesAttribute(attribute = AbstractDynamoDBProcessor.DYNAMODB_ERROR_CODE, description = "DynamoDB error code"),
    @WritesAttribute(attribute = AbstractDynamoDBProcessor.DYNAMODB_ERROR_MESSAGE, description = "DynamoDB error message"),
    @WritesAttribute(attribute = AbstractDynamoDBProcessor.DYNAMODB_ERROR_TYPE, description = "DynamoDB error type"),
    @WritesAttribute(attribute = AbstractDynamoDBProcessor.DYNAMODB_ERROR_SERVICE, description = "DynamoDB error service"),
    @WritesAttribute(attribute = AbstractDynamoDBProcessor.DYNAMODB_ERROR_RETRYABLE, description = "DynamoDB error is retryable"),
    @WritesAttribute(attribute = AbstractDynamoDBProcessor.DYNAMODB_ERROR_REQUEST_ID, description = "DynamoDB error request id"),
    @WritesAttribute(attribute = AbstractDynamoDBProcessor.DYNAMODB_ERROR_STATUS_CODE, description = "DynamoDB error status code"),
    @WritesAttribute(attribute = AbstractDynamoDBProcessor.DYNAMODB_ITEM_IO_ERROR, description = "IO exception message on creating item")
})
@ReadsAttributes({
    @ReadsAttribute(attribute = AbstractDynamoDBProcessor.DYNAMODB_ITEM_HASH_KEY_VALUE, description = "Items hash key value"),
    @ReadsAttribute(attribute = AbstractDynamoDBProcessor.DYNAMODB_ITEM_RANGE_KEY_VALUE, description = "Items range key value")
})
@SystemResourceConsideration(resource = SystemResource.MEMORY)
public class PutDynamoDB extends AbstractWriteDynamoDBProcessor {

    public static final List<PropertyDescriptor> properties = Collections.unmodifiableList(
        Arrays.asList(TABLE, HASH_KEY_NAME, RANGE_KEY_NAME, HASH_KEY_VALUE, RANGE_KEY_VALUE,
            HASH_KEY_VALUE_TYPE, RANGE_KEY_VALUE_TYPE, JSON_DOCUMENT, DOCUMENT_CHARSET, BATCH_SIZE,
            REGION, ACCESS_KEY, SECRET_KEY, CREDENTIALS_FILE, AWS_CREDENTIALS_PROVIDER_SERVICE, TIMEOUT, SSL_CONTEXT_SERVICE,
            PROXY_CONFIGURATION_SERVICE, PROXY_HOST, PROXY_HOST_PORT, PROXY_USERNAME, PROXY_PASSWORD));

    /**
     * DynamoDB max item size limit: 400 KB.
     */
    public static final int DYNAMODB_MAX_ITEM_SIZE = 400 * 1024;

    @Override
    protected List<PropertyDescriptor> getSupportedPropertyDescriptors() {
        return properties;
    }

    /**
     * Batches up to BATCH_SIZE FlowFiles into a single DynamoDB BatchWriteItem
     * request. Each FlowFile's content becomes the configured JSON document
     * attribute of an item keyed by the hash/range values resolved from
     * properties and attributes. FlowFiles with missing keys, oversized
     * content, or item-creation I/O errors are routed to failure individually;
     * the rest are routed to success unless DynamoDB reports them unprocessed.
     */
    @Override
    public void onTrigger(final ProcessContext context, final ProcessSession session) {
        List<FlowFile> flowFiles = session.get(context.getProperty(BATCH_SIZE).evaluateAttributeExpressions().asInteger());
        if (flowFiles == null || flowFiles.isEmpty()) {
            return;
        }
        // Tracks which FlowFile produced which item so per-key failures reported
        // by DynamoDB can be routed back to the right FlowFile.
        Map<ItemKeys, FlowFile> keysToFlowFileMap = new HashMap<>();
        final String table = context.getProperty(TABLE).evaluateAttributeExpressions().getValue();
        final String hashKeyName = context.getProperty(HASH_KEY_NAME).evaluateAttributeExpressions().getValue();
        final String hashKeyValueType = context.getProperty(HASH_KEY_VALUE_TYPE).getValue();
        final String rangeKeyName = context.getProperty(RANGE_KEY_NAME).evaluateAttributeExpressions().getValue();
        final String rangeKeyValueType = context.getProperty(RANGE_KEY_VALUE_TYPE).getValue();
        final String jsonDocument = context.getProperty(JSON_DOCUMENT).evaluateAttributeExpressions().getValue();
        final String charset = context.getProperty(DOCUMENT_CHARSET).evaluateAttributeExpressions().getValue();
        TableWriteItems tableWriteItems = new TableWriteItems(table);
        for (FlowFile flowFile : flowFiles) {
            final Object hashKeyValue = getValue(context, HASH_KEY_VALUE_TYPE, HASH_KEY_VALUE, flowFile.getAttributes());
            final Object rangeKeyValue = getValue(context, RANGE_KEY_VALUE_TYPE, RANGE_KEY_VALUE, flowFile.getAttributes());
            // These helpers route inconsistent FlowFiles to failure themselves.
            if (!isHashKeyValueConsistent(hashKeyName, hashKeyValue, session, flowFile)) {
                continue;
            }
            if (!isRangeKeyValueConsistent(rangeKeyName, rangeKeyValue, session, flowFile)) {
                continue;
            }
            if (!isDataValid(flowFile, jsonDocument)) {
                flowFile = session.putAttribute(flowFile, AWS_DYNAMO_DB_ITEM_SIZE_ERROR, "Max size of item + attribute should be 400kb but was " + flowFile.getSize() + jsonDocument.length());
                session.transfer(flowFile, REL_FAILURE);
                continue;
            }
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            session.exportTo(flowFile, baos);
            try {
                if (rangeKeyValue == null || StringUtils.isBlank(rangeKeyValue.toString())) {
                    tableWriteItems.addItemToPut(new Item().withKeyComponent(hashKeyName, hashKeyValue)
                        .withJSON(jsonDocument, IOUtils.toString(baos.toByteArray(), charset)));
                } else {
                    tableWriteItems.addItemToPut(new Item().withKeyComponent(hashKeyName, hashKeyValue)
                        .withKeyComponent(rangeKeyName, rangeKeyValue)
                        .withJSON(jsonDocument, IOUtils.toString(baos.toByteArray(), charset)));
                }
            } catch (IOException ioe) {
                getLogger().error("IOException while creating put item : " + ioe.getMessage());
                flowFile = session.putAttribute(flowFile, DYNAMODB_ITEM_IO_ERROR, ioe.getMessage());
                session.transfer(flowFile, REL_FAILURE);
                // Bug fix: skip the bookkeeping below. Without this 'continue'
                // the failed FlowFile was also recorded in keysToFlowFileMap and
                // later transferred a second time (to success).
                continue;
            }
            keysToFlowFileMap.put(new ItemKeys(hashKeyValue, rangeKeyValue), flowFile);
        }
        if (keysToFlowFileMap.isEmpty()) {
            return;
        }
        final DynamoDB dynamoDB = getDynamoDB();
        try {
            BatchWriteItemOutcome outcome = dynamoDB.batchWriteItem(tableWriteItems);
            // Removes any unprocessed keys from keysToFlowFileMap and routes
            // their FlowFiles appropriately.
            handleUnprocessedItems(session, keysToFlowFileMap, table, hashKeyName, hashKeyValueType, rangeKeyName,
                rangeKeyValueType, outcome);
            // Handle any remaining flowfiles
            for (FlowFile flowFile : keysToFlowFileMap.values()) {
                getLogger().debug("Successful posted items to dynamodb : " + table);
                session.transfer(flowFile, REL_SUCCESS);
            }
        } catch (AmazonServiceException exception) {
            getLogger().error("Could not process flowFiles due to service exception : " + exception.getMessage());
            List<FlowFile> failedFlowFiles = processServiceException(session, flowFiles, exception);
            session.transfer(failedFlowFiles, REL_FAILURE);
        } catch (AmazonClientException exception) {
            getLogger().error("Could not process flowFiles due to client exception : " + exception.getMessage());
            List<FlowFile> failedFlowFiles = processClientException(session, flowFiles, exception);
            session.transfer(failedFlowFiles, REL_FAILURE);
        } catch (Exception exception) {
            getLogger().error("Could not process flowFiles due to exception : " + exception.getMessage());
            List<FlowFile> failedFlowFiles = processException(session, flowFiles, exception);
            session.transfer(failedFlowFiles, REL_FAILURE);
        }
    }

    // True when content plus document-attribute name fits under the DynamoDB
    // 400 KB item limit.
    private boolean isDataValid(FlowFile flowFile, String jsonDocument) {
        return (flowFile.getSize() + jsonDocument.length()) < DYNAMODB_MAX_ITEM_SIZE;
    }

    /**
     * {@inheritDoc}
     */
    protected Map<String, AttributeValue> getRequestItem(WriteRequest writeRequest) {
        return writeRequest.getPutRequest().getItem();
    }
}
|
package org.treblereel.gwt.elemental2.three;
import elemental2.core.JsArray;
import elemental2.core.JsObject;
import jsinterop.annotations.JsOverlay;
import jsinterop.annotations.JsPackage;
import jsinterop.annotations.JsType;
import jsinterop.base.Js;
// JsInterop binding for THREE.ShapeGeometry: geometry built from one or more
// 2D Shape outlines, optionally tessellated with curveSegments.
@JsType(isNative = true, name = "THREE.ShapeGeometry", namespace = JsPackage.GLOBAL)
public class ShapeGeometry extends BufferGeometry {
// Union type accepted by the constructor: either a single Shape or a
// JsArray<Shape>. The as*/is* helpers perform unchecked casts / type tests.
@JsType(isNative = true, name = "?", namespace = JsPackage.GLOBAL)
public interface ConstructorShapesUnionType {
@JsOverlay
static ShapeGeometry.ConstructorShapesUnionType of(Object o) {
return Js.cast(o);
}
// View this union value as a JsArray<Shape> (unchecked cast).
@JsOverlay
default JsArray<Shape> asJsArray() {
return Js.cast(this);
}
// View this union value as a single Shape (unchecked cast).
@JsOverlay
default Shape asShape() {
return Js.cast(this);
}
@JsOverlay
default boolean isJsArray() {
return (Object) this instanceof JsArray;
}
@JsOverlay
default boolean isShape() {
return (Object) this instanceof Shape;
}
}
// Rebuild a ShapeGeometry from its JSON representation (THREE's toJSON()).
public static native ShapeGeometry fromJSON(JsObject data);
// Convenience overload: unchecked-casts any Object to JsObject first.
@JsOverlay
public static final ShapeGeometry fromJSON(Object data) {
return fromJSON(Js.<JsObject>uncheckedCast(data));
}
public String type;
public ShapeGeometry() {}
public ShapeGeometry(ShapeGeometry.ConstructorShapesUnionType shapes, double curveSegments) {}
public ShapeGeometry(ShapeGeometry.ConstructorShapesUnionType shapes) {}
public ShapeGeometry(JsArray<Shape> shapes, double curveSegments) {}
public ShapeGeometry(JsArray<Shape> shapes) {}
public ShapeGeometry(Shape shapes, double curveSegments) {}
public ShapeGeometry(Shape[] shapes, double curveSegments) {}
public ShapeGeometry(Shape shapes) {}
public ShapeGeometry(Shape[] shapes) {}
}
|
package com.alphawallet.app.widget;
import android.content.Context;
import androidx.annotation.NonNull;
import android.view.LayoutInflater;
import android.widget.Button;
import android.widget.FrameLayout;
import android.widget.TextView;
import com.alphawallet.app.C;
import com.alphawallet.app.R;
import com.alphawallet.app.entity.CustomViewSettings;
import com.alphawallet.app.ui.HomeActivity;
/**
 * Empty-state view shown in a transactions list when there are no
 * transactions to display.
 */
public class EmptyTransactionsView extends FrameLayout {
// NOTE(review): onClickListener is currently unused because the buy-button
// wiring below is commented out — confirm whether the parameter should be
// dropped or the block re-enabled.
public EmptyTransactionsView(@NonNull Context context, OnClickListener onClickListener) {
super(context);
LayoutInflater.from(getContext())
.inflate(R.layout.layout_empty_transactions, this, true);
/*findViewById(R.id.action_buy).setOnClickListener(onClickListener);
((TextView)findViewById(R.id.no_transactions_subtext)).setText(context.getString(R.string.no_recent_transactions_subtext,
CustomViewSettings.primaryNetworkName()));
Button buyButton = findViewById(R.id.action_buy);
if (CustomViewSettings.primaryNetworkName().equals(C.ETHEREUM_NETWORK_NAME))
{
buyButton.setVisibility(VISIBLE);
buyButton.setOnClickListener(((HomeActivity) context));
buyButton.setText(context.getString(R.string.action_buy, CustomViewSettings.primaryNetworkName()));
}
else
{
buyButton.setVisibility(GONE);
}*/
}
}
|
package top.kairuiyang.picture.util;
import top.kairuiyang.commons.entity.SystemConfig;
import top.kairuiyang.commons.feign.AdminFeignClient;
import top.kairuiyang.commons.feign.WebFeignClient;
import top.kairuiyang.picture.global.MessageConf;
import top.kairuiyang.picture.global.RedisConf;
import top.kairuiyang.picture.global.SysConf;
import top.kairuiyang.utils.JsonUtils;
import top.kairuiyang.utils.StringUtils;
import top.kairuiyang.base.enums.EOpenStatus;
import top.kairuiyang.base.exception.exceptionType.QueryException;
import top.kairuiyang.base.global.Constants;
import top.kairuiyang.base.global.ErrorCode;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.stereotype.Component;
import org.springframework.web.context.request.RequestContextHolder;
import org.springframework.web.context.request.ServletRequestAttributes;
import javax.servlet.http.HttpServletRequest;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
* Feign操作工具类
*
* @author: 陌溪
* @create: 2020-02-29-15:39
*/
@Slf4j
@Component
public class FeignUtil {
@Autowired
AdminFeignClient adminFeignClient;
@Autowired
WebFeignClient webFeignClient;
@Autowired
private StringRedisTemplate stringRedisTemplate;
/**
 * Resolve the system configuration as a Map, validating the caller's login
 * token first. The config is served from the Redis cache when present;
 * otherwise it is fetched over Feign from the admin service and cached for
 * 30 minutes.
 *
 * @param token login token to validate against Redis
 * @return system configuration map (empty if the Feign call did not succeed)
 */
public Map<String, String> getSystemConfigMap(String token) {
    // Reject requests whose login token is unknown or expired.
    String tokenKey = RedisConf.LOGIN_TOKEN_KEY + Constants.SYMBOL_COLON + token;
    String adminJsonResult = stringRedisTemplate.opsForValue().get(tokenKey);
    if (StringUtils.isEmpty(adminJsonResult)) {
        throw new QueryException(ErrorCode.INVALID_TOKEN, MessageConf.INVALID_TOKEN);
    }
    // Fast path: cached system config in Redis.
    String cached = stringRedisTemplate.opsForValue().get(RedisConf.SYSTEM_CONFIG);
    if (StringUtils.isNotEmpty(cached)) {
        return (Map<String, String>) JsonUtils.jsonToMap(cached, String.class);
    }
    // Slow path: fetch via Feign and cache the result for 30 minutes.
    String resultStr = adminFeignClient.getSystemConfig();
    Map<String, Object> resultTempMap = JsonUtils.jsonToMap(resultStr);
    Map<String, String> resultMap = new HashMap<>();
    Object code = resultTempMap.get(SysConf.CODE);
    if (code != null && SysConf.SUCCESS.equals(code.toString())) {
        resultMap = (Map<String, String>) resultTempMap.get(SysConf.DATA);
        stringRedisTemplate.opsForValue().set(RedisConf.SYSTEM_CONFIG, JsonUtils.objectToJson(resultMap), 30, TimeUnit.MINUTES);
    }
    return resultMap;
}
/**
* 获取系统配置,不论是Admin端还是Web端
*
* @return
*/
public SystemConfig getSystemConfig() {
ServletRequestAttributes attribute = (ServletRequestAttributes) RequestContextHolder.getRequestAttributes();
HttpServletRequest request = attribute.getRequest();
// 后台携带的token
Object token = request.getAttribute(SysConf.TOKEN);
// 参数中携带的token
String paramsToken = request.getParameter(SysConf.TOKEN);
// 获取平台【web:门户,admin:管理端】
String platform = request.getParameter(SysConf.PLATFORM);
Map<String, String> systemConfigMap = new HashMap<>();
// 判断是否是web端发送过来的请求【后端发送过来的token长度为32】
if (SysConf.WEB.equals(platform) || (paramsToken != null && paramsToken.length() == Constants.THIRTY_TWO)) {
// 如果是调用web端获取配置的接口
systemConfigMap = this.getSystemConfigByWebToken(paramsToken);
} else {
// 调用admin端获取配置接口
if (token != null) {
// 判断是否是后台过来的请求
systemConfigMap = this.getSystemConfigMap(token.toString());
} else {
// 判断是否是通过params参数传递过来的
systemConfigMap = this.getSystemConfigMap(paramsToken);
}
}
if (systemConfigMap == null) {
log.error(MessageConf.PLEASE_SET_QI_NIU);
throw new QueryException(ErrorCode.PLEASE_SET_QI_NIU, MessageConf.PLEASE_SET_QI_NIU);
}
SystemConfig systemConfig = new SystemConfig();
if (systemConfigMap == null) {
throw new QueryException(ErrorCode.SYSTEM_CONFIG_NOT_EXIST, MessageConf.SYSTEM_CONFIG_NOT_EXIST);
} else {
String uploadQiNiu = systemConfigMap.get(SysConf.UPLOAD_QI_NIU);
String uploadLocal = systemConfigMap.get(SysConf.UPLOAD_LOCAL);
String localPictureBaseUrl = systemConfigMap.get(SysConf.LOCAL_PICTURE_BASE_URL);
String qiNiuPictureBaseUrl = systemConfigMap.get(SysConf.QI_NIU_PICTURE_BASE_URL);
String qiNiuAccessKey = systemConfigMap.get(SysConf.QI_NIU_ACCESS_KEY);
String qiNiuSecretKey = systemConfigMap.get(SysConf.QI_NIU_SECRET_KEY);
String qiNiuBucket = systemConfigMap.get(SysConf.QI_NIU_BUCKET);
String qiNiuArea = systemConfigMap.get(SysConf.QI_NIU_AREA);
String minioEndPoint = systemConfigMap.get(SysConf.MINIO_END_POINT);
String minioAccessKey = systemConfigMap.get(SysConf.MINIO_ACCESS_KEY);
String minioSecretKey = systemConfigMap.get(SysConf.MINIO_SECRET_KEY);
String minioBucket = systemConfigMap.get(SysConf.MINIO_BUCKET);
String uploadMinio = systemConfigMap.get(SysConf.UPLOAD_MINIO);
String minioPictureBaseUrl = systemConfigMap.get(SysConf.MINIO_PICTURE_BASE_URL);
// 判断七牛云参数是否存在异常
if (EOpenStatus.OPEN.equals(uploadQiNiu) && (StringUtils.isEmpty(qiNiuPictureBaseUrl) || StringUtils.isEmpty(qiNiuAccessKey)
|| StringUtils.isEmpty(qiNiuSecretKey) || StringUtils.isEmpty(qiNiuBucket) || StringUtils.isEmpty(qiNiuArea))) {
throw new QueryException(ErrorCode.PLEASE_SET_QI_NIU, MessageConf.PLEASE_SET_QI_NIU);
}
// 判断本地服务参数是否存在异常
if (EOpenStatus.OPEN.equals(uploadLocal) && StringUtils.isEmpty(localPictureBaseUrl)) {
throw new QueryException(ErrorCode.PLEASE_SET_LOCAL, MessageConf.PLEASE_SET_QI_NIU);
}
// 判断Minio服务是否存在异常
if (EOpenStatus.OPEN.equals(uploadMinio) && (StringUtils.isEmpty(minioEndPoint) || StringUtils.isEmpty(minioPictureBaseUrl) || StringUtils.isEmpty(minioAccessKey)
|| StringUtils.isEmpty(minioSecretKey) || StringUtils.isEmpty(minioBucket))) {
throw new QueryException(ErrorCode.PLEASE_SET_MINIO, MessageConf.PLEASE_SET_MINIO);
}
systemConfig.setQiNiuAccessKey(qiNiuAccessKey);
systemConfig.setQiNiuSecretKey(qiNiuSecretKey);
systemConfig.setQiNiuBucket(qiNiuBucket);
systemConfig.setQiNiuArea(qiNiuArea);
systemConfig.setUploadQiNiu(uploadQiNiu);
systemConfig.setUploadLocal(uploadLocal);
systemConfig.setPicturePriority(systemConfigMap.get(SysConf.PICTURE_PRIORITY));
systemConfig.setLocalPictureBaseUrl(systemConfigMap.get(SysConf.LOCAL_PICTURE_BASE_URL));
systemConfig.setQiNiuPictureBaseUrl(systemConfigMap.get(SysConf.QI_NIU_PICTURE_BASE_URL));
systemConfig.setMinioEndPoint(minioEndPoint);
systemConfig.setMinioAccessKey(minioAccessKey);
systemConfig.setMinioSecretKey(minioSecretKey);
systemConfig.setMinioBucket(minioBucket);
systemConfig.setMinioPictureBaseUrl(minioPictureBaseUrl);
systemConfig.setUploadMinio(uploadMinio);
}
return systemConfig;
}
/**
* 通过Token获取系统配置 【传入AdminToken】
*
* @param token
* @return
*/
public SystemConfig getSystemConfig(String token) {
Map<String, String> systemConfigMap = this.getSystemConfigMap(token);
SystemConfig systemConfig = new SystemConfig();
if (systemConfigMap == null) {
throw new QueryException(ErrorCode.SYSTEM_CONFIG_NOT_EXIST, MessageConf.SYSTEM_CONFIG_NOT_EXIST);
} else {
String uploadQiNiu = systemConfigMap.get(SysConf.UPLOAD_QI_NIU);
String uploadLocal = systemConfigMap.get(SysConf.UPLOAD_LOCAL);
String localPictureBaseUrl = systemConfigMap.get(SysConf.LOCAL_PICTURE_BASE_URL);
String qiNiuPictureBaseUrl = systemConfigMap.get(SysConf.QI_NIU_PICTURE_BASE_URL);
String qiNiuAccessKey = systemConfigMap.get(SysConf.QI_NIU_ACCESS_KEY);
String qiNiuSecretKey = systemConfigMap.get(SysConf.QI_NIU_SECRET_KEY);
String qiNiuBucket = systemConfigMap.get(SysConf.QI_NIU_BUCKET);
String qiNiuArea = systemConfigMap.get(SysConf.QI_NIU_AREA);
String minioEndPoint = systemConfigMap.get(SysConf.MINIO_END_POINT);
String minioAccessKey = systemConfigMap.get(SysConf.MINIO_ACCESS_KEY);
String minioSecretKey = systemConfigMap.get(SysConf.MINIO_SECRET_KEY);
String minioBucket = systemConfigMap.get(SysConf.MINIO_BUCKET);
String uploadMinio = systemConfigMap.get(SysConf.UPLOAD_MINIO);
String minioPictureBaseUrl = systemConfigMap.get(SysConf.MINIO_PICTURE_BASE_URL);
if (EOpenStatus.OPEN.equals(uploadQiNiu) && (StringUtils.isEmpty(qiNiuPictureBaseUrl) || StringUtils.isEmpty(qiNiuAccessKey)
|| StringUtils.isEmpty(qiNiuSecretKey) || StringUtils.isEmpty(qiNiuBucket) || StringUtils.isEmpty(qiNiuArea))) {
throw new QueryException(ErrorCode.PLEASE_SET_QI_NIU, MessageConf.PLEASE_SET_QI_NIU);
}
if (EOpenStatus.OPEN.equals(uploadLocal) && StringUtils.isEmpty(localPictureBaseUrl)) {
throw new QueryException(ErrorCode.PLEASE_SET_LOCAL, MessageConf.PLEASE_SET_QI_NIU);
}
// 判断Minio服务是否存在异常
if (EOpenStatus.OPEN.equals(uploadMinio) && (StringUtils.isEmpty(minioEndPoint) || StringUtils.isEmpty(minioPictureBaseUrl) || StringUtils.isEmpty(minioAccessKey)
|| StringUtils.isEmpty(minioSecretKey) || StringUtils.isEmpty(minioBucket))) {
throw new QueryException(ErrorCode.PLEASE_SET_MINIO, MessageConf.PLEASE_SET_MINIO);
}
systemConfig.setQiNiuAccessKey(qiNiuAccessKey);
systemConfig.setQiNiuSecretKey(qiNiuSecretKey);
systemConfig.setQiNiuBucket(qiNiuBucket);
systemConfig.setQiNiuArea(qiNiuArea);
systemConfig.setUploadQiNiu(uploadQiNiu);
systemConfig.setUploadLocal(uploadLocal);
systemConfig.setPicturePriority(systemConfigMap.get(SysConf.PICTURE_PRIORITY));
systemConfig.setLocalPictureBaseUrl(systemConfigMap.get(SysConf.LOCAL_PICTURE_BASE_URL));
systemConfig.setQiNiuPictureBaseUrl(systemConfigMap.get(SysConf.QI_NIU_PICTURE_BASE_URL));
systemConfig.setMinioEndPoint(minioEndPoint);
systemConfig.setMinioAccessKey(minioAccessKey);
systemConfig.setMinioSecretKey(minioSecretKey);
systemConfig.setMinioBucket(minioBucket);
systemConfig.setMinioPictureBaseUrl(minioPictureBaseUrl);
systemConfig.setUploadMinio(uploadMinio);
}
return systemConfig;
}
/**
* 从Map中获取系统配置
*
* @param systemConfigMap
* @return
*/
public SystemConfig getSystemConfigByMap(Map<String, String> systemConfigMap) {
SystemConfig systemConfig = new SystemConfig();
if (systemConfigMap == null) {
throw new QueryException(ErrorCode.SYSTEM_CONFIG_NOT_EXIST, MessageConf.SYSTEM_CONFIG_NOT_EXIST);
} else {
String uploadQiNiu = systemConfigMap.get(SysConf.UPLOAD_QI_NIU);
String uploadLocal = systemConfigMap.get(SysConf.UPLOAD_LOCAL);
String localPictureBaseUrl = systemConfigMap.get(SysConf.LOCAL_PICTURE_BASE_URL);
String qiNiuPictureBaseUrl = systemConfigMap.get(SysConf.QI_NIU_PICTURE_BASE_URL);
String qiNiuAccessKey = systemConfigMap.get(SysConf.QI_NIU_ACCESS_KEY);
String qiNiuSecretKey = systemConfigMap.get(SysConf.QI_NIU_SECRET_KEY);
String qiNiuBucket = systemConfigMap.get(SysConf.QI_NIU_BUCKET);
String qiNiuArea = systemConfigMap.get(SysConf.QI_NIU_AREA);
String picturePriority = systemConfigMap.get(SysConf.PICTURE_PRIORITY);
String contentPicturePriority = systemConfigMap.get(SysConf.CONTENT_PICTURE_PRIORITY);
String minioEndPoint = systemConfigMap.get(SysConf.MINIO_END_POINT);
String minioAccessKey = systemConfigMap.get(SysConf.MINIO_ACCESS_KEY);
String minioSecretKey = systemConfigMap.get(SysConf.MINIO_SECRET_KEY);
String minioBucket = systemConfigMap.get(SysConf.MINIO_BUCKET);
String uploadMinio = systemConfigMap.get(SysConf.UPLOAD_MINIO);
String minioPictureBaseUrl = systemConfigMap.get(SysConf.MINIO_PICTURE_BASE_URL);
if (EOpenStatus.OPEN.equals(uploadQiNiu) && (StringUtils.isEmpty(qiNiuPictureBaseUrl) || StringUtils.isEmpty(qiNiuAccessKey)
|| StringUtils.isEmpty(qiNiuSecretKey) || StringUtils.isEmpty(qiNiuBucket) || StringUtils.isEmpty(qiNiuArea))) {
throw new QueryException(ErrorCode.PLEASE_SET_QI_NIU, MessageConf.PLEASE_SET_QI_NIU);
}
if (EOpenStatus.OPEN.equals(uploadLocal) && StringUtils.isEmpty(localPictureBaseUrl)) {
throw new QueryException(ErrorCode.PLEASE_SET_LOCAL, MessageConf.PLEASE_SET_QI_NIU);
}
// 判断Minio服务是否存在异常
if (EOpenStatus.OPEN.equals(uploadMinio) && (StringUtils.isEmpty(minioEndPoint) || StringUtils.isEmpty(minioPictureBaseUrl) || StringUtils.isEmpty(minioAccessKey)
|| StringUtils.isEmpty(minioSecretKey) || StringUtils.isEmpty(minioBucket))) {
throw new QueryException(ErrorCode.PLEASE_SET_MINIO, MessageConf.PLEASE_SET_MINIO);
}
systemConfig.setQiNiuAccessKey(qiNiuAccessKey);
systemConfig.setQiNiuSecretKey(qiNiuSecretKey);
systemConfig.setQiNiuBucket(qiNiuBucket);
systemConfig.setQiNiuArea(qiNiuArea);
systemConfig.setUploadQiNiu(uploadQiNiu);
systemConfig.setUploadLocal(uploadLocal);
systemConfig.setPicturePriority(picturePriority);
systemConfig.setContentPicturePriority(contentPicturePriority);
systemConfig.setLocalPictureBaseUrl(localPictureBaseUrl);
systemConfig.setQiNiuPictureBaseUrl(qiNiuPictureBaseUrl);
systemConfig.setMinioEndPoint(minioEndPoint);
systemConfig.setMinioAccessKey(minioAccessKey);
systemConfig.setMinioSecretKey(minioSecretKey);
systemConfig.setMinioBucket(minioBucket);
systemConfig.setMinioPictureBaseUrl(minioPictureBaseUrl);
systemConfig.setUploadMinio(uploadMinio);
}
return systemConfig;
}
/**
* 通过Web端的token获取系统配置文件 【传入Admin端的token】
*
* @param token
* @return
*/
public Map<String, String> getSystemConfigByWebToken(String token) {
// 判断该token的有效性
String webUserJsonResult = stringRedisTemplate.opsForValue().get(RedisConf.USER_TOKEN + Constants.SYMBOL_COLON + token);
if (StringUtils.isEmpty(webUserJsonResult)) {
throw new QueryException(ErrorCode.INVALID_TOKEN, MessageConf.INVALID_TOKEN);
}
// 从Redis中获取的SystemConf 或者 通过feign获取的
Map<String, String> resultMap = new HashMap<>();
//从Redis中获取内容
String jsonResult = stringRedisTemplate.opsForValue().get(RedisConf.SYSTEM_CONFIG);
// 判断Redis中是否有数据
if (StringUtils.isNotEmpty(jsonResult)) {
resultMap = (Map<String, String>) JsonUtils.jsonToMap(jsonResult, String.class);
} else {
// 进行七牛云校验
String resultStr = webFeignClient.getSystemConfig(token);
Map<String, Object> resultTempMap = JsonUtils.jsonToMap(resultStr);
if (resultTempMap.get(SysConf.CODE) != null && SysConf.SUCCESS.equals(resultTempMap.get(SysConf.CODE).toString())) {
resultMap = (Map<String, String>) resultTempMap.get(SysConf.DATA);
//将从token存储到redis中,设置30分钟后过期
stringRedisTemplate.opsForValue().set(RedisConf.SYSTEM_CONFIG, JsonUtils.objectToJson(resultMap), 30, TimeUnit.MINUTES);
}
}
return resultMap;
}
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.compute.fluent;
import com.azure.core.annotation.BodyParam;
import com.azure.core.annotation.Delete;
import com.azure.core.annotation.ExpectedResponses;
import com.azure.core.annotation.Get;
import com.azure.core.annotation.Headers;
import com.azure.core.annotation.Host;
import com.azure.core.annotation.HostParam;
import com.azure.core.annotation.Patch;
import com.azure.core.annotation.PathParam;
import com.azure.core.annotation.Put;
import com.azure.core.annotation.QueryParam;
import com.azure.core.annotation.ReturnType;
import com.azure.core.annotation.ServiceInterface;
import com.azure.core.annotation.ServiceMethod;
import com.azure.core.annotation.UnexpectedResponseExceptionType;
import com.azure.core.http.rest.PagedFlux;
import com.azure.core.http.rest.PagedIterable;
import com.azure.core.http.rest.PagedResponse;
import com.azure.core.http.rest.PagedResponseBase;
import com.azure.core.http.rest.Response;
import com.azure.core.http.rest.RestProxy;
import com.azure.core.management.exception.ManagementException;
import com.azure.core.util.Context;
import com.azure.core.util.FluxUtil;
import com.azure.core.util.logging.ClientLogger;
import com.azure.resourcemanager.compute.ComputeManagementClient;
import com.azure.resourcemanager.compute.fluent.inner.AvailabilitySetInner;
import com.azure.resourcemanager.compute.fluent.inner.AvailabilitySetListResultInner;
import com.azure.resourcemanager.compute.fluent.inner.VirtualMachineSizeInner;
import com.azure.resourcemanager.compute.fluent.inner.VirtualMachineSizeListResultInner;
import com.azure.resourcemanager.compute.models.AvailabilitySetUpdate;
import com.azure.resourcemanager.resources.fluentcore.collection.InnerSupportsDelete;
import com.azure.resourcemanager.resources.fluentcore.collection.InnerSupportsGet;
import com.azure.resourcemanager.resources.fluentcore.collection.InnerSupportsListing;
import reactor.core.publisher.Mono;
/** An instance of this class provides access to all the operations defined in AvailabilitySets. */
public final class AvailabilitySetsClient
implements InnerSupportsGet<AvailabilitySetInner>,
InnerSupportsListing<AvailabilitySetInner>,
InnerSupportsDelete<Void> {
    /** Logger used for client-side diagnostics. */
    private final ClientLogger logger = new ClientLogger(AvailabilitySetsClient.class);
    /** The proxy service used to perform REST calls. */
    private final AvailabilitySetsService service;
    /** The service client containing this operation class. */
    private final ComputeManagementClient client;
/**
* Initializes an instance of AvailabilitySetsClient.
*
* @param client the instance of the service client containing this operation class.
*/
public AvailabilitySetsClient(ComputeManagementClient client) {
this.service =
RestProxy.create(AvailabilitySetsService.class, client.getHttpPipeline(), client.getSerializerAdapter());
this.client = client;
}
    /**
     * The interface defining all the services for ComputeManagementClientAvailabilitySets to be used by the proxy
     * service to perform REST calls.
     *
     * <p>NOTE: the annotations below form the REST contract consumed by {@code RestProxy};
     * this file is generated by AutoRest, so paths, headers and expected status codes
     * should not be edited by hand.
     */
    @Host("{$host}")
    @ServiceInterface(name = "ComputeManagementCli")
    private interface AvailabilitySetsService {
        // PUT — create or replace a single availability set.
        @Headers({"Accept: application/json", "Content-Type: application/json"})
        @Put(
            "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute"
                + "/availabilitySets/{availabilitySetName}")
        @ExpectedResponses({200})
        @UnexpectedResponseExceptionType(ManagementException.class)
        Mono<Response<AvailabilitySetInner>> createOrUpdate(
            @HostParam("$host") String endpoint,
            @PathParam("resourceGroupName") String resourceGroupName,
            @PathParam("availabilitySetName") String availabilitySetName,
            @QueryParam("api-version") String apiVersion,
            @PathParam("subscriptionId") String subscriptionId,
            @BodyParam("application/json") AvailabilitySetInner parameters,
            Context context);

        // PATCH — update selected properties of an existing availability set.
        @Headers({"Accept: application/json", "Content-Type: application/json"})
        @Patch(
            "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute"
                + "/availabilitySets/{availabilitySetName}")
        @ExpectedResponses({200})
        @UnexpectedResponseExceptionType(ManagementException.class)
        Mono<Response<AvailabilitySetInner>> update(
            @HostParam("$host") String endpoint,
            @PathParam("resourceGroupName") String resourceGroupName,
            @PathParam("availabilitySetName") String availabilitySetName,
            @QueryParam("api-version") String apiVersion,
            @PathParam("subscriptionId") String subscriptionId,
            @BodyParam("application/json") AvailabilitySetUpdate parameters,
            Context context);

        // DELETE — may return 200 or 204 (already gone). The Accept "q=0.9" quality
        // factor is emitted by the generator; presumably intentional — confirm
        // against the AutoRest template before changing.
        @Headers({"Accept: application/json;q=0.9", "Content-Type: application/json"})
        @Delete(
            "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute"
                + "/availabilitySets/{availabilitySetName}")
        @ExpectedResponses({200, 204})
        @UnexpectedResponseExceptionType(ManagementException.class)
        Mono<Response<Void>> delete(
            @HostParam("$host") String endpoint,
            @PathParam("resourceGroupName") String resourceGroupName,
            @PathParam("availabilitySetName") String availabilitySetName,
            @QueryParam("api-version") String apiVersion,
            @PathParam("subscriptionId") String subscriptionId,
            Context context);

        // GET — retrieve one availability set by resource group and name.
        @Headers({"Accept: application/json", "Content-Type: application/json"})
        @Get(
            "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute"
                + "/availabilitySets/{availabilitySetName}")
        @ExpectedResponses({200})
        @UnexpectedResponseExceptionType(ManagementException.class)
        Mono<Response<AvailabilitySetInner>> getByResourceGroup(
            @HostParam("$host") String endpoint,
            @PathParam("resourceGroupName") String resourceGroupName,
            @PathParam("availabilitySetName") String availabilitySetName,
            @QueryParam("api-version") String apiVersion,
            @PathParam("subscriptionId") String subscriptionId,
            Context context);

        // GET — list all availability sets in the subscription ($expand optional).
        @Headers({"Accept: application/json", "Content-Type: application/json"})
        @Get("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/availabilitySets")
        @ExpectedResponses({200})
        @UnexpectedResponseExceptionType(ManagementException.class)
        Mono<Response<AvailabilitySetListResultInner>> list(
            @HostParam("$host") String endpoint,
            @QueryParam("api-version") String apiVersion,
            @PathParam("subscriptionId") String subscriptionId,
            @QueryParam("$expand") String expand,
            Context context);

        // GET — list availability sets within a single resource group.
        @Headers({"Accept: application/json", "Content-Type: application/json"})
        @Get(
            "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute"
                + "/availabilitySets")
        @ExpectedResponses({200})
        @UnexpectedResponseExceptionType(ManagementException.class)
        Mono<Response<AvailabilitySetListResultInner>> listByResourceGroup(
            @HostParam("$host") String endpoint,
            @PathParam("resourceGroupName") String resourceGroupName,
            @QueryParam("api-version") String apiVersion,
            @PathParam("subscriptionId") String subscriptionId,
            Context context);

        // GET — list the VM sizes available inside a given availability set.
        @Headers({"Accept: application/json", "Content-Type: application/json"})
        @Get(
            "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute"
                + "/availabilitySets/{availabilitySetName}/vmSizes")
        @ExpectedResponses({200})
        @UnexpectedResponseExceptionType(ManagementException.class)
        Mono<Response<VirtualMachineSizeListResultInner>> listAvailableSizes(
            @HostParam("$host") String endpoint,
            @PathParam("resourceGroupName") String resourceGroupName,
            @PathParam("availabilitySetName") String availabilitySetName,
            @QueryParam("api-version") String apiVersion,
            @PathParam("subscriptionId") String subscriptionId,
            Context context);

        // GET — follow the server-provided nextLink for subscription-wide paging.
        @Headers({"Accept: application/json", "Content-Type: application/json"})
        @Get("{nextLink}")
        @ExpectedResponses({200})
        @UnexpectedResponseExceptionType(ManagementException.class)
        Mono<Response<AvailabilitySetListResultInner>> listBySubscriptionNext(
            @PathParam(value = "nextLink", encoded = true) String nextLink, Context context);

        // GET — follow the server-provided nextLink for resource-group paging.
        @Headers({"Accept: application/json", "Content-Type: application/json"})
        @Get("{nextLink}")
        @ExpectedResponses({200})
        @UnexpectedResponseExceptionType(ManagementException.class)
        Mono<Response<AvailabilitySetListResultInner>> listNext(
            @PathParam(value = "nextLink", encoded = true) String nextLink, Context context);
    }
/**
* Create or update an availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @param parameters Specifies information about the availability set that the virtual machine should be assigned
* to. Virtual machines specified in the same availability set are allocated to different nodes to maximize
* availability. For more information about availability sets, see [Manage the availability of virtual
* machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
* <br><br> For more information on Azure planned maintenance, see [Planned maintenance for virtual
* machines in
* Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
* <br><br> Currently, a VM can only be added to availability set at creation time. An existing VM
* cannot be added to an availability set.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return specifies information about the availability set that the virtual machine should be assigned to.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<AvailabilitySetInner>> createOrUpdateWithResponseAsync(
String resourceGroupName, String availabilitySetName, AvailabilitySetInner parameters) {
if (this.client.getEndpoint() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getEndpoint() is required and cannot be null."));
}
if (resourceGroupName == null) {
return Mono
.error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
}
if (availabilitySetName == null) {
return Mono
.error(new IllegalArgumentException("Parameter availabilitySetName is required and cannot be null."));
}
if (this.client.getSubscriptionId() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getSubscriptionId() is required and cannot be null."));
}
if (parameters == null) {
return Mono.error(new IllegalArgumentException("Parameter parameters is required and cannot be null."));
} else {
parameters.validate();
}
final String apiVersion = "2019-12-01";
return FluxUtil
.withContext(
context ->
service
.createOrUpdate(
this.client.getEndpoint(),
resourceGroupName,
availabilitySetName,
apiVersion,
this.client.getSubscriptionId(),
parameters,
context))
.subscriberContext(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext())));
}
/**
* Create or update an availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @param parameters Specifies information about the availability set that the virtual machine should be assigned
* to. Virtual machines specified in the same availability set are allocated to different nodes to maximize
* availability. For more information about availability sets, see [Manage the availability of virtual
* machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
* <br><br> For more information on Azure planned maintenance, see [Planned maintenance for virtual
* machines in
* Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
* <br><br> Currently, a VM can only be added to availability set at creation time. An existing VM
* cannot be added to an availability set.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return specifies information about the availability set that the virtual machine should be assigned to.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<AvailabilitySetInner>> createOrUpdateWithResponseAsync(
String resourceGroupName, String availabilitySetName, AvailabilitySetInner parameters, Context context) {
if (this.client.getEndpoint() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getEndpoint() is required and cannot be null."));
}
if (resourceGroupName == null) {
return Mono
.error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
}
if (availabilitySetName == null) {
return Mono
.error(new IllegalArgumentException("Parameter availabilitySetName is required and cannot be null."));
}
if (this.client.getSubscriptionId() == null) {
return Mono
.error(
new IllegalArgumentException(
"Parameter this.client.getSubscriptionId() is required and cannot be null."));
}
if (parameters == null) {
return Mono.error(new IllegalArgumentException("Parameter parameters is required and cannot be null."));
} else {
parameters.validate();
}
final String apiVersion = "2019-12-01";
context = this.client.mergeContext(context);
return service
.createOrUpdate(
this.client.getEndpoint(),
resourceGroupName,
availabilitySetName,
apiVersion,
this.client.getSubscriptionId(),
parameters,
context);
}
/**
* Create or update an availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @param parameters Specifies information about the availability set that the virtual machine should be assigned
* to. Virtual machines specified in the same availability set are allocated to different nodes to maximize
* availability. For more information about availability sets, see [Manage the availability of virtual
* machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
* <br><br> For more information on Azure planned maintenance, see [Planned maintenance for virtual
* machines in
* Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
* <br><br> Currently, a VM can only be added to availability set at creation time. An existing VM
* cannot be added to an availability set.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return specifies information about the availability set that the virtual machine should be assigned to.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<AvailabilitySetInner> createOrUpdateAsync(
String resourceGroupName, String availabilitySetName, AvailabilitySetInner parameters) {
return createOrUpdateWithResponseAsync(resourceGroupName, availabilitySetName, parameters)
.flatMap(
(Response<AvailabilitySetInner> res) -> {
if (res.getValue() != null) {
return Mono.just(res.getValue());
} else {
return Mono.empty();
}
});
}
/**
* Create or update an availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @param parameters Specifies information about the availability set that the virtual machine should be assigned
* to. Virtual machines specified in the same availability set are allocated to different nodes to maximize
* availability. For more information about availability sets, see [Manage the availability of virtual
* machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
* <br><br> For more information on Azure planned maintenance, see [Planned maintenance for virtual
* machines in
* Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
* <br><br> Currently, a VM can only be added to availability set at creation time. An existing VM
* cannot be added to an availability set.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return specifies information about the availability set that the virtual machine should be assigned to.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<AvailabilitySetInner> createOrUpdateAsync(
String resourceGroupName, String availabilitySetName, AvailabilitySetInner parameters, Context context) {
return createOrUpdateWithResponseAsync(resourceGroupName, availabilitySetName, parameters, context)
.flatMap(
(Response<AvailabilitySetInner> res) -> {
if (res.getValue() != null) {
return Mono.just(res.getValue());
} else {
return Mono.empty();
}
});
}
/**
* Create or update an availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @param parameters Specifies information about the availability set that the virtual machine should be assigned
* to. Virtual machines specified in the same availability set are allocated to different nodes to maximize
* availability. For more information about availability sets, see [Manage the availability of virtual
* machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
* <br><br> For more information on Azure planned maintenance, see [Planned maintenance for virtual
* machines in
* Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
* <br><br> Currently, a VM can only be added to availability set at creation time. An existing VM
* cannot be added to an availability set.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return specifies information about the availability set that the virtual machine should be assigned to.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AvailabilitySetInner createOrUpdate(
String resourceGroupName, String availabilitySetName, AvailabilitySetInner parameters) {
return createOrUpdateAsync(resourceGroupName, availabilitySetName, parameters).block();
}
/**
* Create or update an availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @param parameters Specifies information about the availability set that the virtual machine should be assigned
* to. Virtual machines specified in the same availability set are allocated to different nodes to maximize
* availability. For more information about availability sets, see [Manage the availability of virtual
* machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
* <br><br> For more information on Azure planned maintenance, see [Planned maintenance for virtual
* machines in
* Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
* <br><br> Currently, a VM can only be added to availability set at creation time. An existing VM
* cannot be added to an availability set.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return specifies information about the availability set that the virtual machine should be assigned to.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AvailabilitySetInner createOrUpdate(
String resourceGroupName, String availabilitySetName, AvailabilitySetInner parameters, Context context) {
return createOrUpdateAsync(resourceGroupName, availabilitySetName, parameters, context).block();
}
    /**
     * Update an availability set.
     *
     * @param resourceGroupName The name of the resource group.
     * @param availabilitySetName The name of the availability set.
     * @param parameters Specifies information about the availability set that the virtual machine should be assigned
     *     to. Only tags may be updated.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the availability set, wrapped in the raw HTTP {@code Response}, on successful completion.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<AvailabilitySetInner>> updateWithResponseAsync(
        String resourceGroupName, String availabilitySetName, AvailabilitySetUpdate parameters) {
        // Required-argument validation: reactive convention is to surface failures as
        // Mono.error rather than throwing synchronously from an async method.
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        if (resourceGroupName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
        }
        if (availabilitySetName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter availabilitySetName is required and cannot be null."));
        }
        if (this.client.getSubscriptionId() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getSubscriptionId() is required and cannot be null."));
        }
        if (parameters == null) {
            return Mono.error(new IllegalArgumentException("Parameter parameters is required and cannot be null."));
        } else {
            parameters.validate();
        }
        // Service API version this client was generated against.
        final String apiVersion = "2019-12-01";
        // withContext captures the subscriber's Reactor context for the proxy call;
        // subscriberContext merges the client's shared context into that subscription.
        return FluxUtil
            .withContext(
                context ->
                    service
                        .update(
                            this.client.getEndpoint(),
                            resourceGroupName,
                            availabilitySetName,
                            apiVersion,
                            this.client.getSubscriptionId(),
                            parameters,
                            context))
            .subscriberContext(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext())));
    }
    /**
     * Update an availability set.
     *
     * @param resourceGroupName The name of the resource group.
     * @param availabilitySetName The name of the availability set.
     * @param parameters Specifies information about the availability set that the virtual machine should be assigned
     *     to. Only tags may be updated.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the availability set, wrapped in the raw HTTP {@code Response}, on successful completion.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<AvailabilitySetInner>> updateWithResponseAsync(
        String resourceGroupName, String availabilitySetName, AvailabilitySetUpdate parameters, Context context) {
        // Required-argument validation, reported reactively as Mono.error.
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        if (resourceGroupName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
        }
        if (availabilitySetName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter availabilitySetName is required and cannot be null."));
        }
        if (this.client.getSubscriptionId() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getSubscriptionId() is required and cannot be null."));
        }
        if (parameters == null) {
            return Mono.error(new IllegalArgumentException("Parameter parameters is required and cannot be null."));
        } else {
            parameters.validate();
        }
        // Service API version this client was generated against.
        final String apiVersion = "2019-12-01";
        // Merge the caller-supplied context with the client's shared context before the proxy call.
        context = this.client.mergeContext(context);
        return service
            .update(
                this.client.getEndpoint(),
                resourceGroupName,
                availabilitySetName,
                apiVersion,
                this.client.getSubscriptionId(),
                parameters,
                context);
    }
/**
* Update an availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @param parameters Specifies information about the availability set that the virtual machine should be assigned
* to. Only tags may be updated.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return specifies information about the availability set that the virtual machine should be assigned to.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<AvailabilitySetInner> updateAsync(
String resourceGroupName, String availabilitySetName, AvailabilitySetUpdate parameters) {
return updateWithResponseAsync(resourceGroupName, availabilitySetName, parameters)
.flatMap(
(Response<AvailabilitySetInner> res) -> {
if (res.getValue() != null) {
return Mono.just(res.getValue());
} else {
return Mono.empty();
}
});
}
/**
* Update an availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @param parameters Specifies information about the availability set that the virtual machine should be assigned
* to. Only tags may be updated.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return specifies information about the availability set that the virtual machine should be assigned to.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<AvailabilitySetInner> updateAsync(
String resourceGroupName, String availabilitySetName, AvailabilitySetUpdate parameters, Context context) {
return updateWithResponseAsync(resourceGroupName, availabilitySetName, parameters, context)
.flatMap(
(Response<AvailabilitySetInner> res) -> {
if (res.getValue() != null) {
return Mono.just(res.getValue());
} else {
return Mono.empty();
}
});
}
/**
* Update an availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @param parameters Specifies information about the availability set that the virtual machine should be assigned
* to. Only tags may be updated.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return specifies information about the availability set that the virtual machine should be assigned to.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AvailabilitySetInner update(
String resourceGroupName, String availabilitySetName, AvailabilitySetUpdate parameters) {
return updateAsync(resourceGroupName, availabilitySetName, parameters).block();
}
/**
* Update an availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @param parameters Specifies information about the availability set that the virtual machine should be assigned
* to. Only tags may be updated.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return specifies information about the availability set that the virtual machine should be assigned to.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AvailabilitySetInner update(
String resourceGroupName, String availabilitySetName, AvailabilitySetUpdate parameters, Context context) {
return updateAsync(resourceGroupName, availabilitySetName, parameters, context).block();
}
    /**
     * Delete an availability set.
     *
     * @param resourceGroupName The name of the resource group.
     * @param availabilitySetName The name of the availability set.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the completion.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> deleteWithResponseAsync(String resourceGroupName, String availabilitySetName) {
        // Required-argument validation, reported reactively as Mono.error.
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        if (resourceGroupName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
        }
        if (availabilitySetName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter availabilitySetName is required and cannot be null."));
        }
        if (this.client.getSubscriptionId() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getSubscriptionId() is required and cannot be null."));
        }
        // Service API version this client was generated against.
        final String apiVersion = "2019-12-01";
        // withContext captures the subscriber's Reactor context; subscriberContext merges in
        // the client's shared context for the duration of the call.
        return FluxUtil
            .withContext(
                context ->
                    service
                        .delete(
                            this.client.getEndpoint(),
                            resourceGroupName,
                            availabilitySetName,
                            apiVersion,
                            this.client.getSubscriptionId(),
                            context))
            .subscriberContext(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext())));
    }
    /**
     * Delete an availability set.
     *
     * @param resourceGroupName The name of the resource group.
     * @param availabilitySetName The name of the availability set.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the completion.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<Void>> deleteWithResponseAsync(
        String resourceGroupName, String availabilitySetName, Context context) {
        // Required-argument validation, reported reactively as Mono.error.
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        if (resourceGroupName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
        }
        if (availabilitySetName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter availabilitySetName is required and cannot be null."));
        }
        if (this.client.getSubscriptionId() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getSubscriptionId() is required and cannot be null."));
        }
        // Service API version this client was generated against.
        final String apiVersion = "2019-12-01";
        // Merge the caller-supplied context with the client's shared context before the proxy call.
        context = this.client.mergeContext(context);
        return service
            .delete(
                this.client.getEndpoint(),
                resourceGroupName,
                availabilitySetName,
                apiVersion,
                this.client.getSubscriptionId(),
                context);
    }
/**
* Delete an availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteAsync(String resourceGroupName, String availabilitySetName) {
return deleteWithResponseAsync(resourceGroupName, availabilitySetName)
.flatMap((Response<Void> res) -> Mono.empty());
}
/**
* Delete an availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the completion.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> deleteAsync(String resourceGroupName, String availabilitySetName, Context context) {
return deleteWithResponseAsync(resourceGroupName, availabilitySetName, context)
.flatMap((Response<Void> res) -> Mono.empty());
}
/**
* Delete an availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete(String resourceGroupName, String availabilitySetName) {
deleteAsync(resourceGroupName, availabilitySetName).block();
}
/**
* Delete an availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public void delete(String resourceGroupName, String availabilitySetName, Context context) {
deleteAsync(resourceGroupName, availabilitySetName, context).block();
}
    /**
     * Retrieves information about an availability set.
     *
     * @param resourceGroupName The name of the resource group.
     * @param availabilitySetName The name of the availability set.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the availability set, wrapped in the raw HTTP {@code Response}, on successful completion.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<AvailabilitySetInner>> getByResourceGroupWithResponseAsync(
        String resourceGroupName, String availabilitySetName) {
        // Required-argument validation, reported reactively as Mono.error.
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        if (resourceGroupName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
        }
        if (availabilitySetName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter availabilitySetName is required and cannot be null."));
        }
        if (this.client.getSubscriptionId() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getSubscriptionId() is required and cannot be null."));
        }
        // Service API version this client was generated against.
        final String apiVersion = "2019-12-01";
        // withContext captures the subscriber's Reactor context; subscriberContext merges in
        // the client's shared context for the duration of the call.
        return FluxUtil
            .withContext(
                context ->
                    service
                        .getByResourceGroup(
                            this.client.getEndpoint(),
                            resourceGroupName,
                            availabilitySetName,
                            apiVersion,
                            this.client.getSubscriptionId(),
                            context))
            .subscriberContext(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext())));
    }
    /**
     * Retrieves information about an availability set.
     *
     * @param resourceGroupName The name of the resource group.
     * @param availabilitySetName The name of the availability set.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the availability set, wrapped in the raw HTTP {@code Response}, on successful completion.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<Response<AvailabilitySetInner>> getByResourceGroupWithResponseAsync(
        String resourceGroupName, String availabilitySetName, Context context) {
        // Required-argument validation, reported reactively as Mono.error.
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        if (resourceGroupName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
        }
        if (availabilitySetName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter availabilitySetName is required and cannot be null."));
        }
        if (this.client.getSubscriptionId() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getSubscriptionId() is required and cannot be null."));
        }
        // Service API version this client was generated against.
        final String apiVersion = "2019-12-01";
        // Merge the caller-supplied context with the client's shared context before the proxy call.
        context = this.client.mergeContext(context);
        return service
            .getByResourceGroup(
                this.client.getEndpoint(),
                resourceGroupName,
                availabilitySetName,
                apiVersion,
                this.client.getSubscriptionId(),
                context);
    }
/**
* Retrieves information about an availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return specifies information about the availability set that the virtual machine should be assigned to.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<AvailabilitySetInner> getByResourceGroupAsync(String resourceGroupName, String availabilitySetName) {
return getByResourceGroupWithResponseAsync(resourceGroupName, availabilitySetName)
.flatMap(
(Response<AvailabilitySetInner> res) -> {
if (res.getValue() != null) {
return Mono.just(res.getValue());
} else {
return Mono.empty();
}
});
}
/**
* Retrieves information about an availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return specifies information about the availability set that the virtual machine should be assigned to.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<AvailabilitySetInner> getByResourceGroupAsync(
String resourceGroupName, String availabilitySetName, Context context) {
return getByResourceGroupWithResponseAsync(resourceGroupName, availabilitySetName, context)
.flatMap(
(Response<AvailabilitySetInner> res) -> {
if (res.getValue() != null) {
return Mono.just(res.getValue());
} else {
return Mono.empty();
}
});
}
/**
* Retrieves information about an availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return specifies information about the availability set that the virtual machine should be assigned to.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AvailabilitySetInner getByResourceGroup(String resourceGroupName, String availabilitySetName) {
return getByResourceGroupAsync(resourceGroupName, availabilitySetName).block();
}
/**
* Retrieves information about an availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return specifies information about the availability set that the virtual machine should be assigned to.
*/
@ServiceMethod(returns = ReturnType.SINGLE)
public AvailabilitySetInner getByResourceGroup(
String resourceGroupName, String availabilitySetName, Context context) {
return getByResourceGroupAsync(resourceGroupName, availabilitySetName, context).block();
}
    /**
     * Lists all availability sets in a subscription.
     *
     * @param expand The expand expression to apply to the operation. Allowed values are 'instanceView'.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the first page of the List Availability Set operation response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<PagedResponse<AvailabilitySetInner>> listSinglePageAsync(String expand) {
        // Required client configuration validation, reported reactively as Mono.error.
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        if (this.client.getSubscriptionId() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getSubscriptionId() is required and cannot be null."));
        }
        // Service API version this client was generated against.
        final String apiVersion = "2019-12-01";
        // Call the proxy, then adapt the service's list envelope (value + nextLink) into a
        // PagedResponse so PagedFlux can drive continuation-token paging.
        return FluxUtil
            .withContext(
                context ->
                    service
                        .list(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), expand, context))
            .<PagedResponse<AvailabilitySetInner>>map(
                res ->
                    new PagedResponseBase<>(
                        res.getRequest(),
                        res.getStatusCode(),
                        res.getHeaders(),
                        res.getValue().value(),
                        res.getValue().nextLink(),
                        null))
            .subscriberContext(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext())));
    }
    /**
     * Lists all availability sets in a subscription.
     *
     * @param expand The expand expression to apply to the operation. Allowed values are 'instanceView'.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the first page of the List Availability Set operation response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<PagedResponse<AvailabilitySetInner>> listSinglePageAsync(String expand, Context context) {
        // Required client configuration validation, reported reactively as Mono.error.
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        if (this.client.getSubscriptionId() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getSubscriptionId() is required and cannot be null."));
        }
        // Service API version this client was generated against.
        final String apiVersion = "2019-12-01";
        // Merge the caller-supplied context with the client's shared context before the proxy call.
        context = this.client.mergeContext(context);
        // Adapt the service's list envelope (value + nextLink) into a PagedResponse.
        return service
            .list(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), expand, context)
            .map(
                res ->
                    new PagedResponseBase<>(
                        res.getRequest(),
                        res.getStatusCode(),
                        res.getHeaders(),
                        res.getValue().value(),
                        res.getValue().nextLink(),
                        null));
    }
/**
* Lists all availability sets in a subscription.
*
* @param expand The expand expression to apply to the operation. Allowed values are 'instanceView'.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the List Availability Set operation response.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<AvailabilitySetInner> listAsync(String expand) {
return new PagedFlux<>(
() -> listSinglePageAsync(expand), nextLink -> listBySubscriptionNextSinglePageAsync(nextLink));
}
/**
* Lists all availability sets in a subscription.
*
* @param expand The expand expression to apply to the operation. Allowed values are 'instanceView'.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the List Availability Set operation response.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<AvailabilitySetInner> listAsync(String expand, Context context) {
return new PagedFlux<>(
() -> listSinglePageAsync(expand, context),
nextLink -> listBySubscriptionNextSinglePageAsync(nextLink, context));
}
/**
* Lists all availability sets in a subscription.
*
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the List Availability Set operation response.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<AvailabilitySetInner> listAsync() {
final String expand = null;
final Context context = null;
return new PagedFlux<>(
() -> listSinglePageAsync(expand), nextLink -> listBySubscriptionNextSinglePageAsync(nextLink, context));
}
/**
* Lists all availability sets in a subscription.
*
* @param expand The expand expression to apply to the operation. Allowed values are 'instanceView'.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the List Availability Set operation response.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<AvailabilitySetInner> list(String expand) {
return new PagedIterable<>(listAsync(expand));
}
/**
* Lists all availability sets in a subscription.
*
* @param expand The expand expression to apply to the operation. Allowed values are 'instanceView'.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the List Availability Set operation response.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<AvailabilitySetInner> list(String expand, Context context) {
return new PagedIterable<>(listAsync(expand, context));
}
/**
* Lists all availability sets in a subscription.
*
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the List Availability Set operation response.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<AvailabilitySetInner> list() {
final String expand = null;
final Context context = null;
return new PagedIterable<>(listAsync(expand));
}
    /**
     * Lists all availability sets in a resource group.
     *
     * @param resourceGroupName The name of the resource group.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the first page of the List Availability Set operation response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<PagedResponse<AvailabilitySetInner>> listByResourceGroupSinglePageAsync(String resourceGroupName) {
        // Required-argument validation, reported reactively as Mono.error.
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        if (resourceGroupName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
        }
        if (this.client.getSubscriptionId() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getSubscriptionId() is required and cannot be null."));
        }
        // Service API version this client was generated against.
        final String apiVersion = "2019-12-01";
        // Call the proxy, then adapt the service's list envelope (value + nextLink) into a
        // PagedResponse so PagedFlux can drive continuation-token paging.
        return FluxUtil
            .withContext(
                context ->
                    service
                        .listByResourceGroup(
                            this.client.getEndpoint(),
                            resourceGroupName,
                            apiVersion,
                            this.client.getSubscriptionId(),
                            context))
            .<PagedResponse<AvailabilitySetInner>>map(
                res ->
                    new PagedResponseBase<>(
                        res.getRequest(),
                        res.getStatusCode(),
                        res.getHeaders(),
                        res.getValue().value(),
                        res.getValue().nextLink(),
                        null))
            .subscriberContext(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext())));
    }
    /**
     * Lists all availability sets in a resource group.
     *
     * @param resourceGroupName The name of the resource group.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the first page of the List Availability Set operation response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<PagedResponse<AvailabilitySetInner>> listByResourceGroupSinglePageAsync(
        String resourceGroupName, Context context) {
        // Required-argument validation, reported reactively as Mono.error.
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        if (resourceGroupName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
        }
        if (this.client.getSubscriptionId() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getSubscriptionId() is required and cannot be null."));
        }
        // Service API version this client was generated against.
        final String apiVersion = "2019-12-01";
        // Merge the caller-supplied context with the client's shared context before the proxy call.
        context = this.client.mergeContext(context);
        // Adapt the service's list envelope (value + nextLink) into a PagedResponse.
        return service
            .listByResourceGroup(
                this.client.getEndpoint(), resourceGroupName, apiVersion, this.client.getSubscriptionId(), context)
            .map(
                res ->
                    new PagedResponseBase<>(
                        res.getRequest(),
                        res.getStatusCode(),
                        res.getHeaders(),
                        res.getValue().value(),
                        res.getValue().nextLink(),
                        null));
    }
/**
* Lists all availability sets in a resource group.
*
* @param resourceGroupName The name of the resource group.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the List Availability Set operation response.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<AvailabilitySetInner> listByResourceGroupAsync(String resourceGroupName) {
return new PagedFlux<>(
() -> listByResourceGroupSinglePageAsync(resourceGroupName), nextLink -> listNextSinglePageAsync(nextLink));
}
/**
* Lists all availability sets in a resource group.
*
* @param resourceGroupName The name of the resource group.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the List Availability Set operation response.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<AvailabilitySetInner> listByResourceGroupAsync(String resourceGroupName, Context context) {
return new PagedFlux<>(
() -> listByResourceGroupSinglePageAsync(resourceGroupName, context),
nextLink -> listNextSinglePageAsync(nextLink, context));
}
/**
* Lists all availability sets in a resource group.
*
* @param resourceGroupName The name of the resource group.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the List Availability Set operation response.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<AvailabilitySetInner> listByResourceGroup(String resourceGroupName) {
return new PagedIterable<>(listByResourceGroupAsync(resourceGroupName));
}
/**
* Lists all availability sets in a resource group.
*
* @param resourceGroupName The name of the resource group.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the List Availability Set operation response.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<AvailabilitySetInner> listByResourceGroup(String resourceGroupName, Context context) {
return new PagedIterable<>(listByResourceGroupAsync(resourceGroupName, context));
}
    /**
     * Lists all available virtual machine sizes that can be used to create a new virtual machine in an existing
     * availability set.
     *
     * @param resourceGroupName The name of the resource group.
     * @param availabilitySetName The name of the availability set.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the List Virtual Machine operation response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<PagedResponse<VirtualMachineSizeInner>> listAvailableSizesSinglePageAsync(
        String resourceGroupName, String availabilitySetName) {
        // Missing required parameters are reported as error signals rather than thrown synchronously.
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        if (resourceGroupName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
        }
        if (availabilitySetName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter availabilitySetName is required and cannot be null."));
        }
        if (this.client.getSubscriptionId() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getSubscriptionId() is required and cannot be null."));
        }
        final String apiVersion = "2019-12-01";
        return FluxUtil
            .withContext(
                context ->
                    service
                        .listAvailableSizes(
                            this.client.getEndpoint(),
                            resourceGroupName,
                            availabilitySetName,
                            apiVersion,
                            this.client.getSubscriptionId(),
                            context))
            // The sizes listing is returned in a single page: nextLink is passed as null.
            .<PagedResponse<VirtualMachineSizeInner>>map(
                res ->
                    new PagedResponseBase<>(
                        res.getRequest(), res.getStatusCode(), res.getHeaders(), res.getValue().value(), null, null))
            // Seed the subscription with the client's stored context so pipeline policies can see it.
            .subscriberContext(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext())));
    }
    /**
     * Lists all available virtual machine sizes that can be used to create a new virtual machine in an existing
     * availability set.
     *
     * @param resourceGroupName The name of the resource group.
     * @param availabilitySetName The name of the availability set.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the List Virtual Machine operation response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<PagedResponse<VirtualMachineSizeInner>> listAvailableSizesSinglePageAsync(
        String resourceGroupName, String availabilitySetName, Context context) {
        // Missing required parameters are reported as error signals rather than thrown synchronously.
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        if (resourceGroupName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
        }
        if (availabilitySetName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter availabilitySetName is required and cannot be null."));
        }
        if (this.client.getSubscriptionId() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getSubscriptionId() is required and cannot be null."));
        }
        final String apiVersion = "2019-12-01";
        // Merge the caller-supplied context with the client's default context before the call.
        context = this.client.mergeContext(context);
        return service
            .listAvailableSizes(
                this.client.getEndpoint(),
                resourceGroupName,
                availabilitySetName,
                apiVersion,
                this.client.getSubscriptionId(),
                context)
            // The sizes listing is returned in a single page: nextLink is passed as null.
            .map(
                res ->
                    new PagedResponseBase<>(
                        res.getRequest(), res.getStatusCode(), res.getHeaders(), res.getValue().value(), null, null));
    }
/**
* Lists all available virtual machine sizes that can be used to create a new virtual machine in an existing
* availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the List Virtual Machine operation response.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<VirtualMachineSizeInner> listAvailableSizesAsync(
String resourceGroupName, String availabilitySetName) {
return new PagedFlux<>(() -> listAvailableSizesSinglePageAsync(resourceGroupName, availabilitySetName));
}
/**
* Lists all available virtual machine sizes that can be used to create a new virtual machine in an existing
* availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the List Virtual Machine operation response.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedFlux<VirtualMachineSizeInner> listAvailableSizesAsync(
String resourceGroupName, String availabilitySetName, Context context) {
return new PagedFlux<>(
() -> listAvailableSizesSinglePageAsync(resourceGroupName, availabilitySetName, context));
}
/**
* Lists all available virtual machine sizes that can be used to create a new virtual machine in an existing
* availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the List Virtual Machine operation response.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<VirtualMachineSizeInner> listAvailableSizes(
String resourceGroupName, String availabilitySetName) {
return new PagedIterable<>(listAvailableSizesAsync(resourceGroupName, availabilitySetName));
}
/**
* Lists all available virtual machine sizes that can be used to create a new virtual machine in an existing
* availability set.
*
* @param resourceGroupName The name of the resource group.
* @param availabilitySetName The name of the availability set.
* @param context The context to associate with this operation.
* @throws IllegalArgumentException thrown if parameters fail the validation.
* @throws ManagementException thrown if the request is rejected by server.
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
* @return the List Virtual Machine operation response.
*/
@ServiceMethod(returns = ReturnType.COLLECTION)
public PagedIterable<VirtualMachineSizeInner> listAvailableSizes(
String resourceGroupName, String availabilitySetName, Context context) {
return new PagedIterable<>(listAvailableSizesAsync(resourceGroupName, availabilitySetName, context));
}
    /**
     * Get the next page of items.
     *
     * @param nextLink The nextLink parameter.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the List Availability Set operation response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<PagedResponse<AvailabilitySetInner>> listBySubscriptionNextSinglePageAsync(String nextLink) {
        // A null nextLink is reported as an error signal rather than thrown synchronously.
        if (nextLink == null) {
            return Mono.error(new IllegalArgumentException("Parameter nextLink is required and cannot be null."));
        }
        // Capture the subscriber's Reactor context for the HTTP call, then adapt the raw
        // response into a PagedResponse carrying this page's items and the following nextLink.
        return FluxUtil
            .withContext(context -> service.listBySubscriptionNext(nextLink, context))
            .<PagedResponse<AvailabilitySetInner>>map(
                res ->
                    new PagedResponseBase<>(
                        res.getRequest(),
                        res.getStatusCode(),
                        res.getHeaders(),
                        res.getValue().value(),
                        res.getValue().nextLink(),
                        null))
            // Seed the subscription with the client's stored context so pipeline policies can see it.
            .subscriberContext(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext())));
    }
    /**
     * Get the next page of items.
     *
     * @param nextLink The nextLink parameter.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the List Availability Set operation response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<PagedResponse<AvailabilitySetInner>> listBySubscriptionNextSinglePageAsync(
        String nextLink, Context context) {
        // A null nextLink is reported as an error signal rather than thrown synchronously.
        if (nextLink == null) {
            return Mono.error(new IllegalArgumentException("Parameter nextLink is required and cannot be null."));
        }
        // Merge the caller-supplied context with the client's default context before the call.
        context = this.client.mergeContext(context);
        return service
            .listBySubscriptionNext(nextLink, context)
            // Adapt the raw response into a PagedResponse carrying the items and the following nextLink.
            .map(
                res ->
                    new PagedResponseBase<>(
                        res.getRequest(),
                        res.getStatusCode(),
                        res.getHeaders(),
                        res.getValue().value(),
                        res.getValue().nextLink(),
                        null));
    }
    /**
     * Get the next page of items.
     *
     * @param nextLink The nextLink parameter.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the List Availability Set operation response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<PagedResponse<AvailabilitySetInner>> listNextSinglePageAsync(String nextLink) {
        // A null nextLink is reported as an error signal rather than thrown synchronously.
        if (nextLink == null) {
            return Mono.error(new IllegalArgumentException("Parameter nextLink is required and cannot be null."));
        }
        // Capture the subscriber's Reactor context for the HTTP call, then adapt the raw
        // response into a PagedResponse carrying this page's items and the following nextLink.
        return FluxUtil
            .withContext(context -> service.listNext(nextLink, context))
            .<PagedResponse<AvailabilitySetInner>>map(
                res ->
                    new PagedResponseBase<>(
                        res.getRequest(),
                        res.getStatusCode(),
                        res.getHeaders(),
                        res.getValue().value(),
                        res.getValue().nextLink(),
                        null))
            // Seed the subscription with the client's stored context so pipeline policies can see it.
            .subscriberContext(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext())));
    }
    /**
     * Get the next page of items.
     *
     * @param nextLink The nextLink parameter.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the List Availability Set operation response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Mono<PagedResponse<AvailabilitySetInner>> listNextSinglePageAsync(String nextLink, Context context) {
        // A null nextLink is reported as an error signal rather than thrown synchronously.
        if (nextLink == null) {
            return Mono.error(new IllegalArgumentException("Parameter nextLink is required and cannot be null."));
        }
        // Merge the caller-supplied context with the client's default context before the call.
        context = this.client.mergeContext(context);
        return service
            .listNext(nextLink, context)
            // Adapt the raw response into a PagedResponse carrying the items and the following nextLink.
            .map(
                res ->
                    new PagedResponseBase<>(
                        res.getRequest(),
                        res.getStatusCode(),
                        res.getHeaders(),
                        res.getValue().value(),
                        res.getValue().nextLink(),
                        null));
    }
}
|
package techreborn.compat.minetweaker;
import minetweaker.MineTweakerAPI;
import minetweaker.api.item.IIngredient;
import minetweaker.api.item.IItemStack;
import net.minecraft.item.ItemStack;
import stanhebben.zenscript.annotations.ZenClass;
import stanhebben.zenscript.annotations.ZenMethod;
import techreborn.api.recipe.machines.ImplosionCompressorRecipe;
import techreborn.lib.Reference;
@ZenClass("mods.techreborn.implosionCompressor")
public class MTImplosionCompressor extends MTGeneric {

    /**
     * Registers an implosion compressor recipe (default ore-dictionary behaviour).
     *
     * @param output1  first result stack
     * @param output2  second result stack
     * @param input1   first ingredient consumed by the recipe
     * @param input2   second ingredient consumed by the recipe
     * @param ticktime processing time in ticks
     * @param euTick   energy drawn per tick
     */
    @ZenMethod
    public static void addRecipe(IItemStack output1, IItemStack output2, IIngredient input1, IIngredient input2, int ticktime, int euTick) {
        ItemStack firstInput = (ItemStack) MinetweakerCompat.toObject(input1);
        ItemStack secondInput = (ItemStack) MinetweakerCompat.toObject(input2);
        addRecipe(new ImplosionCompressorRecipe(firstInput, secondInput, MinetweakerCompat.toStack(output1), MinetweakerCompat.toStack(output2), ticktime, euTick));
    }

    /**
     * Registers an implosion compressor recipe with explicit ore-dictionary handling.
     *
     * @param output1    first result stack
     * @param output2    second result stack
     * @param input1     first ingredient consumed by the recipe
     * @param input2     second ingredient consumed by the recipe
     * @param ticktime   processing time in ticks
     * @param euTick     energy drawn per tick
     * @param useOredict whether inputs should be matched via the ore dictionary
     */
    @ZenMethod
    public static void addRecipe(IItemStack output1, IItemStack output2, IIngredient input1, IIngredient input2, int ticktime, int euTick, boolean useOredict) {
        ItemStack firstInput = (ItemStack) MinetweakerCompat.toObject(input1);
        ItemStack secondInput = (ItemStack) MinetweakerCompat.toObject(input2);
        addRecipe(new ImplosionCompressorRecipe(firstInput, secondInput, MinetweakerCompat.toStack(output1), MinetweakerCompat.toStack(output2), ticktime, euTick, useOredict));
    }

    /** Removes every implosion compressor recipe that consumes the given ingredient. */
    @ZenMethod
    public static void removeInputRecipe(IIngredient iIngredient) {
        MineTweakerAPI.apply(new RemoveInput(iIngredient, getMachineName()));
    }

    /** Removes every implosion compressor recipe that produces the given output stack. */
    @ZenMethod
    public static void removeRecipe(IItemStack output) {
        MineTweakerAPI.apply(new Remove(MinetweakerCompat.toStack(output), getMachineName()));
    }

    /** @return the registry name used for implosion compressor recipes. */
    public static String getMachineName() {
        return Reference.implosionCompressorRecipe;
    }
}
|
package suppliers;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import java.util.HashMap;
import java.util.Map;
public class MoodleLoginSupplier implements LoginSupplier {

    /**
     * Logs the user into the given Moodle instance via its web-service API.
     *
     * @param url      base URL of the Moodle site
     * @param username the user's login name
     * @param password the user's password
     * @return a map containing the web-service "token", the "userid" and the "sitename"
     * @throws Exception if Moodle reports an error code instead of a token
     */
    @Override
    public Map<String, String> login(String url, String username, String password) throws Exception {
        Map<String, String> result = new HashMap<>();
        MoodleWSConnection moodleWS = new MoodleWSConnection();
        // Gets the user's token (as a response)
        String tokenResponse = moodleWS.getTokenResponse(url, username, password);
        // Check if the response contains the user's token:
        JsonObject jsonResponse = new JsonParser().parse(tokenResponse).getAsJsonObject();
        if (jsonResponse.has("errorcode")) {
            // An error happened, throw that error back.
            // getAsString() (not toString()) so the code is not wrapped in JSON quotes.
            String error = jsonResponse.get("errorcode").getAsString();
            throw new Exception(error);
        }
        // Everything went correctly
        String token = jsonResponse.get("token").getAsString();
        result.put("token", token);
        // Add the userid and site name from the site-info call to the response
        JsonObject infoJson = moodleWS.getMoodleInfo(url, token);
        result.put("userid", infoJson.get("userid").getAsString());
        result.put("sitename", infoJson.get("sitename").getAsString());
        // NOTE: do not print the result map — it contains the session token (secret).
        return result;
    }
}
|
/*
* Copyright (c) 2010-2011 Brigham Young University
*
* This file is part of the BYU RapidSmith Tools.
*
* BYU RapidSmith Tools is free software: you may redistribute it
* and/or modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, either version 2 of
* the License, or (at your option) any later version.
*
* BYU RapidSmith Tools is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* A copy of the GNU General Public License is included with the BYU
* RapidSmith Tools. It can be found at doc/gpl2.txt. You may also
* get a copy of the license at <http://www.gnu.org/licenses/>.
*
*/
package edu.byu.ece.rapidSmith.bitstreamTools.configurationSpecification;
import java.io.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
/**
* This class generates Java source files that describe Xilinx device specifications
* for the virtex5 architecture.
*/
public class XilinxV5DeviceClassGenerator extends XilinxDeviceClassGenerator {

    // NOTE(review): this constant is not referenced anywhere in this class; it may be
    // used by subclasses or reflectively — confirm before removing.
    protected static final String kFilePrefix = "o7aTOwq3";

    /**
     * Construct a new generator and collect all of the data needed
     * to write Java source code for a part library. Data is collected
     * using the Xilinx partgen, xdl, and bitgen tools.
     */
    public XilinxV5DeviceClassGenerator() {
        super("virtex5", V56ConfigurationSpecification.V56_ROW_MASK, V56ConfigurationSpecification.V56_ROW_BIT_POS, V56ConfigurationSpecification.V56_TOP_BOTTOM_MASK, V56ConfigurationSpecification.V56_TOP_BOTTOM_BIT_POS);
    }

    /**
     * Create the Java source code for a part library of the architecture associated
     * with this generator instance. The generated file is written to the current
     * working directory as V&lt;n&gt;PartLibrary.java.
     */
    public void createJavaSourceFile() {
        // The last character of the architecture name ("virtex5" -> "5") selects the family.
        String famNumber = Character.toString(_architecture.charAt(_architecture.length() - 1));
        String namePrefix = "V" + famNumber;
        String fileName = namePrefix + "PartLibrary.java";
        try {
            BufferedWriter buf = new BufferedWriter(new FileWriter(fileName));
            createFileHeader(buf);
            buf.write("package edu.byu.ece.bitstreamTools.configurationSpecification;\n\n");
            buf.write("import java.util.ArrayList;\nimport java.util.Arrays;\n\n");
            buf.write("public class " + namePrefix + "PartLibrary extends PartLibrary {\n\n");
            // constructor
            buf.write("\tpublic " + namePrefix + "PartLibrary() {\n");
            buf.write("\t\tsuper();\n");
            buf.write("\t}\n\n");
            // add parts method
            buf.write("\tprotected void addParts() {\n");
            for (String currPartName : _partNames) {
                buf.write("\t\taddPart(new " + currPartName.toUpperCase() + "());\n");
            }
            buf.write("\t}\n\n");
            // define parts: one inner class per part, indexed in step with the
            // parallel lists (_deviceIDCodes, _validPackages, ...) collected earlier
            int i = 0;
            for (String currPartName : _partNames) {
                buf.write("\tclass " + currPartName.toUpperCase() + " extends V" + famNumber + "ConfigurationSpecification {\n\n");
                buf.write("\t\tpublic " + currPartName.toUpperCase() + "() {\n");
                buf.write("\t\t\tsuper();\n");
                buf.write("\t\t\t_deviceName = \"" + currPartName.toUpperCase() + "\";\n");
                buf.write("\t\t\t_deviceIDCode = \"" + _deviceIDCodes.get(i) + "\";\n");
                // Write out Packages
                buf.write("\t\t\t_validPackages = new String[] {");
                for (String pkg : _validPackages.get(i)) {
                    buf.write("\"" + pkg + "\", ");
                }
                buf.write("};\n");
                // Write out Speed Grades
                buf.write("\t\t\t_validSpeedGrades = new String[] {");
                for (String speeds : _validSpeedGrades.get(i)) {
                    buf.write("\"" + speeds + "\", ");
                }
                buf.write("};\n");
                buf.write("\t\t\t_topRows = " + _numTopRows.get(i) + ";\n");
                buf.write("\t\t\t_bottomRows = " + _numBottomRows.get(i) + ";\n");
                buf.write("\t\t\t_blockTypeLayouts = new ArrayList<BlockTypeInstance>(Arrays.asList(new BlockTypeInstance[] {\n");
                buf.write("\t\t\t\t\tnew BlockTypeInstance(LOGIC_INTERCONNECT_BLOCKTYPE, new BlockSubType[] {\n\t\t\t\t\t\t");
                for (BlockSubType blk : _logicLayouts.get(i)) {
                    buf.write(blk + ", ");
                }
                buf.write("\n\t\t\t\t\t}),\n");
                buf.write("\t\t\t\t\tnew BlockTypeInstance(BRAM_CONTENT_BLOCKTYPE, new BlockSubType[] {\n\t\t\t\t\t\t");
                for (BlockSubType blk : _bramContentLayouts.get(i)) {
                    buf.write(blk + ", ");
                }
                buf.write("\n\t\t\t\t\t}),\n");
                buf.write("\t\t\t}));\n");
                buf.write("\t\t\t_overallColumnLayout = _blockTypeLayouts.get(0).getColumnLayout();\n");
                buf.write("\t\t}\n");
                buf.write("\t}\n\n");
                i++;
            }
            buf.write("}\n");
            buf.flush();
            // Fix: close the writer so the file handle is released (was leaked before).
            buf.close();
        } catch (IOException e) {
            e.printStackTrace();
            System.err.println("Error writing file: " + fileName);
            System.exit(1);
        }
    }

    /**
     * Parse a brief XDLRC dump for each part and record, per column, which block
     * sub-type occupies it; the sorted column map is then flattened into the
     * logic and BRAM-content layout lists.
     */
    protected void getPartColumnTypes() {
        BufferedReader in;
        String line;
        String[] tokens;
        for (String partName : _partNames) {
            System.out.println("Generating/parsing .xdlrc for " + partName);
            // Generate XDLRC first
            generateBriefXDLRCFile(partName, _xdlrcFile);
            List<BlockSubType> currLogicLayout = new ArrayList<BlockSubType>();
            List<BlockSubType> currBramContentLayout = new ArrayList<BlockSubType>();
            // TreeMap keeps columns sorted so layouts are emitted left-to-right.
            Map<Integer, BlockSubType> columnMap = new TreeMap<Integer, BlockSubType>();
            try {
                in = new BufferedReader(new FileReader(_xdlrcFile));
                line = in.readLine();
                while (line != null) {
                    tokens = line.split("\\s+");
                    // Only "(tile <row> <col> <name> ...)" records are of interest.
                    // (A redundant nested re-check of tokens[1] with an empty body was removed.)
                    if (tokens.length > 1 && tokens[1].equals("(tile")) {
                        String name = tokens[5];
                        int column = Integer.parseInt(tokens[3]);
                        if (name.equals("LIOB") || name.equals("RIOB") || name.equals("CIOB")) {
                            columnMap.put(column, V5ConfigurationSpecification.IOB);
                        } else if (name.equals("CLBLM") || name.equals("CLBLL")) {
                            columnMap.put(column, V5ConfigurationSpecification.CLB);
                        } else if (name.equals("DSP")) {
                            columnMap.put(column, V5ConfigurationSpecification.DSP);
                        } else if (name.equals("CLKV")) {
                            columnMap.put(column, V5ConfigurationSpecification.CLK);
                        } else if (name.equals("GTX") || name.equals("GTX_L_TERM_INT")) {
                            columnMap.put(column, V5ConfigurationSpecification.GTX);
                        } else if (name.equals("GT3")) {
                            columnMap.put(column, V5ConfigurationSpecification.GTP);
                        } else if (name.equals("BRAM") || name.equals("PCIE_BRAM")) {
                            columnMap.put(column, V5ConfigurationSpecification.BRAMINTERCONNECT);
                        }
                    }
                    line = in.readLine();
                }
                in.close();
                // Each BRAM interconnect column contributes one BRAM content column.
                for (Integer key : columnMap.keySet()) {
                    BlockSubType subType = columnMap.get(key);
                    currLogicLayout.add(subType);
                    if (subType == V5ConfigurationSpecification.BRAMINTERCONNECT) {
                        currBramContentLayout.add(V5ConfigurationSpecification.BRAMCONTENT);
                    }
                }
                currLogicLayout.add(V5ConfigurationSpecification.LOGIC_OVERHEAD);
                currBramContentLayout.add(V5ConfigurationSpecification.BRAMOVERHEAD);
                _logicLayouts.add(currLogicLayout);
                _bramContentLayouts.add(currBramContentLayout);
            } catch (FileNotFoundException e) {
                e.printStackTrace();
                System.err.println("Error opening temporary file: " + _xdlrcFile.getAbsolutePath());
                System.exit(1);
            } catch (IOException e) {
                e.printStackTrace();
                System.err.println("Error reading temporary file: " + _xdlrcFile.getAbsolutePath());
                System.exit(1);
            }
        }
    }

    /**
     * Allows this class to run stand alone from the rest of the project.
     *
     * @param args unused — this generator is fixed to the virtex5 architecture
     */
    public static void main(String args[]) {
        XilinxV5DeviceClassGenerator gen = new XilinxV5DeviceClassGenerator();
        gen.createJavaSourceFile();
    }
}
|
/*
* MIT License
*
* Copyright (c) 2021 MASES s.r.l.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/**************************************************************************************
* <auto-generated>
* This code was generated from a template using JCOReflector
*
* Manual changes to this file may cause unexpected behavior in your application.
* Manual changes to this file will be overwritten if the code is regenerated.
* </auto-generated>
*************************************************************************************/
package system.runtime.interopservices;
import org.mases.jcobridge.*;
import org.mases.jcobridge.netreflection.*;
import java.util.ArrayList;
// Import section
/**
* The base .NET class managing System.Runtime.InteropServices.DispatchWrapper, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089.
* <p>
*
* See: <a href="https://docs.microsoft.com/en-us/dotnet/api/System.Runtime.InteropServices.DispatchWrapper" target="_top">https://docs.microsoft.com/en-us/dotnet/api/System.Runtime.InteropServices.DispatchWrapper</a>
*/
public class DispatchWrapper extends NetObject {
    /**
     * Fully assembly qualified name: mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089
     */
    public static final String assemblyFullName = "mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089";
    /**
     * Assembly name: mscorlib
     */
    public static final String assemblyShortName = "mscorlib";
    /**
     * Qualified class name: System.Runtime.InteropServices.DispatchWrapper
     */
    public static final String className = "System.Runtime.InteropServices.DispatchWrapper";
    // Bridge bound to the containing assembly; resolves and instantiates the .NET type.
    static JCOBridge bridge = JCOBridgeInstance.getInstance(assemblyFullName);
    /**
     * The type managed from JCOBridge. See {@link JCType}
     */
    public static JCType classType = createType();
    // NOTE(review): never assigned in this class — appears to be a generator template field
    // used only for enum wrappers.
    static JCEnum enumInstance = null;
    // The wrapped .NET instance all members delegate to.
    JCObject classInstance = null;
    // Resolves the .NET type through the bridge; returns null (and logs) on failure.
    static JCType createType() {
        try {
            String classToCreate = className + ", "
                    + (JCOReflector.getUseFullAssemblyName() ? assemblyFullName : assemblyShortName);
            if (JCOReflector.getDebug())
                JCOReflector.writeLog("Creating %s", classToCreate);
            JCType typeCreated = bridge.GetType(classToCreate);
            if (JCOReflector.getDebug())
                JCOReflector.writeLog("Created: %s",
                        (typeCreated != null) ? typeCreated.toString() : "Returned null value");
            return typeCreated;
        } catch (JCException e) {
            JCOReflector.writeLog(e);
            return null;
        }
    }
    // Registers an assembly reference with the bridge, translating native bridge errors.
    void addReference(String ref) throws Throwable {
        try {
            bridge.AddReference(ref);
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    /**
     * Internal constructor. Use with caution
     */
    public DispatchWrapper(java.lang.Object instance) throws Throwable {
        super(instance);
        if (instance instanceof JCObject) {
            classInstance = (JCObject) instance;
        } else
            throw new Exception("Cannot manage object, it is not a JCObject");
    }
    // Returns the fully qualified .NET assembly name this wrapper is bound to.
    public String getJCOAssemblyName() {
        return assemblyFullName;
    }
    // Returns the qualified .NET class name this wrapper manages.
    public String getJCOClassName() {
        return className;
    }
    // Returns the "class, assembly" pair used to address the type through the bridge.
    public String getJCOObjectName() {
        return className + ", " + (JCOReflector.getUseFullAssemblyName() ? assemblyFullName : assemblyShortName);
    }
    public java.lang.Object getJCOInstance() {
        return classInstance;
    }
    public void setJCOInstance(JCObject instance) {
        classInstance = instance;
        super.setJCOInstance(classInstance);
    }
    public JCType getJCOType() {
        return classType;
    }
    /**
     * Try to cast the {@link IJCOBridgeReflected} instance into {@link DispatchWrapper}, a cast assert is made to check if types are compatible.
     * @param from {@link IJCOBridgeReflected} instance to be casted
     * @return {@link DispatchWrapper} instance
     * @throws java.lang.Throwable in case of error during cast operation
     */
    public static DispatchWrapper cast(IJCOBridgeReflected from) throws Throwable {
        NetType.AssertCast(classType, from);
        return new DispatchWrapper(from.getJCOInstance());
    }
    // Constructors section
    // NOTE(review): intentionally empty — creates a wrapper with no .NET instance attached.
    public DispatchWrapper() throws Throwable {
    }
    public DispatchWrapper(NetObject obj) throws Throwable {
        try {
            // add reference to assemblyName.dll file
            addReference(JCOReflector.getUseFullAssemblyName() ? assemblyFullName : assemblyShortName);
            setJCOInstance((JCObject)classType.NewObject(obj == null ? null : obj.getJCOInstance()));
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    // Methods section
    // Properties section
    // Reads the .NET "WrappedObject" property of the underlying instance.
    public NetObject getWrappedObject() throws Throwable {
        if (classInstance == null)
            throw new UnsupportedOperationException("classInstance is null.");
        try {
            JCObject val = (JCObject)classInstance.Get("WrappedObject");
            return new NetObject(val);
        } catch (JCNativeException jcne) {
            throw translateException(jcne);
        }
    }
    // Instance Events section
}
|
package org.example.config;
import java.io.File;
// Central constants for connecting a Fabric client to the sample two-org network.
public class Config {
    // Organization MSP identifiers and enrollment credentials.
    public static final String ORG1_MSP = "Org1MSP";
    public static final String ORG1 = "org1";
    public static final String ORG2_MSP = "Org2MSP";
    public static final String ORG2 = "org2";
    public static final String ADMIN = "admin";
    public static final String ADMIN_PASSWORD = "adminpw";
    // Path to the generated channel configuration transaction.
    public static final String CHANNEL_CONFIG_PATH = "config/channel.tx";
    // File-system locations of each org's Admin MSP material (keys and certificates).
    public static final String ORG1_USR_BASE_PATH = "crypto-config" + File.separator + "peerOrganizations" + File.separator
            + "org1.adcinfo.cn" + File.separator + "users" + File.separator + "Admin@org1.adcinfo.cn"
            + File.separator + "msp";
    public static final String ORG2_USR_BASE_PATH = "crypto-config" + File.separator + "peerOrganizations" + File.separator
            + "org2.adcinfo.cn" + File.separator + "users" + File.separator + "Admin@org2.adcinfo.cn"
            + File.separator + "msp";
    public static final String ORG1_USR_ADMIN_PK = ORG1_USR_BASE_PATH + File.separator + "keystore";
    public static final String ORG1_USR_ADMIN_CERT = ORG1_USR_BASE_PATH + File.separator + "admincerts";
    public static final String ORG2_USR_ADMIN_PK = ORG2_USR_BASE_PATH + File.separator + "keystore";
    public static final String ORG2_USR_ADMIN_CERT = ORG2_USR_BASE_PATH + File.separator + "admincerts";
    // Network endpoints: certificate authorities, orderer, and peers of both orgs.
    public static final String CA_ORG1_URL = "http://localhost:7054";
    public static final String CA_ORG2_URL = "http://localhost:8054";
    public static final String ORDERER_URL = "grpc://localhost:7050";
    public static final String ORDERER_NAME = "orderer.adcinfo.cn";
    public static final String CHANNEL_NAME = "channel001";
    public static final String ORG1_PEER_0 = "peer0.org1.adcinfo.cn";
    public static final String ORG1_PEER_0_URL = "grpc://localhost:7051";
    public static final String ORG1_PEER_1 = "peer1.org1.adcinfo.cn";
    public static final String ORG1_PEER_1_URL = "grpc://localhost:7056";
    public static final String ORG2_PEER_0 = "peer0.org2.adcinfo.cn";
    public static final String ORG2_PEER_0_URL = "grpc://localhost:8051";
    public static final String ORG2_PEER_1 = "peer1.org2.adcinfo.cn";
    public static final String ORG2_PEER_1_URL = "grpc://localhost:8056";
    // Chaincode deployment settings.
    public static final String CHAINCODE_ROOT_DIR = "chaincode";
    public static final String CHAINCODE_1_NAME = "auth";
    public static final String CHAINCODE_1_PATH = "github.com/auth";
    public static final String CHAINCODE_1_VERSION = "1.1";
}
|
/*
* Copyright 2009-2016 Weibo, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.weibo.api.motan.transport;
import java.net.InetSocketAddress;
import com.weibo.api.motan.codec.Codec;
import com.weibo.api.motan.common.ChannelState;
import com.weibo.api.motan.common.URLParamType;
import com.weibo.api.motan.core.extension.ExtensionLoader;
import com.weibo.api.motan.exception.MotanFrameworkException;
import com.weibo.api.motan.rpc.Request;
import com.weibo.api.motan.rpc.URL;
import com.weibo.api.motan.util.LoggerUtil;
import com.weibo.api.motan.util.MotanFrameworkUtil;
/**
* @author maijunsheng
* @version 创建时间:2013-5-21
*
*/
public abstract class AbstractClient implements Client {
    protected InetSocketAddress localAddress;
    protected InetSocketAddress remoteAddress;
    protected URL url;
    protected Codec codec;
    protected volatile ChannelState state = ChannelState.UNINIT;

    /**
     * Initializes the client from its service URL and resolves the codec extension
     * configured on that URL (falling back to the framework default codec name).
     */
    public AbstractClient(URL url) {
        this.url = url;
        String codecName = url.getParameter(URLParamType.codec.getName(), URLParamType.codec.getValue());
        this.codec = ExtensionLoader.getExtensionLoader(Codec.class).getExtension(codecName);
        LoggerUtil.info("init nettyclient. url:" + url.getHost() + "-" + url.getPath() + ", use codec:" + codec.getClass().getSimpleName());
    }

    @Override
    public InetSocketAddress getLocalAddress() {
        return localAddress;
    }

    @Override
    public InetSocketAddress getRemoteAddress() {
        return remoteAddress;
    }

    /** Heartbeat is unsupported at this level; transports that support it must override. */
    @Override
    public void heartbeat(Request request) {
        throw new MotanFrameworkException("heartbeat not support: " + MotanFrameworkUtil.toString(request));
    }

    public void setLocalAddress(InetSocketAddress localAddress) {
        this.localAddress = localAddress;
    }

    public void setRemoteAddress(InetSocketAddress remoteAddress) {
        this.remoteAddress = remoteAddress;
    }
}
|
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.client.slm;
import org.elasticsearch.client.TimedRequest;
import java.util.Objects;
/**
 * Request to immediately execute a snapshot lifecycle management (SLM) policy,
 * identified by its policy id.
 */
public class ExecuteSnapshotLifecyclePolicyRequest extends TimedRequest {
    private final String policyId;

    /**
     * @param policyId id of the snapshot lifecycle policy to execute
     */
    public ExecuteSnapshotLifecyclePolicyRequest(String policyId) {
        this.policyId = policyId;
    }

    public String getPolicyId() {
        return this.policyId;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        ExecuteSnapshotLifecyclePolicyRequest other = (ExecuteSnapshotLifecyclePolicyRequest) o;
        // Objects.equals is null-safe; the previous this.policyId.equals(...) threw an NPE
        // for a null policy id even though hashCode already handled null via Objects.hash.
        return Objects.equals(this.policyId, other.policyId);
    }

    @Override
    public int hashCode() {
        return Objects.hash(this.policyId);
    }
}
|
package com.kunyang.android.qq.Data.Restapi;
import android.content.pm.ApplicationInfo;
import android.content.pm.PackageManager;
import com.hyphenate.chat.EMClient;
import com.kunyang.android.qq.Data.Model.LiveRoom;
import com.kunyang.android.qq.Data.Restapi.Model.LiveStatusModule;
import com.kunyang.android.qq.Data.Restapi.Model.ResponseModule;
import com.kunyang.android.qq.Data.Restapi.Model.StatisticsType;
import com.kunyang.android.qq.MyApplication;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.IOException;
import java.util.List;
import okhttp3.Interceptor;
import okhttp3.MediaType;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.RequestBody;
import okhttp3.logging.HttpLoggingInterceptor;
import retrofit2.Call;
import retrofit2.Response;
import retrofit2.Retrofit;
import retrofit2.converter.gson.GsonConverterFactory;
/**
* Created by 坤阳 on 2017/12/30.
*/
/**
 * Singleton REST client for the Easemob live-streaming API. Reads the app key from
 * the Android manifest meta-data, builds a Retrofit service with bearer-token and
 * JSON headers, and exposes room lifecycle operations (create, update, query,
 * terminate, statistics).
 */
public class LiveManager {
    private String appkey;
    private ApiService apiService;
    private static LiveManager instance;

    private LiveManager(){
        try {
            // Read the Easemob app key from manifest meta-data; '#' must be replaced
            // with '/' to form the REST base-path segment.
            ApplicationInfo appInfo = MyApplication.getInstance().getPackageManager().getApplicationInfo(
                    MyApplication.getInstance().getPackageName(), PackageManager.GET_META_DATA);
            appkey = appInfo.metaData.getString("EASEMOB_APPKEY");
            appkey = appkey.replace("#","/");
        } catch (PackageManager.NameNotFoundException e) {
            throw new RuntimeException("must set the easemob appkey");
        }
        HttpLoggingInterceptor httpLoggingInterceptor = new HttpLoggingInterceptor();
        httpLoggingInterceptor.setLevel(HttpLoggingInterceptor.Level.BODY);
        OkHttpClient httpClient = new OkHttpClient.Builder()
                .addInterceptor(new RequestInterceptor())
                .addInterceptor(httpLoggingInterceptor)
                .build();
        Retrofit retrofit = new Retrofit.Builder()
                .baseUrl("http://a1.easemob.com/"+appkey+"/")
                .addConverterFactory(GsonConverterFactory.create())
                .client(httpClient)
                .build();
        apiService = retrofit.create(ApiService.class);
    }

    /** Attaches the bearer token and JSON content-type headers to every request. */
    static class RequestInterceptor implements Interceptor {
        @Override public okhttp3.Response intercept(Chain chain) throws IOException {
            Request original = chain.request();
            Request request = original.newBuilder()
                    .header("Authorization", "Bearer " + EMClient.getInstance().getAccessToken())
                    .header("Accept", "application/json")
                    .header("Content-Type", "application/json")
                    .method(original.method(), original.body())
                    .build();
            okhttp3.Response response = chain.proceed(request);
            return response;
        }
    }

    /**
     * Returns the lazily-created singleton. Synchronized so that concurrent first
     * calls cannot observe the null check simultaneously and build two instances
     * (the previous unsynchronized lazy init was racy).
     */
    public static synchronized LiveManager getInstance(){
        if(instance == null){
            instance = new LiveManager();
        }
        return instance;
    }

    /**
     * Creates a new live room.
     *
     * @param name        live room name
     * @param description live room description
     * @param coverUrl    cover image url
     * @return the created {@link LiveRoom}
     * @throws LiveException on a non-2xx response or I/O failure
     */
    public LiveRoom createLiveRoom(String name, String description, String coverUrl) throws LiveException {
        return createLiveRoomWithRequest(name, description, coverUrl, null);
    }

    /**
     * Creates a live show inside an already-associated live room.
     *
     * @param name        live room name
     * @param description live room description
     * @param coverUrl    cover image url
     * @param liveRoomId  id of the associated live room to start the show in
     * @return the created {@link LiveRoom}
     * @throws LiveException on a non-2xx response or I/O failure
     */
    public LiveRoom createLiveRoom(String name, String description, String coverUrl, String liveRoomId) throws LiveException {
        return createLiveRoomWithRequest(name, description, coverUrl, liveRoomId);
    }

    // Shared implementation: posts the room payload, then merges server-assigned
    // fields (id, chatroom id, pull/push urls) back into the local object.
    private LiveRoom createLiveRoomWithRequest(String name, String description, String coverUrl, String liveRoomId) throws LiveException {
        LiveRoom liveRoom = new LiveRoom();
        liveRoom.setName(name);
        liveRoom.setDescription(description);
        liveRoom.setAnchorId(EMClient.getInstance().getCurrentUser());
        liveRoom.setCover(coverUrl);
        Call<ResponseModule<LiveRoom>> responseCall;
        if(liveRoomId != null){
            responseCall = apiService.createLiveShow(liveRoomId, liveRoom);
        }else {
            responseCall = apiService.createLiveRoom(liveRoom);
        }
        ResponseModule<LiveRoom> response = handleResponseCall(responseCall).body();
        LiveRoom room = response.data;
        // The server may omit the id when an existing room id was supplied.
        if(room.getId() != null) {
            liveRoom.setId(room.getId());
        }else {
            liveRoom.setId(liveRoomId);
        }
        liveRoom.setChatroomId(room.getChatroomId());
        liveRoom.setLivePullUrl(room.getLivePullUrl());
        liveRoom.setLivePushUrl(room.getLivePushUrl());
        return liveRoom;
    }

    /**
     * Updates a live room's cover picture.
     *
     * @param roomId   room to update
     * @param coverUrl new cover image url
     * @throws LiveException on a non-2xx response or I/O failure
     */
    public void updateLiveRoomCover(String roomId, String coverUrl) throws LiveException {
        JSONObject jobj = new JSONObject();
        JSONObject picObj = new JSONObject();
        try {
            picObj.put("cover_picture_url", coverUrl);
            jobj.put("liveroom", picObj);
        } catch (JSONException e) {
            e.printStackTrace();
        }
        Call<ResponseModule> responseCall = apiService.updateLiveRoom(roomId, jsonToRequestBody(jobj.toString()));
        handleResponseCall(responseCall);
    }

    /**
     * Fetches the current streaming status of a live room.
     *
     * @param roomId room to query
     * @return the room's {@link LiveStatusModule.LiveStatus}
     * @throws LiveException on a non-2xx response or I/O failure
     */
    public LiveStatusModule.LiveStatus getLiveRoomStatus(String roomId) throws LiveException {
        Call<ResponseModule<LiveStatusModule>> respCall = apiService.getStatus(roomId);
        return handleResponseCall(respCall).body().data.status;
    }

    /**
     * Ends a live broadcast by marking the room's status as completed.
     *
     * @param roomId room whose broadcast is to be ended
     * @throws LiveException on a non-2xx response or I/O failure
     */
    public void terminateLiveRoom(String roomId) throws LiveException {
        LiveStatusModule module = new LiveStatusModule();
        module.status = LiveStatusModule.LiveStatus.completed;
        handleResponseCall(apiService.updateStatus(roomId, module));
    }

    /**
     * Fetches one page of live rooms.
     *
     * @param pageNum  page index
     * @param pageSize page size
     * @return the rooms on that page
     * @throws LiveException on a non-2xx response or I/O failure
     */
    public List<LiveRoom> getLiveRoomList(int pageNum, int pageSize) throws LiveException {
        Call<ResponseModule<List<LiveRoom>>> respCall = apiService.getLiveRoomList(pageNum, pageSize);
        ResponseModule<List<LiveRoom>> response = handleResponseCall(respCall).body();
        return response.data;
    }

    /**
     * Fetches the rooms that are currently broadcasting.
     *
     * @param limit  maximum number of rooms to return
     * @param cursor pagination cursor; pass null on the first call
     * @return response module containing the rooms and the next cursor
     * @throws LiveException on a non-2xx response or I/O failure
     */
    public ResponseModule<List<LiveRoom>> getLivingRoomList(int limit, String cursor) throws LiveException {
        Call<ResponseModule<List<LiveRoom>>> respCall = apiService.getLivingRoomList(limit, cursor);
        ResponseModule<List<LiveRoom>> response = handleResponseCall(respCall).body();
        return response;
    }

    /**
     * Fetches the details of a single live room.
     *
     * @param roomId room to query
     * @return the room details
     * @throws LiveException on a non-2xx response or I/O failure
     */
    public LiveRoom getLiveRoomDetails(String roomId) throws LiveException {
        return handleResponseCall(apiService.getLiveRoomDetails(roomId)).body().data;
    }

    /**
     * Fetches the ids of the live rooms already associated with a user.
     *
     * @param userId user to query
     * @return associated room ids
     * @throws LiveException on a non-2xx response or I/O failure
     */
    public List<String> getAssociatedRooms(String userId) throws LiveException {
        ResponseModule<List<String>> response = handleResponseCall(apiService.getAssociatedRoom(userId)).body();
        return response.data;
    }

    /**
     * Posts a statistics event (e.g. likes, gifts) for a room.
     *
     * @param type   statistics category
     * @param roomId room the event belongs to
     * @param count  event count
     * @throws LiveException on a non-2xx response or I/O failure
     */
    public void postStatistics(StatisticsType type, String roomId, int count) throws LiveException {
        JSONObject jobj = new JSONObject();
        try {
            jobj.put("type", type);
            jobj.put("count", count);
        } catch (JSONException e) {
            e.printStackTrace();
        }
        handleResponseCall(apiService.postStatistics(roomId, jsonToRequestBody(jobj.toString())));
    }

    // Executes a call synchronously, translating HTTP errors and I/O failures
    // into LiveException.
    private <T> Response<T> handleResponseCall(Call<T> responseCall) throws LiveException{
        try {
            Response<T> response = responseCall.execute();
            if(!response.isSuccessful()){
                throw new LiveException(response.code(), response.errorBody().string());
            }
            return response;
        } catch (IOException e) {
            throw new LiveException(e.getMessage());
        }
    }

    // Wraps a JSON string into an okhttp RequestBody with the JSON media type.
    private RequestBody jsonToRequestBody(String jsonStr){
        return RequestBody.create(MediaType.parse("application/json; charset=utf-8"), jsonStr);
    }
}
|
package edu.jhu.thrax.hadoop.features.mapreduce;
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import edu.jhu.thrax.hadoop.comparators.FieldComparator;
import edu.jhu.thrax.hadoop.comparators.PrimitiveArrayMarginalComparator;
import edu.jhu.thrax.hadoop.datatypes.Annotation;
import edu.jhu.thrax.hadoop.datatypes.FeaturePair;
import edu.jhu.thrax.hadoop.datatypes.PrimitiveUtils;
import edu.jhu.thrax.hadoop.datatypes.RuleWritable;
import edu.jhu.thrax.util.Vocabulary;
/**
 * MapReduce feature computing p(e|f,LHS): the negative log probability of the target
 * phrase given the source phrase and the left-hand-side nonterminal. The mapper emits
 * each rule's count plus a marginal record keyed on (LHS, source); the sort comparator
 * guarantees the marginal arrives at the reducer before the rules it normalizes.
 */
@SuppressWarnings("rawtypes")
public class TargetPhraseGivenSourceandLHSFeature extends MapReduceFeature {
  public static final String NAME = "e_given_f_and_lhs";
  public static final String LABEL = "p(e|f,LHS)";

  public String getName() {
    return NAME;
  }

  public String getLabel() {
    return LABEL;
  }

  public Class<? extends WritableComparator> sortComparatorClass() {
    return Comparator.class;
  }

  public Class<? extends Partitioner> partitionerClass() {
    return SourceandLHSPartitioner.class;
  }

  public Class<? extends Mapper> mapperClass() {
    return Map.class;
  }

  public Class<? extends Reducer> reducerClass() {
    return Reduce.class;
  }

  /** Emits each rule's count and an extra (LHS, source)-marginal record. */
  private static class Map extends Mapper<RuleWritable, Annotation, RuleWritable, FloatWritable> {
    protected void map(RuleWritable key, Annotation value, Context context) throws IOException,
        InterruptedException {
      // Marginal copy: target replaced with the MARGINAL sentinel so the comparator
      // sorts it before all real rules sharing the same LHS and source.
      RuleWritable lhs_source_marginal = new RuleWritable(key);
      lhs_source_marginal.target = PrimitiveArrayMarginalComparator.MARGINAL;
      lhs_source_marginal.monotone = false;
      FloatWritable count = new FloatWritable(value.count());
      context.write(key, count);
      context.write(lhs_source_marginal, count);
    }
  }

  /** Accumulates the marginal first, then scores each rule as -log(count / marginal). */
  private static class Reduce extends Reducer<RuleWritable, FloatWritable, RuleWritable, FeaturePair> {
    private float marginal;

    protected void setup(Context context) throws IOException, InterruptedException {
      Configuration conf = context.getConfiguration();
      String vocabulary_path = conf.getRaw("thrax.work-dir") + "vocabulary/part-*";
      Vocabulary.initialize(conf, vocabulary_path);
    }

    protected void reduce(RuleWritable key, Iterable<FloatWritable> values, Context context)
        throws IOException, InterruptedException {
      if (Arrays.equals(key.target, PrimitiveArrayMarginalComparator.MARGINAL)) {
        // we only get here if it is the very first time we saw the LHS
        // and source combination
        marginal = 0;
        for (FloatWritable x : values)
          marginal += x.get();
        return;
      }
      // control only gets here if we are using the same marginal
      float count = 0;
      for (FloatWritable x : values)
        count += x.get();
      FloatWritable prob = new FloatWritable((float) -Math.log(count / marginal));
      context.write(key, new FeaturePair(Vocabulary.id(LABEL), prob));
    }
  }

  /** Orders rules by LHS, then source, then target, then the leading flag byte. */
  public static class Comparator extends WritableComparator {
    private static final WritableComparator PARRAY_COMP = new PrimitiveArrayMarginalComparator();
    private static final FieldComparator SOURCE_COMP = new FieldComparator(0, PARRAY_COMP);
    private static final FieldComparator TARGET_COMP = new FieldComparator(1, PARRAY_COMP);

    public Comparator() {
      super(RuleWritable.class);
    }

    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
      try {
        int h1 = WritableUtils.decodeVIntSize(b1[s1 + 1]) + 1;
        int h2 = WritableUtils.decodeVIntSize(b2[s2 + 1]) + 1;
        int lhs1 = Math.abs(WritableComparator.readVInt(b1, s1 + 1));
        int lhs2 = Math.abs(WritableComparator.readVInt(b2, s2 + 1));
        int cmp = PrimitiveUtils.compare(lhs1, lhs2);
        if (cmp != 0) return cmp;
        cmp = SOURCE_COMP.compare(b1, s1 + h1, l1 - h1, b2, s2 + h2, l2 - h2);
        if (cmp != 0) return cmp;
        cmp = TARGET_COMP.compare(b1, s1 + h1, l1 - h1, b2, s2 + h2, l2 - h2);
        if (cmp != 0) return cmp;
        // Final tie-break on the leading byte. (The original wrote
        // "return cmp = ..." — a useless dead store inside the return.)
        return PrimitiveUtils.compare(b1[s1], b2[s2]);
      } catch (IOException e) {
        throw new IllegalArgumentException(e);
      }
    }
  }

  /** Routes all rules with the same (LHS, source) pair to the same reducer. */
  public static class SourceandLHSPartitioner extends Partitioner<RuleWritable, FloatWritable> {
    public int getPartition(RuleWritable key, FloatWritable value, int numPartitions) {
      int hash = 163;
      hash = 37 * hash + key.lhs;
      hash = 37 * hash + Arrays.hashCode(key.source);
      // Mask the sign bit so a negative hash cannot produce a negative partition.
      return (hash & Integer.MAX_VALUE) % numPartitions;
    }
  }

  private static final FloatWritable ZERO = new FloatWritable(0.0f);

  public void unaryGlueRuleScore(int nt, java.util.Map<Integer, Writable> map) {
    map.put(Vocabulary.id(LABEL), ZERO);
  }

  public void binaryGlueRuleScore(int nt, java.util.Map<Integer, Writable> map) {
    map.put(Vocabulary.id(LABEL), ZERO);
  }
}
|
package org.komamitsu.retrofit.converter.msgpack;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import okhttp3.mockwebserver.MockResponse;
import okhttp3.mockwebserver.MockWebServer;
import okhttp3.mockwebserver.RecordedRequest;
import okio.Buffer;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.msgpack.jackson.dataformat.MessagePackFactory;
import retrofit2.Call;
import retrofit2.Response;
import retrofit2.Retrofit;
import retrofit2.http.Body;
import retrofit2.http.POST;
import java.io.IOException;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.*;
/**
 * Round-trip tests for {@code MessagePackConverterFactory}: starts a MockWebServer,
 * posts a POJO through a Retrofit service, and verifies that both the request body
 * sent and the response body received are (de)serialized via the MessagePack
 * Jackson codec.
 */
public class MessagePackConverterFactoryTest
{
    @Rule
    public final MockWebServer server = new MockWebServer();
    private Service service;
    /** Simple immutable value type used as the request/response payload. */
    public static class Pojo
    {
        private final int i;
        private final float f;
        private final String s;
        public Pojo(
                @JsonProperty("i") int i,
                @JsonProperty("f") float f,
                @JsonProperty("s") String s)
        {
            this.i = i;
            this.f = f;
            this.s = s;
        }
        @JsonProperty("i")
        public int getI()
        {
            return i;
        }
        @JsonProperty("f")
        public float getF()
        {
            return f;
        }
        @JsonProperty("s")
        public String getS()
        {
            return s;
        }
        @Override
        public boolean equals(Object o)
        {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            Pojo pojo = (Pojo) o;
            if (i != pojo.i) {
                return false;
            }
            // Float.compare handles NaN and signed zero consistently with equals semantics.
            if (Float.compare(pojo.f, f) != 0) {
                return false;
            }
            return s != null ? s.equals(pojo.s) : pojo.s == null;
        }
        @Override
        public int hashCode()
        {
            int result = i;
            // "+0.0f" guard maps both zeroes to the same hash bucket.
            result = 31 * result + (f != +0.0f ? Float.floatToIntBits(f) : 0);
            result = 31 * result + (s != null ? s.hashCode() : 0);
            return result;
        }
    }
    /** Minimal Retrofit service under test: POSTs a Pojo and expects one back. */
    interface Service
    {
        @POST("/")
        Call<Pojo> postPojo(@Body Pojo pojo);
    }
    @Before
    public void setUp()
    {
        // Build a Retrofit instance pointed at the mock server using the converter under test.
        Retrofit retrofit = new Retrofit.Builder()
                .baseUrl(server.url("/"))
                .addConverterFactory(MessagePackConverterFactory.create())
                .build();
        service = retrofit.create(Service.class);
    }
    @Test
    public void requestBodyConverter()
            throws IOException, InterruptedException
    {
        // Reference mapper used to independently encode/decode MessagePack bytes.
        ObjectMapper objectMapper = new ObjectMapper(new MessagePackFactory());
        Pojo requestPojo = new Pojo(42, (float) Math.PI, "Hello");
        Pojo responsePojo = new Pojo(99, 1.23f, "World");
        server.enqueue(new MockResponse().setBody(
                new Buffer().write(objectMapper.writeValueAsBytes(responsePojo))));
        Response<Pojo> response = service.postPojo(requestPojo).execute();
        // Response path: converter must decode the enqueued MessagePack bytes.
        assertThat(response.body(), is(responsePojo));
        RecordedRequest recordedRequest = server.takeRequest();
        // Request path: bytes the server received must decode back to the original Pojo.
        Pojo recordedPojo = objectMapper.readValue(recordedRequest.getBody().readByteArray(), Pojo.class);
        assertThat(recordedPojo, is(requestPojo));
    }
}
|
package com.cdyw.swsw.data.common.component;
import cn.hutool.core.io.FileUtil;
import com.cdyw.swsw.common.common.component.CommonPath;
import com.cdyw.swsw.common.common.component.CommonPathName;
import com.cdyw.swsw.common.common.component.CommonTable;
import com.cdyw.swsw.common.common.component.CommonTableName;
import com.cdyw.swsw.common.common.util.DateUtils;
import com.cdyw.swsw.common.common.util.RegexUtil;
import com.cdyw.swsw.common.domain.ao.enums.ProductEnum;
import com.cdyw.swsw.common.domain.ao.enums.TypeEnum;
import com.cdyw.swsw.common.domain.entity.file.FileEntity;
import com.cdyw.swsw.common.domain.entity.log.DataMonitorLog;
import com.cdyw.swsw.data.common.http.HttpDownloadApi;
import com.cdyw.swsw.data.domain.dao.common.CommonDataMapper;
import lombok.extern.slf4j.Slf4j;
import okhttp3.ResponseBody;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import retrofit2.Call;
import retrofit2.Callback;
import retrofit2.Response;
import java.io.*;
import java.nio.file.Path;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
/**
* 根据 源文件(名称或路径)获取 一级大类、二级站点、文件时间、三级产品,以及跟文件相关的所有操作
*
* @author jovi
*/
@Slf4j
@Component
public class CommonFileName {
private final CommonPath commonPath;
private final CommonTable commonTable;
private final CommonTableName commonTableName;
private final CommonPathName commonPathName;
private final CommonDataMapper commonDataMapper;
private final HttpDownloadApi httpDownloadApi;
@Autowired
public CommonFileName(CommonPath commonPath, CommonTable commonTable, CommonTableName commonTableName, CommonPathName commonPathName, CommonDataMapper commonDataMapper, HttpDownloadApi httpDownloadApi) {
this.commonPath = commonPath;
this.commonTable = commonTable;
this.commonTableName = commonTableName;
this.commonPathName = commonPathName;
this.commonDataMapper = commonDataMapper;
this.httpDownloadApi = httpDownloadApi;
}
private String[] getNameSplits(File fileSource) {
String fileSourceName = fileSource.getName();
return fileSourceName.split("_");
}
private String[] getNameSplits(String fileName) {
return fileName.split("_");
}
/**
* 根据 源文件(名称或路径)获取 时间
* 目前所有大类的文件全部都是”yyyyMMddHHmmss“格式,所以暂时使用统一处理方式
*
* @param fileSource 源文件
* @return 文件时间
*/
private String getTime(File fileSource) {
String regexTime = "^\\d{12,14}$";
return RegexUtil.getString(regexTime, getNameSplits(fileSource));
}
/**
* 根据 源文件名称获取 时间
* 目前所有大类的文件全部都是”yyyyMMddHHmmss“格式,所以暂时使用统一处理方式
*
* @param fileName 源文件
* @return 文件时间
*/
private String getTime(String fileName) {
String regexTime = "^\\d{12,14}$";
return RegexUtil.getString(regexTime, getNameSplits(fileName));
}
/**
* 根据 源文件(名称或路径)获取 二级站点
*
* @param typeEnum 一级大类
* @param fileSource 源文件
* @return 二级站点
*/
public String getStaIdC(TypeEnum typeEnum, File fileSource) {
String[] nameSplits = getNameSplits(fileSource);
String staIdC = null;
switch (typeEnum) {
case TYPE_RADAR_WEATHER:
// Z_RADR_I_Z9280_20201215215620_O_DOR_SC_CAP_FMT.bin.bz2
String regexStaRadarWea = "^\\w{1}\\d{4}$";
staIdC = RegexUtil.getString(regexStaRadarWea, nameSplits);
break;
case TYPE_RADAR_WIND_PROFILE:
// Z_RADA_I_56285_20201215000000_P_WPRD_LC_OOBS.TXT
String regexStaRadarWinPro = "^\\d{5}$";
staIdC = RegexUtil.getString(regexStaRadarWinPro, nameSplits);
break;
case TYPE_RADAR_PHASED_ARRAY:
// Z_RADR_I_ZCD02_20201210082115_O_DOR_DXK_CAR.bin.bz2
String staChongZhou = "ZCD01";
String staXinDu = "ZCD02";
String staTianFu = "ZCD03";
String regexStaRadarPhaArr = "^\\w{3}\\d{2}$";
staIdC = RegexUtil.getString(regexStaRadarPhaArr, nameSplits);
if (staIdC.contains(staChongZhou)) {
staIdC = commonPath.getRadarPhaArrStaChongZhou();
} else if (staIdC.contains(staXinDu)) {
staIdC = commonPath.getRadarPhaArrStaXinDu();
} else if (staIdC.contains(staTianFu)) {
staIdC = commonPath.getRadarPhaArrStaTianFu();
}
break;
default:
break;
}
return staIdC;
}
/**
* 根据 源文件(名称或路径)获取 二级站点
*
* @param typeEnum 一级大类
* @param fileName 源文件
* @return 二级站点
*/
private String getStaIdC(TypeEnum typeEnum, String fileName) {
String[] nameSplits = getNameSplits(fileName);
String staIdC = null;
switch (typeEnum) {
case TYPE_RADAR_WEATHER:
// Z_RADR_I_Z9280_20201215215620_O_DOR_SC_CAP_FMT.bin.bz2
String regexStaRadarWea = "^\\w{1}\\d{4}$";
staIdC = RegexUtil.getString(regexStaRadarWea, nameSplits);
break;
case TYPE_RADAR_WIND_PROFILE:
// Z_RADA_I_56285_20201215000000_P_WPRD_LC_OOBS.TXT
String regexStaRadarWinPro = "^\\d{5}$";
staIdC = RegexUtil.getString(regexStaRadarWinPro, nameSplits);
break;
case TYPE_RADAR_PHASED_ARRAY:
// Z_RADR_I_ZCD02_20201210082115_O_DOR_DXK_CAR.bin.bz2
String staChongZhou = "ZCD01";
String staXinDu = "ZCD02";
String staTianFu = "ZCD03";
String regexStaRadarPhaArr = "^\\w{3}\\d{2}$";
staIdC = RegexUtil.getString(regexStaRadarPhaArr, nameSplits);
if (staIdC.contains(staChongZhou)) {
staIdC = commonPath.getRadarPhaArrStaChongZhou();
} else if (staIdC.contains(staXinDu)) {
staIdC = commonPath.getRadarPhaArrStaXinDu();
} else if (staIdC.contains(staTianFu)) {
staIdC = commonPath.getRadarPhaArrStaTianFu();
}
break;
default:
break;
}
return staIdC;
}
/**
* 根据 源文件(名称或路径)获取 三级产品
*
* @param typeEnum 一级大类
* @param fileSource 源文件
* @return 三级产品
*/
private String getType(TypeEnum typeEnum, File fileSource) {
String[] nameSplits = getNameSplits(fileSource);
String type = null;
switch (typeEnum) {
case TYPE_RADAR_WEATHER:
// Z_RADR_I_Z9280_20201215215620_O_DOR_SC_CAP_FMT.bin.bz2
type = nameSplits[8];
break;
case TYPE_RADAR_WIND_PROFILE:
case TYPE_RADAR_PHASED_ARRAY:
// // Z_RADR_I_ZCD02_20201210082115_O_DOR_DXK_CAR.bin.bz2
// Z_RADA_I_56285_20201215000000_P_WPRD_LC_OOBS.TXT
type = nameSplits[nameSplits.length - 1].split("\\.")[0];
break;
case TYPE_ECMWF_HR:
case TYPE_SWC_WARR:
case TYPE_SWC_WARM:
// SWCWARR_20201213180000_F01_3KM.grb
// W_NAFP_C_ECMF_20201213174247_P_C1D12131200121312001.bz2 由于暂时没站点没产品,所以先暂时默认个”产品“值
type = commonTable.getSufBase();
break;
case TYPE_CLDAS_1KM:
// Z_NAFP_C_BABJ_20201222181201_P_HRCLDAS_RT_CHN-BCCD_0P01_HOR-QAIR-2020122302.GRB2
type = nameSplits[nameSplits.length - 1].split("-")[1];
break;
default:
break;
}
return type;
}
    /**
     * Derives the tertiary product code from a bare file name.
     *
     * @param typeEnum top-level category
     * @param fileName source file name
     * @return product code, or null for unrecognized categories
     */
    private String getType(TypeEnum typeEnum, String fileName) {
        String[] nameSplits = getNameSplits(fileName);
        String type = null;
        switch (typeEnum) {
            case TYPE_RADAR_WEATHER:
                // Z_RADR_I_Z9280_20201215215620_O_DOR_SC_CAP_FMT.bin.bz2
                // NOTE(review): assumes the product token is always the 9th "_"-separated
                // field — throws ArrayIndexOutOfBounds on shorter names; confirm inputs.
                type = nameSplits[8];
                break;
            case TYPE_RADAR_WIND_PROFILE:
            case TYPE_RADAR_PHASED_ARRAY:
                // // Z_RADR_I_ZCD02_20201210082115_O_DOR_DXK_CAR.bin.bz2
                // Z_RADA_I_56285_20201215000000_P_WPRD_LC_OOBS.TXT
                // Product is the last token with its extension stripped.
                type = nameSplits[nameSplits.length - 1].split("\\.")[0];
                break;
            case TYPE_ECMWF_HR:
            case TYPE_SWC_WARR:
            case TYPE_SWC_WARM:
                // SWCWARR_20201213180000_F01_3KM.grb
                // W_NAFP_C_ECMF_20201213174247_P_C1D12131200121312001.bz2
                // No station/product available for these categories yet, so fall back
                // to the default "base" product code.
                type = commonTable.getSufBase();
                break;
            case TYPE_CLDAS_1KM:
                // Z_NAFP_C_BABJ_20201222181201_P_HRCLDAS_RT_CHN-BCCD_0P01_HOR-QAIR-2020122302.GRB2
                // Product is the middle "-"-separated segment of the last token.
                type = nameSplits[nameSplits.length - 1].split("-")[1];
                break;
            default:
                break;
        }
        return type;
    }
    /**
     * Builds a {@link FileEntity} describing an already-stored destination file.
     *
     * @param typeEnum top-level category
     * @param destFile destination file (already copied into the storage tree)
     * @return populated file entity
     */
    public FileEntity getFileEntity(TypeEnum typeEnum, File destFile) {
        FileEntity fileEntity = new FileEntity();
        String name = destFile.getName();
        fileEntity.setName(name);
        fileEntity.setPosFile(destFile.getParent());
        fileEntity.setFileSize(Integer.parseInt(String.valueOf(destFile.length())));
        if (typeEnum.equals(TypeEnum.TYPE_RADAR_WIND_PROFILE)) {
            // Wind-profile radar is special: "type" stores the product interval (6, 30 or 60).
            if (name.contains(ProductEnum.ROBS.name())) {
                fileEntity.setType(6);
            } else if (name.contains(ProductEnum.HOBS.name())) {
                fileEntity.setType(30);
            } else if (name.contains(ProductEnum.OOBS.name())) {
                fileEntity.setType(60);
            }
        } else {
            fileEntity.setType(typeEnum.getType());
        }
        // Convert the millisecond timestamp to seconds.
        fileEntity.setCreateTime((System.currentTimeMillis()) / 1000);
        fileEntity.setModifyType(0);
        String patten = "yyyyMMddHHmmss";
        String staIdC = null;
        if (destFile.getPath().contains(commonTable.getSufParse())) {
            switch (typeEnum) {
                case TYPE_RADAR_WEATHER:
                case TYPE_RADAR_PHASED_ARRAY:
                    Path filePath = destFile.toPath();
                    // Parsed products encode the timestamp in the name with "_" separators removed.
                    String replace = destFile.getName().split("\\.")[0].replace("_", "");
                    fileEntity.setTime(DateUtils.getDateUtc2TimeLocal(replace, patten));
                    // NOTE(review): assumes the station id is always the 4th path element
                    // from the end of the parsed-product directory layout — confirm against
                    // the path builder in CommonPathName.
                    staIdC = FileUtil.getPathEle(filePath, filePath.getNameCount() - 4).toString();
                    break;
                default:
                    break;
            }
        } else {
            fileEntity.setTime(DateUtils.getDateUtc2TimeLocal(getTime(destFile), patten));
            staIdC = getStaIdC(typeEnum, destFile);
        }
        if (staIdC != null) {
            // Station ids parsed from paths may carry a trailing separator; strip it.
            fileEntity.setRadarcd(staIdC.replace("/", ""));
        }
        return fileEntity;
    }
/**
 * 根据 一级大类 和 源文件(名称或路径) 进行文件录入、数据录入、日志录入(仅限于 base 表操作)
 *
 * @param typeEnum   一级大类
 * @param fileSource 源文件
 * @return 插入数据表记录
 */
public int insertFileEntityBase(TypeEnum typeEnum, File fileSource) {
    int size = 0;
    String t = commonTable.getSufBase();
    String staIdC = this.getStaIdC(typeEnum, fileSource);
    String type = this.getType(typeEnum, fileSource);
    String time = this.getTime(fileSource);
    String fileTargetPathName = commonPathName.getPathNameByPara(typeEnum, staIdC, type, time, t);
    // 最终目标文件路径
    File fileDestPath = new File(fileTargetPathName);
    boolean flag = fileDestPath.exists();
    if (!flag) {
        flag = fileDestPath.mkdirs();
    }
    // 如果此文件夹存在
    if (flag) {
        // ①:存储文件(此处返回的是路径)
        FileUtil.copy(fileSource, fileDestPath, false);
        // Use the File(parent, child) constructor instead of concatenating "\\",
        // so the path is built with the platform separator (works on non-Windows hosts too).
        File destFile = new File(fileDestPath, fileSource.getName());
        // ②:写入数据库
        String tableName = commonTableName.getTableNameByParam(typeEnum, staIdC, time, t);
        FileEntity fileEntity = this.getFileEntity(typeEnum, destFile);
        size = commonDataMapper.insertCommonInfo(fileEntity, tableName);
        // 插入数据、存储文件后,记录日志
        DataMonitorLog log = new DataMonitorLog();
        log.setCreateTime(LocalDateTime.now());
        String patten = "yyyyMMddHHmmss";
        log.setDateTime(DateUtils.getDateUtc2Local(time, patten));
        log.setType(typeEnum.getType());
        // status: 1 = at least one row inserted, 0 = nothing inserted
        if (size > 0) {
            log.setStatus(1);
        } else {
            log.setStatus(0);
        }
        log.setMsg(LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")) + " 共新增 " + size + " 条数据");
        // 日志表只需要一级大类
        String logTableName = commonTableName.getTableNameByParam(typeEnum, null, time, commonTable.getLog());
        commonDataMapper.insertCommonLog(log, logTableName);
    }
    return size;
}
/**
 * Downloads a remote file into the monitor directory for the given type.
 * <p>
 * NOTE: the HTTP call is asynchronous ({@code enqueue}); the returned flag only
 * means the request was successfully submitted, NOT that the download finished.
 *
 * @param typeEnum 一级大类
 * @param fileName target file name
 * @param url      remote resource URL
 * @return {@code true} if the request was enqueued without error
 */
public boolean downloadFile2Base(TypeEnum typeEnum, String fileName, String url) {
    boolean flag = false;
    String type = this.getType(typeEnum, fileName);
    String time = this.getTime(fileName);
    // ① 先查询监控路径
    String monitorPath = commonPathName.getMonitorPathNameByPara(typeEnum, null, type, time, commonTable.getSufBase());
    File monitorPathF = new File(monitorPath);
    if (!monitorPathF.exists()) {
        flag = monitorPathF.mkdirs();
    }
    File monitorPathFile = new File(monitorPath + "/" + fileName);
    // ② 再获取文件
    Call<ResponseBody> call = httpDownloadApi.downloadResult(url);
    try {
        call.enqueue(new Callback<ResponseBody>() {
            @Override
            public void onResponse(Call<ResponseBody> call, Response<ResponseBody> response) {
                if (response.isSuccessful()) {
                    ResponseBody body = response.body();
                    if (body != null) {
                        // ③ 将文件下载到指定监控路径下
                        // renamed from "flag" to avoid shadowing the outer method variable
                        boolean saved = downloadFileToBase(monitorPathFile, body);
                        if (saved) {
                            log.info("***** 下载成功: " + response.message() + " *****");
                        }
                    }
                } else {
                    log.error("***** 下载失败: " + response.message() + " *****");
                }
            }
            @Override
            public void onFailure(Call<ResponseBody> call, Throwable t) {
                log.error("***** 下载失败: " + t.getMessage() + " *****");
            }
        });
        flag = true;
    } catch (Exception e) {
        // log through the class logger instead of printStackTrace()
        log.error("***** 下载请求提交失败: " + e.getMessage() + " *****", e);
    }
    return flag;
}
/**
 * Streams the HTTP response body to the given file.
 * <p>
 * Uses try-with-resources so both streams are always closed and a close()
 * failure can no longer mask the original I/O error (the old nested
 * try/finally could throw from close() inside finally).
 *
 * @param monitorPathFile destination file
 * @param body            HTTP response body to copy
 * @return {@code true} on success, {@code false} on any I/O error
 */
private boolean downloadFileToBase(File monitorPathFile, ResponseBody body) {
    try (InputStream inputStream = body.byteStream();
         OutputStream outputStream = new FileOutputStream(monitorPathFile)) {
        byte[] buffer = new byte[4096];
        int read;
        while ((read = inputStream.read(buffer)) != -1) {
            outputStream.write(buffer, 0, read);
        }
        outputStream.flush();
        return true;
    } catch (IOException e) {
        return false;
    }
}
}
|
package Lev05.task0526;
/*
Мужчина и женщина
1. Внутри класса Solution создай public static классы Man и Woman.
2. У классов должны быть поля: name (String), age (int), address (String).
3. Создай конструкторы, в которые передаются все возможные параметры.
4. Создай по два объекта каждого класса со всеми данными используя конструктор.
5. Объекты выведи на экран в таком формате: name + " " + age + " " + address
Требования:
1. В классе Solution создай public static класс Man.
2. В классе Solution создай public static класс Woman.
3. Класс Man должен содержать переменные: name(String), age(int) и address(String).
4. Класс Woman должен содержать переменные: name(String), age(int) и address(String).
5. У классов Man и Woman должны быть конструкторы, принимающие параметры с типами String, int и String.
6. Конструкторы должны инициализировать переменные класса.
7. В методе main необходимо создать по два объекта каждого типа.
8. Метод main должен выводить созданные объекты на экран в указанном формате.
*/
/**
 * Training task: declare two public static nested classes, {@code Man} and
 * {@code Woman}, each holding name, age and address, and print two instances
 * of each in the format {@code name + " " + age + " " + address}.
 */
public class Solution {

    /** A man with a name, an age and an address. */
    public static class Man {
        String name;
        String address;
        int age;

        public Man(String name, int age, String address) {
            this.name = name;
            this.age = age;
            this.address = address;
        }

        public String getName() {
            return name;
        }

        public int getAge() {
            return age;
        }

        public String getAddress() {
            return address;
        }

        @Override
        public String toString() {
            return String.format("Man{name='%s', address='%s', age=%d}", name, address, age);
        }
    }

    /** A woman with a name, an age and an address. */
    public static class Woman {
        String name;
        String address;
        int age;

        public Woman(String name, int age, String address) {
            this.name = name;
            this.age = age;
            this.address = address;
        }

        public String getName() {
            return name;
        }

        public int getAge() {
            return age;
        }

        public String getAddress() {
            return address;
        }

        @Override
        public String toString() {
            return String.format("Woman{name='%s', address='%s', age=%d}", name, address, age);
        }
    }

    public static void main(String[] args) {
        Man firstMan = new Man("Tom", 18, "Home1");
        Man secondMan = new Man("Mot", 15, "Home2");
        Woman firstWoman = new Woman("Emma", 20, "Home1222");
        Woman secondWoman = new Woman("Emma Stone", 20, "Home4444");
        printPerson(firstMan.getName(), firstMan.getAge(), firstMan.getAddress());
        printPerson(secondMan.getName(), secondMan.getAge(), secondMan.getAddress());
        printPerson(firstWoman.getName(), firstWoman.getAge(), firstWoman.getAddress());
        printPerson(secondWoman.getName(), secondWoman.getAge(), secondWoman.getAddress());
    }

    /** Prints a person in the required "name age address" format. */
    private static void printPerson(String name, int age, String address) {
        System.out.println(name + " " + age + " " + address);
    }
}
|
package gcj2014.r3;
import java.io.File;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.Scanner;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
/**
 * Google Code Jam solver harness: reads all test cases up front, solves them
 * on a fixed thread pool, and writes answers in case order.
 * <p>
 * Per-case problem: given n values a[i] = (i*p+q) % r + s, binary-search the
 * smallest h such that the array splits into three contiguous runs whose sums
 * are each &le; h, then print 1 - h/sum(a).
 */
public class A implements Runnable {
    static final boolean LARGE = true;   // selects the "-large" input file name
    static final boolean PROD = true;    // true: read/write real files; false: use INPUT string / stdout
    static final int NTHREAD = 1;        // number of solver threads
    static String BASEPATH = "x:\\gcj\\";
//	static String BASEPATH = "/home/ec2-user/";
    static String INPATH = BASEPATH + A.class.getSimpleName().charAt(0) + (LARGE ? "-large.in" : "-small-attempt0.in");
//	static String INPATH = BASEPATH + TParallel.class.getSimpleName().charAt(0) + (LARGE ? "-large-practice.in" : "-small-practice.in");
    // Output path: input name minus ".in", plus a -HHmmss timestamp suffix.
    static String OUTPATH = INPATH.substring(0, INPATH.length()-3) + new SimpleDateFormat("-HHmmss").format(new Date()) + ".out";
    static String INPUT = "";
    // Per-case state: generator parameters and the generated array.
    long p, q, r, s;
    long[] a;
    int n;
    // Reads one case's input and materializes a[i] = (i*p+q) % r + s.
    public void read() // not parallelized
    {
        n = ni();
        p = in.nextLong();
        q = in.nextLong();
        r = in.nextLong();
        s = in.nextLong();
        a = new long[n];
        for(int i = 0;i < n;i++){
            a[i] = (i*p+q) % r + s;
        }
    }
    // Binary search on h over [-1, 2000005*n): smallest h with ok(h) true.
    public void process() // parallelized!
    {
        long low = -1, high = 2000005L*n;
        while(high - low > 1){
            long h = (high+low) / 2;
            if(ok(h)){
                high = h;
            }else{
                low = h;
            }
        }
        long sum = 0;
        for(long v : a)sum += v;
        // Answer is the fraction NOT covered by the heaviest part: 1 - high/sum.
        out.printf("%.12f\n", 1.-(double)high/sum);
    }
    // Greedily packs a[] into three consecutive segments, each of sum <= h;
    // true iff all n elements fit (the three identical loops are the three segments).
    boolean ok(long h)
    {
        long s = 0;
        int i = 0;
        for(;i < n && s+a[i] <= h;s += a[i],i++);
        s = 0;
        for(;i < n && s+a[i] <= h;s += a[i],i++);
        s = 0;
        for(;i < n && s+a[i] <= h;s += a[i],i++);
        return i == n;
    }
    // Hook for work shared by all cases; unused for this problem.
    public static void preprocess()
    {
    }
    Scanner in;          // shared scanner handed to each case (reads happen sequentially)
    PrintWriter out;     // per-case writer backed by sw
    StringWriter sw;     // buffers this case's output until all cases finish
    int cas;             // 1-based case number
    // Cases currently being solved, for progress logging.
    static List<Status> running = new ArrayList<Status>();
    @Override
    public void run()
    {
        long S = System.nanoTime();
        // register
        synchronized(running){
            Status st = new Status();
            st.id = cas;
            st.S = S;
            running.add(st);
        }
        process();
        // deregister
        synchronized(running){
            for(Status st : running){
                if(st.id == cas){
                    running.remove(st);
                    break;
                }
            }
        }
        long G = System.nanoTime();
        if(PROD){
            System.err.println("case " + cas + " solved. [" + (G-S)/1000000 + "ms]");
            synchronized(running){
                StringBuilder sb = new StringBuilder("running : ");
                for(Status st : running){
                    sb.append(st.id + ":" + (G-st.S)/1000000 + "ms, ");
                }
                System.err.println(sb);
            }
        }
    }
    // Progress entry: case id plus its start timestamp (nanos).
    private static class Status {
        public int id;
        public long S;
    }
    public A(int cas, Scanner in)
    {
        this.cas = cas;
        this.in = in;
        this.sw = new StringWriter();
        this.out = new PrintWriter(this.sw);
    }
    // Small input helpers around the shared Scanner.
    private int ni() { return Integer.parseInt(in.next()); }
    private long nl() { return Long.parseLong(in.next()); }
    private int[] na(int n) { int[] a = new int[n]; for(int i = 0;i < n;i++)a[i] = ni(); return a; }
    private double nd() { return Double.parseDouble(in.next()); }
    // Debug trace; only prints when PROD is false.
    private void tr(Object... o) { if(!PROD)System.out.println(Arrays.deepToString(o)); }
    public static void main(String[] args) throws Exception
    {
        long start = System.nanoTime();
        ExecutorService es = Executors.newFixedThreadPool(NTHREAD);
        CompletionService<A> cs = new ExecutorCompletionService<A>(es);
        if(PROD){
            System.out.println("INPATH : " + INPATH);
            System.out.println("OUTPATH : " + OUTPATH);
        }
        Scanner in = PROD ? new Scanner(new File(INPATH)) : new Scanner(INPUT);
        PrintWriter out = PROD ? new PrintWriter(new File(OUTPATH)) : new PrintWriter(System.out);
        int n = in.nextInt();
        in.nextLine();
        preprocess();
        // Reads are sequential on the main thread; solving is submitted to the pool.
        for(int i = 0;i < n;i++){
            A runner = new A(i+1, in);
            runner.read();
            cs.submit(runner, runner);
        }
        es.shutdown();
        String[] outs = new String[n];
        for(int i = 0;i < n;i++){
            A runner = cs.take().get(); // not ordered
            runner.out.flush();
            runner.out.close();
            outs[runner.cas-1] = runner.sw.toString();  // re-order results by case id
        }
        for(int i = 0;i < n;i++){
            out.printf("Case #%d: ", i+1);
            out.append(outs[i]);
            out.flush();
        }
        long end = System.nanoTime();
        System.out.println((end - start)/1000000 + "ms");
        if(PROD){
            System.out.println("INPATH : " + INPATH);
            System.out.println("OUTPATH : " + OUTPATH);
        }
    }
}
|
package com.wordpress.kkaravitis.modules.books.catalog.service;
import book.model.Book;
import book.service.BookLocalServiceUtil;
import com.liferay.portal.kernel.exception.PortalException;
import com.wordpress.kkaravitis.modules.books.catalog.exception.ApplicationException;
import com.wordpress.kkaravitis.modules.books.catalog.model.BookDTO;
import org.springframework.beans.BeanUtils;
import org.springframework.stereotype.Service;
import java.util.List;
import java.util.stream.Collectors;
/**
 * Default {@link BookService} backed by Liferay's {@code BookLocalServiceUtil}.
 *
 * @author Konstantinos Karavitis
 **/
@Service("bookService")
public class BookServiceImpl implements BookService {

    @Override
    public List<BookDTO> getBooks() {
        int bookCount = BookLocalServiceUtil.getBooksCount();
        return BookLocalServiceUtil.getBooks(0, bookCount)
                .stream()
                .map(BookServiceImpl::toDto)
                .collect(Collectors.toList());
    }

    @Override
    public void saveOrUpdateBook(BookDTO book) {
        try {
            update(book);
        } catch (PortalException e) {
            // No persisted book with this ISBN yet -- fall back to creating one.
            save(book);
        }
    }

    @Override
    public void deleteBook(String isbn) throws ApplicationException {
        try {
            BookLocalServiceUtil.deleteBook(isbn);
        } catch (PortalException e) {
            throw new ApplicationException(e);
        }
    }

    /** Copies the persisted entity's fields into a transfer object. */
    private static BookDTO toDto(Book entity) {
        BookDTO dto = new BookDTO();
        dto.setAuthor(entity.getAuthor());
        dto.setTitle(entity.getTitle());
        dto.setIsbn(entity.getIsbn());
        return dto;
    }

    /** Creates a new entity keyed by ISBN and persists the DTO's fields. */
    private void save(BookDTO book) {
        Book entity = BookLocalServiceUtil.createBook(book.getIsbn());
        BeanUtils.copyProperties(book, entity);
        BookLocalServiceUtil.updateBook(entity);
    }

    /** Loads the existing entity by ISBN and overwrites it with the DTO's fields. */
    private void update(BookDTO book) throws PortalException {
        Book entity = BookLocalServiceUtil.getBook(book.getIsbn());
        BeanUtils.copyProperties(book, entity);
        BookLocalServiceUtil.updateBook(entity);
    }
}
|
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.awssdk.services.s3.internal.crt;
import static software.amazon.awssdk.services.s3.internal.crt.S3InternalSdkHttpExecutionAttribute.OPERATION_NAME;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import software.amazon.awssdk.annotations.SdkInternalApi;
import software.amazon.awssdk.annotations.SdkTestInternalApi;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.crt.http.HttpHeader;
import software.amazon.awssdk.crt.http.HttpRequest;
import software.amazon.awssdk.crt.s3.S3Client;
import software.amazon.awssdk.crt.s3.S3ClientOptions;
import software.amazon.awssdk.crt.s3.S3MetaRequest;
import software.amazon.awssdk.crt.s3.S3MetaRequestOptions;
import software.amazon.awssdk.http.Header;
import software.amazon.awssdk.http.SdkHttpRequest;
import software.amazon.awssdk.http.async.AsyncExecuteRequest;
import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.utils.AttributeMap;
import software.amazon.awssdk.utils.Logger;
/**
 * An implementation of {@link SdkAsyncHttpClient} that uses an CRT S3 HTTP client {@link S3Client} to communicate with S3.
 * Note that it does not work with other services
 */
@SdkInternalApi
public final class S3CrtAsyncHttpClient implements SdkAsyncHttpClient {
    private static final Logger log = Logger.loggerFor(S3CrtAsyncHttpClient.class);
    private final S3Client crtS3Client;
    private final S3NativeClientConfiguration s3NativeClientConfiguration;
    // Builds the native client configuration from the builder, then creates the
    // CRT S3Client from it. MD5 computation is always enabled on uploads.
    private S3CrtAsyncHttpClient(Builder builder) {
        s3NativeClientConfiguration =
            S3NativeClientConfiguration.builder()
                                       .targetThroughputInGbps(builder.targetThroughputInGbps)
                                       .partSizeInBytes(builder.minimalPartSizeInBytes)
                                       .maxConcurrency(builder.maxConcurrency)
                                       .signingRegion(builder.region == null ? null : builder.region.id())
                                       .endpointOverride(builder.endpointOverride)
                                       .credentialsProvider(builder.credentialsProvider)
                                       .build();
        S3ClientOptions s3ClientOptions =
            new S3ClientOptions().withRegion(s3NativeClientConfiguration.signingRegion())
                                 .withEndpoint(s3NativeClientConfiguration.endpointOverride() == null ? null :
                                               s3NativeClientConfiguration.endpointOverride().toString())
                                 .withCredentialsProvider(s3NativeClientConfiguration.credentialsProvider())
                                 .withClientBootstrap(s3NativeClientConfiguration.clientBootstrap())
                                 .withPartSize(s3NativeClientConfiguration.partSizeBytes())
                                 .withComputeContentMd5(true)
                                 .withThroughputTargetGbps(s3NativeClientConfiguration.targetThroughputInGbps());
        this.crtS3Client = new S3Client(s3ClientOptions);
    }
    // Test-only constructor: injects a pre-built CRT client and configuration.
    @SdkTestInternalApi
    S3CrtAsyncHttpClient(S3Client crtS3Client, S3NativeClientConfiguration nativeClientConfiguration) {
        this.crtS3Client = crtS3Client;
        this.s3NativeClientConfiguration = nativeClientConfiguration;
    }
    /**
     * Translates the SDK request into a CRT meta-request and submits it.
     * The returned future completes when the meta-request finishes (or is
     * cancelled); completion also releases the underlying CRT resources.
     */
    @Override
    public CompletableFuture<Void> execute(AsyncExecuteRequest asyncRequest) {
        CompletableFuture<Void> executeFuture = new CompletableFuture<>();
        URI uri = asyncRequest.request().getUri();
        HttpRequest httpRequest = toCrtRequest(uri, asyncRequest);
        S3CrtResponseHandlerAdapter responseHandler =
            new S3CrtResponseHandlerAdapter(executeFuture, asyncRequest.responseHandler());
        S3MetaRequestOptions.MetaRequestType requestType = requestType(asyncRequest);
        S3MetaRequestOptions requestOptions = new S3MetaRequestOptions()
            .withHttpRequest(httpRequest)
            .withMetaRequestType(requestType)
            .withResponseHandler(responseHandler)
            .withEndpoint(s3NativeClientConfiguration.endpointOverride());
        // try-with-resources releases this method's reference; the CRT keeps its
        // own reference alive until closeResourcesWhenComplete() closes it.
        try (S3MetaRequest s3MetaRequest = crtS3Client.makeMetaRequest(requestOptions)) {
            closeResourcesWhenComplete(executeFuture, s3MetaRequest, responseHandler);
        }
        return executeFuture;
    }
    @Override
    public String clientName() {
        return "s3crt";
    }
    // Maps the SDK operation name (from execution attributes) to the CRT
    // meta-request type; anything unrecognized falls back to DEFAULT.
    private static S3MetaRequestOptions.MetaRequestType requestType(AsyncExecuteRequest asyncRequest) {
        String operationName = asyncRequest.httpExecutionAttributes().getAttribute(OPERATION_NAME);
        if (operationName != null) {
            switch (operationName) {
                case "GetObject":
                    return S3MetaRequestOptions.MetaRequestType.GET_OBJECT;
                case "PutObject":
                    return S3MetaRequestOptions.MetaRequestType.PUT_OBJECT;
                case "CopyObject":
                    return S3MetaRequestOptions.MetaRequestType.COPY_OBJECT;
                default:
                    return S3MetaRequestOptions.MetaRequestType.DEFAULT;
            }
        }
        return S3MetaRequestOptions.MetaRequestType.DEFAULT;
    }
    // On completion of the execute future: if it was cancelled, cancel the CRT
    // meta-request too; in all cases close the meta-request to free native memory.
    private static void closeResourcesWhenComplete(CompletableFuture<Void> executeFuture,
                                                   S3MetaRequest s3MetaRequest,
                                                   S3CrtResponseHandlerAdapter responseHandler) {
        executeFuture.whenComplete((r, t) -> {
            if (executeFuture.isCancelled()) {
                log.debug(() -> "The request is cancelled, cancelling meta request");
                responseHandler.cancelRequest();
                s3MetaRequest.cancel();
            }
            s3MetaRequest.close();
        });
    }
    // Converts an SDK HTTP request into a CRT HttpRequest: method, encoded
    // path + query string, headers, and the request-body publisher adapter.
    private static HttpRequest toCrtRequest(URI uri, AsyncExecuteRequest asyncRequest) {
        SdkHttpRequest sdkRequest = asyncRequest.request();
        String method = sdkRequest.method().name();
        String encodedPath = sdkRequest.encodedPath();
        if (encodedPath == null || encodedPath.isEmpty()) {
            encodedPath = "/";  // CRT requires a non-empty path
        }
        String encodedQueryString = sdkRequest.encodedQueryParameters()
                                              .map(value -> "?" + value)
                                              .orElse("");
        HttpHeader[] crtHeaderArray = createHttpHeaderList(uri, asyncRequest).toArray(new HttpHeader[0]);
        S3CrtRequestBodyStreamAdapter sdkToCrtRequestPublisher =
            new S3CrtRequestBodyStreamAdapter(asyncRequest.requestContentPublisher());
        return new HttpRequest(method, encodedPath + encodedQueryString, crtHeaderArray, sdkToCrtRequestPublisher);
    }
    @Override
    public void close() {
        s3NativeClientConfiguration.close();
        crtS3Client.close();
    }
    public static Builder builder() {
        return new Builder();
    }
    /** Builder for {@link S3CrtAsyncHttpClient}; all settings are optional. */
    public static final class Builder implements SdkAsyncHttpClient.Builder<S3CrtAsyncHttpClient.Builder> {
        private AwsCredentialsProvider credentialsProvider;
        private Region region;
        private Long minimalPartSizeInBytes;
        private Double targetThroughputInGbps;
        private Integer maxConcurrency;
        private URI endpointOverride;
        /**
         * Configure the credentials that should be used to authenticate with S3.
         */
        public Builder credentialsProvider(AwsCredentialsProvider credentialsProvider) {
            this.credentialsProvider = credentialsProvider;
            return this;
        }
        /**
         * Configure the region with which the SDK should communicate.
         */
        public Builder region(Region region) {
            this.region = region;
            return this;
        }
        /**
         * Sets the minimum part size for transfer parts. Decreasing the minimum part size causes
         * multipart transfer to be split into a larger number of smaller parts. Setting this value too low
         * has a negative effect on transfer speeds, causing extra latency and network communication for each part.
         */
        public Builder minimumPartSizeInBytes(Long partSizeBytes) {
            this.minimalPartSizeInBytes = partSizeBytes;
            return this;
        }
        /**
         * The target throughput for transfer requests. Higher value means more S3 connections
         * will be opened. Whether the transfer manager can achieve the configured target throughput depends
         * on various factors such as the network bandwidth of the environment and the configured {@link #maxConcurrency}.
         */
        public Builder targetThroughputInGbps(Double targetThroughputInGbps) {
            this.targetThroughputInGbps = targetThroughputInGbps;
            return this;
        }
        /**
         * Specifies the maximum number of S3 connections that should be established during
         * a transfer.
         *
         * <p>
         * If not provided, the TransferManager will calculate the optional number of connections
         * based on {@link #targetThroughputInGbps}. If the value is too low, the S3TransferManager
         * might not achieve the specified target throughput.
         *
         * @param maxConcurrency the max number of concurrent requests
         * @return this builder for method chaining.
         * @see #targetThroughputInGbps(Double)
         */
        public Builder maxConcurrency(Integer maxConcurrency) {
            this.maxConcurrency = maxConcurrency;
            return this;
        }
        /**
         * Configure the endpoint override with which the SDK should communicate.
         */
        public Builder endpointOverride(URI endpointOverride) {
            this.endpointOverride = endpointOverride;
            return this;
        }
        @Override
        public SdkAsyncHttpClient build() {
            return new S3CrtAsyncHttpClient(this);
        }
        @Override
        public SdkAsyncHttpClient buildWithDefaults(AttributeMap serviceDefaults) {
            // Intentionally ignore serviceDefaults
            return build();
        }
    }
    // Builds the CRT header list: ensures Host and Content-Length are present
    // (when derivable), then copies all remaining SDK request headers.
    private static List<HttpHeader> createHttpHeaderList(URI uri, AsyncExecuteRequest asyncRequest) {
        SdkHttpRequest sdkRequest = asyncRequest.request();
        List<HttpHeader> crtHeaderList = new ArrayList<>();
        // Set Host Header if needed
        if (!sdkRequest.firstMatchingHeader(Header.HOST).isPresent()) {
            crtHeaderList.add(new HttpHeader(Header.HOST, uri.getHost()));
        }
        // Set Content-Length if needed
        Optional<Long> contentLength = asyncRequest.requestContentPublisher().contentLength();
        if (!sdkRequest.firstMatchingHeader(Header.CONTENT_LENGTH).isPresent() && contentLength.isPresent()) {
            crtHeaderList.add(new HttpHeader(Header.CONTENT_LENGTH, Long.toString(contentLength.get())));
        }
        // Add the rest of the Headers
        sdkRequest.forEachHeader((key, value) -> value.stream().map(val -> new HttpHeader(key, val))
                                                      .forEach(crtHeaderList::add));
        return crtHeaderList;
    }
}
|
package com.riiablo.map;
import com.badlogic.gdx.assets.AssetDescriptor;
import com.badlogic.gdx.assets.AssetLoaderParameters;
import com.badlogic.gdx.assets.AssetManager;
import com.badlogic.gdx.assets.loaders.AsynchronousAssetLoader;
import com.badlogic.gdx.assets.loaders.FileHandleResolver;
import com.badlogic.gdx.files.FileHandle;
import com.badlogic.gdx.utils.Array;
/**
 * Asset loader for {@code DT1} tile files.
 * <p>
 * NOTE(review): the {@code dt1} field is a single slot shared between
 * {@link #loadAsync} and {@link #loadSync} — this assumes the AssetManager
 * calls them as a pair per asset; concurrent loads through one loader
 * instance would race on it. Confirm against libGDX's loader contract.
 */
public class DT1Loader extends AsynchronousAssetLoader<DT1, DT1Loader.DT1LoaderParameters> {
    // Result handed off from loadAsync to the following loadSync call.
    DT1 dt1;
    public DT1Loader(FileHandleResolver resolver) {
        super(resolver);
    }
    // Async phase: parse the DT1 from the file stream off the render thread.
    @Override
    public void loadAsync(AssetManager assets, String fileName, FileHandle file, DT1LoaderParameters params) {
        dt1 = DT1.loadFromStream(fileName, file.read());
    }
    // Sync phase: consume the async result (or load here if loadAsync never
    // ran), then prepare textures, which must happen on the GL thread.
    @Override
    public DT1 loadSync(AssetManager assets, String fileName, FileHandle file, DT1LoaderParameters params) {
        DT1 dt1 = this.dt1;
        if (dt1 == null) {
            dt1 = DT1.loadFromStream(fileName, file.read());
        } else {
            this.dt1 = null;  // clear the slot so the next asset starts fresh
        }
        dt1.prepareTextures();
        // if (params != null) params.dt1s.add(dt1); // See below note
        return dt1;
    }
    // No dependent assets to preload.
    @Override
    public Array<AssetDescriptor> getDependencies(String fileName, FileHandle file, DT1LoaderParameters params) {
        return null;
    }
    public static class DT1LoaderParameters extends AssetLoaderParameters<DT1> {
        // This was never implemented -- should be handled by map -- kept in case it is eventually needed
        // public DT1s dt1s;
        //
        // public static DT1LoaderParameters newInstance(DT1s dt1s) {
        //   DT1LoaderParameters params = new DT1LoaderParameters();
        //   params.dt1s = dt1s;
        //   return params;
        // }
    }
}
|
package mybatis.entity;
/**
 * Blog post entity used by the MyBatis mapping examples.
 *
 * Created by rod bate on 2016/1/21.
 */
public class Blog {

    // NOTE(review): field name "titleS" looks like a typo but is kept —
    // renaming it would break the public getter/setter names callers use.
    private Integer id;
    private String titleS;
    private String content;
    private Author author;

    public Integer getId() {
        return id;
    }

    public String getTitleS() {
        return titleS;
    }

    public String getContent() {
        return content;
    }

    public Author getAuthor() {
        return author;
    }

    public void setId(Integer id) {
        this.id = id;
    }

    public void setTitleS(String titleS) {
        this.titleS = titleS;
    }

    public void setContent(String content) {
        this.content = content;
    }

    public void setAuthor(Author author) {
        this.author = author;
    }
}
|
package creationalpatterns.factorymethod.creditcard;
// ˅
import creationalpatterns.factorymethod.framework.Product;
// ˄
/**
 * A {@link Product} produced by the factory-method demo: a credit card
 * belonging to a single owner.
 */
public class CreditCard implements Product {

    /** Card holder's name, fixed at construction. */
    private final String owner;

    public CreditCard(String owner) {
        this.owner = owner;
        System.out.println("Make " + owner + "'s card.");
    }

    /** Prints a message indicating the owner's card is being used. */
    @Override
    public void use() {
        System.out.println("Use " + owner + "'s card.");
    }
}
// ˅
// ˄
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* {@link org.apache.solr.handler.component.SearchComponent} implementations for
* use in {@link org.apache.solr.handler.component.SearchHandler}
*/
package org.apache.solr.handler.component;
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package com.ostrichemulators.jfxhacc.model.vocabulary;
import org.openrdf.model.URI;
import org.openrdf.model.impl.URIImpl;
import org.openrdf.model.vocabulary.DCTERMS;
/**
 * RDF vocabulary constants for transaction resources.
 *
 * @author ryan
 */
public class Transactions {
    // Constants-only holder; never instantiated.
    private Transactions() {
    }
    /** Namespace prefix used in serializations. */
    public static final String PREFIX = "trans";
    /** Base URI for the transaction type, rooted under JfxHacc.BASE. */
    public static final String BASE = JfxHacc.BASE + "/transaction";
    /** Namespace for transaction predicates (BASE plus trailing slash). */
    public static final String NAMESPACE = BASE + "/";
    /** rdf:type URI for transactions. */
    public static final URI TYPE = new URIImpl( BASE );
    /** Transaction date; reuses dcterms:created rather than a custom predicate. */
    public static final URI DATE_PRED = DCTERMS.CREATED;
    public static final URI PAYEE_PRED = new URIImpl( NAMESPACE + "payee" );
    /** Links a transaction to its split entries (predicate local name is "entry"). */
    public static final URI SPLIT_PRED = new URIImpl( NAMESPACE + "entry" );
    public static final URI NUMBER_PRED = new URIImpl( NAMESPACE + "number" );
    public static final URI JOURNAL_PRED = new URIImpl( NAMESPACE + "journal" );
}
|
package org.jgroups.blocks.cs;
import org.jgroups.Address;
import java.io.Closeable;
import java.nio.ByteBuffer;
/**
 * Represents a connection to a peer
 */
public abstract class Connection implements Closeable {
    // Magic bytes exchanged on connect to identify the protocol ("bela").
    public static final byte[] cookie= { 'b', 'e', 'l', 'a' };
    protected Address peer_addr; // address of the 'other end' of the connection
    protected long last_access; // timestamp of the last access to this connection (read or write)
    /** Whether the underlying channel is open. */
    abstract public boolean isOpen();
    /** Whether the connection has been fully established. */
    abstract public boolean isConnected();
    /** Whether a connect attempt is in progress but not yet complete. */
    abstract public boolean isConnectionPending();
    /** Local address of this end of the connection. */
    abstract public Address localAddress();
    /** Address of the remote peer. */
    abstract public Address peerAddress();
    /** True if the connection has been idle longer than the given number of millis. */
    abstract public boolean isExpired(long millis);
    abstract public void flush(); // sends pending data
    /** Establishes the connection to the given destination. */
    abstract public void connect(Address dest) throws Exception;
    /** Starts any threads/resources needed to service the connection. */
    abstract public void start() throws Exception;
    /** Sends length bytes of buf starting at offset. */
    abstract public void send(byte[] buf, int offset, int length) throws Exception;
    /** Sends the remaining bytes of the given buffer. */
    abstract public void send(ByteBuffer buf) throws Exception;
    /** Human-readable status string for diagnostics. */
    abstract public String status();
}
|
// --------------------------------------------------------------------------------
// Copyright 2002-2021 Echo Three, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// --------------------------------------------------------------------------------
package com.echothree.control.user.selector.common.spec;
import com.echothree.control.user.core.common.spec.UniversalEntitySpec;
/**
 * Marker spec combining {@link SelectorKindSpec} with {@link UniversalEntitySpec};
 * declares no members of its own.
 */
public interface SelectorKindUniversalSpec
        extends SelectorKindSpec, UniversalEntitySpec {
    // Nothing additional beyond SelectorKindSpec, UniversalEntitySpec
}
|
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: google/cloud/datalabeling/v1beta1/data_labeling_service.proto
package com.google.cloud.datalabeling.v1beta1;
// NOTE(review): generated by protoc from data_labeling_service.proto — do not
// hand-edit; regenerate from the .proto source if the message changes.
public interface GetAnnotatedDatasetRequestOrBuilder extends
    // @@protoc_insertion_point(interface_extends:google.cloud.datalabeling.v1beta1.GetAnnotatedDatasetRequest)
    com.google.protobuf.MessageOrBuilder {
  /**
   * <pre>
   * Required. Name of the annotated dataset to get, format:
   * projects/{project_id}/datasets/{dataset_id}/annotatedDatasets/
   * {annotated_dataset_id}
   * </pre>
   *
   * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }</code>
   * @return The name.
   */
  java.lang.String getName();
  /**
   * <pre>
   * Required. Name of the annotated dataset to get, format:
   * projects/{project_id}/datasets/{dataset_id}/annotatedDatasets/
   * {annotated_dataset_id}
   * </pre>
   *
   * <code>string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... }</code>
   * @return The bytes for name.
   */
  com.google.protobuf.ByteString
      getNameBytes();
}
|
/**
* Copyright (C) 2015 The Gravitee team (http://gravitee.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gravitee.rest.api.service;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ser.PropertyFilter;
import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
import io.gravitee.definition.jackson.datatype.GraviteeMapper;
import io.gravitee.rest.api.model.api.ApiEntity;
import io.gravitee.rest.api.model.permissions.SystemRole;
import io.gravitee.rest.api.service.exceptions.ApiNotFoundException;
import io.gravitee.rest.api.service.exceptions.TechnicalManagementException;
import io.gravitee.rest.api.service.impl.ApiServiceImpl;
import io.gravitee.rest.api.service.jackson.filter.ApiPermissionFilter;
import io.gravitee.repository.exceptions.TechnicalException;
import io.gravitee.repository.management.api.ApiRepository;
import io.gravitee.repository.management.api.MembershipRepository;
import io.gravitee.repository.management.model.Api;
import io.gravitee.repository.management.model.Membership;
import io.gravitee.repository.management.model.MembershipReferenceType;
import io.gravitee.repository.management.model.RoleScope;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Spy;
import org.mockito.junit.MockitoJUnitRunner;
import java.util.Collections;
import java.util.Optional;
import static org.junit.Assert.assertNotNull;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@code ApiServiceImpl#findById(String)}: the happy path,
 * the not-found case, and the technical-failure case.
 *
 * @author Azize Elamrani (azize dot elamrani at gmail dot com)
 */
@RunWith(MockitoJUnitRunner.class)
public class ApiService_FindByIdTest {
    private static final String API_ID = "id-api";
    private static final String USER_NAME = "myUser";
    // Service under test; Mockito injects the @Mock/@Spy fields below into it.
    @InjectMocks
    private ApiServiceImpl apiService = new ApiServiceImpl();
    @Mock
    private ApiRepository apiRepository;
    @Mock
    private MembershipRepository membershipRepository;
    // Real mapper (spied, not mocked) so JSON (de)serialization inside the service
    // actually runs; the filter provider is configured in setUp().
    @Spy
    private ObjectMapper objectMapper = new GraviteeMapper();
    @Mock
    private Api api;
    @Mock
    private UserService userService;
    @Mock
    private ParameterService parameterService;
    @Mock
    private EntrypointService entrypointService;
    @Before
    public void setUp() {
        // Register the membership filter the service's serialization relies on.
        PropertyFilter apiMembershipTypeFilter = new ApiPermissionFilter();
        objectMapper.setFilterProvider(new SimpleFilterProvider(Collections.singletonMap("apiMembershipTypeFilter", apiMembershipTypeFilter)));
    }
    @Test
    public void shouldFindById() throws TechnicalException {
        when(apiRepository.findById(API_ID)).thenReturn(Optional.of(api));
        // Stub a primary-owner membership so the service can resolve the API's owner.
        Membership po = new Membership(USER_NAME, API_ID, MembershipReferenceType.API);
        po.setRoles(Collections.singletonMap(RoleScope.API.getId(), SystemRole.PRIMARY_OWNER.name()));
        when(membershipRepository.findByReferenceAndRole(any(), any(), any(), any()))
            .thenReturn(Collections.singleton(po));
        final ApiEntity apiEntity = apiService.findById(API_ID);
        assertNotNull(apiEntity);
    }
    // Missing API must surface as the domain-specific ApiNotFoundException.
    @Test(expected = ApiNotFoundException.class)
    public void shouldNotFindByNameBecauseNotExists() throws TechnicalException {
        when(apiRepository.findById(API_ID)).thenReturn(Optional.empty());
        apiService.findById(API_ID);
    }
    // Repository failures must be wrapped in TechnicalManagementException.
    @Test(expected = TechnicalManagementException.class)
    public void shouldNotFindByNameBecauseTechnicalException() throws TechnicalException {
        when(apiRepository.findById(API_ID)).thenThrow(TechnicalException.class);
        apiService.findById(API_ID);
    }
}
|
/*
* Copyright (c) 2015, Ali Afroozeh and Anastasia Izmaylova, Centrum Wiskunde & Informatica (CWI)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
package iguana.regex;
import java.util.*;
/**
 * Regular-expression Kleene star: zero or more repetitions of an underlying
 * expression, optionally interleaved with separator expressions.
 * Instances are immutable; build them via {@link #from(RegularExpression)}
 * or {@link #builder(RegularExpression)}.
 */
public class Star extends AbstractRegularExpression {
    private static final long serialVersionUID = 1L;
    // The repeated expression.
    private final RegularExpression regex;
    // Separators placed between repetitions; exposed as an unmodifiable view.
    private final List<RegularExpression> separators;

    /** Creates a star of {@code s} with no separators. */
    public static Star from(RegularExpression s) {
        return builder(s).build();
    }

    private Star(Builder builder) {
        super(builder);
        this.regex = builder.regex;
        this.separators = Collections.unmodifiableList(builder.separators);
    }

    @Override
    public int length() {
        return regex.length();
    }

    @Override
    public <T> T accept(RegularExpressionVisitor<T> visitor) {
        return visitor.visit(this);
    }

    /** A star always matches the empty string. */
    @Override
    public boolean isNullable() {
        return true;
    }

    @Override
    public Set<CharRange> getFirstSet() {
        return regex.getFirstSet();
    }

    @Override
    public Set<CharRange> getNotFollowSet() {
        // Characters that may start the repeated expression must not follow a
        // star match (otherwise the star would have consumed them).
        return regex.getFirstSet();
    }

    public List<RegularExpression> getSeparators() {
        return separators;
    }

    @Override
    public Builder copyBuilder() {
        return new Builder(this);
    }

    public RegularExpression getSymbol() {
        return regex;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (!(obj instanceof Star))
            return false;
        Star other = (Star) obj;
        return regex.equals(other.regex) && separators.equals(other.separators);
    }

    @Override
    public int hashCode() {
        // Include the separators so the hash is consistent with equals(), which
        // compares both the repeated expression and the separator list. The old
        // implementation hashed only the regex, colliding for Stars that differ
        // only in separators.
        return Objects.hash(regex, separators);
    }

    @Override
    public String toString() {
        return regex.toString() + "*";
    }

    public static Builder builder(RegularExpression s) {
        return new Builder(s);
    }

    /** Builder for {@link Star}; collects optional separators. */
    public static class Builder extends RegexBuilder<Star> {
        private final RegularExpression regex;
        private final List<RegularExpression> separators = new ArrayList<>();

        private Builder() {
            regex = null;
        }

        public Builder(RegularExpression regex) {
            this.regex = regex;
        }

        public Builder(Star star) {
            super(star);
            this.regex = star.regex;
            this.addSeparators(star.getSeparators());
        }

        public Builder addSeparator(RegularExpression symbol) {
            separators.add(symbol);
            return this;
        }

        public Builder addSeparators(List<RegularExpression> symbols) {
            separators.addAll(symbols);
            return this;
        }

        public Builder addSeparators(RegularExpression... symbols) {
            separators.addAll(Arrays.asList(symbols));
            return this;
        }

        @Override
        public Star build() {
            return new Star(this);
        }
    }
}
|
/*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.fenzo;
import com.netflix.fenzo.functions.Action0;
import com.netflix.fenzo.functions.Action1;
import com.netflix.fenzo.plugins.BinPackingFitnessCalculators;
import com.netflix.fenzo.queues.*;
import com.netflix.fenzo.queues.tiered.QueuableTaskProvider;
import org.junit.Assert;
import org.junit.Test;
import java.util.*;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
public class TaskSchedulingServiceTest {
// Queue attribute buckets used across the tests: A, B and C live in tier 0,
// D1 in tier 1 (tier-0 tasks take priority over tier-1 — see testMultiTierAllocation).
private final QAttributes tier1bktA = new QAttributes.QAttributesAdaptor(0, "A");
private final QAttributes tier1bktB = new QAttributes.QAttributesAdaptor(0, "B");
private final QAttributes tier1bktC = new QAttributes.QAttributesAdaptor(0, "C");
private final QAttributes tier1bktD1 = new QAttributes.QAttributesAdaptor(1, "D1");
/**
 * Convenience overload: uses {@code loopMillis} for both the scheduling loop
 * interval and the maximum delay between iterations.
 */
private TaskSchedulingService getSchedulingService(TaskQueue queue, TaskScheduler scheduler, long loopMillis,
                                                   Action1<SchedulingResult> resultCallback) {
    return getSchedulingService(queue, scheduler, loopMillis, loopMillis, resultCallback);
}
/**
 * Builds a {@link TaskSchedulingService} around the given queue and scheduler.
 *
 * @param queue          task queue the service drains
 * @param scheduler      underlying task scheduler
 * @param loopMillis     scheduling loop interval, in milliseconds
 * @param maxDelayMillis maximum delay between scheduling iterations, in milliseconds
 * @param resultCallback invoked with the result of each scheduling iteration
 */
private TaskSchedulingService getSchedulingService(TaskQueue queue, TaskScheduler scheduler, long loopMillis,
                                                   long maxDelayMillis, Action1<SchedulingResult> resultCallback) {
    return new TaskSchedulingService.Builder()
            .withTaskQueue(queue)
            .withLoopIntervalMillis(loopMillis)
            .withMaxDelayMillis(maxDelayMillis)
            // No-op pre-scheduling hook; lambda replaces the original anonymous Action0
            // (the file already uses lambdas elsewhere).
            .withPreSchedulingLoopHook(() -> { })
            .withSchedulingResultCallback(resultCallback)
            .withTaskScheduler(scheduler)
            .build();
}
/**
 * Creates a TaskScheduler with CPU/memory bin-packing fitness and a very long
 * lease-offer expiry so offers do not time out during a test run.
 */
public TaskScheduler getScheduler() {
    return new TaskScheduler.Builder()
            .withLeaseOfferExpirySecs(1000000)
            // Lambda replaces the anonymous Action1; behavior is unchanged.
            .withLeaseRejectAction(virtualMachineLease ->
                    System.out.println("Rejecting offer on host " + virtualMachineLease.hostname()))
            .withFitnessCalculator(BinPackingFitnessCalculators.cpuMemBinPacker)
            .build();
}
// Queues a single 2-CPU/2000MB task and verifies it gets assigned; the empty
// action means no post-assignment checks are run.
@Test
public void testOneTaskAssignment() throws Exception {
    testOneTaskInternal(
            QueuableTaskProvider.wrapTask(tier1bktA, TaskRequestProvider.getTaskRequest(2, 2000, 1)),
            () -> {}
    );
}
/**
 * Queues the given task, offers one host ("hostA", 4 CPUs / 4000 MB, ports 1-10,
 * an "ENIs" resource set), waits up to 20s for the assignment, then runs the
 * optional post-assignment {@code action}.
 *
 * @param queuableTask task to queue and schedule
 * @param action       optional follow-up checks; skipped when {@code null}
 */
private void testOneTaskInternal(QueuableTask queuableTask, Action0 action) throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    TaskQueue queue = TaskQueues.createTieredQueue(2);
    final TaskScheduler scheduler = getScheduler();
    // Release the latch and stop the scheduler as soon as any assignment appears.
    // Lambda replaces the original anonymous Action1; behavior is unchanged.
    Action1<SchedulingResult> resultCallback = schedulingResult -> {
        if (schedulingResult.getResultMap().size() > 0) {
            latch.countDown();
            scheduler.shutdown();
        }
    };
    final TaskSchedulingService schedulingService = getSchedulingService(queue, scheduler, 1000L, resultCallback);
    schedulingService.start();
    List<VirtualMachineLease.Range> ports = new ArrayList<>();
    ports.add(new VirtualMachineLease.Range(1, 10));
    schedulingService.addLeases(
            Collections.singletonList(LeaseProvider.getLeaseOffer(
                    "hostA", 4, 4000, ports,
                    ResourceSetsTests.getResSetsAttributesMap("ENIs", 2, 2))));
    queue.queueTask(queuableTask);
    if (!latch.await(20000, TimeUnit.MILLISECONDS))
        Assert.fail("Did not assign resources in time");
    if (action != null)
        action.call();
}
/**
 * Verifies that a task requesting a named resource set ("ENIs"/"sg1") is
 * assigned and that exactly that resource set ends up consumed by it.
 */
@Test
public void testOneTaskWithResourceSet() throws Exception {
    final TaskRequest.NamedResourceSetRequest setRequest =
            new TaskRequest.NamedResourceSetRequest("ENIs", "sg1", 1, 1);
    final Map<String, TaskRequest.NamedResourceSetRequest> resourceSets =
            Collections.singletonMap(setRequest.getResName(), setRequest);
    final QueuableTask wrapped = QueuableTaskProvider.wrapTask(
            tier1bktA,
            TaskRequestProvider.getTaskRequest("grp", 1, 100, 0, 0, 0, null, null, resourceSets)
    );
    testOneTaskInternal(wrapped, () -> {
        // After assignment, the consumed named resources must contain exactly
        // the requested set value.
        final TaskRequest.AssignedResources assigned = wrapped.getAssignedResources();
        Assert.assertNotNull(assigned);
        final List<PreferentialNamedConsumableResourceSet.ConsumeResult> consumed =
                assigned.getConsumedNamedResources();
        Assert.assertNotNull(consumed);
        Assert.assertEquals(1, consumed.size());
        Assert.assertEquals(setRequest.getResValue(), consumed.get(0).getResName());
    });
}
/**
 * Queues 4 tasks across separate scheduling iterations and verifies all are
 * assigned, re-offering the consumed lease after each assignment so later
 * tasks can land on the same host.
 */
@Test
public void testMultipleTaskAssignments() throws Exception {
    int numTasks = 4;
    long loopMillis = 100;
    TaskQueue queue = TaskQueues.createTieredQueue(2);
    final CountDownLatch latch = new CountDownLatch(numTasks);
    final TaskScheduler scheduler = getScheduler();
    final AtomicReference<TaskSchedulingService> ref = new AtomicReference<>();
    // Lambda replaces the original anonymous Action1; behavior is unchanged.
    Action1<SchedulingResult> resultCallback = schedulingResult -> {
        if (!schedulingResult.getExceptions().isEmpty()) {
            Assert.fail(schedulingResult.getExceptions().get(0).getMessage());
        } else if (schedulingResult.getResultMap().size() > 0) {
            final VMAssignmentResult vmAssignmentResult = schedulingResult.getResultMap().values().iterator().next();
            // Count down once per assigned task.
            for (TaskAssignmentResult r : vmAssignmentResult.getTasksAssigned()) {
                latch.countDown();
            }
            // Re-offer the remaining capacity of the host that was just used.
            ref.get().addLeases(
                    Collections.singletonList(LeaseProvider.getConsumedLease(vmAssignmentResult))
            );
        } else {
            final Map<TaskRequest, List<TaskAssignmentResult>> failures = schedulingResult.getFailures();
            if (!failures.isEmpty()) {
                Assert.fail(failures.values().iterator().next().iterator().next().toString());
            }
        }
    };
    final TaskSchedulingService schedulingService = getSchedulingService(queue, scheduler, loopMillis, resultCallback);
    ref.set(schedulingService);
    schedulingService.start();
    schedulingService.addLeases(LeaseProvider.getLeases(1, 4, 4000, 1, 10));
    for (int i = 0; i < numTasks; i++) {
        queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktA, TaskRequestProvider.getTaskRequest(1, 1000, 1)));
        Thread.sleep(loopMillis); // simulate that tasks are added across different scheduling iterations
    }
    if (!latch.await(loopMillis * (numTasks + 2), TimeUnit.MILLISECONDS))
        Assert.fail(latch.getCount() + " of " + numTasks + " not scheduled within time");
}
// Test that tasks are assigned in the order based on current usage among multiple buckets within a tier
@Test
public void testOrderedAssignments() throws Exception {
    TaskQueue queue = TaskQueues.createTieredQueue(2);
    final TaskScheduler scheduler = getScheduler();
    final BlockingQueue<QueuableTask> assignmentResults = new LinkedBlockingQueue<>();
    // Collect every assigned task so the test thread can inspect assignment order.
    // Lambda replaces the original anonymous Action1; behavior is unchanged.
    Action1<SchedulingResult> resultCallback = schedulingResult -> {
        final Map<String, VMAssignmentResult> resultMap = schedulingResult.getResultMap();
        if (!resultMap.isEmpty()) {
            for (VMAssignmentResult r : resultMap.values()) {
                for (TaskAssignmentResult t : r.getTasksAssigned()) {
                    assignmentResults.offer((QueuableTask) t.getRequest());
                }
            }
        }
    };
    final TaskSchedulingService schedulingService = getSchedulingService(queue, scheduler, 50L, resultCallback);
    // First, fill 4 VMs, each with 8 cores, with A using 15 cores, B using 6 cores, and C using 11 cores, with
    // memory used in the same ratios
    for (int i = 0; i < 15; i++)
        queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktA, TaskRequestProvider.getTaskRequest(1, 10, 1)));
    for (int i = 0; i < 6; i++)
        queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktB, TaskRequestProvider.getTaskRequest(1, 10, 1)));
    for (int i = 0; i < 11; i++)
        queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktC, TaskRequestProvider.getTaskRequest(1, 10, 1)));
    schedulingService.start();
    schedulingService.addLeases(LeaseProvider.getLeases(4, 8, 8000, 1, 1000));
    // Drain all 32 initial assignments before checking ordering behavior.
    int numTasks = 32;
    while (numTasks > 0) {
        final QueuableTask task = assignmentResults.poll(2000, TimeUnit.MILLISECONDS);
        if (task == null)
            Assert.fail("Time out waiting for task to get assigned");
        else {
            numTasks--;
        }
    }
    // Now submit one task for each of A, B, and C, and create one offer that will only fit one of the tasks. Ensure
    // that the only task assigned is from B.
    queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktA, TaskRequestProvider.getTaskRequest(4, 40, 1)));
    queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktB, TaskRequestProvider.getTaskRequest(4, 40, 1)));
    queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktC, TaskRequestProvider.getTaskRequest(4, 40, 1)));
    schedulingService.addLeases(LeaseProvider.getLeases(1, 4, 4000, 1, 100));
    assertNextAssignedBucket(assignmentResults, tier1bktB);
    // queueTask another task for B and make sure it gets launched ahead of A and C after adding one more offer
    queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktB, TaskRequestProvider.getTaskRequest(4, 40, 1)));
    schedulingService.addLeases(LeaseProvider.getLeases(1, 4, 4000, 1, 100));
    assertNextAssignedBucket(assignmentResults, tier1bktB);
    // now add another offer and ensure task from C gets launched
    schedulingService.addLeases(LeaseProvider.getLeases(1, 4, 4000, 1, 100));
    assertNextAssignedBucket(assignmentResults, tier1bktC);
    // a final offer and the task from A should get launched
    schedulingService.addLeases(LeaseProvider.getLeases(1, 4, 4000, 1, 100));
    assertNextAssignedBucket(assignmentResults, tier1bktA);
}

// Polls the next assigned task (up to 1 second) and asserts it belongs to the
// expected bucket; extracted from the four identical inline blocks above.
private void assertNextAssignedBucket(BlockingQueue<QueuableTask> assignmentResults, QAttributes expectedBucket)
        throws InterruptedException {
    final QueuableTask task = assignmentResults.poll(1000, TimeUnit.MILLISECONDS);
    if (task == null)
        Assert.fail("Time out waiting for just one task to get assigned");
    Assert.assertEquals(expectedBucket.getBucketName(), task.getQAttributes().getBucketName());
}
/**
 * Verifies tier precedence: after filling hosts with tier-0 (A) and tier-1 (D1)
 * tasks, a small tier-0 task must be assigned ahead of larger tier-1 tasks, and
 * the tier-1 tasks must remain queued.
 */
@Test
public void testMultiTierAllocation() throws Exception {
    TaskQueue queue = TaskQueues.createTieredQueue(2);
    final TaskScheduler scheduler = getScheduler();
    final BlockingQueue<QueuableTask> assignmentResults = new LinkedBlockingQueue<>();
    // Collect every assigned task for inspection by the test thread.
    // Lambda replaces the original anonymous Action1; behavior is unchanged.
    Action1<SchedulingResult> resultCallback = schedulingResult -> {
        final Map<String, VMAssignmentResult> resultMap = schedulingResult.getResultMap();
        if (!resultMap.isEmpty()) {
            for (VMAssignmentResult r : resultMap.values()) {
                for (TaskAssignmentResult t : r.getTasksAssigned()) {
                    assignmentResults.offer((QueuableTask) t.getRequest());
                }
            }
        }
    };
    final TaskSchedulingService schedulingService = getSchedulingService(queue, scheduler, 50L, resultCallback);
    // fill 4 hosts with tasks from A (tier 0) and tasks from D1 (tier 1)
    for (int i = 0; i < 20; i++)
        queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktA, TaskRequestProvider.getTaskRequest(1, 10, 1)));
    for (int i = 0; i < 12; i++)
        queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktD1, TaskRequestProvider.getTaskRequest(1, 10, 1)));
    schedulingService.start();
    schedulingService.addLeases(LeaseProvider.getLeases(4, 8, 8000, 1, 1000));
    int numTasks = 32;
    while (numTasks > 0) {
        final QueuableTask task = assignmentResults.poll(2000, TimeUnit.MILLISECONDS);
        if (task == null)
            Assert.fail("Time out waiting for task to get assigned");
        else {
            numTasks--;
        }
    }
    // now submit a task from A that will only fill part of next offer, and a few tasks from D1 each of which
    // can fill the entire offer. Ensure that only task from A gets launched
    queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktA, TaskRequestProvider.getTaskRequest(1, 10, 1)));
    for (int i = 0; i < 3; i++)
        queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktD1, TaskRequestProvider.getTaskRequest(4, 40, 1)));
    schedulingService.addLeases(LeaseProvider.getLeases(1, 4, 4000, 1, 1000));
    final QueuableTask task = assignmentResults.poll(1000, TimeUnit.MILLISECONDS);
    Assert.assertNotNull("Time out waiting for just one task to get assigned", task);
    Assert.assertEquals(tier1bktA.getBucketName(), task.getQAttributes().getBucketName());
    // Confirm the D1 tasks are still queued.
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicReference<Map<TaskQueue.TaskState, Collection<QueuableTask>>> ref = new AtomicReference<>();
    schedulingService.requestAllTasks(stateCollectionMap -> {
        ref.set(stateCollectionMap);
        latch.countDown();
    });
    if (!latch.await(1000, TimeUnit.MILLISECONDS))
        Assert.fail("Time out waiting for tasks collection");
    final Map<TaskQueue.TaskState, Collection<QueuableTask>> map = ref.get();
    Assert.assertNotNull(map);
    Assert.assertNotNull(map.get(TaskQueue.TaskState.QUEUED));
    Assert.assertEquals(tier1bktD1.getBucketName(), map.get(TaskQueue.TaskState.QUEUED).iterator().next().getQAttributes().getBucketName());
}
// test that dominant resource share works for ordering of buckets - test by having equal resource usage among two
// buckets A and B, then let A use more CPUs and B use more Memory.
@Test
public void testMultiResAllocation() throws Exception {
    TaskQueue queue = TaskQueues.createTieredQueue(2);
    final TaskScheduler scheduler = getScheduler();
    final BlockingQueue<QueuableTask> assignmentResults = new LinkedBlockingQueue<>();
    final AtomicReference<TaskSchedulingService> ref = new AtomicReference<>();
    // Collect assigned tasks and re-offer consumed capacity after each assignment.
    // Lambda replaces the original anonymous Action1; behavior is unchanged.
    Action1<SchedulingResult> resultCallback = schedulingResult -> {
        final Map<String, VMAssignmentResult> resultMap = schedulingResult.getResultMap();
        if (!resultMap.isEmpty()) {
            for (VMAssignmentResult r : resultMap.values()) {
                for (TaskAssignmentResult t : r.getTasksAssigned()) {
                    assignmentResults.offer((QueuableTask) t.getRequest());
                }
                ref.get().addLeases(Collections.singletonList(LeaseProvider.getConsumedLease(r)));
            }
        }
    };
    final TaskSchedulingService schedulingService = getSchedulingService(queue, scheduler, 50L, resultCallback);
    ref.set(schedulingService);
    // let us use VMs having 8 cores and 8000 MB of memory each and.
    // create 2 VMs and fill their usage with A filling 2 CPUs at a time with little memory and B filling 2000 MB
    // at a time with very little CPUs.
    for (int i = 0; i < 4; i++)
        queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktA, TaskRequestProvider.getTaskRequest(3, 1000, 1)));
    for (int i = 0; i < 4; i++)
        queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktB, TaskRequestProvider.getTaskRequest(1, 3000, 1)));
    schedulingService.start();
    List<VirtualMachineLease> leases = LeaseProvider.getLeases(2, 8, 8000, 1, 100);
    schedulingService.addLeases(leases);
    // 8 tasks were queued above (4 from A, 4 from B); the original message said "16".
    if (!unqueueTaskResults(8, assignmentResults))
        Assert.fail("Timeout waiting for 8 task assignments");
    // now A is using 12 of the 16 total CPUs in use, and B is using 12,000 of 16,000 total MB, so their
    // dominant resource usage share is equivalent.
    // now submit a task from A to use 1 CPU and 10 MB memory and another task from B to use 1 CPU and 4000 MB memory
    // ensure that they are assigned on a new VM
    queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktA, TaskRequestProvider.getTaskRequest(1, 10, 1)));
    queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktB, TaskRequestProvider.getTaskRequest(1, 7000, 1)));
    leases = LeaseProvider.getLeases(2, 1, 8, 8000, 1, 100);
    schedulingService.addLeases(leases);
    if (!unqueueTaskResults(2, assignmentResults))
        Assert.fail("Timeout waiting for 2 task assignments");
    // now submit a task from just A with 1 CPU, 10 memory, it should get assigned right away
    queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktA, TaskRequestProvider.getTaskRequest(1, 10, 1)));
    if (!unqueueTaskResults(1, assignmentResults))
        Assert.fail("Timeout waiting for 1 task assignment");
    // we now have 3 CPUs and 7020 MB memory being used out of 8 and 8000 respectively
    // now submit 5 tasks from A with 1 CPU, 1 memory each to possibly fill the host as well as a task from B with 1 CPU, 1000 memory.
    // ensure that only the tasks from A get assigned and that the task from B stays queued
    for (int i = 0; i < 5; i++)
        queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktA, TaskRequestProvider.getTaskRequest(1, 1, 1)));
    queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktB, TaskRequestProvider.getTaskRequest(1, 1000, 1)));
    int numTasks = 2;
    while (numTasks > 0) {
        final QueuableTask task = assignmentResults.poll(2000, TimeUnit.MILLISECONDS);
        if (task == null)
            Assert.fail("Timeout waiting for task assignment");
        Assert.assertEquals(tier1bktA.getBucketName(), task.getQAttributes().getBucketName());
        numTasks--;
    }
    // Verify the B task is the one left queued.
    final AtomicReference<String> bucketRef = new AtomicReference<>();
    final CountDownLatch latch = new CountDownLatch(1);
    schedulingService.requestAllTasks(stateCollectionMap -> {
        final Collection<QueuableTask> tasks = stateCollectionMap.get(TaskQueue.TaskState.QUEUED);
        if (tasks != null && !tasks.isEmpty()) {
            for (QueuableTask t : tasks)
                bucketRef.set(t.getQAttributes().getBucketName());
            latch.countDown();
        }
    });
    if (!latch.await(2000, TimeUnit.MILLISECONDS))
        Assert.fail("Can't get confirmation on task from B to be queued");
    Assert.assertNotNull(bucketRef.get());
    Assert.assertEquals(tier1bktB.getBucketName(), bucketRef.get());
}
/**
 * Verifies that a launched task shows up in the VM's running-task list and
 * disappears from it after {@code removeTask(...)} is called.
 */
@Test
public void testRemoveFromQueue() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    TaskQueue queue = TaskQueues.createTieredQueue(2);
    final TaskScheduler scheduler = getScheduler();
    // Release the latch as soon as any assignment happens.
    // Lambdas replace the original anonymous classes; behavior is unchanged.
    Action1<SchedulingResult> resultCallback = schedulingResult -> {
        if (schedulingResult.getResultMap().size() > 0) {
            latch.countDown();
        }
    };
    final TaskSchedulingService schedulingService = getSchedulingService(queue, scheduler, 100L, 200L, resultCallback);
    schedulingService.start();
    final List<VirtualMachineLease> leases = LeaseProvider.getLeases(1, 4, 4000, 1, 10);
    schedulingService.addLeases(leases);
    final QueuableTask task = QueuableTaskProvider.wrapTask(tier1bktA, TaskRequestProvider.getTaskRequest(2, 2000, 1));
    queue.queueTask(task);
    if (!latch.await(5, TimeUnit.SECONDS))
        Assert.fail("Did not assign resources in time");
    // Confirm the task appears among the VM's running tasks.
    final CountDownLatch latch2 = new CountDownLatch(1);
    final AtomicBoolean found = new AtomicBoolean();
    schedulingService.requestVmCurrentStates(states -> {
        for (VirtualMachineCurrentState s : states) {
            for (TaskRequest t : s.getRunningTasks()) {
                if (t.getId().equals(task.getId())) {
                    found.set(true);
                    latch2.countDown();
                }
            }
        }
    });
    if (!latch2.await(5, TimeUnit.SECONDS)) {
        Assert.fail("Didn't get vm states in time");
    }
    Assert.assertTrue("Did not find task on vm", found.get());
    // Remove the task and verify it no longer appears on the VM.
    schedulingService.removeTask(task.getId(), task.getQAttributes(), leases.get(0).hostname());
    found.set(false);
    final CountDownLatch latch3 = new CountDownLatch(1);
    schedulingService.requestVmCurrentStates(states -> {
        for (VirtualMachineCurrentState s : states) {
            for (TaskRequest t : s.getRunningTasks()) {
                if (t.getId().equals(task.getId())) {
                    found.set(true);
                }
            }
        }
        // Count down once the full scan is done, whether or not the task was found
        // (the original also counted down inside the loop, which was redundant).
        latch3.countDown();
    });
    if (!latch3.await(5, TimeUnit.SECONDS)) {
        Assert.fail("Timeout waiting for vm states");
    }
    Assert.assertFalse("Unexpected to find removed task on vm", found.get());
    scheduler.shutdown();
}
/**
 * Verifies scheduling-iteration pacing: with an idle queue the task-list
 * request is delayed up to {@code maxDelay}, but a queue change (new task or
 * new lease) makes it arrive within a couple of loop intervals.
 */
@Test
public void testMaxSchedIterDelay() throws Exception {
    TaskQueue queue = TaskQueues.createTieredQueue(2);
    final TaskScheduler scheduler = getScheduler();
    queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktA, TaskRequestProvider.getTaskRequest(1, 100, 1)));
    // No-op callback; lambda replaces the original anonymous Action1.
    Action1<SchedulingResult> resultCallback = schedulingResult -> { };
    final long maxDelay = 500L;
    final long loopMillis = 50L;
    final TaskSchedulingService schedulingService = getSchedulingService(queue, scheduler, loopMillis, maxDelay, resultCallback);
    schedulingService.start();
    long startAt = System.currentTimeMillis();
    Thread.sleep(51L);
    final AtomicLong gotTasksAt = new AtomicLong();
    CountDownLatch latch = new CountDownLatch(1);
    setupTaskGetter(schedulingService, gotTasksAt, latch);
    if (!latch.await(maxDelay + 100L, TimeUnit.MILLISECONDS)) {
        Assert.fail("Timeout waiting for tasks list");
    }
    // With no queue activity, the list must not arrive before maxDelay elapses.
    Assert.assertTrue("Got task list too soon", (gotTasksAt.get() - startAt) > maxDelay);
    // now test that when queue does change, we get it sooner
    startAt = System.currentTimeMillis();
    latch = new CountDownLatch(1);
    setupTaskGetter(schedulingService, gotTasksAt, latch);
    queue.queueTask(QueuableTaskProvider.wrapTask(tier1bktA, TaskRequestProvider.getTaskRequest(1, 100, 1)));
    if (!latch.await(maxDelay + 100L, TimeUnit.MILLISECONDS)) {
        Assert.fail("Timeout waiting for tasks list");
    }
    Assert.assertTrue("Got task list too late", (gotTasksAt.get() - startAt) < (maxDelay + 2 * loopMillis));
    // repeat with adding lease
    startAt = System.currentTimeMillis();
    latch = new CountDownLatch(1);
    setupTaskGetter(schedulingService, gotTasksAt, latch);
    schedulingService.addLeases(LeaseProvider.getLeases(1, 1, 100, 1, 10));
    if (!latch.await(maxDelay + 100L, TimeUnit.MILLISECONDS)) {
        Assert.fail("Timeout waiting for tasks list");
    }
    Assert.assertTrue("Got tasks list too late", (gotTasksAt.get() - startAt) < (maxDelay + 2 * loopMillis));
}
/**
 * Verifies that a task registered via {@code initializeRunningTask(...)} (as if
 * it was already running before the service started tracking) shows up on the
 * expected host in the VM current-state report.
 */
@Test
public void testInitWithPrevRunningTasks() throws Exception {
    TaskQueue queue = TaskQueues.createTieredQueue(2);
    final TaskScheduler scheduler = getScheduler();
    // No-op callback; lambdas replace the original anonymous classes.
    Action1<SchedulingResult> resultCallback = schedulingResult -> { };
    final long maxDelay = 500L;
    final long loopMillis = 50L;
    final TaskSchedulingService schedulingService = getSchedulingService(queue, scheduler, loopMillis, maxDelay, resultCallback);
    schedulingService.start();
    final String hostname = "hostA";
    schedulingService.initializeRunningTask(
            QueuableTaskProvider.wrapTask(tier1bktA, TaskRequestProvider.getTaskRequest(1, 100, 1)),
            hostname
    );
    final AtomicReference<String> ref = new AtomicReference<>();
    final CountDownLatch latch = new CountDownLatch(1);
    schedulingService.requestVmCurrentStates(states -> {
        if (states != null && !states.isEmpty()) {
            final VirtualMachineCurrentState state = states.iterator().next();
            ref.set(state.getHostname());
        }
        latch.countDown();
    });
    if (!latch.await(maxDelay * 2, TimeUnit.MILLISECONDS)) {
        Assert.fail("Timeout waiting for vm states");
    }
    Assert.assertEquals(hostname, ref.get());
}
// Test with a large number of tasks captured from a run that caused problems to tier buckets' sorting. Ensure that
// initializing them as running and then scheduling new tasks produces no exceptions.
@Test
public void testLargeTasksToInitInRunningState() throws Exception {
    final List<SampleLargeNumTasksToInit.Task> runningTasks = SampleLargeNumTasksToInit.getSampleTasksInRunningState();
    System.out.println("GOT " + runningTasks.size() + " tasks");
    TaskQueue queue = TaskQueues.createTieredQueue(2);
    final TaskScheduler scheduler = getScheduler();
    final CountDownLatch latch = new CountDownLatch(6);
    final AtomicReference<List<Exception>> ref = new AtomicReference<>();
    final AtomicBoolean printFailures = new AtomicBoolean();
    // Capture scheduling-iteration exceptions; optionally dump failures for debugging.
    // Lambda replaces the original anonymous Action1; behavior is unchanged.
    Action1<SchedulingResult> resultCallback = schedulingResult -> {
        final List<Exception> exceptions = schedulingResult.getExceptions();
        if (exceptions != null && !exceptions.isEmpty())
            ref.set(exceptions);
        else if (!schedulingResult.getResultMap().isEmpty())
            System.out.println("#Assignments: " + schedulingResult.getResultMap().values().iterator().next().getTasksAssigned().size());
        else if (printFailures.get()) {
            final Map<TaskRequest, List<TaskAssignmentResult>> failures = schedulingResult.getFailures();
            if (!failures.isEmpty()) {
                for (Map.Entry<TaskRequest, List<TaskAssignmentResult>> entry : failures.entrySet()) {
                    System.out.println(" Failure for " + entry.getKey().getId() + ":");
                    for (TaskAssignmentResult r : entry.getValue())
                        System.out.println("        " + r.toString());
                }
            }
        }
        latch.countDown();
    };
    final long maxDelay = 100L;
    final long loopMillis = 20L;
    final TaskSchedulingService schedulingService = getSchedulingService(queue, scheduler, loopMillis, maxDelay, resultCallback);
    // Register all sample tasks as already running, remembering one per bucket.
    Map<String, SampleLargeNumTasksToInit.Task> uniqueTasks = new HashMap<>();
    for (SampleLargeNumTasksToInit.Task t : runningTasks) {
        // putIfAbsent replaces the containsKey+put pair with one idiomatic call.
        uniqueTasks.putIfAbsent(t.getBucket(), t);
        schedulingService.initializeRunningTask(SampleLargeNumTasksToInit.toQueuableTask(t), t.getHost());
    }
    schedulingService.start();
    // add a few new tasks, one per bucket seen above
    int id = 0;
    for (SampleLargeNumTasksToInit.Task t : uniqueTasks.values()) {
        queue.queueTask(
                SampleLargeNumTasksToInit.toQueuableTask(
                        new SampleLargeNumTasksToInit.Task("newTask-" + id++, t.getBucket(), t.getTier(), t.getCpu(), t.getMemory(), t.getNetworkMbps(), t.getDisk(), null)
                )
        );
    }
    schedulingService.addLeases(LeaseProvider.getLeases(1000, 1, 32, 500000, 2000, 0, 100));
    Thread.sleep(loopMillis * 2);
    printFailures.set(true);
    if (!latch.await(1000, TimeUnit.MILLISECONDS)) {
        Assert.fail("Unexpected to not get enough sched iterations done");
    }
    // Print any captured exceptions before asserting none occurred.
    final List<Exception> exceptions = ref.get();
    if (exceptions != null) {
        for (Exception e : exceptions) {
            if (e instanceof TaskQueueMultiException) {
                for (Exception ee : ((TaskQueueMultiException) e).getExceptions())
                    ee.printStackTrace();
            } else
                e.printStackTrace();
        }
    }
    Assert.assertNull(exceptions);
}
@Test
public void testNotReadyTask() throws Exception {
TaskQueue queue = TaskQueues.createTieredQueue(2);
final TaskScheduler scheduler = getScheduler();
final AtomicReference<List<Exception>> ref = new AtomicReference<>();
final CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<List<String>> assignedTaskIds = new AtomicReference<>(new LinkedList<>());
final AtomicReference<List<String>> failedTaskIds = new AtomicReference<>(new LinkedList<>());
Action1<SchedulingResult> resultCallback = schedulingResult -> {
final List<Exception> exceptions = schedulingResult.getExceptions();
final Map<String, VMAssignmentResult> resultMap = schedulingResult.getResultMap();
if (exceptions != null && !exceptions.isEmpty())
ref.set(exceptions);
else if (!resultMap.isEmpty()) {
resultMap.forEach((key, value) -> value.getTasksAssigned().forEach(t -> assignedTaskIds.get().add(t.getTaskId())));
}
schedulingResult.getFailures().forEach((t, r) -> failedTaskIds.get().add(t.getId()));
latch.countDown();
};
final long maxDelay = 100L;
final long loopMillis = 20L;
final TaskSchedulingService schedulingService = getSchedulingService(queue, scheduler, loopMillis, maxDelay, resultCallback);
final QueuableTask task1 = QueuableTaskProvider.wrapTask(tier1bktA, TaskRequestProvider.getTaskRequest(1, 100, 1));
queue.queueTask(task1);
final QueuableTask task2 = QueuableTaskProvider.wrapTask(tier1bktA, TaskRequestProvider.getTaskRequest(1, 100, 1));
task2.safeSetReadyAt(System.currentTimeMillis() + 1000000L);
queue.queueTask(task2);
schedulingService.addLeases(LeaseProvider.getLeases(2, 4, 8000, 2000, 1, 100));
schedulingService.start();
if (!latch.await(2, TimeUnit.SECONDS)) {
Assert.fail("Unexpected to not get assignments in time");
}
Assert.assertEquals(1, assignedTaskIds.get().size());
Assert.assertEquals(task1.getId(), assignedTaskIds.get().iterator().next());
Assert.assertEquals(0, failedTaskIds.get().size());
schedulingService.shutdown();
}
private void setupTaskGetter(TaskSchedulingService schedulingService, final AtomicLong gotTasksAt, final CountDownLatch latch) throws TaskQueueException {
schedulingService.requestAllTasks(new Action1<Map<TaskQueue.TaskState, Collection<QueuableTask>>>() {
@Override
public void call(Map<TaskQueue.TaskState, Collection<QueuableTask>> stateCollectionMap) {
gotTasksAt.set(System.currentTimeMillis());
latch.countDown();
}
});
}
private boolean unqueueTaskResults(int numTasks, BlockingQueue<QueuableTask> assignmentResults) throws InterruptedException {
while (numTasks > 0) {
final QueuableTask task = assignmentResults.poll(2000, TimeUnit.MILLISECONDS);
if (task == null)
return false;
else
numTasks--;
}
return true;
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.