index
int64
0
0
repo_id
stringlengths
26
205
file_path
stringlengths
51
246
content
stringlengths
8
433k
__index_level_0__
int64
0
10k
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/scheduler/GuiceJobFactory.java
/** * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.scheduler; import org.quartz.Job; import org.quartz.JobDetail; import org.quartz.SchedulerException; import org.quartz.spi.JobFactory; import org.quartz.spi.TriggerFiredBundle; import com.google.inject.Inject; import com.google.inject.Injector; public class GuiceJobFactory implements JobFactory { public final Injector guice; @Inject public GuiceJobFactory(Injector guice) { this.guice = guice; } @Override public Job newJob(TriggerFiredBundle bundle) throws SchedulerException { JobDetail jobDetail = bundle.getJobDetail(); Class<?> jobClass = jobDetail.getJobClass(); Job job = (Job) guice.getInstance(jobClass); guice.injectMembers(job); return job; } }
5,100
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/scheduler/BlockingSubmitThreadPoolExecutor.java
/**
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.scheduler;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * {@link ThreadPoolExecutor} that will block in the {@code submit()} method
 * until the task can be successfully added to the queue.
 */
public class BlockingSubmitThreadPoolExecutor extends ThreadPoolExecutor {
    private static final long DEFAULT_SLEEP = 100;
    private static final long DEFAULT_KEEP_ALIVE = 100;

    private static final Logger logger = LoggerFactory.getLogger(BlockingSubmitThreadPoolExecutor.class);

    // All state is set once in the constructor; final prevents accidental rebinding.
    private final BlockingQueue<Runnable> queue;
    // Maximum total milliseconds to wait for queue capacity before giving up.
    private final long giveupTime;
    // Count of tasks submitted but not yet finished executing (see afterExecute).
    private final AtomicInteger active;

    public BlockingSubmitThreadPoolExecutor(int maximumPoolSize, BlockingQueue<Runnable> workQueue, long timeoutAdding) {
        super(maximumPoolSize, maximumPoolSize, DEFAULT_KEEP_ALIVE, TimeUnit.SECONDS, workQueue);
        this.queue = workQueue;
        this.giveupTime = timeoutAdding;
        this.active = new AtomicInteger(0);
    }

    /**
     * This is a thread safe way to avoid rejection exception... this is
     * implemented because we might want to hold the incoming requests till
     * there is a free thread.
     *
     * @throws RuntimeException if the queue stays full longer than the configured
     *         give-up time, or if the wait is interrupted (interrupt status restored)
     */
    @Override
    public <T> Future<T> submit(Callable<T> task) {
        synchronized (this) {
            long waited = 0;
            while (queue.remainingCapacity() == 0) {
                if (waited > giveupTime) {
                    throw new RuntimeException("Timed out because TPE is too busy...");
                }
                try {
                    Thread.sleep(DEFAULT_SLEEP);
                } catch (InterruptedException e) {
                    // Restore the interrupt flag so callers can still observe it.
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e);
                }
                waited += DEFAULT_SLEEP;
            }
            // Increment only once we are actually going to submit; otherwise a
            // timeout above would permanently inflate the counter and make
            // sleepTillEmpty() spin until its own timeout.
            active.incrementAndGet();
            try {
                return super.submit(task);
            } catch (RuntimeException e) {
                // e.g. RejectedExecutionException after shutdown — undo the count.
                active.decrementAndGet();
                throw e;
            }
        }
    }

    @Override
    protected void afterExecute(Runnable r, Throwable t) {
        super.afterExecute(r, t);
        active.decrementAndGet();
    }

    /**
     * Blocking call to test if the threads are done or not.
     *
     * @throws RuntimeException on timeout or interruption (interrupt status restored)
     */
    public void sleepTillEmpty() {
        long waited = 0;
        while (!queue.isEmpty() || (active.get() > 0)) {
            if (waited > giveupTime) {
                throw new RuntimeException("Timed out because TPE is too busy...");
            }
            try {
                Thread.sleep(DEFAULT_SLEEP);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
            waited += DEFAULT_SLEEP;
            logger.debug("After Sleeping for empty: {}, Count: {}", queue.size(), active.get());
        }
    }
}
5,101
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/scheduler/ExecutionException.java
/** * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.scheduler; public class ExecutionException extends Exception { private static final long serialVersionUID = 1L; public ExecutionException(String msg, Throwable th) { super(msg, th); } public ExecutionException(String msg) { super(msg); } public ExecutionException(Exception ex) { super(ex); } public ExecutionException(Throwable th) { super(th); } }
5,102
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/scheduler/NamedThreadPoolExecutor.java
/**
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.scheduler;

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RejectedExecutionHandler;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

/**
 * Fixed-size {@link ThreadPoolExecutor} whose daemon worker threads are named
 * {@code <poolName>-<n>}, and whose rejection handler blocks until the task can
 * be re-queued instead of failing fast.
 */
public class NamedThreadPoolExecutor extends ThreadPoolExecutor {
    public NamedThreadPoolExecutor(int poolSize, String poolName) {
        this(poolSize, poolName, new LinkedBlockingQueue<Runnable>());
    }

    public NamedThreadPoolExecutor(int poolSize, String poolName, BlockingQueue<Runnable> queue) {
        super(poolSize, poolSize, 1000, TimeUnit.MILLISECONDS, queue,
                new ThreadFactoryBuilder().setDaemon(true).setNameFormat(poolName + "-%d").build(),
                new LocalRejectedExecutionHandler(queue));
    }

    /**
     * Retries the rejected task by offering it back onto the queue until it is
     * accepted or the executor shuts down.
     */
    private static class LocalRejectedExecutionHandler implements RejectedExecutionHandler {
        private final BlockingQueue<Runnable> queue;

        LocalRejectedExecutionHandler(BlockingQueue<Runnable> queue) {
            this.queue = queue;
        }

        public void rejectedExecution(Runnable task, ThreadPoolExecutor executor) {
            // Keep retrying across interrupts, but restore the thread's interrupt
            // status on exit instead of silently discarding it.
            boolean interrupted = false;
            try {
                while (true) {
                    if (executor.isShutdown()) {
                        throw new RejectedExecutionException("ThreadPoolExecutor has shut down");
                    }
                    try {
                        if (queue.offer(task, 1000, TimeUnit.MILLISECONDS)) {
                            return;
                        }
                    } catch (InterruptedException e) {
                        interrupted = true;
                    }
                }
            } finally {
                if (interrupted) {
                    Thread.currentThread().interrupt();
                }
            }
        }
    }
}
5,103
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/scheduler/RaigadScheduler.java
/**
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.scheduler;

import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.utils.Sleeper;
import org.quartz.JobDetail;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.SchedulerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.text.ParseException;

/**
 * Scheduling class to schedule Raigad tasks. Uses Quartz scheduler
 */
@Singleton
public class RaigadScheduler {
    private static final Logger logger = LoggerFactory.getLogger(RaigadScheduler.class);

    private final Scheduler scheduler;
    private final GuiceJobFactory jobFactory;
    private final Sleeper sleeper;

    @Inject
    public RaigadScheduler(SchedulerFactory factory, GuiceJobFactory jobFactory, Sleeper sleeper) {
        try {
            this.scheduler = factory.getScheduler();
            this.scheduler.setJobFactory(jobFactory);
            this.jobFactory = jobFactory;
        } catch (SchedulerException e) {
            throw new RuntimeException(e);
        }
        this.sleeper = sleeper;
    }

    /**
     * Add a task to the scheduler
     */
    public void addTask(String name, Class<? extends Task> taskclass, TaskTimer timer)
            throws SchedulerException, ParseException {
        assert timer != null : "Cannot add scheduler task " + name + " as no timer is set";
        JobDetail job = new JobDetail(name, Scheduler.DEFAULT_GROUP, taskclass);
        scheduler.scheduleJob(job, timer.getTrigger());
    }

    /**
     * Add a delayed task to the scheduler; scheduling happens on a background
     * thread after {@code delayInSeconds} so the caller is never blocked.
     */
    public void addTaskWithDelay(final String name, Class<? extends Task> taskclass, final TaskTimer timer,
                                 final int delayInSeconds) throws SchedulerException, ParseException {
        assert timer != null : "Cannot add scheduler task " + name + " as no timer is set";
        final JobDetail job = new JobDetail(name, Scheduler.DEFAULT_GROUP, taskclass);

        // Named thread so the pending delayed scheduling is identifiable in dumps.
        new Thread(new Runnable() {
            public void run() {
                try {
                    sleeper.sleepQuietly(delayInSeconds * 1000L);
                    scheduler.scheduleJob(job, timer.getTrigger());
                } catch (SchedulerException e) {
                    // Parameterized logging keeps the throwable as the last argument.
                    logger.warn("problem occurred while scheduling a job with name {}", name, e);
                } catch (ParseException e) {
                    logger.warn("problem occurred while parsing a job with name {}", name, e);
                }
            }
        }, "raigad-delayed-schedule-" + name).start();
    }

    /** Instantiates the task via Guice and runs it immediately, outside Quartz. */
    public void runTaskNow(Class<? extends Task> taskclass) throws Exception {
        jobFactory.guice.getInstance(taskclass).execute(null);
    }

    public void deleteTask(String name) throws SchedulerException, ParseException {
        scheduler.deleteJob(name, Scheduler.DEFAULT_GROUP);
    }

    public final Scheduler getScheduler() {
        return scheduler;
    }

    public void shutdown() {
        try {
            scheduler.shutdown();
        } catch (SchedulerException e) {
            throw new RuntimeException(e);
        }
    }

    public void start() {
        try {
            scheduler.start();
        } catch (SchedulerException ex) {
            throw new RuntimeException(ex);
        }
    }
}
5,104
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/scheduler/Task.java
/**
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.scheduler;

import com.google.common.base.Throwables;
import com.netflix.raigad.configuration.IConfiguration;
import org.quartz.Job;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.management.MBeanServer;
import javax.management.ObjectName;
import java.lang.management.ManagementFactory;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Task class that should be implemented by all cron tasks. Jobconf will contain
 * any instance specific data
 *
 * NOTE: Constructor must not throw any exception. This will cause Quartz to set the job to failure
 */
public abstract class Task implements Job, TaskMBean {
    // Public so subclasses and the scheduler can observe the last-known state.
    // NOTE(review): reads/writes are not synchronized; concurrent executions may race.
    public STATE status = STATE.DONE;

    public enum STATE {
        ERROR, RUNNING, DONE
    }

    protected final IConfiguration config;

    private static final Logger logger = LoggerFactory.getLogger(Task.class);

    private final AtomicInteger errors = new AtomicInteger();
    private final AtomicInteger executions = new AtomicInteger();

    protected Task(IConfiguration config) {
        this(config, ManagementFactory.getPlatformMBeanServer());
    }

    protected Task(IConfiguration config, MBeanServer mBeanServer) {
        this.config = config;

        // TODO: don't do mbean registration here
        String mbeanName = "com.netflix.raigad.scheduler:type=" + this.getClass().getName();
        try {
            mBeanServer.registerMBean(this, new ObjectName(mbeanName));
            initialize();
        } catch (Exception e) {
            throw Throwables.propagate(e);
        }
    }

    /**
     * Optional hook run once at construction time; the default implementation
     * does nothing. Overrides may throw {@link ExecutionException}.
     */
    public void initialize() throws ExecutionException {
        // nothing to initialize
    }

    public abstract void execute() throws Exception;

    /**
     * Main method to execute a task. Skips execution if a previous run is still
     * marked RUNNING; counts executions and errors for the MBean.
     */
    public void execute(JobExecutionContext context) throws JobExecutionException {
        executions.incrementAndGet();

        try {
            if (status == STATE.RUNNING) {
                return;
            }
            status = STATE.RUNNING;
            execute();
        } catch (Throwable e) {
            // Single handler: the previous duplicate Exception/Throwable catches
            // had identical bodies.
            status = STATE.ERROR;
            logger.error("Couldnt execute the task because of " + e.getMessage(), e);
            errors.incrementAndGet();
        }

        if (status != STATE.ERROR) {
            status = STATE.DONE;
        }
    }

    public STATE state() {
        return status;
    }

    public int getErrorCount() {
        return errors.get();
    }

    public int getExecutionCount() {
        return executions.get();
    }

    public abstract String getName();
}
5,105
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/scheduler/TaskMBean.java
/** * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.scheduler; /** * MBean to monitor Task executions. * */ public interface TaskMBean { public int getErrorCount(); public int getExecutionCount(); public String getName(); }
5,106
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/scheduler/SimpleTimer.java
/**
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.scheduler;

import org.quartz.Scheduler;
import org.quartz.SimpleTrigger;
import org.quartz.Trigger;

import java.text.ParseException;
import java.util.Date;

/**
 * SimpleTimer allows jobs to run starting from specified time occurring at
 * regular frequency's. Frequency of the execution timestamp since epoch.
 */
public class SimpleTimer implements TaskTimer {
    private SimpleTrigger trigger;

    /** Repeats indefinitely at the given interval (milliseconds). */
    public SimpleTimer(String name, long interval) {
        this.trigger = new SimpleTrigger(name, SimpleTrigger.REPEAT_INDEFINITELY, interval);
    }

    /** Run once at the given epoch-millisecond start time. */
    public SimpleTimer(String name, String group, long startTime) {
        this.trigger = new SimpleTrigger(name, group, new Date(startTime));
    }

    /** Run immediately, once. */
    public SimpleTimer(String name) {
        this.trigger = new SimpleTrigger(name, Scheduler.DEFAULT_GROUP);
    }

    public Trigger getTrigger() throws ParseException {
        // Fire misfired triggers immediately rather than skipping them.
        trigger.setMisfireInstruction(SimpleTrigger.MISFIRE_INSTRUCTION_FIRE_NOW);
        return trigger;
    }
}
5,107
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/ElasticsearchTransportClientConnectionException.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.utils;

import java.io.IOException;

/**
 * Raised when a transport client connection to Elasticsearch cannot be established.
 */
public class ElasticsearchTransportClientConnectionException extends IOException {
    private static final long serialVersionUID = 444L;

    /** Creates the exception with a descriptive message. */
    public ElasticsearchTransportClientConnectionException(String message) {
        super(message);
    }

    /** Creates the exception with a message and the underlying cause. */
    public ElasticsearchTransportClientConnectionException(String message, Exception e) {
        super(message, e);
    }
}
5,108
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/TuneElasticsearch.java
/**
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.utils;

import java.io.IOException;

import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;

/**
 * One-shot task that writes the Elasticsearch YAML configuration via the
 * configured {@link IElasticsearchTuner}.
 */
@Singleton
public class TuneElasticsearch extends Task {
    public static final String JOBNAME = "Tune-Elasticsearch";

    private final IElasticsearchTuner tuner;

    @Inject
    public TuneElasticsearch(IConfiguration config, IElasticsearchTuner tuner) {
        super(config);
        this.tuner = tuner;
    }

    public void execute() throws IOException {
        tuner.writeAllProperties(config.getYamlLocation(), null);
    }

    @Override
    public String getName() {
        // Reuse the constant instead of duplicating the literal.
        return JOBNAME;
    }

    /** Timer that fires immediately and only once. */
    public static TaskTimer getTimer() {
        return new SimpleTimer(JOBNAME);
    }
}
5,109
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/SystemUtils.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.utils;

import com.google.common.base.Charsets;
import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;
import com.google.common.io.Files;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.params.BasicHttpParams;
import org.apache.http.params.HttpConnectionParams;
import org.apache.http.params.HttpParams;
import org.joda.time.DateTime;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.List;

/**
 * Static helpers for HTTP calls, file-system maintenance, and hashing.
 */
public class SystemUtils {
    public static final String NOT_FOUND_STR = "NOT_FOUND";

    private static final Logger logger = LoggerFactory.getLogger(SystemUtils.class);

    /** Utility class — not instantiable. */
    private SystemUtils() {
    }

    /**
     * GETs the given URL with 1s connect/read timeouts.
     *
     * @return the body as UTF-8 text, or {@link #NOT_FOUND_STR} on HTTP 404
     * @throws RuntimeException for any other non-200 status or I/O failure
     */
    public static String getDataFromUrl(String url) {
        HttpURLConnection connection = null;
        try {
            connection = (HttpURLConnection) new URL(url).openConnection();
            connection.setConnectTimeout(1000);
            connection.setReadTimeout(1000);
            connection.setRequestMethod("GET");

            if (connection.getResponseCode() == 404) {
                return NOT_FOUND_STR;
            }
            if (connection.getResponseCode() != 200) {
                throw new RuntimeException("Unable to get data from " + url);
            }

            ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
            // getInputStream() avoids the fragile (FilterInputStream) getContent() cast;
            // try-with-resources guarantees the stream is closed.
            try (InputStream inputStream = connection.getInputStream()) {
                byte[] buffer = new byte[2048];
                int bytesRead;
                while ((bytesRead = inputStream.read(buffer, 0, buffer.length)) != -1) {
                    byteArrayOutputStream.write(buffer, 0, bytesRead);
                }
            }

            String requestResult = new String(byteArrayOutputStream.toByteArray(), Charsets.UTF_8);
            logger.info("Calling URL API: {}, response: {}", url, requestResult);
            return requestResult;
        } catch (Exception ex) {
            throw new RuntimeException(ex);
        } finally {
            if (connection != null) {
                connection.disconnect();
            }
        }
    }

    /**
     * Executes an HTTP GET with JSON content type and 1s timeouts.
     *
     * @throws ElasticsearchHttpException on any failure or non-200 status
     */
    public static String runHttpGetCommand(String url) throws Exception {
        DefaultHttpClient client = new DefaultHttpClient();
        InputStream isStream = null;
        try {
            HttpParams httpParameters = new BasicHttpParams();
            int timeoutConnection = 1000;
            int timeoutSocket = 1000;
            HttpConnectionParams.setConnectionTimeout(httpParameters, timeoutConnection);
            HttpConnectionParams.setSoTimeout(httpParameters, timeoutSocket);
            client.setParams(httpParameters);

            HttpGet getRequest = new HttpGet(url);
            getRequest.setHeader("Content-type", "application/json");

            HttpResponse resp = client.execute(getRequest);
            if (resp == null || resp.getEntity() == null) {
                throw new ElasticsearchHttpException("Unable to execute GET URL (" + url
                        + "), exception Message: < Null Response or Null HttpEntity >");
            }

            isStream = resp.getEntity().getContent();
            if (resp.getStatusLine().getStatusCode() != 200) {
                throw new ElasticsearchHttpException("Unable to execute GET URL (" + url
                        + "), exception Message: (" + IOUtils.toString(isStream, StandardCharsets.UTF_8.toString()) + ")");
            }

            String requestResult = IOUtils.toString(isStream, StandardCharsets.UTF_8.toString());
            logger.debug("GET URL API: {} returns: {}", url, requestResult);
            return requestResult;
        } catch (Exception e) {
            throw new ElasticsearchHttpException("Caught an exception during execution of URL (" + url
                    + "), exception Message: (" + e + ")");
        } finally {
            if (isStream != null) {
                isStream.close();
            }
            // Release pooled connections — the client was previously leaked.
            client.getConnectionManager().shutdown();
        }
    }

    /**
     * Executes an HTTP PUT of a JSON body with 1s timeouts.
     *
     * @throws ElasticsearchHttpException on any failure or non-200 status
     */
    public static String runHttpPutCommand(String url, String jsonBody) throws IOException {
        DefaultHttpClient client = new DefaultHttpClient();
        InputStream isStream = null;
        try {
            HttpParams httpParameters = new BasicHttpParams();
            int timeoutConnection = 1000;
            int timeoutSocket = 1000;
            HttpConnectionParams.setConnectionTimeout(httpParameters, timeoutConnection);
            HttpConnectionParams.setSoTimeout(httpParameters, timeoutSocket);
            client.setParams(httpParameters);

            HttpPut putRequest = new HttpPut(url);
            putRequest.setEntity(new StringEntity(jsonBody, StandardCharsets.UTF_8));
            putRequest.setHeader("Content-type", "application/json");

            HttpResponse resp = client.execute(putRequest);
            if (resp == null || resp.getEntity() == null) {
                throw new ElasticsearchHttpException("Unable to execute PUT URL (" + url
                        + "), exception message: < Null Response or Null HttpEntity >");
            }

            isStream = resp.getEntity().getContent();
            if (resp.getStatusLine().getStatusCode() != 200) {
                throw new ElasticsearchHttpException("Unable to execute PUT URL (" + url
                        + "), exception message: (" + IOUtils.toString(isStream, StandardCharsets.UTF_8.toString()) + ")");
            }

            String requestResult = IOUtils.toString(isStream, StandardCharsets.UTF_8.toString());
            logger.debug("PUT URL API: {} with JSONBody {} returns: {}", url, jsonBody, requestResult);
            return requestResult;
        } catch (Exception e) {
            throw new ElasticsearchHttpException("Caught an exception during execution of URL (" + url
                    + "), exception message: (" + e + ")");
        } finally {
            if (isStream != null) {
                isStream.close();
            }
            client.getConnectionManager().shutdown();
        }
    }

    /**
     * Executes an HTTP POST of an optional JSON body with 1s timeouts.
     *
     * @throws ElasticsearchHttpException on any failure or non-200 status
     */
    public static String runHttpPostCommand(String url, String jsonBody) throws IOException {
        String return_;
        DefaultHttpClient client = new DefaultHttpClient();
        InputStream isStream = null;
        try {
            HttpParams httpParameters = new BasicHttpParams();
            int timeoutConnection = 1000;
            int timeoutSocket = 1000;
            HttpConnectionParams.setConnectionTimeout(httpParameters, timeoutConnection);
            HttpConnectionParams.setSoTimeout(httpParameters, timeoutSocket);
            client.setParams(httpParameters);

            HttpPost postRequest = new HttpPost(url);
            if (StringUtils.isNotEmpty(jsonBody)) {
                postRequest.setEntity(new StringEntity(jsonBody, StandardCharsets.UTF_8));
            }
            postRequest.setHeader("Content-type", "application/json");

            HttpResponse resp = client.execute(postRequest);
            if (resp == null || resp.getEntity() == null) {
                throw new ElasticsearchHttpException("Unable to execute POST URL (" + url
                        + ") Exception Message: < Null Response or Null HttpEntity >");
            }

            isStream = resp.getEntity().getContent();
            if (resp.getStatusLine().getStatusCode() != 200) {
                throw new ElasticsearchHttpException("Unable to execute POST URL (" + url
                        + ") Exception Message: (" + IOUtils.toString(isStream, StandardCharsets.UTF_8.toString()) + ")");
            }

            return_ = IOUtils.toString(isStream, StandardCharsets.UTF_8.toString());
            logger.debug("POST URL API: {} with JSONBody {} returns: {}", url, jsonBody, return_);
        } catch (Exception e) {
            throw new ElasticsearchHttpException("Caught an exception during execution of URL (" + url
                    + ")Exception Message: (" + e + ")");
        } finally {
            if (isStream != null) {
                isStream.close();
            }
            client.getConnectionManager().shutdown();
        }
        return return_;
    }

    /**
     * delete all the files/dirs in the given Directory but dont delete the dir
     * itself.
     */
    public static void cleanupDir(String dirPath, List<String> childdirs) throws IOException {
        if (childdirs == null || childdirs.size() == 0) {
            FileUtils.cleanDirectory(new File(dirPath));
        } else {
            for (String cdir : childdirs) {
                FileUtils.cleanDirectory(new File(dirPath + "/" + cdir));
            }
        }
    }

    /**
     * Ensures {@code location} exists as a directory, replacing any plain file
     * that currently occupies the path.
     */
    public static void createDirs(String location) {
        File dirFile = new File(location);
        if (dirFile.exists() && dirFile.isFile()) {
            // NOTE(review): delete()/mkdirs() results are intentionally ignored,
            // matching the original best-effort behavior.
            dirFile.delete();
            dirFile.mkdirs();
        } else if (!dirFile.exists()) {
            dirFile.mkdirs();
        }
    }

    /** MD5 digest of the given bytes. */
    public static byte[] md5(byte[] buf) {
        try {
            MessageDigest mdigest = MessageDigest.getInstance("MD5");
            mdigest.update(buf, 0, buf.length);
            return mdigest.digest();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Get a Md5 string which is similar to OS Md5sum
     */
    public static String md5(File file) {
        try {
            HashCode hc = Files.hash(file, Hashing.md5());
            return toHex(hc.asBytes());
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    /** Lowercase hex encoding of the digest, two characters per byte. */
    public static String toHex(byte[] digest) {
        StringBuilder sb = new StringBuilder(digest.length * 2);
        for (byte b : digest) {
            // Masking with 0xff avoids the sign-extension workaround the
            // original needed (substring on 8-char negative-value hex).
            sb.append(String.format("%02x", b & 0xff));
        }
        return sb.toString();
    }

    /** Base64 encoding (no chunking) of the given bytes. */
    public static String toBase64(byte[] md5) {
        byte encoded[] = Base64.encodeBase64(md5, false);
        return new String(encoded);
    }

    /** Formats a Joda DateTime with the given pattern. */
    public static String formatDate(DateTime dateTime, String dateFormat) {
        DateTimeFormatter fmt = DateTimeFormat.forPattern(dateFormat);
        return dateTime.toString(fmt);
    }

    /**
     * Reads security-group IDs for the given MAC from the EC2 metadata service.
     *
     * @throws RuntimeException if the metadata response is empty
     */
    public static String[] getSecurityGroupIds(String MAC_ID) {
        String securityGroupIds = SystemUtils.getDataFromUrl(
                "http://169.254.169.254/latest/meta-data/network/interfaces/macs/" + MAC_ID + "/security-group-ids/").trim();
        if (securityGroupIds.isEmpty()) {
            throw new RuntimeException("Security group ID's are null or empty");
        }
        return securityGroupIds.split("\n");
    }
}
5,110
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/NFException.java
/** * Copyright 2017 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.utils; public class NFException { private final String cfKey; private final String pathName; private final String stacktrace; public NFException(String cfKey, String pathName, String stacktrace) { this.cfKey = cfKey; this.pathName = pathName; this.stacktrace = stacktrace; } public String getCfKey() { return cfKey; } public String getPathName() { return pathName; } public String getStacktrace() { return stacktrace; } }
5,111
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/ElasticsearchTransportClient.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.utils;

import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.InetAddress;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

/**
 * Lazily-created, process-wide wrapper around the Elasticsearch {@link TransportClient}.
 * The client connects to the local node's transport port and is only created once
 * Elasticsearch is confirmed to be running.
 */
@Singleton
public class ElasticsearchTransportClient {
    private static final Logger logger = LoggerFactory.getLogger(ElasticsearchTransportClient.class);

    // Holds the single shared instance; null until the first successful connect.
    private static AtomicReference<ElasticsearchTransportClient> elasticsearchTransportClientAtomicReference =
            new AtomicReference<>(null);

    private final TransportClient client;
    private final NodesStatsRequestBuilder nodeStatsRequestBuilder;

    /**
     * Hostname and port to talk to will be same server for now optionally we might want the IP to poll.
     * NOTE: This class shouldn't be a singleton and this shouldn't be cached.
     * This will work only if Elasticsearch runs.
     */
    private ElasticsearchTransportClient(InetAddress host, IConfiguration configuration) {
        logger.info("Initializing client connection to {}", host.toString());

        Map<String, String> transportClientSettings = new HashMap<>();
        transportClientSettings.put("cluster.name", configuration.getAppName());

        client = new PreBuiltTransportClient(Settings.builder().put(transportClientSettings).build());
        client.addTransportAddress(new InetSocketTransportAddress(host, configuration.getTransportTcpPort()));

        nodeStatsRequestBuilder = client.admin().cluster().prepareNodesStats(configuration.getEsNodeName()).all();
    }

    /**
     * Returns the shared client, creating it on first use.
     *
     * @throws ElasticsearchTransportClientConnectionException if the client cannot be created
     */
    public static ElasticsearchTransportClient instance(IConfiguration configuration)
            throws ElasticsearchTransportClientConnectionException {
        ElasticsearchTransportClient current = elasticsearchTransportClientAtomicReference.get();

        if (current == null) {
            // connect() is synchronized and re-checks the reference, so concurrent
            // first-time callers end up sharing a single TransportClient.
            current = connect(configuration);
        }

        return current;
    }

    /**
     * Fetches node statistics for the configured node.
     *
     * @return the stats response, or {@code null} if the request failed for any reason
     */
    public static NodesStatsResponse getNodesStatsResponse(IConfiguration config) {
        try {
            return ElasticsearchTransportClient.instance(config).getNodeStatsRequestBuilder().execute().actionGet();
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }

        return null;
    }

    private static synchronized ElasticsearchTransportClient connect(final IConfiguration configuration)
            throws ElasticsearchTransportClientConnectionException {
        // Re-check under the class lock: the unsynchronized fast path in instance() can race,
        // and creating two TransportClients would leak the one that loses the race.
        ElasticsearchTransportClient existing = elasticsearchTransportClientAtomicReference.get();
        if (existing != null) {
            return existing;
        }

        // If Elasticsearch is started then only start the monitoring
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            logger.error("Elasticsearch is not yet started");
            throw new ElasticsearchTransportClientConnectionException("Elasticsearch is not yet started");
        }

        try {
            ElasticsearchTransportClient transportClient =
                    new BoundedExponentialRetryCallable<ElasticsearchTransportClient>() {
                        @Override
                        public ElasticsearchTransportClient retriableCall() throws Exception {
                            return new ElasticsearchTransportClient(InetAddress.getLoopbackAddress(), configuration);
                        }
                    }.call();

            elasticsearchTransportClientAtomicReference.set(transportClient);
            return transportClient;
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
            throw new ElasticsearchTransportClientConnectionException(e.getMessage());
        }
    }

    private NodesStatsRequestBuilder getNodeStatsRequestBuilder() {
        return nodeStatsRequestBuilder;
    }

    public Client getTransportClient() {
        return client;
    }
}
5,112
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/FifoQueue.java
/**
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.utils;

import java.util.Comparator;
import java.util.TreeSet;

/**
 * A bounded {@link TreeSet} that evicts its smallest element (per the comparator)
 * once the configured capacity is exceeded.
 *
 * NOTE: despite the name, eviction order follows the sort order, not insertion
 * order, and duplicates (per the comparator) are silently ignored — TreeSet semantics.
 */
public class FifoQueue<E extends Comparable<E>> extends TreeSet<E> {
    private static final long serialVersionUID = -7388604551920505669L;

    private final int capacity;

    /**
     * Creates a queue ordered by the elements' natural ordering.
     *
     * @param capacity maximum number of elements retained
     */
    public FifoQueue(int capacity) {
        // Comparator.naturalOrder() is equivalent to the previous anonymous
        // comparator that delegated to compareTo(), and is serializable.
        super(Comparator.<E>naturalOrder());
        this.capacity = capacity;
    }

    /**
     * Creates a queue ordered by the supplied comparator.
     *
     * @param capacity   maximum number of elements retained
     * @param comparator ordering used for both sorting and eviction
     */
    public FifoQueue(int capacity, Comparator<E> comparator) {
        super(comparator);
        this.capacity = capacity;
    }

    /**
     * Adds the element and, if the capacity is now exceeded, evicts the
     * smallest element per the queue's ordering.
     */
    public synchronized void adjustAndAdd(E e) {
        add(e);

        if (capacity < size()) {
            pollFirst();
        }
    }
}
5,113
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/Sleeper.java
/**
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.utils;

import com.google.inject.ImplementedBy;

/**
 * An abstraction to {@link Thread#sleep(long)} so we can mock it in tests.
 */
@ImplementedBy(ThreadSleeper.class)
public interface Sleeper {
    /**
     * Sleeps for the given duration.
     *
     * @param waitTimeMs time to sleep, in milliseconds
     * @throws InterruptedException if the current thread is interrupted while sleeping
     */
    void sleep(long waitTimeMs) throws InterruptedException;

    /**
     * Sleeps for the given duration without propagating {@link InterruptedException}.
     *
     * @param waitTimeMs time to sleep, in milliseconds
     */
    void sleepQuietly(long waitTimeMs);
}
5,114
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/ElasticsearchUtils.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.utils;

import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.identity.RaigadInstance;
import org.apache.commons.lang.StringUtils;
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.snapshots.SnapshotInfo;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.json.simple.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

/**
 * Static helpers for serializing Raigad instances to/from JSON, answering
 * cluster-master queries, and naming/listing snapshot repositories.
 */
public class ElasticsearchUtils {
    private static final Logger logger = LoggerFactory.getLogger(ElasticsearchUtils.class);

    // JSON field names shared by the serializer and deserializer below
    private static final String HOST_NAME = "host_name";
    private static final String ID = "id";
    private static final String APP_NAME = "app_name";
    private static final String INSTANCE_ID = "instance_id";
    private static final String AVAILABILITY_ZONE = "availability_zone";
    private static final String PUBLIC_IP = "public_ip";
    private static final String DC = "dc";
    private static final String UPDATE_TIME = "update_time";

    private static final String HTTP_TAG = "http://";
    private static final String URL_PORT_SEPARATOR = ":";
    private static final String ELASTICSEARCH_HTTP_PORT = "7104";
    private static final String URL_PATH_SEPARATOR = "/";
    private static final String URL_QUERY_SEPARATOR = "?";
    private static final String REPOSITORY_VERIFICATION_PARAM = "_snapshot";
    private static final String SNAPSHOT_COMPLETION_PARAM = "wait_for_completion=true";
    private static final String DEFAULT_SNAPSHOT_IGNORE_AVAILABLE_PARAM = "true";
    private static final char PATH_SEP = File.separatorChar;

    private static final String S3_REPO_DATE_FORMAT = "yyyyMMdd";
    private static final DateTimeZone currentZone = DateTimeZone.UTC;

    /**
     * Serializes instances as {"instances": {"instance-0": {...}, "instance-1": {...}, ...}}.
     * The index-suffixed keys are what {@link #getRaigadInstancesFromJson(JSONObject)} walks.
     */
    @SuppressWarnings("unchecked")
    public static JSONObject transformRaigadInstanceToJson(List<RaigadInstance> instances) {
        JSONObject esJsonInstances = new JSONObject();

        for (int i = 0; i < instances.size(); i++) {
            RaigadInstance instance = instances.get(i);

            JSONObject jsInstance = new JSONObject();
            jsInstance.put(HOST_NAME, instance.getHostName());
            jsInstance.put(ID, instance.getId());
            jsInstance.put(APP_NAME, instance.getApp());
            jsInstance.put(INSTANCE_ID, instance.getInstanceId());
            jsInstance.put(AVAILABILITY_ZONE, instance.getAvailabilityZone());
            jsInstance.put(PUBLIC_IP, instance.getHostIP());
            jsInstance.put(DC, instance.getDC());
            jsInstance.put(UPDATE_TIME, instance.getUpdatetime());

            esJsonInstances.put("instance-" + i, jsInstance);
        }

        JSONObject allInstances = new JSONObject();
        allInstances.put("instances", esJsonInstances);

        return allInstances;
    }

    /**
     * Inverse of {@link #transformRaigadInstanceToJson(List)}: walks "instance-0",
     * "instance-1", ... until the first missing key.
     */
    public static List<RaigadInstance> getRaigadInstancesFromJson(JSONObject instances) {
        List<RaigadInstance> raigadInstances = new ArrayList<>();

        JSONObject topLevelInstance = (JSONObject) instances.get("instances");

        for (int i = 0; ; i++) {
            if (topLevelInstance.get("instance-" + i) == null) {
                break;
            }

            JSONObject eachInstance = (JSONObject) topLevelInstance.get("instance-" + i);

            // Build RaigadInstance
            RaigadInstance raigadInstance = new RaigadInstance();
            raigadInstance.setApp((String) eachInstance.get(APP_NAME));
            raigadInstance.setAvailabilityZone((String) eachInstance.get(AVAILABILITY_ZONE));
            raigadInstance.setDC((String) eachInstance.get(DC));
            raigadInstance.setHostIP((String) eachInstance.get(PUBLIC_IP));
            raigadInstance.setHostName((String) eachInstance.get(HOST_NAME));
            raigadInstance.setId((String) eachInstance.get(ID));
            raigadInstance.setInstanceId((String) eachInstance.get(INSTANCE_ID));
            raigadInstance.setUpdatetime((Long) eachInstance.get(UPDATE_TIME));

            // Add to the list
            raigadInstances.add(raigadInstance);
        }

        return raigadInstances;
    }

    /**
     * Asks the cluster for its master's IP and compares it against this host's IPs.
     *
     * @return true when this node is the current cluster master
     * @throws Exception if the HTTP call to the master endpoint fails
     */
    public static boolean amIMasterNode(IConfiguration config, HttpModule httpModule) throws Exception {
        String url = httpModule.findMasterNodeURL();
        String response = SystemUtils.runHttpGetCommand(url);

        if (config.isDebugEnabled()) {
            logger.debug("Calling {} returned: {}", url, response);
        }

        response = StringUtils.trim(response);

        // An empty response means we could not determine the master
        if (StringUtils.isEmpty(response)) {
            logger.error("Response from {} is empty", url);
            return false;
        }

        // The node is master when the reported IP matches either of our addresses
        return response.equalsIgnoreCase(config.getHostIP())
                || response.equalsIgnoreCase(config.getHostLocalIP());
    }

    /**
     * Lists the names of all snapshots in the given repository.
     */
    public static List<String> getAvailableSnapshots(Client transportClient, String repositoryName) {
        logger.info("Searching for available snapshots");

        List<String> snapshots = new ArrayList<>();

        GetSnapshotsResponse getSnapshotsResponse = transportClient.admin().cluster()
                .prepareGetSnapshots(repositoryName)
                .get();

        for (SnapshotInfo snapshotInfo : getSnapshotsResponse.getSnapshots()) {
            snapshots.add(snapshotInfo.snapshotId().getName());
        }

        return snapshots;
    }

    /**
     * Repository Name is Today's Date in yyyyMMdd format eg. 20140630
     *
     * @return Repository Name
     */
    public static String getS3RepositoryName() {
        DateTime dateTime = new DateTime();
        DateTime dateTimeGmt = dateTime.withZone(currentZone);
        return formatDate(dateTimeGmt, S3_REPO_DATE_FORMAT);
    }

    /**
     * Formats the given instant using a Joda-Time pattern string.
     */
    public static String formatDate(DateTime dateTime, String dateFormat) {
        DateTimeFormatter fmt = DateTimeFormat.forPattern(dateFormat);
        return dateTime.toString(fmt);
    }
}
5,115
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/ElasticsearchHttpException.java
/** * Copyright 2017 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.utils; import java.io.IOException; public class ElasticsearchHttpException extends IOException { private static final long serialVersionUID = 444L; public ElasticsearchHttpException(String message) { super(message); } public ElasticsearchHttpException(String message, Exception e) { super(message, e); } }
5,116
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/IElasticsearchTuner.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.utils;

import com.google.inject.ImplementedBy;
import com.netflix.raigad.defaultimpl.StandardTuner;

import java.io.IOException;

/**
 * Writes the Elasticsearch configuration (YAML) for this node.
 * Default implementation is {@link StandardTuner}, bound via Guice.
 */
@ImplementedBy(StandardTuner.class)
public interface IElasticsearchTuner {
    /**
     * Rewrites the YAML file at the given location with this node's settings.
     *
     * @param yamlLocation path to the elasticsearch.yml file to update
     * @param hostname     this node's hostname
     * @throws IOException if the file cannot be read or written
     */
    void writeAllProperties(String yamlLocation, String hostname) throws IOException;
}
5,117
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/TribeUtils.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.utils;

import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.yaml.snakeyaml.DumperOptions;
import org.yaml.snakeyaml.Yaml;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Map;

/**
 * Helpers for resolving tribe-node settings from the local elasticsearch.yml.
 */
@Singleton
public class TribeUtils {
    private static final Logger logger = LoggerFactory.getLogger(TribeUtils.class);

    private final IConfiguration config;

    @Inject
    public TribeUtils(IConfiguration config) {
        this.config = config;
    }

    /**
     * Reads the YAML file and returns the cluster name configured for the
     * given tribe ID (key "tribe.&lt;id&gt;.cluster.name").
     *
     * @param tribeId the tribe identifier to look up
     * @return the configured source cluster name, or null if the key is absent
     * @throws FileNotFoundException if the YAML file does not exist
     */
    public String getTribeClusterNameFromId(String tribeId) throws FileNotFoundException {
        DumperOptions options = new DumperOptions();
        options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
        Yaml yaml = new Yaml(options);
        File yamlFile = new File(config.getYamlLocation());

        // Previously the stream was never closed, leaking a file descriptor per call.
        FileInputStream yamlStream = new FileInputStream(yamlFile);
        try {
            Map map = (Map) yaml.load(yamlStream);
            String sourceClusterName = (String) map.get("tribe." + tribeId + ".cluster.name");

            logger.info("Source cluster associated with tribe ID {} is {}", tribeId, sourceClusterName);

            return sourceClusterName;
        } finally {
            try {
                yamlStream.close();
            } catch (IOException e) {
                // close() cannot be propagated without widening the throws clause;
                // a failed close after a successful read is only worth a warning.
                logger.warn("Failed to close {}", yamlFile, e);
            }
        }
    }
}
5,118
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/ThreadSleeper.java
/** * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.utils; /** * Sleeper impl that delegates to Thread.sleep */ public class ThreadSleeper implements Sleeper { @Override public void sleep(long waitTimeMs) throws InterruptedException { Thread.sleep(waitTimeMs); } public void sleepQuietly(long waitTimeMs) { try { sleep(waitTimeMs); } catch (InterruptedException e) { //no-op } } }
5,119
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/RetriableCallable.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.utils;

import org.apache.commons.lang.exception.ExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;

/**
 * A {@link Callable} that retries {@link #retriableCall()} a fixed number of
 * times with a constant delay between attempts. {@link CancellationException}
 * is never retried; the last failure is rethrown once retries are exhausted.
 *
 * @param <T> the result type of {@link #retriableCall()}
 */
public abstract class RetriableCallable<T> implements Callable<T> {
    public static final int DEFAULT_NUMBER_OF_RETRIES = 15;
    public static final long DEFAULT_WAIT_TIME = 100;

    private static final Logger logger = LoggerFactory.getLogger(RetriableCallable.class);

    private int retries;
    private long waitTime;

    public RetriableCallable() {
        this(DEFAULT_NUMBER_OF_RETRIES, DEFAULT_WAIT_TIME);
    }

    public RetriableCallable(int retries, long waitTime) {
        set(retries, waitTime);
    }

    /**
     * Reconfigures the retry budget and inter-attempt delay.
     *
     * @param retries  maximum number of attempts before the exception is rethrown
     * @param waitTime delay between attempts, in milliseconds
     */
    public void set(int retries, long waitTime) {
        this.retries = retries;
        this.waitTime = waitTime;
    }

    /**
     * The operation to attempt; invoked once per retry.
     */
    public abstract T retriableCall() throws Exception;

    public T call() throws Exception {
        int retry = 0;
        int logCounter = 0;

        while (true) {
            try {
                return retriableCall();
            } catch (CancellationException e) {
                // Cancellation is deliberate — never retry it
                throw e;
            } catch (Exception e) {
                retry++;

                if (retry >= retries) {
                    throw e;
                }

                // Parameterized logging instead of String.format: no formatting
                // cost when the level is disabled.
                logger.error("Retry #{} for: {}", retry, e.getMessage());

                // Log the full stack trace only for the first failure to avoid log spam
                if (++logCounter == 1) {
                    logger.error("Exception: {}", ExceptionUtils.getFullStackTrace(e));
                }

                Thread.sleep(waitTime);
            } finally {
                forEachExecution();
            }
        }
    }

    /**
     * Hook invoked after every attempt (successful or not); no-op by default.
     */
    public void forEachExecution() {
        // Do nothing by default
    }
}
5,120
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/ExponentialRetryCallable.java
/**
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.utils;

import java.util.concurrent.CancellationException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Retries {@link #retriableCall()} with exponentially growing delays, starting
 * at {@code min} ms and doubling after each failure; the last exception is
 * rethrown once the next delay would exceed {@code max}.
 *
 * @param <T> the result type of {@link #retriableCall()}
 */
public abstract class ExponentialRetryCallable<T> extends RetriableCallable<T> {
    public final static long MAX_SLEEP = 240000;
    public final static long MIN_SLEEP = 200;

    // Fixed copy-paste bug: the logger previously pointed at RetriableCallable.class,
    // mis-attributing every log line from this class.
    private static final Logger logger = LoggerFactory.getLogger(ExponentialRetryCallable.class);

    private long max;
    private long min;

    public ExponentialRetryCallable() {
        this.max = MAX_SLEEP;
        this.min = MIN_SLEEP;
    }

    public ExponentialRetryCallable(long minSleep, long maxSleep) {
        this.max = maxSleep;
        this.min = minSleep;
    }

    public T call() throws Exception {
        long delay = min; // current backoff, in ms

        while (true) {
            try {
                return retriableCall();
            } catch (CancellationException e) {
                // Cancellation is deliberate — never retry it
                throw e;
            } catch (Exception e) {
                // Double before the bound check, so the first sleep is 2 * min
                delay *= 2;

                if (delay > max) {
                    throw e;
                }

                logger.error(e.getMessage());
                Thread.sleep(delay);
            } finally {
                forEachExecution();
            }
        }
    }
}
5,121
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/BoundedExponentialRetryCallable.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.utils;

import org.apache.commons.lang.exception.ExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.CancellationException;

/**
 * Retries {@link #retriableCall()} with exponential backoff that is bounded in
 * both delay and attempt count: the delay doubles from {@code min} until it
 * reaches {@code max}, then stays at {@code max} until {@code maxRetries}
 * attempts have failed, at which point the last exception is rethrown.
 * {@link CancellationException} is never retried.
 *
 * @param <T> the result type of {@link #retriableCall()}
 */
public abstract class BoundedExponentialRetryCallable<T> extends RetriableCallable<T> {
    public final static long MAX_SLEEP = 10000;
    public final static long MIN_SLEEP = 1000;
    public final static int MAX_RETRIES = 10;

    private static final Logger logger = LoggerFactory.getLogger(BoundedExponentialRetryCallable.class);

    private long max;
    private long min;
    private int maxRetries;
    private final ThreadSleeper sleeper = new ThreadSleeper();

    public BoundedExponentialRetryCallable() {
        this.max = MAX_SLEEP;
        this.min = MIN_SLEEP;
        this.maxRetries = MAX_RETRIES;
    }

    public BoundedExponentialRetryCallable(long minSleep, long maxSleep, int maxNumRetries) {
        this.max = maxSleep;
        this.min = minSleep;
        this.maxRetries = maxNumRetries;
    }

    public T call() throws Exception {
        long delay = min; // current backoff, in ms
        int retry = 0;
        int logCounter = 0;

        while (true) {
            try {
                return retriableCall();
            } catch (CancellationException e) {
                // Cancellation is deliberate — never retry it
                throw e;
            } catch (Exception e) {
                retry++;

                // Growth phase: delay is doubled BEFORE sleeping, so the first
                // wait is 2 * min and the last grown wait may slightly exceed max.
                if (delay < max && retry <= maxRetries) {
                    delay *= 2;
                    logger.error(String.format("Retry #%d for: %s", retry, e.getMessage()));

                    // Full stack trace only on the first failure, to limit log spam
                    if (++logCounter == 1) {
                        logger.info("Exception --> " + ExceptionUtils.getFullStackTrace(e));
                    }

                    sleeper.sleep(delay);
                } else if (delay >= max && retry <= maxRetries) {
                    // Saturated phase: delay stays pinned at max until retries run out
                    logger.error(String.format("Retry #%d for: %s", retry, ExceptionUtils.getFullStackTrace(e)));
                    sleeper.sleep(max);
                } else {
                    // Retry budget exhausted — surface the last failure
                    throw e;
                }
            } finally {
                forEachExecution();
            }
        }
    }
}
5,122
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/ElasticsearchProcessMonitor.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.utils;

import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.concurrent.atomic.AtomicBoolean;

/*
 * This task checks if the Elasticsearch process is running.
 */
@Singleton
public class ElasticsearchProcessMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(ElasticsearchProcessMonitor.class);

    public static final String JOB_NAME = "ES_MONITOR_THREAD";

    // Current liveness of the Elasticsearch process, refreshed on every run
    static final AtomicBoolean isElasticsearchRunningNow = new AtomicBoolean(false);
    // Latches to true the first time the process is ever observed running
    static final AtomicBoolean wasElasticsearchStarted = new AtomicBoolean(false);

    @Inject
    protected ElasticsearchProcessMonitor(IConfiguration config) {
        super(config);
    }

    @Override
    public void execute() throws Exception {
        checkElasticsearchProcess(config.getElasticsearchProcessName());
    }

    @Override
    public String getName() {
        return JOB_NAME;
    }

    /** Test seam: lets unit tests substitute the runtime used to spawn pgrep. */
    Runtime getRuntime() {
        return Runtime.getRuntime();
    }

    /** Reads and trims the first line of the stream; null-safe via StringUtils. */
    String getFirstLine(InputStream inputStream) throws IOException {
        BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream));
        return StringUtils.trim(bufferedReader.readLine());
    }

    /**
     * Runs {@code pgrep -f <name>} and updates the liveness flags based on
     * whether any PID was printed. Any failure is treated as "not running".
     */
    void checkElasticsearchProcess(String elasticsearchProcessName) throws Exception {
        Process pgrepProcess = null;

        try {
            // Use the String[] overload so a process name containing spaces is
            // passed as a single argument instead of being re-tokenized.
            pgrepProcess = getRuntime().exec(new String[] {"pgrep", "-f", elasticsearchProcessName});

            // pgrep prints a PID when a matching process exists
            try (InputStream processInputStream = pgrepProcess.getInputStream()) {
                String line = getFirstLine(processInputStream);

                if (StringUtils.isNotEmpty(line) && !isElasticsearchRunning()) {
                    isElasticsearchRunningNow.set(true);
                    wasElasticsearchStarted.set(true);
                } else if (StringUtils.isEmpty(line) && isElasticsearchRunning()) {
                    isElasticsearchRunningNow.set(false);
                }
            }
        } catch (Exception e) {
            logger.warn("Exception checking if process is running", e);
            isElasticsearchRunningNow.set(false);
        } finally {
            if (pgrepProcess != null) {
                pgrepProcess.destroyForcibly();
            }
        }
    }

    public static TaskTimer getTimer() {
        return new SimpleTimer(JOB_NAME, 10L * 1000);
    }

    public static Boolean isElasticsearchRunning() {
        return isElasticsearchRunningNow.get();
    }

    public static Boolean getWasElasticsearchStarted() {
        return wasElasticsearchStarted.get();
    }
}
5,123
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/utils/HttpModule.java
/** * Copyright 2017 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.utils; import com.google.inject.Inject; import com.google.inject.Singleton; import com.netflix.raigad.configuration.IConfiguration; @Singleton public class HttpModule { private static final String HTTP_TAG = "http://"; private static final String LOCAL_HOST = "127.0.0.1"; private static final String URL_PORT_SEPARATOR = ":"; private static final String URL_PATH_SEPARATOR = "/"; private static final String MASTER_NODE_SUFFIX = "/_cat/master?h=ip"; private static final String SNAPSHOT_BKP_KEYWORD = "/_snapshot/"; private static final String SNAPSHOT_BKP_WAIT_FOR_COMPLETION_TAG = "?wait_for_completion="; private final IConfiguration config; @Inject public HttpModule(IConfiguration config) { this.config = config; } public String findMasterNodeURL() { StringBuilder builder = new StringBuilder(); builder.append(HTTP_TAG); builder.append(LOCAL_HOST); builder.append(URL_PORT_SEPARATOR); builder.append(config.getHttpPort()); builder.append(MASTER_NODE_SUFFIX); return builder.toString(); } public String runSnapshotBackupURL(String repositoryName, String snapshotName) { StringBuilder builder = new StringBuilder(); builder.append(HTTP_TAG); builder.append(LOCAL_HOST); builder.append(URL_PORT_SEPARATOR); builder.append(config.getHttpPort()); builder.append(SNAPSHOT_BKP_KEYWORD); builder.append(repositoryName); builder.append(URL_PATH_SEPARATOR); 
builder.append(snapshotName); builder.append(SNAPSHOT_BKP_WAIT_FOR_COMPLETION_TAG); builder.append(config.waitForCompletionOfBackup()); return builder.toString(); } }
5,124
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/defaultimpl/ElasticsearchProcessManager.java
/**
 * Copyright 2018 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.defaultimpl;

import com.google.inject.Inject;
import com.netflix.raigad.configuration.IConfiguration;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.TimeUnit;

/**
 * Starts and stops the Elasticsearch server by executing the scripts
 * configured in {@link IConfiguration}.
 */
public class ElasticsearchProcessManager implements IElasticsearchProcess {
    private static final Logger logger = LoggerFactory.getLogger(ElasticsearchProcessManager.class);
    private static final int SCRIPT_EXECUTE_WAIT_TIME_MS = 5000;

    private final IConfiguration config;

    @Inject
    public ElasticsearchProcessManager(IConfiguration config) {
        this.config = config;
    }

    /** Startup script split into argv tokens; empty when not configured. */
    String[] getStartupCommand() {
        return StringUtils.split(StringUtils.trimToEmpty(config.getElasticsearchStartupScript()), ' ');
    }

    /** Stop script split into argv tokens; empty when not configured. */
    String[] getStopCommand() {
        return StringUtils.split(StringUtils.trimToEmpty(config.getElasticsearchStopScript()), ' ');
    }

    /**
     * Runs the command, waits up to {@link #SCRIPT_EXECUTE_WAIT_TIME_MS} ms for
     * it to finish, and logs the outcome. Never throws.
     */
    void runCommand(String[] command) {
        Process process = null;
        String commandLine = StringUtils.join(command, ' ');

        try {
            ProcessBuilder processBuilder = new ProcessBuilder(command).redirectErrorStream(true);
            process = processBuilder.start();

            // waitFor(timeout, unit) returns false on timeout; calling exitValue()
            // on a still-running process would throw IllegalThreadStateException.
            boolean finished = process.waitFor(SCRIPT_EXECUTE_WAIT_TIME_MS, TimeUnit.MILLISECONDS);
            if (!finished) {
                logger.error("Timed out after {} ms waiting for {}", SCRIPT_EXECUTE_WAIT_TIME_MS, commandLine);
                return;
            }

            int exitCode = process.exitValue();
            if (exitCode == 0) {
                logger.info(String.format("Successfully executed %s", commandLine));
            } else {
                logger.error(String.format("Error executing %s, exited with code %d", commandLine, exitCode));
            }
        } catch (Exception e) {
            logger.error(String.format("Exception executing %s", commandLine), e);
        } finally {
            if (process != null) {
                process.destroyForcibly();
            }
        }
    }

    public void start() {
        logger.info("Starting Elasticsearch server");

        String[] startupCommand = getStartupCommand();
        if (startupCommand == null || startupCommand.length == 0) {
            logger.warn("Elasticsearch startup command was not specified");
            return;
        }

        runCommand(startupCommand);
    }

    public void stop() {
        logger.info("Stopping Elasticsearch server");

        String[] stopCommand = getStopCommand();
        if (stopCommand == null || stopCommand.length == 0) {
            logger.warn("Elasticsearch stop command was not specified");
            return;
        }

        runCommand(stopCommand);
    }

    /** Logs the process's combined stdout/stderr (error stream is redirected). */
    void logProcessOutput(Process process) {
        InputStream inputStream = null;
        try {
            inputStream = process.getInputStream();
            final String processOutputStream = readProcessStream(inputStream);
            logger.info("Standard/Error out: {}", processOutputStream);
        } catch (IOException e) {
            logger.warn("Failed to read the standard/error output stream", e);
        } finally {
            if (inputStream != null) {
                try {
                    inputStream.close();
                } catch (IOException e) {
                    logger.warn("Failed to close the standard/error output stream", e);
                }
            }
        }
    }

    /** Drains the stream into a String using the platform default charset. */
    private String readProcessStream(InputStream inputStream) throws IOException {
        final byte[] buffer = new byte[512];
        final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(buffer.length);
        int count;
        while ((count = inputStream.read(buffer)) != -1) {
            byteArrayOutputStream.write(buffer, 0, count);
        }
        return byteArrayOutputStream.toString();
    }
}
5,125
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/defaultimpl/IElasticsearchProcess.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.defaultimpl;

import com.google.inject.ImplementedBy;

import java.io.IOException;

/**
 * Interface to aid in starting and stopping Elasticsearch.
 * <p>
 * The default Guice binding is {@link ElasticsearchProcessManager}, which
 * delegates to externally configured startup/stop scripts.
 */
@ImplementedBy(ElasticsearchProcessManager.class)
public interface IElasticsearchProcess {
    /**
     * Starts the Elasticsearch server process.
     *
     * @throws IOException if the process could not be launched
     */
    void start() throws IOException;

    /**
     * Stops the Elasticsearch server process.
     *
     * @throws IOException if the stop command could not be executed
     */
    void stop() throws IOException;
}
5,126
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/defaultimpl/StandardTuner.java
/** * Copyright 2017 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.defaultimpl; import com.google.inject.Inject; import com.netflix.raigad.configuration.IConfiguration; import com.netflix.raigad.utils.IElasticsearchTuner; import org.apache.commons.lang.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.yaml.snakeyaml.DumperOptions; import org.yaml.snakeyaml.Yaml; import java.io.File; import java.io.FileInputStream; import java.io.FileWriter; import java.io.IOException; import java.util.*; public class StandardTuner implements IElasticsearchTuner { private static final Logger logger = LoggerFactory.getLogger(StandardTuner.class); private static final String COMMA_SEPARATOR = ","; private static final String PARAM_SEPARATOR = "="; protected final IConfiguration config; @Inject public StandardTuner(IConfiguration config) { this.config = config; } @SuppressWarnings({"unchecked", "rawtypes"}) public void writeAllProperties(String yamlLocation, String hostname) throws IOException { logger.info("Using configuration of type [{}]", config.getClass()); DumperOptions options = new DumperOptions(); options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); Yaml yaml = new Yaml(options); File yamlFile = new File(yamlLocation); Map map = (Map) yaml.load(new FileInputStream(yamlFile)); map.put("cluster.name", config.getAppName()); map.put("node.name", config.getEsNodeName()); map.put("http.port", 
config.getHttpPort()); map.put("path.data", config.getDataFileLocation()); map.put("path.logs", config.getLogFileLocation()); if (config.isVPCExternal()) { map.put("network.publish_host", config.getHostIP()); map.put("http.publish_host", config.getHostname()); } else { map.put("network.publish_host", "_global_"); } if (config.isKibanaSetupRequired()) { map.put("http.cors.enabled", true); map.put("http.cors.allow-origin", "*"); } if (config.amITribeNode()) { String clusterParams = config.getCommaSeparatedSourceClustersForTribeNode(); assert (clusterParams != null) : "Source clusters for tribe nodes cannot be null"; String[] clusters = StringUtils.split(clusterParams, COMMA_SEPARATOR); assert (clusters.length != 0) : "At least one source cluster is needed"; List<Integer> tribePorts = new ArrayList<>(); tribePorts.add(config.getTransportTcpPort()); // Common settings for (int i = 0; i < clusters.length; i++) { String[] clusterNameAndPort = clusters[i].split(PARAM_SEPARATOR); assert (clusterNameAndPort.length != 2) : "Cluster name or transport port is missing in configuration"; assert (StringUtils.isNumeric(clusterNameAndPort[1])) : "Source tribe cluster port is invalid"; map.put("tribe.t" + i + ".cluster.name", clusterNameAndPort[0]); map.put("tribe.t" + i + ".transport.tcp.port", Integer.parseInt(clusterNameAndPort[1])); map.put("tribe.t" + i + ".discovery.zen.hosts_provider", config.getElasticsearchDiscoveryType()); map.put("tribe.t" + i + ".network.host", "_global_"); logger.info("Adding cluster [{}:{}]", clusterNameAndPort[0], clusterNameAndPort[1]); tribePorts.add(Integer.valueOf(clusterNameAndPort[1])); } Collections.sort(tribePorts); String transportPortRange = String.format("%d-%d", tribePorts.get(0), tribePorts.get(tribePorts.size() - 1)); logger.info("Setting tribe transport port range to {}", transportPortRange); // Adding port range to include tribe cluster port as well as transport for each source cluster map.put("transport.tcp.port", transportPortRange); 
map.put("node.master", false); map.put("node.data", false); if (config.amIWriteEnabledTribeNode()) { map.put("tribe.blocks.write", false); } else { map.put("tribe.blocks.write", true); } if (config.amIMetadataEnabledTribeNode()) { map.put("tribe.blocks.metadata", false); } else { map.put("tribe.blocks.metadata", true); } map.put("tribe.on_conflict", "prefer_" + config.getTribePreferredClusterIdOnConflict()); } else { map.put("transport.tcp.port", config.getTransportTcpPort()); map.put("discovery.zen.hosts_provider", config.getElasticsearchDiscoveryType()); map.put("discovery.zen.minimum_master_nodes", config.getMinimumMasterNodes()); // NOTE: When using awareness attributes, shards will not be allocated to nodes that // do not have values set for those attributes. Important in dedicated master nodes deployment map.put("cluster.routing.allocation.awareness.attributes", config.getClusterRoutingAttributes()); if (config.isShardPerNodeEnabled()) { map.put("cluster.routing.allocation.total_shards_per_node", config.getTotalShardsPerNode()); } if (config.isMultiDC()) { map.put("node.attr.rack_id", config.getDC()); } else { map.put("node.attr.rack_id", config.getRac()); } if (config.isAsgBasedDedicatedDeployment()) { if ("master".equalsIgnoreCase(config.getStackName())) { map.put("node.master", true); map.put("node.data", false); map.put("node.ingest", false); } else if ("data".equalsIgnoreCase(config.getStackName())) { map.put("node.master", false); map.put("node.data", true); map.put("node.ingest", false); } else if ("search".equalsIgnoreCase(config.getStackName())) { map.put("node.master", false); map.put("node.data", false); map.put("node.ingest", true); } else { map.put("node.master", false); map.put("node.data", false); map.put("node.ingest", false); } } } addExtraEsParams(map); logger.info(yaml.dump(map)); yaml.dump(map, new FileWriter(yamlFile)); } public void addExtraEsParams(Map map) { String extraConfigParams = config.getExtraConfigParams(); if 
(extraConfigParams == null) { logger.info("Updating elasticsearch.yml: no extra parameters"); return; } String[] pairs = extraConfigParams.trim().split(COMMA_SEPARATOR); logger.info("Updating elasticsearch.yml: adding extra parameters"); for (String pair : pairs) { String[] keyValue = pair.trim().split(PARAM_SEPARATOR); String raigadKey = keyValue[0].trim(); String esKey = keyValue[1].trim(); String esValue = config.getEsKeyName(raigadKey); logger.info("Updating YAML: Raigad key [{}], Elasticsearch key [{}], value [{}]", raigadKey, esKey, esValue); if (raigadKey == null || esKey == null || esValue == null) { logger.error("One of the extra keys or values is null, skipping..."); continue; } map.put(esKey, esValue); } } }
5,127
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/defaultimpl/ElasticsearchInstance.java
/** * Copyright 2017 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.defaultimpl; import java.io.Serializable; public class ElasticsearchInstance implements Serializable { private static final long serialVersionUID = 5606412386974488659L; private String hostname; private long updatetime; private int Id; private String cluster; private String instanceId; private String availabilityZone; private String publicip; private String region; public int getId() { return Id; } public void setId(int id) { Id = id; } public String getCluster() { return cluster; } public ElasticsearchInstance setCluster(String cluster) { this.cluster = cluster; return this; } public String getInstanceId() { return instanceId; } public ElasticsearchInstance setInstanceId(String instanceId) { this.instanceId = instanceId; return this; } public String getAvailabilityZone() { return availabilityZone; } public ElasticsearchInstance setAvailabilityZone(String availabilityZone) { this.availabilityZone = availabilityZone; return this; } public String getHostName() { return hostname; } public String getHostIP() { return publicip; } public ElasticsearchInstance setHostName(String hostname) { this.hostname = hostname; return this; } public ElasticsearchInstance setHostIP(String publicip) { this.publicip = publicip; return this; } @Override public String toString() { return String .format("Hostname: %s, InstanceId: %s, Cluster_: %s, Availability Zone : %s Region %s", 
getHostName(), getInstanceId(), getCluster(), getAvailabilityZone(), getRegion()); } public String getRegion() { return region; } public ElasticsearchInstance setRegion(String location) { this.region = location; return this; } public long getUpdatetime() { return updatetime; } public void setUpdatetime(long updatetime) { this.updatetime = updatetime; } }
5,128
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/aws/ClearCredential.java
/** * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.aws; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.FileInputStream; import java.io.IOException; import java.util.Properties; /** * This is a basic implementation of ICredentials. User should prefer to * implement their own versions for more secured access. This class requires * clear AWS key and access. * * Set the following properties in "conf/awscredntial.properties" * */ public class ClearCredential implements ICredential { private static final Logger logger = LoggerFactory.getLogger(ClearCredential.class); private static final String CRED_FILE = "/etc/awscredential.properties"; private final Properties props; private final String AWS_ACCESS_ID; private final String AWS_KEY; public ClearCredential() { FileInputStream fis = null; try { fis = new FileInputStream(CRED_FILE); props = new Properties(); props.load(fis); AWS_ACCESS_ID = props.getProperty("AWSACCESSID") != null ? props.getProperty("AWSACCESSID").trim() : ""; AWS_KEY = props.getProperty("AWSKEY") != null ? props.getProperty("AWSKEY").trim() : ""; } catch (Exception e) { logger.error("Exception with credential file ", e); throw new RuntimeException("Problem reading credential file. 
Cannot start.", e); } finally { try { fis.close(); } catch (IOException e) { e.printStackTrace(); } } } public String getAccessKeyId() { return AWS_ACCESS_ID; } public String getSecretAccessKey() { return AWS_KEY; } public AWSCredentials getCredentials() { return new BasicAWSCredentials(getAccessKeyId(), getSecretAccessKey()); } @Override public AWSCredentialsProvider getAwsCredentialProvider() { return new AWSCredentialsProvider(){ public AWSCredentials getCredentials(){ return ClearCredential.this.getCredentials(); } @Override public void refresh() { // NOP } }; } }
5,129
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/aws/AWSMembership.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.aws;

import com.amazonaws.services.autoscaling.AmazonAutoScaling;
import com.amazonaws.services.autoscaling.AmazonAutoScalingClient;
import com.amazonaws.services.autoscaling.model.*;
import com.amazonaws.services.autoscaling.model.Instance;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2Client;
import com.amazonaws.services.ec2.model.*;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.identity.IMembership;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;

/**
 * Class to query amazon ASG for its members to provide - Number of valid nodes
 * in the ASG - Number of zones - Methods for adding ACLs for the nodes
 * <p>
 * Each method creates a fresh AWS client, uses it, and shuts it down in a
 * finally block; endpoints are built from {@code config.getDC()}.
 */
public class AWSMembership implements IMembership {
    private static final Logger logger = LoggerFactory.getLogger(AWSMembership.class);

    private final IConfiguration config;
    private final ICredential provider;

    @Inject
    public AWSMembership(IConfiguration config, ICredential provider) {
        this.config = config;
        this.provider = provider;
    }

    /**
     * Maps each requested ASG name to the instance IDs of its members,
     * excluding instances that are terminating, shutting down, or terminated.
     * Returns an empty map when no ASG names are given.
     */
    @Override
    public Map<String, List<String>> getRacMembership(Collection<String> autoScalingGroupNames) {
        if (CollectionUtils.isEmpty(autoScalingGroupNames)) {
            return Collections.emptyMap();
        }

        AmazonAutoScaling client = null;
        try {
            client = getAutoScalingClient();

            DescribeAutoScalingGroupsRequest describeAutoScalingGroupsRequest =
                    new DescribeAutoScalingGroupsRequest().withAutoScalingGroupNames(autoScalingGroupNames);
            DescribeAutoScalingGroupsResult describeAutoScalingGroupsResult =
                    client.describeAutoScalingGroups(describeAutoScalingGroupsRequest);

            Map<String, List<String>> asgs = new HashMap<>();

            for (AutoScalingGroup autoScalingGroup : describeAutoScalingGroupsResult.getAutoScalingGroups()) {
                List<String> asgInstanceIds = Lists.newArrayList();

                for (Instance asgInstance : autoScalingGroup.getInstances()) {
                    // Skip instances on their way out of the ASG
                    if (!(asgInstance.getLifecycleState().equalsIgnoreCase("terminating") ||
                            asgInstance.getLifecycleState().equalsIgnoreCase("shutting-down") ||
                            asgInstance.getLifecycleState().equalsIgnoreCase("terminated"))) {
                        asgInstanceIds.add(asgInstance.getInstanceId());
                    }
                }

                asgs.put(autoScalingGroup.getAutoScalingGroupName(), asgInstanceIds);

                logger.info("AWS returned the following instance ID's for {} ASG: {}",
                        autoScalingGroup.getAutoScalingGroupName(),
                        StringUtils.join(asgInstanceIds, ","));
            }

            return asgs;
        } finally {
            if (client != null) {
                client.shutdown();
            }
        }
    }

    /**
     * Actual membership AWS source of truth...
     * Sums the maximum sizes of the ASG(s) matching the configured ASG name.
     */
    @Override
    public int getRacMembershipSize() {
        AmazonAutoScaling client = null;
        try {
            client = getAutoScalingClient();

            DescribeAutoScalingGroupsRequest asgReq =
                    new DescribeAutoScalingGroupsRequest().withAutoScalingGroupNames(config.getASGName());
            DescribeAutoScalingGroupsResult res = client.describeAutoScalingGroups(asgReq);

            int size = 0;
            for (AutoScalingGroup asg : res.getAutoScalingGroups()) {
                size += asg.getMaxSize();
            }

            logger.info(String.format("Query on ASG returning %d instances", size));
            return size;
        } finally {
            if (client != null) {
                client.shutdown();
            }
        }
    }

    /** Number of racs (availability zones) from configuration; no AWS call. */
    @Override
    public int getRacCount() {
        return config.getRacs().size();
    }

    /**
     * Adds a list of IP's to the SG
     * <p>
     * Opens TCP ports {@code from}-{@code to} for the given CIDR ranges on the
     * VPC security group (by ID) or the classic security group (by name).
     */
    public void addACL(Collection<String> listIPs, int from, int to) {
        AmazonEC2 client = null;
        try {
            client = getEc2Client();

            List<IpPermission> ipPermissions = new ArrayList<IpPermission>();
            ipPermissions.add(new IpPermission().withFromPort(from).withIpProtocol("tcp").withIpRanges(listIPs).withToPort(to));

            if (config.isDeployedInVPC()) {
                // VPC rules must be addressed by group ID, resolved earlier by SetVPCSecurityGroupID
                if (config.getACLGroupIdForVPC().isEmpty()) {
                    throw new RuntimeException("ACLGroupIdForVPC cannot be empty, check if SetVPCSecurityGroupID had any errors");
                }
                client.authorizeSecurityGroupIngress(
                        new AuthorizeSecurityGroupIngressRequest()
                                .withGroupId(config.getACLGroupIdForVPC())
                                .withIpPermissions(ipPermissions));
            } else {
                client.authorizeSecurityGroupIngress(
                        new AuthorizeSecurityGroupIngressRequest(config.getACLGroupName(), ipPermissions));
            }

            logger.info("Added " + StringUtils.join(listIPs, ",") + " to ACL");
        } finally {
            if (client != null) {
                client.shutdown();
            }
        }
    }

    /**
     * Removes a list of IP's from the SG
     * <p>
     * Revokes the TCP {@code from}-{@code to} ingress rule for the given CIDR
     * ranges; group addressed by ID in VPC mode, by name otherwise.
     */
    public void removeACL(Collection<String> listIPs, int from, int to) {
        AmazonEC2 client = null;
        try {
            client = getEc2Client();

            List<IpPermission> ipPermissions = new ArrayList<IpPermission>();
            ipPermissions.add(new IpPermission().withFromPort(from).withIpProtocol("tcp").withIpRanges(listIPs).withToPort(to));

            if (config.isDeployedInVPC()) {
                if (config.getACLGroupIdForVPC().isEmpty()) {
                    throw new RuntimeException("ACLGroupIdForVPC cannot be empty, check if SetVPCSecurityGroupID had any errors");
                }
                client.revokeSecurityGroupIngress(
                        new RevokeSecurityGroupIngressRequest()
                                .withGroupId(config.getACLGroupIdForVPC())
                                .withIpPermissions(ipPermissions));
            } else {
                client.revokeSecurityGroupIngress(
                        new RevokeSecurityGroupIngressRequest(config.getACLGroupName(), ipPermissions));
            }

            logger.info("Removed " + StringUtils.join(listIPs, ",") + " from ACL");
        } finally {
            if (client != null) {
                client.shutdown();
            }
        }
    }

    /**
     * List SG ACL's
     * <p>
     * Returns the CIDR ranges of every ingress rule whose from/to ports match
     * exactly the given ports.
     * NOTE(review): getFromPort()/getToPort() return Integer and are presumably
     * null for all-protocol rules; unboxing here would NPE in that case - TODO confirm.
     */
    public List<String> listACL(int from, int to) {
        AmazonEC2 client = null;
        try {
            client = getEc2Client();

            List<String> ipPermissions = new ArrayList<String>();
            DescribeSecurityGroupsResult result;

            if (config.isDeployedInVPC()) {
                if (config.getACLGroupIdForVPC().isEmpty()) {
                    throw new RuntimeException("ACLGroupIdForVPC cannot be empty, check if SetVPCSecurityGroupID had any errors");
                }
                DescribeSecurityGroupsRequest describeSecurityGroupsRequest =
                        new DescribeSecurityGroupsRequest().withGroupIds(config.getACLGroupIdForVPC());
                result = client.describeSecurityGroups(describeSecurityGroupsRequest);
            } else {
                DescribeSecurityGroupsRequest describeSecurityGroupsRequest =
                        new DescribeSecurityGroupsRequest().withGroupNames(Arrays.asList(config.getACLGroupName()));
                result = client.describeSecurityGroups(describeSecurityGroupsRequest);
            }

            for (SecurityGroup group : result.getSecurityGroups()) {
                for (IpPermission perm : group.getIpPermissions()) {
                    if (perm.getFromPort() == from && perm.getToPort() == to) {
                        ipPermissions.addAll(perm.getIpRanges());
                    }
                }
            }

            return ipPermissions;
        } finally {
            if (client != null) {
                client.shutdown();
            }
        }
    }

    /**
     * For the given CIDR range {@code acl}, returns a map from that range to a
     * two-element list [fromPort, toPort] of the matching ingress rule(s).
     * Empty map when no rule references the range.
     */
    public Map<String, List<Integer>> getACLPortMap(String acl) {
        AmazonEC2 client = null;
        Map<String, List<Integer>> aclPortMap = new HashMap<String, List<Integer>>();
        try {
            client = getEc2Client();

            DescribeSecurityGroupsResult result;

            if (config.isDeployedInVPC()) {
                if (config.getACLGroupIdForVPC().isEmpty()) {
                    throw new RuntimeException("ACLGroupIdForVPC cannot be empty, check if SetVPCSecurityGroupID had any errors");
                }
                DescribeSecurityGroupsRequest describeSecurityGroupsRequest =
                        new DescribeSecurityGroupsRequest().withGroupIds(config.getACLGroupIdForVPC());
                result = client.describeSecurityGroups(describeSecurityGroupsRequest);
            } else {
                DescribeSecurityGroupsRequest describeSecurityGroupsRequest =
                        new DescribeSecurityGroupsRequest().withGroupNames(Arrays.asList(config.getACLGroupName()));
                result = client.describeSecurityGroups(describeSecurityGroupsRequest);
            }

            for (SecurityGroup group : result.getSecurityGroups()) {
                for (IpPermission perm : group.getIpPermissions()) {
                    for (String ipRange : perm.getIpRanges()) {
                        // If given ACL matches from the list of IP ranges then look for "from" and "to" ports
                        if (acl.equalsIgnoreCase(ipRange)) {
                            List<Integer> fromToList = new ArrayList<Integer>();
                            fromToList.add(perm.getFromPort());
                            fromToList.add(perm.getToPort());

                            logger.info("ACL: {}, from: {}, to: {}", acl, perm.getFromPort(), perm.getToPort());
                            aclPortMap.put(acl, fromToList);
                        }
                    }
                }
            }

            return aclPortMap;
        } finally {
            if (client != null) {
                client.shutdown();
            }
        }
    }

    /**
     * Grows the first matching ASG by one instance.
     * NOTE(review): min, max and desired are all set to minSize + 1; the
     * {@code count} parameter is ignored - presumably only single-step growth
     * was ever needed. TODO confirm intended semantics.
     */
    @Override
    public void expandRacMembership(int count) {
        AmazonAutoScaling client = null;
        try {
            client = getAutoScalingClient();

            DescribeAutoScalingGroupsRequest asgReq =
                    new DescribeAutoScalingGroupsRequest().withAutoScalingGroupNames(config.getASGName());
            DescribeAutoScalingGroupsResult res = client.describeAutoScalingGroups(asgReq);
            AutoScalingGroup asg = res.getAutoScalingGroups().get(0);

            UpdateAutoScalingGroupRequest ureq = new UpdateAutoScalingGroupRequest();
            ureq.setAutoScalingGroupName(asg.getAutoScalingGroupName());
            ureq.setMinSize(asg.getMinSize() + 1);
            ureq.setMaxSize(asg.getMinSize() + 1);
            ureq.setDesiredCapacity(asg.getMinSize() + 1);
            client.updateAutoScalingGroup(ureq);
        } finally {
            if (client != null) {
                client.shutdown();
            }
        }
    }

    /** New auto-scaling client pointed at this DC's regional endpoint. Caller must shut it down. */
    protected AmazonAutoScaling getAutoScalingClient() {
        AmazonAutoScaling client = new AmazonAutoScalingClient(provider.getAwsCredentialProvider());
        client.setEndpoint("autoscaling." + config.getDC() + ".amazonaws.com");
        return client;
    }

    /** New EC2 client pointed at this DC's regional endpoint. Caller must shut it down. */
    protected AmazonEC2 getEc2Client() {
        AmazonEC2 client = new AmazonEC2Client(provider.getAwsCredentialProvider());
        client.setEndpoint("ec2." + config.getDC() + ".amazonaws.com");
        return client;
    }
}
5,130
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/aws/SetVPCSecurityGroupID.java
/** * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.aws; import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.AmazonEC2Client; import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest; import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult; import com.amazonaws.services.ec2.model.SecurityGroup; import com.google.inject.Inject; import com.google.inject.Singleton; import com.netflix.raigad.configuration.IConfiguration; import com.netflix.raigad.utils.SystemUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Created by sloke on 11/16/15. * This class has been added especially for VPC Purposes. 
If SecurityGroup is deployed in VPC, * then SecurityGroupId is needed to make any modifications or querying to associated SecurityGroup * * Sets the Security Group Id for the VPC Security Group * If SecurityGroupId is not found for the matching the Security Group * then RuntimeException is thrown * */ @Singleton public class SetVPCSecurityGroupID { private static final Logger logger = LoggerFactory.getLogger(SetVPCSecurityGroupID.class); private final IConfiguration config; private final ICredential provider; @Inject public SetVPCSecurityGroupID(IConfiguration config, ICredential provider) { this.config = config; this.provider = provider; } public void execute() { AmazonEC2 client = null; try { client = getEc2Client(); //Get All the Existing Sec Group Ids String[] securityGroupIds = SystemUtils.getSecurityGroupIds(config.getMacIdForInstance()); DescribeSecurityGroupsRequest req = new DescribeSecurityGroupsRequest().withGroupIds(securityGroupIds); DescribeSecurityGroupsResult result = client.describeSecurityGroups(req); boolean securityGroupFound = false; for (SecurityGroup securityGroup : result.getSecurityGroups()) { logger.info("Read " + securityGroup.getGroupName()); if (securityGroup.getGroupName().equals(config.getACLGroupNameForVPC())) { logger.info("Found matching security group name: " + securityGroup.getGroupName()); // Setting configuration value with the correct SG ID config.setACLGroupIdForVPC(securityGroup.getGroupId()); securityGroupFound = true; break; } } // If correct SG was not found, throw Exception if (!securityGroupFound) { throw new RuntimeException("Cannot find matching security group for " + config.getACLGroupNameForVPC()); } } catch (Exception e) { throw new RuntimeException(e); } finally { if (client != null) { client.shutdown(); } } } private AmazonEC2 getEc2Client() { AmazonEC2 client = new AmazonEC2Client(provider.getAwsCredentialProvider()); client.setEndpoint("ec2." + config.getDC() + ".amazonaws.com"); return client; } }
5,131
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/aws/UpdateSecuritySettings.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.aws;

import com.google.common.collect.Lists;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.identity.IMembership;
import com.netflix.raigad.identity.IRaigadInstanceFactory;
import com.netflix.raigad.identity.InstanceManager;
import com.netflix.raigad.identity.RaigadInstance;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;

/**
 * This class will associate public IP's with a new instance so they can talk across the regions.
 * <p>
 * Requirements:
 * 1. Nodes in the same region needs to be able to talk to each other.
 * 2. Nodes in other regions needs to be able to talk to the others in the other region.
 * <p>
 * Assumptions:
 * 1. IRaigadInstanceFactory will provide the membership and will be visible across the regions
 * 2. IMembership amazon or any other implementation which can tell if the instance is a
 * part of the group (ASG in Amazon's case).
 */
@Singleton
public class UpdateSecuritySettings extends Task {
    private static final Logger logger = LoggerFactory.getLogger(UpdateSecuritySettings.class);

    public static final String JOB_NAME = "Update_SG";

    // Flipped to true after the first successful add/remove; read externally
    // to detect that the SG has been synchronized at least once.
    public static boolean firstTimeUpdated = false;

    private static final Random RANDOM = new Random();

    private final IMembership membership;
    private final IRaigadInstanceFactory factory;

    @Inject
    public UpdateSecuritySettings(IConfiguration config, IMembership membership, IRaigadInstanceFactory factory) {
        super(config);
        this.membership = membership;
        this.factory = factory;
    }

    /**
     * Master nodes execute this at the specified interval, others run only on startup
     * <p>
     * Reconciles the security group with the current cluster membership:
     * adds /32 ranges for instances missing from the ACL and removes ranges
     * whose instances are gone, on both the transport and the REST port.
     * NOTE(review): only the transport-port rules are listed via listACL;
     * rest-port rules are added/removed based on that transport-port view -
     * presumably the two port rules are kept in lockstep. TODO confirm.
     */
    @Override
    public void execute() {
        int transportPort = config.getTransportTcpPort();
        int restPort = config.getHttpPort();

        // Current /32 ranges authorized on the transport port
        List<String> accessControlLists = membership.listACL(transportPort, transportPort);

        // Get instances based on node types (tribe / non-tribe)
        List<RaigadInstance> instances = getInstanceList();

        // Iterate cluster nodes and build a list of IP's
        List<String> ipsToAdd = Lists.newArrayList();
        List<String> currentRanges = Lists.newArrayList();

        for (RaigadInstance instance : instances) {
            String range = instance.getHostIP() + "/32";
            currentRanges.add(range);
            if (!accessControlLists.contains(range)) {
                ipsToAdd.add(range);
            }
        }

        if (ipsToAdd.size() > 0) {
            logger.info("Adding IPs on ports {} and {}: {}", transportPort, restPort, ipsToAdd);
            membership.addACL(ipsToAdd, transportPort, transportPort);
            membership.addACL(ipsToAdd, restPort, restPort);
            firstTimeUpdated = true;
        }

        // Create a list of IP's to remove
        List<String> ipsToRemove = Lists.newArrayList();
        for (String accessControlList : accessControlLists) {
            // Remove if not found among the live instances
            if (!currentRanges.contains(accessControlList)) {
                ipsToRemove.add(accessControlList);
            }
        }

        if (ipsToRemove.size() > 0) {
            logger.info("Removing IPs on ports {} and {}: {}", transportPort, restPort, ipsToRemove);
            membership.removeACL(ipsToRemove, transportPort, transportPort);
            membership.removeACL(ipsToRemove, restPort, restPort);
            firstTimeUpdated = true;
        }
    }

    /**
     * Collects the instances of every configured tribe cluster.
     * NOTE(review): membership is sourced from the comma-separated tribe
     * cluster names even in this (non-tribe) updater - TODO confirm this is
     * the intended source of truth for the cluster's instance list.
     */
    private List<RaigadInstance> getInstanceList() {
        List<RaigadInstance> instances = new ArrayList<>();

        List<String> tribeClusters = new ArrayList<String>(
                Arrays.asList(StringUtils.split(config.getCommaSeparatedTribeClusterNames(), ",")));
        assert (tribeClusters.size() != 0) : "Need at least one tribe cluster";

        tribeClusters.forEach(tribeClusterName -> instances.addAll(factory.getAllIds(tribeClusterName)));

        if (config.isDebugEnabled()) {
            instances.forEach(instance -> logger.debug(instance.toString()));
        }

        return instances;
    }

    /**
     * Only master nodes will update security group settings on a schedule;
     * non-masters get the single-argument timer (presumably a run-once timer -
     * TODO confirm SimpleTimer semantics), masters repeat every 2-4 minutes
     * with random jitter to avoid synchronized AWS calls.
     */
    public static TaskTimer getTimer(InstanceManager instanceManager) {
        // Only master nodes will update security group settings
        if (!instanceManager.isMaster()) {
            return new SimpleTimer(JOB_NAME);
        } else {
            return new SimpleTimer(JOB_NAME, 120 * 1000 + RANDOM.nextInt(120 * 1000));
        }
    }

    @Override
    public String getName() {
        return JOB_NAME;
    }
}
5,132
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/aws/ICredential.java
/**
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.aws;

import com.amazonaws.auth.AWSCredentialsProvider;
import com.google.inject.ImplementedBy;

/**
 * Credential file interface for services supporting
 * Access ID and key authentication
 * <p>
 * The default Guice binding is {@link ClearCredential}, which reads
 * clear-text keys from a local properties file.
 */
@ImplementedBy(ClearCredential.class)
public interface ICredential {
    /** Provider handed to AWS SDK clients for authenticating API calls. */
    public AWSCredentialsProvider getAwsCredentialProvider();
}
5,133
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/aws/UpdateTribeSecuritySettings.java
/** * Copyright 2017 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.aws; import com.google.inject.Inject; import com.google.inject.Singleton; import com.netflix.raigad.configuration.IConfiguration; import com.netflix.raigad.identity.IMembership; import com.netflix.raigad.identity.IRaigadInstanceFactory; import com.netflix.raigad.identity.InstanceManager; import com.netflix.raigad.identity.RaigadInstance; import com.netflix.raigad.scheduler.SimpleTimer; import com.netflix.raigad.scheduler.Task; import com.netflix.raigad.scheduler.TaskTimer; import org.apache.commons.lang.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.*; /** * This class will associate public IP's with a new instance so they can talk across the regions. * <p> * Requirements: * (1) Nodes in the same region needs to be able to talk to each other. * (2) Nodes in other regions needs to be able to talk to the others in the other region. * <p> * Assumptions: * (1) IRaigadInstanceFactory will provide the membership and will be visible across the regions * (2) IMembership Amazon or any other implementation which can tell if the instance * is part of the group (ASG in Amazon's case). 
*/ @Singleton public class UpdateTribeSecuritySettings extends Task { private static final Logger logger = LoggerFactory.getLogger(UpdateTribeSecuritySettings.class); public static final String JOB_NAME = "Update_TRIBE_SG"; public static boolean firstTimeUpdated = false; private static final String COMMA_SEPARATOR = ","; private static final String PARAM_SEPARATOR = "="; private static final Random ran = new Random(); private final IMembership membership; private final IRaigadInstanceFactory factory; /** * clusterPortMap * es_tribe : 8000 * es_tribe_source1 : 8001 * es_tribe_source2 : 8002 */ private final Map<String, Integer> clusterPortMap = new HashMap<String, Integer>(); @Inject public UpdateTribeSecuritySettings(IConfiguration config, IMembership membership, IRaigadInstanceFactory factory) { super(config); this.membership = membership; this.factory = factory; } /** * Master nodes execute this at the specified interval. * Other nodes run only on startup. */ @Override public void execute() { // Initializing cluster-port map from config properties initializeClusterPortMap(); List<String> accessControlLists = new ArrayList<>(); for (String clusterName : clusterPortMap.keySet()) { List<String> aclList = membership.listACL(clusterPortMap.get(clusterName), clusterPortMap.get(clusterName)); accessControlLists.addAll(aclList); } List<RaigadInstance> instances = getInstanceList(); Map<String, String> addAclClusterMap = new HashMap<>(); Map<String, String> currentIpClusterMap = new HashMap<>(); for (RaigadInstance instance : instances) { String range = instance.getHostIP() + "/32"; if (!accessControlLists.contains(range)) { addAclClusterMap.put(range, instance.getApp()); } // Just generating ranges currentIpClusterMap.put(range, instance.getApp()); } if (addAclClusterMap.keySet().size() > 0) { /** * clusterInstancesMap * es_tribe : 50.60.70.80,50.60.70.81 * es_tribe_source1 : 60.70.80.90,60.70.80.91 * es_tribe_source2 : 70.80.90.00,70.80.90.01 */ Map<String, 
List<String>> clusterInstancesMap = generateClusterToAclListMap(addAclClusterMap); for (String currentClusterName : clusterInstancesMap.keySet()) { if (currentClusterName.startsWith("es_tribe_")) { clusterPortMap.forEach((clusterName, transportPort) -> { logger.info("Adding IPs for {} on port {}: {}", currentClusterName, transportPort, clusterInstancesMap.get(currentClusterName)); membership.addACL(clusterInstancesMap.get(currentClusterName), transportPort, transportPort); }); } else { logger.info("Adding IPs for {} on port {}: {}", currentClusterName, clusterPortMap.get(currentClusterName), clusterInstancesMap.get(currentClusterName)); membership.addACL(clusterInstancesMap.get(currentClusterName), clusterPortMap.get(currentClusterName), clusterPortMap.get(currentClusterName)); } } firstTimeUpdated = true; } // Iterating to remove ACL's List<String> removeAclList = new ArrayList<>(); for (String acl : accessControlLists) { if (!currentIpClusterMap.containsKey(acl)) { removeAclList.add(acl); } } if (removeAclList.size() > 0) { for (String acl : removeAclList) { Map<String, List<Integer>> aclPortMap = membership.getACLPortMap(acl); int from = aclPortMap.get(acl).get(0); int to = aclPortMap.get(acl).get(1); membership.removeACL(Collections.singletonList(acl), from, to); } firstTimeUpdated = true; } } private void initializeClusterPortMap() { // Adding existing cluster-port mapping if (!clusterPortMap.containsKey(config.getAppName())) { clusterPortMap.put(config.getAppName(), config.getTransportTcpPort()); logger.info("Adding cluster [{}:{}]", config.getAppName(), config.getTransportTcpPort()); } String clusterParams = config.getCommaSeparatedSourceClustersForTribeNode(); assert (clusterParams != null) : "Clusters parameters cannot be null"; String[] clusters = StringUtils.split(clusterParams.trim(), COMMA_SEPARATOR); assert (clusters.length != 0) : "At least one cluster is needed"; //Common settings for (String cluster : clusters) { String[] clusterPort = 
cluster.trim().split(PARAM_SEPARATOR); assert (clusterPort.length != 2) : "Cluster name or transport port is missing in configuration"; if (!clusterPortMap.containsKey(clusterPort[0].trim())) { String sourceTribeClusterName = clusterPort[0].trim(); Integer sourceTribeClusterPort = Integer.parseInt(clusterPort[1].trim()); clusterPortMap.put(sourceTribeClusterName, sourceTribeClusterPort); logger.info("Adding cluster [{}:{}]", sourceTribeClusterName, sourceTribeClusterPort); } } } private Map<String, List<String>> generateClusterToAclListMap(Map<String, String> addAclClusterMap) { Map<String, List<String>> clusterAclsMap = new HashMap<>(); for (String acl : addAclClusterMap.keySet()) { if (clusterAclsMap.containsKey(addAclClusterMap.get(acl))) { clusterAclsMap.get(addAclClusterMap.get(acl)).add(acl); } else { List<String> aclList = new ArrayList<>(); aclList.add(acl); clusterAclsMap.put(addAclClusterMap.get(acl), aclList); } } return clusterAclsMap; } private List<RaigadInstance> getInstanceList() { List<RaigadInstance> instances = new ArrayList<>(); for (String clusterName : clusterPortMap.keySet()) { instances.addAll(factory.getAllIds(clusterName)); } if (config.isDebugEnabled()) { for (RaigadInstance instance : instances) { logger.debug(instance.toString()); } } return instances; } public static TaskTimer getTimer(InstanceManager instanceManager) { return new SimpleTimer(JOB_NAME, 120 * 1000 + ran.nextInt(120 * 1000)); } @Override public String getName() { return JOB_NAME; } }
5,134
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/aws/IAMCredential.java
/** * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.aws; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.InstanceProfileCredentialsProvider; public class IAMCredential implements ICredential { private final InstanceProfileCredentialsProvider iamCredProvider; public IAMCredential() { this.iamCredProvider = new InstanceProfileCredentialsProvider(); } public AWSCredentialsProvider getAwsCredentialProvider() { return iamCredProvider; } }
5,135
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/dataobjects/MasterNodeInformation.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.dataobjects;

import org.codehaus.jackson.annotate.JsonCreator;
import org.codehaus.jackson.annotate.JsonProperty;

/**
 * Immutable value object describing an Elasticsearch master node, deserialized
 * from the cluster master info endpoint. Example payload:
 * <pre>
 * [{
 *     "id":"8sZZWYmmQaeNUKMq1S1uow",
 *     "host":"es-slokemsd-useast1d-master-i-9e1b62b4",
 *     "ip":"10.218.89.139",
 *     "node":"us-east-1d.i-9e1b62b4"
 * }]
 * </pre>
 */
public class MasterNodeInformation {
    private final String id;
    private final String host;
    private final String ip;
    private final String node;

    @JsonCreator
    public MasterNodeInformation(@JsonProperty("id") final String id,
                                 @JsonProperty("host") final String host,
                                 @JsonProperty("ip") final String ip,
                                 @JsonProperty("node") final String node) {
        this.id = id;
        this.host = host;
        this.ip = ip;
        this.node = node;
    }

    public String getId() { return id; }

    public String getHost() { return host; }

    public String getIp() { return ip; }

    public String getNode() { return node; }
}
5,136
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/objectmapper/DefaultMasterNodeInfoMapper.java
/** * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.objectmapper; import org.codehaus.jackson.JsonFactory; import org.codehaus.jackson.Version; import org.codehaus.jackson.map.DeserializationConfig; import org.codehaus.jackson.map.ObjectMapper; import org.codehaus.jackson.map.SerializationConfig; import org.codehaus.jackson.map.module.SimpleModule; public class DefaultMasterNodeInfoMapper extends ObjectMapper { public DefaultMasterNodeInfoMapper() { this(null); } public DefaultMasterNodeInfoMapper(JsonFactory factory) { super(factory); SimpleModule serializerModule = new SimpleModule("default serializers", new Version(1, 0, 0, null)); registerModule(serializerModule); configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, false); configure(SerializationConfig.Feature.FAIL_ON_EMPTY_BEANS, false); } }
5,137
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/objectmapper/DefaultIndexMapper.java
/** * Copyright 2017 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.objectmapper; import org.codehaus.jackson.JsonFactory; import org.codehaus.jackson.Version; import org.codehaus.jackson.map.DeserializationConfig; import org.codehaus.jackson.map.ObjectMapper; import org.codehaus.jackson.map.SerializationConfig; import org.codehaus.jackson.map.module.SimpleModule; public class DefaultIndexMapper extends ObjectMapper { public DefaultIndexMapper() { this(null); } public DefaultIndexMapper(JsonFactory factory) { super(factory); SimpleModule serializerModule = new SimpleModule("default serializers", new Version(1, 0, 0, null)); registerModule(serializerModule); configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, false); configure(SerializationConfig.Feature.AUTO_DETECT_GETTERS, false); configure(SerializationConfig.Feature.AUTO_DETECT_FIELDS, false); configure(SerializationConfig.Feature.INDENT_OUTPUT, false); } }
5,138
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/ThreadPoolStatsMonitor.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.monitoring;

import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.threadpool.ThreadPoolStats;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

/**
 * Scheduled task that polls the local Elasticsearch node's thread pool stats
 * (index / get / search / bulk pools) and publishes them to Servo via an
 * annotated reporter object.
 */
@Singleton
public class ThreadPoolStatsMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(ThreadPoolStatsMonitor.class);

    public static final String METRIC_NAME = "Elasticsearch_ThreadPoolMonitor";

    // Servo reads metric values from this reporter; registered once in the constructor
    private final Elasticsearch_ThreadPoolStatsReporter tpStatsReporter;

    @Inject
    public ThreadPoolStatsMonitor(IConfiguration config) {
        super(config);
        tpStatsReporter = new Elasticsearch_ThreadPoolStatsReporter();
        Monitors.registerObject(tpStatsReporter);
    }

    /**
     * Fetches node stats from the local transport client, copies the stats of the
     * "index", "get", "search" and "bulk" pools into a fresh bean, and atomically
     * swaps it into the reporter. On any failure the previous values are replaced
     * by a zeroed bean (the bean created at the top of the method).
     */
    @Override
    public void execute() throws Exception {
        // If Elasticsearch is started then only start the monitoring
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            String exceptionMsg = "Elasticsearch is not yet started, check back again later";
            logger.info(exceptionMsg);
            return;
        }

        ThreadPoolStatsBean threadPoolStatsBean = new ThreadPoolStatsBean();

        try {
            NodesStatsResponse nodesStatsResponse = ElasticsearchTransportClient.getNodesStatsResponse(config);

            // The transport client targets the local node, so the first (only) entry is used
            NodeStats nodeStats = null;
            List<NodeStats> nodeStatsList = nodesStatsResponse.getNodes();
            if (nodeStatsList.size() > 0) {
                nodeStats = nodeStatsList.get(0);
            }

            if (nodeStats == null) {
                logger.info("Thread pool stats are not available (node stats is not available)");
                return;
            }

            ThreadPoolStats threadPoolStats = nodeStats.getThreadPool();
            if (threadPoolStats == null) {
                logger.info("Thread pool stats are not available");
                return;
            }

            // Copy only the four pools we report on; other pools are ignored
            Iterator<ThreadPoolStats.Stats> threadPoolStatsIterator = threadPoolStats.iterator();
            while (threadPoolStatsIterator.hasNext()) {
                ThreadPoolStats.Stats stat = threadPoolStatsIterator.next();
                if (stat.getName().equals("index")) {
                    threadPoolStatsBean.indexThreads = stat.getThreads();
                    threadPoolStatsBean.indexQueue = stat.getQueue();
                    threadPoolStatsBean.indexActive = stat.getActive();
                    threadPoolStatsBean.indexRejected = stat.getRejected();
                    threadPoolStatsBean.indexLargest = stat.getLargest();
                    threadPoolStatsBean.indexCompleted = stat.getCompleted();
                } else if (stat.getName().equals("get")) {
                    threadPoolStatsBean.getThreads = stat.getThreads();
                    threadPoolStatsBean.getQueue = stat.getQueue();
                    threadPoolStatsBean.getActive = stat.getActive();
                    threadPoolStatsBean.getRejected = stat.getRejected();
                    threadPoolStatsBean.getLargest = stat.getLargest();
                    threadPoolStatsBean.getCompleted = stat.getCompleted();
                } else if (stat.getName().equals("search")) {
                    threadPoolStatsBean.searchThreads = stat.getThreads();
                    threadPoolStatsBean.searchQueue = stat.getQueue();
                    threadPoolStatsBean.searchActive = stat.getActive();
                    threadPoolStatsBean.searchRejected = stat.getRejected();
                    threadPoolStatsBean.searchLargest = stat.getLargest();
                    threadPoolStatsBean.searchCompleted = stat.getCompleted();
                } else if (stat.getName().equals("bulk")) {
                    threadPoolStatsBean.bulkThreads = stat.getThreads();
                    threadPoolStatsBean.bulkQueue = stat.getQueue();
                    threadPoolStatsBean.bulkActive = stat.getActive();
                    threadPoolStatsBean.bulkRejected = stat.getRejected();
                    threadPoolStatsBean.bulkLargest = stat.getLargest();
                    threadPoolStatsBean.bulkCompleted = stat.getCompleted();
                }
            }
        } catch (Exception e) {
            logger.warn("Failed to load thread pool stats data", e);
        }

        // Atomic swap so Servo readers always see a consistent snapshot
        tpStatsReporter.threadPoolBean.set(threadPoolStatsBean);
    }

    /**
     * Servo reporter: each @Monitor getter reads the latest snapshot bean.
     * NOTE(review): the annotated metric names mix CamelCase ("IndexThreads")
     * and lowerCamelCase ("indexActive") — presumably historical; renaming them
     * would break existing dashboards.
     */
    public class Elasticsearch_ThreadPoolStatsReporter {
        private final AtomicReference<ThreadPoolStatsBean> threadPoolBean;

        public Elasticsearch_ThreadPoolStatsReporter() {
            threadPoolBean = new AtomicReference<ThreadPoolStatsBean>(new ThreadPoolStatsBean());
        }

        @Monitor(name = "IndexThreads", type = DataSourceType.GAUGE)
        public long getIndexThreads() { return threadPoolBean.get().indexThreads; }

        @Monitor(name = "IndexQueue", type = DataSourceType.GAUGE)
        public long getIndexQueue() { return threadPoolBean.get().indexQueue; }

        @Monitor(name = "indexActive", type = DataSourceType.GAUGE)
        public long getIndexActive() { return threadPoolBean.get().indexActive; }

        @Monitor(name = "indexRejected", type = DataSourceType.COUNTER)
        public long getIndexRejected() { return threadPoolBean.get().indexRejected; }

        @Monitor(name = "indexLargest", type = DataSourceType.GAUGE)
        public long getIndexLargest() { return threadPoolBean.get().indexLargest; }

        @Monitor(name = "indexCompleted", type = DataSourceType.COUNTER)
        public long getIndexCompleted() { return threadPoolBean.get().indexCompleted; }

        @Monitor(name = "getThreads", type = DataSourceType.GAUGE)
        public long getGetThreads() { return threadPoolBean.get().getThreads; }

        @Monitor(name = "getQueue", type = DataSourceType.GAUGE)
        public long getGetQueue() { return threadPoolBean.get().getQueue; }

        @Monitor(name = "getActive", type = DataSourceType.GAUGE)
        public long getGetActive() { return threadPoolBean.get().getActive; }

        @Monitor(name = "getRejected", type = DataSourceType.COUNTER)
        public long getGetRejected() { return threadPoolBean.get().getRejected; }

        @Monitor(name = "getLargest", type = DataSourceType.GAUGE)
        public long getGetLargest() { return threadPoolBean.get().getLargest; }

        @Monitor(name = "getCompleted", type = DataSourceType.COUNTER)
        public long getGetCompleted() { return threadPoolBean.get().getCompleted; }

        @Monitor(name = "searchThreads", type = DataSourceType.GAUGE)
        public long getSearchThreads() { return threadPoolBean.get().searchThreads; }

        @Monitor(name = "searchQueue", type = DataSourceType.GAUGE)
        public long getSearchQueue() { return threadPoolBean.get().searchQueue; }

        @Monitor(name = "searchActive", type = DataSourceType.GAUGE)
        public long getSearchActive() { return threadPoolBean.get().searchActive; }

        @Monitor(name = "searchRejected", type = DataSourceType.COUNTER)
        public long getSearchRejected() { return threadPoolBean.get().searchRejected; }

        @Monitor(name = "searchLargest", type = DataSourceType.GAUGE)
        public long getSearchLargest() { return threadPoolBean.get().searchLargest; }

        @Monitor(name = "searchCompleted", type = DataSourceType.COUNTER)
        public long getSearchCompleted() { return threadPoolBean.get().searchCompleted; }

        @Monitor(name = "bulkThreads", type = DataSourceType.GAUGE)
        public long getBulkThreads() { return threadPoolBean.get().bulkThreads; }

        @Monitor(name = "bulkQueue", type = DataSourceType.GAUGE)
        public long getBulkQueue() { return threadPoolBean.get().bulkQueue; }

        @Monitor(name = "bulkActive", type = DataSourceType.GAUGE)
        public long getBulkActive() { return threadPoolBean.get().bulkActive; }

        @Monitor(name = "bulkRejected", type = DataSourceType.COUNTER)
        public long getBulkRejected() { return threadPoolBean.get().bulkRejected; }

        @Monitor(name = "bulkLargest", type = DataSourceType.GAUGE)
        public long getBulkLargest() { return threadPoolBean.get().bulkLargest; }

        @Monitor(name = "bulkCompleted", type = DataSourceType.COUNTER)
        public long getBulkCompleted() { return threadPoolBean.get().bulkCompleted; }
    }

    // Plain snapshot holder; written by execute(), read through the reporter getters
    private static class ThreadPoolStatsBean {
        private long indexThreads;
        private long indexQueue;
        private long indexActive;
        private long indexRejected;
        private long indexLargest;
        private long indexCompleted;
        private long getThreads;
        private long getQueue;
        private long getActive;
        private long getRejected;
        private long getLargest;
        private long getCompleted;
        private long searchThreads;
        private long searchQueue;
        private long searchActive;
        private long searchRejected;
        private long searchLargest;
        private long searchCompleted;
        private long bulkThreads;
        private long bulkQueue;
        private long bulkActive;
        private long bulkRejected;
        private long bulkLargest;
        private long bulkCompleted;
    }

    /**
     * @return a timer that fires every 60 seconds
     */
    public static TaskTimer getTimer(String name) {
        return new SimpleTimer(name, 60 * 1000);
    }

    @Override
    public String getName() {
        return METRIC_NAME;
    }
}
5,139
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/OsStatsMonitor.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.monitoring;

import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.monitor.os.OsStats;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

/**
 * Scheduled task that polls the local Elasticsearch node's OS-level stats
 * (memory, CPU, swap) and publishes them to Servo via an annotated reporter.
 */
@Singleton
public class OsStatsMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(OsStatsMonitor.class);

    public static final String METRIC_NAME = "Elasticsearch_OsStatsMonitor";

    // Servo reads metric values from this reporter; registered once in the constructor
    private final Elasticsearch_OsStatsReporter osStatsReporter;

    @Inject
    public OsStatsMonitor(IConfiguration config) {
        super(config);
        osStatsReporter = new Elasticsearch_OsStatsReporter();
        Monitors.registerObject(osStatsReporter);
    }

    /**
     * Fetches node stats from the local transport client, copies the OS stats into
     * a fresh bean, and atomically swaps it into the reporter. On failure the
     * previous values are replaced by a zeroed bean.
     */
    @Override
    public void execute() throws Exception {
        // If Elasticsearch is started then only start the monitoring
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            String exceptionMsg = "Elasticsearch is not yet started, check back again later";
            logger.info(exceptionMsg);
            return;
        }

        OsStatsBean osStatsBean = new OsStatsBean();

        try {
            NodesStatsResponse nodesStatsResponse = ElasticsearchTransportClient.getNodesStatsResponse(config);

            // The transport client targets the local node, so the first (only) entry is used
            NodeStats nodeStats = null;
            List<NodeStats> nodeStatsList = nodesStatsResponse.getNodes();
            if (nodeStatsList.size() > 0) {
                nodeStats = nodeStatsList.get(0);
            }

            if (nodeStats == null) {
                logger.info("OS stats is not available (node stats is not available)");
                return;
            }

            OsStats osStats = nodeStats.getOs();
            if (osStats == null) {
                logger.info("OS stats is not available");
                return;
            }

            // Memory
            // NOTE(review): actualFree/actualUsed intentionally mirror free/used —
            // presumably because this ES API version no longer exposes "actual" values;
            // the separate metric names are kept for dashboard continuity. Confirm.
            osStatsBean.freeInBytes = osStats.getMem().getFree().getBytes();
            osStatsBean.usedInBytes = osStats.getMem().getUsed().getBytes();
            osStatsBean.actualFreeInBytes = osStats.getMem().getFree().getBytes();
            osStatsBean.actualUsedInBytes = osStats.getMem().getUsed().getBytes();
            osStatsBean.freePercent = osStats.getMem().getFreePercent();
            osStatsBean.usedPercent = osStats.getMem().getUsedPercent();

            // CPU
            // NOTE(review): only an overall CPU percent is available from this API;
            // user/idle/stolen are hard-coded to 0 (metrics retained for continuity)
            osStatsBean.cpuSys = osStats.getCpu().getPercent();
            osStatsBean.cpuUser = 0;
            osStatsBean.cpuIdle = 0;
            osStatsBean.cpuStolen = 0;

            // Swap
            osStatsBean.swapFreeInBytes = osStats.getSwap().getFree().getBytes();
            osStatsBean.swapUsedInBytes = osStats.getSwap().getUsed().getBytes();

            // Uptime — not exposed by this API version; reported as 0
            osStatsBean.uptimeInMillis = 0;

            // Timestamp
            osStatsBean.osTimestamp = osStats.getTimestamp();
        } catch (Exception e) {
            logger.warn("Failed to load OS stats data", e);
        }

        // Atomic swap so Servo readers always see a consistent snapshot
        osStatsReporter.osStatsBean.set(osStatsBean);
    }

    /**
     * Servo reporter: each @Monitor getter reads the latest snapshot bean.
     */
    public class Elasticsearch_OsStatsReporter {
        private final AtomicReference<OsStatsBean> osStatsBean;

        public Elasticsearch_OsStatsReporter() {
            osStatsBean = new AtomicReference<OsStatsBean>(new OsStatsBean());
        }

        @Monitor(name = "free_in_bytes", type = DataSourceType.GAUGE)
        public long getFreeInBytes() { return osStatsBean.get().freeInBytes; }

        @Monitor(name = "used_in_bytes", type = DataSourceType.GAUGE)
        public long getUsedInBytes() { return osStatsBean.get().usedInBytes; }

        @Monitor(name = "actual_free_in_bytes", type = DataSourceType.GAUGE)
        public long getActualFreeInBytes() { return osStatsBean.get().actualFreeInBytes; }

        // NOTE: method name has a typo ("geActual..."); the exported metric name comes
        // from the annotation, and renaming a public getter could affect reflective
        // consumers, so it is kept as-is.
        @Monitor(name = "actual_used_in_bytes", type = DataSourceType.GAUGE)
        public long geActualUsedInBytes() { return osStatsBean.get().actualUsedInBytes; }

        @Monitor(name = "free_percent", type = DataSourceType.GAUGE)
        public short getFreePercent() { return osStatsBean.get().freePercent; }

        @Monitor(name = "used_percent", type = DataSourceType.GAUGE)
        public short getUsedPercent() { return osStatsBean.get().usedPercent; }

        @Monitor(name = "cpu_sys", type = DataSourceType.GAUGE)
        public short getCpuSys() { return osStatsBean.get().cpuSys; }

        @Monitor(name = "cpu_user", type = DataSourceType.GAUGE)
        public short getCpuUser() { return osStatsBean.get().cpuUser; }

        @Monitor(name = "cpu_idle", type = DataSourceType.GAUGE)
        public short getCpuIdle() { return osStatsBean.get().cpuIdle; }

        @Monitor(name = "cpu_stolen", type = DataSourceType.GAUGE)
        public short getCpuStolen() { return osStatsBean.get().cpuStolen; }

        @Monitor(name = "swap_used_in_bytes", type = DataSourceType.GAUGE)
        public long getSwapUsedInBytes() { return osStatsBean.get().swapUsedInBytes; }

        @Monitor(name = "swap_free_in_bytes", type = DataSourceType.GAUGE)
        public long getSwapFreeInBytes() { return osStatsBean.get().swapFreeInBytes; }

        @Monitor(name = "uptime_in_millis", type = DataSourceType.GAUGE)
        public double getUptimeInMillis() { return osStatsBean.get().uptimeInMillis; }

        @Monitor(name = "os_timestamp", type = DataSourceType.GAUGE)
        public long getOsTimestamp() { return osStatsBean.get().osTimestamp; }
    }

    // Plain snapshot holder; written by execute(), read through the reporter getters
    private static class OsStatsBean {
        private long freeInBytes;
        private long usedInBytes;
        private long actualFreeInBytes;
        private long actualUsedInBytes;
        private short freePercent;
        private short usedPercent;
        private short cpuSys;
        private short cpuUser;
        private short cpuIdle;
        private short cpuStolen;
        private long swapUsedInBytes;
        private long swapFreeInBytes;
        // long here, widened to double by the reporter getter
        private long uptimeInMillis;
        private long osTimestamp;
    }

    /**
     * @return a timer that fires every 60 seconds
     */
    public static TaskTimer getTimer(String name) {
        return new SimpleTimer(name, 60 * 1000);
    }

    @Override
    public String getName() {
        return METRIC_NAME;
    }
}
5,140
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/JvmStatsMonitor.java
/** * Copyright 2017 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.monitoring; import com.google.inject.Inject; import com.google.inject.Singleton; import com.netflix.raigad.configuration.IConfiguration; import com.netflix.raigad.scheduler.SimpleTimer; import com.netflix.raigad.scheduler.Task; import com.netflix.raigad.scheduler.TaskTimer; import com.netflix.raigad.utils.ElasticsearchProcessMonitor; import com.netflix.raigad.utils.ElasticsearchTransportClient; import com.netflix.servo.annotations.DataSourceType; import com.netflix.servo.annotations.Monitor; import com.netflix.servo.monitor.Monitors; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.monitor.jvm.JvmStats; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Iterator; import java.util.List; import java.util.concurrent.atomic.AtomicReference; @Singleton public class JvmStatsMonitor extends Task { private static final Logger logger = LoggerFactory.getLogger(JvmStatsMonitor.class); public static final String METRIC_NAME = "Elasticsearch_JvmStatsMonitor"; public static final String GC_YOUNG_TAG = "young"; public static final String GC_OLD_TAG = "old"; public static final String GC_SURVIVOR_TAG = "survivor"; private final Elasticsearch_JvmStatsReporter jvmStatsReporter; @Inject public JvmStatsMonitor(IConfiguration config) { 
super(config); jvmStatsReporter = new Elasticsearch_JvmStatsReporter(); Monitors.registerObject(jvmStatsReporter); } @Override public void execute() throws Exception { // Only start monitoring if Elasticsearch is started if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) { String exceptionMsg = "Elasticsearch is not yet started, check back again later"; logger.info(exceptionMsg); return; } JvmStatsBean jvmStatsBean = new JvmStatsBean(); try { NodesStatsResponse nodesStatsResponse = ElasticsearchTransportClient.getNodesStatsResponse(config); NodeStats nodeStats = null; List<NodeStats> nodeStatsList = nodesStatsResponse.getNodes(); if (nodeStatsList.size() > 0) { nodeStats = nodeStatsList.get(0); } if (nodeStats == null) { logger.info("JVM stats is not available (node stats is not available)"); return; } JvmStats jvmStats = nodeStats.getJvm(); if (jvmStats == null) { logger.info("JVM stats is not available"); return; } //Heap jvmStatsBean.heapCommittedInBytes = jvmStats.getMem().getHeapCommitted().getMb(); jvmStatsBean.heapMaxInBytes = jvmStats.getMem().getHeapMax().getMb(); jvmStatsBean.heapUsedInBytes = jvmStats.getMem().getHeapUsed().getMb(); jvmStatsBean.heapUsedPercent = jvmStats.getMem().getHeapUsedPercent(); jvmStatsBean.nonHeapCommittedInBytes = jvmStats.getMem().getNonHeapCommitted().getMb(); jvmStatsBean.nonHeapUsedInBytes = jvmStats.getMem().getNonHeapUsed().getMb(); Iterator<JvmStats.MemoryPool> memoryPoolIterator = jvmStats.getMem().iterator(); while (memoryPoolIterator.hasNext()) { JvmStats.MemoryPool memoryPoolStats = memoryPoolIterator.next(); if (memoryPoolStats.getName().equalsIgnoreCase(GC_YOUNG_TAG)) { jvmStatsBean.youngMaxInBytes = memoryPoolStats.getMax().getBytes(); jvmStatsBean.youngUsedInBytes = memoryPoolStats.getUsed().getBytes(); jvmStatsBean.youngPeakUsedInBytes = memoryPoolStats.getPeakUsed().getBytes(); jvmStatsBean.youngPeakMaxInBytes = memoryPoolStats.getPeakMax().getBytes(); } else if 
(memoryPoolStats.getName().equalsIgnoreCase(GC_SURVIVOR_TAG)) { jvmStatsBean.survivorMaxInBytes = memoryPoolStats.getMax().getBytes(); jvmStatsBean.survivorUsedInBytes = memoryPoolStats.getUsed().getBytes(); jvmStatsBean.survivorPeakUsedInBytes = memoryPoolStats.getPeakUsed().getBytes(); jvmStatsBean.survivorPeakMaxInBytes = memoryPoolStats.getPeakMax().getBytes(); } else if (memoryPoolStats.getName().equalsIgnoreCase(GC_OLD_TAG)) { jvmStatsBean.oldMaxInBytes = memoryPoolStats.getMax().getBytes(); jvmStatsBean.oldUsedInBytes = memoryPoolStats.getUsed().getBytes(); jvmStatsBean.oldPeakUsedInBytes = memoryPoolStats.getPeakUsed().getBytes(); jvmStatsBean.oldPeakMaxInBytes = memoryPoolStats.getPeakMax().getBytes(); } } //Threads jvmStatsBean.threadCount = jvmStats.getThreads().getCount(); jvmStatsBean.threadPeakCount = jvmStats.getThreads().getPeakCount(); jvmStatsBean.uptimeHours = jvmStats.getUptime().getHours(); //GC for (JvmStats.GarbageCollector garbageCollector : jvmStats.getGc().getCollectors()) { if (garbageCollector.getName().equalsIgnoreCase(GC_YOUNG_TAG)) { jvmStatsBean.youngCollectionCount = garbageCollector.getCollectionCount(); jvmStatsBean.youngCollectionTimeInMillis = garbageCollector.getCollectionTime().getMillis(); } else if (garbageCollector.getName().equalsIgnoreCase(GC_OLD_TAG)) { jvmStatsBean.oldCollectionCount = garbageCollector.getCollectionCount(); jvmStatsBean.oldCollectionTimeInMillis = garbageCollector.getCollectionTime().getMillis(); } } } catch (Exception e) { logger.warn("Failed to load JVM stats data", e); } jvmStatsReporter.jvmStatsBean.set(jvmStatsBean); } public class Elasticsearch_JvmStatsReporter { private final AtomicReference<JvmStatsBean> jvmStatsBean; public Elasticsearch_JvmStatsReporter() { jvmStatsBean = new AtomicReference<JvmStatsBean>(new JvmStatsBean()); } @Monitor(name = "heap_committed_in_bytes", type = DataSourceType.GAUGE) public long getHeapCommitedInBytes() { return jvmStatsBean.get().heapCommittedInBytes; } 
@Monitor(name = "heap_max_in_bytes", type = DataSourceType.GAUGE) public long getHeapMaxInBytes() { return jvmStatsBean.get().heapMaxInBytes; } @Monitor(name = "heap_used_in_bytes", type = DataSourceType.GAUGE) public long getHeapUsedInBytes() { return jvmStatsBean.get().heapUsedInBytes; } @Monitor(name = "non_heap_committed_in_bytes", type = DataSourceType.GAUGE) public long getNonHeapCommittedInBytes() { return jvmStatsBean.get().nonHeapCommittedInBytes; } @Monitor(name = "non_heap_used_in_bytes", type = DataSourceType.GAUGE) public long getNonHeapUsedInBytes() { return jvmStatsBean.get().nonHeapUsedInBytes; } @Monitor(name = "heap_used_percent", type = DataSourceType.GAUGE) public short getHeapUsedPercent() { return jvmStatsBean.get().heapUsedPercent; } @Monitor(name = "threads_count", type = DataSourceType.GAUGE) public long getThreadsCount() { return jvmStatsBean.get().threadCount; } @Monitor(name = "threads_peak_count", type = DataSourceType.GAUGE) public long getThreadsPeakCount() { return jvmStatsBean.get().threadPeakCount; } @Monitor(name = "uptime_hours", type = DataSourceType.GAUGE) public double getUptimeHours() { return jvmStatsBean.get().uptimeHours; } @Monitor(name = "young_collection_count", type = DataSourceType.GAUGE) public long getYoungCollectionCount() { return jvmStatsBean.get().youngCollectionCount; } @Monitor(name = "young_collection_time_in_millis", type = DataSourceType.GAUGE) public long getYoungCollectionTimeInMillis() { return jvmStatsBean.get().youngCollectionTimeInMillis; } @Monitor(name = "old_collection_count", type = DataSourceType.GAUGE) public long getOldCollectionCount() { return jvmStatsBean.get().oldCollectionCount; } @Monitor(name = "old_collection_time_in_millis", type = DataSourceType.GAUGE) public long getOldCollectionTimeInMillis() { return jvmStatsBean.get().oldCollectionTimeInMillis; } @Monitor(name = "young_used_in_bytes", type = DataSourceType.GAUGE) public long getYoungUsedInBytes() { return 
jvmStatsBean.get().youngUsedInBytes; } @Monitor(name = "young_max_in_bytes", type = DataSourceType.GAUGE) public long getYoungMaxInBytes() { return jvmStatsBean.get().youngMaxInBytes; } @Monitor(name = "young_peak_used_in_bytes", type = DataSourceType.GAUGE) public long getYoungPeakUsedInBytes() { return jvmStatsBean.get().youngPeakUsedInBytes; } @Monitor(name = "young_peak_max_in_bytes", type = DataSourceType.GAUGE) public long getYoungPeakMaxInBytes() { return jvmStatsBean.get().youngPeakMaxInBytes; } @Monitor(name = "survivor_used_in_bytes", type = DataSourceType.GAUGE) public long getSurvivorUsedInBytes() { return jvmStatsBean.get().survivorUsedInBytes; } @Monitor(name = "survivor_max_in_bytes", type = DataSourceType.GAUGE) public long getSurvivorMaxInBytes() { return jvmStatsBean.get().survivorMaxInBytes; } @Monitor(name = "survivor_peak_used_in_bytes", type = DataSourceType.GAUGE) public long getSurvivorPeakUsedInBytes() { return jvmStatsBean.get().survivorPeakUsedInBytes; } @Monitor(name = "survivor_peak_max_in_bytes", type = DataSourceType.GAUGE) public long getSurvivorPeakMaxInBytes() { return jvmStatsBean.get().survivorPeakMaxInBytes; } @Monitor(name = "old_used_in_bytes", type = DataSourceType.GAUGE) public long getOldUsedInBytes() { return jvmStatsBean.get().oldUsedInBytes; } @Monitor(name = "old_max_in_bytes", type = DataSourceType.GAUGE) public long getOldMaxInBytes() { return jvmStatsBean.get().oldMaxInBytes; } @Monitor(name = "old_peak_used_in_bytes", type = DataSourceType.GAUGE) public long getOldPeakUsedInBytes() { return jvmStatsBean.get().oldPeakUsedInBytes; } @Monitor(name = "old_peak_max_in_bytes", type = DataSourceType.GAUGE) public long getOldPeakMaxInBytes() { return jvmStatsBean.get().oldPeakMaxInBytes; } @Monitor(name = "young_last_gc_start_time", type = DataSourceType.GAUGE) public long getYoungLastGcStartTime() { return jvmStatsBean.get().youngLastGcStartTime; } @Monitor(name = "young_last_gc_end_time", type = DataSourceType.GAUGE) 
public long getYoungLastGcEndTime() { return jvmStatsBean.get().youngLastGcEndTime; } @Monitor(name = "young_last_gc_max_in_bytes", type = DataSourceType.GAUGE) public long getYoungLastGcMaxInBytes() { return jvmStatsBean.get().youngLastGcMaxInBytes; } @Monitor(name = "young_last_gc_before_used_in_bytes", type = DataSourceType.GAUGE) public long getYoungLastGcBeforeUsedInBytes() { return jvmStatsBean.get().youngLastGcBeforeUsedInBytes; } @Monitor(name = "young_last_gc_after_used_in_bytes", type = DataSourceType.GAUGE) public long getYoungLastGcAfterUsedInBytes() { return jvmStatsBean.get().youngLastGcAfterUsedInBytes; } @Monitor(name = "young_last_gc_duration", type = DataSourceType.GAUGE) public long getYoungLastGcDuration() { return jvmStatsBean.get().youngLastGcDuration; } @Monitor(name = "old_last_gc_start_time", type = DataSourceType.GAUGE) public long getOldLastGcStartTime() { return jvmStatsBean.get().oldLastGcStartTime; } @Monitor(name = "old_last_gc_end_time", type = DataSourceType.GAUGE) public long getOldLastGcEndTime() { return jvmStatsBean.get().oldLastGcEndTime; } @Monitor(name = "old_last_gc_max_in_bytes", type = DataSourceType.GAUGE) public long getOldLastGcMaxInBytes() { return jvmStatsBean.get().oldLastGcMaxInBytes; } @Monitor(name = "old_last_gc_before_used_in_bytes", type = DataSourceType.GAUGE) public long getOldLastGcBeforeUsedInBytes() { return jvmStatsBean.get().oldLastGcBeforeUsedInBytes; } @Monitor(name = "old_last_gc_after_used_in_bytes", type = DataSourceType.GAUGE) public long getOldLastGcAfterUsedInBytes() { return jvmStatsBean.get().oldLastGcAfterUsedInBytes; } @Monitor(name = "old_last_gc_duration", type = DataSourceType.GAUGE) public long getOldLastGcDuration() { return jvmStatsBean.get().oldLastGcDuration; } } private static class JvmStatsBean { private long heapCommittedInBytes; private long heapMaxInBytes; private long heapUsedInBytes; private long nonHeapCommittedInBytes; private long nonHeapUsedInBytes; private short 
heapUsedPercent; private int threadCount; private int threadPeakCount; private long uptimeHours; private long youngCollectionCount; private long youngCollectionTimeInMillis; private long oldCollectionCount; private long oldCollectionTimeInMillis; private long youngUsedInBytes; private long youngMaxInBytes; private long youngPeakUsedInBytes; private long youngPeakMaxInBytes; private long survivorUsedInBytes; private long survivorMaxInBytes; private long survivorPeakUsedInBytes; private long survivorPeakMaxInBytes; private long oldUsedInBytes; private long oldMaxInBytes; private long oldPeakUsedInBytes; private long oldPeakMaxInBytes; private long youngLastGcStartTime; private long youngLastGcEndTime; private long youngLastGcMaxInBytes; private long youngLastGcBeforeUsedInBytes; private long youngLastGcAfterUsedInBytes; private long youngLastGcDuration; private long oldLastGcStartTime; private long oldLastGcEndTime; private long oldLastGcMaxInBytes; private long oldLastGcBeforeUsedInBytes; private long oldLastGcAfterUsedInBytes; private long oldLastGcDuration; } public static TaskTimer getTimer(String name) { return new SimpleTimer(name, 60 * 1000); } @Override public String getName() { return METRIC_NAME; } }
5,141
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/EstimatedHistogram.java
/** * Copyright 2017 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.monitoring; import org.slf4j.Logger; import java.util.Arrays; import java.util.Objects; import java.util.concurrent.atomic.AtomicLongArray; public class EstimatedHistogram { /** * The series of values to which the counts in `buckets` correspond: * 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 17, 20, etc. * Thus, a `buckets` of [0, 0, 1, 10] would mean we had seen one value of 3 and 10 values of 4. * <p> * The series starts at 1 and grows by 1.2 each time (rounding and removing duplicates). It goes from 1 * to around 36M by default (creating 90+1 buckets), which will give us timing resolution from microseconds to * 36 seconds, with less precision as the numbers get larger. * <p> * Each bucket represents values from (previous bucket offset, current offset]. 
*/ private final long[] bucketOffsets; // buckets is one element longer than bucketOffsets -- the last element is values greater than the last offset final AtomicLongArray buckets; public EstimatedHistogram() { this(90); } public EstimatedHistogram(int bucketCount) { bucketOffsets = newOffsets(bucketCount); buckets = new AtomicLongArray(bucketOffsets.length + 1); } public EstimatedHistogram(long[] offsets, long[] bucketData) { assert bucketData.length == offsets.length + 1; bucketOffsets = offsets; buckets = new AtomicLongArray(bucketData); } private static long[] newOffsets(int size) { long[] result = new long[size]; long last = 1; result[0] = last; for (int i = 1; i < size; i++) { long next = Math.round(last * 1.2); if (next == last) next++; result[i] = next; last = next; } return result; } /** * @return the histogram values corresponding to each bucket index */ public long[] getBucketOffsets() { return bucketOffsets; } /** * Increments the count of the bucket closest to n, rounding UP. * * @param n */ public void add(long n) { int index = Arrays.binarySearch(bucketOffsets, n); if (index < 0) { // inexact match, take the first bucket higher than n index = -index - 1; } // else exact match; we're good buckets.incrementAndGet(index); } /** * @return the count in the given bucket */ long get(int bucket) { return buckets.get(bucket); } /** * @param reset zero out buckets afterwards if true * @return a long[] containing the current histogram buckets */ public long[] getBuckets(boolean reset) { final int len = buckets.length(); long[] rv = new long[len]; if (reset) for (int i = 0; i < len; i++) rv[i] = buckets.getAndSet(i, 0L); else for (int i = 0; i < len; i++) rv[i] = buckets.get(i); return rv; } /** * @return the smallest value that could have been added to this histogram */ public long min() { for (int i = 0; i < buckets.length(); i++) { if (buckets.get(i) > 0) return i == 0 ? 
0 : 1 + bucketOffsets[i - 1]; } return 0; } /** * @return the largest value that could have been added to this histogram. If the histogram * overflowed, returns Long.MAX_VALUE. */ public long max() { int lastBucket = buckets.length() - 1; if (buckets.get(lastBucket) > 0) return Long.MAX_VALUE; for (int i = lastBucket - 1; i >= 0; i--) { if (buckets.get(i) > 0) return bucketOffsets[i]; } return 0; } /** * @param percentile * @return estimated value at given percentile */ public long percentile(double percentile) { assert percentile >= 0 && percentile <= 1.0; int lastBucket = buckets.length() - 1; if (buckets.get(lastBucket) > 0) throw new IllegalStateException("Unable to compute when histogram overflowed"); long pcount = (long) Math.floor(count() * percentile); if (pcount == 0) return 0; long elements = 0; for (int i = 0; i < lastBucket; i++) { elements += buckets.get(i); if (elements >= pcount) return bucketOffsets[i]; } return 0; } /** * @return the mean histogram value (average of bucket offsets, weighted by count) * @throws IllegalStateException if any values were greater than the largest bucket threshold */ public long mean() { int lastBucket = buckets.length() - 1; if (buckets.get(lastBucket) > 0) throw new IllegalStateException("Unable to compute ceiling for max when histogram overflowed"); long elements = 0; long sum = 0; for (int i = 0; i < lastBucket; i++) { long bCount = buckets.get(i); elements += bCount; sum += bCount * bucketOffsets[i]; } return (long) Math.ceil((double) sum / elements); } /** * @return the total number of non-zero values */ public long count() { long sum = 0L; for (int i = 0; i < buckets.length(); i++) sum += buckets.get(i); return sum; } /** * @return true if this histogram has overflowed -- that is, a value larger than our largest bucket could bound was added */ public boolean isOverflowed() { return buckets.get(buckets.length() - 1) > 0; } /** * log.debug() every record in the histogram * * @param log */ public void log(Logger log) 
{ // only print overflow if there is any int nameCount; if (buckets.get(buckets.length() - 1) == 0) nameCount = buckets.length() - 1; else nameCount = buckets.length(); String[] names = new String[nameCount]; int maxNameLength = 0; for (int i = 0; i < nameCount; i++) { names[i] = nameOfRange(bucketOffsets, i); maxNameLength = Math.max(maxNameLength, names[i].length()); } // emit log records String formatstr = "%" + maxNameLength + "s: %d"; for (int i = 0; i < nameCount; i++) { long count = buckets.get(i); // sort-of-hack to not print empty ranges at the start that are only used to demarcate the // first populated range. for code clarity we don't omit this record from the maxNameLength // calculation, and accept the unnecessary whitespace prefixes that will occasionally occur if (i == 0 && count == 0) continue; log.debug(String.format(formatstr, names[i], count)); } } private static String nameOfRange(long[] bucketOffsets, int index) { StringBuilder sb = new StringBuilder(); appendRange(sb, bucketOffsets, index); return sb.toString(); } private static void appendRange(StringBuilder sb, long[] bucketOffsets, int index) { sb.append("["); if (index == 0) if (bucketOffsets[0] > 0) // by original definition, this histogram is for values greater than zero only; // if values of 0 or less are required, an entry of lb-1 must be inserted at the start sb.append("1"); else sb.append("-Inf"); else sb.append(bucketOffsets[index - 1] + 1); sb.append(".."); if (index == bucketOffsets.length) sb.append("Inf"); else sb.append(bucketOffsets[index]); sb.append("]"); } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof EstimatedHistogram)) return false; EstimatedHistogram that = (EstimatedHistogram) o; return Arrays.equals(getBucketOffsets(), that.getBucketOffsets()) && Arrays.equals(getBuckets(false), that.getBuckets(false)); } @Override public int hashCode() { return Objects.hash(getBucketOffsets(), getBuckets(false)); } }
5,142
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/TransportStatsMonitor.java
/** * Copyright 2017 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.monitoring; import com.google.inject.Inject; import com.google.inject.Singleton; import com.netflix.raigad.configuration.IConfiguration; import com.netflix.raigad.scheduler.SimpleTimer; import com.netflix.raigad.scheduler.Task; import com.netflix.raigad.scheduler.TaskTimer; import com.netflix.raigad.utils.ElasticsearchProcessMonitor; import com.netflix.raigad.utils.ElasticsearchTransportClient; import com.netflix.servo.annotations.DataSourceType; import com.netflix.servo.annotations.Monitor; import com.netflix.servo.monitor.Monitors; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.transport.TransportStats; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.List; import java.util.concurrent.atomic.AtomicReference; @Singleton public class TransportStatsMonitor extends Task { private static final Logger logger = LoggerFactory.getLogger(TransportStatsMonitor.class); public static final String METRIC_NAME = "Elasticsearch_TransportMonitor"; private final Elasticsearch_TransportStatsReporter transportStatsReporter; @Inject public TransportStatsMonitor(IConfiguration config) { super(config); transportStatsReporter = new Elasticsearch_TransportStatsReporter(); Monitors.registerObject(transportStatsReporter); } @Override 
public void execute() throws Exception { // If Elasticsearch is started then only start the monitoring if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) { String exceptionMsg = "Elasticsearch is not yet started, check back again later"; logger.info(exceptionMsg); return; } TransportStatsBean transportStatsBean = new TransportStatsBean(); try { NodesStatsResponse nodesStatsResponse = ElasticsearchTransportClient.getNodesStatsResponse(config); NodeStats nodeStats = null; List<NodeStats> nodeStatsList = nodesStatsResponse.getNodes(); if (nodeStatsList.size() > 0) { nodeStats = nodeStatsList.get(0); } if (nodeStats == null) { logger.info("Transport stats are not available (node stats is not available)"); return; } TransportStats transportStats = nodeStats.getTransport(); if (transportStats == null) { logger.info("Transport stats are not available"); return; } transportStatsBean.serverOpen = transportStats.getServerOpen(); transportStatsBean.rxCount = transportStats.getRxCount(); transportStatsBean.rxSize = transportStats.getRxSize().getBytes(); transportStatsBean.rxSizeDelta = transportStats.getRxSize().getBytes() - transportStatsBean.rxSize; transportStatsBean.txCount = transportStats.getTxCount(); transportStatsBean.txSize = transportStats.getTxSize().getBytes(); transportStatsBean.txSizeDelta = transportStats.getTxSize().getBytes() - transportStatsBean.txSize; } catch (Exception e) { logger.warn("Failed to load transport stats data", e); } transportStatsReporter.transportStatsBean.set(transportStatsBean); } public class Elasticsearch_TransportStatsReporter { private final AtomicReference<TransportStatsBean> transportStatsBean; public Elasticsearch_TransportStatsReporter() { transportStatsBean = new AtomicReference<TransportStatsBean>(new TransportStatsBean()); } @Monitor(name = "server_open", type = DataSourceType.GAUGE) public long getServerOpen() { return transportStatsBean.get().serverOpen; } @Monitor(name = "rx_count", type = DataSourceType.GAUGE) 
public long getRxCount() { return transportStatsBean.get().rxCount; } @Monitor(name = "rx_size", type = DataSourceType.GAUGE) public long getRxSize() { return transportStatsBean.get().rxSize; } @Monitor(name = "rx_size_delta", type = DataSourceType.GAUGE) public long getRxSizeDelta() { return transportStatsBean.get().rxSizeDelta; } @Monitor(name = "tx_count", type = DataSourceType.GAUGE) public long getTxCount() { return transportStatsBean.get().txCount; } @Monitor(name = "tx_size", type = DataSourceType.GAUGE) public long getTxSize() { return transportStatsBean.get().txSize; } @Monitor(name = "tx_size_delta", type = DataSourceType.GAUGE) public long getTxSizeDelta() { return transportStatsBean.get().txSizeDelta; } } private static class TransportStatsBean { private long serverOpen; private long rxCount; private long rxSize; private long rxSizeDelta; private long txCount; private long txSize; private long txSizeDelta; } public static TaskTimer getTimer(String name) { return new SimpleTimer(name, 60 * 1000); } @Override public String getName() { return METRIC_NAME; } }
5,143
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/NodeIndicesStatsMonitor.java
/** * Copyright 2017 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.monitoring; import com.google.inject.Inject; import com.google.inject.Singleton; import com.netflix.raigad.configuration.IConfiguration; import com.netflix.raigad.scheduler.SimpleTimer; import com.netflix.raigad.scheduler.Task; import com.netflix.raigad.scheduler.TaskTimer; import com.netflix.raigad.utils.ElasticsearchProcessMonitor; import com.netflix.raigad.utils.ElasticsearchTransportClient; import com.netflix.servo.annotations.DataSourceType; import com.netflix.servo.annotations.Monitor; import com.netflix.servo.monitor.Monitors; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.indices.NodeIndicesStats; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.List; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; /** * Note: percentiles over average latencies * <p> * Currently ES provides only cumulative query & index time along with cumulative query & index count. * Hence percentile values are calculated based on the average between consecutive time * (t1 & t2, t2 & t3, ... , tn-1 & tn) of metrics collection. 
*/ @Singleton public class NodeIndicesStatsMonitor extends Task { private static final Logger logger = LoggerFactory.getLogger(NodeIndicesStatsMonitor.class); public static final String METRIC_NAME = "Elasticsearch_NodeIndicesMonitor"; private final Elasticsearch_NodeIndicesStatsReporter nodeIndicesStatsReporter; private final EstimatedHistogram latencySearchQuery95Histo = new EstimatedHistogram(); private final EstimatedHistogram latencySearchQuery99Histo = new EstimatedHistogram(); private final EstimatedHistogram latencySearchFetch95Histo = new EstimatedHistogram(); private final EstimatedHistogram latencySearchFetch99Histo = new EstimatedHistogram(); private final EstimatedHistogram latencyGet95Histo = new EstimatedHistogram(); private final EstimatedHistogram latencyGet99Histo = new EstimatedHistogram(); private final EstimatedHistogram latencyGetExists95Histo = new EstimatedHistogram(); private final EstimatedHistogram latencyGetExists99Histo = new EstimatedHistogram(); private final EstimatedHistogram latencyGetMissing95Histo = new EstimatedHistogram(); private final EstimatedHistogram latencyGetMissing99Histo = new EstimatedHistogram(); private final EstimatedHistogram latencyIndexing95Histo = new EstimatedHistogram(); private final EstimatedHistogram latencyIndexing99Histo = new EstimatedHistogram(); private final EstimatedHistogram latencyIndexDelete95Histo = new EstimatedHistogram(); private final EstimatedHistogram latencyIndexDelete99Histo = new EstimatedHistogram(); private final double PERCENTILE_95 = 0.95; private final double PERCENTILE_99 = 0.99; private long cachedQueryCount; private long cachedFetchCount; private long cachedGetCount; private long cachedGetExistsCount; private long cachedGetMissingCount; private long cachedIndexingIndexTotal; private long cachedIndexingDeleteTotal; private long cachedSearchQueryTime; private long cachedSearchFetchTime; private long cachedGetTime; private long cachedGetExistsTime; private long 
cachedGetMissingTime; private long cachedIndexingTime; private long cachedIndexDeleteTime; @Inject public NodeIndicesStatsMonitor(IConfiguration config) { super(config); nodeIndicesStatsReporter = new Elasticsearch_NodeIndicesStatsReporter(); Monitors.registerObject(nodeIndicesStatsReporter); } public static TaskTimer getTimer(String name) { return new SimpleTimer(name, 60 * 1000); } @Override public void execute() throws Exception { // Only start monitoring if Elasticsearch is started if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) { String exceptionMsg = "Elasticsearch is not yet started, check back again later"; logger.info(exceptionMsg); return; } NodeIndicesStatsBean nodeIndicesStatsBean = new NodeIndicesStatsBean(); try { NodesStatsResponse nodesStatsResponse = ElasticsearchTransportClient.getNodesStatsResponse(config); NodeStats nodeStats = null; List<NodeStats> nodeStatsList = nodesStatsResponse.getNodes(); if (nodeStatsList.size() > 0) { nodeStats = nodeStatsList.get(0); } if (nodeStats == null) { logger.info("Node indices stats is not available (node stats is not available)"); return; } NodeIndicesStats nodeIndicesStats = nodeStats.getIndices(); if (nodeIndicesStats == null) { logger.info("Node indices stats is not available"); return; } updateStoreDocs(nodeIndicesStatsBean, nodeIndicesStats); updateRefreshFlush(nodeIndicesStatsBean, nodeIndicesStats); updateMerge(nodeIndicesStatsBean, nodeIndicesStats); updateCache(nodeIndicesStatsBean, nodeIndicesStats); updateSearch(nodeIndicesStatsBean, nodeIndicesStats); updateGet(nodeIndicesStatsBean, nodeIndicesStats); updateIndexing(nodeIndicesStatsBean, nodeIndicesStats); } catch (Exception e) { logger.warn("Failed to load indices stats data", e); } nodeIndicesStatsReporter.nodeIndicesStatsBean.set(nodeIndicesStatsBean); } private void updateStoreDocs(NodeIndicesStatsBean nodeIndicesStatsBean, NodeIndicesStats nodeIndicesStats) { nodeIndicesStatsBean.storeSize = 
nodeIndicesStats.getStore().getSizeInBytes(); nodeIndicesStatsBean.storeThrottleTime = nodeIndicesStats.getStore().getThrottleTime().millis(); nodeIndicesStatsBean.docsCount = nodeIndicesStats.getDocs().getCount(); nodeIndicesStatsBean.docsDeleted = nodeIndicesStats.getDocs().getDeleted(); } private void updateRefreshFlush(NodeIndicesStatsBean nodeIndicesStatsBean, NodeIndicesStats nodeIndicesStats) { nodeIndicesStatsBean.refreshTotal = nodeIndicesStats.getRefresh().getTotal(); nodeIndicesStatsBean.refreshTotalTime = nodeIndicesStats.getRefresh().getTotalTimeInMillis(); if (nodeIndicesStatsBean.refreshTotal != 0) { nodeIndicesStatsBean.refreshAvgTimeInMillisPerRequest = nodeIndicesStatsBean.refreshTotalTime / nodeIndicesStatsBean.refreshTotal; } nodeIndicesStatsBean.flushTotal = nodeIndicesStats.getFlush().getTotal(); nodeIndicesStatsBean.flushTotalTime = nodeIndicesStats.getFlush().getTotalTimeInMillis(); if (nodeIndicesStatsBean.flushTotal != 0) { nodeIndicesStatsBean.flushAvgTimeInMillisPerRequest = nodeIndicesStatsBean.flushTotalTime / nodeIndicesStatsBean.flushTotal; } } private void updateMerge(NodeIndicesStatsBean nodeIndicesStatsBean, NodeIndicesStats nodeIndicesStats) { nodeIndicesStatsBean.mergesCurrent = nodeIndicesStats.getMerge().getCurrent(); nodeIndicesStatsBean.mergesCurrentDocs = nodeIndicesStats.getMerge().getCurrentNumDocs(); nodeIndicesStatsBean.mergesCurrentSize = nodeIndicesStats.getMerge().getCurrentSizeInBytes(); nodeIndicesStatsBean.mergesTotal = nodeIndicesStats.getMerge().getTotal(); nodeIndicesStatsBean.mergesTotalTime = nodeIndicesStats.getMerge().getTotalTimeInMillis(); nodeIndicesStatsBean.mergesTotalSize = nodeIndicesStats.getMerge().getTotalSizeInBytes(); } private void updateCache(NodeIndicesStatsBean nodeIndicesStatsBean, NodeIndicesStats nodeIndicesStats) { nodeIndicesStatsBean.cacheFieldEvictions = nodeIndicesStats.getFieldData().getEvictions(); nodeIndicesStatsBean.cacheFieldSize = 
nodeIndicesStats.getFieldData().getMemorySizeInBytes(); } private void updateSearch(NodeIndicesStatsBean nodeIndicesStatsBean, NodeIndicesStats nodeIndicesStats) { nodeIndicesStatsBean.searchQueryTotal = nodeIndicesStats.getSearch().getTotal().getQueryCount(); nodeIndicesStatsBean.searchFetchTotal = nodeIndicesStats.getSearch().getTotal().getFetchCount(); nodeIndicesStatsBean.searchQueryCurrent = nodeIndicesStats.getSearch().getTotal().getQueryCurrent(); long tmpSearchQueryDelta = nodeIndicesStatsBean.searchQueryTotal - cachedQueryCount; nodeIndicesStatsBean.searchQueryDelta = tmpSearchQueryDelta < 0 ? 0 : tmpSearchQueryDelta; long tmpSearchFetchDelta = nodeIndicesStatsBean.searchFetchTotal - cachedFetchCount; nodeIndicesStatsBean.searchFetchDelta = tmpSearchFetchDelta < 0 ? 0 : tmpSearchFetchDelta; nodeIndicesStatsBean.searchQueryTime = nodeIndicesStats.getSearch().getTotal().getQueryTimeInMillis(); nodeIndicesStatsBean.searchFetchTime = nodeIndicesStats.getSearch().getTotal().getFetchTimeInMillis(); long searchQueryDeltaTimeInMillis = (nodeIndicesStatsBean.searchQueryTime - cachedSearchQueryTime); if (nodeIndicesStatsBean.searchQueryDelta != 0) { recordSearchQueryLatencies(searchQueryDeltaTimeInMillis / nodeIndicesStatsBean.searchQueryDelta, TimeUnit.MILLISECONDS); nodeIndicesStatsBean.latencySearchQuery95 = latencySearchQuery95Histo.percentile(PERCENTILE_95); nodeIndicesStatsBean.latencySearchQuery99 = latencySearchQuery99Histo.percentile(PERCENTILE_99); } else { nodeIndicesStatsBean.latencySearchQuery95 = 0; nodeIndicesStatsBean.latencySearchQuery99 = 0; } if (nodeIndicesStatsBean.searchQueryTotal != 0) { nodeIndicesStatsBean.searchQueryAvgTimeInMillisPerRequest = nodeIndicesStatsBean.searchQueryTime / nodeIndicesStatsBean.searchQueryTotal; } long searchFetchDeltaTimeInMillis = (nodeIndicesStatsBean.searchFetchTime - cachedSearchFetchTime); if (nodeIndicesStatsBean.searchFetchDelta != 0) { recordSearchFetchLatencies(searchFetchDeltaTimeInMillis / 
// NOTE(review): this span is the tail of NodeIndicesStatsMonitor; the class header,
// its cached* counter fields, the latency histograms and the first half of
// updateSearch() are above this chunk. The fragment below completes the call
// recordSearchFetchLatencies(searchFetchDeltaTimeInMillis / ..., ...) opened on the
// previous (unseen) line.
nodeIndicesStatsBean.searchFetchDelta, TimeUnit.MILLISECONDS);
nodeIndicesStatsBean.latencySearchFetch95 = latencySearchFetch95Histo.percentile(PERCENTILE_95);
nodeIndicesStatsBean.latencySearchFetch99 = latencySearchFetch99Histo.percentile(PERCENTILE_99);
} else {
    // No fetches since the last poll: publish 0 instead of a stale percentile
    nodeIndicesStatsBean.latencySearchFetch95 = 0;
    nodeIndicesStatsBean.latencySearchFetch99 = 0;
}
if (nodeIndicesStatsBean.searchFetchTotal != 0) {
    // long/long division: average is truncated to whole milliseconds before widening
    nodeIndicesStatsBean.searchFetchAvgTimeInMillisPerRequest =
            nodeIndicesStatsBean.searchFetchTime / nodeIndicesStatsBean.searchFetchTotal;
}
nodeIndicesStatsBean.searchFetchCurrent = nodeIndicesStats.getSearch().getTotal().getFetchCurrent();
// Roll this iteration's deltas into the cached cumulative totals so the next
// poll computes fresh deltas against them.
cachedQueryCount += nodeIndicesStatsBean.searchQueryDelta;
cachedFetchCount += nodeIndicesStatsBean.searchFetchDelta;
cachedSearchQueryTime += searchQueryDeltaTimeInMillis;
cachedSearchFetchTime += searchFetchDeltaTimeInMillis;
}

/**
 * Copies GET statistics (total/exists/missing counts and times) from the node
 * stats into the bean, derives per-poll deltas against the cached cumulative
 * counters (clamped at 0 so a node restart never yields negative deltas), and
 * updates the 95th/99th percentile GET latency histograms.
 */
private void updateGet(NodeIndicesStatsBean nodeIndicesStatsBean, NodeIndicesStats nodeIndicesStats) {
    nodeIndicesStatsBean.getTotal = nodeIndicesStats.getGet().getCount();
    nodeIndicesStatsBean.getExistsTotal = nodeIndicesStats.getGet().getExistsCount();
    nodeIndicesStatsBean.getMissingTotal = nodeIndicesStats.getGet().getMissingCount();
    nodeIndicesStatsBean.getTime = nodeIndicesStats.getGet().getTimeInMillis();
    nodeIndicesStatsBean.getExistsTime = nodeIndicesStats.getGet().getExistsTimeInMillis();
    nodeIndicesStatsBean.getMissingTime = nodeIndicesStats.getGet().getMissingTimeInMillis();
    // Deltas are clamped to >= 0 (counters reset when the ES process restarts)
    long tmpGetTotalDelta = nodeIndicesStatsBean.getTotal - cachedGetCount;
    nodeIndicesStatsBean.getTotalDelta = tmpGetTotalDelta < 0 ? 0 : tmpGetTotalDelta;
    long tmpGetExistsDelta = nodeIndicesStatsBean.getExistsTotal - cachedGetExistsCount;
    nodeIndicesStatsBean.getExistsDelta = tmpGetExistsDelta < 0 ? 0 : tmpGetExistsDelta;
    long tmpGetMissingDelta = nodeIndicesStatsBean.getMissingTotal - cachedGetMissingCount;
    nodeIndicesStatsBean.getMissingDelta = tmpGetMissingDelta < 0 ? 0 : tmpGetMissingDelta;
    long getDeltaTimeInMillis = (nodeIndicesStatsBean.getTime - cachedGetTime);
    if (nodeIndicesStatsBean.getTotalDelta != 0) {
        // Feed the mean per-request latency of this interval into the histograms
        recordGetLatencies(getDeltaTimeInMillis / nodeIndicesStatsBean.getTotalDelta, TimeUnit.MILLISECONDS);
        nodeIndicesStatsBean.latencyGet95 = latencyGet95Histo.percentile(PERCENTILE_95);
        nodeIndicesStatsBean.latencyGet99 = latencyGet99Histo.percentile(PERCENTILE_99);
    } else {
        nodeIndicesStatsBean.latencyGet95 = 0;
        nodeIndicesStatsBean.latencyGet99 = 0;
    }
    if (nodeIndicesStatsBean.getTotal != 0)
        nodeIndicesStatsBean.getTotalAvgTimeInMillisPerRequest = nodeIndicesStatsBean.getTime / nodeIndicesStatsBean.getTotal;
    nodeIndicesStatsBean.getCurrent = nodeIndicesStats.getGet().current();
    // "Millies" is a historical typo in these local names; kept as-is here
    long getExistsDeltaTimeInMillies = (nodeIndicesStatsBean.getExistsTime - cachedGetExistsTime);
    if (nodeIndicesStatsBean.getExistsDelta != 0) {
        recordGetExistsLatencies(getExistsDeltaTimeInMillies / nodeIndicesStatsBean.getExistsDelta, TimeUnit.MILLISECONDS);
        nodeIndicesStatsBean.latencyGetExists95 = latencyGetExists95Histo.percentile(PERCENTILE_95);
        nodeIndicesStatsBean.latencyGetExists99 = latencyGetExists99Histo.percentile(PERCENTILE_99);
    } else {
        nodeIndicesStatsBean.latencyGetExists95 = 0;
        nodeIndicesStatsBean.latencyGetExists99 = 0;
    }
    if (nodeIndicesStatsBean.getExistsTotal != 0)
        nodeIndicesStatsBean.getExistsAvgTimeInMillisPerRequest = nodeIndicesStatsBean.getExistsTime / nodeIndicesStatsBean.getExistsTotal;
    long getMissingDeltaTimeInMillies = (nodeIndicesStatsBean.getMissingTime - cachedGetMissingTime);
    if (nodeIndicesStatsBean.getMissingDelta != 0) {
        recordGetMissingLatencies(getMissingDeltaTimeInMillies / nodeIndicesStatsBean.getMissingDelta, TimeUnit.MILLISECONDS);
        nodeIndicesStatsBean.latencyGetMissing95 = latencyGetMissing95Histo.percentile(PERCENTILE_95);
        nodeIndicesStatsBean.latencyGetMissing99 = latencyGetMissing99Histo.percentile(PERCENTILE_99);
    } else {
        nodeIndicesStatsBean.latencyGetMissing95 = 0;
        nodeIndicesStatsBean.latencyGetMissing99 = 0;
    }
    if (nodeIndicesStatsBean.getMissingTotal != 0) {
        nodeIndicesStatsBean.getMissingAvgTimeInMillisPerRequest = nodeIndicesStatsBean.getMissingTime / nodeIndicesStatsBean.getMissingTotal;
    }
    // Advance the cached cumulative counters AFTER all deltas are consumed;
    // this ordering is what makes the next poll's deltas correct.
    cachedGetCount += nodeIndicesStatsBean.getTotalDelta;
    cachedGetExistsCount += nodeIndicesStatsBean.getExistsDelta;
    cachedGetMissingCount += nodeIndicesStatsBean.getMissingDelta;
    cachedGetTime += getDeltaTimeInMillis;
    cachedGetExistsTime += getExistsDeltaTimeInMillies;
    cachedGetMissingTime += getMissingDeltaTimeInMillies;
}

/**
 * Copies indexing and delete statistics from the node stats into the bean,
 * derives per-poll deltas (clamped at 0 against counter resets), and updates
 * the 95th/99th percentile indexing/delete latency histograms.
 */
private void updateIndexing(NodeIndicesStatsBean nodeIndicesStatsBean, NodeIndicesStats nodeIndicesStats) {
    nodeIndicesStatsBean.indexingIndexTotal = nodeIndicesStats.getIndexing().getTotal().getIndexCount();
    nodeIndicesStatsBean.indexingDeleteTotal = nodeIndicesStats.getIndexing().getTotal().getDeleteCount();
    nodeIndicesStatsBean.indexingIndexCurrent = nodeIndicesStats.getIndexing().getTotal().getIndexCurrent();
    long tmpIndexingIndexDelta = (nodeIndicesStatsBean.indexingIndexTotal - cachedIndexingIndexTotal);
    nodeIndicesStatsBean.indexingIndexDelta = tmpIndexingIndexDelta < 0 ? 0 : tmpIndexingIndexDelta;
    long tmpIndexingDeleteDelta = (nodeIndicesStatsBean.indexingDeleteTotal - cachedIndexingDeleteTotal);
    nodeIndicesStatsBean.indexingDeleteDelta = tmpIndexingDeleteDelta < 0 ? 0 : tmpIndexingDeleteDelta;
    nodeIndicesStatsBean.indexingIndexTimeInMillis = nodeIndicesStats.getIndexing().getTotal().getIndexTime().getMillis();
    nodeIndicesStatsBean.indexingDeleteTime = nodeIndicesStats.getIndexing().getTotal().getDeleteTime().getMillis();
    long indexingTimeInMillis = (nodeIndicesStatsBean.indexingIndexTimeInMillis - cachedIndexingTime);
    if (nodeIndicesStatsBean.indexingIndexDelta != 0) {
        recordIndexingLatencies(indexingTimeInMillis / nodeIndicesStatsBean.indexingIndexDelta, TimeUnit.MILLISECONDS);
        nodeIndicesStatsBean.latencyIndexing95 = latencyIndexing95Histo.percentile(PERCENTILE_95);
        nodeIndicesStatsBean.latencyIndexing99 = latencyIndexing99Histo.percentile(PERCENTILE_99);
    } else {
        nodeIndicesStatsBean.latencyIndexing95 = 0;
        nodeIndicesStatsBean.latencyIndexing99 = 0;
    }
    if (nodeIndicesStatsBean.indexingIndexTotal != 0) {
        nodeIndicesStatsBean.indexingAvgTimeInMillisPerRequest = nodeIndicesStatsBean.indexingIndexTimeInMillis / nodeIndicesStatsBean.indexingIndexTotal;
    }
    long indexDeleteTimeInMillis = (nodeIndicesStatsBean.indexingDeleteTime - cachedIndexDeleteTime);
    if (nodeIndicesStatsBean.indexingDeleteDelta != 0) {
        recordIndexDeleteLatencies(indexDeleteTimeInMillis / nodeIndicesStatsBean.indexingDeleteDelta, TimeUnit.MILLISECONDS);
        nodeIndicesStatsBean.latencyIndexDelete95 = latencyIndexDelete95Histo.percentile(PERCENTILE_95);
        nodeIndicesStatsBean.latencyIndexDelete99 = latencyIndexDelete99Histo.percentile(PERCENTILE_99);
    } else {
        nodeIndicesStatsBean.latencyIndexDelete95 = 0;
        nodeIndicesStatsBean.latencyIndexDelete99 = 0;
    }
    if (nodeIndicesStatsBean.indexingDeleteTotal != 0) {
        nodeIndicesStatsBean.indexingDeleteAvgTimeInMillisPerRequest = nodeIndicesStatsBean.indexingDeleteTime / nodeIndicesStatsBean.indexingDeleteTotal;
    }
    nodeIndicesStatsBean.indexingDeleteCurrent = nodeIndicesStats.getIndexing().getTotal().getDeleteCurrent();
    // Advance cached cumulative counters last (see updateGet for why)
    cachedIndexingIndexTotal += nodeIndicesStatsBean.indexingIndexDelta;
    cachedIndexingDeleteTotal += nodeIndicesStatsBean.indexingDeleteDelta;
    cachedIndexingTime += indexingTimeInMillis;
    cachedIndexDeleteTime += indexDeleteTimeInMillis;
}

// Each record*Latencies helper converts the sampled duration to microseconds
// and feeds it into both the 95th- and 99th-percentile histograms.

private void recordSearchQueryLatencies(long duration, TimeUnit unit) {
    long searchQueryLatency = TimeUnit.MICROSECONDS.convert(duration, unit);
    latencySearchQuery95Histo.add(searchQueryLatency);
    latencySearchQuery99Histo.add(searchQueryLatency);
}

private void recordSearchFetchLatencies(long duration, TimeUnit unit) {
    long fetchQueryLatency = TimeUnit.MICROSECONDS.convert(duration, unit);
    latencySearchFetch95Histo.add(fetchQueryLatency);
    latencySearchFetch99Histo.add(fetchQueryLatency);
}

private void recordGetLatencies(long duration, TimeUnit unit) {
    long getLatency = TimeUnit.MICROSECONDS.convert(duration, unit);
    latencyGet95Histo.add(getLatency);
    latencyGet99Histo.add(getLatency);
}

private void recordGetExistsLatencies(long duration, TimeUnit unit) {
    long getExistsLatency = TimeUnit.MICROSECONDS.convert(duration, unit);
    latencyGetExists95Histo.add(getExistsLatency);
    latencyGetExists99Histo.add(getExistsLatency);
}

private void recordGetMissingLatencies(long duration, TimeUnit unit) {
    long getMissingLatency = TimeUnit.MICROSECONDS.convert(duration, unit);
    latencyGetMissing95Histo.add(getMissingLatency);
    latencyGetMissing99Histo.add(getMissingLatency);
}

private void recordIndexingLatencies(long duration, TimeUnit unit) {
    long indexingLatency = TimeUnit.MICROSECONDS.convert(duration, unit);
    latencyIndexing95Histo.add(indexingLatency);
    latencyIndexing99Histo.add(indexingLatency);
}

private void recordIndexDeleteLatencies(long duration, TimeUnit unit) {
    long indexDeleteLatency = TimeUnit.MICROSECONDS.convert(duration, unit);
    latencyIndexDelete95Histo.add(indexDeleteLatency);
    latencyIndexDelete99Histo.add(indexDeleteLatency);
}

/** Task name used by the scheduler; also the metric namespace. */
@Override
public String getName() {
    return METRIC_NAME;
}

/**
 * Plain value-holder for one polling iteration. A fully populated instance is
 * swapped into the reporter's AtomicReference so Servo getters always read a
 * consistent snapshot. Fields are documented by the reporter's @Monitor getters.
 */
private static class NodeIndicesStatsBean {
    // Store / docs
    private long storeSize;
    private long storeThrottleTime;
    private long docsCount;
    private long docsDeleted;
    // Indexing
    private long indexingIndexTotal;
    private long indexingIndexTimeInMillis;
    private double indexingAvgTimeInMillisPerRequest;
    private long indexingIndexCurrent;
    private long indexingDeleteTotal;
    private long indexingDeleteTime;
    private double indexingDeleteAvgTimeInMillisPerRequest;
    private long indexingDeleteCurrent;
    private long indexingIndexDelta;
    private long indexingDeleteDelta;
    // Get
    private long getTotal;
    private long getTime;
    private double getTotalAvgTimeInMillisPerRequest;
    private long getCurrent;
    private long getExistsTotal;
    private long getExistsTime;
    private double getExistsAvgTimeInMillisPerRequest;
    private long getMissingTotal;
    private long getMissingTime;
    private double getMissingAvgTimeInMillisPerRequest;
    private long getTotalDelta;
    private long getExistsDelta;
    private long getMissingDelta;
    // Search
    private long searchQueryTotal;
    private long searchQueryTime;
    private double searchQueryAvgTimeInMillisPerRequest;
    private long searchQueryCurrent;
    private long searchQueryDelta;
    private long searchFetchTotal;
    private long searchFetchTime;
    private double searchFetchAvgTimeInMillisPerRequest;
    private long searchFetchCurrent;
    private long searchFetchDelta;
    // Caches
    private long cacheFieldEvictions;
    private long cacheFieldSize;
    private long cacheFilterEvictions;
    private long cacheFilterSize;
    // Merges
    private long mergesCurrent;
    private long mergesCurrentDocs;
    private long mergesCurrentSize;
    private long mergesTotal;
    private long mergesTotalTime;
    private long mergesTotalSize;
    // Refresh / flush
    private long refreshTotal;
    private long refreshTotalTime;
    private double refreshAvgTimeInMillisPerRequest;
    private long flushTotal;
    private long flushTotalTime;
    private double flushAvgTimeInMillisPerRequest;
    // Percentile latencies (microseconds, from the histograms)
    private double latencySearchQuery95;
    private double latencySearchQuery99;
    private double latencySearchFetch95;
    private double latencySearchFetch99;
    private double latencyGet95;
    private double latencyGet99;
    private double latencyGetExists95;
    private double latencyGetExists99;
    private double latencyGetMissing95;
    private double latencyGetMissing99;
    private double latencyIndexing95;
    private double latencyIndexing99;
    private double latencyIndexDelete95;
    private double latencyIndexDelete99;
}

/**
 * Servo-facing reporter: each @Monitor getter reads the latest bean snapshot.
 * Registered once via Monitors.registerObject; metric names below are the
 * published, externally-consumed names and must not change.
 */
public class Elasticsearch_NodeIndicesStatsReporter {
    private final AtomicReference<NodeIndicesStatsBean> nodeIndicesStatsBean;

    public Elasticsearch_NodeIndicesStatsReporter() {
        nodeIndicesStatsBean = new AtomicReference<NodeIndicesStatsBean>(new NodeIndicesStatsBean());
    }

    @Monitor(name = "store_size", type = DataSourceType.GAUGE) public long getStoreSize() { return nodeIndicesStatsBean.get().storeSize; }
    @Monitor(name = "store_throttle_time", type = DataSourceType.GAUGE) public long getStoreThrottleTime() { return nodeIndicesStatsBean.get().storeThrottleTime; }
    @Monitor(name = "docs_count", type = DataSourceType.GAUGE) public long getDocsCount() { return nodeIndicesStatsBean.get().docsCount; }
    @Monitor(name = "docs_deleted", type = DataSourceType.GAUGE) public long getDocsDeleted() { return nodeIndicesStatsBean.get().docsDeleted; }

    //Indexing
    @Monitor(name = "indexing_index_total", type = DataSourceType.COUNTER) public long getIndexingIndexTotal() { return nodeIndicesStatsBean.get().indexingIndexTotal; }
    @Monitor(name = "indexing_index_time_in_millis", type = DataSourceType.COUNTER) public long getIndexingIndexTimeInMillis() { return nodeIndicesStatsBean.get().indexingIndexTimeInMillis; }
    @Monitor(name = "indexing_avg_time_in_millis_per_request", type = DataSourceType.GAUGE) public double getIndexingAvgTimeInMillisPerRequest() { return nodeIndicesStatsBean.get().indexingAvgTimeInMillisPerRequest; }
    @Monitor(name = "indexing_index_current", type = DataSourceType.GAUGE) public long getIndexingIndexCurrent() { return nodeIndicesStatsBean.get().indexingIndexCurrent; }
    @Monitor(name = "indexing_delete_total", type = DataSourceType.COUNTER) public long getIndexingDeleteTotal() { return nodeIndicesStatsBean.get().indexingDeleteTotal; }
    @Monitor(name = "indexing_delete_time", type = DataSourceType.COUNTER) public long getIndexingDeleteTime() { return nodeIndicesStatsBean.get().indexingDeleteTime; }
    @Monitor(name = "indexing_delete_avg_time_in_millis_per_request", type = DataSourceType.GAUGE) public double getIndexingDeleteAvgTimeInMillisPerRequest() { return nodeIndicesStatsBean.get().indexingDeleteAvgTimeInMillisPerRequest; }
    @Monitor(name = "indexing_delete_current", type = DataSourceType.GAUGE) public long getIndexingDeleteCurrent() { return nodeIndicesStatsBean.get().indexingDeleteCurrent; }
    @Monitor(name = "indexing_index_delta", type = DataSourceType.GAUGE) public long getIndexingIndexDelta() { return nodeIndicesStatsBean.get().indexingIndexDelta; }
    @Monitor(name = "indexing_delete_delta", type = DataSourceType.GAUGE) public long getIndexingDeleteDelta() { return nodeIndicesStatsBean.get().indexingDeleteDelta; }

    //Get
    @Monitor(name = "get_total", type = DataSourceType.COUNTER) public long getGetTotal() { return nodeIndicesStatsBean.get().getTotal; }
    @Monitor(name = "get_time", type = DataSourceType.COUNTER) public long getGetTime() { return nodeIndicesStatsBean.get().getTime; }
    @Monitor(name = "total_avg_time_in_millis_per_request", type = DataSourceType.GAUGE) public double getTotalAvgTimeInMillisPerRequest() { return nodeIndicesStatsBean.get().getTotalAvgTimeInMillisPerRequest; }
    @Monitor(name = "get_current", type = DataSourceType.GAUGE) public long getGetCurrent() { return nodeIndicesStatsBean.get().getCurrent; }
    @Monitor(name = "get_exists_total", type = DataSourceType.COUNTER) public long getGetExistsTotal() { return nodeIndicesStatsBean.get().getExistsTotal; }
    @Monitor(name = "get_exists_time", type = DataSourceType.COUNTER) public long getGetExistsTime() { return nodeIndicesStatsBean.get().getExistsTime; }
    @Monitor(name = "exists_avg_time_in_millis_per_request", type = DataSourceType.GAUGE) public double getExistsAvgTimeInMillisPerRequest() { return nodeIndicesStatsBean.get().getExistsAvgTimeInMillisPerRequest; }
    @Monitor(name = "get_missing_total", type = DataSourceType.COUNTER) public long getGetMissingTotal() { return nodeIndicesStatsBean.get().getMissingTotal; }
    @Monitor(name = "get_missing_time", type = DataSourceType.COUNTER) public long getGetMissingTime() { return nodeIndicesStatsBean.get().getMissingTime; }
    @Monitor(name = "missing_avg_time_in_millis_per_request", type = DataSourceType.GAUGE) public double getMissingAvgTimeInMillisPerRequest() { return nodeIndicesStatsBean.get().getMissingAvgTimeInMillisPerRequest; }

    //Search
    @Monitor(name = "get_total_delta", type = DataSourceType.GAUGE) public long getGetTotalDelta() { return nodeIndicesStatsBean.get().getTotalDelta; }
    @Monitor(name = "get_exists_delta", type = DataSourceType.GAUGE) public long getGetExistsDelta() { return nodeIndicesStatsBean.get().getExistsDelta; }
    @Monitor(name = "get_missing_delta", type = DataSourceType.GAUGE) public long getGetMissingDelta() { return nodeIndicesStatsBean.get().getMissingDelta; }
    @Monitor(name = "search_query_total", type = DataSourceType.COUNTER) public long getSearchQueryTotal() { return nodeIndicesStatsBean.get().searchQueryTotal; }
    @Monitor(name = "search_query_time", type = DataSourceType.COUNTER) public long getSearchQueryTime() { return nodeIndicesStatsBean.get().searchQueryTime; }
    @Monitor(name = "search_query_current", type = DataSourceType.GAUGE) public long getSearchQueryCurrent() { return nodeIndicesStatsBean.get().searchQueryCurrent; }
    @Monitor(name = "search_query_avg_time_in_millis_per_request", type = DataSourceType.GAUGE) public double getSearchQueryAvgTimeInMillisPerRequest() { return nodeIndicesStatsBean.get().searchQueryAvgTimeInMillisPerRequest; }
    @Monitor(name = "search_query_delta", type = DataSourceType.GAUGE) public long getSearchQueryDelta() { return nodeIndicesStatsBean.get().searchQueryDelta; }
    @Monitor(name = "search_fetch_total", type = DataSourceType.COUNTER) public long getSearchFetchTotal() { return nodeIndicesStatsBean.get().searchFetchTotal; }
    @Monitor(name = "search_fetch_time", type = DataSourceType.COUNTER) public long getSearchFetchTime() { return nodeIndicesStatsBean.get().searchFetchTime; }
    @Monitor(name = "search_fetch_avg_time_in_millis_per_request", type = DataSourceType.GAUGE) public double getSearchFetchAvgTimeInMillisPerRequest() { return nodeIndicesStatsBean.get().searchFetchAvgTimeInMillisPerRequest; }
    @Monitor(name = "search_fetch_current", type = DataSourceType.GAUGE) public long getSearchFetchCurrent() { return nodeIndicesStatsBean.get().searchFetchCurrent; }
    @Monitor(name = "search_fetch_delta", type = DataSourceType.GAUGE) public long getSearchFetchDelta() { return nodeIndicesStatsBean.get().searchFetchDelta; }

    //Cache
    @Monitor(name = "cache_field_evictions", type = DataSourceType.GAUGE) public long getCacheFieldEvictions() { return nodeIndicesStatsBean.get().cacheFieldEvictions; }
    @Monitor(name = "cache_field_size", type = DataSourceType.GAUGE) public long getCacheFieldSize() { return nodeIndicesStatsBean.get().cacheFieldSize; }
    @Monitor(name = "cache_filter_evictions", type = DataSourceType.GAUGE) public long getCacheFilterEvictions() { return nodeIndicesStatsBean.get().cacheFilterEvictions; }
    @Monitor(name = "cache_filter_size", type = DataSourceType.GAUGE) public long getCacheFilterSize() { return nodeIndicesStatsBean.get().cacheFilterSize; }

    //Merge
    @Monitor(name = "merges_current", type = DataSourceType.GAUGE) public long getMergesCurrent() { return nodeIndicesStatsBean.get().mergesCurrent; }
    @Monitor(name = "merges_current_docs", type = DataSourceType.GAUGE) public long getMergesCurrentDocs() { return nodeIndicesStatsBean.get().mergesCurrentDocs; }
    @Monitor(name = "merges_current_size", type = DataSourceType.GAUGE) public long getMergesCurrentSize() { return nodeIndicesStatsBean.get().mergesCurrentSize; }
    @Monitor(name = "merges_total", type = DataSourceType.COUNTER) public long getMergesTotal() { return nodeIndicesStatsBean.get().mergesTotal; }
    @Monitor(name = "merges_total_time", type = DataSourceType.COUNTER) public long getMergesTotalTime() { return nodeIndicesStatsBean.get().mergesTotalTime; }
    @Monitor(name = "merges_total_size", type = DataSourceType.GAUGE) public long getMergesTotalSize() { return nodeIndicesStatsBean.get().mergesTotalSize; }

    //Refresh
    @Monitor(name = "refresh_total", type = DataSourceType.COUNTER) public long getRefreshTotal() { return nodeIndicesStatsBean.get().refreshTotal; }
    @Monitor(name = "refresh_total_time", type = DataSourceType.COUNTER) public long getRefreshTotalTime() { return nodeIndicesStatsBean.get().refreshTotalTime; }
    @Monitor(name = "refresh_avg_time_in_millis_per_request", type = DataSourceType.GAUGE) public double getRefreshAvgTimeInMillisPerRequest() { return nodeIndicesStatsBean.get().refreshAvgTimeInMillisPerRequest; }

    //Flush
    @Monitor(name = "flush_total", type = DataSourceType.COUNTER) public long getFlushTotal() { return nodeIndicesStatsBean.get().flushTotal; }
    @Monitor(name = "flush_total_time", type = DataSourceType.COUNTER) public long getFlushTotalTime() { return nodeIndicesStatsBean.get().flushTotalTime; }
    @Monitor(name = "flush_avg_time_in_millis_per_request", type = DataSourceType.GAUGE) public double getFlushAvgTimeInMillisPerRequest() { return nodeIndicesStatsBean.get().flushAvgTimeInMillisPerRequest; }

    //Percentile Latencies
    @Monitor(name = "latencySearchQuery95", type = DataSourceType.GAUGE) public double getLatencySearchQuery95() { return nodeIndicesStatsBean.get().latencySearchQuery95; }
    @Monitor(name = "latencySearchQuery99", type = DataSourceType.GAUGE) public double getLatencySearchQuery99() { return nodeIndicesStatsBean.get().latencySearchQuery99; }
    @Monitor(name = "latencySearchFetch95", type = DataSourceType.GAUGE) public double getLatencySearchFetch95() { return nodeIndicesStatsBean.get().latencySearchFetch95; }
    @Monitor(name = "latencySearchFetch99", type = DataSourceType.GAUGE) public double getLatencySearchFetch99() { return nodeIndicesStatsBean.get().latencySearchFetch99; }
    @Monitor(name = "latencyGet95", type = DataSourceType.GAUGE) public double getLatencyGet95() { return nodeIndicesStatsBean.get().latencyGet95; }
    @Monitor(name = "latencyGet99", type = DataSourceType.GAUGE) public double getLatencyGet99() { return nodeIndicesStatsBean.get().latencyGet99; }
    @Monitor(name = "latencyGetExists95", type = DataSourceType.GAUGE) public double getLatencyGetExists95() { return nodeIndicesStatsBean.get().latencyGetExists95; }
    @Monitor(name = "latencyGetExists99", type = DataSourceType.GAUGE) public double getLatencyGetExists99() { return nodeIndicesStatsBean.get().latencyGetExists99; }
    @Monitor(name = "latencyGetMissing95", type = DataSourceType.GAUGE) public double getLatencyGetMissing95() { return nodeIndicesStatsBean.get().latencyGetMissing95; }
    @Monitor(name = "latencyGetMissing99", type = DataSourceType.GAUGE) public double getLatencyGetMissing99() { return nodeIndicesStatsBean.get().latencyGetMissing99; }
    @Monitor(name = "latencyIndexing95", type = DataSourceType.GAUGE) public double getLatencyIndexing95() { return nodeIndicesStatsBean.get().latencyIndexing95; }
    @Monitor(name = "latencyIndexing99", type = DataSourceType.GAUGE) public double getLatencyIndexing99() { return nodeIndicesStatsBean.get().latencyIndexing99; }
    @Monitor(name = "latencyIndexDelete95", type = DataSourceType.GAUGE) public double getLatencyIndexDelete95() { return nodeIndicesStatsBean.get().latencyIndexDelete95; }
    @Monitor(name = "latencyIndexDelete99", type = DataSourceType.GAUGE) public double getLatencyIndexDelete99() { return nodeIndicesStatsBean.get().latencyIndexDelete99; }
}
}
5,144
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/NodeHealthMonitor.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.monitoring;

import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.atomic.AtomicReference;

/**
 * Scheduled task that publishes whether the local Elasticsearch process is down.
 * The published gauge "es_isesprocessdown" is 0 when the process is running,
 * 1 when it is down, and -1 when the state could not be determined.
 */
@Singleton
public class NodeHealthMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(NodeHealthMonitor.class);
    public static final String METRIC_NAME = "Elasticsearch_NodeHealthMonitor";
    private final ElasticsearchNodeHealthReporter healthReporter;

    @Inject
    public NodeHealthMonitor(IConfiguration config) {
        super(config);
        healthReporter = new ElasticsearchNodeHealthReporter();
        Monitors.registerObject(healthReporter);
    }

    @Override
    public void execute() throws Exception {
        // Only start monitoring once Elasticsearch has been started at least once
        if (!ElasticsearchProcessMonitor.getWasElasticsearchStarted()) {
            logger.info("Elasticsearch is not yet started, check back again later");
            return;
        }

        HealthBean healthBean = new HealthBean();
        try {
            // 0 = process running; flip to 1 (down) only if the check says otherwise
            healthBean.esProcessDown = 0;
            if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
                // BUG FIX: the original logged "Elasticsearch process is up & running"
                // in this branch, which is taken when the process is NOT running.
                logger.info("Elasticsearch process is not running");
                healthBean.esProcessDown = 1;
            }
        } catch (Exception e) {
            // Could not determine state: report -1 rather than a stale 0/1
            resetHealthStats(healthBean);
            logger.warn("failed to check if Elasticsearch process is running", e);
        }

        // Swap in the complete snapshot so the Servo getter reads a consistent value
        healthReporter.healthBean.set(healthBean);
    }

    /** Servo-facing reporter; the metric name below is externally consumed. */
    public class ElasticsearchNodeHealthReporter {
        private final AtomicReference<HealthBean> healthBean;

        public ElasticsearchNodeHealthReporter() {
            healthBean = new AtomicReference<HealthBean>(new HealthBean());
        }

        @Monitor(name = "es_isesprocessdown", type = DataSourceType.GAUGE)
        public int getIsEsProcessDown() {
            return healthBean.get().esProcessDown;
        }
    }

    /** Snapshot holder: -1 unknown, 0 process running, 1 process down. */
    private static class HealthBean {
        private int esProcessDown = -1;
    }

    @Override
    public String getName() {
        return METRIC_NAME;
    }

    /** Runs once a minute. */
    public static TaskTimer getTimer(String name) {
        return new SimpleTimer(name, 60 * 1000);
    }

    /** Marks the health state as unknown. */
    private void resetHealthStats(HealthBean healthBean) {
        healthBean.esProcessDown = -1;
    }
}
5,145
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/SnapshotBackupMonitor.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.monitoring;

import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.backup.SnapshotBackupManager;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.atomic.AtomicReference;

/**
 * Hourly task that publishes the running success/failure counts of snapshot
 * backups, as tracked by {@link SnapshotBackupManager}, via Servo gauges.
 */
@Singleton
public class SnapshotBackupMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(SnapshotBackupMonitor.class);
    public static final String METRIC_NAME = "Elasticsearch_SnapshotBackupMonitor";

    private final Elasticsearch_SnapshotBackupReporter snapshotBackupReporter;
    private final SnapshotBackupManager snapshotBackupManager;

    @Inject
    public SnapshotBackupMonitor(IConfiguration config, SnapshotBackupManager snapshotBackupManager) {
        super(config);
        this.snapshotBackupManager = snapshotBackupManager;
        this.snapshotBackupReporter = new Elasticsearch_SnapshotBackupReporter();
        Monitors.registerObject(snapshotBackupReporter);
    }

    @Override
    public void execute() throws Exception {
        // Skip the poll entirely until the Elasticsearch process is up
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            logger.info("Elasticsearch is not yet started, check back again later");
            return;
        }

        SnapshotBackupBean bean = new SnapshotBackupBean();
        try {
            bean.snapshotSuccess = snapshotBackupManager.getNumSnapshotSuccess();
            bean.snapshotFailure = snapshotBackupManager.getNumSnapshotFailure();
        } catch (Exception e) {
            logger.warn("failed to load Cluster SnapshotBackup Status", e);
        }

        // Publish the snapshot atomically so readers never see a half-filled bean
        snapshotBackupReporter.snapshotBackupBean.set(bean);
    }

    /** Servo-facing reporter; metric names are externally consumed. */
    public class Elasticsearch_SnapshotBackupReporter {
        private final AtomicReference<SnapshotBackupBean> snapshotBackupBean;

        public Elasticsearch_SnapshotBackupReporter() {
            snapshotBackupBean = new AtomicReference<SnapshotBackupBean>(new SnapshotBackupBean());
        }

        @Monitor(name = "snapshot_success", type = DataSourceType.GAUGE)
        public int getSnapshotSuccess() {
            return snapshotBackupBean.get().snapshotSuccess;
        }

        @Monitor(name = "snapshot_failure", type = DataSourceType.GAUGE)
        public int getSnapshotFailure() {
            return snapshotBackupBean.get().snapshotFailure;
        }
    }

    /** Immutable-by-convention snapshot of the two counters. */
    private static class SnapshotBackupBean {
        private int snapshotSuccess;
        private int snapshotFailure;
    }

    /** Runs once an hour. */
    public static TaskTimer getTimer(String name) {
        return new SimpleTimer(name, 3600 * 1000);
    }

    @Override
    public String getName() {
        return METRIC_NAME;
    }
}
5,146
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/AllCircuitBreakerStatsMonitor.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.monitoring;

import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.indices.breaker.AllCircuitBreakerStats;
import org.elasticsearch.indices.breaker.CircuitBreakerStats;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

/**
 * Per-minute task that publishes the local node's "fielddata" and "request"
 * circuit-breaker statistics (estimated size, limit, overhead, tripped count)
 * as Servo gauges.
 */
@Singleton
public class AllCircuitBreakerStatsMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(AllCircuitBreakerStatsMonitor.class);
    public static final String METRIC_NAME = "Elasticsearch_AllCircuitBreakerStatsMonitor";

    private final Elasticsearch_AllCircuitBreakerStatsReporter allCircuitBreakerStatsReporter;

    @Inject
    public AllCircuitBreakerStatsMonitor(IConfiguration config) {
        super(config);
        allCircuitBreakerStatsReporter = new Elasticsearch_AllCircuitBreakerStatsReporter();
        Monitors.registerObject(allCircuitBreakerStatsReporter);
    }

    @Override
    public void execute() throws Exception {
        // Nothing to report until the Elasticsearch process is up
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            logger.info("Elasticsearch is not yet started, check back again later");
            return;
        }

        AllCircuitBreakerStatsBean bean = new AllCircuitBreakerStatsBean();
        try {
            NodesStatsResponse response = ElasticsearchTransportClient.getNodesStatsResponse(config);

            // Stats are requested for the local node only; take the first entry
            List<NodeStats> allNodeStats = response.getNodes();
            NodeStats localNodeStats = allNodeStats.size() > 0 ? allNodeStats.get(0) : null;
            if (localNodeStats == null) {
                logger.info("Circuit breaker stats is not available (node stats is not available)");
                return;
            }

            AllCircuitBreakerStats breakerStats = localNodeStats.getBreaker();
            if (breakerStats == null) {
                logger.info("Circuit breaker stats is not available");
                return;
            }

            CircuitBreakerStats[] perBreakerStats = breakerStats.getAllStats();
            if (perBreakerStats == null || perBreakerStats.length == 0) {
                logger.info("Circuit breaker stats is not available (stats are empty)");
                return;
            }

            for (CircuitBreakerStats stats : perBreakerStats) {
                if (CircuitBreaker.FIELDDATA.equals(stats.getName())) {
                    bean.fieldDataEstimatedSizeInBytes = stats.getEstimated();
                    bean.fieldDataLimitMaximumSizeInBytes = stats.getLimit();
                    bean.fieldDataOverhead = stats.getOverhead();
                    bean.fieldDataTrippedCount = stats.getTrippedCount();
                }
                if (CircuitBreaker.REQUEST.equals(stats.getName())) {
                    bean.requestEstimatedSizeInBytes = stats.getEstimated();
                    bean.requestLimitMaximumSizeInBytes = stats.getLimit();
                    bean.requestOverhead = stats.getOverhead();
                    bean.requestTrippedCount = stats.getTrippedCount();
                }
            }
        } catch (Exception e) {
            logger.warn("Failed to load circuit breaker stats data", e);
        }

        // Swap the completed snapshot in atomically for the Servo getters
        allCircuitBreakerStatsReporter.allCircuitBreakerStatsBean.set(bean);
    }

    /** Servo-facing reporter; metric names are externally consumed. */
    public class Elasticsearch_AllCircuitBreakerStatsReporter {
        private final AtomicReference<AllCircuitBreakerStatsBean> allCircuitBreakerStatsBean;

        public Elasticsearch_AllCircuitBreakerStatsReporter() {
            allCircuitBreakerStatsBean = new AtomicReference<AllCircuitBreakerStatsBean>(new AllCircuitBreakerStatsBean());
        }

        @Monitor(name = "field_data_estimated_size_in_bytes", type = DataSourceType.GAUGE)
        public long getFieldDataEstimatedSizeInBytes() {
            return allCircuitBreakerStatsBean.get().fieldDataEstimatedSizeInBytes;
        }

        @Monitor(name = "field_data_limit_maximum_size_in_bytes", type = DataSourceType.GAUGE)
        public long getFieldDataLimitMaximumSizeInBytes() {
            return allCircuitBreakerStatsBean.get().fieldDataLimitMaximumSizeInBytes;
        }

        @Monitor(name = "field_data_tripped_count", type = DataSourceType.GAUGE)
        public double getFieldDataTrippedCount() {
            return allCircuitBreakerStatsBean.get().fieldDataTrippedCount;
        }

        @Monitor(name = "field_data_overhead", type = DataSourceType.GAUGE)
        public double getFieldDataOverhead() {
            return allCircuitBreakerStatsBean.get().fieldDataOverhead;
        }

        @Monitor(name = "request_estimated_size_in_bytes", type = DataSourceType.GAUGE)
        public long getRequestEstimatedSizeInBytes() {
            return allCircuitBreakerStatsBean.get().requestEstimatedSizeInBytes;
        }

        @Monitor(name = "request_limit_maximum_size_in_bytes", type = DataSourceType.GAUGE)
        public long getRequestLimitMaximumSizeInBytes() {
            return allCircuitBreakerStatsBean.get().requestLimitMaximumSizeInBytes;
        }

        @Monitor(name = "request_tripped_count", type = DataSourceType.GAUGE)
        public double getRequestTrippedCount() {
            return allCircuitBreakerStatsBean.get().requestTrippedCount;
        }

        @Monitor(name = "request_overhead", type = DataSourceType.GAUGE)
        public double getRequestOverhead() {
            return allCircuitBreakerStatsBean.get().requestOverhead;
        }
    }

    /** Snapshot of the two monitored breakers' figures. */
    private static class AllCircuitBreakerStatsBean {
        private long fieldDataEstimatedSizeInBytes;
        private long fieldDataLimitMaximumSizeInBytes;
        private long fieldDataTrippedCount;
        private double fieldDataOverhead;
        private long requestEstimatedSizeInBytes;
        private long requestLimitMaximumSizeInBytes;
        private long requestTrippedCount;
        private double requestOverhead;
    }

    /** Runs once a minute. */
    public static TaskTimer getTimer(String name) {
        return new SimpleTimer(name, 60 * 1000);
    }

    @Override
    public String getName() {
        return METRIC_NAME;
    }
}
5,147
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/ProcessStatsMonitor.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.monitoring;

import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.monitor.process.ProcessStats;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

/**
 * Scheduled task that polls the local Elasticsearch node's process stats
 * (virtual memory, CPU, open file descriptors) and publishes them as Servo
 * GAUGE metrics via {@link Elasticsearch_ProcessStatsReporter}.
 */
@Singleton
public class ProcessStatsMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(ProcessStatsMonitor.class);
    public static final String METRIC_NAME = "Elasticsearch_ProcessStatsMonitor";
    private final Elasticsearch_ProcessStatsReporter processStatsReporter;

    @Inject
    public ProcessStatsMonitor(IConfiguration config) {
        super(config);
        processStatsReporter = new Elasticsearch_ProcessStatsReporter();
        // Register once at construction; the reporter object lives for the process lifetime
        Monitors.registerObject(processStatsReporter);
    }

    /**
     * Polls node stats for the first (local) node and swaps the reporter's bean.
     * On any failure a freshly created (zeroed) bean is published.
     */
    @Override
    public void execute() throws Exception {
        // Only start monitoring if Elasticsearch is started
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            String exceptionMsg = "Elasticsearch is not yet started, check back again later";
            logger.info(exceptionMsg);
            return;
        }
        ProcessStatsBean processStatsBean = new ProcessStatsBean();
        try {
            NodesStatsResponse nodesStatsResponse = ElasticsearchTransportClient.getNodesStatsResponse(config);
            NodeStats nodeStats = null;
            List<NodeStats> nodeStatsList = nodesStatsResponse.getNodes();
            // Transport client points at the local node, so the first entry is this node
            if (nodeStatsList.size() > 0) {
                nodeStats = nodeStatsList.get(0);
            }
            if (nodeStats == null) {
                logger.info("Process stats are not available (node stats is not available)");
                return;
            }
            ProcessStats processStats = nodeStats.getProcess();
            if (processStats == null) {
                logger.info("Process stats are not available");
                return;
            }
            //Memory
            processStatsBean.totalVirtualInBytes = processStats.getMem().getTotalVirtual().getBytes();
            //CPU
            processStatsBean.cpuPercent = processStats.getCpu().getPercent();
            processStatsBean.totalInMillis = processStats.getCpu().getTotal().getMillis();
            //Open file descriptors
            processStatsBean.openFileDescriptors = processStats.getOpenFileDescriptors();
            //Timestamp
            processStatsBean.cpuTimestamp = processStats.getTimestamp();
        } catch (Exception e) {
            logger.warn("Failed to load process stats data", e);
        }
        // Atomic swap so the reporter always reads a consistent snapshot
        processStatsReporter.processStatsBean.set(processStatsBean);
    }

    /**
     * Servo reporter exposing the latest process stats as GAUGE metrics.
     *
     * NOTE(review): residentInBytes, shareInBytes, sysInMillis and userInMillis are
     * never assigned in execute(), so those gauges always report 0 — presumably the
     * underlying APIs were removed in newer Elasticsearch versions and the getters
     * were kept for metric continuity; confirm before removing.
     */
    public class Elasticsearch_ProcessStatsReporter {
        private final AtomicReference<ProcessStatsBean> processStatsBean;

        public Elasticsearch_ProcessStatsReporter() {
            processStatsBean = new AtomicReference<ProcessStatsBean>(new ProcessStatsBean());
        }

        // Always 0 — field never populated (see class note)
        @Monitor(name = "resident_in_bytes", type = DataSourceType.GAUGE)
        public long getResidentInBytes() {
            return processStatsBean.get().residentInBytes;
        }

        // Always 0 — field never populated (see class note)
        @Monitor(name = "share_in_bytes", type = DataSourceType.GAUGE)
        public long getShareInBytes() {
            return processStatsBean.get().shareInBytes;
        }

        @Monitor(name = "total_virtual_in_bytes", type = DataSourceType.GAUGE)
        public long getTotalVirtualInBytes() {
            return processStatsBean.get().totalVirtualInBytes;
        }

        @Monitor(name = "cpu_percent", type = DataSourceType.GAUGE)
        public short getCpuPercent() {
            return processStatsBean.get().cpuPercent;
        }

        // Always 0 — field never populated (see class note)
        @Monitor(name = "sys_in_millis", type = DataSourceType.GAUGE)
        public long getSysInMillis() {
            return processStatsBean.get().sysInMillis;
        }

        // Always 0 — field never populated (see class note)
        @Monitor(name = "user_in_millis", type = DataSourceType.GAUGE)
        public long getUserInMillis() {
            return processStatsBean.get().userInMillis;
        }

        @Monitor(name = "total_in_millis", type = DataSourceType.GAUGE)
        public long getTotalInMillis() {
            return processStatsBean.get().totalInMillis;
        }

        // long field widened to double on return
        @Monitor(name = "open_file_descriptors", type = DataSourceType.GAUGE)
        public double getOpenFileDescriptors() {
            return processStatsBean.get().openFileDescriptors;
        }

        @Monitor(name = "cpu_timestamp", type = DataSourceType.GAUGE)
        public long getCpuTimestamp() {
            return processStatsBean.get().cpuTimestamp;
        }
    }

    // Plain value holder for one poll; fields default to 0 when unavailable
    private static class ProcessStatsBean {
        private long residentInBytes;
        private long shareInBytes;
        private long totalVirtualInBytes;
        private short cpuPercent;
        private long sysInMillis;
        private long userInMillis;
        private long totalInMillis;
        private long openFileDescriptors;
        private long cpuTimestamp;
    }

    // Polls once a minute
    public static TaskTimer getTimer(String name) {
        return new SimpleTimer(name, 60 * 1000);
    }

    @Override
    public String getName() {
        return METRIC_NAME;
    }
}
5,148
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/FsStatsMonitor.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.monitoring;

import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.monitor.fs.FsInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

/**
 * Scheduled task that polls the local Elasticsearch node's file-system stats
 * (total/free/available bytes and available-disk percentage) and publishes them
 * as Servo GAUGE metrics via {@link Elasticsearch_FsStatsReporter}.
 */
@Singleton
public class FsStatsMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(FsStatsMonitor.class);
    public static final String METRIC_NAME = "Elasticsearch_FsStatsMonitor";
    private final Elasticsearch_FsStatsReporter fsStatsReporter;

    @Inject
    public FsStatsMonitor(IConfiguration config) {
        super(config);
        fsStatsReporter = new Elasticsearch_FsStatsReporter();
        // Register once at construction; the reporter object lives for the process lifetime
        Monitors.registerObject(fsStatsReporter);
    }

    /**
     * Polls node stats for the first (local) node and swaps the reporter's bean.
     * On any failure a freshly created (zeroed) bean is published.
     */
    @Override
    public void execute() throws Exception {
        // Only start monitoring if Elasticsearch is started
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            String exceptionMsg = "Elasticsearch is not yet started, check back again later";
            logger.info(exceptionMsg);
            return;
        }
        FsStatsBean fsStatsBean = new FsStatsBean();
        try {
            NodesStatsResponse nodesStatsResponse = ElasticsearchTransportClient.getNodesStatsResponse(config);
            NodeStats nodeStats = null;
            List<NodeStats> nodeStatsList = nodesStatsResponse.getNodes();
            // Transport client points at the local node, so the first entry is this node
            if (nodeStatsList.size() > 0) {
                nodeStats = nodeStatsList.get(0);
            }
            if (nodeStats == null) {
                logger.info("File system info is not available (node stats are not available)");
                return;
            }
            FsInfo fsInfo = nodeStats.getFs();
            if (fsInfo == null) {
                logger.info("File system info is not available");
                return;
            }
            fsStatsBean.total = fsInfo.getTotal().getTotal().getBytes();
            fsStatsBean.free = fsInfo.getTotal().getFree().getBytes();
            fsStatsBean.available = fsInfo.getTotal().getAvailable().getBytes();
            // FIX: guard against division by zero when the reported total size is 0
            // (e.g. stats only partially available); the gauge then stays at its
            // zeroed default instead of the task throwing ArithmeticException.
            if (fsStatsBean.total > 0) {
                fsStatsBean.availableDiskPercent = (fsStatsBean.available * 100) / fsStatsBean.total;
            }
        } catch (Exception e) {
            logger.warn("Failed to load file system stats data", e);
        }
        // Atomic swap so the reporter always reads a consistent snapshot
        fsStatsReporter.fsStatsBean.set(fsStatsBean);
    }

    /**
     * Servo reporter exposing the latest file-system stats as GAUGE metrics.
     *
     * NOTE(review): diskReads, diskWrites, diskReadBytes, diskWriteBytes, diskQueue
     * and diskServiceTime are never assigned in execute(), so those gauges always
     * report 0 — presumably kept for metric continuity; confirm before removing.
     */
    public class Elasticsearch_FsStatsReporter {
        private final AtomicReference<FsStatsBean> fsStatsBean;

        public Elasticsearch_FsStatsReporter() {
            fsStatsBean = new AtomicReference<FsStatsBean>(new FsStatsBean());
        }

        @Monitor(name = "total_bytes", type = DataSourceType.GAUGE)
        public long getTotalBytes() {
            return fsStatsBean.get().total;
        }

        @Monitor(name = "free_bytes", type = DataSourceType.GAUGE)
        public long getFreeBytes() {
            return fsStatsBean.get().free;
        }

        @Monitor(name = "available_bytes", type = DataSourceType.GAUGE)
        public long getAvailableBytes() {
            return fsStatsBean.get().available;
        }

        // Method-name typo ("geDiskReads") retained: the name is public API and the
        // metric name comes from the annotation, so renaming gains nothing.
        @Monitor(name = "disk_reads", type = DataSourceType.GAUGE)
        public long geDiskReads() {
            return fsStatsBean.get().diskReads;
        }

        @Monitor(name = "disk_writes", type = DataSourceType.GAUGE)
        public long getDiskWrites() {
            return fsStatsBean.get().diskWrites;
        }

        @Monitor(name = "disk_read_bytes", type = DataSourceType.GAUGE)
        public long getDiskReadBytes() {
            return fsStatsBean.get().diskReadBytes;
        }

        @Monitor(name = "disk_write_bytes", type = DataSourceType.GAUGE)
        public long getDiskWriteBytes() {
            return fsStatsBean.get().diskWriteBytes;
        }

        @Monitor(name = "disk_queue", type = DataSourceType.GAUGE)
        public double getDiskQueue() {
            return fsStatsBean.get().diskQueue;
        }

        @Monitor(name = "disk_service_time", type = DataSourceType.GAUGE)
        public double getDiskServiceTime() {
            return fsStatsBean.get().diskServiceTime;
        }

        @Monitor(name = "available_disk_percent", type = DataSourceType.GAUGE)
        public long getAvailableDiskPercent() {
            return fsStatsBean.get().availableDiskPercent;
        }
    }

    // Plain value holder for one poll; fields default to 0 when unavailable
    private static class FsStatsBean {
        private long total;
        private long free;
        private long available;
        private long diskReads;
        private long diskWrites;
        private long diskReadBytes;
        private long diskWriteBytes;
        private double diskQueue;
        private double diskServiceTime;
        private long availableDiskPercent;
    }

    // Polls once a minute
    public static TaskTimer getTimer(String name) {
        return new SimpleTimer(name, 60 * 1000);
    }

    @Override
    public String getName() {
        return METRIC_NAME;
    }
}
5,149
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/HealthMonitor.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.monitoring;

import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.discovery.DiscoveryManager;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.identity.InstanceManager;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.raigad.utils.ElasticsearchUtils;
import com.netflix.raigad.utils.HttpModule;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.unit.TimeValue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.atomic.AtomicReference;

/**
 * Scheduled task that polls cluster health and publishes, as Servo GAUGE metrics:
 * <ul>
 *   <li>whether the cluster is RED (es_healthstatus_greenorred) or YELLOW
 *       (es_healthstatus_greenoryellow),</li>
 *   <li>whether the node count seen by Elasticsearch matches the expected count
 *       from the instance registry / configuration (es_nodematchstatus),</li>
 *   <li>optionally whether it matches the Eureka instance count
 *       (es_eurekanodematchstatus).</li>
 * </ul>
 * All gauges use -1 to mean "unknown" (poll failed or not yet run).
 */
@Singleton
public class HealthMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(HealthMonitor.class);
    public static final String METRIC_NAME = "Elasticsearch_HealthMonitor";
    private final Elasticsearch_HealthReporter healthReporter;
    private final InstanceManager instanceManager;
    private static TimeValue MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(60);
    private final DiscoveryClient discoveryClient;
    private final HttpModule httpModule;

    @Inject
    public HealthMonitor(IConfiguration config, InstanceManager instanceManager, HttpModule httpModule) {
        super(config);
        this.instanceManager = instanceManager;
        this.httpModule = httpModule;
        healthReporter = new Elasticsearch_HealthReporter();
        discoveryClient = DiscoveryManager.getInstance().getDiscoveryClient();
        Monitors.registerObject(healthReporter);
    }

    @Override
    public void execute() throws Exception {
        // Only start monitoring if Elasticsearch is started
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            String exceptionMsg = "Elasticsearch is not running, check back again later";
            logger.info(exceptionMsg);
            return;
        }
        // In case we configured only the master node to report metrics and this node is not a master - bail out
        if (config.reportMetricsFromMasterOnly() && !ElasticsearchUtils.amIMasterNode(config, httpModule)) {
            return;
        }
        HealthBean healthBean = new HealthBean();
        try {
            Client esTransportClient = ElasticsearchTransportClient.instance(config).getTransportClient();
            // FIX: previously two separate prepareHealth() requests were issued — one for
            // the status, one for the node counts — so the two readings could come from
            // different cluster states and a round-trip was wasted. A single request now
            // supplies both.
            ClusterHealthResponse clusterHealthResponse = esTransportClient.admin().cluster()
                    .prepareHealth().setTimeout(MASTER_NODE_TIMEOUT).execute().actionGet(MASTER_NODE_TIMEOUT);
            ClusterHealthStatus clusterHealthStatus = clusterHealthResponse.getStatus();
            if (clusterHealthStatus == null) {
                logger.info("ClusterHealthStatus is null, hence returning (no health).");
                resetHealthStats(healthBean);
                return;
            }
            // Check if status = GREEN, YELLOW or RED (enum comparison instead of
            // string matching on the enum name)
            switch (clusterHealthStatus) {
                case GREEN:
                    healthBean.greenorredstatus = 0;
                    healthBean.greenoryellowstatus = 0;
                    break;
                case YELLOW:
                    healthBean.greenorredstatus = 0;
                    healthBean.greenoryellowstatus = 1;
                    break;
                case RED:
                    healthBean.greenorredstatus = 1;
                    healthBean.greenoryellowstatus = 0;
                    break;
            }
            if (config.isNodeMismatchWithDiscoveryEnabled()) {
                // Check if there is node mismatch between discovery and ES
                healthBean.nodematch =
                        (clusterHealthResponse.getNumberOfNodes() == instanceManager.getAllInstances().size()) ? 0 : 1;
            } else {
                healthBean.nodematch =
                        (clusterHealthResponse.getNumberOfNodes() == config.getDesiredNumberOfNodesInCluster()) ? 0 : 1;
            }
            if (config.isEurekaHealthCheckEnabled()) {
                healthBean.eurekanodematch =
                        (clusterHealthResponse.getNumberOfNodes()
                                == discoveryClient.getApplication(config.getAppName()).getInstances().size()) ? 0 : 1;
            }
        } catch (Exception e) {
            resetHealthStats(healthBean);
            logger.warn("Failed to load cluster health status", e);
        }
        // Atomic swap so the reporter always reads a consistent snapshot
        healthReporter.healthBean.set(healthBean);
    }

    /** Servo reporter exposing the latest health readings (-1 = unknown). */
    public class Elasticsearch_HealthReporter {
        private final AtomicReference<HealthBean> healthBean;

        public Elasticsearch_HealthReporter() {
            healthBean = new AtomicReference<HealthBean>(new HealthBean());
        }

        @Monitor(name = "es_healthstatus_greenorred", type = DataSourceType.GAUGE)
        public int getEsHealthstatusGreenorred() {
            return healthBean.get().greenorredstatus;
        }

        @Monitor(name = "es_healthstatus_greenoryellow", type = DataSourceType.GAUGE)
        public int getEsHealthstatusGreenoryellow() {
            return healthBean.get().greenoryellowstatus;
        }

        @Monitor(name = "es_nodematchstatus", type = DataSourceType.GAUGE)
        public int getEsNodematchstatus() {
            return healthBean.get().nodematch;
        }

        @Monitor(name = "es_eurekanodematchstatus", type = DataSourceType.GAUGE)
        public int getEsEurekanodematchstatus() {
            return healthBean.get().eurekanodematch;
        }
    }

    // Plain value holder; -1 means "unknown"
    private static class HealthBean {
        private int greenorredstatus = -1;
        private int greenoryellowstatus = -1;
        private int nodematch = -1;
        private int eurekanodematch = -1;
    }

    // Polls once a minute
    public static TaskTimer getTimer(String name) {
        return new SimpleTimer(name, 60 * 1000);
    }

    @Override
    public String getName() {
        return METRIC_NAME;
    }

    // Marks every gauge as "unknown" after a failed poll
    private void resetHealthStats(HealthBean healthBean) {
        healthBean.greenorredstatus = -1;
        healthBean.greenoryellowstatus = -1;
        healthBean.nodematch = -1;
        healthBean.eurekanodematch = -1;
    }
}
5,150
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/monitoring/HttpStatsMonitor.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.monitoring;

import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.monitor.Monitors;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.http.HttpStats;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

/**
 * Scheduled task that polls the local Elasticsearch node's HTTP connection
 * stats (currently open and total opened server connections) and publishes
 * them as Servo GAUGE metrics.
 */
@Singleton
public class HttpStatsMonitor extends Task {
    private static final Logger logger = LoggerFactory.getLogger(HttpStatsMonitor.class);
    public static final String METRIC_NAME = "Elasticsearch_HttpStatsMonitor";
    private final Elasticsearch_HttpStatsReporter httpStatsReporter;

    @Inject
    public HttpStatsMonitor(IConfiguration config) {
        super(config);
        httpStatsReporter = new Elasticsearch_HttpStatsReporter();
        Monitors.registerObject(httpStatsReporter);
    }

    /**
     * Fetches node stats for the local node, extracts the HTTP connection
     * counters, and atomically publishes a fresh snapshot to the reporter.
     */
    @Override
    public void execute() throws Exception {
        // Skip the poll entirely while the Elasticsearch process is down
        if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
            logger.info("Elasticsearch is not yet started, check back again later");
            return;
        }
        HttpStatsBean snapshot = new HttpStatsBean();
        try {
            NodesStatsResponse response = ElasticsearchTransportClient.getNodesStatsResponse(config);
            List<NodeStats> statsPerNode = response.getNodes();
            // The transport client targets the local node; take the first entry if any
            NodeStats localNodeStats = statsPerNode.isEmpty() ? null : statsPerNode.get(0);
            if (localNodeStats == null) {
                logger.info("HTTP stats is not available (node stats are not available)");
                return;
            }
            HttpStats httpStats = localNodeStats.getHttp();
            if (httpStats == null) {
                logger.info("HTTP stats is not available");
                return;
            }
            snapshot.serverOpen = httpStats.getServerOpen();
            snapshot.totalOpen = httpStats.getTotalOpen();
        } catch (Exception e) {
            logger.warn("Failed to load HTTP stats data", e);
        }
        // Swap in the whole bean so readers never observe a half-updated snapshot
        httpStatsReporter.httpStatsBean.set(snapshot);
    }

    /** Servo reporter exposing the latest HTTP connection counters. */
    public class Elasticsearch_HttpStatsReporter {
        private final AtomicReference<HttpStatsBean> httpStatsBean;

        public Elasticsearch_HttpStatsReporter() {
            httpStatsBean = new AtomicReference<>(new HttpStatsBean());
        }

        @Monitor(name = "server_open", type = DataSourceType.GAUGE)
        public long getServerOpen() {
            return httpStatsBean.get().serverOpen;
        }

        @Monitor(name = "total_open", type = DataSourceType.GAUGE)
        public long getTotalOpen() {
            return httpStatsBean.get().totalOpen;
        }
    }

    // Plain value holder for one poll; zeroed when stats are unavailable
    private static class HttpStatsBean {
        private long serverOpen;
        private long totalOpen;
    }

    // Polls once a minute
    public static TaskTimer getTimer(String name) {
        return new SimpleTimer(name, 60 * 1000);
    }

    @Override
    public String getName() {
        return METRIC_NAME;
    }
}
5,151
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/startup/RaigadServer.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.startup;

import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.aws.SetVPCSecurityGroupID;
import com.netflix.raigad.aws.UpdateSecuritySettings;
import com.netflix.raigad.aws.UpdateTribeSecuritySettings;
import com.netflix.raigad.backup.RestoreBackupManager;
import com.netflix.raigad.backup.SnapshotBackupManager;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.defaultimpl.IElasticsearchProcess;
import com.netflix.raigad.identity.InstanceManager;
import com.netflix.raigad.indexmanagement.ElasticsearchIndexManager;
import com.netflix.raigad.monitoring.*;
import com.netflix.raigad.scheduler.RaigadScheduler;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.HttpModule;
import com.netflix.raigad.utils.Sleeper;
import com.netflix.raigad.utils.TuneElasticsearch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Start all tasks here: Property update task, Backup task, Restore task, Incremental backup.
 * <p>
 * Central bootstrap for a Raigad-managed Elasticsearch node: starts the scheduler,
 * applies security-group updates, tunes and (optionally) starts Elasticsearch,
 * and registers every recurring backup/index-management/monitoring task.
 * The ORDER of the statements in {@link #initialize()} is significant
 * (e.g. SG update must precede the 60s sleep, which must precede task scheduling).
 */
@Singleton
public class RaigadServer {
    private static final Logger logger = LoggerFactory.getLogger(RaigadServer.class);
    // Initial delays (seconds) before each delayed task first runs
    private static final int ES_MONITORING_INITIAL_DELAY = 10;
    private static final int ES_SNAPSHOT_INITIAL_DELAY = 100;
    private static final int ES_HEALTH_MONITOR_DELAY = 600;
    private static final int ES_NODE_HEALTH_MONITOR_DELAY = 10;
    private final RaigadScheduler scheduler;
    private final IConfiguration config;
    private final Sleeper sleeper;
    private final IElasticsearchProcess esProcess;
    private final InstanceManager instanceManager;
    private final ElasticsearchIndexManager esIndexManager;
    private final SnapshotBackupManager snapshotBackupManager;
    private final HttpModule httpModule;
    private final SetVPCSecurityGroupID setVPCSecurityGroupID;

    @Inject
    public RaigadServer(IConfiguration config, RaigadScheduler scheduler, HttpModule httpModule,
                        IElasticsearchProcess esProcess, Sleeper sleeper, InstanceManager instanceManager,
                        ElasticsearchIndexManager esIndexManager, SnapshotBackupManager snapshotBackupManager,
                        SetVPCSecurityGroupID setVPCSecurityGroupID) {
        this.config = config;
        this.scheduler = scheduler;
        this.httpModule = httpModule;
        this.esProcess = esProcess;
        this.sleeper = sleeper;
        this.instanceManager = instanceManager;
        this.esIndexManager = esIndexManager;
        this.snapshotBackupManager = snapshotBackupManager;
        this.setVPCSecurityGroupID = setVPCSecurityGroupID;
    }

    /**
     * Bootstraps the node: no-op when the instance is out of service; otherwise
     * starts the scheduler, applies security settings (tribe vs. regular node),
     * tunes/starts Elasticsearch, and schedules all backup and monitoring tasks.
     *
     * @throws Exception if any bootstrap step fails
     */
    public void initialize() throws Exception {
        // Check if it's really needed
        if (instanceManager.getInstance().isOutOfService()) {
            return;
        }
        logger.info("Initializing Raigad server now...");
        // Start to schedule jobs
        scheduler.start();
        if (!config.isLocalModeEnabled()) {
            if (config.amITribeNode()) {
                logger.info("Updating security setting for the tribe node");
                if (config.isDeployedInVPC()) {
                    logger.info("Setting Security Group ID (VPC)");
                    setVPCSecurityGroupID.execute();
                }
                // Update security settings
                scheduler.runTaskNow(UpdateTribeSecuritySettings.class);
                // Sleep for 60 seconds for the SG update to happen
                if (UpdateTribeSecuritySettings.firstTimeUpdated) {
                    sleeper.sleep(60 * 1000);
                }
                scheduler.addTask(UpdateTribeSecuritySettings.JOB_NAME, UpdateTribeSecuritySettings.class,
                        UpdateTribeSecuritySettings.getTimer(instanceManager));
            } else {
                if (config.isSecurityGroupInMultiDC()) {
                    logger.info("Updating security setting");
                    if (config.isDeployedInVPC()) {
                        logger.info("Setting Security Group ID (VPC)");
                        setVPCSecurityGroupID.execute();
                    }
                    if (config.amISourceClusterForTribeNode()) {
                        // Update security settings
                        scheduler.runTaskNow(UpdateSecuritySettings.class);
                        // Sleep for 60 seconds for the SG update to happen
                        if (UpdateSecuritySettings.firstTimeUpdated) {
                            sleeper.sleep(60 * 1000);
                        }
                        scheduler.addTask(UpdateSecuritySettings.JOB_NAME, UpdateSecuritySettings.class,
                                UpdateSecuritySettings.getTimer(instanceManager));
                    }
                }
            }
        }
        // Tune Elasticsearch
        scheduler.runTaskNow(TuneElasticsearch.class);
        logger.info("Trying to start Elasticsearch now...");
        if (!config.doesElasticsearchStartManually()) {
            // Start Elasticsearch
            esProcess.start();
            // Restore only makes sense once the process is (being) started by Raigad
            if (config.isRestoreEnabled()) {
                scheduler.addTaskWithDelay(RestoreBackupManager.JOBNAME, RestoreBackupManager.class,
                        RestoreBackupManager.getTimer(config), config.getRestoreTaskInitialDelayInSeconds());
            }
        } else {
            logger.info("config.doesElasticsearchStartManually() is set to True," +
                    "hence Elasticsearch needs to be started manually. " +
                    "Restore task needs to be started manually as well (if needed).");
        }
        /*
         * Run the delayed task (after 10 seconds) to Monitor Elasticsearch Running Process
         */
        scheduler.addTaskWithDelay(ElasticsearchProcessMonitor.JOB_NAME, ElasticsearchProcessMonitor.class,
                ElasticsearchProcessMonitor.getTimer(), ES_MONITORING_INITIAL_DELAY);
        /*
         * Run Snapshot Backup task
         */
        if (config.isAsgBasedDedicatedDeployment()) {
            if (config.getASGName().toLowerCase().contains("master")) {
                // Run Snapshot task only on Master Nodes
                scheduler.addTaskWithDelay(SnapshotBackupManager.JOBNAME, SnapshotBackupManager.class,
                        SnapshotBackupManager.getTimer(config), ES_SNAPSHOT_INITIAL_DELAY);
                // Run Index Management task only on Master Nodes
                scheduler.addTaskWithDelay(ElasticsearchIndexManager.JOB_NAME, ElasticsearchIndexManager.class,
                        ElasticsearchIndexManager.getTimer(config), config.getAutoCreateIndexInitialStartDelaySeconds());
                scheduler.addTaskWithDelay(HealthMonitor.METRIC_NAME, HealthMonitor.class,
                        HealthMonitor.getTimer("HealthMonitor"), ES_HEALTH_MONITOR_DELAY);
            } else if (!config.reportMetricsFromMasterOnly()) {
                // Non-master nodes still report health unless metrics are master-only
                scheduler.addTaskWithDelay(HealthMonitor.METRIC_NAME, HealthMonitor.class,
                        HealthMonitor.getTimer("HealthMonitor"), ES_HEALTH_MONITOR_DELAY);
            }
        } else {
            // Non-dedicated deployment: every node runs backup, index management and health
            scheduler.addTaskWithDelay(SnapshotBackupManager.JOBNAME, SnapshotBackupManager.class,
                    SnapshotBackupManager.getTimer(config), ES_SNAPSHOT_INITIAL_DELAY);
            scheduler.addTaskWithDelay(ElasticsearchIndexManager.JOB_NAME, ElasticsearchIndexManager.class,
                    ElasticsearchIndexManager.getTimer(config), config.getAutoCreateIndexInitialStartDelaySeconds());
            scheduler.addTaskWithDelay(HealthMonitor.METRIC_NAME, HealthMonitor.class,
                    HealthMonitor.getTimer("HealthMonitor"), ES_HEALTH_MONITOR_DELAY);
        }
        /*
         * Starting Monitoring Jobs
         */
        scheduler.addTask(ThreadPoolStatsMonitor.METRIC_NAME, ThreadPoolStatsMonitor.class,
                ThreadPoolStatsMonitor.getTimer("ThreadPoolStatsMonitor"));
        scheduler.addTask(TransportStatsMonitor.METRIC_NAME, TransportStatsMonitor.class,
                TransportStatsMonitor.getTimer("TransportStatsMonitor"));
        scheduler.addTask(NodeIndicesStatsMonitor.METRIC_NAME, NodeIndicesStatsMonitor.class,
                NodeIndicesStatsMonitor.getTimer("NodeIndicesStatsMonitor"));
        scheduler.addTask(FsStatsMonitor.METRIC_NAME, FsStatsMonitor.class,
                FsStatsMonitor.getTimer("FsStatsMonitor"));
        // TODO: 2X: Determine if this is necessary and if yes find an alternative
        //scheduler.addTask(NetworkStatsMonitor.METRIC_NAME, NetworkStatsMonitor.class, NetworkStatsMonitor.getTimer("NetworkStatsMonitor"));
        scheduler.addTask(JvmStatsMonitor.METRIC_NAME, JvmStatsMonitor.class,
                JvmStatsMonitor.getTimer("JvmStatsMonitor"));
        scheduler.addTask(OsStatsMonitor.METRIC_NAME, OsStatsMonitor.class,
                OsStatsMonitor.getTimer("OsStatsMonitor"));
        scheduler.addTask(ProcessStatsMonitor.METRIC_NAME, ProcessStatsMonitor.class,
                ProcessStatsMonitor.getTimer("ProcessStatsMonitor"));
        scheduler.addTask(HttpStatsMonitor.METRIC_NAME, HttpStatsMonitor.class,
                HttpStatsMonitor.getTimer("HttpStatsMonitor"));
        scheduler.addTask(AllCircuitBreakerStatsMonitor.METRIC_NAME, AllCircuitBreakerStatsMonitor.class,
                AllCircuitBreakerStatsMonitor.getTimer("AllCircuitBreakerStatsMonitor"));
        scheduler.addTask(SnapshotBackupMonitor.METRIC_NAME, SnapshotBackupMonitor.class,
                SnapshotBackupMonitor.getTimer("SnapshotBackupMonitor"));
        scheduler.addTaskWithDelay(NodeHealthMonitor.METRIC_NAME, NodeHealthMonitor.class,
                NodeHealthMonitor.getTimer("NodeHealthMonitor"), ES_NODE_HEALTH_MONITOR_DELAY);
    }

    public InstanceManager getInstanceManager() {
        return instanceManager;
    }

    public RaigadScheduler getScheduler() {
        return scheduler;
    }

    public IConfiguration getConfiguration() {
        return config;
    }
}
5,152
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/AbstractRepository.java
/** * Copyright 2017 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.backup; import com.google.inject.ImplementedBy; import com.google.inject.Inject; import com.google.inject.name.Named; import com.netflix.raigad.configuration.IConfiguration; import com.netflix.raigad.utils.ElasticsearchTransportClient; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoriesMetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @ImplementedBy(S3Repository.class) public abstract class AbstractRepository { private static final Logger logger = LoggerFactory.getLogger(AbstractRepository.class); public enum RepositoryType { s3, fs } protected final IConfiguration config; protected final AbstractRepositorySettingsParams repositorySettingsParams; @Inject protected AbstractRepository(IConfiguration config, @Named("s3") AbstractRepositorySettingsParams repositorySettingsParams) { this.config = config; this.repositorySettingsParams = repositorySettingsParams; } /** * Get Remote Repository Name */ public abstract String getRemoteRepositoryName(); public abstract String createOrGetSnapshotRepository() throws Exception; public abstract void createRestoreRepository(String s3RepoName, String basePathSuffix) throws 
Exception; public boolean doesRepositoryExists(String repositoryName, RepositoryType repositoryType) { boolean doesRepoExists = false; logger.info("Checking if repository <" + repositoryName + "> exists for type <" + repositoryType.name() + ">"); try { Client esTransportClient = ElasticsearchTransportClient.instance(config).getTransportClient(); ClusterStateResponse clusterStateResponse = esTransportClient.admin().cluster().prepareState().clear().setMetaData(true).get(); MetaData metaData = clusterStateResponse.getState().getMetaData(); RepositoriesMetaData repositoriesMetaData = metaData.custom(RepositoriesMetaData.TYPE); if (repositoriesMetaData != null) { for (RepositoryMetaData repositoryMetaData : repositoriesMetaData.repositories()) { if (repositoryMetaData.name().equalsIgnoreCase(repositoryName) && repositoryMetaData.type().equalsIgnoreCase(repositoryType.name())) { doesRepoExists = true; break; } } if (config.isDebugEnabled()) for (RepositoryMetaData repositoryMetaData : repositoriesMetaData.repositories()) logger.debug("Repository <" + repositoryMetaData.name() + ">"); } if (doesRepoExists) logger.info("Repository <" + repositoryName + "> already exists"); else logger.info("Repository <" + repositoryName + "> does NOT exist"); } catch (Exception e) { logger.warn("Exception thrown while listing Snapshot Repositories", e); } return doesRepoExists; } }
5,153
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/S3Repository.java
/** * Copyright 2017 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.backup; import com.google.inject.Inject; import com.google.inject.Singleton; import com.netflix.raigad.backup.exception.CreateRepositoryException; import com.netflix.raigad.configuration.IConfiguration; import com.netflix.raigad.utils.ElasticsearchTransportClient; import com.netflix.raigad.utils.SystemUtils; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.Settings; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * TODO: ADD following params to the repository * The following settings are supported: * <p> * bucket: The name of the bucket to be used for snapshots (mandatory) * region: The region where bucket is located, defaults to US Standard * base_path: Specifies the path within bucket to repository data. Defaults to root directory * access_key: The access key to use for authentication. Defaults to value of cloud.aws.access_key * secret_key: The secret key to use for authentication. Defaults to value of cloud.aws.secret_key * chunk_size: Big files can be broken down into chunks during the snapshotting if needed. The chunk size can be specified in bytes or by using size value notation, i.e. 1g, 10m, 5k. Defaults to 100m. 
* compress: When set to true metadata files are stored in compressed format. This setting doesn't affect index files that are already compressed by default. Defaults to false. * server_side_encryption: When set to true files are encrypted on server side using AES256 algorithm. Defaults to false. * max_retries: Number of retries in case of S3 errors. Defaults to 3. */ @Singleton public class S3Repository extends AbstractRepository { private static final Logger logger = LoggerFactory.getLogger(S3Repository.class); private static final String S3_REPO_DATE_FORMAT = "yyyyMMdd"; private static final DateTimeZone currentZone = DateTimeZone.UTC; private RepositoryType type; private AbstractRepositorySettingsParams repositorySettingsParams; @Inject private S3Repository(IConfiguration config, AbstractRepositorySettingsParams repositorySettingsParams) { super(config, repositorySettingsParams); this.type = RepositoryType.s3; this.repositorySettingsParams = repositorySettingsParams; } /** * 0.0.0.0:9200/_snapshot/s3_repo * { "type": "s3", * "settings": { "bucket": "us-east-1.es-test", * "base_path": "es_abc/20140410", * "region": "us-east-1" * } * } */ @Override public String createOrGetSnapshotRepository() throws Exception { String s3RepoName; try { s3RepoName = getRemoteRepositoryName(); logger.info("Snapshot repository name : <" + s3RepoName + ">"); //Set Snapshot Backup related parameters repositorySettingsParams.setBackupParams(); //Check if Repository Exists if (!doesRepositoryExists(s3RepoName, getRepositoryType())) { createNewRepository(s3RepoName); } } catch (Exception e) { throw new CreateRepositoryException("Failed creating snapshot repository!", e); } return s3RepoName; } @Override public void createRestoreRepository(String s3RepoName, String basePathSuffix) throws Exception { try { // Set restore related parameters repositorySettingsParams.setRestoreParams(basePathSuffix); //Check if repository exists createNewRepository(s3RepoName); } catch (Exception e) { throw 
new CreateRepositoryException("Failed creating restore repository!", e); } } public void createNewRepository(String s3RepoName) throws Exception { Client esTransportClient = ElasticsearchTransportClient.instance(config).getTransportClient(); //Creating new repository now PutRepositoryResponse putRepositoryResponse = getPutRepositoryResponse(esTransportClient, s3RepoName); if (putRepositoryResponse.isAcknowledged()) { logger.info("Successfully created repository <" + s3RepoName + "> " + getRepoParamPrint()); } else { throw new CreateRepositoryException("Failed creating repository failed <" + s3RepoName + "> " + getRepoParamPrint()); } } @Override public String getRemoteRepositoryName() { DateTime dateTime = new DateTime(); DateTime dateTimeGmt = dateTime.withZone(currentZone); return SystemUtils.formatDate(dateTimeGmt, S3_REPO_DATE_FORMAT); } public RepositoryType getRepositoryType() { return type; } public String getRepoParamPrint() { return "bucket: <" + repositorySettingsParams.getBucket() + "> " + "base_path: <" + repositorySettingsParams.getBase_path() + "> " + "region: <" + repositorySettingsParams.getRegion() + ">"; } /** * Following method is isolated so that it helps in unit testing for mocking */ public PutRepositoryResponse getPutRepositoryResponse(Client esTransportClient, String s3RepoName) { return esTransportClient.admin().cluster().preparePutRepository(s3RepoName) .setType(getRepositoryType().name()).setSettings(Settings.builder() .put("base_path", repositorySettingsParams.getBase_path()) .put("region", repositorySettingsParams.getRegion()) .put("bucket", repositorySettingsParams.getBucket()) ).get(); } }
5,154
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/SnapshotBackupManager.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.backup;

import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.google.inject.name.Named;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.CronTimer;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.*;
import com.netflix.servo.monitor.*;
import org.apache.commons.lang.StringUtils;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.snapshots.SnapshotShardFailure;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Scheduled task that triggers an Elasticsearch snapshot backup. Only the
 * current master node actually runs the snapshot; all other nodes no-op.
 * Success/failure counts and snapshot duration are exported via Servo.
 */
@Singleton
public class SnapshotBackupManager extends Task {
    private static final Logger logger = LoggerFactory.getLogger(SnapshotBackupManager.class);
    public static String JOBNAME = "SnapshotBackupManager";

    private final AbstractRepository repository;
    private final HttpModule httpModule;

    // Servo counters exposed through getNumSnapshotSuccess()/getNumSnapshotFailure()
    private final AtomicInteger snapshotSuccess = new AtomicInteger(0);
    private final AtomicInteger snapshotFailure = new AtomicInteger(0);

    // NOTE(review): declared as a guard against concurrent snapshots but never
    // read or written anywhere in this class — confirm intent before relying on it.
    private static final AtomicBoolean isSnapshotRunning = new AtomicBoolean(false);

    private static final DateTimeZone currentZone = DateTimeZone.UTC;
    private static final String S3_REPO_FOLDER_DATE_FORMAT = "yyyyMMddHHmm";
    private static final String COMMA_SEPARATOR = ",";

    private static Timer snapshotDuration = new BasicTimer(MonitorConfig.builder("snapshotDuration").withTag("class", "Elasticsearch_SnapshotBackupReporter").build(), TimeUnit.SECONDS);

    static {
        Monitors.registerObject(snapshotDuration);
    }

    @Inject
    public SnapshotBackupManager(IConfiguration config, @Named("s3") AbstractRepository repository, HttpModule httpModule) {
        super(config);
        this.repository = repository;
        this.httpModule = httpModule;
    }

    /**
     * Task entry point. Runs a snapshot backup only when (a) this node is the
     * master, (b) Elasticsearch is up, and (c) snapshot backup is enabled in
     * configuration. All failures are counted and logged, never propagated.
     */
    @Override
    public void execute() {
        try {
            // Confirm if current node is a master node
            if (ElasticsearchUtils.amIMasterNode(config, httpModule)) {
                // Only start the snapshot backup once Elasticsearch is running
                if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
                    String exceptionMsg = "Elasticsearch is not yet started, hence not Starting Snapshot Operation";
                    logger.info(exceptionMsg);
                    return;
                }

                logger.info("Current node is the Master Node.");

                if (!config.isSnapshotBackupEnabled()) {
                    logger.info("Snapshot Backup is disabled, hence can not start Snapshot Backup.");
                    return;
                }

                // Run snapshot backup
                runSnapshotBackup();
            } else {
                if (config.isDebugEnabled())
                    logger.debug("Current node is not a Master Node yet, hence not running a Snapshot");
            }
        } catch (Exception e) {
            snapshotFailure.incrementAndGet();
            logger.warn("Exception thrown while running Snapshot Backup", e);
        }
    }

    /**
     * Creates (or reuses) today's snapshot repository and runs a blocking
     * snapshot of the configured indices into it.
     *
     * @throws Exception if repository creation or the snapshot request fails
     */
    public void runSnapshotBackup() throws Exception {
        // Create or get repository
        String repositoryName = repository.createOrGetSnapshotRepository();

        // Start backup
        String snapshotName = getSnapshotName(config.getCommaSeparatedIndicesToBackup(), config.includeIndexNameInSnapshot());

        logger.info("Repository Name : <" + repositoryName + "> Snapshot Name : <" + snapshotName + "> Indices : <"
                + config.getCommaSeparatedIndicesToBackup() + "> \nRunning Snapshot now ... ");

        Client esTransportClient = ElasticsearchTransportClient.instance(config).getTransportClient();

        Stopwatch snapshotTimer = snapshotDuration.start();
        // Fix: stop the timer in finally so an exception from the snapshot call
        // no longer leaves the Servo stopwatch running forever.
        try {
            // This is a blocking call. It'll wait until the snapshot is finished.
            CreateSnapshotResponse createSnapshotResponse = getCreateSnapshotResponse(esTransportClient, repositoryName, snapshotName);

            logger.info("Snapshot Status = " + createSnapshotResponse.status().toString());
            if (createSnapshotResponse.status() == RestStatus.OK) {
                //TODO Add Servo Monitoring so that it can be verified from dashboard
                printSnapshotDetails(createSnapshotResponse);
                snapshotSuccess.incrementAndGet();
            } else if (createSnapshotResponse.status() == RestStatus.INTERNAL_SERVER_ERROR) {
                //TODO Add Servo Monitoring so that it can be verified from dashboard
                logger.info("Snapshot Completely Failed");
                snapshotFailure.incrementAndGet();
            }
        } finally {
            // Stop the timer
            snapshotTimer.stop();
        }
    }

    /**
     * Logs a human-readable summary of a finished snapshot: indices, timing,
     * shard totals and any per-shard failures.
     */
    //TODO: Map to Java Class and Create JSON
    public void printSnapshotDetails(CreateSnapshotResponse createSnapshotResponse) {
        StringBuilder builder = new StringBuilder();
        builder.append("Snapshot Details:");
        builder.append("\n\t Name = " + createSnapshotResponse.getSnapshotInfo().snapshotId().getName());
        builder.append("\n\t Indices : ");
        for (String index : createSnapshotResponse.getSnapshotInfo().indices()) {
            builder.append("\n\t\t Index = " + index);
        }
        builder.append("\n\t Start Time = " + createSnapshotResponse.getSnapshotInfo().startTime());
        builder.append("\n\t End Time = " + createSnapshotResponse.getSnapshotInfo().endTime());

        long minuteDuration = (createSnapshotResponse.getSnapshotInfo().endTime() - createSnapshotResponse.getSnapshotInfo().startTime()) / (1000 * 60);
        builder.append("\n\t Total Time Taken = " + minuteDuration + " Minutes");

        builder.append("\n\t Total Shards = " + createSnapshotResponse.getSnapshotInfo().totalShards());
        builder.append("\n\t Successful Shards = " + createSnapshotResponse.getSnapshotInfo().successfulShards());
        builder.append("\n\t Total Failed Shards = " + createSnapshotResponse.getSnapshotInfo().failedShards());

        if (createSnapshotResponse.getSnapshotInfo().failedShards() > 0) {
            for (SnapshotShardFailure failedShard : createSnapshotResponse.getSnapshotInfo().shardFailures()) {
                builder.append("\n\t Failed Shards : ");
                builder.append("\n\t\t Index = " + failedShard.index());
                builder.append("\n\t\t Shard Id = " + failedShard.shardId());
                builder.append("\n\t\t Node Id = " + failedShard.nodeId());
                builder.append("\n\t\t Reason = " + failedShard.reason());
            }
        }
        logger.info(builder.toString());
    }

    /**
     * @return an hourly {@link SimpleTimer} when hourly snapshots are enabled,
     *         otherwise a daily {@link CronTimer} at the configured backup hour.
     */
    public static TaskTimer getTimer(IConfiguration config) {
        if (config.isHourlySnapshotEnabled()) {
            return new SimpleTimer(JOBNAME, config.getBackupCronTimerInSeconds() * 1000);
        } else {
            int hour = config.getBackupHour();
            return new CronTimer(hour, 1, 0);
        }
    }

    @Override
    public String getName() {
        return JOBNAME;
    }

    /**
     * Builds the snapshot name: optionally the index list (commas replaced by
     * underscores) followed by the current UTC timestamp (yyyyMMddHHmm).
     *
     * @param indices                   comma-separated index names, or "all"
     * @param includeIndexNameInSnapshot whether to prefix the index names
     * @return the snapshot name
     */
    public String getSnapshotName(String indices, boolean includeIndexNameInSnapshot) {
        StringBuilder snapshotName = new StringBuilder();
        if (includeIndexNameInSnapshot) {
            // equalsIgnoreCase is equivalent to the previous toLowerCase().equals("all")
            String indexName = indices.equalsIgnoreCase("all") ? "all" : StringUtils.replace(indices, ",", "_");
            snapshotName.append(indexName).append("_");
        }

        DateTime dt = new DateTime();
        DateTime dtGmt = dt.withZone(currentZone);
        String snapshotDate = SystemUtils.formatDate(dtGmt, S3_REPO_FOLDER_DATE_FORMAT);
        snapshotName.append(snapshotDate);
        return snapshotName.toString();
    }

    public int getNumSnapshotSuccess() {
        return snapshotSuccess.get();
    }

    public int getNumSnapshotFailure() {
        return snapshotFailure.get();
    }

    /**
     * Isolated so tests can mock the actual cluster call. Blocks until the
     * snapshot completes when waitForCompletionOfBackup() is true.
     */
    public CreateSnapshotResponse getCreateSnapshotResponse(Client esTransportClient, String repositoryName, String snapshotName) {
        return esTransportClient.admin().cluster().prepareCreateSnapshot(repositoryName, snapshotName)
                .setWaitForCompletion(config.waitForCompletionOfBackup())
                .setIndices(config.getCommaSeparatedIndicesToBackup().split(COMMA_SEPARATOR))
                .setIncludeGlobalState(config.includeGlobalStateDuringBackup())
                .setPartial(config.partiallyBackupIndices()).get();
    }
}
5,155
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/RestoreBackupManager.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.backup;

import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.google.inject.name.Named;
import com.netflix.raigad.backup.exception.RestoreBackupException;
import com.netflix.raigad.configuration.IConfiguration;
import com.netflix.raigad.scheduler.SimpleTimer;
import com.netflix.raigad.scheduler.Task;
import com.netflix.raigad.scheduler.TaskTimer;
import com.netflix.raigad.utils.ElasticsearchProcessMonitor;
import com.netflix.raigad.utils.ElasticsearchTransportClient;
import com.netflix.raigad.utils.ElasticsearchUtils;
import com.netflix.raigad.utils.HttpModule;
import org.apache.commons.lang.StringUtils;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.rest.RestStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Task that restores an Elasticsearch snapshot from a backup repository.
 * Only the current master node performs the restore; every argument of
 * {@link #runRestore} falls back to a configuration value when blank.
 */
@Singleton
public class RestoreBackupManager extends Task {
    private static final Logger logger = LoggerFactory.getLogger(RestoreBackupManager.class);
    public static String JOBNAME = "RestoreBackupManager";

    private final AbstractRepository repository;
    private final HttpModule httpModule;

    // NOTE(review): declared but never consulted anywhere in this class —
    // presumably intended to guard against overlapping restores; confirm intent.
    private static final AtomicBoolean isRestoreRunning = new AtomicBoolean(false);

    // "_all" means restore every index in the snapshot
    private static final String ALL_INDICES_TAG = "_all";
    // Joins source repo name and source cluster name into the local repo name
    private static final String SUFFIX_SEPARATOR_TAG = "-";
    private static final String COMMA_SEPARATOR = ",";

    @Inject
    public RestoreBackupManager(IConfiguration config, @Named("s3") AbstractRepository repository, HttpModule httpModule) {
        super(config);
        this.repository = repository;
        this.httpModule = httpModule;
    }

    /**
     * Task entry point. Runs the restore only when this node is the master and
     * Elasticsearch is up; all restore parameters are taken from configuration.
     * Exceptions are logged, never propagated.
     */
    @Override
    public void execute() {
        try {
            // Confirm if current node is a master node
            if (ElasticsearchUtils.amIMasterNode(config, httpModule)) {
                // Only start the restore once Elasticsearch is running
                if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) {
                    String exceptionMsg = "Elasticsearch is not yet started, hence not Starting Restore Operation";
                    logger.info(exceptionMsg);
                    return;
                }

                logger.info("Current node is the Master Node. Running Restore now ...");

                //TODO: Add Config properties for Rename Pattern and Rename Replacement
                runRestore(config.getRestoreRepositoryName(),
                        config.getRestoreRepositoryType(),
                        config.getRestoreSnapshotName(),
                        config.getCommaSeparatedIndicesToRestore(),
                        null, null);
            } else {
                logger.info("Current node is not a Master Node yet, hence not running a Restore");
            }
        } catch (Exception e) {
            logger.warn("Exception thrown while running Restore Backup", e);
        }
    }

    /**
     * Restores a snapshot. Every blank argument falls back to its configured
     * default; the restore repository is created on the fly if missing; when no
     * snapshot name is given, the lexicographically last available snapshot is
     * used (snapshot names start with a yyyyMMddHHmm timestamp, so this is
     * presumably the most recent one — verify against the snapshot naming).
     *
     * @param sourceRepositoryName source repo name; blank = config value
     * @param repositoryType       repo type (e.g. "s3"); blank = config value, then "s3"
     * @param snapshotName         snapshot to restore; blank = config value, then latest
     * @param indices              comma-separated indices; blank or "_all" = all indices
     * @param renamePattern        optional index rename regex (may be null)
     * @param renameReplacement    optional rename replacement (may be null)
     * @throws RestoreBackupException if no repository name can be determined or
     *                                no snapshots are available
     */
    public void runRestore(String sourceRepositoryName, String repositoryType,
                           String snapshotName, String indices,
                           String renamePattern, String renameReplacement) throws Exception {
        Client esTransportClient = ElasticsearchTransportClient.instance(config).getTransportClient();

        // Get repository name: this will serve as the base path suffix
        String sourceRepoName = StringUtils.isBlank(sourceRepositoryName) ? config.getRestoreRepositoryName() : sourceRepositoryName;
        if (StringUtils.isBlank(sourceRepoName))
            throw new RestoreBackupException("Repository Name is Null or Empty");

        // Attach suffix to the repository name so that it does not conflict with the snapshot repository name
        String restoreRepositoryName = sourceRepoName + SUFFIX_SEPARATOR_TAG + config.getRestoreSourceClusterName();

        String repoType = StringUtils.isBlank(repositoryType) ? config.getRestoreRepositoryType().toLowerCase() : repositoryType;
        if (StringUtils.isBlank(repoType)) {
            logger.info("RepositoryType is empty, hence Defaulting to <s3> type");
            repoType = AbstractRepository.RepositoryType.s3.name();
        }

        // If the restore repository does not exist yet, create it
        if (!repository.doesRepositoryExists(restoreRepositoryName, AbstractRepository.RepositoryType.valueOf(repoType.toLowerCase()))) {
            repository.createRestoreRepository(restoreRepositoryName, sourceRepoName);
        }

        // Get snapshot name
        String snapshotN = StringUtils.isBlank(snapshotName) ? config.getRestoreSnapshotName() : snapshotName;
        if (StringUtils.isBlank(snapshotN)) {
            // Pick the last snapshot from the available snapshots
            List<String> snapshots = ElasticsearchUtils.getAvailableSnapshots(esTransportClient, restoreRepositoryName);
            if (snapshots.isEmpty())
                throw new RestoreBackupException("No available snapshots in <" + restoreRepositoryName + "> repository.");

            // Sorting snapshot names in reverse order, then using the last available snapshot
            Collections.sort(snapshots, Collections.reverseOrder());
            snapshotN = snapshots.get(0);
        }

        logger.info("Snapshot Name : <" + snapshotN + ">");

        // Get names of indices; null means "restore everything"
        String commaSeparatedIndices = StringUtils.isBlank(indices) ? config.getCommaSeparatedIndicesToRestore() : indices;
        if (StringUtils.isBlank(commaSeparatedIndices) || commaSeparatedIndices.equalsIgnoreCase(ALL_INDICES_TAG)) {
            commaSeparatedIndices = null;
            logger.info("Restoring all Indices.");
        }
        logger.info("Indices param : <" + commaSeparatedIndices + ">");

        RestoreSnapshotResponse restoreSnapshotResponse = getRestoreSnapshotResponse(esTransportClient,
                commaSeparatedIndices, restoreRepositoryName, snapshotN, renamePattern, renameReplacement);

        logger.info("Restore Status = " + restoreSnapshotResponse.status().toString());

        if (restoreSnapshotResponse.status() == RestStatus.OK) {
            printRestoreDetails(restoreSnapshotResponse);
        } else if (restoreSnapshotResponse.status() == RestStatus.INTERNAL_SERVER_ERROR)
            logger.info("Restore Completely Failed");
    }

    /**
     * Logs a human-readable summary of a finished restore: indices and shard totals.
     */
    //TODO: Map to Java Class and Create JSON
    public void printRestoreDetails(RestoreSnapshotResponse restoreSnapshotResponse) {
        StringBuilder builder = new StringBuilder();
        builder.append("Restore Details:");
        builder.append("\n\t Name = " + restoreSnapshotResponse.getRestoreInfo().name());
        builder.append("\n\t Indices : ");
        for (String index : restoreSnapshotResponse.getRestoreInfo().indices()) {
            builder.append("\n\t\t Index = " + index);
        }
        builder.append("\n\t Total Shards = " + restoreSnapshotResponse.getRestoreInfo().totalShards());
        builder.append("\n\t Successful Shards = " + restoreSnapshotResponse.getRestoreInfo().successfulShards());
        builder.append("\n\t Total Failed Shards = " + restoreSnapshotResponse.getRestoreInfo().failedShards());
        logger.info(builder.toString());
    }

    public static TaskTimer getTimer(IConfiguration config) {
        return new SimpleTimer(JOBNAME);
    }

    @Override
    public String getName() {
        return JOBNAME;
    }

    /**
     * Builds and executes the (blocking) restore request. Isolated so tests can
     * mock the cluster call.
     *
     * @param commaSeparatedIndices indices to restore, or null/"_all" for all
     */
    public RestoreSnapshotResponse getRestoreSnapshotResponse(Client esTransportClient,
                                                              String commaSeparatedIndices, String restoreRepositoryName,
                                                              String snapshotN, String renamePattern, String renameReplacement) {
        RestoreSnapshotRequestBuilder restoreSnapshotRequestBuilder;
        if (commaSeparatedIndices != null && !commaSeparatedIndices.equalsIgnoreCase(ALL_INDICES_TAG)) {
            // This is a blocking call: it waits until the restore is finished
            restoreSnapshotRequestBuilder = esTransportClient.admin().cluster().prepareRestoreSnapshot(restoreRepositoryName, snapshotN)
                    .setWaitForCompletion(true)
                    .setIndices(commaSeparatedIndices.split(COMMA_SEPARATOR)); //"test-idx-*", "-test-idx-2"
        } else {
            // Not setting indices explicitly -- seems to be a bug in Elasticsearch
            restoreSnapshotRequestBuilder = esTransportClient.admin().cluster().prepareRestoreSnapshot(restoreRepositoryName, snapshotN)
                    .setWaitForCompletion(true);
        }

        // NOTE(review): the second clause uses || — pattern and replacement are
        // applied if EITHER is non-empty (both are non-null at this point).
        // Looks like && may have been intended; confirm before changing.
        if ((renamePattern != null && renameReplacement != null) && (!renamePattern.isEmpty() || !renameReplacement.isEmpty())) {
            logger.info("Rename Pattern = {}, Rename Replacement = {}", renamePattern, renameReplacement);
            restoreSnapshotRequestBuilder.setRenamePattern(renamePattern).setRenameReplacement(renameReplacement);
        }

        return restoreSnapshotRequestBuilder.execute().actionGet();
    }
}
5,156
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/S3RepositorySettingsParams.java
/**
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.backup;

import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.raigad.backup.exception.CreateRepositoryException;
import com.netflix.raigad.backup.exception.RestoreBackupException;
import com.netflix.raigad.configuration.IConfiguration;
import org.apache.commons.lang.StringUtils;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;

/**
 * Computes the S3 repository settings (bucket, region, base_path) used by
 * {@link S3Repository}, for both the snapshot (backup) and restore directions.
 */
@Singleton
public class S3RepositorySettingsParams extends AbstractRepositorySettingsParams {
    private static final Logger logger = LoggerFactory.getLogger(S3RepositorySettingsParams.class);

    private final char PATH_SEP = File.separatorChar;
    private final String S3_REPO_DATE_FORMAT = "yyyyMMdd";

    @Inject
    public S3RepositorySettingsParams(IConfiguration config) {
        super(config);
    }

    /**
     * Populates bucket/region/base_path for taking a snapshot backup.
     *
     * @throws CreateRepositoryException when no backup location is configured
     */
    @Override
    public void setBackupParams() throws CreateRepositoryException {
        this.bucket = config.getBackupLocation();
        if (StringUtils.isEmpty(this.bucket)) {
            throw new CreateRepositoryException("Backup Location is not set in configuration.");
        }
        this.region = config.getDC();
        this.base_path = getSnapshotBackupBasePath();
        logger.info("Bucket : <" + bucket + "> Region : <" + region + "> Base_path : <" + base_path + ">");
    }

    /**
     * Populates bucket/region/base_path for a restore. Restore-specific
     * settings win; otherwise the backup location and local DC are reused.
     *
     * @param basePathSuffix suffix (typically the source repo date) appended
     *                       to the source cluster name
     * @throws RestoreBackupException when no source cluster name is configured
     */
    @Override
    public void setRestoreParams(String basePathSuffix) throws RestoreBackupException {
        if (StringUtils.isNotBlank(config.getRestoreLocation())) {
            this.bucket = config.getRestoreLocation();
        } else {
            logger.info("config.getRestoreLocation() is Blank, hence setting bucket = config.getBackupLocation()");
            this.bucket = config.getBackupLocation();
        }

        if (StringUtils.isNotBlank(config.getRestoreSourceRepositoryRegion())) {
            this.region = config.getRestoreSourceRepositoryRegion();
        } else {
            logger.info("config.getRestoreSourceRepositoryRegion() is Blank, hence setting region = config.getDC()");
            this.region = config.getDC();
        }

        this.base_path = getRestoreBackupBasePath(basePathSuffix);
        logger.info("Bucket : <" + bucket + "> Region : <" + region + "> Base_path : <" + base_path + ">");
    }

    /**
     * Snapshot base path: {app_name}/{yyyyMMdd}.
     * ("base_path": "es_{current_cluster_name}/20140410")
     */
    public String getSnapshotBackupBasePath() {
        String snapshotBasePath = config.getAppName() + PATH_SEP + getS3RepositoryName();
        logger.info("S3 Repository Snapshot Base Path : <" + snapshotBasePath + ">");
        return snapshotBasePath;
    }

    /**
     * Restore base path: {source_cluster_name}/{basePathSuffix}.
     * ("base_path": "es_{source_cluster_name}/20140410")
     * base_path = basePathPrefix + basePathSuffix; a custom prefix could be
     * supported instead of always using the source cluster name.
     *
     * @throws RestoreBackupException when no source cluster is configured
     */
    public String getRestoreBackupBasePath(String basePathSuffix) throws RestoreBackupException {
        if (!StringUtils.isNotBlank(config.getRestoreSourceClusterName())) {
            throw new RestoreBackupException("No Source Cluster for Restore yet chosen.");
        }
        String restoreBasePath = config.getRestoreSourceClusterName() + PATH_SEP + basePathSuffix;
        logger.info("S3 Repository Restore Base Path : <" + restoreBasePath + ">");
        return restoreBasePath;
    }

    /** @return current UTC date formatted as yyyyMMdd */
    public String getS3RepositoryName() {
        return formatDate(new DateTime().withZone(DateTimeZone.UTC), S3_REPO_DATE_FORMAT);
    }

    /** Formats a Joda DateTime with the supplied pattern. */
    public String formatDate(DateTime dateTime, String dateFormat) {
        DateTimeFormatter formatter = DateTimeFormat.forPattern(dateFormat);
        return dateTime.toString(formatter);
    }
}
5,157
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/AbstractRepositorySettingsParams.java
/** * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.backup; import com.google.inject.ImplementedBy; import com.netflix.raigad.backup.exception.CreateRepositoryException; import com.netflix.raigad.backup.exception.RestoreBackupException; import com.netflix.raigad.configuration.IConfiguration; @ImplementedBy(S3RepositorySettingsParams.class) public abstract class AbstractRepositorySettingsParams { /** * 0.0.0.0:9200/_snapshot/20140410 * { "type": "s3", * "settings": { "bucket": "us-east-1.netflix-cassandra-archive-test", * "base_path": "es_abc/20140410", * "region": "us-east-1" * } * } */ protected String bucket; protected String base_path; protected String region; protected final IConfiguration config; public AbstractRepositorySettingsParams(IConfiguration config) { this.config = config; } public abstract void setBackupParams() throws CreateRepositoryException; public abstract void setRestoreParams(String basePathSuffix) throws RestoreBackupException; public String getBucket() { return bucket; } public String getBase_path() { return base_path; } public String getRegion() { return region; } }
5,158
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/exception/MultipleMasterNodesException.java
/**
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.backup.exception;

/**
 * Checked exception signalling that more than one master node was detected.
 */
public class MultipleMasterNodesException extends Exception {
    private static final long serialVersionUID = 1L;

    public MultipleMasterNodesException(String msg) {
        super(msg);
    }

    public MultipleMasterNodesException(String msg, Throwable th) {
        super(msg, th);
    }

    public MultipleMasterNodesException(Throwable th) {
        super(th);
    }

    public MultipleMasterNodesException(Exception ex) {
        super(ex);
    }
}
5,159
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/exception/DuplicateRepositoryNameException.java
/** * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.backup.exception; public class DuplicateRepositoryNameException extends Exception { private static final long serialVersionUID = 1L; public DuplicateRepositoryNameException(String msg, Throwable th) { super(msg, th); } public DuplicateRepositoryNameException(String msg) { super(msg); } public DuplicateRepositoryNameException(Exception ex) { super(ex); } public DuplicateRepositoryNameException(Throwable th) { super(th); } }
5,160
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/exception/CreateRepositoryException.java
/** * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.backup.exception; public class CreateRepositoryException extends Exception { private static final long serialVersionUID = 1L; public CreateRepositoryException(String msg, Throwable th) { super(msg, th); } public CreateRepositoryException(String msg) { super(msg); } public CreateRepositoryException(Exception ex) { super(ex); } public CreateRepositoryException(Throwable th) { super(th); } }
5,161
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/exception/NoRepositoryException.java
/** * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.backup.exception; public class NoRepositoryException extends Exception { private static final long serialVersionUID = 1L; public NoRepositoryException(String msg, Throwable th) { super(msg, th); } public NoRepositoryException(String msg) { super(msg); } public NoRepositoryException(Exception ex) { super(ex); } public NoRepositoryException(Throwable th) { super(th); } }
5,162
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/exception/NoMasterNodeException.java
/** * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.backup.exception; public class NoMasterNodeException extends Exception { private static final long serialVersionUID = 1L; public NoMasterNodeException(String msg, Throwable th) { super(msg, th); } public NoMasterNodeException(String msg) { super(msg); } public NoMasterNodeException(Exception ex) { super(ex); } public NoMasterNodeException(Throwable th) { super(th); } }
5,163
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/exception/RestoreBackupException.java
/** * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.backup.exception; public class RestoreBackupException extends Exception { private static final long serialVersionUID = 1L; public RestoreBackupException(String msg, Throwable th) { super(msg, th); } public RestoreBackupException(String msg) { super(msg); } public RestoreBackupException(Exception ex) { super(ex); } public RestoreBackupException(Throwable th) { super(th); } }
5,164
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/backup/exception/SnapshotBackupException.java
/** * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.backup.exception; public class SnapshotBackupException extends Exception { private static final long serialVersionUID = 1L; public SnapshotBackupException(String msg, Throwable th) { super(msg, th); } public SnapshotBackupException(String msg) { super(msg); } public SnapshotBackupException(Exception ex) { super(ex); } public SnapshotBackupException(Throwable th) { super(th); } }
5,165
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/indexmanagement/IIndexNameFilter.java
/** * Copyright 2017 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.indexmanagement; public interface IIndexNameFilter { boolean filter(String name); }
5,166
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/indexmanagement/IndexMetadata.java
/** * Copyright 2017 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.indexmanagement; import com.netflix.raigad.indexmanagement.exception.UnsupportedAutoIndexException; import com.netflix.raigad.indexmanagement.indexfilters.DatePatternIndexNameFilter; import org.codehaus.jackson.annotate.JsonCreator; import org.codehaus.jackson.annotate.JsonProperty; import org.joda.time.DateTime; import org.joda.time.Period; import org.joda.time.format.DateTimeFormat; import org.joda.time.format.DateTimeFormatter; import org.joda.time.format.ISOPeriodFormat; public class IndexMetadata { private static Period[] AMOUNTS = new Period[] { Period.minutes(1), Period.hours(1), Period.days(1), Period.weeks(1), Period.months(1), Period.years(1) }; public enum RETENTION_TYPE { HOURLY("YYYYMMddHH", "PT%dH"), DAILY("YYYYMMdd", "P%dD"), MONTHLY("YYYYMM", "P%dM"), YEARLY("YYYY", "P%dY"); public final String datePattern; public final String periodFormat; RETENTION_TYPE(String datePattern, String periodFormat) { this.datePattern = datePattern; this.periodFormat = periodFormat; } } private final String indexNamePattern; private final DateTimeFormatter formatter; private final Period retentionPeriod; private final IIndexNameFilter indexNameFilter; private final boolean preCreate; @JsonCreator public IndexMetadata( @JsonProperty("indexName") String indexName, @JsonProperty("indexNamePattern") String indexNamePattern, @JsonProperty("retentionType") String 
retentionType, @JsonProperty("retentionPeriod") String retentionPeriod, @JsonProperty("preCreate") Boolean preCreate) throws UnsupportedAutoIndexException { if (retentionType == null) { retentionType = "DAILY"; } RETENTION_TYPE retType = RETENTION_TYPE.valueOf(retentionType.toUpperCase()); // If legacy prefix is used, then quote it so it will be used as plain text in // date pattern String prefix = (indexName == null) ? "" : "'" + indexName + "'"; String namePattern = (indexNamePattern == null) ? prefix + retType.datePattern : indexNamePattern; this.indexNamePattern = (indexName == null && indexNamePattern == null) ? null : namePattern; this.formatter = DateTimeFormat.forPattern(namePattern).withZoneUTC(); this.indexNameFilter = new DatePatternIndexNameFilter(formatter); if (retentionPeriod == null) { this.retentionPeriod = null; } else if (retentionPeriod.startsWith("P")) { this.retentionPeriod = ISOPeriodFormat.standard().parsePeriod(retentionPeriod); } else { Integer num = Integer.parseInt(retentionPeriod); String period = String.format(retType.periodFormat, num); this.retentionPeriod = ISOPeriodFormat.standard().parsePeriod(period); } this.preCreate = preCreate == null ? false : preCreate; } @Override public String toString() { return String.format("{\"indexNamePattern\": \"%s\", \"retentionPeriod\": \"%s\", \"preCreate\": %b}", indexNamePattern, retentionPeriod, preCreate); } public String getIndexNamePattern() { return indexNamePattern; } public Period getRetentionPeriod() { return retentionPeriod; } public IIndexNameFilter getIndexNameFilter() { return indexNameFilter; } public boolean isPreCreate() { return preCreate; } public boolean isActionable() { return indexNamePattern != null && retentionPeriod != null; } public DateTime getPastRetentionCutoffDate(DateTime currentDateTime) { // After computing the cutoff we print then reparse the cutoff time to round to // the significant aspects of the time based on the formatter. 
For example: // // currentDateTime = 2018-02-03T23:47 // retentionPeriod = P2Y // cutoff = 2016-02-03T23:47 // // If the index pattern is yyyy, then a 2016 index would be before the cutoff so it // would get dropped. We want to floor the cutoff time to only the significant aspects // which for this example would be the year. DateTime cutoff = currentDateTime.minus(retentionPeriod); return formatter.parseDateTime(formatter.print(cutoff)); } public DateTime getDateForIndexName(String name) { return formatter.parseDateTime(name); } public String getIndexNameToPreCreate(DateTime currentDateTime) throws UnsupportedAutoIndexException { String currentIndexName = formatter.print(currentDateTime); for (int i = 0; i < AMOUNTS.length; ++i) { String newIndexName = formatter.print(currentDateTime.plus(AMOUNTS[i])); if (!currentIndexName.equals(newIndexName)) { return newIndexName; } } throw new UnsupportedAutoIndexException("Invalid date pattern, do not know how to pre create"); } }
5,167
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/indexmanagement/ElasticsearchIndexManager.java
/** * Copyright 2017 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.indexmanagement; import com.google.inject.Inject; import com.google.inject.Singleton; import com.netflix.raigad.configuration.IConfiguration; import com.netflix.raigad.indexmanagement.exception.UnsupportedAutoIndexException; import com.netflix.raigad.scheduler.CronTimer; import com.netflix.raigad.scheduler.Task; import com.netflix.raigad.scheduler.TaskTimer; import com.netflix.raigad.utils.*; import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.client.Client; import org.joda.time.DateTime; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.List; import java.util.Map; import java.util.Set; /** * Index retention will delete indices older than certain date e.g. if the current date is 10/28/2014, * retention period is 4, and given the following indices: * <p> * test_index20141024 * test_index20141025 * test_index20141026 * test_index20141027 * test_index20141028 * <p> * Index to be deleted is test_index20141024. * <p> * If pre-create option is enabled, then one future index will be pre-created. 
Using the input data from above, * the following index will be pre-created: test_index20141029 */ @Singleton public class ElasticsearchIndexManager extends Task { private static final Logger logger = LoggerFactory.getLogger(ElasticsearchIndexManager.class); public static String JOB_NAME = "ElasticsearchIndexManager"; private final HttpModule httpModule; @Inject protected ElasticsearchIndexManager(IConfiguration config, HttpModule httpModule) { super(config); this.httpModule = httpModule; } Client getTransportClient() throws ElasticsearchTransportClientConnectionException { return ElasticsearchTransportClient.instance(config).getTransportClient(); } @Override public void execute() { try { if (!config.isIndexAutoCreationEnabled()) { logger.info("Index management is disabled"); return; } // Check is Elasticsearch is started if (!ElasticsearchProcessMonitor.isElasticsearchRunning()) { logger.info("Elasticsearch is not yet started, skipping index management"); return; } // Only active master can perform index management if (!ElasticsearchUtils.amIMasterNode(config, httpModule)) { if (config.isDebugEnabled()) { logger.debug("Cannot perform index management: current node is not an active master node"); } return; } runIndexManagement(); } catch (Exception e) { logger.warn("Exception while performing index management", e); } } public void runIndexManagement() throws Exception { logger.info("Starting index management"); String serializedIndexMetadata = config.getIndexMetadata(); List<IndexMetadata> indexMetadataList; try { indexMetadataList = IndexUtils.parseIndexMetadata(serializedIndexMetadata); } catch (Exception e) { logger.error(String.format("Failed to build index metadata from %s", serializedIndexMetadata), e); return; } Client esTransportClient = getTransportClient(); DateTime dateTime = new DateTime(); runIndexManagement(esTransportClient, indexMetadataList, dateTime); } void runIndexManagement(Client esTransportClient, List<IndexMetadata> indexMetadataList, 
DateTime dateTime) { // Find all the indices IndicesStatsResponse indicesStatsResponse = getIndicesStatsResponse(esTransportClient); Map<String, IndexStats> indexStatsMap = indicesStatsResponse.getIndices(); if (indexStatsMap == null || indexStatsMap.isEmpty()) { logger.info("Cluster is empty, no indices found"); return; } for (IndexMetadata indexMetadata : indexMetadataList) { if (!indexMetadata.isActionable()) { logger.warn(String.format("Index metadata %s is not actionable, skipping", indexMetadata)); continue; } try { checkIndexRetention(esTransportClient, indexStatsMap.keySet(), indexMetadata, dateTime); if (indexMetadata.isPreCreate()) { preCreateIndex(esTransportClient, indexMetadata, dateTime); } } catch (Exception e) { logger.error("Caught an exception while building index metadata information from configuration property", e); return; } } } @Override public String getName() { return JOB_NAME; } public static TaskTimer getTimer(IConfiguration config) { return new CronTimer(config.getAutoCreateIndexScheduleMinutes(), 0, JOB_NAME); } void checkIndexRetention(Client esTransportClient, Set<String> indices, IndexMetadata indexMetadata, DateTime dateTime) throws UnsupportedAutoIndexException { // Calculate the past retention date DateTime pastRetentionCutoffDate = indexMetadata.getPastRetentionCutoffDate(dateTime); logger.info("Deleting indices that are older than {}", pastRetentionCutoffDate); indices.forEach(indexName -> { logger.info("Processing index [{}]", indexName); if (indexMetadata.getIndexNameFilter().filter(indexName)) { // Extract date from the index name DateTime indexDate = indexMetadata.getDateForIndexName(indexName); if (indexDate.isBefore(pastRetentionCutoffDate)) { logger.info("Date {} for index {} is past the retention date of {}, deleting it", indexDate, indexName, pastRetentionCutoffDate); deleteIndices(esTransportClient, indexName, config.getAutoCreateIndexTimeout()); } } }); } void preCreateIndex(Client client, IndexMetadata indexMetadata, 
DateTime dateTime) throws UnsupportedAutoIndexException { logger.info("Pre-creating indices for {}*", indexMetadata.getIndexNamePattern()); IndicesStatsResponse indicesStatsResponse = getIndicesStatsResponse(client); Map<String, IndexStats> indexStatsMap = indicesStatsResponse.getIndices(); if (indexStatsMap == null || indexStatsMap.isEmpty()) { logger.info("No existing indices, no need to pre-create"); return; } indexStatsMap.keySet().stream() .filter(indexName -> indexMetadata.getIndexNameFilter().filter(indexName)) .findFirst() .ifPresent(indexName -> { try { createIndex(client, indexMetadata.getIndexNameToPreCreate(dateTime)); } catch (UnsupportedAutoIndexException e) { logger.error("Invalid index metadata: " + indexMetadata.toString(), e); } }); } void createIndex(Client client, String indexName) { if (!client.admin().indices().prepareExists(indexName).execute().actionGet(config.getAutoCreateIndexTimeout()).isExists()) { client.admin().indices().prepareCreate(indexName).execute().actionGet(config.getAutoCreateIndexTimeout()); logger.info(indexName + " has been created"); } else { logger.warn(indexName + " already exists"); } } void deleteIndices(Client client, String indexName, int timeout) { DeleteIndexResponse deleteIndexResponse = client.admin().indices().prepareDelete(indexName).execute().actionGet(timeout); if (deleteIndexResponse.isAcknowledged()) { logger.info(indexName + " deleted"); } else { logger.warn("Failed to delete " + indexName); throw new RuntimeException("Failed to delete " + indexName); } } /** * Following method is isolated so that it helps in Unit Testing for Mocking * * @param esTransportClient * @return */ IndicesStatsResponse getIndicesStatsResponse(Client esTransportClient) { return esTransportClient.admin().indices().prepareStats("_all").execute().actionGet(config.getAutoCreateIndexTimeout()); } }
5,168
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/indexmanagement/IndexUtils.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.indexmanagement;

import com.netflix.raigad.objectmapper.DefaultIndexMapper;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.type.TypeReference;

import java.io.IOException;
import java.util.List;

/**
 * Static helpers for deserializing index-management configuration.
 */
public class IndexUtils {

    // ObjectMapper is expensive to construct and thread-safe for readValue(),
    // so a single shared instance is cached instead of being rebuilt per call.
    private static final ObjectMapper JSON_MAPPER = new DefaultIndexMapper();

    // Static-only utility class; prevent instantiation.
    private IndexUtils() {
    }

    /**
     * Convert the JSON String of parameters to IndexMetadata objects
     *
     * @param serializedIndexMetadata : JSON string with parameters
     * @return list of IndexMetadata objects
     * @throws IOException if the input is not valid JSON or does not match the expected schema
     */
    public static List<IndexMetadata> parseIndexMetadata(String serializedIndexMetadata) throws IOException {
        TypeReference<List<IndexMetadata>> typeRef = new TypeReference<List<IndexMetadata>>() {};
        return JSON_MAPPER.readValue(serializedIndexMetadata, typeRef);
    }
}
5,169
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/indexmanagement
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/indexmanagement/indexfilters/DatePatternIndexNameFilter.java
/** * Copyright 2018 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.indexmanagement.indexfilters; import com.netflix.raigad.indexmanagement.IIndexNameFilter; import org.joda.time.MutableDateTime; import org.joda.time.format.DateTimeFormatter; public class DatePatternIndexNameFilter implements IIndexNameFilter { private final DateTimeFormatter formatter; public DatePatternIndexNameFilter(DateTimeFormatter formatter) { this.formatter = formatter; } @Override public boolean filter(String name) { try { MutableDateTime instant = new MutableDateTime(); int pos = formatter.parseInto(instant, name, 0); return pos > 0 && pos == name.length() && checkYear(instant) && reproducible(name, instant); } catch (IllegalArgumentException e) { return false; } } private boolean checkYear(MutableDateTime instant) { // When using a pattern like YYYY, it will match strings like 201802 as a large // year. For our use-cases this is more likely a separate index with a year and // month pattern. To avoid this the year is checked and rejected if more than four // digits. return instant.getYear() < 10000; } private boolean reproducible(String expected, MutableDateTime instant) { // The date time parser is sometimes more lenient for parsing than what it would // be able to generate. For example a pattern like YYYYMM would match both 20131 // and 201301. This check ensures that the printed form matches. 
So for the example // 20131 would not match, but 201301 would. String actual = formatter.print(instant); return actual.equals(expected); } }
5,170
0
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/indexmanagement
Create_ds/Raigad/raigad/src/main/java/com/netflix/raigad/indexmanagement/exception/UnsupportedAutoIndexException.java
/** * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.indexmanagement.exception; public class UnsupportedAutoIndexException extends Exception { private static final long serialVersionUID = 1L; public UnsupportedAutoIndexException(String msg, Throwable th) { super(msg, th); } public UnsupportedAutoIndexException(String msg) { super(msg); } public UnsupportedAutoIndexException(Exception ex) { super(ex); } public UnsupportedAutoIndexException(Throwable th) { super(th); } }
5,171
0
Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad/discovery/RaigadInstance.java
/** * Copyright 2017 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.discovery; import java.io.Serializable; public class RaigadInstance implements Serializable { private static final long serialVersionUID = 5606412386974488659L; private String hostname; private long updatetime; private boolean outOfService; private String Id; private String app; private String instanceId; private String availabilityZone; private String publicip; private String dc; private String asgName; @Override public String toString() { return String .format("Host name [%s], instance ID [%s], app [%s], AZ [%s], ID [%s], IP [%s], DC [%s], ASG [%s], update time [%s]", getHostName(), getInstanceId(), getApp(), getAvailabilityZone(), getId(), getHostIP(), getDC(), getAsg(), getUpdatetime()); } public String getId() { return Id; } public void setId(String id) { this.Id = id; } public String getApp() { return app; } public void setApp(String app) { this.app = app; } public String getInstanceId() { return instanceId; } public void setInstanceId(String instanceId) { this.instanceId = instanceId; } public String getAvailabilityZone() { return availabilityZone; } public void setAvailabilityZone(String availabilityZone) { this.availabilityZone = availabilityZone; } public String getHostName() { return hostname; } public String getHostIP() { return publicip; } public void setHostName(String hostname) { this.hostname = hostname; } public void setHostIP(String 
publicip) { this.publicip = publicip; } public String getDC() { return dc; } public void setDC(String dc) { this.dc = dc; } public String getAsg() { return asgName; } public void setAsg(String asgName) { this.asgName = asgName; } public long getUpdatetime() { return updatetime; } public void setUpdatetime(long updatetime) { this.updatetime = updatetime; } public boolean isOutOfService() { return outOfService; } public void setOutOfService(boolean outOfService) { this.outOfService = outOfService; } }
5,172
0
Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad/discovery/RaigadDiscoveryPlugin.java
/** * Copyright 2017 Netflix, Inc. * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.raigad.discovery; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.transport.TransportService; import java.util.Collections; import java.util.Map; import java.util.function.Supplier; public class RaigadDiscoveryPlugin extends Plugin implements DiscoveryPlugin { private static final Logger logger = Loggers.getLogger(RaigadDiscoveryPlugin.class); private final Settings settings; public RaigadDiscoveryPlugin(Settings settings) { this.settings = settings; logger.info("Starting Raigad discovery"); } @Override public Map<String, Supplier<UnicastHostsProvider>> getZenHostsProviders( TransportService transportService, NetworkService networkService) { return Collections.singletonMap( "raigad", () -> new RaigadUnicastHostsProvider(settings, transportService)); } }
5,173
0
Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad
Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad/discovery/RaigadUnicastHostsProvider.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.discovery;

import com.netflix.raigad.discovery.utils.DataFetcher;
import com.netflix.raigad.discovery.utils.ElasticsearchUtil;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.discovery.zen.UnicastHostsProvider;
import org.elasticsearch.transport.TransportService;

import java.util.ArrayList;
import java.util.List;

/**
 * Unicast hosts provider that asks the local Raigad REST service for the
 * cluster's instance list and turns each instance into a DiscoveryNode.
 */
public class RaigadUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider {
    // Local Raigad endpoints queried for the node list; the tribe variant takes
    // a tribe ID suffix.
    private static final String GET_NODES_ISLAND_URL = "http://127.0.0.1:8080/Raigad/REST/v1/esconfig/get_nodes";
    private static final String GET_NODES_TRIBE_URL_PREFIX = "http://127.0.0.1:8080/Raigad/REST/v1/esconfig/get_tribe_nodes/";

    private final String nodeName;
    private final TransportService transportService;

    RaigadUnicastHostsProvider(Settings settings, TransportService transportService) {
        super(settings);
        this.transportService = transportService;
        // "node.name" carries the tribe marker (e.g. "name/t0") for tribe nodes.
        nodeName = settings.get("node.name");
        logger.info("[raigad-discovery] Node name [{}]", nodeName);
    }

    /**
     * Fetches the instance list from Raigad (tribe-specific endpoint when this
     * is a tribe node) and converts each instance's IP into a DiscoveryNode.
     * Per-instance failures are logged and skipped; a failure to fetch or parse
     * the list is rethrown as a RuntimeException.
     */
    @Override
    public List<DiscoveryNode> buildDynamicNodes() {
        final List<DiscoveryNode> discoveryNodes = new ArrayList<>();

        try {
            //Extract tribe ID from name field of settings and query accordingly
            String discoveryNodesJsonString;

            if (isTribeNode()) {
                // Tribe ID is the substring after the "/" in the node name.
                String tribeId = nodeName.substring(nodeName.indexOf("/") + 1);
                logger.debug("[raigad-discovery] Tribe ID detected [{}]", tribeId);
                discoveryNodesJsonString = DataFetcher.fetchData(GET_NODES_TRIBE_URL_PREFIX + tribeId, logger);
            } else {
                discoveryNodesJsonString = DataFetcher.fetchData(GET_NODES_ISLAND_URL, logger);
            }

            List<RaigadInstance> instances = ElasticsearchUtil.getRaigadInstancesFromJsonString(discoveryNodesJsonString, logger);

            for (RaigadInstance instance : instances) {
                try {
                    // Resolve at most one transport address per instance IP.
                    TransportAddress[] addresses = transportService.addressesFromString(instance.getHostIP(), 1);

                    if (addresses != null && addresses.length > 0) {
                        logger.info("[raigad-discovery] Adding instance [{}], address [{}], transport address [{}]",
                                instance.getId(), instance.getHostIP(), addresses[0]);
                        // minimumCompatibilityVersion() lets nodes on the previous
                        // minor version join during rolling upgrades.
                        discoveryNodes.add(new DiscoveryNode(instance.getId(), addresses[0],
                                Version.CURRENT.minimumCompatibilityVersion()));
                    }
                } catch (Exception e) {
                    // NOTE(review): throwable-first argument order is the ES
                    // logger overload, not plain SLF4J — keep as-is.
                    logger.warn("[raigad-discovery] Failed to add instance [{}], address [{}]",
                            e, instance.getId(), instance.getHostIP());
                }
            }
        } catch (Exception e) {
            logger.error("[raigad-discovery] Exception while trying to build dynamic discovery nodes", e);
            throw new RuntimeException(e);
        }

        logger.debug("[raigad-discovery] Using dynamic discovery nodes {}", discoveryNodes);

        return discoveryNodes;
    }

    // Heuristic: tribe node names contain "/t" (e.g. "cluster/t0").
    // NOTE(review): this would also match a plain name containing "/t" — confirm
    // against the naming convention used by the tribe setup.
    private boolean isTribeNode() {
        if (nodeName == null || nodeName.isEmpty()) {
            return false;
        }
        return nodeName.contains("/t");
    }
}
5,174
0
Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad/discovery
Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad/discovery/utils/ElasticsearchUtil.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.discovery.utils;

import com.netflix.raigad.discovery.RaigadInstance;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

/**
 * Parses the JSON payload returned by the Raigad REST API into
 * {@link RaigadInstance} objects. The payload is expected to contain a
 * top-level "instances" object mapping instance keys to attribute maps.
 */
public class ElasticsearchUtil {
    private static final String TOP_LEVEL_ELEMENT = "instances";
    private static final String ID = "id";
    private static final String APP_NAME = "app_name";
    private static final String HOST_NAME = "host_name";
    private static final String INSTANCE_ID = "instance_id";
    private static final String AVAILABILITY_ZONE = "availability_zone";
    private static final String PUBLIC_IP = "public_ip";
    private static final String DC = "dc";
    private static final String UPDATE_TIME = "update_time";

    /**
     * Converts the Raigad API's JSON response into a list of instances.
     *
     * @param jsonInstances raw JSON string from the Raigad API
     * @param logger        destination for parse diagnostics
     * @return the parsed instances; an empty (or partial) list when the
     *         payload is malformed — parse errors are logged, not thrown
     */
    @SuppressWarnings("unchecked")
    public static List<RaigadInstance> getRaigadInstancesFromJsonString(String jsonInstances, Logger logger) {
        List<RaigadInstance> raigadInstances = new ArrayList<>();

        try {
            Map<String, Object> topLevelInstanceMap =
                    (Map<String, Object>) jsonToMap(jsonInstances).get(TOP_LEVEL_ELEMENT);

            // Guard against a payload without the expected top-level element;
            // previously this fell through to an uncaught NullPointerException.
            if (topLevelInstanceMap == null) {
                logger.error("JSON payload is missing the top-level \"" + TOP_LEVEL_ELEMENT + "\" element");
                return raigadInstances;
            }

            // Iterate entries directly instead of keySet() + get() per key.
            for (Map.Entry<String, Object> instanceEntry : topLevelInstanceMap.entrySet()) {
                Map<String, Object> instParamMap = (Map<String, Object>) instanceEntry.getValue();

                RaigadInstance raigadInstance = new RaigadInstance();
                raigadInstance.setApp((String) instParamMap.get(APP_NAME));
                raigadInstance.setAvailabilityZone((String) instParamMap.get(AVAILABILITY_ZONE));
                raigadInstance.setDC((String) instParamMap.get(DC));
                raigadInstance.setHostIP((String) instParamMap.get(PUBLIC_IP));
                raigadInstance.setHostName((String) instParamMap.get(HOST_NAME));
                raigadInstance.setId((String) instParamMap.get(ID));
                raigadInstance.setInstanceId((String) instParamMap.get(INSTANCE_ID));
                raigadInstance.setUpdatetime((Long) instParamMap.get(UPDATE_TIME));

                logger.info("Raigad instance: {}", raigadInstance.toString());

                //Add to the list
                raigadInstances.add(raigadInstance);
            }
        } catch (IOException e) {
            logger.error("Error caught while parsing JSON", e);
        }

        return raigadInstances;
    }

    /** Parses a JSON string into an ordered map via the Elasticsearch XContent parser. */
    private static Map<String, Object> jsonToMap(String jsonString) throws IOException {
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, jsonString)) {
            return parser.mapOrdered();
        }
    }
}
5,175
0
Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad/discovery
Create_ds/Raigad/raigad-discovery-plugin/src/main/java/com/netflix/raigad/discovery/utils/DataFetcher.java
/**
 * Copyright 2017 Netflix, Inc.
 * <p>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.raigad.discovery.utils;

import org.apache.logging.log4j.Logger;

import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

/**
 * Minimal HTTP GET helper used by the discovery plugin to talk to the local
 * Raigad REST API.
 */
public class DataFetcher {
    /**
     * Performs an HTTP GET against {@code url} and returns the response body
     * decoded as UTF-8.
     *
     * @param url    the Raigad API endpoint to fetch
     * @param logger destination for diagnostics
     * @return the response body as a UTF-8 string
     * @throws RuntimeException if the request fails or returns a non-200 status
     */
    public static String fetchData(String url, Logger logger) {
        HttpURLConnection httpConnection = null;
        try {
            httpConnection = (HttpURLConnection) new URL(url).openConnection();
            httpConnection.setConnectTimeout(1000);
            httpConnection.setReadTimeout(10000);
            httpConnection.setRequestMethod("GET");

            if (httpConnection.getResponseCode() != 200) {
                logger.error("Unable to get data from URL [" + url + "]");
                throw new RuntimeException("Unable to fetch data from Raigad API");
            }

            ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();

            // Read from getInputStream() directly; the previous cast of
            // getContent() to FilterInputStream could throw ClassCastException
            // depending on the content handler chosen by the runtime.
            try (InputStream responseStream = httpConnection.getInputStream()) {
                byte[] bytes = new byte[2048];
                int bytesRead;
                while ((bytesRead = responseStream.read(bytes, 0, bytes.length)) != -1) {
                    byteArrayOutputStream.write(bytes, 0, bytesRead);
                }
            }

            String result = new String(byteArrayOutputStream.toByteArray(), StandardCharsets.UTF_8);
            logger.info("Raigad ({}) returned {}", url, result);
            return result;
        } catch (Exception ex) {
            // Preserve the original contract: all failures surface as RuntimeException.
            throw new RuntimeException(ex);
        } finally {
            if (httpConnection != null) {
                httpConnection.disconnect();
            }
        }
    }
}
5,176
0
Create_ds/dynein/dynein/src/test/java/com/airbnb/dynein
Create_ds/dynein/dynein/src/test/java/com/airbnb/dynein/scheduler/SchedulerTest.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license
 * information.
 */
package com.airbnb.dynein.scheduler;

import static org.junit.Assert.*;
import static org.mockito.Mockito.*;

import com.airbnb.conveyor.async.AsyncSqsClient;
import com.airbnb.dynein.api.DyneinJobSpec;
import com.airbnb.dynein.api.JobSchedulePolicy;
import com.airbnb.dynein.api.JobScheduleType;
import com.airbnb.dynein.common.job.JacksonJobSpecTransformer;
import com.airbnb.dynein.common.job.JobSpecTransformer;
import com.airbnb.dynein.common.token.JacksonTokenManager;
import com.airbnb.dynein.common.token.TokenManager;
import com.airbnb.dynein.scheduler.metrics.NoOpMetricsImpl;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.time.Clock;
import java.time.Instant;
import java.time.ZoneId;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;

/**
 * Unit tests for {@link Scheduler}, covering each {@link JobScheduleType}
 * routing path (inbound queue, direct destination queue, SQS-native delay)
 * and the corresponding failure propagation.
 */
@RunWith(MockitoJUnitRunner.class)
public class SchedulerTest {
  @Mock private AsyncSqsClient asyncClient;
  @Mock private ScheduleManager scheduleManager;
  private Scheduler scheduler;
  private JobSpecTransformer jobSpecTransformer;

  @Before
  public void setUp() {
    ObjectMapper mapper = new ObjectMapper();
    jobSpecTransformer = new JacksonJobSpecTransformer(mapper);
    TokenManager tokenManager = new JacksonTokenManager(mapper);
    // Fixed clock so SQS delay-seconds computations are deterministic.
    scheduler =
        new Scheduler(
            asyncClient,
            "inbound-test",
            jobSpecTransformer,
            tokenManager,
            scheduleManager,
            Clock.fixed(Instant.now(), ZoneId.of("UTC")),
            new NoOpMetricsImpl());
  }

  /** SCHEDULED jobs go to the inbound queue. */
  @Test
  public void testScheduledJob() {
    DyneinJobSpec jobSpec =
        DyneinJobSpec.builder()
            .name("AddJob")
            .queueName("test-queue")
            .schedulePolicy(
                JobSchedulePolicy.builder()
                    .type(JobScheduleType.SCHEDULED)
                    .delayMillis(1000L)
                    .build())
            .build();
    when(asyncClient.add(jobSpecTransformer.serializeJobSpec(jobSpec), "inbound-test"))
        .thenReturn(CompletableFuture.completedFuture(null));
    CompletableFuture<Void> ret = scheduler.createJob(jobSpec);
    verify(asyncClient).add(jobSpecTransformer.serializeJobSpec(jobSpec), "inbound-test");
    assertNull(ret.join());
  }

  /** IMMEDIATE jobs go straight to the destination queue with no delay. */
  @Test
  public void testImmediateJob() {
    when(asyncClient.add(any(String.class), eq("test-queue")))
        .thenReturn(CompletableFuture.completedFuture(null));
    DyneinJobSpec jobSpec =
        DyneinJobSpec.builder()
            .name("AddJob")
            .queueName("test-queue")
            .schedulePolicy(
                JobSchedulePolicy.builder().type(JobScheduleType.IMMEDIATE).delayMillis(0L).build())
            .build();
    CompletableFuture<Void> ret = scheduler.createJob(jobSpec);
    verify(asyncClient).add(jobSpecTransformer.serializeJobSpec(jobSpec), "test-queue");
    assertNull(ret.join());
  }

  /**
   * SQS_DELAYED with a relative delay converts the millisecond delay to SQS
   * delay-seconds. (Renamed from "_withInstant": this variant supplies
   * delayMillis, not an absolute instant.)
   */
  @Test
  public void testSQSDelayedJob_withDelay() {
    when(asyncClient.add(any(String.class), eq("test-queue"), any(Integer.class)))
        .thenReturn(CompletableFuture.completedFuture(null));
    DyneinJobSpec jobSpec =
        DyneinJobSpec.builder()
            .name("AddJob")
            .queueName("test-queue")
            .schedulePolicy(
                JobSchedulePolicy.builder()
                    .type(JobScheduleType.SQS_DELAYED)
                    .delayMillis(5000L)
                    .build())
            .build();
    CompletableFuture<Void> ret = scheduler.createJob(jobSpec);
    verify(asyncClient).add(jobSpecTransformer.serializeJobSpec(jobSpec), "test-queue", 5);
    assertNull(ret.join());
  }

  /**
   * SQS_DELAYED with an absolute epoch time computes delay-seconds relative
   * to the scheduler's clock. (Renamed from "_withDelay": this variant
   * supplies epochMillis.)
   */
  @Test
  public void testSQSDelayedJob_withInstant() {
    when(asyncClient.add(any(String.class), eq("test-queue"), any(Integer.class)))
        .thenReturn(CompletableFuture.completedFuture(null));
    DyneinJobSpec jobSpec =
        DyneinJobSpec.builder()
            .name("AddJob")
            .queueName("test-queue")
            .schedulePolicy(
                JobSchedulePolicy.builder()
                    .type(JobScheduleType.SQS_DELAYED)
                    .epochMillis(Instant.now().plusMillis(5000L).toEpochMilli())
                    .build())
            .build();
    CompletableFuture<Void> ret = scheduler.createJob(jobSpec);
    verify(asyncClient).add(jobSpecTransformer.serializeJobSpec(jobSpec), "test-queue", 5);
    assertNull(ret.join());
  }

  /** Delays beyond the SQS maximum must be rejected with IllegalArgumentException. */
  @Test(expected = IllegalArgumentException.class)
  public void testSQSDelayedJob_DelayTooLong() throws Throwable {
    DyneinJobSpec jobSpec =
        DyneinJobSpec.builder()
            .name("AddJob")
            .queueName("error-queue")
            .schedulePolicy(
                JobSchedulePolicy.builder()
                    .type(JobScheduleType.SQS_DELAYED)
                    .epochMillis(Instant.now().plusMillis(1000000L).toEpochMilli())
                    .build())
            .build();
    try {
      scheduler.createJob(jobSpec).join();
    } catch (Exception e) {
      // Unwrap CompletionException layers so the root cause reaches @Test(expected = ...).
      Throwable t = e;
      while (t.getCause() != null) {
        t = t.getCause();
      }
      throw t;
    }
  }

  /** A failed enqueue to the inbound queue must complete the returned future exceptionally. */
  @Test
  public void testScheduledJobFailure() {
    DyneinJobSpec jobSpec =
        DyneinJobSpec.builder()
            .name("AddJob")
            .queueName("error-queue")
            .schedulePolicy(
                JobSchedulePolicy.builder()
                    .type(JobScheduleType.SCHEDULED)
                    .delayMillis(1000L)
                    .build())
            .build();
    CompletableFuture<Void> error = new CompletableFuture<>();
    error.completeExceptionally(new Exception());
    when(asyncClient.add(jobSpecTransformer.serializeJobSpec(jobSpec), "inbound-test"))
        .thenReturn(error);
    CompletableFuture<Void> ret = scheduler.createJob(jobSpec);
    verify(asyncClient).add(jobSpecTransformer.serializeJobSpec(jobSpec), "inbound-test");
    try {
      ret.get(1000, TimeUnit.MILLISECONDS);
    } catch (TimeoutException timeout) {
      fail("Future does not seem to complete when failure in adding to inbound queue (SCHEDULED).");
    } catch (Exception ex) {
      // Expected: the enqueue failure surfaces through the returned future.
    }
  }

  /** A failed enqueue to the destination queue must complete the returned future exceptionally. */
  @Test
  public void testImmediateJobFailure() {
    DyneinJobSpec jobSpec =
        DyneinJobSpec.builder()
            .name("AddJob")
            .queueName("test-error")
            .schedulePolicy(
                JobSchedulePolicy.builder().type(JobScheduleType.IMMEDIATE).delayMillis(0L).build())
            .build();
    CompletableFuture<Void> error = new CompletableFuture<>();
    error.completeExceptionally(new Exception());
    when(asyncClient.add(jobSpecTransformer.serializeJobSpec(jobSpec), "test-error"))
        .thenReturn(error);
    CompletableFuture<Void> ret = scheduler.createJob(jobSpec);
    verify(asyncClient).add(jobSpecTransformer.serializeJobSpec(jobSpec), "test-error");
    try {
      ret.get(1000, TimeUnit.MILLISECONDS);
    } catch (TimeoutException timeout) {
      fail(
          "Future does not seem to complete when failure in adding to destination queue (IMMEDIATE).");
    } catch (Exception ex) {
      // Expected: the enqueue failure surfaces through the returned future.
    }
  }

  /** A failed delayed enqueue must complete the returned future exceptionally. */
  @Test
  public void testSQSDelayedJobFailure() {
    DyneinJobSpec jobSpec =
        DyneinJobSpec.builder()
            .name("AddJob")
            .queueName("test-error")
            .schedulePolicy(
                JobSchedulePolicy.builder()
                    .type(JobScheduleType.SQS_DELAYED)
                    .epochMillis(Instant.now().plusMillis(5000L).toEpochMilli())
                    .build())
            .build();
    CompletableFuture<Void> error = new CompletableFuture<>();
    error.completeExceptionally(new Exception());
    when(asyncClient.add(jobSpecTransformer.serializeJobSpec(jobSpec), "test-error", 5))
        .thenReturn(error);
    CompletableFuture<Void> ret = scheduler.createJob(jobSpec);
    verify(asyncClient).add(jobSpecTransformer.serializeJobSpec(jobSpec), "test-error", 5);
    try {
      ret.get(1000, TimeUnit.MILLISECONDS);
    } catch (TimeoutException timeout) {
      fail(
          "Future does not seem to complete when failure in adding to destination queue (SQS_DELAYED).");
    } catch (Exception ex) {
      // Expected: the enqueue failure surfaces through the returned future.
    }
  }
}
5,177
0
Create_ds/dynein/dynein/src/test/java/com/airbnb/dynein
Create_ds/dynein/dynein/src/test/java/com/airbnb/dynein/scheduler/SchedulerManagerTest.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license
 * information.
 */
package com.airbnb.dynein.scheduler;

import com.airbnb.dynein.api.*;
import com.airbnb.dynein.common.job.JacksonJobSpecTransformer;
import com.airbnb.dynein.common.job.JobSpecTransformer;
import com.airbnb.dynein.common.token.JacksonTokenManager;
import com.airbnb.dynein.common.token.TokenManager;
import com.airbnb.dynein.scheduler.Schedule.JobStatus;
import com.airbnb.dynein.scheduler.metrics.NoOpMetricsImpl;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.time.Clock;
import java.time.Instant;
import java.time.ZoneId;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.junit.MockitoJUnitRunner;

/** Tests for {@link ScheduleManager#makeSchedule}. */
@RunWith(MockitoJUnitRunner.class)
public class SchedulerManagerTest {
  private static final byte[] SERIALIZED_JOB_DATA = {0, 0, 0, 0};
  private static final int MAX_SHARD_ID = 64;

  private JobSpecTransformer transformer;
  private TokenManager tokenManager;
  private ScheduleManager scheduleManager;
  private Clock clock;

  @Before
  public void setUp() {
    // Fixed clock keeps scheduled timestamps deterministic across the test.
    clock = Clock.fixed(Instant.now(), ZoneId.of("UTC"));
    ObjectMapper objectMapper = new ObjectMapper();
    transformer = new JacksonJobSpecTransformer(objectMapper);
    tokenManager = new JacksonTokenManager(objectMapper);
    scheduleManager =
        new NoOpScheduleManager(
            MAX_SHARD_ID, tokenManager, transformer, clock, new NoOpMetricsImpl());
  }

  /** Builds a SCHEDULED job spec due 1000 seconds from the fixed clock. */
  private DyneinJobSpec getTestJobSpec(String token) {
    return DyneinJobSpec.builder()
        .jobToken(token)
        .name("AddJob")
        .queueType("PRODUCTION")
        .queueName("test-queue")
        .createAtInMillis(Instant.now().minusMillis(10).toEpochMilli())
        .schedulePolicy(
            JobSchedulePolicy.builder()
                .type(JobScheduleType.SCHEDULED)
                .epochMillis(Instant.now(clock).plusSeconds(1000).toEpochMilli())
                .build())
        .serializedJob(SERIALIZED_JOB_DATA)
        .build();
  }

  /**
   * This test is to ensure that we always use the scheduled time in the jobSpec to make the {@code
   * Schedule} rather than the one in the token.
   */
  @Test
  public void testMakeSchedule() throws InvalidTokenException {
    String token =
        tokenManager.generateToken(1L, "test-cluster", Instant.now(clock).toEpochMilli());
    String serializedJobSpec = transformer.serializeJobSpec(getTestJobSpec(token));

    Schedule actual = scheduleManager.makeSchedule(serializedJobSpec);

    long scheduledAt = Instant.now(clock).plusSeconds(1000).toEpochMilli();
    Schedule expected =
        new Schedule(scheduledAt + "#" + token, JobStatus.SCHEDULED, serializedJobSpec, "1");
    Assert.assertEquals(actual, expected);
  }
}
5,178
0
Create_ds/dynein/dynein/src/test/java/com/airbnb/dynein
Create_ds/dynein/dynein/src/test/java/com/airbnb/dynein/scheduler/NoOpScheduleManager.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license
 * information.
 */
package com.airbnb.dynein.scheduler;

import com.airbnb.dynein.common.job.JobSpecTransformer;
import com.airbnb.dynein.common.token.TokenManager;
import com.airbnb.dynein.scheduler.Schedule.JobStatus;
import com.airbnb.dynein.scheduler.metrics.Metrics;
import java.time.Clock;
import java.time.Instant;
import java.util.concurrent.CompletableFuture;

/**
 * A {@link ScheduleManager} for tests: every operation is a no-op that
 * immediately completes with {@code null}.
 */
public class NoOpScheduleManager extends ScheduleManager {
  public NoOpScheduleManager(
      int maxShardId,
      TokenManager tokenManager,
      JobSpecTransformer jobSpecTransformer,
      Clock clock,
      Metrics metrics) {
    super(maxShardId, tokenManager, jobSpecTransformer, clock, metrics);
  }

  /** Shared no-op result: a future already completed with {@code null}. */
  private static <T> CompletableFuture<T> done() {
    return CompletableFuture.completedFuture(null);
  }

  @Override
  public CompletableFuture<Void> recoverStuckJobs(String partition, Instant instant) {
    return done();
  }

  @Override
  public CompletableFuture<Void> addJob(Schedule schedule) {
    return done();
  }

  @Override
  public CompletableFuture<Schedule> getJob(String token) {
    return done();
  }

  @Override
  public CompletableFuture<Void> deleteJob(String token) {
    return done();
  }

  @Override
  public CompletableFuture<SchedulesQueryResponse> getOverdueJobs(
      String partition, Instant instant) {
    return done();
  }

  @Override
  public CompletableFuture<Schedule> updateStatus(
      Schedule schedule, JobStatus oldStatus, JobStatus newStatus) {
    return done();
  }

  @Override
  public CompletableFuture<Void> deleteDispatchedJob(Schedule schedule) {
    return done();
  }

  @Override
  public void close() {}
}
5,179
0
Create_ds/dynein/dynein/src/test/java/com/airbnb/dynein
Create_ds/dynein/dynein/src/test/java/com/airbnb/dynein/scheduler/NoOpScheduleManagerFactory.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license
 * information.
 */
package com.airbnb.dynein.scheduler;

import com.airbnb.dynein.common.job.JobSpecTransformer;
import com.airbnb.dynein.common.token.TokenManager;
import com.airbnb.dynein.scheduler.metrics.Metrics;
import java.time.Clock;

/** Factory producing {@link NoOpScheduleManager} instances for tests. */
public class NoOpScheduleManagerFactory extends ScheduleManagerFactory {
  public NoOpScheduleManagerFactory(
      int maxShardId,
      TokenManager tokenManager,
      JobSpecTransformer jobSpecTransformer,
      Clock clock,
      Metrics metrics) {
    super(maxShardId, tokenManager, jobSpecTransformer, clock, metrics);
  }

  @Override
  public ScheduleManager get() {
    // Pass the injected metrics through instead of silently substituting a
    // fresh NoOpMetricsImpl, so callers that supply a real/spied Metrics see it
    // used — consistent with the other fields forwarded here.
    return new NoOpScheduleManager(maxShardId, tokenManager, jobSpecTransformer, clock, metrics);
  }
}
5,180
0
Create_ds/dynein/dynein/src/test/java/com/airbnb/dynein/scheduler
Create_ds/dynein/dynein/src/test/java/com/airbnb/dynein/scheduler/dynamodb/DynamoDBTest.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license * information. */ package com.airbnb.dynein.scheduler.dynamodb; import static java.util.Arrays.asList; import static org.junit.Assert.*; import static org.mockito.Mockito.*; import com.airbnb.dynein.api.DyneinJobSpec; import com.airbnb.dynein.api.InvalidTokenException; import com.airbnb.dynein.api.JobSchedulePolicy; import com.airbnb.dynein.api.JobScheduleType; import com.airbnb.dynein.api.JobTokenPayload; import com.airbnb.dynein.common.job.JacksonJobSpecTransformer; import com.airbnb.dynein.common.job.JobSpecTransformer; import com.airbnb.dynein.common.token.JacksonTokenManager; import com.airbnb.dynein.common.token.TokenManager; import com.airbnb.dynein.common.utils.TimeUtils; import com.airbnb.dynein.scheduler.Schedule; import com.airbnb.dynein.scheduler.Schedule.JobStatus; import com.airbnb.dynein.scheduler.ScheduleManager; import com.airbnb.dynein.scheduler.ScheduleManager.SchedulesQueryResponse; import com.airbnb.dynein.scheduler.config.DynamoDBConfiguration; import com.airbnb.dynein.scheduler.dynamodb.DynamoDBUtils.Attribute; import com.airbnb.dynein.scheduler.metrics.Metrics; import com.airbnb.dynein.scheduler.metrics.NoOpMetricsImpl; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableMap; import java.time.Clock; import java.time.Instant; import java.time.ZoneId; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import 
software.amazon.awssdk.services.dynamodb.model.AttributeValue; import software.amazon.awssdk.services.dynamodb.model.DeleteItemRequest; import software.amazon.awssdk.services.dynamodb.model.DeleteItemResponse; import software.amazon.awssdk.services.dynamodb.model.GetItemRequest; import software.amazon.awssdk.services.dynamodb.model.GetItemResponse; import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; import software.amazon.awssdk.services.dynamodb.model.PutItemResponse; import software.amazon.awssdk.services.dynamodb.model.QueryRequest; import software.amazon.awssdk.services.dynamodb.model.QueryResponse; import software.amazon.awssdk.services.dynamodb.model.ReturnValue; import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest; import software.amazon.awssdk.services.dynamodb.model.UpdateItemResponse; @RunWith(MockitoJUnitRunner.class) public class DynamoDBTest { private static final byte[] SERIALIZED_JOB_DATA = {0, 0, 0, 0}; @Mock private DynamoDbAsyncClient ddbClient; private JobSpecTransformer transformer; private TokenManager tokenManager; private ScheduleManager scheduleManager; private Clock clock; private String validToken; private String tableName; private int maxShardId; private Metrics metrics; private DynamoDBConfiguration ddbConfig; @Before public void setUp() { ddbConfig = new DynamoDBConfiguration(); maxShardId = 64; ObjectMapper mapper = new ObjectMapper(); transformer = new JacksonJobSpecTransformer(mapper); tokenManager = new JacksonTokenManager(mapper); tableName = ddbConfig.getSchedulesTableName(); clock = Clock.fixed(Instant.now(), ZoneId.of("UTC")); validToken = tokenManager.generateToken(2, "test-cluster", clock.millis() + 1000); metrics = spy(new NoOpMetricsImpl()); scheduleManager = new DynamoDBScheduleManager( maxShardId, tokenManager, transformer, clock, metrics, ddbClient, ddbConfig); } // lifted from original DyneinTest private DyneinJobSpec getTestJobSpec(String token, String queueName) { JobSchedulePolicy 
policy = JobSchedulePolicy.builder() .type(JobScheduleType.SCHEDULED) .epochMillis(Instant.now(clock).plusMillis(1000).toEpochMilli()) .build(); return DyneinJobSpec.builder() .jobToken(token) .name("AddJob") .queueType("PRODUCTION") .queueName(queueName) .createAtInMillis(Instant.now().minusMillis(10).toEpochMilli()) .schedulePolicy(policy) .serializedJob(SERIALIZED_JOB_DATA) .build(); } private String getToken(int id) { return tokenManager.generateToken(id, null, (long) 10); } private Schedule jobSpecToSchedule(DyneinJobSpec jobSpec) throws InvalidTokenException { String date = Long.toString(TimeUtils.getInstant(jobSpec.getSchedulePolicy(), clock).toEpochMilli()); JobTokenPayload token = tokenManager.decodeToken(jobSpec.getJobToken()); int shard = token.getLogicalShard(); String message = transformer.serializeJobSpec(jobSpec); return new Schedule( String.format("%s#%s", date, jobSpec.getJobToken()), Schedule.JobStatus.SCHEDULED, message, Integer.toString(shard % maxShardId)); } public <T> Throwable getException(CompletableFuture<T> future) { Throwable t = null; try { future.get(1000, TimeUnit.MILLISECONDS); } catch (ExecutionException ex) { t = ex.getCause(); } catch (Exception e) { throw new RuntimeException(e); } return t; } @Test public void testScheduleJob() throws Exception { DyneinJobSpec jobSpec = getTestJobSpec(validToken, "test1"); Schedule schedule = jobSpecToSchedule(jobSpec); Map<String, AttributeValue> item = DynamoDBUtils.toAttributeMap(schedule); PutItemRequest putItemRequest = PutItemRequest.builder().tableName(tableName).item(item).build(); when(ddbClient.putItem(putItemRequest)).thenReturn(CompletableFuture.completedFuture(null)); CompletableFuture<Void> response = scheduleManager.addJob(schedule); response.get(1000, TimeUnit.MILLISECONDS); verify(ddbClient, times(1)).putItem(putItemRequest); verifyNoMoreInteractions(ddbClient); } @Test public void testScheduleJob_Failure() throws Exception { DyneinJobSpec jobSpec = getTestJobSpec(validToken, 
"test2"); Schedule schedule = jobSpecToSchedule(jobSpec); Map<String, AttributeValue> item = DynamoDBUtils.toAttributeMap(schedule); CompletableFuture<PutItemResponse> ret = new CompletableFuture<>(); Exception putException = new Exception(); ret.completeExceptionally(putException); PutItemRequest putItemRequest = PutItemRequest.builder().tableName(tableName).item(item).build(); when(ddbClient.putItem(putItemRequest)).thenReturn(ret); CompletableFuture<Void> response = scheduleManager.addJob(schedule); assertSame(getException(response), putException); verify(ddbClient, times(1)).putItem(putItemRequest); verifyNoMoreInteractions(ddbClient); } @Test public void testDeleteJob() throws Exception { String token = getToken(1); JobTokenPayload tokenPayload = tokenManager.decodeToken(token); Map<String, AttributeValue> primaryKey = DynamoDBUtils.getPrimaryKeyFromToken(token, tokenPayload, maxShardId); Map<String, String> attributeNames = new HashMap<>(); Map<String, AttributeValue> attributeValues = new HashMap<>(); attributeNames.put("#jobStatus", DynamoDBUtils.Attribute.JOB_STATUS.columnName); attributeValues.put( ":scheduled", AttributeValue.builder().s(Schedule.JobStatus.SCHEDULED.toString()).build()); DeleteItemRequest deleteItemRequest = DeleteItemRequest.builder() .tableName(tableName) .conditionExpression("#jobStatus = :scheduled") .key(primaryKey) .expressionAttributeNames(attributeNames) .expressionAttributeValues(attributeValues) .build(); when(ddbClient.deleteItem(deleteItemRequest)) .thenReturn(CompletableFuture.completedFuture(null)); CompletableFuture<Void> response = scheduleManager.deleteJob(token); response.get(1000, TimeUnit.MILLISECONDS); verify(ddbClient, times(1)).deleteItem(deleteItemRequest); verifyNoMoreInteractions(ddbClient); } @Test public void testDeleteJob_Failure() throws Exception { String token = getToken(2); JobTokenPayload tokenPayload = tokenManager.decodeToken(token); Map<String, AttributeValue> primaryKey = 
DynamoDBUtils.getPrimaryKeyFromToken(token, tokenPayload, maxShardId); Map<String, String> attributeNames = new HashMap<>(); Map<String, AttributeValue> attributeValues = new HashMap<>(); attributeNames.put("#jobStatus", DynamoDBUtils.Attribute.JOB_STATUS.columnName); attributeValues.put( ":scheduled", AttributeValue.builder().s(Schedule.JobStatus.SCHEDULED.toString()).build()); DeleteItemRequest deleteItemRequest = DeleteItemRequest.builder() .tableName(tableName) .conditionExpression("#jobStatus = :scheduled") .key(primaryKey) .expressionAttributeNames(attributeNames) .expressionAttributeValues(attributeValues) .build(); CompletableFuture<DeleteItemResponse> ret = new CompletableFuture<>(); Exception exception = new Exception(); ret.completeExceptionally(exception); when(ddbClient.deleteItem(deleteItemRequest)).thenReturn(ret); CompletableFuture<Void> response = scheduleManager.deleteJob(token); assertSame(getException(response), exception); verify(ddbClient, times(1)).deleteItem(deleteItemRequest); verifyNoMoreInteractions(ddbClient); } @Test public void testGetJob() throws Exception { DyneinJobSpec jobSpec = getTestJobSpec(validToken, "test1"); Schedule schedule = jobSpecToSchedule(jobSpec); JobTokenPayload tokenPayload = tokenManager.decodeToken(validToken); Map<String, AttributeValue> primaryKey = DynamoDBUtils.getPrimaryKeyFromToken(validToken, tokenPayload, maxShardId); GetItemRequest getItemRequest = GetItemRequest.builder() .key(primaryKey) .tableName(tableName) .attributesToGet(Collections.singletonList(DynamoDBUtils.Attribute.JOB_SPEC.columnName)) .build(); when(ddbClient.getItem(getItemRequest)) .thenReturn( CompletableFuture.completedFuture( GetItemResponse.builder() .item( ImmutableMap.of( DynamoDBUtils.Attribute.JOB_SPEC.columnName, AttributeValue.builder().s(schedule.getJobSpec()).build())) .build())); CompletableFuture<Schedule> response = scheduleManager.getJob(validToken); Assert.assertEquals(response.get(1000, TimeUnit.MILLISECONDS), schedule); 
verify(ddbClient, times(1)).getItem(getItemRequest); verifyNoMoreInteractions(ddbClient); } @Test public void testGetJob_Failure() throws Throwable { String token = getToken(4); JobTokenPayload tokenPayload = tokenManager.decodeToken(token); Map<String, AttributeValue> primaryKey = DynamoDBUtils.getPrimaryKeyFromToken(token, tokenPayload, maxShardId); GetItemRequest getItemRequest = GetItemRequest.builder() .key(primaryKey) .tableName(tableName) .attributesToGet(Collections.singletonList(DynamoDBUtils.Attribute.JOB_SPEC.columnName)) .build(); CompletableFuture<GetItemResponse> ret = new CompletableFuture<>(); Exception exception = new Exception(); ret.completeExceptionally(exception); when(ddbClient.getItem(getItemRequest)).thenReturn(ret); CompletableFuture<Schedule> response = scheduleManager.getJob(token); assertSame(getException(response), exception); verify(ddbClient, times(1)).getItem(getItemRequest); verifyNoMoreInteractions(ddbClient); } @Test public void testGetOverdueJobs() throws Exception { String partition = "test-partition"; QueryRequest queryRequest = getQueryRequest(partition, JobStatus.SCHEDULED, Instant.now(clock)); DyneinJobSpec jobSpec1 = getTestJobSpec(validToken, "test1"); Schedule schedule1 = jobSpecToSchedule(jobSpec1); DyneinJobSpec jobSpec2 = getTestJobSpec(getToken(4), "test2"); Schedule schedule2 = jobSpecToSchedule(jobSpec2); QueryResponse queryResponse = QueryResponse.builder() .items( asList( DynamoDBUtils.toAttributeMap(schedule1), DynamoDBUtils.toAttributeMap(schedule2))) .count(2) .lastEvaluatedKey(ImmutableMap.of()) .build(); when(ddbClient.query(queryRequest)) .thenReturn(CompletableFuture.completedFuture(queryResponse)); CompletableFuture<SchedulesQueryResponse> response = scheduleManager.getOverdueJobs(partition); Assert.assertEquals( response.get(1, TimeUnit.SECONDS), SchedulesQueryResponse.of(asList(schedule1, schedule2), false)); verify(ddbClient, times(1)).query(queryRequest); verifyNoMoreInteractions(ddbClient); } @Test 
public void testGetOverdueJobs_queryLimit() throws Exception { String partition = "test-partition"; QueryRequest queryRequest = getQueryRequest(partition, JobStatus.SCHEDULED, Instant.now(clock)); DyneinJobSpec jobSpec = getTestJobSpec(validToken, "test1"); Schedule schedule = jobSpecToSchedule(jobSpec); List<Schedule> queryLimitSchedules = new ArrayList<>(); for (int i = 0; i < ddbConfig.getQueryLimit(); i++) { queryLimitSchedules.add(schedule); } QueryResponse queryResponse = QueryResponse.builder() .items( queryLimitSchedules .stream() .map(DynamoDBUtils::toAttributeMap) .collect(Collectors.toList())) .count(ddbConfig.getQueryLimit()) .lastEvaluatedKey(ImmutableMap.of()) .build(); when(ddbClient.query(queryRequest)) .thenReturn(CompletableFuture.completedFuture(queryResponse)); CompletableFuture<SchedulesQueryResponse> response = scheduleManager.getOverdueJobs(partition); Assert.assertEquals( response.get(1, TimeUnit.SECONDS), SchedulesQueryResponse.of(queryLimitSchedules, true)); verify(ddbClient, times(1)).query(queryRequest); verifyNoMoreInteractions(ddbClient); } @Test public void testGetOverdueJobs_pagination() throws Exception { String partition = "test-partition"; QueryRequest queryRequest = getQueryRequest(partition, JobStatus.SCHEDULED, Instant.now(clock)); DyneinJobSpec jobSpec = getTestJobSpec(validToken, "test1"); Schedule schedule = jobSpecToSchedule(jobSpec); List<Schedule> queryLimitSchedules = new ArrayList<>(); for (int i = 0; i < ddbConfig.getQueryLimit() - 1; i++) { queryLimitSchedules.add(schedule); } QueryResponse queryResponse = QueryResponse.builder() .items( queryLimitSchedules .stream() .map(DynamoDBUtils::toAttributeMap) .collect(Collectors.toList())) .count(ddbConfig.getQueryLimit() - 1) .lastEvaluatedKey( ImmutableMap.of("random", AttributeValue.builder().s("thing").build())) .build(); when(ddbClient.query(queryRequest)) .thenReturn(CompletableFuture.completedFuture(queryResponse)); CompletableFuture<SchedulesQueryResponse> response = 
scheduleManager.getOverdueJobs(partition); Assert.assertEquals( response.get(1, TimeUnit.SECONDS), SchedulesQueryResponse.of(queryLimitSchedules, true)); verify(ddbClient, times(1)).query(queryRequest); verifyNoMoreInteractions(ddbClient); } @Test public void testGetOverdueJobs_failure() { String partition = "test-partition"; QueryRequest queryRequest = getQueryRequest(partition, JobStatus.SCHEDULED, Instant.now(clock)); Exception exception = new Exception(); CompletableFuture<QueryResponse> fut = new CompletableFuture<>(); fut.completeExceptionally(exception); when(ddbClient.query(queryRequest)).thenReturn(fut); CompletableFuture<SchedulesQueryResponse> response = scheduleManager.getOverdueJobs(partition); assertSame(getException(response), exception); verify(ddbClient, times(1)).query(queryRequest); verifyNoMoreInteractions(ddbClient); } @Test public void testUpdateStatus() throws Exception { DyneinJobSpec jobSpec = getTestJobSpec(validToken, "test1"); Schedule schedule = jobSpecToSchedule(jobSpec); UpdateItemRequest updateItemRequest = getUpdateItemReq(schedule, JobStatus.SCHEDULED, JobStatus.ACQUIRED); when(ddbClient.updateItem(updateItemRequest)) .thenReturn( CompletableFuture.completedFuture( UpdateItemResponse.builder() .attributes( ImmutableMap.of( Attribute.JOB_STATUS.columnName, AttributeValue.builder().s(JobStatus.ACQUIRED.name()).build())) .build())); CompletableFuture<Schedule> response = scheduleManager.updateStatus(schedule, JobStatus.SCHEDULED, JobStatus.ACQUIRED); Assert.assertEquals(response.get(1, TimeUnit.SECONDS), schedule.withStatus(JobStatus.ACQUIRED)); verify(ddbClient, times(1)).updateItem(updateItemRequest); verifyNoMoreInteractions(ddbClient); } @Test public void testUpdateStatus_emptyResponse() throws Exception { DyneinJobSpec jobSpec = getTestJobSpec(validToken, "test1"); Schedule schedule = jobSpecToSchedule(jobSpec); UpdateItemRequest updateItemRequest = getUpdateItemReq(schedule, JobStatus.SCHEDULED, JobStatus.ACQUIRED); 
when(ddbClient.updateItem(updateItemRequest)) .thenReturn(CompletableFuture.completedFuture(UpdateItemResponse.builder().build())); CompletableFuture<Schedule> response = scheduleManager.updateStatus(schedule, JobStatus.SCHEDULED, JobStatus.ACQUIRED); Throwable exception = getException(response); assertTrue(exception instanceof IllegalStateException); assertEquals(exception.getMessage(), "Status update successful but status isn't returned."); verify(ddbClient, times(1)).updateItem(updateItemRequest); verifyNoMoreInteractions(ddbClient); } @Test public void testUpdateStatus_unknownResponse() throws Exception { DyneinJobSpec jobSpec = getTestJobSpec(validToken, "test1"); Schedule schedule = jobSpecToSchedule(jobSpec); UpdateItemRequest updateItemRequest = getUpdateItemReq(schedule, JobStatus.SCHEDULED, JobStatus.ACQUIRED); when(ddbClient.updateItem(updateItemRequest)) .thenReturn( CompletableFuture.completedFuture( UpdateItemResponse.builder() .attributes( ImmutableMap.of( Attribute.JOB_STATUS.columnName, AttributeValue.builder().s("magic").build())) .build())); CompletableFuture<Schedule> response = scheduleManager.updateStatus(schedule, JobStatus.SCHEDULED, JobStatus.ACQUIRED); Throwable exception = getException(response); assertTrue(exception instanceof IllegalArgumentException); assertEquals( exception.getMessage(), "No enum constant com.airbnb.dynein.scheduler.Schedule.JobStatus.magic"); verify(ddbClient, times(1)).updateItem(updateItemRequest); verifyNoMoreInteractions(ddbClient); } @Test public void testUpdateStatus_failure() throws Exception { DyneinJobSpec jobSpec = getTestJobSpec(validToken, "test1"); Schedule schedule = jobSpecToSchedule(jobSpec); UpdateItemRequest updateItemRequest = getUpdateItemReq(schedule, JobStatus.SCHEDULED, JobStatus.ACQUIRED); Exception exception = new Exception(); CompletableFuture<UpdateItemResponse> response = new CompletableFuture<>(); response.completeExceptionally(exception); 
when(ddbClient.updateItem(updateItemRequest)).thenReturn(response); CompletableFuture<Schedule> ret = scheduleManager.updateStatus(schedule, JobStatus.SCHEDULED, JobStatus.ACQUIRED); assertSame(getException(ret), exception); verify(ddbClient, times(1)).updateItem(updateItemRequest); verifyNoMoreInteractions(ddbClient); } @Test public void testDeleteDispatchedJob() throws Exception { DyneinJobSpec jobSpec = getTestJobSpec(validToken, "test1"); Schedule schedule = jobSpecToSchedule(jobSpec).withStatus(JobStatus.ACQUIRED); DeleteItemRequest request = getDeleteItemRequest(schedule); when(ddbClient.deleteItem(request)) .thenReturn(CompletableFuture.completedFuture(DeleteItemResponse.builder().build())); CompletableFuture<Void> ret = scheduleManager.deleteDispatchedJob(schedule); assertNull(ret.get(1, TimeUnit.SECONDS)); verify(ddbClient, times(1)).deleteItem(request); verifyNoMoreInteractions(ddbClient); } @Test public void testDeleteDispatchedJob_failure() throws Exception { DyneinJobSpec jobSpec = getTestJobSpec(validToken, "test1"); Schedule schedule = jobSpecToSchedule(jobSpec).withStatus(JobStatus.ACQUIRED); DeleteItemRequest request = getDeleteItemRequest(schedule); Exception exception = new Exception(); CompletableFuture<DeleteItemResponse> response = new CompletableFuture<>(); response.completeExceptionally(exception); when(ddbClient.deleteItem(request)).thenReturn(response); CompletableFuture<Void> ret = scheduleManager.deleteDispatchedJob(schedule); assertSame(getException(ret), exception); verify(ddbClient, times(1)).deleteItem(request); verifyNoMoreInteractions(ddbClient); } private UpdateItemRequest getUpdateItemReq( Schedule schedule, Schedule.JobStatus oldStatus, Schedule.JobStatus newStatus) { Map<String, AttributeValue> primaryKey = DynamoDBUtils.getPrimaryKey(schedule); Map<String, String> attributeNames = new HashMap<>(); Map<String, AttributeValue> attributeValues = new HashMap<>(); attributeNames.put("#jobStatus", 
DynamoDBUtils.Attribute.JOB_STATUS.columnName); attributeValues.put(":oldStatus", AttributeValue.builder().s(oldStatus.name()).build()); attributeValues.put(":newStatus", AttributeValue.builder().s(newStatus.name()).build()); String updated = "SET #jobStatus = :newStatus"; return UpdateItemRequest.builder() .tableName(this.tableName) .key(primaryKey) .conditionExpression("#jobStatus = :oldStatus") .expressionAttributeNames(attributeNames) .expressionAttributeValues(attributeValues) .updateExpression(updated) .returnValues(ReturnValue.UPDATED_NEW) .build(); } private DeleteItemRequest getDeleteItemRequest(Schedule schedule) { Map<String, AttributeValue> primaryKey = DynamoDBUtils.getPrimaryKey(schedule); Map<String, String> attributeNames = new HashMap<>(); Map<String, AttributeValue> attributeValues = new HashMap<>(); attributeNames.put("#jobStatus", DynamoDBUtils.Attribute.JOB_STATUS.columnName); attributeValues.put( ":acquired", AttributeValue.builder().s(Schedule.JobStatus.ACQUIRED.toString()).build()); return DeleteItemRequest.builder() .tableName(tableName) .conditionExpression("#jobStatus = :acquired") .key(primaryKey) .expressionAttributeNames(attributeNames) .expressionAttributeValues(attributeValues) .build(); } private QueryRequest getQueryRequest(String partition, JobStatus jobStatus, Instant instant) { Map<String, AttributeValue> values = new HashMap<>(); Map<String, String> names = new HashMap<>(); String keyCondition = "#shardId = :shardId and #dateToken < :dateToken"; String filter = "#jobStatus = :jobStatus"; String now = Long.toString(instant.toEpochMilli()); values.put(":shardId", AttributeValue.builder().s(partition).build()); values.put(":dateToken", AttributeValue.builder().s(now).build()); values.put(":jobStatus", AttributeValue.builder().s(jobStatus.toString()).build()); names.put("#shardId", DynamoDBUtils.Attribute.SHARD_ID.columnName); names.put("#dateToken", DynamoDBUtils.Attribute.DATE_TOKEN.columnName); names.put("#jobStatus", 
DynamoDBUtils.Attribute.JOB_STATUS.columnName); return QueryRequest.builder() .tableName(tableName) .keyConditionExpression(keyCondition) .filterExpression(filter) .expressionAttributeValues(values) .expressionAttributeNames(names) .limit(ddbConfig.getQueryLimit()) .build(); } }
5,181
0
Create_ds/dynein/dynein/src/test/java/com/airbnb/dynein/scheduler
Create_ds/dynein/dynein/src/test/java/com/airbnb/dynein/scheduler/worker/PartitionWorkerTest.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license * information. */ package com.airbnb.dynein.scheduler.worker; import static java.util.Arrays.asList; import static org.junit.Assert.*; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.*; import com.airbnb.conveyor.async.AsyncSqsClient; import com.airbnb.dynein.api.*; import com.airbnb.dynein.common.job.JacksonJobSpecTransformer; import com.airbnb.dynein.common.job.JobSpecTransformer; import com.airbnb.dynein.common.token.JacksonTokenManager; import com.airbnb.dynein.common.token.TokenManager; import com.airbnb.dynein.scheduler.NoOpScheduleManager; import com.airbnb.dynein.scheduler.Schedule; import com.airbnb.dynein.scheduler.Schedule.JobStatus; import com.airbnb.dynein.scheduler.ScheduleManager; import com.airbnb.dynein.scheduler.ScheduleManager.SchedulesQueryResponse; import com.airbnb.dynein.scheduler.config.WorkersConfiguration; import com.airbnb.dynein.scheduler.metrics.NoOpMetricsImpl; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.eventbus.EventBus; import java.time.Clock; import java.time.Instant; import java.time.ZoneId; import java.util.List; import java.util.concurrent.*; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) public class PartitionWorkerTest { private static final byte[] SERIALIZED_JOB_DATA = {0, 0, 0, 0}; @Mock private AsyncSqsClient asyncClient; private TokenManager tokenManager; private Clock clock; private JobSpecTransformer transformer; private PartitionWorker worker; private String validToken; @Mock private ScheduleManager scheduleManager; @Before public void setUp() { clock = Clock.fixed(Instant.now(), ZoneId.of("UTC")); tokenManager = new JacksonTokenManager(new ObjectMapper()); validToken = 
tokenManager.generateToken(2, "test-cluster", clock.millis() + 1000); ObjectMapper mapper = new ObjectMapper(); transformer = new JacksonJobSpecTransformer(mapper); tokenManager = new JacksonTokenManager(mapper); scheduleManager = spy(new NoOpScheduleManager(64, tokenManager, transformer, clock, new NoOpMetricsImpl())); worker = new PartitionWorker( 1, asyncClient, new EventBus(), clock, scheduleManager, transformer, new WorkersConfiguration(1, 10, 1000), new NoOpMetricsImpl()); worker.startExecutor(); when(scheduleManager.updateStatus( any(Schedule.class), eq(JobStatus.SCHEDULED), eq(JobStatus.ACQUIRED))) .thenAnswer( invocation -> CompletableFuture.completedFuture( ((Schedule) invocation.getArguments()[0]).withStatus(JobStatus.ACQUIRED))); } // lifted from original DyneinTest private DyneinJobSpec getTestJobSpec(String token, String queueName) { JobSchedulePolicy policy; policy = JobSchedulePolicy.builder() .type(JobScheduleType.SCHEDULED) .epochMillis(Instant.now().plusMillis(1000).toEpochMilli()) .build(); return DyneinJobSpec.builder() .jobToken(token) .name("AddJob") .queueType("PRODUCTION") .queueName(queueName) .createAtInMillis(Instant.now().minusMillis(10).toEpochMilli()) .schedulePolicy(policy) .serializedJob(SERIALIZED_JOB_DATA) .build(); } private Schedule getSchedule(DyneinJobSpec jobSpec, boolean failure) throws Exception { JobTokenPayload payload = tokenManager.decodeToken(jobSpec.getJobToken()); int shard = !failure ? 
payload.getLogicalShard() : 1; String message = transformer.serializeJobSpec(jobSpec); return new Schedule( payload .getEpochMillisOptional() .orElseThrow( () -> new IllegalStateException("no tokens without epochMillis allowed")) + "-" + jobSpec.getJobToken(), Schedule.JobStatus.SCHEDULED, message, Integer.toString(shard)); } @Test public void testDispatchJob() throws Exception { DyneinJobSpec jobSpec = getTestJobSpec(validToken, "test3"); Schedule schedule = getSchedule(jobSpec, false); when(asyncClient.add(schedule.getJobSpec(), "test3")) .thenReturn(CompletableFuture.completedFuture(null)); CompletableFuture<Void> ret = worker.dispatchToDestination(schedule); ret.get(1000, TimeUnit.MILLISECONDS); verify(asyncClient, times(1)).add(schedule.getJobSpec(), "test3"); verify(scheduleManager, times(1)).deleteDispatchedJob(eq(schedule)); verifyNoMoreInteractions(asyncClient, scheduleManager); } @Test public void testDispatch_EnqueueFail() throws Exception { DyneinJobSpec jobSpec = getTestJobSpec(validToken, "test4"); Schedule schedule = getSchedule(jobSpec, false); Exception addException = new Exception(); CompletableFuture<Void> error = new CompletableFuture<>(); error.completeExceptionally(addException); when(asyncClient.add(schedule.getJobSpec(), "test4")).thenReturn(error); CompletableFuture<Void> ret = worker.dispatchToDestination(schedule); try { ret.get(1000, TimeUnit.MILLISECONDS); } catch (ExecutionException e) { assertSame(e.getCause(), addException); } verify(asyncClient, times(1)).add(schedule.getJobSpec(), "test4"); verify(scheduleManager, times(1)) .updateStatus(schedule, Schedule.JobStatus.ACQUIRED, Schedule.JobStatus.SCHEDULED); verifyNoMoreInteractions(scheduleManager, asyncClient); } public void testDispatch_ResetToScheduledFail() throws Exception { DyneinJobSpec jobSpec = getTestJobSpec(validToken, "test5"); Schedule schedule = getSchedule(jobSpec, false); CompletableFuture<Schedule> updateError = new CompletableFuture<>(); 
updateError.completeExceptionally(new Exception()); CompletableFuture<Void> queueError = new CompletableFuture<>(); queueError.completeExceptionally(new Exception()); when(asyncClient.add(schedule.getJobSpec(), "test5")).thenReturn(queueError); when(scheduleManager.updateStatus( schedule, Schedule.JobStatus.ACQUIRED, Schedule.JobStatus.SCHEDULED)) .thenReturn(updateError); CompletableFuture<Void> ret = worker.dispatchToDestination(schedule); Throwable caught = null; try { ret.get(1000, TimeUnit.MILLISECONDS); } catch (ExecutionException e) { caught = e.getCause(); } assertSame(caught, updateError); verify(asyncClient, times(1)).add(schedule.getJobSpec(), "test5"); verify(scheduleManager, times(1)) .updateStatus(schedule, Schedule.JobStatus.ACQUIRED, Schedule.JobStatus.SCHEDULED); verifyNoMoreInteractions(scheduleManager, asyncClient); } @Test public void testDispatch_DeleteFromTableFail() throws Exception { DyneinJobSpec jobSpec = getTestJobSpec(validToken, "test6"); Schedule schedule = getSchedule(jobSpec, false); CompletableFuture<Void> response = new CompletableFuture<>(); Exception exception = new Exception(); response.completeExceptionally(exception); when(asyncClient.add(schedule.getJobSpec(), "test6")) .thenReturn(CompletableFuture.completedFuture(null)); when(scheduleManager.deleteDispatchedJob(schedule)).thenReturn(response); CompletableFuture<Void> ret = worker.dispatchToDestination(schedule); ret.get(1000, TimeUnit.MILLISECONDS); verify(asyncClient, times(1)).add(schedule.getJobSpec(), "test6"); verify(scheduleManager, times(1)).deleteDispatchedJob(schedule); verifyNoMoreInteractions(scheduleManager, asyncClient); } @Test public void testDispatchOverdue() throws Exception { DyneinJobSpec jobSpec = getTestJobSpec(validToken, "test7"); Schedule schedule = getSchedule(jobSpec, false); Schedule scheduleAcquired = schedule.withStatus(JobStatus.ACQUIRED); List<Schedule> items = asList(schedule, schedule); String partition = "1"; SchedulesQueryResponse response 
= SchedulesQueryResponse.of(items, false); when(scheduleManager.getOverdueJobs(partition)) .thenReturn(CompletableFuture.completedFuture(response)); when(asyncClient.add(schedule.getJobSpec(), "test7")) .thenReturn(CompletableFuture.completedFuture(null)); when(scheduleManager.deleteDispatchedJob(scheduleAcquired)) .thenReturn(CompletableFuture.completedFuture(null)); CompletableFuture<Boolean> ret = worker.dispatchOverdue(partition); Assert.assertEquals( ret.get(1000, TimeUnit.MILLISECONDS), response.isShouldImmediatelyQueryAgain()); verify(scheduleManager, times(1)).getOverdueJobs(partition); verify(scheduleManager, times(2)) .updateStatus(schedule, Schedule.JobStatus.SCHEDULED, Schedule.JobStatus.ACQUIRED); verify(asyncClient, times(2)).add(schedule.getJobSpec(), "test7"); verify(scheduleManager, times(2)).deleteDispatchedJob(scheduleAcquired); verifyNoMoreInteractions(scheduleManager, asyncClient); } @Test public void testDispatchOverdue_scanForOverdueFailure() throws Exception { DyneinJobSpec jobSpec = getTestJobSpec(validToken, "test8"); Schedule schedule = getSchedule(jobSpec, false); String partition = "failure1"; CompletableFuture<SchedulesQueryResponse> response = new CompletableFuture<>(); response.completeExceptionally(new Exception()); when(scheduleManager.getOverdueJobs(partition)).thenReturn(response); CompletableFuture<Boolean> ret = worker.dispatchOverdue(partition); ret.get(1000, TimeUnit.MILLISECONDS); verify(scheduleManager, times(1)).getOverdueJobs(partition); verifyNoMoreInteractions(scheduleManager, asyncClient); } @Test public void testDispatchOverdue_SetAcquiredPartialFailure() throws Exception { DyneinJobSpec jobSpec_1 = getTestJobSpec(validToken, "test9"); Schedule schedule_1 = getSchedule(jobSpec_1, false); DyneinJobSpec jobSpec_2 = getTestJobSpec(validToken, "test9"); Schedule schedule_2 = getSchedule(jobSpec_2, true); String partition = "failure2"; CompletableFuture<Schedule> updateResponse = new CompletableFuture<>(); 
updateResponse.completeExceptionally(new Exception()); when(scheduleManager.getOverdueJobs(partition)) .thenReturn( CompletableFuture.completedFuture( SchedulesQueryResponse.of(asList(schedule_1, schedule_2), false))); when(scheduleManager.updateStatus(schedule_2, JobStatus.SCHEDULED, JobStatus.ACQUIRED)) .thenReturn(updateResponse); when(asyncClient.add(schedule_1.getJobSpec(), "test9")) .thenReturn(CompletableFuture.completedFuture(null)); CompletableFuture<Boolean> ret = worker.dispatchOverdue("failure2"); ret.get(1000, TimeUnit.MILLISECONDS); verify(scheduleManager, times(1)).getOverdueJobs(partition); verify(scheduleManager, times(1)) .updateStatus(schedule_1, JobStatus.SCHEDULED, JobStatus.ACQUIRED); verify(scheduleManager, times(1)) .updateStatus(schedule_2, JobStatus.SCHEDULED, JobStatus.ACQUIRED); verify(asyncClient, times(1)).add(schedule_1.getJobSpec(), "test9"); verify(scheduleManager, times(1)) .deleteDispatchedJob(schedule_1.withStatus(JobStatus.ACQUIRED)); verifyNoMoreInteractions(scheduleManager, asyncClient); } // test dispatched overdue w/ dispatch fail @Test public void testDispatchOverdue_dispatchPartialFailure() throws Exception { String partition = "failure3"; DyneinJobSpec jobSpec_1 = getTestJobSpec(validToken, "test10"); Schedule schedule_1 = getSchedule(jobSpec_1, false); DyneinJobSpec jobSpec_2 = getTestJobSpec(validToken, "test10.1"); Schedule schedule_2 = getSchedule(jobSpec_2, true); CompletableFuture<Void> dispatchResponse = new CompletableFuture<>(); dispatchResponse.completeExceptionally(new Exception()); when(scheduleManager.getOverdueJobs(partition)) .thenReturn( CompletableFuture.completedFuture( SchedulesQueryResponse.of(asList(schedule_1, schedule_2), false))); when(asyncClient.add(schedule_1.getJobSpec(), "test10")) .thenReturn(CompletableFuture.completedFuture(null)); when(asyncClient.add(schedule_2.getJobSpec(), "test10.1")).thenReturn(dispatchResponse); when(scheduleManager.updateStatus( 
schedule_2.withStatus(JobStatus.ACQUIRED), Schedule.JobStatus.ACQUIRED, Schedule.JobStatus.SCHEDULED)) .thenReturn(CompletableFuture.completedFuture(schedule_2)); CompletableFuture<Boolean> ret = worker.dispatchOverdue(partition); ret.get(1000, TimeUnit.MILLISECONDS); verify(scheduleManager, times(1)).getOverdueJobs(partition); verify(scheduleManager, times(1)) .updateStatus(schedule_1, JobStatus.SCHEDULED, JobStatus.ACQUIRED); verify(scheduleManager, times(1)) .updateStatus(schedule_2, JobStatus.SCHEDULED, JobStatus.ACQUIRED); verify(asyncClient, times(1)).add(schedule_1.getJobSpec(), "test10"); verify(asyncClient, times(1)).add(schedule_2.getJobSpec(), "test10.1"); verify(scheduleManager, times(1)) .deleteDispatchedJob(schedule_1.withStatus(JobStatus.ACQUIRED)); verify(scheduleManager, times(1)) .updateStatus( schedule_2.withStatus(JobStatus.ACQUIRED), JobStatus.ACQUIRED, JobStatus.SCHEDULED); verifyNoMoreInteractions(scheduleManager, asyncClient); } }
5,182
0
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler/ManagedInboundJobQueueConsumer.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license * information. */ package com.airbnb.dynein.scheduler; import com.airbnb.conveyor.async.AsyncConsumer; import com.airbnb.conveyor.async.AsyncSqsClient; import com.airbnb.dynein.api.InvalidTokenException; import com.airbnb.dynein.common.utils.ExecutorUtils; import io.dropwizard.lifecycle.Managed; import java.util.concurrent.ExecutorService; import lombok.AllArgsConstructor; import lombok.extern.slf4j.Slf4j; @Slf4j @AllArgsConstructor public class ManagedInboundJobQueueConsumer implements Managed { private static final long EXECUTOR_TIMEOUT_SECONDS = 15; private final AsyncSqsClient asyncClient; private final ExecutorService executorService; private final ScheduleManager scheduleManager; private final String inboundQueueName; @Override public void start() { log.info("Starting to consume from inbound job queue {}", inboundQueueName); executorService.execute(this::run); log.info("Started consuming from inbound job queue {}", inboundQueueName); } @Override public void stop() { log.info("Stopping consumption from inbound job queue {}", inboundQueueName); ExecutorUtils.twoPhaseExecutorShutdown(executorService, EXECUTOR_TIMEOUT_SECONDS); log.info("Stopped consumption from inbound job queue {}", inboundQueueName); } private void run() { while (!executorService.isShutdown()) { doRun(); } } private void doRun() { AsyncConsumer<String> consumer = (message, executor) -> { try { return scheduleManager.addJob(scheduleManager.makeSchedule(message)); } catch (InvalidTokenException e) { throw new RuntimeException(e); } }; asyncClient .consume(consumer, inboundQueueName) .whenComplete( (it, ex) -> { if (ex != null) { log.error("Error consuming from inbound queue {}", inboundQueueName, ex); } else if (!it) { log.debug("Inbound job queue {} is empty", inboundQueueName); } else { log.info("Successfully consumed job from inbound queue"); } }); } }
5,183
0
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler/Schedule.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license * information. */ package com.airbnb.dynein.scheduler; import lombok.AllArgsConstructor; import lombok.Value; import lombok.experimental.Wither; import lombok.extern.slf4j.Slf4j; @Slf4j @Value @AllArgsConstructor @Wither public class Schedule { private final String dateToken; private final JobStatus status; private final String jobSpec; private final String shardId; public enum JobStatus { SCHEDULED, ACQUIRED } }
5,184
0
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler/Scheduler.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license * information. */ package com.airbnb.dynein.scheduler; import com.airbnb.conveyor.async.AsyncSqsClient; import com.airbnb.dynein.api.DyneinJobSpec; import com.airbnb.dynein.api.JobSchedulePolicy; import com.airbnb.dynein.api.PrepareJobRequest; import com.airbnb.dynein.common.job.JobSpecTransformer; import com.airbnb.dynein.common.token.TokenManager; import com.airbnb.dynein.common.utils.TimeUtils; import com.airbnb.dynein.scheduler.metrics.Metrics; import java.time.Clock; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import javax.inject.Inject; import javax.inject.Named; import lombok.NonNull; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; @Slf4j @RequiredArgsConstructor(onConstructor = @__(@Inject)) public class Scheduler { @NonNull @Named(Constants.SQS_PRODUCER) private final AsyncSqsClient asyncClient; @NonNull @Named(Constants.INBOUND_QUEUE_NAME) private final String inboundQueueName; @NonNull private final JobSpecTransformer jobSpecTransformer; @NonNull private final TokenManager tokenManager; @NonNull private final ScheduleManager scheduleManager; @NonNull private final Clock clock; @NonNull private final Metrics metrics; public String prepareJob(@NonNull PrepareJobRequest request) { return tokenManager.generateToken( request.getAssociatedId(), null, request .getSchedulePolicyOptional() .flatMap(JobSchedulePolicy::getEpochMillisOptional) .orElse(null)); } public CompletableFuture<Void> createJob(@NonNull DyneinJobSpec jobSpec) { CompletableFuture<Void> ret = new CompletableFuture<>(); log.info("Create new job {}", jobSpec); String serializedJobSpec = jobSpecTransformer.serializeJobSpec(jobSpec); switch (jobSpec.getSchedulePolicy().getType()) { case IMMEDIATE: { ret = asyncClient .add(serializedJobSpec, jobSpec.getQueueName()) .whenComplete( (it, ex) -> { if (ex == null) { log.info( "DyneinJob {} 
triggered, enqueue to queue {}, type: IMMEDIATE", jobSpec.getJobToken(), jobSpec.getQueueName()); metrics.dispatchJob(jobSpec.getQueueName()); } else { log.error( "Failed to trigger DyneinJob {} to queue {}, type: IMMEDIATE", jobSpec.getJobToken(), jobSpec.getQueueName(), ex); } }); break; } case SCHEDULED: { ret = asyncClient .add(serializedJobSpec, inboundQueueName) .whenComplete( (it, ex) -> { if (ex == null) { log.info( "Added job spec {} to inbound job queue {}, with token {}", jobSpec.getName(), inboundQueueName, jobSpec.getJobToken()); metrics.enqueueToInboundQueue(jobSpec); } else { log.error( "Exception when adding job spec {} to inbound job queue {}, with token {}", jobSpec.getName(), inboundQueueName, jobSpec.getJobToken(), ex); } }); break; } case SQS_DELAYED: { long delayMs = TimeUtils.getDelayMillis(jobSpec.getSchedulePolicy(), clock); if (delayMs > TimeUnit.MINUTES.toMillis(15)) { ret.completeExceptionally( new IllegalArgumentException( "Delay " + delayMs + "ms is longer than 15 minutes, cannot be scheduled with SQS_DELAYED")); } else { ret = asyncClient .add( serializedJobSpec, jobSpec.getQueueName(), (int) TimeUnit.MILLISECONDS.toSeconds(delayMs)) .whenComplete( (it, ex) -> { if (ex == null) { log.info( "DyneinJob {} triggered, enqueue to queue {}, type: SQS_DELAYED", jobSpec.getJobToken(), jobSpec.getQueueName()); metrics.dispatchJob(jobSpec.getQueueName()); } else { log.error( "Failed to trigger DyneinJob {} to queue {}, type: SQS_DELAYED", jobSpec.getJobToken(), jobSpec.getQueueName(), ex); } }); } break; } default: ret.completeExceptionally( new UnsupportedOperationException("Unsupported Job Schedule Policy Type")); } return ret; } public CompletableFuture<Void> deleteJob(String token) { return scheduleManager.deleteJob(token); } public CompletableFuture<DyneinJobSpec> getJob(String token) { return scheduleManager .getJob(token) .thenApply(schedule -> jobSpecTransformer.deserializeJobSpec(schedule.getJobSpec())); } }
5,185
0
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler/ScheduleManagerFactory.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license * information. */ package com.airbnb.dynein.scheduler; import com.airbnb.dynein.common.job.JobSpecTransformer; import com.airbnb.dynein.common.token.TokenManager; import com.airbnb.dynein.scheduler.metrics.Metrics; import java.time.Clock; import javax.inject.Provider; import lombok.AccessLevel; import lombok.AllArgsConstructor; @AllArgsConstructor(access = AccessLevel.PROTECTED) public abstract class ScheduleManagerFactory implements Provider<ScheduleManager> { protected final int maxShardId; protected final TokenManager tokenManager; protected final JobSpecTransformer jobSpecTransformer; protected final Clock clock; protected final Metrics metrics; @Override public abstract ScheduleManager get(); }
5,186
0
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler/ScheduleManager.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license
 * information.
 */
package com.airbnb.dynein.scheduler;

import com.airbnb.dynein.api.DyneinJobSpec;
import com.airbnb.dynein.api.InvalidTokenException;
import com.airbnb.dynein.api.JobTokenPayload;
import com.airbnb.dynein.common.job.JobSpecTransformer;
import com.airbnb.dynein.common.token.TokenManager;
import com.airbnb.dynein.common.utils.TimeUtils;
import com.airbnb.dynein.scheduler.metrics.Metrics;
import java.io.Closeable;
import java.time.Clock;
import java.time.Instant;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Value;

/**
 * Storage-backend abstraction for scheduled jobs. Concrete implementations (e.g. the DynamoDB
 * one) persist {@link Schedule} rows partitioned into {@code maxShardId} logical shards and
 * expose async CRUD plus "what is overdue" queries. All operations return
 * {@link CompletableFuture} and must not block.
 */
@AllArgsConstructor(access = AccessLevel.PROTECTED)
public abstract class ScheduleManager implements Closeable {

  /**
   * Result of an overdue-jobs query: the page of schedules found plus a hint that the caller
   * should immediately poll again (i.e. the page was full / more results likely remain).
   */
  @Value
  @AllArgsConstructor(staticName = "of")
  public static class SchedulesQueryResponse {
    List<Schedule> schedules;
    boolean shouldImmediatelyQueryAgain;
  }

  // Number of logical partitions; shard ids produced below are in [0, maxShardId).
  protected final int maxShardId;
  protected final TokenManager tokenManager;
  protected final JobSpecTransformer jobSpecTransformer;
  // Injected clock so "now" is testable.
  protected final Clock clock;
  protected final Metrics metrics;

  /** Persists a new schedule row. */
  public abstract CompletableFuture<Void> addJob(Schedule schedule);

  /** Looks up a schedule by its opaque job token. */
  public abstract CompletableFuture<Schedule> getJob(String token);

  /** Deletes a not-yet-dispatched schedule by token. */
  public abstract CompletableFuture<Void> deleteJob(String token);

  /** Returns schedules in {@code partition} that are due strictly before {@code instant}. */
  public abstract CompletableFuture<SchedulesQueryResponse> getOverdueJobs(
      String partition, Instant instant);

  /** Moves jobs stuck in an intermediate state back to a dispatchable state. */
  public abstract CompletableFuture<Void> recoverStuckJobs(String partition, Instant instant);

  /** Convenience overload: overdue as of the injected clock's current time. */
  public final CompletableFuture<SchedulesQueryResponse> getOverdueJobs(String partition) {
    return getOverdueJobs(partition, Instant.now(clock));
  }

  /**
   * Atomically transitions a schedule from {@code oldStatus} to {@code newStatus}; fails if the
   * stored status is not {@code oldStatus}.
   */
  public abstract CompletableFuture<Schedule> updateStatus(
      Schedule schedule, Schedule.JobStatus oldStatus, Schedule.JobStatus newStatus);

  /** Removes a schedule row after its job has been dispatched. */
  public abstract CompletableFuture<Void> deleteDispatchedJob(Schedule schedule);

  /**
   * Builds a {@link Schedule} from a serialized job spec. The sort key is
   * {@code <dueEpochMillis>#<jobToken>} so rows order by due time, and the shard is
   * {@code floorMod(logicalShard, maxShardId)} (non-negative even for negative shards).
   *
   * @throws InvalidTokenException if the spec's embedded token cannot be decoded
   */
  protected Schedule makeSchedule(String serializedJobSpec) throws InvalidTokenException {
    DyneinJobSpec jobSpec = jobSpecTransformer.deserializeJobSpec(serializedJobSpec);
    String date =
        Long.toString(TimeUtils.getInstant(jobSpec.getSchedulePolicy(), clock).toEpochMilli());
    JobTokenPayload token = tokenManager.decodeToken(jobSpec.getJobToken());
    int shard = token.getLogicalShard();
    return new Schedule(
        String.format("%s#%s", date, jobSpec.getJobToken()),
        Schedule.JobStatus.SCHEDULED,
        serializedJobSpec,
        Integer.toString(Math.floorMod(shard, maxShardId)));
  }
}
5,187
0
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler/Constants.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license * information. */ package com.airbnb.dynein.scheduler; public class Constants { public static final String MAX_PARTITIONS = "maxPartitions"; public static final String SQS_CONSUMER = "sqsConsumer"; public static final String SQS_PRODUCER = "sqsProducer"; public static final String INBOUND_QUEUE_NAME = "inboundQueueName"; }
5,188
0
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler/metrics/NoOpMetricsImpl.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license * information. */ package com.airbnb.dynein.scheduler.metrics; public class NoOpMetricsImpl implements Metrics {}
5,189
0
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler/metrics/Metrics.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license * information. */ package com.airbnb.dynein.scheduler.metrics; import com.airbnb.dynein.api.DyneinJobSpec; public interface Metrics { default void storeJob(final long elapsedTime, DyneinJobSpec jobSpec, String partition) {} default void storeJobError(Throwable throwable, String queueName) {} default void queryOverdue(String partition) {} default void queryOverdueError(Throwable throwable, String partition) {} default void updateJobStatus(String oldStatus, String newStatus) {} default void updateJobStatusError(Throwable throwable, String oldStatus, String newStatus) {} default void deleteDispatchedJob(String queueName) {} default void deleteDispatchedJobError(Throwable throwable, String queueName) {} default void dispatchJob(String queueName) {} default void dispatchScheduledJob(long timeNs, DyneinJobSpec jobSpec, String partition) {} default void dispatchJobError(Throwable t, String queueName) {} default void enqueueToInboundQueue(DyneinJobSpec jobSpec) {} default void dispatchOverdue(long timeNs, String partition) {} default void updatePartitions(long timeNs) {} default void updatePartitionsError(Throwable t) {} default void countPartitionWorkers(int count) {} default void restartWorker(int workerId) {} default void recoverStuckJob(String partition, int successCount, int totalCount) {} default void recoverStuckJobsError(String partition) {} }
5,190
0
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler/config/SQSConfiguration.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license * information. */ package com.airbnb.dynein.scheduler.config; import java.net.URI; import lombok.Value; @Value public class SQSConfiguration { private static final String DEFAULT_REGION = "us-east-1"; private String region = DEFAULT_REGION; private URI endpoint; private TimeoutRegistry timeouts = new TimeoutRegistry(); private String inboundQueueName; public URI getEndpoint() { return endpoint == null ? URI.create("https://sqs." + region + ".amazonaws.com") : endpoint; } }
5,191
0
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler/config/WorkersConfiguration.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license * information. */ package com.airbnb.dynein.scheduler.config; import java.util.List; import lombok.Value; @Value public class WorkersConfiguration { public enum PartitionPolicyChoice { K8S, STATIC; } private int numberOfWorkers; private int threadPoolSize; private long recoverStuckJobsLookAheadMs; private boolean recoverStuckJobsAtWorkerRotation = true; private PartitionPolicyChoice partitionPolicy = PartitionPolicyChoice.K8S; private List<Integer> staticPartitionList = null; }
5,192
0
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler/config/DynamoDBConfiguration.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license * information. */ package com.airbnb.dynein.scheduler.config; import java.net.URI; import java.util.Optional; import lombok.Data; import lombok.NoArgsConstructor; @Data @NoArgsConstructor public class DynamoDBConfiguration { public static final String DEFAULT_REGION = "us-east-1"; public static final String DEFAULT_TABLE_NAME = "job_schedules"; private String region = DEFAULT_REGION; private URI endpoint = null; private String schedulesTableName = DEFAULT_TABLE_NAME; private int queryLimit = 10; public URI getEndpoint() { return Optional.ofNullable(endpoint) .orElseGet(() -> URI.create("https://dynamodb." + region + ".amazonaws.com")); } }
5,193
0
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler/config/TimeoutRegistry.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license * information. */ package com.airbnb.dynein.scheduler.config; import lombok.Data; import lombok.RequiredArgsConstructor; @Data @RequiredArgsConstructor public class TimeoutRegistry { // Execution timeout: total time allocated to the SQS client, include all of the retry attempts private int apiCallTimeout = 25 * 1000; // Request timeout: time allocated for each individual attempt private int apiCallAttemptTimeout = 2000; // Retry policy: scale factor for exponential backoff policy private int baseDelay = 100; // Retry policy: maximum backoff time for exponential backoff policy private int maxBackoffTime = 10 * 1000; }
5,194
0
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler/config/SchedulerConfiguration.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license * information. */ package com.airbnb.dynein.scheduler.config; import com.airbnb.dynein.scheduler.heartbeat.HeartbeatConfiguration; import javax.validation.constraints.NotNull; import lombok.Value; @Value public class SchedulerConfiguration { @NotNull private WorkersConfiguration workers; @NotNull private HeartbeatConfiguration heartbeat; @NotNull private SQSConfiguration sqs; @NotNull private DynamoDBConfiguration dynamoDb; private int maxPartitions; private boolean consumeInboundJobs; }
5,195
0
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler/dynamodb/DynamoDBUtils.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license
 * information.
 */
package com.airbnb.dynein.scheduler.dynamodb;

import static java.util.Arrays.asList;

import com.airbnb.dynein.api.JobTokenPayload;
import com.airbnb.dynein.scheduler.Schedule;
import com.airbnb.dynein.scheduler.Schedule.JobStatus;
import com.google.common.collect.ImmutableMap;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.stream.Collectors;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.experimental.UtilityClass;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

/**
 * Helpers for translating between {@link Schedule} objects and DynamoDB items, and for building
 * expression attribute name/value maps used in condition and filter expressions.
 */
@UtilityClass
class DynamoDBUtils {

  /** Table columns and their expression-attribute-name aliases. */
  @AllArgsConstructor
  @Getter
  public enum Attribute {
    SHARD_ID("shard_id", "#shardId"),
    DATE_TOKEN("date_token", "#dateToken"),
    JOB_STATUS("job_status", "#jobStatus"),
    JOB_SPEC("job_spec", "#jobSpec");

    public final String columnName;
    public final String attributeName;
  }

  /** Expression-attribute-value placeholders used across queries and updates. */
  @AllArgsConstructor
  public enum Value {
    SHARD_ID(":shardId"),
    JOB_STATUS(":jobStatus"),
    DATE_TOKEN(":dateToken"),
    OLD_STATUS(":oldStatus"),
    NEW_STATUS(":newStatus"),
    SCHEDULED_STATUS(":scheduled"),
    ACQUIRED_STATUS(":acquired");

    public final String name;
  }

  /** A single "#attr op :value" clause for condition/filter expressions. */
  @AllArgsConstructor(staticName = "of")
  public class Condition {
    private Attribute attribute;
    private String operator;
    private Value value;

    public String toString() {
      return String.format("%s %s %s", attribute.attributeName, operator, value.name);
    }
  }

  /** Wraps plain string values into DynamoDB {@link AttributeValue}s keyed by placeholder. */
  Map<String, AttributeValue> attributeValuesMap(Map<Value, String> input) {
    return input
        .entrySet()
        .stream()
        .collect(
            Collectors.toMap(
                (Entry<Value, String> e) -> e.getKey().name,
                (Entry<Value, String> e) -> AttributeValue.builder().s(e.getValue()).build()));
  }

  @Getter(lazy = true)
  private final Map<String, String> jobStatusAttributeMap = initJobStatusAttributeMap();

  @Getter(lazy = true)
  private final Map<String, String> getOverdueJobsAttributeMap =
      initGetOverdueJobsAttributeNameMap();

  // Builds an immutable alias -> column-name map for expressionAttributeNames.
  private Map<String, String> initAttributeNameMap(List<Attribute> attributes) {
    return ImmutableMap.copyOf(
        attributes
            .stream()
            .collect(Collectors.toMap(Attribute::getAttributeName, Attribute::getColumnName)));
  }

  private Map<String, String> initJobStatusAttributeMap() {
    return initAttributeNameMap(Collections.singletonList(Attribute.JOB_STATUS));
  }

  private Map<String, String> initGetOverdueJobsAttributeNameMap() {
    return initAttributeNameMap(
        asList(Attribute.SHARD_ID, Attribute.JOB_STATUS, Attribute.DATE_TOKEN));
  }

  /**
   * Reconstructs the primary key (shard_id, date_token) for a job from its token payload.
   *
   * <p>Fix: the shard must be computed with {@link Math#floorMod} to match
   * {@code ScheduleManager.makeSchedule}, which writes rows under
   * {@code floorMod(logicalShard, maxShardId)}. The previous {@code shard % maxShardId} yields a
   * negative value for negative logical shards, so those rows could never be found or deleted by
   * token. {@code floorMod} is identical to {@code %} for non-negative shards.
   */
  Map<String, AttributeValue> getPrimaryKeyFromToken(
      String token, JobTokenPayload payload, int maxShardId) {
    Map<String, AttributeValue> primaryKey = new HashMap<>();
    String date = String.valueOf(payload.getEpochMillis());
    int shard = payload.getLogicalShard();
    primaryKey.put(
        Attribute.SHARD_ID.columnName,
        AttributeValue.builder().s(Integer.toString(Math.floorMod(shard, maxShardId))).build());
    primaryKey.put(
        Attribute.DATE_TOKEN.columnName,
        AttributeValue.builder().s(String.format("%s#%s", date, token)).build());
    return primaryKey;
  }

  /** Converts a DynamoDB item back into a {@link Schedule}. */
  Schedule decodeSchedule(Map<String, AttributeValue> item) {
    return new Schedule(
        item.get(Attribute.DATE_TOKEN.columnName).s(),
        JobStatus.valueOf(item.get(Attribute.JOB_STATUS.columnName).s()),
        item.get(Attribute.JOB_SPEC.columnName).s(),
        item.get(Attribute.SHARD_ID.columnName).s());
  }

  /** Primary key (shard_id, date_token) of an already-materialized schedule. */
  Map<String, AttributeValue> getPrimaryKey(Schedule schedule) {
    Map<String, AttributeValue> item = new HashMap<>();
    item.put(
        Attribute.SHARD_ID.columnName, AttributeValue.builder().s(schedule.getShardId()).build());
    item.put(
        Attribute.DATE_TOKEN.columnName,
        AttributeValue.builder().s(schedule.getDateToken()).build());
    return item;
  }

  /** Full item representation of a schedule for PutItem. */
  Map<String, AttributeValue> toAttributeMap(Schedule schedule) {
    Map<String, AttributeValue> item = new HashMap<>();
    item.put(
        Attribute.SHARD_ID.columnName, AttributeValue.builder().s(schedule.getShardId()).build());
    item.put(
        Attribute.DATE_TOKEN.columnName,
        AttributeValue.builder().s(schedule.getDateToken()).build());
    item.put(
        Attribute.JOB_STATUS.columnName,
        AttributeValue.builder().s(schedule.getStatus().toString()).build());
    item.put(
        Attribute.JOB_SPEC.columnName,
        AttributeValue.builder().s(schedule.getJobSpec()).build());
    return item;
  }
}
5,196
0
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler/dynamodb/DynamoDBScheduleManagerFactory.java
/** * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license * information. */ package com.airbnb.dynein.scheduler.dynamodb; import com.airbnb.dynein.common.job.JobSpecTransformer; import com.airbnb.dynein.common.token.TokenManager; import com.airbnb.dynein.scheduler.Constants; import com.airbnb.dynein.scheduler.ScheduleManagerFactory; import com.airbnb.dynein.scheduler.config.DynamoDBConfiguration; import com.airbnb.dynein.scheduler.metrics.Metrics; import java.time.Clock; import javax.inject.Inject; import javax.inject.Named; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; public class DynamoDBScheduleManagerFactory extends ScheduleManagerFactory { private final DynamoDBConfiguration dynamoDBConfiguration; @Inject public DynamoDBScheduleManagerFactory( @Named(Constants.MAX_PARTITIONS) Integer maxShardId, TokenManager tokenManager, JobSpecTransformer jobSpecTransformer, Clock clock, Metrics metrics, DynamoDBConfiguration dynamoDBConfiguration) { super(maxShardId, tokenManager, jobSpecTransformer, clock, metrics); this.dynamoDBConfiguration = dynamoDBConfiguration; } private DynamoDbAsyncClient getDdbAsyncClient() { return DynamoDbAsyncClient.builder() .region(Region.of(dynamoDBConfiguration.getRegion())) .endpointOverride(dynamoDBConfiguration.getEndpoint()) .build(); } @Override public DynamoDBScheduleManager get() { return new DynamoDBScheduleManager( maxShardId, tokenManager, jobSpecTransformer, clock, metrics, getDdbAsyncClient(), dynamoDBConfiguration); } }
5,197
0
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler/dynamodb/DynamoDBScheduleManager.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license
 * information.
 */
package com.airbnb.dynein.scheduler.dynamodb;

import com.airbnb.dynein.api.DyneinJobSpec;
import com.airbnb.dynein.api.InvalidTokenException;
import com.airbnb.dynein.api.JobTokenPayload;
import com.airbnb.dynein.common.job.JobSpecTransformer;
import com.airbnb.dynein.common.token.TokenManager;
import com.airbnb.dynein.scheduler.Schedule;
import com.airbnb.dynein.scheduler.Schedule.JobStatus;
import com.airbnb.dynein.scheduler.ScheduleManager;
import com.airbnb.dynein.scheduler.config.DynamoDBConfiguration;
import com.airbnb.dynein.scheduler.dynamodb.DynamoDBUtils.Attribute;
import com.airbnb.dynein.scheduler.dynamodb.DynamoDBUtils.Condition;
import com.airbnb.dynein.scheduler.dynamodb.DynamoDBUtils.Value;
import com.airbnb.dynein.scheduler.metrics.Metrics;
import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableMap;
import io.reactivex.Flowable;
import java.time.Clock;
import java.time.Instant;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.stream.Collectors;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
import software.amazon.awssdk.services.dynamodb.model.DeleteItemRequest;
import software.amazon.awssdk.services.dynamodb.model.GetItemRequest;
import software.amazon.awssdk.services.dynamodb.model.PutItemRequest;
import software.amazon.awssdk.services.dynamodb.model.QueryRequest;
import software.amazon.awssdk.services.dynamodb.model.ReturnValue;
import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest;

/**
 * {@link ScheduleManager} backed by a DynamoDB table. Rows are keyed by
 * (shard_id, date_token) where date_token is {@code <dueEpochMillis>#<jobToken>}, so a range
 * query on the sort key finds overdue jobs. All operations are asynchronous via
 * {@link DynamoDbAsyncClient}.
 */
@Slf4j
public class DynamoDBScheduleManager extends ScheduleManager {
  private final DynamoDbAsyncClient ddbClient;
  private final DynamoDBConfiguration ddbConfig;

  DynamoDBScheduleManager(
      int maxShardId,
      TokenManager tokenManager,
      JobSpecTransformer jobSpecTransformer,
      Clock clock,
      Metrics metrics,
      DynamoDbAsyncClient ddbClient,
      DynamoDBConfiguration ddbConfig) {
    super(maxShardId, tokenManager, jobSpecTransformer, clock, metrics);
    this.ddbClient = ddbClient;
    this.ddbConfig = ddbConfig;
  }

  /**
   * Persists {@code schedule} with an unconditional PutItem, reporting timing and errors to
   * metrics. The outer try/catch covers synchronous failures (e.g. spec serialization); the
   * whenComplete covers the async PutItem outcome.
   */
  @Override
  public CompletableFuture<Void> addJob(Schedule schedule) {
    CompletableFuture<Void> ret = new CompletableFuture<>();
    Stopwatch stopwatch = Stopwatch.createStarted();
    DyneinJobSpec jobSpec = jobSpecTransformer.deserializeJobSpec(schedule.getJobSpec());
    try {
      Map<String, AttributeValue> item = DynamoDBUtils.toAttributeMap(schedule);
      PutItemRequest putItemRequest =
          PutItemRequest.builder().tableName(ddbConfig.getSchedulesTableName()).item(item).build();
      ddbClient
          .putItem(putItemRequest)
          .whenComplete(
              (it, ex) -> {
                if (ex != null) {
                  stopwatch.stop();
                  log.error("Error scheduling job {}", jobSpec.getJobToken(), ex);
                  metrics.storeJobError(ex, jobSpec.getQueueName());
                  ret.completeExceptionally(ex);
                } else {
                  stopwatch.stop();
                  log.info("Scheduled job {}", jobSpec.getJobToken());
                  long time = stopwatch.elapsed(TimeUnit.NANOSECONDS);
                  metrics.storeJob(time, jobSpec, schedule.getShardId());
                  ret.complete(null);
                }
              });
    } catch (Exception ex) {
      log.error("Error scheduling job {}", jobSpec.getJobToken(), ex);
      metrics.storeJobError(ex, jobSpec.getQueueName());
      ret.completeExceptionally(ex);
    }
    return ret;
  }

  // Collects the Flowable into a list, then applies `function` to every element and completes
  // when all resulting futures do. Note: materializes the whole stream before fanning out.
  private <T> CompletableFuture<Void> asyncForeach(
      Flowable<T> f, Function<T, CompletableFuture<Void>> function) {
    CompletableFuture<List<T>> fut = new CompletableFuture<>();
    f.toList().doOnError(fut::completeExceptionally).subscribe(fut::complete);
    return fut.thenCompose(
        items ->
            CompletableFuture.allOf(
                items.stream().map(function).toArray(CompletableFuture[]::new)));
  }

  /**
   * Finds jobs stuck in ACQUIRED (dispatch started but never finished) that are due before
   * {@code instant} and flips them back to SCHEDULED so they get re-dispatched. Per-job failures
   * are logged and counted, not propagated.
   */
  @Override
  public CompletableFuture<Void> recoverStuckJobs(String partition, Instant instant) {
    AtomicInteger totalCount = new AtomicInteger(0);
    AtomicInteger successCount = new AtomicInteger(0);
    log.info("Starting recoverStuckJobs for partition: {}, with lookAhead: {}", partition, instant);
    return asyncForeach(
            Flowable.fromPublisher(
                    ddbClient.queryPaginator(
                        makeQueryRequestForOverdueJobs(partition, instant, JobStatus.ACQUIRED)))
                .flatMap(queryResponse -> Flowable.fromIterable(queryResponse.items()))
                .map(DynamoDBUtils::decodeSchedule),
            schedule -> {
              log.info(
                  "Recovering job {} in partition {} from ACQUIRED to SCHEDULED",
                  schedule.getDateToken(),
                  schedule.getShardId());
              return updateStatus(schedule, JobStatus.ACQUIRED, JobStatus.SCHEDULED)
                  .handle(
                      (result, ex) -> {
                        totalCount.incrementAndGet();
                        if (ex == null && result.getStatus().equals(JobStatus.SCHEDULED)) {
                          successCount.incrementAndGet();
                          log.info(
                              "Successfully recovered job {} in partition {} from ACQUIRED to SCHEDULED",
                              schedule.getDateToken(),
                              schedule.getShardId());
                        } else {
                          log.warn(
                              "Failed to recover job {} in partition {} from ACQUIRED to SCHEDULED",
                              schedule.getDateToken(),
                              schedule.getShardId());
                        }
                        // handle() swallows the per-job exception so the batch keeps going.
                        return null;
                      });
            })
        .whenComplete(
            (result, ex) -> {
              log.info(
                  "Recovered {} of {} jobs from ACQUIRED state back to SCHEDULED state in partition {}.",
                  successCount.get(),
                  totalCount.get(),
                  partition);
              metrics.recoverStuckJob(partition, successCount.get(), totalCount.get());
            });
  }

  /**
   * Fetches a schedule by its token: the primary key is reconstructed from the token payload,
   * and only the job_spec column is read (the Schedule is rebuilt from the spec).
   * InvalidTokenException from decoding is returned as a failed future; one thrown while
   * rebuilding inside the async stage is wrapped in RuntimeException.
   */
  @Override
  public CompletableFuture<Schedule> getJob(String token) {
    try {
      JobTokenPayload tokenPayload = tokenManager.decodeToken(token);
      Map<String, AttributeValue> primaryKey =
          DynamoDBUtils.getPrimaryKeyFromToken(token, tokenPayload, maxShardId);
      GetItemRequest getItemRequest =
          GetItemRequest.builder()
              .key(primaryKey)
              .tableName(ddbConfig.getSchedulesTableName())
              .attributesToGet(Collections.singletonList(Attribute.JOB_SPEC.columnName))
              .build();
      return ddbClient
          .getItem(getItemRequest)
          .thenApply(
              item -> {
                try {
                  return makeSchedule(item.item().get(Attribute.JOB_SPEC.columnName).s());
                } catch (InvalidTokenException e) {
                  throw new RuntimeException(e);
                }
              });
    } catch (InvalidTokenException ex) {
      CompletableFuture<Schedule> future = new CompletableFuture<>();
      future.completeExceptionally(ex);
      return future;
    }
  }

  /**
   * Deletes a schedule by token, but only while it is still in SCHEDULED state (the condition
   * expression prevents deleting a job that is mid-dispatch).
   */
  @Override
  public CompletableFuture<Void> deleteJob(String token) {
    try {
      JobTokenPayload tokenPayload = tokenManager.decodeToken(token);
      Map<String, AttributeValue> primaryKey =
          DynamoDBUtils.getPrimaryKeyFromToken(token, tokenPayload, maxShardId);
      Map<String, AttributeValue> attributeValues =
          DynamoDBUtils.attributeValuesMap(
              ImmutableMap.of(Value.SCHEDULED_STATUS, Schedule.JobStatus.SCHEDULED.toString()));
      DeleteItemRequest deleteItemRequest =
          DeleteItemRequest.builder()
              .tableName(ddbConfig.getSchedulesTableName())
              .conditionExpression(
                  Condition.of(Attribute.JOB_STATUS, "=", Value.SCHEDULED_STATUS).toString())
              .key(primaryKey)
              .expressionAttributeNames(DynamoDBUtils.getJobStatusAttributeMap())
              .expressionAttributeValues(attributeValues)
              .build();
      return ddbClient
          .deleteItem(deleteItemRequest)
          .whenComplete(
              (response, ex) -> {
                if (ex != null) {
                  log.error(
                      "Error deleting job {} from table {}",
                      token,
                      ddbConfig.getSchedulesTableName(),
                      ex);
                } else {
                  log.info(
                      "Deleted job {} from table {}", token, ddbConfig.getSchedulesTableName());
                }
              })
          .thenApply(response -> null);
    } catch (InvalidTokenException ex) {
      CompletableFuture<Void> future = new CompletableFuture<>();
      future.completeExceptionally(ex);
      return future;
    }
  }

  // Builds the query: key condition selects the partition and all sort keys strictly before
  // "now" (date_token starts with the due epoch millis, so string < compares due times);
  // the filter keeps only rows in the requested status.
  private QueryRequest makeQueryRequestForOverdueJobs(
      String partition, Instant instant, JobStatus jobStatus) {
    String keyCondition =
        Condition.of(Attribute.SHARD_ID, "=", Value.SHARD_ID)
            + " and "
            + Condition.of(Attribute.DATE_TOKEN, "<", Value.DATE_TOKEN);
    String filter = Condition.of(Attribute.JOB_STATUS, "=", Value.JOB_STATUS).toString();
    String now = Long.toString(instant.toEpochMilli());
    Map<String, AttributeValue> attributeValues =
        DynamoDBUtils.attributeValuesMap(
            ImmutableMap.of(
                Value.SHARD_ID,
                partition,
                Value.DATE_TOKEN,
                now,
                Value.JOB_STATUS,
                jobStatus.toString()));
    return QueryRequest.builder()
        .tableName(ddbConfig.getSchedulesTableName())
        .keyConditionExpression(keyCondition)
        .filterExpression(filter)
        .expressionAttributeValues(attributeValues)
        .expressionAttributeNames(DynamoDBUtils.getGetOverdueJobsAttributeMap())
        .limit(ddbConfig.getQueryLimit())
        .build();
  }

  /**
   * Single-page query for SCHEDULED jobs due before {@code instant}. The response's
   * shouldImmediatelyQueryAgain flag is set when the page was full or DynamoDB reported more
   * data (non-empty lastEvaluatedKey), hinting the caller to poll again right away.
   */
  @Override
  public CompletableFuture<SchedulesQueryResponse> getOverdueJobs(
      @NonNull String partition, Instant instant) {
    return ddbClient
        .query(makeQueryRequestForOverdueJobs(partition, instant, JobStatus.SCHEDULED))
        .whenComplete(
            (res, ex) -> {
              if (ex == null) {
                log.info(
                    "Query for overdue jobs in partition {} at time {} successful",
                    partition,
                    instant.toEpochMilli());
                metrics.queryOverdue(partition);
              } else {
                log.error(
                    "Query for overdue jobs in partition {} at time {} failed",
                    partition,
                    instant.toEpochMilli(),
                    ex);
                metrics.queryOverdueError(ex, partition);
              }
            })
        .thenApply(
            queryResponse ->
                SchedulesQueryResponse.of(
                    queryResponse
                        .items()
                        .stream()
                        .map(DynamoDBUtils::decodeSchedule)
                        .collect(Collectors.toList()),
                    queryResponse.count() == ddbConfig.getQueryLimit()
                        || !queryResponse.lastEvaluatedKey().isEmpty()));
  }

  /**
   * Conditional status transition: succeeds only if the stored status equals {@code oldStatus}
   * (optimistic concurrency — losers of a race get a conditional-check failure). Returns the
   * schedule stamped with the status DynamoDB reports back via ReturnValue.UPDATED_NEW.
   */
  @Override
  public CompletableFuture<Schedule> updateStatus(
      Schedule schedule, JobStatus oldStatus, JobStatus newStatus) {
    Map<String, AttributeValue> primaryKey = DynamoDBUtils.getPrimaryKey(schedule);
    Map<String, AttributeValue> attributeValues =
        DynamoDBUtils.attributeValuesMap(
            ImmutableMap.of(
                Value.OLD_STATUS, oldStatus.toString(), Value.NEW_STATUS, newStatus.toString()));
    String updated = "SET " + Condition.of(Attribute.JOB_STATUS, "=", Value.NEW_STATUS);
    UpdateItemRequest updateItemRequest =
        UpdateItemRequest.builder()
            .tableName(ddbConfig.getSchedulesTableName())
            .key(primaryKey)
            .conditionExpression(
                Condition.of(Attribute.JOB_STATUS, "=", Value.OLD_STATUS).toString())
            .expressionAttributeNames(DynamoDBUtils.getJobStatusAttributeMap())
            .expressionAttributeValues(attributeValues)
            .updateExpression(updated)
            .returnValues(ReturnValue.UPDATED_NEW)
            .build();
    return ddbClient
        .updateItem(updateItemRequest)
        .whenComplete(
            (response, exception) -> {
              DyneinJobSpec jobSpec = jobSpecTransformer.deserializeJobSpec(schedule.getJobSpec());
              if (exception != null) {
                log.error(
                    "Failed to set job {} to {}",
                    jobSpec.getJobToken(),
                    newStatus.toString(),
                    exception);
                metrics.updateJobStatusError(exception, oldStatus.toString(), newStatus.toString());
              } else {
                log.info("Set job {} to {}", jobSpec.getJobToken(), newStatus.toString());
                metrics.updateJobStatus(oldStatus.toString(), newStatus.toString());
              }
            })
        .thenApply(
            response -> {
              JobStatus updatedStatus =
                  Optional.ofNullable(response.attributes().get(Attribute.JOB_STATUS.columnName))
                      .map(attr -> JobStatus.valueOf(attr.s()))
                      .orElseThrow(
                          () ->
                              new IllegalStateException(
                                  "Status update successful but status isn't returned."));
              return schedule.withStatus(updatedStatus);
            });
  }

  /**
   * Removes a schedule after successful dispatch; conditional on the row still being ACQUIRED
   * so only the dispatcher that owns the job can delete it.
   */
  @Override
  public CompletableFuture<Void> deleteDispatchedJob(Schedule schedule) {
    Map<String, AttributeValue> primaryKey = DynamoDBUtils.getPrimaryKey(schedule);
    Map<String, AttributeValue> attributeValues =
        DynamoDBUtils.attributeValuesMap(
            ImmutableMap.of(Value.ACQUIRED_STATUS, Schedule.JobStatus.ACQUIRED.toString()));
    DeleteItemRequest deleteItemRequest =
        DeleteItemRequest.builder()
            .tableName(ddbConfig.getSchedulesTableName())
            .conditionExpression(
                Condition.of(Attribute.JOB_STATUS, "=", Value.ACQUIRED_STATUS).toString())
            .key(primaryKey)
            .expressionAttributeNames(DynamoDBUtils.getJobStatusAttributeMap())
            .expressionAttributeValues(attributeValues)
            .build();
    return ddbClient
        .deleteItem(deleteItemRequest)
        .whenComplete(
            (response, exception) -> {
              DyneinJobSpec jobSpec = jobSpecTransformer.deserializeJobSpec(schedule.getJobSpec());
              if (exception != null) {
                log.error(
                    "Error deleting job {} from table {}",
                    jobSpec.getJobToken(),
                    ddbConfig.getSchedulesTableName(),
                    exception);
                metrics.deleteDispatchedJobError(exception, jobSpec.getQueueName());
              } else {
                log.info(
                    "Deleted job {} from table {}",
                    jobSpec.getJobToken(),
                    ddbConfig.getSchedulesTableName());
                metrics.deleteDispatchedJob(jobSpec.getQueueName());
              }
            })
        .thenApply(response -> null);
  }

  /** Releases the underlying DynamoDB client. */
  @Override
  public void close() {
    ddbClient.close();
  }
}
5,198
0
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler
Create_ds/dynein/dynein/src/main/java/com/airbnb/dynein/scheduler/heartbeat/HeartbeatManager.java
/**
 * Copyright 2019 Airbnb. Licensed under Apache-2.0. See LICENSE in the project root for license
 * information.
 */
package com.airbnb.dynein.scheduler.heartbeat;

import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import io.dropwizard.lifecycle.Managed;
import java.time.Clock;
import java.time.Instant;
import java.util.List;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import javax.inject.Inject;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;

/**
 * Watchdog for partition workers. Workers post {@link PartitionWorkerIterationEvent}s to the
 * event bus on every iteration; this manager records the latest check-in time per worker index
 * and periodically scans for workers whose last check-in is older than the configured stall
 * tolerance, posting a {@link PartitionWorkerStalledEvent} with the stalled indices.
 *
 * <p>NOTE(review): this listener is never registered with the event bus here (only unregistered
 * in {@link #stop()}) — presumably the wiring that constructs it registers it; confirm.
 */
@Slf4j
@AllArgsConstructor(onConstructor = @__(@Inject))
public class HeartbeatManager implements Managed {
  // How long stop() waits for the scan task to finish before forcing shutdown.
  private static final long EXECUTOR_TIMEOUT_SECONDS = 15;

  // worker index -> epoch millis of its most recent check-in.
  private final ConcurrentHashMap<Integer, Long> checkIns;
  private final ScheduledExecutorService executorService;
  private final EventBus eventBus;
  // Injected clock so "now" is testable.
  private final Clock clock;
  private final HeartbeatConfiguration heartbeatConfiguration;

  /** Starts the periodic stall scan at the configured monitor interval. */
  @Override
  public void start() {
    log.info("Attempting to start heartbeat");
    executorService.scheduleWithFixedDelay(
        this::scanMap, 0, heartbeatConfiguration.getMonitorInterval(), TimeUnit.MILLISECONDS);
    log.info("Started heartbeat");
  }

  /**
   * Stops the scan task and unregisters from the event bus. Waits up to
   * {@link #EXECUTOR_TIMEOUT_SECONDS} for a graceful shutdown before forcing it.
   */
  @Override
  public void stop() {
    log.info("Stopping heartbeat");
    executorService.shutdown();
    eventBus.unregister(this);
    try {
      if (!executorService.awaitTermination(EXECUTOR_TIMEOUT_SECONDS, TimeUnit.SECONDS)) {
        executorService.shutdownNow();
      }
    } catch (InterruptedException ex) {
      // Fix: previously threw a bare RuntimeException, losing both the cause and the thread's
      // interrupt status. Force shutdown, restore the flag, and preserve the cause.
      executorService.shutdownNow();
      Thread.currentThread().interrupt();
      throw new RuntimeException(ex);
    }
    log.info("Stopped heartbeat");
  }

  /** Event-bus callback: records the worker's latest check-in timestamp. */
  @Subscribe
  public void workerCheckIn(PartitionWorkerIterationEvent iterationEvent) {
    log.debug(
        "Checking in index {} at time {}",
        iterationEvent.getIndex(),
        iterationEvent.getTimestamp());
    checkIns.put(iterationEvent.getIndex(), iterationEvent.getTimestamp());
  }

  // Scans all check-ins and posts a stalled event for workers whose last check-in is older than
  // now - stallTolerance.
  private void scanMap() {
    long now = Instant.now(clock).toEpochMilli();
    List<Integer> stalled =
        checkIns
            .entrySet()
            .stream()
            .filter(entry -> entry.getValue() < now - heartbeatConfiguration.getStallTolerance())
            .map(Entry::getKey)
            .collect(Collectors.toList());
    if (!stalled.isEmpty()) {
      eventBus.post(new PartitionWorkerStalledEvent(stalled));
    }
    log.debug("Completed scanning PartitionWorker check in times.");
  }
}
5,199